hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f70d7e3d03834e47c1b983c805108d7a309a34af
759
py
Python
kivyeExemplosDocumentacao/kivy_venv/Scripts/rst2html4.py
wemerson-henrique/kivy
3cb6061a2d19b01e86c3738206f30c8a853763d4
[ "MIT" ]
null
null
null
kivyeExemplosDocumentacao/kivy_venv/Scripts/rst2html4.py
wemerson-henrique/kivy
3cb6061a2d19b01e86c3738206f30c8a853763d4
[ "MIT" ]
null
null
null
kivyeExemplosDocumentacao/kivy_venv/Scripts/rst2html4.py
wemerson-henrique/kivy
3cb6061a2d19b01e86c3738206f30c8a853763d4
[ "MIT" ]
null
null
null
#!C:\Users\WEMERSON\Documents\kivy\kivy_venv\Scripts\python.exe # $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ A minimal front end to the Docutils Publisher, producing (X)HTML. The output conforms to XHTML 1.0 transitional and almost to HTML 4.01 transitional (except for closing empty tags). """ try: import locale locale.setlocale(locale.LC_ALL, '') except: pass from docutils.core import publish_cmdline, default_description description = ('Generates (X)HTML documents from standalone reStructuredText ' 'sources. ' + default_description) publish_cmdline(writer_name='html4', description=description)
28.111111
78
0.748353
try: import locale locale.setlocale(locale.LC_ALL, '') except: pass from docutils.core import publish_cmdline, default_description description = ('Generates (X)HTML documents from standalone reStructuredText ' 'sources. ' + default_description) publish_cmdline(writer_name='html4', description=description)
true
true
f70d7ed397a7bcf7151bee4cf4e26cefe9f07c32
4,432
py
Python
.vim/bundle/ultisnips/pythonx/UltiSnips/text_objects/_snippet_instance.py
geecoo/vim
efe3bdc2edb3419deafe9af676c4f6c9107c9edf
[ "MIT" ]
null
null
null
.vim/bundle/ultisnips/pythonx/UltiSnips/text_objects/_snippet_instance.py
geecoo/vim
efe3bdc2edb3419deafe9af676c4f6c9107c9edf
[ "MIT" ]
null
null
null
.vim/bundle/ultisnips/pythonx/UltiSnips/text_objects/_snippet_instance.py
geecoo/vim
efe3bdc2edb3419deafe9af676c4f6c9107c9edf
[ "MIT" ]
null
null
null
#!/usr/bin/env python # encoding: utf-8 """A Snippet instance is an instance of a Snippet Definition. That is, when the user expands a snippet, a SnippetInstance is created to keep track of the corresponding TextObjects. The Snippet itself is also a TextObject. """ from UltiSnips import _vim from UltiSnips.position import Position from UltiSnips.text_objects._base import EditableTextObject, \ NoneditableTextObject class SnippetInstance(EditableTextObject): """See module docstring.""" # pylint:disable=protected-access def __init__(self, snippet, parent, initial_text, start, end, visual_content, last_re, globals): if start is None: start = Position(0, 0) if end is None: end = Position(0, 0) self.snippet = snippet self._cts = 0 self.locals = {'match': last_re} self.globals = globals self.visual_content = visual_content EditableTextObject.__init__(self, parent, start, end, initial_text) def replace_initial_text(self): """Puts the initial text of all text elements into Vim.""" def _place_initial_text(obj): """recurses on the children to do the work.""" obj.overwrite() if isinstance(obj, EditableTextObject): for child in obj._children: _place_initial_text(child) _place_initial_text(self) def replay_user_edits(self, cmds): """Replay the edits the user has done to keep endings of our Text objects in sync with reality.""" for cmd in cmds: self._do_edit(cmd) def update_textobjects(self): """Update the text objects that should change automagically after the users edits have been replayed. This might also move the Cursor """ vc = _VimCursor(self) done = set() not_done = set() def _find_recursive(obj): """Finds all text objects and puts them into 'not_done'.""" if isinstance(obj, EditableTextObject): for child in obj._children: _find_recursive(child) not_done.add(obj) _find_recursive(self) counter = 10 while (done != not_done) and counter: # Order matters for python locals! for obj in sorted(not_done - done): if obj._update(done): done.add(obj) counter -= 1 if not counter: raise RuntimeError( 'The snippets content did not converge: Check for Cyclic ' 'dependencies or random strings in your snippet. You can use ' "'if not snip.c' to make sure to only expand random output " 'once.') vc.to_vim() self._del_child(vc) def select_next_tab(self, backwards=False): """Selects the next tabstop or the previous if 'backwards' is True.""" if self._cts is None: return if backwards: cts_bf = self._cts res = self._get_prev_tab(self._cts) if res is None: self._cts = cts_bf return self._tabstops.get(self._cts, None) self._cts, ts = res return ts else: res = self._get_next_tab(self._cts) if res is None: self._cts = None return self._tabstops.get(0, None) else: self._cts, ts = res return ts return self._tabstops[self._cts] def _get_tabstop(self, requester, no): # SnippetInstances are completely self contained, therefore, we do not # need to ask our parent for Tabstops cached_parent = self._parent self._parent = None rv = EditableTextObject._get_tabstop(self, requester, no) self._parent = cached_parent return rv class _VimCursor(NoneditableTextObject): """Helper class to keep track of the Vim Cursor when text objects expand and move.""" def __init__(self, parent): NoneditableTextObject.__init__( self, parent, _vim.buf.cursor, _vim.buf.cursor, tiebreaker=Position(-1, -1)) def to_vim(self): """Moves the cursor in the Vim to our position.""" assert self._start == self._end _vim.buf.cursor = self._start
32.115942
78
0.59657
from UltiSnips import _vim from UltiSnips.position import Position from UltiSnips.text_objects._base import EditableTextObject, \ NoneditableTextObject class SnippetInstance(EditableTextObject): def __init__(self, snippet, parent, initial_text, start, end, visual_content, last_re, globals): if start is None: start = Position(0, 0) if end is None: end = Position(0, 0) self.snippet = snippet self._cts = 0 self.locals = {'match': last_re} self.globals = globals self.visual_content = visual_content EditableTextObject.__init__(self, parent, start, end, initial_text) def replace_initial_text(self): def _place_initial_text(obj): obj.overwrite() if isinstance(obj, EditableTextObject): for child in obj._children: _place_initial_text(child) _place_initial_text(self) def replay_user_edits(self, cmds): for cmd in cmds: self._do_edit(cmd) def update_textobjects(self): vc = _VimCursor(self) done = set() not_done = set() def _find_recursive(obj): if isinstance(obj, EditableTextObject): for child in obj._children: _find_recursive(child) not_done.add(obj) _find_recursive(self) counter = 10 while (done != not_done) and counter: for obj in sorted(not_done - done): if obj._update(done): done.add(obj) counter -= 1 if not counter: raise RuntimeError( 'The snippets content did not converge: Check for Cyclic ' 'dependencies or random strings in your snippet. You can use ' "'if not snip.c' to make sure to only expand random output " 'once.') vc.to_vim() self._del_child(vc) def select_next_tab(self, backwards=False): if self._cts is None: return if backwards: cts_bf = self._cts res = self._get_prev_tab(self._cts) if res is None: self._cts = cts_bf return self._tabstops.get(self._cts, None) self._cts, ts = res return ts else: res = self._get_next_tab(self._cts) if res is None: self._cts = None return self._tabstops.get(0, None) else: self._cts, ts = res return ts return self._tabstops[self._cts] def _get_tabstop(self, requester, no): cached_parent = self._parent self._parent = None rv = EditableTextObject._get_tabstop(self, requester, no) self._parent = cached_parent return rv class _VimCursor(NoneditableTextObject): def __init__(self, parent): NoneditableTextObject.__init__( self, parent, _vim.buf.cursor, _vim.buf.cursor, tiebreaker=Position(-1, -1)) def to_vim(self): assert self._start == self._end _vim.buf.cursor = self._start
true
true
f70d7f4078b609a962c2e557efc5fb82fa7f88e4
1,371
py
Python
finder.py
FelixPcll/Open-CV-Sandbox
1a9a304f9f209cecc3b266b54b59a460e0f4d6e8
[ "Apache-2.0" ]
null
null
null
finder.py
FelixPcll/Open-CV-Sandbox
1a9a304f9f209cecc3b266b54b59a460e0f4d6e8
[ "Apache-2.0" ]
null
null
null
finder.py
FelixPcll/Open-CV-Sandbox
1a9a304f9f209cecc3b266b54b59a460e0f4d6e8
[ "Apache-2.0" ]
null
null
null
"""Docstring """ import cv2 #import numpy as np def find_in_face(haarcascade, rec=False): """Press 'k' for quit """ face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') smile_cascade = cv2.CascadeClassifier(haarcascade) cap = cv2.VideoCapture(0) if rec: fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 480)) while True: _, original = cap.read() gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (fx, fy, fw, fh) in faces: cv2.rectangle(original, pt1=(fx, fy), pt2=( fx+fw, fy+fh), color=(0, 0, 255), thickness=2) roi_gray = gray[fy:fy+fh, fx:fx+fw] roi_color = original[fy:fy+fh, fx:fx+fw] smiles = smile_cascade.detectMultiScale(roi_gray) for (sx, sy, sw, sh) in smiles: cv2.rectangle(roi_color, pt1=(sx, sy), pt2=( sx+sw, sy+sh), color=(255, 0, 0), thickness=2) if rec: out.write(original) cv2.imshow('Image', original) if cv2.waitKey(1) & 0xFF == ord('k'): break cap.release() if rec: out.release() cv2.destroyAllWindows() find_in_face('haarcascade_eye.xml', rec=False)
26.882353
79
0.57841
import cv2 def find_in_face(haarcascade, rec=False): face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') smile_cascade = cv2.CascadeClassifier(haarcascade) cap = cv2.VideoCapture(0) if rec: fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 480)) while True: _, original = cap.read() gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (fx, fy, fw, fh) in faces: cv2.rectangle(original, pt1=(fx, fy), pt2=( fx+fw, fy+fh), color=(0, 0, 255), thickness=2) roi_gray = gray[fy:fy+fh, fx:fx+fw] roi_color = original[fy:fy+fh, fx:fx+fw] smiles = smile_cascade.detectMultiScale(roi_gray) for (sx, sy, sw, sh) in smiles: cv2.rectangle(roi_color, pt1=(sx, sy), pt2=( sx+sw, sy+sh), color=(255, 0, 0), thickness=2) if rec: out.write(original) cv2.imshow('Image', original) if cv2.waitKey(1) & 0xFF == ord('k'): break cap.release() if rec: out.release() cv2.destroyAllWindows() find_in_face('haarcascade_eye.xml', rec=False)
true
true
f70d7f822489ab016bc95880bbd0a5fbffc9331c
1,308
py
Python
model/protonet.py
ashblib/protocell
037c3aa6ab2250eae09889729d512c243518e282
[ "MIT" ]
null
null
null
model/protonet.py
ashblib/protocell
037c3aa6ab2250eae09889729d512c243518e282
[ "MIT" ]
null
null
null
model/protonet.py
ashblib/protocell
037c3aa6ab2250eae09889729d512c243518e282
[ "MIT" ]
null
null
null
import torch.nn as nn import torch class ProtoNetBig(nn.Module): def __init__(self, x_dim=23433, hid_dim=[2000, 1000, 500, 250], z_dim=100): super(ProtoNetBig, self).__init__() self.linear0 = nn.Linear(x_dim, hid_dim[0]) self.bn1 = nn.BatchNorm1d(hid_dim[0]) self.linear1 = nn.Linear(hid_dim[0], hid_dim[1]) self.bn2 = nn.BatchNorm1d(hid_dim[1]) self.linear2 = nn.Linear(hid_dim[1] + hid_dim[0], hid_dim[2]) self.bn3 = nn.BatchNorm1d(hid_dim[2]) self.linear3 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2], hid_dim[3]) self.bn4 = nn.BatchNorm1d(hid_dim[3]) self.linear4 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2] + hid_dim[3], z_dim) self.relu = nn.ReLU(inplace=True) self.dropout = nn.Dropout(inplace=True) def forward(self, x): out = self.dropout(self.bn1(self.relu(self.linear0(x)))) out1 = self.dropout(self.bn2(self.relu(self.linear1(out)))) out2 = torch.cat([out, out1], 1) out3 = self.dropout(self.bn3(self.relu(self.linear2(out2)))) out4 = torch.cat([out, out1, out3], 1) out5 = self.dropout(self.bn4(self.relu(self.linear3(out4)))) out6 = torch.cat([out, out1, out3, out5], 1) out7 = self.linear4(out6) return out7
48.444444
90
0.619266
import torch.nn as nn import torch class ProtoNetBig(nn.Module): def __init__(self, x_dim=23433, hid_dim=[2000, 1000, 500, 250], z_dim=100): super(ProtoNetBig, self).__init__() self.linear0 = nn.Linear(x_dim, hid_dim[0]) self.bn1 = nn.BatchNorm1d(hid_dim[0]) self.linear1 = nn.Linear(hid_dim[0], hid_dim[1]) self.bn2 = nn.BatchNorm1d(hid_dim[1]) self.linear2 = nn.Linear(hid_dim[1] + hid_dim[0], hid_dim[2]) self.bn3 = nn.BatchNorm1d(hid_dim[2]) self.linear3 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2], hid_dim[3]) self.bn4 = nn.BatchNorm1d(hid_dim[3]) self.linear4 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2] + hid_dim[3], z_dim) self.relu = nn.ReLU(inplace=True) self.dropout = nn.Dropout(inplace=True) def forward(self, x): out = self.dropout(self.bn1(self.relu(self.linear0(x)))) out1 = self.dropout(self.bn2(self.relu(self.linear1(out)))) out2 = torch.cat([out, out1], 1) out3 = self.dropout(self.bn3(self.relu(self.linear2(out2)))) out4 = torch.cat([out, out1, out3], 1) out5 = self.dropout(self.bn4(self.relu(self.linear3(out4)))) out6 = torch.cat([out, out1, out3, out5], 1) out7 = self.linear4(out6) return out7
true
true
f70d7ff4243d506dde9a8af610c28d0217c59943
464
py
Python
riverswim_variants/__init__.py
RevanMacQueen/Riverswim-Variants
a3593c6b2960185e1815b79aba5a2ccdb6ff9ea7
[ "MIT" ]
null
null
null
riverswim_variants/__init__.py
RevanMacQueen/Riverswim-Variants
a3593c6b2960185e1815b79aba5a2ccdb6ff9ea7
[ "MIT" ]
null
null
null
riverswim_variants/__init__.py
RevanMacQueen/Riverswim-Variants
a3593c6b2960185e1815b79aba5a2ccdb6ff9ea7
[ "MIT" ]
1
2022-03-08T05:29:00.000Z
2022-03-08T05:29:00.000Z
from gym.envs.registration import register register( id='scaled-riverswim-v0', entry_point='riverswim_variants.envs:ScaledRiverSwimEnv', max_episode_steps=20, ) register( id='stochastic-riverswim-v0', entry_point='riverswim_variants.envs:StochasticRiverSwimEnv', max_episode_steps=20, ) register( id='skewed-stochastic-riverswim-v0', entry_point='riverswim_variants.envs:SkewedStochasticRiverSwimEnv', max_episode_steps=20, )
24.421053
71
0.767241
from gym.envs.registration import register register( id='scaled-riverswim-v0', entry_point='riverswim_variants.envs:ScaledRiverSwimEnv', max_episode_steps=20, ) register( id='stochastic-riverswim-v0', entry_point='riverswim_variants.envs:StochasticRiverSwimEnv', max_episode_steps=20, ) register( id='skewed-stochastic-riverswim-v0', entry_point='riverswim_variants.envs:SkewedStochasticRiverSwimEnv', max_episode_steps=20, )
true
true
f70d80da40a72c207edfcfc1509e820846f0b731
7,620
py
Python
sold2/model/model_util.py
XiaoJake/SOLD2
ddd36788c112136be2975ee29b096df979571bb2
[ "MIT" ]
null
null
null
sold2/model/model_util.py
XiaoJake/SOLD2
ddd36788c112136be2975ee29b096df979571bb2
[ "MIT" ]
null
null
null
sold2/model/model_util.py
XiaoJake/SOLD2
ddd36788c112136be2975ee29b096df979571bb2
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import torch.nn.init as init from .nets.backbone import HourglassBackbone, SuperpointBackbone from .nets.junction_decoder import SuperpointDecoder from .nets.heatmap_decoder import PixelShuffleDecoder from .nets.descriptor_decoder import SuperpointDescriptor def get_model(model_cfg=None, loss_weights=None, mode="train"): """ Get model based on the model configuration. """ # Check dataset config is given if model_cfg is None: raise ValueError("[Error] The model config is required!") # List the supported options here print("\n\n\t--------Initializing model----------") supported_arch = ["simple"] if not model_cfg["model_architecture"] in supported_arch: raise ValueError( "[Error] The model architecture is not in supported arch!") if model_cfg["model_architecture"] == "simple": model = SOLD2Net(model_cfg) else: raise ValueError( "[Error] The model architecture is not in supported arch!") # Optionally register loss weights to the model if mode == "train": if loss_weights is not None: for param_name, param in loss_weights.items(): if isinstance(param, nn.Parameter): print("\t [Debug] Adding %s with value %f to model" % (param_name, param.item())) model.register_parameter(param_name, param) else: raise ValueError( "[Error] the loss weights can not be None in dynamic weighting mode during training.") # Display some summary info. print("\tModel architecture: %s" % model_cfg["model_architecture"]) print("\tBackbone: %s" % model_cfg["backbone"]) print("\tJunction decoder: %s" % model_cfg["junction_decoder"]) print("\tHeatmap decoder: %s" % model_cfg["heatmap_decoder"]) print("\t-------------------------------------") return model class SOLD2Net(nn.Module): """ Full network for SOLD². """ def __init__(self, model_cfg): super(SOLD2Net, self).__init__() self.name = model_cfg["model_name"] self.cfg = model_cfg # List supported network options self.supported_backbone = ["lcnn", "superpoint"] self.backbone_net, self.feat_channel = self.get_backbone() # List supported junction decoder options self.supported_junction_decoder = ["superpoint_decoder"] self.junction_decoder = self.get_junction_decoder() # List supported heatmap decoder options self.supported_heatmap_decoder = ["pixel_shuffle", "pixel_shuffle_single"] self.heatmap_decoder = self.get_heatmap_decoder() # List supported descriptor decoder options if "descriptor_decoder" in self.cfg: self.supported_descriptor_decoder = ["superpoint_descriptor"] self.descriptor_decoder = self.get_descriptor_decoder() # Initialize the model weights self.apply(weight_init) def forward(self, input_images): # The backbone features = self.backbone_net(input_images) # junction decoder junctions = self.junction_decoder(features) # heatmap decoder heatmaps = self.heatmap_decoder(features) outputs = {"junctions": junctions, "heatmap": heatmaps} # Descriptor decoder if "descriptor_decoder" in self.cfg: outputs["descriptors"] = self.descriptor_decoder(features) return outputs def get_backbone(self): """ Retrieve the backbone encoder network. """ if not self.cfg["backbone"] in self.supported_backbone: raise ValueError( "[Error] The backbone selection is not supported.") # lcnn backbone (stacked hourglass) if self.cfg["backbone"] == "lcnn": backbone_cfg = self.cfg["backbone_cfg"] backbone = HourglassBackbone(**backbone_cfg) feat_channel = 256 elif self.cfg["backbone"] == "superpoint": backbone_cfg = self.cfg["backbone_cfg"] backbone = SuperpointBackbone() feat_channel = 128 else: raise ValueError( "[Error] The backbone selection is not supported.") return backbone, feat_channel def get_junction_decoder(self): """ Get the junction decoder. """ if (not self.cfg["junction_decoder"] in self.supported_junction_decoder): raise ValueError( "[Error] The junction decoder selection is not supported.") # superpoint decoder if self.cfg["junction_decoder"] == "superpoint_decoder": decoder = SuperpointDecoder(self.feat_channel, self.cfg["backbone"]) else: raise ValueError( "[Error] The junction decoder selection is not supported.") return decoder def get_heatmap_decoder(self): """ Get the heatmap decoder. """ if not self.cfg["heatmap_decoder"] in self.supported_heatmap_decoder: raise ValueError( "[Error] The heatmap decoder selection is not supported.") # Pixel_shuffle decoder if self.cfg["heatmap_decoder"] == "pixel_shuffle": if self.cfg["backbone"] == "lcnn": decoder = PixelShuffleDecoder(self.feat_channel, num_upsample=2) elif self.cfg["backbone"] == "superpoint": decoder = PixelShuffleDecoder(self.feat_channel, num_upsample=3) else: raise ValueError("[Error] Unknown backbone option.") # Pixel_shuffle decoder with single channel output elif self.cfg["heatmap_decoder"] == "pixel_shuffle_single": if self.cfg["backbone"] == "lcnn": decoder = PixelShuffleDecoder( self.feat_channel, num_upsample=2, output_channel=1) elif self.cfg["backbone"] == "superpoint": decoder = PixelShuffleDecoder( self.feat_channel, num_upsample=3, output_channel=1) else: raise ValueError("[Error] Unknown backbone option.") else: raise ValueError( "[Error] The heatmap decoder selection is not supported.") return decoder def get_descriptor_decoder(self): """ Get the descriptor decoder. """ if (not self.cfg["descriptor_decoder"] in self.supported_descriptor_decoder): raise ValueError( "[Error] The descriptor decoder selection is not supported.") # SuperPoint descriptor if self.cfg["descriptor_decoder"] == "superpoint_descriptor": decoder = SuperpointDescriptor(self.feat_channel) else: raise ValueError( "[Error] The descriptor decoder selection is not supported.") return decoder def weight_init(m): """ Weight initialization function. """ # Conv2D if isinstance(m, nn.Conv2d): init.xavier_normal_(m.weight.data) if m.bias is not None: init.normal_(m.bias.data) # Batchnorm elif isinstance(m, nn.BatchNorm2d): init.normal_(m.weight.data, mean=1, std=0.02) init.constant_(m.bias.data, 0) # Linear elif isinstance(m, nn.Linear): init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) else: pass
37.352941
102
0.608793
import torch import torch.nn as nn import torch.nn.init as init from .nets.backbone import HourglassBackbone, SuperpointBackbone from .nets.junction_decoder import SuperpointDecoder from .nets.heatmap_decoder import PixelShuffleDecoder from .nets.descriptor_decoder import SuperpointDescriptor def get_model(model_cfg=None, loss_weights=None, mode="train"): if model_cfg is None: raise ValueError("[Error] The model config is required!") print("\n\n\t--------Initializing model----------") supported_arch = ["simple"] if not model_cfg["model_architecture"] in supported_arch: raise ValueError( "[Error] The model architecture is not in supported arch!") if model_cfg["model_architecture"] == "simple": model = SOLD2Net(model_cfg) else: raise ValueError( "[Error] The model architecture is not in supported arch!") if mode == "train": if loss_weights is not None: for param_name, param in loss_weights.items(): if isinstance(param, nn.Parameter): print("\t [Debug] Adding %s with value %f to model" % (param_name, param.item())) model.register_parameter(param_name, param) else: raise ValueError( "[Error] the loss weights can not be None in dynamic weighting mode during training.") print("\tModel architecture: %s" % model_cfg["model_architecture"]) print("\tBackbone: %s" % model_cfg["backbone"]) print("\tJunction decoder: %s" % model_cfg["junction_decoder"]) print("\tHeatmap decoder: %s" % model_cfg["heatmap_decoder"]) print("\t-------------------------------------") return model class SOLD2Net(nn.Module): def __init__(self, model_cfg): super(SOLD2Net, self).__init__() self.name = model_cfg["model_name"] self.cfg = model_cfg self.supported_backbone = ["lcnn", "superpoint"] self.backbone_net, self.feat_channel = self.get_backbone() self.supported_junction_decoder = ["superpoint_decoder"] self.junction_decoder = self.get_junction_decoder() self.supported_heatmap_decoder = ["pixel_shuffle", "pixel_shuffle_single"] self.heatmap_decoder = self.get_heatmap_decoder() if "descriptor_decoder" in self.cfg: self.supported_descriptor_decoder = ["superpoint_descriptor"] self.descriptor_decoder = self.get_descriptor_decoder() self.apply(weight_init) def forward(self, input_images): features = self.backbone_net(input_images) junctions = self.junction_decoder(features) heatmaps = self.heatmap_decoder(features) outputs = {"junctions": junctions, "heatmap": heatmaps} if "descriptor_decoder" in self.cfg: outputs["descriptors"] = self.descriptor_decoder(features) return outputs def get_backbone(self): if not self.cfg["backbone"] in self.supported_backbone: raise ValueError( "[Error] The backbone selection is not supported.") if self.cfg["backbone"] == "lcnn": backbone_cfg = self.cfg["backbone_cfg"] backbone = HourglassBackbone(**backbone_cfg) feat_channel = 256 elif self.cfg["backbone"] == "superpoint": backbone_cfg = self.cfg["backbone_cfg"] backbone = SuperpointBackbone() feat_channel = 128 else: raise ValueError( "[Error] The backbone selection is not supported.") return backbone, feat_channel def get_junction_decoder(self): if (not self.cfg["junction_decoder"] in self.supported_junction_decoder): raise ValueError( "[Error] The junction decoder selection is not supported.") if self.cfg["junction_decoder"] == "superpoint_decoder": decoder = SuperpointDecoder(self.feat_channel, self.cfg["backbone"]) else: raise ValueError( "[Error] The junction decoder selection is not supported.") return decoder def get_heatmap_decoder(self): if not self.cfg["heatmap_decoder"] in self.supported_heatmap_decoder: raise ValueError( "[Error] The heatmap decoder selection is not supported.") if self.cfg["heatmap_decoder"] == "pixel_shuffle": if self.cfg["backbone"] == "lcnn": decoder = PixelShuffleDecoder(self.feat_channel, num_upsample=2) elif self.cfg["backbone"] == "superpoint": decoder = PixelShuffleDecoder(self.feat_channel, num_upsample=3) else: raise ValueError("[Error] Unknown backbone option.") elif self.cfg["heatmap_decoder"] == "pixel_shuffle_single": if self.cfg["backbone"] == "lcnn": decoder = PixelShuffleDecoder( self.feat_channel, num_upsample=2, output_channel=1) elif self.cfg["backbone"] == "superpoint": decoder = PixelShuffleDecoder( self.feat_channel, num_upsample=3, output_channel=1) else: raise ValueError("[Error] Unknown backbone option.") else: raise ValueError( "[Error] The heatmap decoder selection is not supported.") return decoder def get_descriptor_decoder(self): if (not self.cfg["descriptor_decoder"] in self.supported_descriptor_decoder): raise ValueError( "[Error] The descriptor decoder selection is not supported.") if self.cfg["descriptor_decoder"] == "superpoint_descriptor": decoder = SuperpointDescriptor(self.feat_channel) else: raise ValueError( "[Error] The descriptor decoder selection is not supported.") return decoder def weight_init(m): if isinstance(m, nn.Conv2d): init.xavier_normal_(m.weight.data) if m.bias is not None: init.normal_(m.bias.data) elif isinstance(m, nn.BatchNorm2d): init.normal_(m.weight.data, mean=1, std=0.02) init.constant_(m.bias.data, 0) elif isinstance(m, nn.Linear): init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) else: pass
true
true
f70d80f76d6e011cd307a69711f004a7e641cdc4
2,414
py
Python
dipy/reconst/tests/test_peakdf.py
JohnGriffiths/dipy
5fb38e9b77547cdaf5eb140730444535733ae01d
[ "BSD-3-Clause" ]
1
2016-09-08T19:23:51.000Z
2016-09-08T19:23:51.000Z
dipy/reconst/tests/test_peakdf.py
JohnGriffiths/dipy
5fb38e9b77547cdaf5eb140730444535733ae01d
[ "BSD-3-Clause" ]
null
null
null
dipy/reconst/tests/test_peakdf.py
JohnGriffiths/dipy
5fb38e9b77547cdaf5eb140730444535733ae01d
[ "BSD-3-Clause" ]
null
null
null
import numpy as np import numpy.testing as npt from dipy.reconst.peaks import default_sphere, peaks_from_model def test_PeaksAndMetricsDirectionGetter(): class SillyModel(object): def fit(self, data, mask=None): return SillyFit(self) class SillyFit(object): def __init__(self, model): self.model = model def odf(self, sphere): odf = np.zeros(sphere.theta.shape) r = np.random.randint(0, len(odf)) odf[r] = 1 return odf def get_direction(dg, point, dir): newdir = dir.copy() state = dg.get_direction(point, newdir) return (state, np.array(newdir)) data = np.random.random((3, 4, 5, 2)) peaks = peaks_from_model(SillyModel(), data, default_sphere, relative_peak_threshold=.5, min_separation_angle=25) peaks._initialize() up = np.zeros(3) up[2] = 1. down = -up for i in range(3-1): for j in range(4-1): for k in range(5-1): point = np.array([i, j, k], dtype=float) # Test that the angle threshold rejects points peaks.ang_thr = 0. state, nd = get_direction(peaks, point, up) npt.assert_equal(state, 1) # Here we leverage the fact that we know Hemispheres project # all their vertices into the z >= 0 half of the sphere. peaks.ang_thr = 90. state, nd = get_direction(peaks, point, up) npt.assert_equal(state, 0) expected_dir = peaks.peak_dirs[i, j, k, 0] npt.assert_array_almost_equal(nd, expected_dir) state, nd = get_direction(peaks, point, down) npt.assert_array_almost_equal(nd, -expected_dir) # Check that we can get directions at non-integer points point += np.random.random(3) state, nd = get_direction(peaks, point, up) npt.assert_equal(state, 0) # Check that points are rounded to get initial direction point -= .5 id = peaks.initial_direction(point) # id should be a (1, 3) array npt.assert_array_almost_equal(id, [expected_dir]) if __name__ == "__main__": npt.run_module_suite()
32.621622
76
0.555095
import numpy as np import numpy.testing as npt from dipy.reconst.peaks import default_sphere, peaks_from_model def test_PeaksAndMetricsDirectionGetter(): class SillyModel(object): def fit(self, data, mask=None): return SillyFit(self) class SillyFit(object): def __init__(self, model): self.model = model def odf(self, sphere): odf = np.zeros(sphere.theta.shape) r = np.random.randint(0, len(odf)) odf[r] = 1 return odf def get_direction(dg, point, dir): newdir = dir.copy() state = dg.get_direction(point, newdir) return (state, np.array(newdir)) data = np.random.random((3, 4, 5, 2)) peaks = peaks_from_model(SillyModel(), data, default_sphere, relative_peak_threshold=.5, min_separation_angle=25) peaks._initialize() up = np.zeros(3) up[2] = 1. down = -up for i in range(3-1): for j in range(4-1): for k in range(5-1): point = np.array([i, j, k], dtype=float) peaks.ang_thr = 0. state, nd = get_direction(peaks, point, up) npt.assert_equal(state, 1) peaks.ang_thr = 90. state, nd = get_direction(peaks, point, up) npt.assert_equal(state, 0) expected_dir = peaks.peak_dirs[i, j, k, 0] npt.assert_array_almost_equal(nd, expected_dir) state, nd = get_direction(peaks, point, down) npt.assert_array_almost_equal(nd, -expected_dir) point += np.random.random(3) state, nd = get_direction(peaks, point, up) npt.assert_equal(state, 0) point -= .5 id = peaks.initial_direction(point) npt.assert_array_almost_equal(id, [expected_dir]) if __name__ == "__main__": npt.run_module_suite()
true
true
f70d822462b0e11e8a773b1e4086ec0734f75d3c
4,226
py
Python
assignment/assignment2/q3_nature.py
reactivetype/cs234-reinforcement-learning
693a90854d6548157ac8ec1c70a90b08810aec1b
[ "MIT" ]
null
null
null
assignment/assignment2/q3_nature.py
reactivetype/cs234-reinforcement-learning
693a90854d6548157ac8ec1c70a90b08810aec1b
[ "MIT" ]
null
null
null
assignment/assignment2/q3_nature.py
reactivetype/cs234-reinforcement-learning
693a90854d6548157ac8ec1c70a90b08810aec1b
[ "MIT" ]
null
null
null
import tensorflow as tf import tensorflow.contrib.layers as layers from utils.general import get_logger from utils.test_env import EnvTest from q1_schedule import LinearExploration, LinearSchedule from q2_linear import Linear from configs.q3_nature import config class NatureQN(Linear): """ Implementing DeepMind's Nature paper. Here are the relevant urls. https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf """ def get_q_values_op(self, state, scope, reuse=False): """ Returns Q values for all actions Args: state: (tf tensor) shape = (batch_size, img height, img width, nchannels) scope: (string) scope name, that specifies if target network or not reuse: (bool) reuse of variables in the scope Returns: out: (tf tensor) of shape = (batch_size, num_actions) """ # this information might be useful num_actions = self.env.action_space.n ############################################################## """ TODO: implement the computation of Q values like in the paper https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf you may find the section "model architecture" of the appendix of the nature paper particulary useful. store your result in out of shape = (batch_size, num_actions) HINT: you may find tensorflow.contrib.layers useful (imported) make sure to understand the use of the scope param make sure to flatten() the tensor before connecting it to fully connected layers you can use any other methods from tensorflow you are not allowed to import extra packages (like keras, lasagne, cafe, etc.) """ ############################################################## ################ YOUR CODE HERE - 10-15 lines ################ with tf.variable_scope(scope, reuse): conv1 = layers.conv2d(inputs = state, num_outputs = 32, kernel_size = [8, 8], stride = 4, activation_fn = tf.nn.relu, reuse = reuse, scope = "Conv1") conv2 = layers.conv2d(inputs = conv1, num_outputs = 64, kernel_size = [4, 4], stride = 2, activation_fn = tf.nn.relu, reuse=reuse, scope = "Conv2") conv3 = layers.conv2d(inputs=conv2, num_outputs=64, kernel_size=[3, 3], stride=1, activation_fn=tf.nn.relu, reuse=reuse, scope="Conv3") flattened = layers.flatten(conv3, scope = "flattened") hidden_fc = layers.fully_connected(inputs = flattened, num_outputs = 512, activation_fn = tf.nn.relu, reuse=reuse, scope = "hidden-fc") out = layers.fully_connected(inputs = hidden_fc, num_outputs = num_actions, activation_fn = None, reuse=reuse, scope = "output-Q") ############################################################## ######################## END YOUR CODE ####################### return out """ Use deep Q network for test environment. """ if __name__ == '__main__': env = EnvTest((80, 80, 1)) # exploration strategy exp_schedule = LinearExploration(env, config.eps_begin, config.eps_end, config.eps_nsteps) # learning rate schedule lr_schedule = LinearSchedule(config.lr_begin, config.lr_end, config.lr_nsteps) # train model model = NatureQN(env, config) model.run(exp_schedule, lr_schedule)
42.26
96
0.505206
import tensorflow as tf import tensorflow.contrib.layers as layers from utils.general import get_logger from utils.test_env import EnvTest from q1_schedule import LinearExploration, LinearSchedule from q2_linear import Linear from configs.q3_nature import config class NatureQN(Linear): def get_q_values_op(self, state, scope, reuse=False): num_actions = self.env.action_space.n
true
true
f70d82790541d8df0e5c0d0f122d90643ea30c26
8,368
py
Python
openmdao/approximation_schemes/complex_step.py
onodip/OpenMDAO
96a99806fb3a547b881d2ad3da2733bca9978567
[ "Apache-2.0" ]
null
null
null
openmdao/approximation_schemes/complex_step.py
onodip/OpenMDAO
96a99806fb3a547b881d2ad3da2733bca9978567
[ "Apache-2.0" ]
null
null
null
openmdao/approximation_schemes/complex_step.py
onodip/OpenMDAO
96a99806fb3a547b881d2ad3da2733bca9978567
[ "Apache-2.0" ]
null
null
null
"""Complex Step derivative approximations.""" from __future__ import division, print_function from itertools import groupby from six.moves import range import numpy as np from openmdao.approximation_schemes.approximation_scheme import ApproximationScheme from openmdao.utils.name_maps import abs_key2rel_key DEFAULT_CS_OPTIONS = { 'step': 1e-15, 'form': 'forward', } class ComplexStep(ApproximationScheme): r""" Approximation scheme using complex step to calculate derivatives. For example, using a step size of 'h' will approximate the derivative in the following way: .. math:: f'(x) = \Im{\frac{f(x+ih)}{h}}. Attributes ---------- _exec_list : list A list of which derivatives (in execution order) to compute. The entries are of the form (of, wrt, options), where of and wrt are absolute names and options is a dictionary. """ def __init__(self): """ Initialize the ApproximationScheme. """ super(ComplexStep, self).__init__() self._exec_list = [] def add_approximation(self, abs_key, kwargs): """ Use this approximation scheme to approximate the derivative d(of)/d(wrt). Parameters ---------- abs_key : tuple(str,str) Absolute name pairing of (of, wrt) for the derivative. kwargs : dict Additional keyword arguments, to be interpreted by sub-classes. """ of, wrt = abs_key options = DEFAULT_CS_OPTIONS.copy() options.update(kwargs) self._exec_list.append((of, wrt, options)) @staticmethod def _key_fun(approx_tuple): """ Compute the sorting key for an approximation tuple. Parameters ---------- approx_tuple : tuple(str, str, dict) A given approximated derivative (of, wrt, options) Returns ------- tuple(str, str, float) Sorting key (wrt, form, step_size) """ options = approx_tuple[2] return (approx_tuple[1], options['form'], options['step']) def _init_approximations(self): """ Prepare for later approximations. """ # itertools.groupby works like `uniq` rather than the SQL query, meaning that it will only # group adjacent items with identical keys. self._exec_list.sort(key=self._key_fun) # TODO: Automatic sparse FD by constructing a graph of variable dependence? def compute_approximations(self, system, jac=None, deriv_type='partial'): """ Execute the system to compute the approximate sub-Jacobians. Parameters ---------- system : System System on which the execution is run. jac : None or dict-like If None, update system with the approximated sub-Jacobians. Otherwise, store the approximations in the given dict-like object. deriv_type : str One of 'total' or 'partial', indicating if total or partial derivatives are being approximated. """ if jac is None: jac = system._jacobian if deriv_type == 'total': current_vec = system._outputs elif deriv_type == 'partial': current_vec = system._residuals else: raise ValueError('deriv_type must be one of "total" or "partial"') # Turn on complex step. system._inputs._vector_info._under_complex_step = True # create a scratch array out_tmp = system._outputs.get_data() results_clone = current_vec._clone(True) # To support driver src_indices, we need to override some checks in Jacobian, but do it # selectively. uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \ not isinstance(jac, dict) for key, approximations in groupby(self._exec_list, self._key_fun): # groupby (along with this key function) will group all 'of's that have the same wrt and # step size. wrt, form, delta = key if form == 'reverse': delta *= -1.0 fact = 1.0 / delta if wrt in system._owns_approx_wrt_idx: in_idx = system._owns_approx_wrt_idx[wrt] in_size = len(in_idx) else: if wrt in system._var_abs2meta: in_size = system._var_abs2meta[wrt]['size'] in_idx = range(in_size) outputs = [] # Note: If access to `approximations` is required again in the future, we will need to # throw it in a list first. The groupby iterator only works once. for approx_tuple in approximations: of = approx_tuple[0] # TODO: Sparse derivatives if of in system._owns_approx_of_idx: out_idx = system._owns_approx_of_idx[of] out_size = len(out_idx) else: out_size = system._var_abs2meta[of]['size'] outputs.append((of, np.zeros((out_size, in_size)))) for i_count, idx in enumerate(in_idx): # Run the Finite Difference input_delta = [(wrt, idx, delta)] result = self._run_point_complex(system, input_delta, out_tmp, results_clone, deriv_type) for of, subjac in outputs: if of in system._owns_approx_of_idx: out_idx = system._owns_approx_of_idx[of] subjac[:, i_count] = result._imag_views_flat[of][out_idx] * fact else: subjac[:, i_count] = result._imag_views_flat[of] * fact for of, subjac in outputs: rel_key = abs_key2rel_key(system, (of, wrt)) if uses_src_indices: jac._override_checks = True jac[rel_key] = subjac if uses_src_indices: jac._override_checks = False # Turn off complex step. system._inputs._vector_info._under_complex_step = False def _run_point_complex(self, system, input_deltas, out_tmp, result_clone, deriv_type='partial'): """ Perturb the system inputs with a complex step, runs, and returns the results. Parameters ---------- system : System The system having its derivs approximated. input_deltas : list List of (input name, indices, delta) tuples, where input name is an absolute name. out_tmp : ndarray An array the same size as the system outputs that is used for temporary storage. result_clone : Vector A vector cloned from the outputs vector. Used to store the results. deriv_type : str One of 'total' or 'partial', indicating if total or partial derivatives are being approximated. Returns ------- Vector Copy of the results from running the perturbed system. """ # TODO: MPI inputs = system._inputs outputs = system._outputs if deriv_type == 'total': run_model = system.run_solve_nonlinear results_vec = outputs elif deriv_type == 'partial': run_model = system.run_apply_nonlinear results_vec = system._residuals else: raise ValueError('deriv_type must be one of "total" or "partial"') for in_name, idxs, delta in input_deltas: if in_name in outputs._imag_views_flat: outputs._imag_views_flat[in_name][idxs] += delta else: inputs._imag_views_flat[in_name][idxs] += delta results_vec.get_data(out_tmp) run_model() # TODO: Grab only results of interest result_clone.set_vec(results_vec) results_vec.set_data(out_tmp) for in_name, idxs, delta in input_deltas: if in_name in outputs._imag_views_flat: outputs._imag_views_flat[in_name][idxs] -= delta else: inputs._imag_views_flat[in_name][idxs] -= delta return result_clone
34.866667
100
0.589627
from __future__ import division, print_function from itertools import groupby from six.moves import range import numpy as np from openmdao.approximation_schemes.approximation_scheme import ApproximationScheme from openmdao.utils.name_maps import abs_key2rel_key DEFAULT_CS_OPTIONS = { 'step': 1e-15, 'form': 'forward', } class ComplexStep(ApproximationScheme): def __init__(self): super(ComplexStep, self).__init__() self._exec_list = [] def add_approximation(self, abs_key, kwargs): of, wrt = abs_key options = DEFAULT_CS_OPTIONS.copy() options.update(kwargs) self._exec_list.append((of, wrt, options)) @staticmethod def _key_fun(approx_tuple): options = approx_tuple[2] return (approx_tuple[1], options['form'], options['step']) def _init_approximations(self): self._exec_list.sort(key=self._key_fun) def compute_approximations(self, system, jac=None, deriv_type='partial'): if jac is None: jac = system._jacobian if deriv_type == 'total': current_vec = system._outputs elif deriv_type == 'partial': current_vec = system._residuals else: raise ValueError('deriv_type must be one of "total" or "partial"') system._inputs._vector_info._under_complex_step = True out_tmp = system._outputs.get_data() results_clone = current_vec._clone(True) uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \ not isinstance(jac, dict) for key, approximations in groupby(self._exec_list, self._key_fun): wrt, form, delta = key if form == 'reverse': delta *= -1.0 fact = 1.0 / delta if wrt in system._owns_approx_wrt_idx: in_idx = system._owns_approx_wrt_idx[wrt] in_size = len(in_idx) else: if wrt in system._var_abs2meta: in_size = system._var_abs2meta[wrt]['size'] in_idx = range(in_size) outputs = [] for approx_tuple in approximations: of = approx_tuple[0] if of in system._owns_approx_of_idx: out_idx = system._owns_approx_of_idx[of] out_size = len(out_idx) else: out_size = system._var_abs2meta[of]['size'] outputs.append((of, np.zeros((out_size, in_size)))) for i_count, idx in enumerate(in_idx): input_delta = [(wrt, idx, delta)] result = self._run_point_complex(system, input_delta, out_tmp, results_clone, deriv_type) for of, subjac in outputs: if of in system._owns_approx_of_idx: out_idx = system._owns_approx_of_idx[of] subjac[:, i_count] = result._imag_views_flat[of][out_idx] * fact else: subjac[:, i_count] = result._imag_views_flat[of] * fact for of, subjac in outputs: rel_key = abs_key2rel_key(system, (of, wrt)) if uses_src_indices: jac._override_checks = True jac[rel_key] = subjac if uses_src_indices: jac._override_checks = False system._inputs._vector_info._under_complex_step = False def _run_point_complex(self, system, input_deltas, out_tmp, result_clone, deriv_type='partial'): inputs = system._inputs outputs = system._outputs if deriv_type == 'total': run_model = system.run_solve_nonlinear results_vec = outputs elif deriv_type == 'partial': run_model = system.run_apply_nonlinear results_vec = system._residuals else: raise ValueError('deriv_type must be one of "total" or "partial"') for in_name, idxs, delta in input_deltas: if in_name in outputs._imag_views_flat: outputs._imag_views_flat[in_name][idxs] += delta else: inputs._imag_views_flat[in_name][idxs] += delta results_vec.get_data(out_tmp) run_model() result_clone.set_vec(results_vec) results_vec.set_data(out_tmp) for in_name, idxs, delta in input_deltas: if in_name in outputs._imag_views_flat: outputs._imag_views_flat[in_name][idxs] -= delta else: inputs._imag_views_flat[in_name][idxs] -= delta return result_clone
true
true
f70d82ff53eaea18aadbc5b3005b76647beb287d
196
py
Python
nipy/neurospin/register/__init__.py
yarikoptic/NiPy-OLD
8759b598ac72d3b9df7414642c7a662ad9c55ece
[ "BSD-3-Clause" ]
1
2015-08-22T16:14:45.000Z
2015-08-22T16:14:45.000Z
nipy/neurospin/register/__init__.py
yarikoptic/NiPy-OLD
8759b598ac72d3b9df7414642c7a662ad9c55ece
[ "BSD-3-Clause" ]
null
null
null
nipy/neurospin/register/__init__.py
yarikoptic/NiPy-OLD
8759b598ac72d3b9df7414642c7a662ad9c55ece
[ "BSD-3-Clause" ]
null
null
null
from iconic_matcher import IconicMatcher #from realign4d import TimeSeries, realign4d, resample4d import transform from numpy.testing import Tester test = Tester().test bench = Tester().bench
19.6
56
0.806122
from iconic_matcher import IconicMatcher import transform from numpy.testing import Tester test = Tester().test bench = Tester().bench
true
true
f70d83e37661707a388cc938f1d05f4e528ac00e
3,117
py
Python
scripts/hailey.py
wcdawn/dotfiles
d069b53b7f19b53767df9e8d67b38b1d6fbf4e28
[ "MIT" ]
1
2019-01-30T01:34:21.000Z
2019-01-30T01:34:21.000Z
scripts/hailey.py
wcdawn/dotfiles
d069b53b7f19b53767df9e8d67b38b1d6fbf4e28
[ "MIT" ]
null
null
null
scripts/hailey.py
wcdawn/dotfiles
d069b53b7f19b53767df9e8d67b38b1d6fbf4e28
[ "MIT" ]
null
null
null
#!/bin/python # argument processing import sys, getopt # date and time import datetime import pytz # weather from weather import weatherFormat, twoColumn from ansi import ansi_escape # graphics/image import PIL # requires python-pillow from PIL import Image # webcams import webcam # for organ import requests from io import BytesIO # default options doWeather = True doImage = True whichWebcam = 0 # logical input # -w -- do weather # -i -- do image # integer input # -c -- (1,2) for (lake_view, tree_view) webcam # arguemnt processing myopts, args = getopt.getopt(sys.argv[1:], 'w:i:c:') # o -- option # a -- argument for o, a in myopts: if o == '-w': doWeather = bool(int(a)) elif o == '-i': doImage = bool(int(a)) elif o == '-c': whichWebcam = int(a) else: print(o, a) print('Usage: {:s}') # date/time in Montana now = datetime.datetime.now(pytz.timezone('America/Denver')) print('The time in Las Cruces is:') print(now.strftime('%a %w %b %Y %H:%M:%S %Z')) # days until fname = '/home/wcdawn/hailey/next_visit.txt' fobj = open(fname, 'r') date_str = fobj.readlines() fobj.close() # strip removes leading whitespace, trailing whitespace, and newline characters date_str = date_str[0].strip() next_visit = datetime.datetime.strptime(date_str, '%Y-%m-%d') now = datetime.datetime.now() diff = next_visit - now print() print('Days until next visit: {:d}'.format(diff.days + 1)) # display an image image_fname = '/home/wcdawn/hailey/christmas_pic/portland_canard.jpg' if (doImage): image = Image.open(image_fname) maxsize = (640, 640) image.thumbnail(maxsize, PIL.Image.ANTIALIAS) image.show() # weather location_dict = { 'Missoula': [46.856339, -113.995292], 'Flathead': [47.876957, -114.032290]} location_dict = { 'Las Cruces': [32.288111, -106.743986]} if (doWeather): weather_list = [] for key in location_dict: weather_list.append(weatherFormat(key, location_dict[key][0], location_dict[key][1])) if (len(location_dict) == 1): for i in range(len(weather_list[0])): print(weather_list[0][i]) elif (len(location_dict) == 2): padded_width = 40 for i in range(len(weather_list[0])): blank_size = padded_width - len(ansi_escape.sub('', weather_list[0][i])) print(weather_list[0][i] + blank_size * ' ' + weather_list[1][i]) # webcams # http://webcam.flbs.umt.edu/view/viewer_index.shtml?id=2731 lake_view = 'http://webcam.flbs.umt.edu/mjpg/video.mjpg' # http://webcam2.flbs.umt.edu/view/viewer_index.shtml?id=4824 tree_view = 'http://webcam2.flbs.umt.edu/mjpg/video.mjpg' # https://weather.nmsu.edu/webcams/nmcc-fbg/ organ_view = 'https://weather.nmsu.edu/files/cameras/nmcc-fbg/nmcc-fbg.jpg' if (whichWebcam == 1): webcam.dispWebcam(lake_view) elif (whichWebcam == 2): webcam.dispWebcam(tree_view) elif (whichWebcam == 3): response = requests.get(organ_view) image = Image.open(BytesIO(response.content)) maxsize = (640, 640) image.thumbnail(maxsize, PIL.Image.ANTIALIAS) image.show()
25.975
84
0.667629
import sys, getopt import datetime import pytz from weather import weatherFormat, twoColumn from ansi import ansi_escape import PIL from PIL import Image import webcam import requests from io import BytesIO doWeather = True doImage = True whichWebcam = 0 myopts, args = getopt.getopt(sys.argv[1:], 'w:i:c:') for o, a in myopts: if o == '-w': doWeather = bool(int(a)) elif o == '-i': doImage = bool(int(a)) elif o == '-c': whichWebcam = int(a) else: print(o, a) print('Usage: {:s}') now = datetime.datetime.now(pytz.timezone('America/Denver')) print('The time in Las Cruces is:') print(now.strftime('%a %w %b %Y %H:%M:%S %Z')) fname = '/home/wcdawn/hailey/next_visit.txt' fobj = open(fname, 'r') date_str = fobj.readlines() fobj.close() date_str = date_str[0].strip() next_visit = datetime.datetime.strptime(date_str, '%Y-%m-%d') now = datetime.datetime.now() diff = next_visit - now print() print('Days until next visit: {:d}'.format(diff.days + 1)) image_fname = '/home/wcdawn/hailey/christmas_pic/portland_canard.jpg' if (doImage): image = Image.open(image_fname) maxsize = (640, 640) image.thumbnail(maxsize, PIL.Image.ANTIALIAS) image.show() location_dict = { 'Missoula': [46.856339, -113.995292], 'Flathead': [47.876957, -114.032290]} location_dict = { 'Las Cruces': [32.288111, -106.743986]} if (doWeather): weather_list = [] for key in location_dict: weather_list.append(weatherFormat(key, location_dict[key][0], location_dict[key][1])) if (len(location_dict) == 1): for i in range(len(weather_list[0])): print(weather_list[0][i]) elif (len(location_dict) == 2): padded_width = 40 for i in range(len(weather_list[0])): blank_size = padded_width - len(ansi_escape.sub('', weather_list[0][i])) print(weather_list[0][i] + blank_size * ' ' + weather_list[1][i]) lake_view = 'http://webcam.flbs.umt.edu/mjpg/video.mjpg' tree_view = 'http://webcam2.flbs.umt.edu/mjpg/video.mjpg' organ_view = 'https://weather.nmsu.edu/files/cameras/nmcc-fbg/nmcc-fbg.jpg' if (whichWebcam == 1): webcam.dispWebcam(lake_view) elif (whichWebcam == 2): webcam.dispWebcam(tree_view) elif (whichWebcam == 3): response = requests.get(organ_view) image = Image.open(BytesIO(response.content)) maxsize = (640, 640) image.thumbnail(maxsize, PIL.Image.ANTIALIAS) image.show()
true
true
f70d852df7ec2783451aeb34c885f027064d3454
1,151
py
Python
tests/test_planning.py
acjackman/note-clerk
1284c41d4a0c5f4a515af188c48576299d15cda6
[ "MIT" ]
1
2020-03-18T19:45:19.000Z
2020-03-18T19:45:19.000Z
tests/test_planning.py
acjackman/note-clerk
1284c41d4a0c5f4a515af188c48576299d15cda6
[ "MIT" ]
240
2020-03-23T00:41:15.000Z
2022-03-31T20:46:04.000Z
tests/test_planning.py
acjackman/note-clerk
1284c41d4a0c5f4a515af188c48576299d15cda6
[ "MIT" ]
null
null
null
import datetime as dt import pytest from note_clerk import planning @pytest.mark.parametrize( "date, quarter", [ (dt.datetime(2020, 1, 1), dt.datetime(2020, 1, 1)), (dt.datetime(2020, 1, 2), dt.datetime(2020, 1, 1)), (dt.datetime(2020, 4, 1), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 4, 2), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 5, 2), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 6, 2), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 7, 2), dt.datetime(2020, 7, 1)), (dt.datetime(2020, 8, 2), dt.datetime(2020, 7, 1)), (dt.datetime(2020, 9, 2), dt.datetime(2020, 7, 1)), (dt.datetime(2020, 10, 2), dt.datetime(2020, 10, 1)), (dt.datetime(2020, 11, 2), dt.datetime(2020, 10, 1)), (dt.datetime(2020, 12, 2), dt.datetime(2020, 10, 1)), ], ) def test_quarter_start(date: dt.datetime, quarter: dt.datetime) -> None: adjusted = planning.quarter_start(date) assert adjusted == quarter def print_with_header(header: str, text: str) -> None: line = "*" * (len(header) + 4) print(f"{line}\n* {header} *\n{line}\n{text}")
34.878788
72
0.582971
import datetime as dt import pytest from note_clerk import planning @pytest.mark.parametrize( "date, quarter", [ (dt.datetime(2020, 1, 1), dt.datetime(2020, 1, 1)), (dt.datetime(2020, 1, 2), dt.datetime(2020, 1, 1)), (dt.datetime(2020, 4, 1), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 4, 2), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 5, 2), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 6, 2), dt.datetime(2020, 4, 1)), (dt.datetime(2020, 7, 2), dt.datetime(2020, 7, 1)), (dt.datetime(2020, 8, 2), dt.datetime(2020, 7, 1)), (dt.datetime(2020, 9, 2), dt.datetime(2020, 7, 1)), (dt.datetime(2020, 10, 2), dt.datetime(2020, 10, 1)), (dt.datetime(2020, 11, 2), dt.datetime(2020, 10, 1)), (dt.datetime(2020, 12, 2), dt.datetime(2020, 10, 1)), ], ) def test_quarter_start(date: dt.datetime, quarter: dt.datetime) -> None: adjusted = planning.quarter_start(date) assert adjusted == quarter def print_with_header(header: str, text: str) -> None: line = "*" * (len(header) + 4) print(f"{line}\n* {header} *\n{line}\n{text}")
true
true
f70d853bd60308a51a14837c119525ba68a9fc78
576
py
Python
pdc/apps/package/migrations/0003_buildimage_releases.py
tzhaoredhat/automation
a1867dc2d3591fdae1fa7f80d457c25f9705070e
[ "MIT" ]
18
2015-12-15T17:56:18.000Z
2021-04-10T13:49:48.000Z
pdc/apps/package/migrations/0003_buildimage_releases.py
tzhaoredhat/automation
a1867dc2d3591fdae1fa7f80d457c25f9705070e
[ "MIT" ]
303
2015-11-18T07:37:06.000Z
2021-05-26T12:34:01.000Z
pdc/apps/package/migrations/0003_buildimage_releases.py
tzhaoredhat/automation
a1867dc2d3591fdae1fa7f80d457c25f9705070e
[ "MIT" ]
27
2015-11-19T20:33:54.000Z
2021-03-25T08:15:28.000Z
# -*- coding: utf-8 -*- # # Copyright (c) 2015 Red Hat # Licensed under The MIT License (MIT) # http://opensource.org/licenses/MIT # from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('release', '0002_auto_20150512_0719'), ('package', '0002_auto_20150512_0714'), ] operations = [ migrations.AddField( model_name='buildimage', name='releases', field=models.ManyToManyField(to='release.Release'), ), ]
22.153846
63
0.631944
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('release', '0002_auto_20150512_0719'), ('package', '0002_auto_20150512_0714'), ] operations = [ migrations.AddField( model_name='buildimage', name='releases', field=models.ManyToManyField(to='release.Release'), ), ]
true
true
f70d8561e7f64d7ca452a60d51ddc95627b583d7
2,715
py
Python
openGaussBase/testcase/KEYWORDS/reloptions/Opengauss_Function_Keyword_Reloptions_Case0020.py
opengauss-mirror/Yat
aef107a8304b94e5d99b4f1f36eb46755eb8919e
[ "MulanPSL-1.0" ]
null
null
null
openGaussBase/testcase/KEYWORDS/reloptions/Opengauss_Function_Keyword_Reloptions_Case0020.py
opengauss-mirror/Yat
aef107a8304b94e5d99b4f1f36eb46755eb8919e
[ "MulanPSL-1.0" ]
null
null
null
openGaussBase/testcase/KEYWORDS/reloptions/Opengauss_Function_Keyword_Reloptions_Case0020.py
opengauss-mirror/Yat
aef107a8304b94e5d99b4f1f36eb46755eb8919e
[ "MulanPSL-1.0" ]
null
null
null
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ ''' #-- @testpoint:opengauss关键字reloptions(非保留),作为目录对象名 ''' import unittest from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger = Logger() commonsh = CommonSH('dbuser') constant = Constant() class Hostname(unittest.TestCase): def setUp(self): logger.info("------------------------ Opengauss_Function_Keyword_Reloptions_Case0020 开始执行--------------------------") # 关键字作为目录对象名不带双引号 - 成功 def test_reloptions_1(self): SqlMdg = commonsh.execut_db_sql('''create directory reloptions as '/tmp/'; drop directory reloptions;''') logger.info(SqlMdg) self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg) self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg) # 关键字作为目录对象名带双引号—成功 def test_reloptions_2(self): SqlMdg = commonsh.execut_db_sql('''create directory "reloptions" as '/tmp/'; drop directory "reloptions";''') logger.info(SqlMdg) self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg) self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg) # 关键字作为目录对象名带单引号 - 合理报错 def test_reloptions_3(self): SqlMdg = commonsh.execut_db_sql('''drop directory if exists 'reloptions';''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) SqlMdg = commonsh.execut_db_sql(''' create directory 'reloptions' as '/tmp/';''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) #关键字作为目录对象名带反引号 - 合理报错 def test_reloptions_4(self): SqlMdg = commonsh.execut_db_sql('''drop directory if exists \`reloptions\`;''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) SqlMdg = commonsh.execut_db_sql('''create directory \`reloptions\` as '/tmp/';''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) def tearDown(self): logger.info('------------------------ Opengauss_Function_Keyword_Reloptions_Case0020 执行结束--------------------------')
36.689189
126
0.669613
import unittest from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger = Logger() commonsh = CommonSH('dbuser') constant = Constant() class Hostname(unittest.TestCase): def setUp(self): logger.info("------------------------ Opengauss_Function_Keyword_Reloptions_Case0020 开始执行--------------------------") def test_reloptions_1(self): SqlMdg = commonsh.execut_db_sql('''create directory reloptions as '/tmp/'; drop directory reloptions;''') logger.info(SqlMdg) self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg) self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg) def test_reloptions_2(self): SqlMdg = commonsh.execut_db_sql('''create directory "reloptions" as '/tmp/'; drop directory "reloptions";''') logger.info(SqlMdg) self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg) self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg) def test_reloptions_3(self): SqlMdg = commonsh.execut_db_sql('''drop directory if exists 'reloptions';''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) SqlMdg = commonsh.execut_db_sql(''' create directory 'reloptions' as '/tmp/';''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) def test_reloptions_4(self): SqlMdg = commonsh.execut_db_sql('''drop directory if exists \`reloptions\`;''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) SqlMdg = commonsh.execut_db_sql('''create directory \`reloptions\` as '/tmp/';''') logger.info(SqlMdg) self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg) def tearDown(self): logger.info('------------------------ Opengauss_Function_Keyword_Reloptions_Case0020 执行结束--------------------------')
true
true
f70d85b46cd37f7c5bcf77fee22d4a9a8c561ef2
488
py
Python
clusterpy/core/contiguity/__init__.py
clusterpy/clusterpy
f08136b806b6c3bdba53422e1bc0e19459a983fa
[ "BSD-3-Clause" ]
48
2015-03-23T14:11:40.000Z
2022-03-25T01:55:32.000Z
clusterpy/core/contiguity/__init__.py
Pandinosaurus/clusterpy
f08136b806b6c3bdba53422e1bc0e19459a983fa
[ "BSD-3-Clause" ]
8
2015-02-22T17:49:38.000Z
2020-11-20T18:03:32.000Z
clusterpy/core/contiguity/__init__.py
Pandinosaurus/clusterpy
f08136b806b6c3bdba53422e1bc0e19459a983fa
[ "BSD-3-Clause" ]
27
2016-03-06T23:53:19.000Z
2021-11-21T19:36:20.000Z
# encoding: latin2 """Data generator module """ __author__ = "Juan C. Duque, Alejandro Betancourt" __credits__ = "Copyright (c) 2009-10 Juan C. Duque" __license__ = "New BSD License" __version__ = "1.0.0" __maintainer__ = "RiSE Group" __email__ = "contacto@rise-group.org" from weightsFromAreas import weightsFromAreas from intersections import fixIntersections from transformations import dict2matrix from transformations import dict2sparseMatrix from output import dict2gal, dict2csv
28.705882
51
0.79918
__author__ = "Juan C. Duque, Alejandro Betancourt" __credits__ = "Copyright (c) 2009-10 Juan C. Duque" __license__ = "New BSD License" __version__ = "1.0.0" __maintainer__ = "RiSE Group" __email__ = "contacto@rise-group.org" from weightsFromAreas import weightsFromAreas from intersections import fixIntersections from transformations import dict2matrix from transformations import dict2sparseMatrix from output import dict2gal, dict2csv
true
true
f70d86040a54ad30812690780127fc9fd94d4f71
392
py
Python
derrida/wsgi.py
making-books-ren-today/test_eval_4_derrmar
615796efeb517cd12cfb1f8b67e0150f6aaaea66
[ "Apache-2.0" ]
11
2017-04-27T19:28:54.000Z
2021-02-10T23:44:39.000Z
derrida/wsgi.py
making-books-ren-today/test_eval_4_derrmar
615796efeb517cd12cfb1f8b67e0150f6aaaea66
[ "Apache-2.0" ]
280
2017-03-02T14:23:18.000Z
2021-12-01T14:16:48.000Z
derrida/wsgi.py
making-books-ren-today/test_eval_4_derrmar
615796efeb517cd12cfb1f8b67e0150f6aaaea66
[ "Apache-2.0" ]
null
null
null
""" WSGI config for derrida project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "derrida.settings") application = get_wsgi_application()
23.058824
78
0.785714
import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "derrida.settings") application = get_wsgi_application()
true
true
f70d86a4b0ce8aad33a5069f14e24dc6de5745d3
5,852
py
Python
dashboard/modules/job/tests/test_cli_integration.py
jianoaix/ray
1701b923bc83905f8961c06a6a173e3eba46a936
[ "Apache-2.0" ]
null
null
null
dashboard/modules/job/tests/test_cli_integration.py
jianoaix/ray
1701b923bc83905f8961c06a6a173e3eba46a936
[ "Apache-2.0" ]
41
2021-09-21T01:13:48.000Z
2022-03-19T07:12:22.000Z
dashboard/modules/job/tests/test_cli_integration.py
jianoaix/ray
1701b923bc83905f8961c06a6a173e3eba46a936
[ "Apache-2.0" ]
1
2019-09-24T16:24:49.000Z
2019-09-24T16:24:49.000Z
from contextlib import contextmanager import json import os import logging import sys import subprocess from typing import Optional, Tuple import pytest logger = logging.getLogger(__name__) @contextmanager def set_env_var(key: str, val: Optional[str] = None): old_val = os.environ.get(key, None) if val is not None: os.environ[key] = val elif key in os.environ: del os.environ[key] yield if key in os.environ: del os.environ[key] if old_val is not None: os.environ[key] = old_val @pytest.fixture def ray_start_stop(): subprocess.check_output(["ray", "start", "--head"]) try: with set_env_var("RAY_ADDRESS", "http://127.0.0.1:8265"): yield finally: subprocess.check_output(["ray", "stop", "--force"]) @contextmanager def ray_cluster_manager(): """ Used not as fixture in case we want to set RAY_ADDRESS first. """ subprocess.check_output(["ray", "start", "--head"]) try: yield finally: subprocess.check_output(["ray", "stop", "--force"]) def _run_cmd(cmd: str, should_fail=False) -> Tuple[str, str]: """Convenience wrapper for subprocess.run. We always run with shell=True to simulate the CLI. Asserts that the process succeeds/fails depending on should_fail. Returns (stdout, stderr). """ print(f"Running command: '{cmd}'") p: subprocess.CompletedProcess = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) if p.returncode == 0: print("Command succeeded.") if should_fail: raise RuntimeError( f"Expected command to fail, but got exit code: {p.returncode}." ) else: print(f"Command failed with exit code: {p.returncode}.") if not should_fail: raise RuntimeError( f"Expected command to succeed, but got exit code: {p.returncode}." ) return p.stdout.decode("utf-8"), p.stderr.decode("utf-8") class TestJobSubmitHook: """Tests the RAY_JOB_SUBMIT_HOOK env var.""" def test_hook(self, ray_start_stop): with set_env_var("RAY_JOB_SUBMIT_HOOK", "ray._private.test_utils.job_hook"): stdout, _ = _run_cmd("ray job submit -- echo hello") assert "hook intercepted: echo hello" in stdout class TestRayAddress: """ Integration version of job CLI test that ensures interaction with the following components are working as expected: 1) Ray client: use of RAY_ADDRESS and ray.init() in job_head.py 2) Ray dashboard: `ray start --head` """ def test_empty_ray_address(self, ray_start_stop): with set_env_var("RAY_ADDRESS", None): stdout, _ = _run_cmd("ray job submit -- echo hello") assert "hello" in stdout assert "succeeded" in stdout @pytest.mark.parametrize( "ray_client_address", ["127.0.0.1:8265", "ray://127.0.0.1:8265"] ) def test_ray_client_address(self, ray_start_stop, ray_client_address: str): with set_env_var("RAY_ADDRESS", ray_client_address): _run_cmd("ray job submit -- echo hello", should_fail=True) def test_valid_http_ray_address(self, ray_start_stop): stdout, _ = _run_cmd("ray job submit -- echo hello") assert "hello" in stdout assert "succeeded" in stdout class TestJobSubmit: def test_basic_submit(self, ray_start_stop): """Should tail logs and wait for process to exit.""" cmd = "sleep 1 && echo hello && sleep 1 && echo hello" stdout, _ = _run_cmd(f"ray job submit -- bash -c '{cmd}'") assert "hello\nhello" in stdout assert "succeeded" in stdout def test_submit_no_wait(self, ray_start_stop): """Should exit immediately w/o printing logs.""" cmd = "echo hello && sleep 1000" stdout, _ = _run_cmd(f"ray job submit --no-wait -- bash -c '{cmd}'") assert "hello" not in stdout assert "Tailing logs until the job exits" not in stdout class TestJobStop: def test_basic_stop(self, ray_start_stop): """Should wait until the job is stopped.""" cmd = "sleep 1000" job_id = "test_basic_stop" _run_cmd(f"ray job submit --no-wait --job-id={job_id} -- {cmd}") stdout, _ = _run_cmd(f"ray job stop {job_id}") assert "Waiting for job" in stdout assert f"Job '{job_id}' was stopped" in stdout def test_stop_no_wait(self, ray_start_stop): """Should not wait until the job is stopped.""" cmd = "echo hello && sleep 1000" job_id = "test_stop_no_wait" _run_cmd(f"ray job submit --no-wait --job-id={job_id} -- bash -c '{cmd}'") stdout, _ = _run_cmd(f"ray job stop --no-wait {job_id}") assert "Waiting for job" not in stdout assert f"Job '{job_id}' was stopped" not in stdout class TestJobList: def test_empty(self, ray_start_stop): stdout, _ = _run_cmd("ray job list") assert "{}" in stdout def test_list(self, ray_start_stop): _run_cmd("ray job submit --job-id='hello_id' -- echo hello") runtime_env = {"env_vars": {"TEST": "123"}} _run_cmd( "ray job submit --job-id='hi_id' " f"--runtime-env-json='{json.dumps(runtime_env)}' -- echo hi" ) stdout, _ = _run_cmd("ray job list") assert "JobInfo" in stdout assert "123" in stdout assert "hello_id" in stdout assert "hi_id" in stdout def test_quote_escaping(ray_start_stop): cmd = "echo \"hello 'world'\"" job_id = "test_quote_escaping" stdout, _ = _run_cmd( f"ray job submit --job-id={job_id} -- {cmd}", ) assert "hello 'world'" in stdout if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__]))
31.294118
84
0.626111
from contextlib import contextmanager import json import os import logging import sys import subprocess from typing import Optional, Tuple import pytest logger = logging.getLogger(__name__) @contextmanager def set_env_var(key: str, val: Optional[str] = None): old_val = os.environ.get(key, None) if val is not None: os.environ[key] = val elif key in os.environ: del os.environ[key] yield if key in os.environ: del os.environ[key] if old_val is not None: os.environ[key] = old_val @pytest.fixture def ray_start_stop(): subprocess.check_output(["ray", "start", "--head"]) try: with set_env_var("RAY_ADDRESS", "http://127.0.0.1:8265"): yield finally: subprocess.check_output(["ray", "stop", "--force"]) @contextmanager def ray_cluster_manager(): subprocess.check_output(["ray", "start", "--head"]) try: yield finally: subprocess.check_output(["ray", "stop", "--force"]) def _run_cmd(cmd: str, should_fail=False) -> Tuple[str, str]: print(f"Running command: '{cmd}'") p: subprocess.CompletedProcess = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) if p.returncode == 0: print("Command succeeded.") if should_fail: raise RuntimeError( f"Expected command to fail, but got exit code: {p.returncode}." ) else: print(f"Command failed with exit code: {p.returncode}.") if not should_fail: raise RuntimeError( f"Expected command to succeed, but got exit code: {p.returncode}." ) return p.stdout.decode("utf-8"), p.stderr.decode("utf-8") class TestJobSubmitHook: def test_hook(self, ray_start_stop): with set_env_var("RAY_JOB_SUBMIT_HOOK", "ray._private.test_utils.job_hook"): stdout, _ = _run_cmd("ray job submit -- echo hello") assert "hook intercepted: echo hello" in stdout class TestRayAddress: def test_empty_ray_address(self, ray_start_stop): with set_env_var("RAY_ADDRESS", None): stdout, _ = _run_cmd("ray job submit -- echo hello") assert "hello" in stdout assert "succeeded" in stdout @pytest.mark.parametrize( "ray_client_address", ["127.0.0.1:8265", "ray://127.0.0.1:8265"] ) def test_ray_client_address(self, ray_start_stop, ray_client_address: str): with set_env_var("RAY_ADDRESS", ray_client_address): _run_cmd("ray job submit -- echo hello", should_fail=True) def test_valid_http_ray_address(self, ray_start_stop): stdout, _ = _run_cmd("ray job submit -- echo hello") assert "hello" in stdout assert "succeeded" in stdout class TestJobSubmit: def test_basic_submit(self, ray_start_stop): cmd = "sleep 1 && echo hello && sleep 1 && echo hello" stdout, _ = _run_cmd(f"ray job submit -- bash -c '{cmd}'") assert "hello\nhello" in stdout assert "succeeded" in stdout def test_submit_no_wait(self, ray_start_stop): cmd = "echo hello && sleep 1000" stdout, _ = _run_cmd(f"ray job submit --no-wait -- bash -c '{cmd}'") assert "hello" not in stdout assert "Tailing logs until the job exits" not in stdout class TestJobStop: def test_basic_stop(self, ray_start_stop): cmd = "sleep 1000" job_id = "test_basic_stop" _run_cmd(f"ray job submit --no-wait --job-id={job_id} -- {cmd}") stdout, _ = _run_cmd(f"ray job stop {job_id}") assert "Waiting for job" in stdout assert f"Job '{job_id}' was stopped" in stdout def test_stop_no_wait(self, ray_start_stop): cmd = "echo hello && sleep 1000" job_id = "test_stop_no_wait" _run_cmd(f"ray job submit --no-wait --job-id={job_id} -- bash -c '{cmd}'") stdout, _ = _run_cmd(f"ray job stop --no-wait {job_id}") assert "Waiting for job" not in stdout assert f"Job '{job_id}' was stopped" not in stdout class TestJobList: def test_empty(self, ray_start_stop): stdout, _ = _run_cmd("ray job list") assert "{}" in stdout def test_list(self, ray_start_stop): _run_cmd("ray job submit --job-id='hello_id' -- echo hello") runtime_env = {"env_vars": {"TEST": "123"}} _run_cmd( "ray job submit --job-id='hi_id' " f"--runtime-env-json='{json.dumps(runtime_env)}' -- echo hi" ) stdout, _ = _run_cmd("ray job list") assert "JobInfo" in stdout assert "123" in stdout assert "hello_id" in stdout assert "hi_id" in stdout def test_quote_escaping(ray_start_stop): cmd = "echo \"hello 'world'\"" job_id = "test_quote_escaping" stdout, _ = _run_cmd( f"ray job submit --job-id={job_id} -- {cmd}", ) assert "hello 'world'" in stdout if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__]))
true
true
f70d8714e61a4d5271dd1487b84f2e80f3c91e72
1,783
py
Python
src/tests/ftest/util/check_for_pool.py
gczsjdy/daos
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
[ "Apache-2.0" ]
null
null
null
src/tests/ftest/util/check_for_pool.py
gczsjdy/daos
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
[ "Apache-2.0" ]
null
null
null
src/tests/ftest/util/check_for_pool.py
gczsjdy/daos
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python ''' (C) Copyright 2017-2019 Intel Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE The Government's rights to use, modify, reproduce, release, perform, display, or disclose this software are subject to the terms of the Apache License as provided in Contract No. B609815. Any reproduction of computer software, computer software documentation, or portions thereof marked with this legend must also reproduce the markings. ''' from __future__ import print_function import subprocess def check_for_pool(host, uuid): """ Function to check if pool folder exist on server Args: host: Server host name uuid: Pool uuid to check if exists return: resp: subprocess return code """ cmd = "test -e /mnt/daos/" + uuid resp = subprocess.call(["ssh", host, cmd]) if resp == 0: print ('%s exists' %uuid) else: print ('%s does not exist' %uuid) return resp def cleanup_pools(hosts): """ To cleanup the pool and content from /mnt/daps/ Args: hosts[list]: Lists of servers name return" None """ for host in hosts: cmd = "rm -rf /mnt/daos/*" subprocess.call(["ssh", host, cmd])
31.839286
79
0.690409
from __future__ import print_function import subprocess def check_for_pool(host, uuid): cmd = "test -e /mnt/daos/" + uuid resp = subprocess.call(["ssh", host, cmd]) if resp == 0: print ('%s exists' %uuid) else: print ('%s does not exist' %uuid) return resp def cleanup_pools(hosts): for host in hosts: cmd = "rm -rf /mnt/daos/*" subprocess.call(["ssh", host, cmd])
true
true
f70d8858c160355ce2e4c4e67267abae44d1517d
930
py
Python
tests/rules/test_git_rebase_no_changes.py
HiteshMah-Jan/thefuck
132c62262246824470934c2c6f46919ef6f00203
[ "MIT" ]
75,504
2015-04-08T18:22:19.000Z
2022-03-31T23:59:52.000Z
tests/rules/test_git_rebase_no_changes.py
HiteshMah-Jan/thefuck
132c62262246824470934c2c6f46919ef6f00203
[ "MIT" ]
1,160
2015-04-17T18:47:12.000Z
2022-03-30T20:42:26.000Z
tests/rules/test_git_rebase_no_changes.py
HiteshMah-Jan/thefuck
132c62262246824470934c2c6f46919ef6f00203
[ "MIT" ]
4,399
2015-04-17T18:36:04.000Z
2022-03-31T07:01:03.000Z
import pytest from thefuck.rules.git_rebase_no_changes import match, get_new_command from thefuck.types import Command @pytest.fixture def output(): return '''Applying: Test commit No changes - did you forget to use 'git add'? If there is nothing left to stage, chances are that something else already introduced the same changes; you might want to skip this patch. When you have resolved this problem, run "git rebase --continue". If you prefer to skip this patch, run "git rebase --skip" instead. To check out the original branch and stop rebasing, run "git rebase --abort". ''' def test_match(output): assert match(Command('git rebase --continue', output)) assert not match(Command('git rebase --continue', '')) assert not match(Command('git rebase --skip', '')) def test_get_new_command(output): assert (get_new_command(Command('git rebase --continue', output)) == 'git rebase --skip')
32.068966
77
0.727957
import pytest from thefuck.rules.git_rebase_no_changes import match, get_new_command from thefuck.types import Command @pytest.fixture def output(): return '''Applying: Test commit No changes - did you forget to use 'git add'? If there is nothing left to stage, chances are that something else already introduced the same changes; you might want to skip this patch. When you have resolved this problem, run "git rebase --continue". If you prefer to skip this patch, run "git rebase --skip" instead. To check out the original branch and stop rebasing, run "git rebase --abort". ''' def test_match(output): assert match(Command('git rebase --continue', output)) assert not match(Command('git rebase --continue', '')) assert not match(Command('git rebase --skip', '')) def test_get_new_command(output): assert (get_new_command(Command('git rebase --continue', output)) == 'git rebase --skip')
true
true
f70d88b536f91eeb75cbe4c3aa4f91d5651da5c2
994
py
Python
psets/pset7/houses/roster.py
malgulam/cs50x
e394f1dab3cbfc2a0c11f840fe8fb9dc8cd2b98b
[ "Apache-2.0" ]
null
null
null
psets/pset7/houses/roster.py
malgulam/cs50x
e394f1dab3cbfc2a0c11f840fe8fb9dc8cd2b98b
[ "Apache-2.0" ]
null
null
null
psets/pset7/houses/roster.py
malgulam/cs50x
e394f1dab3cbfc2a0c11f840fe8fb9dc8cd2b98b
[ "Apache-2.0" ]
null
null
null
# TODO import sys from sys import argv import sqlite3 if len(argv) != 2: print("Usage: python roster.py Gryffindor") sys.exit(1) #setting house choice house_choice = argv[1].lower() #working on database db_file = 'students.db' conn = sqlite3.connect(db_file) c = conn.cursor() #connect to db and retrieve house names #todo: get rid of DISTINCT c.execute('''SELECT DISTINCT house from students''') houses = c.fetchall() if house_choice not in houses: print(f'{house_choice} not house in houses.Houses are: {houses}') #retrieve name and birth of persons in that house c.execute(f'''SELECT first, middle, last, birth FROM students WHERE lower(house)="{house_choice}" ORDER BY last, first''') roster = c.fetchall() #since the middle name will be None if not present it's easier to append with each row names_ = list for row in roster: if row[1] != None: print(f'{row[0]} {row[1]} {row[2]} born, {row[3]}') else: print(f'{row[0]} {row[2]} born, {row[3]}')
27.611111
122
0.688129
import sys from sys import argv import sqlite3 if len(argv) != 2: print("Usage: python roster.py Gryffindor") sys.exit(1) house_choice = argv[1].lower() db_file = 'students.db' conn = sqlite3.connect(db_file) c = conn.cursor() c.execute('''SELECT DISTINCT house from students''') houses = c.fetchall() if house_choice not in houses: print(f'{house_choice} not house in houses.Houses are: {houses}') c.execute(f'''SELECT first, middle, last, birth FROM students WHERE lower(house)="{house_choice}" ORDER BY last, first''') roster = c.fetchall() names_ = list for row in roster: if row[1] != None: print(f'{row[0]} {row[1]} {row[2]} born, {row[3]}') else: print(f'{row[0]} {row[2]} born, {row[3]}')
true
true
f70d8a3d6e019d9852325e39076f9a22e675d4f4
3,833
py
Python
python/base16vlq.py
milahu/random-0bsd
ec031ad1f3e405893ead8954dfab16aecd07f809
[ "MIT" ]
null
null
null
python/base16vlq.py
milahu/random-0bsd
ec031ad1f3e405893ead8954dfab16aecd07f809
[ "MIT" ]
null
null
null
python/base16vlq.py
milahu/random-0bsd
ec031ad1f3e405893ead8954dfab16aecd07f809
[ "MIT" ]
null
null
null
""" base16vlq.py base16 unsigned variable length quantity (VLQ) based on https://gist.github.com/mjpieters/86b0d152bb51d5f5979346d11005588b https://github.com/Rich-Harris/vlq to encode *signed* integers, we would need _abc_len == 17 python -c $'from base16vlq import encode\nfor n in range(0, 64):\n print(f"{n:3d} {encode(n):<3s} ", end="")\n if (n+1) % 8 == 0:\n print()' _shift_size = 3 _carry_flag = 8 = 1000 = 2^3 _mask = 7 = 111 = 2^3-1 _len_abc = 16 = 10000 = 2^4 _bytemax = 15 = 1111 = 2^4-1 _abc_chars = (,:<[$*?)~=>]@&% 0 ( 1 , 2 : 3 < 4 [ 5 $ 6 * 7 ? 8 ), 9 ~, 10 =, 11 >, 12 ], 13 @, 14 &, 15 %, 16 ): 17 ~: 18 =: 19 >: 20 ]: 21 @: 22 &: 23 %: 24 )< 25 ~< 26 =< 27 >< 28 ]< 29 @< 30 &< 31 %< 32 )[ 33 ~[ 34 =[ 35 >[ 36 ][ 37 @[ 38 &[ 39 %[ 40 )$ 41 ~$ 42 =$ 43 >$ 44 ]$ 45 @$ 46 &$ 47 %$ 48 )* 49 ~* 50 =* 51 >* 52 ]* 53 @* 54 &* 55 %* 56 )? 57 ~? 58 =? 59 >? 60 ]? 61 @? 62 &? 63 %? """ from typing import List _abc_chars = b"""(,:<[$*?)~=>]@&%""" # 0123456701234567 # remaining special chars: {}#"'^`;| _abc_table = [None] * (max(_abc_chars) + 1) for i, b in enumerate(_abc_chars): _abc_table[b] = i #_shift_size = 5 # base64 _shift_size = 3 # base16 # one bit is needed for the carry_flag _carry_flag = 1 << _shift_size _mask = (1 << _shift_size) - 1 # 2^4-1 = 15 _bytemax = _mask | _carry_flag _len_abc = _bytemax + 1 # unsigned #_len_abc = _bytemax + 2 # signed? if False: print(f"_shift_size = {_shift_size}") print(f"_carry_flag = {_carry_flag}") print(f"_mask = {_mask}") print(f"_bytemax = {_bytemax}") print(f"_abc_chars = {_abc_chars.decode()}") print(f"_len_abc = {_len_abc}") assert len(_abc_chars) == _len_abc def decode(vlq_code: str) -> List[int]: """Decode Base16 VLQ value""" num_list = [] shift_size, carry_flag, mask = _shift_size, _carry_flag, _mask shift = num = 0 # use byte values and a table to go from base16 characters to integers for clamped in map(_abc_table.__getitem__, vlq_code.encode("ascii")): num += (clamped & mask) << shift if clamped & carry_flag: shift += shift_size continue ## read sign bit #num_sign = -1 if (num & 1) else +1 #num = (num >> 1) * num_sign num_list.append(num) shift = num = 0 return num_list def encode(*num_list: int) -> str: """Encode integers to a VLQ value""" clamped_list = [] shift_size = _shift_size carry_flag = _carry_flag mask = _mask for num in num_list: ## write sign bit #num = (abs(num) << 1) | int(num < 0) if type(num) != int or num < 0: raise ValueError("num must be unsigned integer") while True: clamped = num & mask num = num >> shift_size if num > 0: clamped = clamped | carry_flag clamped_list.append(clamped) if num == 0: break return bytes(map(_abc_chars.__getitem__, clamped_list)).decode() # python -c 'from base16vlq import _test; _test()' def _test(): """throws on error""" for num in range(0, 1024): arr1 = [num, num] code = encode(*arr1) arr2 = decode(code) if not arr1 == arr2: print(f"arr1 = {arr1}") print(f"code = {code}") print(f"arr2 = {arr2}") assert arr1 == arr2 assert decode(encode(1234))[0] == 1234 try: encode(-1) except ValueError: pass try: encode(1.1) except ValueError: pass try: encode("a") except ValueError: pass
29.945313
147
0.525959
from typing import List _abc_chars = b"""(,:<[$*?)~=>]@&%""" table = [None] * (max(_abc_chars) + 1) for i, b in enumerate(_abc_chars): _abc_table[b] = i #_shift_size = 5 # base64 _shift_size = 3 # base16 # one bit is needed for the carry_flag _carry_flag = 1 << _shift_size _mask = (1 << _shift_size) - 1 # 2^4-1 = 15 _bytemax = _mask | _carry_flag _len_abc = _bytemax + 1 # unsigned #_len_abc = _bytemax + 2 # signed? if False: print(f"_shift_size = {_shift_size}") print(f"_carry_flag = {_carry_flag}") print(f"_mask = {_mask}") print(f"_bytemax = {_bytemax}") print(f"_abc_chars = {_abc_chars.decode()}") print(f"_len_abc = {_len_abc}") assert len(_abc_chars) == _len_abc def decode(vlq_code: str) -> List[int]: num_list = [] shift_size, carry_flag, mask = _shift_size, _carry_flag, _mask shift = num = 0 # use byte values and a table to go from base16 characters to integers for clamped in map(_abc_table.__getitem__, vlq_code.encode("ascii")): num += (clamped & mask) << shift if clamped & carry_flag: shift += shift_size continue ## read sign bit #num_sign = -1 if (num & 1) else +1 #num = (num >> 1) * num_sign num_list.append(num) shift = num = 0 return num_list def encode(*num_list: int) -> str: clamped_list = [] shift_size = _shift_size carry_flag = _carry_flag mask = _mask for num in num_list: ## write sign bit #num = (abs(num) << 1) | int(num < 0) if type(num) != int or num < 0: raise ValueError("num must be unsigned integer") while True: clamped = num & mask num = num >> shift_size if num > 0: clamped = clamped | carry_flag clamped_list.append(clamped) if num == 0: break return bytes(map(_abc_chars.__getitem__, clamped_list)).decode() # python -c 'from base16vlq import _test; _test()' def _test(): for num in range(0, 1024): arr1 = [num, num] code = encode(*arr1) arr2 = decode(code) if not arr1 == arr2: print(f"arr1 = {arr1}") print(f"code = {code}") print(f"arr2 = {arr2}") assert arr1 == arr2 assert decode(encode(1234))[0] == 1234 try: encode(-1) except ValueError: pass try: encode(1.1) except ValueError: pass try: encode("a") except ValueError: pass
true
true
f70d8bd6655f06912e9d1f9d9456dc8e6c6ab973
15,880
py
Python
cnld/util.py
bdshieh/cnl-dyna
9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b
[ "MIT" ]
3
2020-07-08T14:42:50.000Z
2021-11-12T06:11:15.000Z
cnld/util.py
bdshieh/cnl-dyna
9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b
[ "MIT" ]
null
null
null
cnld/util.py
bdshieh/cnl-dyna
9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b
[ "MIT" ]
null
null
null
''' Utility functions. ''' import argparse import functools import itertools import os import sqlite3 as sql from contextlib import closing from copy import deepcopy from itertools import repeat import numpy as np import pandas as pd import scipy as sp import scipy.fftpack import scipy.signal from cnld import abstract from scipy.spatial.distance import cdist ''' GEOMETRY-RELATED FUNCTIONS ''' def meshview(v1, v2, v3, mode='cartesian', as_list=True): ''' ''' if mode.lower() in ('cart', 'cartesian'): x, y, z = np.meshgrid(v1, v2, v3, indexing='ij') elif mode.lower() in ('sph', 'spherical'): r, theta, phi = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij') x, y, z = sph2cart(r, theta, phi) elif mode.lower() in ('sec', 'sector'): r, alpha, beta = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij') x, y, z = sec2cart(r, alpha, beta) elif mode.lower() in ('dp', 'dpolar'): r, alpha, beta = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij') x, y, z = dp2cart(r, alpha, beta) if as_list: return np.c_[x.ravel('F'), y.ravel('F'), z.ravel('F')] else: return x, y, z def sec2cart(r, alpha, beta): ''' ''' z = r / np.sqrt(np.tan(alpha)**2 + np.tan(beta)**2 + 1) x = z * np.tan(alpha) y = z * np.tan(beta) # alpha_p = np.arctan(np.tan(alpha) * np.cos(beta)) # x = np.sin(alpha_p) * r # y = -np.sin(beta) * r * np.cos(alpha_p) # z = np.sqrt(r**2 - x**2 - y**2) # px = -px # pyp = np.arctan(np.cos(px) * np.sin(py) / np.cos(py)) # x = r * np.sin(pyp) # y = -r * np.cos(pyp) * np.sin(px) # z = r * np.cos(px) * np.cos(pyp) return x, y, z def cart2sec(x, y, z): ''' ''' r = np.sqrt(x**2 + y**2 + z**2) alpha = np.arccos(z / (np.sqrt(x**2 + z**2))) * np.sign(x) beta = np.arccos(z / (np.sqrt(y**2 + z**2))) * np.sign(y) # r = np.sqrt(x**2 + y**2 + z**2) # alpha_p = np.arcsin(x / r) # beta = -np.arcsin(-y / r / np.cos(alpha_p)) # alpha = np.arctan(np.tan(alpha_p) / np.cos(beta)) return r, alpha, beta def sph2cart(r, theta, phi): ''' ''' x = r * np.cos(theta) * np.sin(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(phi) return x, y, z def cart2sph(x, y, z): ''' ''' r = np.sqrt(x**2 + y**2 + z**2) theta = np.arctan(y / x) phi = np.arccos(z / r) return r, theta, phi def cart2dp(x, y, z): ''' ''' r = np.sqrt(x**2 + y**2 + z**2) alpha = np.arccos((np.sqrt(y**2 + z**2) / r)) beta = np.arccos((np.sqrt(x**2 + z**2) / r)) return r, alpha, beta def dp2cart(r, alpha, beta): ''' ''' z = r * (1 - np.sin(alpha)**2 - np.sin(beta)**2) x = r * np.sin(alpha) y = r * np.sin(beta) return x, y, z def rotation_matrix(vec, angle): ''' ''' if isinstance(vec, str): string = vec.lower() if string == 'x': vec = [1, 0, 0] elif string == '-x': vec = [-1, 0, 0] elif string == 'y': vec = [0, 1, 0] elif string == '-y': vec = [0, -1, 0] elif string == 'z': vec = [0, 0, 1] elif string == '-x': vec = [0, 0, -1] x, y, z = vec a = angle r = np.zeros((3, 3)) r[0, 0] = np.cos(a) + x**2 * (1 - np.cos(a)) r[0, 1] = x * y * (1 - np.cos(a)) - z * np.sin(a) r[0, 2] = x * z * (1 - np.cos(a)) + y * np.sin(a) r[1, 0] = y * x * (1 - np.cos(a)) + z * np.sin(a) r[1, 1] = np.cos(a) + y**2 * (1 - np.cos(a)) r[1, 2] = y * z * (1 - np.cos(a)) - x * np.sin(a) r[2, 0] = z * x * (1 - np.cos(a)) - z * np.sin(a) r[2, 1] = z * y * (1 - np.cos(a)) + x * np.sin(a) r[2, 2] = np.cos(a) + z**2 * (1 - np.cos(a)) return r def rotate_nodes(nodes, vec, angle): ''' ''' rmatrix = rotation_matrix(vec, angle) return rmatrix.dot(nodes.T).T def distance(*args): ''' ''' return cdist(*np.atleast_2d(*args)) ''' SIGNAL PROCESSING AND RF DATA FUNCTIONS ''' def gausspulse(fc, fbw, fs): ''' ''' cutoff = scipy.signal.gausspulse('cutoff', fc=fc, bw=fbw, tpr=-100, bwr=-3) adj_cutoff = np.ceil(cutoff * fs) / fs t = np.arange(-adj_cutoff, adj_cutoff + 1 / fs, 1 / fs) pulse, _ = sp.signal.gausspulse(t, fc=fc, bw=fbw, retquad=True, bwr=-3) return pulse, t def nextpow2(n): ''' ''' return 2**int(np.ceil(np.log2(n))) def envelope(rf_data, N=None, axis=-1): ''' ''' return np.abs(scipy.signal.hilbert(np.atleast_2d(rf_data), N, axis=axis)) def qbutter(x, fn, fs=1, btype='lowpass', n=4, plot=False, axis=-1): ''' ''' wn = fn / (fs / 2.) b, a = sp.signal.butter(n, wn, btype) fx = sp.signal.lfilter(b, a, x, axis=axis) return fx def qfirwin(x, fn, fs=1, btype='lowpass', ntaps=80, plot=False, axis=-1, window='hamming'): ''' ''' if btype.lower() in ('lowpass', 'low'): pass_zero = 1 elif btype.lower() in ('bandpass', 'band'): pass_zero = 0 elif btype.lower() in ('highpass', 'high'): pass_zero = 0 wn = fn / (fs / 2.) b = sp.signal.firwin(ntaps, wn, pass_zero=pass_zero, window=window) fx = np.apply_along_axis(lambda x: np.convolve(x, b), axis, x) return fx def qfft(s, nfft=None, fs=1, dr=100, fig=None, **kwargs): ''' Quick FFT plot. Returns frequency bins and FFT in dB. ''' s = np.atleast_2d(s) nsig, nsample = s.shape if nfft is None: nfft = nsample # if fig is None: # fig = plt.figure(tight_layout=1) # ax = fig.add_subplot(111) # else: # ax = fig.get_axes()[0] if nfft > nsample: s = np.pad(s, ((0, 0), (0, nfft - nsample)), mode='constant') elif nfft < nsample: s = s[:, :nfft] ft = sp.fftpack.fft(s, axis=1) freqs = sp.fftpack.fftfreq(nfft, 1 / fs) ftdb = 20 * np.log10(np.abs(ft) / (np.max(np.abs(ft), axis=1)[..., None])) ftdb[ftdb < -dr] = -dr cutoff = (nfft + 1) // 2 # ax.plot(freqs[:cutoff], ftdb[:, :cutoff].T, **kwargs) # ax.set_xlabel('Frequency (Hz)') # ax.set_ylabel('Magnitude (dB re max)') # fig.show() return freqs[:cutoff], ftdb[:, :cutoff] ''' JOB-RELATED FUNCTIONS ''' def chunks(iterable, n): res = [] for el in iterable: res.append(el) if len(res) == n: yield res res = [] if res: yield res def create_jobs(*args, mode='zip', is_complete=None): ''' Convenience function for creating jobs (sets of input arguments) for multiprocessing Pool. Supports zip and product combinations, and automatic chunking of iterables. ''' static_args = list() static_idx = list() iterable_args = list() iterable_idx = list() for arg_no, arg in enumerate(args): if isinstance(arg, (tuple, list)): iterable, chunksize = arg if chunksize == 1: iterable_args.append(iterable) else: iterable_args.append(chunks(iterable, chunksize)) iterable_idx.append(arg_no) else: static_args.append(itertools.repeat(arg)) static_idx.append(arg_no) if not iterable_args and not static_args: return if not iterable_args: yield 1, tuple(args[i] for i in static_idx) if not static_args: repeats = itertools.repeat(()) else: repeats = zip(*static_args) if mode.lower() == 'product': combos = itertools.product(*iterable_args) elif mode.lower() == 'zip': combos = zip(*iterable_args) elif mode.lower() == 'zip_longest': combos = itertools.zip_longest(*iterable_args) for job_id, (r, p) in enumerate(zip(repeats, combos)): # skip jobs that have been completed if is_complete is not None and is_complete[job_id]: continue res = r + p # reorder vals according to input order yield job_id + 1, tuple(res[i] for i in np.argsort(static_idx + iterable_idx)) ''' DATABASE FUNCTIONS ''' def open_db(f): def decorator(firstarg, *args, **kwargs): if isinstance(firstarg, sql.Connection): return f(firstarg, *args, **kwargs) else: # if os.path.isfile(firstarg): with closing(sql.connect(firstarg)) as con: return f(con, *args, **kwargs) # else: # raise IOError return decorator def read_db(f): def decorator(firstarg, *args, **kwargs): if isinstance(firstarg, sql.Connection): return f(firstarg, *args, **kwargs) else: if os.path.isfile(firstarg): with closing(sql.connect(firstarg)) as con: return f(con, *args, **kwargs) else: raise IOError('File does not exist') return decorator @open_db def table_exists(con, name): query = '''SELECT count(*) FROM sqlite_master WHERE type='table' and name=?''' return con.execute(query, (name, )).fetchone()[0] != 0 @open_db def create_metadata_table(con, **kwargs): table = [[str(v) for v in list(kwargs.values())]] columns = list(kwargs.keys()) pd.DataFrame(table, columns=columns, dtype=str).to_sql('metadata', con, if_exists='replace', index=False) @open_db def create_progress_table(con, njobs): with con: # create table con.execute( 'CREATE TABLE progress (job_id INTEGER PRIMARY KEY, is_complete boolean)') # insert values con.executemany('INSERT INTO progress (is_complete) VALUES (?)', repeat((False, ), njobs)) @open_db def get_progress(con): table = pd.read_sql('SELECT is_complete FROM progress ORDER BY job_id', con) is_complete = np.array(table).squeeze() ijob = sum(is_complete) + 1 return is_complete, ijob @open_db def update_progress(con, job_id): with con: con.execute('UPDATE progress SET is_complete=1 WHERE job_id=?', [ job_id, ]) ''' SCRIPTING FUNCTIONS ''' def script_parser(main, config_def): ''' General script command-line interface with 'config' and 'run' subcommands. ''' if isinstance(config_def, dict): # create config abstract type based on supplied dict Config = abstract.register_type('Config', config_def) else: # config abstract type already defined Config = config_def # config subcommand generates a default configuration template def config(args): if args.file: abstract.dump(Config(), args.file) else: print(Config()) # run subcommand will load the config file and pass to main def run(args): if args.config: cfg = Config(**abstract.load(args.config)) else: cfg = Config() return main(cfg, args) # create argument parser parser = argparse.ArgumentParser() # define config subparser subparsers = parser.add_subparsers(help='sub-command help') config_parser = subparsers.add_parser('config', help='config_help') config_parser.add_argument('-f', '--file', nargs='?') config_parser.set_defaults(func=config) # define run subparser run_parser = subparsers.add_parser('run', help='run_help') run_parser.add_argument('config', nargs='?') run_parser.add_argument('-f', '--file', nargs='?') run_parser.add_argument('-t', '--threads', nargs='?', type=int) run_parser.add_argument('-w', '--write-over', action='store_true') run_parser.set_defaults(func=run) return parser, run_parser def script_parser2(main, config_def): ''' General script command-line interface with 'config' and 'run' subcommands. ''' if isinstance(config_def, dict): # create config abstract type based on supplied dict Config = abstract.register_type('Config', config_def) else: # config abstract type already defined Config = config_def # run def run(args): if args.show_config: print(Config()) return if args.generate_config: abstract.dump(Config(), args.generate_config) return if args.file: if args.config: cfg = Config(**abstract.load(args.config)) else: cfg = Config() return main(cfg, args) # create argument parser parser = argparse.ArgumentParser() parser.add_argument('-g', '--generate-config') parser.add_argument('-s', '--show-config', action='store_true') parser.add_argument('file', nargs='?') parser.add_argument('-c', '--config') parser.add_argument('-t', '--threads', type=int) parser.add_argument('-w', '--write-over', action='store_true') parser.set_defaults(func=run) return parser ''' MISC FUNCTIONS ''' def memoize_old(func): ''' Simple memoizer to cache repeated function calls. ''' def ishashable(obj): try: hash(obj) except TypeError: return False return True def make_hashable(obj): if not ishashable(obj): # use tostring on ndarray since str returns truncated output if isinstance(obj, np.ndarray): return obj.tostring() return str(obj) # round float arguments to avoid round-off error affecting cache if isinstance(obj, float): return round(obj, 18) return obj memo = {} @functools.wraps(func) def decorator(*args, **kwargs): # key = tuple(make_hashable(a) for a in args) key = (tuple(make_hashable(a) for a in args), tuple((k, make_hashable(v)) for k, v in sorted(kwargs.items()))) if key not in memo: memo[key] = func(*args, **kwargs) # return a deep copy to avoid issues with mutable return objects return deepcopy(memo[key]) return decorator def memoize(func, maxsize=20): ''' Simple memoizer to cache repeated function calls. ''' def ishashable(obj): try: hash(obj) except TypeError: return False return True def make_hashable(obj): if hasattr(obj, '_memoize'): return obj._memoize() if not ishashable(obj): # use tostring on ndarray since str returns truncated output if isinstance(obj, np.ndarray): return obj.tostring() return str(obj) # round float arguments to avoid round-off error affecting cache if isinstance(obj, float): return round(obj, 18) return obj func.memo = {} @functools.wraps(func) def decorator(*args, **kwargs): # key = tuple(make_hashable(a) for a in args) key = (tuple(make_hashable(a) for a in args), tuple((k, make_hashable(v)) for k, v in sorted(kwargs.items()))) if key not in func.memo: if len(func.memo) > maxsize: return func(*args, **kwargs) else: func.memo[key] = func(*args, **kwargs) # return a deep copy to avoid issues with mutable return objects return deepcopy(func.memo[key]) return decorator class Counter: def __init__(self): self.count = 0 def increment(self, *args, **kwargs): self.count += 1 def decrement(self, *args, **kwargs): self.count -= 1
26.466667
87
0.555038
import argparse import functools import itertools import os import sqlite3 as sql from contextlib import closing from copy import deepcopy from itertools import repeat import numpy as np import pandas as pd import scipy as sp import scipy.fftpack import scipy.signal from cnld import abstract from scipy.spatial.distance import cdist def meshview(v1, v2, v3, mode='cartesian', as_list=True): if mode.lower() in ('cart', 'cartesian'): x, y, z = np.meshgrid(v1, v2, v3, indexing='ij') elif mode.lower() in ('sph', 'spherical'): r, theta, phi = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij') x, y, z = sph2cart(r, theta, phi) elif mode.lower() in ('sec', 'sector'): r, alpha, beta = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij') x, y, z = sec2cart(r, alpha, beta) elif mode.lower() in ('dp', 'dpolar'): r, alpha, beta = np.meshgrid(v1, np.deg2rad(v2), np.deg2rad(v3), indexing='ij') x, y, z = dp2cart(r, alpha, beta) if as_list: return np.c_[x.ravel('F'), y.ravel('F'), z.ravel('F')] else: return x, y, z def sec2cart(r, alpha, beta): z = r / np.sqrt(np.tan(alpha)**2 + np.tan(beta)**2 + 1) x = z * np.tan(alpha) y = z * np.tan(beta) return x, y, z def cart2sec(x, y, z): r = np.sqrt(x**2 + y**2 + z**2) alpha = np.arccos(z / (np.sqrt(x**2 + z**2))) * np.sign(x) beta = np.arccos(z / (np.sqrt(y**2 + z**2))) * np.sign(y) return r, alpha, beta def sph2cart(r, theta, phi): x = r * np.cos(theta) * np.sin(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(phi) return x, y, z def cart2sph(x, y, z): r = np.sqrt(x**2 + y**2 + z**2) theta = np.arctan(y / x) phi = np.arccos(z / r) return r, theta, phi def cart2dp(x, y, z): r = np.sqrt(x**2 + y**2 + z**2) alpha = np.arccos((np.sqrt(y**2 + z**2) / r)) beta = np.arccos((np.sqrt(x**2 + z**2) / r)) return r, alpha, beta def dp2cart(r, alpha, beta): z = r * (1 - np.sin(alpha)**2 - np.sin(beta)**2) x = r * np.sin(alpha) y = r * np.sin(beta) return x, y, z def rotation_matrix(vec, angle): if isinstance(vec, str): string = vec.lower() if string == 'x': vec = [1, 0, 0] elif string == '-x': vec = [-1, 0, 0] elif string == 'y': vec = [0, 1, 0] elif string == '-y': vec = [0, -1, 0] elif string == 'z': vec = [0, 0, 1] elif string == '-x': vec = [0, 0, -1] x, y, z = vec a = angle r = np.zeros((3, 3)) r[0, 0] = np.cos(a) + x**2 * (1 - np.cos(a)) r[0, 1] = x * y * (1 - np.cos(a)) - z * np.sin(a) r[0, 2] = x * z * (1 - np.cos(a)) + y * np.sin(a) r[1, 0] = y * x * (1 - np.cos(a)) + z * np.sin(a) r[1, 1] = np.cos(a) + y**2 * (1 - np.cos(a)) r[1, 2] = y * z * (1 - np.cos(a)) - x * np.sin(a) r[2, 0] = z * x * (1 - np.cos(a)) - z * np.sin(a) r[2, 1] = z * y * (1 - np.cos(a)) + x * np.sin(a) r[2, 2] = np.cos(a) + z**2 * (1 - np.cos(a)) return r def rotate_nodes(nodes, vec, angle): rmatrix = rotation_matrix(vec, angle) return rmatrix.dot(nodes.T).T def distance(*args): return cdist(*np.atleast_2d(*args)) def gausspulse(fc, fbw, fs): cutoff = scipy.signal.gausspulse('cutoff', fc=fc, bw=fbw, tpr=-100, bwr=-3) adj_cutoff = np.ceil(cutoff * fs) / fs t = np.arange(-adj_cutoff, adj_cutoff + 1 / fs, 1 / fs) pulse, _ = sp.signal.gausspulse(t, fc=fc, bw=fbw, retquad=True, bwr=-3) return pulse, t def nextpow2(n): return 2**int(np.ceil(np.log2(n))) def envelope(rf_data, N=None, axis=-1): return np.abs(scipy.signal.hilbert(np.atleast_2d(rf_data), N, axis=axis)) def qbutter(x, fn, fs=1, btype='lowpass', n=4, plot=False, axis=-1): wn = fn / (fs / 2.) b, a = sp.signal.butter(n, wn, btype) fx = sp.signal.lfilter(b, a, x, axis=axis) return fx def qfirwin(x, fn, fs=1, btype='lowpass', ntaps=80, plot=False, axis=-1, window='hamming'): if btype.lower() in ('lowpass', 'low'): pass_zero = 1 elif btype.lower() in ('bandpass', 'band'): pass_zero = 0 elif btype.lower() in ('highpass', 'high'): pass_zero = 0 wn = fn / (fs / 2.) b = sp.signal.firwin(ntaps, wn, pass_zero=pass_zero, window=window) fx = np.apply_along_axis(lambda x: np.convolve(x, b), axis, x) return fx def qfft(s, nfft=None, fs=1, dr=100, fig=None, **kwargs): s = np.atleast_2d(s) nsig, nsample = s.shape if nfft is None: nfft = nsample if nfft > nsample: s = np.pad(s, ((0, 0), (0, nfft - nsample)), mode='constant') elif nfft < nsample: s = s[:, :nfft] ft = sp.fftpack.fft(s, axis=1) freqs = sp.fftpack.fftfreq(nfft, 1 / fs) ftdb = 20 * np.log10(np.abs(ft) / (np.max(np.abs(ft), axis=1)[..., None])) ftdb[ftdb < -dr] = -dr cutoff = (nfft + 1) // 2 return freqs[:cutoff], ftdb[:, :cutoff] def chunks(iterable, n): res = [] for el in iterable: res.append(el) if len(res) == n: yield res res = [] if res: yield res def create_jobs(*args, mode='zip', is_complete=None): static_args = list() static_idx = list() iterable_args = list() iterable_idx = list() for arg_no, arg in enumerate(args): if isinstance(arg, (tuple, list)): iterable, chunksize = arg if chunksize == 1: iterable_args.append(iterable) else: iterable_args.append(chunks(iterable, chunksize)) iterable_idx.append(arg_no) else: static_args.append(itertools.repeat(arg)) static_idx.append(arg_no) if not iterable_args and not static_args: return if not iterable_args: yield 1, tuple(args[i] for i in static_idx) if not static_args: repeats = itertools.repeat(()) else: repeats = zip(*static_args) if mode.lower() == 'product': combos = itertools.product(*iterable_args) elif mode.lower() == 'zip': combos = zip(*iterable_args) elif mode.lower() == 'zip_longest': combos = itertools.zip_longest(*iterable_args) for job_id, (r, p) in enumerate(zip(repeats, combos)): if is_complete is not None and is_complete[job_id]: continue res = r + p yield job_id + 1, tuple(res[i] for i in np.argsort(static_idx + iterable_idx)) def open_db(f): def decorator(firstarg, *args, **kwargs): if isinstance(firstarg, sql.Connection): return f(firstarg, *args, **kwargs) else: with closing(sql.connect(firstarg)) as con: return f(con, *args, **kwargs) return decorator def read_db(f): def decorator(firstarg, *args, **kwargs): if isinstance(firstarg, sql.Connection): return f(firstarg, *args, **kwargs) else: if os.path.isfile(firstarg): with closing(sql.connect(firstarg)) as con: return f(con, *args, **kwargs) else: raise IOError('File does not exist') return decorator @open_db def table_exists(con, name): query = '''SELECT count(*) FROM sqlite_master WHERE type='table' and name=?''' return con.execute(query, (name, )).fetchone()[0] != 0 @open_db def create_metadata_table(con, **kwargs): table = [[str(v) for v in list(kwargs.values())]] columns = list(kwargs.keys()) pd.DataFrame(table, columns=columns, dtype=str).to_sql('metadata', con, if_exists='replace', index=False) @open_db def create_progress_table(con, njobs): with con: con.execute( 'CREATE TABLE progress (job_id INTEGER PRIMARY KEY, is_complete boolean)') con.executemany('INSERT INTO progress (is_complete) VALUES (?)', repeat((False, ), njobs)) @open_db def get_progress(con): table = pd.read_sql('SELECT is_complete FROM progress ORDER BY job_id', con) is_complete = np.array(table).squeeze() ijob = sum(is_complete) + 1 return is_complete, ijob @open_db def update_progress(con, job_id): with con: con.execute('UPDATE progress SET is_complete=1 WHERE job_id=?', [ job_id, ]) def script_parser(main, config_def): if isinstance(config_def, dict): Config = abstract.register_type('Config', config_def) else: Config = config_def def config(args): if args.file: abstract.dump(Config(), args.file) else: print(Config()) def run(args): if args.config: cfg = Config(**abstract.load(args.config)) else: cfg = Config() return main(cfg, args) parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(help='sub-command help') config_parser = subparsers.add_parser('config', help='config_help') config_parser.add_argument('-f', '--file', nargs='?') config_parser.set_defaults(func=config) run_parser = subparsers.add_parser('run', help='run_help') run_parser.add_argument('config', nargs='?') run_parser.add_argument('-f', '--file', nargs='?') run_parser.add_argument('-t', '--threads', nargs='?', type=int) run_parser.add_argument('-w', '--write-over', action='store_true') run_parser.set_defaults(func=run) return parser, run_parser def script_parser2(main, config_def): if isinstance(config_def, dict): Config = abstract.register_type('Config', config_def) else: Config = config_def def run(args): if args.show_config: print(Config()) return if args.generate_config: abstract.dump(Config(), args.generate_config) return if args.file: if args.config: cfg = Config(**abstract.load(args.config)) else: cfg = Config() return main(cfg, args) parser = argparse.ArgumentParser() parser.add_argument('-g', '--generate-config') parser.add_argument('-s', '--show-config', action='store_true') parser.add_argument('file', nargs='?') parser.add_argument('-c', '--config') parser.add_argument('-t', '--threads', type=int) parser.add_argument('-w', '--write-over', action='store_true') parser.set_defaults(func=run) return parser def memoize_old(func): def ishashable(obj): try: hash(obj) except TypeError: return False return True def make_hashable(obj): if not ishashable(obj): if isinstance(obj, np.ndarray): return obj.tostring() return str(obj) if isinstance(obj, float): return round(obj, 18) return obj memo = {} @functools.wraps(func) def decorator(*args, **kwargs): key = (tuple(make_hashable(a) for a in args), tuple((k, make_hashable(v)) for k, v in sorted(kwargs.items()))) if key not in memo: memo[key] = func(*args, **kwargs) return deepcopy(memo[key]) return decorator def memoize(func, maxsize=20): def ishashable(obj): try: hash(obj) except TypeError: return False return True def make_hashable(obj): if hasattr(obj, '_memoize'): return obj._memoize() if not ishashable(obj): if isinstance(obj, np.ndarray): return obj.tostring() return str(obj) if isinstance(obj, float): return round(obj, 18) return obj func.memo = {} @functools.wraps(func) def decorator(*args, **kwargs): key = (tuple(make_hashable(a) for a in args), tuple((k, make_hashable(v)) for k, v in sorted(kwargs.items()))) if key not in func.memo: if len(func.memo) > maxsize: return func(*args, **kwargs) else: func.memo[key] = func(*args, **kwargs) return deepcopy(func.memo[key]) return decorator class Counter: def __init__(self): self.count = 0 def increment(self, *args, **kwargs): self.count += 1 def decrement(self, *args, **kwargs): self.count -= 1
true
true
f70d8c4e1cd557c34f07a90a39b102830d82dd0f
5,405
py
Python
tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
datanonymous/TFandroid
89927e863b1ad96184ab09188f62b7e391c896d9
[ "Apache-2.0" ]
36
2016-12-17T15:25:25.000Z
2022-01-29T21:50:53.000Z
tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
shekharpalit/tensorflow
6aa83398ab03bfae822f36772757097bcb98b6ed
[ "Apache-2.0" ]
59
2019-06-17T09:37:49.000Z
2022-01-19T01:21:34.000Z
tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
shekharpalit/tensorflow
6aa83398ab03bfae822f36772757097bcb98b6ed
[ "Apache-2.0" ]
36
2017-07-27T21:12:40.000Z
2022-02-03T16:45:56.000Z
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops.linalg import linalg as linalg_lib from tensorflow.python.ops.linalg import linear_operator_adjoint from tensorflow.python.ops.linalg import linear_operator_test_util from tensorflow.python.platform import test linalg = linalg_lib LinearOperatorAdjoint = linear_operator_adjoint.LinearOperatorAdjoint # pylint: disable=invalid-name class LinearOperatorAdjointTest( linear_operator_test_util.SquareLinearOperatorDerivedClassTest): """Most tests done in the base class LinearOperatorDerivedClassTest.""" def setUp(self): self._atol[dtypes.complex64] = 1e-5 self._rtol[dtypes.complex64] = 1e-5 def _operator_and_matrix(self, build_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False): shape = list(build_info.shape) if ensure_self_adjoint_and_pd: matrix = linear_operator_test_util.random_positive_definite_matrix( shape, dtype, force_well_conditioned=True) else: matrix = linear_operator_test_util.random_tril_matrix( shape, dtype, force_well_conditioned=True, remove_upper=True) lin_op_matrix = matrix if use_placeholder: lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None) if ensure_self_adjoint_and_pd: operator = LinearOperatorAdjoint( linalg.LinearOperatorFullMatrix( lin_op_matrix, is_positive_definite=True, is_self_adjoint=True)) else: operator = LinearOperatorAdjoint( linalg.LinearOperatorLowerTriangular(lin_op_matrix)) return operator, linalg.adjoint(matrix) def test_base_operator_hint_used(self): # The matrix values do not effect auto-setting of the flags. matrix = [[1., 0.], [1., 1.]] operator = linalg.LinearOperatorFullMatrix( matrix, is_positive_definite=True, is_non_singular=True, is_self_adjoint=False) operator_adjoint = LinearOperatorAdjoint(operator) self.assertTrue(operator_adjoint.is_positive_definite) self.assertTrue(operator_adjoint.is_non_singular) self.assertFalse(operator_adjoint.is_self_adjoint) def test_supplied_hint_used(self): # The matrix values do not effect auto-setting of the flags. matrix = [[1., 0.], [1., 1.]] operator = linalg.LinearOperatorFullMatrix(matrix) operator_adjoint = LinearOperatorAdjoint( operator, is_positive_definite=True, is_non_singular=True, is_self_adjoint=False) self.assertTrue(operator_adjoint.is_positive_definite) self.assertTrue(operator_adjoint.is_non_singular) self.assertFalse(operator_adjoint.is_self_adjoint) def test_contradicting_hints_raise(self): # The matrix values do not effect auto-setting of the flags. matrix = [[1., 0.], [1., 1.]] operator = linalg.LinearOperatorFullMatrix( matrix, is_positive_definite=False) with self.assertRaisesRegexp(ValueError, "positive-definite"): LinearOperatorAdjoint(operator, is_positive_definite=True) operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False) with self.assertRaisesRegexp(ValueError, "self-adjoint"): LinearOperatorAdjoint(operator, is_self_adjoint=True) def test_name(self): matrix = [[11., 0.], [1., 8.]] operator = linalg.LinearOperatorFullMatrix( matrix, name="my_operator", is_non_singular=True) operator = LinearOperatorAdjoint(operator) self.assertEqual("my_operator_adjoint", operator.name) class LinearOperatorAdjointNonSquareTest( linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest): """Tests done in the base class NonSquareLinearOperatorDerivedClassTest.""" def _operator_and_matrix(self, build_info, dtype, use_placeholder): shape_before_adjoint = list(build_info.shape) # We need to swap the last two dimensions because we are taking the adjoint # of this operator shape_before_adjoint[-1], shape_before_adjoint[-2] = ( shape_before_adjoint[-2], shape_before_adjoint[-1]) matrix = linear_operator_test_util.random_normal( shape_before_adjoint, dtype=dtype) lin_op_matrix = matrix if use_placeholder: lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None) operator = LinearOperatorAdjoint( linalg.LinearOperatorFullMatrix(lin_op_matrix)) return operator, linalg.adjoint(matrix) if __name__ == "__main__": test.main()
37.797203
101
0.734135
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops.linalg import linalg as linalg_lib from tensorflow.python.ops.linalg import linear_operator_adjoint from tensorflow.python.ops.linalg import linear_operator_test_util from tensorflow.python.platform import test linalg = linalg_lib LinearOperatorAdjoint = linear_operator_adjoint.LinearOperatorAdjoint class LinearOperatorAdjointTest( linear_operator_test_util.SquareLinearOperatorDerivedClassTest): def setUp(self): self._atol[dtypes.complex64] = 1e-5 self._rtol[dtypes.complex64] = 1e-5 def _operator_and_matrix(self, build_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False): shape = list(build_info.shape) if ensure_self_adjoint_and_pd: matrix = linear_operator_test_util.random_positive_definite_matrix( shape, dtype, force_well_conditioned=True) else: matrix = linear_operator_test_util.random_tril_matrix( shape, dtype, force_well_conditioned=True, remove_upper=True) lin_op_matrix = matrix if use_placeholder: lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None) if ensure_self_adjoint_and_pd: operator = LinearOperatorAdjoint( linalg.LinearOperatorFullMatrix( lin_op_matrix, is_positive_definite=True, is_self_adjoint=True)) else: operator = LinearOperatorAdjoint( linalg.LinearOperatorLowerTriangular(lin_op_matrix)) return operator, linalg.adjoint(matrix) def test_base_operator_hint_used(self): matrix = [[1., 0.], [1., 1.]] operator = linalg.LinearOperatorFullMatrix( matrix, is_positive_definite=True, is_non_singular=True, is_self_adjoint=False) operator_adjoint = LinearOperatorAdjoint(operator) self.assertTrue(operator_adjoint.is_positive_definite) self.assertTrue(operator_adjoint.is_non_singular) self.assertFalse(operator_adjoint.is_self_adjoint) def test_supplied_hint_used(self): matrix = [[1., 0.], [1., 1.]] operator = linalg.LinearOperatorFullMatrix(matrix) operator_adjoint = LinearOperatorAdjoint( operator, is_positive_definite=True, is_non_singular=True, is_self_adjoint=False) self.assertTrue(operator_adjoint.is_positive_definite) self.assertTrue(operator_adjoint.is_non_singular) self.assertFalse(operator_adjoint.is_self_adjoint) def test_contradicting_hints_raise(self): matrix = [[1., 0.], [1., 1.]] operator = linalg.LinearOperatorFullMatrix( matrix, is_positive_definite=False) with self.assertRaisesRegexp(ValueError, "positive-definite"): LinearOperatorAdjoint(operator, is_positive_definite=True) operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False) with self.assertRaisesRegexp(ValueError, "self-adjoint"): LinearOperatorAdjoint(operator, is_self_adjoint=True) def test_name(self): matrix = [[11., 0.], [1., 8.]] operator = linalg.LinearOperatorFullMatrix( matrix, name="my_operator", is_non_singular=True) operator = LinearOperatorAdjoint(operator) self.assertEqual("my_operator_adjoint", operator.name) class LinearOperatorAdjointNonSquareTest( linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest): def _operator_and_matrix(self, build_info, dtype, use_placeholder): shape_before_adjoint = list(build_info.shape) shape_before_adjoint[-1], shape_before_adjoint[-2] = ( shape_before_adjoint[-2], shape_before_adjoint[-1]) matrix = linear_operator_test_util.random_normal( shape_before_adjoint, dtype=dtype) lin_op_matrix = matrix if use_placeholder: lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None) operator = LinearOperatorAdjoint( linalg.LinearOperatorFullMatrix(lin_op_matrix)) return operator, linalg.adjoint(matrix) if __name__ == "__main__": test.main()
true
true
f70d8c83df5b721001f8b07794266614e5d089a7
162
py
Python
pyrez/exceptions/SessionLimit.py
pytheous/Pyrez
85f6f27359288b5f0ad70ff543f247843ac326f9
[ "MIT" ]
25
2018-07-26T02:32:14.000Z
2021-09-20T03:26:17.000Z
pyrez/exceptions/SessionLimit.py
pytheous/Pyrez
85f6f27359288b5f0ad70ff543f247843ac326f9
[ "MIT" ]
93
2018-08-26T11:44:25.000Z
2022-03-28T08:22:18.000Z
pyrez/exceptions/SessionLimit.py
pytheous/Pyrez
85f6f27359288b5f0ad70ff543f247843ac326f9
[ "MIT" ]
13
2018-09-05T09:38:07.000Z
2021-08-16T04:39:41.000Z
from .PyrezException import PyrezException class SessionLimit(PyrezException): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
32.4
42
0.722222
from .PyrezException import PyrezException class SessionLimit(PyrezException): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
true
true
f70d8c900ae72ef49e6de06f8fbb1e74a0973a12
43,042
py
Python
pysnmp-with-texts/BIANCA-BRICK-SIF-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/BIANCA-BRICK-SIF-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/BIANCA-BRICK-SIF-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module BIANCA-BRICK-SIF-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BIANCA-BRICK-SIF-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:38:43 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion") DisplayString, = mibBuilder.importSymbols("RFC1158-MIB", "DisplayString") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") iso, TimeTicks, Counter32, IpAddress, Gauge32, ModuleIdentity, ObjectIdentity, MibIdentifier, Unsigned32, enterprises, NotificationType, Bits, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "TimeTicks", "Counter32", "IpAddress", "Gauge32", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "Unsigned32", "enterprises", "NotificationType", "Bits", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272)) bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4)) biboip = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5)) ipSifAliasAddressTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 28), ) if mibBuilder.loadTexts: ipSifAliasAddressTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressTable.setDescription('Contains a alias Address Entry Index,Ip,Mask,Interface ') ipSifAliasAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasAddressAlias")) if mibBuilder.loadTexts: ipSifAliasAddressEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressEntry.setDescription('Contents a Stateful inspection Firewall description for a alias Name') ipSifAliasAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasAddressIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressIndex.setDescription('The Index for the address alias') ipSifAliasAddressAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressAlias.setDescription('Alias Name for the Address Entry') ipSifAliasAddressAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 3), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressAddress.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressAddress.setDescription('The ip-address for the Alias') ipSifAliasAddressMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 4), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressMask.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressMask.setDescription('The ip Mask for the Alias') ipSifAliasAddressInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressInterface.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressInterface.setDescription('The interface index for the alias') ipSifAliasAddressMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("interface", 1), ("address", 2), ("range", 3), ("delete", 4))).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressMode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressMode.setDescription('Address or Interface Mode') ipSifAliasAddressRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressRange.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressRange.setDescription('The ip Range for the Alias') ipSifAliasAddressGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 8), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroup.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroup.setDescription('For values greater than zero this entry determines the IP address group this entry belongs to, see also ipSifAliasAddressGroupId') ipSifAliasServiceTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 29), ) if mibBuilder.loadTexts: ipSifAliasServiceTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceTable.setDescription('Contains a alias Service Entry Protocol,Port,Range ') ipSifAliasServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasServiceAlias")) if mibBuilder.loadTexts: ipSifAliasServiceEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceEntry.setDescription('Contains a alias Service Entry Protocol,Port,Range ') ipSifAliasServiceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasServiceIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceIndex.setDescription('The Index for the address alias') ipSifAliasServiceAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceAlias.setDescription('Alias Name for the Service Entry') ipSifAliasServiceProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 8, 9, 12, 16, 17, 20, 22, 27, 41, 46, 47, 50, 51, 56, 57, 65, 80, 88, 89, 94, 103, 111, 112, 115, 250, 251, 252, 253, 254, 255, 256))).clone(namedValues=NamedValues(("icmp", 1), ("igmp", 2), ("ggp", 3), ("ip", 4), ("tcp", 6), ("egp", 8), ("igp", 9), ("pup", 12), ("chaos", 16), ("udp", 17), ("hmp", 20), ("xns-idp", 22), ("rdp", 27), ("ipv6", 41), ("rsvp", 46), ("gre", 47), ("esp", 50), ("ah", 51), ("tlsp", 56), ("skip", 57), ("kryptolan", 65), ("iso-ip", 80), ("igrp", 88), ("ospf", 89), ("ipip", 94), ("pim", 103), ("ipx-in-ip", 111), ("vrrp", 112), ("l2tp", 115), ("local", 250), ("internet", 251), ("netmeeting", 252), ("udptcp", 253), ("any", 254), ("delete", 255), ("dont-verify", 256))).clone('any')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceProtocol.setDescription('The protocol for the Service alias') ipSifAliasServicePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 65535)).clone(-1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServicePort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServicePort.setDescription('The Port for the Service Alias.') ipSifAliasServiceRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65536)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceRange.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceRange.setDescription('The Port Range for the Service Alias.') ipSifAliasServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("predefined", 1), ("custom", 2))).clone('custom')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasServiceType.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceType.setDescription('Specifies wether created by the IP/SIF subsystem itself or created/modified by the administrator.') ipSifAliasServiceGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 7), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroup.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroup.setDescription('For values greater than zero this entry determines the IP service group this entry belongs to, see also ipSifAliasServiceGroupId') ipSifAliasServiceSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceSourcePort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceSourcePort.setDescription('The Source Port for the Service Alias.') ipSifAliasServiceSourceRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65536)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceSourceRange.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceSourceRange.setDescription('The Source Port Range for the Service Alias.') ipSifAliasServiceIcmpType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceIcmpType.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceIcmpType.setDescription('The ICMP Type for the Service Alias.') ipSifAliasServiceIcmpCode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceIcmpCode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceIcmpCode.setDescription('The ICMP Code for the Service Alias.') ipSifAliasTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 30), ) if mibBuilder.loadTexts: ipSifAliasTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasTable.setDescription('Contains a Stateful Inspection Firewall (SIF) policy.') ipSifAliasEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasOrder")) if mibBuilder.loadTexts: ipSifAliasEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasEntry.setDescription('') ipSifAliasOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasOrder.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasOrder.setDescription('The Order for the Stateful Inspection Entry rule') ipSifAliasSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasSource.setDescription('The alias Source Index for the Entry') ipSifAliasDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasDestination.setDescription('The alias Destination Index for the Entry') ipSifAliasService = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 4), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasService.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasService.setDescription('The alias Protocol/service Index for Entry') ipSifAliasAction = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 255))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("reject", 3), ("delete", 255))).clone('access')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAction.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAction.setDescription('The Rule for the Filter') ipSifAliasStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasStatus.setDescription('Defines the administrative status of this policy') ipSifAliasPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("default", 1), ("low-latency", 2), ("high", 3), ("medium", 4), ("low", 5))).clone('default')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasPriority.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasPriority.setDescription('Defines the QoS priority of this policy') ipSifAliasClassId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasClassId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasClassId.setDescription('Internal ID for SIF policy to QoS policy mapping') ipSifRejectTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 31), ) if mibBuilder.loadTexts: ipSifRejectTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectTable.setDescription('Contains actually rejected Frames with Source Destination ') ipSifRejectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifRejectIndex")) if mibBuilder.loadTexts: ipSifRejectEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectEntry.setDescription('') ipSifRejectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectIndex.setDescription('The Index for the Reject Entry') ipSifRejectSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 2), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectSource.setDescription('The Reject Source for the Entry') ipSifRejectDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 3), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectDestination.setDescription('The Reject Destination Index for the Entry') ipSifRejectRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectRejects.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectRejects.setDescription('Count of rejected frames') ipSifRejectSilence = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectSilence.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectSilence.setDescription('Last reject in seconds') ipSifRejectProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectProtocol.setDescription('The protocol of the rejected Packet') ipSifRejectPortLo = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectPortLo.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectPortLo.setDescription('The lowest Port rejected') ipSifRejectPortHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectPortHigh.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectPortHigh.setDescription('The highest port rejected') ipSifExpectTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 35), ) if mibBuilder.loadTexts: ipSifExpectTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectTable.setDescription('Contains expected Sessions with Source Destination ') ipSifExpectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifExpectIndex")) if mibBuilder.loadTexts: ipSifExpectEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectEntry.setDescription('') ipSifExpectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectIndex.setDescription('This field is used for SIF-internal signalling and stores the index for the expected session for later matching.') ipSifExpectSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 2), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectSource.setDescription('The source-IP-address for the expected session. A value of 0 means ANY source-address.') ipSifExpectDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 3), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectDestination.setDescription('The destination-IP-address for the expected session. A value of 0 means ANY destination-address.') ipSifExpectProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 6, 17, 255))).clone(namedValues=NamedValues(("igmp", 2), ("ip", 4), ("tcp", 6), ("udp", 17), ("delete", 255))).clone('udp')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifExpectProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectProtocol.setDescription('The protocol of the expected session.') ipSifExpectSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectSourcePort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectSourcePort.setDescription('The source-port-number of the expected session. A value of 0 means ANY source-port-number.') ipSifExpectDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectDestPort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectDestPort.setDescription('The destination-port-number of the expected session. A value of 0 means ANY destination-port-number.') ipSifExpectPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("default", 1), ("low-latency", 2), ("high", 3), ("medium", 4), ("low", 5))).clone('default')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectPriority.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectPriority.setDescription('Defines the QoS-priority/policy to be used for the expected SIF-session.') ipSifExpectClassId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectClassId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectClassId.setDescription('Internal ID for mapping SIF-policy to QoS-policy. Default-value of 0 means NOT SPECIFIED.') ipSifExpectIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectIfIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectIfIndex.setDescription('Interface-index for which the session is expected. A value of 0 means ANY interface-index.') ipSifAliasAddressGroupTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 47), ) if mibBuilder.loadTexts: ipSifAliasAddressGroupTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupTable.setDescription('Defines IP address or interface group alias') ipSifAliasAddressGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasAddressGroupId")) if mibBuilder.loadTexts: ipSifAliasAddressGroupEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupEntry.setDescription('Defines IP address or interface group alias') ipSifAliasAddressGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroupId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupId.setDescription('The unique address group entry ID') ipSifAliasAddressGroupAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroupAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupAlias.setDescription('Alias name for the address group entry') ipSifAliasAddressGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasAddressGroupIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupIndex.setDescription('The index for the address group entry to be referred by an ipSifAlias entry') ipSifAliasAddressGroupMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("interface", 1), ("address", 2), ("delete", 3))).clone('interface')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroupMode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupMode.setDescription('Specifies wether this entry defines an interface or address group') ipSifAliasServiceGroupTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 48), ) if mibBuilder.loadTexts: ipSifAliasServiceGroupTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupTable.setDescription('Defines IP service group alias') ipSifAliasServiceGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasServiceGroupId")) if mibBuilder.loadTexts: ipSifAliasServiceGroupEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupEntry.setDescription('Defines IP service group alias') ipSifAliasServiceGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroupId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupId.setDescription('The unique IP service group entry ID') ipSifAliasServiceGroupAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroupAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupAlias.setDescription('Alias name for the IP service group entry') ipSifAliasServiceGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasServiceGroupIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupIndex.setDescription('The index for the Ip service group entry to be referred by an ipSifAlias entry') ipSifAliasServiceGroupMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("service", 1), ("delete", 2))).clone('service')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroupMode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupMode.setDescription('Specifies wether this entry defines an IP service group or should be deleted') ipSifPolicyChkTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 49), ) if mibBuilder.loadTexts: ipSifPolicyChkTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkTable.setDescription("MIB interface in order to check the configured SIF polices: - for debugging purposes - for test applications - for configuration frontends NOTE: it's a stateless check, not based on a real IP session context ") ipSifPolicyChkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifPolicyChkProtocol"), (0, "BIANCA-BRICK-SIF-MIB", "ipSifPolicyChkDestPort")) if mibBuilder.loadTexts: ipSifPolicyChkEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkEntry.setDescription('') ipSifPolicyChkSourceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkSourceIfIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkSourceIfIndex.setDescription("The source interface index, for example '1' addresses the 'local' interface whereas '0' means 'don't check.") ipSifPolicyChkDestIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkDestIfIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkDestIfIndex.setDescription("The destination interface index, for example '1' addresses the 'local' interface whereas '0' means 'don't check.'") ipSifPolicyChkSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 3), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkSource.setDescription("The source IP address, 0.0.0.0 means 'don't check'.") ipSifPolicyChkDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 4), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkDestination.setDescription("The destination IP address, 0.0.0.0 means 'don't check'.") ipSifPolicyChkProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkProtocol.setDescription("The IP protocol number to checked, '0' means 'don't check.") ipSifPolicyChkDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 6), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkDestPort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkDestPort.setDescription("The destination port number (UDP/TCP service) to checked, '0' means 'don't check.") ipSifPolicyChkRule = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("reject", 3), ("unknown", 4))).clone('unknown')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkRule.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkRule.setDescription('Returns the associated policy, depending on: - ipSifAliasAction - ipSifAliasOrder - ipSifAliasStatus') ipSifPolicyChkRuleOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkRuleOrder.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkRuleOrder.setDescription('Returns the associated policy order (see ipSifAliasOrder).') ipSifPolicyChkResult = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("unknown", 3))).clone('unknown')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkResult.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkResult.setDescription('Returns the result depending on: - ipSifPolicyChkRule - administrative status (ipSifAdminStatus) - operational status of the SIF engine') ipSifPolicyChkState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 1), ("trigger", 2), ("running", 3), ("done", 4))).clone('initial')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkState.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkState.setDescription('Displays the current status of the policy check, when setting to trigger(2) a new check will be initiated.') ipSifPolicyChkAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("check", 1), ("ignore", 2))).clone('check')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkAdminStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkAdminStatus.setDescription('Determines wether the ipSifAdminStatus should be considered for the policy (check (1)) or not (ingnore (2)).') ipSifPolicyChkOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("check", 1), ("ignore", 2))).clone('check')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkOperStatus.setDescription('Determines wether the SIF operational status should be considered for the policy (check (1)) or not (ingnore (2)).') ipSifPolicyChkCurrOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('down')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkCurrOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkCurrOperStatus.setDescription('The current SIF operational status.') ipSif = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5, 37)) ipSifAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAdminStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAdminStatus.setDescription('Enable or disable Stateful Inspection Firewall.') ipSifLocalFilter = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifLocalFilter.setStatus('mandatory') if mibBuilder.loadTexts: ipSifLocalFilter.setDescription('Enable or disable filtering on local requests') ipSifInterfaceFilter = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifInterfaceFilter.setStatus('mandatory') if mibBuilder.loadTexts: ipSifInterfaceFilter.setDescription('Enable or disable filtering on same Interface packets') ipSifSysloglevel = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("deny", 1), ("accept", 2), ("verbose", 3), ("none", 4))).clone('verbose')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifSysloglevel.setStatus('mandatory') if mibBuilder.loadTexts: ipSifSysloglevel.setDescription('Levels for less or more Informations in the Syslog, verbose : display all Sif Activity deny : display only rejects, ignore accept : display only accpts none : disable Syslogs') ipSifUdpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(180)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifUdpTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifUdpTimeout.setDescription('Timeout on inactive UDP Session') ipSifTcpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(3600)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifTcpTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifTcpTimeout.setDescription('Timeout on inactive TCP Session') ipSifPPTPTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(86400)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPPTPTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPPTPTimeout.setDescription('Timeout on inactive PPTP Session') ipSifDefaultTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(30)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifDefaultTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifDefaultTimeout.setDescription('Timeout on all other ip Sessions') ipSifMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifMaxSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifMaxSessions.setDescription('Maximum number of monitored sessions') ipSifMaxRejectEntries = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000)).clone(1000)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifMaxRejectEntries.setStatus('mandatory') if mibBuilder.loadTexts: ipSifMaxRejectEntries.setDescription('Maximum number of ipSifRejectTable entries') ipSifMaxRejectTtl = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 86400)).clone(3600)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifMaxRejectTtl.setStatus('mandatory') if mibBuilder.loadTexts: ipSifMaxRejectTtl.setDescription('Maximum time to live of the ipSifRejectTable entries in seconds') ipSifInterfaceAliasAutoCreate = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifInterfaceAliasAutoCreate.setStatus('mandatory') if mibBuilder.loadTexts: ipSifInterfaceAliasAutoCreate.setDescription('Enable or disable automatic creation of interface aliases (see ipSifAliasAddressTable) due to created MPR interfaces visible in ifTable.') ipSifStat = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5, 46)) ipSifStatCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrSessions.setDescription('Current number of all monitored sessions') ipSifStatCurrUdpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrUdpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrUdpSessions.setDescription('Current number of monitored UDP sessions') ipSifStatCurrTcpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrTcpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrTcpSessions.setDescription('Current number of monitored TCP sessions') ipSifStatCurrOtherSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrOtherSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrOtherSessions.setDescription('Current number of monitored non-TCP/UDP sessions') ipSifStatCurrExpectedSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrExpectedSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrExpectedSessions.setDescription('Current number of created expected sessions') ipSifStatTotalUdpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalUdpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalUdpSessions.setDescription('Total number of monitored UDP sessions') ipSifStatTotalTcpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalTcpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalTcpSessions.setDescription('Total number of monitored TCP sessions') ipSifStatTotalOtherSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalOtherSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalOtherSessions.setDescription('Total number of monitored non-TCP/UDP sessions') ipSifStatTotalExpectedSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalExpectedSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalExpectedSessions.setDescription('Total number of monitored non-TCP/UDP sessions') mibBuilder.exportSymbols("BIANCA-BRICK-SIF-MIB", ipSifPolicyChkDestPort=ipSifPolicyChkDestPort, ipSifAliasAddressEntry=ipSifAliasAddressEntry, ipSifAliasServiceType=ipSifAliasServiceType, ipSifPolicyChkRule=ipSifPolicyChkRule, ipSifExpectTable=ipSifExpectTable, ipSifInterfaceFilter=ipSifInterfaceFilter, ipSifAliasAddressGroupIndex=ipSifAliasAddressGroupIndex, ipSifAliasSource=ipSifAliasSource, ipSifRejectTable=ipSifRejectTable, ipSifMaxRejectEntries=ipSifMaxRejectEntries, ipSifAliasServiceRange=ipSifAliasServiceRange, ipSifStatCurrSessions=ipSifStatCurrSessions, ipSifExpectEntry=ipSifExpectEntry, ipSifAliasServiceGroupTable=ipSifAliasServiceGroupTable, ipSifPPTPTimeout=ipSifPPTPTimeout, ipSifExpectSourcePort=ipSifExpectSourcePort, ipSifRejectPortHigh=ipSifRejectPortHigh, ipSifLocalFilter=ipSifLocalFilter, ipSifStatCurrExpectedSessions=ipSifStatCurrExpectedSessions, ipSifPolicyChkSource=ipSifPolicyChkSource, ipSifAliasService=ipSifAliasService, ipSifAliasAddressGroupAlias=ipSifAliasAddressGroupAlias, ipSifStatTotalTcpSessions=ipSifStatTotalTcpSessions, ipSifAliasServicePort=ipSifAliasServicePort, ipSifAliasServiceGroupIndex=ipSifAliasServiceGroupIndex, ipSifExpectDestPort=ipSifExpectDestPort, ipSifAliasAddressIndex=ipSifAliasAddressIndex, ipSifPolicyChkRuleOrder=ipSifPolicyChkRuleOrder, ipSifRejectIndex=ipSifRejectIndex, ipSifPolicyChkTable=ipSifPolicyChkTable, ipSifAliasAddressRange=ipSifAliasAddressRange, ipSifAliasAddressTable=ipSifAliasAddressTable, ipSifExpectDestination=ipSifExpectDestination, ipSifPolicyChkState=ipSifPolicyChkState, ipSifAliasServiceGroupEntry=ipSifAliasServiceGroupEntry, ipSifAliasServiceGroupMode=ipSifAliasServiceGroupMode, ipSifTcpTimeout=ipSifTcpTimeout, ipSifAliasTable=ipSifAliasTable, bintec=bintec, ipSifAliasOrder=ipSifAliasOrder, ipSifExpectClassId=ipSifExpectClassId, ipSifStat=ipSifStat, ipSifPolicyChkCurrOperStatus=ipSifPolicyChkCurrOperStatus, ipSifAliasPriority=ipSifAliasPriority, ipSifStatCurrTcpSessions=ipSifStatCurrTcpSessions, ipSifMaxSessions=ipSifMaxSessions, ipSifRejectSource=ipSifRejectSource, ipSifAliasServiceIndex=ipSifAliasServiceIndex, ipSifPolicyChkDestination=ipSifPolicyChkDestination, ipSifAliasServiceGroupAlias=ipSifAliasServiceGroupAlias, ipSifAliasServiceAlias=ipSifAliasServiceAlias, ipSifExpectIfIndex=ipSifExpectIfIndex, ipSifAliasAddressGroupTable=ipSifAliasAddressGroupTable, ipSifPolicyChkEntry=ipSifPolicyChkEntry, ipSif=ipSif, ipSifPolicyChkOperStatus=ipSifPolicyChkOperStatus, ipSifStatCurrUdpSessions=ipSifStatCurrUdpSessions, ipSifRejectEntry=ipSifRejectEntry, ipSifRejectSilence=ipSifRejectSilence, ipSifAliasEntry=ipSifAliasEntry, ipSifAdminStatus=ipSifAdminStatus, ipSifAliasServiceIcmpType=ipSifAliasServiceIcmpType, ipSifAliasAddressGroupEntry=ipSifAliasAddressGroupEntry, ipSifPolicyChkResult=ipSifPolicyChkResult, ipSifAliasAddressMask=ipSifAliasAddressMask, ipSifAliasServiceEntry=ipSifAliasServiceEntry, ipSifAliasServiceSourcePort=ipSifAliasServiceSourcePort, ipSifPolicyChkSourceIfIndex=ipSifPolicyChkSourceIfIndex, ipSifAliasAddressMode=ipSifAliasAddressMode, bibo=bibo, ipSifAliasAddressAddress=ipSifAliasAddressAddress, ipSifUdpTimeout=ipSifUdpTimeout, ipSifStatTotalOtherSessions=ipSifStatTotalOtherSessions, ipSifRejectDestination=ipSifRejectDestination, ipSifPolicyChkAdminStatus=ipSifPolicyChkAdminStatus, ipSifStatTotalUdpSessions=ipSifStatTotalUdpSessions, ipSifPolicyChkProtocol=ipSifPolicyChkProtocol, ipSifAliasAddressGroup=ipSifAliasAddressGroup, ipSifRejectRejects=ipSifRejectRejects, ipSifAliasServiceSourceRange=ipSifAliasServiceSourceRange, ipSifAliasServiceTable=ipSifAliasServiceTable, ipSifMaxRejectTtl=ipSifMaxRejectTtl, ipSifAliasServiceGroupId=ipSifAliasServiceGroupId, ipSifExpectProtocol=ipSifExpectProtocol, ipSifExpectIndex=ipSifExpectIndex, ipSifAliasClassId=ipSifAliasClassId, ipSifAliasStatus=ipSifAliasStatus, biboip=biboip, ipSifInterfaceAliasAutoCreate=ipSifInterfaceAliasAutoCreate, ipSifAliasServiceIcmpCode=ipSifAliasServiceIcmpCode, ipSifAliasAddressInterface=ipSifAliasAddressInterface, ipSifAliasServiceProtocol=ipSifAliasServiceProtocol, ipSifAliasAddressGroupMode=ipSifAliasAddressGroupMode, ipSifRejectPortLo=ipSifRejectPortLo, ipSifAliasAddressAlias=ipSifAliasAddressAlias, ipSifAliasAction=ipSifAliasAction, ipSifRejectProtocol=ipSifRejectProtocol, ipSifStatCurrOtherSessions=ipSifStatCurrOtherSessions, ipSifStatTotalExpectedSessions=ipSifStatTotalExpectedSessions, ipSifExpectPriority=ipSifExpectPriority, ipSifDefaultTimeout=ipSifDefaultTimeout, ipSifExpectSource=ipSifExpectSource, ipSifPolicyChkDestIfIndex=ipSifPolicyChkDestIfIndex, ipSifAliasAddressGroupId=ipSifAliasAddressGroupId, ipSifAliasDestination=ipSifAliasDestination, ipSifSysloglevel=ipSifSysloglevel, ipSifAliasServiceGroup=ipSifAliasServiceGroup)
131.626911
4,773
0.787231
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion") DisplayString, = mibBuilder.importSymbols("RFC1158-MIB", "DisplayString") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") iso, TimeTicks, Counter32, IpAddress, Gauge32, ModuleIdentity, ObjectIdentity, MibIdentifier, Unsigned32, enterprises, NotificationType, Bits, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "TimeTicks", "Counter32", "IpAddress", "Gauge32", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "Unsigned32", "enterprises", "NotificationType", "Bits", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272)) bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4)) biboip = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5)) ipSifAliasAddressTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 28), ) if mibBuilder.loadTexts: ipSifAliasAddressTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressTable.setDescription('Contains a alias Address Entry Index,Ip,Mask,Interface ') ipSifAliasAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasAddressAlias")) if mibBuilder.loadTexts: ipSifAliasAddressEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressEntry.setDescription('Contents a Stateful inspection Firewall description for a alias Name') ipSifAliasAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasAddressIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressIndex.setDescription('The Index for the address alias') ipSifAliasAddressAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressAlias.setDescription('Alias Name for the Address Entry') ipSifAliasAddressAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 3), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressAddress.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressAddress.setDescription('The ip-address for the Alias') ipSifAliasAddressMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 4), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressMask.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressMask.setDescription('The ip Mask for the Alias') ipSifAliasAddressInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressInterface.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressInterface.setDescription('The interface index for the alias') ipSifAliasAddressMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("interface", 1), ("address", 2), ("range", 3), ("delete", 4))).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressMode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressMode.setDescription('Address or Interface Mode') ipSifAliasAddressRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressRange.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressRange.setDescription('The ip Range for the Alias') ipSifAliasAddressGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 28, 1, 8), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroup.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroup.setDescription('For values greater than zero this entry determines the IP address group this entry belongs to, see also ipSifAliasAddressGroupId') ipSifAliasServiceTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 29), ) if mibBuilder.loadTexts: ipSifAliasServiceTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceTable.setDescription('Contains a alias Service Entry Protocol,Port,Range ') ipSifAliasServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasServiceAlias")) if mibBuilder.loadTexts: ipSifAliasServiceEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceEntry.setDescription('Contains a alias Service Entry Protocol,Port,Range ') ipSifAliasServiceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasServiceIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceIndex.setDescription('The Index for the address alias') ipSifAliasServiceAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceAlias.setDescription('Alias Name for the Service Entry') ipSifAliasServiceProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 8, 9, 12, 16, 17, 20, 22, 27, 41, 46, 47, 50, 51, 56, 57, 65, 80, 88, 89, 94, 103, 111, 112, 115, 250, 251, 252, 253, 254, 255, 256))).clone(namedValues=NamedValues(("icmp", 1), ("igmp", 2), ("ggp", 3), ("ip", 4), ("tcp", 6), ("egp", 8), ("igp", 9), ("pup", 12), ("chaos", 16), ("udp", 17), ("hmp", 20), ("xns-idp", 22), ("rdp", 27), ("ipv6", 41), ("rsvp", 46), ("gre", 47), ("esp", 50), ("ah", 51), ("tlsp", 56), ("skip", 57), ("kryptolan", 65), ("iso-ip", 80), ("igrp", 88), ("ospf", 89), ("ipip", 94), ("pim", 103), ("ipx-in-ip", 111), ("vrrp", 112), ("l2tp", 115), ("local", 250), ("internet", 251), ("netmeeting", 252), ("udptcp", 253), ("any", 254), ("delete", 255), ("dont-verify", 256))).clone('any')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceProtocol.setDescription('The protocol for the Service alias') ipSifAliasServicePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 65535)).clone(-1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServicePort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServicePort.setDescription('The Port for the Service Alias.') ipSifAliasServiceRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65536)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceRange.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceRange.setDescription('The Port Range for the Service Alias.') ipSifAliasServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("predefined", 1), ("custom", 2))).clone('custom')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasServiceType.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceType.setDescription('Specifies wether created by the IP/SIF subsystem itself or created/modified by the administrator.') ipSifAliasServiceGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 7), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroup.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroup.setDescription('For values greater than zero this entry determines the IP service group this entry belongs to, see also ipSifAliasServiceGroupId') ipSifAliasServiceSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceSourcePort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceSourcePort.setDescription('The Source Port for the Service Alias.') ipSifAliasServiceSourceRange = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65536)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceSourceRange.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceSourceRange.setDescription('The Source Port Range for the Service Alias.') ipSifAliasServiceIcmpType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceIcmpType.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceIcmpType.setDescription('The ICMP Type for the Service Alias.') ipSifAliasServiceIcmpCode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 29, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceIcmpCode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceIcmpCode.setDescription('The ICMP Code for the Service Alias.') ipSifAliasTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 30), ) if mibBuilder.loadTexts: ipSifAliasTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasTable.setDescription('Contains a Stateful Inspection Firewall (SIF) policy.') ipSifAliasEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasOrder")) if mibBuilder.loadTexts: ipSifAliasEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasEntry.setDescription('') ipSifAliasOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasOrder.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasOrder.setDescription('The Order for the Stateful Inspection Entry rule') ipSifAliasSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasSource.setDescription('The alias Source Index for the Entry') ipSifAliasDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasDestination.setDescription('The alias Destination Index for the Entry') ipSifAliasService = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 4), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasService.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasService.setDescription('The alias Protocol/service Index for Entry') ipSifAliasAction = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 255))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("reject", 3), ("delete", 255))).clone('access')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAction.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAction.setDescription('The Rule for the Filter') ipSifAliasStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasStatus.setDescription('Defines the administrative status of this policy') ipSifAliasPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("default", 1), ("low-latency", 2), ("high", 3), ("medium", 4), ("low", 5))).clone('default')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasPriority.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasPriority.setDescription('Defines the QoS priority of this policy') ipSifAliasClassId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 30, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasClassId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasClassId.setDescription('Internal ID for SIF policy to QoS policy mapping') ipSifRejectTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 31), ) if mibBuilder.loadTexts: ipSifRejectTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectTable.setDescription('Contains actually rejected Frames with Source Destination ') ipSifRejectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifRejectIndex")) if mibBuilder.loadTexts: ipSifRejectEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectEntry.setDescription('') ipSifRejectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectIndex.setDescription('The Index for the Reject Entry') ipSifRejectSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 2), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectSource.setDescription('The Reject Source for the Entry') ipSifRejectDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 3), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectDestination.setDescription('The Reject Destination Index for the Entry') ipSifRejectRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectRejects.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectRejects.setDescription('Count of rejected frames') ipSifRejectSilence = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectSilence.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectSilence.setDescription('Last reject in seconds') ipSifRejectProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectProtocol.setDescription('The protocol of the rejected Packet') ipSifRejectPortLo = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectPortLo.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectPortLo.setDescription('The lowest Port rejected') ipSifRejectPortHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 31, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifRejectPortHigh.setStatus('mandatory') if mibBuilder.loadTexts: ipSifRejectPortHigh.setDescription('The highest port rejected') ipSifExpectTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 35), ) if mibBuilder.loadTexts: ipSifExpectTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectTable.setDescription('Contains expected Sessions with Source Destination ') ipSifExpectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifExpectIndex")) if mibBuilder.loadTexts: ipSifExpectEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectEntry.setDescription('') ipSifExpectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectIndex.setDescription('This field is used for SIF-internal signalling and stores the index for the expected session for later matching.') ipSifExpectSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 2), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectSource.setDescription('The source-IP-address for the expected session. A value of 0 means ANY source-address.') ipSifExpectDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 3), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectDestination.setDescription('The destination-IP-address for the expected session. A value of 0 means ANY destination-address.') ipSifExpectProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 6, 17, 255))).clone(namedValues=NamedValues(("igmp", 2), ("ip", 4), ("tcp", 6), ("udp", 17), ("delete", 255))).clone('udp')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifExpectProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectProtocol.setDescription('The protocol of the expected session.') ipSifExpectSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectSourcePort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectSourcePort.setDescription('The source-port-number of the expected session. A value of 0 means ANY source-port-number.') ipSifExpectDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectDestPort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectDestPort.setDescription('The destination-port-number of the expected session. A value of 0 means ANY destination-port-number.') ipSifExpectPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("default", 1), ("low-latency", 2), ("high", 3), ("medium", 4), ("low", 5))).clone('default')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectPriority.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectPriority.setDescription('Defines the QoS-priority/policy to be used for the expected SIF-session.') ipSifExpectClassId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectClassId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectClassId.setDescription('Internal ID for mapping SIF-policy to QoS-policy. Default-value of 0 means NOT SPECIFIED.') ipSifExpectIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 35, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifExpectIfIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifExpectIfIndex.setDescription('Interface-index for which the session is expected. A value of 0 means ANY interface-index.') ipSifAliasAddressGroupTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 47), ) if mibBuilder.loadTexts: ipSifAliasAddressGroupTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupTable.setDescription('Defines IP address or interface group alias') ipSifAliasAddressGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasAddressGroupId")) if mibBuilder.loadTexts: ipSifAliasAddressGroupEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupEntry.setDescription('Defines IP address or interface group alias') ipSifAliasAddressGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroupId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupId.setDescription('The unique address group entry ID') ipSifAliasAddressGroupAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroupAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupAlias.setDescription('Alias name for the address group entry') ipSifAliasAddressGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasAddressGroupIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupIndex.setDescription('The index for the address group entry to be referred by an ipSifAlias entry') ipSifAliasAddressGroupMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 47, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("interface", 1), ("address", 2), ("delete", 3))).clone('interface')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasAddressGroupMode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasAddressGroupMode.setDescription('Specifies wether this entry defines an interface or address group') ipSifAliasServiceGroupTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 48), ) if mibBuilder.loadTexts: ipSifAliasServiceGroupTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupTable.setDescription('Defines IP service group alias') ipSifAliasServiceGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifAliasServiceGroupId")) if mibBuilder.loadTexts: ipSifAliasServiceGroupEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupEntry.setDescription('Defines IP service group alias') ipSifAliasServiceGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroupId.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupId.setDescription('The unique IP service group entry ID') ipSifAliasServiceGroupAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroupAlias.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupAlias.setDescription('Alias name for the IP service group entry') ipSifAliasServiceGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifAliasServiceGroupIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupIndex.setDescription('The index for the Ip service group entry to be referred by an ipSifAlias entry') ipSifAliasServiceGroupMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 48, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("service", 1), ("delete", 2))).clone('service')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAliasServiceGroupMode.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAliasServiceGroupMode.setDescription('Specifies wether this entry defines an IP service group or should be deleted') ipSifPolicyChkTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 5, 49), ) if mibBuilder.loadTexts: ipSifPolicyChkTable.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkTable.setDescription("MIB interface in order to check the configured SIF polices: - for debugging purposes - for test applications - for configuration frontends NOTE: it's a stateless check, not based on a real IP session context ") ipSifPolicyChkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1), ).setIndexNames((0, "BIANCA-BRICK-SIF-MIB", "ipSifPolicyChkProtocol"), (0, "BIANCA-BRICK-SIF-MIB", "ipSifPolicyChkDestPort")) if mibBuilder.loadTexts: ipSifPolicyChkEntry.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkEntry.setDescription('') ipSifPolicyChkSourceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkSourceIfIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkSourceIfIndex.setDescription("The source interface index, for example '1' addresses the 'local' interface whereas '0' means 'don't check.") ipSifPolicyChkDestIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkDestIfIndex.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkDestIfIndex.setDescription("The destination interface index, for example '1' addresses the 'local' interface whereas '0' means 'don't check.'") ipSifPolicyChkSource = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 3), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkSource.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkSource.setDescription("The source IP address, 0.0.0.0 means 'don't check'.") ipSifPolicyChkDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 4), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkDestination.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkDestination.setDescription("The destination IP address, 0.0.0.0 means 'don't check'.") ipSifPolicyChkProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkProtocol.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkProtocol.setDescription("The IP protocol number to checked, '0' means 'don't check.") ipSifPolicyChkDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 6), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkDestPort.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkDestPort.setDescription("The destination port number (UDP/TCP service) to checked, '0' means 'don't check.") ipSifPolicyChkRule = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("reject", 3), ("unknown", 4))).clone('unknown')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkRule.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkRule.setDescription('Returns the associated policy, depending on: - ipSifAliasAction - ipSifAliasOrder - ipSifAliasStatus') ipSifPolicyChkRuleOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkRuleOrder.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkRuleOrder.setDescription('Returns the associated policy order (see ipSifAliasOrder).') ipSifPolicyChkResult = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("access", 1), ("deny", 2), ("unknown", 3))).clone('unknown')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkResult.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkResult.setDescription('Returns the result depending on: - ipSifPolicyChkRule - administrative status (ipSifAdminStatus) - operational status of the SIF engine') ipSifPolicyChkState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 1), ("trigger", 2), ("running", 3), ("done", 4))).clone('initial')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkState.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkState.setDescription('Displays the current status of the policy check, when setting to trigger(2) a new check will be initiated.') ipSifPolicyChkAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("check", 1), ("ignore", 2))).clone('check')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkAdminStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkAdminStatus.setDescription('Determines wether the ipSifAdminStatus should be considered for the policy (check (1)) or not (ingnore (2)).') ipSifPolicyChkOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("check", 1), ("ignore", 2))).clone('check')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPolicyChkOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkOperStatus.setDescription('Determines wether the SIF operational status should be considered for the policy (check (1)) or not (ingnore (2)).') ipSifPolicyChkCurrOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 5, 49, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('down')).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifPolicyChkCurrOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPolicyChkCurrOperStatus.setDescription('The current SIF operational status.') ipSif = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5, 37)) ipSifAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifAdminStatus.setStatus('mandatory') if mibBuilder.loadTexts: ipSifAdminStatus.setDescription('Enable or disable Stateful Inspection Firewall.') ipSifLocalFilter = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifLocalFilter.setStatus('mandatory') if mibBuilder.loadTexts: ipSifLocalFilter.setDescription('Enable or disable filtering on local requests') ipSifInterfaceFilter = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifInterfaceFilter.setStatus('mandatory') if mibBuilder.loadTexts: ipSifInterfaceFilter.setDescription('Enable or disable filtering on same Interface packets') ipSifSysloglevel = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("deny", 1), ("accept", 2), ("verbose", 3), ("none", 4))).clone('verbose')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifSysloglevel.setStatus('mandatory') if mibBuilder.loadTexts: ipSifSysloglevel.setDescription('Levels for less or more Informations in the Syslog, verbose : display all Sif Activity deny : display only rejects, ignore accept : display only accpts none : disable Syslogs') ipSifUdpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(180)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifUdpTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifUdpTimeout.setDescription('Timeout on inactive UDP Session') ipSifTcpTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(3600)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifTcpTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifTcpTimeout.setDescription('Timeout on inactive TCP Session') ipSifPPTPTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(86400)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifPPTPTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifPPTPTimeout.setDescription('Timeout on inactive PPTP Session') ipSifDefaultTimeout = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(30)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifDefaultTimeout.setStatus('mandatory') if mibBuilder.loadTexts: ipSifDefaultTimeout.setDescription('Timeout on all other ip Sessions') ipSifMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000))).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifMaxSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifMaxSessions.setDescription('Maximum number of monitored sessions') ipSifMaxRejectEntries = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000)).clone(1000)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifMaxRejectEntries.setStatus('mandatory') if mibBuilder.loadTexts: ipSifMaxRejectEntries.setDescription('Maximum number of ipSifRejectTable entries') ipSifMaxRejectTtl = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 86400)).clone(3600)).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifMaxRejectTtl.setStatus('mandatory') if mibBuilder.loadTexts: ipSifMaxRejectTtl.setDescription('Maximum time to live of the ipSifRejectTable entries in seconds') ipSifInterfaceAliasAutoCreate = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 37, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite") if mibBuilder.loadTexts: ipSifInterfaceAliasAutoCreate.setStatus('mandatory') if mibBuilder.loadTexts: ipSifInterfaceAliasAutoCreate.setDescription('Enable or disable automatic creation of interface aliases (see ipSifAliasAddressTable) due to created MPR interfaces visible in ifTable.') ipSifStat = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 5, 46)) ipSifStatCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrSessions.setDescription('Current number of all monitored sessions') ipSifStatCurrUdpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrUdpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrUdpSessions.setDescription('Current number of monitored UDP sessions') ipSifStatCurrTcpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrTcpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrTcpSessions.setDescription('Current number of monitored TCP sessions') ipSifStatCurrOtherSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrOtherSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrOtherSessions.setDescription('Current number of monitored non-TCP/UDP sessions') ipSifStatCurrExpectedSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatCurrExpectedSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatCurrExpectedSessions.setDescription('Current number of created expected sessions') ipSifStatTotalUdpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalUdpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalUdpSessions.setDescription('Total number of monitored UDP sessions') ipSifStatTotalTcpSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalTcpSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalTcpSessions.setDescription('Total number of monitored TCP sessions') ipSifStatTotalOtherSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalOtherSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalOtherSessions.setDescription('Total number of monitored non-TCP/UDP sessions') ipSifStatTotalExpectedSessions = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 5, 46, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: ipSifStatTotalExpectedSessions.setStatus('mandatory') if mibBuilder.loadTexts: ipSifStatTotalExpectedSessions.setDescription('Total number of monitored non-TCP/UDP sessions') mibBuilder.exportSymbols("BIANCA-BRICK-SIF-MIB", ipSifPolicyChkDestPort=ipSifPolicyChkDestPort, ipSifAliasAddressEntry=ipSifAliasAddressEntry, ipSifAliasServiceType=ipSifAliasServiceType, ipSifPolicyChkRule=ipSifPolicyChkRule, ipSifExpectTable=ipSifExpectTable, ipSifInterfaceFilter=ipSifInterfaceFilter, ipSifAliasAddressGroupIndex=ipSifAliasAddressGroupIndex, ipSifAliasSource=ipSifAliasSource, ipSifRejectTable=ipSifRejectTable, ipSifMaxRejectEntries=ipSifMaxRejectEntries, ipSifAliasServiceRange=ipSifAliasServiceRange, ipSifStatCurrSessions=ipSifStatCurrSessions, ipSifExpectEntry=ipSifExpectEntry, ipSifAliasServiceGroupTable=ipSifAliasServiceGroupTable, ipSifPPTPTimeout=ipSifPPTPTimeout, ipSifExpectSourcePort=ipSifExpectSourcePort, ipSifRejectPortHigh=ipSifRejectPortHigh, ipSifLocalFilter=ipSifLocalFilter, ipSifStatCurrExpectedSessions=ipSifStatCurrExpectedSessions, ipSifPolicyChkSource=ipSifPolicyChkSource, ipSifAliasService=ipSifAliasService, ipSifAliasAddressGroupAlias=ipSifAliasAddressGroupAlias, ipSifStatTotalTcpSessions=ipSifStatTotalTcpSessions, ipSifAliasServicePort=ipSifAliasServicePort, ipSifAliasServiceGroupIndex=ipSifAliasServiceGroupIndex, ipSifExpectDestPort=ipSifExpectDestPort, ipSifAliasAddressIndex=ipSifAliasAddressIndex, ipSifPolicyChkRuleOrder=ipSifPolicyChkRuleOrder, ipSifRejectIndex=ipSifRejectIndex, ipSifPolicyChkTable=ipSifPolicyChkTable, ipSifAliasAddressRange=ipSifAliasAddressRange, ipSifAliasAddressTable=ipSifAliasAddressTable, ipSifExpectDestination=ipSifExpectDestination, ipSifPolicyChkState=ipSifPolicyChkState, ipSifAliasServiceGroupEntry=ipSifAliasServiceGroupEntry, ipSifAliasServiceGroupMode=ipSifAliasServiceGroupMode, ipSifTcpTimeout=ipSifTcpTimeout, ipSifAliasTable=ipSifAliasTable, bintec=bintec, ipSifAliasOrder=ipSifAliasOrder, ipSifExpectClassId=ipSifExpectClassId, ipSifStat=ipSifStat, ipSifPolicyChkCurrOperStatus=ipSifPolicyChkCurrOperStatus, ipSifAliasPriority=ipSifAliasPriority, ipSifStatCurrTcpSessions=ipSifStatCurrTcpSessions, ipSifMaxSessions=ipSifMaxSessions, ipSifRejectSource=ipSifRejectSource, ipSifAliasServiceIndex=ipSifAliasServiceIndex, ipSifPolicyChkDestination=ipSifPolicyChkDestination, ipSifAliasServiceGroupAlias=ipSifAliasServiceGroupAlias, ipSifAliasServiceAlias=ipSifAliasServiceAlias, ipSifExpectIfIndex=ipSifExpectIfIndex, ipSifAliasAddressGroupTable=ipSifAliasAddressGroupTable, ipSifPolicyChkEntry=ipSifPolicyChkEntry, ipSif=ipSif, ipSifPolicyChkOperStatus=ipSifPolicyChkOperStatus, ipSifStatCurrUdpSessions=ipSifStatCurrUdpSessions, ipSifRejectEntry=ipSifRejectEntry, ipSifRejectSilence=ipSifRejectSilence, ipSifAliasEntry=ipSifAliasEntry, ipSifAdminStatus=ipSifAdminStatus, ipSifAliasServiceIcmpType=ipSifAliasServiceIcmpType, ipSifAliasAddressGroupEntry=ipSifAliasAddressGroupEntry, ipSifPolicyChkResult=ipSifPolicyChkResult, ipSifAliasAddressMask=ipSifAliasAddressMask, ipSifAliasServiceEntry=ipSifAliasServiceEntry, ipSifAliasServiceSourcePort=ipSifAliasServiceSourcePort, ipSifPolicyChkSourceIfIndex=ipSifPolicyChkSourceIfIndex, ipSifAliasAddressMode=ipSifAliasAddressMode, bibo=bibo, ipSifAliasAddressAddress=ipSifAliasAddressAddress, ipSifUdpTimeout=ipSifUdpTimeout, ipSifStatTotalOtherSessions=ipSifStatTotalOtherSessions, ipSifRejectDestination=ipSifRejectDestination, ipSifPolicyChkAdminStatus=ipSifPolicyChkAdminStatus, ipSifStatTotalUdpSessions=ipSifStatTotalUdpSessions, ipSifPolicyChkProtocol=ipSifPolicyChkProtocol, ipSifAliasAddressGroup=ipSifAliasAddressGroup, ipSifRejectRejects=ipSifRejectRejects, ipSifAliasServiceSourceRange=ipSifAliasServiceSourceRange, ipSifAliasServiceTable=ipSifAliasServiceTable, ipSifMaxRejectTtl=ipSifMaxRejectTtl, ipSifAliasServiceGroupId=ipSifAliasServiceGroupId, ipSifExpectProtocol=ipSifExpectProtocol, ipSifExpectIndex=ipSifExpectIndex, ipSifAliasClassId=ipSifAliasClassId, ipSifAliasStatus=ipSifAliasStatus, biboip=biboip, ipSifInterfaceAliasAutoCreate=ipSifInterfaceAliasAutoCreate, ipSifAliasServiceIcmpCode=ipSifAliasServiceIcmpCode, ipSifAliasAddressInterface=ipSifAliasAddressInterface, ipSifAliasServiceProtocol=ipSifAliasServiceProtocol, ipSifAliasAddressGroupMode=ipSifAliasAddressGroupMode, ipSifRejectPortLo=ipSifRejectPortLo, ipSifAliasAddressAlias=ipSifAliasAddressAlias, ipSifAliasAction=ipSifAliasAction, ipSifRejectProtocol=ipSifRejectProtocol, ipSifStatCurrOtherSessions=ipSifStatCurrOtherSessions, ipSifStatTotalExpectedSessions=ipSifStatTotalExpectedSessions, ipSifExpectPriority=ipSifExpectPriority, ipSifDefaultTimeout=ipSifDefaultTimeout, ipSifExpectSource=ipSifExpectSource, ipSifPolicyChkDestIfIndex=ipSifPolicyChkDestIfIndex, ipSifAliasAddressGroupId=ipSifAliasAddressGroupId, ipSifAliasDestination=ipSifAliasDestination, ipSifSysloglevel=ipSifSysloglevel, ipSifAliasServiceGroup=ipSifAliasServiceGroup)
true
true
f70d8d3d1901c92377d3073f065701dc3eb38e40
19,185
py
Python
azure-mgmt-datalake-store/azure/mgmt/datalake/store/operations/trusted_id_providers_operations.py
azuresdkci1x/azure-sdk-for-python-1722
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
[ "MIT" ]
1
2018-11-09T06:16:34.000Z
2018-11-09T06:16:34.000Z
azure-mgmt-datalake-store/azure/mgmt/datalake/store/operations/trusted_id_providers_operations.py
azuresdkci1x/azure-sdk-for-python-1722
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
[ "MIT" ]
null
null
null
azure-mgmt-datalake-store/azure/mgmt/datalake/store/operations/trusted_id_providers_operations.py
azuresdkci1x/azure-sdk-for-python-1722
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
[ "MIT" ]
1
2018-11-09T06:17:41.000Z
2018-11-09T06:17:41.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError import uuid from .. import models class TrustedIdProvidersOperations(object): """TrustedIdProvidersOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An objec model deserializer. """ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.config = config def create_or_update( self, resource_group_name, account_name, trusted_id_provider_name, id_provider, name=None, custom_headers=None, raw=False, **operation_config): """Creates or updates the specified trusted identity provider. During update, the trusted identity provider with the specified name will be replaced with this new provider. :param resource_group_name: The name of the Azure resource group that contains the Data Lake Store account. :type resource_group_name: str :param account_name: The name of the Data Lake Store account to add or replace the trusted identity provider. :type account_name: str :param trusted_id_provider_name: The name of the trusted identity provider. This is used for differentiation of providers in the account. :type trusted_id_provider_name: str :param id_provider: The URL of this trusted identity provider :type id_provider: str :param name: Resource name :type name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: :class:`TrustedIdProvider <azure.mgmt.datalake.store.models.TrustedIdProvider>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ parameters = models.TrustedIdProvider(name=name, id_provider=id_provider) # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(parameters, 'TrustedIdProvider') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TrustedIdProvider', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def update( self, resource_group_name, account_name, trusted_id_provider_name, id_provider=None, custom_headers=None, raw=False, **operation_config): """Updates the specified trusted identity provider. :param resource_group_name: The name of the Azure resource group that contains the Data Lake Store account. :type resource_group_name: str :param account_name: The name of the Data Lake Store account to which to update the trusted identity provider. :type account_name: str :param trusted_id_provider_name: The name of the trusted identity provider. This is used for differentiation of providers in the account. :type trusted_id_provider_name: str :param id_provider: The URL of this trusted identity provider :type id_provider: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: :class:`TrustedIdProvider <azure.mgmt.datalake.store.models.TrustedIdProvider>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ parameters = None if id_provider is not None: parameters = models.UpdateTrustedIdProviderParameters(id_provider=id_provider) # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body if parameters is not None: body_content = self._serialize.body(parameters, 'UpdateTrustedIdProviderParameters') else: body_content = None # Construct and send request request = self._client.patch(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TrustedIdProvider', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def delete( self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config): """Deletes the specified trusted identity provider from the specified Data Lake Store account. :param resource_group_name: The name of the Azure resource group that contains the Data Lake Store account. :type resource_group_name: str :param account_name: The name of the Data Lake Store account from which to delete the trusted identity provider. :type account_name: str :param trusted_id_provider_name: The name of the trusted identity provider to delete. :type trusted_id_provider_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: None :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def get( self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config): """Gets the specified Data Lake Store trusted identity provider. :param resource_group_name: The name of the Azure resource group that contains the Data Lake Store account. :type resource_group_name: str :param account_name: The name of the Data Lake Store account from which to get the trusted identity provider. :type account_name: str :param trusted_id_provider_name: The name of the trusted identity provider to retrieve. :type trusted_id_provider_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: :class:`TrustedIdProvider <azure.mgmt.datalake.store.models.TrustedIdProvider>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TrustedIdProvider', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def list_by_account( self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config): """Lists the Data Lake Store trusted identity providers within the specified Data Lake Store account. :param resource_group_name: The name of the Azure resource group that contains the Data Lake Store account. :type resource_group_name: str :param account_name: The name of the Data Lake Store account from which to get the trusted identity providers. :type account_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: :class:`TrustedIdProviderPaged <azure.mgmt.datalake.store.models.TrustedIdProviderPaged>` :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
48.082707
182
0.675319
from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError import uuid from .. import models class TrustedIdProvidersOperations(object): def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.config = config def create_or_update( self, resource_group_name, account_name, trusted_id_provider_name, id_provider, name=None, custom_headers=None, raw=False, **operation_config): parameters = models.TrustedIdProvider(name=name, id_provider=id_provider) url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') body_content = self._serialize.body(parameters, 'TrustedIdProvider') request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TrustedIdProvider', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def update( self, resource_group_name, account_name, trusted_id_provider_name, id_provider=None, custom_headers=None, raw=False, **operation_config): parameters = None if id_provider is not None: parameters = models.UpdateTrustedIdProviderParameters(id_provider=id_provider) url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if parameters is not None: body_content = self._serialize.body(parameters, 'UpdateTrustedIdProviderParameters') else: body_content = None request = self._client.patch(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TrustedIdProvider', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def delete( self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config): url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.delete(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def get( self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config): url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('TrustedIdProvider', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def list_by_account( self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config): def internal_paging(next_link=None, raw=False): if not next_link: url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'accountName': self._serialize.url("account_name", account_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response deserialized = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
true
true
f70d8d8d0a5e10ab1459d7f03e210358d64e4181
14,027
py
Python
pytorch_lightning/callbacks/model_checkpoint.py
DanielOrtega94/pytorch-lightning
817d25b157b9e4ad1eb0f74f8583b47f31776857
[ "Apache-2.0" ]
4
2021-02-28T11:58:18.000Z
2022-02-03T03:26:45.000Z
pytorch_lightning/callbacks/model_checkpoint.py
linshaoxin-maker/taas
34e11fab167a7beb78fbe6991ff8721dc9208793
[ "MIT" ]
2
2020-06-20T07:03:34.000Z
2020-07-25T01:49:23.000Z
pytorch_lightning/callbacks/model_checkpoint.py
linshaoxin-maker/taas
34e11fab167a7beb78fbe6991ff8721dc9208793
[ "MIT" ]
1
2020-08-28T00:06:18.000Z
2020-08-28T00:06:18.000Z
""" Model Checkpointing =================== Automatically save model checkpoints during training. """ import os import re import numpy as np from typing import Optional import torch from pytorch_lightning import _logger as log from pytorch_lightning.callbacks.base import Callback from pytorch_lightning.utilities import rank_zero_warn, rank_zero_only class ModelCheckpoint(Callback): r""" Save the model after every epoch if it improves. After training finishes, use :attr:`best_model_path` to retrieve the path to the best checkpoint file and :attr:`best_model_score` to retrieve its score. Args: filepath: path to save the model file. Can contain named formatting options to be auto-filled. Example:: # custom path # saves a file like: my/path/epoch_0.ckpt >>> checkpoint_callback = ModelCheckpoint('my/path/') # save any arbitrary metrics like `val_loss`, etc. in name # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt >>> checkpoint_callback = ModelCheckpoint( ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}' ... ) Can also be set to `None`, then it will be set to default location during trainer construction. monitor: quantity to monitor. verbose: verbosity mode. Default: ``False``. save_last: always saves the model at the end of the epoch. Default: ``False``. save_top_k: if `save_top_k == k`, the best k models according to the quantity monitored will be saved. if ``save_top_k == 0``, no models are saved. if ``save_top_k == -1``, all models are saved. Please note that the monitors are checked every `period` epochs. if ``save_top_k >= 2`` and the callback is called multiple times inside an epoch, the name of the saved file will be appended with a version count starting with `v0`. mode: one of {auto, min, max}. If ``save_top_k != 0``, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For `val_acc`, this should be `max`, for `val_loss` this should be `min`, etc. In `auto` mode, the direction is automatically inferred from the name of the monitored quantity. save_weights_only: if ``True``, then only the model's weights will be saved (``model.save_weights(filepath)``), else the full model is saved (``model.save(filepath)``). period: Interval (number of epochs) between checkpoints. Example:: >>> from pytorch_lightning import Trainer >>> from pytorch_lightning.callbacks import ModelCheckpoint # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/') >>> trainer = Trainer(checkpoint_callback=checkpoint_callback) # save epoch and val_loss in name # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt >>> checkpoint_callback = ModelCheckpoint( ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}' ... ) # retrieve the best checkpoint after training checkpoint_callback = ModelCheckpoint(filepath='my/path/') trainer = Trainer(checkpoint_callback=checkpoint_callback) model = ... trainer.fit(model) checkpoint_callback.best_model_path """ def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False, save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False, mode: str = 'auto', period: int = 1, prefix: str = ''): super().__init__() if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0: rank_zero_warn( f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0." "All files in this directory will be deleted when a checkpoint is saved!" ) self._rank = 0 self.monitor = monitor self.verbose = verbose if filepath is None: # will be determined by trainer at runtime self.dirpath, self.filename = None, None else: if os.path.isdir(filepath): self.dirpath, self.filename = filepath, '{epoch}' else: filepath = os.path.realpath(filepath) self.dirpath, self.filename = os.path.split(filepath) os.makedirs(self.dirpath, exist_ok=True) self.save_last = save_last self.save_top_k = save_top_k self.save_weights_only = save_weights_only self.period = period self.epoch_last_check = None self.prefix = prefix self.best_k_models = {} # {filename: monitor} self.kth_best_model_path = '' self.best_model_score = 0 self.best_model_path = '' self.save_function = None torch_inf = torch.tensor(np.Inf) mode_dict = { 'min': (torch_inf, 'min'), 'max': (-torch_inf, 'max'), 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure') else (torch_inf, 'min'), } if mode not in mode_dict: rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, ' f'fallback to auto mode.', RuntimeWarning) mode = 'auto' self.kth_value, self.mode = mode_dict[mode] @property def best(self): rank_zero_warn("Attribute `best` has been renamed to `best_model_score` since v0.8.0" " and will be removed in v0.10.0", DeprecationWarning) return self.best_model_score @property def kth_best_model(self): rank_zero_warn("Attribute `kth_best_model` has been renamed to `kth_best_model_path` since v0.8.0" " and will be removed in v0.10.0", DeprecationWarning) return self.kth_best_model_path def _del_model(self, filepath): if os.path.isfile(filepath): os.remove(filepath) def _save_model(self, filepath): # make paths os.makedirs(os.path.dirname(filepath), exist_ok=True) # delegate the saving to the model if self.save_function is not None: self.save_function(filepath, self.save_weights_only) else: raise ValueError(".save_function() not set") def check_monitor_top_k(self, current): less_than_k_models = len(self.best_k_models) < self.save_top_k if less_than_k_models: return True if not isinstance(current, torch.Tensor): rank_zero_warn( f'{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly.' f' HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning ) current = torch.tensor(current) monitor_op = { "min": torch.lt, "max": torch.gt, }[self.mode] return monitor_op(current, self.best_k_models[self.kth_best_model_path]) def format_checkpoint_name(self, epoch, metrics, ver=None): """Generate a filename according to the defined template. Example:: >>> tmpdir = os.path.dirname(__file__) >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}')) >>> os.path.basename(ckpt.format_checkpoint_name(0, {})) 'epoch=0.ckpt' >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}')) >>> os.path.basename(ckpt.format_checkpoint_name(5, {})) 'epoch=005.ckpt' >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}')) >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456))) 'epoch=2-val_loss=0.12.ckpt' >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}')) >>> os.path.basename(ckpt.format_checkpoint_name(0, {})) 'missing=0.ckpt' """ # check if user passed in keys to the string groups = re.findall(r'(\{.*?)[:\}]', self.filename) if len(groups) == 0: # default name filename = f'{self.prefix}_ckpt_epoch_{epoch}' else: metrics['epoch'] = epoch filename = self.filename for tmp in groups: name = tmp[1:] filename = filename.replace(tmp, name + '={' + name) if name not in metrics: metrics[name] = 0 filename = filename.format(**metrics) str_ver = f'_v{ver}' if ver is not None else '' filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt') return filepath @rank_zero_only def on_train_start(self, trainer, pl_module): """ Determine model checkpoint save directory at runtime. References attributes from the Trainer's logger to determine where to save checkpoints. """ if self.dirpath is not None: return # short circuit self.filename = '{epoch}' if trainer.logger is not None: # weights_save_path overrides anything save_dir = (getattr(trainer, 'weights_save_path', None) or getattr(trainer.logger, 'save_dir', None) or trainer.default_root_dir) version = trainer.logger.version if isinstance( trainer.logger.version, str) else f'version_{trainer.logger.version}' ckpt_path = os.path.join( save_dir, trainer.logger.name, version, "checkpoints" ) else: ckpt_path = os.path.join(trainer.default_root_dir, "checkpoints") self.dirpath = ckpt_path assert trainer.global_rank == 0, 'tried to make a checkpoint from non global_rank=0' os.makedirs(self.dirpath, exist_ok=True) trainer.ckpt_path = ckpt_path trainer.weights_save_path = ckpt_path @rank_zero_only def on_validation_end(self, trainer, pl_module): # only run on main process if trainer.global_rank != 0: return metrics = trainer.callback_metrics epoch = trainer.current_epoch if self.save_top_k == 0: # no models are saved return if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period: # skipping in this term return self.epoch_last_check = epoch if self.save_last: filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt') self._save_model(filepath) filepath = self.format_checkpoint_name(epoch, metrics) version_cnt = 0 while os.path.isfile(filepath): filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt) # this epoch called before version_cnt += 1 if self.save_top_k != -1: current = metrics.get(self.monitor) if not isinstance(current, torch.Tensor): rank_zero_warn( f'The metric you returned {current} must be a `torch.Tensor` instance, checkpoint not saved' f' HINT: what is the value of {self.monitor} in validation_epoch_end()?', RuntimeWarning ) if current is not None: current = torch.tensor(current) if current is None: rank_zero_warn( f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning ) elif self.check_monitor_top_k(current): self._do_check_save(filepath, current, epoch) elif self.verbose > 0: log.info(f'\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}') else: if self.verbose > 0: log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}') assert trainer.global_rank == 0, 'tried to make a checkpoint from non global_rank=0' self._save_model(filepath) def _do_check_save(self, filepath, current, epoch): # remove kth del_list = [] if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0: delpath = self.kth_best_model_path self.best_k_models.pop(self.kth_best_model_path) del_list.append(delpath) self.best_k_models[filepath] = current if len(self.best_k_models) == self.save_top_k: # monitor dict has reached k elements _op = max if self.mode == 'min' else min self.kth_best_model_path = _op(self.best_k_models, key=self.best_k_models.get) self.kth_value = self.best_k_models[self.kth_best_model_path] _op = min if self.mode == 'min' else max self.best_model_path = _op(self.best_k_models, key=self.best_k_models.get) self.best_model_score = self.best_k_models[self.best_model_path] if self.verbose > 0: log.info( f'\nEpoch {epoch:05d}: {self.monitor} reached' f' {current:0.5f} (best {self.best_model_score:0.5f}), saving model to' f' {filepath} as top {self.save_top_k}') self._save_model(filepath) for cur_path in del_list: if cur_path != filepath: self._del_model(cur_path)
39.962963
113
0.597205
import os import re import numpy as np from typing import Optional import torch from pytorch_lightning import _logger as log from pytorch_lightning.callbacks.base import Callback from pytorch_lightning.utilities import rank_zero_warn, rank_zero_only class ModelCheckpoint(Callback): def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False, save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False, mode: str = 'auto', period: int = 1, prefix: str = ''): super().__init__() if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0: rank_zero_warn( f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0." "All files in this directory will be deleted when a checkpoint is saved!" ) self._rank = 0 self.monitor = monitor self.verbose = verbose if filepath is None: self.dirpath, self.filename = None, None else: if os.path.isdir(filepath): self.dirpath, self.filename = filepath, '{epoch}' else: filepath = os.path.realpath(filepath) self.dirpath, self.filename = os.path.split(filepath) os.makedirs(self.dirpath, exist_ok=True) self.save_last = save_last self.save_top_k = save_top_k self.save_weights_only = save_weights_only self.period = period self.epoch_last_check = None self.prefix = prefix self.best_k_models = {} self.kth_best_model_path = '' self.best_model_score = 0 self.best_model_path = '' self.save_function = None torch_inf = torch.tensor(np.Inf) mode_dict = { 'min': (torch_inf, 'min'), 'max': (-torch_inf, 'max'), 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure') else (torch_inf, 'min'), } if mode not in mode_dict: rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, ' f'fallback to auto mode.', RuntimeWarning) mode = 'auto' self.kth_value, self.mode = mode_dict[mode] @property def best(self): rank_zero_warn("Attribute `best` has been renamed to `best_model_score` since v0.8.0" " and will be removed in v0.10.0", DeprecationWarning) return self.best_model_score @property def kth_best_model(self): rank_zero_warn("Attribute `kth_best_model` has been renamed to `kth_best_model_path` since v0.8.0" " and will be removed in v0.10.0", DeprecationWarning) return self.kth_best_model_path def _del_model(self, filepath): if os.path.isfile(filepath): os.remove(filepath) def _save_model(self, filepath): os.makedirs(os.path.dirname(filepath), exist_ok=True) if self.save_function is not None: self.save_function(filepath, self.save_weights_only) else: raise ValueError(".save_function() not set") def check_monitor_top_k(self, current): less_than_k_models = len(self.best_k_models) < self.save_top_k if less_than_k_models: return True if not isinstance(current, torch.Tensor): rank_zero_warn( f'{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly.' f' HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning ) current = torch.tensor(current) monitor_op = { "min": torch.lt, "max": torch.gt, }[self.mode] return monitor_op(current, self.best_k_models[self.kth_best_model_path]) def format_checkpoint_name(self, epoch, metrics, ver=None): groups = re.findall(r'(\{.*?)[:\}]', self.filename) if len(groups) == 0: filename = f'{self.prefix}_ckpt_epoch_{epoch}' else: metrics['epoch'] = epoch filename = self.filename for tmp in groups: name = tmp[1:] filename = filename.replace(tmp, name + '={' + name) if name not in metrics: metrics[name] = 0 filename = filename.format(**metrics) str_ver = f'_v{ver}' if ver is not None else '' filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt') return filepath @rank_zero_only def on_train_start(self, trainer, pl_module): if self.dirpath is not None: return self.filename = '{epoch}' if trainer.logger is not None: save_dir = (getattr(trainer, 'weights_save_path', None) or getattr(trainer.logger, 'save_dir', None) or trainer.default_root_dir) version = trainer.logger.version if isinstance( trainer.logger.version, str) else f'version_{trainer.logger.version}' ckpt_path = os.path.join( save_dir, trainer.logger.name, version, "checkpoints" ) else: ckpt_path = os.path.join(trainer.default_root_dir, "checkpoints") self.dirpath = ckpt_path assert trainer.global_rank == 0, 'tried to make a checkpoint from non global_rank=0' os.makedirs(self.dirpath, exist_ok=True) trainer.ckpt_path = ckpt_path trainer.weights_save_path = ckpt_path @rank_zero_only def on_validation_end(self, trainer, pl_module): if trainer.global_rank != 0: return metrics = trainer.callback_metrics epoch = trainer.current_epoch if self.save_top_k == 0: return if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period: return self.epoch_last_check = epoch if self.save_last: filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt') self._save_model(filepath) filepath = self.format_checkpoint_name(epoch, metrics) version_cnt = 0 while os.path.isfile(filepath): filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt) version_cnt += 1 if self.save_top_k != -1: current = metrics.get(self.monitor) if not isinstance(current, torch.Tensor): rank_zero_warn( f'The metric you returned {current} must be a `torch.Tensor` instance, checkpoint not saved' f' HINT: what is the value of {self.monitor} in validation_epoch_end()?', RuntimeWarning ) if current is not None: current = torch.tensor(current) if current is None: rank_zero_warn( f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning ) elif self.check_monitor_top_k(current): self._do_check_save(filepath, current, epoch) elif self.verbose > 0: log.info(f'\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}') else: if self.verbose > 0: log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}') assert trainer.global_rank == 0, 'tried to make a checkpoint from non global_rank=0' self._save_model(filepath) def _do_check_save(self, filepath, current, epoch): del_list = [] if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0: delpath = self.kth_best_model_path self.best_k_models.pop(self.kth_best_model_path) del_list.append(delpath) self.best_k_models[filepath] = current if len(self.best_k_models) == self.save_top_k: _op = max if self.mode == 'min' else min self.kth_best_model_path = _op(self.best_k_models, key=self.best_k_models.get) self.kth_value = self.best_k_models[self.kth_best_model_path] _op = min if self.mode == 'min' else max self.best_model_path = _op(self.best_k_models, key=self.best_k_models.get) self.best_model_score = self.best_k_models[self.best_model_path] if self.verbose > 0: log.info( f'\nEpoch {epoch:05d}: {self.monitor} reached' f' {current:0.5f} (best {self.best_model_score:0.5f}), saving model to' f' {filepath} as top {self.save_top_k}') self._save_model(filepath) for cur_path in del_list: if cur_path != filepath: self._del_model(cur_path)
true
true
f70d8e6ddc0e24af18d11f98db6e85e6d24faab2
7,862
py
Python
code/invivo/diffusion/02_analysis/dipy_atlas_target.py
sitek/subcortical-auditory-atlas
8218140c457ab97a6d897eb26aae4d6240596033
[ "BSD-3-Clause" ]
3
2019-02-27T19:17:52.000Z
2019-03-28T06:24:52.000Z
code/invivo/diffusion/02_analysis/dipy_atlas_target.py
sitek/subcortical-auditory-atlas
8218140c457ab97a6d897eb26aae4d6240596033
[ "BSD-3-Clause" ]
2
2021-04-28T22:55:14.000Z
2021-09-09T13:14:14.000Z
code/invivo/diffusion/02_analysis/dipy_atlas_target.py
sitek/subcortical-auditory-atlas
8218140c457ab97a6d897eb26aae4d6240596033
[ "BSD-3-Clause" ]
3
2019-08-19T14:22:54.000Z
2021-01-21T08:31:50.000Z
''' After creating tractography streamlines with dipy_csd.py, this workflow takes an atlas file and finds connections between each region in the atlas KRS 2018.05.04 ''' from nipype import config config.set('execution', 'remove_unnecessary_outputs', 'false') config.set('execution', 'crashfile_format', 'txt') from nipype import Node, Function, Workflow, IdentityInterface, MapNode from nipype.interfaces.io import SelectFiles, DataSink import os from glob import glob # which data sampling? also used for naming out_prefix = 'dipy_csd' atlas_type = 'func-atlas_shift_vox-4_ax-1' proj_dir = os.path.abspath('/om2/user/ksitek/maastricht/diffusion_faruk/') data_dir = os.path.join(proj_dir, 'data/01_diff_preprocessed') out_base = os.path.join(proj_dir, 'analysis/') out_dir = os.path.join(out_base, '%s_%s/'%(out_prefix, atlas_type)) if not os.path.exists(out_dir): os.mkdir(out_dir) work_dir = os.path.abspath('/om2/scratch/ksitek/%s_%s_0114/'%(out_prefix, atlas_type)) #sids = ['S02'] sids = ['S%02d' %s for s in range(1,12)] roi_names = ['LH_CN', 'LH_SOC', 'LH_IC', 'LH_MGB', 'RH_CN', 'RH_SOC', 'RH_IC', 'RH_MGB'] rois = list(range(len(roi_names))) ''' roi_dir = os.path.join(proj_dir, 'analysis/roi_diff/') subj_rois = {} for subj in sids: subj_rois[subj] = sorted(glob('%s/%s/%s_roi*_2diff.nii.gz'%(roi_dir, subj, subj))) print(subj_rois) ''' roi_dir = os.path.join(proj_dir, 'analysis/roi_diff_shift/') subj_rois = {} for subj in sids: subj_rois[subj] = sorted(glob('%s/%s/%s_roi*_2diff_shift_vox-4_ax-1.nii.gz'%(roi_dir, subj, subj))) print(subj_rois) # create the nipype workflow wf = Workflow(name='connectivity') wf.config['execution']['crashfile_format'] = 'txt' # define inputs to the workflow infosource = Node(IdentityInterface(fields=['subject_id', 'roi']), name='infosource') infosource.iterables = [('subject_id', list(subj_rois.keys())), ('roi', rois)] # grab data #templates = {'trk': 'analysis/mrtrix/{subject_id}/tracks.trk'} templates = {'trk': 'analysis/fathresh-0.5/{subject_id}/recon/{subject_id}_csd_streamline.trk'} grabber = Node(SelectFiles(templates), name='grabber') grabber.inputs.base_directory = proj_dir grabber.inputs.sort_filelist = True wf.connect(infosource, 'subject_id', grabber, 'subject_id') ''' define ROI mask files ''' # get subject-specific list of ROI filenames: def rois_fetcher(subj_rois, subj): return subj_rois[subj], subj fetch_rois = Node(Function(input_names=['subj_rois', 'subj'], output_names=['target_roi_filenames', 'subj'], function=rois_fetcher), name='fetch_rois') fetch_rois.inputs.subj_rois = subj_rois wf.connect(infosource, 'subject_id', fetch_rois, 'subj') # get single ROI filename for a specific subject: def roi_fetcher(subj_rois, subj, roi_idx): return subj_rois[subj][roi_idx], roi_idx fetch_roi = Node(Function(input_names=['subj_rois', 'subj', 'roi_idx'], output_names=['seed_roi', 'roi_idx'], function=roi_fetcher), name='fetch_roi') fetch_roi.inputs.subj_rois = subj_rois wf.connect(fetch_rois, 'subj', fetch_roi, 'subj') wf.connect(infosource, 'roi', fetch_roi, 'roi_idx') ''' streamline filtering ''' # filter streamlines by seed region of interest def sl_filter(streamlines, target_mask): from dipy.tracking.utils import target #from nilearn.image import resample_img import numpy as np import os import nibabel as nib trk_file = nib.streamlines.load(streamlines) streams = trk_file.streamlines hdr = trk_file.header # resample mask to resolution of input data & get data #target_resamp = resample_img(target_mask, affine) target_mask_img = nib.load(target_mask) affine = target_mask_img.affine target_mask_bool = np.zeros(target_mask_img.get_data().shape) target_mask_bool[target_mask_img.get_data().round()>0]=1 # rounding is key! target_sl_generator = target(streams, target_mask_bool, affine, include=True) target_streams = list(target_sl_generator) # create new filtered streamlines .trk file tractogram = nib.streamlines.Tractogram(target_streams) tractogram.affine_to_rasmm = np.eye(4) trk_file = nib.streamlines.TrkFile(tractogram, header=hdr) # get the filename import re label = re.search(r'(?<=Fix_)\w+',target_mask).group(0)[:-6] # save streamlines to filename target_streamlines = os.path.abspath('target_streamlines_region_%s.trk'%label) nib.streamlines.save(trk_file, target_streamlines) return target_streamlines, target_mask, affine, label filter_streamlines = Node(Function(input_names = ['streamlines', 'target_mask'], output_names = ['target_streamlines', 'target_mask', 'affine', 'seed_label'], function = sl_filter), name = 'filter_streamlines') filter_streamlines.inputs.roi_names = roi_names wf.connect(grabber, 'trk', filter_streamlines, 'streamlines') wf.connect(fetch_roi, 'seed_roi', filter_streamlines, 'target_mask') # filter streamlines by target ROI (for each seed ROI) def sl_filter_target(streamlines, target_mask, affine, seed_label): from dipy.tracking.utils import target from nilearn.image import resample_img import numpy as np import os import nibabel as nib trk_file = nib.streamlines.load(streamlines) streams = trk_file.streamlines hdr = trk_file.header # resample mask to resolution of input data & get data #target_resamp = resample_img(target_mask, affine) target_mask_img = nib.load(target_mask) affine = target_mask_img.affine target_mask_bool = np.zeros(target_mask_img.get_data().shape) target_mask_bool[target_mask_img.get_data().round()>0]=1 # rounding is key! target_sl_generator = target(streams, target_mask_bool, affine, include=True) target_streams = list(target_sl_generator) # create new filtered streamlines .trk file tractogram = nib.streamlines.Tractogram(target_streams) tractogram.affine_to_rasmm = np.eye(4) trk_file = nib.streamlines.TrkFile(tractogram, header=hdr) # get the filename import re label = re.search(r'(?<=Fix_)\w+',target_mask).group(0)[:-6] # save streamlines to filename target_streamlines = os.path.abspath('target_streamlines_seed-%s_target-%s.trk'%(seed_label, label)) nib.streamlines.save(trk_file, target_streamlines) return target_streamlines filter_streamlines_target = MapNode(Function(input_names = ['streamlines', 'target_mask', 'affine', 'seed_label'], output_names = ['target_streamlines'], function = sl_filter_target), iterfield = ['target_mask'], name = 'filter_streamlines_target') wf.connect(fetch_rois, 'target_roi_filenames', filter_streamlines_target, 'target_mask') wf.connect(filter_streamlines, 'target_streamlines', filter_streamlines_target, 'streamlines') wf.connect(filter_streamlines, 'affine', filter_streamlines_target, 'affine') wf.connect(filter_streamlines, 'seed_label', filter_streamlines_target, 'seed_label') ''' workflow ''' # create the output data sink ds = Node(DataSink(parameterization=False), name='sinker') ds.inputs.base_directory = out_dir ds.plugin_args = {'overwrite': True} wf.connect(infosource, 'subject_id', ds, 'container') wf.connect(filter_streamlines_target, 'target_streamlines', ds, 'target_streamlines') # definte the working directory and run the workflow wf.base_dir = work_dir wf.run(plugin='MultiProc')
38.729064
104
0.69537
from nipype import config config.set('execution', 'remove_unnecessary_outputs', 'false') config.set('execution', 'crashfile_format', 'txt') from nipype import Node, Function, Workflow, IdentityInterface, MapNode from nipype.interfaces.io import SelectFiles, DataSink import os from glob import glob out_prefix = 'dipy_csd' atlas_type = 'func-atlas_shift_vox-4_ax-1' proj_dir = os.path.abspath('/om2/user/ksitek/maastricht/diffusion_faruk/') data_dir = os.path.join(proj_dir, 'data/01_diff_preprocessed') out_base = os.path.join(proj_dir, 'analysis/') out_dir = os.path.join(out_base, '%s_%s/'%(out_prefix, atlas_type)) if not os.path.exists(out_dir): os.mkdir(out_dir) work_dir = os.path.abspath('/om2/scratch/ksitek/%s_%s_0114/'%(out_prefix, atlas_type)) sids = ['S%02d' %s for s in range(1,12)] roi_names = ['LH_CN', 'LH_SOC', 'LH_IC', 'LH_MGB', 'RH_CN', 'RH_SOC', 'RH_IC', 'RH_MGB'] rois = list(range(len(roi_names))) roi_dir = os.path.join(proj_dir, 'analysis/roi_diff_shift/') subj_rois = {} for subj in sids: subj_rois[subj] = sorted(glob('%s/%s/%s_roi*_2diff_shift_vox-4_ax-1.nii.gz'%(roi_dir, subj, subj))) print(subj_rois) wf = Workflow(name='connectivity') wf.config['execution']['crashfile_format'] = 'txt' infosource = Node(IdentityInterface(fields=['subject_id', 'roi']), name='infosource') infosource.iterables = [('subject_id', list(subj_rois.keys())), ('roi', rois)] templates = {'trk': 'analysis/fathresh-0.5/{subject_id}/recon/{subject_id}_csd_streamline.trk'} grabber = Node(SelectFiles(templates), name='grabber') grabber.inputs.base_directory = proj_dir grabber.inputs.sort_filelist = True wf.connect(infosource, 'subject_id', grabber, 'subject_id') def rois_fetcher(subj_rois, subj): return subj_rois[subj], subj fetch_rois = Node(Function(input_names=['subj_rois', 'subj'], output_names=['target_roi_filenames', 'subj'], function=rois_fetcher), name='fetch_rois') fetch_rois.inputs.subj_rois = subj_rois wf.connect(infosource, 'subject_id', fetch_rois, 'subj') def roi_fetcher(subj_rois, subj, roi_idx): return subj_rois[subj][roi_idx], roi_idx fetch_roi = Node(Function(input_names=['subj_rois', 'subj', 'roi_idx'], output_names=['seed_roi', 'roi_idx'], function=roi_fetcher), name='fetch_roi') fetch_roi.inputs.subj_rois = subj_rois wf.connect(fetch_rois, 'subj', fetch_roi, 'subj') wf.connect(infosource, 'roi', fetch_roi, 'roi_idx') def sl_filter(streamlines, target_mask): from dipy.tracking.utils import target import numpy as np import os import nibabel as nib trk_file = nib.streamlines.load(streamlines) streams = trk_file.streamlines hdr = trk_file.header target_mask_img = nib.load(target_mask) affine = target_mask_img.affine target_mask_bool = np.zeros(target_mask_img.get_data().shape) target_mask_bool[target_mask_img.get_data().round()>0]=1 target_sl_generator = target(streams, target_mask_bool, affine, include=True) target_streams = list(target_sl_generator) tractogram = nib.streamlines.Tractogram(target_streams) tractogram.affine_to_rasmm = np.eye(4) trk_file = nib.streamlines.TrkFile(tractogram, header=hdr) import re label = re.search(r'(?<=Fix_)\w+',target_mask).group(0)[:-6] target_streamlines = os.path.abspath('target_streamlines_region_%s.trk'%label) nib.streamlines.save(trk_file, target_streamlines) return target_streamlines, target_mask, affine, label filter_streamlines = Node(Function(input_names = ['streamlines', 'target_mask'], output_names = ['target_streamlines', 'target_mask', 'affine', 'seed_label'], function = sl_filter), name = 'filter_streamlines') filter_streamlines.inputs.roi_names = roi_names wf.connect(grabber, 'trk', filter_streamlines, 'streamlines') wf.connect(fetch_roi, 'seed_roi', filter_streamlines, 'target_mask') def sl_filter_target(streamlines, target_mask, affine, seed_label): from dipy.tracking.utils import target from nilearn.image import resample_img import numpy as np import os import nibabel as nib trk_file = nib.streamlines.load(streamlines) streams = trk_file.streamlines hdr = trk_file.header target_mask_img = nib.load(target_mask) affine = target_mask_img.affine target_mask_bool = np.zeros(target_mask_img.get_data().shape) target_mask_bool[target_mask_img.get_data().round()>0]=1 target_sl_generator = target(streams, target_mask_bool, affine, include=True) target_streams = list(target_sl_generator) tractogram = nib.streamlines.Tractogram(target_streams) tractogram.affine_to_rasmm = np.eye(4) trk_file = nib.streamlines.TrkFile(tractogram, header=hdr) import re label = re.search(r'(?<=Fix_)\w+',target_mask).group(0)[:-6] target_streamlines = os.path.abspath('target_streamlines_seed-%s_target-%s.trk'%(seed_label, label)) nib.streamlines.save(trk_file, target_streamlines) return target_streamlines filter_streamlines_target = MapNode(Function(input_names = ['streamlines', 'target_mask', 'affine', 'seed_label'], output_names = ['target_streamlines'], function = sl_filter_target), iterfield = ['target_mask'], name = 'filter_streamlines_target') wf.connect(fetch_rois, 'target_roi_filenames', filter_streamlines_target, 'target_mask') wf.connect(filter_streamlines, 'target_streamlines', filter_streamlines_target, 'streamlines') wf.connect(filter_streamlines, 'affine', filter_streamlines_target, 'affine') wf.connect(filter_streamlines, 'seed_label', filter_streamlines_target, 'seed_label') ds = Node(DataSink(parameterization=False), name='sinker') ds.inputs.base_directory = out_dir ds.plugin_args = {'overwrite': True} wf.connect(infosource, 'subject_id', ds, 'container') wf.connect(filter_streamlines_target, 'target_streamlines', ds, 'target_streamlines') wf.base_dir = work_dir wf.run(plugin='MultiProc')
true
true
f70d8e8ea5008eddd8e7aa1c216f2f3abe2fcd32
1,815
py
Python
torch_geometric/graphgym/__init__.py
NucciTheBoss/pytorch_geometric
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
[ "MIT" ]
2,350
2021-09-12T08:32:50.000Z
2022-03-31T18:09:36.000Z
torch_geometric/graphgym/__init__.py
NucciTheBoss/pytorch_geometric
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
[ "MIT" ]
588
2021-09-12T08:49:08.000Z
2022-03-31T21:02:13.000Z
torch_geometric/graphgym/__init__.py
NucciTheBoss/pytorch_geometric
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
[ "MIT" ]
505
2021-09-13T13:13:32.000Z
2022-03-31T15:54:00.000Z
from .contrib import * # noqa from .models import * # noqa from .utils import * # noqa from .checkpoint import load_ckpt, save_ckpt, remove_ckpt, clean_ckpt from .cmd_args import parse_args from .config import (cfg, set_cfg, load_cfg, dump_cfg, set_run_dir, set_out_dir, get_fname) from .init import init_weights from .loader import create_loader from .logger import set_printing, create_logger from .loss import compute_loss from .model_builder import create_model from .optim import create_optimizer, create_scheduler from .train import train from .register import (register_base, register_act, register_node_encoder, register_edge_encoder, register_stage, register_head, register_layer, register_pooling, register_network, register_config, register_dataset, register_loader, register_optimizer, register_scheduler, register_loss, register_train, register_metric) __all__ = classes = [ 'load_ckpt', 'save_ckpt', 'remove_ckpt', 'clean_ckpt', 'parse_args', 'cfg', 'set_cfg', 'load_cfg', 'dump_cfg', 'set_run_dir', 'set_out_dir', 'get_fname', 'init_weights', 'create_loader', 'set_printing', 'create_logger', 'compute_loss', 'create_model', 'create_optimizer', 'create_scheduler', 'train', 'register_base', 'register_act', 'register_node_encoder', 'register_edge_encoder', 'register_stage', 'register_head', 'register_layer', 'register_pooling', 'register_network', 'register_config', 'register_dataset', 'register_loader', 'register_optimizer', 'register_scheduler', 'register_loss', 'register_train', 'register_metric', ]
29.274194
77
0.672727
from .contrib import * from .models import * from .utils import * from .checkpoint import load_ckpt, save_ckpt, remove_ckpt, clean_ckpt from .cmd_args import parse_args from .config import (cfg, set_cfg, load_cfg, dump_cfg, set_run_dir, set_out_dir, get_fname) from .init import init_weights from .loader import create_loader from .logger import set_printing, create_logger from .loss import compute_loss from .model_builder import create_model from .optim import create_optimizer, create_scheduler from .train import train from .register import (register_base, register_act, register_node_encoder, register_edge_encoder, register_stage, register_head, register_layer, register_pooling, register_network, register_config, register_dataset, register_loader, register_optimizer, register_scheduler, register_loss, register_train, register_metric) __all__ = classes = [ 'load_ckpt', 'save_ckpt', 'remove_ckpt', 'clean_ckpt', 'parse_args', 'cfg', 'set_cfg', 'load_cfg', 'dump_cfg', 'set_run_dir', 'set_out_dir', 'get_fname', 'init_weights', 'create_loader', 'set_printing', 'create_logger', 'compute_loss', 'create_model', 'create_optimizer', 'create_scheduler', 'train', 'register_base', 'register_act', 'register_node_encoder', 'register_edge_encoder', 'register_stage', 'register_head', 'register_layer', 'register_pooling', 'register_network', 'register_config', 'register_dataset', 'register_loader', 'register_optimizer', 'register_scheduler', 'register_loss', 'register_train', 'register_metric', ]
true
true
f70d8fa4f84560af2015c5a1b3a2f184e9248d79
478
py
Python
env/Lib/site-packages/pylint/reporters/collecting_reporter.py
aammjian/cotton
f72b814f795f79a4054688e465c8b0ae5560f3b7
[ "Apache-2.0" ]
33
2020-10-05T01:04:55.000Z
2021-06-24T01:52:31.000Z
env/Lib/site-packages/pylint/reporters/collecting_reporter.py
aammjian/cotton
f72b814f795f79a4054688e465c8b0ae5560f3b7
[ "Apache-2.0" ]
14
2020-10-07T03:15:12.000Z
2021-01-15T11:53:29.000Z
env/Lib/site-packages/pylint/reporters/collecting_reporter.py
aammjian/cotton
f72b814f795f79a4054688e465c8b0ae5560f3b7
[ "Apache-2.0" ]
11
2020-07-31T08:20:43.000Z
2020-08-21T04:08:29.000Z
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING from pylint.reporters.base_reporter import BaseReporter class CollectingReporter(BaseReporter): """collects messages""" name = "collector" def __init__(self): BaseReporter.__init__(self) self.messages = [] def handle_message(self, msg): self.messages.append(msg) _display = None
23.9
80
0.700837
from pylint.reporters.base_reporter import BaseReporter class CollectingReporter(BaseReporter): name = "collector" def __init__(self): BaseReporter.__init__(self) self.messages = [] def handle_message(self, msg): self.messages.append(msg) _display = None
true
true
f70d8fd065f47a79a3a5fc37f61875b9851e4b48
995
py
Python
get_infoleg.py
crscardellino/sbwce
da39df54f666dcfa4b893bf3a1d1bfc7c801ad57
[ "MIT" ]
30
2017-05-28T05:54:58.000Z
2022-03-12T10:37:23.000Z
get_infoleg.py
NazaGara/sbwce
aea541d6bbe94d17fddb58e2466ce1d83ffae5cc
[ "MIT" ]
1
2018-01-10T12:36:59.000Z
2018-01-10T12:36:59.000Z
get_infoleg.py
NazaGara/sbwce
aea541d6bbe94d17fddb58e2466ce1d83ffae5cc
[ "MIT" ]
8
2019-07-02T05:33:28.000Z
2022-03-13T09:16:08.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import os import requests import sys import tarfile from lxml import etree from tqdm import tqdm INFOLEG_URL = 'https://cs.famaf.unc.edu.ar/~ccardellino/resources/mirel/law_text_cleaned.tar.bz2' INFOLEG_TMP = '/tmp/infoleg.tar.bz2' INFOLEG_OUT = '/tmp/law_text_cleaned/' INFOLEG_FIL = './corpora/infoleg/infoleg.txt' os.makedirs(os.path.dirname(INFOLEG_FIL), exist_ok=True) print("Downloading file...", file=sys.stderr) req = requests.get(INFOLEG_URL) with open(INFOLEG_TMP, 'wb') as fh: fh.write(req.content) with tarfile.open(INFOLEG_TMP, 'r') as fi, open(INFOLEG_FIL, 'w') as fo: print("Extracting file...", file=sys.stderr) fi.extractall(path="/tmp") print("Parsing files...", file=sys.stderr) for fname in tqdm(os.listdir(INFOLEG_OUT)): root = etree.parse(INFOLEG_OUT + fname).getroot() print(root.find('text').text, file=fo)
28.428571
97
0.720603
from __future__ import absolute_import, print_function, unicode_literals import os import requests import sys import tarfile from lxml import etree from tqdm import tqdm INFOLEG_URL = 'https://cs.famaf.unc.edu.ar/~ccardellino/resources/mirel/law_text_cleaned.tar.bz2' INFOLEG_TMP = '/tmp/infoleg.tar.bz2' INFOLEG_OUT = '/tmp/law_text_cleaned/' INFOLEG_FIL = './corpora/infoleg/infoleg.txt' os.makedirs(os.path.dirname(INFOLEG_FIL), exist_ok=True) print("Downloading file...", file=sys.stderr) req = requests.get(INFOLEG_URL) with open(INFOLEG_TMP, 'wb') as fh: fh.write(req.content) with tarfile.open(INFOLEG_TMP, 'r') as fi, open(INFOLEG_FIL, 'w') as fo: print("Extracting file...", file=sys.stderr) fi.extractall(path="/tmp") print("Parsing files...", file=sys.stderr) for fname in tqdm(os.listdir(INFOLEG_OUT)): root = etree.parse(INFOLEG_OUT + fname).getroot() print(root.find('text').text, file=fo)
true
true
f70d9006df1d3358e295c37467a8911a38d78293
1,137
py
Python
setup.py
martok/py-symcircuit
c48b1ad8ae4e496306da0c0a7474b4cd968a629f
[ "MIT" ]
null
null
null
setup.py
martok/py-symcircuit
c48b1ad8ae4e496306da0c0a7474b4cd968a629f
[ "MIT" ]
null
null
null
setup.py
martok/py-symcircuit
c48b1ad8ae4e496306da0c0a7474b4cd968a629f
[ "MIT" ]
null
null
null
#!/usr/bin/env python from setuptools import setup, find_packages setup( name="SymCircuit", version="0.2.0", author="Martok", author_email="martok@martoks-place.de", description="Symbolic electronic circuit analysis", long_description=open("README.md","rt").read(), long_description_content_type="text/markdown", url="https://github.com/martok/py-symcircuit", project_urls={ "Bug Tracker": "https://github.com/martok/py-symcircuit/issues", }, license="MIT", classifiers=[ "Development Status :: 4 - Beta", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Intended Audience :: Science/Research", "Intended Audience :: Developers", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)", ], packages=find_packages(), python_requires='>=3.6', install_requires=[ "sympy", ], extras_require={ "EE": [ "networkx", "numpy", "mplotkit" ], }, )
28.425
80
0.601583
from setuptools import setup, find_packages setup( name="SymCircuit", version="0.2.0", author="Martok", author_email="martok@martoks-place.de", description="Symbolic electronic circuit analysis", long_description=open("README.md","rt").read(), long_description_content_type="text/markdown", url="https://github.com/martok/py-symcircuit", project_urls={ "Bug Tracker": "https://github.com/martok/py-symcircuit/issues", }, license="MIT", classifiers=[ "Development Status :: 4 - Beta", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Intended Audience :: Science/Research", "Intended Audience :: Developers", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)", ], packages=find_packages(), python_requires='>=3.6', install_requires=[ "sympy", ], extras_require={ "EE": [ "networkx", "numpy", "mplotkit" ], }, )
true
true
f70d90d6d3226de3373ba626ee5ba1d83ff5c2ce
229
py
Python
backend/src/hatchling/builders/plugin/hooks.py
daobook/hatch
1cf39ad1a11ce90bc77fb7fdc4b9202433509179
[ "MIT" ]
null
null
null
backend/src/hatchling/builders/plugin/hooks.py
daobook/hatch
1cf39ad1a11ce90bc77fb7fdc4b9202433509179
[ "MIT" ]
null
null
null
backend/src/hatchling/builders/plugin/hooks.py
daobook/hatch
1cf39ad1a11ce90bc77fb7fdc4b9202433509179
[ "MIT" ]
null
null
null
from ...plugin import hookimpl from ..custom import CustomBuilder from ..sdist import SdistBuilder from ..wheel import WheelBuilder @hookimpl def hatch_register_builder(): return [CustomBuilder, SdistBuilder, WheelBuilder]
22.9
54
0.799127
from ...plugin import hookimpl from ..custom import CustomBuilder from ..sdist import SdistBuilder from ..wheel import WheelBuilder @hookimpl def hatch_register_builder(): return [CustomBuilder, SdistBuilder, WheelBuilder]
true
true
f70d919605a55c19587a7f0cfaa146dd71e5ac23
5,390
py
Python
python/ray/tests/test_multinode_failures.py
jacobowitz/ray
a69f2c7bf759b35fa6573329ec244a60f4d56a2a
[ "Apache-2.0" ]
1
2021-03-13T08:18:36.000Z
2021-03-13T08:18:36.000Z
python/ray/tests/test_multinode_failures.py
jacobowitz/ray
a69f2c7bf759b35fa6573329ec244a60f4d56a2a
[ "Apache-2.0" ]
null
null
null
python/ray/tests/test_multinode_failures.py
jacobowitz/ray
a69f2c7bf759b35fa6573329ec244a60f4d56a2a
[ "Apache-2.0" ]
null
null
null
import os import signal import sys import time import pytest import ray import ray.ray_constants as ray_constants from ray._private.cluster_utils import Cluster from ray.test_utils import RayTestTimeoutException, get_other_nodes SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM @pytest.fixture(params=[(1, 4), (4, 4)]) def ray_start_workers_separate_multinode(request): num_nodes = request.param[0] num_initial_workers = request.param[1] # Start the Ray processes. cluster = Cluster() for _ in range(num_nodes): cluster.add_node(num_cpus=num_initial_workers) ray.init(address=cluster.address) yield num_nodes, num_initial_workers # The code after the yield will run as teardown code. ray.shutdown() cluster.shutdown() def test_worker_failed(ray_start_workers_separate_multinode): num_nodes, num_initial_workers = (ray_start_workers_separate_multinode) @ray.remote def get_pids(): time.sleep(0.25) return os.getpid() start_time = time.time() pids = set() while len(pids) < num_nodes * num_initial_workers: new_pids = ray.get([ get_pids.remote() for _ in range(2 * num_nodes * num_initial_workers) ]) for pid in new_pids: pids.add(pid) if time.time() - start_time > 60: raise RayTestTimeoutException( "Timed out while waiting to get worker PIDs.") @ray.remote def f(x): time.sleep(0.5) return x # Submit more tasks than there are workers so that all workers and # cores are utilized. object_refs = [f.remote(i) for i in range(num_initial_workers * num_nodes)] object_refs += [f.remote(object_ref) for object_ref in object_refs] # Allow the tasks some time to begin executing. time.sleep(0.1) # Kill the workers as the tasks execute. for pid in pids: try: os.kill(pid, SIGKILL) except OSError: # The process may have already exited due to worker capping. pass time.sleep(0.1) # Make sure that we either get the object or we get an appropriate # exception. for object_ref in object_refs: try: ray.get(object_ref) except (ray.exceptions.RayTaskError, ray.exceptions.WorkerCrashedError): pass def _test_component_failed(cluster, component_type): """Kill a component on all worker nodes and check workload succeeds.""" # Submit many tasks with many dependencies. @ray.remote def f(x): # Sleep to make sure that tasks actually fail mid-execution. time.sleep(0.01) return x @ray.remote def g(*xs): # Sleep to make sure that tasks actually fail mid-execution. We # only use it for direct calls because the test already takes a # long time to run with the raylet codepath. time.sleep(0.01) return 1 # Kill the component on all nodes except the head node as the tasks # execute. Do this in a loop while submitting tasks between each # component failure. time.sleep(0.1) worker_nodes = get_other_nodes(cluster) assert len(worker_nodes) > 0 for node in worker_nodes: process = node.all_processes[component_type][0].process # Submit a round of tasks with many dependencies. x = 1 for _ in range(1000): x = f.remote(x) xs = [g.remote(1)] for _ in range(100): xs.append(g.remote(*xs)) xs.append(g.remote(1)) # Kill a component on one of the nodes. process.terminate() time.sleep(1) process.kill() process.wait() assert not process.poll() is None # Make sure that we can still get the objects after the # executing tasks died. ray.get(x) ray.get(xs) def check_components_alive(cluster, component_type, check_component_alive): """Check that a given component type is alive on all worker nodes.""" worker_nodes = get_other_nodes(cluster) assert len(worker_nodes) > 0 for node in worker_nodes: process = node.all_processes[component_type][0].process if check_component_alive: assert process.poll() is None else: print("waiting for " + component_type + " with PID " + str(process.pid) + "to terminate") process.wait() print("done waiting for " + component_type + " with PID " + str(process.pid) + "to terminate") assert not process.poll() is None @pytest.mark.parametrize( "ray_start_cluster", [{ "num_cpus": 8, "num_nodes": 4, "_system_config": { # Raylet codepath is not stable with a shorter timeout. "num_heartbeats_timeout": 10 }, }], indirect=True) def test_raylet_failed(ray_start_cluster): cluster = ray_start_cluster # Kill all raylets on worker nodes. _test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET) # The plasma stores should still be alive on the worker nodes. check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE, True) if __name__ == "__main__": import pytest sys.exit(pytest.main(["-v", __file__]))
31.520468
79
0.64026
import os import signal import sys import time import pytest import ray import ray.ray_constants as ray_constants from ray._private.cluster_utils import Cluster from ray.test_utils import RayTestTimeoutException, get_other_nodes SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM @pytest.fixture(params=[(1, 4), (4, 4)]) def ray_start_workers_separate_multinode(request): num_nodes = request.param[0] num_initial_workers = request.param[1] cluster = Cluster() for _ in range(num_nodes): cluster.add_node(num_cpus=num_initial_workers) ray.init(address=cluster.address) yield num_nodes, num_initial_workers ray.shutdown() cluster.shutdown() def test_worker_failed(ray_start_workers_separate_multinode): num_nodes, num_initial_workers = (ray_start_workers_separate_multinode) @ray.remote def get_pids(): time.sleep(0.25) return os.getpid() start_time = time.time() pids = set() while len(pids) < num_nodes * num_initial_workers: new_pids = ray.get([ get_pids.remote() for _ in range(2 * num_nodes * num_initial_workers) ]) for pid in new_pids: pids.add(pid) if time.time() - start_time > 60: raise RayTestTimeoutException( "Timed out while waiting to get worker PIDs.") @ray.remote def f(x): time.sleep(0.5) return x object_refs = [f.remote(i) for i in range(num_initial_workers * num_nodes)] object_refs += [f.remote(object_ref) for object_ref in object_refs] time.sleep(0.1) for pid in pids: try: os.kill(pid, SIGKILL) except OSError: pass time.sleep(0.1) for object_ref in object_refs: try: ray.get(object_ref) except (ray.exceptions.RayTaskError, ray.exceptions.WorkerCrashedError): pass def _test_component_failed(cluster, component_type): @ray.remote def f(x): time.sleep(0.01) return x @ray.remote def g(*xs): time.sleep(0.01) return 1 time.sleep(0.1) worker_nodes = get_other_nodes(cluster) assert len(worker_nodes) > 0 for node in worker_nodes: process = node.all_processes[component_type][0].process x = 1 for _ in range(1000): x = f.remote(x) xs = [g.remote(1)] for _ in range(100): xs.append(g.remote(*xs)) xs.append(g.remote(1)) process.terminate() time.sleep(1) process.kill() process.wait() assert not process.poll() is None ray.get(x) ray.get(xs) def check_components_alive(cluster, component_type, check_component_alive): worker_nodes = get_other_nodes(cluster) assert len(worker_nodes) > 0 for node in worker_nodes: process = node.all_processes[component_type][0].process if check_component_alive: assert process.poll() is None else: print("waiting for " + component_type + " with PID " + str(process.pid) + "to terminate") process.wait() print("done waiting for " + component_type + " with PID " + str(process.pid) + "to terminate") assert not process.poll() is None @pytest.mark.parametrize( "ray_start_cluster", [{ "num_cpus": 8, "num_nodes": 4, "_system_config": { "num_heartbeats_timeout": 10 }, }], indirect=True) def test_raylet_failed(ray_start_cluster): cluster = ray_start_cluster _test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET) check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE, True) if __name__ == "__main__": import pytest sys.exit(pytest.main(["-v", __file__]))
true
true
f70d92ad79eb85d982080b22c087b526d599548f
4,699
py
Python
src/websockets/datastructures.py
pushp-garg/websockets
aa93c4ceca90a1798f86b2fc2b110a42f308d721
[ "BSD-3-Clause" ]
3
2021-04-02T08:35:24.000Z
2021-04-02T08:49:47.000Z
src/websockets/datastructures.py
pushp-garg/websockets
aa93c4ceca90a1798f86b2fc2b110a42f308d721
[ "BSD-3-Clause" ]
null
null
null
src/websockets/datastructures.py
pushp-garg/websockets
aa93c4ceca90a1798f86b2fc2b110a42f308d721
[ "BSD-3-Clause" ]
2
2021-04-02T08:40:50.000Z
2021-04-02T08:45:23.000Z
""" This module defines a data structure for manipulating HTTP headers. """ from typing import ( Any, Dict, Iterable, Iterator, List, Mapping, MutableMapping, Tuple, Union, ) __all__ = ["Headers", "MultipleValuesError"] class MultipleValuesError(LookupError): """ Exception raised when :class:`Headers` has more than one value for a key. """ def __str__(self) -> str: # Implement the same logic as KeyError_str in Objects/exceptions.c. if len(self.args) == 1: return repr(self.args[0]) return super().__str__() class Headers(MutableMapping[str, str]): """ Efficient data structure for manipulating HTTP headers. A :class:`list` of ``(name, values)`` is inefficient for lookups. A :class:`dict` doesn't suffice because header names are case-insensitive and multiple occurrences of headers with the same name are possible. :class:`Headers` stores HTTP headers in a hybrid data structure to provide efficient insertions and lookups while preserving the original data. In order to account for multiple values with minimal hassle, :class:`Headers` follows this logic: - When getting a header with ``headers[name]``: - if there's no value, :exc:`KeyError` is raised; - if there's exactly one value, it's returned; - if there's more than one value, :exc:`MultipleValuesError` is raised. - When setting a header with ``headers[name] = value``, the value is appended to the list of values for that header. - When deleting a header with ``del headers[name]``, all values for that header are removed (this is slow). Other methods for manipulating headers are consistent with this logic. As long as no header occurs multiple times, :class:`Headers` behaves like :class:`dict`, except keys are lower-cased to provide case-insensitivity. Two methods support support manipulating multiple values explicitly: - :meth:`get_all` returns a list of all values for a header; - :meth:`raw_items` returns an iterator of ``(name, values)`` pairs. """ __slots__ = ["_dict", "_list"] def __init__(self, *args: Any, **kwargs: str) -> None: self._dict: Dict[str, List[str]] = {} self._list: List[Tuple[str, str]] = [] # MutableMapping.update calls __setitem__ for each (name, value) pair. self.update(*args, **kwargs) def __str__(self) -> str: return "".join(f"{key}: {value}\r\n" for key, value in self._list) + "\r\n" def __repr__(self) -> str: return f"{self.__class__.__name__}({self._list!r})" def copy(self) -> "Headers": copy = self.__class__() copy._dict = self._dict.copy() copy._list = self._list.copy() return copy def serialize(self) -> bytes: # Headers only contain ASCII characters. return str(self).encode() # Collection methods def __contains__(self, key: object) -> bool: return isinstance(key, str) and key.lower() in self._dict def __iter__(self) -> Iterator[str]: return iter(self._dict) def __len__(self) -> int: return len(self._dict) # MutableMapping methods def __getitem__(self, key: str) -> str: value = self._dict[key.lower()] if len(value) == 1: return value[0] else: raise MultipleValuesError(key) def __setitem__(self, key: str, value: str) -> None: self._dict.setdefault(key.lower(), []).append(value) self._list.append((key, value)) def __delitem__(self, key: str) -> None: key_lower = key.lower() self._dict.__delitem__(key_lower) # This is inefficent. Fortunately deleting HTTP headers is uncommon. self._list = [(k, v) for k, v in self._list if k.lower() != key_lower] def __eq__(self, other: Any) -> bool: if not isinstance(other, Headers): return NotImplemented return self._list == other._list def clear(self) -> None: """ Remove all headers. """ self._dict = {} self._list = [] # Methods for handling multiple values def get_all(self, key: str) -> List[str]: """ Return the (possibly empty) list of all values for a header. :param key: header name """ return self._dict.get(key.lower(), []) def raw_items(self) -> Iterator[Tuple[str, str]]: """ Return an iterator of all values as ``(name, value)`` pairs. """ return iter(self._list) HeadersLike = Union[Headers, Mapping[str, str], Iterable[Tuple[str, str]]]
29.36875
83
0.625239
from typing import ( Any, Dict, Iterable, Iterator, List, Mapping, MutableMapping, Tuple, Union, ) __all__ = ["Headers", "MultipleValuesError"] class MultipleValuesError(LookupError): def __str__(self) -> str: if len(self.args) == 1: return repr(self.args[0]) return super().__str__() class Headers(MutableMapping[str, str]): __slots__ = ["_dict", "_list"] def __init__(self, *args: Any, **kwargs: str) -> None: self._dict: Dict[str, List[str]] = {} self._list: List[Tuple[str, str]] = [] self.update(*args, **kwargs) def __str__(self) -> str: return "".join(f"{key}: {value}\r\n" for key, value in self._list) + "\r\n" def __repr__(self) -> str: return f"{self.__class__.__name__}({self._list!r})" def copy(self) -> "Headers": copy = self.__class__() copy._dict = self._dict.copy() copy._list = self._list.copy() return copy def serialize(self) -> bytes: return str(self).encode() def __contains__(self, key: object) -> bool: return isinstance(key, str) and key.lower() in self._dict def __iter__(self) -> Iterator[str]: return iter(self._dict) def __len__(self) -> int: return len(self._dict) def __getitem__(self, key: str) -> str: value = self._dict[key.lower()] if len(value) == 1: return value[0] else: raise MultipleValuesError(key) def __setitem__(self, key: str, value: str) -> None: self._dict.setdefault(key.lower(), []).append(value) self._list.append((key, value)) def __delitem__(self, key: str) -> None: key_lower = key.lower() self._dict.__delitem__(key_lower) self._list = [(k, v) for k, v in self._list if k.lower() != key_lower] def __eq__(self, other: Any) -> bool: if not isinstance(other, Headers): return NotImplemented return self._list == other._list def clear(self) -> None: self._dict = {} self._list = [] def get_all(self, key: str) -> List[str]: return self._dict.get(key.lower(), []) def raw_items(self) -> Iterator[Tuple[str, str]]: return iter(self._list) HeadersLike = Union[Headers, Mapping[str, str], Iterable[Tuple[str, str]]]
true
true
f70d92b75a6e4cfc361c5e2dd6d2c9296d030ae6
318
py
Python
__init__.py
TheXu/Inverse_Logistic_Regression_Recommender
7dcba1f240f818488382d4727c2e2c252baea9b8
[ "MIT" ]
null
null
null
__init__.py
TheXu/Inverse_Logistic_Regression_Recommender
7dcba1f240f818488382d4727c2e2c252baea9b8
[ "MIT" ]
null
null
null
__init__.py
TheXu/Inverse_Logistic_Regression_Recommender
7dcba1f240f818488382d4727c2e2c252baea9b8
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Inverse Logistic Regression Recommender Created on 2019 @author: Alex Xu <ayx2@case.edu> """ from .predict_feature_values import InverseLogisticRegressionRecommender from .evaluate import validate from .evaluate import _error_metrics_ __all__ = [ 'validate', '_error_metrics_' ]
18.705882
72
0.751572
from .predict_feature_values import InverseLogisticRegressionRecommender from .evaluate import validate from .evaluate import _error_metrics_ __all__ = [ 'validate', '_error_metrics_' ]
true
true
f70d92d287bb997262dc6ea43cb19d2451852d47
1,799
py
Python
chapter_3/3_6_animal_shelter.py
elishaking/CTCi
6a91fd67e8765e5abef72c5b247f4d5444945438
[ "MIT" ]
1
2020-07-03T09:47:34.000Z
2020-07-03T09:47:34.000Z
chapter_3/3_6_animal_shelter.py
elishaking/CTCi
6a91fd67e8765e5abef72c5b247f4d5444945438
[ "MIT" ]
null
null
null
chapter_3/3_6_animal_shelter.py
elishaking/CTCi
6a91fd67e8765e5abef72c5b247f4d5444945438
[ "MIT" ]
null
null
null
import enum from queue import Queue class Animal(enum.Enum): cat = 'cat' dog = 'dog' class AnimalShelter: def __init__(self): self.cats = Queue() self.dogs = Queue() self.pos = 0 # Time complexity: O(1) # Space complexity: O(1) def enqueue(self, animal: Animal): if animal == Animal.cat: self.cats.add([animal, self.pos]) else: self.dogs.add([animal, self.pos]) self.pos += 1 return self # Time complexity: O(1) # Space complexity: O(1) def dequeue(self): if self.dogs.is_empty() and self.cats.is_empty(): raise Exception('no animal in shelter') if self.dogs.is_empty(): return self.cats.remove() if self.cats.is_empty(): return self.dogs.remove() dog_pos = self.dogs.peek()[1] cat_pos = self.cats.peek()[1] if cat_pos < dog_pos: return self.cats.remove() else: return self.dogs.remove() # Time complexity: O(1) # Space complexity: O(1) def dequeueCat(self): if self.cats.is_empty(): raise Exception('no cats in shelter') return self.cats.remove() # Time complexity: O(1) # Space complexity: O(1) def dequeueDog(self): if self.dogs.is_empty(): raise Exception('no dogs in shelter') return self.dogs.remove() def __str__(self): return 'cats: ' + str(self.cats) + '\ndogs: ' + str(self.dogs) if __name__ == "__main__": shelter = AnimalShelter() shelter.enqueue(Animal.cat).enqueue(Animal.dog).enqueue(Animal.cat) print(shelter) print(shelter.dequeue()) print(shelter.dequeue()) print(shelter.dequeue()) # print(shelter.dequeue())
23.671053
71
0.573096
import enum from queue import Queue class Animal(enum.Enum): cat = 'cat' dog = 'dog' class AnimalShelter: def __init__(self): self.cats = Queue() self.dogs = Queue() self.pos = 0 def enqueue(self, animal: Animal): if animal == Animal.cat: self.cats.add([animal, self.pos]) else: self.dogs.add([animal, self.pos]) self.pos += 1 return self def dequeue(self): if self.dogs.is_empty() and self.cats.is_empty(): raise Exception('no animal in shelter') if self.dogs.is_empty(): return self.cats.remove() if self.cats.is_empty(): return self.dogs.remove() dog_pos = self.dogs.peek()[1] cat_pos = self.cats.peek()[1] if cat_pos < dog_pos: return self.cats.remove() else: return self.dogs.remove() def dequeueCat(self): if self.cats.is_empty(): raise Exception('no cats in shelter') return self.cats.remove() def dequeueDog(self): if self.dogs.is_empty(): raise Exception('no dogs in shelter') return self.dogs.remove() def __str__(self): return 'cats: ' + str(self.cats) + '\ndogs: ' + str(self.dogs) if __name__ == "__main__": shelter = AnimalShelter() shelter.enqueue(Animal.cat).enqueue(Animal.dog).enqueue(Animal.cat) print(shelter) print(shelter.dequeue()) print(shelter.dequeue()) print(shelter.dequeue())
true
true
f70d933e514a7e9af7d35401d59b56c3517a80a8
5,750
py
Python
cleverhans/future/tf2/attacks/fast_gradient_method.py
HaojieYuan/cleverhans
02a5ac27870ad8318c1e6ef3b210467e3500fdd9
[ "MIT" ]
21
2019-06-07T17:05:30.000Z
2022-02-07T03:25:15.000Z
cleverhans/future/tf2/attacks/fast_gradient_method.py
HaojieYuan/cleverhans
02a5ac27870ad8318c1e6ef3b210467e3500fdd9
[ "MIT" ]
null
null
null
cleverhans/future/tf2/attacks/fast_gradient_method.py
HaojieYuan/cleverhans
02a5ac27870ad8318c1e6ef3b210467e3500fdd9
[ "MIT" ]
8
2019-06-11T03:06:29.000Z
2022-01-18T04:18:27.000Z
"""The Fast Gradient Method attack.""" import numpy as np import tensorflow as tf def fast_gradient_method(model_fn, x, eps, ord, clip_min=None, clip_max=None, y=None, targeted=False, sanity_checks=False): """ Tensorflow 2.0 implementation of the Fast Gradient Method. :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572. :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param clip_min: (optional) float. Minimum float value for adversarial example components. :param clip_max: (optional) float. Maximum float value for adversarial example components. :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example """ if ord not in [np.inf, 1, 2]: raise ValueError("Norm order must be either np.inf, 1, or 2.") asserts = [] # If a data range was specified, check that the input was in that range if clip_min is not None: asserts.append(tf.math.greater_equal(x, clip_min)) if clip_max is not None: asserts.append(tf.math.less_equal(x, clip_max)) if y is None: # Using model predictions as ground truth to avoid label leaking y = tf.argmax(model_fn(x), 1) grad = compute_gradient(model_fn, x, y, targeted) optimal_perturbation = optimize_linear(grad, eps, ord) # Add perturbation to original example to obtain adversarial example adv_x = x + optimal_perturbation # If clipping is needed, reset all values outside of [clip_min, clip_max] if (clip_min is not None) or (clip_max is not None): # We don't currently support one-sided clipping assert clip_min is not None and clip_max is not None adv_x = tf.clip_by_value(adv_x, clip_min, clip_max) if sanity_checks: assert np.all(asserts) return adv_x # Due to performance reasons, this function is wrapped inside of tf.function decorator. # Not using the decorator here, or letting the user wrap the attack in tf.function is way # slower on Tensorflow 2.0.0-alpha0. @tf.function def compute_gradient(model_fn, x, y, targeted): """ Computes the gradient of the loss with respect to the input tensor. :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor :param y: Tensor with true labels. If targeted is true, then provide the target label. :param targeted: bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :return: A tensor containing the gradient of the loss with respect to the input tensor. """ loss_fn = tf.nn.sparse_softmax_cross_entropy_with_logits with tf.GradientTape() as g: g.watch(x) # Compute loss loss = loss_fn(labels=y, logits=model_fn(x)) if targeted: # attack is targeted, minimize loss of target label rather than maximize loss of correct label loss = -loss # Define gradient of loss wrt input grad = g.gradient(loss, x) return grad def optimize_linear(grad, eps, ord=np.inf): """ Solves for the optimal input to a linear function under a norm constraint. Optimal_perturbation = argmax_{eta, ||eta||_{ord} < eps} dot(eta, grad) :param grad: tf tensor containing a batch of gradients :param eps: float scalar specifying size of constraint region :param ord: int specifying order of norm :returns: tf tensor containing optimal perturbation """ # Convert the iterator returned by `range` into a list. axis = list(range(1, len(grad.get_shape()))) avoid_zero_div = 1e-12 if ord == np.inf: # Take sign of gradient optimal_perturbation = tf.sign(grad) # The following line should not change the numerical results. It applies only because # `optimal_perturbation` is the output of a `sign` op, which has zero derivative anyway. # It should not be applied for the other norms, where the perturbation has a non-zero derivative. optimal_perturbation = tf.stop_gradient(optimal_perturbation) elif ord == 1: abs_grad = tf.abs(grad) sign = tf.sign(grad) max_abs_grad = tf.reduce_max(abs_grad, axis, keepdims=True) tied_for_max = tf.dtypes.cast(tf.equal(abs_grad, max_abs_grad), dtype=tf.float32) num_ties = tf.reduce_sum(tied_for_max, axis, keepdims=True) optimal_perturbation = sign * tied_for_max / num_ties elif ord == 2: square = tf.maximum(avoid_zero_div, tf.reduce_sum(tf.square(grad), axis, keepdims=True)) optimal_perturbation = grad / tf.sqrt(square) else: raise NotImplementedError("Only L-inf, L1 and L2 norms are currently implemented.") # Scale perturbation to be the solution for the norm=eps rather than norm=1 problem scaled_perturbation = tf.multiply(eps, optimal_perturbation) return scaled_perturbation
44.573643
112
0.722087
import numpy as np import tensorflow as tf def fast_gradient_method(model_fn, x, eps, ord, clip_min=None, clip_max=None, y=None, targeted=False, sanity_checks=False): if ord not in [np.inf, 1, 2]: raise ValueError("Norm order must be either np.inf, 1, or 2.") asserts = [] if clip_min is not None: asserts.append(tf.math.greater_equal(x, clip_min)) if clip_max is not None: asserts.append(tf.math.less_equal(x, clip_max)) if y is None: y = tf.argmax(model_fn(x), 1) grad = compute_gradient(model_fn, x, y, targeted) optimal_perturbation = optimize_linear(grad, eps, ord) adv_x = x + optimal_perturbation if (clip_min is not None) or (clip_max is not None): assert clip_min is not None and clip_max is not None adv_x = tf.clip_by_value(adv_x, clip_min, clip_max) if sanity_checks: assert np.all(asserts) return adv_x # Due to performance reasons, this function is wrapped inside of tf.function decorator. # Not using the decorator here, or letting the user wrap the attack in tf.function is way # slower on Tensorflow 2.0.0-alpha0. @tf.function def compute_gradient(model_fn, x, y, targeted): loss_fn = tf.nn.sparse_softmax_cross_entropy_with_logits with tf.GradientTape() as g: g.watch(x) # Compute loss loss = loss_fn(labels=y, logits=model_fn(x)) if targeted: # attack is targeted, minimize loss of target label rather than maximize loss of correct label loss = -loss # Define gradient of loss wrt input grad = g.gradient(loss, x) return grad def optimize_linear(grad, eps, ord=np.inf): # Convert the iterator returned by `range` into a list. axis = list(range(1, len(grad.get_shape()))) avoid_zero_div = 1e-12 if ord == np.inf: # Take sign of gradient optimal_perturbation = tf.sign(grad) # The following line should not change the numerical results. It applies only because # `optimal_perturbation` is the output of a `sign` op, which has zero derivative anyway. # It should not be applied for the other norms, where the perturbation has a non-zero derivative. optimal_perturbation = tf.stop_gradient(optimal_perturbation) elif ord == 1: abs_grad = tf.abs(grad) sign = tf.sign(grad) max_abs_grad = tf.reduce_max(abs_grad, axis, keepdims=True) tied_for_max = tf.dtypes.cast(tf.equal(abs_grad, max_abs_grad), dtype=tf.float32) num_ties = tf.reduce_sum(tied_for_max, axis, keepdims=True) optimal_perturbation = sign * tied_for_max / num_ties elif ord == 2: square = tf.maximum(avoid_zero_div, tf.reduce_sum(tf.square(grad), axis, keepdims=True)) optimal_perturbation = grad / tf.sqrt(square) else: raise NotImplementedError("Only L-inf, L1 and L2 norms are currently implemented.") # Scale perturbation to be the solution for the norm=eps rather than norm=1 problem scaled_perturbation = tf.multiply(eps, optimal_perturbation) return scaled_perturbation
true
true
f70d937a2f3eb0c27ac1061da0f0c175ebf0e68f
1,695
py
Python
tools/ports/sdl2_net.py
talrasha/emscripten
5ece531a4bc724b133da0e1b0ce061e0c2e7bebd
[ "MIT" ]
1
2021-06-15T20:40:30.000Z
2021-06-15T20:40:30.000Z
tools/ports/sdl2_net.py
talrasha/emscripten
5ece531a4bc724b133da0e1b0ce061e0c2e7bebd
[ "MIT" ]
null
null
null
tools/ports/sdl2_net.py
talrasha/emscripten
5ece531a4bc724b133da0e1b0ce061e0c2e7bebd
[ "MIT" ]
null
null
null
# Copyright 2016 The Emscripten Authors. All rights reserved. # Emscripten is available under two separate licenses, the MIT license and the # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. import os import logging TAG = 'version_2' HASH = '317b22ad9b6b2f7b40fac7b7c426da2fa2da1803bbe58d480631f1e5b190d730763f2768c77c72affa806c69a1e703f401b15a1be3ec611cd259950d5ebc3711' def needed(settings): return settings.USE_SDL_NET == 2 def get(ports, settings, shared): sdl_build = os.path.join(ports.get_build_dir(), 'sdl2') assert os.path.exists(sdl_build), 'You must use SDL2 to use SDL2_net' ports.fetch_project('sdl2_net', 'https://github.com/emscripten-ports/SDL2_net/archive/' + TAG + '.zip', 'SDL2_net-' + TAG, sha512hash=HASH) def create(final): logging.info('building port: sdl2_net') src_dir = os.path.join(ports.get_dir(), 'sdl2_net', 'SDL2_net-' + TAG) ports.install_headers(src_dir, target='SDL2') srcs = 'SDLnet.c SDLnetselect.c SDLnetTCP.c SDLnetUDP.c'.split() commands = [] o_s = [] for src in srcs: o = os.path.join(ports.get_build_dir(), 'sdl2_net', src + '.o') commands.append([shared.EMCC, '-c', os.path.join(src_dir, src), '-O2', '-s', 'USE_SDL=2', '-o', o, '-w']) o_s.append(o) shared.safe_ensure_dirs(os.path.dirname(o_s[0])) ports.run_commands(commands) ports.create_lib(final, o_s) return [shared.Cache.get_lib('libSDL2_net.a', create, what='port')] def clear(ports, settings, shared): shared.Cache.erase_lib('libSDL2_net.a') def process_args(ports): return [] def show(): return 'SDL2_net (zlib license)'
33.235294
141
0.703835
import os import logging TAG = 'version_2' HASH = '317b22ad9b6b2f7b40fac7b7c426da2fa2da1803bbe58d480631f1e5b190d730763f2768c77c72affa806c69a1e703f401b15a1be3ec611cd259950d5ebc3711' def needed(settings): return settings.USE_SDL_NET == 2 def get(ports, settings, shared): sdl_build = os.path.join(ports.get_build_dir(), 'sdl2') assert os.path.exists(sdl_build), 'You must use SDL2 to use SDL2_net' ports.fetch_project('sdl2_net', 'https://github.com/emscripten-ports/SDL2_net/archive/' + TAG + '.zip', 'SDL2_net-' + TAG, sha512hash=HASH) def create(final): logging.info('building port: sdl2_net') src_dir = os.path.join(ports.get_dir(), 'sdl2_net', 'SDL2_net-' + TAG) ports.install_headers(src_dir, target='SDL2') srcs = 'SDLnet.c SDLnetselect.c SDLnetTCP.c SDLnetUDP.c'.split() commands = [] o_s = [] for src in srcs: o = os.path.join(ports.get_build_dir(), 'sdl2_net', src + '.o') commands.append([shared.EMCC, '-c', os.path.join(src_dir, src), '-O2', '-s', 'USE_SDL=2', '-o', o, '-w']) o_s.append(o) shared.safe_ensure_dirs(os.path.dirname(o_s[0])) ports.run_commands(commands) ports.create_lib(final, o_s) return [shared.Cache.get_lib('libSDL2_net.a', create, what='port')] def clear(ports, settings, shared): shared.Cache.erase_lib('libSDL2_net.a') def process_args(ports): return [] def show(): return 'SDL2_net (zlib license)'
true
true
f70d94efdaf5515a4cd7eccef6c077140b0a5904
24
py
Python
sitgan/data/__init__.py
clintonjwang/sitgan
05210ec13073bcca9b4dbff798fb626d963082dc
[ "MIT" ]
null
null
null
sitgan/data/__init__.py
clintonjwang/sitgan
05210ec13073bcca9b4dbff798fb626d963082dc
[ "MIT" ]
null
null
null
sitgan/data/__init__.py
clintonjwang/sitgan
05210ec13073bcca9b4dbff798fb626d963082dc
[ "MIT" ]
null
null
null
from . import transforms
24
24
0.833333
from . import transforms
true
true
f70d9574d01b85f8c6cf508bbfc319ed7382d21b
4,863
py
Python
pykeyset/utils/logging.py
staticintlucas/pykeyset
8581252c85dfceebe22926af4640164a0895e7a0
[ "Apache-2.0", "MIT" ]
1
2021-07-06T16:43:25.000Z
2021-07-06T16:43:25.000Z
pykeyset/utils/logging.py
staticintlucas/pykeyset
8581252c85dfceebe22926af4640164a0895e7a0
[ "Apache-2.0", "MIT" ]
null
null
null
pykeyset/utils/logging.py
staticintlucas/pykeyset
8581252c85dfceebe22926af4640164a0895e7a0
[ "Apache-2.0", "MIT" ]
null
null
null
import inspect import sys from pathlib import Path from types import TracebackType from typing import NoReturn, Optional, Union import rich.console import typer from . import Severity, Verbosity from .config import config __all__ = ["error", "warning", "info", "debug"] COLOR_MAP = { Severity.ERROR: "red", Severity.WARNING: "yellow", Severity.INFO: "blue", Severity.DEBUG: "dim", } VERBOSITY_MAP = { Severity.ERROR: Verbosity.QUIET, Severity.WARNING: Verbosity.NORMAL, Severity.INFO: Verbosity.VERBOSE, Severity.DEBUG: Verbosity.DEBUG, } def error( error: Exception, file: Optional[Union[str, Path]] = None, prev_except: Optional[Exception] = None, ) -> NoReturn: """Handle an error in pykeyset code. Depending on the current configuration, this function can raise the error as an exception, print the error to the terminal, and/or exit the script.""" # Try to remove this call from the traceback. This will make it look like the exception was # raised where this function was called, not inside. Note: this is not guaranteed to work on # version < 3.7 or implementation != CPython, in which case we just pass None to raise_or_print frame = inspect.currentframe() if frame is not None: frame = frame.f_back conf = config() if conf.verbosity >= Verbosity.QUIET: message = format_error(error) print_message(message, Severity.ERROR, file) if conf.is_script: raise typer.Exit(1) else: # Create a traceback from frame (Python >= 3.7 only) if frame is not None and sys.version_info >= (3, 7): tb = TracebackType( tb_next=None, tb_frame=frame, tb_lasti=frame.f_lasti, tb_lineno=frame.f_lineno ) raise error.with_traceback(tb) from prev_except else: raise error from prev_except # pragma: no cover def warning( error: Exception, resolution: str, file: Optional[str] = None, prev_except: Optional[Exception] = None, ) -> None: """Handle an warning in pykeyset code. Depending on the current configuration, this function can raise the warning as an exception or print the warning to the terminal, or silently ignore it.""" # See comment in error() for details. Warnings can also end up raising an exception (if # raise_warnings is set in the config). frame = inspect.currentframe() if frame is not None: frame = frame.f_back conf = config() if conf.verbosity >= Verbosity.NORMAL: # Only format the resolution if this warning will not be raised. Otherwise the resolution # doesn't resolve anything if conf.raise_warnings: message = format_error(error) else: message = format_error(error, resolution) print_message(message, Severity.WARNING, file) if conf.raise_warnings: if conf.is_script: raise typer.Exit(1) else: # Create a traceback from frame (Python >= 3.7 only) if frame is not None and sys.version_info >= (3, 7): tb = TracebackType( tb_next=None, tb_frame=frame, tb_lasti=frame.f_lasti, tb_lineno=frame.f_lineno ) raise error.with_traceback(tb) from prev_except else: raise error from prev_except # pragma: no cover def info(message: str, file: Optional[str] = None): if config().verbosity >= Verbosity.VERBOSE: print_message(message, Severity.INFO, file) def debug(message: str, file: Optional[str] = None): if config().verbosity >= Verbosity.DEBUG: print_message(message, Severity.DEBUG, file) def format_error(error: Exception, resolution: Optional[str] = None) -> str: if isinstance(error, OSError): if error.filename is not None: filename = Path(error.filename).name result = f"cannot open file {format_filename(filename)}: {error.strerror.lower()}" elif error.strerror is not None: result = error.strerror.lower() else: result = str(error).lower() else: result = f"{error}" if resolution is not None: result = f"{result}. {resolution}" return result def format_filename(filename: Union[str, Path]) -> str: return f"[bold magenta]{filename}[/bold magenta]" def print_message( message: str, severity: Severity, filename: Optional[Union[str, Path]] = None ) -> None: color = COLOR_MAP.get(severity, "magenta") prefix = severity.name.capitalize() console = rich.console.Console(force_terminal=config().color, stderr=True) console.print(f"[{color} bold]{prefix}:[/{color} bold] {message}") if filename is not None: console.print(f" In file {format_filename(filename)}")
31.577922
99
0.652478
import inspect import sys from pathlib import Path from types import TracebackType from typing import NoReturn, Optional, Union import rich.console import typer from . import Severity, Verbosity from .config import config __all__ = ["error", "warning", "info", "debug"] COLOR_MAP = { Severity.ERROR: "red", Severity.WARNING: "yellow", Severity.INFO: "blue", Severity.DEBUG: "dim", } VERBOSITY_MAP = { Severity.ERROR: Verbosity.QUIET, Severity.WARNING: Verbosity.NORMAL, Severity.INFO: Verbosity.VERBOSE, Severity.DEBUG: Verbosity.DEBUG, } def error( error: Exception, file: Optional[Union[str, Path]] = None, prev_except: Optional[Exception] = None, ) -> NoReturn: frame = inspect.currentframe() if frame is not None: frame = frame.f_back conf = config() if conf.verbosity >= Verbosity.QUIET: message = format_error(error) print_message(message, Severity.ERROR, file) if conf.is_script: raise typer.Exit(1) else: if frame is not None and sys.version_info >= (3, 7): tb = TracebackType( tb_next=None, tb_frame=frame, tb_lasti=frame.f_lasti, tb_lineno=frame.f_lineno ) raise error.with_traceback(tb) from prev_except else: raise error from prev_except def warning( error: Exception, resolution: str, file: Optional[str] = None, prev_except: Optional[Exception] = None, ) -> None: frame = inspect.currentframe() if frame is not None: frame = frame.f_back conf = config() if conf.verbosity >= Verbosity.NORMAL: if conf.raise_warnings: message = format_error(error) else: message = format_error(error, resolution) print_message(message, Severity.WARNING, file) if conf.raise_warnings: if conf.is_script: raise typer.Exit(1) else: # Create a traceback from frame (Python >= 3.7 only) if frame is not None and sys.version_info >= (3, 7): tb = TracebackType( tb_next=None, tb_frame=frame, tb_lasti=frame.f_lasti, tb_lineno=frame.f_lineno ) raise error.with_traceback(tb) from prev_except else: raise error from prev_except # pragma: no cover def info(message: str, file: Optional[str] = None): if config().verbosity >= Verbosity.VERBOSE: print_message(message, Severity.INFO, file) def debug(message: str, file: Optional[str] = None): if config().verbosity >= Verbosity.DEBUG: print_message(message, Severity.DEBUG, file) def format_error(error: Exception, resolution: Optional[str] = None) -> str: if isinstance(error, OSError): if error.filename is not None: filename = Path(error.filename).name result = f"cannot open file {format_filename(filename)}: {error.strerror.lower()}" elif error.strerror is not None: result = error.strerror.lower() else: result = str(error).lower() else: result = f"{error}" if resolution is not None: result = f"{result}. {resolution}" return result def format_filename(filename: Union[str, Path]) -> str: return f"[bold magenta]{filename}[/bold magenta]" def print_message( message: str, severity: Severity, filename: Optional[Union[str, Path]] = None ) -> None: color = COLOR_MAP.get(severity, "magenta") prefix = severity.name.capitalize() console = rich.console.Console(force_terminal=config().color, stderr=True) console.print(f"[{color} bold]{prefix}:[/{color} bold] {message}") if filename is not None: console.print(f" In file {format_filename(filename)}")
true
true
f70d95b29b25b745f9f62876f9ff2ab90112943d
1,234
py
Python
notebooks/project_functions.py
data301-2021-winter1/project-group25-project
203421ca91c95786de4a2fff5412693493b9371f
[ "MIT" ]
null
null
null
notebooks/project_functions.py
data301-2021-winter1/project-group25-project
203421ca91c95786de4a2fff5412693493b9371f
[ "MIT" ]
null
null
null
notebooks/project_functions.py
data301-2021-winter1/project-group25-project
203421ca91c95786de4a2fff5412693493b9371f
[ "MIT" ]
1
2021-10-31T20:46:34.000Z
2021-10-31T20:46:34.000Z
import pandas as pd import numpy as np def load_and_process_data(path): rawData = pd.read_csv(path, sep=";") rawData = rawData[rawData.columns[:-2]].dropna().rename(columns={"RH": "Relative Humidity", "AH": "Absolute Humdity", "T": "Temp"}) for col in rawData.columns: #covert strings into floats if rawData[col].dtypes == object: try: rawData[col] = rawData[col].str.replace(",", ".") rawData[col] = rawData[col].astype(float) except ValueError: pass #remove row with values of less than 0 if rawData[col].dtypes==np.float64: rawData = rawData[rawData[col]>=0] return rawData def getAverageConcentration(df, column): ''' takes in dataFrame and a string column name returns an array of 24 integers representing the average values of the column for every hour of the day ''' average=0 averages = np.zeros(24) print(type(df[column][0])) for hour in range(24): time = "%s.00.00"%hour validColumns = df[df["Time"]==time] average = float(validColumns.sum()[column]) / int(df.shape[0]) averages[hour]= average return averages pass
36.294118
135
0.60859
import pandas as pd import numpy as np def load_and_process_data(path): rawData = pd.read_csv(path, sep=";") rawData = rawData[rawData.columns[:-2]].dropna().rename(columns={"RH": "Relative Humidity", "AH": "Absolute Humdity", "T": "Temp"}) for col in rawData.columns: if rawData[col].dtypes == object: try: rawData[col] = rawData[col].str.replace(",", ".") rawData[col] = rawData[col].astype(float) except ValueError: pass if rawData[col].dtypes==np.float64: rawData = rawData[rawData[col]>=0] return rawData def getAverageConcentration(df, column): average=0 averages = np.zeros(24) print(type(df[column][0])) for hour in range(24): time = "%s.00.00"%hour validColumns = df[df["Time"]==time] average = float(validColumns.sum()[column]) / int(df.shape[0]) averages[hour]= average return averages pass
true
true
f70d95c5b7bb3ce53da0664c50022da70c4679e7
3,359
py
Python
analytical/templatetags/woopra.py
rristow/django-analytical
87fed08537174e7b018b7364ce1dade31015f1d9
[ "MIT" ]
null
null
null
analytical/templatetags/woopra.py
rristow/django-analytical
87fed08537174e7b018b7364ce1dade31015f1d9
[ "MIT" ]
null
null
null
analytical/templatetags/woopra.py
rristow/django-analytical
87fed08537174e7b018b7364ce1dade31015f1d9
[ "MIT" ]
null
null
null
""" Woopra template tags and filters. """ from __future__ import absolute_import import json import re from django.conf import settings from django.template import Library, Node, TemplateSyntaxError from analytical.utils import ( disable_html, get_identity, get_required_setting, get_user_from_context, get_user_is_authenticated, is_internal_ip, ) DOMAIN_RE = re.compile(r'^\S+$') TRACKING_CODE = """ <script type="text/javascript"> var woo_settings = %(settings)s; var woo_visitor = %(visitor)s; !function(){var a,b,c,d=window,e=document,f=arguments,g="script",h=["config","track","trackForm","trackClick","identify","visit","push","call"],i=function(){var a,b=this,c=function(a){b[a]=function(){return b._e.push([a].concat(Array.prototype.slice.call(arguments,0))),b}};for(b._e=[],a=0;a<h.length;a++)c(h[a])};for(d.__woo=d.__woo||{},a=0;a<f.length;a++)d.__woo[f[a]]=d[f[a]]=d[f[a]]||new i;b=e.createElement(g),b.async=1,b.src="//static.woopra.com/js/w.js",c=e.getElementsByTagName(g)[0],c.parentNode.insertBefore(b,c)}("woopra"); woopra.config(woo_settings); woopra.identify(woo_visitor); woopra.track(); </script> """ # noqa register = Library() @register.tag def woopra(parser, token): """ Woopra tracking template tag. Renders Javascript code to track page visits. You must supply your Woopra domain in the ``WOOPRA_DOMAIN`` setting. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return WoopraNode() class WoopraNode(Node): def __init__(self): self.domain = get_required_setting( 'WOOPRA_DOMAIN', DOMAIN_RE, "must be a domain name") def render(self, context): if settings.get("DISABLE_TRACKING_CODE", False): return "" cfg = self._get_settings(context) visitor = self._get_visitor(context) html = TRACKING_CODE % { 'settings': json.dumps(cfg, sort_keys=True), 'visitor': json.dumps(visitor, sort_keys=True), } if is_internal_ip(context, 'WOOPRA'): html = disable_html(html, 'Woopra') return html def _get_settings(self, context): variables = {'domain': self.domain} try: variables['idle_timeout'] = str(settings.WOOPRA_IDLE_TIMEOUT) except AttributeError: pass return variables def _get_visitor(self, context): params = {} for dict_ in context: for var, val in dict_.items(): if var.startswith('woopra_'): params[var[7:]] = val if 'name' not in params and 'email' not in params: user = get_user_from_context(context) if user is not None and get_user_is_authenticated(user): params['name'] = get_identity( context, 'woopra', self._identify, user) if user.email: params['email'] = user.email return params def _identify(self, user): name = user.get_full_name() if not name: name = user.username return name def contribute_to_analytical(add_node): WoopraNode() # ensure properly configured add_node('head_bottom', WoopraNode)
32.298077
540
0.622209
from __future__ import absolute_import import json import re from django.conf import settings from django.template import Library, Node, TemplateSyntaxError from analytical.utils import ( disable_html, get_identity, get_required_setting, get_user_from_context, get_user_is_authenticated, is_internal_ip, ) DOMAIN_RE = re.compile(r'^\S+$') TRACKING_CODE = """ <script type="text/javascript"> var woo_settings = %(settings)s; var woo_visitor = %(visitor)s; !function(){var a,b,c,d=window,e=document,f=arguments,g="script",h=["config","track","trackForm","trackClick","identify","visit","push","call"],i=function(){var a,b=this,c=function(a){b[a]=function(){return b._e.push([a].concat(Array.prototype.slice.call(arguments,0))),b}};for(b._e=[],a=0;a<h.length;a++)c(h[a])};for(d.__woo=d.__woo||{},a=0;a<f.length;a++)d.__woo[f[a]]=d[f[a]]=d[f[a]]||new i;b=e.createElement(g),b.async=1,b.src="//static.woopra.com/js/w.js",c=e.getElementsByTagName(g)[0],c.parentNode.insertBefore(b,c)}("woopra"); woopra.config(woo_settings); woopra.identify(woo_visitor); woopra.track(); </script> """ register = Library() @register.tag def woopra(parser, token): bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return WoopraNode() class WoopraNode(Node): def __init__(self): self.domain = get_required_setting( 'WOOPRA_DOMAIN', DOMAIN_RE, "must be a domain name") def render(self, context): if settings.get("DISABLE_TRACKING_CODE", False): return "" cfg = self._get_settings(context) visitor = self._get_visitor(context) html = TRACKING_CODE % { 'settings': json.dumps(cfg, sort_keys=True), 'visitor': json.dumps(visitor, sort_keys=True), } if is_internal_ip(context, 'WOOPRA'): html = disable_html(html, 'Woopra') return html def _get_settings(self, context): variables = {'domain': self.domain} try: variables['idle_timeout'] = str(settings.WOOPRA_IDLE_TIMEOUT) except AttributeError: pass return variables def _get_visitor(self, context): params = {} for dict_ in context: for var, val in dict_.items(): if var.startswith('woopra_'): params[var[7:]] = val if 'name' not in params and 'email' not in params: user = get_user_from_context(context) if user is not None and get_user_is_authenticated(user): params['name'] = get_identity( context, 'woopra', self._identify, user) if user.email: params['email'] = user.email return params def _identify(self, user): name = user.get_full_name() if not name: name = user.username return name def contribute_to_analytical(add_node): WoopraNode() add_node('head_bottom', WoopraNode)
true
true
f70d95f727bc95acfcd6297cb018c2d395fbfd5c
1,277
py
Python
.docker/Redis/redispass.py
andersonpem/cpmr-stack
b06da6a1dce922b822cbb8884efde08ad92f3d46
[ "MIT" ]
1
2020-06-18T18:52:17.000Z
2020-06-18T18:52:17.000Z
.docker/Redis/redispass.py
andersonpem/cpmr-stack
b06da6a1dce922b822cbb8884efde08ad92f3d46
[ "MIT" ]
null
null
null
.docker/Redis/redispass.py
andersonpem/cpmr-stack
b06da6a1dce922b822cbb8884efde08ad92f3d46
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os, sys from os import path clear = lambda: os.system('clear') green = "\033[1;32;2m" greenblink = "\033[1;32;5m" yellow = "\033[1;33;2m" yellowblink = "\033[1;33;5m" redblink = "\033[1;31;5m" red = "\033[1;31;2m" white = "\033[1;37;0m" normal = "\033[0m" # ============================= if not path.exists("/run/secrets/redis_secret"): print (red+" A Redis password is not set in the secrets."+normal) print (red+" The server will start with the default password: "+green+"testpass"+normal) print (red+" It is highly advisable to change this password for security reasons."+normal) print (red+" Please refer to http://link.to.documentation to fix this. "+normal) # sys.exit(1) else: print (green+" Setting everything up. It'll only take a second."+normal) secret = open('/run/secrets/redis_secret', 'r') with open('/usr/local/etc/redis/redis.conf') as f: newText=f.read().replace('testpass', secret.read()) with open('/usr/local/etc/redis/redis.conf', "w") as f: f.write(newText) secret.close() print(green+" Server is ready to start. That's what we will do next :)"+normal) print("=========================================================================")
41.193548
94
0.602193
import os, sys from os import path clear = lambda: os.system('clear') green = "\033[1;32;2m" greenblink = "\033[1;32;5m" yellow = "\033[1;33;2m" yellowblink = "\033[1;33;5m" redblink = "\033[1;31;5m" red = "\033[1;31;2m" white = "\033[1;37;0m" normal = "\033[0m" if not path.exists("/run/secrets/redis_secret"): print (red+" A Redis password is not set in the secrets."+normal) print (red+" The server will start with the default password: "+green+"testpass"+normal) print (red+" It is highly advisable to change this password for security reasons."+normal) print (red+" Please refer to http://link.to.documentation to fix this. "+normal) else: print (green+" Setting everything up. It'll only take a second."+normal) secret = open('/run/secrets/redis_secret', 'r') with open('/usr/local/etc/redis/redis.conf') as f: newText=f.read().replace('testpass', secret.read()) with open('/usr/local/etc/redis/redis.conf', "w") as f: f.write(newText) secret.close() print(green+" Server is ready to start. That's what we will do next :)"+normal) print("=========================================================================")
true
true
f70d968be6e53e9add9f2f5cc5c6f2bcc49aa019
11,071
py
Python
octavia/tests/unit/common/tls_utils/test_cert_parser.py
lingxiankong/octavia
4a5c24ef6fcd3b5f198a20d780dedd7f7976296d
[ "Apache-2.0" ]
null
null
null
octavia/tests/unit/common/tls_utils/test_cert_parser.py
lingxiankong/octavia
4a5c24ef6fcd3b5f198a20d780dedd7f7976296d
[ "Apache-2.0" ]
null
null
null
octavia/tests/unit/common/tls_utils/test_cert_parser.py
lingxiankong/octavia
4a5c24ef6fcd3b5f198a20d780dedd7f7976296d
[ "Apache-2.0" ]
null
null
null
# # Copyright 2014 OpenStack Foundation. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from cryptography import x509 import mock from octavia.common import data_models import octavia.common.exceptions as exceptions import octavia.common.tls_utils.cert_parser as cert_parser from octavia.tests.common import sample_certs from octavia.tests.unit import base from octavia.tests.unit.common.sample_configs import sample_configs_combined class TestTLSParseUtils(base.TestCase): def test_alt_subject_name_parses(self): hosts = cert_parser.get_host_names(sample_certs.ALT_EXT_CRT) self.assertIn('www.cnfromsubject.org', hosts['cn']) self.assertIn('www.hostFromDNSName1.com', hosts['dns_names']) self.assertIn('www.hostFromDNSName2.com', hosts['dns_names']) self.assertIn('www.hostFromDNSName3.com', hosts['dns_names']) self.assertIn('www.hostFromDNSName4.com', hosts['dns_names']) def test_x509_parses(self): self.assertRaises(exceptions.UnreadableCert, cert_parser.validate_cert, "BAD CERT") self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT)) self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY)) def test_read_private_key_pkcs8(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser._read_private_key, sample_certs.ENCRYPTED_PKCS8_CRT_KEY) cert_parser._read_private_key( sample_certs.ENCRYPTED_PKCS8_CRT_KEY, passphrase=sample_certs.ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE) def test_read_private_key_pem(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser._read_private_key, sample_certs.X509_CERT_KEY_ENCRYPTED) cert_parser._read_private_key( sample_certs.X509_CERT_KEY_ENCRYPTED, passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) def test_prepare_private_key(self): self.assertEqual( cert_parser.prepare_private_key( sample_certs.X509_CERT_KEY_ENCRYPTED, passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE), sample_certs.X509_CERT_KEY) def test_prepare_private_key_orig_not_encrypted(self): self.assertEqual( cert_parser.prepare_private_key( sample_certs.X509_CERT_KEY), sample_certs.X509_CERT_KEY) def test_validate_cert_and_key_match(self): self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY)) self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY.decode('utf-8'))) self.assertRaises(exceptions.MisMatchedKey, cert_parser.validate_cert, sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY_2) def test_validate_cert_handles_intermediates(self): self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=(sample_certs.X509_IMDS + b"\nParser should ignore junk\n"))) self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=sample_certs.X509_IMDS_LIST)) def test_split_x509s(self): imds = [] for x509Pem in cert_parser._split_x509s(sample_certs.TEST_X509_IMDS): imds.append(cert_parser._get_x509_from_pem_bytes(x509Pem)) for i in range(0, len(imds)): self.assertEqual(sample_certs.EXPECTED_IMD_TEST_SUBJS[i], imds[i].subject.get_attributes_for_oid( x509.OID_COMMON_NAME)[0].value) def test_get_intermediates_pem_chain(self): self.assertEqual( sample_certs.X509_IMDS_LIST, list(cert_parser.get_intermediates_pems(sample_certs.X509_IMDS))) def test_get_intermediates_pkcs7_pem(self): self.assertEqual( sample_certs.X509_IMDS_LIST, list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_PEM))) def test_get_intermediates_pkcs7_pem_bad(self): self.assertRaises( exceptions.UnreadableCert, lambda: list(cert_parser.get_intermediates_pems( b'-----BEGIN PKCS7-----\nbad data\n-----END PKCS7-----'))) def test_get_intermediates_pkcs7_der(self): self.assertEqual( sample_certs.X509_IMDS_LIST, list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_DER))) def test_get_intermediates_pkcs7_der_bad(self): self.assertRaises( exceptions.UnreadableCert, lambda: list(cert_parser.get_intermediates_pems( b'\xfe\xfe\xff\xff'))) def test_get_x509_from_der_bytes_bad(self): self.assertRaises( exceptions.UnreadableCert, cert_parser._get_x509_from_der_bytes, b'bad data') @mock.patch('oslo_context.context.RequestContext') def test_load_certificates(self, mock_oslo): listener = sample_configs_combined.sample_listener_tuple( tls=True, sni=True, client_ca_cert=True) client = mock.MagicMock() context = mock.Mock() context.project_id = '12345' with mock.patch.object(cert_parser, 'get_host_names') as cp: with mock.patch.object(cert_parser, '_map_cert_tls_container'): cp.return_value = {'cn': 'fakeCN'} cert_parser.load_certificates_data(client, listener, context) # Ensure upload_cert is called three times calls_cert_mngr = [ mock.call.get_cert(context, 'cont_id_1', check_only=True), mock.call.get_cert(context, 'cont_id_2', check_only=True), mock.call.get_cert(context, 'cont_id_3', check_only=True) ] client.assert_has_calls(calls_cert_mngr) # Test asking for nothing listener = sample_configs_combined.sample_listener_tuple( tls=False, sni=False, client_ca_cert=False) client = mock.MagicMock() with mock.patch.object(cert_parser, '_map_cert_tls_container') as mock_map: result = cert_parser.load_certificates_data(client, listener) mock_map.assert_not_called() ref_empty_dict = {'tls_cert': None, 'sni_certs': []} self.assertEqual(ref_empty_dict, result) mock_oslo.assert_called() def test_load_certificates_get_cert_errors(self): mock_cert_mngr = mock.MagicMock() mock_obj = mock.MagicMock() mock_sni_container = mock.MagicMock() mock_sni_container.tls_container_id = 2 mock_cert_mngr.get_cert.side_effect = [Exception, Exception] # Test tls_certificate_id error mock_obj.tls_certificate_id = 1 self.assertRaises(exceptions.CertificateRetrievalException, cert_parser.load_certificates_data, mock_cert_mngr, mock_obj) # Test sni_containers error mock_obj.tls_certificate_id = None mock_obj.sni_containers = [mock_sni_container] self.assertRaises(exceptions.CertificateRetrievalException, cert_parser.load_certificates_data, mock_cert_mngr, mock_obj) @mock.patch('octavia.certificates.common.cert.Cert') def test_map_cert_tls_container(self, cert_mock): tls = data_models.TLSContainer( id=sample_certs.X509_CERT_SHA1, primary_cn=sample_certs.X509_CERT_CN, certificate=sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY_ENCRYPTED, passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE, intermediates=sample_certs.X509_IMDS_LIST) cert_mock.get_private_key.return_value = tls.private_key cert_mock.get_certificate.return_value = tls.certificate cert_mock.get_intermediates.return_value = tls.intermediates cert_mock.get_private_key_passphrase.return_value = tls.passphrase with mock.patch.object(cert_parser, 'get_host_names') as cp: cp.return_value = {'cn': sample_certs.X509_CERT_CN} self.assertEqual( tls.id, cert_parser._map_cert_tls_container( cert_mock).id) self.assertEqual( tls.primary_cn, cert_parser._map_cert_tls_container( cert_mock).primary_cn) self.assertEqual( tls.certificate, cert_parser._map_cert_tls_container( cert_mock).certificate) self.assertEqual( sample_certs.X509_CERT_KEY, cert_parser._map_cert_tls_container( cert_mock).private_key) self.assertEqual( tls.intermediates, cert_parser._map_cert_tls_container( cert_mock).intermediates) def test_build_pem(self): expected = b'imacert\nimakey\nimainter\nimainter2\n' tls_tuple = sample_configs_combined.sample_tls_container_tuple( certificate=b'imacert', private_key=b'imakey', intermediates=[b'imainter', b'imainter2']) self.assertEqual(expected, cert_parser.build_pem(tls_tuple)) def test_get_primary_cn(self): cert = sample_certs.X509_CERT with mock.patch.object(cert_parser, 'get_host_names') as cp: cp.return_value = {'cn': 'fakeCN'} cn = cert_parser.get_primary_cn(cert) self.assertEqual('fakeCN', cn) def test_get_cert_expiration(self): exp_date = cert_parser.get_cert_expiration(sample_certs.X509_EXPIRED) self.assertEqual(datetime.datetime(2016, 9, 25, 18, 1, 54), exp_date) # test the exception self.assertRaises(exceptions.UnreadableCert, cert_parser.get_cert_expiration, 'bad-cert-file')
43.586614
78
0.65658
import datetime from cryptography import x509 import mock from octavia.common import data_models import octavia.common.exceptions as exceptions import octavia.common.tls_utils.cert_parser as cert_parser from octavia.tests.common import sample_certs from octavia.tests.unit import base from octavia.tests.unit.common.sample_configs import sample_configs_combined class TestTLSParseUtils(base.TestCase): def test_alt_subject_name_parses(self): hosts = cert_parser.get_host_names(sample_certs.ALT_EXT_CRT) self.assertIn('www.cnfromsubject.org', hosts['cn']) self.assertIn('www.hostFromDNSName1.com', hosts['dns_names']) self.assertIn('www.hostFromDNSName2.com', hosts['dns_names']) self.assertIn('www.hostFromDNSName3.com', hosts['dns_names']) self.assertIn('www.hostFromDNSName4.com', hosts['dns_names']) def test_x509_parses(self): self.assertRaises(exceptions.UnreadableCert, cert_parser.validate_cert, "BAD CERT") self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT)) self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY)) def test_read_private_key_pkcs8(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser._read_private_key, sample_certs.ENCRYPTED_PKCS8_CRT_KEY) cert_parser._read_private_key( sample_certs.ENCRYPTED_PKCS8_CRT_KEY, passphrase=sample_certs.ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE) def test_read_private_key_pem(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser._read_private_key, sample_certs.X509_CERT_KEY_ENCRYPTED) cert_parser._read_private_key( sample_certs.X509_CERT_KEY_ENCRYPTED, passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) def test_prepare_private_key(self): self.assertEqual( cert_parser.prepare_private_key( sample_certs.X509_CERT_KEY_ENCRYPTED, passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE), sample_certs.X509_CERT_KEY) def test_prepare_private_key_orig_not_encrypted(self): self.assertEqual( cert_parser.prepare_private_key( sample_certs.X509_CERT_KEY), sample_certs.X509_CERT_KEY) def test_validate_cert_and_key_match(self): self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY)) self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY.decode('utf-8'))) self.assertRaises(exceptions.MisMatchedKey, cert_parser.validate_cert, sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY_2) def test_validate_cert_handles_intermediates(self): self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=(sample_certs.X509_IMDS + b"\nParser should ignore junk\n"))) self.assertTrue( cert_parser.validate_cert( sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=sample_certs.X509_IMDS_LIST)) def test_split_x509s(self): imds = [] for x509Pem in cert_parser._split_x509s(sample_certs.TEST_X509_IMDS): imds.append(cert_parser._get_x509_from_pem_bytes(x509Pem)) for i in range(0, len(imds)): self.assertEqual(sample_certs.EXPECTED_IMD_TEST_SUBJS[i], imds[i].subject.get_attributes_for_oid( x509.OID_COMMON_NAME)[0].value) def test_get_intermediates_pem_chain(self): self.assertEqual( sample_certs.X509_IMDS_LIST, list(cert_parser.get_intermediates_pems(sample_certs.X509_IMDS))) def test_get_intermediates_pkcs7_pem(self): self.assertEqual( sample_certs.X509_IMDS_LIST, list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_PEM))) def test_get_intermediates_pkcs7_pem_bad(self): self.assertRaises( exceptions.UnreadableCert, lambda: list(cert_parser.get_intermediates_pems( b'-----BEGIN PKCS7-----\nbad data\n-----END PKCS7-----'))) def test_get_intermediates_pkcs7_der(self): self.assertEqual( sample_certs.X509_IMDS_LIST, list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_DER))) def test_get_intermediates_pkcs7_der_bad(self): self.assertRaises( exceptions.UnreadableCert, lambda: list(cert_parser.get_intermediates_pems( b'\xfe\xfe\xff\xff'))) def test_get_x509_from_der_bytes_bad(self): self.assertRaises( exceptions.UnreadableCert, cert_parser._get_x509_from_der_bytes, b'bad data') @mock.patch('oslo_context.context.RequestContext') def test_load_certificates(self, mock_oslo): listener = sample_configs_combined.sample_listener_tuple( tls=True, sni=True, client_ca_cert=True) client = mock.MagicMock() context = mock.Mock() context.project_id = '12345' with mock.patch.object(cert_parser, 'get_host_names') as cp: with mock.patch.object(cert_parser, '_map_cert_tls_container'): cp.return_value = {'cn': 'fakeCN'} cert_parser.load_certificates_data(client, listener, context) calls_cert_mngr = [ mock.call.get_cert(context, 'cont_id_1', check_only=True), mock.call.get_cert(context, 'cont_id_2', check_only=True), mock.call.get_cert(context, 'cont_id_3', check_only=True) ] client.assert_has_calls(calls_cert_mngr) listener = sample_configs_combined.sample_listener_tuple( tls=False, sni=False, client_ca_cert=False) client = mock.MagicMock() with mock.patch.object(cert_parser, '_map_cert_tls_container') as mock_map: result = cert_parser.load_certificates_data(client, listener) mock_map.assert_not_called() ref_empty_dict = {'tls_cert': None, 'sni_certs': []} self.assertEqual(ref_empty_dict, result) mock_oslo.assert_called() def test_load_certificates_get_cert_errors(self): mock_cert_mngr = mock.MagicMock() mock_obj = mock.MagicMock() mock_sni_container = mock.MagicMock() mock_sni_container.tls_container_id = 2 mock_cert_mngr.get_cert.side_effect = [Exception, Exception] mock_obj.tls_certificate_id = 1 self.assertRaises(exceptions.CertificateRetrievalException, cert_parser.load_certificates_data, mock_cert_mngr, mock_obj) mock_obj.tls_certificate_id = None mock_obj.sni_containers = [mock_sni_container] self.assertRaises(exceptions.CertificateRetrievalException, cert_parser.load_certificates_data, mock_cert_mngr, mock_obj) @mock.patch('octavia.certificates.common.cert.Cert') def test_map_cert_tls_container(self, cert_mock): tls = data_models.TLSContainer( id=sample_certs.X509_CERT_SHA1, primary_cn=sample_certs.X509_CERT_CN, certificate=sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY_ENCRYPTED, passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE, intermediates=sample_certs.X509_IMDS_LIST) cert_mock.get_private_key.return_value = tls.private_key cert_mock.get_certificate.return_value = tls.certificate cert_mock.get_intermediates.return_value = tls.intermediates cert_mock.get_private_key_passphrase.return_value = tls.passphrase with mock.patch.object(cert_parser, 'get_host_names') as cp: cp.return_value = {'cn': sample_certs.X509_CERT_CN} self.assertEqual( tls.id, cert_parser._map_cert_tls_container( cert_mock).id) self.assertEqual( tls.primary_cn, cert_parser._map_cert_tls_container( cert_mock).primary_cn) self.assertEqual( tls.certificate, cert_parser._map_cert_tls_container( cert_mock).certificate) self.assertEqual( sample_certs.X509_CERT_KEY, cert_parser._map_cert_tls_container( cert_mock).private_key) self.assertEqual( tls.intermediates, cert_parser._map_cert_tls_container( cert_mock).intermediates) def test_build_pem(self): expected = b'imacert\nimakey\nimainter\nimainter2\n' tls_tuple = sample_configs_combined.sample_tls_container_tuple( certificate=b'imacert', private_key=b'imakey', intermediates=[b'imainter', b'imainter2']) self.assertEqual(expected, cert_parser.build_pem(tls_tuple)) def test_get_primary_cn(self): cert = sample_certs.X509_CERT with mock.patch.object(cert_parser, 'get_host_names') as cp: cp.return_value = {'cn': 'fakeCN'} cn = cert_parser.get_primary_cn(cert) self.assertEqual('fakeCN', cn) def test_get_cert_expiration(self): exp_date = cert_parser.get_cert_expiration(sample_certs.X509_EXPIRED) self.assertEqual(datetime.datetime(2016, 9, 25, 18, 1, 54), exp_date) self.assertRaises(exceptions.UnreadableCert, cert_parser.get_cert_expiration, 'bad-cert-file')
true
true
f70d969d2d54edc6fc7cd0ed266f778078fe1b26
2,549
py
Python
experiments/produce_images.py
hietalajulius/clothmanip
ec2ee1177d5cf31ee2367c2576c34b9cf3691501
[ "MIT" ]
null
null
null
experiments/produce_images.py
hietalajulius/clothmanip
ec2ee1177d5cf31ee2367c2576c34b9cf3691501
[ "MIT" ]
null
null
null
experiments/produce_images.py
hietalajulius/clothmanip
ec2ee1177d5cf31ee2367c2576c34b9cf3691501
[ "MIT" ]
null
null
null
from clothmanip.utils.utils import get_variant, argsparser, get_randomized_env, dump_commit_hashes, get_keys_and_dims, dump_goal from clothmanip.envs.cloth import ClothEnvPickled as ClothEnv import numpy as np from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic, TanhScriptPolicy, CustomScriptPolicy, CustomTanhScriptPolicy, ScriptPolicy import cv2 import os from rlkit.envs.wrappers import NormalizedBoxEnv def main(variant): variant['save_folder'] = "/home/julius/robotics/clothmanip/experiments/paper_images" env = ClothEnv(**variant['env_kwargs'], has_viewer=True, save_folder=variant['save_folder']) env = NormalizedBoxEnv(env) env = get_randomized_env(env, variant) keys, dims = get_keys_and_dims(variant, env) demo_path = variant['demo_paths'][0] predefined_actions = np.genfromtxt(demo_path, delimiter=',') iter_folder = os.path.join(variant['save_folder'], "close_no_corners", "0") os.makedirs(os.path.join(iter_folder, "corners_images"), exist_ok=True) #os.makedirs(os.path.join(iter_folder, "env_images"), exist_ok=True) #os.makedirs(os.path.join(iter_folder, "cnn_images"), exist_ok=True) #os.makedirs(os.path.join(iter_folder, "cnn_color_images"), exist_ok=True) #os.makedirs(os.path.join(iter_folder, "cnn_color_full_images"), exist_ok=True) policy = TanhScriptPolicy( output_size=dims['action_dim'], added_fc_input_size=dims['added_fc_input_size'], aux_output_size=9, **variant['policy_kwargs'], ) eval_policy = MakeDeterministic(policy) for step_number, delta in enumerate(predefined_actions): print(step_number) a = delta/env.output_max a = np.clip(a, -1, 1) corner_image, eval_image, cnn_color_image_full, cnn_color_image, cnn_image = env.capture_images(None, mask_type=None) cv2.imwrite(f'{iter_folder}/corners_images/{str(step_number).zfill(3)}.png', corner_image) #cv2.imwrite(f'{iter_folder}/env_images/{str(step_number).zfill(3)}.png', eval_image) #cv2.imwrite(f'{iter_folder}/cnn_images/{str(step_number).zfill(3)}.png', corner_image) #cv2.imwrite(f'{iter_folder}/cnn_color_images/{str(step_number).zfill(3)}.png', cnn_color_image) #cv2.imwrite(f'{iter_folder}/cnn_color_full_images/{str(step_number).zfill(3)}.png', cnn_color_image_full) o, r, d, env_info = env.step(a) if __name__ == "__main__": args = argsparser() variant, arg_str = get_variant(args) main(variant)
39.215385
150
0.723029
from clothmanip.utils.utils import get_variant, argsparser, get_randomized_env, dump_commit_hashes, get_keys_and_dims, dump_goal from clothmanip.envs.cloth import ClothEnvPickled as ClothEnv import numpy as np from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic, TanhScriptPolicy, CustomScriptPolicy, CustomTanhScriptPolicy, ScriptPolicy import cv2 import os from rlkit.envs.wrappers import NormalizedBoxEnv def main(variant): variant['save_folder'] = "/home/julius/robotics/clothmanip/experiments/paper_images" env = ClothEnv(**variant['env_kwargs'], has_viewer=True, save_folder=variant['save_folder']) env = NormalizedBoxEnv(env) env = get_randomized_env(env, variant) keys, dims = get_keys_and_dims(variant, env) demo_path = variant['demo_paths'][0] predefined_actions = np.genfromtxt(demo_path, delimiter=',') iter_folder = os.path.join(variant['save_folder'], "close_no_corners", "0") os.makedirs(os.path.join(iter_folder, "corners_images"), exist_ok=True) policy = TanhScriptPolicy( output_size=dims['action_dim'], added_fc_input_size=dims['added_fc_input_size'], aux_output_size=9, **variant['policy_kwargs'], ) eval_policy = MakeDeterministic(policy) for step_number, delta in enumerate(predefined_actions): print(step_number) a = delta/env.output_max a = np.clip(a, -1, 1) corner_image, eval_image, cnn_color_image_full, cnn_color_image, cnn_image = env.capture_images(None, mask_type=None) cv2.imwrite(f'{iter_folder}/corners_images/{str(step_number).zfill(3)}.png', corner_image) o, r, d, env_info = env.step(a) if __name__ == "__main__": args = argsparser() variant, arg_str = get_variant(args) main(variant)
true
true
f70d969e2bf0cf36ad855ee81686eb3925662821
482
py
Python
exercises/en/solution_03_09_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
2,085
2019-04-17T13:10:40.000Z
2022-03-30T21:51:46.000Z
exercises/en/solution_03_09_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
79
2019-04-18T14:42:55.000Z
2022-03-07T08:15:43.000Z
exercises/en/solution_03_09_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
361
2019-04-17T13:34:32.000Z
2022-03-28T04:42:45.000Z
from spacy.lang.en import English from spacy.tokens import Token nlp = English() # Register the Token extension attribute "is_country" with the default value False Token.set_extension("is_country", default=False) # Process the text and set the is_country attribute to True for the token "Spain" doc = nlp("I live in Spain.") doc[3]._.is_country = True # Print the token text and the is_country attribute for all tokens print([(token.text, token._.is_country) for token in doc])
32.133333
82
0.767635
from spacy.lang.en import English from spacy.tokens import Token nlp = English() Token.set_extension("is_country", default=False) doc = nlp("I live in Spain.") doc[3]._.is_country = True print([(token.text, token._.is_country) for token in doc])
true
true
f70d96bce35013ac957938e048f02f5425a2e2f2
569
bzl
Python
example/third_party/org_eclipse_jgit.bzl
wix-playground/rules_maven_third_party
ff0b486df194779d7d8e6c9102cd12138e3305c3
[ "Apache-2.0" ]
null
null
null
example/third_party/org_eclipse_jgit.bzl
wix-playground/rules_maven_third_party
ff0b486df194779d7d8e6c9102cd12138e3305c3
[ "Apache-2.0" ]
null
null
null
example/third_party/org_eclipse_jgit.bzl
wix-playground/rules_maven_third_party
ff0b486df194779d7d8e6c9102cd12138e3305c3
[ "Apache-2.0" ]
null
null
null
load("@rules_maven_third_party//:import_external.bzl", import_external = "import_external") def dependencies(): import_external( name = "org_eclipse_jgit_org_eclipse_jgit", artifact = "org.eclipse.jgit:org.eclipse.jgit:5.11.0.202103091610-r", artifact_sha256 = "b0f012105d67729a67c7fde546b6e89580f7ddc5bd73c6c7bae7084c50e36a37", srcjar_sha256 = "23b4f2debe38b2e18cb925ada6639eb78cc029243060f8f8c080ba3e0e70ab71", deps = [ "@com_googlecode_javaewah_JavaEWAH", "@org_slf4j_slf4j_api", ], )
40.642857
93
0.713533
load("@rules_maven_third_party//:import_external.bzl", import_external = "import_external") def dependencies(): import_external( name = "org_eclipse_jgit_org_eclipse_jgit", artifact = "org.eclipse.jgit:org.eclipse.jgit:5.11.0.202103091610-r", artifact_sha256 = "b0f012105d67729a67c7fde546b6e89580f7ddc5bd73c6c7bae7084c50e36a37", srcjar_sha256 = "23b4f2debe38b2e18cb925ada6639eb78cc029243060f8f8c080ba3e0e70ab71", deps = [ "@com_googlecode_javaewah_JavaEWAH", "@org_slf4j_slf4j_api", ], )
true
true
f70d97557a7fce5e61b5bf6370cefe3820b37663
5,603
py
Python
models/image_classification/inception/inception_resnet.py
tensorize/models
453165bc0cb2ed7bc1136f038458503aee6fccc3
[ "Apache-2.0" ]
null
null
null
models/image_classification/inception/inception_resnet.py
tensorize/models
453165bc0cb2ed7bc1136f038458503aee6fccc3
[ "Apache-2.0" ]
null
null
null
models/image_classification/inception/inception_resnet.py
tensorize/models
453165bc0cb2ed7bc1136f038458503aee6fccc3
[ "Apache-2.0" ]
null
null
null
from tensorize import * class InceptionResnetV1(Model): def inference(self, inputs, output): stem(inputs, outputs) for x in xrange(4): inceptionA() reductionA() for x in xrange(7): inceptionB() reductionB() for x in xrange(3): inceptionC() AveragePooling() Dropout(0.8) CategoricalPredictionOutput(output) def train(self, outputs): CategoricalCrossEntropy() CategoricalAccuracy(outputs) GradientDescentOptimizer() class InceptionResnetV2(Model): def inference(self, inputs, output): stem(inputs, outputs) for x in xrange(4): inceptionA() reductionA() for x in xrange(7): inceptionB() reductionB() for x in xrange(3): inceptionC() AveragePooling() Dropout(0.8) CategoricalPredictionOutput(output) def train(self, outputs): CategoricalCrossEntropy() CategoricalAccuracy(outputs) GradientDescentOptimizer() def stem(inputs, outputs): BatchImageInput(inputs) Convolution3x3(filters=32) Convolution3x3(filters=32) Convolution3x3(filters=64) with ParallelBlock() as parallel: with parallel: MaxPooling2D() with parallel: Convolution3x3(filters=64) FilterConcat() with ParallelBlock() as parallel: with parallel: Convolution1x1(filters=64) Convolution3x3(filters=96) with parallel: Convolution1x1(filters=64) Convolution2D([7, 1], filters=64) Convolution2D([1, 7], filters=64) Convolution3x3(filters=96) FilterConcat() with ParallelBlock() as block: with block: MaxPooling2D() with block: Convolution3x3(filters=64) FilterConcat() def inceptionA(): with ParallelBlock() as parallel: with parallel: AveragePooling() Convolution1x1(filters=96) with parallel: Convolution1x1(filters=96) with parallel: Convolution1x1(filters=64) Convolution3x3(filters=96) with parallel: Convolution1x1(filters=64) Convolution3x3(filters=96) Convolution3x3(filters=96) FilterConcat() def inceptionB(): with ParallelBlock() as parallel: with parallel: AveragePooling() Convolution1x1(filters=128) with parallel: Convolution1x1(filters=384) with parallel: Convolution1x1(filters=192) Convolution2D([1, 7], filters=224) Convolution2D([1, 7], filters=256) with parallel: Convolution1x1(filters=192) Convolution2D([1, 7], filters=192) Convolution2D([7, 1], filters=224) Convolution2D([1, 7], filters=224) Convolution2D([7, 1], filters=256) FilterConcat() def inceptionC(): with ParallelBlock() as parallel: with parallel: AveragePooling() Convolution1x1(filters=256) with parallel: Convolution1x1(filters=256) with parallel: Convolution1x1(filters=384) with ParallelBlock() as parallel_inner: with parallel_inner: Convolution2D([1, 3], filters=256) with parallel_inner: Convolution2D([3, 1], filters=256) with parallel: Convolution1x1(filters=384) Convolution2D([1, 3], filters=384) Convolution2D([3, 1], filters=512) FilterConcat() def reduceA(n, l, k, m): with ParallelBlock() as parallel: with parallel: MaxPooling2D([3, 3]) with parallel: Convolution3x3(n) with parallel: Convolution1x1(filters=k) Convolution3x3(filters=l) Convolution3x3(filters=m) FilterConcat() def reduceB(): with ParallelBlock() as parallel: with parallel: MaxPooling2D([3, 3], stride=2) with parallel: Convolution1x1(192) Convolution3x3(192) with parallel: Convolution1x1(filters=256) Convolution2D([1, 7], filters=256) Convolution2D([7, 1], filters=320) Convolution3x3(filters=320, stride=2) FilterConcat() def inceptionResnetA(): RectifiedLinearUnit() with ParallelBlock() as parallel: with parallel: with ParallelBlock() as parallel_inner: with parallel_inner: Convolution1x1(32) with parallel_inner: Convolution1x1(32) Convolution3x3(32) with parallel_inner: Convolution1x1(32) Convolution3x3(32) Convolution3x3(32) Convolution1x1(filters=256) Sum() def inceptionResnetB(): RectifiedLinearUnit() with ParallelBlock() as parallel: with parallel: with ParallelBlock() as parallel_inner: with parallel_inner: Convolution1x1(128) with parallel_inner: Convolution1x1(128) Convolution2D([1, 7], filters=128) Convolution2D([7, 1], filters=128) Convolution1x1(filters=896) Sum()
23.248963
54
0.557558
from tensorize import * class InceptionResnetV1(Model): def inference(self, inputs, output): stem(inputs, outputs) for x in xrange(4): inceptionA() reductionA() for x in xrange(7): inceptionB() reductionB() for x in xrange(3): inceptionC() AveragePooling() Dropout(0.8) CategoricalPredictionOutput(output) def train(self, outputs): CategoricalCrossEntropy() CategoricalAccuracy(outputs) GradientDescentOptimizer() class InceptionResnetV2(Model): def inference(self, inputs, output): stem(inputs, outputs) for x in xrange(4): inceptionA() reductionA() for x in xrange(7): inceptionB() reductionB() for x in xrange(3): inceptionC() AveragePooling() Dropout(0.8) CategoricalPredictionOutput(output) def train(self, outputs): CategoricalCrossEntropy() CategoricalAccuracy(outputs) GradientDescentOptimizer() def stem(inputs, outputs): BatchImageInput(inputs) Convolution3x3(filters=32) Convolution3x3(filters=32) Convolution3x3(filters=64) with ParallelBlock() as parallel: with parallel: MaxPooling2D() with parallel: Convolution3x3(filters=64) FilterConcat() with ParallelBlock() as parallel: with parallel: Convolution1x1(filters=64) Convolution3x3(filters=96) with parallel: Convolution1x1(filters=64) Convolution2D([7, 1], filters=64) Convolution2D([1, 7], filters=64) Convolution3x3(filters=96) FilterConcat() with ParallelBlock() as block: with block: MaxPooling2D() with block: Convolution3x3(filters=64) FilterConcat() def inceptionA(): with ParallelBlock() as parallel: with parallel: AveragePooling() Convolution1x1(filters=96) with parallel: Convolution1x1(filters=96) with parallel: Convolution1x1(filters=64) Convolution3x3(filters=96) with parallel: Convolution1x1(filters=64) Convolution3x3(filters=96) Convolution3x3(filters=96) FilterConcat() def inceptionB(): with ParallelBlock() as parallel: with parallel: AveragePooling() Convolution1x1(filters=128) with parallel: Convolution1x1(filters=384) with parallel: Convolution1x1(filters=192) Convolution2D([1, 7], filters=224) Convolution2D([1, 7], filters=256) with parallel: Convolution1x1(filters=192) Convolution2D([1, 7], filters=192) Convolution2D([7, 1], filters=224) Convolution2D([1, 7], filters=224) Convolution2D([7, 1], filters=256) FilterConcat() def inceptionC(): with ParallelBlock() as parallel: with parallel: AveragePooling() Convolution1x1(filters=256) with parallel: Convolution1x1(filters=256) with parallel: Convolution1x1(filters=384) with ParallelBlock() as parallel_inner: with parallel_inner: Convolution2D([1, 3], filters=256) with parallel_inner: Convolution2D([3, 1], filters=256) with parallel: Convolution1x1(filters=384) Convolution2D([1, 3], filters=384) Convolution2D([3, 1], filters=512) FilterConcat() def reduceA(n, l, k, m): with ParallelBlock() as parallel: with parallel: MaxPooling2D([3, 3]) with parallel: Convolution3x3(n) with parallel: Convolution1x1(filters=k) Convolution3x3(filters=l) Convolution3x3(filters=m) FilterConcat() def reduceB(): with ParallelBlock() as parallel: with parallel: MaxPooling2D([3, 3], stride=2) with parallel: Convolution1x1(192) Convolution3x3(192) with parallel: Convolution1x1(filters=256) Convolution2D([1, 7], filters=256) Convolution2D([7, 1], filters=320) Convolution3x3(filters=320, stride=2) FilterConcat() def inceptionResnetA(): RectifiedLinearUnit() with ParallelBlock() as parallel: with parallel: with ParallelBlock() as parallel_inner: with parallel_inner: Convolution1x1(32) with parallel_inner: Convolution1x1(32) Convolution3x3(32) with parallel_inner: Convolution1x1(32) Convolution3x3(32) Convolution3x3(32) Convolution1x1(filters=256) Sum() def inceptionResnetB(): RectifiedLinearUnit() with ParallelBlock() as parallel: with parallel: with ParallelBlock() as parallel_inner: with parallel_inner: Convolution1x1(128) with parallel_inner: Convolution1x1(128) Convolution2D([1, 7], filters=128) Convolution2D([7, 1], filters=128) Convolution1x1(filters=896) Sum()
true
true
f70d975dbe2f348e5ab72227d7ff4eef71b35806
2,934
py
Python
django/core/files/temp.py
indevgr/django
0247c9b08f8da4a2d93b9cede6c615011552b55a
[ "PSF-2.0", "BSD-3-Clause" ]
1
2016-05-22T08:25:51.000Z
2016-05-22T08:25:51.000Z
django/core/files/temp.py
indevgr/django
0247c9b08f8da4a2d93b9cede6c615011552b55a
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
django/core/files/temp.py
indevgr/django
0247c9b08f8da4a2d93b9cede6c615011552b55a
[ "PSF-2.0", "BSD-3-Clause" ]
1
2016-04-21T11:47:46.000Z
2016-04-21T11:47:46.000Z
""" The temp module provides a NamedTemporaryFile that can be reopened in the same process on any platform. Most platforms use the standard Python tempfile.NamedTemporaryFile class, but Windows users are given a custom class. This is needed because the Python implementation of NamedTemporaryFile uses the O_TEMPORARY flag under Windows, which prevents the file from being reopened if the same flag is not provided [1][2]. Note that this does not address the more general issue of opening a file for writing and reading in multiple processes in a manner that works across platforms. Also note that the custom version of NamedTemporaryFile does not support the full range of keyword arguments available in Python 2.6+ and 3.0+. 1: https://mail.python.org/pipermail/python-list/2005-December/336958.html 2: http://bugs.python.org/issue14243 """ import os import tempfile from django.core.files.utils import FileProxyMixin __all__ = ('NamedTemporaryFile', 'gettempdir',) if os.name == 'nt': class TemporaryFile(FileProxyMixin): """ Temporary file object constructor that supports reopening of the temporary file in Windows. Note that unlike tempfile.NamedTemporaryFile from the standard library, __init__() does not support the 'delete' keyword argument in Python 2.6+, or the 'delete', 'buffering', 'encoding', or 'newline' keyword arguments in Python 3.0+. """ def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None): fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir) self.name = name self.file = os.fdopen(fd, mode, bufsize) self.close_called = False # Because close can be called during shutdown # we need to cache os.unlink and access it # as self.unlink only unlink = os.unlink def close(self): if not self.close_called: self.close_called = True try: self.file.close() except (OSError, IOError): pass try: self.unlink(self.name) except (OSError): pass @property def closed(self): """ This attribute needs to be accessible in certain situations, because this class is supposed to mock the API of the class tempfile.NamedTemporaryFile in the Python standard library. """ return self.file.closed def __del__(self): self.close() def __enter__(self): self.file.__enter__() return self def __exit__(self, exc, value, tb): self.file.__exit__(exc, value, tb) NamedTemporaryFile = TemporaryFile else: NamedTemporaryFile = tempfile.NamedTemporaryFile gettempdir = tempfile.gettempdir
34.517647
83
0.644172
import os import tempfile from django.core.files.utils import FileProxyMixin __all__ = ('NamedTemporaryFile', 'gettempdir',) if os.name == 'nt': class TemporaryFile(FileProxyMixin): def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None): fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir) self.name = name self.file = os.fdopen(fd, mode, bufsize) self.close_called = False unlink = os.unlink def close(self): if not self.close_called: self.close_called = True try: self.file.close() except (OSError, IOError): pass try: self.unlink(self.name) except (OSError): pass @property def closed(self): return self.file.closed def __del__(self): self.close() def __enter__(self): self.file.__enter__() return self def __exit__(self, exc, value, tb): self.file.__exit__(exc, value, tb) NamedTemporaryFile = TemporaryFile else: NamedTemporaryFile = tempfile.NamedTemporaryFile gettempdir = tempfile.gettempdir
true
true
f70d989095a9c5a971fdea4aa937edd891c61ae0
7,009
py
Python
python/cendalytics/report/core/dmo/ranking_report_writer.py
jiportilla/ontology
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
[ "MIT" ]
null
null
null
python/cendalytics/report/core/dmo/ranking_report_writer.py
jiportilla/ontology
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
[ "MIT" ]
null
null
null
python/cendalytics/report/core/dmo/ranking_report_writer.py
jiportilla/ontology
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from openpyxl.worksheet.worksheet import Worksheet COLUMNS = {"A": 20, "B": 10, "C": 10, "D": 10, "E": 10, "F": 10, "G": 10, "H": 10, "I": 10} class RankingReportWriter(object): def __init__(self, some_excel_worksheet: Worksheet, some_source_dimension: list, some_target_dimensions: list, some_final_ranking: list): """ :param some_excel_worksheet: the excel worksheet to write to """ from . import WorksheetHelper if not some_excel_worksheet: raise ValueError("Mandatory Param: Excel Worksheet") if not some_source_dimension: raise ValueError("Mandatory Param: Source Dimension") if not some_target_dimensions: raise ValueError("Mandatory Param: Target Dimemsion") if not some_final_ranking: raise ValueError("Mandatory Param: Final Ranking") self.worksheet = some_excel_worksheet self.source_dimension = some_source_dimension self.target_dimensions = some_target_dimensions self.final_ranking = some_final_ranking self.helper = WorksheetHelper def _write_value(self, some_column: str, some_row: int, some_text: str, some_named_format: str): """ :param some_column: :param some_row: :param some_text: :param some_named_format: """ cell = "{}{}".format(some_column, some_row) self.worksheet[cell].value = some_text self.worksheet[cell].style = some_named_format def _write_records(self, source_weights: list, source_values: list): """ writes records by row and column """ def _dimension_value(value: str) -> dict: return self.helper.struct(value, "dimension_value_source") def _dimension_weight(value: str) -> dict: return self.helper.struct(value, "dimension_weight_source") def _header_dimension(value: str) -> dict: return self.helper.struct(value, "header_dimension") def _header_other(value: str) -> dict: return self.helper.struct(value, "header_other") def _field_key(value: str) -> dict: return self.helper.struct(value, "keyfield") def _field_weight(value: str) -> dict: return self.helper.struct(value, "field_weight_source") def _field_rank(value: str) -> dict: return self.helper.struct(value, "field_rank") d_row_1 = { "A1": _header_other("Open Seat ID"), "B1": _header_dimension("Cloud"), "C1": _header_dimension("Database"), "D1": _header_dimension("System Administrator"), "E1": _header_dimension("Hard Skill"), "F1": _header_dimension("Project Management"), "G1": _header_dimension("Service Management"), "H1": _header_dimension("Soft Skill"), "I1": _header_other("Rank")} d_row_2 = { "A2": self.helper.struct(self.source_dimension[0]["key_field"], "keyfield_value_source"), "B2": _dimension_value(source_values[0]), "C2": _dimension_value(source_values[1]), "D2": _dimension_value(source_values[6]), "E2": _dimension_value(source_values[2]), "F2": _dimension_value(source_values[3]), "G2": _dimension_value(source_values[4]), "H2": _dimension_value(source_values[5])} d_row_3 = { "A3": self.helper.struct("Weight", "dimension_weight_text"), "B3": _dimension_weight(source_weights[0]), "C3": _dimension_weight(source_weights[1]), "D3": _dimension_weight(source_weights[6]), "E3": _dimension_weight(source_weights[2]), "F3": _dimension_weight(source_weights[3]), "G3": _dimension_weight(source_weights[4]), "H3": _dimension_weight(source_weights[5])} def _field_weight_value(target_dimension: dict, slot_name: str) -> str: return target_dimension["slots"][slot_name]["weight"] l_values = [] for i in range(0, len(self.target_dimensions)): l_values.append({ "A{}".format(i + 5): _field_key( self.target_dimensions[i]["key_field"]), "B{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "cloud")), "C{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "database")), "D{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "system administrator")), "E{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "hard skill")), "F{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "project management")), "G{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "service management")), "H{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "soft skill")), "I{}".format(i + 5): _field_rank( self.final_ranking[i])}) self.helper.generate(self.worksheet, [d_row_1, d_row_2, d_row_3]) self.helper.generate(self.worksheet, l_values) def process(self): """ Processes the logs from the input directory @input: Base directory containing the input and output subdirs. @output: None """ def _weights(some_records: list) -> list: weights = [] for record in some_records: weights.append([record["slots"][x]["weight"] for x in record["slots"]]) return weights def _values(some_records: list) -> list: values = [] for record in some_records: values.append([record["slots"][x]["z_score"] for x in record["slots"]]) return values source_weights = _weights(self.source_dimension)[0] source_values = _values(self.source_dimension)[0] self.helper.column_widths(self.worksheet, COLUMNS) self._write_records(source_weights, source_values)
38.938889
92
0.548723
from openpyxl.worksheet.worksheet import Worksheet COLUMNS = {"A": 20, "B": 10, "C": 10, "D": 10, "E": 10, "F": 10, "G": 10, "H": 10, "I": 10} class RankingReportWriter(object): def __init__(self, some_excel_worksheet: Worksheet, some_source_dimension: list, some_target_dimensions: list, some_final_ranking: list): from . import WorksheetHelper if not some_excel_worksheet: raise ValueError("Mandatory Param: Excel Worksheet") if not some_source_dimension: raise ValueError("Mandatory Param: Source Dimension") if not some_target_dimensions: raise ValueError("Mandatory Param: Target Dimemsion") if not some_final_ranking: raise ValueError("Mandatory Param: Final Ranking") self.worksheet = some_excel_worksheet self.source_dimension = some_source_dimension self.target_dimensions = some_target_dimensions self.final_ranking = some_final_ranking self.helper = WorksheetHelper def _write_value(self, some_column: str, some_row: int, some_text: str, some_named_format: str): cell = "{}{}".format(some_column, some_row) self.worksheet[cell].value = some_text self.worksheet[cell].style = some_named_format def _write_records(self, source_weights: list, source_values: list): def _dimension_value(value: str) -> dict: return self.helper.struct(value, "dimension_value_source") def _dimension_weight(value: str) -> dict: return self.helper.struct(value, "dimension_weight_source") def _header_dimension(value: str) -> dict: return self.helper.struct(value, "header_dimension") def _header_other(value: str) -> dict: return self.helper.struct(value, "header_other") def _field_key(value: str) -> dict: return self.helper.struct(value, "keyfield") def _field_weight(value: str) -> dict: return self.helper.struct(value, "field_weight_source") def _field_rank(value: str) -> dict: return self.helper.struct(value, "field_rank") d_row_1 = { "A1": _header_other("Open Seat ID"), "B1": _header_dimension("Cloud"), "C1": _header_dimension("Database"), "D1": _header_dimension("System Administrator"), "E1": _header_dimension("Hard Skill"), "F1": _header_dimension("Project Management"), "G1": _header_dimension("Service Management"), "H1": _header_dimension("Soft Skill"), "I1": _header_other("Rank")} d_row_2 = { "A2": self.helper.struct(self.source_dimension[0]["key_field"], "keyfield_value_source"), "B2": _dimension_value(source_values[0]), "C2": _dimension_value(source_values[1]), "D2": _dimension_value(source_values[6]), "E2": _dimension_value(source_values[2]), "F2": _dimension_value(source_values[3]), "G2": _dimension_value(source_values[4]), "H2": _dimension_value(source_values[5])} d_row_3 = { "A3": self.helper.struct("Weight", "dimension_weight_text"), "B3": _dimension_weight(source_weights[0]), "C3": _dimension_weight(source_weights[1]), "D3": _dimension_weight(source_weights[6]), "E3": _dimension_weight(source_weights[2]), "F3": _dimension_weight(source_weights[3]), "G3": _dimension_weight(source_weights[4]), "H3": _dimension_weight(source_weights[5])} def _field_weight_value(target_dimension: dict, slot_name: str) -> str: return target_dimension["slots"][slot_name]["weight"] l_values = [] for i in range(0, len(self.target_dimensions)): l_values.append({ "A{}".format(i + 5): _field_key( self.target_dimensions[i]["key_field"]), "B{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "cloud")), "C{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "database")), "D{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "system administrator")), "E{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "hard skill")), "F{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "project management")), "G{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "service management")), "H{}".format(i + 5): _field_weight( _field_weight_value(self.target_dimensions[i], "soft skill")), "I{}".format(i + 5): _field_rank( self.final_ranking[i])}) self.helper.generate(self.worksheet, [d_row_1, d_row_2, d_row_3]) self.helper.generate(self.worksheet, l_values) def process(self): def _weights(some_records: list) -> list: weights = [] for record in some_records: weights.append([record["slots"][x]["weight"] for x in record["slots"]]) return weights def _values(some_records: list) -> list: values = [] for record in some_records: values.append([record["slots"][x]["z_score"] for x in record["slots"]]) return values source_weights = _weights(self.source_dimension)[0] source_values = _values(self.source_dimension)[0] self.helper.column_widths(self.worksheet, COLUMNS) self._write_records(source_weights, source_values)
true
true
f70d98c16626cf822bb3cda9eb6e1159f8a17a26
393
py
Python
news-headlines/initselenium.py
OlegDurandin/Crawlers
ab9fbe7aaec43b5251762f1a04e624d440e4b0d9
[ "MIT" ]
null
null
null
news-headlines/initselenium.py
OlegDurandin/Crawlers
ab9fbe7aaec43b5251762f1a04e624d440e4b0d9
[ "MIT" ]
null
null
null
news-headlines/initselenium.py
OlegDurandin/Crawlers
ab9fbe7aaec43b5251762f1a04e624d440e4b0d9
[ "MIT" ]
null
null
null
from selenium import webdriver from selenium.common.exceptions import NoSuchElementException def selenium_initializer(): options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--ignore-ssl-errors') driver = webdriver.Chrome('../chromedriver', chrome_options=options) return driver if __name__ == "__main__": pass
32.75
72
0.760814
from selenium import webdriver from selenium.common.exceptions import NoSuchElementException def selenium_initializer(): options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--ignore-ssl-errors') driver = webdriver.Chrome('../chromedriver', chrome_options=options) return driver if __name__ == "__main__": pass
true
true
f70d9934f9a74adb2b61765bbae200c7051d841e
1,141
py
Python
autogit/main.py
quintenroets/gitmanager
bfe131bd23ccaaa37ad6624112fe465adff1fda0
[ "MIT" ]
null
null
null
autogit/main.py
quintenroets/gitmanager
bfe131bd23ccaaa37ad6624112fe465adff1fda0
[ "MIT" ]
null
null
null
autogit/main.py
quintenroets/gitmanager
bfe131bd23ccaaa37ad6624112fe465adff1fda0
[ "MIT" ]
null
null
null
import argparse def install(*args): from .installer import Installer # noqa: autoimport Installer.install(*args) def clone(*args): from .installer import Installer # noqa: autoimport Installer.clone(*args) def refresh(do_pull=False): from .repomanager import RepoManager # noqa: autoimport RepoManager.refresh(do_pull=do_pull) def run_hooks(): from .repomanager import RepoManager # noqa: autoimport RepoManager.run_hooks() def main(): parser = argparse.ArgumentParser(description="Automate common git workflows") parser.add_argument("action", nargs="?", help="The action to do", default="refresh") parser.add_argument("names", nargs="*", help="repository names") args = parser.parse_args() action_mapper = { "refresh": refresh, "clone": clone, "install": install, "pull": lambda: refresh(do_pull=True), "hooks": run_hooks, } if args.action not in action_mapper: raise Exception(f"{args.action} not defined") action = action_mapper[args.action] action(*args.names) if __name__ == "__main__": main()
22.82
88
0.668712
import argparse def install(*args): from .installer import Installer Installer.install(*args) def clone(*args): from .installer import Installer Installer.clone(*args) def refresh(do_pull=False): from .repomanager import RepoManager RepoManager.refresh(do_pull=do_pull) def run_hooks(): from .repomanager import RepoManager RepoManager.run_hooks() def main(): parser = argparse.ArgumentParser(description="Automate common git workflows") parser.add_argument("action", nargs="?", help="The action to do", default="refresh") parser.add_argument("names", nargs="*", help="repository names") args = parser.parse_args() action_mapper = { "refresh": refresh, "clone": clone, "install": install, "pull": lambda: refresh(do_pull=True), "hooks": run_hooks, } if args.action not in action_mapper: raise Exception(f"{args.action} not defined") action = action_mapper[args.action] action(*args.names) if __name__ == "__main__": main()
true
true
f70d99476357b7a92f0400eda44cc0bffb1a4c42
2,231
py
Python
linum/layer_list.py
chabErch/Linum
e32ec01f0b43cfb03fd33ad90cf25df9a0c6565f
[ "MIT" ]
null
null
null
linum/layer_list.py
chabErch/Linum
e32ec01f0b43cfb03fd33ad90cf25df9a0c6565f
[ "MIT" ]
null
null
null
linum/layer_list.py
chabErch/Linum
e32ec01f0b43cfb03fd33ad90cf25df9a0c6565f
[ "MIT" ]
null
null
null
from datetime import date from typing import List, Tuple, Optional from linum.exceptions import IntersectionException from linum.layer import Layer from linum.task_part import TaskPart class LayerList: def __init__(self, layers: Optional[List[Layer]] = None): """ Массив слоев. :param layers: слои для добавления в список """ self.layers = layers or [] def __repr__(self): return "<LayerList with {} layer(s)>".format(len(self.layers)) def __eq__(self, other): if not isinstance(other, LayerList): return False return self.layers == other.layers def __getitem__(self, item): return self.layers[item] def __bool__(self): if not self.layers: return False for layer in self.layers: if layer: return True return False def split(self, split_date: date) -> Tuple['LayerList', 'LayerList']: """ Функция разделения списка слоев на два относительно указанной даты. :param split_date: date :return: """ list_before = LayerList() list_after = LayerList() for layer in self.layers: layer_before, layer_after = layer.split(split_date) list_before.layers.append(layer_before) list_after.layers.append(layer_after) return list_before, list_after def add_task_part(self, task_part: TaskPart): """ Добавление кусочка задачи в список слоев. Если есть свободное место в текущих слоях, то кусочек добавится к ним. Если свободного места нет, то список слоев расширится на один слой и кусочек задачи будет помещен на этот новый слой. :param task_part: кусочек задачи для добавления """ for layer in self.layers: try: layer.append(task_part) return except IntersectionException: pass layer = Layer([task_part]) self.layers.append(layer) def cleanup(self): layers = [] for layer in self.layers: if layer: layers.append(layer) self.layers = layers
28.240506
78
0.603765
from datetime import date from typing import List, Tuple, Optional from linum.exceptions import IntersectionException from linum.layer import Layer from linum.task_part import TaskPart class LayerList: def __init__(self, layers: Optional[List[Layer]] = None): self.layers = layers or [] def __repr__(self): return "<LayerList with {} layer(s)>".format(len(self.layers)) def __eq__(self, other): if not isinstance(other, LayerList): return False return self.layers == other.layers def __getitem__(self, item): return self.layers[item] def __bool__(self): if not self.layers: return False for layer in self.layers: if layer: return True return False def split(self, split_date: date) -> Tuple['LayerList', 'LayerList']: list_before = LayerList() list_after = LayerList() for layer in self.layers: layer_before, layer_after = layer.split(split_date) list_before.layers.append(layer_before) list_after.layers.append(layer_after) return list_before, list_after def add_task_part(self, task_part: TaskPart): for layer in self.layers: try: layer.append(task_part) return except IntersectionException: pass layer = Layer([task_part]) self.layers.append(layer) def cleanup(self): layers = [] for layer in self.layers: if layer: layers.append(layer) self.layers = layers
true
true
f70d99f39179924a992e9bdff97d615e2b14ab03
96,559
py
Python
tests/unit/gapic/dialogflow_v2/test_versions.py
googleapis/dialogflow-python-client-v2
069c71e296c94b5e1bcfb9be2abeadd7c44dcf3d
[ "Apache-2.0" ]
171
2018-09-19T21:16:18.000Z
2020-12-07T17:41:10.000Z
tests/unit/gapic/dialogflow_v2/test_versions.py
googleapis/dialogflow-python-client-v2
069c71e296c94b5e1bcfb9be2abeadd7c44dcf3d
[ "Apache-2.0" ]
150
2018-09-25T14:04:28.000Z
2020-12-09T21:45:43.000Z
tests/unit/gapic/dialogflow_v2/test_versions.py
googleapis/dialogflow-python-client-v2
069c71e296c94b5e1bcfb9be2abeadd7c44dcf3d
[ "Apache-2.0" ]
75
2018-09-22T14:12:18.000Z
2020-12-08T07:12:12.000Z
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os # try/except added for compatibility with python < 3.8 try: from unittest import mock from unittest.mock import AsyncMock except ImportError: import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.dialogflow_v2.services.versions import VersionsAsyncClient from google.cloud.dialogflow_v2.services.versions import VersionsClient from google.cloud.dialogflow_v2.services.versions import pagers from google.cloud.dialogflow_v2.services.versions import transports from google.cloud.dialogflow_v2.types import version from google.cloud.dialogflow_v2.types import version as gcd_version from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert VersionsClient._get_default_mtls_endpoint(None) is None assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.VersionsGrpcTransport, "grpc"), (transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") def test_versions_client_get_transport_class(): transport = VersionsClient.get_transport_class() available_transports = [ transports.VersionsGrpcTransport, ] assert transport in available_transports transport = VersionsClient.get_transport_class("grpc") assert transport == transports.VersionsGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "true", ), (VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_versions_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient]) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_versions_client_client_options_from_dict(): with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"}) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # test that the credentials from file are saved and used as the credentials. with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=None, default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "request_type", [ version.ListVersionsRequest, dict, ], ) def test_list_versions(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse( next_page_token="next_page_token_value", ) response = client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListVersionsPager) assert response.next_page_token == "next_page_token_value" def test_list_versions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: client.list_versions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() @pytest.mark.asyncio async def test_list_versions_async( transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListVersionsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_versions_async_from_dict(): await test_list_versions_async(request_type=dict) def test_list_versions_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.ListVersionsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse() client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_list_versions_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.ListVersionsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse() ) await client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_list_versions_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_versions( parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val def test_list_versions_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_versions( version.ListVersionsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_versions_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_versions( parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_versions_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_versions( version.ListVersionsRequest(), parent="parent_value", ) def test_list_versions_pager(transport_name: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_versions(request={}) assert pager._metadata == metadata results = list(pager) assert len(results) == 6 assert all(isinstance(i, version.Version) for i in results) def test_list_versions_pages(transport_name: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = list(client.list_versions(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_versions_async_pager(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) async_pager = await client.list_versions( request={}, ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 assert all(isinstance(i, version.Version) for i in responses) @pytest.mark.asyncio async def test_list_versions_async_pages(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = [] async for page_ in ( await client.list_versions(request={}) ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize( "request_type", [ version.GetVersionRequest, dict, ], ) def test_get_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) response = client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS def test_get_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: client.get_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() @pytest.mark.asyncio async def test_get_version_async( transport: str = "grpc_asyncio", request_type=version.GetVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_get_version_async_from_dict(): await test_get_version_async(request_type=dict) def test_get_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.GetVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version() client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_get_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.GetVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) await client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_get_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.parametrize( "request_type", [ gcd_version.CreateVersionRequest, dict, ], ) def test_create_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_create_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: client.create_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() @pytest.mark.asyncio async def test_create_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_create_version_async_from_dict(): await test_create_version_async(request_type=dict) def test_create_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.CreateVersionRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version() client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_create_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.CreateVersionRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_create_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val def test_create_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.asyncio async def test_create_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val @pytest.mark.asyncio async def test_create_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.parametrize( "request_type", [ gcd_version.UpdateVersionRequest, dict, ], ) def test_update_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_update_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: client.update_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() @pytest.mark.asyncio async def test_update_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_update_version_async_from_dict(): await test_update_version_async(request_type=dict) def test_update_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version() client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_update_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] def test_update_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val def test_update_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio async def test_update_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ version.DeleteVersionRequest, dict, ], ) def test_delete_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None response = client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() # Establish that the response is the type that we expect. assert response is None def test_delete_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: client.delete_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() @pytest.mark.asyncio async def test_delete_version_async( transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() # Establish that the response is the type that we expect. assert response is None @pytest.mark.asyncio async def test_delete_version_async_from_dict(): await test_delete_version_async(request_type=dict) def test_delete_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.DeleteVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_delete_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.DeleteVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) await client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_delete_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_delete_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_version( version.DeleteVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_delete_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_delete_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_version( version.DeleteVersionRequest(), name="name_value", ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, transport=transport, ) # It is an error to provide an api_key and a credential. options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = VersionsClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.VersionsGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() @pytest.mark.parametrize( "transport_name", [ "grpc", ], ) def test_transport_kind(transport_name): transport = VersionsClient.get_transport_class(transport_name)( credentials=ga_credentials.AnonymousCredentials(), ) assert transport.kind == transport_name def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.VersionsGrpcTransport, ) def test_versions_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_versions_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. methods = ( "list_versions", "get_version", "create_version", "update_version", "delete_version", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() # Catch all for all remaining methods and properties remainder = [ "kind", ] for r in remainder: with pytest.raises(NotImplementedError): getattr(transport, r)() def test_versions_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) def test_versions_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport() adc.assert_called_once() def test_versions_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) VersionsClient() adc.assert_called_once_with( scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_versions_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.VersionsGrpcTransport, grpc_helpers), (transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_versions_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=["1", "2"], default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_no_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_with_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com:8000" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:8000") def test_versions_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.VersionsGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_versions_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.VersionsGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_client_cert_source(transport_class): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_version_path(): project = "squid" version = "clam" expected = "projects/{project}/agent/versions/{version}".format( project=project, version=version, ) actual = VersionsClient.version_path(project, version) assert expected == actual def test_parse_version_path(): expected = { "project": "whelk", "version": "octopus", } path = VersionsClient.version_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_version_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = VersionsClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = VersionsClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format( folder=folder, ) actual = VersionsClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = VersionsClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format( organization=organization, ) actual = VersionsClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = VersionsClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format( project=project, ) actual = VersionsClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = VersionsClient.common_project_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = VersionsClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = VersionsClient.common_location_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: transport_class = VersionsClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ (VersionsClient, transports.VersionsGrpcTransport), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
35.828942
106
0.666163
import os try: from unittest import mock from unittest.mock import AsyncMock except ImportError: import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.dialogflow_v2.services.versions import VersionsAsyncClient from google.cloud.dialogflow_v2.services.versions import VersionsClient from google.cloud.dialogflow_v2.services.versions import pagers from google.cloud.dialogflow_v2.services.versions import transports from google.cloud.dialogflow_v2.types import version from google.cloud.dialogflow_v2.types import version as gcd_version from google.oauth2 import service_account from google.protobuf import field_mask_pb2 from google.protobuf import timestamp_pb2 import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert VersionsClient._get_default_mtls_endpoint(None) is None assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.VersionsGrpcTransport, "grpc"), (transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") def test_versions_client_get_transport_class(): transport = VersionsClient.get_transport_class() available_transports = [ transports.VersionsGrpcTransport, ] assert transport in available_transports transport = VersionsClient.get_transport_class("grpc") assert transport == transports.VersionsGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_client_options(client_class, transport_class, transport_name): with mock.patch.object(VersionsClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "true", ), (VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_versions_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient]) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_client_options_scopes( client_class, transport_class, transport_name ): options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_versions_client_client_options_from_dict(): with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"}) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=None, default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "request_type", [ version.ListVersionsRequest, dict, ], ) def test_list_versions(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse( next_page_token="next_page_token_value", ) response = client.list_versions(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() assert isinstance(response, pagers.ListVersionsPager) assert response.next_page_token == "next_page_token_value" def test_list_versions_empty_call(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) with mock.patch.object(type(client.transport.list_versions), "__call__") as call: client.list_versions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() @pytest.mark.asyncio async def test_list_versions_async( transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_versions(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() assert isinstance(response, pagers.ListVersionsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_versions_async_from_dict(): await test_list_versions_async(request_type=dict) def test_list_versions_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) request = version.ListVersionsRequest() request.parent = "parent_value" with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse() client.list_versions(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_list_versions_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) request = version.ListVersionsRequest() request.parent = "parent_value" with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse() ) await client.list_versions(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_list_versions_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse() client.list_versions( parent="parent_value", ) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val def test_list_versions_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client.list_versions( version.ListVersionsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_versions_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse() ) response = await client.list_versions( parent="parent_value", ) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_versions_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): await client.list_versions( version.ListVersionsRequest(), parent="parent_value", ) def test_list_versions_pager(transport_name: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_versions(request={}) assert pager._metadata == metadata results = list(pager) assert len(results) == 6 assert all(isinstance(i, version.Version) for i in results) def test_list_versions_pages(transport_name: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = list(client.list_versions(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_versions_async_pager(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) with mock.patch.object( type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock ) as call: call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) async_pager = await client.list_versions( request={}, ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, version.Version) for i in responses) @pytest.mark.asyncio async def test_list_versions_async_pages(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) with mock.patch.object( type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock ) as call: call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = [] async for page_ in ( await client.list_versions(request={}) ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize( "request_type", [ version.GetVersionRequest, dict, ], ) def test_get_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) response = client.get_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS def test_get_version_empty_call(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) with mock.patch.object(type(client.transport.get_version), "__call__") as call: client.get_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() @pytest.mark.asyncio async def test_get_version_async( transport: str = "grpc_asyncio", request_type=version.GetVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.get_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_get_version_async_from_dict(): await test_get_version_async(request_type=dict) def test_get_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) request = version.GetVersionRequest() request.name = "name_value" with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version() client.get_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_get_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) request = version.GetVersionRequest() request.name = "name_value" with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) await client.get_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_get_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version() client.get_version( name="name_value", ) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) response = await client.get_version( name="name_value", ) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): await client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.parametrize( "request_type", [ gcd_version.CreateVersionRequest, dict, ], ) def test_create_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.create_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_create_version_empty_call(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) with mock.patch.object(type(client.transport.create_version), "__call__") as call: client.create_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() @pytest.mark.asyncio async def test_create_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.create_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_create_version_async_from_dict(): await test_create_version_async(request_type=dict) def test_create_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) request = gcd_version.CreateVersionRequest() request.parent = "parent_value" with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version() client.create_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_create_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) request = gcd_version.CreateVersionRequest() request.parent = "parent_value" with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.create_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_create_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version() client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val def test_create_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.asyncio async def test_create_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) response = await client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val @pytest.mark.asyncio async def test_create_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): await client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.parametrize( "request_type", [ gcd_version.UpdateVersionRequest, dict, ], ) def test_update_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.update_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_update_version_empty_call(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) with mock.patch.object(type(client.transport.update_version), "__call__") as call: client.update_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() @pytest.mark.asyncio async def test_update_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.update_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_update_version_async_from_dict(): await test_update_version_async(request_type=dict) def test_update_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version() client.update_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_update_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.update_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] def test_update_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version() client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val def test_update_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) response = await client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio async def test_update_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): await client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ version.DeleteVersionRequest, dict, ], ) def test_delete_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None response = client.delete_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() assert response is None def test_delete_version_empty_call(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) with mock.patch.object(type(client.transport.delete_version), "__call__") as call: client.delete_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() @pytest.mark.asyncio async def test_delete_version_async( transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) request = request_type() with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() assert response is None @pytest.mark.asyncio async def test_delete_version_async_from_dict(): await test_delete_version_async(request_type=dict) def test_delete_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) request = version.DeleteVersionRequest() request.name = "name_value" with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None client.delete_version(request) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_delete_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) request = version.DeleteVersionRequest() request.name = "name_value" with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) await client.delete_version(request) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_delete_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None client.delete_version( name="name_value", ) assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_delete_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client.delete_version( version.DeleteVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_delete_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_version( name="name_value", ) assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_delete_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): await client.delete_version( version.DeleteVersionRequest(), name="name_value", ) def test_credentials_transport_error(): transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, transport=transport, ) options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = VersionsClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.VersionsGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() @pytest.mark.parametrize( "transport_name", [ "grpc", ], ) def test_transport_kind(transport_name): transport = VersionsClient.get_transport_class(transport_name)( credentials=ga_credentials.AnonymousCredentials(), ) assert transport.kind == transport_name def test_transport_grpc_default(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.VersionsGrpcTransport, ) def test_versions_base_transport_error(): with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_versions_base_transport(): with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), ) methods = ( "list_versions", "get_version", "create_version", "update_version", "delete_version", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() remainder = [ "kind", ] for r in remainder: with pytest.raises(NotImplementedError): getattr(transport, r)() def test_versions_base_transport_with_credentials_file(): with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) def test_versions_base_transport_with_adc(): with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport() adc.assert_called_once() def test_versions_auth_adc(): with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) VersionsClient() adc.assert_called_once_with( scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_versions_transport_auth_adc(transport_class): with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.VersionsGrpcTransport, grpc_helpers), (transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_versions_transport_create_channel(transport_class, grpc_helpers): with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=["1", "2"], default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_no_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_with_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com:8000" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:8000") def test_versions_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) transport = transports.VersionsGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_versions_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) transport = transports.VersionsGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_client_cert_source(transport_class): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_version_path(): project = "squid" version = "clam" expected = "projects/{project}/agent/versions/{version}".format( project=project, version=version, ) actual = VersionsClient.version_path(project, version) assert expected == actual def test_parse_version_path(): expected = { "project": "whelk", "version": "octopus", } path = VersionsClient.version_path(**expected) actual = VersionsClient.parse_version_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = VersionsClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = VersionsClient.common_billing_account_path(**expected) actual = VersionsClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format( folder=folder, ) actual = VersionsClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = VersionsClient.common_folder_path(**expected) actual = VersionsClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format( organization=organization, ) actual = VersionsClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = VersionsClient.common_organization_path(**expected) actual = VersionsClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format( project=project, ) actual = VersionsClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = VersionsClient.common_project_path(**expected) actual = VersionsClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = VersionsClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = VersionsClient.common_location_path(**expected) actual = VersionsClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: transport_class = VersionsClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ (VersionsClient, transports.VersionsGrpcTransport), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
true
true
f70d9a641d059770d87b4867f1e577295635ca6d
356
py
Python
app/__init__.py
WebPractices/Eager
ce509dbbd65199e70b76aad82ba740e80864ac65
[ "MIT" ]
null
null
null
app/__init__.py
WebPractices/Eager
ce509dbbd65199e70b76aad82ba740e80864ac65
[ "MIT" ]
null
null
null
app/__init__.py
WebPractices/Eager
ce509dbbd65199e70b76aad82ba740e80864ac65
[ "MIT" ]
null
null
null
from flask import Flask def create_app(flask_config): app = Flask(__name__) app.config.from_object('app.config.{}'.format(flask_config)) from app.api import api_bp from app.client import client_bp app.register_blueprint(api_bp) app.register_blueprint(client_bp) app.logger.info('>>> {}'.format(flask_config)) return app
23.733333
64
0.716292
from flask import Flask def create_app(flask_config): app = Flask(__name__) app.config.from_object('app.config.{}'.format(flask_config)) from app.api import api_bp from app.client import client_bp app.register_blueprint(api_bp) app.register_blueprint(client_bp) app.logger.info('>>> {}'.format(flask_config)) return app
true
true
f70d9a737cfddc36a288fb6856a05809b5e5cfd6
910
py
Python
idm_lp/commands/disable_notifications.py
lper1/dgm-.
aae14b2758b220f42030745bb941e7012bf77ae2
[ "MIT" ]
47
2020-09-15T11:13:35.000Z
2022-03-30T06:37:52.000Z
idm_lp/commands/disable_notifications.py
lper1/dgm-.
aae14b2758b220f42030745bb941e7012bf77ae2
[ "MIT" ]
2
2020-10-27T15:30:33.000Z
2020-11-14T16:50:01.000Z
idm_lp/commands/disable_notifications.py
lper1/dgm-.
aae14b2758b220f42030745bb941e7012bf77ae2
[ "MIT" ]
111
2020-09-27T20:06:42.000Z
2022-03-19T20:29:29.000Z
from vkbottle.rule import FromMe from vkbottle.user import Blueprint, Message from idm_lp.logger import logger_decorator from idm_lp.database import Database from idm_lp.utils import edit_message user = Blueprint( name='disable_notifications_blueprint' ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> выключать уведы") @logger_decorator async def allow_disable_notifications_wrapper(message: Message, **kwargs): db = Database.get_current() db.disable_notifications = True db.save() await edit_message(message, "&#9989; Настройка изменена") @user.on.message_handler(FromMe(), text="<prefix:service_prefix> не выключать уведы") @logger_decorator async def deny_disable_notifications_wrapper(message: Message, **kwargs): db = Database.get_current() db.disable_notifications = False db.save() await edit_message(message, "&#9989; Настройка изменена")
29.354839
85
0.774725
from vkbottle.rule import FromMe from vkbottle.user import Blueprint, Message from idm_lp.logger import logger_decorator from idm_lp.database import Database from idm_lp.utils import edit_message user = Blueprint( name='disable_notifications_blueprint' ) @user.on.message_handler(FromMe(), text="<prefix:service_prefix> выключать уведы") @logger_decorator async def allow_disable_notifications_wrapper(message: Message, **kwargs): db = Database.get_current() db.disable_notifications = True db.save() await edit_message(message, "&#9989; Настройка изменена") @user.on.message_handler(FromMe(), text="<prefix:service_prefix> не выключать уведы") @logger_decorator async def deny_disable_notifications_wrapper(message: Message, **kwargs): db = Database.get_current() db.disable_notifications = False db.save() await edit_message(message, "&#9989; Настройка изменена")
true
true
f70d9a74aa623ec70705e6fa9613849704b54d16
2,646
py
Python
bot.py
Cr3atable/Brick
012fc2bb91b7950763cf8da04f11509d0e638efe
[ "Apache-2.0" ]
1
2021-03-27T06:24:25.000Z
2021-03-27T06:24:25.000Z
bot.py
Cr3atable/Brick
012fc2bb91b7950763cf8da04f11509d0e638efe
[ "Apache-2.0" ]
null
null
null
bot.py
Cr3atable/Brick
012fc2bb91b7950763cf8da04f11509d0e638efe
[ "Apache-2.0" ]
null
null
null
""" Copyright (c) 2020, creatable Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import aiohttp import asyncio import discord import html from discord.ext import commands bot = commands.Bot(command_prefix='b!', description="""A utility bot for Reddit verification. Copyright (c) 2020, creatable (https://creatable.cafe)""") @bot.event async def on_ready(): print(""" _ _ _ | | (_) | | | |__ _ __ _ ___| | __ | '_ \| '__| |/ __| |/ / | |_) | | | | (__| < _ |_.__/|_| |_|\___|_|\_(_) by creatable""") @bot.command() async def verify(ctx, *args): if len(args) != 0: verifiedrole = discord.utils.get(ctx.guild.roles, name="Verified") verifystring = f"""-----BEGIN BRICK VERIFICATION STRING----- {ctx.author.id} -----END BRICK VERIFICATION STRING-----""" if verifiedrole in ctx.author.roles: await ctx.send("ERROR: You're already verified!") else: async with aiohttp.ClientSession() as session: async with session.get(f'https://www.reddit.com/user/{args[0]}/about.json', allow_redirects = False) as response: if response.status != 404: desc = html.unescape((await response.json())["data"]["subreddit"]["public_description"]) if (verifystring) in desc: await ctx.author.add_roles(verifiedrole) await ctx.author.edit(nick = f"u/{args[0]}") await ctx.send("""Successfully verified! You can now remove the verification string from your profile at <https://new.reddit.com/settings/profile> if you want.""") else: await ctx.send(f"""Go to <https://new.reddit.com/settings/profile> and add the following block to your "About" section: ```{verifystring}``` Then do `b!verify {discord.utils.escape_mentions(args[0])}` again to verify your Reddit account.""") else: await ctx.send("ERROR: I can't find that user.") else: await ctx.send("ERROR: No arguments were provided.") bot.run('')
37.8
148
0.616402
import aiohttp import asyncio import discord import html from discord.ext import commands bot = commands.Bot(command_prefix='b!', description="""A utility bot for Reddit verification. Copyright (c) 2020, creatable (https://creatable.cafe)""") @bot.event async def on_ready(): print(""" _ _ _ | | (_) | | | |__ _ __ _ ___| | __ | '_ \| '__| |/ __| |/ / | |_) | | | | (__| < _ |_.__/|_| |_|\___|_|\_(_) by creatable""") @bot.command() async def verify(ctx, *args): if len(args) != 0: verifiedrole = discord.utils.get(ctx.guild.roles, name="Verified") verifystring = f"""-----BEGIN BRICK VERIFICATION STRING----- {ctx.author.id} -----END BRICK VERIFICATION STRING-----""" if verifiedrole in ctx.author.roles: await ctx.send("ERROR: You're already verified!") else: async with aiohttp.ClientSession() as session: async with session.get(f'https://www.reddit.com/user/{args[0]}/about.json', allow_redirects = False) as response: if response.status != 404: desc = html.unescape((await response.json())["data"]["subreddit"]["public_description"]) if (verifystring) in desc: await ctx.author.add_roles(verifiedrole) await ctx.author.edit(nick = f"u/{args[0]}") await ctx.send("""Successfully verified! You can now remove the verification string from your profile at <https://new.reddit.com/settings/profile> if you want.""") else: await ctx.send(f"""Go to <https://new.reddit.com/settings/profile> and add the following block to your "About" section: ```{verifystring}``` Then do `b!verify {discord.utils.escape_mentions(args[0])}` again to verify your Reddit account.""") else: await ctx.send("ERROR: I can't find that user.") else: await ctx.send("ERROR: No arguments were provided.") bot.run('')
true
true
f70d9b48a9595eaeded188e32ec69facb65e394d
12,573
py
Python
phosphor-regulators/tools/validate-regulators-config.py
smccarney/phosphor-power
39ea02bc9458f3d5a7fdd317cb546ed046eaff78
[ "Apache-2.0" ]
7
2019-10-04T01:19:49.000Z
2021-06-02T23:11:19.000Z
phosphor-regulators/tools/validate-regulators-config.py
smccarney/phosphor-power
39ea02bc9458f3d5a7fdd317cb546ed046eaff78
[ "Apache-2.0" ]
9
2019-10-23T14:22:03.000Z
2022-03-22T20:39:05.000Z
phosphor-regulators/tools/validate-regulators-config.py
smccarney/phosphor-power
39ea02bc9458f3d5a7fdd317cb546ed046eaff78
[ "Apache-2.0" ]
11
2019-10-04T01:20:01.000Z
2022-03-03T06:08:16.000Z
#!/usr/bin/env python3 import argparse import json import jsonschema import os import sys r""" Validates the phosphor-regulators configuration file. Checks it against a JSON schema as well as doing some extra checks that can't be encoded in the schema. """ def handle_validation_error(): sys.exit("Validation failed.") def get_values(json_element, key, result = None): r""" Finds all occurrences of a key within the specified JSON element and its children. Returns the associated values. To search the entire configuration file, pass the root JSON element json_element: JSON element within the config file. key: key name. result: list of values found with the specified key. """ if result is None: result = [] if type(json_element) is dict: for json_key in json_element: if json_key == key: result.append(json_element[json_key]) elif type(json_element[json_key]) in (list, dict): get_values(json_element[json_key], key, result) elif type(json_element) is list: for item in json_element: if type(item) in (list, dict): get_values(item, key, result) return result def get_rule_ids(config_json): r""" Get all rule IDs in the configuration file. config_json: Configuration file JSON """ rule_ids = [] for rule in config_json.get('rules', {}): rule_ids.append(rule['id']) return rule_ids def get_device_ids(config_json): r""" Get all device IDs in the configuration file. config_json: Configuration file JSON """ device_ids = [] for chassis in config_json.get('chassis', {}): for device in chassis.get('devices', {}): device_ids.append(device['id']) return device_ids def check_number_of_elements_in_masks(config_json): r""" Check if the number of bit masks in the 'masks' property matches the number of byte values in the 'values' property. config_json: Configuration file JSON """ i2c_write_bytes = get_values(config_json, 'i2c_write_bytes') i2c_compare_bytes = get_values(config_json, 'i2c_compare_bytes') for object in i2c_write_bytes: if 'masks' in object: if len(object.get('masks', [])) != len(object.get('values', [])): sys.stderr.write("Error: Invalid i2c_write_bytes action.\n"+\ "The masks array must have the same size as the values array. "+\ "masks: "+str(object.get('masks', []))+\ ", values: "+str(object.get('values', []))+'.\n') handle_validation_error() for object in i2c_compare_bytes: if 'masks' in object: if len(object.get('masks', [])) != len(object.get('values', [])): sys.stderr.write("Error: Invalid i2c_compare_bytes action.\n"+\ "The masks array must have the same size as the values array. "+\ "masks: "+str(object.get('masks', []))+\ ", values: "+str(object.get('values', []))+'.\n') handle_validation_error() def check_rule_id_exists(config_json): r""" Check if a rule_id property specifies a rule ID that does not exist. config_json: Configuration file JSON """ rule_ids = get_values(config_json, 'rule_id') valid_rule_ids = get_rule_ids(config_json) for rule_id in rule_ids: if rule_id not in valid_rule_ids: sys.stderr.write("Error: Rule ID does not exist.\n"+\ "Found rule_id value that specifies invalid rule ID "+\ rule_id+'\n') handle_validation_error() def check_device_id_exists(config_json): r""" Check if a device_id property specifies a device ID that does not exist. config_json: Configuration file JSON """ device_ids = get_values(config_json, 'device_id') valid_device_ids = get_device_ids(config_json) for device_id in device_ids: if device_id not in valid_device_ids: sys.stderr.write("Error: Device ID does not exist.\n"+\ "Found device_id value that specifies invalid device ID "+\ device_id+'\n') handle_validation_error() def check_set_device_value_exists(config_json): r""" Check if a set_device action specifies a device ID that does not exist. config_json: Configuration file JSON """ device_ids = get_values(config_json, 'set_device') valid_device_ids = get_device_ids(config_json) for device_id in device_ids: if device_id not in valid_device_ids: sys.stderr.write("Error: Device ID does not exist.\n"+\ "Found set_device action that specifies invalid device ID "+\ device_id+'\n') handle_validation_error() def check_run_rule_value_exists(config_json): r""" Check if any run_rule actions specify a rule ID that does not exist. config_json: Configuration file JSON """ rule_ids = get_values(config_json, 'run_rule') valid_rule_ids = get_rule_ids(config_json) for rule_id in rule_ids: if rule_id not in valid_rule_ids: sys.stderr.write("Error: Rule ID does not exist.\n"+\ "Found run_rule action that specifies invalid rule ID "+\ rule_id+'\n') handle_validation_error() def check_infinite_loops_in_rule(config_json, rule_json, call_stack=[]): r""" Check if a 'run_rule' action in the specified rule causes an infinite loop. config_json: Configuration file JSON. rule_json: A rule in the JSON config file. call_stack: Current call stack of rules. """ call_stack.append(rule_json['id']) for action in rule_json.get('actions', {}): if 'run_rule' in action: run_rule_id = action['run_rule'] if run_rule_id in call_stack: call_stack.append(run_rule_id) sys.stderr.write(\ "Infinite loop caused by run_rule actions.\n"+\ str(call_stack)+'\n') handle_validation_error() else: for rule in config_json.get('rules', {}): if rule['id'] == run_rule_id: check_infinite_loops_in_rule(\ config_json, rule, call_stack) call_stack.pop() def check_infinite_loops(config_json): r""" Check if rule in config file is called recursively, causing an infinite loop. config_json: Configuration file JSON """ for rule in config_json.get('rules', {}): check_infinite_loops_in_rule(config_json, rule) def check_duplicate_object_id(config_json): r""" Check that there aren't any JSON objects with the same 'id' property value. config_json: Configuration file JSON """ json_ids = get_values(config_json, 'id') unique_ids = set() for id in json_ids: if id in unique_ids: sys.stderr.write("Error: Duplicate ID.\n"+\ "Found multiple objects with the ID "+id+'\n') handle_validation_error() else: unique_ids.add(id) def check_duplicate_rule_id(config_json): r""" Check that there aren't any "rule" elements with the same 'id' field. config_json: Configuration file JSON """ rule_ids = [] for rule in config_json.get('rules', {}): rule_id = rule['id'] if rule_id in rule_ids: sys.stderr.write("Error: Duplicate rule ID.\n"+\ "Found multiple rules with the ID "+rule_id+'\n') handle_validation_error() else: rule_ids.append(rule_id) def check_duplicate_chassis_number(config_json): r""" Check that there aren't any "chassis" elements with the same 'number' field. config_json: Configuration file JSON """ numbers = [] for chassis in config_json.get('chassis', {}): number = chassis['number'] if number in numbers: sys.stderr.write("Error: Duplicate chassis number.\n"+\ "Found multiple chassis with the number "+str(number)+'\n') handle_validation_error() else: numbers.append(number) def check_duplicate_device_id(config_json): r""" Check that there aren't any "devices" with the same 'id' field. config_json: Configuration file JSON """ device_ids = [] for chassis in config_json.get('chassis', {}): for device in chassis.get('devices', {}): device_id = device['id'] if device_id in device_ids: sys.stderr.write("Error: Duplicate device ID.\n"+\ "Found multiple devices with the ID "+device_id+'\n') handle_validation_error() else: device_ids.append(device_id) def check_duplicate_rail_id(config_json): r""" Check that there aren't any "rails" with the same 'id' field. config_json: Configuration file JSON """ rail_ids = [] for chassis in config_json.get('chassis', {}): for device in chassis.get('devices', {}): for rail in device.get('rails', {}): rail_id = rail['id'] if rail_id in rail_ids: sys.stderr.write("Error: Duplicate rail ID.\n"+\ "Found multiple rails with the ID "+rail_id+'\n') handle_validation_error() else: rail_ids.append(rail_id) def check_for_duplicates(config_json): r""" Check for duplicate ID. """ check_duplicate_rule_id(config_json) check_duplicate_chassis_number(config_json) check_duplicate_device_id(config_json) check_duplicate_rail_id(config_json) check_duplicate_object_id(config_json) def validate_schema(config, schema): r""" Validates the specified config file using the specified schema file. config: Path of the file containing the config JSON schema: Path of the file containing the schema JSON """ with open(config) as config_handle: config_json = json.load(config_handle) with open(schema) as schema_handle: schema_json = json.load(schema_handle) try: jsonschema.validate(config_json, schema_json) except jsonschema.ValidationError as e: print(e) handle_validation_error() return config_json def validate_JSON_format(file): with open(file) as json_data: try: return json.load(json_data) except ValueError as err: return False return True if __name__ == '__main__': parser = argparse.ArgumentParser( description='phosphor-regulators configuration file validator') parser.add_argument('-s', '--schema-file', dest='schema_file', help='The phosphor-regulators schema file') parser.add_argument('-c', '--configuration-file', dest='configuration_file', help='The phosphor-regulators configuration file') args = parser.parse_args() if not args.schema_file: parser.print_help() sys.exit("Error: Schema file is required.") if not os.path.exists(args.schema_file): parser.print_help() sys.exit("Error: Schema file does not exist.") if not os.access(args.schema_file, os.R_OK): parser.print_help() sys.exit("Error: Schema file is not readable.") if not validate_JSON_format(args.schema_file): parser.print_help() sys.exit("Error: Schema file is not in the JSON format.") if not args.configuration_file: parser.print_help() sys.exit("Error: Configuration file is required.") if not os.path.exists(args.configuration_file): parser.print_help() sys.exit("Error: Configuration file does not exist.") if not os.access(args.configuration_file, os.R_OK): parser.print_help() sys.exit("Error: Configuration file is not readable.") if not validate_JSON_format(args.configuration_file): parser.print_help() sys.exit("Error: Configuration file is not in the JSON format.") config_json = validate_schema(args.configuration_file, args.schema_file) check_for_duplicates(config_json) check_infinite_loops(config_json) check_run_rule_value_exists(config_json) check_set_device_value_exists(config_json) check_rule_id_exists(config_json) check_device_id_exists(config_json) check_number_of_elements_in_masks(config_json)
34.636364
81
0.638432
import argparse import json import jsonschema import os import sys def handle_validation_error(): sys.exit("Validation failed.") def get_values(json_element, key, result = None): if result is None: result = [] if type(json_element) is dict: for json_key in json_element: if json_key == key: result.append(json_element[json_key]) elif type(json_element[json_key]) in (list, dict): get_values(json_element[json_key], key, result) elif type(json_element) is list: for item in json_element: if type(item) in (list, dict): get_values(item, key, result) return result def get_rule_ids(config_json): rule_ids = [] for rule in config_json.get('rules', {}): rule_ids.append(rule['id']) return rule_ids def get_device_ids(config_json): device_ids = [] for chassis in config_json.get('chassis', {}): for device in chassis.get('devices', {}): device_ids.append(device['id']) return device_ids def check_number_of_elements_in_masks(config_json): i2c_write_bytes = get_values(config_json, 'i2c_write_bytes') i2c_compare_bytes = get_values(config_json, 'i2c_compare_bytes') for object in i2c_write_bytes: if 'masks' in object: if len(object.get('masks', [])) != len(object.get('values', [])): sys.stderr.write("Error: Invalid i2c_write_bytes action.\n"+\ "The masks array must have the same size as the values array. "+\ "masks: "+str(object.get('masks', []))+\ ", values: "+str(object.get('values', []))+'.\n') handle_validation_error() for object in i2c_compare_bytes: if 'masks' in object: if len(object.get('masks', [])) != len(object.get('values', [])): sys.stderr.write("Error: Invalid i2c_compare_bytes action.\n"+\ "The masks array must have the same size as the values array. "+\ "masks: "+str(object.get('masks', []))+\ ", values: "+str(object.get('values', []))+'.\n') handle_validation_error() def check_rule_id_exists(config_json): rule_ids = get_values(config_json, 'rule_id') valid_rule_ids = get_rule_ids(config_json) for rule_id in rule_ids: if rule_id not in valid_rule_ids: sys.stderr.write("Error: Rule ID does not exist.\n"+\ "Found rule_id value that specifies invalid rule ID "+\ rule_id+'\n') handle_validation_error() def check_device_id_exists(config_json): device_ids = get_values(config_json, 'device_id') valid_device_ids = get_device_ids(config_json) for device_id in device_ids: if device_id not in valid_device_ids: sys.stderr.write("Error: Device ID does not exist.\n"+\ "Found device_id value that specifies invalid device ID "+\ device_id+'\n') handle_validation_error() def check_set_device_value_exists(config_json): device_ids = get_values(config_json, 'set_device') valid_device_ids = get_device_ids(config_json) for device_id in device_ids: if device_id not in valid_device_ids: sys.stderr.write("Error: Device ID does not exist.\n"+\ "Found set_device action that specifies invalid device ID "+\ device_id+'\n') handle_validation_error() def check_run_rule_value_exists(config_json): rule_ids = get_values(config_json, 'run_rule') valid_rule_ids = get_rule_ids(config_json) for rule_id in rule_ids: if rule_id not in valid_rule_ids: sys.stderr.write("Error: Rule ID does not exist.\n"+\ "Found run_rule action that specifies invalid rule ID "+\ rule_id+'\n') handle_validation_error() def check_infinite_loops_in_rule(config_json, rule_json, call_stack=[]): call_stack.append(rule_json['id']) for action in rule_json.get('actions', {}): if 'run_rule' in action: run_rule_id = action['run_rule'] if run_rule_id in call_stack: call_stack.append(run_rule_id) sys.stderr.write(\ "Infinite loop caused by run_rule actions.\n"+\ str(call_stack)+'\n') handle_validation_error() else: for rule in config_json.get('rules', {}): if rule['id'] == run_rule_id: check_infinite_loops_in_rule(\ config_json, rule, call_stack) call_stack.pop() def check_infinite_loops(config_json): for rule in config_json.get('rules', {}): check_infinite_loops_in_rule(config_json, rule) def check_duplicate_object_id(config_json): json_ids = get_values(config_json, 'id') unique_ids = set() for id in json_ids: if id in unique_ids: sys.stderr.write("Error: Duplicate ID.\n"+\ "Found multiple objects with the ID "+id+'\n') handle_validation_error() else: unique_ids.add(id) def check_duplicate_rule_id(config_json): rule_ids = [] for rule in config_json.get('rules', {}): rule_id = rule['id'] if rule_id in rule_ids: sys.stderr.write("Error: Duplicate rule ID.\n"+\ "Found multiple rules with the ID "+rule_id+'\n') handle_validation_error() else: rule_ids.append(rule_id) def check_duplicate_chassis_number(config_json): numbers = [] for chassis in config_json.get('chassis', {}): number = chassis['number'] if number in numbers: sys.stderr.write("Error: Duplicate chassis number.\n"+\ "Found multiple chassis with the number "+str(number)+'\n') handle_validation_error() else: numbers.append(number) def check_duplicate_device_id(config_json): device_ids = [] for chassis in config_json.get('chassis', {}): for device in chassis.get('devices', {}): device_id = device['id'] if device_id in device_ids: sys.stderr.write("Error: Duplicate device ID.\n"+\ "Found multiple devices with the ID "+device_id+'\n') handle_validation_error() else: device_ids.append(device_id) def check_duplicate_rail_id(config_json): rail_ids = [] for chassis in config_json.get('chassis', {}): for device in chassis.get('devices', {}): for rail in device.get('rails', {}): rail_id = rail['id'] if rail_id in rail_ids: sys.stderr.write("Error: Duplicate rail ID.\n"+\ "Found multiple rails with the ID "+rail_id+'\n') handle_validation_error() else: rail_ids.append(rail_id) def check_for_duplicates(config_json): check_duplicate_rule_id(config_json) check_duplicate_chassis_number(config_json) check_duplicate_device_id(config_json) check_duplicate_rail_id(config_json) check_duplicate_object_id(config_json) def validate_schema(config, schema): with open(config) as config_handle: config_json = json.load(config_handle) with open(schema) as schema_handle: schema_json = json.load(schema_handle) try: jsonschema.validate(config_json, schema_json) except jsonschema.ValidationError as e: print(e) handle_validation_error() return config_json def validate_JSON_format(file): with open(file) as json_data: try: return json.load(json_data) except ValueError as err: return False return True if __name__ == '__main__': parser = argparse.ArgumentParser( description='phosphor-regulators configuration file validator') parser.add_argument('-s', '--schema-file', dest='schema_file', help='The phosphor-regulators schema file') parser.add_argument('-c', '--configuration-file', dest='configuration_file', help='The phosphor-regulators configuration file') args = parser.parse_args() if not args.schema_file: parser.print_help() sys.exit("Error: Schema file is required.") if not os.path.exists(args.schema_file): parser.print_help() sys.exit("Error: Schema file does not exist.") if not os.access(args.schema_file, os.R_OK): parser.print_help() sys.exit("Error: Schema file is not readable.") if not validate_JSON_format(args.schema_file): parser.print_help() sys.exit("Error: Schema file is not in the JSON format.") if not args.configuration_file: parser.print_help() sys.exit("Error: Configuration file is required.") if not os.path.exists(args.configuration_file): parser.print_help() sys.exit("Error: Configuration file does not exist.") if not os.access(args.configuration_file, os.R_OK): parser.print_help() sys.exit("Error: Configuration file is not readable.") if not validate_JSON_format(args.configuration_file): parser.print_help() sys.exit("Error: Configuration file is not in the JSON format.") config_json = validate_schema(args.configuration_file, args.schema_file) check_for_duplicates(config_json) check_infinite_loops(config_json) check_run_rule_value_exists(config_json) check_set_device_value_exists(config_json) check_rule_id_exists(config_json) check_device_id_exists(config_json) check_number_of_elements_in_masks(config_json)
true
true
f70d9c25fc56a2ac4d94d2cb43a5ca522a38b635
1,607
py
Python
playrcc/src/base/http/api.py
Gloryness/playrcc
3816a935f19c786db59ba5a46a98cc527053cc29
[ "MIT" ]
4
2020-09-24T14:25:01.000Z
2020-11-02T22:18:12.000Z
playrcc/src/base/http/api.py
Gloryness/playrcc
3816a935f19c786db59ba5a46a98cc527053cc29
[ "MIT" ]
null
null
null
playrcc/src/base/http/api.py
Gloryness/playrcc
3816a935f19c786db59ba5a46a98cc527053cc29
[ "MIT" ]
null
null
null
import requests from bs4 import BeautifulSoup class API: def __init__(self, auth): self.auth = auth self.api = 'https://api.playr.gg/api/enter' self.headers = { 'Accept': "application/json, text/plain, */*", 'Accept-Encoding': "gzip, deflate, br", 'Accept-Language': "en-GB, en;q=0.5", 'Authorization': self.auth, # an authentication is needed other we cannot use it and a response will say 'Missing JWT Token' 'Host': "api.playr.gg", 'Origin': 'https://playr.gg', 'sec-fetch-dest': "empty", 'sec-fetch-mode': "cors", 'sec-fetch-site': "same-site", 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0" } self.params = { "dry_run": False, "entry_method": "playr_secret_code" } def send_post(self, params={}): """ Send a POST request to the API :return: text """ self.params.update(params) r = requests.post(self.api, params=self.params, headers=self.headers) # sending the post request self.params = { # resetting the params "dry_run": False, "entry_method": "playr_secret_code" } return r.text # returning the response @staticmethod def get_auth(): try: r = requests.get('https://pastebin.com/UMWjEWdg').text except: return 'None' soup = BeautifulSoup(r, 'lxml') return soup.find(class_='de1').text
32.795918
136
0.549471
import requests from bs4 import BeautifulSoup class API: def __init__(self, auth): self.auth = auth self.api = 'https://api.playr.gg/api/enter' self.headers = { 'Accept': "application/json, text/plain, */*", 'Accept-Encoding': "gzip, deflate, br", 'Accept-Language': "en-GB, en;q=0.5", 'Authorization': self.auth, 'Host': "api.playr.gg", 'Origin': 'https://playr.gg', 'sec-fetch-dest': "empty", 'sec-fetch-mode': "cors", 'sec-fetch-site': "same-site", 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0" } self.params = { "dry_run": False, "entry_method": "playr_secret_code" } def send_post(self, params={}): self.params.update(params) r = requests.post(self.api, params=self.params, headers=self.headers) self.params = { "dry_run": False, "entry_method": "playr_secret_code" } return r.text @staticmethod def get_auth(): try: r = requests.get('https://pastebin.com/UMWjEWdg').text except: return 'None' soup = BeautifulSoup(r, 'lxml') return soup.find(class_='de1').text
true
true
f70d9c4aa6bad7b33a8cb47b9785a6f7e6a8b67b
6,020
py
Python
biosys/apps/main/tests/api/test_program.py
florianm/biosys
934d06ed805b0734f3cb9a00feec6cd81a94e512
[ "Apache-2.0" ]
1
2020-08-24T02:44:36.000Z
2020-08-24T02:44:36.000Z
biosys/apps/main/tests/api/test_program.py
florianm/biosys
934d06ed805b0734f3cb9a00feec6cd81a94e512
[ "Apache-2.0" ]
19
2016-09-29T01:03:18.000Z
2021-07-02T06:54:05.000Z
biosys/apps/main/tests/api/test_program.py
florianm/biosys
934d06ed805b0734f3cb9a00feec6cd81a94e512
[ "Apache-2.0" ]
5
2018-12-20T05:36:28.000Z
2021-09-29T00:44:31.000Z
from django.urls import reverse from rest_framework import status from main.tests.api import helpers class TestPermissions(helpers.BaseUserTestCase): """ Test Permissions Get: authenticated Update: admin Create: admin Delete: admin """ def test_get(self): urls = [ reverse('api:program-list'), reverse('api:program-detail', kwargs={'pk': self.program_1.pk}) ] access = { "forbidden": [self.anonymous_client], "allowed": [ self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client, self.admin_client ] } for client in access['forbidden']: for url in urls: self.assertIn( client.get(url).status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: self.assertEqual( client.get(url).status_code, status.HTTP_200_OK ) def test_create(self): """ Only admin :return: """ urls = [reverse('api:program-list')] data = { "name": "A new program for Unit test", "code": "T1234", "data_engineers": [self.data_engineer_1_user.pk] } access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.post(url, data, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: # Name must me unique data['name'] += '1' self.assertEqual( client.post(url, data, format='json').status_code, status.HTTP_201_CREATED ) def test_put(self): """ Only admin :return: """ urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})] data = { "name": "A new program for Unit test", "code": "T1234", "data_engineers": [self.data_engineer_1_user.pk] } access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.put(url, data, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: # Name must me unique data['name'] += '1' self.assertEqual( client.put(url, data, format='json').status_code, status.HTTP_200_OK ) def test_patch(self): """ Only admin :return: """ urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})] data = { "code": "XXXX", } access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.patch(url, data, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: self.assertEqual( client.patch(url, data, format='json').status_code, status.HTTP_200_OK ) def test_delete(self): """ Admin only :return: """ urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})] access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.delete(url, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: self.assertEqual( client.delete(url, format='json').status_code, status.HTTP_204_NO_CONTENT )
31.19171
80
0.47691
from django.urls import reverse from rest_framework import status from main.tests.api import helpers class TestPermissions(helpers.BaseUserTestCase): def test_get(self): urls = [ reverse('api:program-list'), reverse('api:program-detail', kwargs={'pk': self.program_1.pk}) ] access = { "forbidden": [self.anonymous_client], "allowed": [ self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client, self.admin_client ] } for client in access['forbidden']: for url in urls: self.assertIn( client.get(url).status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: self.assertEqual( client.get(url).status_code, status.HTTP_200_OK ) def test_create(self): urls = [reverse('api:program-list')] data = { "name": "A new program for Unit test", "code": "T1234", "data_engineers": [self.data_engineer_1_user.pk] } access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.post(url, data, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: data['name'] += '1' self.assertEqual( client.post(url, data, format='json').status_code, status.HTTP_201_CREATED ) def test_put(self): urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})] data = { "name": "A new program for Unit test", "code": "T1234", "data_engineers": [self.data_engineer_1_user.pk] } access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.put(url, data, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: data['name'] += '1' self.assertEqual( client.put(url, data, format='json').status_code, status.HTTP_200_OK ) def test_patch(self): urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})] data = { "code": "XXXX", } access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.patch(url, data, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: self.assertEqual( client.patch(url, data, format='json').status_code, status.HTTP_200_OK ) def test_delete(self): urls = [reverse('api:program-detail', kwargs={'pk': self.program_1.pk})] access = { "forbidden": [ self.anonymous_client, self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.data_engineer_1_client, self.data_engineer_2_client ], "allowed": [ self.admin_client, ] } for client in access['forbidden']: for url in urls: self.assertIn( client.delete(url, format='json').status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] ) for client in access['allowed']: for url in urls: self.assertEqual( client.delete(url, format='json').status_code, status.HTTP_204_NO_CONTENT )
true
true
f70d9c6098dd5a6a6057d72465295bbf61a9b443
261
py
Python
movies/serializers.py
jamesbond007dj/blog-api2
b4e1170f448c27528c48c955fde5998304e239d9
[ "MIT" ]
1
2020-01-22T21:46:25.000Z
2020-01-22T21:46:25.000Z
movies/serializers.py
jamesbond007dj/blog-api2
b4e1170f448c27528c48c955fde5998304e239d9
[ "MIT" ]
8
2020-01-23T00:58:40.000Z
2021-09-22T18:31:32.000Z
movies/serializers.py
jamesbond007dj/blog-api2
b4e1170f448c27528c48c955fde5998304e239d9
[ "MIT" ]
null
null
null
from rest_framework import serializers from .models import Movies class MoviesSerializer(serializers.ModelSerializer): class Meta: model = Movies fields = [ 'id' , 'user_main', 'title', 'director', 'acts', 'created_at' ]
29
73
0.643678
from rest_framework import serializers from .models import Movies class MoviesSerializer(serializers.ModelSerializer): class Meta: model = Movies fields = [ 'id' , 'user_main', 'title', 'director', 'acts', 'created_at' ]
true
true
f70d9c8948607fa6efa83a53137927b8ffedb1d3
1,705
py
Python
tests/settings.py
brmc/django-octopus
a37dbd9425f92ffbb19ce29647b6bc8d9be51598
[ "MIT" ]
36
2015-03-08T09:06:51.000Z
2021-07-31T04:19:53.000Z
tests/settings.py
brmc/django_octopus
a37dbd9425f92ffbb19ce29647b6bc8d9be51598
[ "MIT" ]
2
2017-01-21T17:25:18.000Z
2019-07-27T08:01:26.000Z
tests/settings.py
brmc/django_octopus
a37dbd9425f92ffbb19ce29647b6bc8d9be51598
[ "MIT" ]
1
2015-03-08T14:53:20.000Z
2015-03-08T14:53:20.000Z
import os INSTALLED_APPS = [ 'django.contrib.staticfiles', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.admin', 'octopus', 'test_app', 'django.contrib.sites' ] SECRET_KEY = '1' DEBUG = True STATIC_URL = '/static/' MEDIA_URL = '/media/' TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.request', ) APPEND_SLASHES = True root_dir = os.path.dirname(os.path.realpath(__file__)) STATIC_ROOT = os.path.join(root_dir, 'static') # STATICFILES_DIRS = [STATIC_ROOT] print(STATIC_ROOT) TEMPLATE_DIRECTORIES = (os.path.join(root_dir, 'test_app/templates')) MIDDLEWARE_CLASSES = ('django.middleware.csrf.CsrfViewMiddleware',) ROOT_URLCONF = "test_app.urls" DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.db', } } TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': TEMPLATE_DIRECTORIES, 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this # list if you haven't customized them: 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', ], }, }, ]
26.230769
74
0.637537
import os INSTALLED_APPS = [ 'django.contrib.staticfiles', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.admin', 'octopus', 'test_app', 'django.contrib.sites' ] SECRET_KEY = '1' DEBUG = True STATIC_URL = '/static/' MEDIA_URL = '/media/' TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.request', ) APPEND_SLASHES = True root_dir = os.path.dirname(os.path.realpath(__file__)) STATIC_ROOT = os.path.join(root_dir, 'static') print(STATIC_ROOT) TEMPLATE_DIRECTORIES = (os.path.join(root_dir, 'test_app/templates')) MIDDLEWARE_CLASSES = ('django.middleware.csrf.CsrfViewMiddleware',) ROOT_URLCONF = "test_app.urls" DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.db', } } TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': TEMPLATE_DIRECTORIES, 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', ], }, }, ]
true
true
f70d9cf7f25e83c30d0c8b68e0d3f94f33c9b726
11,212
py
Python
python/src/nnabla/utils/network.py
CeresSoft/nnabla
185b09d9dfde7d80a5c2296293b9044b74897e42
[ "Apache-2.0" ]
null
null
null
python/src/nnabla/utils/network.py
CeresSoft/nnabla
185b09d9dfde7d80a5c2296293b9044b74897e42
[ "Apache-2.0" ]
null
null
null
python/src/nnabla/utils/network.py
CeresSoft/nnabla
185b09d9dfde7d80a5c2296293b9044b74897e42
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2017 Sony Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nnabla.logger import logger import nnabla.function as F def print_network_traceback(funcs): logger.critical('Network traceback:') for i, func in enumerate(funcs): logger.critical('{}{}'.format( '->' if i == len(funcs) - 1 else ' ', func.name)) class Network: def setup_function(self, func): try: func.function_instance.setup( func.variable_inputs, func.variable_outputs) except: logger.critical('An error occurred while setup of function {} (nn.{}) in network {}'.format( func.name, func.function_instance.name, self.name)) logger.critical('Input variables:') for v in func.inputs: logger.critical(' {} (shape: {}, design_shape: {})'.format( v.name, str(v.variable_instance.shape), str(v.shape))) logger.critical('Output variables:') for v in func.outputs: logger.critical(' {} (shape: {}, design_shape: {})'.format( v.name, str(v.variable_instance.shape), str(v.shape))) raise # logger.debug('Setup: {} {}'.format(func.name, func.function_instance.name)) def get_forward_sequence(self, loss_variables): forward_sequence = [] for func in self.functions.values(): func.forward_complete = False for loss in loss_variables: self.__forward_recursive(forward_sequence, variable=loss) return forward_sequence def __forward_recursive(self, forward_sequence, variable=None, function=None): if not function and variable not in self.variable_inputs: return for func in [function] if function else self.variable_inputs[variable]: if func.forward_complete: continue for input_function in func.input_functions: self.__forward_recursive( forward_sequence, function=input_function) forward_sequence.append(func) func.forward_complete = True def forward(self, forward_sequence): for func in forward_sequence: try: self.forward_function(func) except: index = forward_sequence.index(func) print_network_traceback( forward_sequence[max(0, index - 4):index + 1]) raise def forward_function(self, func): try: # Uncomment when debugging expand_recurrent # print(func.name) # print(func.function_instance) # for n, inp in enumerate(func.variable_inputs): # print(' IN:', n, inp.shape, inp.d.flatten()[0]) func.function_instance.forward( func.variable_inputs, func.variable_outputs) # Uncomment when debugging expand_recurrent # for n, out in enumerate(func.variable_outputs): # print(' OUT:', n, out.shape, out.d.flatten()[0]) except: logger.critical('An error occurred while executing forward of function {} (nn.{}) in network {}'.format( func.name, func.function_instance.name, self.name)) raise def get_backward_sequence(self, loss_variables, parameter_variables_and_locallr): class BackwardSequence: loss_variables = [] variables = [] grad_variables = [] unused_variables = [] parameters = [] sequence = [] backward_sequence = BackwardSequence() backward_sequence.loss_variables = [ v.variable_instance for v in loss_variables] for p, lr in parameter_variables_and_locallr.items(): if lr > 0.0: backward_sequence.parameters.append(p.variable_instance) for func in self.functions.values(): func.backward_complete = False for p, local_lr in parameter_variables_and_locallr.items(): if local_lr > 0.0: self.__backward_recursive( backward_sequence, loss_variables, variable=p) for seq in backward_sequence.sequence: backward_sequence.variables.extend(seq.func.variable_outputs) for v in self.variables.values(): vi = v.variable_instance if vi not in backward_sequence.variables and vi not in backward_sequence.parameters: backward_sequence.unused_variables.append(vi) return backward_sequence def __backward_recursive(self, backward_sequence, loss_variables, variable=None, function=None): # logger.debug('bwcall: {}'.format(function.name if function else '')) if not function and variable not in self.variable_outputs: # terminal variable return variable in self.loss_variables diff_exists = False for func in [function] if function else self.variable_outputs[variable]: if func.backward_complete: diff_exists = True continue func.backward_complete = True for output_function in func.output_functions: if func.output_functions: diff = self.__backward_recursive( backward_sequence, loss_variables, function=output_function) diff_exists = diff_exists or diff else: # terminal function for v in loss_variables: diff_exists = diff_exists or (v in func.outputs) if diff_exists: if backward_sequence is not None: class BackwardSequenceItem: func = None accum_grad = [] seq = BackwardSequenceItem() seq.func = func for i, v in enumerate(func.variable_inputs): accum = ( v in backward_sequence.grad_variables or v in backward_sequence.parameters) and not func.function_instance.inplace_grad(i) seq.accum_grad.append(accum) if not v in backward_sequence.grad_variables: backward_sequence.grad_variables.append(v) backward_sequence.sequence.append(seq) return diff_exists def prepare_backward(self, backward_sequence, parameter_zero_grad=True): for v in backward_sequence.unused_variables: v.need_grad = False for p in backward_sequence.parameters: p.need_grad = True if parameter_zero_grad: p.grad.zero() for v in backward_sequence.variables: v.need_grad = True for l in backward_sequence.loss_variables: l.grad.fill(1.0 / l.size) def backward(self, backward_sequence, parameter_zero_grad=True): self.prepare_backward(backward_sequence, parameter_zero_grad) for seq in backward_sequence.sequence: try: self.backward_function(seq) except: index = backward_sequence.sequence.index(seq) print_network_traceback( [seq.func for seq in backward_sequence.sequence[max(0, index - 4):index + 1]]) raise def backward_function(self, seq): try: seq.func.function_instance.backward( seq.func.variable_inputs, seq.func.variable_outputs, seq.accum_grad) except: logger.critical('An error occurred while executing backward of function {} (nn.{}) in network {}'.format( seq.func.name, seq.func.function_instance.name, self.name)) raise # logger.debug('Backward: {} {}'.format(func.name, func.function_instance.name)) def setup(self, optimize=False): if optimize: for func in list(self.functions.values()): # remove identity layer if func.function_instance.name[0:8] == "Identity": assert(len(func.inputs) == 1) assert(len(func.outputs) == 1) # if the identity function is not terminal (keep terminal # identity function) if func.outputs[0] in self.variable_outputs: next_functions = self.variable_outputs[func.outputs[0]] self.variable_outputs[func.inputs[0]].remove(func) self.variable_outputs[ func.inputs[0]].extend(next_functions) for next_function in next_functions: next_function.inputs = [func.inputs[0] if v == func.outputs[ 0] else v for v in next_function.inputs] del self.functions[func.name] del self.variables[func.outputs[0].name] # create variable instances for variable in self.variables.values(): if variable.variable_instance.shape != variable.shape: if hasattr(variable.variable_instance, 'reset_shape'): variable.variable_instance.reset_shape( variable.shape, force=True) else: variable.variable_instance.reshape( variable.shape, force=True) # setup functions for i, func in enumerate(self.functions.values()): func.variable_inputs = [v.variable_instance for v in func.inputs] func.variable_outputs = [v.variable_instance for v in func.outputs] try: self.setup_function(func) except: print_network_traceback(list(self.functions.values())[ max(0, i - 4):i + 1]) raise # set link structure to each layer from itertools import chain for func in self.functions.values(): func.input_functions = list(chain.from_iterable( [self.variable_inputs[v] for v in func.inputs if v in self.variable_inputs])) func.output_functions = list(chain.from_iterable( [self.variable_outputs[v] for v in func.outputs if v in self.variable_outputs])) logger.debug(func.name) logger.debug(' in: {}'.format( [f.name for f in func.input_functions])) logger.debug(' out: {}'.format( [f.name for f in func.output_functions]))
45.392713
150
0.590528
from nnabla.logger import logger import nnabla.function as F def print_network_traceback(funcs): logger.critical('Network traceback:') for i, func in enumerate(funcs): logger.critical('{}{}'.format( '->' if i == len(funcs) - 1 else ' ', func.name)) class Network: def setup_function(self, func): try: func.function_instance.setup( func.variable_inputs, func.variable_outputs) except: logger.critical('An error occurred while setup of function {} (nn.{}) in network {}'.format( func.name, func.function_instance.name, self.name)) logger.critical('Input variables:') for v in func.inputs: logger.critical(' {} (shape: {}, design_shape: {})'.format( v.name, str(v.variable_instance.shape), str(v.shape))) logger.critical('Output variables:') for v in func.outputs: logger.critical(' {} (shape: {}, design_shape: {})'.format( v.name, str(v.variable_instance.shape), str(v.shape))) raise def get_forward_sequence(self, loss_variables): forward_sequence = [] for func in self.functions.values(): func.forward_complete = False for loss in loss_variables: self.__forward_recursive(forward_sequence, variable=loss) return forward_sequence def __forward_recursive(self, forward_sequence, variable=None, function=None): if not function and variable not in self.variable_inputs: return for func in [function] if function else self.variable_inputs[variable]: if func.forward_complete: continue for input_function in func.input_functions: self.__forward_recursive( forward_sequence, function=input_function) forward_sequence.append(func) func.forward_complete = True def forward(self, forward_sequence): for func in forward_sequence: try: self.forward_function(func) except: index = forward_sequence.index(func) print_network_traceback( forward_sequence[max(0, index - 4):index + 1]) raise def forward_function(self, func): try: func.function_instance.forward( func.variable_inputs, func.variable_outputs) except: logger.critical('An error occurred while executing forward of function {} (nn.{}) in network {}'.format( func.name, func.function_instance.name, self.name)) raise def get_backward_sequence(self, loss_variables, parameter_variables_and_locallr): class BackwardSequence: loss_variables = [] variables = [] grad_variables = [] unused_variables = [] parameters = [] sequence = [] backward_sequence = BackwardSequence() backward_sequence.loss_variables = [ v.variable_instance for v in loss_variables] for p, lr in parameter_variables_and_locallr.items(): if lr > 0.0: backward_sequence.parameters.append(p.variable_instance) for func in self.functions.values(): func.backward_complete = False for p, local_lr in parameter_variables_and_locallr.items(): if local_lr > 0.0: self.__backward_recursive( backward_sequence, loss_variables, variable=p) for seq in backward_sequence.sequence: backward_sequence.variables.extend(seq.func.variable_outputs) for v in self.variables.values(): vi = v.variable_instance if vi not in backward_sequence.variables and vi not in backward_sequence.parameters: backward_sequence.unused_variables.append(vi) return backward_sequence def __backward_recursive(self, backward_sequence, loss_variables, variable=None, function=None): if not function and variable not in self.variable_outputs: return variable in self.loss_variables diff_exists = False for func in [function] if function else self.variable_outputs[variable]: if func.backward_complete: diff_exists = True continue func.backward_complete = True for output_function in func.output_functions: if func.output_functions: diff = self.__backward_recursive( backward_sequence, loss_variables, function=output_function) diff_exists = diff_exists or diff else: for v in loss_variables: diff_exists = diff_exists or (v in func.outputs) if diff_exists: if backward_sequence is not None: class BackwardSequenceItem: func = None accum_grad = [] seq = BackwardSequenceItem() seq.func = func for i, v in enumerate(func.variable_inputs): accum = ( v in backward_sequence.grad_variables or v in backward_sequence.parameters) and not func.function_instance.inplace_grad(i) seq.accum_grad.append(accum) if not v in backward_sequence.grad_variables: backward_sequence.grad_variables.append(v) backward_sequence.sequence.append(seq) return diff_exists def prepare_backward(self, backward_sequence, parameter_zero_grad=True): for v in backward_sequence.unused_variables: v.need_grad = False for p in backward_sequence.parameters: p.need_grad = True if parameter_zero_grad: p.grad.zero() for v in backward_sequence.variables: v.need_grad = True for l in backward_sequence.loss_variables: l.grad.fill(1.0 / l.size) def backward(self, backward_sequence, parameter_zero_grad=True): self.prepare_backward(backward_sequence, parameter_zero_grad) for seq in backward_sequence.sequence: try: self.backward_function(seq) except: index = backward_sequence.sequence.index(seq) print_network_traceback( [seq.func for seq in backward_sequence.sequence[max(0, index - 4):index + 1]]) raise def backward_function(self, seq): try: seq.func.function_instance.backward( seq.func.variable_inputs, seq.func.variable_outputs, seq.accum_grad) except: logger.critical('An error occurred while executing backward of function {} (nn.{}) in network {}'.format( seq.func.name, seq.func.function_instance.name, self.name)) raise def setup(self, optimize=False): if optimize: for func in list(self.functions.values()): if func.function_instance.name[0:8] == "Identity": assert(len(func.inputs) == 1) assert(len(func.outputs) == 1) if func.outputs[0] in self.variable_outputs: next_functions = self.variable_outputs[func.outputs[0]] self.variable_outputs[func.inputs[0]].remove(func) self.variable_outputs[ func.inputs[0]].extend(next_functions) for next_function in next_functions: next_function.inputs = [func.inputs[0] if v == func.outputs[ 0] else v for v in next_function.inputs] del self.functions[func.name] del self.variables[func.outputs[0].name] for variable in self.variables.values(): if variable.variable_instance.shape != variable.shape: if hasattr(variable.variable_instance, 'reset_shape'): variable.variable_instance.reset_shape( variable.shape, force=True) else: variable.variable_instance.reshape( variable.shape, force=True) for i, func in enumerate(self.functions.values()): func.variable_inputs = [v.variable_instance for v in func.inputs] func.variable_outputs = [v.variable_instance for v in func.outputs] try: self.setup_function(func) except: print_network_traceback(list(self.functions.values())[ max(0, i - 4):i + 1]) raise from itertools import chain for func in self.functions.values(): func.input_functions = list(chain.from_iterable( [self.variable_inputs[v] for v in func.inputs if v in self.variable_inputs])) func.output_functions = list(chain.from_iterable( [self.variable_outputs[v] for v in func.outputs if v in self.variable_outputs])) logger.debug(func.name) logger.debug(' in: {}'.format( [f.name for f in func.input_functions])) logger.debug(' out: {}'.format( [f.name for f in func.output_functions]))
true
true
f70d9d467c7c7fa8a528741143dc4281a0fca11f
891
py
Python
thirdparty/org/apache/arrow/flatbuf/Interval.py
mrocklin/pygdf
2de9407427da9497ebdf8951a12857be0fab31bb
[ "Apache-2.0" ]
5
2018-10-17T20:28:42.000Z
2022-02-15T17:33:01.000Z
thirdparty/org/apache/arrow/flatbuf/Interval.py
mrocklin/pygdf
2de9407427da9497ebdf8951a12857be0fab31bb
[ "Apache-2.0" ]
19
2018-07-18T07:15:44.000Z
2021-02-22T17:00:18.000Z
thirdparty/org/apache/arrow/flatbuf/Interval.py
mrocklin/pygdf
2de9407427da9497ebdf8951a12857be0fab31bb
[ "Apache-2.0" ]
2
2020-05-01T09:54:34.000Z
2021-04-17T10:57:07.000Z
# automatically generated by the FlatBuffers compiler, do not modify # namespace: flatbuf import flatbuffers class Interval(object): __slots__ = ['_tab'] @classmethod def GetRootAsInterval(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Interval() x.Init(buf, n + offset) return x # Interval def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Interval def Unit(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos) return 0 def IntervalStart(builder): builder.StartObject(1) def IntervalAddUnit(builder, unit): builder.PrependInt16Slot(0, unit, 0) def IntervalEnd(builder): return builder.EndObject()
28.741935
88
0.67789
import flatbuffers class Interval(object): __slots__ = ['_tab'] @classmethod def GetRootAsInterval(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Interval() x.Init(buf, n + offset) return x def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def Unit(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos) return 0 def IntervalStart(builder): builder.StartObject(1) def IntervalAddUnit(builder, unit): builder.PrependInt16Slot(0, unit, 0) def IntervalEnd(builder): return builder.EndObject()
true
true
f70d9e0a3d17703610d440294f28a62f2040f948
2,209
py
Python
Backend/foodieshoot_api/users/forms.py
eamorgado/FoodieShoot
87172bc295edd93aa0448bc14ce85b2710dd4aab
[ "BSD-2-Clause" ]
null
null
null
Backend/foodieshoot_api/users/forms.py
eamorgado/FoodieShoot
87172bc295edd93aa0448bc14ce85b2710dd4aab
[ "BSD-2-Clause" ]
10
2020-06-06T01:56:57.000Z
2022-03-12T00:31:25.000Z
Backend/foodieshoot_api/users/forms.py
eamorgado/FoodieShoot
87172bc295edd93aa0448bc14ce85b2710dd4aab
[ "BSD-2-Clause" ]
1
2020-07-25T09:19:37.000Z
2020-07-25T09:19:37.000Z
import re from django import forms from django.utils.safestring import mark_safe from django.contrib.auth.models import User from django.contrib.auth.forms import UserCreationForm, UserChangeForm, ReadOnlyPasswordHashField from .models import Profile class UserRegisterForm(UserCreationForm): email = forms.EmailField(required=True) first_name = forms.Textarea() last_name = forms.Textarea() class Meta: model = User fields = ['username','email','first_name','last_name','password1','password2',] widgets = { 'username': forms.fields.TextInput(attrs={'placeholder': 'username'}), 'email': forms.fields.TextInput(attrs={'placeholder': 'example@foodieshoot.com'}), 'first_name': forms.fields.TextInput(attrs={'placeholder': 'First name'}), 'last_name': forms.fields.TextInput(attrs={'placeholder': 'Last name'}), } def clean_email(self): email = self.cleaned_data.get('email') username = self.cleaned_data.get('username') if email and User.objects.filter(email=email).exclude(username=username).exists(): raise forms.ValidationError(u'Email addresses must be unique.') return email class UserUpdateForm(forms.ModelForm): email = forms.EmailField() def __init__(self, *args, **kwargs): super(UserUpdateForm, self).__init__(*args, **kwargs) for fieldname in ['username','email',]: self.fields[fieldname].help_text = None class Meta: model = User fields = ['username', 'email',] def clean_email(self): email = self.cleaned_data.get('email') username = self.cleaned_data.get('username') if email and User.objects.filter(email=email).exclude(username=username).exists(): raise forms.ValidationError(u'Email addresses must be unique.') return email class ProfileUpdateForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(ProfileUpdateForm, self).__init__(*args, **kwargs) for fieldname in ['image',]: self.fields[fieldname].help_text = None class Meta: model = Profile fields = ['image']
38.754386
97
0.660027
import re from django import forms from django.utils.safestring import mark_safe from django.contrib.auth.models import User from django.contrib.auth.forms import UserCreationForm, UserChangeForm, ReadOnlyPasswordHashField from .models import Profile class UserRegisterForm(UserCreationForm): email = forms.EmailField(required=True) first_name = forms.Textarea() last_name = forms.Textarea() class Meta: model = User fields = ['username','email','first_name','last_name','password1','password2',] widgets = { 'username': forms.fields.TextInput(attrs={'placeholder': 'username'}), 'email': forms.fields.TextInput(attrs={'placeholder': 'example@foodieshoot.com'}), 'first_name': forms.fields.TextInput(attrs={'placeholder': 'First name'}), 'last_name': forms.fields.TextInput(attrs={'placeholder': 'Last name'}), } def clean_email(self): email = self.cleaned_data.get('email') username = self.cleaned_data.get('username') if email and User.objects.filter(email=email).exclude(username=username).exists(): raise forms.ValidationError(u'Email addresses must be unique.') return email class UserUpdateForm(forms.ModelForm): email = forms.EmailField() def __init__(self, *args, **kwargs): super(UserUpdateForm, self).__init__(*args, **kwargs) for fieldname in ['username','email',]: self.fields[fieldname].help_text = None class Meta: model = User fields = ['username', 'email',] def clean_email(self): email = self.cleaned_data.get('email') username = self.cleaned_data.get('username') if email and User.objects.filter(email=email).exclude(username=username).exists(): raise forms.ValidationError(u'Email addresses must be unique.') return email class ProfileUpdateForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(ProfileUpdateForm, self).__init__(*args, **kwargs) for fieldname in ['image',]: self.fields[fieldname].help_text = None class Meta: model = Profile fields = ['image']
true
true
f70d9f2a0ed020186d7f2a4487e673fdce38dca4
3,025
py
Python
src/logChunk/LanguageSwitcherFactory.py
saledouble/gitcproc
009d614fa1a56dc75acb0277ecc98ea27e91750b
[ "BSD-3-Clause" ]
null
null
null
src/logChunk/LanguageSwitcherFactory.py
saledouble/gitcproc
009d614fa1a56dc75acb0277ecc98ea27e91750b
[ "BSD-3-Clause" ]
3
2020-11-12T14:42:22.000Z
2021-01-13T22:30:23.000Z
src/logChunk/LanguageSwitcherFactory.py
saledouble/gitcproc
009d614fa1a56dc75acb0277ecc98ea27e91750b
[ "BSD-3-Clause" ]
2
2020-11-11T22:27:28.000Z
2021-01-13T21:07:14.000Z
import sys import os import yaml #PyYAML must be installed import languageSwitcher import CPlusPlusLanguageSwitcher import CLanguageSwitcher import JavaLanguageSwitcher import PythonLanguageSwitcher from UnsupportedLanguageException import * sys.path.append("../util") from Util import supportedLanguages class LanguageSwitcherFactory: extMap = {} @staticmethod def loadLanguageMap(langFile = "../../Resources/languages.yml"): with open(langFile, 'r') as f: LanguageSwitcherFactory.extMap = yaml.safe_load(f) #Create a new language switcher of the correct type. @staticmethod def createLS(language): if(LanguageSwitcherFactory.extMap == {}): LanguageSwitcherFactory.loadLanguageMap("../../Resources/languages.yml") return LanguageSwitcherFactory.determineLanguage(language) #String -> String #Given either a language name or a file extension for a language, return a normalized language string #to use @staticmethod def determineLanguage(language): #Replace these with tokens? language = language.strip() #Check for names if(language.lower() == "c++" or language.lower() in LanguageSwitcherFactory.extMap["C++"]["extensions"]): return CPlusPlusLanguageSwitcher.CPlusPlusLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C++"]["extensions"])) elif(language.lower() == "c" or language.lower() in LanguageSwitcherFactory.extMap["C"]["extensions"]): return CLanguageSwitcher.CLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C"]["extensions"])) elif(language.lower() == "java" or language.lower() in LanguageSwitcherFactory.extMap["Java"]["extensions"]): return JavaLanguageSwitcher.JavaLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Java"]["extensions"])) elif(language.lower() == "python" or language.lower() in LanguageSwitcherFactory.extMap["Python"]["extensions"]): return PythonLanguageSwitcher.PythonLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Python"]["extensions"])) else: print((LanguageSwitcherFactory.extMap["C"]["extensions"])) raise UnsupportedLanguageException(language + " not yet supported.") @staticmethod def getExtensions(languages): ''' Given some languages, return the set of extensions associated with them. If no languages are given or none in the set are recognized, return the extensions for all recognized languages. If only a portion are recognized, return the set of extensions for just these languages. ''' extensions = set() for l in languages: try: extensions.update(LanguageSwitcherFactory.createLS(l).getExtensions()) except UnsupportedLanguageException: #skip unrecognized languages pass if (len(extensions) == 0): return getExtensions(supportedLanguages) else: return extensions
43.84058
128
0.698843
import sys import os import yaml import languageSwitcher import CPlusPlusLanguageSwitcher import CLanguageSwitcher import JavaLanguageSwitcher import PythonLanguageSwitcher from UnsupportedLanguageException import * sys.path.append("../util") from Util import supportedLanguages class LanguageSwitcherFactory: extMap = {} @staticmethod def loadLanguageMap(langFile = "../../Resources/languages.yml"): with open(langFile, 'r') as f: LanguageSwitcherFactory.extMap = yaml.safe_load(f) @staticmethod def createLS(language): if(LanguageSwitcherFactory.extMap == {}): LanguageSwitcherFactory.loadLanguageMap("../../Resources/languages.yml") return LanguageSwitcherFactory.determineLanguage(language) @staticmethod def determineLanguage(language): language = language.strip() if(language.lower() == "c++" or language.lower() in LanguageSwitcherFactory.extMap["C++"]["extensions"]): return CPlusPlusLanguageSwitcher.CPlusPlusLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C++"]["extensions"])) elif(language.lower() == "c" or language.lower() in LanguageSwitcherFactory.extMap["C"]["extensions"]): return CLanguageSwitcher.CLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C"]["extensions"])) elif(language.lower() == "java" or language.lower() in LanguageSwitcherFactory.extMap["Java"]["extensions"]): return JavaLanguageSwitcher.JavaLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Java"]["extensions"])) elif(language.lower() == "python" or language.lower() in LanguageSwitcherFactory.extMap["Python"]["extensions"]): return PythonLanguageSwitcher.PythonLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Python"]["extensions"])) else: print((LanguageSwitcherFactory.extMap["C"]["extensions"])) raise UnsupportedLanguageException(language + " not yet supported.") @staticmethod def getExtensions(languages): extensions = set() for l in languages: try: extensions.update(LanguageSwitcherFactory.createLS(l).getExtensions()) except UnsupportedLanguageException: pass if (len(extensions) == 0): return getExtensions(supportedLanguages) else: return extensions
true
true
f70da0756d9a21f26d787bc1f5bd830aee541599
566
py
Python
src/contexts/kms/cryptokeys/domain/create_one/CryptoInvalidValueError.py
parada3desu/foxy-key-broker
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
[ "Apache-2.0" ]
null
null
null
src/contexts/kms/cryptokeys/domain/create_one/CryptoInvalidValueError.py
parada3desu/foxy-key-broker
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
[ "Apache-2.0" ]
null
null
null
src/contexts/kms/cryptokeys/domain/create_one/CryptoInvalidValueError.py
parada3desu/foxy-key-broker
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
[ "Apache-2.0" ]
null
null
null
from typing import Union, Dict, List from src.contexts.shared.domain.errors.DomainError import DomainError class CryptoKeyInvalidValueError(DomainError): ERROR_ID = '8fd818c5-10dc-4639-82ac-d4b37394517d' def __init__(self, msg: str = None): if msg is None: msg = 'Invalid value for CryptoKey found.' self.message = msg def to_primitives(self) -> Union[Dict, List]: return { 'message': self.message, 'id': self.ERROR_ID, } def get_id(self) -> str: return self.ERROR_ID
25.727273
69
0.634276
from typing import Union, Dict, List from src.contexts.shared.domain.errors.DomainError import DomainError class CryptoKeyInvalidValueError(DomainError): ERROR_ID = '8fd818c5-10dc-4639-82ac-d4b37394517d' def __init__(self, msg: str = None): if msg is None: msg = 'Invalid value for CryptoKey found.' self.message = msg def to_primitives(self) -> Union[Dict, List]: return { 'message': self.message, 'id': self.ERROR_ID, } def get_id(self) -> str: return self.ERROR_ID
true
true
f70da14696e1d9db2fc019e8d79d9e4912df1421
122
py
Python
pycolfin/__init__.py
patpatpatpatpat/pycolfin
80ca7c226db6352ac9d0dd062ff02dc85d629f51
[ "MIT" ]
2
2016-08-29T03:14:38.000Z
2017-11-03T13:07:02.000Z
pycolfin/__init__.py
patpatpatpatpat/pycolfin
80ca7c226db6352ac9d0dd062ff02dc85d629f51
[ "MIT" ]
10
2016-07-18T08:47:31.000Z
2020-12-09T10:23:58.000Z
pycolfin/__init__.py
patpatpatpatpat/pycolfin
80ca7c226db6352ac9d0dd062ff02dc85d629f51
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = 'Ed Patrick Tan' __email__ = 'pat.keeps.looking.up@gmail.com' __version__ = '0.1.0'
20.333333
44
0.655738
__author__ = 'Ed Patrick Tan' __email__ = 'pat.keeps.looking.up@gmail.com' __version__ = '0.1.0'
true
true
f70da2799444bc2f78a541b418d0dfd6472ce7be
1,312
py
Python
auth/setup.py
tombriden/google-music-manager
fcdd69eef51cffd65cc1ad8616ff15cb55619a07
[ "MIT" ]
null
null
null
auth/setup.py
tombriden/google-music-manager
fcdd69eef51cffd65cc1ad8616ff15cb55619a07
[ "MIT" ]
null
null
null
auth/setup.py
tombriden/google-music-manager
fcdd69eef51cffd65cc1ad8616ff15cb55619a07
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup, find_packages __version__ = '1.0.1' setup( name='google_music_manager_auth', python_requires=">=3", version=__version__, packages=find_packages(), author="Jay MOULIN", author_email="jaymoulin@gmail.com", description="Google MusicManager package to manage your music library to Google Music - Auth module", long_description=open('README.rst').read(), install_requires=["gmusicapi"], include_package_data=True, url='http://github.com/jaymoulin/google-music-manager/', classifiers=[ "Development Status :: 5 - Production/Stable", "Programming Language :: Python", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Topic :: Communications :: File Sharing", "Topic :: Artistic Software", "Topic :: Internet :: File Transfer Protocol (FTP)", "Topic :: Home Automation", "Topic :: Internet", "Topic :: Multimedia :: Sound/Audio", ], entry_points={ 'console_scripts': [ 'google-music-auth = google_music_manager_auth.auth:main', ], }, license="MIT", )
32
105
0.625762
from setuptools import setup, find_packages __version__ = '1.0.1' setup( name='google_music_manager_auth', python_requires=">=3", version=__version__, packages=find_packages(), author="Jay MOULIN", author_email="jaymoulin@gmail.com", description="Google MusicManager package to manage your music library to Google Music - Auth module", long_description=open('README.rst').read(), install_requires=["gmusicapi"], include_package_data=True, url='http://github.com/jaymoulin/google-music-manager/', classifiers=[ "Development Status :: 5 - Production/Stable", "Programming Language :: Python", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Topic :: Communications :: File Sharing", "Topic :: Artistic Software", "Topic :: Internet :: File Transfer Protocol (FTP)", "Topic :: Home Automation", "Topic :: Internet", "Topic :: Multimedia :: Sound/Audio", ], entry_points={ 'console_scripts': [ 'google-music-auth = google_music_manager_auth.auth:main', ], }, license="MIT", )
true
true
f70da28ac6692ac932239b685896870375bd2ff4
49,635
py
Python
src/licensedcode/models.py
abhi27-web/scancode-toolk
66f59168fb7824ab9be332712dcaf16340e1aa22
[ "Apache-2.0", "CC0-1.0" ]
null
null
null
src/licensedcode/models.py
abhi27-web/scancode-toolk
66f59168fb7824ab9be332712dcaf16340e1aa22
[ "Apache-2.0", "CC0-1.0" ]
null
null
null
src/licensedcode/models.py
abhi27-web/scancode-toolk
66f59168fb7824ab9be332712dcaf16340e1aa22
[ "Apache-2.0", "CC0-1.0" ]
null
null
null
# # Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from collections import Counter from collections import defaultdict from collections import OrderedDict from functools import partial from itertools import chain import io from operator import itemgetter from os.path import abspath from os.path import dirname from os.path import exists from os.path import join import traceback import attr from license_expression import Licensing from commoncode.fileutils import copyfile from commoncode.fileutils import file_base_name from commoncode.fileutils import file_name from commoncode.fileutils import resource_iter from commoncode import saneyaml from licensedcode import MIN_MATCH_HIGH_LENGTH from licensedcode import MIN_MATCH_LENGTH from licensedcode import SMALL_RULE from licensedcode.tokenize import query_tokenizer from textcode.analysis import numbered_text_lines """ Reference License and license Rule structures persisted as a combo of a YAML data file and one or more text files containing license or notice texts. """ # Set to True to print more detailed representations of objects when tracing TRACE_REPR = False # these are globals but always side-by-side with the code so do no not move them around data_dir = join(abspath(dirname(__file__)), 'data') licenses_data_dir = join(data_dir, 'licenses') rules_data_dir = join(data_dir, 'rules') FOSS_CATEGORIES = set([ 'Copyleft', 'Copyleft Limited', 'Patent License', 'Permissive', 'Public Domain', ]) OTHER_CATEGORIES = set([ 'Commercial', 'Proprietary Free', 'Free Restricted', 'Source-available', 'Unstated License', ]) CATEGORIES = FOSS_CATEGORIES | OTHER_CATEGORIES @attr.s(slots=True) class License(object): """ A license consists of these files, where <key> is the license key: - <key>.yml : the license data in YAML - <key>.LICENSE: the license text A License object is identified by a unique `key` and its data stored in the `src_dir` directory. Key is a lower-case unique ascii string. """ __attrib = partial(attr.ib, repr=False) # unique key: lower case ASCII characters, digits, underscore and dots. key = attr.ib(default=None, repr=True) src_dir = __attrib(default=licenses_data_dir) # if this is a deprecated license, add also notes explaining why is_deprecated = __attrib(default=False) # if this license text is not in English, set this field to a two letter # ISO 639-1 language code https://en.wikipedia.org/wiki/ISO_639-1 # NOTE: this is not yet supported. # NOTE: each translation of a license text MUST have a different license key language = __attrib(default='en') # commonly used short name, often abbreviated. short_name = __attrib(default=None) # full name. name = __attrib(default=None) # Permissive, Copyleft, etc category = __attrib(default=None) owner = __attrib(default=None) homepage_url = __attrib(default=None) notes = __attrib(default=None) # if this is a license exception, the license key this exception applies to is_exception = __attrib(default=False) # SPDX key for SPDX licenses spdx_license_key = __attrib(default=None) # list of other keys, such as deprecated ones other_spdx_license_keys = __attrib(default=attr.Factory(list)) # OSI License Key osi_license_key = __attrib(default=None) # Various URLs for info text_urls = __attrib(default=attr.Factory(list)) osi_url = __attrib(default=None) faq_url = __attrib(default=None) other_urls = __attrib(default=attr.Factory(list)) # various alternate keys for this license key_aliases = __attrib(default=attr.Factory(list)) minimum_coverage = __attrib(default=0) standard_notice = __attrib(default=None) # lists of copuyrights, emails and URLs that can be ignored when detected # in this license as they are part of the license or rule text itself ignorable_copyrights = __attrib(default=attr.Factory(list)) ignorable_authors = __attrib(default=attr.Factory(list)) ignorable_holders = __attrib(default=attr.Factory(list)) ignorable_urls = __attrib(default=attr.Factory(list)) ignorable_emails = __attrib(default=attr.Factory(list)) # data file paths and known extensions data_file = __attrib(default=None) text_file = __attrib(default=None) def __attrs_post_init__(self, *args, **kwargs): if self.src_dir: self.set_file_paths() if exists(self.data_file): self.load() def set_file_paths(self): self.data_file = join(self.src_dir, self.key + '.yml') self.text_file = join(self.src_dir, self.key + '.LICENSE') def relocate(self, target_dir, new_key=None): """ Return f copy of this license object relocated to f new `src_dir`. The data and license text files are persisted in the new `src_dir`. """ if not target_dir or target_dir == self.src_dir: raise ValueError( 'Cannot relocate {} License to empty directory or same directory.'.format(self.key)) if new_key: key = new_key else: key = self.key newl = License(key, target_dir) # copy fields excluded_fields = ('key', 'src_dir', 'data_file', 'text_file',) all_fields = attr.fields(self.__class__) attrs = [f.name for f in all_fields if f.name not in excluded_fields] for name in attrs: setattr(newl, name, getattr(self, name)) # save it all to files if self.text: copyfile(self.text_file, newl.text_file) newl.dump() return newl def update(self, mapping): for k, v in mapping.items(): setattr(self, k, v) def __copy__(self): oldl = self.to_dict() newl = License(key=self.key) newl.update(oldl) return newl @property def text(self): """ License text, re-loaded on demand. """ return self._read_text(self.text_file) def to_dict(self): """ Return an OrderedDict of license data (excluding texts). Fields with empty values are not included. """ # do not dump false, empties and paths def dict_fields(attr, value): if not value: return False if attr.name in ('data_file', 'text_file', 'src_dir',): return False # default to English if attr.name == 'language' and value == 'en': return False if attr.name == 'minimum_coverage' and value == 100: return False return True data = attr.asdict(self, filter=dict_fields, dict_factory=OrderedDict) cv = data.get('minimum_coverage') if cv and isinstance(cv, float) and int(cv) == cv: cv = int(cv) data['minimum_coverage'] = cv return data def dump(self): """ Dump a representation of this license as two files: - <key>.yml : the license data in YAML - <key>.LICENSE: the license text """ def write(location, byte_string): # we write as binary because rules and licenses texts and data are UTF-8-encoded bytes with io.open(location, 'wb') as of: of.write(byte_string) as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8') write(self.data_file, as_yaml) if self.text: write(self.text_file, self.text.encode('utf-8')) def load(self): """ Populate license data from a YAML file stored in of self.src_dir. Does not load text files. Unknown fields are ignored and not bound to the License object. """ try: with io.open(self.data_file, encoding='utf-8') as f: data = saneyaml.load(f.read()) numeric_keys = ('minimum_coverage', 'relevance') for k, v in data.items(): if k in numeric_keys: v = int(v) if k == 'key': assert self.key == v, 'Inconsistent YAML key and file names for %r' % self.key setattr(self, k, v) except Exception as e: # this is a rare case: fail loudly print() print('#############################') print('INVALID LICENSE YAML FILE:', 'file://' + self.data_file) print('#############################') print(e) print('#############################') raise def _read_text(self, location): if not exists(location): text = '' else: with io.open(location, encoding='utf-8') as f: text = f.read() return text def spdx_keys(self): """ Yield SPDX keys for this license. """ if self.spdx_license_key: yield self.spdx_license_key for key in self.other_spdx_license_keys: yield key @staticmethod def validate(licenses, verbose=False, no_dupe_urls=False): """ Check that licenses are valid. `licenses` is a mapping of key -> License. Return dictionaries of infos, errors and warnings mapping a license key to validation issue messages. Print messages if verbose is True. NOTE: we DO NOT run this validation as part of the loading or construction of License objects. Instead this is invoked ONLY as part of the test suite. """ infos = defaultdict(list) warnings = defaultdict(list) errors = defaultdict(list) # used for global dedupe of texts by_spdx_key = defaultdict(list) by_text = defaultdict(list) by_short_name = defaultdict(list) by_name = defaultdict(list) for key, lic in licenses.items(): warn = warnings[key].append info = infos[key].append error = errors[key].append by_name[lic.name].append(lic) by_short_name[lic.short_name].append(lic) if not lic.short_name: error('No short name') if not lic.name: error('No name') if not lic.category: error('No category') if lic.category and lic.category not in CATEGORIES: cats = '\n'.join(sorted(CATEGORIES)) error('Unknown license category: {}.\nUse one of these valid categories:\n{}'.format(lic.category, cats)) if not lic.owner: error('No owner') # URLS dedupe and consistency if no_dupe_urls: if lic.text_urls and not all(lic.text_urls): warn('Some empty text_urls values') if lic.other_urls and not all(lic.other_urls): warn('Some empty other_urls values') # redundant URLs used multiple times if lic.homepage_url: if lic.homepage_url in lic.text_urls: warn('Homepage URL also in text_urls') if lic.homepage_url in lic.other_urls: warn('Homepage URL also in other_urls') if lic.homepage_url == lic.faq_url: warn('Homepage URL same as faq_url') if lic.homepage_url == lic.osi_url: warn('Homepage URL same as osi_url') if lic.osi_url or lic.faq_url: if lic.osi_url == lic.faq_url: warn('osi_url same as faq_url') all_licenses = lic.text_urls + lic.other_urls for url in lic.osi_url, lic.faq_url, lic.homepage_url: if url: all_licenses.append(url) if not len(all_licenses) == len(set(all_licenses)): warn('Some duplicated URLs') # local text consistency text = lic.text license_qtokens = tuple(query_tokenizer(text)) if not license_qtokens: info('No license text') else: # for global dedupe by_text[license_qtokens].append(key + ': TEXT') # SPDX consistency if lic.spdx_license_key: by_spdx_key[lic.spdx_license_key].append(key) for oslk in lic.other_spdx_license_keys: by_spdx_key[oslk].append(key) # global SPDX consistency multiple_spdx_keys_used = {k: v for k, v in by_spdx_key.items() if len(v) > 1} if multiple_spdx_keys_used: for k, lkeys in multiple_spdx_keys_used.items(): errors['GLOBAL'].append('SPDX key: ' + k + ' used in multiple licenses: ' + ', '.join(sorted(lkeys))) # global text dedupe multiple_texts = {k: v for k, v in by_text.items() if len(v) > 1} if multiple_texts: for k, msgs in multiple_texts.items(): errors['GLOBAL'].append('Duplicate texts in multiple licenses:' + ', '.join(sorted(msgs))) # global short_name dedupe for short_name, licenses in by_short_name.items(): if len(licenses) == 1: continue errors['GLOBAL'].append('Duplicate short name:' + short_name + ' in licenses:' + ', '.join(l.key for l in licenses)) # global name dedupe for name, licenses in by_name.items(): if len(licenses) == 1: continue errors['GLOBAL'].append('Duplicate name:' + name + ' in licenses:' + ', '.join(l.key for l in licenses)) errors = {k: v for k, v in errors.items() if v} warnings = {k: v for k, v in warnings.items() if v} infos = {k: v for k, v in infos.items() if v} if verbose: print('Licenses validation errors:') for key, msgs in sorted(errors.items()): print('ERRORS for:', key, ':', '\n'.join(msgs)) print('Licenses validation warnings:') for key, msgs in sorted(warnings.items()): print('WARNINGS for:', key, ':', '\n'.join(msgs)) print('Licenses validation infos:') for key, msgs in sorted(infos.items()): print('INFOS for:', key, ':', '\n'.join(msgs)) return errors, warnings, infos def ignore_editor_tmp_files(location): return location.endswith('.swp') def load_licenses(licenses_data_dir=licenses_data_dir , with_deprecated=False): """ Return a mapping of key -> license objects, loaded from license files. Raise Exceptions if there are dangling orphaned files. """ licenses = {} used_files = set() all_files = set(resource_iter(licenses_data_dir, ignored=ignore_editor_tmp_files, with_dirs=False)) for data_file in sorted(all_files): if data_file.endswith('.yml'): key = file_base_name(data_file) lic = License(key, licenses_data_dir) used_files.add(data_file) if exists(lic.text_file): used_files.add(lic.text_file) if not with_deprecated and lic.is_deprecated: continue licenses[key] = lic dangling = all_files.difference(used_files) if dangling: msg = 'Some License data or text files are orphaned in "{}".\n'.format(licenses_data_dir) msg += '\n'.join('file://{}'.format(f) for f in sorted(dangling)) raise Exception(msg) return licenses def get_rules(licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir): """ Yield Rule objects loaded from license files found in `licenses_data_dir` and rule files fourn in `rules_data_dir`. Raise a Exceptions if a rule is inconsistent or incorrect. """ from licensedcode.cache import get_licenses_db licenses = get_licenses_db(licenses_data_dir=licenses_data_dir) rules = list(load_rules(rules_data_dir=rules_data_dir)) check_rules_integrity(rules, licenses) licenses_as_rules = build_rules_from_licenses(licenses) return chain(licenses_as_rules, rules) class MissingLicenses(Exception): pass class MissingFlags(Exception): pass def check_rules_integrity(rules, licenses_by_key): """ Given a lists of `rules`, check that all the rule license keys reference a known license from a mapping of `licenses_by_key `(key->license). Raise a MissingLicense exception with a message containing the list of rule files without a corresponding license. """ invalid_rules = defaultdict(set) rules_without_flags = set() for rule in rules: unknown_keys = [key for key in rule.license_keys() if key not in licenses_by_key] if unknown_keys: invalid_rules[rule.data_file].update(unknown_keys) if not rule.has_flags and not (rule.is_negative or rule.is_false_positive): rules_without_flags.add(rule.data_file) if invalid_rules: invalid_rules = ( ' '.join(keys) + '\n' + 'file://' + data_file + '\n' + 'file://' + data_file.replace('.yml', '.RULE') + '\n' for data_file, keys in invalid_rules.items() if keys) msg = 'Rules referencing missing licenses:\n' + '\n'.join(sorted(invalid_rules)) raise MissingLicenses(msg) if rules_without_flags: invalid_rules = ( 'file://' + data_file + '\n' + 'file://' + data_file.replace('.yml', '.RULE') + '\n' for data_file in sorted(rules_without_flags)) msg = 'Rules without is_license_xxx flags:\n' + '\n'.join(sorted(invalid_rules)) raise MissingFlags(msg) def build_rules_from_licenses(licenses): """ Return an iterable of rules built from each license text from a `licenses` iterable of license objects. """ for license_key, license_obj in licenses.items(): text_file = join(license_obj.src_dir, license_obj.text_file) if exists(text_file): minimum_coverage = license_obj.minimum_coverage or 0 yield Rule( text_file=text_file, license_expression=license_key, has_stored_relevance=False, relevance=100, has_stored_minimum_coverage=bool(minimum_coverage), minimum_coverage=minimum_coverage, is_license=True, is_license_text=True, ignorable_copyrights=license_obj.ignorable_copyrights, ignorable_holders=license_obj.ignorable_holders, ignorable_authors=license_obj.ignorable_authors, ignorable_urls=license_obj.ignorable_urls, ignorable_emails=license_obj.ignorable_emails, ) def get_all_spdx_keys(licenses): """ Return an iterable of SPDX license keys collected from a `licenses` iterable of license objects. """ for lic in licenses.values(): for spdx_key in lic.spdx_keys(): yield spdx_key def get_essential_spdx_tokens(): """ Yield essential SPDX tokens. """ yield 'spdx' yield 'license' yield 'licence' yield 'identifier' yield 'licenseref' def get_all_spdx_key_tokens(licenses): """ Yield token strings collected from a `licenses` iterable of license objects' SPDX license keys. """ for tok in get_essential_spdx_tokens(): yield tok for spdx_key in get_all_spdx_keys(licenses): for token in query_tokenizer(spdx_key): yield token def load_rules(rules_data_dir=rules_data_dir): """ Return an iterable of rules loaded from rule files. """ # TODO: OPTIMIZE: create a graph of rules to account for containment and # similarity clusters? seen_files = set() processed_files = set() lower_case_files = set() case_problems = set() space_problems = [] model_errors = [] for data_file in resource_iter(rules_data_dir, with_dirs=False): if data_file.endswith('.yml'): base_name = file_base_name(data_file) if ' ' in base_name: space_problems.append(data_file) rule_file = join(rules_data_dir, base_name + '.RULE') try: rule = Rule(data_file=data_file, text_file=rule_file) yield rule except Exception as re: model_errors.append(str(re)) # accumulate sets to ensures we do not have illegal names or extra # orphaned files data_lower = data_file.lower() if data_lower in lower_case_files: case_problems.add(data_lower) else: lower_case_files.add(data_lower) rule_lower = rule_file.lower() if rule_lower in lower_case_files: case_problems.add(rule_lower) else: lower_case_files.add(rule_lower) processed_files.update([data_file, rule_file]) if not data_file.endswith('~'): seen_files.add(data_file) unknown_files = seen_files - processed_files if unknown_files or case_problems or model_errors or space_problems: msg = '' if model_errors: errors = '\n'.join(model_errors) msg += '\nInvalid rule YAML in directory: %(rules_data_dir)r\n%(errors)s' % locals() if unknown_files: files = '\n'.join(sorted('file://' + f for f in unknown_files)) msg += '\nOrphaned files in rule directory: %(rules_data_dir)r\n%(files)s' % locals() if case_problems: files = '\n'.join(sorted('file://' + f for f in case_problems)) msg += '\nRule files with non-unique name ignoring casein rule directory: %(rules_data_dir)r\n%(files)s' % locals() if space_problems: files = '\n'.join(sorted('"file://' + f + '"' for f in space_problems)) msg += '\nRule files name cannot contain spaces: %(rules_data_dir)r\n%(files)s' % locals() raise Exception(msg) @attr.s(slots=True) class Rule(object): """ A detection rule object is a text to use for detection and corresponding detected licenses and metadata. """ licensing = Licensing() ########### # FIXME: !!! TWO RULES MAY DIFFER BECAUSE THEY ARE UPDATED BY INDEXING ########### # optional rule id int typically assigned at indexing time rid = attr.ib(default=None, repr=TRACE_REPR) # unique identifier identifier = attr.ib(default=None) # License expression string license_expression = attr.ib(default=None) # License expression object, created at build time license_expression_object = attr.ib(default=None, repr=False) # an indication of what this rule importance is (e.g. how important is its # text when detected as a licensing clue) as one of several flags: # for a license full text: this provides the highest level of confidence wrt # detection is_license_text = attr.ib(default=False, repr=False) # for a license notice: this provides a strong confidence wrt detection is_license_notice = attr.ib(default=False, repr=False) # reference for a mere short license reference such as its bare name or a URL # this provides a weak confidence wrt detection is_license_reference = attr.ib(default=False, repr=False) # tag for a structured licensing tag such as a package manifest metadata or # an SPDX license identifier or similar package manifest tag # this provides a strong confidence wrt detection is_license_tag = attr.ib(default=False, repr=False) # is this rule text a false positive when matched? it will filtered out at # the end if matched is_false_positive = attr.ib(default=False, repr=False) # is this rule text a negative rule? it will be removed from the matchable # text the start if matched is_negative = attr.ib(default=False, repr=False) # is this rule text only to be matched with a minimum coverage e.g. a # minimum proportion of tokens as a float between 0 and 100 where 100 means # all tokens must be matched and a smaller value means a smaller propertion # of matched tokens is acceptable. this is computed unless this is provided # here. minimum_coverage = attr.ib(default=0) has_stored_minimum_coverage = attr.ib(default=False, repr=False) # same as minimum_coverage but divided/100 _minimum_containment = attr.ib(default=0, repr=False) # Can this rule be matched if there are unknown words in its matched range? # The default is to allow known and unknown words. Unknown words are words # that do not exist in the text of any indexed license or license detection # rule. only_known_words = attr.ib(default=False) # what is the relevance of a match to this rule text? a float between 0 and # 100 where 100 means highly relevant and 0 menas not relevant at all. # For instance a match to the "gpl" or the "cpol" words have a fairly low # relevance as they are a weak indication of an actual license and could be # a false positive. In somce cases, this may even be used to discard obvious # false positive matches automatically. relevance = attr.ib(default=100) has_stored_relevance = attr.ib(default=False, repr=False) # The rule contains a reference to some file name that comtains the text referenced_filenames = attr.ib(default=attr.Factory(list), repr=False) # optional, free text notes = attr.ib(default=None, repr=False) # set to True if the rule is built from a .LICENSE full text is_license = attr.ib(default=False, repr=False) # lists of copuyrights, emails and URLs that can be ignored when detected # in this license as they are part of the license or rule text itself ignorable_copyrights = attr.ib(default=attr.Factory(list), repr=False) ignorable_holders = attr.ib(default=attr.Factory(list), repr=False) ignorable_authors = attr.ib(default=attr.Factory(list), repr=False) ignorable_urls = attr.ib(default=attr.Factory(list), repr=False) ignorable_emails = attr.ib(default=attr.Factory(list), repr=False) ########################################################################### # path to the YAML data file for this rule data_file = attr.ib(default=None, repr=False) # path to the rule text file text_file = attr.ib(default=None, repr=False) # text of this rule for special cases where the rule is not backed by a file: # for SPDX license expression dynamic rules or testing stored_text = attr.ib(default=None, repr=False) # These attributes are computed upon text loading or setting the thresholds ########################################################################### # lengths in tokens length = attr.ib(default=0) min_matched_length = attr.ib(default=0, repr=TRACE_REPR) high_length = attr.ib(default=0, repr=TRACE_REPR) min_high_matched_length = attr.ib(default=0, repr=TRACE_REPR) # lengths in unique token. length_unique = attr.ib(default=0, repr=TRACE_REPR) min_matched_length_unique = attr.ib(default=0, repr=TRACE_REPR) high_length_unique = attr.ib(default=0, repr=TRACE_REPR) min_high_matched_length_unique = attr.ib(default=0, repr=TRACE_REPR) is_small = attr.ib(default=False, repr=TRACE_REPR) has_computed_thresholds = attr.ib(default=False, repr=False) def get_length(self, unique=False): return self.length_unique if unique else self.length def get_min_matched_length(self, unique=False): return (self.min_matched_length_unique if unique else self.min_matched_length) def get_high_length(self, unique=False): return self.high_length_unique if unique else self.high_length def get_min_high_matched_length(self, unique=False): return (self.min_high_matched_length_unique if unique else self.min_high_matched_length) def __attrs_post_init__(self, *args, **kwargs): if not self.text_file: # for SPDX or tests only if not self.stored_text : raise Exception('Invalid rule without its corresponding text file: {}'.format(self)) self.identifier = '_tst_' + str(len(self.stored_text)) else: self.identifier = file_name(self.text_file) if self.data_file: try: self.load() except Exception as e: data_file = self.data_file trace = traceback.format_exc() message = 'While loading: file://{data_file}\n{trace}'.format(**locals()) raise Exception(message) if self.relevance and self.relevance != 100: self.has_stored_relevance = True if self.minimum_coverage: self.has_stored_minimum_coverage = True if self.license_expression: try: expression = self.licensing.parse(self.license_expression) except: raise Exception( 'Unable to parse License rule expression: ' +repr(self.license_expression) + ' for: file://' + self.data_file + '\n' + traceback.format_exc() ) if expression is None: raise Exception( 'Unable to parse License rule expression: ' +repr(self.license_expression) + ' for: file://' + self.data_file) self.license_expression = expression.render() self.license_expression_object = expression def tokens(self): """ Return an iterable of token strings for this rule. Length, relevance and minimum_coverage may be recomputed as a side effect. """ length = 0 text = self.text() text = text.strip() # FIXME: this is weird: # We tag this rule as being a bare URL if it starts with a scheme and is # on one line: this is used to determine a matching approach # FIXME: this does not lower the text first?? if text.startswith(('http://', 'https://', 'ftp://')) and '\n' not in text[:1000].lower(): self.minimum_coverage = 100 for token in query_tokenizer(self.text()): length += 1 yield token self.length = length self.compute_relevance() def text(self): """ Return the rule text loaded from its file. """ if self.text_file and exists(self.text_file): # IMPORTANT: use the same process as query text loading for symmetry numbered_lines = numbered_text_lines(self.text_file, demarkup=False, plain_text=True) return ''.join(l for _, l in numbered_lines) # used for non-file backed rules elif self.stored_text: return self.stored_text else: raise Exception('Inconsistent rule text for: ' + self.identifier + '\nfile://' + self.text_file) def license_keys(self, unique=True): """ Return a list of license keys for this rule. """ if not self.license_expression: return [] return self.licensing.license_keys(self.license_expression_object, unique=unique) def same_licensing(self, other): """ Return True if the other rule has the same licensing as this rule. """ if self.license_expression and other.license_expression: return self.licensing.is_equivalent( self.license_expression_object, other.license_expression_object) def licensing_contains(self, other): """ Return True if this rule licensing contains the other rule licensing. """ if self.license_expression and other.license_expression: return self.licensing.contains( self.license_expression_object, other.license_expression_object) def compute_thresholds(self, small_rule=SMALL_RULE): """ Compute and set thresholds either considering the occurrence of all tokens or the occurance of unique tokens. """ minimum_coverage, self.min_matched_length, self.min_high_matched_length = ( compute_thresholds_occurences( self.minimum_coverage, self.length, self.high_length)) if not self.has_stored_minimum_coverage: self.minimum_coverage = minimum_coverage self._minimum_containment = self.minimum_coverage / 100 self.min_matched_length_unique, self.min_high_matched_length_unique = ( compute_thresholds_unique( self.minimum_coverage, self.length, self.length_unique, self.high_length_unique)) self.is_small = self.length < small_rule def to_dict(self): """ Return an ordered mapping of self, excluding texts. Used for serialization. Empty values are not included. """ data = OrderedDict() if self.license_expression: data['license_expression'] = self.license_expression flags = ( 'is_false_positive', 'is_negative', 'is_license_text', 'is_license_notice', 'is_license_reference', 'is_license_tag', 'only_known_words', ) for flag in flags: tag_value = getattr(self, flag, False) if tag_value: data[flag] = tag_value if self.has_stored_relevance and self.relevance: rl = self.relevance if isinstance(rl, float) and int(rl) == rl: rl = int(rl) data['relevance'] = rl if self.has_stored_minimum_coverage and self.minimum_coverage > 0: cv = self.minimum_coverage if isinstance(cv, float) and int(cv) == cv: cv = int(cv) data['minimum_coverage'] = cv if self.referenced_filenames: data['referenced_filenames'] = self.referenced_filenames if self.notes: data['notes'] = self.notes if self.ignorable_copyrights: data['ignorable_copyrights'] = self.ignorable_copyrights if self.ignorable_holders: data['ignorable_holders'] = self.ignorable_holders if self.ignorable_authors: data['ignorable_authors'] = self.ignorable_authors if self.ignorable_urls: data['ignorable_urls'] = self.ignorable_urls if self.ignorable_emails: data['ignorable_emails'] = self.ignorable_emails return data def dump(self): """ Dump a representation of this rule as two files: - a .yml for the rule data in YAML (self.data_file) - a .RULE: the rule text as a UTF-8 file (self.text_file) Does nothing if this rule was created from a License (e.g. `is_license` is True) """ if self.is_license: return def write(location, byte_string): # we write as binary because rules and licenses texts and data are UTF-8-encoded bytes with io.open(location, 'wb') as of: of.write(byte_string) if self.data_file: as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8') write(self.data_file, as_yaml) write(self.text_file, self.text().encode('utf-8')) def load(self): """ Load self from a .RULE YAML file stored in self.data_file. Does not load the rule text file. Unknown fields are ignored and not bound to the Rule object. """ try: with io.open(self.data_file, encoding='utf-8') as f: data = saneyaml.load(f.read()) except Exception as e: print('#############################') print('INVALID LICENSE RULE FILE:', 'file://' + self.data_file) print('#############################') print(e) print('#############################') # this is a rare case, but yes we abruptly stop. raise e known_attributes = set(attr.fields_dict(self.__class__)) data_file_attributes = set(data) unknown_attributes = data_file_attributes.difference(known_attributes) if unknown_attributes: unknown_attributes = ', '.join(sorted(unknown_attributes)) msg = 'License rule {} data file has unknown attributes: {}' raise Exception(msg.format(self, unknown_attributes)) self.license_expression = data.get('license_expression') self.is_negative = data.get('is_negative', False) self.is_false_positive = data.get('is_false_positive', False) if not self.license_expression and not (self.is_negative or self.is_false_positive): msg = 'License rule {} is missing a license_expression.' raise Exception(msg.format(self)) relevance = float(data.get('relevance', 0)) if relevance: if relevance <= 0 or relevance > 100: msg = ('License rule {} data file has an invalid relevance. ' 'Should be above 0 and 100 or less: {}') raise Exception(msg.format(self, repr(relevance))) # Keep track if we have a stored relevance of not. self.relevance = relevance self.has_stored_relevance = True self.minimum_coverage = float(data.get('minimum_coverage', 0)) self._minimum_containment = self.minimum_coverage / 100 if not (0 <= self.minimum_coverage <= 100): msg = ( 'License rule {} data file has an invalid minimum_coverage. ' 'Should be between 0 and 100: {}') raise Exception(msg.format(self, self.minimum_coverage)) self.is_license_text = data.get('is_license_text', False) self.is_license_notice = data.get('is_license_notice', False) self.is_license_tag = data.get('is_license_tag', False) self.is_license_reference = data.get('is_license_reference', False) self.only_known_words = data.get('only_known_words', False) self.referenced_filenames = data.get('referenced_filenames', []) or [] if not isinstance(self.referenced_filenames, list): msg = ( 'License rule {} data file has an invalid referenced_filenames. ' 'Should be a list: {}') raise Exception(msg.format(self, self.referenced_filenames)) # these are purely informational and not used at run time notes = data.get('notes') if notes: self.notes = notes.strip() if not self.notes and (self.is_negative or self.is_false_positive): msg = 'Special License rule {} is missing explanatory notes.' raise Exception(msg.format(self)) self.ignorable_copyrights = data.get('ignorable_copyrights', []) self.ignorable_holders = data.get('ignorable_holders', []) self.ignorable_authors = data.get('ignorable_authors', []) self.ignorable_urls = data.get('ignorable_urls', []) self.ignorable_emails = data.get('ignorable_emails', []) return self def compute_relevance(self): """ Compute and set the `relevance` attribute for this rule. The relevance is a float between 0 and 100 where 100 means highly relevant and 0 means not relevant at all. For instance a match to the "gpl" or the "cpol" words have a fairly low relevance as they are a weak indication of an actual license and could be a false positive and should therefore be assigned a low relevance. In contrast a match to most or all of the apache-2.0 license text is highly relevant. The Rule relevance is used as the basis to compute a match score. The relevance is either pre-defined in the rule YAML data file with the "relevance" attribute or computed base on the rule length here using this approach: - a false positive or a negative rule has a relevance of 100. - a rule of length equal to or larger than a threshold has a 100 relevance - a rule of length smaller than a threshold has a relevance of 100/threshold, rounded down. The current threshold is 18 words. """ if isinstance(self, SpdxRule): self.relevance = 100 return if self.has_stored_relevance: return # case for false positive if self.is_false_positive: self.relevance = 100 return # case for negative rules with no license (and are not an FP) # they do not have licenses and their matches are never returned if self.is_negative: self.relevance = 100 return threshold = 18.0 relevance_of_one_word = round((1 / threshold) * 100, 2) length = self.length if length >= threshold: # general case self.relevance = 100 else: computed = int(length * relevance_of_one_word) self.relevance = min([100, computed]) @property def has_flags(self): """ Return True if this Rule has at least one flag set. """ return (self.is_license_text or self.is_license_notice or self.is_license_reference or self.is_license_tag) def compute_thresholds_occurences(minimum_coverage, length, high_length, _MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH, _MIN_MATCH_LENGTH=MIN_MATCH_LENGTH): """ Compute and return thresholds considering the occurrence of all tokens. """ if minimum_coverage == 100: min_matched_length = length min_high_matched_length = high_length return minimum_coverage, min_matched_length, min_high_matched_length if length < 3: min_high_matched_length = high_length min_matched_length = length minimum_coverage = 100 elif length < 10: min_matched_length = length min_high_matched_length = high_length minimum_coverage = 80 elif length < 30: min_matched_length = length // 2 min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH) minimum_coverage = 50 elif length < 200: min_matched_length = _MIN_MATCH_LENGTH min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH) # minimum_coverage = max(15, int(length//10)) else: # if length >= 200: min_matched_length = length // 10 min_high_matched_length = high_length // 10 # minimum_coverage = int(length//10) return minimum_coverage, min_matched_length, min_high_matched_length def compute_thresholds_unique(minimum_coverage, length, length_unique, high_length_unique, _MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH, _MIN_MATCH_LENGTH=MIN_MATCH_LENGTH): """ Compute and set thresholds considering the occurrence of only unique tokens. """ if minimum_coverage == 100: min_matched_length_unique = length_unique min_high_matched_length_unique = high_length_unique return min_matched_length_unique, min_high_matched_length_unique if length > 200: min_matched_length_unique = length // 10 min_high_matched_length_unique = high_length_unique // 10 elif length < 5: min_matched_length_unique = length_unique min_high_matched_length_unique = high_length_unique elif length < 10: if length_unique < 2: min_matched_length_unique = length_unique else: min_matched_length_unique = length_unique - 1 min_high_matched_length_unique = high_length_unique elif length < 20: min_matched_length_unique = high_length_unique min_high_matched_length_unique = high_length_unique else: min_matched_length_unique = _MIN_MATCH_LENGTH highu = (int(high_length_unique // 2)) or high_length_unique min_high_matched_length_unique = min(highu, _MIN_MATCH_HIGH_LENGTH) return min_matched_length_unique, min_high_matched_length_unique @attr.s(slots=True, repr=False) class SpdxRule(Rule): """ A specialized rule object that is used for the special case of SPDX license expressions. Since we may have an infinite possible number of SPDX expressions and these are not backed by a traditional rule text file, we use this class to handle the specifics of these how rules that are built at matching time: one rule is created for each detected SPDX license expression. """ def __attrs_post_init__(self, *args, **kwargs): self.identifier = 'spdx-license-identifier: ' + self.license_expression expression = None try: expression = self.licensing.parse(self.license_expression) except: raise Exception( 'Unable to parse License rule expression: ' + repr(self.license_expression) + ' for: SPDX rule:' + self.stored_text + '\n' + traceback.format_exc()) if expression is None: raise Exception( 'Unable to parse License rule expression: ' +repr(self.license_expression) + ' for:' + repr(self.data_file)) self.license_expression = expression.render() self.license_expression_object = expression self.is_license_tag = True self.is_small = False self.relevance =100 self.has_stored_relevance = True def load(self): raise NotImplementedError def dump(self): raise NotImplementedError def _print_rule_stats(): """ Print rules statistics. """ from licensedcode.cache import get_index idx = get_index() rules = idx.rules_by_rid sizes = Counter(r.length for r in rules) print('Top 15 lengths: ', sizes.most_common(15)) print('15 smallest lengths: ', sorted(sizes.items(), key=itemgetter(0))[:15]) high_sizes = Counter(r.high_length for r in rules) print('Top 15 high lengths: ', high_sizes.most_common(15)) print('15 smallest high lengths: ', sorted(high_sizes.items(), key=itemgetter(0))[:15]) def update_ignorables(licensish, verbose=False, dump=True): """ Collect, update and save the ignorable_* attributes of a `licensish` Rule or License object. """ location = licensish.text_file if verbose: print('Processing:', 'file://' + location) if not exists(location): return licensish # collect and set ignorable copyrights, holders and authors from cluecode.copyrights import detect_copyrights copyrights = set() holders = set() authors = set() for dtype, value, _start, _end in detect_copyrights(location): if dtype == 'copyrights': copyrights.add(value) elif dtype == 'holders': holders.add(value) elif dtype == 'authors': authors.add(value) licensish.ignorable_copyrights = sorted(copyrights) licensish.ignorable_holders = sorted(holders) licensish.ignorable_authors = sorted(authors) # collect and set ignrable emails and urls from cluecode.finder import find_urls from cluecode.finder import find_emails urls = set(u for (u, _ln) in find_urls(location) if u) licensish.ignorable_urls = sorted(urls) emails = set(u for (u, _ln) in find_emails(location) if u) licensish.ignorable_emails = sorted(emails) if dump: licensish.dump() return licensish
37.207646
128
0.633223
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from collections import Counter from collections import defaultdict from collections import OrderedDict from functools import partial from itertools import chain import io from operator import itemgetter from os.path import abspath from os.path import dirname from os.path import exists from os.path import join import traceback import attr from license_expression import Licensing from commoncode.fileutils import copyfile from commoncode.fileutils import file_base_name from commoncode.fileutils import file_name from commoncode.fileutils import resource_iter from commoncode import saneyaml from licensedcode import MIN_MATCH_HIGH_LENGTH from licensedcode import MIN_MATCH_LENGTH from licensedcode import SMALL_RULE from licensedcode.tokenize import query_tokenizer from textcode.analysis import numbered_text_lines TRACE_REPR = False data_dir = join(abspath(dirname(__file__)), 'data') licenses_data_dir = join(data_dir, 'licenses') rules_data_dir = join(data_dir, 'rules') FOSS_CATEGORIES = set([ 'Copyleft', 'Copyleft Limited', 'Patent License', 'Permissive', 'Public Domain', ]) OTHER_CATEGORIES = set([ 'Commercial', 'Proprietary Free', 'Free Restricted', 'Source-available', 'Unstated License', ]) CATEGORIES = FOSS_CATEGORIES | OTHER_CATEGORIES @attr.s(slots=True) class License(object): __attrib = partial(attr.ib, repr=False) key = attr.ib(default=None, repr=True) src_dir = __attrib(default=licenses_data_dir) is_deprecated = __attrib(default=False) language = __attrib(default='en') short_name = __attrib(default=None) name = __attrib(default=None) category = __attrib(default=None) owner = __attrib(default=None) homepage_url = __attrib(default=None) notes = __attrib(default=None) is_exception = __attrib(default=False) spdx_license_key = __attrib(default=None) other_spdx_license_keys = __attrib(default=attr.Factory(list)) osi_license_key = __attrib(default=None) text_urls = __attrib(default=attr.Factory(list)) osi_url = __attrib(default=None) faq_url = __attrib(default=None) other_urls = __attrib(default=attr.Factory(list)) key_aliases = __attrib(default=attr.Factory(list)) minimum_coverage = __attrib(default=0) standard_notice = __attrib(default=None) ignorable_copyrights = __attrib(default=attr.Factory(list)) ignorable_authors = __attrib(default=attr.Factory(list)) ignorable_holders = __attrib(default=attr.Factory(list)) ignorable_urls = __attrib(default=attr.Factory(list)) ignorable_emails = __attrib(default=attr.Factory(list)) data_file = __attrib(default=None) text_file = __attrib(default=None) def __attrs_post_init__(self, *args, **kwargs): if self.src_dir: self.set_file_paths() if exists(self.data_file): self.load() def set_file_paths(self): self.data_file = join(self.src_dir, self.key + '.yml') self.text_file = join(self.src_dir, self.key + '.LICENSE') def relocate(self, target_dir, new_key=None): if not target_dir or target_dir == self.src_dir: raise ValueError( 'Cannot relocate {} License to empty directory or same directory.'.format(self.key)) if new_key: key = new_key else: key = self.key newl = License(key, target_dir) excluded_fields = ('key', 'src_dir', 'data_file', 'text_file',) all_fields = attr.fields(self.__class__) attrs = [f.name for f in all_fields if f.name not in excluded_fields] for name in attrs: setattr(newl, name, getattr(self, name)) if self.text: copyfile(self.text_file, newl.text_file) newl.dump() return newl def update(self, mapping): for k, v in mapping.items(): setattr(self, k, v) def __copy__(self): oldl = self.to_dict() newl = License(key=self.key) newl.update(oldl) return newl @property def text(self): return self._read_text(self.text_file) def to_dict(self): def dict_fields(attr, value): if not value: return False if attr.name in ('data_file', 'text_file', 'src_dir',): return False if attr.name == 'language' and value == 'en': return False if attr.name == 'minimum_coverage' and value == 100: return False return True data = attr.asdict(self, filter=dict_fields, dict_factory=OrderedDict) cv = data.get('minimum_coverage') if cv and isinstance(cv, float) and int(cv) == cv: cv = int(cv) data['minimum_coverage'] = cv return data def dump(self): def write(location, byte_string): with io.open(location, 'wb') as of: of.write(byte_string) as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8') write(self.data_file, as_yaml) if self.text: write(self.text_file, self.text.encode('utf-8')) def load(self): try: with io.open(self.data_file, encoding='utf-8') as f: data = saneyaml.load(f.read()) numeric_keys = ('minimum_coverage', 'relevance') for k, v in data.items(): if k in numeric_keys: v = int(v) if k == 'key': assert self.key == v, 'Inconsistent YAML key and file names for %r' % self.key setattr(self, k, v) except Exception as e: print() print('#############################') print('INVALID LICENSE YAML FILE:', 'file://' + self.data_file) print('#############################') print(e) print('#############################') raise def _read_text(self, location): if not exists(location): text = '' else: with io.open(location, encoding='utf-8') as f: text = f.read() return text def spdx_keys(self): if self.spdx_license_key: yield self.spdx_license_key for key in self.other_spdx_license_keys: yield key @staticmethod def validate(licenses, verbose=False, no_dupe_urls=False): infos = defaultdict(list) warnings = defaultdict(list) errors = defaultdict(list) by_spdx_key = defaultdict(list) by_text = defaultdict(list) by_short_name = defaultdict(list) by_name = defaultdict(list) for key, lic in licenses.items(): warn = warnings[key].append info = infos[key].append error = errors[key].append by_name[lic.name].append(lic) by_short_name[lic.short_name].append(lic) if not lic.short_name: error('No short name') if not lic.name: error('No name') if not lic.category: error('No category') if lic.category and lic.category not in CATEGORIES: cats = '\n'.join(sorted(CATEGORIES)) error('Unknown license category: {}.\nUse one of these valid categories:\n{}'.format(lic.category, cats)) if not lic.owner: error('No owner') if no_dupe_urls: if lic.text_urls and not all(lic.text_urls): warn('Some empty text_urls values') if lic.other_urls and not all(lic.other_urls): warn('Some empty other_urls values') if lic.homepage_url: if lic.homepage_url in lic.text_urls: warn('Homepage URL also in text_urls') if lic.homepage_url in lic.other_urls: warn('Homepage URL also in other_urls') if lic.homepage_url == lic.faq_url: warn('Homepage URL same as faq_url') if lic.homepage_url == lic.osi_url: warn('Homepage URL same as osi_url') if lic.osi_url or lic.faq_url: if lic.osi_url == lic.faq_url: warn('osi_url same as faq_url') all_licenses = lic.text_urls + lic.other_urls for url in lic.osi_url, lic.faq_url, lic.homepage_url: if url: all_licenses.append(url) if not len(all_licenses) == len(set(all_licenses)): warn('Some duplicated URLs') text = lic.text license_qtokens = tuple(query_tokenizer(text)) if not license_qtokens: info('No license text') else: by_text[license_qtokens].append(key + ': TEXT') if lic.spdx_license_key: by_spdx_key[lic.spdx_license_key].append(key) for oslk in lic.other_spdx_license_keys: by_spdx_key[oslk].append(key) multiple_spdx_keys_used = {k: v for k, v in by_spdx_key.items() if len(v) > 1} if multiple_spdx_keys_used: for k, lkeys in multiple_spdx_keys_used.items(): errors['GLOBAL'].append('SPDX key: ' + k + ' used in multiple licenses: ' + ', '.join(sorted(lkeys))) multiple_texts = {k: v for k, v in by_text.items() if len(v) > 1} if multiple_texts: for k, msgs in multiple_texts.items(): errors['GLOBAL'].append('Duplicate texts in multiple licenses:' + ', '.join(sorted(msgs))) for short_name, licenses in by_short_name.items(): if len(licenses) == 1: continue errors['GLOBAL'].append('Duplicate short name:' + short_name + ' in licenses:' + ', '.join(l.key for l in licenses)) for name, licenses in by_name.items(): if len(licenses) == 1: continue errors['GLOBAL'].append('Duplicate name:' + name + ' in licenses:' + ', '.join(l.key for l in licenses)) errors = {k: v for k, v in errors.items() if v} warnings = {k: v for k, v in warnings.items() if v} infos = {k: v for k, v in infos.items() if v} if verbose: print('Licenses validation errors:') for key, msgs in sorted(errors.items()): print('ERRORS for:', key, ':', '\n'.join(msgs)) print('Licenses validation warnings:') for key, msgs in sorted(warnings.items()): print('WARNINGS for:', key, ':', '\n'.join(msgs)) print('Licenses validation infos:') for key, msgs in sorted(infos.items()): print('INFOS for:', key, ':', '\n'.join(msgs)) return errors, warnings, infos def ignore_editor_tmp_files(location): return location.endswith('.swp') def load_licenses(licenses_data_dir=licenses_data_dir , with_deprecated=False): licenses = {} used_files = set() all_files = set(resource_iter(licenses_data_dir, ignored=ignore_editor_tmp_files, with_dirs=False)) for data_file in sorted(all_files): if data_file.endswith('.yml'): key = file_base_name(data_file) lic = License(key, licenses_data_dir) used_files.add(data_file) if exists(lic.text_file): used_files.add(lic.text_file) if not with_deprecated and lic.is_deprecated: continue licenses[key] = lic dangling = all_files.difference(used_files) if dangling: msg = 'Some License data or text files are orphaned in "{}".\n'.format(licenses_data_dir) msg += '\n'.join('file://{}'.format(f) for f in sorted(dangling)) raise Exception(msg) return licenses def get_rules(licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir): from licensedcode.cache import get_licenses_db licenses = get_licenses_db(licenses_data_dir=licenses_data_dir) rules = list(load_rules(rules_data_dir=rules_data_dir)) check_rules_integrity(rules, licenses) licenses_as_rules = build_rules_from_licenses(licenses) return chain(licenses_as_rules, rules) class MissingLicenses(Exception): pass class MissingFlags(Exception): pass def check_rules_integrity(rules, licenses_by_key): invalid_rules = defaultdict(set) rules_without_flags = set() for rule in rules: unknown_keys = [key for key in rule.license_keys() if key not in licenses_by_key] if unknown_keys: invalid_rules[rule.data_file].update(unknown_keys) if not rule.has_flags and not (rule.is_negative or rule.is_false_positive): rules_without_flags.add(rule.data_file) if invalid_rules: invalid_rules = ( ' '.join(keys) + '\n' + 'file://' + data_file + '\n' + 'file://' + data_file.replace('.yml', '.RULE') + '\n' for data_file, keys in invalid_rules.items() if keys) msg = 'Rules referencing missing licenses:\n' + '\n'.join(sorted(invalid_rules)) raise MissingLicenses(msg) if rules_without_flags: invalid_rules = ( 'file://' + data_file + '\n' + 'file://' + data_file.replace('.yml', '.RULE') + '\n' for data_file in sorted(rules_without_flags)) msg = 'Rules without is_license_xxx flags:\n' + '\n'.join(sorted(invalid_rules)) raise MissingFlags(msg) def build_rules_from_licenses(licenses): for license_key, license_obj in licenses.items(): text_file = join(license_obj.src_dir, license_obj.text_file) if exists(text_file): minimum_coverage = license_obj.minimum_coverage or 0 yield Rule( text_file=text_file, license_expression=license_key, has_stored_relevance=False, relevance=100, has_stored_minimum_coverage=bool(minimum_coverage), minimum_coverage=minimum_coverage, is_license=True, is_license_text=True, ignorable_copyrights=license_obj.ignorable_copyrights, ignorable_holders=license_obj.ignorable_holders, ignorable_authors=license_obj.ignorable_authors, ignorable_urls=license_obj.ignorable_urls, ignorable_emails=license_obj.ignorable_emails, ) def get_all_spdx_keys(licenses): for lic in licenses.values(): for spdx_key in lic.spdx_keys(): yield spdx_key def get_essential_spdx_tokens(): yield 'spdx' yield 'license' yield 'licence' yield 'identifier' yield 'licenseref' def get_all_spdx_key_tokens(licenses): for tok in get_essential_spdx_tokens(): yield tok for spdx_key in get_all_spdx_keys(licenses): for token in query_tokenizer(spdx_key): yield token def load_rules(rules_data_dir=rules_data_dir): seen_files = set() processed_files = set() lower_case_files = set() case_problems = set() space_problems = [] model_errors = [] for data_file in resource_iter(rules_data_dir, with_dirs=False): if data_file.endswith('.yml'): base_name = file_base_name(data_file) if ' ' in base_name: space_problems.append(data_file) rule_file = join(rules_data_dir, base_name + '.RULE') try: rule = Rule(data_file=data_file, text_file=rule_file) yield rule except Exception as re: model_errors.append(str(re)) data_lower = data_file.lower() if data_lower in lower_case_files: case_problems.add(data_lower) else: lower_case_files.add(data_lower) rule_lower = rule_file.lower() if rule_lower in lower_case_files: case_problems.add(rule_lower) else: lower_case_files.add(rule_lower) processed_files.update([data_file, rule_file]) if not data_file.endswith('~'): seen_files.add(data_file) unknown_files = seen_files - processed_files if unknown_files or case_problems or model_errors or space_problems: msg = '' if model_errors: errors = '\n'.join(model_errors) msg += '\nInvalid rule YAML in directory: %(rules_data_dir)r\n%(errors)s' % locals() if unknown_files: files = '\n'.join(sorted('file://' + f for f in unknown_files)) msg += '\nOrphaned files in rule directory: %(rules_data_dir)r\n%(files)s' % locals() if case_problems: files = '\n'.join(sorted('file://' + f for f in case_problems)) msg += '\nRule files with non-unique name ignoring casein rule directory: %(rules_data_dir)r\n%(files)s' % locals() if space_problems: files = '\n'.join(sorted('"file://' + f + '"' for f in space_problems)) msg += '\nRule files name cannot contain spaces: %(rules_data_dir)r\n%(files)s' % locals() raise Exception(msg) @attr.s(slots=True) class Rule(object): licensing = Licensing() license_expression = attr.ib(default=None) license_expression_object = attr.ib(default=None, repr=False) is_license_text = attr.ib(default=False, repr=False) is_license_notice = attr.ib(default=False, repr=False) is_license_reference = attr.ib(default=False, repr=False) is_license_tag = attr.ib(default=False, repr=False) is_false_positive = attr.ib(default=False, repr=False) is_negative = attr.ib(default=False, repr=False) minimum_coverage = attr.ib(default=0) has_stored_minimum_coverage = attr.ib(default=False, repr=False) _minimum_containment = attr.ib(default=0, repr=False) only_known_words = attr.ib(default=False) relevance = attr.ib(default=100) has_stored_relevance = attr.ib(default=False, repr=False) referenced_filenames = attr.ib(default=attr.Factory(list), repr=False) notes = attr.ib(default=None, repr=False) is_license = attr.ib(default=False, repr=False) ignorable_copyrights = attr.ib(default=attr.Factory(list), repr=False) ignorable_holders = attr.ib(default=attr.Factory(list), repr=False) ignorable_authors = attr.ib(default=attr.Factory(list), repr=False) ignorable_urls = attr.ib(default=attr.Factory(list), repr=False) ignorable_emails = attr.ib(default=attr.Factory(list), repr=False) s_license_notice', 'is_license_reference', 'is_license_tag', 'only_known_words', ) for flag in flags: tag_value = getattr(self, flag, False) if tag_value: data[flag] = tag_value if self.has_stored_relevance and self.relevance: rl = self.relevance if isinstance(rl, float) and int(rl) == rl: rl = int(rl) data['relevance'] = rl if self.has_stored_minimum_coverage and self.minimum_coverage > 0: cv = self.minimum_coverage if isinstance(cv, float) and int(cv) == cv: cv = int(cv) data['minimum_coverage'] = cv if self.referenced_filenames: data['referenced_filenames'] = self.referenced_filenames if self.notes: data['notes'] = self.notes if self.ignorable_copyrights: data['ignorable_copyrights'] = self.ignorable_copyrights if self.ignorable_holders: data['ignorable_holders'] = self.ignorable_holders if self.ignorable_authors: data['ignorable_authors'] = self.ignorable_authors if self.ignorable_urls: data['ignorable_urls'] = self.ignorable_urls if self.ignorable_emails: data['ignorable_emails'] = self.ignorable_emails return data def dump(self): if self.is_license: return def write(location, byte_string): with io.open(location, 'wb') as of: of.write(byte_string) if self.data_file: as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8') write(self.data_file, as_yaml) write(self.text_file, self.text().encode('utf-8')) def load(self): try: with io.open(self.data_file, encoding='utf-8') as f: data = saneyaml.load(f.read()) except Exception as e: print('#############################') print('INVALID LICENSE RULE FILE:', 'file://' + self.data_file) print('#############################') print(e) print('#############################') raise e known_attributes = set(attr.fields_dict(self.__class__)) data_file_attributes = set(data) unknown_attributes = data_file_attributes.difference(known_attributes) if unknown_attributes: unknown_attributes = ', '.join(sorted(unknown_attributes)) msg = 'License rule {} data file has unknown attributes: {}' raise Exception(msg.format(self, unknown_attributes)) self.license_expression = data.get('license_expression') self.is_negative = data.get('is_negative', False) self.is_false_positive = data.get('is_false_positive', False) if not self.license_expression and not (self.is_negative or self.is_false_positive): msg = 'License rule {} is missing a license_expression.' raise Exception(msg.format(self)) relevance = float(data.get('relevance', 0)) if relevance: if relevance <= 0 or relevance > 100: msg = ('License rule {} data file has an invalid relevance. ' 'Should be above 0 and 100 or less: {}') raise Exception(msg.format(self, repr(relevance))) self.relevance = relevance self.has_stored_relevance = True self.minimum_coverage = float(data.get('minimum_coverage', 0)) self._minimum_containment = self.minimum_coverage / 100 if not (0 <= self.minimum_coverage <= 100): msg = ( 'License rule {} data file has an invalid minimum_coverage. ' 'Should be between 0 and 100: {}') raise Exception(msg.format(self, self.minimum_coverage)) self.is_license_text = data.get('is_license_text', False) self.is_license_notice = data.get('is_license_notice', False) self.is_license_tag = data.get('is_license_tag', False) self.is_license_reference = data.get('is_license_reference', False) self.only_known_words = data.get('only_known_words', False) self.referenced_filenames = data.get('referenced_filenames', []) or [] if not isinstance(self.referenced_filenames, list): msg = ( 'License rule {} data file has an invalid referenced_filenames. ' 'Should be a list: {}') raise Exception(msg.format(self, self.referenced_filenames)) notes = data.get('notes') if notes: self.notes = notes.strip() if not self.notes and (self.is_negative or self.is_false_positive): msg = 'Special License rule {} is missing explanatory notes.' raise Exception(msg.format(self)) self.ignorable_copyrights = data.get('ignorable_copyrights', []) self.ignorable_holders = data.get('ignorable_holders', []) self.ignorable_authors = data.get('ignorable_authors', []) self.ignorable_urls = data.get('ignorable_urls', []) self.ignorable_emails = data.get('ignorable_emails', []) return self def compute_relevance(self): if isinstance(self, SpdxRule): self.relevance = 100 return if self.has_stored_relevance: return if self.is_false_positive: self.relevance = 100 return if self.is_negative: self.relevance = 100 return threshold = 18.0 relevance_of_one_word = round((1 / threshold) * 100, 2) length = self.length if length >= threshold: self.relevance = 100 else: computed = int(length * relevance_of_one_word) self.relevance = min([100, computed]) @property def has_flags(self): return (self.is_license_text or self.is_license_notice or self.is_license_reference or self.is_license_tag) def compute_thresholds_occurences(minimum_coverage, length, high_length, _MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH, _MIN_MATCH_LENGTH=MIN_MATCH_LENGTH): if minimum_coverage == 100: min_matched_length = length min_high_matched_length = high_length return minimum_coverage, min_matched_length, min_high_matched_length if length < 3: min_high_matched_length = high_length min_matched_length = length minimum_coverage = 100 elif length < 10: min_matched_length = length min_high_matched_length = high_length minimum_coverage = 80 elif length < 30: min_matched_length = length // 2 min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH) minimum_coverage = 50 elif length < 200: min_matched_length = _MIN_MATCH_LENGTH min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH) else: min_matched_length = length // 10 min_high_matched_length = high_length // 10 return minimum_coverage, min_matched_length, min_high_matched_length def compute_thresholds_unique(minimum_coverage, length, length_unique, high_length_unique, _MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH, _MIN_MATCH_LENGTH=MIN_MATCH_LENGTH): if minimum_coverage == 100: min_matched_length_unique = length_unique min_high_matched_length_unique = high_length_unique return min_matched_length_unique, min_high_matched_length_unique if length > 200: min_matched_length_unique = length // 10 min_high_matched_length_unique = high_length_unique // 10 elif length < 5: min_matched_length_unique = length_unique min_high_matched_length_unique = high_length_unique elif length < 10: if length_unique < 2: min_matched_length_unique = length_unique else: min_matched_length_unique = length_unique - 1 min_high_matched_length_unique = high_length_unique elif length < 20: min_matched_length_unique = high_length_unique min_high_matched_length_unique = high_length_unique else: min_matched_length_unique = _MIN_MATCH_LENGTH highu = (int(high_length_unique // 2)) or high_length_unique min_high_matched_length_unique = min(highu, _MIN_MATCH_HIGH_LENGTH) return min_matched_length_unique, min_high_matched_length_unique @attr.s(slots=True, repr=False) class SpdxRule(Rule): def __attrs_post_init__(self, *args, **kwargs): self.identifier = 'spdx-license-identifier: ' + self.license_expression expression = None try: expression = self.licensing.parse(self.license_expression) except: raise Exception( 'Unable to parse License rule expression: ' + repr(self.license_expression) + ' for: SPDX rule:' + self.stored_text + '\n' + traceback.format_exc()) if expression is None: raise Exception( 'Unable to parse License rule expression: ' +repr(self.license_expression) + ' for:' + repr(self.data_file)) self.license_expression = expression.render() self.license_expression_object = expression self.is_license_tag = True self.is_small = False self.relevance =100 self.has_stored_relevance = True def load(self): raise NotImplementedError def dump(self): raise NotImplementedError def _print_rule_stats(): from licensedcode.cache import get_index idx = get_index() rules = idx.rules_by_rid sizes = Counter(r.length for r in rules) print('Top 15 lengths: ', sizes.most_common(15)) print('15 smallest lengths: ', sorted(sizes.items(), key=itemgetter(0))[:15]) high_sizes = Counter(r.high_length for r in rules) print('Top 15 high lengths: ', high_sizes.most_common(15)) print('15 smallest high lengths: ', sorted(high_sizes.items(), key=itemgetter(0))[:15]) def update_ignorables(licensish, verbose=False, dump=True): location = licensish.text_file if verbose: print('Processing:', 'file://' + location) if not exists(location): return licensish from cluecode.copyrights import detect_copyrights copyrights = set() holders = set() authors = set() for dtype, value, _start, _end in detect_copyrights(location): if dtype == 'copyrights': copyrights.add(value) elif dtype == 'holders': holders.add(value) elif dtype == 'authors': authors.add(value) licensish.ignorable_copyrights = sorted(copyrights) licensish.ignorable_holders = sorted(holders) licensish.ignorable_authors = sorted(authors) from cluecode.finder import find_urls from cluecode.finder import find_emails urls = set(u for (u, _ln) in find_urls(location) if u) licensish.ignorable_urls = sorted(urls) emails = set(u for (u, _ln) in find_emails(location) if u) licensish.ignorable_emails = sorted(emails) if dump: licensish.dump() return licensish
true
true
f70da33491ac3a433cbf0d0075a23b55629b38de
4,722
py
Python
OBLib/Model.py
stevenkfirth/OBLib
12ab46ca2c24d28d8ed5b14be0978fb5dacae394
[ "MIT" ]
1
2022-03-28T13:06:00.000Z
2022-03-28T13:06:00.000Z
OBLib/Model.py
stevenkfirth/OBLib
12ab46ca2c24d28d8ed5b14be0978fb5dacae394
[ "MIT" ]
null
null
null
OBLib/Model.py
stevenkfirth/OBLib
12ab46ca2c24d28d8ed5b14be0978fb5dacae394
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import pandas as pd class Model(): """Abstract model class. This is the top-level class and should not be used directly. Instead this class is inherited by other more specialised model classes. """ def __init__(self): "" self._inputs=Inputs() def run(self): """Runs the model. This is an abstract method and should be overloaded by subclasses. :returns: The model outputs. :rtype: Outputs """ outputs=Outputs() # timestamps passed from inputs to outputs outputs._timestamps=self._inputs._timestamps # CALCULATIONS HERE return outputs @property def inputs(self): """The model inputs. Access this object to change the model inputs. Read-only property. :rtype: Inputs """ return self._inputs class Inputs(): """Abstract model inputs class. This is the top-level class and should not be used directly. Instead this class is inherited by other more specialised model inputs classes. """ def __init__(self): "" self._timestamps=None def set_timestamps(self, start=None, end=None, *args, **kwargs): """Convenience method to set the `timestamps` property. :param start: The start timestamp (optional) :type start: tuple :param end: The end timestamp (optional) :type end: tuple The remaining input arguments here are passed to the `pandas.date_range` method. See the pandas documentation for details. Typcial inputs might be: * start=(2021,1,1,0,0) (i.e. 1st January 2021) * freq='H' (for hourly intervals) * periods=24 (to generate 1 day of hourly intervals) :rtype: pandas.DatetimeIndex """ if not start is None: start=pd.Timestamp(*start) if not end is None: end=pd.Timestamp(*end) self._timestamps=pd.date_range(start=start, end=end, *args, **kwargs) return self._timestamps @property def timestamps(self): """The input timestamps. Model predictions will be made for each timestamp. Read / write property. :rtype: pandas.DatetimeIndex """ return self._timestamps @timestamps.setter def timestamps(self,value): "" self._timestamps=value class Outputs(): """Abstract model outputs class. This is the top-level class and should not be used directly. Instead this class is inherited by other more specialised model outputs classes. """ def __init__(self): """ """ self._timestamps=None # ->pd.DatetimeIndex self._data={} # key -> data name; value-> np.array etc. def __repr__(self): "" return ("%s" % self.__class__.__name__ + "(" + "timestamps=%s" % self.timestamps + ", " + "data=%s" % self.data + ")" ) @property def timestamps(self): """The outputs timestamps. Read-only property. :rtype: pandas.DatetimeIndex """ return self._timestamps @property def data(self): """The model predictions. Read-only property. :returns: A dictionary of the model results. Key-value pairs are: keys -> the name of the quantity or variable; values -> a list of the model predictions (this list aligns with the output timestamps). :rtype: dict """ return self._data @property def df(self): """A Pandas dataframe of the timestamps and data. Read-only property. :returns: A dataframe with: index -> timestamps; columns -> 'data' keys; values -> `data` values. :rtype: pandas.DataFrame """ return pd.DataFrame(index=self.timestamps, data=self.data)
24.466321
89
0.498306
import pandas as pd class Model(): def __init__(self): self._inputs=Inputs() def run(self): outputs=Outputs() outputs._timestamps=self._inputs._timestamps return outputs @property def inputs(self): return self._inputs class Inputs(): def __init__(self): self._timestamps=None def set_timestamps(self, start=None, end=None, *args, **kwargs): if not start is None: start=pd.Timestamp(*start) if not end is None: end=pd.Timestamp(*end) self._timestamps=pd.date_range(start=start, end=end, *args, **kwargs) return self._timestamps @property def timestamps(self): return self._timestamps @timestamps.setter def timestamps(self,value): self._timestamps=value class Outputs(): def __init__(self): self._timestamps=None self._data={} def __repr__(self): return ("%s" % self.__class__.__name__ + "(" + "timestamps=%s" % self.timestamps + ", " + "data=%s" % self.data + ")" ) @property def timestamps(self): return self._timestamps @property def data(self): return self._data @property def df(self): return pd.DataFrame(index=self.timestamps, data=self.data)
true
true
f70da3d54ecbd05f1e5054b3cc945d5b62ae323c
4,133
py
Python
homeassistant/components/alarm_control_panel/__init__.py
instantchow/home-assistant
6797365d4fd74328a0c9e961f652cfb37f48bc7d
[ "MIT" ]
null
null
null
homeassistant/components/alarm_control_panel/__init__.py
instantchow/home-assistant
6797365d4fd74328a0c9e961f652cfb37f48bc7d
[ "MIT" ]
null
null
null
homeassistant/components/alarm_control_panel/__init__.py
instantchow/home-assistant
6797365d4fd74328a0c9e961f652cfb37f48bc7d
[ "MIT" ]
null
null
null
""" Component to interface with an alarm control panel. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/alarm_control_panel/ """ import logging import os from homeassistant.components import verisure from homeassistant.const import ( ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, SERVICE_ALARM_TRIGGER, SERVICE_ALARM_DISARM, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_AWAY) from homeassistant.config import load_yaml_config_file from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent DOMAIN = 'alarm_control_panel' SCAN_INTERVAL = 30 ENTITY_ID_FORMAT = DOMAIN + '.{}' # Maps discovered services to their platforms DISCOVERY_PLATFORMS = { verisure.DISCOVER_ALARMS: 'verisure' } SERVICE_TO_METHOD = { SERVICE_ALARM_DISARM: 'alarm_disarm', SERVICE_ALARM_ARM_HOME: 'alarm_arm_home', SERVICE_ALARM_ARM_AWAY: 'alarm_arm_away', SERVICE_ALARM_TRIGGER: 'alarm_trigger' } ATTR_TO_PROPERTY = [ ATTR_CODE, ATTR_CODE_FORMAT ] def setup(hass, config): """Track states and offer events for sensors.""" component = EntityComponent( logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL, DISCOVERY_PLATFORMS) component.setup(config) def alarm_service_handler(service): """Map services to methods on Alarm.""" target_alarms = component.extract_from_service(service) if ATTR_CODE not in service.data: code = None else: code = service.data[ATTR_CODE] method = SERVICE_TO_METHOD[service.service] for alarm in target_alarms: getattr(alarm, method)(code) if alarm.should_poll: alarm.update_ha_state(True) descriptions = load_yaml_config_file( os.path.join(os.path.dirname(__file__), 'services.yaml')) for service in SERVICE_TO_METHOD: hass.services.register(DOMAIN, service, alarm_service_handler, descriptions.get(service)) return True def alarm_disarm(hass, code=None, entity_id=None): """Send the alarm the command for disarm.""" data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_DISARM, data) def alarm_arm_home(hass, code=None, entity_id=None): """Send the alarm the command for arm home.""" data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_ARM_HOME, data) def alarm_arm_away(hass, code=None, entity_id=None): """Send the alarm the command for arm away.""" data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_ARM_AWAY, data) def alarm_trigger(hass, code=None, entity_id=None): """Send the alarm the command for trigger.""" data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_TRIGGER, data) # pylint: disable=no-self-use class AlarmControlPanel(Entity): """An abstract class for alarm control devices.""" @property def code_format(self): """Regex for code format or None if no code is required.""" return None def alarm_disarm(self, code=None): """Send disarm command.""" raise NotImplementedError() def alarm_arm_home(self, code=None): """Send arm home command.""" raise NotImplementedError() def alarm_arm_away(self, code=None): """Send arm away command.""" raise NotImplementedError() def alarm_trigger(self, code=None): """Send alarm trigger command.""" raise NotImplementedError() @property def state_attributes(self): """Return the state attributes.""" state_attr = { ATTR_CODE_FORMAT: self.code_format, } return state_attr
27.370861
74
0.681829
import logging import os from homeassistant.components import verisure from homeassistant.const import ( ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, SERVICE_ALARM_TRIGGER, SERVICE_ALARM_DISARM, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_AWAY) from homeassistant.config import load_yaml_config_file from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent DOMAIN = 'alarm_control_panel' SCAN_INTERVAL = 30 ENTITY_ID_FORMAT = DOMAIN + '.{}' DISCOVERY_PLATFORMS = { verisure.DISCOVER_ALARMS: 'verisure' } SERVICE_TO_METHOD = { SERVICE_ALARM_DISARM: 'alarm_disarm', SERVICE_ALARM_ARM_HOME: 'alarm_arm_home', SERVICE_ALARM_ARM_AWAY: 'alarm_arm_away', SERVICE_ALARM_TRIGGER: 'alarm_trigger' } ATTR_TO_PROPERTY = [ ATTR_CODE, ATTR_CODE_FORMAT ] def setup(hass, config): component = EntityComponent( logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL, DISCOVERY_PLATFORMS) component.setup(config) def alarm_service_handler(service): target_alarms = component.extract_from_service(service) if ATTR_CODE not in service.data: code = None else: code = service.data[ATTR_CODE] method = SERVICE_TO_METHOD[service.service] for alarm in target_alarms: getattr(alarm, method)(code) if alarm.should_poll: alarm.update_ha_state(True) descriptions = load_yaml_config_file( os.path.join(os.path.dirname(__file__), 'services.yaml')) for service in SERVICE_TO_METHOD: hass.services.register(DOMAIN, service, alarm_service_handler, descriptions.get(service)) return True def alarm_disarm(hass, code=None, entity_id=None): data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_DISARM, data) def alarm_arm_home(hass, code=None, entity_id=None): data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_ARM_HOME, data) def alarm_arm_away(hass, code=None, entity_id=None): data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_ARM_AWAY, data) def alarm_trigger(hass, code=None, entity_id=None): data = {} if code: data[ATTR_CODE] = code if entity_id: data[ATTR_ENTITY_ID] = entity_id hass.services.call(DOMAIN, SERVICE_ALARM_TRIGGER, data) class AlarmControlPanel(Entity): @property def code_format(self): return None def alarm_disarm(self, code=None): raise NotImplementedError() def alarm_arm_home(self, code=None): raise NotImplementedError() def alarm_arm_away(self, code=None): raise NotImplementedError() def alarm_trigger(self, code=None): raise NotImplementedError() @property def state_attributes(self): state_attr = { ATTR_CODE_FORMAT: self.code_format, } return state_attr
true
true
f70da40a6cff9b86074093df34904d86d9ac8793
3,570
py
Python
sample/sample_advection.py
ZoneTsuyoshi/pyassim
1b40ce914a7b1e4ec6e240a6d67a19a22e431137
[ "MIT" ]
null
null
null
sample/sample_advection.py
ZoneTsuyoshi/pyassim
1b40ce914a7b1e4ec6e240a6d67a19a22e431137
[ "MIT" ]
null
null
null
sample/sample_advection.py
ZoneTsuyoshi/pyassim
1b40ce914a7b1e4ec6e240a6d67a19a22e431137
[ "MIT" ]
null
null
null
""" sample code for LLOCK, SLOCK, LSLOCK application the method to advection model (periodic boundary condition) """ import os, sys import numpy as np import matplotlib.pyplot as plt import seaborn as sns sys.path.append("..") from pyassim import KalmanFilter, LocalLOCK, SpatiallyUniformLOCK, LSLOCK,\ PeriodicAdvection, EulerScheme def main(): result_dir = "figures/advection" if not os.path.exists(result_dir): os.mkdir(result_dir) seed = 121 np.random.seed(seed) # parameters N = 20 x0 = np.exp(-(np.arange(N)-N//2)**2/20) dt = 0.01 dx = 1 c = 1 sys_sd = 0.001 obs_sd = 0.1 timestep = 10000 ds = 100 # generate data model = PeriodicAdvection(dx, c, dt, scheme="LW") scheme = EulerScheme(dt, timestep, model, seed=seed) true, obs = scheme.noise_added_simulation(x0, sys_sd, obs_sd) # setup matrices # adjacency matrix A = np.eye(N) A[np.arange(N-1), np.arange(1,N)] = 2 A[np.arange(1,N), np.arange(N-1)] = 3 A[0,-1] = 3 A[-1,0] = 2 # A[np.arange(N-2), np.arange(2,N)] = True # A[np.arange(2,N), np.arange(N-2)] = True # A[0,-2] = A[-2,0] = A[1,-1] = A[-1,1] = True # initial transition matrix F = np.eye(N) H = np.eye(N) # covariance Q = obs_sd**2 * np.eye(N) R = obs_sd**2 * np.eye(N) V0 = obs_sd**2 * np.eye(N) # execution kf = KalmanFilter(obs[::ds], x0, V0, F, H, Q, R, em_vars=["transition_matrices"]) kf.em(n_iter=10) kf.forward() llock = LocalLOCK(obs[::ds], x0, V0, F, H, Q, R, A.astype(bool), method="elementwise", estimation_length=20, estimation_interval=5, eta=1.0, cutoff=10, estimation_mode="forward") llock.forward() slock = SpatiallyUniformLOCK(obs[::ds], x0, V0, F, H, Q, R, np.zeros(N), A, estimation_length=1, estimation_interval=1, eta=1., cutoff=10., estimation_mode="forward") slock.forward() lslock = LSLOCK(obs[::ds], x0, V0, F, H, Q, R, A, method="gridwise", estimation_length=10, estimation_interval=5, eta=1., cutoff=10., estimation_mode="forward") lslock.forward() # draw results dim=0 plt.figure(figsize=(8,5)) plt.scatter(np.arange(timestep//ds), obs[::ds,dim], label="obs", c="k") plt.plot(true[::ds,dim], label="true", c="cyan", ls="--") plt.plot(kf.get_filtered_value(dim), label="kf w/ EM") plt.plot(llock.get_filtered_value(dim), label="llock") plt.plot(slock.get_filtered_value(dim), label="slock") plt.plot(lslock.get_filtered_value(dim), label="lslock") plt.legend() plt.savefig(os.path.join(result_dir, "dim{}_estimated.pdf".format(dim)), bbox_inches="tight") fig, ax = plt.subplots(2,2,figsize=(10,10)) vmin, vmax = obs.min(), obs.max() sns.heatmap(true[::ds], cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,0]) sns.heatmap(llock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,1]) sns.heatmap(slock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,0]) sns.heatmap(lslock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,1]) ax[0,0].set_title("True") ax[0,1].set_title("LLOCK") ax[1,0].set_title("SLOCK") ax[1,1].set_title("LSLOCK") for i in range(2): for j in range(2): ax[i,j].set_xlabel("space") ax[i,j].set_ylabel("timestep") fig.savefig(os.path.join(result_dir, "estimated.pdf")) if __name__ == "__main__": main()
31.875
97
0.604482
import os, sys import numpy as np import matplotlib.pyplot as plt import seaborn as sns sys.path.append("..") from pyassim import KalmanFilter, LocalLOCK, SpatiallyUniformLOCK, LSLOCK,\ PeriodicAdvection, EulerScheme def main(): result_dir = "figures/advection" if not os.path.exists(result_dir): os.mkdir(result_dir) seed = 121 np.random.seed(seed) N = 20 x0 = np.exp(-(np.arange(N)-N//2)**2/20) dt = 0.01 dx = 1 c = 1 sys_sd = 0.001 obs_sd = 0.1 timestep = 10000 ds = 100 model = PeriodicAdvection(dx, c, dt, scheme="LW") scheme = EulerScheme(dt, timestep, model, seed=seed) true, obs = scheme.noise_added_simulation(x0, sys_sd, obs_sd) A = np.eye(N) A[np.arange(N-1), np.arange(1,N)] = 2 A[np.arange(1,N), np.arange(N-1)] = 3 A[0,-1] = 3 A[-1,0] = 2 F = np.eye(N) H = np.eye(N) Q = obs_sd**2 * np.eye(N) R = obs_sd**2 * np.eye(N) V0 = obs_sd**2 * np.eye(N) kf = KalmanFilter(obs[::ds], x0, V0, F, H, Q, R, em_vars=["transition_matrices"]) kf.em(n_iter=10) kf.forward() llock = LocalLOCK(obs[::ds], x0, V0, F, H, Q, R, A.astype(bool), method="elementwise", estimation_length=20, estimation_interval=5, eta=1.0, cutoff=10, estimation_mode="forward") llock.forward() slock = SpatiallyUniformLOCK(obs[::ds], x0, V0, F, H, Q, R, np.zeros(N), A, estimation_length=1, estimation_interval=1, eta=1., cutoff=10., estimation_mode="forward") slock.forward() lslock = LSLOCK(obs[::ds], x0, V0, F, H, Q, R, A, method="gridwise", estimation_length=10, estimation_interval=5, eta=1., cutoff=10., estimation_mode="forward") lslock.forward() dim=0 plt.figure(figsize=(8,5)) plt.scatter(np.arange(timestep//ds), obs[::ds,dim], label="obs", c="k") plt.plot(true[::ds,dim], label="true", c="cyan", ls="--") plt.plot(kf.get_filtered_value(dim), label="kf w/ EM") plt.plot(llock.get_filtered_value(dim), label="llock") plt.plot(slock.get_filtered_value(dim), label="slock") plt.plot(lslock.get_filtered_value(dim), label="lslock") plt.legend() plt.savefig(os.path.join(result_dir, "dim{}_estimated.pdf".format(dim)), bbox_inches="tight") fig, ax = plt.subplots(2,2,figsize=(10,10)) vmin, vmax = obs.min(), obs.max() sns.heatmap(true[::ds], cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,0]) sns.heatmap(llock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,1]) sns.heatmap(slock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,0]) sns.heatmap(lslock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,1]) ax[0,0].set_title("True") ax[0,1].set_title("LLOCK") ax[1,0].set_title("SLOCK") ax[1,1].set_title("LSLOCK") for i in range(2): for j in range(2): ax[i,j].set_xlabel("space") ax[i,j].set_ylabel("timestep") fig.savefig(os.path.join(result_dir, "estimated.pdf")) if __name__ == "__main__": main()
true
true
f70da43a235cc40f7aed50d280517caead9baa9c
1,104
py
Python
metric.py
jdgwartney/sdk
2a3b57b572ba5e8ad3a61b1d3da0ab328d3981fe
[ "Apache-2.0" ]
null
null
null
metric.py
jdgwartney/sdk
2a3b57b572ba5e8ad3a61b1d3da0ab328d3981fe
[ "Apache-2.0" ]
null
null
null
metric.py
jdgwartney/sdk
2a3b57b572ba5e8ad3a61b1d3da0ab328d3981fe
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # Copyright 2014 Boundary, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class Metric: def __init__(self): pass def setSource(self,source): self.source = source def getSource(self): return self.source def setName(self, name): self.name = name def getName(self): return self.name def setValue(self, value): self.value = value def getValue(self): return self.value def __str__(self): return "{} {} {}".format(self.name,self.value,self.source)
26.926829
74
0.661232
class Metric: def __init__(self): pass def setSource(self,source): self.source = source def getSource(self): return self.source def setName(self, name): self.name = name def getName(self): return self.name def setValue(self, value): self.value = value def getValue(self): return self.value def __str__(self): return "{} {} {}".format(self.name,self.value,self.source)
true
true
f70da5d5d5e7329b2bc1b2ba27f8e1e4a75f979e
20,260
py
Python
tensorflow_probability/python/distributions/mixture.py
jihunchoi/probability
685c5012eba03a23d1b849d35f5e8efe7fdc402d
[ "Apache-2.0" ]
1
2018-11-07T16:31:11.000Z
2018-11-07T16:31:11.000Z
tensorflow_probability/python/distributions/mixture.py
jihunchoi/probability
685c5012eba03a23d1b849d35f5e8efe7fdc402d
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/distributions/mixture.py
jihunchoi/probability
685c5012eba03a23d1b849d35f5e8efe7fdc402d
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """The Mixture distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import numpy as np import tensorflow as tf from tensorflow_probability.python.distributions import categorical from tensorflow_probability.python.distributions import distribution from tensorflow_probability.python.distributions import seed_stream from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import reparameterization from tensorflow.python.framework import tensor_util class Mixture(distribution.Distribution): """Mixture distribution. The `Mixture` object implements batched mixture distributions. The mixture model is defined by a `Categorical` distribution (the mixture) and a python list of `Distribution` objects. Methods supported include `log_prob`, `prob`, `mean`, `sample`, and `entropy_lower_bound`. #### Examples ```python # Create a mixture of two Gaussians: tfd = tfp.distributions mix = 0.3 bimix_gauss = tfd.Mixture( cat=tfd.Categorical(probs=[mix, 1.-mix]), components=[ tfd.Normal(loc=-1., scale=0.1), tfd.Normal(loc=+1., scale=0.5), ]) # Plot the PDF. import matplotlib.pyplot as plt x = tf.linspace(-2., 3., int(1e4)).eval() plt.plot(x, bimix_gauss.prob(x).eval()); ``` """ def __init__(self, cat, components, validate_args=False, allow_nan_stats=True, use_static_graph=False, name="Mixture"): """Initialize a Mixture distribution. A `Mixture` is defined by a `Categorical` (`cat`, representing the mixture probabilities) and a list of `Distribution` objects all having matching dtype, batch shape, event shape, and continuity properties (the components). The `num_classes` of `cat` must be possible to infer at graph construction time and match `len(components)`. Args: cat: A `Categorical` distribution instance, representing the probabilities of `distributions`. components: A list or tuple of `Distribution` instances. Each instance must have the same type, be defined on the same domain, and have matching `event_shape` and `batch_shape`. validate_args: Python `bool`, default `False`. If `True`, raise a runtime error if batch or event ranks are inconsistent between cat and any of the distributions. This is only checked if the ranks cannot be determined statically at graph construction time. allow_nan_stats: Boolean, default `True`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. use_static_graph: Calls to `sample` will not rely on dynamic tensor indexing, allowing for some static graph compilation optimizations, but at the expense of sampling all underlying distributions in the mixture. (Possibly useful when running on TPUs). Default value: `False` (i.e., use dynamic indexing). name: A name for this distribution (optional). Raises: TypeError: If cat is not a `Categorical`, or `components` is not a list or tuple, or the elements of `components` are not instances of `Distribution`, or do not have matching `dtype`. ValueError: If `components` is an empty list or tuple, or its elements do not have a statically known event rank. If `cat.num_classes` cannot be inferred at graph creation time, or the constant value of `cat.num_classes` is not equal to `len(components)`, or all `components` and `cat` do not have matching static batch shapes, or all components do not have matching static event shapes. """ parameters = dict(locals()) # TODO(b/117098119): Remove tf.distribution references once they're gone. if not isinstance(cat, categorical.Categorical) and not isinstance( cat, tf.distributions.Categorical): raise TypeError("cat must be a Categorical distribution, but saw: %s" % cat) if not components: raise ValueError("components must be a non-empty list or tuple") if not isinstance(components, (list, tuple)): raise TypeError("components must be a list or tuple, but saw: %s" % components) # TODO(b/117098119): Remove tf.distribution references once they're gone. if not all( isinstance(c, distribution.Distribution) or isinstance(cat, tf.distributions.Distribution) for c in components): raise TypeError( "all entries in components must be Distribution instances" " but saw: %s" % components) dtype = components[0].dtype if not all(d.dtype == dtype for d in components): raise TypeError("All components must have the same dtype, but saw " "dtypes: %s" % [(d.name, d.dtype) for d in components]) static_event_shape = components[0].event_shape static_batch_shape = cat.batch_shape for d in components: static_event_shape = static_event_shape.merge_with(d.event_shape) static_batch_shape = static_batch_shape.merge_with(d.batch_shape) if static_event_shape.ndims is None: raise ValueError( "Expected to know rank(event_shape) from components, but " "none of the components provide a static number of ndims") # Ensure that all batch and event ndims are consistent. with tf.name_scope(name, values=[cat.logits]) as name: num_components = cat.event_size static_num_components = tensor_util.constant_value(num_components) if static_num_components is None: raise ValueError( "Could not infer number of classes from cat and unable " "to compare this value to the number of components passed in.") # Possibly convert from numpy 0-D array. static_num_components = int(static_num_components) if static_num_components != len(components): raise ValueError("cat.num_classes != len(components): %d vs. %d" % (static_num_components, len(components))) cat_batch_shape = cat.batch_shape_tensor() cat_batch_rank = tf.size(cat_batch_shape) if validate_args: batch_shapes = [d.batch_shape_tensor() for d in components] batch_ranks = [tf.size(bs) for bs in batch_shapes] check_message = ("components[%d] batch shape must match cat " "batch shape") self._assertions = [ tf.assert_equal( cat_batch_rank, batch_ranks[di], message=check_message % di) for di in range(len(components)) ] self._assertions += [ tf.assert_equal( cat_batch_shape, batch_shapes[di], message=check_message % di) for di in range(len(components)) ] else: self._assertions = [] self._cat = cat self._components = list(components) self._num_components = static_num_components self._static_event_shape = static_event_shape self._static_batch_shape = static_batch_shape self._use_static_graph = use_static_graph if use_static_graph and static_num_components is None: raise ValueError("Number of categories must be known statically when " "`static_sample=True`.") # We let the Mixture distribution access _graph_parents since its arguably # more like a baseclass. graph_parents = self._cat._graph_parents # pylint: disable=protected-access for c in self._components: graph_parents += c._graph_parents # pylint: disable=protected-access super(Mixture, self).__init__( dtype=dtype, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=graph_parents, name=name) @property def cat(self): return self._cat @property def components(self): return self._components @property def num_components(self): return self._num_components def _batch_shape_tensor(self): return self._cat.batch_shape_tensor() def _batch_shape(self): return self._static_batch_shape def _event_shape_tensor(self): return self._components[0].event_shape_tensor() def _event_shape(self): return self._static_event_shape def _expand_to_event_rank(self, x): """Expand the rank of x up to static_event_rank times for broadcasting. The static event rank was checked to not be None at construction time. Args: x: A tensor to expand. Returns: The expanded tensor. """ expanded_x = x for _ in range(self.event_shape.ndims): expanded_x = tf.expand_dims(expanded_x, -1) return expanded_x def _mean(self): with tf.control_dependencies(self._assertions): distribution_means = [d.mean() for d in self.components] cat_probs = self._cat_probs(log_probs=False) cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs] partial_means = [ c_p * m for (c_p, m) in zip(cat_probs, distribution_means) ] # These should all be the same shape by virtue of matching # batch_shape and event_shape. return tf.add_n(partial_means) def _stddev(self): with tf.control_dependencies(self._assertions): distribution_means = [d.mean() for d in self.components] distribution_devs = [d.stddev() for d in self.components] cat_probs = self._cat_probs(log_probs=False) stacked_means = tf.stack(distribution_means, axis=-1) stacked_devs = tf.stack(distribution_devs, axis=-1) cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs] broadcasted_cat_probs = ( tf.stack(cat_probs, axis=-1) * tf.ones_like(stacked_means)) batched_dev = distribution_util.mixture_stddev( tf.reshape(broadcasted_cat_probs, [-1, len(self.components)]), tf.reshape(stacked_means, [-1, len(self.components)]), tf.reshape(stacked_devs, [-1, len(self.components)])) # I.e. re-shape to list(batch_shape) + list(event_shape). return tf.reshape(batched_dev, tf.shape(broadcasted_cat_probs)[:-1]) def _log_prob(self, x): with tf.control_dependencies(self._assertions): x = tf.convert_to_tensor(x, name="x") distribution_log_probs = [d.log_prob(x) for d in self.components] cat_log_probs = self._cat_probs(log_probs=True) final_log_probs = [ cat_lp + d_lp for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs) ] concat_log_probs = tf.stack(final_log_probs, 0) log_sum_exp = tf.reduce_logsumexp(concat_log_probs, [0]) return log_sum_exp def _log_cdf(self, x): with tf.control_dependencies(self._assertions): x = tf.convert_to_tensor(x, name="x") distribution_log_cdfs = [d.log_cdf(x) for d in self.components] cat_log_probs = self._cat_probs(log_probs=True) final_log_cdfs = [ cat_lp + d_lcdf for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs) ] concatted_log_cdfs = tf.stack(final_log_cdfs, axis=0) mixture_log_cdf = tf.reduce_logsumexp(concatted_log_cdfs, [0]) return mixture_log_cdf def _sample_n(self, n, seed=None): if self._use_static_graph: # This sampling approach is almost the same as the approach used by # `MixtureSameFamily`. The differences are due to having a list of # `Distribution` objects rather than a single object, and maintaining # random seed management that is consistent with the non-static code path. samples = [] cat_samples = self.cat.sample(n, seed=seed) stream = seed_stream.SeedStream(seed, salt="Mixture") for c in range(self.num_components): samples.append(self.components[c].sample(n, seed=stream())) x = tf.stack(samples, -self._static_event_shape.ndims - 1) # [n, B, k, E] npdt = x.dtype.as_numpy_dtype mask = tf.one_hot( indices=cat_samples, # [n, B] depth=self._num_components, # == k on_value=np.ones([], dtype=npdt), off_value=np.zeros([], dtype=npdt)) # [n, B, k] mask = distribution_util.pad_mixture_dimensions( mask, self, self._cat, self._static_event_shape.ndims) # [n, B, k, [1]*e] return tf.reduce_sum( x * mask, axis=-1 - self._static_event_shape.ndims) # [n, B, E] with tf.control_dependencies(self._assertions): n = tf.convert_to_tensor(n, name="n") static_n = tensor_util.constant_value(n) n = int(static_n) if static_n is not None else n cat_samples = self.cat.sample(n, seed=seed) static_samples_shape = cat_samples.shape if static_samples_shape.is_fully_defined(): samples_shape = static_samples_shape.as_list() samples_size = static_samples_shape.num_elements() else: samples_shape = tf.shape(cat_samples) samples_size = tf.size(cat_samples) static_batch_shape = self.batch_shape if static_batch_shape.is_fully_defined(): batch_shape = static_batch_shape.as_list() batch_size = static_batch_shape.num_elements() else: batch_shape = self.batch_shape_tensor() batch_size = tf.reduce_prod(batch_shape) static_event_shape = self.event_shape if static_event_shape.is_fully_defined(): event_shape = np.array(static_event_shape.as_list(), dtype=np.int32) else: event_shape = self.event_shape_tensor() # Get indices into the raw cat sampling tensor. We will # need these to stitch sample values back out after sampling # within the component partitions. samples_raw_indices = tf.reshape(tf.range(0, samples_size), samples_shape) # Partition the raw indices so that we can use # dynamic_stitch later to reconstruct the samples from the # known partitions. partitioned_samples_indices = tf.dynamic_partition( data=samples_raw_indices, partitions=cat_samples, num_partitions=self.num_components) # Copy the batch indices n times, as we will need to know # these to pull out the appropriate rows within the # component partitions. batch_raw_indices = tf.reshape( tf.tile(tf.range(0, batch_size), [n]), samples_shape) # Explanation of the dynamic partitioning below: # batch indices are i.e., [0, 1, 0, 1, 0, 1] # Suppose partitions are: # [1 1 0 0 1 1] # After partitioning, batch indices are cut as: # [batch_indices[x] for x in 2, 3] # [batch_indices[x] for x in 0, 1, 4, 5] # i.e. # [1 1] and [0 0 0 0] # Now we sample n=2 from part 0 and n=4 from part 1. # For part 0 we want samples from batch entries 1, 1 (samples 0, 1), # and for part 1 we want samples from batch entries 0, 0, 0, 0 # (samples 0, 1, 2, 3). partitioned_batch_indices = tf.dynamic_partition( data=batch_raw_indices, partitions=cat_samples, num_partitions=self.num_components) samples_class = [None for _ in range(self.num_components)] stream = seed_stream.SeedStream(seed, salt="Mixture") for c in range(self.num_components): n_class = tf.size(partitioned_samples_indices[c]) samples_class_c = self.components[c].sample( n_class, seed=stream()) # Pull out the correct batch entries from each index. # To do this, we may have to flatten the batch shape. # For sample s, batch element b of component c, we get the # partitioned batch indices from # partitioned_batch_indices[c]; and shift each element by # the sample index. The final lookup can be thought of as # a matrix gather along locations (s, b) in # samples_class_c where the n_class rows correspond to # samples within this component and the batch_size columns # correspond to batch elements within the component. # # Thus the lookup index is # lookup[c, i] = batch_size * s[i] + b[c, i] # for i = 0 ... n_class[c] - 1. lookup_partitioned_batch_indices = ( batch_size * tf.range(n_class) + partitioned_batch_indices[c]) samples_class_c = tf.reshape( samples_class_c, tf.concat([[n_class * batch_size], event_shape], 0)) samples_class_c = tf.gather( samples_class_c, lookup_partitioned_batch_indices, name="samples_class_c_gather") samples_class[c] = samples_class_c # Stitch back together the samples across the components. lhs_flat_ret = tf.dynamic_stitch( indices=partitioned_samples_indices, data=samples_class) # Reshape back to proper sample, batch, and event shape. ret = tf.reshape( lhs_flat_ret, tf.concat( [samples_shape, self.event_shape_tensor()], 0)) ret.set_shape( tf.TensorShape(static_samples_shape).concatenate(self.event_shape)) return ret def entropy_lower_bound(self, name="entropy_lower_bound"): r"""A lower bound on the entropy of this mixture model. The bound below is not always very tight, and its usefulness depends on the mixture probabilities and the components in use. A lower bound is useful for ELBO when the `Mixture` is the variational distribution: \\( \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q] \\) where \\( p \\) is the prior distribution, \\( q \\) is the variational, and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in place of \\( H[q] \\). For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a simple lower bound is: \\( \begin{align} H[q] & = - \int q(z) \log q(z) dz \\\ & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\ & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\ & = \sum_i c_i H[q_i] \end{align} \\) This is the term we calculate below for \\( G[q] \\). Args: name: A name for this operation (optional). Returns: A lower bound on the Mixture's entropy. """ with self._name_scope(name, values=[self.cat.logits]): with tf.control_dependencies(self._assertions): distribution_entropies = [d.entropy() for d in self.components] cat_probs = self._cat_probs(log_probs=False) partial_entropies = [ c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies) ] # These are all the same shape by virtue of matching batch_shape return tf.add_n(partial_entropies) def _cat_probs(self, log_probs): """Get a list of num_components batchwise probabilities.""" which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax cat_probs = which_softmax(self.cat.logits) cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1) return cat_probs
41.178862
80
0.666091
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_probability.python.distributions import categorical from tensorflow_probability.python.distributions import distribution from tensorflow_probability.python.distributions import seed_stream from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import reparameterization from tensorflow.python.framework import tensor_util class Mixture(distribution.Distribution): def __init__(self, cat, components, validate_args=False, allow_nan_stats=True, use_static_graph=False, name="Mixture"): parameters = dict(locals()) if not isinstance(cat, categorical.Categorical) and not isinstance( cat, tf.distributions.Categorical): raise TypeError("cat must be a Categorical distribution, but saw: %s" % cat) if not components: raise ValueError("components must be a non-empty list or tuple") if not isinstance(components, (list, tuple)): raise TypeError("components must be a list or tuple, but saw: %s" % components) # TODO(b/117098119): Remove tf.distribution references once they're gone. if not all( isinstance(c, distribution.Distribution) or isinstance(cat, tf.distributions.Distribution) for c in components): raise TypeError( "all entries in components must be Distribution instances" " but saw: %s" % components) dtype = components[0].dtype if not all(d.dtype == dtype for d in components): raise TypeError("All components must have the same dtype, but saw " "dtypes: %s" % [(d.name, d.dtype) for d in components]) static_event_shape = components[0].event_shape static_batch_shape = cat.batch_shape for d in components: static_event_shape = static_event_shape.merge_with(d.event_shape) static_batch_shape = static_batch_shape.merge_with(d.batch_shape) if static_event_shape.ndims is None: raise ValueError( "Expected to know rank(event_shape) from components, but " "none of the components provide a static number of ndims") with tf.name_scope(name, values=[cat.logits]) as name: num_components = cat.event_size static_num_components = tensor_util.constant_value(num_components) if static_num_components is None: raise ValueError( "Could not infer number of classes from cat and unable " "to compare this value to the number of components passed in.") static_num_components = int(static_num_components) if static_num_components != len(components): raise ValueError("cat.num_classes != len(components): %d vs. %d" % (static_num_components, len(components))) cat_batch_shape = cat.batch_shape_tensor() cat_batch_rank = tf.size(cat_batch_shape) if validate_args: batch_shapes = [d.batch_shape_tensor() for d in components] batch_ranks = [tf.size(bs) for bs in batch_shapes] check_message = ("components[%d] batch shape must match cat " "batch shape") self._assertions = [ tf.assert_equal( cat_batch_rank, batch_ranks[di], message=check_message % di) for di in range(len(components)) ] self._assertions += [ tf.assert_equal( cat_batch_shape, batch_shapes[di], message=check_message % di) for di in range(len(components)) ] else: self._assertions = [] self._cat = cat self._components = list(components) self._num_components = static_num_components self._static_event_shape = static_event_shape self._static_batch_shape = static_batch_shape self._use_static_graph = use_static_graph if use_static_graph and static_num_components is None: raise ValueError("Number of categories must be known statically when " "`static_sample=True`.") graph_parents = self._cat._graph_parents for c in self._components: graph_parents += c._graph_parents super(Mixture, self).__init__( dtype=dtype, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=graph_parents, name=name) @property def cat(self): return self._cat @property def components(self): return self._components @property def num_components(self): return self._num_components def _batch_shape_tensor(self): return self._cat.batch_shape_tensor() def _batch_shape(self): return self._static_batch_shape def _event_shape_tensor(self): return self._components[0].event_shape_tensor() def _event_shape(self): return self._static_event_shape def _expand_to_event_rank(self, x): expanded_x = x for _ in range(self.event_shape.ndims): expanded_x = tf.expand_dims(expanded_x, -1) return expanded_x def _mean(self): with tf.control_dependencies(self._assertions): distribution_means = [d.mean() for d in self.components] cat_probs = self._cat_probs(log_probs=False) cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs] partial_means = [ c_p * m for (c_p, m) in zip(cat_probs, distribution_means) ] return tf.add_n(partial_means) def _stddev(self): with tf.control_dependencies(self._assertions): distribution_means = [d.mean() for d in self.components] distribution_devs = [d.stddev() for d in self.components] cat_probs = self._cat_probs(log_probs=False) stacked_means = tf.stack(distribution_means, axis=-1) stacked_devs = tf.stack(distribution_devs, axis=-1) cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs] broadcasted_cat_probs = ( tf.stack(cat_probs, axis=-1) * tf.ones_like(stacked_means)) batched_dev = distribution_util.mixture_stddev( tf.reshape(broadcasted_cat_probs, [-1, len(self.components)]), tf.reshape(stacked_means, [-1, len(self.components)]), tf.reshape(stacked_devs, [-1, len(self.components)])) return tf.reshape(batched_dev, tf.shape(broadcasted_cat_probs)[:-1]) def _log_prob(self, x): with tf.control_dependencies(self._assertions): x = tf.convert_to_tensor(x, name="x") distribution_log_probs = [d.log_prob(x) for d in self.components] cat_log_probs = self._cat_probs(log_probs=True) final_log_probs = [ cat_lp + d_lp for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs) ] concat_log_probs = tf.stack(final_log_probs, 0) log_sum_exp = tf.reduce_logsumexp(concat_log_probs, [0]) return log_sum_exp def _log_cdf(self, x): with tf.control_dependencies(self._assertions): x = tf.convert_to_tensor(x, name="x") distribution_log_cdfs = [d.log_cdf(x) for d in self.components] cat_log_probs = self._cat_probs(log_probs=True) final_log_cdfs = [ cat_lp + d_lcdf for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs) ] concatted_log_cdfs = tf.stack(final_log_cdfs, axis=0) mixture_log_cdf = tf.reduce_logsumexp(concatted_log_cdfs, [0]) return mixture_log_cdf def _sample_n(self, n, seed=None): if self._use_static_graph: samples = [] cat_samples = self.cat.sample(n, seed=seed) stream = seed_stream.SeedStream(seed, salt="Mixture") for c in range(self.num_components): samples.append(self.components[c].sample(n, seed=stream())) x = tf.stack(samples, -self._static_event_shape.ndims - 1) npdt = x.dtype.as_numpy_dtype mask = tf.one_hot( indices=cat_samples, depth=self._num_components, on_value=np.ones([], dtype=npdt), off_value=np.zeros([], dtype=npdt)) mask = distribution_util.pad_mixture_dimensions( mask, self, self._cat, self._static_event_shape.ndims) return tf.reduce_sum( x * mask, axis=-1 - self._static_event_shape.ndims) with tf.control_dependencies(self._assertions): n = tf.convert_to_tensor(n, name="n") static_n = tensor_util.constant_value(n) n = int(static_n) if static_n is not None else n cat_samples = self.cat.sample(n, seed=seed) static_samples_shape = cat_samples.shape if static_samples_shape.is_fully_defined(): samples_shape = static_samples_shape.as_list() samples_size = static_samples_shape.num_elements() else: samples_shape = tf.shape(cat_samples) samples_size = tf.size(cat_samples) static_batch_shape = self.batch_shape if static_batch_shape.is_fully_defined(): batch_shape = static_batch_shape.as_list() batch_size = static_batch_shape.num_elements() else: batch_shape = self.batch_shape_tensor() batch_size = tf.reduce_prod(batch_shape) static_event_shape = self.event_shape if static_event_shape.is_fully_defined(): event_shape = np.array(static_event_shape.as_list(), dtype=np.int32) else: event_shape = self.event_shape_tensor() samples_raw_indices = tf.reshape(tf.range(0, samples_size), samples_shape) partitioned_samples_indices = tf.dynamic_partition( data=samples_raw_indices, partitions=cat_samples, num_partitions=self.num_components) batch_raw_indices = tf.reshape( tf.tile(tf.range(0, batch_size), [n]), samples_shape) partitioned_batch_indices = tf.dynamic_partition( data=batch_raw_indices, partitions=cat_samples, num_partitions=self.num_components) samples_class = [None for _ in range(self.num_components)] stream = seed_stream.SeedStream(seed, salt="Mixture") for c in range(self.num_components): n_class = tf.size(partitioned_samples_indices[c]) samples_class_c = self.components[c].sample( n_class, seed=stream()) lookup_partitioned_batch_indices = ( batch_size * tf.range(n_class) + partitioned_batch_indices[c]) samples_class_c = tf.reshape( samples_class_c, tf.concat([[n_class * batch_size], event_shape], 0)) samples_class_c = tf.gather( samples_class_c, lookup_partitioned_batch_indices, name="samples_class_c_gather") samples_class[c] = samples_class_c lhs_flat_ret = tf.dynamic_stitch( indices=partitioned_samples_indices, data=samples_class) ret = tf.reshape( lhs_flat_ret, tf.concat( [samples_shape, self.event_shape_tensor()], 0)) ret.set_shape( tf.TensorShape(static_samples_shape).concatenate(self.event_shape)) return ret def entropy_lower_bound(self, name="entropy_lower_bound"): with self._name_scope(name, values=[self.cat.logits]): with tf.control_dependencies(self._assertions): distribution_entropies = [d.entropy() for d in self.components] cat_probs = self._cat_probs(log_probs=False) partial_entropies = [ c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies) ] return tf.add_n(partial_entropies) def _cat_probs(self, log_probs): which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax cat_probs = which_softmax(self.cat.logits) cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1) return cat_probs
true
true
f70da7a4c157cdaccffb4cddb141f3fce3c4578b
3,690
py
Python
tests/selenium/test_scrolling.py
jbampton/dash-table
1e25a1296ccbe0f061cc791e259a3f37ed3fbed9
[ "MIT" ]
null
null
null
tests/selenium/test_scrolling.py
jbampton/dash-table
1e25a1296ccbe0f061cc791e259a3f37ed3fbed9
[ "MIT" ]
null
null
null
tests/selenium/test_scrolling.py
jbampton/dash-table
1e25a1296ccbe0f061cc791e259a3f37ed3fbed9
[ "MIT" ]
null
null
null
import dash import dash.testing.wait as wait from dash_table import DataTable import pandas as pd import pytest from selenium.webdriver.common.keys import Keys df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/solar.csv") base_props = dict( id="table", columns=[{"name": i, "id": i} for i in df.columns], row_selectable="single", row_deletable=True, data=df.to_dict("records"), editable=True, fixed_rows={"headers": True, "data": 1}, style_cell=dict(width=150), style_table=dict(width=500), ) def get_margin(test): return test.driver.execute_script( "return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-1')).marginLeft);" ) def get_scroll(test): return test.driver.execute_script( "return document.querySelector('#table .dt-table-container__row-1').scrollLeft;" ) def scroll_by(test, value): test.driver.execute_script( "document.querySelector('#table .dt-table-container__row-1').scrollBy({}, 0);".format( value ) ) @pytest.mark.parametrize( "fixed_rows", [dict(fixed_rows=dict(headers=True)), dict(fixed_rows=dict(headers=True, data=1))], ) @pytest.mark.parametrize( "fixed_columns", [ dict(), dict(fixed_columns=dict(headers=True)), dict(fixed_columns=dict(headers=True, data=1)), ], ) @pytest.mark.parametrize( "ops", [dict(), dict(row_selectable="single", row_deletable=True)] ) def test_scrol001_fixed_alignment(test, fixed_rows, fixed_columns, ops): props = {**base_props, **fixed_rows, **fixed_columns, **ops} app = dash.Dash(__name__) app.layout = DataTable(**props) test.start_server(app) target = test.table("table") assert target.is_ready() fixed_width = test.driver.execute_script( "return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-0')).width) || 0;" ) assert -get_margin(test) == fixed_width scroll_by(test, 200) wait.until( lambda: -get_margin(test) == fixed_width + 200, 3, ) scroll_by(test, -200) wait.until( lambda: -get_margin(test) == fixed_width, 3, ) assert test.get_log_errors() == [] @pytest.mark.parametrize( "fixed_rows", [dict(fixed_rows=dict(headers=True)), dict(fixed_rows=dict(headers=True, data=1))], ) @pytest.mark.parametrize( "fixed_columns", [ dict(), dict(fixed_columns=dict(headers=True)), dict(fixed_columns=dict(headers=True, data=1)), ], ) @pytest.mark.parametrize( "ops", [dict(), dict(row_selectable="single", row_deletable=True)] ) def test_scrol002_edit_navigate(test, fixed_rows, fixed_columns, ops): props = {**base_props, **fixed_rows, **fixed_columns, **ops} app = dash.Dash(__name__) app.layout = DataTable(**props) test.start_server(app) target = test.table("table") assert target.is_ready() fixed_width = test.driver.execute_script( "return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-0')).width) || 0;" ) scroll_by(test, 200) # alignment is ok after editing a cell target.cell(0, 3).click() test.send_keys("abc" + Keys.ENTER) wait.until(lambda: target.cell(1, 3).is_selected(), 3) wait.until(lambda: -get_margin(test) == fixed_width + get_scroll(test), 3) # alignment is ok after navigating test.send_keys(Keys.ARROW_DOWN) test.send_keys(Keys.ARROW_RIGHT) wait.until(lambda: target.cell(2, 4).is_selected(), 3) wait.until( lambda: -get_margin(test) == fixed_width + get_scroll(test), 3, ) assert test.get_log_errors() == []
26.73913
101
0.663957
import dash import dash.testing.wait as wait from dash_table import DataTable import pandas as pd import pytest from selenium.webdriver.common.keys import Keys df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/solar.csv") base_props = dict( id="table", columns=[{"name": i, "id": i} for i in df.columns], row_selectable="single", row_deletable=True, data=df.to_dict("records"), editable=True, fixed_rows={"headers": True, "data": 1}, style_cell=dict(width=150), style_table=dict(width=500), ) def get_margin(test): return test.driver.execute_script( "return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-1')).marginLeft);" ) def get_scroll(test): return test.driver.execute_script( "return document.querySelector('#table .dt-table-container__row-1').scrollLeft;" ) def scroll_by(test, value): test.driver.execute_script( "document.querySelector('#table .dt-table-container__row-1').scrollBy({}, 0);".format( value ) ) @pytest.mark.parametrize( "fixed_rows", [dict(fixed_rows=dict(headers=True)), dict(fixed_rows=dict(headers=True, data=1))], ) @pytest.mark.parametrize( "fixed_columns", [ dict(), dict(fixed_columns=dict(headers=True)), dict(fixed_columns=dict(headers=True, data=1)), ], ) @pytest.mark.parametrize( "ops", [dict(), dict(row_selectable="single", row_deletable=True)] ) def test_scrol001_fixed_alignment(test, fixed_rows, fixed_columns, ops): props = {**base_props, **fixed_rows, **fixed_columns, **ops} app = dash.Dash(__name__) app.layout = DataTable(**props) test.start_server(app) target = test.table("table") assert target.is_ready() fixed_width = test.driver.execute_script( "return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-0')).width) || 0;" ) assert -get_margin(test) == fixed_width scroll_by(test, 200) wait.until( lambda: -get_margin(test) == fixed_width + 200, 3, ) scroll_by(test, -200) wait.until( lambda: -get_margin(test) == fixed_width, 3, ) assert test.get_log_errors() == [] @pytest.mark.parametrize( "fixed_rows", [dict(fixed_rows=dict(headers=True)), dict(fixed_rows=dict(headers=True, data=1))], ) @pytest.mark.parametrize( "fixed_columns", [ dict(), dict(fixed_columns=dict(headers=True)), dict(fixed_columns=dict(headers=True, data=1)), ], ) @pytest.mark.parametrize( "ops", [dict(), dict(row_selectable="single", row_deletable=True)] ) def test_scrol002_edit_navigate(test, fixed_rows, fixed_columns, ops): props = {**base_props, **fixed_rows, **fixed_columns, **ops} app = dash.Dash(__name__) app.layout = DataTable(**props) test.start_server(app) target = test.table("table") assert target.is_ready() fixed_width = test.driver.execute_script( "return parseFloat(getComputedStyle(document.querySelector('#table .cell-0-0')).width) || 0;" ) scroll_by(test, 200) target.cell(0, 3).click() test.send_keys("abc" + Keys.ENTER) wait.until(lambda: target.cell(1, 3).is_selected(), 3) wait.until(lambda: -get_margin(test) == fixed_width + get_scroll(test), 3) test.send_keys(Keys.ARROW_DOWN) test.send_keys(Keys.ARROW_RIGHT) wait.until(lambda: target.cell(2, 4).is_selected(), 3) wait.until( lambda: -get_margin(test) == fixed_width + get_scroll(test), 3, ) assert test.get_log_errors() == []
true
true
f70da8d03af30456f249aa0bd8107f864cb6503c
860
py
Python
fabfile/france.local.py
cryptobioz/addok
4a6b2e935b144f3672a6ae66c9bdf4c0b321976d
[ "MIT" ]
215
2016-01-29T08:37:56.000Z
2022-03-28T06:28:41.000Z
fabfile/france.local.py
bendathierrycom/addok
07346046ed53993d8e2b66262f52d505f26f5ba9
[ "MIT" ]
487
2016-01-13T10:11:34.000Z
2022-03-31T10:56:24.000Z
fabfile/france.local.py
bendathierrycom/addok
07346046ed53993d8e2b66262f52d505f26f5ba9
[ "MIT" ]
52
2016-01-12T13:10:28.000Z
2022-03-24T15:45:39.000Z
QUERY_PROCESSORS_PYPATHS = [ 'addok.helpers.text.check_query_length', "addok_france.extract_address", "addok_france.clean_query", "addok_france.remove_leading_zeros", ] SEARCH_RESULT_PROCESSORS_PYPATHS = [ "addok.helpers.results.match_housenumber", "addok_france.make_labels", "addok.helpers.results.score_by_importance", "addok.helpers.results.score_by_autocomplete_distance", "addok.helpers.results.score_by_str_distance", "addok.helpers.results.score_by_geo_distance", "addok.helpers.results.adjust_scores", ] PROCESSORS_PYPATHS = [ "addok.helpers.text.tokenize", "addok.helpers.text.normalize", "addok_france.glue_ordinal", "addok_france.fold_ordinal", "addok_france.flag_housenumber", "addok.helpers.text.synonymize", "addok_fr.phonemicize", ] SQLITE_DB_PATH = '/srv/addok/addok.db'
33.076923
59
0.752326
QUERY_PROCESSORS_PYPATHS = [ 'addok.helpers.text.check_query_length', "addok_france.extract_address", "addok_france.clean_query", "addok_france.remove_leading_zeros", ] SEARCH_RESULT_PROCESSORS_PYPATHS = [ "addok.helpers.results.match_housenumber", "addok_france.make_labels", "addok.helpers.results.score_by_importance", "addok.helpers.results.score_by_autocomplete_distance", "addok.helpers.results.score_by_str_distance", "addok.helpers.results.score_by_geo_distance", "addok.helpers.results.adjust_scores", ] PROCESSORS_PYPATHS = [ "addok.helpers.text.tokenize", "addok.helpers.text.normalize", "addok_france.glue_ordinal", "addok_france.fold_ordinal", "addok_france.flag_housenumber", "addok.helpers.text.synonymize", "addok_fr.phonemicize", ] SQLITE_DB_PATH = '/srv/addok/addok.db'
true
true
f70da9883e7bde35729298c9faac1caca164c89e
418
py
Python
samples/python/54.teams-task-module/models/__init__.py
Aliacf21/BotBuilder-Samples
be48548edafd4efdc074f5a59ef2bb3af735ad9a
[ "MIT" ]
1,998
2019-05-07T06:33:22.000Z
2022-03-31T12:59:15.000Z
samples/python/54.teams-task-module/models/__init__.py
Aliacf21/BotBuilder-Samples
be48548edafd4efdc074f5a59ef2bb3af735ad9a
[ "MIT" ]
1,526
2020-09-05T18:57:14.000Z
2020-12-03T01:45:40.000Z
samples/python/54.teams-task-module/models/__init__.py
stevkan/BotBuilder-Samples
75a21b412d8873906bed3460f7c5f0940a067d58
[ "MIT" ]
2,820
2016-09-21T03:47:43.000Z
2019-05-03T15:12:46.000Z
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from .task_module_ids import TaskModuleIds from .task_module_response_factory import TaskModuleResponseFactory from .task_module_ui_constants import TaskModuleUIConstants from .ui_settings import UISettings __all__ = [ "TaskModuleIds", "TaskModuleResponseFactory", "TaskModuleUIConstants", "UISettings", ]
27.866667
67
0.803828
from .task_module_ids import TaskModuleIds from .task_module_response_factory import TaskModuleResponseFactory from .task_module_ui_constants import TaskModuleUIConstants from .ui_settings import UISettings __all__ = [ "TaskModuleIds", "TaskModuleResponseFactory", "TaskModuleUIConstants", "UISettings", ]
true
true
f70da99aaeb151784da471888218b15cb30313ac
4,494
py
Python
azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/application_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/application_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/application_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2020-07-25T20:36:02.000Z
2020-07-25T20:36:02.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .proxy_resource import ProxyResource class ApplicationResource(ProxyResource): """The application resource. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Azure resource ID. :vartype id: str :ivar name: Azure resource name. :vartype name: str :ivar type: Azure resource type. :vartype type: str :param location: Required. Resource location. :type location: str :param type_version: :type type_version: str :param parameters: :type parameters: list[~azure.mgmt.servicefabric.models.ApplicationParameter] :param upgrade_policy: :type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity for this application. Note that this does not mean that the services of this application will be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. The value of this property cannot be more than the value of the MaximumNodes property. :type minimum_nodes: long :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity for this application. Note that this does not mean that the services of this application will be placed on all of those nodes. By default, the value of this property is zero and it means that the services can be placed on any node. Default value: 0 . :type maximum_nodes: long :param remove_application_capacity: The version of the application type :type remove_application_capacity: bool :param metrics: :type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription] :ivar provisioning_state: The current deployment or provisioning state, which only appears in the response :vartype provisioning_state: str :param type_name: :type type_name: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, 'minimum_nodes': {'minimum': 0}, 'maximum_nodes': {'minimum': 0}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'type_version': {'key': 'properties.typeVersion', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ApplicationParameter]'}, 'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'}, 'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'}, 'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'}, 'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'}, 'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'type_name': {'key': 'properties.typeName', 'type': 'str'}, } def __init__(self, **kwargs): super(ApplicationResource, self).__init__(**kwargs) self.type_version = kwargs.get('type_version', None) self.parameters = kwargs.get('parameters', None) self.upgrade_policy = kwargs.get('upgrade_policy', None) self.minimum_nodes = kwargs.get('minimum_nodes', None) self.maximum_nodes = kwargs.get('maximum_nodes', 0) self.remove_application_capacity = kwargs.get('remove_application_capacity', None) self.metrics = kwargs.get('metrics', None) self.provisioning_state = None self.type_name = kwargs.get('type_name', None)
44.49505
103
0.653761
from .proxy_resource import ProxyResource class ApplicationResource(ProxyResource): _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, 'minimum_nodes': {'minimum': 0}, 'maximum_nodes': {'minimum': 0}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'type_version': {'key': 'properties.typeVersion', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ApplicationParameter]'}, 'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'}, 'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'}, 'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'}, 'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'}, 'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'type_name': {'key': 'properties.typeName', 'type': 'str'}, } def __init__(self, **kwargs): super(ApplicationResource, self).__init__(**kwargs) self.type_version = kwargs.get('type_version', None) self.parameters = kwargs.get('parameters', None) self.upgrade_policy = kwargs.get('upgrade_policy', None) self.minimum_nodes = kwargs.get('minimum_nodes', None) self.maximum_nodes = kwargs.get('maximum_nodes', 0) self.remove_application_capacity = kwargs.get('remove_application_capacity', None) self.metrics = kwargs.get('metrics', None) self.provisioning_state = None self.type_name = kwargs.get('type_name', None)
true
true
f70daa0be86e858ce0a00ff6a9d7ce0503ecbc63
3,143
py
Python
ImageNet/benchmark-dataflow.py
tensorpack/benchmarks
7c97174f9a00e440d60c38b54d4c45f58271fc3e
[ "Unlicense" ]
93
2017-09-28T00:48:47.000Z
2022-03-31T07:25:06.000Z
ImageNet/benchmark-dataflow.py
tensorpack/benchmarks
7c97174f9a00e440d60c38b54d4c45f58271fc3e
[ "Unlicense" ]
8
2018-02-26T14:18:50.000Z
2019-12-12T00:15:26.000Z
ImageNet/benchmark-dataflow.py
tensorpack/benchmarks
7c97174f9a00e440d60c38b54d4c45f58271fc3e
[ "Unlicense" ]
38
2017-09-28T01:43:34.000Z
2022-02-11T09:23:53.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: benchmark-dataflow.py import argparse import cv2 from tensorpack import * from tensorpack.dataflow.imgaug import * from tensorpack.dataflow.parallel import PlasmaGetData, PlasmaPutData # noqa from tensorpack.utils.serialize import loads import augmentors def test_orig(dir, name, augs, batch): ds = dataset.ILSVRC12(dir, name, shuffle=True) ds = AugmentImageComponent(ds, augs) ds = BatchData(ds, batch) # ds = PlasmaPutData(ds) ds = MultiProcessRunnerZMQ(ds, 50, hwm=80) # ds = PlasmaGetData(ds) return ds def test_lmdb_train(db, augs, batch): ds = LMDBData(db, shuffle=False) ds = LocallyShuffleData(ds, 50000) ds = MultiProcessRunner(ds, 5000, 1) return ds ds = LMDBDataPoint(ds) def f(x): return cv2.imdecode(x, cv2.IMREAD_COLOR) ds = MapDataComponent(ds, f, 0) ds = AugmentImageComponent(ds, augs) ds = BatchData(ds, batch, use_list=True) # ds = PlasmaPutData(ds) ds = MultiProcessRunnerZMQ(ds, 40, hwm=80) # ds = PlasmaGetData(ds) return ds def test_lmdb_inference(db, augs, batch): ds = LMDBData(db, shuffle=False) # ds = LocallyShuffleData(ds, 50000) augs = AugmentorList(augs) def mapper(data): im, label = loads(data[1]) im = cv2.imdecode(im, cv2.IMREAD_COLOR) im = augs.augment(im) return im, label ds = MultiProcessMapData(ds, 40, mapper, buffer_size=200) # ds = MultiThreadMapData(ds, 40, mapper, buffer_size=2000) ds = BatchData(ds, batch) ds = MultiProcessRunnerZMQ(ds, 1) return ds def test_inference(dir, name, augs, batch=128): ds = dataset.ILSVRC12Files(dir, name, shuffle=False, dir_structure='train') aug = imgaug.AugmentorList(augs) def mapf(dp): fname, cls = dp im = cv2.imread(fname, cv2.IMREAD_COLOR) im = aug.augment(im) return im, cls ds = MultiThreadMapData(ds, 30, mapf, buffer_size=2000, strict=True) ds = BatchData(ds, batch) ds = MultiProcessRunnerZMQ(ds, 1) return ds if __name__ == '__main__': available_augmentors = [ k[:-len("_augmentor")] for k in augmentors.__all__ if k.endswith('_augmentor')] parser = argparse.ArgumentParser() parser.add_argument('data', help='file or directory of dataset') parser.add_argument('--batch', type=int, default=64) parser.add_argument('--name', choices=['train', 'val'], default='train') parser.add_argument('--aug', choices=available_augmentors, required=True) args = parser.parse_args() augs = getattr(augmentors, args.aug + '_augmentor')() if args.data.endswith('lmdb'): if args.name == 'train': ds = test_lmdb_train(args.data, augs, args.batch) else: ds = test_lmdb_inference(args.data, augs, args.batch) else: if args.name == 'train': ds = test_orig(args.data, args.name, augs, args.batch) else: ds = test_inference(args.data, args.name, augs, args.batch) TestDataSpeed(ds, 500000, warmup=100).start()
29.101852
79
0.647471
import argparse import cv2 from tensorpack import * from tensorpack.dataflow.imgaug import * from tensorpack.dataflow.parallel import PlasmaGetData, PlasmaPutData from tensorpack.utils.serialize import loads import augmentors def test_orig(dir, name, augs, batch): ds = dataset.ILSVRC12(dir, name, shuffle=True) ds = AugmentImageComponent(ds, augs) ds = BatchData(ds, batch) ds = MultiProcessRunnerZMQ(ds, 50, hwm=80) return ds def test_lmdb_train(db, augs, batch): ds = LMDBData(db, shuffle=False) ds = LocallyShuffleData(ds, 50000) ds = MultiProcessRunner(ds, 5000, 1) return ds ds = LMDBDataPoint(ds) def f(x): return cv2.imdecode(x, cv2.IMREAD_COLOR) ds = MapDataComponent(ds, f, 0) ds = AugmentImageComponent(ds, augs) ds = BatchData(ds, batch, use_list=True) ds = MultiProcessRunnerZMQ(ds, 40, hwm=80) return ds def test_lmdb_inference(db, augs, batch): ds = LMDBData(db, shuffle=False) augs = AugmentorList(augs) def mapper(data): im, label = loads(data[1]) im = cv2.imdecode(im, cv2.IMREAD_COLOR) im = augs.augment(im) return im, label ds = MultiProcessMapData(ds, 40, mapper, buffer_size=200) ds = BatchData(ds, batch) ds = MultiProcessRunnerZMQ(ds, 1) return ds def test_inference(dir, name, augs, batch=128): ds = dataset.ILSVRC12Files(dir, name, shuffle=False, dir_structure='train') aug = imgaug.AugmentorList(augs) def mapf(dp): fname, cls = dp im = cv2.imread(fname, cv2.IMREAD_COLOR) im = aug.augment(im) return im, cls ds = MultiThreadMapData(ds, 30, mapf, buffer_size=2000, strict=True) ds = BatchData(ds, batch) ds = MultiProcessRunnerZMQ(ds, 1) return ds if __name__ == '__main__': available_augmentors = [ k[:-len("_augmentor")] for k in augmentors.__all__ if k.endswith('_augmentor')] parser = argparse.ArgumentParser() parser.add_argument('data', help='file or directory of dataset') parser.add_argument('--batch', type=int, default=64) parser.add_argument('--name', choices=['train', 'val'], default='train') parser.add_argument('--aug', choices=available_augmentors, required=True) args = parser.parse_args() augs = getattr(augmentors, args.aug + '_augmentor')() if args.data.endswith('lmdb'): if args.name == 'train': ds = test_lmdb_train(args.data, augs, args.batch) else: ds = test_lmdb_inference(args.data, augs, args.batch) else: if args.name == 'train': ds = test_orig(args.data, args.name, augs, args.batch) else: ds = test_inference(args.data, args.name, augs, args.batch) TestDataSpeed(ds, 500000, warmup=100).start()
true
true
f70dab9e7f2aa449224fcbce100d80d3ed025ca8
918
py
Python
hummingbot/strategy/dev_1_get_order_book/start.py
BGTCapital/hummingbot
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
[ "Apache-2.0" ]
3,027
2019-04-04T18:52:17.000Z
2022-03-30T09:38:34.000Z
hummingbot/strategy/dev_1_get_order_book/start.py
BGTCapital/hummingbot
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
[ "Apache-2.0" ]
4,080
2019-04-04T19:51:11.000Z
2022-03-31T23:45:21.000Z
hummingbot/strategy/dev_1_get_order_book/start.py
BGTCapital/hummingbot
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
[ "Apache-2.0" ]
1,342
2019-04-04T20:50:53.000Z
2022-03-31T15:22:36.000Z
#!/usr/bin/env python from hummingbot.connector.exchange_base import ExchangeBase from hummingbot.strategy.dev_1_get_order_book import GetOrderBookStrategy from hummingbot.strategy.dev_1_get_order_book.dev_1_get_order_book_config_map import dev_1_get_order_book_config_map def start(self): try: exchange: str = dev_1_get_order_book_config_map.get("exchange").value.lower() trading_pair: str = dev_1_get_order_book_config_map.get("trading_pair").value self._initialize_markets([(exchange, [trading_pair])]) exchange: ExchangeBase = self.markets[exchange] self.strategy = GetOrderBookStrategy(exchange=exchange, trading_pair=trading_pair, ) except Exception as e: self._notify(str(e)) self.logger().error("Unknown error during initialization.", exc_info=True)
39.913043
116
0.6939
from hummingbot.connector.exchange_base import ExchangeBase from hummingbot.strategy.dev_1_get_order_book import GetOrderBookStrategy from hummingbot.strategy.dev_1_get_order_book.dev_1_get_order_book_config_map import dev_1_get_order_book_config_map def start(self): try: exchange: str = dev_1_get_order_book_config_map.get("exchange").value.lower() trading_pair: str = dev_1_get_order_book_config_map.get("trading_pair").value self._initialize_markets([(exchange, [trading_pair])]) exchange: ExchangeBase = self.markets[exchange] self.strategy = GetOrderBookStrategy(exchange=exchange, trading_pair=trading_pair, ) except Exception as e: self._notify(str(e)) self.logger().error("Unknown error during initialization.", exc_info=True)
true
true
f70dac7a361dd95663ebe981f4c4b619c042eb36
2,347
py
Python
tests/msk_test.py
vigorIv2/spot
55e3a5a0b99fe959e95d7225a3bada857752e9fa
[ "MIT" ]
1
2021-05-03T18:35:30.000Z
2021-05-03T18:35:30.000Z
tests/msk_test.py
vigorIv2/spot
55e3a5a0b99fe959e95d7225a3bada857752e9fa
[ "MIT" ]
1
2022-02-16T00:55:15.000Z
2022-02-16T00:55:15.000Z
tests/msk_test.py
vigorIv2/spot
55e3a5a0b99fe959e95d7225a3bada857752e9fa
[ "MIT" ]
null
null
null
import logging import sys import time import datetime import unittest import spot_db from spot_msk import SpotMsk import json, requests import logging, logging.config, yaml logging.config.dictConfig(yaml.load(open('logging.conf'))) logfl = logging.getLogger('file') logconsole = logging.getLogger('console') logfl.debug("Debug FILE") logconsole.debug("Debug CONSOLE") class TestAccess(unittest.TestCase): def echo_elapsed_time(self): elapsed = time.time() - self._started_at elapsed_step = time.time() - self._step_started_at self._total_steps_cnt += 1.0 self._total_steps_elapsed += elapsed_step avg_elapsed = self._total_steps_elapsed / self._total_steps_cnt logging.info("total_elapsed=" + str(round(elapsed, 2)) + " step_elapsed=" + str(round(elapsed_step, 2)) + " avg_elapsed=" + str(round(avg_elapsed, 2))) def echo(self,r): logging.info("response=" + str(r)) logging.info("response.headers=" + str(r.headers)) logging.info("response.text=" + str(r.text)) self.echo_elapsed_time() @classmethod def setUpClass(self): self._started_at = time.time() self._total_steps_cnt = 0 self._total_steps_elapsed = 0 self.msk = SpotMsk() logging.info('executing setUpClass') def test_00_msk_parking(self): self.msk.get_datasets() def test_01_msk_622(self): self.msk.traverse_dataset(622) def test_01_parking_datasets(self): dss = self.msk.get_datasets() cnt = 0 for ds in sorted(dss): cnt += self.msk.traverse_dataset(ds) logging.info('total datasets '+str(cnt)) @classmethod def tearDownClass(self): logging.info('executing tearDownClass') self._step_started_at = time.time() elapsed = time.time() - self._started_at elapsed_step = time.time() - self._step_started_at self._total_steps_cnt += 1.0 self._total_steps_elapsed += elapsed_step avg_elapsed = self._total_steps_elapsed / self._total_steps_cnt logging.info("total_elapsed=" + str(round(elapsed, 2)) + " step_elapsed=" + str(round(elapsed_step, 2)) + " avg_elapsed=" + str(round(avg_elapsed, 2))) logging.info('executed tearDownClass') if __name__ == '__main__': unittest.main()
32.597222
159
0.668087
import logging import sys import time import datetime import unittest import spot_db from spot_msk import SpotMsk import json, requests import logging, logging.config, yaml logging.config.dictConfig(yaml.load(open('logging.conf'))) logfl = logging.getLogger('file') logconsole = logging.getLogger('console') logfl.debug("Debug FILE") logconsole.debug("Debug CONSOLE") class TestAccess(unittest.TestCase): def echo_elapsed_time(self): elapsed = time.time() - self._started_at elapsed_step = time.time() - self._step_started_at self._total_steps_cnt += 1.0 self._total_steps_elapsed += elapsed_step avg_elapsed = self._total_steps_elapsed / self._total_steps_cnt logging.info("total_elapsed=" + str(round(elapsed, 2)) + " step_elapsed=" + str(round(elapsed_step, 2)) + " avg_elapsed=" + str(round(avg_elapsed, 2))) def echo(self,r): logging.info("response=" + str(r)) logging.info("response.headers=" + str(r.headers)) logging.info("response.text=" + str(r.text)) self.echo_elapsed_time() @classmethod def setUpClass(self): self._started_at = time.time() self._total_steps_cnt = 0 self._total_steps_elapsed = 0 self.msk = SpotMsk() logging.info('executing setUpClass') def test_00_msk_parking(self): self.msk.get_datasets() def test_01_msk_622(self): self.msk.traverse_dataset(622) def test_01_parking_datasets(self): dss = self.msk.get_datasets() cnt = 0 for ds in sorted(dss): cnt += self.msk.traverse_dataset(ds) logging.info('total datasets '+str(cnt)) @classmethod def tearDownClass(self): logging.info('executing tearDownClass') self._step_started_at = time.time() elapsed = time.time() - self._started_at elapsed_step = time.time() - self._step_started_at self._total_steps_cnt += 1.0 self._total_steps_elapsed += elapsed_step avg_elapsed = self._total_steps_elapsed / self._total_steps_cnt logging.info("total_elapsed=" + str(round(elapsed, 2)) + " step_elapsed=" + str(round(elapsed_step, 2)) + " avg_elapsed=" + str(round(avg_elapsed, 2))) logging.info('executed tearDownClass') if __name__ == '__main__': unittest.main()
true
true
f70dacbfa4317925c8e12fd040862ec22b3790b3
12,105
py
Python
robinhoodbot/main.py
connorkerry/RobinhoodBot
6a1e1733d900abfc00a8e6fff1cf48184af4edc3
[ "MIT" ]
null
null
null
robinhoodbot/main.py
connorkerry/RobinhoodBot
6a1e1733d900abfc00a8e6fff1cf48184af4edc3
[ "MIT" ]
null
null
null
robinhoodbot/main.py
connorkerry/RobinhoodBot
6a1e1733d900abfc00a8e6fff1cf48184af4edc3
[ "MIT" ]
null
null
null
import robin_stocks as r import pandas as pd import numpy as np import ta as ta from pandas.plotting import register_matplotlib_converters from ta import * from misc import * from tradingstats import * #Log in to Robinhood login = r.login('YOUR_EMAIL','YOUR_PASSWORD') #Safe divide by zero division function def safe_division(n, d): return n / d if d else 0 def get_watchlist_symbols(): """ Returns: the symbol for each stock in your watchlist as a list of strings """ my_list_names = [] symbols = [] for name in r.get_all_watchlists(info='name'): my_list_names.append(name) for name in my_list_names: list = r.get_watchlist_by_name(name) for item in list: instrument_data = r.get_instrument_by_url(item.get('instrument')) symbol = instrument_data['symbol'] symbols.append(symbol) return symbols def get_portfolio_symbols(): """ Returns: the symbol for each stock in your portfolio as a list of strings """ symbols = [] holdings_data = r.get_open_stock_positions() for item in holdings_data: if not item: continue instrument_data = r.get_instrument_by_url(item.get('instrument')) symbol = instrument_data['symbol'] symbols.append(symbol) return symbols def get_position_creation_date(symbol, holdings_data): """Returns the time at which we bought a certain stock in our portfolio Args: symbol(str): Symbol of the stock that we are trying to figure out when it was bought holdings_data(dict): dict returned by r.get_open_stock_positions() Returns: A string containing the date and time the stock was bought, or "Not found" otherwise """ instrument = r.get_instruments_by_symbols(symbol) url = instrument[0].get('url') for dict in holdings_data: if(dict.get('instrument') == url): return dict.get('created_at') return "Not found" def get_modified_holdings(): """ Retrieves the same dictionary as r.build_holdings, but includes data about when the stock was purchased, which is useful for the read_trade_history() method in tradingstats.py Returns: the same dict from r.build_holdings, but with an extra key-value pair for each position you have, which is 'bought_at': (the time the stock was purchased) """ holdings = r.build_holdings() holdings_data = r.get_open_stock_positions() for symbol, dict in holdings.items(): bought_at = get_position_creation_date(symbol, holdings_data) bought_at = str(pd.to_datetime(bought_at)) holdings[symbol].update({'bought_at': bought_at}) return holdings def get_last_crossing(df, days, symbol="", direction=""): """Searches for a crossing between two indicators for a given stock Args: df(pandas.core.frame.DataFrame): Pandas dataframe with columns containing the stock's prices, both indicators, and the dates days(int): Specifies the maximum number of days that the cross can occur by symbol(str): Symbol of the stock we're querying. Optional, used for printing purposes direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwaords cross. Optional, used for printing purposes Returns: 1 if the short-term indicator crosses above the long-term one 0 if there is no cross between the indicators -1 if the short-term indicator crosses below the long-term one """ prices = df.loc[:,"Price"] shortTerm = df.loc[:,"Indicator1"] LongTerm = df.loc[:,"Indicator2"] dates = df.loc[:,"Dates"] lastIndex = prices.size - 1 index = lastIndex found = index recentDiff = (shortTerm.at[index] - LongTerm.at[index]) >= 0 if((direction == "above" and not recentDiff) or (direction == "below" and recentDiff)): return 0 index -= 1 while(index >= 0 and found == lastIndex and not np.isnan(shortTerm.at[index]) and not np.isnan(LongTerm.at[index]) \ and ((pd.Timestamp("now", tz='UTC') - dates.at[index]) <= pd.Timedelta(str(days) + " days"))): if(recentDiff): if((shortTerm.at[index] - LongTerm.at[index]) < 0): found = index else: if((shortTerm.at[index] - LongTerm.at[index]) > 0): found = index index -= 1 if(found != lastIndex): if((direction == "above" and recentDiff) or (direction == "below" and not recentDiff)): print(symbol + ": Short SMA crossed" + (" ABOVE " if recentDiff else " BELOW ") + "Long SMA at " + str(dates.at[found]) \ +", which was " + str(pd.Timestamp("now", tz='UTC') - dates.at[found]) + " ago", ", price at cross: " + str(prices.at[found]) \ + ", current price: " + str(prices.at[lastIndex])) return (1 if recentDiff else -1) else: return 0 def five_year_check(stockTicker): """Figure out if a stock has risen or been created within the last five years. Args: stockTicker(str): Symbol of the stock we're querying Returns: True if the stock's current price is higher than it was five years ago, or the stock IPO'd within the last five years False otherwise """ instrument = r.get_instruments_by_symbols(stockTicker) list_date = instrument[0].get("list_date") if ((pd.Timestamp("now") - pd.to_datetime(list_date)) < pd.Timedelta("5 Y")): return True fiveyear = r.get_historicals(stockTicker,span='5year',bounds='regular') closingPrices = [] for item in fiveyear: closingPrices.append(float(item['close_price'])) recent_price = closingPrices[len(closingPrices) - 1] oldest_price = closingPrices[0] return (recent_price > oldest_price) def golden_cross(stockTicker, n1, n2, days, direction=""): """Determine if a golden/death cross has occured for a specified stock in the last X trading days Args: stockTicker(str): Symbol of the stock we're querying n1(int): Specifies the short-term indicator as an X-day moving average. n2(int): Specifies the long-term indicator as an X-day moving average. (n1 should be smaller than n2 to produce meaningful results, e.g n1=50, n2=200) days(int): Specifies the maximum number of days that the cross can occur by direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwaords cross. Optional, used for printing purposes Returns: 1 if the short-term indicator crosses above the long-term one 0 if there is no cross between the indicators -1 if the short-term indicator crosses below the long-term one False if direction == "above" and five_year_check(stockTicker) returns False, meaning that we're considering whether to buy the stock but it hasn't risen overall in the last five years, suggesting it contains fundamental issues """ if(direction == "above" and not five_year_check(stockTicker)): return False history = r.get_historicals(stockTicker,span='year',bounds='regular') closingPrices = [] dates = [] for item in history: closingPrices.append(float(item['close_price'])) dates.append(item['begins_at']) price = pd.Series(closingPrices) dates = pd.Series(dates) dates = pd.to_datetime(dates) sma1 = ta.volatility.bollinger_mavg(price, n=int(n1), fillna=False) sma2 = ta.volatility.bollinger_mavg(price, n=int(n2), fillna=False) series = [price.rename("Price"), sma1.rename("Indicator1"), sma2.rename("Indicator2"), dates.rename("Dates")] df = pd.concat(series, axis=1) cross = get_last_crossing(df, days, symbol=stockTicker, direction=direction) # if(cross): # show_plot(price, sma1, sma2, dates, symbol=stockTicker, label1=str(n1)+" day SMA", label2=str(n2)+" day SMA") return cross def sell_holdings(symbol, holdings_data): """ Place an order to sell all holdings of a stock. Args: symbol(str): Symbol of the stock we want to sell holdings_data(dict): dict obtained from get_modified_holdings() method """ shares_owned = int(float(holdings_data[symbol].get("quantity"))) r.order_sell_market(symbol, shares_owned) print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######") def buy_holdings(potential_buys, profile_data, holdings_data): """ Places orders to buy holdings of stocks. This method will try to order an appropriate amount of shares such that your holdings of the stock will roughly match the average for the rest of your portfoilio. If the share price is too high considering the rest of your holdings and the amount of buying power in your account, it will not order any shares. Args: potential_buys(list): List of strings, the strings are the symbols of stocks we want to buy symbol(str): Symbol of the stock we want to sell holdings_data(dict): dict obtained from r.build_holdings() or get_modified_holdings() method """ cash = float(profile_data.get('cash')) portfolio_value = float(profile_data.get('equity')) - cash ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys)) prices = r.get_latest_price(potential_buys) for i in range(0, len(potential_buys)): stock_price = float(prices[i]) if(ideal_position_size < stock_price < ideal_position_size*1.5): num_shares = int(ideal_position_size*1.5/stock_price) elif (stock_price < ideal_position_size): num_shares = int(ideal_position_size/stock_price) else: print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######") break print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######") r.order_buy_market(potential_buys[i], num_shares) def scan_stocks(): """ The main method. Sells stocks in your portfolio if their 50 day moving average crosses below the 200 day, and buys stocks in your watchlist if the opposite happens. ############################################################################################### WARNING: Comment out the sell_holdings and buy_holdings lines if you don't actually want to execute the trade. ############################################################################################### If you sell a stock, this updates tradehistory.txt with information about the position, how much you've earned/lost, etc. """ print("----- Starting scan... -----\n") register_matplotlib_converters() watchlist_symbols = get_watchlist_symbols() portfolio_symbols = get_portfolio_symbols() holdings_data = get_modified_holdings() potential_buys = [] sells = [] print("Current Portfolio: " + str(portfolio_symbols) + "\n") print("Current Watchlist: " + str(watchlist_symbols) + "\n") print("----- Scanning portfolio for stocks to sell -----\n") for symbol in portfolio_symbols: cross = golden_cross(symbol, n1=50, n2=200, days=30, direction="below") if(cross == -1): sell_holdings(symbol, holdings_data) sells.append(symbol) profile_data = r.build_user_profile() print("\n----- Scanning watchlist for stocks to buy -----\n") for symbol in watchlist_symbols: if(symbol not in portfolio_symbols): cross = golden_cross(symbol, n1=50, n2=200, days=10, direction="above") if(cross == 1): potential_buys.append(symbol) if(len(potential_buys) > 0): buy_holdings(potential_buys, profile_data, holdings_data) if(len(sells) > 0): update_trade_history(sells, holdings_data, "tradehistory.txt") print("----- Scan over -----\n") #execute the scan scan_stocks()
45.852273
162
0.656258
import robin_stocks as r import pandas as pd import numpy as np import ta as ta from pandas.plotting import register_matplotlib_converters from ta import * from misc import * from tradingstats import * login = r.login('YOUR_EMAIL','YOUR_PASSWORD') def safe_division(n, d): return n / d if d else 0 def get_watchlist_symbols(): my_list_names = [] symbols = [] for name in r.get_all_watchlists(info='name'): my_list_names.append(name) for name in my_list_names: list = r.get_watchlist_by_name(name) for item in list: instrument_data = r.get_instrument_by_url(item.get('instrument')) symbol = instrument_data['symbol'] symbols.append(symbol) return symbols def get_portfolio_symbols(): symbols = [] holdings_data = r.get_open_stock_positions() for item in holdings_data: if not item: continue instrument_data = r.get_instrument_by_url(item.get('instrument')) symbol = instrument_data['symbol'] symbols.append(symbol) return symbols def get_position_creation_date(symbol, holdings_data): instrument = r.get_instruments_by_symbols(symbol) url = instrument[0].get('url') for dict in holdings_data: if(dict.get('instrument') == url): return dict.get('created_at') return "Not found" def get_modified_holdings(): holdings = r.build_holdings() holdings_data = r.get_open_stock_positions() for symbol, dict in holdings.items(): bought_at = get_position_creation_date(symbol, holdings_data) bought_at = str(pd.to_datetime(bought_at)) holdings[symbol].update({'bought_at': bought_at}) return holdings def get_last_crossing(df, days, symbol="", direction=""): prices = df.loc[:,"Price"] shortTerm = df.loc[:,"Indicator1"] LongTerm = df.loc[:,"Indicator2"] dates = df.loc[:,"Dates"] lastIndex = prices.size - 1 index = lastIndex found = index recentDiff = (shortTerm.at[index] - LongTerm.at[index]) >= 0 if((direction == "above" and not recentDiff) or (direction == "below" and recentDiff)): return 0 index -= 1 while(index >= 0 and found == lastIndex and not np.isnan(shortTerm.at[index]) and not np.isnan(LongTerm.at[index]) \ and ((pd.Timestamp("now", tz='UTC') - dates.at[index]) <= pd.Timedelta(str(days) + " days"))): if(recentDiff): if((shortTerm.at[index] - LongTerm.at[index]) < 0): found = index else: if((shortTerm.at[index] - LongTerm.at[index]) > 0): found = index index -= 1 if(found != lastIndex): if((direction == "above" and recentDiff) or (direction == "below" and not recentDiff)): print(symbol + ": Short SMA crossed" + (" ABOVE " if recentDiff else " BELOW ") + "Long SMA at " + str(dates.at[found]) \ +", which was " + str(pd.Timestamp("now", tz='UTC') - dates.at[found]) + " ago", ", price at cross: " + str(prices.at[found]) \ + ", current price: " + str(prices.at[lastIndex])) return (1 if recentDiff else -1) else: return 0 def five_year_check(stockTicker): instrument = r.get_instruments_by_symbols(stockTicker) list_date = instrument[0].get("list_date") if ((pd.Timestamp("now") - pd.to_datetime(list_date)) < pd.Timedelta("5 Y")): return True fiveyear = r.get_historicals(stockTicker,span='5year',bounds='regular') closingPrices = [] for item in fiveyear: closingPrices.append(float(item['close_price'])) recent_price = closingPrices[len(closingPrices) - 1] oldest_price = closingPrices[0] return (recent_price > oldest_price) def golden_cross(stockTicker, n1, n2, days, direction=""): if(direction == "above" and not five_year_check(stockTicker)): return False history = r.get_historicals(stockTicker,span='year',bounds='regular') closingPrices = [] dates = [] for item in history: closingPrices.append(float(item['close_price'])) dates.append(item['begins_at']) price = pd.Series(closingPrices) dates = pd.Series(dates) dates = pd.to_datetime(dates) sma1 = ta.volatility.bollinger_mavg(price, n=int(n1), fillna=False) sma2 = ta.volatility.bollinger_mavg(price, n=int(n2), fillna=False) series = [price.rename("Price"), sma1.rename("Indicator1"), sma2.rename("Indicator2"), dates.rename("Dates")] df = pd.concat(series, axis=1) cross = get_last_crossing(df, days, symbol=stockTicker, direction=direction) return cross def sell_holdings(symbol, holdings_data): shares_owned = int(float(holdings_data[symbol].get("quantity"))) r.order_sell_market(symbol, shares_owned) print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######") def buy_holdings(potential_buys, profile_data, holdings_data): cash = float(profile_data.get('cash')) portfolio_value = float(profile_data.get('equity')) - cash ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys)) prices = r.get_latest_price(potential_buys) for i in range(0, len(potential_buys)): stock_price = float(prices[i]) if(ideal_position_size < stock_price < ideal_position_size*1.5): num_shares = int(ideal_position_size*1.5/stock_price) elif (stock_price < ideal_position_size): num_shares = int(ideal_position_size/stock_price) else: print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######") break print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######") r.order_buy_market(potential_buys[i], num_shares) def scan_stocks(): print("----- Starting scan... -----\n") register_matplotlib_converters() watchlist_symbols = get_watchlist_symbols() portfolio_symbols = get_portfolio_symbols() holdings_data = get_modified_holdings() potential_buys = [] sells = [] print("Current Portfolio: " + str(portfolio_symbols) + "\n") print("Current Watchlist: " + str(watchlist_symbols) + "\n") print("----- Scanning portfolio for stocks to sell -----\n") for symbol in portfolio_symbols: cross = golden_cross(symbol, n1=50, n2=200, days=30, direction="below") if(cross == -1): sell_holdings(symbol, holdings_data) sells.append(symbol) profile_data = r.build_user_profile() print("\n----- Scanning watchlist for stocks to buy -----\n") for symbol in watchlist_symbols: if(symbol not in portfolio_symbols): cross = golden_cross(symbol, n1=50, n2=200, days=10, direction="above") if(cross == 1): potential_buys.append(symbol) if(len(potential_buys) > 0): buy_holdings(potential_buys, profile_data, holdings_data) if(len(sells) > 0): update_trade_history(sells, holdings_data, "tradehistory.txt") print("----- Scan over -----\n") scan_stocks()
true
true
f70dae80edd19835c2bcdbcc5bb719a245bf79aa
7,066
py
Python
cca/scripts/diffinfo.py
mstmhsmt/cca
0dc69b0f0da1def4e8404e5f7b7fe35b6ac3198a
[ "Apache-2.0" ]
null
null
null
cca/scripts/diffinfo.py
mstmhsmt/cca
0dc69b0f0da1def4e8404e5f7b7fe35b6ac3198a
[ "Apache-2.0" ]
null
null
null
cca/scripts/diffinfo.py
mstmhsmt/cca
0dc69b0f0da1def4e8404e5f7b7fe35b6ac3198a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 ''' diffinfo.py Copyright 2012-2017 Codinuum Software Lab <http://codinuum.com> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import re import os import logging import pathsetup from fragment import Fragment import gzip logger = logging.getLogger() read_delete_insert_info_size_threshold = 4 excl_L_pat = re.compile('\) \[') excl_R_pat = re.compile('(?P<list>.*)\]') num_pat = re.compile('(?P<num>[0-9]+);?') def get_excluded(s): result = [] l = excl_L_pat.finditer(s) start = -1 for m in l: start = m.end() if start > 0: m = excl_R_pat.search(s, start) if m: s = m.group('list') result = [int(x) for x in num_pat.findall(s)] return result named_node_pat_s = '\((?P<size>[0-9]+)\) \(([0-9]+):(?P<gnid>[0-9]+)\)c:(?P<kind>.*) name=\'(?P<name>.*)\'(?P<rest>.*)\((?P<loc>[0-9]+L.*)\)(?P<exc>.*)\((?P<elems>.*)\)$' pat_s = '\((?P<size>[0-9]+)\) \(([0-9]+):(?P<gnid>[0-9]+)\)c:(?P<kind>.*)\((?P<loc>[0-9]+L.*)\)(?P<exc>.*)\((?P<elems>.*)\)$' named_node_insert_pat = re.compile('INSERT' + named_node_pat_s) insert_pat = re.compile('INSERT' + pat_s) named_node_delete_pat = re.compile('DELETE' + named_node_pat_s) delete_pat = re.compile('DELETE' + pat_s) def read_delete_insert_info(fname): logger.info('reading "{}"'.format(fname)) deletes = [] inserts = [] try: f = open(fname) for line in f: line = line.rstrip() m = named_node_delete_pat.search(line) if m: size = int(m.group('size')) name = m.group('name') if name and size > read_delete_insert_info_size_threshold: excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) rest = m.group('rest') loc = m.group('loc') kind = m.group('kind') + '|' + rest gnid = int(m.group('gnid')) r = {'loc':loc,'size':size,'kind':kind,'name':name,'gnid':gnid,'excluded':excluded,'elems':elems} deletes.append(r) else: m = delete_pat.search(line) if m: size = int(m.group('size')) if size > read_delete_insert_info_size_threshold: kind = m.group('kind') loc = m.group('loc') gnid = int(m.group('gnid')) excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) r = {'loc':loc,'size':size,'kind':kind,'name':None,'gnid':gnid,'excluded':excluded,'elems':elems} deletes.append(r) m = named_node_insert_pat.search(line) if m: size = int(m.group('size')) name = m.group('name') if name and size > read_delete_insert_info_size_threshold: excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) rest = m.group('rest') loc = m.group('loc') kind = m.group('kind') + '|' + rest gnid = int(m.group('gnid')) r = {'loc':loc,'size':size,'kind':kind,'name':name,'gnid':gnid,'excluded':excluded,'elems':elems} inserts.append(r) else: m = insert_pat.search(line) if m: size = int(m.group('size')) if size > read_delete_insert_info_size_threshold: kind = m.group('kind') loc = m.group('loc') gnid = int(m.group('gnid')) excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) r = {'loc':loc,'size':size,'kind':kind,'name':None,'gnid':gnid,'excluded':excluded,'elems':elems} inserts.append(r) f.close() except IOError as e: logger.warning(str(e)) return (deletes, inserts) map_pat = re.compile('(?P<kind>R|E)\[#([0-9]+)U:#(?P<gi1>[0-9]+)G\](?P<lab1>.*)\[(?P<loc1>.*)\] -- \[#([0-9]+)U:#(?P<gi2>[0-9]+)G\](?P<lab2>.*)\[(?P<loc2>.*)\]') def read_map_info(info, swapped=False): map_file_not_found = True gi_map = [] relabeled_gis = [] empty_map = True opener = open if os.path.exists(info): pass else: # maybe compressed info = info + '.gz' opener = gzip.open try: f = opener(info) map_file_not_found = False for line in f: m = map_pat.search(line) if m: empty_map = False gi1 = int(m.group('gi1')) gi2 = int(m.group('gi2')) kind = m.group('kind') lab1 = (m.group('lab1')) lab2 = (m.group('lab2')) loc1 = (m.group('loc1')) loc2 = (m.group('loc2')) if swapped: gi_map.append((gi2, gi1)) if kind == 'R': relabeled_gis.append(gi2) else: gi_map.append((gi1, gi2)) if kind == 'R': relabeled_gis.append(gi1) f.close() except BaseException as e: logger.warning(str(e)) if map_file_not_found: gi_map = None relabeled_gis = None if empty_map: logger.warning('empty map: "{}"'.format(info)) return (gi_map, relabeled_gis) lmap_pat = re.compile('(R|E)\[(?P<loc1>[0-9]+L.*)\].* -- .*\[(?P<loc2>[0-9]+L.*)\]') def read_lmap_info(info, swapped=False): result = [] try: f = open(info) for line in f: m = lmap_pat.search(line) if m: loc1 = m.group('loc1') loc2 = m.group('loc2') if swapped: result.append((loc2, loc1)) else: result.append((loc1, loc2)) f.close() except Exception as e: logger.warning(str(e)) return result def test(mapfile): (gi_map, relabeled_gis) = read_map_info('map.gz') print('gindex map read: size={}'.format(len(gi_map))) print('{} relabeled gindexes found'.format(len(relabeled_gis))) if __name__ == '__main__': test('map.gz')
33.808612
170
0.5
import re import os import logging import pathsetup from fragment import Fragment import gzip logger = logging.getLogger() read_delete_insert_info_size_threshold = 4 excl_L_pat = re.compile('\) \[') excl_R_pat = re.compile('(?P<list>.*)\]') num_pat = re.compile('(?P<num>[0-9]+);?') def get_excluded(s): result = [] l = excl_L_pat.finditer(s) start = -1 for m in l: start = m.end() if start > 0: m = excl_R_pat.search(s, start) if m: s = m.group('list') result = [int(x) for x in num_pat.findall(s)] return result named_node_pat_s = '\((?P<size>[0-9]+)\) \(([0-9]+):(?P<gnid>[0-9]+)\)c:(?P<kind>.*) name=\'(?P<name>.*)\'(?P<rest>.*)\((?P<loc>[0-9]+L.*)\)(?P<exc>.*)\((?P<elems>.*)\)$' pat_s = '\((?P<size>[0-9]+)\) \(([0-9]+):(?P<gnid>[0-9]+)\)c:(?P<kind>.*)\((?P<loc>[0-9]+L.*)\)(?P<exc>.*)\((?P<elems>.*)\)$' named_node_insert_pat = re.compile('INSERT' + named_node_pat_s) insert_pat = re.compile('INSERT' + pat_s) named_node_delete_pat = re.compile('DELETE' + named_node_pat_s) delete_pat = re.compile('DELETE' + pat_s) def read_delete_insert_info(fname): logger.info('reading "{}"'.format(fname)) deletes = [] inserts = [] try: f = open(fname) for line in f: line = line.rstrip() m = named_node_delete_pat.search(line) if m: size = int(m.group('size')) name = m.group('name') if name and size > read_delete_insert_info_size_threshold: excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) rest = m.group('rest') loc = m.group('loc') kind = m.group('kind') + '|' + rest gnid = int(m.group('gnid')) r = {'loc':loc,'size':size,'kind':kind,'name':name,'gnid':gnid,'excluded':excluded,'elems':elems} deletes.append(r) else: m = delete_pat.search(line) if m: size = int(m.group('size')) if size > read_delete_insert_info_size_threshold: kind = m.group('kind') loc = m.group('loc') gnid = int(m.group('gnid')) excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) r = {'loc':loc,'size':size,'kind':kind,'name':None,'gnid':gnid,'excluded':excluded,'elems':elems} deletes.append(r) m = named_node_insert_pat.search(line) if m: size = int(m.group('size')) name = m.group('name') if name and size > read_delete_insert_info_size_threshold: excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) rest = m.group('rest') loc = m.group('loc') kind = m.group('kind') + '|' + rest gnid = int(m.group('gnid')) r = {'loc':loc,'size':size,'kind':kind,'name':name,'gnid':gnid,'excluded':excluded,'elems':elems} inserts.append(r) else: m = insert_pat.search(line) if m: size = int(m.group('size')) if size > read_delete_insert_info_size_threshold: kind = m.group('kind') loc = m.group('loc') gnid = int(m.group('gnid')) excluded = get_excluded(m.group('exc')) elems = Fragment(m.group('elems')) r = {'loc':loc,'size':size,'kind':kind,'name':None,'gnid':gnid,'excluded':excluded,'elems':elems} inserts.append(r) f.close() except IOError as e: logger.warning(str(e)) return (deletes, inserts) map_pat = re.compile('(?P<kind>R|E)\[#([0-9]+)U:#(?P<gi1>[0-9]+)G\](?P<lab1>.*)\[(?P<loc1>.*)\] -- \[#([0-9]+)U:#(?P<gi2>[0-9]+)G\](?P<lab2>.*)\[(?P<loc2>.*)\]') def read_map_info(info, swapped=False): map_file_not_found = True gi_map = [] relabeled_gis = [] empty_map = True opener = open if os.path.exists(info): pass else: info = info + '.gz' opener = gzip.open try: f = opener(info) map_file_not_found = False for line in f: m = map_pat.search(line) if m: empty_map = False gi1 = int(m.group('gi1')) gi2 = int(m.group('gi2')) kind = m.group('kind') lab1 = (m.group('lab1')) lab2 = (m.group('lab2')) loc1 = (m.group('loc1')) loc2 = (m.group('loc2')) if swapped: gi_map.append((gi2, gi1)) if kind == 'R': relabeled_gis.append(gi2) else: gi_map.append((gi1, gi2)) if kind == 'R': relabeled_gis.append(gi1) f.close() except BaseException as e: logger.warning(str(e)) if map_file_not_found: gi_map = None relabeled_gis = None if empty_map: logger.warning('empty map: "{}"'.format(info)) return (gi_map, relabeled_gis) lmap_pat = re.compile('(R|E)\[(?P<loc1>[0-9]+L.*)\].* -- .*\[(?P<loc2>[0-9]+L.*)\]') def read_lmap_info(info, swapped=False): result = [] try: f = open(info) for line in f: m = lmap_pat.search(line) if m: loc1 = m.group('loc1') loc2 = m.group('loc2') if swapped: result.append((loc2, loc1)) else: result.append((loc1, loc2)) f.close() except Exception as e: logger.warning(str(e)) return result def test(mapfile): (gi_map, relabeled_gis) = read_map_info('map.gz') print('gindex map read: size={}'.format(len(gi_map))) print('{} relabeled gindexes found'.format(len(relabeled_gis))) if __name__ == '__main__': test('map.gz')
true
true
f70daf6e0c7e8b592b22fc07d4c633555bc31157
4,010
py
Python
old/ROC_foldx.py
TheGlobalExpert/protein-stability
b5cc6efaaa6a2f7784729420b746a7ec07bd0d97
[ "MIT" ]
null
null
null
old/ROC_foldx.py
TheGlobalExpert/protein-stability
b5cc6efaaa6a2f7784729420b746a7ec07bd0d97
[ "MIT" ]
null
null
null
old/ROC_foldx.py
TheGlobalExpert/protein-stability
b5cc6efaaa6a2f7784729420b746a7ec07bd0d97
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np import math import matplotlib.pyplot as plt data = pd.read_csv("../results/master.csv") data = pd.read_csv("../data/FoldX_predictions.csv") x = list(data["ddG"]) y = list(data["FoldX_dGG"]) #clean # XXX: import itertools #lists = sorted(zip(*[x, y])) #x, y = list(zip(*lists)) #x = x[:10] #y = y[:10] for i in range(len(x)): x[i] = float(x[i]) print(y) print(x) x = np.array(x) y = np.array(y) data = {"x":x,"y":y} plt.scatter("x","y", data=data, label=None) plt.plot(x,y,"o") plt.ylabel("Predicted ddG") plt.xlabel("Experimental ddG") x = np.array(x) #plt.xticks(np.arange(x.min(), x.max(), 0.5)) corr = np.corrcoef(x, y)[0,1] plt.text(-2.5, 7, 'Spearman correlation \ncoefficent: '+str(round(corr,3))) print(corr) m, b = np.polyfit(x, y, 1) plt.plot(x, m*x + b, label="Best Fit") plt.text(3.3, -1.3, 'slope = '+str(round(m,2))) plt.text(3.3, -1.7, 'y-intercept = '+str(round(b,2))) x_hori = list(np.arange(-10,10, 0.5)) y_hori = list(np.arange(-10,10, 0.5)) plt.plot(x_hori, y_hori, linestyle="dashed", label="Ideal") plt.ylim(-3,8) plt.xlim(-3,6) plt.legend(loc="upper right") plt.title("New dataset (ProTherm+HotMusic)") plt.show() print(x) print(y) def check_accuracy(threshold): true_positive = 0 false_positive = 0 true_negative = 0 false_negative = 0 for i in range(data.shape[0]): if data.loc[i, "ddG"] >= threshold: #Positve if data.loc[i, "FoldX_predictions"] >= threshold: true_positive = true_positive + 1 elif data.loc[i, "FoldX_predictions"] <= threshold: false_positive = false_positive + 1 else: exit() else: #negative if data.loc[i, "FoldX_predictions"] <= threshold: true_negative = true_negative + 1 elif data.loc[i, "FoldX_predictions"] >= threshold: false_negative = false_negative + 1 else: exit() return [true_positive, false_positive, true_negative, false_negative] def check_accuracy(threshold, x, y): true_positive = 0 false_positive = 0 true_negative = 0 false_negative = 0 for i in range(len(x)): if float(x[i]) >= threshold: #Positve if y[i] >= threshold: true_positive = true_positive + 1 elif y[i] <= threshold: false_positive = false_positive + 1 else: exit() else: #negative if y[i] <= threshold: true_negative = true_negative + 1 elif y[i] >= threshold: false_negative = false_negative + 1 else: exit() return [true_positive, false_positive, true_negative, false_negative] results = [] thresholds = list(np.arange(-10,10, 0.1)) print(thresholds) for threshold in thresholds: results.append(check_accuracy(threshold, x, y)) print(threshold) pass print(results) x = [] y = [] for i, result in enumerate(results): print(result) try: x.append(result[1] / (result[1] + result[2])) y.append(result[0] / (result[0] + result[3])) except: x.append(np.nan) y.append(np.nan) print(x) for i in range(len(x)): print(i, "----") print(x[i]) print(results[i]) x_hori = list(np.arange(0,1.1, 0.1)) y_hori = list(np.arange(0,1.1, 0.1)) TOI = [100,103, 105, 107, 110, 112, 118, 120] plt.figure(figsize = (6,6)) for threshold in TOI: plt.text(x[threshold] - 0.06, y[threshold] + 0.01, str(round(thresholds[threshold],3))) #print(thresholds[threshold], threshold) plt.plot(x,y) plt.plot(x_hori, y_hori, linestyle="dashed") plt.xlabel("False Positive Rate") plt.ylabel("True Postive Rate") plt.xlim(0,1) plt.ylim(0,1) plt.title("ROC curve of FoldX predictions of ddG with relation\nto varying ddG threshold (HotMusic dataset)") for threshold in TOI: plt.scatter(x[threshold], y[threshold], c="b") plt.show()
22.655367
109
0.600499
import pandas as pd import numpy as np import math import matplotlib.pyplot as plt data = pd.read_csv("../results/master.csv") data = pd.read_csv("../data/FoldX_predictions.csv") x = list(data["ddG"]) y = list(data["FoldX_dGG"]) mport itertools for i in range(len(x)): x[i] = float(x[i]) print(y) print(x) x = np.array(x) y = np.array(y) data = {"x":x,"y":y} plt.scatter("x","y", data=data, label=None) plt.plot(x,y,"o") plt.ylabel("Predicted ddG") plt.xlabel("Experimental ddG") x = np.array(x) corr = np.corrcoef(x, y)[0,1] plt.text(-2.5, 7, 'Spearman correlation \ncoefficent: '+str(round(corr,3))) print(corr) m, b = np.polyfit(x, y, 1) plt.plot(x, m*x + b, label="Best Fit") plt.text(3.3, -1.3, 'slope = '+str(round(m,2))) plt.text(3.3, -1.7, 'y-intercept = '+str(round(b,2))) x_hori = list(np.arange(-10,10, 0.5)) y_hori = list(np.arange(-10,10, 0.5)) plt.plot(x_hori, y_hori, linestyle="dashed", label="Ideal") plt.ylim(-3,8) plt.xlim(-3,6) plt.legend(loc="upper right") plt.title("New dataset (ProTherm+HotMusic)") plt.show() print(x) print(y) def check_accuracy(threshold): true_positive = 0 false_positive = 0 true_negative = 0 false_negative = 0 for i in range(data.shape[0]): if data.loc[i, "ddG"] >= threshold: if data.loc[i, "FoldX_predictions"] >= threshold: true_positive = true_positive + 1 elif data.loc[i, "FoldX_predictions"] <= threshold: false_positive = false_positive + 1 else: exit() else: if data.loc[i, "FoldX_predictions"] <= threshold: true_negative = true_negative + 1 elif data.loc[i, "FoldX_predictions"] >= threshold: false_negative = false_negative + 1 else: exit() return [true_positive, false_positive, true_negative, false_negative] def check_accuracy(threshold, x, y): true_positive = 0 false_positive = 0 true_negative = 0 false_negative = 0 for i in range(len(x)): if float(x[i]) >= threshold: if y[i] >= threshold: true_positive = true_positive + 1 elif y[i] <= threshold: false_positive = false_positive + 1 else: exit() else: if y[i] <= threshold: true_negative = true_negative + 1 elif y[i] >= threshold: false_negative = false_negative + 1 else: exit() return [true_positive, false_positive, true_negative, false_negative] results = [] thresholds = list(np.arange(-10,10, 0.1)) print(thresholds) for threshold in thresholds: results.append(check_accuracy(threshold, x, y)) print(threshold) pass print(results) x = [] y = [] for i, result in enumerate(results): print(result) try: x.append(result[1] / (result[1] + result[2])) y.append(result[0] / (result[0] + result[3])) except: x.append(np.nan) y.append(np.nan) print(x) for i in range(len(x)): print(i, "----") print(x[i]) print(results[i]) x_hori = list(np.arange(0,1.1, 0.1)) y_hori = list(np.arange(0,1.1, 0.1)) TOI = [100,103, 105, 107, 110, 112, 118, 120] plt.figure(figsize = (6,6)) for threshold in TOI: plt.text(x[threshold] - 0.06, y[threshold] + 0.01, str(round(thresholds[threshold],3))) plt.plot(x,y) plt.plot(x_hori, y_hori, linestyle="dashed") plt.xlabel("False Positive Rate") plt.ylabel("True Postive Rate") plt.xlim(0,1) plt.ylim(0,1) plt.title("ROC curve of FoldX predictions of ddG with relation\nto varying ddG threshold (HotMusic dataset)") for threshold in TOI: plt.scatter(x[threshold], y[threshold], c="b") plt.show()
true
true
f70db04da37eb5520229d18abbe9dbe1fae56e06
4,119
py
Python
train.py
fancybian/ner-crf2
e4f4fe973057ee5f6ffcc87c8dddc502c981b9bf
[ "MIT" ]
122
2016-06-07T16:25:34.000Z
2021-04-06T14:01:00.000Z
train.py
fancybian/ner-crf2
e4f4fe973057ee5f6ffcc87c8dddc502c981b9bf
[ "MIT" ]
1
2018-04-12T19:16:14.000Z
2018-04-12T19:16:14.000Z
train.py
fancybian/ner-crf2
e4f4fe973057ee5f6ffcc87c8dddc502c981b9bf
[ "MIT" ]
41
2015-11-22T06:17:44.000Z
2021-04-02T09:12:09.000Z
# -*- coding: utf-8 -*- """ Main training file for the CRF. This file trains a CRF model and saves it under the filename provided via an 'identifier' command line argument. Usage example: python train.py --identifier="my_experiment" """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import random import pycrfsuite from model.datasets import load_windows, load_articles, generate_examples import model.features as features # All capitalized constants come from this file import config as cfg random.seed(42) def main(): """This function handles the command line arguments and then calls the train() method.""" parser = argparse.ArgumentParser() parser.add_argument("--identifier", required=True, help="A short name/identifier for your experiment, e.g. 'ex42b'.") args = parser.parse_args() train(args) def train(args): """Main training method. Does the following: 1. Create a new pycrfsuite trainer object. We will have to add feature chains and label chains to that object and then train on them. 2. Creates the feature (generators). A feature generator might e.g. take in a window of N tokens and then return ["upper=1"] for each token that starts with an uppercase letter and ["upper=0"] for each token that starts with a lowercase letter. (Lists, because a token can be converted into multiple features by a single feature generator, e.g. the case for LDA as a token may be part of multiple topics.) 3. Loads windows from the corpus. Each window has a fixed (maximum) size in tokens. We only load windows that contain at least one label (named entity), so that we don't waste too much time on windows without any label. 4. Generate features for each chain of tokens (window). That's basically described in (2.). Each chain of tokens from a window will be converted to a list of lists. One list at the top level representing each token, then another list for the feature values. E.g. [["w2v=123", "bc=742", "upper=0"], ["w2v=4", "bc=12", "upper=1", "lda4=1"]] for two tokens. 5. Add feature chains and label chains to the trainer. 6. Train. This may take several hours for 20k windows. Args: args: Command line arguments as parsed by argparse.ArgumentParser. """ trainer = pycrfsuite.Trainer(verbose=True) # Create/Initialize the feature generators # this may take a few minutes print("Creating features...") feature_generators = features.create_features() # Initialize the window generator # each window has a fixed maximum size of tokens print("Loading windows...") windows = load_windows(load_articles(cfg.ARTICLES_FILEPATH), cfg.WINDOW_SIZE, feature_generators, only_labeled_windows=True) # Add chains of features (each list of lists of strings) # and chains of labels (each list of strings) # to the trainer. # This may take a long while, especially because of the lengthy POS tagging. # POS tags and LDA results are cached, so the second run through this part will be significantly # faster. print("Adding example windows (up to max %d)..." % (cfg.COUNT_WINDOWS_TRAIN)) examples = generate_examples(windows, nb_append=cfg.COUNT_WINDOWS_TRAIN, nb_skip=cfg.COUNT_WINDOWS_TEST, verbose=True) for feature_values_lists, labels in examples: trainer.append(feature_values_lists, labels) # Train the model # this may take several hours print("Training...") if cfg.MAX_ITERATIONS is not None and cfg.MAX_ITERATIONS > 0: # set the maximum number of iterations of defined in the config file # the optimizer stops automatically after some iterations if this is not set trainer.set_params({'max_iterations': cfg.MAX_ITERATIONS}) trainer.train(args.identifier) # ---------------- if __name__ == "__main__": main()
42.90625
100
0.686574
from __future__ import absolute_import, division, print_function, unicode_literals import argparse import random import pycrfsuite from model.datasets import load_windows, load_articles, generate_examples import model.features as features import config as cfg random.seed(42) def main(): parser = argparse.ArgumentParser() parser.add_argument("--identifier", required=True, help="A short name/identifier for your experiment, e.g. 'ex42b'.") args = parser.parse_args() train(args) def train(args): trainer = pycrfsuite.Trainer(verbose=True) print("Creating features...") feature_generators = features.create_features() print("Loading windows...") windows = load_windows(load_articles(cfg.ARTICLES_FILEPATH), cfg.WINDOW_SIZE, feature_generators, only_labeled_windows=True) print("Adding example windows (up to max %d)..." % (cfg.COUNT_WINDOWS_TRAIN)) examples = generate_examples(windows, nb_append=cfg.COUNT_WINDOWS_TRAIN, nb_skip=cfg.COUNT_WINDOWS_TEST, verbose=True) for feature_values_lists, labels in examples: trainer.append(feature_values_lists, labels) print("Training...") if cfg.MAX_ITERATIONS is not None and cfg.MAX_ITERATIONS > 0: trainer.set_params({'max_iterations': cfg.MAX_ITERATIONS}) trainer.train(args.identifier) if __name__ == "__main__": main()
true
true
f70db16a2b9bb8472d5847e7ac204d8017366ff5
2,618
py
Python
ciw/network.py
CiwPython/Ciw
8d5978108f797a6c3e42d8f70f31510f889f2dd0
[ "MIT" ]
107
2016-11-18T22:44:58.000Z
2022-03-29T01:38:12.000Z
ciw/network.py
CiwPython/Ciw
8d5978108f797a6c3e42d8f70f31510f889f2dd0
[ "MIT" ]
117
2016-09-25T19:12:39.000Z
2022-03-31T14:01:47.000Z
ciw/network.py
CiwPython/Ciw
8d5978108f797a6c3e42d8f70f31510f889f2dd0
[ "MIT" ]
34
2016-12-21T12:04:29.000Z
2022-03-29T10:46:29.000Z
class ServiceCentre(object): """ An information store for each service centre in the queueing network. Contains all information that is independent of customer class: - number of servers - queueing capacity - server schedules + preemtion status - class change matrix """ def __init__(self, number_of_servers, queueing_capacity, class_change_matrix=None, schedule=None, preempt=False, ps_threshold=1): """ Initialises the ServiceCentre object. """ self.number_of_servers = number_of_servers self.queueing_capacity = queueing_capacity self.class_change_matrix = class_change_matrix self.schedule = schedule self.preempt = preempt self.ps_threshold = ps_threshold class CustomerClass(object): """ An information store for each customer class in the queueing network. Contains all information that is dependent on customer class: - arrival distributions - service distributions - routing matrices/functions - priority class - baulking functions - batching distributions """ def __init__(self, arrival_distributions, service_distributions, routing, priority_class, baulking_functions, batching_distributions): """ Initialises the CutomerCass object. """ self.arrival_distributions = arrival_distributions self.service_distributions = service_distributions self.batching_distributions = batching_distributions self.routing = routing self.priority_class = priority_class self.baulking_functions = baulking_functions class Network(object): """ An information store the queueing network. Contains a list of ServiceCentre objects for each service centre, and a list of CustomerClass objects for each customer class. """ def __init__(self, service_centres, customer_classes): """ Initialises the Network object """ self.service_centres = service_centres self.customer_classes = customer_classes self.number_of_nodes = len(service_centres) self.number_of_classes = len(customer_classes) self.number_of_priority_classes = len(set([clss.priority_class for clss in customer_classes])) self.priority_class_mapping = {i: clss.priority_class for i, clss in enumerate(customer_classes)}
36.361111
105
0.651642
class ServiceCentre(object): def __init__(self, number_of_servers, queueing_capacity, class_change_matrix=None, schedule=None, preempt=False, ps_threshold=1): self.number_of_servers = number_of_servers self.queueing_capacity = queueing_capacity self.class_change_matrix = class_change_matrix self.schedule = schedule self.preempt = preempt self.ps_threshold = ps_threshold class CustomerClass(object): def __init__(self, arrival_distributions, service_distributions, routing, priority_class, baulking_functions, batching_distributions): self.arrival_distributions = arrival_distributions self.service_distributions = service_distributions self.batching_distributions = batching_distributions self.routing = routing self.priority_class = priority_class self.baulking_functions = baulking_functions class Network(object): def __init__(self, service_centres, customer_classes): self.service_centres = service_centres self.customer_classes = customer_classes self.number_of_nodes = len(service_centres) self.number_of_classes = len(customer_classes) self.number_of_priority_classes = len(set([clss.priority_class for clss in customer_classes])) self.priority_class_mapping = {i: clss.priority_class for i, clss in enumerate(customer_classes)}
true
true
f70db18cb43c8477d2ce3de474b7e364be5012b3
19,374
py
Python
src/player.py
allineandric/progmind
6333ff3b76626c395d62fc2b6e96e99fe6e93061
[ "MIT" ]
1
2022-03-29T18:26:35.000Z
2022-03-29T18:26:35.000Z
src/player.py
henrikbeck95/progmind
6333ff3b76626c395d62fc2b6e96e99fe6e93061
[ "MIT" ]
null
null
null
src/player.py
henrikbeck95/progmind
6333ff3b76626c395d62fc2b6e96e99fe6e93061
[ "MIT" ]
1
2020-12-04T18:18:04.000Z
2020-12-04T18:18:04.000Z
"" "Este módulo implementa o objeto jogador (sprite) para o Progmind" "" from src.animation import Animation from src.animated_sprite import AnimatedSprite from src.time_bonus import TimeBonus import src.game_functions as gf import pygame import time class Player(AnimatedSprite): """Objeto de jogador""" def __init__(self, settings, screen, images, initial_bounding_rect, tile_map): """Inicialize o sprite do jogador""" # Calls AnimatedSprite, which in turn will call pygame.Sprite __init_() super().__init__(settings, screen, images) self.tile_map = tile_map # Substituir a posição inicial self.initial_bounding_rect = initial_bounding_rect self.rect.bottom = initial_bounding_rect.bottom self.rect.left = self.screen.get_rect().width / 2 # Defina as margens transparentes self.margin_left = self.settings.player_sprite_horz_margin self.margin_right = self.settings.player_sprite_horz_margin self.margin_top = self.settings.player_sprite_top_margin # definir o retorno de chamada de verificação de colisão opcional self.collision_check = self.collided # Estes são específicos para o objeto do jogador self.air_jumps = 0 self.max_air_jumps = settings.player_max_air_jumps self.idle_top = False self.idle_counter = 0 self.won_level = False self.at_top = False # Adicione as animações para o jogador self.animations[self.settings.anim_name_idle_left] = Animation([0, 1, 2, 3, 2, 1], 5) self.animations[self.settings.anim_name_idle_right] = Animation([5, 6, 7, 8, 7, 6], 5) self.animations[self.settings.anim_name_walk_left] = Animation([0, 10, 11, 10], 2) self.animations[self.settings.anim_name_walk_right] = Animation([5, 12, 13, 12], 2) self.animations[self.settings.anim_name_jump_up_left] = Animation([15], 5) self.animations[self.settings.anim_name_jump_down_left] = Animation([16], 5) self.animations[self.settings.anim_name_jump_up_right] = Animation([17], 5) self.animations[self.settings.anim_name_jump_down_right] = Animation([18], 5) self.animations[self.settings.anim_name_dead] = Animation([4], 5) self.current_animation = self.settings.anim_name_idle_left self.facing_left = True def reset(self): """Redefina o objeto do jogador para o mapa""" player = self player.rect.bottom = self.initial_bounding_rect.bottom player.dx = 0.0 player.dy = 0.0 player.dying = False player.idle_counter = 0 player.idle_top = False player.won_level = False player.at_top = False def update_current_animation(self): """Defina a animação correta com base no estado""" # DEAD if self.idle_top: self.set_current_animation(self.settings.anim_name_idle_left) elif self.dying: self.set_current_animation(self.settings.anim_name_dead) # IDLE elif self.dx == 0 and self.dy == 0: if self.facing_left: self.set_current_animation(self.settings.anim_name_idle_left) else: self.set_current_animation(self.settings.anim_name_idle_right) # WALKING elif self.dy == 0: if self.dx < 0: self.set_current_animation(self.settings.anim_name_walk_left) else: self.set_current_animation(self.settings.anim_name_walk_right) # JUMPING else: pygame.mixer.init() sounda= pygame.mixer.Sound("jumpland.wav") sounda.set_volume(0.05) sounda.play() if self.dy < 0: if self.facing_left: self.set_current_animation(self.settings.anim_name_jump_up_left) else: self.set_current_animation(self.settings.anim_name_jump_up_right) else: if self.facing_left: self.set_current_animation(self.settings.anim_name_jump_down_left) else: self.set_current_animation(self.settings.anim_name_jump_down_right) def collided(self, player, sprite): """Este retorno de chamada é usado para modificar a verificação de colisão básica para o sprite do jogador""" if sprite.dying: return False player_rect = player.rect.copy() # reduza o retângulo do jogador com base nas margens player_rect.height -= player.settings.player_sprite_top_margin player_rect.width -= (player.settings.player_sprite_horz_margin * 2) player_rect.midbottom = player.rect.midbottom # Agora faça uma verificação padrão com o Rect ajustado return player_rect.colliderect(sprite.rect) def update(self, tile_map, enemies): """Atualiza a posição do jogador sprite""" if not self.dying: # Verifique se estamos na linha superior if self.idle_top: self.idle_counter = 0 if self.idle_counter > (30 * 3): self.won_level = False # O AnimatedSprite lida com a maior parte disso, mas salve o Grupo de inimigos atuais para o manipulador self.enemies = enemies super().update(tile_map, tile_map.block_group) if self.dy == 0: self.air_jumps = 0 # O jogador também precisa verificar o grupo de sprites inimigos intersected_blobs = pygame.sprite.spritecollide(self, enemies, False, self.collision_check) if intersected_blobs: self.dying = True self.dy = -15 self.falling = True self.falling_frames = 1 player_idle = ((self.current_animation == self.settings.anim_name_idle_left) or (self.current_animation == self.settings.anim_name_idle_right)) player_walking = ((self.current_animation == self.settings.anim_name_walk_left) or (self.current_animation == self.settings.anim_name_walk_right)) if (self.rect.bottom <= tile_map.player_bounds_rect.top + 2 * self.settings.tile_height) and (player_idle or player_walking): self.idle_top = False self.at_top = True self.idle_counter = 0 else: if self.rect.top > self.screen_rect.bottom: # Por enquanto, apenas reinicie a posição do jogador, mas nada mais self.rect.bottom = tile_map.player_bounds_rect.bottom self.dx = 0.0 self.dy = 0.0 self.dying = False else: if self.dy < self.settings.terminal_velocity: self.dy += self.settings.gravity self.rect.centery += self.dy # pygame.mixer.init() # som= pygame.mixer.Sound("não consegue né.wav") # som.set_volume(0.1) # som.play() self.falling_frames += 1 self.finish_update() def handle_collision(self, collision_list, group): """Dada uma lista de sprites que colidem com o jogador, altere o estado, como posição, velocidade, etc.""" # Mesmo que seja uma lista, o primeiro item deve ser tudo de que precisamos por agora if collision_list: block = collision_list[0] #isso é uma colisão lateral? side_collision = self.rect.right > block.rect.right or self.rect.left < block.rect.left # Queda é o caso padrão, então verifique primeiro if self.dy > 0: self.falling = False self.falling_frames = 1 self.air_jumps = 0 self.dy = 0 self.rect.bottom = block.rect.top # Se o jogador estiver pulando, verifique se há um acerto menor elif self.dy < 0: if (self.rect.left > 450 and self.rect.left < 600): if (self.rect.top >= 464 and self.rect.top < 470): self.settings.resposta_1 = ''; if self.settings.resposta_1_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top >= 464 and self.rect.top < 477): self.settings.resposta_2 = '' if self.settings.resposta_2_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 450 and self.rect.left < 600): if (self.rect.top >= 320 and self.rect.top < 328): self.settings.resposta_3 = '' if self.settings.resposta_3_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top >= 320 and self.rect.top < 328): self.settings.resposta_4 = '' if self.settings.resposta_4_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 450 and self.rect.left < 600): if (self.rect.top >= 170 and self.rect.top < 185): self.settings.resposta_5 = '' if self.settings.resposta_5_correta: self.settings.resposta_5 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top >= 170 and self.rect.top < 185): self.settings.resposta_6 = '' if self.settings.resposta_6_correta: self.settings.resposta_6 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 480 and self.rect.left < 600): if (self.rect.top > 25 and self.rect.top < 40): self.settings.resposta_7 = '' if self.settings.resposta_7_correta: self.settings.resposta_7 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top > 25 and self.rect.top < 40): self.settings.resposta_8 = '' if self.settings.resposta_8_correta: self.settings.resposta_8 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if self.rect.bottom > block.rect.bottom: self.dy = 0 self.rect.top = block.rect.bottom - self.settings.player_sprite_top_margin # remova os blocos atingidos pela parte inferior group.remove(collision_list) # remova os inimigos acima desses blocos self.remove_enemies_above_blocks(collision_list) # Agora verifique a esquerda elif self.dx > 0: if side_collision: self.dx = 0 self.rect.right = block.rect.left + self.settings.player_sprite_horz_margin elif self.dx < 0: if side_collision: self.dx = 0 self.rect.left = block.rect.right - self.settings.player_sprite_horz_margin def remove_enemies_above_blocks(self, collision_list): # construir um kill rect para verificar os inimigos kill_rect = collision_list[0].rect for sprite in collision_list: kill_rect.union_ip(sprite.rect) #Subir um bloco kill_rect.move_ip(0, collision_list[0].rect.height * -1) # Agora veja se algum inimigo está neste bloco for enemy in self.enemies: if kill_rect.colliderect(enemy.rect): enemy.dying = True enemy.dy = self.settings.enemy_death_dy bonus = TimeBonus(enemy.rect, "ACERTOU!!!", 500, self.tile_map.level_timer, self.settings.bonus_font) self.tile_map.bonuses.append(bonus)
50.453125
158
0.532879
from src.animation import Animation from src.animated_sprite import AnimatedSprite from src.time_bonus import TimeBonus import src.game_functions as gf import pygame import time class Player(AnimatedSprite): def __init__(self, settings, screen, images, initial_bounding_rect, tile_map): super().__init__(settings, screen, images) self.tile_map = tile_map self.initial_bounding_rect = initial_bounding_rect self.rect.bottom = initial_bounding_rect.bottom self.rect.left = self.screen.get_rect().width / 2 self.margin_left = self.settings.player_sprite_horz_margin self.margin_right = self.settings.player_sprite_horz_margin self.margin_top = self.settings.player_sprite_top_margin self.collision_check = self.collided self.air_jumps = 0 self.max_air_jumps = settings.player_max_air_jumps self.idle_top = False self.idle_counter = 0 self.won_level = False self.at_top = False self.animations[self.settings.anim_name_idle_left] = Animation([0, 1, 2, 3, 2, 1], 5) self.animations[self.settings.anim_name_idle_right] = Animation([5, 6, 7, 8, 7, 6], 5) self.animations[self.settings.anim_name_walk_left] = Animation([0, 10, 11, 10], 2) self.animations[self.settings.anim_name_walk_right] = Animation([5, 12, 13, 12], 2) self.animations[self.settings.anim_name_jump_up_left] = Animation([15], 5) self.animations[self.settings.anim_name_jump_down_left] = Animation([16], 5) self.animations[self.settings.anim_name_jump_up_right] = Animation([17], 5) self.animations[self.settings.anim_name_jump_down_right] = Animation([18], 5) self.animations[self.settings.anim_name_dead] = Animation([4], 5) self.current_animation = self.settings.anim_name_idle_left self.facing_left = True def reset(self): player = self player.rect.bottom = self.initial_bounding_rect.bottom player.dx = 0.0 player.dy = 0.0 player.dying = False player.idle_counter = 0 player.idle_top = False player.won_level = False player.at_top = False def update_current_animation(self): if self.idle_top: self.set_current_animation(self.settings.anim_name_idle_left) elif self.dying: self.set_current_animation(self.settings.anim_name_dead) elif self.dx == 0 and self.dy == 0: if self.facing_left: self.set_current_animation(self.settings.anim_name_idle_left) else: self.set_current_animation(self.settings.anim_name_idle_right) elif self.dy == 0: if self.dx < 0: self.set_current_animation(self.settings.anim_name_walk_left) else: self.set_current_animation(self.settings.anim_name_walk_right) else: pygame.mixer.init() sounda= pygame.mixer.Sound("jumpland.wav") sounda.set_volume(0.05) sounda.play() if self.dy < 0: if self.facing_left: self.set_current_animation(self.settings.anim_name_jump_up_left) else: self.set_current_animation(self.settings.anim_name_jump_up_right) else: if self.facing_left: self.set_current_animation(self.settings.anim_name_jump_down_left) else: self.set_current_animation(self.settings.anim_name_jump_down_right) def collided(self, player, sprite): if sprite.dying: return False player_rect = player.rect.copy() player_rect.height -= player.settings.player_sprite_top_margin player_rect.width -= (player.settings.player_sprite_horz_margin * 2) player_rect.midbottom = player.rect.midbottom return player_rect.colliderect(sprite.rect) def update(self, tile_map, enemies): if not self.dying: if self.idle_top: self.idle_counter = 0 if self.idle_counter > (30 * 3): self.won_level = False self.enemies = enemies super().update(tile_map, tile_map.block_group) if self.dy == 0: self.air_jumps = 0 intersected_blobs = pygame.sprite.spritecollide(self, enemies, False, self.collision_check) if intersected_blobs: self.dying = True self.dy = -15 self.falling = True self.falling_frames = 1 player_idle = ((self.current_animation == self.settings.anim_name_idle_left) or (self.current_animation == self.settings.anim_name_idle_right)) player_walking = ((self.current_animation == self.settings.anim_name_walk_left) or (self.current_animation == self.settings.anim_name_walk_right)) if (self.rect.bottom <= tile_map.player_bounds_rect.top + 2 * self.settings.tile_height) and (player_idle or player_walking): self.idle_top = False self.at_top = True self.idle_counter = 0 else: if self.rect.top > self.screen_rect.bottom: self.rect.bottom = tile_map.player_bounds_rect.bottom self.dx = 0.0 self.dy = 0.0 self.dying = False else: if self.dy < self.settings.terminal_velocity: self.dy += self.settings.gravity self.rect.centery += self.dy self.falling_frames += 1 self.finish_update() def handle_collision(self, collision_list, group): if collision_list: block = collision_list[0] side_collision = self.rect.right > block.rect.right or self.rect.left < block.rect.left if self.dy > 0: self.falling = False self.falling_frames = 1 self.air_jumps = 0 self.dy = 0 self.rect.bottom = block.rect.top elif self.dy < 0: if (self.rect.left > 450 and self.rect.left < 600): if (self.rect.top >= 464 and self.rect.top < 470): self.settings.resposta_1 = ''; if self.settings.resposta_1_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top >= 464 and self.rect.top < 477): self.settings.resposta_2 = '' if self.settings.resposta_2_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 450 and self.rect.left < 600): if (self.rect.top >= 320 and self.rect.top < 328): self.settings.resposta_3 = '' if self.settings.resposta_3_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top >= 320 and self.rect.top < 328): self.settings.resposta_4 = '' if self.settings.resposta_4_correta: self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 450 and self.rect.left < 600): if (self.rect.top >= 170 and self.rect.top < 185): self.settings.resposta_5 = '' if self.settings.resposta_5_correta: self.settings.resposta_5 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top >= 170 and self.rect.top < 185): self.settings.resposta_6 = '' if self.settings.resposta_6_correta: self.settings.resposta_6 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 480 and self.rect.left < 600): if (self.rect.top > 25 and self.rect.top < 40): self.settings.resposta_7 = '' if self.settings.resposta_7_correta: self.settings.resposta_7 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if (self.rect.left > 620 and self.rect.left < 730): if (self.rect.top > 25 and self.rect.top < 40): self.settings.resposta_8 = '' if self.settings.resposta_8_correta: self.settings.resposta_8 = '' self.settings.contador_nivel += 1 if self.settings.level_number < self.settings.desafio_Medio: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number > self.settings.desafio_Facil and self.settings.level_number < self.settings.desafio_Dificil: if self.settings.contador_nivel == 2: self.settings.desafio_concluido = True self.won_level = True if self.settings.level_number >= self.settings.desafio_Dificil: self.settings.desafio_concluido = True self.won_level = True if self.rect.bottom > block.rect.bottom: self.dy = 0 self.rect.top = block.rect.bottom - self.settings.player_sprite_top_margin group.remove(collision_list) self.remove_enemies_above_blocks(collision_list) elif self.dx > 0: if side_collision: self.dx = 0 self.rect.right = block.rect.left + self.settings.player_sprite_horz_margin elif self.dx < 0: if side_collision: self.dx = 0 self.rect.left = block.rect.right - self.settings.player_sprite_horz_margin def remove_enemies_above_blocks(self, collision_list): kill_rect = collision_list[0].rect for sprite in collision_list: kill_rect.union_ip(sprite.rect) kill_rect.move_ip(0, collision_list[0].rect.height * -1) for enemy in self.enemies: if kill_rect.colliderect(enemy.rect): enemy.dying = True enemy.dy = self.settings.enemy_death_dy bonus = TimeBonus(enemy.rect, "ACERTOU!!!", 500, self.tile_map.level_timer, self.settings.bonus_font) self.tile_map.bonuses.append(bonus)
true
true
f70db2de5a31ed9916519b20186561a12fdb235e
1,902
py
Python
test2.py
sleung852/mahjong
6af93a452c68a9bd5c233b6abcf07a53aca8a2fa
[ "MIT" ]
null
null
null
test2.py
sleung852/mahjong
6af93a452c68a9bd5c233b6abcf07a53aca8a2fa
[ "MIT" ]
1
2020-10-08T11:37:53.000Z
2020-10-08T11:37:53.000Z
testfancalculator.py
sleung852/mahjong
6af93a452c68a9bd5c233b6abcf07a53aca8a2fa
[ "MIT" ]
null
null
null
from engine import * chingyatsikpingwu = [] chingyatsikpingwu.append(SimpleTile(2, 'Man')) chingyatsikpingwu.append(SimpleTile(3, 'Man')) chingyatsikpingwu.append(SimpleTile(4, 'Man')) chingyatsikpingwu.append(SimpleTile(1, 'Man')) chingyatsikpingwu.append(SimpleTile(2, 'Man')) chingyatsikpingwu.append(SimpleTile(3, 'Man')) chingyatsikpingwu.append(SimpleTile(4, 'Man')) chingyatsikpingwu.append(SimpleTile(5, 'Man')) chingyatsikpingwu.append(SimpleTile(6, 'Man')) chingyatsikpingwu.append(SimpleTile(2, 'Man')) chingyatsikpingwu.append(SimpleTile(3, 'Man')) chingyatsikpingwu.append(SimpleTile(4, 'Man')) chingyatsikpingwu.append(SimpleTile(9, 'Man')) chingyatsikpingwu.append(SimpleTile(9, 'Man')) supcharmyiu = [] supcharmyiu.append(SimpleTile(1, 'Man')) supcharmyiu.append(SimpleTile(1, 'Bamboo')) supcharmyiu.append(SimpleTile(1, 'Circle')) supcharmyiu.append(SimpleTile(9, 'Man')) supcharmyiu.append(SimpleTile(9, 'Bamboo')) supcharmyiu.append(SimpleTile(9, 'Circle')) supcharmyiu.append(HonorTile.from_str('g')) supcharmyiu.append(HonorTile.from_str('r')) supcharmyiu.append(HonorTile.from_str('wh')) supcharmyiu.append(HonorTile.from_str('e')) supcharmyiu.append(HonorTile.from_str('s')) supcharmyiu.append(HonorTile.from_str('we')) supcharmyiu.append(HonorTile.from_str('n')) supcharmyiu.append(HonorTile.from_str('n')) #print(supcharmyiu) asetoftiles = FanCalculator() asetoftiles.tiles = chingyatsikpingwu print(asetoftiles.tiles) print() print(asetoftiles.all_com()) eatable, legit_hands = asetoftiles.legitimate_hands() if eatable: print('It is a legitimate hand and there is(are) {} possible hand(s)!\n'.format(len(legit_hands))) for legit_hand in legit_hands: fan, reasons = asetoftiles.handtype_fan_calculator(legit_hand) print('Total {} Fan\n'.format(fan)) print("The reasons are:") for reason in reasons: print(reason) else: print('There is no legitimate hand!')
32.793103
99
0.773396
from engine import * chingyatsikpingwu = [] chingyatsikpingwu.append(SimpleTile(2, 'Man')) chingyatsikpingwu.append(SimpleTile(3, 'Man')) chingyatsikpingwu.append(SimpleTile(4, 'Man')) chingyatsikpingwu.append(SimpleTile(1, 'Man')) chingyatsikpingwu.append(SimpleTile(2, 'Man')) chingyatsikpingwu.append(SimpleTile(3, 'Man')) chingyatsikpingwu.append(SimpleTile(4, 'Man')) chingyatsikpingwu.append(SimpleTile(5, 'Man')) chingyatsikpingwu.append(SimpleTile(6, 'Man')) chingyatsikpingwu.append(SimpleTile(2, 'Man')) chingyatsikpingwu.append(SimpleTile(3, 'Man')) chingyatsikpingwu.append(SimpleTile(4, 'Man')) chingyatsikpingwu.append(SimpleTile(9, 'Man')) chingyatsikpingwu.append(SimpleTile(9, 'Man')) supcharmyiu = [] supcharmyiu.append(SimpleTile(1, 'Man')) supcharmyiu.append(SimpleTile(1, 'Bamboo')) supcharmyiu.append(SimpleTile(1, 'Circle')) supcharmyiu.append(SimpleTile(9, 'Man')) supcharmyiu.append(SimpleTile(9, 'Bamboo')) supcharmyiu.append(SimpleTile(9, 'Circle')) supcharmyiu.append(HonorTile.from_str('g')) supcharmyiu.append(HonorTile.from_str('r')) supcharmyiu.append(HonorTile.from_str('wh')) supcharmyiu.append(HonorTile.from_str('e')) supcharmyiu.append(HonorTile.from_str('s')) supcharmyiu.append(HonorTile.from_str('we')) supcharmyiu.append(HonorTile.from_str('n')) supcharmyiu.append(HonorTile.from_str('n')) asetoftiles = FanCalculator() asetoftiles.tiles = chingyatsikpingwu print(asetoftiles.tiles) print() print(asetoftiles.all_com()) eatable, legit_hands = asetoftiles.legitimate_hands() if eatable: print('It is a legitimate hand and there is(are) {} possible hand(s)!\n'.format(len(legit_hands))) for legit_hand in legit_hands: fan, reasons = asetoftiles.handtype_fan_calculator(legit_hand) print('Total {} Fan\n'.format(fan)) print("The reasons are:") for reason in reasons: print(reason) else: print('There is no legitimate hand!')
true
true
f70db34607ec02cb27fca267843c35a0086a13b0
573
py
Python
soulcalibur_vi/migrations/0020_auto_20201209_0127.py
RayGar7/SaltyFramesAPI
af831c1583a613193f059f45480147caf8d1d72d
[ "PostgreSQL" ]
null
null
null
soulcalibur_vi/migrations/0020_auto_20201209_0127.py
RayGar7/SaltyFramesAPI
af831c1583a613193f059f45480147caf8d1d72d
[ "PostgreSQL" ]
null
null
null
soulcalibur_vi/migrations/0020_auto_20201209_0127.py
RayGar7/SaltyFramesAPI
af831c1583a613193f059f45480147caf8d1d72d
[ "PostgreSQL" ]
null
null
null
# Generated by Django 3.1.3 on 2020-12-09 01:27 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('soulcalibur_vi', '0019_auto_20201205_0415'), ] operations = [ migrations.AlterField( model_name='move', name='attack_name', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='move', name='command', field=models.CharField(max_length=100), ), ]
23.875
74
0.588133
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('soulcalibur_vi', '0019_auto_20201205_0415'), ] operations = [ migrations.AlterField( model_name='move', name='attack_name', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='move', name='command', field=models.CharField(max_length=100), ), ]
true
true
f70db3e30f6c417576f165fd15b7507499961320
4,098
py
Python
beekeeper/settings.py
evan10s/beekeeper
13fd3bf812e3c78988429a165e149bfbaffb5b17
[ "MIT" ]
null
null
null
beekeeper/settings.py
evan10s/beekeeper
13fd3bf812e3c78988429a165e149bfbaffb5b17
[ "MIT" ]
null
null
null
beekeeper/settings.py
evan10s/beekeeper
13fd3bf812e3c78988429a165e149bfbaffb5b17
[ "MIT" ]
null
null
null
""" Django settings for beekeeper project on Heroku. For more info, see: https://github.com/heroku/heroku-django-template For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ['DJ_SECRET_KEY'] # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.getenv('DJ_DEBUG',False) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', # Disable Django's own staticfiles handling in favour of WhiteNoise, for # greater consistency between gunicorn and `./manage.py runserver`. See: # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development 'whitenoise.runserver_nostatic', 'django.contrib.staticfiles', 'beekeeper' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'beekeeper.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'debug': DEBUG, }, }, ] WSGI_APPLICATION = 'beekeeper.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'US/Eastern' USE_I18N = True USE_L10N = True USE_TZ = True # Change 'default' database configuration with $DATABASE_URL. DATABASES['default'].update(dj_database_url.config(conn_max_age=500)) # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Allow all host headers ALLOWED_HOSTS = ['*'] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles') STATIC_URL = '/static/' # Extra places for collectstatic to find static files. STATICFILES_DIRS = [ os.path.join(PROJECT_ROOT, 'static'), ] # Simplified static file serving. # https://warehouse.python.org/project/whitenoise/ STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
30.355556
91
0.716691
import os import dj_database_url BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) SECRET_KEY = os.environ['DJ_SECRET_KEY'] DEBUG = os.getenv('DJ_DEBUG',False) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', # Disable Django's own staticfiles handling in favour of WhiteNoise, for tic', 'django.contrib.staticfiles', 'beekeeper' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'beekeeper.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'debug': DEBUG, }, }, ] WSGI_APPLICATION = 'beekeeper.wsgi.application' S = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] LANGUAGE_CODE = 'en-us' TIME_ZONE = 'US/Eastern' USE_I18N = True USE_L10N = True USE_TZ = True DATABASES['default'].update(dj_database_url.config(conn_max_age=500)) SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') ALLOWED_HOSTS = ['*'] STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles') STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(PROJECT_ROOT, 'static'), ] STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
true
true
f70db42fafeab99811b4a56e0139d204e50eaa81
2,852
py
Python
maza/modules/creds/generic/telnet_default.py
ArturSpirin/maza
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
[ "MIT" ]
2
2020-02-06T20:24:31.000Z
2022-03-08T19:07:16.000Z
maza/modules/creds/generic/telnet_default.py
ArturSpirin/maza
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
[ "MIT" ]
null
null
null
maza/modules/creds/generic/telnet_default.py
ArturSpirin/maza
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
[ "MIT" ]
null
null
null
from maza.core.exploit import * from maza.core.telnet.telnet_client import TelnetClient from maza.resources import wordlists class Exploit(TelnetClient): __info__ = { "name": "Telnet Default Creds", "description": "Module performs dictionary attack with default credentials against Telnet service. " "If valid credentials are found, they are displayed to the user.", "authors": ( "Marcin Bury <marcin[at]threat9.com>", # routersploit module ), "devices": ( "Multiple devices", ) } target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)") port = OptPort(23, "Target Telnet port") threads = OptInteger(8, "Number of threads") defaults = OptWordlist(wordlists.defaults, "User:Pass or file with default credentials (file://)") stop_on_success = OptBool(True, "Stop on first valid authentication attempt") verbosity = OptBool(True, "Display authentication attempts") def run(self): self.credentials = [] self.attack() @multi def attack(self): if not self.check(): return print_status("Starting default credentials attack against Telnet service") data = LockedIterator(self.defaults) self.run_threads(self.threads, self.target_function, data) if self.credentials: print_success("Credentials found!") headers = ("Target", "Port", "Service", "Username", "Password") print_table(headers, *self.credentials) else: print_error("Credentials not found") def target_function(self, running, data): while running.is_set(): try: username, password = data.next().split(":") telnet_client = self.telnet_create() if telnet_client.login(username, password, retries=3): if self.stop_on_success: running.clear() self.credentials.append((self.target, self.port, self.target_protocol, username, password)) telnet_client.close() except StopIteration: break def check(self): telnet_client = self.telnet_create() if telnet_client.test_connect(): print_status("Target exposes Telnet service", verbose=self.verbosity) return True print_status("Target does not expose Telnet service", verbose=self.verbosity) return False @mute def check_default(self): if self.check(): self.credentials = [] data = LockedIterator(self.defaults) self.run_threads(self.threads, self.target_function, data) if self.credentials: return self.credentials return None
33.162791
111
0.608345
from maza.core.exploit import * from maza.core.telnet.telnet_client import TelnetClient from maza.resources import wordlists class Exploit(TelnetClient): __info__ = { "name": "Telnet Default Creds", "description": "Module performs dictionary attack with default credentials against Telnet service. " "If valid credentials are found, they are displayed to the user.", "authors": ( "Marcin Bury <marcin[at]threat9.com>", ), "devices": ( "Multiple devices", ) } target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)") port = OptPort(23, "Target Telnet port") threads = OptInteger(8, "Number of threads") defaults = OptWordlist(wordlists.defaults, "User:Pass or file with default credentials (file://)") stop_on_success = OptBool(True, "Stop on first valid authentication attempt") verbosity = OptBool(True, "Display authentication attempts") def run(self): self.credentials = [] self.attack() @multi def attack(self): if not self.check(): return print_status("Starting default credentials attack against Telnet service") data = LockedIterator(self.defaults) self.run_threads(self.threads, self.target_function, data) if self.credentials: print_success("Credentials found!") headers = ("Target", "Port", "Service", "Username", "Password") print_table(headers, *self.credentials) else: print_error("Credentials not found") def target_function(self, running, data): while running.is_set(): try: username, password = data.next().split(":") telnet_client = self.telnet_create() if telnet_client.login(username, password, retries=3): if self.stop_on_success: running.clear() self.credentials.append((self.target, self.port, self.target_protocol, username, password)) telnet_client.close() except StopIteration: break def check(self): telnet_client = self.telnet_create() if telnet_client.test_connect(): print_status("Target exposes Telnet service", verbose=self.verbosity) return True print_status("Target does not expose Telnet service", verbose=self.verbosity) return False @mute def check_default(self): if self.check(): self.credentials = [] data = LockedIterator(self.defaults) self.run_threads(self.threads, self.target_function, data) if self.credentials: return self.credentials return None
true
true
f70db552291bdd651d9cd3db3990a9f662669345
2,788
py
Python
capture/noworkflow/now/persistence/models/trial_dot.py
raffaelfoidl/noworkflow
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
[ "MIT" ]
108
2015-02-04T14:16:51.000Z
2022-03-06T13:52:45.000Z
capture/noworkflow/now/persistence/models/trial_dot.py
raffaelfoidl/noworkflow
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
[ "MIT" ]
92
2015-01-19T14:58:06.000Z
2021-04-19T17:28:50.000Z
capture/noworkflow/now/persistence/models/trial_dot.py
raffaelfoidl/noworkflow
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
[ "MIT" ]
31
2015-03-03T23:53:59.000Z
2021-11-11T04:23:44.000Z
# Copyright (c) 2016 Universidade Federal Fluminense (UFF) # Copyright (c) 2016 Polytechnic Institute of New York University. # This file is part of noWorkflow. # Please, consult the license terms in the LICENSE file. """Trial Dot Object""" from __future__ import (absolute_import, print_function, division, unicode_literals) import weakref from future.utils import viewitems from .base import Model from . import FileAccess from .graphs.dependency_graph import DependencyFilter, variable_id from .graphs.dependency_graph import DotVisitor CALL_SCHEMA = "#3A85B9", "box", "white", "filled" VAR_SCHEMA = "#85CBD0", "box", "black", "rounded,filled" FILE_SCHEMA = "white", "box", "black", "rounded,filled" BLACKBOX_SCHEMA = "black", "box", "grey", "filled" GRAYBOX_SCHEMA = "grey", "box", "black", "filled" IMPORT_SCHEMA = "#1B2881", "box", "#7AC5F9", "filled" TYPES = { "call": CALL_SCHEMA, "normal": VAR_SCHEMA, "virtual": VAR_SCHEMA, "param": VAR_SCHEMA, "import": IMPORT_SCHEMA, "--blackbox--": BLACKBOX_SCHEMA, "--graybox--": GRAYBOX_SCHEMA, "access": FILE_SCHEMA, } class TrialDot(Model): # pylint: disable=too-many-instance-attributes """Handle Dot export""" __modelname__ = "TrialDot" def __init__(self, trial): super(TrialDot, self).__init__() self.trial = weakref.proxy(trial) self.format = "svg" self.value_length = 0 self.name_length = 55 self.fallback = None self.run = True def simulation(self): """Configure simulation graph""" self.fallback = None def prospective(self): """Configure prospective graph""" self.fallback = None def dependency(self): """Configure dependency graph""" self.fallback = VAR_SCHEMA def export_text(self): """Export facts from trial as text""" dep_filter = self.trial.dependency_filter if self.run: dep_filter.run() getattr(self, self.trial.dependency_config.mode)() visitor = DotVisitor(self.fallback, self.name_length, self.value_length, TYPES, dep_filter) visitor.visit(dep_filter.main_cluster) return "\n".join(visitor.result) def _repr_svg_(self): if self.format == "svg": ipython = get_ipython() return ipython.run_cell_magic( "dot", "--format {}".format(self.format), self.export_text() ) def _repr_png_(self): if self.format == "png": ipython = get_ipython() return ipython.run_cell_magic( "dot", "--format {}".format(self.format), self.export_text() )
30.977778
127
0.615854
from __future__ import (absolute_import, print_function, division, unicode_literals) import weakref from future.utils import viewitems from .base import Model from . import FileAccess from .graphs.dependency_graph import DependencyFilter, variable_id from .graphs.dependency_graph import DotVisitor CALL_SCHEMA = "#3A85B9", "box", "white", "filled" VAR_SCHEMA = "#85CBD0", "box", "black", "rounded,filled" FILE_SCHEMA = "white", "box", "black", "rounded,filled" BLACKBOX_SCHEMA = "black", "box", "grey", "filled" GRAYBOX_SCHEMA = "grey", "box", "black", "filled" IMPORT_SCHEMA = "#1B2881", "box", "#7AC5F9", "filled" TYPES = { "call": CALL_SCHEMA, "normal": VAR_SCHEMA, "virtual": VAR_SCHEMA, "param": VAR_SCHEMA, "import": IMPORT_SCHEMA, "--blackbox--": BLACKBOX_SCHEMA, "--graybox--": GRAYBOX_SCHEMA, "access": FILE_SCHEMA, } class TrialDot(Model): __modelname__ = "TrialDot" def __init__(self, trial): super(TrialDot, self).__init__() self.trial = weakref.proxy(trial) self.format = "svg" self.value_length = 0 self.name_length = 55 self.fallback = None self.run = True def simulation(self): self.fallback = None def prospective(self): self.fallback = None def dependency(self): self.fallback = VAR_SCHEMA def export_text(self): dep_filter = self.trial.dependency_filter if self.run: dep_filter.run() getattr(self, self.trial.dependency_config.mode)() visitor = DotVisitor(self.fallback, self.name_length, self.value_length, TYPES, dep_filter) visitor.visit(dep_filter.main_cluster) return "\n".join(visitor.result) def _repr_svg_(self): if self.format == "svg": ipython = get_ipython() return ipython.run_cell_magic( "dot", "--format {}".format(self.format), self.export_text() ) def _repr_png_(self): if self.format == "png": ipython = get_ipython() return ipython.run_cell_magic( "dot", "--format {}".format(self.format), self.export_text() )
true
true
f70db553b343e43a65e07f970865b491dafc586f
866
py
Python
tests/statusupdate_test.py
jgkelly/FreezerState
788733c9d75fa7db99ab787e1b0b6d873c3f0eb2
[ "MIT" ]
null
null
null
tests/statusupdate_test.py
jgkelly/FreezerState
788733c9d75fa7db99ab787e1b0b6d873c3f0eb2
[ "MIT" ]
null
null
null
tests/statusupdate_test.py
jgkelly/FreezerState
788733c9d75fa7db99ab787e1b0b6d873c3f0eb2
[ "MIT" ]
null
null
null
import pytest import datetime #from datetime import datetime from freezerstate.statusupdate import StatusUpdate @pytest.fixture def statusobj(): obj = StatusUpdate(True, '8:00,9:30,21:00,26:99') return obj def test_update_initialization(statusobj): assert len(statusobj.notification_times) == 3 def test_should_notify_should_be_false_if_time_not_in_list(statusobj): test_time = datetime.time(10,30) result = statusobj.should_notify(test_time) assert result is False def test_should_notify_should_be_true_if_time_is_in_list(statusobj): test_time = datetime.time(9, 30, 0) result = statusobj.should_notify(test_time) assert result is True def test_should_notify_should_be_true_if_now_is_in_list(statusobj): test_time = datetime.datetime.now() result = statusobj.should_notify(test_time) assert result is False
26.242424
70
0.78291
import pytest import datetime from freezerstate.statusupdate import StatusUpdate @pytest.fixture def statusobj(): obj = StatusUpdate(True, '8:00,9:30,21:00,26:99') return obj def test_update_initialization(statusobj): assert len(statusobj.notification_times) == 3 def test_should_notify_should_be_false_if_time_not_in_list(statusobj): test_time = datetime.time(10,30) result = statusobj.should_notify(test_time) assert result is False def test_should_notify_should_be_true_if_time_is_in_list(statusobj): test_time = datetime.time(9, 30, 0) result = statusobj.should_notify(test_time) assert result is True def test_should_notify_should_be_true_if_now_is_in_list(statusobj): test_time = datetime.datetime.now() result = statusobj.should_notify(test_time) assert result is False
true
true
f70db616322dfb3e72041d64e42fd190201c6e36
13,707
py
Python
proc/crossref.py
scieloorg/cited-references-norm
e5e568fc6acc7963a2bd0905586f1d1aed181bf0
[ "BSD-2-Clause" ]
null
null
null
proc/crossref.py
scieloorg/cited-references-norm
e5e568fc6acc7963a2bd0905586f1d1aed181bf0
[ "BSD-2-Clause" ]
2
2020-05-29T01:53:35.000Z
2021-12-13T20:51:07.000Z
proc/crossref.py
scieloorg/cited-references-norm
e5e568fc6acc7963a2bd0905586f1d1aed181bf0
[ "BSD-2-Clause" ]
2
2020-05-15T21:24:04.000Z
2020-05-15T21:32:46.000Z
import argparse import asyncio import html import json import logging import os import textwrap import time import xmltodict from aiohttp import ClientSession, ClientConnectorError, ServerDisconnectedError, ContentTypeError from articlemeta.client import RestfulClient from datetime import datetime from json import JSONDecodeError from pyexpat import ExpatError from pymongo import errors, MongoClient, uri_parser from utils.string_processor import preprocess_author_name, preprocess_doi, preprocess_journal_title from xylose.scielodocument import Article, Citation DIR_DATA = os.environ.get('DIR_DATA', '/opt/data') MONGO_STDCITS_COLLECTION = os.environ.get('MONGO_STDCITS_COLLECTION', 'standardized') CROSSREF_URL_WORKS = os.environ.get('CROSSREF_URL_WORKS', 'https://api.crossref.org/works/{}') CROSSREF_URL_OPENURL = os.environ.get('CROSSREF_URL_OPENURL', 'https://doi.crossref.org/openurl?') CROSSREF_SEMAPHORE_LIMIT = int(os.environ.get('CROSSREF_SEMAPHORE_LIMIT', '20')) class CrossrefAsyncCollector(object): logging.basicConfig(level=logging.INFO) def __init__(self, email: None, mongo_uri_std_cits=None): self.email = email if mongo_uri_std_cits: try: self.persist_mode = 'mongo' mongo_col = uri_parser.parse_uri(mongo_uri_std_cits).get('collection') if not mongo_col: mongo_col = MONGO_STDCITS_COLLECTION self.standardizer = MongoClient(mongo_uri_std_cits).get_database().get_collection(mongo_col) total_docs = self.standardizer.count_documents({}) logging.info('There are {0} documents in the collection {1}'.format(total_docs, mongo_col)) except ConnectionError as e: logging.error('ConnectionError %s' % mongo_uri_std_cits) logging.error(e) else: self.persist_mode = 'json' file_name_results = 'crossref-results-' + str(time.time()) + '.json' self.path_results = os.path.join(DIR_DATA, file_name_results) def extract_attrs(self, article: Article): """ Extrai os atributos de todas as referências citadas de um documento. :param article: documento do qual serão extraídos os atributos das referências citadas :return: dicionário de ids de citações e respectivos atributos """ cit_id_to_attrs = {} if article.citations: for cit in article.citations: if cit.publication_type == 'article': cit_id = self.mount_id(cit, article.collection_acronym) cit_attrs = {} if self.persist_mode == 'json': cit_attrs = self._extract_cit_attrs(cit) elif self.persist_mode == 'mongo': cit_data = self.standardizer.find_one({'_id': cit_id}) if not cit_data or not cit_data.get('crossref'): cit_attrs = self._extract_cit_attrs(cit) if cit_attrs: cit_id_to_attrs[cit_id] = cit_attrs return cit_id_to_attrs def _extract_cit_attrs(self, cit: Citation): """ Extrai os atributos de uma referência citada necessários para requisitar metadados CrossRef. :param cit: referência citada :return: dicionário de atributos para consulta no serviço CrossRef """ if cit.doi: valid_doi = preprocess_doi(cit.doi) if valid_doi: return {'doi': valid_doi} attrs = {} if cit.first_author: first_author_surname = cit.first_author.get('surname', '') cleaned_author_surname = preprocess_author_name(first_author_surname) if cleaned_author_surname: attrs.update({'aulast': cleaned_author_surname}) journal_title = cit.source if journal_title: cleaned_journal_title = preprocess_journal_title(journal_title) if cleaned_journal_title: attrs.update({'title': cleaned_journal_title}) publication_date = html.unescape(cit.publication_date) if cit.publication_date else None if publication_date and len(publication_date) >= 4: publication_year = publication_date[:4] if publication_year.isdigit(): attrs.update({'data': publication_year}) volume = html.unescape(cit.volume) if cit.volume else None if volume: attrs.update({'volume': volume}) issue = html.unescape(cit.issue) if cit.issue else None if issue: attrs.update({'issue': issue}) first_page = html.unescape(cit.first_page) if cit.first_page else None if first_page: attrs.update({'spage': first_page}) if attrs: return attrs def parse_crossref_openurl_result(self, text): """ Converte response.text para JSON com metadados obtidos do endpoint OPENURL. :param response: resposta de requisição em formato de texto :return: JSON com metadados obtidos do serviço CrossRef """ try: raw = xmltodict.parse(text) for v in raw.get('doi_records', {}).values(): metadata = v.get('crossref') if metadata and 'error' not in metadata.keys(): owner = v.get('@owner') if owner: metadata.update({'owner': owner}) timestamp = v.get('@timestamp') if timestamp: metadata.update({'timestamp': timestamp}) journal_article = metadata.get('journal', {}).get('journal_article', {}) if 'citation_list' in journal_article: journal_article.__delitem__('citation_list') return metadata except ExpatError as e: logging.warning("ExpatError {0}".format(text)) logging.warning(e) def parse_crossref_works_result(self, raw_metadata): """ Limpa dicionário de metadados obtidos do endpoint WORKS. Remove campo de referências :param raw_metadata: resposta de requisição em formato de dicionário :return: JSON com metadados obtidos do serviço Crossref """ raw_status = raw_metadata.get('status', '') if raw_status == 'ok': metadata = raw_metadata.get('message') if metadata: if 'reference' in metadata: metadata.__delitem__('reference') return metadata def mount_id(self, cit: Citation, collection: str): """ Monta o identificador de uma referência citada. :param cit: referência citada :param collection: coleção em que a referência foi citada :return: código identificador da citação """ cit_id = cit.data['v880'][0]['_'] return '{0}-{1}'.format(cit_id, collection) def save_crossref_metadata(self, id_to_metadata: dict): """ Persiste os metadados da referência citada. :param id_to_metadata: dicionário com id da referência citada e seus respectivos metadados Crossref """ if self.persist_mode == 'json': with open(self.path_results, 'a') as f: json.dump(id_to_metadata, f) f.write('\n') elif self.persist_mode == 'mongo': self.standardizer.update_one(filter={'_id': id_to_metadata['_id']}, update={'$set': { 'crossref': id_to_metadata['crossref'], 'update-date': datetime.now().strftime('%Y-%m-%d') }}, upsert=True) async def run(self, citations_attrs: dict): sem = asyncio.Semaphore(CROSSREF_SEMAPHORE_LIMIT) tasks = [] async with ClientSession(headers={'mailto:': self.email}) as session: for cit_id, attrs in citations_attrs.items(): if 'doi' in attrs: url = CROSSREF_URL_WORKS.format(attrs['doi']) mode = 'doi' else: url = CROSSREF_URL_OPENURL for k, v in attrs.items(): if k != 'doi': url += '&' + k + '=' + v url += '&pid=' + self.email url += '&format=unixref' url += '&multihit=false' mode = 'attrs' task = asyncio.ensure_future(self.bound_fetch(cit_id, url, sem, session, mode)) tasks.append(task) responses = asyncio.gather(*tasks) await responses async def bound_fetch(self, cit_id, url, semaphore, session, mode): async with semaphore: await self.fetch(cit_id, url, session, mode) async def fetch(self, cit_id, url, session, mode): try: async with session.get(url) as response: try: logging.info('Collecting metadata for %s' % cit_id) if mode == 'doi': raw_metadata = await response.json(content_type=None) if raw_metadata: metadata = self.parse_crossref_works_result(raw_metadata) else: raw_metadata = await response.text() if raw_metadata: metadata = self.parse_crossref_openurl_result(raw_metadata) if metadata: id_to_metadata = {'_id': cit_id, 'crossref': metadata} self.save_crossref_metadata(id_to_metadata) except JSONDecodeError as e: logging.warning('JSONDecodeError: %s' % cit_id) logging.warning(e) except TimeoutError as e: logging.warning('TimeoutError [INNER]: %s' % cit_id) logging.warning(e) except ContentTypeError as e: logging.warning('ContentTypeError: %s' % cit_id) logging.warning(e) except ServerDisconnectedError as e: logging.warning('ServerDisconnectedError: %s' % cit_id) logging.warning(e) except TimeoutError as e: logging.warning('TimeoutError [OUTER]: %s' % cit_id) logging.warning(e) except ClientConnectorError as e: logging.warning('ClientConectorError: %s' % cit_id) logging.warning(e) def format_date(date: datetime): if not date: return None return date.strftime('%Y-%m-%d') def main(): usage = "collect metadata from the Crossref Service" parser = argparse.ArgumentParser(textwrap.dedent(usage)) parser.add_argument( '-c', '--col', default=None, dest='col', help='normalize cited references in an entire collection' ) parser.add_argument( '-f', '--from_date', type=lambda x: datetime.strptime(x, '%Y-%m-%d'), nargs='?', help='collect metadata for cited references in documents published from a date (YYYY-MM-DD)' ) parser.add_argument( '-u', '--until_date', type=lambda x: datetime.strptime(x, '%Y-%m-%d'), nargs='?', default=datetime.now(), help='collect metadata for cited references in documents published until a date (YYYY-MM-DD)' ) parser.add_argument( '-i', '--document_id', default=None, dest='pid', help='collect metadata for cited for the cited references in a PID (document)' ) parser.add_argument( '--mongo_uri', default=None, dest='mongo_uri_std_cits', help='mongo uri string in the format mongodb://[username:password@]host1[:port1][,...hostN[:portN]][/[defaultauthdb][?options]]' ) parser.add_argument( '-e', '--email', required=True, default=None, dest='email', help='an e-mail registered in the Crossref service' ) args = parser.parse_args() try: art_meta = RestfulClient() cac = CrossrefAsyncCollector(email=args.email, mongo_uri_std_cits=args.mongo_uri_std_cits) cit_ids_to_attrs = {} start_time = time.time() if args.pid: logging.info('Running in one PID mode') document = art_meta.document(collection=args.col, code=args.pid) if document: logging.info('Extracting info from cited references in %s ' % document.publisher_id) cit_ids_to_attrs = cac.extract_attrs(document) else: logging.info('Running in many PIDs mode') for document in art_meta.documents(collection=args.col, from_date=format_date(args.from_date), until_date=format_date(args.until_date)): logging.info('Extracting info from cited references in %s ' % document.publisher_id) cit_ids_to_attrs.update(cac.extract_attrs(document)) loop = asyncio.get_event_loop() future = asyncio.ensure_future(cac.run(cit_ids_to_attrs)) loop.run_until_complete(future) end_time = time.time() logging.info('Duration {0} seconds.'.format(end_time - start_time)) except KeyboardInterrupt: print("Interrupt by user")
37.348774
136
0.584738
import argparse import asyncio import html import json import logging import os import textwrap import time import xmltodict from aiohttp import ClientSession, ClientConnectorError, ServerDisconnectedError, ContentTypeError from articlemeta.client import RestfulClient from datetime import datetime from json import JSONDecodeError from pyexpat import ExpatError from pymongo import errors, MongoClient, uri_parser from utils.string_processor import preprocess_author_name, preprocess_doi, preprocess_journal_title from xylose.scielodocument import Article, Citation DIR_DATA = os.environ.get('DIR_DATA', '/opt/data') MONGO_STDCITS_COLLECTION = os.environ.get('MONGO_STDCITS_COLLECTION', 'standardized') CROSSREF_URL_WORKS = os.environ.get('CROSSREF_URL_WORKS', 'https://api.crossref.org/works/{}') CROSSREF_URL_OPENURL = os.environ.get('CROSSREF_URL_OPENURL', 'https://doi.crossref.org/openurl?') CROSSREF_SEMAPHORE_LIMIT = int(os.environ.get('CROSSREF_SEMAPHORE_LIMIT', '20')) class CrossrefAsyncCollector(object): logging.basicConfig(level=logging.INFO) def __init__(self, email: None, mongo_uri_std_cits=None): self.email = email if mongo_uri_std_cits: try: self.persist_mode = 'mongo' mongo_col = uri_parser.parse_uri(mongo_uri_std_cits).get('collection') if not mongo_col: mongo_col = MONGO_STDCITS_COLLECTION self.standardizer = MongoClient(mongo_uri_std_cits).get_database().get_collection(mongo_col) total_docs = self.standardizer.count_documents({}) logging.info('There are {0} documents in the collection {1}'.format(total_docs, mongo_col)) except ConnectionError as e: logging.error('ConnectionError %s' % mongo_uri_std_cits) logging.error(e) else: self.persist_mode = 'json' file_name_results = 'crossref-results-' + str(time.time()) + '.json' self.path_results = os.path.join(DIR_DATA, file_name_results) def extract_attrs(self, article: Article): cit_id_to_attrs = {} if article.citations: for cit in article.citations: if cit.publication_type == 'article': cit_id = self.mount_id(cit, article.collection_acronym) cit_attrs = {} if self.persist_mode == 'json': cit_attrs = self._extract_cit_attrs(cit) elif self.persist_mode == 'mongo': cit_data = self.standardizer.find_one({'_id': cit_id}) if not cit_data or not cit_data.get('crossref'): cit_attrs = self._extract_cit_attrs(cit) if cit_attrs: cit_id_to_attrs[cit_id] = cit_attrs return cit_id_to_attrs def _extract_cit_attrs(self, cit: Citation): if cit.doi: valid_doi = preprocess_doi(cit.doi) if valid_doi: return {'doi': valid_doi} attrs = {} if cit.first_author: first_author_surname = cit.first_author.get('surname', '') cleaned_author_surname = preprocess_author_name(first_author_surname) if cleaned_author_surname: attrs.update({'aulast': cleaned_author_surname}) journal_title = cit.source if journal_title: cleaned_journal_title = preprocess_journal_title(journal_title) if cleaned_journal_title: attrs.update({'title': cleaned_journal_title}) publication_date = html.unescape(cit.publication_date) if cit.publication_date else None if publication_date and len(publication_date) >= 4: publication_year = publication_date[:4] if publication_year.isdigit(): attrs.update({'data': publication_year}) volume = html.unescape(cit.volume) if cit.volume else None if volume: attrs.update({'volume': volume}) issue = html.unescape(cit.issue) if cit.issue else None if issue: attrs.update({'issue': issue}) first_page = html.unescape(cit.first_page) if cit.first_page else None if first_page: attrs.update({'spage': first_page}) if attrs: return attrs def parse_crossref_openurl_result(self, text): try: raw = xmltodict.parse(text) for v in raw.get('doi_records', {}).values(): metadata = v.get('crossref') if metadata and 'error' not in metadata.keys(): owner = v.get('@owner') if owner: metadata.update({'owner': owner}) timestamp = v.get('@timestamp') if timestamp: metadata.update({'timestamp': timestamp}) journal_article = metadata.get('journal', {}).get('journal_article', {}) if 'citation_list' in journal_article: journal_article.__delitem__('citation_list') return metadata except ExpatError as e: logging.warning("ExpatError {0}".format(text)) logging.warning(e) def parse_crossref_works_result(self, raw_metadata): raw_status = raw_metadata.get('status', '') if raw_status == 'ok': metadata = raw_metadata.get('message') if metadata: if 'reference' in metadata: metadata.__delitem__('reference') return metadata def mount_id(self, cit: Citation, collection: str): cit_id = cit.data['v880'][0]['_'] return '{0}-{1}'.format(cit_id, collection) def save_crossref_metadata(self, id_to_metadata: dict): if self.persist_mode == 'json': with open(self.path_results, 'a') as f: json.dump(id_to_metadata, f) f.write('\n') elif self.persist_mode == 'mongo': self.standardizer.update_one(filter={'_id': id_to_metadata['_id']}, update={'$set': { 'crossref': id_to_metadata['crossref'], 'update-date': datetime.now().strftime('%Y-%m-%d') }}, upsert=True) async def run(self, citations_attrs: dict): sem = asyncio.Semaphore(CROSSREF_SEMAPHORE_LIMIT) tasks = [] async with ClientSession(headers={'mailto:': self.email}) as session: for cit_id, attrs in citations_attrs.items(): if 'doi' in attrs: url = CROSSREF_URL_WORKS.format(attrs['doi']) mode = 'doi' else: url = CROSSREF_URL_OPENURL for k, v in attrs.items(): if k != 'doi': url += '&' + k + '=' + v url += '&pid=' + self.email url += '&format=unixref' url += '&multihit=false' mode = 'attrs' task = asyncio.ensure_future(self.bound_fetch(cit_id, url, sem, session, mode)) tasks.append(task) responses = asyncio.gather(*tasks) await responses async def bound_fetch(self, cit_id, url, semaphore, session, mode): async with semaphore: await self.fetch(cit_id, url, session, mode) async def fetch(self, cit_id, url, session, mode): try: async with session.get(url) as response: try: logging.info('Collecting metadata for %s' % cit_id) if mode == 'doi': raw_metadata = await response.json(content_type=None) if raw_metadata: metadata = self.parse_crossref_works_result(raw_metadata) else: raw_metadata = await response.text() if raw_metadata: metadata = self.parse_crossref_openurl_result(raw_metadata) if metadata: id_to_metadata = {'_id': cit_id, 'crossref': metadata} self.save_crossref_metadata(id_to_metadata) except JSONDecodeError as e: logging.warning('JSONDecodeError: %s' % cit_id) logging.warning(e) except TimeoutError as e: logging.warning('TimeoutError [INNER]: %s' % cit_id) logging.warning(e) except ContentTypeError as e: logging.warning('ContentTypeError: %s' % cit_id) logging.warning(e) except ServerDisconnectedError as e: logging.warning('ServerDisconnectedError: %s' % cit_id) logging.warning(e) except TimeoutError as e: logging.warning('TimeoutError [OUTER]: %s' % cit_id) logging.warning(e) except ClientConnectorError as e: logging.warning('ClientConectorError: %s' % cit_id) logging.warning(e) def format_date(date: datetime): if not date: return None return date.strftime('%Y-%m-%d') def main(): usage = "collect metadata from the Crossref Service" parser = argparse.ArgumentParser(textwrap.dedent(usage)) parser.add_argument( '-c', '--col', default=None, dest='col', help='normalize cited references in an entire collection' ) parser.add_argument( '-f', '--from_date', type=lambda x: datetime.strptime(x, '%Y-%m-%d'), nargs='?', help='collect metadata for cited references in documents published from a date (YYYY-MM-DD)' ) parser.add_argument( '-u', '--until_date', type=lambda x: datetime.strptime(x, '%Y-%m-%d'), nargs='?', default=datetime.now(), help='collect metadata for cited references in documents published until a date (YYYY-MM-DD)' ) parser.add_argument( '-i', '--document_id', default=None, dest='pid', help='collect metadata for cited for the cited references in a PID (document)' ) parser.add_argument( '--mongo_uri', default=None, dest='mongo_uri_std_cits', help='mongo uri string in the format mongodb://[username:password@]host1[:port1][,...hostN[:portN]][/[defaultauthdb][?options]]' ) parser.add_argument( '-e', '--email', required=True, default=None, dest='email', help='an e-mail registered in the Crossref service' ) args = parser.parse_args() try: art_meta = RestfulClient() cac = CrossrefAsyncCollector(email=args.email, mongo_uri_std_cits=args.mongo_uri_std_cits) cit_ids_to_attrs = {} start_time = time.time() if args.pid: logging.info('Running in one PID mode') document = art_meta.document(collection=args.col, code=args.pid) if document: logging.info('Extracting info from cited references in %s ' % document.publisher_id) cit_ids_to_attrs = cac.extract_attrs(document) else: logging.info('Running in many PIDs mode') for document in art_meta.documents(collection=args.col, from_date=format_date(args.from_date), until_date=format_date(args.until_date)): logging.info('Extracting info from cited references in %s ' % document.publisher_id) cit_ids_to_attrs.update(cac.extract_attrs(document)) loop = asyncio.get_event_loop() future = asyncio.ensure_future(cac.run(cit_ids_to_attrs)) loop.run_until_complete(future) end_time = time.time() logging.info('Duration {0} seconds.'.format(end_time - start_time)) except KeyboardInterrupt: print("Interrupt by user")
true
true
f70db867f62fffa6d6348dde3c8aeac9d337b99d
2,242
py
Python
Boston-Housing/boston_coreml.py
nikhiljay/ml
b99f2727bcffce6dc8c5313dfac3b7b8387c46f8
[ "MIT" ]
null
null
null
Boston-Housing/boston_coreml.py
nikhiljay/ml
b99f2727bcffce6dc8c5313dfac3b7b8387c46f8
[ "MIT" ]
null
null
null
Boston-Housing/boston_coreml.py
nikhiljay/ml
b99f2727bcffce6dc8c5313dfac3b7b8387c46f8
[ "MIT" ]
1
2019-02-27T18:56:09.000Z
2019-02-27T18:56:09.000Z
# Written by Nikhil D'Souza # Data from http://lib.stat.cmu.edu/datasets/boston # This neural network predicts the values of houses in Boston based on: # 1. per capita crime rate by town # 2. proportion of residential land zoned for lots over 25,000 sq.ft. # 3. proportion of non-retail business acres per town # 4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) # 5. nitric oxides concentration (parts per 10 million) # 6. average number of rooms per dwelling # 7. proportion of owner-occupied units built prior to 1940 # 8. weighted distances to five Boston employment centres # 9. index of accessibility to radial highways # 10. full-value property-tax rate per $10,000 # 11. pupil-teacher ratio by town # 12. 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town # 13. % lower status of the population from __future__ import print_function from matplotlib import pyplot as plt from keras.datasets import boston_housing from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras import backend as K import coremltools import keras import numpy as np (x_train, y_train), (x_val, y_val) = boston_housing.load_data() # Model architecture model = Sequential() model.add(Dense(20, input_dim=13, kernel_initializer='normal', activation='relu')) model.add(Dense(1, kernel_initializer='normal')) # Callback list (saves models at checkpoints) callbacks_list = [ keras.callbacks.ModelCheckpoint( filepath='best_model.{epoch:02d}.h5', monitor='val_loss', save_best_only=True), ] # Compile model model.compile(loss='mean_squared_error', optimizer='adam') # Train model batch_size = 5 epochs = 100 model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=callbacks_list, validation_data=(x_val, y_val), verbose=1) # Create CoreML model coreml_mnist = coremltools.converters.keras.convert( 'best_model.48.h5', input_names=['input'], output_names=['output'] ) coreml_mnist.author = 'Nikhil DSouza' coreml_mnist.license = 'Nikhil' coreml_mnist.short_description = 'Boston housing price regression' coreml_mnist.save('BostonClassifier.mlmodel')
33.462687
134
0.768064
# Data from http://lib.stat.cmu.edu/datasets/boston # This neural network predicts the values of houses in Boston based on: # 1. per capita crime rate by town # 2. proportion of residential land zoned for lots over 25,000 sq.ft. # 3. proportion of non-retail business acres per town # 4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) # 5. nitric oxides concentration (parts per 10 million) # 6. average number of rooms per dwelling # 7. proportion of owner-occupied units built prior to 1940 # 8. weighted distances to five Boston employment centres # 9. index of accessibility to radial highways # 10. full-value property-tax rate per $10,000 # 11. pupil-teacher ratio by town # 12. 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town # 13. % lower status of the population from __future__ import print_function from matplotlib import pyplot as plt from keras.datasets import boston_housing from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras import backend as K import coremltools import keras import numpy as np (x_train, y_train), (x_val, y_val) = boston_housing.load_data() # Model architecture model = Sequential() model.add(Dense(20, input_dim=13, kernel_initializer='normal', activation='relu')) model.add(Dense(1, kernel_initializer='normal')) # Callback list (saves models at checkpoints) callbacks_list = [ keras.callbacks.ModelCheckpoint( filepath='best_model.{epoch:02d}.h5', monitor='val_loss', save_best_only=True), ] # Compile model model.compile(loss='mean_squared_error', optimizer='adam') # Train model batch_size = 5 epochs = 100 model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=callbacks_list, validation_data=(x_val, y_val), verbose=1) # Create CoreML model coreml_mnist = coremltools.converters.keras.convert( 'best_model.48.h5', input_names=['input'], output_names=['output'] ) coreml_mnist.author = 'Nikhil DSouza' coreml_mnist.license = 'Nikhil' coreml_mnist.short_description = 'Boston housing price regression' coreml_mnist.save('BostonClassifier.mlmodel')
true
true
f70db896c615082ba7e5afd5363c7bcd246f4e23
426
py
Python
sdk/eventhub/azure-eventhub/azure/eventhub/extensions/__init__.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/eventhub/azure-eventhub/azure/eventhub/extensions/__init__.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/eventhub/azure-eventhub/azure/eventhub/extensions/__init__.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
71
94
0.434272
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
true
true
f70db8ab5e82377aee51f9454306a942435c95aa
29,595
py
Python
hparams.py
d3ft0uch/Tacotron-2
a508bb842053599697a7c0a20d2b8cbb32e28632
[ "MIT" ]
null
null
null
hparams.py
d3ft0uch/Tacotron-2
a508bb842053599697a7c0a20d2b8cbb32e28632
[ "MIT" ]
null
null
null
hparams.py
d3ft0uch/Tacotron-2
a508bb842053599697a7c0a20d2b8cbb32e28632
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import numpy as np import tensorflow as tf # Default hyperparameters hparams = tf.contrib.training.HParams( # Comma-separated list of cleaners to run on text prior to training and eval. For non-English # text, you may want to use "basic_cleaners" or "transliteration_cleaners". cleaners='transliteration_cleaners', # If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the GPU idx on run. example: # expample 1 GPU of index 2 (train on "/gpu2" only): CUDA_VISIBLE_DEVICES=2 python train.py --model='Tacotron' --hparams='tacotron_gpu_start_idx=2' # If you want to train on multiple GPUs, simply specify the number of GPUs available, and the idx of the first GPU to use. example: # example 4 GPUs starting from index 0 (train on "/gpu0"->"/gpu3"): python train.py --model='Tacotron' --hparams='tacotron_num_gpus=4, tacotron_gpu_start_idx=0' # The hparams arguments can be directly modified on this hparams.py file instead of being specified on run if preferred! # If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be trained on True mel spectrograms), one needs to specify different GPU idxes. # example Tacotron+WaveNet on a machine with 4 or more GPUs. Two GPUs for each model: # CUDA_VISIBLE_DEVICES=0,1 python train.py --model='Tacotron' --hparams='tacotron_num_gpus=2' # Cuda_VISIBLE_DEVICES=2,3 python train.py --model='WaveNet' --hparams='wavenet_num_gpus=2' # IMPORTANT NOTES: The Multi-GPU performance highly depends on your hardware and optimal parameters change between rigs. Default are optimized for servers. # If using N GPUs, please multiply the tacotron_batch_size by N below in the hparams! (tacotron_batch_size = 32 * N) # Never use lower batch size than 32 on a single GPU! # Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than 8 if GPU is having OOM, minimum 2) # Please also apply the synthesis batch size modification likewise. (if N GPUs are used for synthesis, minimal batch size must be N, minimum of 1 sample per GPU) # We did not add an automatic multi-GPU batch size computation to avoid confusion in the user's mind and to provide more control to the user for # resources related decisions. # Acknowledgement: # Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a little faster than the original # pipeline for a single GPU as well. Great work! # Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Both Tacotron and WaveNet can be trained on multi-GPU: data parallelization) # Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis. tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training. wavenet_num_gpus=1, # Determines the number of gpus in use for WaveNet training. split_on_cpu=False, # Determines whether to split data on CPU or on first GPU. This is automatically True when more than 1 GPU is used. # (Recommend: False on slow CPUs/Disks, True otherwise for small speed boost) ########################################################################################################################################### # Audio # Audio parameters are the most important parameters to tune when using this work on your personal data. Below are the beginner steps to adapt # this work to your personal data: # 1- Determine my data sample rate: First you need to determine your audio sample_rate (how many samples are in a second of audio). This can be done using sox: "sox --i <filename>" # (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz, so there are plenty of examples to refer to) # 2- set sample_rate parameter to your data correct sample rate # 3- Fix win_size and and hop_size accordingly: (Supposing you will follow our advice: 50ms window_size, and 12.5ms frame_shift(hop_size)) # a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200 # b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead) # 4- Fix n_fft, num_freq and upsample_scales parameters accordingly. # a- n_fft can be either equal to win_size or the first power of 2 that comes after win_size. I usually recommend using the latter # to be more consistent with signal processing friends. No big difference to be seen however. For the tuto example: n_fft = 2048 = 2**11 # b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 + 1 = 1025. # c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto example: upsample_scales=[15, 20] where 15 * 20 = 300 # it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0] # so the training segments should be long enough (2.8~3x upsample_scales[0] * hop_size or longer) so that the first kernel size can see the middle # of the samples efficiently. The length of WaveNet training segments is under the parameter "max_time_steps". # 5- Finally comes the silence trimming. This very much data dependent, so I suggest trying preprocessing (or part of it, ctrl-C to stop), then use the # .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That will first give you some idea about your above parameters, and # it will also give you an idea about trimming. If silences persist, try reducing trim_top_db slowly. If samples are trimmed mid words, try increasing it. # 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are showing black silent regions on top), then restart from step 2. num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality num_freq=1025, # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network rescale=True, # Whether to rescale audio prior to preprocessing rescaling_max=0.999, # Rescaling value # train samples of lengths between 3sec and 14sec are more than enough to make a model capable of generating consistent speech. clip_mels_length=True, # For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors, also consider clipping your samples to smaller chunks) max_mel_frames=800, # Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3 and still getting OOM errors. output_per_steps=3, # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder # Does not work if n_ffit is not multiple of hop_size!! use_lws=False, # Only used to set as True if using WaveNet, no difference in performance is observed in either cases. silence_threshold=2, # silence threshold used for sound trimming for wavenet preprocessing # Mel spectrogram n_fft=2048, # Extra window size is filled with 0 paddings to match this parameter hop_size=275, # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) win_size=1100, # For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) (0.05 * sample_rate) sample_rate=22050, # 22050 Hz (corresponding to ljspeech dataset) (sox --i <filename>) frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5) magnitude_power=2., # The power of the spectrogram magnitude (1. for energy, 2. for power) # M-AILABS (and other datasets) trim params (there parameters are usually correct for any data, but definitely must be tuned for specific speakers) trim_silence=True, # Whether to clip silence in Audio (at beginning and end of audio only, not the middle) trim_fft_size=2048, # Trimming window size trim_hop_size=512, # Trimmin hop length trim_top_db=40, # Trimming db difference from reference db (smaller==harder trim.) # Mel and Linear spectrograms normalization/scaling and clipping signal_normalization=True, # Whether to normalize mel spectrograms to some predefined range (following below parameters) allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True symmetric_mels=True, # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, faster and cleaner convergence) max_abs_value=4., # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not be too big to avoid gradient explosion, # not too small for fast convergence) normalize_for_wavenet=True, # whether to rescale to [0, 1] for wavenet. (better audio quality) clip_for_wavenet=True, # whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality) wavenet_pad_sides=1, # Can be 1 or 2. 1 for pad right only, 2 for both sides padding. # Contribution by @begeekmyfriend # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude levels. Also allows for better G&L phase reconstruction) preemphasize=True, # whether to apply filter preemphasis=0.97, # filter coefficient. # Limits min_level_db=-100, ref_level_db=20, fmin=55, # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) fmax=7600, # To be increased/reduced depending on data. # Griffin Lim power=1.5, # Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice. griffin_lim_iters=60, # Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence. GL_on_GPU=True, # Whether to use G&L GPU version as part of tensorflow graph. (Usually much faster than CPU but slightly worse quality too). ########################################################################################################################################### # Tacotron # Model general type outputs_per_step=3, # number of frames to generate at each decoding step (increase to speed up computation and allows for higher batch size, decreases G&L audio quality) stop_at_any=True, # Determines whether the decoder should stop when predicting <stop> to any frame or to all of them (True works pretty well) batch_norm_position='after', # Can be in ('before', 'after'). Determines whether we use batch norm before or after the activation function (relu). Matter for debate. clip_outputs=True, # Whether to clip spectrograms to T2_output_range (even in loss computation). ie: Don't penalize model for exceeding output range and bring back to borders. lower_bound_decay=0.1, # Small regularizer for noise synthesis by adding small range of penalty for silence regions. Set to 0 to clip in Tacotron range. # Input parameters embedding_dim=512, # dimension of embedding space # Encoder parameters enc_conv_num_layers=3, # number of encoder convolutional layers enc_conv_kernel_size=(5,), # size of encoder convolution filters for each layer enc_conv_channels=512, # number of encoder convolutions filters for each layer encoder_lstm_units=256, # number of lstm units for each direction (forward and backward) # Attention mechanism smoothing=False, # Whether to smooth the attention normalization function attention_dim=128, # dimension of attention space attention_filters=32, # number of attention convolution filters attention_kernel=(31,), # kernel size of attention convolution cumulative_weights=True, # Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True) # Attention synthesis constraints # "Monotonic" constraint forces the model to only look at the forwards attention_win_size steps. # "Window" allows the model to look at attention_win_size neighbors, both forward and backward steps. synthesis_constraint=False, # Whether to use attention windows constraints in synthesis only (Useful for long utterances synthesis) synthesis_constraint_type='window', # can be in ('window', 'monotonic'). attention_win_size=7, # Side of the window. Current step does not count. If mode is window and attention_win_size is not pair, the 1 extra is provided to backward part of the window. # Decoder prenet_layers=[256, 256], # number of layers and number of units of prenet decoder_layers=2, # number of decoder lstm layers decoder_lstm_units=1024, # number of decoder lstm units on each layer max_iters=10000, # Max decoder steps during inference (Just for safety from infinite loop cases) # Residual postnet postnet_num_layers=5, # number of postnet convolutional layers postnet_kernel_size=(5,), # size of postnet convolution filters for each layer postnet_channels=512, # number of postnet convolution filters for each layer # CBHG mel->linear postnet cbhg_kernels=8, # All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act as "K-grams" cbhg_conv_channels=128, # Channels of the convolution bank cbhg_pool_size=2, # pooling size of the CBHG cbhg_projection=256, # projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels) cbhg_projection_kernel_size=3, # kernel_size of the CBHG projections cbhg_highwaynet_layers=4, # Number of HighwayNet layers cbhg_highway_units=128, # Number of units used in HighwayNet fully connected layers cbhg_rnn_units=128, # Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in shape # Loss params mask_encoder=True, # whether to mask encoder padding while computing attention. Set to True for better prosody but slower convergence. mask_decoder=False, # Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not be weighted, else recommended pos_weight = 20) cross_entropy_pos_weight=1, # Use class weights to reduce the stop token classes imbalance (by adding more penalty on False Negatives (FN)) (1 = disabled) predict_linear=True, # Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!) ########################################################################################################################################### # Wavenet # Input type: # 1. raw [-1, 1] # 2. mulaw [-1, 1] # 3. mulaw-quantize [0, mu] # If input_type is raw or mulaw, network assumes scalar input and # discretized mixture of logistic distributions output, otherwise one-hot # input and softmax output are assumed. # Model general type input_type="raw", # Raw has better quality but harder to train. mulaw-quantize is easier to train but has lower quality. quantize_channels=2 ** 16, # 65536 (16-bit) (raw) or 256 (8-bit) (mulaw or mulaw-quantize) // number of classes = 256 <=> mu = 255 use_bias=True, # Whether to use bias in convolutional layers of the Wavenet legacy=True, # Whether to use legacy mode: Multiply all skip outputs but the first one with sqrt(0.5) (True for more early training stability, especially for large models) residual_legacy=True, # Whether to scale residual blocks outputs by a factor of sqrt(0.5) (True for input variance preservation early in training and better overall stability) # Model Losses parmeters # Minimal scales ranges for MoL and Gaussian modeling log_scale_min=float(np.log(1e-14)), # Mixture of logistic distributions minimal log scale log_scale_min_gauss=float(np.log(1e-7)), # Gaussian distribution minimal allowed log scale # Loss type cdf_loss=False, # Whether to use CDF loss in Gaussian modeling. Advantages: non-negative loss term and more training stability. (Automatically True for MoL) # model parameters # To use Gaussian distribution as output distribution instead of mixture of logistics, set "out_channels = 2" instead of "out_channels = 10 * 3". (UNDER TEST) out_channels=2, # This should be equal to quantize channels when input type is 'mulaw-quantize' else: num_distributions * 3 (prob, mean, log_scale). layers=20, # Number of dilated convolutions (Default: Simplified Wavenet of Tacotron-2 paper) stacks=2, # Number of dilated convolution stacks (Default: Simplified Wavenet of Tacotron-2 paper) residual_channels=128, # Number of residual block input/output channels. gate_channels=256, # split in 2 in gated convolutions skip_out_channels=128, # Number of residual block skip convolution channels. kernel_size=3, # The number of inputs to consider in dilated convolutions. # Upsampling parameters (local conditioning) cin_channels=80, # Set this to -1 to disable local conditioning, else it must be equal to num_mels!! # Upsample types: ('1D', '2D', 'Resize', 'SubPixel', 'NearestNeighbor') # All upsampling initialization/kernel_size are chosen to omit checkerboard artifacts as much as possible. (Resize is designed to omit that by nature). # To be specific, all initial upsample weights/biases (when NN_init=True) ensure that the upsampling layers act as a "Nearest neighbor upsample" of size "hop_size" (checkerboard free). # 1D spans all frequency bands for each frame (channel-wise) while 2D spans "freq_axis_kernel_size" bands at a time. Both are vanilla transpose convolutions. # Resize is a 2D convolution that follows a Nearest Neighbor (NN) resize. For reference, this is: "NN resize->convolution". # SubPixel (2D) is the ICNR version (initialized to be equivalent to "convolution->NN resize") of Sub-Pixel convolutions. also called "checkered artifact free sub-pixel conv". # Finally, NearestNeighbor is a non-trainable upsampling layer that just expands each frame (or "pixel") to the equivalent hop size. Ignores all upsampling parameters. upsample_type='SubPixel', # Type of the upsampling deconvolution. Can be ('1D' or '2D', 'Resize', 'SubPixel' or simple 'NearestNeighbor'). upsample_activation='Relu', # Activation function used during upsampling. Can be ('LeakyRelu', 'Relu' or None) upsample_scales=[11, 25], # prod(upsample_scales) should be equal to hop_size freq_axis_kernel_size=3, # Only used for 2D upsampling types. This is the number of requency bands that are spanned at a time for each frame. leaky_alpha=0.4, # slope of the negative portion of LeakyRelu (LeakyRelu: y=x if x>0 else y=alpha * x) NN_init=True, # Determines whether we want to initialize upsampling kernels/biases in a way to ensure upsample is initialize to Nearest neighbor upsampling. (Mostly for debug) NN_scaler=0.3, # Determines the initial Nearest Neighbor upsample values scale. i.e: upscaled_input_values = input_values * NN_scaler (1. to disable) # global conditioning gin_channels=-1, # Set this to -1 to disable global conditioning, Only used for multi speaker dataset. It defines the depth of the embeddings (Recommended: 16) use_speaker_embedding=True, # whether to make a speaker embedding n_speakers=5, # number of speakers (rows of the embedding) speakers_path=None, # Defines path to speakers metadata. Can be either in "speaker\tglobal_id" (with header) tsv format, or a single column tsv with speaker names. If None, use "speakers". speakers=['speaker0', 'speaker1', # List of speakers used for embeddings visualization. (Consult "wavenet_vocoder/train.py" if you want to modify the speaker names source). 'speaker2', 'speaker3', 'speaker4'], # Must be consistent with speaker ids specified for global conditioning for correct visualization. ########################################################################################################################################### # Tacotron Training # Reproduction seeds tacotron_random_seed=5339, # Determines initial graph and operations (i.e: model) random state for reproducibility tacotron_data_random_state=1234, # random state for train test split repeatability # performance parameters tacotron_swap_with_cpu=False, # Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!) # train/test split ratios, mini-batches sizes tacotron_batch_size=32, # number of training samples on each training steps # Tacotron Batch synthesis supports ~16x the training batch size (no gradients during testing). # Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times different from training. We thus recommend masking the encoder. tacotron_synthesis_batch_size=1, # DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH "mask_encoder=True"!! tacotron_test_size=0.05, # % of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is enough to have a good idea about overfit) tacotron_test_batches=None, # number of test batches. # Learning rate schedule tacotron_decay_learning_rate=True, # boolean, determines if the learning rate will follow an exponential decay tacotron_start_decay=40000, # Step at which learning decay starts tacotron_decay_steps=18000, # Determines the learning rate decay slope (UNDER TEST) tacotron_decay_rate=0.5, # learning rate decay rate (UNDER TEST) tacotron_initial_learning_rate=1e-3, # starting learning rate tacotron_final_learning_rate=1e-4, # minimal learning rate # Optimization parameters tacotron_adam_beta1=0.9, # AdamOptimizer beta1 parameter tacotron_adam_beta2=0.999, # AdamOptimizer beta2 parameter tacotron_adam_epsilon=1e-6, # AdamOptimizer Epsilon parameter # Regularization parameters tacotron_reg_weight=1e-6, # regularization weight (for L2 regularization) tacotron_scale_regularization=False, # Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model) tacotron_zoneout_rate=0.1, # zoneout rate for all LSTM cells in the network tacotron_dropout_rate=0.5, # dropout rate for all convolutional layers + prenet tacotron_clip_gradients=True, # whether to clip gradients # Evaluation parameters tacotron_natural_eval=False, # Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same teacher-forcing ratio as in training (just for overfit) # Decoder RNN learning can take be done in one of two ways: # Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode='constant' # Scheduled Sampling Scheme: From Teacher-Forcing to sampling from previous outputs is function of global step. (teacher forcing ratio decay) mode='scheduled' # The second approach is inspired by: # Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks. # Can be found under: https://arxiv.org/pdf/1506.03099.pdf tacotron_teacher_forcing_mode='constant', # Can be ('constant' or 'scheduled'). 'scheduled' mode applies a cosine teacher forcing ratio decay. (Preference: scheduled) tacotron_teacher_forcing_ratio=1., # Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs, Only relevant if mode='constant' tacotron_teacher_forcing_init_ratio=1., # initial teacher forcing ratio. Relevant if mode='scheduled' tacotron_teacher_forcing_final_ratio=0., # final teacher forcing ratio. (Set None to use alpha instead) Relevant if mode='scheduled' tacotron_teacher_forcing_start_decay=10000, # starting point of teacher forcing ratio decay. Relevant if mode='scheduled' tacotron_teacher_forcing_decay_steps=40000, # Determines the teacher forcing ratio decay slope. Relevant if mode='scheduled' tacotron_teacher_forcing_decay_alpha=None, # teacher forcing ratio decay rate. Defines the final tfr as a ratio of initial tfr. Relevant if mode='scheduled' # Speaker adaptation parameters tacotron_fine_tuning=False, # Set to True to freeze encoder and only keep training pretrained decoder. Used for speaker adaptation with small data. ########################################################################################################################################### # Wavenet Training wavenet_random_seed=5339, # S=5, E=3, D=9 :) wavenet_data_random_state=1234, # random state for train test split repeatability # performance parameters wavenet_swap_with_cpu=False, # Whether to use cpu as support to gpu for synthesis computation (while loop).(Not recommended: may cause major slowdowns! Only use when critical!) # train/test split ratios, mini-batches sizes wavenet_batch_size=8, # batch size used to train wavenet. # During synthesis, there is no max_time_steps limitation so the model can sample much longer audio than 8k(or 13k) steps. (Audio can go up to 500k steps, equivalent to ~21sec on 24kHz) # Usually your GPU can handle ~2x wavenet_batch_size during synthesis for the same memory amount during training (because no gradients to keep and ops to register for backprop) wavenet_synthesis_batch_size=10 * 2, # This ensure that wavenet synthesis goes up to 4x~8x faster when synthesizing multiple sentences. Watch out for OOM with long audios. wavenet_test_size=None, # % of data to keep as test data, if None, wavenet_test_batches must be not None wavenet_test_batches=1, # number of test batches. # Learning rate schedule wavenet_lr_schedule='exponential', # learning rate schedule. Can be ('exponential', 'noam') wavenet_learning_rate=1e-3, # wavenet initial learning rate wavenet_warmup=float(4000), # Only used with 'noam' scheme. Defines the number of ascending learning rate steps. wavenet_decay_rate=0.5, # Only used with 'exponential' scheme. Defines the decay rate. wavenet_decay_steps=200000, # Only used with 'exponential' scheme. Defines the decay steps. # Optimization parameters wavenet_adam_beta1=0.9, # Adam beta1 wavenet_adam_beta2=0.999, # Adam beta2 wavenet_adam_epsilon=1e-6, # Adam Epsilon # Regularization parameters wavenet_clip_gradients=True, # Whether the clip the gradients during wavenet training. wavenet_ema_decay=0.9999, # decay rate of exponential moving average wavenet_weight_normalization=False, # Whether to Apply Saliman & Kingma Weight Normalization (reparametrization) technique. (Used in DeepVoice3, not critical here) wavenet_init_scale=1., # Only relevent if weight_normalization=True. Defines the initial scale in data dependent initialization of parameters. wavenet_dropout=0.05, # drop rate of wavenet layers wavenet_gradient_max_norm=100.0, # Norm used to clip wavenet gradients wavenet_gradient_max_value=5.0, # Value used to clip wavenet gradients # training samples length max_time_sec=None, # Max time of audio for training. If None, we use max_time_steps. max_time_steps=11000, # Max time steps in audio used to train wavenet (decrease to save memory) (Recommend: 8000 on modest GPUs, 13000 on stronger ones) # Evaluation parameters wavenet_natural_eval=False, # Whether to use 100% natural eval (to evaluate autoregressivity performance) or with teacher forcing to evaluate overfit and model consistency. # Tacotron-2 integration parameters train_with_GTA=True, # Whether to use GTA mels to train WaveNet instead of ground truth mels. ########################################################################################################################################### # Eval/Debug parameters # Eval sentences (if no eval text file was specified during synthesis, these sentences are used for eval) sentences=[ 'Handballer setzen mit Training aus', 'Weil die Einsicht bei den Verantwortlichen spät kam', 'Die geforderte Kehrtwende war am Ende unausweichlich', 'Wir haben uns am Nachmittag dazu entschieden', 'Er plädierte zugleich für eine Absage der Ende März geplanten', 'Für die prestigeträchtigen Spiele hat Japan aus der Staatskasse Milliarden investiert und laut Aussagen', 'Mit einem eindringlichen Appell an Kunden und Politik', 'Knapp eine halbe Million Nutzerinnen und Nutzer sahen binnen' ], # Wavenet Debug wavenet_synth_debug=False, # Set True to use target as debug in WaveNet synthesis. wavenet_debug_wavs=['training_data/audio/audio-alter_afrikaner_01_f000008.npy'], # Path to debug audios. Must be multiple of wavenet_num_gpus. wavenet_debug_mels=['training_data/mels/mel-alter_afrikaner_01_f000008.npy'], # Path to corresponding mels. Must be of same length and order as wavenet_debug_wavs. ) def hparams_debug_string(): values = hparams.values() hp = [' %s: %s' % (name, values[name]) for name in sorted(values) if name != 'sentences'] return 'Hyperparameters:\n' + '\n'.join(hp)
69.964539
189
0.722859
import numpy as np import tensorflow as tf hparams = tf.contrib.training.HParams( cleaners='transliteration_cleaners', # resources related decisions. # Acknowledgement: # Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a little faster than the original # pipeline for a single GPU as well. Great work! # Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Both Tacotron and WaveNet can be trained on multi-GPU: data parallelization) # Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis. tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training. wavenet_num_gpus=1, # Determines the number of gpus in use for WaveNet training. split_on_cpu=False, # Determines whether to split data on CPU or on first GPU. This is automatically True when more than 1 GPU is used. # (Recommend: False on slow CPUs/Disks, True otherwise for small speed boost) ########################################################################################################################################### # Audio # Audio parameters are the most important parameters to tune when using this work on your personal data. Below are the beginner steps to adapt # this work to your personal data: # 1- Determine my data sample rate: First you need to determine your audio sample_rate (how many samples are in a second of audio). This can be done using sox: "sox --i <filename>" # (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz, so there are plenty of examples to refer to) # 2- set sample_rate parameter to your data correct sample rate # 3- Fix win_size and and hop_size accordingly: (Supposing you will follow our advice: 50ms window_size, and 12.5ms frame_shift(hop_size)) # a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200 # b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead) # 4- Fix n_fft, num_freq and upsample_scales parameters accordingly. # a- n_fft can be either equal to win_size or the first power of 2 that comes after win_size. I usually recommend using the latter # to be more consistent with signal processing friends. No big difference to be seen however. For the tuto example: n_fft = 2048 = 2**11 # b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 + 1 = 1025. # c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto example: upsample_scales=[15, 20] where 15 * 20 = 300 # it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0] # so the training segments should be long enough (2.8~3x upsample_scales[0] * hop_size or longer) so that the first kernel size can see the middle # of the samples efficiently. The length of WaveNet training segments is under the parameter "max_time_steps". # 5- Finally comes the silence trimming. This very much data dependent, so I suggest trying preprocessing (or part of it, ctrl-C to stop), then use the # .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That will first give you some idea about your above parameters, and # it will also give you an idea about trimming. If silences persist, try reducing trim_top_db slowly. If samples are trimmed mid words, try increasing it. # 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are showing black silent regions on top), then restart from step 2. num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality num_freq=1025, # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network rescale=True, # Whether to rescale audio prior to preprocessing rescaling_max=0.999, # Rescaling value # train samples of lengths between 3sec and 14sec are more than enough to make a model capable of generating consistent speech. clip_mels_length=True, # For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors, also consider clipping your samples to smaller chunks) max_mel_frames=800, # Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3 and still getting OOM errors. output_per_steps=3, # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder use_lws=False, silence_threshold=2, n_fft=2048, hop_size=275, win_size=1100, sample_rate=22050, frame_shift_ms=None, magnitude_power=2., trim_silence=True, trim_fft_size=2048, trim_hop_size=512, trim_top_db=40, signal_normalization=True, allow_clipping_in_normalization=True, symmetric_mels=True, max_abs_value=4., normalize_for_wavenet=True, clip_for_wavenet=True, wavenet_pad_sides=1, preemphasize=True, preemphasis=0.97, min_level_db=-100, ref_level_db=20, fmin=55, fmax=7600, power=1.5, griffin_lim_iters=60, GL_on_GPU=True, ns (i.e: model) random state for reproducibility tacotron_data_random_state=1234, # random state for train test split repeatability # performance parameters tacotron_swap_with_cpu=False, # Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!) # train/test split ratios, mini-batches sizes tacotron_batch_size=32, # number of training samples on each training steps # Tacotron Batch synthesis supports ~16x the training batch size (no gradients during testing). # Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times different from training. We thus recommend masking the encoder. tacotron_synthesis_batch_size=1, # DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH "mask_encoder=True"!! tacotron_test_size=0.05, tacotron_test_batches=None, tacotron_decay_learning_rate=True, tacotron_start_decay=40000, tacotron_decay_steps=18000, tacotron_decay_rate=0.5, tacotron_initial_learning_rate=1e-3, tacotron_final_learning_rate=1e-4, tacotron_adam_beta1=0.9, tacotron_adam_beta2=0.999, tacotron_adam_epsilon=1e-6, tacotron_reg_weight=1e-6, tacotron_scale_regularization=False, tacotron_zoneout_rate=0.1, tacotron_dropout_rate=0.5, tacotron_clip_gradients=True, tacotron_natural_eval=False, tacotron_teacher_forcing_mode='constant', tacotron_teacher_forcing_ratio=1., tacotron_teacher_forcing_init_ratio=1., tacotron_teacher_forcing_final_ratio=0., tacotron_teacher_forcing_start_decay=10000, tacotron_teacher_forcing_decay_steps=40000, tacotron_teacher_forcing_decay_alpha=None, tacotron_fine_tuning=False,
true
true
f70db8c8f8be1dcf1d4fbc35a03c790efce94b44
676
py
Python
settings.py
awesome-archive/DouYinAPI
dec22da42fa9fce3a3731775398e207934c3c354
[ "BSD-3-Clause" ]
12
2019-04-09T09:04:58.000Z
2021-03-05T14:29:56.000Z
settings.py
awesome-archive/DouYinAPI
dec22da42fa9fce3a3731775398e207934c3c354
[ "BSD-3-Clause" ]
1
2020-02-23T15:31:13.000Z
2020-02-23T15:31:13.000Z
settings.py
awesome-archive/DouYinAPI
dec22da42fa9fce3a3731775398e207934c3c354
[ "BSD-3-Clause" ]
7
2019-04-04T09:05:16.000Z
2020-05-02T20:02:20.000Z
API_GET_TOKEN = 'http://sign.vsdouyin.com/api/token/gen/' API_EP_DOUYIN = "https://sign.vsdouyin.com/api" API_DEVICE_REGISTER = "https://log.snssdk.com/service/2/device_register/" #生成签名服务器专用token ROUTE_GEN_TOKEN = "token/gen" ROUTE_INFO_TOKEN = "token/info" # 抖音相关入口 ROUTE_SIGN_DOUYIN = "653d33c/sign" ROUTE_CRYPT_DOUYIN = "653d33c/crypt" #操作相关 FEED_OPT = "v1/feed" USER_INFO_OPT = "v1/user" USER_FANS_OPT = "v1/user/follower/list" USER_POSTS_OPT = "v1/aweme/post" USER_LIKE_OPT = "v1/aweme/favorite" VIDEO_COMMENTS_OPT = "v1/comment/list" HEADERS = { 'Content-Type': 'application/octet-stream;tt-data=a', 'sdk-version': '1', 'user-agent': 'okhttp/3.10.0.1', }
27.04
73
0.730769
API_GET_TOKEN = 'http://sign.vsdouyin.com/api/token/gen/' API_EP_DOUYIN = "https://sign.vsdouyin.com/api" API_DEVICE_REGISTER = "https://log.snssdk.com/service/2/device_register/" ROUTE_GEN_TOKEN = "token/gen" ROUTE_INFO_TOKEN = "token/info" ROUTE_SIGN_DOUYIN = "653d33c/sign" ROUTE_CRYPT_DOUYIN = "653d33c/crypt" FEED_OPT = "v1/feed" USER_INFO_OPT = "v1/user" USER_FANS_OPT = "v1/user/follower/list" USER_POSTS_OPT = "v1/aweme/post" USER_LIKE_OPT = "v1/aweme/favorite" VIDEO_COMMENTS_OPT = "v1/comment/list" HEADERS = { 'Content-Type': 'application/octet-stream;tt-data=a', 'sdk-version': '1', 'user-agent': 'okhttp/3.10.0.1', }
true
true