hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f70eda886905b05d99fcfd52ab749c90f86d04df
2,185
py
Python
budweiser/budweiser.py
lukebeer/budweiser
8e77377c5952375f74371229d1d770cb1efe176f
[ "MIT" ]
null
null
null
budweiser/budweiser.py
lukebeer/budweiser
8e77377c5952375f74371229d1d770cb1efe176f
[ "MIT" ]
null
null
null
budweiser/budweiser.py
lukebeer/budweiser
8e77377c5952375f74371229d1d770cb1efe176f
[ "MIT" ]
null
null
null
__author__ = 'luke.beer'

import subprocess
import threading
import logging
import socket
import time

import questions
import states


class Executor(threading.Thread):
    """Worker thread that coordinates file-collection jobs between hosts
    over a Redis pub/sub channel.

    Every message on the channel is framed as
    ``<hostname> :: <state> :: <free text>``.
    """

    def __init__(self, r, channel):
        """
        Args:
            r: Redis client instance (must support pubsub()/publish()).
            channel: name of the pub/sub channel used for coordination.
        """
        threading.Thread.__init__(self)
        self.redis = r
        self.channel = channel
        self.pubsub = self.redis.pubsub()
        self.pubsub.subscribe([channel])
        self.state = states.State.INIT
        self.name = socket.gethostname()
        self.hosts = []

    def communicate(self, msg='Sup?'):
        """Log *msg* and broadcast it on the channel, prefixed with this
        host's name and current state."""
        msg = '%s :: %s :: %s' % (socket.gethostname(), self.state, msg)
        logging.info(msg)
        self.redis.publish(self.channel, msg)

    def set_state(self, state):
        """Transition to *state* and announce the change to peers."""
        state_msg = 'State changed from %s to %s' % (self.state, state)
        # BUGFIX: the transition was logged but never applied, so the
        # executor previously stayed in its initial state forever.
        self.state = state
        logging.info(state_msg)
        self.communicate(state_msg)

    def archive(self):
        """Enter the ARCHIVE state (placeholder work)."""
        self.set_state(states.State.ARCHIVE)
        time.sleep(5)

    def compress(self):
        """Enter the COMPRESS state (placeholder work)."""
        self.set_state(states.State.COMPRESS)
        time.sleep(5)

    def file_sync(self, host):
        """Rsync *host*'s source directory into its destination folder.

        Any rsync failure is reported on the channel rather than raised.
        """
        self.set_state(states.State.COLLECT)
        self.communicate("%s: Valid config." % host.hostname)
        try:
            destination = "%s/%s" % (host.destination, host.hostname)
            src = "%s@%s:%s/" % (host.username, host.address, host.source)
            # subprocess runs rsync without a shell, so the include pattern
            # must NOT carry literal quote characters (the old '\"%s\"'
            # form handed rsync a pattern that matched nothing).
            result = subprocess.check_output(
                ['/usr/bin/rsync', '-pvvctrz',
                 '--include=%s' % host.match,
                 src, destination],
                stderr=subprocess.STDOUT)
            self.communicate(result)
        except Exception as e:
            # str(e) instead of e.message: .message does not exist on
            # Python 3 exceptions.
            self.communicate(str(e))

    def stop(self):
        """Unsubscribe from the channel and announce departure."""
        self.pubsub.unsubscribe()
        self.communicate('Goodbye....')

    def ready(self, item):
        """Inspect a pub/sub *item* and report whether the sending peer
        is free.

        Returns True when the peer is idle, False when it is busy, and
        None for our own messages or pure status questions.
        """
        # maxsplit=2 keeps the free-text part intact even when it itself
        # contains ' :: ' (maxsplit=3 could yield four fields and break
        # the 3-way unpacking below).
        hostname, state, msg = item['data'].split(' :: ', 2)
        if hostname == self.name:
            return
        if msg == questions.Questions.WHAT:
            self.communicate("Hey friend, I'm %s" % self.state)
        else:
            if state == states.State.IDLE:
                return True
            if state in [states.State.COLLECT, states.State.ARCHIVE,
                         states.State.COMPRESS]:
                return False
32.132353
113
0.585812
__author__ = 'luke.beer' import subprocess import threading import logging import socket import time import questions import states class Executor(threading.Thread): def __init__(self, r, channel): threading.Thread.__init__(self) self.redis = r self.channel = channel self.pubsub = self.redis.pubsub() self.pubsub.subscribe([channel]) self.state = states.State.INIT self.name = socket.gethostname() self.hosts = [] def communicate(self, msg='Sup?'): msg = '%s :: %s :: %s' % (socket.gethostname(), self.state, msg) logging.info(msg) self.redis.publish(self.channel, msg) def set_state(self, state): state_msg = 'State changed from %s to %s' % (self.state, state) logging.info(state_msg) self.communicate(state_msg) def archive(self): self.set_state(states.State.ARCHIVE) time.sleep(5) def compress(self): self.set_state(states.State.COMPRESS) time.sleep(5) def file_sync(self, host): self.set_state(states.State.COLLECT) self.communicate("%s: Valid config." % host.hostname) try: destination = "%s/%s" % (host.destination, host.hostname) src = "%s@%s:%s/" % (host.username, host.address, host.source) result = subprocess.check_output(['/usr/bin/rsync', '-pvvctrz', '--include=\"%s\"' % host.match, src, destination], stderr=subprocess.STDOUT) self.communicate(result) except Exception as e: self.communicate(e.message) def stop(self): self.pubsub.unsubscribe() self.communicate('Goodbye....') def ready(self, item): hostname, state, msg = item['data'].split(' :: ', 3) if hostname == self.name: return if msg == questions.Questions.WHAT: self.communicate("Hey friend, I'm %s" % self.state) else: if state == states.State.IDLE: return True if state in [states.State.COLLECT, states.State.ARCHIVE, states.State.COMPRESS]: return False
true
true
f70edb0bd9f3d76f8d1b1327fa9458f595a64ab3
3,738
py
Python
rule1to6_words.py
e96031413/proofread
f4fc798d3b57c6f7a55f4e18de68f2272eb9fc44
[ "MIT" ]
null
null
null
rule1to6_words.py
e96031413/proofread
f4fc798d3b57c6f7a55f4e18de68f2272eb9fc44
[ "MIT" ]
null
null
null
rule1to6_words.py
e96031413/proofread
f4fc798d3b57c6f7a55f4e18de68f2272eb9fc44
[ "MIT" ]
null
null
null
import re
import json

from color_print import *

# read rules from json file
with open("rules.json", "r", encoding="utf-8") as json_data:
    rules = json.load(json_data)


# create new rules by replacing 'is' to 'was', 'has been', ...
def augment(sentence):
    """Change 'is' in the sentence to was, has been, should be ... to
    augment some new sentences.

    Args:
        sentence: str, candidate sentence to be removed/replaced
    Return:
        augmented_str: list of candidate strings (original included),
        each padded with one leading and one trailing space.
    """
    pad_sentence = " " + sentence + " "
    augmented_str = [pad_sentence]
    # NOTE(review): the substring test also matches 'is' inside words such
    # as "this"; kept as-is to preserve the existing matching behavior.
    if 'is' in pad_sentence:
        index = pad_sentence.find("is")
        reps = ["was", "have been", "has been", "had been", "should be"]
        for replace_candidate in reps:
            new_str = pad_sentence[:index]
            new_str += replace_candidate
            new_str += pad_sentence[index+2:]
            augmented_str.append(new_str)
    return augmented_str


def get_context(src_text, index):
    """Get the boundaries of the full sentence that contains *index*.

    Returns (istart, iend): istart is the absolute position of the last
    stop punctuation before *index* (-1 if none), iend is the offset,
    relative to *index*, of the next stop punctuation (end of text if
    none follows).
    """
    stop_puncs = ['.', ',', '!', '?', ';', ':', '\n']
    head = src_text[:index]
    tail = src_text[index:]
    istart = max(head.rfind(st) for st in stop_puncs)
    # default= guards against ValueError when no stop punctuation follows
    # *index* (e.g. the match sits at the very end of the file).
    iend = min((tail.find(st) for st in stop_puncs if tail.find(st) != -1),
               default=len(tail))
    return istart, iend


def _read_lower(file_path):
    """Read *file_path* as UTF-8 and return its lower-cased text."""
    with open(file_path, "r", encoding="utf-8") as fsrc:
        return fsrc.read().lower()


def _highlight_occurrences(src_text, s):
    """Return one context string per occurrence of *s* in *src_text*,
    with the matched span colored red."""
    contexts = []
    # re.escape: rule phrases are plain text, not regexes — an unescaped
    # '?' or '.' in a phrase would crash or silently mis-match.
    for m in re.finditer(re.escape(s), src_text):
        index = m.start()
        istart, iend = get_context(src_text, index)
        ctx = src_text[istart+1:index] + color_red(src_text[index:index+len(s)])
        ctx += src_text[index+len(s):index+iend]
        contexts.append(ctx)
    return contexts


# create suggestions for sentences to remove
def suggest_remove(file_path, to_remove, verbose=True):
    """Suggest removal of every rule phrase from *to_remove* (and its
    tense-augmented variants) found in *file_path*.

    When *verbose*, each suggestion is followed by its highlighted
    sentence context. Returns a list of colored suggestion strings.
    """
    src_text = _read_lower(file_path)
    suggestions = []
    for item in to_remove:
        for s in augment(item):
            if s not in src_text:
                continue
            suggestions.append(color_red("remove: "+s))
            if verbose:
                suggestions.extend(_highlight_occurrences(src_text, s))
    return suggestions


# create suggestions for sentences to replace
def suggest_replace(file_path, to_replace, verbose=True):
    """Suggest replacing each key of *to_replace* (and its tense-augmented
    variants) found in *file_path* with its mapped value.

    When *verbose*, each suggestion is followed by its highlighted
    sentence context. Returns a list of colored suggestion strings.
    """
    src_text = _read_lower(file_path)
    suggestions = []
    for key, value in to_replace.items():
        for s in augment(key):
            if s not in src_text:
                continue
            suggestions.append(color_red("replace: "+s)+" ---> "+color_magenta(value))
            if verbose:
                suggestions.extend(_highlight_occurrences(src_text, s))
    return suggestions


def report_wrong_words(fname, verbose=True):
    """Report problematic words that are not simple, not precise, sexist,
    or needless, printing colored suggestions to the screen."""
    to_remove = rules["to_remove"]
    to_replace = rules["to_replace"]
    sug1 = suggest_remove(fname, to_remove, verbose)
    sug2 = suggest_replace(fname, to_replace, verbose)
    suggestions = sug1 + sug2
    # if no suggestions, continue to process next file
    if len(suggestions) == 0:
        return
    # otherwise print suggestions to screen
    print(color_blue("******************************************************"))
    print("Words suggestions for ", color_cyan(fname.split('/')[-1]))
    for suggestion in suggestions:
        print(suggestion)
    print("")
41.076923
98
0.593098
import re import json from color_print import * with open("rules.json", "r", encoding="utf-8") as json_data: rules = json.load(json_data) def augment(sentence): pad_sentence = " " + sentence + " " augmented_str = [pad_sentence] if 'is' in pad_sentence: index = pad_sentence.find("is") reps = ["was", "have been", "has been", "had been", "should be"] for replace_candidate in reps: new_str = pad_sentence[:index] new_str += replace_candidate new_str += pad_sentence[index+2:] augmented_str.append(new_str) return augmented_str def get_context(src_text, index): stop_puncs = ['.', ',', '!', '?', ';', ':', '\n'] istart = max([src_text[:index].rfind(st) for st in stop_puncs]) iend = min([src_text[index:].find(st) for st in stop_puncs if src_text[index:].find(st)!=-1]) return istart, iend def suggest_remove(file_path, to_remove, verbose=True): with open(file_path, "r", encoding="utf-8") as fsrc: src_text = fsrc.read().lower() suggestions = [] for item in to_remove: for s in augment(item): if s not in src_text: continue suggestions.append(color_red("remove: "+s)) if verbose: indices = [m.start() for m in re.finditer(s, src_text)] for index in indices: istart, iend = get_context(src_text, index) ctx = src_text[istart+1:index]+color_red(src_text[index:index+len(s)]) ctx += src_text[index+len(s):index+iend] suggestions.append(ctx) return suggestions def suggest_replace(file_path, to_replace, verbose=True): with open(file_path, "r", encoding="utf-8") as fsrc: src_text = fsrc.read().lower() suggestions = [] for key, value in to_replace.items(): for s in augment(key): if s not in src_text: continue suggestions.append(color_red("replace: "+s)+" ---> "+color_magenta(value)) if verbose: indices = [m.start() for m in re.finditer(s, src_text)] for index in indices: istart, iend = get_context(src_text, index) ctx = src_text[istart+1:index]+color_red(src_text[index:index+len(s)]) ctx += src_text[index+len(s):index+iend] suggestions.append(ctx) return suggestions def 
report_wrong_words(fname, verbose=True): to_remove = rules["to_remove"] to_replace = rules["to_replace"] sug1 = suggest_remove(fname, to_remove, verbose) sug2 = suggest_replace(fname, to_replace, verbose) suggestions = sug1 + sug2 if len(suggestions) == 0: return print(color_blue("******************************************************")) print("Words suggestions for ", color_cyan(fname.split('/')[-1])) for suggestion in suggestions: print(suggestion) print("")
true
true
f70edd2a8408b41deb049f6068375e7a834dd31f
17,816
py
Python
examples/optics/pyoptic.py
abbasegbeyemi/pyqtgraph
6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed
[ "MIT" ]
1
2021-04-28T05:16:24.000Z
2021-04-28T05:16:24.000Z
examples/optics/pyoptic.py
abbasegbeyemi/pyqtgraph
6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed
[ "MIT" ]
1
2021-04-04T15:05:47.000Z
2021-05-15T23:56:42.000Z
examples/optics/pyoptic.py
abbasegbeyemi/pyqtgraph
6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed
[ "MIT" ]
1
2021-05-19T10:11:17.000Z
2021-05-19T10:11:17.000Z
# -*- coding: utf-8 -*-
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import csv, gzip, os
from pyqtgraph import Point


class GlassDB:
    """
    Database of dispersion coefficients for Schott glasses + Corning 7980
    """
    def __init__(self, fileName='schott_glasses.csv'):
        path = os.path.dirname(__file__)
        # Open in text mode: csv.reader needs str rows, and the previous
        # 'rb' + map(str, ...) combination produced "b'...'" garbage rows
        # on Python 3. Also honor the fileName argument (it used to be
        # ignored) and close the handle deterministically.
        with gzip.open(os.path.join(path, fileName + '.gz'), 'rt') as fh:
            lines = list(csv.reader(fh))
        self.data = {}
        header = lines[0]
        for l in lines[1:]:
            info = {}
            for i in range(1, len(l)):
                info[header[i]] = l[i]
            self.data[l[0]] = info
        self.data['Corning7980'] = {  ## Thorlabs UV fused silica--not in schott catalog.
            'B1': 0.68374049400,
            'B2': 0.42032361300,
            'B3': 0.58502748000,
            'C1': 0.00460352869,
            'C2': 0.01339688560,
            'C3': 64.49327320000,
            'TAUI25/250': 0.95,  ## transmission data is fabricated, but close.
            'TAUI25/1400': 0.98,
        }
        for k in self.data:
            self.data[k]['ior_cache'] = {}

    def ior(self, glass, wl):
        """
        Return the index of refraction for *glass* at wavelength *wl*.

        The *glass* argument must be a key in self.data.
        """
        info = self.data[glass]
        cache = info['ior_cache']
        if wl not in cache:
            # Sellmeier dispersion equation with coefficients B1..B3, C1..C3.
            B = list(map(float, [info['B1'], info['B2'], info['B3']]))
            C = list(map(float, [info['C1'], info['C2'], info['C3']]))
            w2 = (wl/1000.)**2
            n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2])))
            cache[wl] = n
        return cache[wl]

    def transmissionCurve(self, glass):
        """Return a (2, N) array of (wavelength, transmission) samples for
        *glass*, built from its 'TAUI25/<wl>' entries."""
        data = self.data[glass]
        keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x]
        keys.sort()
        curve = np.empty((2, len(keys)))
        for i in range(len(keys)):
            curve[0][i] = keys[i]
            key = 'TAUI25/%d' % keys[i]
            val = data[key]
            if val == '':
                val = 0
            else:
                val = float(val)
            curve[1][i] = val
        return curve


GLASSDB = GlassDB()


def wlPen(wl):
    """Return a pen representing the given wavelength"""
    l1 = 400
    l2 = 700
    hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8)
    val = 1.0
    # Fade brightness outside the visible band.
    if wl > 700:
        val = 1.0 * (((700-wl)/700.) + 1)
    elif wl < 400:
        val = wl * 1.0/400.
    color = pg.hsvColor(hue, 1.0, val)
    pen = pg.mkPen(color)
    return pen


class ParamObj(object):
    # Just a helper for tracking parameters and responding to changes
    def __init__(self):
        self.__params = {}

    def __setitem__(self, item, val):
        self.setParam(item, val)

    def setParam(self, param, val):
        self.setParams(**{param: val})

    def setParams(self, **params):
        """Set parameters for this optic. This is a good function to override for subclasses."""
        self.__params.update(params)
        self.paramStateChanged()

    def paramStateChanged(self):
        pass

    def __getitem__(self, item):
        # bug in pyside 1.2.2 causes getitem to be called inside QGraphicsObject.parentItem:
        return self.getParam(item)  # PySide bug: https://bugreports.qt.io/browse/PYSIDE-671

    def __len__(self):
        # Workaround for PySide bug: https://bugreports.qt.io/browse/PYSIDE-671
        return 0

    def getParam(self, param):
        return self.__params[param]


class Optic(pg.GraphicsObject, ParamObj):
    """Base class for optical elements: pairs a surface graphic with a
    rotatable/movable ROI and keeps the two in sync."""

    sigStateChanged = QtCore.Signal()

    def __init__(self, gitem, **params):
        ParamObj.__init__(self)
        pg.GraphicsObject.__init__(self)
        self.gitem = gitem
        self.surfaces = gitem.surfaces
        gitem.setParentItem(self)

        self.roi = pg.ROI([0, 0], [1, 1])
        self.roi.addRotateHandle([1, 1], [0.5, 0.5])
        self.roi.setParentItem(self)

        defaults = {
            'pos': Point(0, 0),
            'angle': 0,
        }
        defaults.update(params)
        self._ior_cache = {}
        self.roi.sigRegionChanged.connect(self.roiChanged)
        self.setParams(**defaults)

    def updateTransform(self):
        self.setPos(0, 0)
        tr = QtGui.QTransform()
        self.setTransform(tr.translate(Point(self['pos'])).rotate(self['angle']))

    def setParam(self, param, val):
        ParamObj.setParam(self, param, val)

    def paramStateChanged(self):
        """Some parameters of the optic have changed."""
        # Move graphics item
        self.gitem.setPos(Point(self['pos']))
        self.gitem.resetTransform()
        self.gitem.setRotation(self['angle'])

        # Move ROI to match; temporarily disconnect so the ROI update does
        # not feed back into roiChanged.
        try:
            self.roi.sigRegionChanged.disconnect(self.roiChanged)
            br = self.gitem.boundingRect()
            o = self.gitem.mapToParent(br.topLeft())
            self.roi.setAngle(self['angle'])
            self.roi.setPos(o)
            self.roi.setSize([br.width(), br.height()])
        finally:
            self.roi.sigRegionChanged.connect(self.roiChanged)

        self.sigStateChanged.emit()

    def roiChanged(self, *args):
        pos = self.roi.pos()
        # rotate gitem temporarily so we can decide where it will need to move
        self.gitem.resetTransform()
        self.gitem.setRotation(self.roi.angle())
        br = self.gitem.boundingRect()
        o1 = self.gitem.mapToParent(br.topLeft())
        self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1))

    def boundingRect(self):
        return QtCore.QRectF()

    def paint(self, p, *args):
        pass

    def ior(self, wavelength):
        return GLASSDB.ior(self['glass'], wavelength)


class Lens(Optic):
    def __init__(self, **params):
        defaults = {
            'dia': 25.4,   ## diameter of lens
            'r1': 50.,     ## positive means convex, use 0 for planar
            'r2': 0,       ## negative means convex
            'd': 4.0,
            'glass': 'N-BK7',
            'reflect': False,
        }
        defaults.update(params)
        d = defaults.pop('d')
        defaults['x1'] = -d/2.
        defaults['x2'] = d/2.
        gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults)
        Optic.__init__(self, gitem, **defaults)

    def propagateRay(self, ray):
        """Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays.

        NOTE: We can probably use this to compute refractions faster
        (from GLSL 120 docs):

          For the incident vector I and surface normal N, and the ratio of
          indices of refraction eta, return the refraction vector:
            k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
            if (k < 0.0) return genType(0.0)
            else return eta * I - (eta * dot(N, I) + sqrt(k)) * N
          I and N must already be normalized. eta == ratio of IORs.

          For reflection: I - 2 * dot(N, I) * N (N normalized).
        """
        # Refract at the entry surface (glass IOR), then the exit surface
        # (back to air, IOR 1.0).
        iors = [self.ior(ray['wl']), 1.0]
        for i in [0, 1]:
            surface = self.surfaces[i]
            ior = iors[i]
            p1, ai = surface.intersectRay(ray)
            if p1 is None:
                ray.setEnd(None)
                break
            p1 = surface.mapToItem(ray, p1)
            rd = ray['dir']
            a1 = np.arctan2(rd[1], rd[0])
            # Snell's law in angle form.
            ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior))
            ray.setEnd(p1)
            dp = Point(np.cos(ar), np.sin(ar))
            ray = Ray(parent=ray, ior=ior, dir=dp)
        return [ray]


class Mirror(Optic):
    def __init__(self, **params):
        defaults = {
            'r1': 0,
            'r2': 0,
            'd': 0.01,
        }
        defaults.update(params)
        d = defaults.pop('d')
        defaults['x1'] = -d/2.
        defaults['x2'] = d/2.
        gitem = CircularSolid(brush=(100, 100, 100, 255), **defaults)
        Optic.__init__(self, gitem, **defaults)

    def propagateRay(self, ray):
        """Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""
        surface = self.surfaces[0]
        p1, ai = surface.intersectRay(ray)
        if p1 is not None:
            p1 = surface.mapToItem(ray, p1)
            rd = ray['dir']
            a1 = np.arctan2(rd[1], rd[0])
            # Specular reflection about the surface normal.
            ar = a1 + np.pi - 2*ai
            ray.setEnd(p1)
            dp = Point(np.cos(ar), np.sin(ar))
            ray = Ray(parent=ray, dir=dp)
        else:
            ray.setEnd(None)
        return [ray]


class CircularSolid(pg.GraphicsObject, ParamObj):
    """GraphicsObject with two circular or flat surfaces."""
    def __init__(self, pen=None, brush=None, **opts):
        """
        Arguments for each surface are:
            x1,x2 - position of center of _physical surface_
            r1,r2 - radius of curvature
            d1,d2 - diameter of optic
        """
        defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)
        defaults.update(opts)
        ParamObj.__init__(self)
        self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']),
                         CircleSurface(-defaults['r2'], defaults['d2'])]
        pg.GraphicsObject.__init__(self)
        for s in self.surfaces:
            s.setParentItem(self)

        if pen is None:
            self.pen = pg.mkPen((220, 220, 255, 200), width=1, cosmetic=True)
        else:
            self.pen = pg.mkPen(pen)

        if brush is None:
            self.brush = pg.mkBrush((230, 230, 255, 30))
        else:
            self.brush = pg.mkBrush(brush)

        self.setParams(**defaults)

    def paramStateChanged(self):
        self.updateSurfaces()

    def updateSurfaces(self):
        """Rebuild both surface paths and the closed outline joining them."""
        self.surfaces[0].setParams(self['r1'], self['d1'])
        self.surfaces[1].setParams(-self['r2'], self['d2'])
        self.surfaces[0].setPos(self['x1'], 0)
        self.surfaces[1].setPos(self['x2'], 0)

        self.path = QtGui.QPainterPath()
        self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))
        self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())
        self.path.closeSubpath()

    def boundingRect(self):
        return self.path.boundingRect()

    def shape(self):
        return self.path

    def paint(self, p, *args):
        p.setRenderHints(p.renderHints() | p.Antialiasing)
        p.setPen(self.pen)
        p.fillPath(self.path, self.brush)
        p.drawPath(self.path)


class CircleSurface(pg.GraphicsObject):
    def __init__(self, radius=None, diameter=None):
        """center of physical surface is at 0,0
        radius is the radius of the surface. If radius is None, the surface is flat.
        diameter is of the optic's edge."""
        pg.GraphicsObject.__init__(self)
        self.r = radius
        self.d = diameter
        self.mkPath()

    def setParams(self, r, d):
        self.r = r
        self.d = d
        self.mkPath()

    def mkPath(self):
        self.prepareGeometryChange()
        r = self.r
        d = self.d
        h2 = d/2.
        self.path = QtGui.QPainterPath()
        if r == 0:  ## flat surface
            self.path.moveTo(0, h2)
            self.path.lineTo(0, -h2)
        else:
            ## half-height of surface can't be larger than radius
            h2 = min(h2, abs(r))
            arc = QtCore.QRectF(0, -r, r*2, r*2)
            a1 = np.arcsin(h2/r) * 180. / np.pi
            a2 = -2*a1
            a1 += 180.
            self.path.arcMoveTo(arc, a1)
            self.path.arcTo(arc, a1, a2)
        self.h2 = h2

    def boundingRect(self):
        return self.path.boundingRect()

    def paint(self, p, *args):
        return  ## usually we let the optic draw.

    def intersectRay(self, ray):
        """Return the point of intersection and the angle of incidence for
        *ray* against this surface, or (None, None) if the ray misses."""
        h = self.h2
        r = self.r
        p, dir = ray.currentState(relativeTo=self)  # position and angle of ray in local coords.
        p = p - Point(r, 0)  ## move position so center of circle is at 0,0

        if r == 0:
            # Flat surface: intersect with the x=0 line.
            if dir[0] == 0:
                y = 0
            else:
                y = p[1] - p[0] * dir[1]/dir[0]
            if abs(y) > h:
                return None, None
            else:
                return (Point(0, y), np.arctan2(dir[1], dir[0]))
        else:
            ## find intersection of circle and line (quadratic formula)
            dx = dir[0]
            dy = dir[1]
            dr = (dx**2 + dy**2) ** 0.5
            D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1]
            idr2 = 1.0 / dr**2
            disc = r**2 * dr**2 - D**2
            if disc < 0:
                return None, None
            disc2 = disc**0.5
            if dy < 0:
                sgn = -1
            else:
                sgn = 1

            br = self.path.boundingRect()
            x1 = (D*dy + sgn*dx*disc2) * idr2
            y1 = (-D*dx + abs(dy)*disc2) * idr2
            if br.contains(x1+r, y1):
                pt = Point(x1, y1)
            else:
                x2 = (D*dy - sgn*dx*disc2) * idr2
                y2 = (-D*dx - abs(dy)*disc2) * idr2
                pt = Point(x2, y2)
                if not br.contains(x2+r, y2):
                    # Neither quadratic root lies on the physical surface.
                    # (An unreachable `raise` used to follow this return.)
                    return None, None

            norm = np.arctan2(pt[1], pt[0])
            if r < 0:
                norm += np.pi
            dp = p - pt
            ang = np.arctan2(dp[1], dp[0])
            return pt + Point(r, 0), ang - norm


class Ray(pg.GraphicsObject, ParamObj):
    """Represents a single straight segment of a ray"""

    sigStateChanged = QtCore.Signal()

    def __init__(self, **params):
        ParamObj.__init__(self)
        defaults = {
            'ior': 1.0,
            'wl': 500,
            'end': None,
            'dir': Point(1, 0),
        }
        self.params = {}
        pg.GraphicsObject.__init__(self)
        self.children = []
        parent = params.get('parent', None)
        if parent is not None:
            # Child segments start where the parent ended and inherit its
            # wavelength, IOR, and direction unless overridden.
            defaults['start'] = parent['end']
            defaults['wl'] = parent['wl']
            self['ior'] = parent['ior']
            self['dir'] = parent['dir']
            parent.addChild(self)
        defaults.update(params)
        defaults['dir'] = Point(defaults['dir'])
        self.setParams(**defaults)
        self.mkPath()

    def clearChildren(self):
        for c in self.children:
            c.clearChildren()
            c.setParentItem(None)
            self.scene().removeItem(c)
        self.children = []

    def paramStateChanged(self):
        pass

    def addChild(self, ch):
        self.children.append(ch)
        ch.setParentItem(self)

    def currentState(self, relativeTo=None):
        """Return (position, direction), optionally mapped into the
        coordinate system of *relativeTo*."""
        pos = self['start']
        dir = self['dir']
        if relativeTo is None:
            return pos, dir
        else:
            trans = self.itemTransform(relativeTo)[0]
            p1 = trans.map(pos)
            p2 = trans.map(pos + dir)
            return Point(p1), Point(p2 - p1)

    def setEnd(self, end):
        self['end'] = end
        self.mkPath()

    def boundingRect(self):
        return self.path.boundingRect()

    def paint(self, p, *args):
        p.setRenderHints(p.renderHints() | p.Antialiasing)
        p.setCompositionMode(p.CompositionMode_Plus)
        p.setPen(wlPen(self['wl']))
        p.drawPath(self.path)

    def mkPath(self):
        self.prepareGeometryChange()
        self.path = QtGui.QPainterPath()
        self.path.moveTo(self['start'])
        if self['end'] is not None:
            self.path.lineTo(self['end'])
        else:
            # Open-ended segment: draw far along the current direction.
            self.path.lineTo(self['start'] + 500*self['dir'])


def trace(rays, optics):
    """Recursively propagate each ray through the optic chain."""
    if len(optics) < 1 or len(rays) < 1:
        return
    for r in rays:
        r.clearChildren()
        o = optics[0]
        r2 = o.propagateRay(r)
        trace(r2, optics[1:])


class Tracer(QtCore.QObject):
    """
    Simple ray tracer.

    Initialize with a list of rays and optics;
    calling trace() will cause rays to be extended by propagating them through
    each optic in sequence.
    """
    def __init__(self, rays, optics):
        QtCore.QObject.__init__(self)
        self.optics = optics
        self.rays = rays
        for o in self.optics:
            o.sigStateChanged.connect(self.trace)
        self.trace()

    def trace(self):
        trace(self.rays, self.optics)
31.701068
119
0.516446
import pyqtgraph as pg from pyqtgraph.Qt import QtGui, QtCore import numpy as np import csv, gzip, os from pyqtgraph import Point class GlassDB: def __init__(self, fileName='schott_glasses.csv'): path = os.path.dirname(__file__) fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb') r = csv.reader(map(str, fh.readlines())) lines = [x for x in r] self.data = {} header = lines[0] for l in lines[1:]: info = {} for i in range(1, len(l)): info[header[i]] = l[i] self.data[l[0]] = info self.data['Corning7980'] = { : 0.42032361300, 'B3': 0.58502748000, 'C1': 0.00460352869, 'C2': 0.01339688560, 'C3': 64.49327320000, 'TAUI25/250': 0.95, for k in self.data: self.data[k]['ior_cache'] = {} def ior(self, glass, wl): info = self.data[glass] cache = info['ior_cache'] if wl not in cache: B = list(map(float, [info['B1'], info['B2'], info['B3']])) C = list(map(float, [info['C1'], info['C2'], info['C3']])) w2 = (wl/1000.)**2 n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2]))) cache[wl] = n return cache[wl] def transmissionCurve(self, glass): data = self.data[glass] keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x] keys.sort() curve = np.empty((2,len(keys))) for i in range(len(keys)): curve[0][i] = keys[i] key = 'TAUI25/%d' % keys[i] val = data[key] if val == '': val = 0 else: val = float(val) curve[1][i] = val return curve GLASSDB = GlassDB() def wlPen(wl): l1 = 400 l2 = 700 hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8) val = 1.0 if wl > 700: val = 1.0 * (((700-wl)/700.) + 1) elif wl < 400: val = wl * 1.0/400. 
color = pg.hsvColor(hue, 1.0, val) pen = pg.mkPen(color) return pen class ParamObj(object): def __init__(self): self.__params = {} def __setitem__(self, item, val): self.setParam(item, val) def setParam(self, param, val): self.setParams(**{param:val}) def setParams(self, **params): self.__params.update(params) self.paramStateChanged() def paramStateChanged(self): pass def __getitem__(self, item): return self.getParam(item) def __len__(self): return 0 def getParam(self, param): return self.__params[param] class Optic(pg.GraphicsObject, ParamObj): sigStateChanged = QtCore.Signal() def __init__(self, gitem, **params): ParamObj.__init__(self) pg.GraphicsObject.__init__(self) self.gitem = gitem self.surfaces = gitem.surfaces gitem.setParentItem(self) self.roi = pg.ROI([0,0], [1,1]) self.roi.addRotateHandle([1, 1], [0.5, 0.5]) self.roi.setParentItem(self) defaults = { 'pos': Point(0,0), 'angle': 0, } defaults.update(params) self._ior_cache = {} self.roi.sigRegionChanged.connect(self.roiChanged) self.setParams(**defaults) def updateTransform(self): self.setPos(0, 0) tr = QtGui.QTransform() self.setTransform(tr.translate(Point(self['pos'])).rotate(self['angle'])) def setParam(self, param, val): ParamObj.setParam(self, param, val) def paramStateChanged(self): self.gitem.setPos(Point(self['pos'])) self.gitem.resetTransform() self.gitem.setRotation(self['angle']) try: self.roi.sigRegionChanged.disconnect(self.roiChanged) br = self.gitem.boundingRect() o = self.gitem.mapToParent(br.topLeft()) self.roi.setAngle(self['angle']) self.roi.setPos(o) self.roi.setSize([br.width(), br.height()]) finally: self.roi.sigRegionChanged.connect(self.roiChanged) self.sigStateChanged.emit() def roiChanged(self, *args): pos = self.roi.pos() self.gitem.resetTransform() self.gitem.setRotation(self.roi.angle()) br = self.gitem.boundingRect() o1 = self.gitem.mapToParent(br.topLeft()) self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1)) def boundingRect(self): return 
QtCore.QRectF() def paint(self, p, *args): pass def ior(self, wavelength): return GLASSDB.ior(self['glass'], wavelength) class Lens(Optic): def __init__(self, **params): defaults = { 'dia': 25.4, 50., s': 'N-BK7', 'reflect': False, } defaults.update(params) d = defaults.pop('d') defaults['x1'] = -d/2. defaults['x2'] = d/2. gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults) Optic.__init__(self, gitem, **defaults) def propagateRay(self, ray): iors = [self.ior(ray['wl']), 1.0] for i in [0,1]: surface = self.surfaces[i] ior = iors[i] p1, ai = surface.intersectRay(ray) if p1 is None: ray.setEnd(None) break p1 = surface.mapToItem(ray, p1) rd = ray['dir'] a1 = np.arctan2(rd[1], rd[0]) ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior)) ray.setEnd(p1) dp = Point(np.cos(ar), np.sin(ar)) ray = Ray(parent=ray, ior=ior, dir=dp) return [ray] class Mirror(Optic): def __init__(self, **params): defaults = { 'r1': 0, 'r2': 0, 'd': 0.01, } defaults.update(params) d = defaults.pop('d') defaults['x1'] = -d/2. defaults['x2'] = d/2. 
gitem = CircularSolid(brush=(100,100,100,255), **defaults) Optic.__init__(self, gitem, **defaults) def propagateRay(self, ray): surface = self.surfaces[0] p1, ai = surface.intersectRay(ray) if p1 is not None: p1 = surface.mapToItem(ray, p1) rd = ray['dir'] a1 = np.arctan2(rd[1], rd[0]) ar = a1 + np.pi - 2*ai ray.setEnd(p1) dp = Point(np.cos(ar), np.sin(ar)) ray = Ray(parent=ray, dir=dp) else: ray.setEnd(None) return [ray] class CircularSolid(pg.GraphicsObject, ParamObj): def __init__(self, pen=None, brush=None, **opts): defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4) defaults.update(opts) ParamObj.__init__(self) self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])] pg.GraphicsObject.__init__(self) for s in self.surfaces: s.setParentItem(self) if pen is None: self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True) else: self.pen = pg.mkPen(pen) if brush is None: self.brush = pg.mkBrush((230, 230, 255, 30)) else: self.brush = pg.mkBrush(brush) self.setParams(**defaults) def paramStateChanged(self): self.updateSurfaces() def updateSurfaces(self): self.surfaces[0].setParams(self['r1'], self['d1']) self.surfaces[1].setParams(-self['r2'], self['d2']) self.surfaces[0].setPos(self['x1'], 0) self.surfaces[1].setPos(self['x2'], 0) self.path = QtGui.QPainterPath() self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos())) self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed()) self.path.closeSubpath() def boundingRect(self): return self.path.boundingRect() def shape(self): return self.path def paint(self, p, *args): p.setRenderHints(p.renderHints() | p.Antialiasing) p.setPen(self.pen) p.fillPath(self.path, self.brush) p.drawPath(self.path) class CircleSurface(pg.GraphicsObject): def __init__(self, radius=None, diameter=None): pg.GraphicsObject.__init__(self) self.r = radius self.d = diameter self.mkPath() def setParams(self, r, d): self.r = 
r self.d = d self.mkPath() def mkPath(self): self.prepareGeometryChange() r = self.r d = self.d h2 = d/2. self.path = QtGui.QPainterPath() if r == 0: elf.path.moveTo(0, h2) self.path.lineTo(0, -h2) else: QtCore.QRectF(0, -r, r*2, r*2) a1 = np.arcsin(h2/r) * 180. / np.pi a2 = -2*a1 a1 += 180. self.path.arcMoveTo(arc, a1) self.path.arcTo(arc, a1, a2) self.h2 = h2 def boundingRect(self): return self.path.boundingRect() def paint(self, p, *args): return ## usually we let the optic draw. def intersectRay(self, ray): ## return the point of intersection and the angle of incidence #print "intersect ray" h = self.h2 r = self.r p, dir = ray.currentState(relativeTo=self) # position and angle of ray in local coords. #print " ray: ", p, dir p = p - Point(r, 0) ## move position so center of circle is at 0,0 #print " adj: ", p, r if r == 0: #print " flat" if dir[0] == 0: y = 0 else: y = p[1] - p[0] * dir[1]/dir[0] if abs(y) > h: return None, None else: return (Point(0, y), np.arctan2(dir[1], dir[0])) else: #print " curve" ## find intersection of circle and line (quadratic formula) dx = dir[0] dy = dir[1] dr = (dx**2 + dy**2) ** 0.5 D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1] idr2 = 1.0 / dr**2 disc = r**2 * dr**2 - D**2 if disc < 0: return None, None disc2 = disc**0.5 if dy < 0: sgn = -1 else: sgn = 1 br = self.path.boundingRect() x1 = (D*dy + sgn*dx*disc2) * idr2 y1 = (-D*dx + abs(dy)*disc2) * idr2 if br.contains(x1+r, y1): pt = Point(x1, y1) else: x2 = (D*dy - sgn*dx*disc2) * idr2 y2 = (-D*dx - abs(dy)*disc2) * idr2 pt = Point(x2, y2) if not br.contains(x2+r, y2): return None, None raise Exception("No intersection!") norm = np.arctan2(pt[1], pt[0]) if r < 0: norm += np.pi #print " norm:", norm*180/3.1415 dp = p - pt #print " dp:", dp ang = np.arctan2(dp[1], dp[0]) #print " ang:", ang*180/3.1415 #print " ai:", (ang-norm)*180/3.1415 #print " intersection:", pt return pt + Point(r, 0), ang-norm class Ray(pg.GraphicsObject, ParamObj): sigStateChanged = QtCore.Signal() def 
__init__(self, **params): ParamObj.__init__(self) defaults = { 'ior': 1.0, 'wl': 500, 'end': None, 'dir': Point(1,0), } self.params = {} pg.GraphicsObject.__init__(self) self.children = [] parent = params.get('parent', None) if parent is not None: defaults['start'] = parent['end'] defaults['wl'] = parent['wl'] self['ior'] = parent['ior'] self['dir'] = parent['dir'] parent.addChild(self) defaults.update(params) defaults['dir'] = Point(defaults['dir']) self.setParams(**defaults) self.mkPath() def clearChildren(self): for c in self.children: c.clearChildren() c.setParentItem(None) self.scene().removeItem(c) self.children = [] def paramStateChanged(self): pass def addChild(self, ch): self.children.append(ch) ch.setParentItem(self) def currentState(self, relativeTo=None): pos = self['start'] dir = self['dir'] if relativeTo is None: return pos, dir else: trans = self.itemTransform(relativeTo)[0] p1 = trans.map(pos) p2 = trans.map(pos + dir) return Point(p1), Point(p2-p1) def setEnd(self, end): self['end'] = end self.mkPath() def boundingRect(self): return self.path.boundingRect() def paint(self, p, *args): #p.setPen(pg.mkPen((255,0,0, 150))) p.setRenderHints(p.renderHints() | p.Antialiasing) p.setCompositionMode(p.CompositionMode_Plus) p.setPen(wlPen(self['wl'])) p.drawPath(self.path) def mkPath(self): self.prepareGeometryChange() self.path = QtGui.QPainterPath() self.path.moveTo(self['start']) if self['end'] is not None: self.path.lineTo(self['end']) else: self.path.lineTo(self['start']+500*self['dir']) def trace(rays, optics): if len(optics) < 1 or len(rays) < 1: return for r in rays: r.clearChildren() o = optics[0] r2 = o.propagateRay(r) trace(r2, optics[1:]) class Tracer(QtCore.QObject): def __init__(self, rays, optics): QtCore.QObject.__init__(self) self.optics = optics self.rays = rays for o in self.optics: o.sigStateChanged.connect(self.trace) self.trace() def trace(self): trace(self.rays, self.optics)
true
true
f70eddb35e62f56ceb999ef8f00c042b5f03b8cb
22,177
py
Python
src/app_server/tasks/proto/task_pb2.py
ArneGudermann/app_server
f28179861dd96766610ddc3867dbe1285eb8fcbc
[ "MIT" ]
null
null
null
src/app_server/tasks/proto/task_pb2.py
ArneGudermann/app_server
f28179861dd96766610ddc3867dbe1285eb8fcbc
[ "MIT" ]
null
null
null
src/app_server/tasks/proto/task_pb2.py
ArneGudermann/app_server
f28179861dd96766610ddc3867dbe1285eb8fcbc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/tasks_v2/proto/task.proto import sys _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from ..proto import ( target_pb2 as google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2, ) from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/tasks_v2/proto/task.proto", package="google.cloud.tasks.v2", syntax="proto3", serialized_options=_b( "\n\031com.google.cloud.tasks.v2B\tTaskProtoP\001Z:google.golang.org/genproto/googleapis/cloud/tasks/v2;tasks" ), serialized_pb=_b( '\n&google/cloud/tasks_v2/proto/task.proto\x12\x15google.cloud.tasks.v2\x1a\x19google/api/resource.proto\x1a(google/cloud/tasks_v2/proto/target.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\xb4\x05\n\x04Task\x12\x0c\n\x04name\x18\x01 \x01(\t\x12N\n\x17\x61pp_engine_http_request\x18\x02 \x01(\x0b\x32+.google.cloud.tasks.v2.AppEngineHttpRequestH\x00\x12:\n\x0chttp_request\x18\x03 \x01(\x0b\x32".google.cloud.tasks.v2.HttpRequestH\x00\x12\x31\n\rschedule_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63reate_time\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x11\x64ispatch_deadline\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x16\n\x0e\x64ispatch_count\x18\x07 \x01(\x05\x12\x16\n\x0eresponse_count\x18\x08 \x01(\x05\x12\x35\n\rfirst_attempt\x18\t \x01(\x0b\x32\x1e.google.cloud.tasks.v2.Attempt\x12\x34\n\x0clast_attempt\x18\n \x01(\x0b\x32\x1e.google.cloud.tasks.v2.Attempt\x12.\n\x04view\x18\x0b \x01(\x0e\x32 .google.cloud.tasks.v2.Task.View"1\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02:h\xea\x41\x65\n\x1e\x63loudtasks.googleapis.com/Task\x12\x43projects/{project}/locations/{location}/queues/{queue}/tasks/{task}B\x0e\n\x0cmessage_type"\xcf\x01\n\x07\x41ttempt\x12\x31\n\rschedule_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rdispatch_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rresponse_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x0fresponse_status\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusBd\n\x19\x63om.google.cloud.tasks.v2B\tTaskProtoP\x01Z:google.golang.org/genproto/googleapis/cloud/tasks/v2;tasksb\x06proto3' ), dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, google_dot_rpc_dot_status__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) _TASK_VIEW = _descriptor.EnumDescriptor( name="View", full_name="google.cloud.tasks.v2.Task.View", filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name="VIEW_UNSPECIFIED", index=0, number=0, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( name="BASIC", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( name="FULL", index=2, number=2, serialized_options=None, type=None ), ], containing_type=None, 
serialized_options=None, serialized_start=776, serialized_end=825, ) _sym_db.RegisterEnumDescriptor(_TASK_VIEW) _TASK = _descriptor.Descriptor( name="Task", full_name="google.cloud.tasks.v2.Task", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="name", full_name="google.cloud.tasks.v2.Task.name", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="app_engine_http_request", full_name="google.cloud.tasks.v2.Task.app_engine_http_request", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="http_request", full_name="google.cloud.tasks.v2.Task.http_request", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="schedule_time", full_name="google.cloud.tasks.v2.Task.schedule_time", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="create_time", full_name="google.cloud.tasks.v2.Task.create_time", index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), 
_descriptor.FieldDescriptor( name="dispatch_deadline", full_name="google.cloud.tasks.v2.Task.dispatch_deadline", index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="dispatch_count", full_name="google.cloud.tasks.v2.Task.dispatch_count", index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="response_count", full_name="google.cloud.tasks.v2.Task.response_count", index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="first_attempt", full_name="google.cloud.tasks.v2.Task.first_attempt", index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="last_attempt", full_name="google.cloud.tasks.v2.Task.last_attempt", index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="view", full_name="google.cloud.tasks.v2.Task.view", index=10, number=11, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[_TASK_VIEW], serialized_options=_b( "\352Ae\n\036cloudtasks.googleapis.com/Task\022Cprojects/{project}/locations/{location}/queues/{queue}/tasks/{task}" ), is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name="message_type", full_name="google.cloud.tasks.v2.Task.message_type", index=0, containing_type=None, fields=[], ) ], serialized_start=255, serialized_end=947, ) _ATTEMPT = _descriptor.Descriptor( name="Attempt", full_name="google.cloud.tasks.v2.Attempt", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="schedule_time", full_name="google.cloud.tasks.v2.Attempt.schedule_time", index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="dispatch_time", full_name="google.cloud.tasks.v2.Attempt.dispatch_time", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="response_time", full_name="google.cloud.tasks.v2.Attempt.response_time", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="response_status", full_name="google.cloud.tasks.v2.Attempt.response_status", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=950, serialized_end=1157, ) _TASK.fields_by_name[ "app_engine_http_request" ].message_type = ( google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2._APPENGINEHTTPREQUEST ) _TASK.fields_by_name[ "http_request" ].message_type = google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2._HTTPREQUEST _TASK.fields_by_name[ "schedule_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _TASK.fields_by_name[ "create_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _TASK.fields_by_name[ "dispatch_deadline" ].message_type = google_dot_protobuf_dot_duration__pb2._DURATION _TASK.fields_by_name["first_attempt"].message_type = _ATTEMPT _TASK.fields_by_name["last_attempt"].message_type = _ATTEMPT _TASK.fields_by_name["view"].enum_type = _TASK_VIEW _TASK_VIEW.containing_type = _TASK _TASK.oneofs_by_name["message_type"].fields.append( _TASK.fields_by_name["app_engine_http_request"] ) _TASK.fields_by_name["app_engine_http_request"].containing_oneof = _TASK.oneofs_by_name[ "message_type" ] _TASK.oneofs_by_name["message_type"].fields.append(_TASK.fields_by_name["http_request"]) _TASK.fields_by_name["http_request"].containing_oneof = _TASK.oneofs_by_name[ "message_type" ] _ATTEMPT.fields_by_name[ "schedule_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _ATTEMPT.fields_by_name[ "dispatch_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _ATTEMPT.fields_by_name[ "response_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _ATTEMPT.fields_by_name[ "response_status" ].message_type = google_dot_rpc_dot_status__pb2._STATUS DESCRIPTOR.message_types_by_name["Task"] = _TASK DESCRIPTOR.message_types_by_name["Attempt"] = _ATTEMPT 
_sym_db.RegisterFileDescriptor(DESCRIPTOR) Task = _reflection.GeneratedProtocolMessageType( "Task", (_message.Message,), dict( DESCRIPTOR=_TASK, __module__="google.cloud.tasks_v2.proto.task_pb2", __doc__="""A unit of scheduled work. Attributes: name: Optionally caller-specified in [CreateTask][google.cloud.tasks.v2.CloudTasks.CreateTask]. The task name. The task name must have the following format: ``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/ta sks/TASK_ID`` - ``PROJECT_ID`` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see `Identifying projects <https://cloud.google.com/resource- manager/docs/creating-managing- projects#identifying_projects>`_ - ``LOCATION_ID`` is the canonical ID for the task's location. The list of available locations can be obtained by calling [ListLocations][google .cloud.location.Locations.ListLocations]. For more information, see https://cloud.google.com/about/locations/. - ``QUEUE_ID`` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. - ``TASK_ID`` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (\_). The maximum length is 500 characters. message_type: Required. The message to send to the worker. app_engine_http_request: HTTP request that is sent to the App Engine app handler. An App Engine task is a task that has [AppEngineHttpRequest][goog le.cloud.tasks.v2.AppEngineHttpRequest] set. http_request: HTTP request that is sent to the worker. An HTTP task is a task that has [HttpRequest][google.cloud.tasks.v2.HttpRequest] set. schedule_time: The time when the task is scheduled to be attempted or retried. ``schedule_time`` will be truncated to the nearest microsecond. create_time: Output only. The time that the task was created. ``create_time`` will be truncated to the nearest second. dispatch_deadline: The deadline for requests sent to the worker. 
If the worker does not respond by this deadline then the request is cancelled and the attempt is marked as a ``DEADLINE_EXCEEDED`` failure. Cloud Tasks will retry the task according to the [RetryConfig][google.cloud.tasks.v2.RetryConfig]. Note that when the request is cancelled, Cloud Tasks will stop listing for the response, but whether the worker stops processing depends on the worker. For example, if the worker is stuck, it may not react to cancelled requests. The default and maximum values depend on the type of request: - For [HTTP tasks][google.cloud.tasks.v2.HttpRequest], the default is 10 minutes. The deadline must be in the interval [15 seconds, 30 minutes]. - For [App Engine tasks][google.cloud.tasks.v2.AppEngineHttpRequest], 0 indicates that the request has the default deadline. The default deadline depends on the `scaling type <https://cloud.google.com/appengine/docs/standard/go/how- instances-are-managed#instance_scaling>`_ of the service: 10 minutes for standard apps with automatic scaling, 24 hours for standard apps with manual and basic scaling, and 60 minutes for flex apps. If the request deadline is set, it must be in the interval [15 seconds, 24 hours 15 seconds]. Regardless of the task's ``dispatch_deadline``, the app handler will not run for longer than than the service's timeout. We recommend setting the ``dispatch_deadline`` to at most a few seconds more than the app handler's timeout. For more information see `Timeouts <https://cloud.google.com/tasks/docs/creating-appengine- handlers#timeouts>`_. ``dispatch_deadline`` will be truncated to the nearest millisecond. The deadline is an approximate deadline. dispatch_count: Output only. The number of attempts dispatched. This count includes attempts which have been dispatched but haven't received a response. response_count: Output only. The number of attempts which have received a response. first_attempt: Output only. The status of the task's first attempt. 
Only [dispatch\_time][google.cloud.tasks.v2.Attempt.dispatch\_time] will be set. The other [Attempt][google.cloud.tasks.v2.Attempt] information is not retained by Cloud Tasks. last_attempt: Output only. The status of the task's last attempt. view: Output only. The view specifies which subset of the [Task][google.cloud.tasks.v2.Task] has been returned. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2.Task) ), ) _sym_db.RegisterMessage(Task) Attempt = _reflection.GeneratedProtocolMessageType( "Attempt", (_message.Message,), dict( DESCRIPTOR=_ATTEMPT, __module__="google.cloud.tasks_v2.proto.task_pb2", __doc__="""The status of a task attempt. Attributes: schedule_time: Output only. The time that this attempt was scheduled. ``schedule_time`` will be truncated to the nearest microsecond. dispatch_time: Output only. The time that this attempt was dispatched. ``dispatch_time`` will be truncated to the nearest microsecond. response_time: Output only. The time that this attempt response was received. ``response_time`` will be truncated to the nearest microsecond. response_status: Output only. The response from the worker for this attempt. If ``response_time`` is unset, then the task has not been attempted or is currently running and the ``response_status`` field is meaningless. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2.Attempt) ), ) _sym_db.RegisterMessage(Attempt) DESCRIPTOR._options = None _TASK._options = None # @@protoc_insertion_point(module_scope)
37.909402
1,822
0.632908
import sys _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from ..proto import ( target_pb2 as google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2, ) from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/tasks_v2/proto/task.proto", package="google.cloud.tasks.v2", syntax="proto3", serialized_options=_b( "\n\031com.google.cloud.tasks.v2B\tTaskProtoP\001Z:google.golang.org/genproto/googleapis/cloud/tasks/v2;tasks" ), serialized_pb=_b( '\n&google/cloud/tasks_v2/proto/task.proto\x12\x15google.cloud.tasks.v2\x1a\x19google/api/resource.proto\x1a(google/cloud/tasks_v2/proto/target.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\xb4\x05\n\x04Task\x12\x0c\n\x04name\x18\x01 \x01(\t\x12N\n\x17\x61pp_engine_http_request\x18\x02 \x01(\x0b\x32+.google.cloud.tasks.v2.AppEngineHttpRequestH\x00\x12:\n\x0chttp_request\x18\x03 \x01(\x0b\x32".google.cloud.tasks.v2.HttpRequestH\x00\x12\x31\n\rschedule_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x11\x64ispatch_deadline\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x16\n\x0e\x64ispatch_count\x18\x07 \x01(\x05\x12\x16\n\x0eresponse_count\x18\x08 
\x01(\x05\x12\x35\n\rfirst_attempt\x18\t \x01(\x0b\x32\x1e.google.cloud.tasks.v2.Attempt\x12\x34\n\x0clast_attempt\x18\n \x01(\x0b\x32\x1e.google.cloud.tasks.v2.Attempt\x12.\n\x04view\x18\x0b \x01(\x0e\x32 .google.cloud.tasks.v2.Task.View"1\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02:h\xea\x41\x65\n\x1e\x63loudtasks.googleapis.com/Task\x12\x43projects/{project}/locations/{location}/queues/{queue}/tasks/{task}B\x0e\n\x0cmessage_type"\xcf\x01\n\x07\x41ttempt\x12\x31\n\rschedule_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rdispatch_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rresponse_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x0fresponse_status\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusBd\n\x19\x63om.google.cloud.tasks.v2B\tTaskProtoP\x01Z:google.golang.org/genproto/googleapis/cloud/tasks/v2;tasksb\x06proto3' ), dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, google_dot_rpc_dot_status__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) _TASK_VIEW = _descriptor.EnumDescriptor( name="View", full_name="google.cloud.tasks.v2.Task.View", filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name="VIEW_UNSPECIFIED", index=0, number=0, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( name="BASIC", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( name="FULL", index=2, number=2, serialized_options=None, type=None ), ], containing_type=None, serialized_options=None, serialized_start=776, serialized_end=825, ) _sym_db.RegisterEnumDescriptor(_TASK_VIEW) _TASK = _descriptor.Descriptor( name="Task", full_name="google.cloud.tasks.v2.Task", filename=None, file=DESCRIPTOR, 
containing_type=None, fields=[ _descriptor.FieldDescriptor( name="name", full_name="google.cloud.tasks.v2.Task.name", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="app_engine_http_request", full_name="google.cloud.tasks.v2.Task.app_engine_http_request", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="http_request", full_name="google.cloud.tasks.v2.Task.http_request", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="schedule_time", full_name="google.cloud.tasks.v2.Task.schedule_time", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="create_time", full_name="google.cloud.tasks.v2.Task.create_time", index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="dispatch_deadline", full_name="google.cloud.tasks.v2.Task.dispatch_deadline", index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="dispatch_count", full_name="google.cloud.tasks.v2.Task.dispatch_count", index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="response_count", full_name="google.cloud.tasks.v2.Task.response_count", index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="first_attempt", full_name="google.cloud.tasks.v2.Task.first_attempt", index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="last_attempt", full_name="google.cloud.tasks.v2.Task.last_attempt", index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="view", full_name="google.cloud.tasks.v2.Task.view", index=10, number=11, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[_TASK_VIEW], serialized_options=_b( 
"\352Ae\n\036cloudtasks.googleapis.com/Task\022Cprojects/{project}/locations/{location}/queues/{queue}/tasks/{task}" ), is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name="message_type", full_name="google.cloud.tasks.v2.Task.message_type", index=0, containing_type=None, fields=[], ) ], serialized_start=255, serialized_end=947, ) _ATTEMPT = _descriptor.Descriptor( name="Attempt", full_name="google.cloud.tasks.v2.Attempt", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="schedule_time", full_name="google.cloud.tasks.v2.Attempt.schedule_time", index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="dispatch_time", full_name="google.cloud.tasks.v2.Attempt.dispatch_time", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="response_time", full_name="google.cloud.tasks.v2.Attempt.response_time", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="response_status", full_name="google.cloud.tasks.v2.Attempt.response_status", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], 
serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=950, serialized_end=1157, ) _TASK.fields_by_name[ "app_engine_http_request" ].message_type = ( google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2._APPENGINEHTTPREQUEST ) _TASK.fields_by_name[ "http_request" ].message_type = google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2._HTTPREQUEST _TASK.fields_by_name[ "schedule_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _TASK.fields_by_name[ "create_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _TASK.fields_by_name[ "dispatch_deadline" ].message_type = google_dot_protobuf_dot_duration__pb2._DURATION _TASK.fields_by_name["first_attempt"].message_type = _ATTEMPT _TASK.fields_by_name["last_attempt"].message_type = _ATTEMPT _TASK.fields_by_name["view"].enum_type = _TASK_VIEW _TASK_VIEW.containing_type = _TASK _TASK.oneofs_by_name["message_type"].fields.append( _TASK.fields_by_name["app_engine_http_request"] ) _TASK.fields_by_name["app_engine_http_request"].containing_oneof = _TASK.oneofs_by_name[ "message_type" ] _TASK.oneofs_by_name["message_type"].fields.append(_TASK.fields_by_name["http_request"]) _TASK.fields_by_name["http_request"].containing_oneof = _TASK.oneofs_by_name[ "message_type" ] _ATTEMPT.fields_by_name[ "schedule_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _ATTEMPT.fields_by_name[ "dispatch_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _ATTEMPT.fields_by_name[ "response_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _ATTEMPT.fields_by_name[ "response_status" ].message_type = google_dot_rpc_dot_status__pb2._STATUS DESCRIPTOR.message_types_by_name["Task"] = _TASK DESCRIPTOR.message_types_by_name["Attempt"] = _ATTEMPT _sym_db.RegisterFileDescriptor(DESCRIPTOR) Task = _reflection.GeneratedProtocolMessageType( "Task", (_message.Message,), dict( 
DESCRIPTOR=_TASK, __module__="google.cloud.tasks_v2.proto.task_pb2", __doc__="""A unit of scheduled work. Attributes: name: Optionally caller-specified in [CreateTask][google.cloud.tasks.v2.CloudTasks.CreateTask]. The task name. The task name must have the following format: ``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/ta sks/TASK_ID`` - ``PROJECT_ID`` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see `Identifying projects <https://cloud.google.com/resource- manager/docs/creating-managing- projects#identifying_projects>`_ - ``LOCATION_ID`` is the canonical ID for the task's location. The list of available locations can be obtained by calling [ListLocations][google .cloud.location.Locations.ListLocations]. For more information, see https://cloud.google.com/about/locations/. - ``QUEUE_ID`` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. - ``TASK_ID`` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (\_). The maximum length is 500 characters. message_type: Required. The message to send to the worker. app_engine_http_request: HTTP request that is sent to the App Engine app handler. An App Engine task is a task that has [AppEngineHttpRequest][goog le.cloud.tasks.v2.AppEngineHttpRequest] set. http_request: HTTP request that is sent to the worker. An HTTP task is a task that has [HttpRequest][google.cloud.tasks.v2.HttpRequest] set. schedule_time: The time when the task is scheduled to be attempted or retried. ``schedule_time`` will be truncated to the nearest microsecond. create_time: Output only. The time that the task was created. ``create_time`` will be truncated to the nearest second. dispatch_deadline: The deadline for requests sent to the worker. If the worker does not respond by this deadline then the request is cancelled and the attempt is marked as a ``DEADLINE_EXCEEDED`` failure. 
Cloud Tasks will retry the task according to the [RetryConfig][google.cloud.tasks.v2.RetryConfig]. Note that when the request is cancelled, Cloud Tasks will stop listing for the response, but whether the worker stops processing depends on the worker. For example, if the worker is stuck, it may not react to cancelled requests. The default and maximum values depend on the type of request: - For [HTTP tasks][google.cloud.tasks.v2.HttpRequest], the default is 10 minutes. The deadline must be in the interval [15 seconds, 30 minutes]. - For [App Engine tasks][google.cloud.tasks.v2.AppEngineHttpRequest], 0 indicates that the request has the default deadline. The default deadline depends on the `scaling type <https://cloud.google.com/appengine/docs/standard/go/how- instances-are-managed#instance_scaling>`_ of the service: 10 minutes for standard apps with automatic scaling, 24 hours for standard apps with manual and basic scaling, and 60 minutes for flex apps. If the request deadline is set, it must be in the interval [15 seconds, 24 hours 15 seconds]. Regardless of the task's ``dispatch_deadline``, the app handler will not run for longer than than the service's timeout. We recommend setting the ``dispatch_deadline`` to at most a few seconds more than the app handler's timeout. For more information see `Timeouts <https://cloud.google.com/tasks/docs/creating-appengine- handlers#timeouts>`_. ``dispatch_deadline`` will be truncated to the nearest millisecond. The deadline is an approximate deadline. dispatch_count: Output only. The number of attempts dispatched. This count includes attempts which have been dispatched but haven't received a response. response_count: Output only. The number of attempts which have received a response. first_attempt: Output only. The status of the task's first attempt. Only [dispatch\_time][google.cloud.tasks.v2.Attempt.dispatch\_time] will be set. The other [Attempt][google.cloud.tasks.v2.Attempt] information is not retained by Cloud Tasks. 
last_attempt: Output only. The status of the task's last attempt. view: Output only. The view specifies which subset of the [Task][google.cloud.tasks.v2.Task] has been returned. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2.Task) ), ) _sym_db.RegisterMessage(Task) Attempt = _reflection.GeneratedProtocolMessageType( "Attempt", (_message.Message,), dict( DESCRIPTOR=_ATTEMPT, __module__="google.cloud.tasks_v2.proto.task_pb2", __doc__="""The status of a task attempt. Attributes: schedule_time: Output only. The time that this attempt was scheduled. ``schedule_time`` will be truncated to the nearest microsecond. dispatch_time: Output only. The time that this attempt was dispatched. ``dispatch_time`` will be truncated to the nearest microsecond. response_time: Output only. The time that this attempt response was received. ``response_time`` will be truncated to the nearest microsecond. response_status: Output only. The response from the worker for this attempt. If ``response_time`` is unset, then the task has not been attempted or is currently running and the ``response_status`` field is meaningless. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2.Attempt) ), ) _sym_db.RegisterMessage(Attempt) DESCRIPTOR._options = None _TASK._options = None # @@protoc_insertion_point(module_scope)
true
true
f70eddcaeb1d3f78ffc1a296a080675447ca2f91
1,215
py
Python
game/plugins/amblight/amblight.py
danieldugas/naith
04c85412c93ea1572f3146ddadae39af3dc9e9f7
[ "Apache-2.0" ]
5
2015-07-12T04:26:10.000Z
2021-03-23T19:56:45.000Z
game/plugins/amblight/amblight.py
danieldugas/naith
04c85412c93ea1572f3146ddadae39af3dc9e9f7
[ "Apache-2.0" ]
1
2015-04-11T07:03:04.000Z
2018-07-18T11:12:38.000Z
game/plugins/amblight/amblight.py
danieldugas/naith
04c85412c93ea1572f3146ddadae39af3dc9e9f7
[ "Apache-2.0" ]
3
2016-01-12T13:58:29.000Z
2021-05-26T12:55:07.000Z
# Copyright Tom SF Haines, Reinier de Blois, Aaron Snoswell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from panda3d.core import VBase4
from panda3d.core import AmbientLight as PAmbientLight


class AmbLight:
  """Plugin that adds a single ambient light to the scene.

  The light colour is read from the plugin's XML configuration; ``start``
  and ``stop`` enable/disable the light on the global scene graph.
  NOTE(review): ``render`` is presumably the Panda3D global scene-graph
  root injected as a builtin by ShowBase — confirm against the engine setup.
  """

  def __init__(self, manager, xml):
    """Create the ambient light node and apply the XML configuration.

    manager -- plugin manager (unused here, passed through to reload).
    xml     -- ElementTree element holding an optional <color> child.
    """
    self.light = PAmbientLight('alight')
    self.lightNode = render.attachNewNode(self.light)
    self.reload(manager, xml)

  def reload(self, manager, xml):
    """Re-read the <color> element and update the light's colour.

    If no <color> element is present the current colour is left unchanged.
    """
    color = xml.find('color')
    # Must compare against None explicitly: ElementTree elements evaluate
    # as falsy when they have no children, so a plain truthiness test (or
    # the previous ``!= None``) is the classic pitfall / non-idiomatic.
    if color is not None:
      self.light.setColor(VBase4(float(color.get('r')),
                                 float(color.get('g')),
                                 float(color.get('b')),
                                 1.0))

  def start(self):
    """Enable the ambient light on the scene graph root."""
    render.setLight(self.lightNode)

  def stop(self):
    """Disable the ambient light on the scene graph root."""
    render.clearLight(self.lightNode)
31.153846
107
0.730864
from panda3d.core import VBase4 from panda3d.core import AmbientLight as PAmbientLight class AmbLight: def __init__(self,manager,xml): self.light = PAmbientLight('alight') self.lightNode = render.attachNewNode(self.light) self.reload(manager,xml) def reload(self,manager,xml): color = xml.find('color') if color!=None: self.light.setColor(VBase4(float(color.get('r')), float(color.get('g')), float(color.get('b')), 1.0)) def start(self): render.setLight(self.lightNode) def stop(self): render.clearLight(self.lightNode)
true
true
f70edf1a09050130a9565a23e6e442da972cb095
3,196
py
Python
machine_learning/tf_notMNIST_Training_Gradient_Descent.py
XinyueZ/some-python-codes
2d7296a4deebb0cd086be34ad7d66f5042cdf6e6
[ "Unlicense" ]
null
null
null
machine_learning/tf_notMNIST_Training_Gradient_Descent.py
XinyueZ/some-python-codes
2d7296a4deebb0cd086be34ad7d66f5042cdf6e6
[ "Unlicense" ]
null
null
null
machine_learning/tf_notMNIST_Training_Gradient_Descent.py
XinyueZ/some-python-codes
2d7296a4deebb0cd086be34ad7d66f5042cdf6e6
[ "Unlicense" ]
null
null
null
#
# Run NN, multinomial logistic regression using simple gradient descent.
#
import config
import numpy as np
import tensorflow as tf
from tensorflow import (Variable, constant, global_variables_initializer,
                        truncated_normal, zeros)
from tf_training_helper import TrainingHelper


class TF_notMNIST_Training_Gradient_Descent:
    """Multinomial logistic regression on notMNIST, trained with plain
    full-batch gradient descent (TensorFlow 1.x graph API)."""

    def __init__(self, each_object_size_width=config.TRAIN_OBJECT_WIDTH,
                 each_object_size_height=config.TRAIN_OBJECT_HEIGHT,
                 train_batch=10000, train_steps=800, train_learning_rate=0.5):
        """Store the training hyper-parameters and bind helper callbacks.

        each_object_size_width/height -- pixel dimensions of one sample.
        train_batch                   -- number of training rows to use.
        train_steps                   -- gradient-descent iterations.
        train_learning_rate           -- step size for the optimizer.
        """
        self.each_object_size_width = each_object_size_width
        self.each_object_size_height = each_object_size_height
        self.train_batch = train_batch
        self.train_steps = train_steps
        self.train_learning_rate = train_learning_rate
        # Delegate printing/metrics and graph-building pieces to the helper.
        helper = TrainingHelper()
        self.__print_predications__ = helper.print_predications
        self.__print_test_accuracy__ = helper.print_test_accuracy
        self.__activation__ = helper.activation
        self.__loss_optimizer__ = helper.loss_optimizer

    def start_with(self, train_dataset, train_labels, valid_dataset,
                   valid_labels, test_dataset, test_labels, count_classes,
                   beta_for_regularizer=0.01):
        """Build the regression graph and run gradient descent.

        Trains on the first ``train_batch`` rows of ``train_dataset``,
        reporting progress against the validation set and final accuracy
        against the test set.  ``beta_for_regularizer`` weights the L2
        penalty applied to the weight matrix.
        """
        batch = self.train_batch

        # Graph constants: a fixed slice of the training data plus the
        # complete validation and test sets.
        graph_train_x = constant(train_dataset[:batch, :])
        graph_train_y = constant(train_labels[:batch])
        graph_valid_x = constant(valid_dataset)
        graph_test_x = constant(test_dataset)

        # Trainable parameters: the classical weight matrix and bias vector.
        pixels_per_object = self.each_object_size_width * self.each_object_size_height
        weights = Variable(truncated_normal([pixels_per_object, count_classes]))
        biases = Variable(zeros([count_classes]))

        logits = self.__activation__(graph_train_x, weights, biases)
        loss, optimizer = self.__loss_optimizer__(
            graph_train_y, logits, self.train_learning_rate,
            beta_for_regularizer, [weights])

        # Softmax converts raw scores into per-class probabilities,
        # turning the problem into a probabilistic one.
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            self.__activation__(graph_valid_x, weights, biases))
        test_prediction = tf.nn.softmax(
            self.__activation__(graph_test_x, weights, biases))

        # Training loop.
        print("\n")
        with tf.Session() as session:
            session.run(global_variables_initializer())
            for step in range(self.train_steps):
                _, current_loss, predications = session.run(
                    [optimizer, loss, train_prediction])
                self.__print_predications__(
                    step, current_loss, predications,
                    train_labels[:batch, :],
                    valid_prediction, valid_labels)
            self.__print_test_accuracy__(test_prediction, test_labels)
40.974359
186
0.682728
import config import numpy as np import tensorflow as tf from tensorflow import (Variable, constant, global_variables_initializer, truncated_normal, zeros) from tf_training_helper import TrainingHelper class TF_notMNIST_Training_Gradient_Descent: def __init__(self, each_object_size_width=config.TRAIN_OBJECT_WIDTH, each_object_size_height=config.TRAIN_OBJECT_HEIGHT, train_batch=10000, train_steps=800, train_learning_rate=0.5): self.each_object_size_width = each_object_size_width self.each_object_size_height = each_object_size_height self.train_batch = train_batch self.train_steps = train_steps self.train_learning_rate = train_learning_rate helper = TrainingHelper() self.__print_predications__ = helper.print_predications self.__print_test_accuracy__ = helper.print_test_accuracy self.__activation__ = helper.activation self.__loss_optimizer__ = helper.loss_optimizer def start_with(self, train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, count_classes, beta_for_regularizer=0.01): tf_train_dataset = constant(train_dataset[:self.train_batch, :]) tf_train_labels = constant(train_labels[:self.train_batch]) tf_valid_dataset = constant(valid_dataset) tf_test_dataset = constant(test_dataset) tf_weights = Variable(truncated_normal( [self.each_object_size_width * self.each_object_size_height, count_classes])) tf_biases = Variable(zeros([count_classes])) logits = self.__activation__(tf_train_dataset, tf_weights, tf_biases) loss, optimizer = self.__loss_optimizer__( tf_train_labels, logits, self.train_learning_rate, beta_for_regularizer, [tf_weights]) predication_for_train = tf.nn.softmax(logits) predication_for_valid = tf.nn.softmax( self.__activation__(tf_valid_dataset, tf_weights, tf_biases)) predication_for_test = tf.nn.softmax( self.__activation__(tf_test_dataset, tf_weights, tf_biases)) print("\n") with tf.Session() as sess: init = global_variables_initializer() sess.run(init) for step in range(self.train_steps): _, ls, predications = 
sess.run( [optimizer, loss, predication_for_train]) self.__print_predications__( step, ls, predications, train_labels[:self.train_batch, :], predication_for_valid, valid_labels) self.__print_test_accuracy__(predication_for_test, test_labels)
true
true
f70edfed142a1adff7c6e18b38c7bde60881368c
35,539
py
Python
peprint/peprint.py
tommikaikkonen/peprint
7248ae6f92f1b05b2c9089ce69280120ad4fcd69
[ "MIT" ]
3
2017-10-24T08:35:18.000Z
2017-12-18T03:23:19.000Z
peprint/peprint.py
tommikaikkonen/peprint
7248ae6f92f1b05b2c9089ce69280120ad4fcd69
[ "MIT" ]
null
null
null
peprint/peprint.py
tommikaikkonen/peprint
7248ae6f92f1b05b2c9089ce69280120ad4fcd69
[ "MIT" ]
null
null
null
import inspect import math import re from functools import singledispatch, partial from itertools import chain, cycle from .api import ( always_break, annotate, concat, contextual, flat_choice, fill, group, nest, NIL, LINE, SOFTLINE, HARDLINE ) from .doc import ( Annotated, Doc ) from .layout import layout_smart from .syntax import Token from .utils import identity, intersperse UNSET_SENTINEL = object() COMMA = annotate(Token.PUNCTUATION, ',') COLON = annotate(Token.PUNCTUATION, ':') ELLIPSIS = annotate(Token.PUNCTUATION, '...') LPAREN = annotate(Token.PUNCTUATION, '(') RPAREN = annotate(Token.PUNCTUATION, ')') LBRACKET = annotate(Token.PUNCTUATION, '[') RBRACKET = annotate(Token.PUNCTUATION, ']') LBRACE = annotate(Token.PUNCTUATION, '{') RBRACE = annotate(Token.PUNCTUATION, '}') NEG_OP = annotate(Token.OPERATOR, '-') MUL_OP = annotate(Token.OPERATOR, '*') ADD_OP = annotate(Token.OPERATOR, '+') ASSIGN_OP = annotate(Token.OPERATOR, '=') WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)') WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)') NONWORD_PATTERN_TEXT = re.compile(r'(\W+)') NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)') # For dict keys """ ( 'aaaaaaaaaa' 'aaaaaa' ) """ MULTILINE_STATEGY_PARENS = 'MULTILINE_STATEGY_PARENS' # For dict values """ 'aaaaaaaaaa' 'aaaaa' """ MULTILINE_STATEGY_INDENTED = 'MULTILINE_STATEGY_INDENTED' # For sequence elements """ 'aaaaaaaaa' 'aaaaaa' """ MULTILINE_STATEGY_HANG = 'MULTILINE_STATEGY_HANG' # For top level strs """ 'aaaaaaaaa' 'aaaaaa' """ MULTILINE_STATEGY_PLAIN = 'MULTILINE_STATEGY_PLAIN' IMPLICIT_MODULES = { '__main__', 'builtins', } class CommentAnnotation: def __init__(self, value): assert isinstance(value, str) self.value = value def __repr__(self): return f'ValueComment({repr(self.value)})' class _CommentedValue: def __init__(self, value, comment): self.value = value self.comment = comment class _TrailingCommentedValue: def __init__(self, value, comment): self.value = value self.comment = comment def 
annotate_comment(comment, doc): """Annotate ``doc`` with ``comment`` text. Peprint will make sure the parent (or top-level) handler will render the comment in a proper way. E.g. if ``doc`` represents an element in a list, then the ``list`` pretty printer will handle where to place the comment. Differs from ``comment`` and ``trailing_comment`` by operating only on Docs, not normal values. """ return annotate(CommentAnnotation(comment), doc) def comment(comment_str, value): """Annotates a value with a comment str. Allows you to insert comments into Peprint output by annotating them on the values directly, instead of first having to render them into a Doc and then annotating the Doc with ``annotate_comment``. Generally, you want to use this to annotate arguments to ``prettycall``. """ return _CommentedValue(value, comment_str) def trailing_comment(comment_str, value): """Annotates a value with a comment str, so that the comment will be rendered "trailing", e.g. in place of the last element in a list, set or tuple, or after the last argument in a function. This will force the rendering of `value` to be broken to multple lines as Python does not have inline comments. 
""" return _TrailingCommentedValue(value, comment_str) def unwrap_comments(value): comment = None trailing_comment = None while isinstance(value, (_CommentedValue, _TrailingCommentedValue)): if isinstance(value, _CommentedValue): comment = value.comment value = value.value elif isinstance(value, _TrailingCommentedValue): trailing_comment = value.comment value = value.value return (value, comment, trailing_comment) def is_commented(value): return ( isinstance(value, Annotated) and isinstance(value.annotation, CommentAnnotation) ) def builtin_identifier(s): return annotate(Token.NAME_BUILTIN, s) def identifier(s): return annotate(Token.NAME_FUNCTION, s) def general_identifier(s): if callable(s): module, qualname = s.__module__, s.__qualname__ if module in IMPLICIT_MODULES: if module == 'builtins': return builtin_identifier(qualname) return identifier(qualname) return identifier(f'{module}.{qualname}') return identifier(s) def classattr(cls, attrname): return concat([ general_identifier(cls), identifier(f'.{attrname}') ]) class PrettyContext: __slots__ = ( 'indent', 'depth_left', 'visited', 'multiline_strategy', 'user_ctx' ) def __init__( self, indent, depth_left, visited=None, multiline_strategy=MULTILINE_STATEGY_PLAIN, user_ctx=None, ): self.indent = indent self.depth_left = depth_left self.multiline_strategy = multiline_strategy if visited is None: visited = set() self.visited = visited if user_ctx is None: user_ctx = {} self.user_ctx = user_ctx def _replace(self, **kwargs): passed_keys = set(kwargs.keys()) fieldnames = type(self).__slots__ assert passed_keys.issubset(set(fieldnames)) return PrettyContext( **{ k: ( kwargs[k] if k in passed_keys else getattr(self, k) ) for k in fieldnames } ) def use_multiline_strategy(self, strategy): return self._replace(multiline_strategy=strategy) def set(self, key, value): return self._replace(user_ctx={ **self.user_ctx, key: value, }) def get(self, key, default=None): return self.user_ctx.get(key, default) def 
nested_call(self): return self._replace(depth_left=self.depth_left - 1) def start_visit(self, value): self.visited.add(id(value)) def end_visit(self, value): self.visited.remove(id(value)) def is_visited(self, value): return id(value) in self.visited def _run_pretty(pretty_fn, value, ctx, trailing_comment=None): if ctx.is_visited(value): return _pretty_recursion(value) ctx.start_visit(value) if trailing_comment: doc = pretty_fn(value, ctx, trailing_comment=trailing_comment) else: doc = pretty_fn(value, ctx) if not ( isinstance(doc, str) or isinstance(doc, Doc) ): fnname = f'{pretty_fn.__module__}.{pretty_fn.__qualname__}' raise ValueError( 'Functions decorated with register_pretty must return ' f'an instance of str or Doc. {fnname} returned ' f'{repr(doc)} instead.' ) ctx.end_visit(value) return doc _PREDICATE_REGISTRY = [] def _repr_pretty(value, ctx): for predicate, fn in _PREDICATE_REGISTRY: if predicate(value): return fn(value, ctx) return repr(value) pretty_dispatch = singledispatch(partial(_run_pretty, _repr_pretty)) def pretty_python_value(value, ctx): comment = None trailing_comment = None value, comment, trailing_comment = unwrap_comments(value) if trailing_comment: doc = pretty_dispatch( value, ctx, trailing_comment=trailing_comment ) else: doc = pretty_dispatch( value, ctx ) if comment: return annotate_comment( comment, doc ) return doc def register_pretty(type=None, predicate=None): """Returns a decorator that registers the decorated function as the pretty printer for instances of ``type``. """ if type is None and predicate is None: raise ValueError( "You must provide either the 'type' or 'predicate' argument." 
) if type is not None and predicate is not None: raise ValueError( "You must provide either the 'type' or 'predicate' argument," "but not both" ) if predicate is not None: if not callable(predicate): raise ValueError( f"Expected a callable for 'predicate', got {repr(predicate)}" ) def decorator(fn): sig = inspect.signature(fn) value = None ctx = None try: sig.bind(value, ctx) except TypeError: fnname = f'{fn.__module__}.{fn.__qualname__}' raise ValueError( "Functions decorated with register_pretty must accept " "exactly two positional parameters: 'value' and 'ctx'. " f"The function signature for {fnname} was not compatible." ) if type: pretty_dispatch.register(type, partial(_run_pretty, fn)) else: assert callable(predicate) _PREDICATE_REGISTRY.append((predicate, fn)) return fn return decorator def bracket(ctx, left, child, right): return concat([ left, nest(ctx.indent, concat([SOFTLINE, child])), SOFTLINE, right ]) def commentdoc(text): """Returns a Doc representing a comment `text`. `text` is treated as words, and any whitespace may be used to break the comment to multiple lines.""" if not text: raise ValueError( f'Expected non-empty comment str, got {repr(text)}' ) commentlines = [] for line in text.splitlines(): alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line))) starts_with_whitespace = bool( WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0]) ) if starts_with_whitespace: prefix = alternating_words_ws[0] alternating_words_ws = alternating_words_ws[1:] else: prefix = NIL if len(alternating_words_ws) % 2 == 0: # The last part must be whitespace. 
alternating_words_ws = alternating_words_ws[:-1] for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))): part, is_ws = tup if is_ws: alternating_words_ws[idx] = flat_choice( when_flat=part, when_broken=always_break( concat([ HARDLINE, '# ', ]) ) ) commentlines.append( concat([ '# ', prefix, fill(alternating_words_ws) ]) ) outer = identity if len(commentlines) > 1: outer = always_break return annotate( Token.COMMENT_SINGLE, outer(concat(intersperse(HARDLINE, commentlines))) ) def sequence_of_docs(ctx, left, docs, right, dangle=False): docs = list(docs) # Performance optimization: # in case of really long sequences, # the layout algorithm can be quite slow. # No branching here is needed if the sequence # is long enough that even with the shortest # element output, it does not fit the ribbon width. minimum_output_len = ( 2 + # Assume left and right are one character each len(', ') * (len(docs) - 1) + len(docs) # each element must take at least one character ) MAX_PRACTICAL_RIBBON_WIDTH = 150 will_break = minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH has_comment = any(is_commented(doc) for doc in docs) parts = [] for idx, doc in enumerate(docs): last = idx == len(docs) - 1 if is_commented(doc): comment_str = doc.annotation.value # Try to fit the comment at the end of the same line. flat_version = concat([ doc, COMMA if not last else NIL, ' ', commentdoc(comment_str), HARDLINE if not last else NIL ]) # If the value is broken to multiple lines, add # comment on the line above. 
broken_version = concat([ commentdoc(comment_str), HARDLINE, doc, COMMA if not last else NIL, HARDLINE if not last else NIL ]) parts.append( group( flat_choice( when_flat=flat_version, when_broken=broken_version, ) ) ) else: parts.append(doc) if not last: parts.append( concat([COMMA, LINE]) ) if dangle: parts.append(COMMA) outer = ( always_break if will_break or has_comment else group ) return outer(bracket(ctx, left, concat(parts), right)) def prettycall(ctx, fn, *args, **kwargs): """Returns a Doc that represents a function call to :keyword:`fn` with the remaining positional and keyword arguments. Given an arbitrary context ``ctx``,:: prettycall(ctx, sorted, [7, 4, 5], reverse=True) Will result in output:: sorted([7, 4, 5], reverse=True) The layout algorithm will automatically break the call to multiple lines if needed:: sorted( [7, 4, 5], reverse=True ) ``prettycall`` automatically handles syntax highlighting. :param ctx: a context value :type ctx: peprint.peprint.PrettyContext :param fn: a callable :param args: positional arguments to render to the call :param kwargs: keyword arguments to render to the call :returns: :class:`~peprint.doc.Doc` """ fndoc = general_identifier(fn) if ctx.depth_left <= 0: return concat([fndoc, LPAREN, ELLIPSIS, RPAREN]) if not kwargs and len(args) == 1: sole_arg = args[0] unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0]) if type(unwrapped_sole_arg) in (list, dict, tuple): return build_fncall( ctx, fndoc, argdocs=[pretty_python_value(sole_arg, ctx)], hug_sole_arg=True, ) nested_ctx = ( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_HANG) ) return build_fncall( ctx, fndoc, argdocs=( pretty_python_value(arg, nested_ctx) for arg in args ), kwargdocs=( (kwarg, pretty_python_value(v, nested_ctx)) for kwarg, v in kwargs.items() ), ) def build_fncall( ctx, fndoc, argdocs=(), kwargdocs=(), hug_sole_arg=False, trailing_comment=None, ): """Builds a doc that looks like a function call, from docs that 
represent the function, arguments and keyword arguments. If ``hug_sole_arg`` is True, and the represented functional call is done with a single non-keyword argument, the function call parentheses will hug the sole argument doc without newlines and indentation in break mode. This makes a difference in calls like this:: > hug_sole_arg = False frozenset( [ 1, 2, 3, 4, 5 ] ) > hug_sole_arg = True frozenset([ 1, 2, 3, 4, 5, ]) If ``trailing_comment`` is provided, the text is rendered as a comment after the last argument and before the closing parenthesis. This will force the function call to be broken to multiple lines. """ if callable(fndoc): fndoc = general_identifier(fndoc) has_comment = bool(trailing_comment) argdocs = list(argdocs) kwargdocs = list(kwargdocs) kwargdocs = [ # Propagate any comments to the kwarg doc. ( annotate_comment( doc.annotation.value, concat([binding, ASSIGN_OP, doc.doc]) ) if is_commented(doc) else concat([binding, ASSIGN_OP, doc]) ) for binding, doc in kwargdocs ] if not (argdocs or kwargdocs): return concat([ fndoc, LPAREN, RPAREN, ]) if ( hug_sole_arg and not kwargdocs and len(argdocs) == 1 and not is_commented(argdocs[0]) ): return group( concat([ fndoc, LPAREN, argdocs[0], RPAREN ]) ) allarg_docs = [*argdocs, *kwargdocs] if trailing_comment: allarg_docs.append(commentdoc(trailing_comment)) parts = [] for idx, doc in enumerate(allarg_docs): last = idx == len(allarg_docs) - 1 if is_commented(doc): has_comment = True comment_str = doc.annotation.value doc = doc.doc else: comment_str = None part = concat([doc, NIL if last else COMMA]) if comment_str: part = group( flat_choice( when_flat=concat([ part, ' ', commentdoc(comment_str) ]), when_broken=concat([ commentdoc(comment_str), HARDLINE, part, ]), ) ) if not last: part = concat([part, HARDLINE if has_comment else LINE]) parts.append(part) outer = ( always_break if has_comment else group ) return outer( concat([ fndoc, LPAREN, nest( ctx.indent, concat([ SOFTLINE, concat(parts), ]) ), 
SOFTLINE, RPAREN ]) ) @register_pretty(tuple) @register_pretty(list) @register_pretty(set) def pretty_bracketable_iterable(value, ctx, trailing_comment=None): dangle = False if isinstance(value, list): left, right = LBRACKET, RBRACKET elif isinstance(value, tuple): left, right = LPAREN, RPAREN if len(value) == 1: dangle = True elif isinstance(value, set): left, right = LBRACE, RBRACE if not value: if isinstance(value, (list, tuple)): return concat([left, right]) else: assert isinstance(value, set) return prettycall(ctx, set) if ctx.depth_left == 0: return concat([left, ELLIPSIS, right]) if len(value) == 1: sole_value = list(value)[0] els = [ pretty_python_value( sole_value, ctx=( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_PLAIN) ) ) ] else: els = ( pretty_python_value( el, ctx=( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_HANG) ) ) for el in value ) if trailing_comment: els = chain(els, [commentdoc(trailing_comment)]) dangle = False return sequence_of_docs(ctx, left, els, right, dangle=dangle) @register_pretty(frozenset) def pretty_frozenset(value, ctx): if value: return prettycall(ctx, frozenset, list(value)) return prettycall(ctx, frozenset) class _AlwaysSortable(object): __slots__ = ('value', ) def __init__(self, value): self.value = value def sortable_value(self): return (str(type(self)), id(self)) def __lt__(self, other): try: return self.value < other.value except TypeError: return self.sortable_value() < other.sortable_value() @register_pretty(dict) def pretty_dict(d, ctx): if ctx.depth_left == 0: return concat([LBRACE, ELLIPSIS, RBRACE]) has_comment = False pairs = [] for k in sorted(d.keys(), key=_AlwaysSortable): v = d[k] if isinstance(k, (str, bytes)): kdoc = pretty_str( k, # not a nested call on purpose ctx=ctx.use_multiline_strategy(MULTILINE_STATEGY_PARENS), ) else: kdoc = pretty_python_value( k, ctx=ctx.nested_call() ) vdoc = pretty_python_value( v, ctx=( ctx .nested_call() 
.use_multiline_strategy(MULTILINE_STATEGY_INDENTED) ), ) kcomment = None if is_commented(kdoc): has_comment = True kcomment = kdoc.annotation.value kdoc = kdoc.doc vcomment = None if is_commented(vdoc): has_comment = True vcomment = vdoc.annotation.value vdoc = vdoc.doc pairs.append((k, v, kdoc, vdoc, kcomment, vcomment)) parts = [] for idx, tup in enumerate(pairs): last = idx == len(pairs) - 1 k, v, kdoc, vdoc, kcomment, vcomment = tup if not (kcomment or vcomment): parts.append( concat([ kdoc, concat([COLON, ' ']), vdoc, NIL if last else COMMA, NIL if last else LINE, ]), ) continue if kcomment: kcommented = concat([ commentdoc(kcomment), HARDLINE, kdoc, ]) else: kcommented = kdoc if vcomment: vcommented = group( flat_choice( # Add comment at the end of the line when_flat=concat([ vdoc, NIL if last else COMMA, ' ', commentdoc(vcomment), NIL if last else HARDLINE, ]), # Put comment above the value # on its own line when_broken=concat([ nest( ctx.indent, concat([ HARDLINE, commentdoc(vcomment), HARDLINE, # Rerender vdoc with plain multiline strategy, # since we already have an indentation. pretty_python_value( v, ctx=( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_PLAIN) ), ), COMMA if not last else NIL, HARDLINE if not last else NIL ]) ), ]) ) ) else: vcommented = concat([ vdoc, COMMA if not last else NIL, LINE if not last else NIL ]) parts.append( concat([ kcommented, concat([COLON, ' ']), vcommented ]) ) doc = bracket( ctx, LBRACE, concat(parts), RBRACE, ) if len(pairs) > 2 or has_comment: doc = always_break(doc) else: doc = group(doc) return doc INF_FLOAT = float('inf') NEG_INF_FLOAT = float('-inf') @register_pretty(float) def pretty_float(value, ctx): if ctx.depth_left == 0: return prettycall(ctx, float, ...) 
if value == INF_FLOAT: return prettycall(ctx, float, 'inf') elif value == NEG_INF_FLOAT: return prettycall(ctx, float, '-inf') elif math.isnan(value): return prettycall(ctx, float, 'nan') return annotate(Token.NUMBER_FLOAT, repr(value)) @register_pretty(int) def pretty_int(value, ctx): if ctx.depth_left == 0: return prettycall(ctx, int, ...) return annotate(Token.NUMBER_INT, repr(value)) @register_pretty(type(...)) def pretty_ellipsis(value, ctx): return ELLIPSIS @register_pretty(bool) @register_pretty(type(None)) def pretty_singletons(value, ctx): return annotate(Token.KEYWORD_CONSTANT, repr(value)) SINGLE_QUOTE_TEXT = "'" SINGLE_QUOTE_BYTES = b"'" DOUBLE_QUOTE_TEXT = '"' DOUBLE_QUOTE_BYTES = b'"' def determine_quote_strategy(s): if isinstance(s, str): single_quote = SINGLE_QUOTE_TEXT double_quote = DOUBLE_QUOTE_TEXT else: single_quote = SINGLE_QUOTE_BYTES double_quote = DOUBLE_QUOTE_BYTES contains_single = single_quote in s contains_double = double_quote in s if not contains_single: return SINGLE_QUOTE_TEXT if not contains_double: return DOUBLE_QUOTE_TEXT assert contains_single and contains_double single_count = s.count(single_quote) double_count = s.count(double_quote) if single_count <= double_count: return SINGLE_QUOTE_TEXT return DOUBLE_QUOTE_TEXT def escape_str_for_quote(use_quote, s): escaped_with_quotes = repr(s) repr_used_quote = escaped_with_quotes[-1] # string may have a prefix first_quote_at_index = escaped_with_quotes.find(repr_used_quote) repr_escaped = escaped_with_quotes[first_quote_at_index + 1:-1] if repr_used_quote == use_quote: # repr produced the quotes we wanted - # escaping is correct. return repr_escaped # repr produced different quotes, which escapes # alternate quotes. 
if use_quote == SINGLE_QUOTE_TEXT: # repr used double quotes return ( repr_escaped .replace('\\"', DOUBLE_QUOTE_TEXT) .replace(SINGLE_QUOTE_TEXT, "\\'") ) else: # repr used single quotes return ( repr_escaped .replace("\\'", SINGLE_QUOTE_TEXT) .replace(DOUBLE_QUOTE_TEXT, '\\"') ) STR_LITERAL_ESCAPES = re.compile( r'''((?:\\[\\abfnrtv"'])|''' r'(?:\\N\{.*?\})|' r'(?:\\u[a-fA-F0-9]{4})|' r'(?:\\U[a-fA-F0-9]{8})|' r'(?:\\x[a-fA-F0-9]{2})|' r'(?:\\[0-7]{1,3}))' ) def highlight_escapes(s): if not s: return NIL matches = STR_LITERAL_ESCAPES.split(s) starts_with_match = bool(STR_LITERAL_ESCAPES.match(matches[0])) docs = [] for part, is_escaped in zip( matches, cycle([starts_with_match, not starts_with_match]) ): if not part: continue docs.append( annotate( ( Token.STRING_ESCAPE if is_escaped else Token.LITERAL_STRING ), part ) ) return concat(docs) def pretty_single_line_str(s, indent, use_quote=None): prefix = ( annotate(Token.STRING_AFFIX, 'b') if isinstance(s, bytes) else '' ) if use_quote is None: use_quote = determine_quote_strategy(s) escaped = escape_str_for_quote(use_quote, s) escapes_highlighted = highlight_escapes(escaped) return concat([ prefix, annotate( Token.LITERAL_STRING, concat([ use_quote, escapes_highlighted, use_quote ]) ) ]) def split_at(idx, sequence): return (sequence[:idx], sequence[idx:]) def escaped_len(s, use_quote): return len(escape_str_for_quote(use_quote, s)) def str_to_lines(max_len, use_quote, s): if isinstance(s, str): whitespace_pattern = WHITESPACE_PATTERN_TEXT nonword_pattern = NONWORD_PATTERN_TEXT empty = '' else: assert isinstance(s, bytes) whitespace_pattern = WHITESPACE_PATTERN_BYTES nonword_pattern = NONWORD_PATTERN_BYTES empty = b'' alternating_words_ws = whitespace_pattern.split(s) if len(alternating_words_ws) <= 1: # no whitespace: try splitting with nonword pattern. 
alternating_words_ws = nonword_pattern.split(s) starts_with_whitespace = bool(nonword_pattern.match(alternating_words_ws[0])) else: starts_with_whitespace = bool(whitespace_pattern.match(alternating_words_ws[0])) # List[Tuple[str, bool]] # The boolean associated with each part indicates if it is a # whitespce/non-word part or not. tagged_alternating = list( zip( alternating_words_ws, cycle([starts_with_whitespace, not starts_with_whitespace]) ) ) remaining_stack = list(reversed(tagged_alternating)) curr_line_parts = [] curr_line_len = 0 while remaining_stack: curr, is_whitespace = remaining_stack.pop() curr_line_parts.append(curr) curr_line_len += escaped_len(curr, use_quote) if curr_line_len == max_len: if not is_whitespace and len(curr_line_parts) > 2: curr_line_parts.pop() yield empty.join(curr_line_parts) curr_line_parts = [] curr_line_len = 0 remaining_stack.append((curr, is_whitespace)) else: yield empty.join(curr_line_parts) curr_line_parts = [] curr_line_len = 0 continue elif curr_line_len > max_len: if not is_whitespace and len(curr_line_parts) > 1: curr_line_parts.pop() yield empty.join(curr_line_parts) remaining_stack.append((curr, is_whitespace)) curr_line_parts = [] curr_line_len = 0 continue curr_line_parts.pop() remaining_len = max_len - (curr_line_len - escaped_len(curr, use_quote)) this_line_part, next_line_part = split_at(max(remaining_len, 0), curr) curr_line_parts.append(this_line_part) yield empty.join(curr_line_parts) curr_line_parts = [] curr_line_len = 0 if next_line_part: remaining_stack.append((next_line_part, is_whitespace)) if curr_line_parts: yield empty.join(curr_line_parts) @register_pretty(str) @register_pretty(bytes) def pretty_str(s, ctx): # Subclasses of str/bytes # will be printed as StrSubclass('the actual string') constructor = type(s) is_native_type = constructor in (str, bytes) if ctx.depth_left == 0: if isinstance(s, str): return prettycall(ctx, constructor, ...) 
else: assert isinstance(s, bytes) return prettycall(ctx, constructor, ...) multiline_strategy = ctx.multiline_strategy peprint_indent = ctx.indent def evaluator(indent, column, page_width, ribbon_width): nonlocal multiline_strategy columns_left_in_line = page_width - column columns_left_in_ribbon = indent + ribbon_width - column available_width = min(columns_left_in_line, columns_left_in_ribbon) singleline_str_chars = len(s) + len('""') flat_version = pretty_single_line_str(s, peprint_indent) if singleline_str_chars <= available_width: if is_native_type: return flat_version return build_fncall(ctx, constructor, argdocs=[flat_version]) # multiline string each_line_starts_on_col = indent + peprint_indent each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width) each_line_max_str_len = each_line_ends_on_col - each_line_starts_on_col - 2 use_quote = determine_quote_strategy(s) lines = str_to_lines( max_len=each_line_max_str_len, use_quote=use_quote, s=s, ) parts = intersperse( HARDLINE, ( pretty_single_line_str( line, indent=peprint_indent, use_quote=use_quote, ) for line in lines ) ) if not is_native_type: multiline_strategy = MULTILINE_STATEGY_PLAIN if multiline_strategy == MULTILINE_STATEGY_PLAIN: res = always_break(concat(parts)) if is_native_type: return res return build_fncall(ctx, constructor, argdocs=[res]) elif multiline_strategy == MULTILINE_STATEGY_HANG: return always_break( nest( peprint_indent, concat(parts) ) ) else: if multiline_strategy == MULTILINE_STATEGY_PARENS: left_paren, right_paren = LPAREN, RPAREN else: assert multiline_strategy == MULTILINE_STATEGY_INDENTED left_paren, right_paren = '', '' return always_break( concat([ left_paren, nest( peprint_indent, concat([ HARDLINE, *parts, ]) ), ( HARDLINE if multiline_strategy == MULTILINE_STATEGY_PARENS else NIL ), right_paren ]) ) return contextual(evaluator) def _pretty_recursion(value): return f'<Recursion on {type(value).__name__} with id={id(value)}>' def 
python_to_sdocs(value, indent, width, depth, ribbon_width=71): if depth is None: depth = float('inf') doc = pretty_python_value( value, ctx=PrettyContext(indent=indent, depth_left=depth, visited=set()) ) if is_commented(doc): doc = group( flat_choice( when_flat=concat([ doc, ' ', commentdoc(doc.annotation.value), ]), when_broken=concat([ commentdoc(doc.annotation.value), HARDLINE, doc ]) ) ) ribbon_frac = min(1.0, ribbon_width / width) return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
26.305699
88
0.552886
import inspect import math import re from functools import singledispatch, partial from itertools import chain, cycle from .api import ( always_break, annotate, concat, contextual, flat_choice, fill, group, nest, NIL, LINE, SOFTLINE, HARDLINE ) from .doc import ( Annotated, Doc ) from .layout import layout_smart from .syntax import Token from .utils import identity, intersperse UNSET_SENTINEL = object() COMMA = annotate(Token.PUNCTUATION, ',') COLON = annotate(Token.PUNCTUATION, ':') ELLIPSIS = annotate(Token.PUNCTUATION, '...') LPAREN = annotate(Token.PUNCTUATION, '(') RPAREN = annotate(Token.PUNCTUATION, ')') LBRACKET = annotate(Token.PUNCTUATION, '[') RBRACKET = annotate(Token.PUNCTUATION, ']') LBRACE = annotate(Token.PUNCTUATION, '{') RBRACE = annotate(Token.PUNCTUATION, '}') NEG_OP = annotate(Token.OPERATOR, '-') MUL_OP = annotate(Token.OPERATOR, '*') ADD_OP = annotate(Token.OPERATOR, '+') ASSIGN_OP = annotate(Token.OPERATOR, '=') WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)') WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)') NONWORD_PATTERN_TEXT = re.compile(r'(\W+)') NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)') MULTILINE_STATEGY_PARENS = 'MULTILINE_STATEGY_PARENS' MULTILINE_STATEGY_INDENTED = 'MULTILINE_STATEGY_INDENTED' MULTILINE_STATEGY_HANG = 'MULTILINE_STATEGY_HANG' MULTILINE_STATEGY_PLAIN = 'MULTILINE_STATEGY_PLAIN' IMPLICIT_MODULES = { '__main__', 'builtins', } class CommentAnnotation: def __init__(self, value): assert isinstance(value, str) self.value = value def __repr__(self): return f'ValueComment({repr(self.value)})' class _CommentedValue: def __init__(self, value, comment): self.value = value self.comment = comment class _TrailingCommentedValue: def __init__(self, value, comment): self.value = value self.comment = comment def annotate_comment(comment, doc): return annotate(CommentAnnotation(comment), doc) def comment(comment_str, value): return _CommentedValue(value, comment_str) def trailing_comment(comment_str, value): return 
_TrailingCommentedValue(value, comment_str) def unwrap_comments(value): comment = None trailing_comment = None while isinstance(value, (_CommentedValue, _TrailingCommentedValue)): if isinstance(value, _CommentedValue): comment = value.comment value = value.value elif isinstance(value, _TrailingCommentedValue): trailing_comment = value.comment value = value.value return (value, comment, trailing_comment) def is_commented(value): return ( isinstance(value, Annotated) and isinstance(value.annotation, CommentAnnotation) ) def builtin_identifier(s): return annotate(Token.NAME_BUILTIN, s) def identifier(s): return annotate(Token.NAME_FUNCTION, s) def general_identifier(s): if callable(s): module, qualname = s.__module__, s.__qualname__ if module in IMPLICIT_MODULES: if module == 'builtins': return builtin_identifier(qualname) return identifier(qualname) return identifier(f'{module}.{qualname}') return identifier(s) def classattr(cls, attrname): return concat([ general_identifier(cls), identifier(f'.{attrname}') ]) class PrettyContext: __slots__ = ( 'indent', 'depth_left', 'visited', 'multiline_strategy', 'user_ctx' ) def __init__( self, indent, depth_left, visited=None, multiline_strategy=MULTILINE_STATEGY_PLAIN, user_ctx=None, ): self.indent = indent self.depth_left = depth_left self.multiline_strategy = multiline_strategy if visited is None: visited = set() self.visited = visited if user_ctx is None: user_ctx = {} self.user_ctx = user_ctx def _replace(self, **kwargs): passed_keys = set(kwargs.keys()) fieldnames = type(self).__slots__ assert passed_keys.issubset(set(fieldnames)) return PrettyContext( **{ k: ( kwargs[k] if k in passed_keys else getattr(self, k) ) for k in fieldnames } ) def use_multiline_strategy(self, strategy): return self._replace(multiline_strategy=strategy) def set(self, key, value): return self._replace(user_ctx={ **self.user_ctx, key: value, }) def get(self, key, default=None): return self.user_ctx.get(key, default) def nested_call(self): return 
self._replace(depth_left=self.depth_left - 1) def start_visit(self, value): self.visited.add(id(value)) def end_visit(self, value): self.visited.remove(id(value)) def is_visited(self, value): return id(value) in self.visited def _run_pretty(pretty_fn, value, ctx, trailing_comment=None): if ctx.is_visited(value): return _pretty_recursion(value) ctx.start_visit(value) if trailing_comment: doc = pretty_fn(value, ctx, trailing_comment=trailing_comment) else: doc = pretty_fn(value, ctx) if not ( isinstance(doc, str) or isinstance(doc, Doc) ): fnname = f'{pretty_fn.__module__}.{pretty_fn.__qualname__}' raise ValueError( 'Functions decorated with register_pretty must return ' f'an instance of str or Doc. {fnname} returned ' f'{repr(doc)} instead.' ) ctx.end_visit(value) return doc _PREDICATE_REGISTRY = [] def _repr_pretty(value, ctx): for predicate, fn in _PREDICATE_REGISTRY: if predicate(value): return fn(value, ctx) return repr(value) pretty_dispatch = singledispatch(partial(_run_pretty, _repr_pretty)) def pretty_python_value(value, ctx): comment = None trailing_comment = None value, comment, trailing_comment = unwrap_comments(value) if trailing_comment: doc = pretty_dispatch( value, ctx, trailing_comment=trailing_comment ) else: doc = pretty_dispatch( value, ctx ) if comment: return annotate_comment( comment, doc ) return doc def register_pretty(type=None, predicate=None): if type is None and predicate is None: raise ValueError( "You must provide either the 'type' or 'predicate' argument." 
) if type is not None and predicate is not None: raise ValueError( "You must provide either the 'type' or 'predicate' argument," "but not both" ) if predicate is not None: if not callable(predicate): raise ValueError( f"Expected a callable for 'predicate', got {repr(predicate)}" ) def decorator(fn): sig = inspect.signature(fn) value = None ctx = None try: sig.bind(value, ctx) except TypeError: fnname = f'{fn.__module__}.{fn.__qualname__}' raise ValueError( "Functions decorated with register_pretty must accept " "exactly two positional parameters: 'value' and 'ctx'. " f"The function signature for {fnname} was not compatible." ) if type: pretty_dispatch.register(type, partial(_run_pretty, fn)) else: assert callable(predicate) _PREDICATE_REGISTRY.append((predicate, fn)) return fn return decorator def bracket(ctx, left, child, right): return concat([ left, nest(ctx.indent, concat([SOFTLINE, child])), SOFTLINE, right ]) def commentdoc(text): if not text: raise ValueError( f'Expected non-empty comment str, got {repr(text)}' ) commentlines = [] for line in text.splitlines(): alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line))) starts_with_whitespace = bool( WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0]) ) if starts_with_whitespace: prefix = alternating_words_ws[0] alternating_words_ws = alternating_words_ws[1:] else: prefix = NIL if len(alternating_words_ws) % 2 == 0: alternating_words_ws = alternating_words_ws[:-1] for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))): part, is_ws = tup if is_ws: alternating_words_ws[idx] = flat_choice( when_flat=part, when_broken=always_break( concat([ HARDLINE, '# ', ]) ) ) commentlines.append( concat([ '# ', prefix, fill(alternating_words_ws) ]) ) outer = identity if len(commentlines) > 1: outer = always_break return annotate( Token.COMMENT_SINGLE, outer(concat(intersperse(HARDLINE, commentlines))) ) def sequence_of_docs(ctx, left, docs, right, dangle=False): docs = list(docs) 
minimum_output_len = ( 2 + len(', ') * (len(docs) - 1) + len(docs) ) MAX_PRACTICAL_RIBBON_WIDTH = 150 will_break = minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH has_comment = any(is_commented(doc) for doc in docs) parts = [] for idx, doc in enumerate(docs): last = idx == len(docs) - 1 if is_commented(doc): comment_str = doc.annotation.value flat_version = concat([ doc, COMMA if not last else NIL, ' ', commentdoc(comment_str), HARDLINE if not last else NIL ]) broken_version = concat([ commentdoc(comment_str), HARDLINE, doc, COMMA if not last else NIL, HARDLINE if not last else NIL ]) parts.append( group( flat_choice( when_flat=flat_version, when_broken=broken_version, ) ) ) else: parts.append(doc) if not last: parts.append( concat([COMMA, LINE]) ) if dangle: parts.append(COMMA) outer = ( always_break if will_break or has_comment else group ) return outer(bracket(ctx, left, concat(parts), right)) def prettycall(ctx, fn, *args, **kwargs): fndoc = general_identifier(fn) if ctx.depth_left <= 0: return concat([fndoc, LPAREN, ELLIPSIS, RPAREN]) if not kwargs and len(args) == 1: sole_arg = args[0] unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0]) if type(unwrapped_sole_arg) in (list, dict, tuple): return build_fncall( ctx, fndoc, argdocs=[pretty_python_value(sole_arg, ctx)], hug_sole_arg=True, ) nested_ctx = ( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_HANG) ) return build_fncall( ctx, fndoc, argdocs=( pretty_python_value(arg, nested_ctx) for arg in args ), kwargdocs=( (kwarg, pretty_python_value(v, nested_ctx)) for kwarg, v in kwargs.items() ), ) def build_fncall( ctx, fndoc, argdocs=(), kwargdocs=(), hug_sole_arg=False, trailing_comment=None, ): if callable(fndoc): fndoc = general_identifier(fndoc) has_comment = bool(trailing_comment) argdocs = list(argdocs) kwargdocs = list(kwargdocs) kwargdocs = [ ( annotate_comment( doc.annotation.value, concat([binding, ASSIGN_OP, doc.doc]) ) if is_commented(doc) else concat([binding, 
ASSIGN_OP, doc]) ) for binding, doc in kwargdocs ] if not (argdocs or kwargdocs): return concat([ fndoc, LPAREN, RPAREN, ]) if ( hug_sole_arg and not kwargdocs and len(argdocs) == 1 and not is_commented(argdocs[0]) ): return group( concat([ fndoc, LPAREN, argdocs[0], RPAREN ]) ) allarg_docs = [*argdocs, *kwargdocs] if trailing_comment: allarg_docs.append(commentdoc(trailing_comment)) parts = [] for idx, doc in enumerate(allarg_docs): last = idx == len(allarg_docs) - 1 if is_commented(doc): has_comment = True comment_str = doc.annotation.value doc = doc.doc else: comment_str = None part = concat([doc, NIL if last else COMMA]) if comment_str: part = group( flat_choice( when_flat=concat([ part, ' ', commentdoc(comment_str) ]), when_broken=concat([ commentdoc(comment_str), HARDLINE, part, ]), ) ) if not last: part = concat([part, HARDLINE if has_comment else LINE]) parts.append(part) outer = ( always_break if has_comment else group ) return outer( concat([ fndoc, LPAREN, nest( ctx.indent, concat([ SOFTLINE, concat(parts), ]) ), SOFTLINE, RPAREN ]) ) @register_pretty(tuple) @register_pretty(list) @register_pretty(set) def pretty_bracketable_iterable(value, ctx, trailing_comment=None): dangle = False if isinstance(value, list): left, right = LBRACKET, RBRACKET elif isinstance(value, tuple): left, right = LPAREN, RPAREN if len(value) == 1: dangle = True elif isinstance(value, set): left, right = LBRACE, RBRACE if not value: if isinstance(value, (list, tuple)): return concat([left, right]) else: assert isinstance(value, set) return prettycall(ctx, set) if ctx.depth_left == 0: return concat([left, ELLIPSIS, right]) if len(value) == 1: sole_value = list(value)[0] els = [ pretty_python_value( sole_value, ctx=( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_PLAIN) ) ) ] else: els = ( pretty_python_value( el, ctx=( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_HANG) ) ) for el in value ) if trailing_comment: els = chain(els, 
[commentdoc(trailing_comment)]) dangle = False return sequence_of_docs(ctx, left, els, right, dangle=dangle) @register_pretty(frozenset) def pretty_frozenset(value, ctx): if value: return prettycall(ctx, frozenset, list(value)) return prettycall(ctx, frozenset) class _AlwaysSortable(object): __slots__ = ('value', ) def __init__(self, value): self.value = value def sortable_value(self): return (str(type(self)), id(self)) def __lt__(self, other): try: return self.value < other.value except TypeError: return self.sortable_value() < other.sortable_value() @register_pretty(dict) def pretty_dict(d, ctx): if ctx.depth_left == 0: return concat([LBRACE, ELLIPSIS, RBRACE]) has_comment = False pairs = [] for k in sorted(d.keys(), key=_AlwaysSortable): v = d[k] if isinstance(k, (str, bytes)): kdoc = pretty_str( k, ctx=ctx.use_multiline_strategy(MULTILINE_STATEGY_PARENS), ) else: kdoc = pretty_python_value( k, ctx=ctx.nested_call() ) vdoc = pretty_python_value( v, ctx=( ctx .nested_call() .use_multiline_strategy(MULTILINE_STATEGY_INDENTED) ), ) kcomment = None if is_commented(kdoc): has_comment = True kcomment = kdoc.annotation.value kdoc = kdoc.doc vcomment = None if is_commented(vdoc): has_comment = True vcomment = vdoc.annotation.value vdoc = vdoc.doc pairs.append((k, v, kdoc, vdoc, kcomment, vcomment)) parts = [] for idx, tup in enumerate(pairs): last = idx == len(pairs) - 1 k, v, kdoc, vdoc, kcomment, vcomment = tup if not (kcomment or vcomment): parts.append( concat([ kdoc, concat([COLON, ' ']), vdoc, NIL if last else COMMA, NIL if last else LINE, ]), ) continue if kcomment: kcommented = concat([ commentdoc(kcomment), HARDLINE, kdoc, ]) else: kcommented = kdoc if vcomment: vcommented = group( flat_choice( when_flat=concat([ vdoc, NIL if last else COMMA, ' ', commentdoc(vcomment), NIL if last else HARDLINE, ]), when_broken=concat([ nest( ctx.indent, concat([ HARDLINE, commentdoc(vcomment), HARDLINE, pretty_python_value( v, ctx=( ctx .nested_call() 
.use_multiline_strategy(MULTILINE_STATEGY_PLAIN) ), ), COMMA if not last else NIL, HARDLINE if not last else NIL ]) ), ]) ) ) else: vcommented = concat([ vdoc, COMMA if not last else NIL, LINE if not last else NIL ]) parts.append( concat([ kcommented, concat([COLON, ' ']), vcommented ]) ) doc = bracket( ctx, LBRACE, concat(parts), RBRACE, ) if len(pairs) > 2 or has_comment: doc = always_break(doc) else: doc = group(doc) return doc INF_FLOAT = float('inf') NEG_INF_FLOAT = float('-inf') @register_pretty(float) def pretty_float(value, ctx): if ctx.depth_left == 0: return prettycall(ctx, float, ...) if value == INF_FLOAT: return prettycall(ctx, float, 'inf') elif value == NEG_INF_FLOAT: return prettycall(ctx, float, '-inf') elif math.isnan(value): return prettycall(ctx, float, 'nan') return annotate(Token.NUMBER_FLOAT, repr(value)) @register_pretty(int) def pretty_int(value, ctx): if ctx.depth_left == 0: return prettycall(ctx, int, ...) return annotate(Token.NUMBER_INT, repr(value)) @register_pretty(type(...)) def pretty_ellipsis(value, ctx): return ELLIPSIS @register_pretty(bool) @register_pretty(type(None)) def pretty_singletons(value, ctx): return annotate(Token.KEYWORD_CONSTANT, repr(value)) SINGLE_QUOTE_TEXT = "'" SINGLE_QUOTE_BYTES = b"'" DOUBLE_QUOTE_TEXT = '"' DOUBLE_QUOTE_BYTES = b'"' def determine_quote_strategy(s): if isinstance(s, str): single_quote = SINGLE_QUOTE_TEXT double_quote = DOUBLE_QUOTE_TEXT else: single_quote = SINGLE_QUOTE_BYTES double_quote = DOUBLE_QUOTE_BYTES contains_single = single_quote in s contains_double = double_quote in s if not contains_single: return SINGLE_QUOTE_TEXT if not contains_double: return DOUBLE_QUOTE_TEXT assert contains_single and contains_double single_count = s.count(single_quote) double_count = s.count(double_quote) if single_count <= double_count: return SINGLE_QUOTE_TEXT return DOUBLE_QUOTE_TEXT def escape_str_for_quote(use_quote, s): escaped_with_quotes = repr(s) repr_used_quote = escaped_with_quotes[-1] 
first_quote_at_index = escaped_with_quotes.find(repr_used_quote) repr_escaped = escaped_with_quotes[first_quote_at_index + 1:-1] if repr_used_quote == use_quote: return repr_escaped if use_quote == SINGLE_QUOTE_TEXT: return ( repr_escaped .replace('\\"', DOUBLE_QUOTE_TEXT) .replace(SINGLE_QUOTE_TEXT, "\\'") ) else: # repr used single quotes return ( repr_escaped .replace("\\'", SINGLE_QUOTE_TEXT) .replace(DOUBLE_QUOTE_TEXT, '\\"') ) STR_LITERAL_ESCAPES = re.compile( r'''((?:\\[\\abfnrtv"'])|''' r'(?:\\N\{.*?\})|' r'(?:\\u[a-fA-F0-9]{4})|' r'(?:\\U[a-fA-F0-9]{8})|' r'(?:\\x[a-fA-F0-9]{2})|' r'(?:\\[0-7]{1,3}))' ) def highlight_escapes(s): if not s: return NIL matches = STR_LITERAL_ESCAPES.split(s) starts_with_match = bool(STR_LITERAL_ESCAPES.match(matches[0])) docs = [] for part, is_escaped in zip( matches, cycle([starts_with_match, not starts_with_match]) ): if not part: continue docs.append( annotate( ( Token.STRING_ESCAPE if is_escaped else Token.LITERAL_STRING ), part ) ) return concat(docs) def pretty_single_line_str(s, indent, use_quote=None): prefix = ( annotate(Token.STRING_AFFIX, 'b') if isinstance(s, bytes) else '' ) if use_quote is None: use_quote = determine_quote_strategy(s) escaped = escape_str_for_quote(use_quote, s) escapes_highlighted = highlight_escapes(escaped) return concat([ prefix, annotate( Token.LITERAL_STRING, concat([ use_quote, escapes_highlighted, use_quote ]) ) ]) def split_at(idx, sequence): return (sequence[:idx], sequence[idx:]) def escaped_len(s, use_quote): return len(escape_str_for_quote(use_quote, s)) def str_to_lines(max_len, use_quote, s): if isinstance(s, str): whitespace_pattern = WHITESPACE_PATTERN_TEXT nonword_pattern = NONWORD_PATTERN_TEXT empty = '' else: assert isinstance(s, bytes) whitespace_pattern = WHITESPACE_PATTERN_BYTES nonword_pattern = NONWORD_PATTERN_BYTES empty = b'' alternating_words_ws = whitespace_pattern.split(s) if len(alternating_words_ws) <= 1: # no whitespace: try splitting with nonword pattern. 
alternating_words_ws = nonword_pattern.split(s) starts_with_whitespace = bool(nonword_pattern.match(alternating_words_ws[0])) else: starts_with_whitespace = bool(whitespace_pattern.match(alternating_words_ws[0])) # List[Tuple[str, bool]] # The boolean associated with each part indicates if it is a # whitespce/non-word part or not. tagged_alternating = list( zip( alternating_words_ws, cycle([starts_with_whitespace, not starts_with_whitespace]) ) ) remaining_stack = list(reversed(tagged_alternating)) curr_line_parts = [] curr_line_len = 0 while remaining_stack: curr, is_whitespace = remaining_stack.pop() curr_line_parts.append(curr) curr_line_len += escaped_len(curr, use_quote) if curr_line_len == max_len: if not is_whitespace and len(curr_line_parts) > 2: curr_line_parts.pop() yield empty.join(curr_line_parts) curr_line_parts = [] curr_line_len = 0 remaining_stack.append((curr, is_whitespace)) else: yield empty.join(curr_line_parts) curr_line_parts = [] curr_line_len = 0 continue elif curr_line_len > max_len: if not is_whitespace and len(curr_line_parts) > 1: curr_line_parts.pop() yield empty.join(curr_line_parts) remaining_stack.append((curr, is_whitespace)) curr_line_parts = [] curr_line_len = 0 continue curr_line_parts.pop() remaining_len = max_len - (curr_line_len - escaped_len(curr, use_quote)) this_line_part, next_line_part = split_at(max(remaining_len, 0), curr) curr_line_parts.append(this_line_part) yield empty.join(curr_line_parts) curr_line_parts = [] curr_line_len = 0 if next_line_part: remaining_stack.append((next_line_part, is_whitespace)) if curr_line_parts: yield empty.join(curr_line_parts) @register_pretty(str) @register_pretty(bytes) def pretty_str(s, ctx): # Subclasses of str/bytes # will be printed as StrSubclass('the actual string') constructor = type(s) is_native_type = constructor in (str, bytes) if ctx.depth_left == 0: if isinstance(s, str): return prettycall(ctx, constructor, ...) 
else: assert isinstance(s, bytes) return prettycall(ctx, constructor, ...) multiline_strategy = ctx.multiline_strategy peprint_indent = ctx.indent def evaluator(indent, column, page_width, ribbon_width): nonlocal multiline_strategy columns_left_in_line = page_width - column columns_left_in_ribbon = indent + ribbon_width - column available_width = min(columns_left_in_line, columns_left_in_ribbon) singleline_str_chars = len(s) + len('""') flat_version = pretty_single_line_str(s, peprint_indent) if singleline_str_chars <= available_width: if is_native_type: return flat_version return build_fncall(ctx, constructor, argdocs=[flat_version]) # multiline string each_line_starts_on_col = indent + peprint_indent each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width) each_line_max_str_len = each_line_ends_on_col - each_line_starts_on_col - 2 use_quote = determine_quote_strategy(s) lines = str_to_lines( max_len=each_line_max_str_len, use_quote=use_quote, s=s, ) parts = intersperse( HARDLINE, ( pretty_single_line_str( line, indent=peprint_indent, use_quote=use_quote, ) for line in lines ) ) if not is_native_type: multiline_strategy = MULTILINE_STATEGY_PLAIN if multiline_strategy == MULTILINE_STATEGY_PLAIN: res = always_break(concat(parts)) if is_native_type: return res return build_fncall(ctx, constructor, argdocs=[res]) elif multiline_strategy == MULTILINE_STATEGY_HANG: return always_break( nest( peprint_indent, concat(parts) ) ) else: if multiline_strategy == MULTILINE_STATEGY_PARENS: left_paren, right_paren = LPAREN, RPAREN else: assert multiline_strategy == MULTILINE_STATEGY_INDENTED left_paren, right_paren = '', '' return always_break( concat([ left_paren, nest( peprint_indent, concat([ HARDLINE, *parts, ]) ), ( HARDLINE if multiline_strategy == MULTILINE_STATEGY_PARENS else NIL ), right_paren ]) ) return contextual(evaluator) def _pretty_recursion(value): return f'<Recursion on {type(value).__name__} with id={id(value)}>' def 
python_to_sdocs(value, indent, width, depth, ribbon_width=71): if depth is None: depth = float('inf') doc = pretty_python_value( value, ctx=PrettyContext(indent=indent, depth_left=depth, visited=set()) ) if is_commented(doc): doc = group( flat_choice( when_flat=concat([ doc, ' ', commentdoc(doc.annotation.value), ]), when_broken=concat([ commentdoc(doc.annotation.value), HARDLINE, doc ]) ) ) ribbon_frac = min(1.0, ribbon_width / width) return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
true
true
f70ee17398871ce80a6a796f7ad57bf07638ed4a
2,933
py
Python
fastai/callback/tensorboard.py
mone27/fastai
af8dfc07ca3f333f8c1bdbea1803af669a53738f
[ "Apache-2.0" ]
5
2020-08-27T00:52:27.000Z
2022-03-31T02:46:05.000Z
fastai/callback/tensorboard.py
mone27/fastai
af8dfc07ca3f333f8c1bdbea1803af669a53738f
[ "Apache-2.0" ]
22
2021-01-07T23:35:00.000Z
2022-03-20T00:16:40.000Z
fastai/callback/tensorboard.py
mone27/fastai
af8dfc07ca3f333f8c1bdbea1803af669a53738f
[ "Apache-2.0" ]
2
2021-04-17T03:33:21.000Z
2022-02-25T19:32:34.000Z
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/71_callback.tensorboard.ipynb (unless otherwise specified). __all__ = ['TensorBoardCallback'] # Cell from ..basics import * # Cell import tensorboard from torch.utils.tensorboard import SummaryWriter from .fp16 import ModelToHalf # Cell class TensorBoardCallback(Callback): "Saves model topology, losses & metrics" def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9): store_attr(self, 'log_dir,trace_model,log_preds,n_preds') def before_fit(self): self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0 self.writer = SummaryWriter(log_dir=self.log_dir) if self.trace_model: if hasattr(self.learn, 'mixed_precision'): raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.") b = self.dls.one_batch() self.learn._split(b) self.writer.add_graph(self.model, *self.xb) def after_batch(self): self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter) for i,h in enumerate(self.opt.hypers): for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter) def after_epoch(self): for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]): self.writer.add_scalar(n, v, self.train_iter) if self.log_preds: b = self.dls.valid.one_batch() self.learn.one_batch(0, b) preds = getattr(self.loss_func, 'activation', noop)(self.pred) out = getattr(self.loss_func, 'decodes', noop)(preds) x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds) tensorboard_log(x, y, its, outs, self.writer, self.train_iter) def after_fit(self): self.writer.close() # Cell from ..vision.data import * # Cell @typedispatch def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True) for i in range(2): axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)] axs = [r.show(ctx=c, color='green' if b==r 
else 'red') for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)] writer.add_figure('Sample results', fig, step) # Cell from ..vision.core import TensorPoint,TensorBBox # Cell @typedispatch def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True) for i in range(2): axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])] for x in [samples,outs]: axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])] writer.add_figure('Sample results', fig, step)
41.309859
116
0.663484
__all__ = ['TensorBoardCallback'] from ..basics import * import tensorboard from torch.utils.tensorboard import SummaryWriter from .fp16 import ModelToHalf class TensorBoardCallback(Callback): def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9): store_attr(self, 'log_dir,trace_model,log_preds,n_preds') def before_fit(self): self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0 self.writer = SummaryWriter(log_dir=self.log_dir) if self.trace_model: if hasattr(self.learn, 'mixed_precision'): raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.") b = self.dls.one_batch() self.learn._split(b) self.writer.add_graph(self.model, *self.xb) def after_batch(self): self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter) for i,h in enumerate(self.opt.hypers): for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter) def after_epoch(self): for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]): self.writer.add_scalar(n, v, self.train_iter) if self.log_preds: b = self.dls.valid.one_batch() self.learn.one_batch(0, b) preds = getattr(self.loss_func, 'activation', noop)(self.pred) out = getattr(self.loss_func, 'decodes', noop)(preds) x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds) tensorboard_log(x, y, its, outs, self.writer, self.train_iter) def after_fit(self): self.writer.close() from ..vision.data import * @typedispatch def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True) for i in range(2): axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)] axs = [r.show(ctx=c, color='green' if b==r else 'red') for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)] writer.add_figure('Sample results', fig, step) from ..vision.core import TensorPoint,TensorBBox @typedispatch def 
tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True) for i in range(2): axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])] for x in [samples,outs]: axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])] writer.add_figure('Sample results', fig, step)
true
true
f70ee1a5cbe2ca7f5dad96cb3fc51ffba04714a7
4,057
py
Python
plugins/action/session_service_node_info.py
steinzi/ansible-ise
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
[ "MIT" ]
null
null
null
plugins/action/session_service_node_info.py
steinzi/ansible-ise
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
[ "MIT" ]
null
null
null
plugins/action/session_service_node_info.py
steinzi/ansible-ise
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
[ "MIT" ]
null
null
null
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase try: from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import ( AnsibleArgSpecValidator, ) except ImportError: ANSIBLE_UTILS_IS_INSTALLED = False else: ANSIBLE_UTILS_IS_INSTALLED = True from ansible.errors import AnsibleActionFail from ansible_collections.cisco.ise.plugins.module_utils.ise import ( ISESDK, ise_argument_spec, ) # Get common arguements specification argument_spec = ise_argument_spec() # Add arguments specific for this module argument_spec.update(dict( name=dict(type="str"), id=dict(type="str"), page=dict(type="int"), size=dict(type="int"), )) required_if = [] required_one_of = [] mutually_exclusive = [] required_together = [] class ActionModule(ActionBase): def __init__(self, *args, **kwargs): if not ANSIBLE_UTILS_IS_INSTALLED: raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'") super(ActionModule, self).__init__(*args, **kwargs) self._supports_async = True self._result = None # Checks the supplied parameters against the argument spec for this module def _check_argspec(self): aav = AnsibleArgSpecValidator( data=self._task.args, schema=dict(argument_spec=argument_spec), schema_format="argspec", schema_conditionals=dict( required_if=required_if, required_one_of=required_one_of, mutually_exclusive=mutually_exclusive, required_together=required_together, ), name=self._task.action, ) valid, errors, self._task.args = aav.validate() if not valid: raise AnsibleActionFail(errors) def get_object(self, params): new_object = dict( name=params.get("name"), id=params.get("id"), page=params.get("page"), size=params.get("size"), ) return new_object def run(self, tmp=None, task_vars=None): self._task.diff = False self._result = super(ActionModule, self).run(tmp, task_vars) self._result["changed"] = False self._check_argspec() ise = 
ISESDK(params=self._task.args) id = self._task.args.get("id") name = self._task.args.get("name") if id: response = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_by_id', params=self.get_object(self._task.args) ).response['SessionServiceNode'] self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result if name: response = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_by_name', params=self.get_object(self._task.args) ).response['SessionServiceNode'] self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result if not name and not id: response = [] generator = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_generator', params=self.get_object(self._task.args), ) for item in generator: tmp_response = item.response['SearchResult']['resources'] if isinstance(tmp_response, list): response += tmp_response else: response.append(tmp_response) self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result
35.902655
128
0.627064
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase try: from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import ( AnsibleArgSpecValidator, ) except ImportError: ANSIBLE_UTILS_IS_INSTALLED = False else: ANSIBLE_UTILS_IS_INSTALLED = True from ansible.errors import AnsibleActionFail from ansible_collections.cisco.ise.plugins.module_utils.ise import ( ISESDK, ise_argument_spec, ) argument_spec = ise_argument_spec() argument_spec.update(dict( name=dict(type="str"), id=dict(type="str"), page=dict(type="int"), size=dict(type="int"), )) required_if = [] required_one_of = [] mutually_exclusive = [] required_together = [] class ActionModule(ActionBase): def __init__(self, *args, **kwargs): if not ANSIBLE_UTILS_IS_INSTALLED: raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'") super(ActionModule, self).__init__(*args, **kwargs) self._supports_async = True self._result = None def _check_argspec(self): aav = AnsibleArgSpecValidator( data=self._task.args, schema=dict(argument_spec=argument_spec), schema_format="argspec", schema_conditionals=dict( required_if=required_if, required_one_of=required_one_of, mutually_exclusive=mutually_exclusive, required_together=required_together, ), name=self._task.action, ) valid, errors, self._task.args = aav.validate() if not valid: raise AnsibleActionFail(errors) def get_object(self, params): new_object = dict( name=params.get("name"), id=params.get("id"), page=params.get("page"), size=params.get("size"), ) return new_object def run(self, tmp=None, task_vars=None): self._task.diff = False self._result = super(ActionModule, self).run(tmp, task_vars) self._result["changed"] = False self._check_argspec() ise = ISESDK(params=self._task.args) id = self._task.args.get("id") name = self._task.args.get("name") if id: response = ise.exec( 
family="psn_node_details_with_radius_service", function='get_session_service_node_by_id', params=self.get_object(self._task.args) ).response['SessionServiceNode'] self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result if name: response = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_by_name', params=self.get_object(self._task.args) ).response['SessionServiceNode'] self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result if not name and not id: response = [] generator = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_generator', params=self.get_object(self._task.args), ) for item in generator: tmp_response = item.response['SearchResult']['resources'] if isinstance(tmp_response, list): response += tmp_response else: response.append(tmp_response) self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result
true
true
f70ee1ba459f4fe70e554fa936e19ebdb676d822
10,877
py
Python
azure-batch/azure/batch/models/cloud_pool.py
HydAu/AzureSDKForPython
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
[ "Apache-2.0" ]
null
null
null
azure-batch/azure/batch/models/cloud_pool.py
HydAu/AzureSDKForPython
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
[ "Apache-2.0" ]
null
null
null
azure-batch/azure/batch/models/cloud_pool.py
HydAu/AzureSDKForPython
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft and contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class CloudPool(Model): """ A pool in the Azure Batch service. :param id: A string that uniquely identifies the pool within the account. The id can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. :type id: str :param display_name: The display name for the pool. :type display_name: str :param url: The URL of the pool. :type url: str :param e_tag: The ETag of the pool. :type e_tag: str :param last_modified: The last modified time of the pool. :type last_modified: datetime :param creation_time: The creation time of the pool. :type creation_time: datetime :param state: The current state of the pool. Possible values include: 'active', 'deleting', 'upgrading' :type state: str or :class:`PoolState <azure.batch.models.PoolState>` :param state_transition_time: The time at which the pool entered its current state. :type state_transition_time: datetime :param allocation_state: Whether the pool is resizing. 
Possible values include: 'steady', 'resizing', 'stopping' :type allocation_state: str or :class:`AllocationState <azure.batch.models.AllocationState>` :param allocation_state_transition_time: The time at which the pool entered its current allocation state. :type allocation_state_transition_time: datetime :param vm_size: The size of virtual machines in the pool. All virtual machines in a pool are the same size. :type vm_size: str :param cloud_service_configuration: The cloud service configuration for the pool. This property and VirtualMachineConfiguration are mutually exclusive and one of the properties must be specified. :type cloud_service_configuration: :class:`CloudServiceConfiguration <azure.batch.models.CloudServiceConfiguration>` :param virtual_machine_configuration: The virtual machine configuration for the pool. This property and CloudServiceConfiguration are mutually exclusive and one of the properties must be specified. :type virtual_machine_configuration: :class:`VirtualMachineConfiguration <azure.batch.models.VirtualMachineConfiguration>` :param resize_timeout: The timeout for allocation of compute nodes to the pool. In a Get Pool operation, this is the timeout for the most recent resize operation. The default value is 10 minutes. :type resize_timeout: timedelta :param resize_error: Details of any error encountered while performing the last resize on the pool. This property is set only if an error occurred during the last pool resize, and only when the pool AllocationState is Steady. :type resize_error: :class:`ResizeError <azure.batch.models.ResizeError>` :param current_dedicated: The number of compute nodes currently in the pool. :type current_dedicated: int :param target_dedicated: The desired number of compute nodes in the pool. This property must have the default value if EnableAutoScale is true. It is required if EnableAutoScale is false. 
:type target_dedicated: int :param enable_auto_scale: Whether the pool size should automatically adjust over time. If true, the AutoScaleFormula property must be set. If false, the TargetDedicated property must be set. :type enable_auto_scale: bool :param auto_scale_formula: A formula for the desired number of compute nodes in the pool. :type auto_scale_formula: str :param auto_scale_evaluation_interval: A time interval for the desired AutoScale evaluation period in the pool. :type auto_scale_evaluation_interval: timedelta :param auto_scale_run: The results and errors from the last execution of the autoscale formula. :type auto_scale_run: :class:`AutoScaleRun <azure.batch.models.AutoScaleRun>` :param enable_inter_node_communication: Whether the pool permits direct communication between nodes. :type enable_inter_node_communication: bool :param start_task: A task specified to run on each compute node as it joins the pool. :type start_task: :class:`StartTask <azure.batch.models.StartTask>` :param certificate_references: The list of certificates to be installed on each compute node in the pool. :type certificate_references: list of :class:`CertificateReference <azure.batch.models.CertificateReference>` :param application_package_references: The list of application packages to be installed on each compute node in the pool. :type application_package_references: list of :class:`ApplicationPackageReference <azure.batch.models.ApplicationPackageReference>` :param max_tasks_per_node: The maximum number of tasks that can run concurrently on a single compute node in the pool. :type max_tasks_per_node: int :param task_scheduling_policy: How the Batch service distributes tasks between compute nodes in the pool. :type task_scheduling_policy: :class:`TaskSchedulingPolicy <azure.batch.models.TaskSchedulingPolicy>` :param metadata: A list of name-value pairs associated with the pool as metadata. 
:type metadata: list of :class:`MetadataItem <azure.batch.models.MetadataItem>` :param stats: Utilization and resource usage statistics for the entire lifetime of the pool. :type stats: :class:`PoolStatistics <azure.batch.models.PoolStatistics>` """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, 'state': {'key': 'state', 'type': 'PoolState'}, 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, 'vm_size': {'key': 'vmSize', 'type': 'str'}, 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, 'resize_error': {'key': 'resizeError', 'type': 'ResizeError'}, 'current_dedicated': {'key': 'currentDedicated', 'type': 'int'}, 'target_dedicated': {'key': 'targetDedicated', 'type': 'int'}, 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, 'start_task': {'key': 'startTask', 'type': 'StartTask'}, 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, 'application_package_references': {'key': 'applicationPackageReferences', 'type': 
'[ApplicationPackageReference]'}, 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, } def __init__(self, id=None, display_name=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_error=None, current_dedicated=None, target_dedicated=None, enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication=None, start_task=None, certificate_references=None, application_package_references=None, max_tasks_per_node=None, task_scheduling_policy=None, metadata=None, stats=None): self.id = id self.display_name = display_name self.url = url self.e_tag = e_tag self.last_modified = last_modified self.creation_time = creation_time self.state = state self.state_transition_time = state_transition_time self.allocation_state = allocation_state self.allocation_state_transition_time = allocation_state_transition_time self.vm_size = vm_size self.cloud_service_configuration = cloud_service_configuration self.virtual_machine_configuration = virtual_machine_configuration self.resize_timeout = resize_timeout self.resize_error = resize_error self.current_dedicated = current_dedicated self.target_dedicated = target_dedicated self.enable_auto_scale = enable_auto_scale self.auto_scale_formula = auto_scale_formula self.auto_scale_evaluation_interval = auto_scale_evaluation_interval self.auto_scale_run = auto_scale_run self.enable_inter_node_communication = enable_inter_node_communication self.start_task = start_task self.certificate_references = certificate_references 
self.application_package_references = application_package_references self.max_tasks_per_node = max_tasks_per_node self.task_scheduling_policy = task_scheduling_policy self.metadata = metadata self.stats = stats
56.651042
695
0.717753
from msrest.serialization import Model class CloudPool(Model): _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, 'state': {'key': 'state', 'type': 'PoolState'}, 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, 'vm_size': {'key': 'vmSize', 'type': 'str'}, 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, 'resize_error': {'key': 'resizeError', 'type': 'ResizeError'}, 'current_dedicated': {'key': 'currentDedicated', 'type': 'int'}, 'target_dedicated': {'key': 'targetDedicated', 'type': 'int'}, 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, 'start_task': {'key': 'startTask', 'type': 'StartTask'}, 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, 
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, } def __init__(self, id=None, display_name=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_error=None, current_dedicated=None, target_dedicated=None, enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication=None, start_task=None, certificate_references=None, application_package_references=None, max_tasks_per_node=None, task_scheduling_policy=None, metadata=None, stats=None): self.id = id self.display_name = display_name self.url = url self.e_tag = e_tag self.last_modified = last_modified self.creation_time = creation_time self.state = state self.state_transition_time = state_transition_time self.allocation_state = allocation_state self.allocation_state_transition_time = allocation_state_transition_time self.vm_size = vm_size self.cloud_service_configuration = cloud_service_configuration self.virtual_machine_configuration = virtual_machine_configuration self.resize_timeout = resize_timeout self.resize_error = resize_error self.current_dedicated = current_dedicated self.target_dedicated = target_dedicated self.enable_auto_scale = enable_auto_scale self.auto_scale_formula = auto_scale_formula self.auto_scale_evaluation_interval = auto_scale_evaluation_interval self.auto_scale_run = auto_scale_run self.enable_inter_node_communication = enable_inter_node_communication self.start_task = start_task self.certificate_references = certificate_references self.application_package_references = application_package_references self.max_tasks_per_node = max_tasks_per_node self.task_scheduling_policy = task_scheduling_policy self.metadata = metadata self.stats = 
stats
true
true
f70ee258c4ab57190cefb9567f8aebd2ad94bf85
8,509
py
Python
utils/inout.py
alarca94/senti-transfer
da83a072c8d471bc74aa25b237b5e301502db869
[ "MIT" ]
null
null
null
utils/inout.py
alarca94/senti-transfer
da83a072c8d471bc74aa25b237b5e301502db869
[ "MIT" ]
null
null
null
utils/inout.py
alarca94/senti-transfer
da83a072c8d471bc74aa25b237b5e301502db869
[ "MIT" ]
null
null
null
import os import yaml import pandas as pd import xml.etree.ElementTree as ET from types import SimpleNamespace from sklearn.model_selection import train_test_split from utils.experiment_utils import create_linspace from utils.preprocess import * SOURCE_PATH = './source_data' DATA_PATH = './data' CONFIG_PATH = './conf' DATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis'] class Colors: BLACK = '\033[1;30m' RED = '\033[1;31m' GREEN = '\033[1;32m' YELLOW = '\033[1;33m' BLUE = '\033[1;34m' PURPLE = '\033[1;35m' CYAN = '\033[1;36m' WHITE = '\033[1;37m' ENDC = '\033[0m' def colored(text, color): return f'{color}{text}{Colors.ENDC}' def write_split_files(dataset, trn, dev, tst): trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\t', mode='w') dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\t', mode='w') tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\t', mode='w') def prepare_files(): seed = 100 test_ratio = 0.2 # EmoEvent and HaterNet filename = 'original_es.tsv' data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\t'), 'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\|\\|;', names=['id', 'text', 'hateful'], header=None, engine="python")} labels = {'emoevent': 'offensive', 'haternet': 'hateful'} for dataset in data: data[dataset].text = basic_text_normalization(data[dataset].text) y = data[dataset][labels[dataset]] trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed) y = trn[labels[dataset]] trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed) write_split_files(dataset, trn, dev, tst) print(f'Dataset: {dataset} --> N. 
Instances: {data[dataset].shape[0]} --> Train, Dev., Test: ' f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}') # HatEval 2019 dataset = 'hateval2019' n_instances = {} for phase in ['train', 'dev', 'test']: data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',') data.text = basic_text_normalization(data.text) data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w') n_instances[phase] = data.shape[0] print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: ' f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}') # MEX-A3T dataset = 'mex-a3t' columns = ['text', 'aggressiveness'] trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\t', names=columns) tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\t', names=columns) trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed) for subset in [trn, dev, tst]: subset.text = basic_text_normalization(subset.text) write_split_files(dataset, trn, dev, tst) print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: ' f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}') # TASS 2019 dataset = 'tass2019' n_instances = {} for phase in ['train', 'dev', 'test']: phase_data = pd.DataFrame() for country in ['ES', 'CR', 'MX', 'PE', 'UY']: root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot() tweets = [] for item in root.iter('tweet'): tweet = {'country': country} for tweet_field in item.iter(): if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']: tweet[tweet_field.tag] = tweet_field.text tweets.append(tweet) phase_data = phase_data.append(tweets) new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'} phase_data.rename(columns=new_cols, inplace=True) phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']] phase_data.text = basic_text_normalization(phase_data.text) phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w') n_instances[phase] = phase_data.shape[0] print(f'Dataset: {dataset} --> N. 
Instances: {sum(n_instances.values())} --> Train, Dev., Test: ' f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}') # Universal Joy dataset = 'universal_joy' trn_data = {} for filename in ['small', 'large', 'combi']: trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv')) trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es'] trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning) # Apparently, spanish comments in 'large' and 'combi' are the same and 'small' is created using a subset of those trn = pd.concat(trn_data.values(), axis=0, ignore_index=True) trn.drop_duplicates(inplace=True, subset='text') # There is no overlapping between training, validation and test (also, they do not contain duplicates) dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv')) dev.drop_duplicates(inplace=True, subset='text') tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv')) tst.drop_duplicates(inplace=True, subset='text') # The test set approximately represents 12.5% of the total data # print(tst.shape[0]/(trn.shape[0] + dev.shape[0] + tst.shape[0])) # DETOXIS dataset = 'detoxis' trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',') tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',') trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed) for subset in [trn, dev, tst]: subset.rename(columns={'comment': 'text'}, inplace=True) subset.text = basic_text_normalization(subset.text) write_split_files(dataset, trn, dev, tst) print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: ' f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}') def read_datasets(datasets, tasks, lang='es'): data = {} for dataset in datasets: if dataset not in DATASETS: raise Exception(f'Dataset {dataset} is not in the list of available datasets!') data[dataset] = { 'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\t'), 'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\t'), 'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\t') } for phase in data[dataset]: data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]] return data def create_namespace_from_dict(dic, name=None): for k, v in dic.items(): if isinstance(v, dict): dic[k] = create_namespace_from_dict(v, k) ns = SimpleNamespace(**dic) ns.__name__ = name return ns def process_config(dic, name=None): for k, v in dic.items(): if k not in ['transfer_learning', 'optimization']: if isinstance(v, dict): dic[k] = process_config(v, k) elif isinstance(v, list): for vi in v: if isinstance(vi, dict): dic[k] += create_linspace(vi) dic[k] = dic[k][1:] else: dic[k] = [v] return dic def load_config(config_file): with open(os.path.join(CONFIG_PATH, config_file), 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) return process_config(config) # create_namespace_from_dict(config) def log(string, indent=0): start = '\t' * indent print(f'{start}{string}')
41.916256
120
0.616289
import os import yaml import pandas as pd import xml.etree.ElementTree as ET from types import SimpleNamespace from sklearn.model_selection import train_test_split from utils.experiment_utils import create_linspace from utils.preprocess import * SOURCE_PATH = './source_data' DATA_PATH = './data' CONFIG_PATH = './conf' DATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis'] class Colors: BLACK = '\033[1;30m' RED = '\033[1;31m' GREEN = '\033[1;32m' YELLOW = '\033[1;33m' BLUE = '\033[1;34m' PURPLE = '\033[1;35m' CYAN = '\033[1;36m' WHITE = '\033[1;37m' ENDC = '\033[0m' def colored(text, color): return f'{color}{text}{Colors.ENDC}' def write_split_files(dataset, trn, dev, tst): trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\t', mode='w') dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\t', mode='w') tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\t', mode='w') def prepare_files(): seed = 100 test_ratio = 0.2 filename = 'original_es.tsv' data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\t'), 'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\|\\|;', names=['id', 'text', 'hateful'], header=None, engine="python")} labels = {'emoevent': 'offensive', 'haternet': 'hateful'} for dataset in data: data[dataset].text = basic_text_normalization(data[dataset].text) y = data[dataset][labels[dataset]] trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed) y = trn[labels[dataset]] trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed) write_split_files(dataset, trn, dev, tst) print(f'Dataset: {dataset} --> N. 
Instances: {data[dataset].shape[0]} --> Train, Dev., Test: ' f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}') dataset = 'hateval2019' n_instances = {} for phase in ['train', 'dev', 'test']: data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',') data.text = basic_text_normalization(data.text) data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w') n_instances[phase] = data.shape[0] print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: ' f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}') dataset = 'mex-a3t' columns = ['text', 'aggressiveness'] trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\t', names=columns) tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\t', names=columns) trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed) for subset in [trn, dev, tst]: subset.text = basic_text_normalization(subset.text) write_split_files(dataset, trn, dev, tst) print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: ' f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}') dataset = 'tass2019' n_instances = {} for phase in ['train', 'dev', 'test']: phase_data = pd.DataFrame() for country in ['ES', 'CR', 'MX', 'PE', 'UY']: root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot() tweets = [] for item in root.iter('tweet'): tweet = {'country': country} for tweet_field in item.iter(): if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']: tweet[tweet_field.tag] = tweet_field.text tweets.append(tweet) phase_data = phase_data.append(tweets) new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'} phase_data.rename(columns=new_cols, inplace=True) phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']] phase_data.text = basic_text_normalization(phase_data.text) phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w') n_instances[phase] = phase_data.shape[0] print(f'Dataset: {dataset} --> N. 
Instances: {sum(n_instances.values())} --> Train, Dev., Test: ' f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}') dataset = 'universal_joy' trn_data = {} for filename in ['small', 'large', 'combi']: trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv')) trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es'] trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning) trn = pd.concat(trn_data.values(), axis=0, ignore_index=True) trn.drop_duplicates(inplace=True, subset='text') dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv')) dev.drop_duplicates(inplace=True, subset='text') tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv')) tst.drop_duplicates(inplace=True, subset='text') dataset = 'detoxis' trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',') tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',') trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed) for subset in [trn, dev, tst]: subset.rename(columns={'comment': 'text'}, inplace=True) subset.text = basic_text_normalization(subset.text) write_split_files(dataset, trn, dev, tst) print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: ' f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}') def read_datasets(datasets, tasks, lang='es'): data = {} for dataset in datasets: if dataset not in DATASETS: raise Exception(f'Dataset {dataset} is not in the list of available datasets!') data[dataset] = { 'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\t'), 'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\t'), 'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\t') } for phase in data[dataset]: data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]] return data def create_namespace_from_dict(dic, name=None): for k, v in dic.items(): if isinstance(v, dict): dic[k] = create_namespace_from_dict(v, k) ns = SimpleNamespace(**dic) ns.__name__ = name return ns def process_config(dic, name=None): for k, v in dic.items(): if k not in ['transfer_learning', 'optimization']: if isinstance(v, dict): dic[k] = process_config(v, k) elif isinstance(v, list): for vi in v: if isinstance(vi, dict): dic[k] += create_linspace(vi) dic[k] = dic[k][1:] else: dic[k] = [v] return dic def load_config(config_file): with open(os.path.join(CONFIG_PATH, config_file), 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) return process_config(config) def log(string, indent=0): start = '\t' * indent print(f'{start}{string}')
true
true
f70ee2731d6b20084634923bada7c3aa114f534c
2,124
py
Python
django/contrib/admin/templatetags/log.py
tomleo/django
ebfb71c64a786620947c9d598fd1ebae2958acff
[ "BSD-3-Clause" ]
1
2017-12-01T06:26:57.000Z
2017-12-01T06:26:57.000Z
django/contrib/admin/templatetags/log.py
liuliang2015/django
86d3079d5797811c1e118cfd600a913685212165
[ "BSD-3-Clause" ]
null
null
null
django/contrib/admin/templatetags/log.py
liuliang2015/django
86d3079d5797811c1e118cfd600a913685212165
[ "BSD-3-Clause" ]
1
2020-04-12T19:00:12.000Z
2020-04-12T19:00:12.000Z
from django import template from django.contrib.admin.models import LogEntry register = template.Library() class AdminLogNode(template.Node): def __init__(self, limit, varname, user): self.limit, self.varname, self.user = limit, varname, user def __repr__(self): return "<GetAdminLog Node>" def render(self, context): if self.user is None: context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit] else: user_id = self.user if not user_id.isdigit(): user_id = context[self.user].pk context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)] return '' @register.tag def get_admin_log(parser, token): """ Populates a template variable with the admin log for the given criteria. Usage:: {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %} Examples:: {% get_admin_log 10 as admin_log for_user 23 %} {% get_admin_log 10 as admin_log for_user user %} {% get_admin_log 10 as admin_log %} Note that ``context_var_containing_user_obj`` can be a hard-coded integer (user ID) or the name of a template context variable containing the user object whose ID you want. """ tokens = token.contents.split() if len(tokens) < 4: raise template.TemplateSyntaxError( "'get_admin_log' statements require two arguments") if not tokens[1].isdigit(): raise template.TemplateSyntaxError( "First argument to 'get_admin_log' must be an integer") if tokens[2] != 'as': raise template.TemplateSyntaxError( "Second argument to 'get_admin_log' must be 'as'") if len(tokens) > 4: if tokens[4] != 'for_user': raise template.TemplateSyntaxError( "Fourth argument to 'get_admin_log' must be 'for_user'") return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(len(tokens) > 5 and tokens[5] or None))
37.263158
141
0.653955
from django import template from django.contrib.admin.models import LogEntry register = template.Library() class AdminLogNode(template.Node): def __init__(self, limit, varname, user): self.limit, self.varname, self.user = limit, varname, user def __repr__(self): return "<GetAdminLog Node>" def render(self, context): if self.user is None: context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit] else: user_id = self.user if not user_id.isdigit(): user_id = context[self.user].pk context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)] return '' @register.tag def get_admin_log(parser, token): tokens = token.contents.split() if len(tokens) < 4: raise template.TemplateSyntaxError( "'get_admin_log' statements require two arguments") if not tokens[1].isdigit(): raise template.TemplateSyntaxError( "First argument to 'get_admin_log' must be an integer") if tokens[2] != 'as': raise template.TemplateSyntaxError( "Second argument to 'get_admin_log' must be 'as'") if len(tokens) > 4: if tokens[4] != 'for_user': raise template.TemplateSyntaxError( "Fourth argument to 'get_admin_log' must be 'for_user'") return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(len(tokens) > 5 and tokens[5] or None))
true
true
f70ee2ccfbf232fca633a434e05e913a78eb9412
19,418
py
Python
LTAR_Flux_QC.py
cafltar/CAF_EC_Column_Rename
7375678081d8931f34e7ab8b4a6e02eca112e721
[ "MIT" ]
null
null
null
LTAR_Flux_QC.py
cafltar/CAF_EC_Column_Rename
7375678081d8931f34e7ab8b4a6e02eca112e721
[ "MIT" ]
null
null
null
LTAR_Flux_QC.py
cafltar/CAF_EC_Column_Rename
7375678081d8931f34e7ab8b4a6e02eca112e721
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Tue Aug 21 15:53:46 2018 @author: Eric S. Russell Laboratory for Atmospheric Research Dept. of Civil and Environmental Engineering Washington State University eric.s.russell@wsu.edu Not all of these functions are used in the column rename script; these are potentially to be used with this processing depending on other's thoughts. This is a trial run of dealing with code across sites. """ import numpy as np import pandas as pd import datetime """ QA/QC processing for flux data: Inputs: data: Full input data grade: Maximum QA/QC grade as assigned by the flux calculation code LE_B: Two number array with the highest (LE_B[1]) and lowest (LE_B[0]) hard limit LE value H_B: Same as LE_B but for H F_B: Same as LE-B but for Fc cls: gg: Outputs: data: Dataframe with the filtered data; does not track reason for removing data. Conditional for door_is_open_Hst since not all sites will/do have enclosure door sensors installed """ # This function not implemented into the script; still thinking about how I want to format this and integrate so user doesn't have to do a lot to make work def Grade_cs(df,info, Site, site=False): if site == True: grade = int(info['grade'][Site]) LE_B = [float(info['LEL'][Site]),float(info['LEU'][Site])] H_B = [float(info['HL'][Site]),float(info['HU'][Site])] F_B = [float(info['FCL'][Site]),float(info['FCU'][Site])] T_B = [float(info['TL'][Site]),float(info['TU'][Site])] elif site == False: grade = int(info['Val_L']['grade']) LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])] H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])] F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])] T_B = [float(info['Val_L']['T_B']),float(info['Val_U']['T_B'])] gg = ['H_SSITC_TEST','LE_SSITC_TEST','FC_SSITC_TEST','TAU_SSITC_TEST'] cls =['H','LE','FC', 'TAU'] # var = ['H_Flags','LE_Flags','Fc_Flags'] Needs flagging system for QC pd.options.mode.chained_assignment = None if (grade >9) | (grade<1): 
print('Grade number must be between 0-9.') return # 'exit' function and return error Good = None data = []; data=pd.DataFrame(data,index=df.index) if cls[1] in df.columns: HL = (df[cls[1]].astype(float) < LE_B[0]) | (df[cls[1]].astype(float)>LE_B[1]) | df[cls[1]].astype(float).isnull() if gg[1] in df.columns: Grade = (df[gg[1]].astype(float) <= grade) & (~HL) else: Grade = ~HL df[cls[1]][~Grade] = np.NaN data[cls[1]+'_Flag'] = 0 data[cls[1]+'_Flag'][~Grade] = 1 if cls[0] in df.columns: HL = (df[cls[0]].astype(float) < H_B[0]) | (df[cls[0]].astype(float)> H_B[1]) | df[cls[0]].astype(float).isnull() if gg[0] in df.columns: Grade = (df[gg[0]].astype(float) <= grade) & (~HL) else: Grade = ~HL df[cls[0]][~Grade] = np.NaN data[cls[0]+'_Flag'] = 0 data[cls[0]+'_Flag'][~Grade] = 1 if cls[2] in df.columns: HL = (df[cls[2]].astype(float) < F_B[0])|(df[cls[2]].astype(float) > F_B[1]) | df[cls[2]].astype(float).isnull() if gg[2] in df.columns: Grade = (df[gg[2]].astype(float) <= grade) & (~HL) else: Grade = ~HL df[cls[2]][~Grade] = np.NaN data[cls[2]+'_Flag'] = 0 data[cls[2]+'_Flag'][~Grade] = 1 if cls[3] in df.columns: HL = (df[cls[3]].astype(float) < T_B[0])|(df[cls[3]].astype(float) > T_B[1]) | df[cls[3]].astype(float).isnull() if gg[3] in df.columns: Grade = (df[gg[3]].astype(float) <= grade) & (~HL) else: Grade = ~HL data[cls[3]+'_Flag'] = 0 data[cls[3]+'_Flag'][~Grade] = 1 # Rain Mask if 'P' in df.columns: Precip = (df['P'].astype(float) == 0) | (df['P'].astype(float) == -9999) precip = True data['P_Flag'] = 0 data['P_Flag'][~Precip] = 1 else: precip = False if 'CO2_sig_strgth_Min' in df.columns: c_sig_strength = df['CO2_sig_strgth_Min'] > 0.7 data['CO2_Signal_Strength'] = 0 data['CO2_Signal_Strength'][~c_sig_strength] = 1 if 'H2O_sig_strgth_Min' in df.columns: w_sig_strength = df['H2O_sig_strgth_Min'] > 0.7 data['H2O_Signal_Strength'] = 0 data['H2O_Signal_Strength'][~w_sig_strength] = 1 if 'CO2_samples_Tot' in df.columns: Samp_Good_IRGA = 
df['CO2_samples_Tot'].astype(float)>14400 data['CO2_Samples_Flag'] = 0 data['CO2_Samples_Flag'][~Samp_Good_IRGA] = 1 irga = True else: irga=False if 'sonic_samples_Tot' in df.columns: Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400 data['Sonic_Samples_Flag'] = 0 data['Sonic_Samples_Flag'][~Samp_Good_Sonic] = 1 sonic = True else: sonic=False if 'used_records' in df.columns: Samp_Good_Sonic = df['used_records'].astype(float)>14400 sonic = True else: sonic=False if 'door_is_open_Hst' in df.columns: Door_Closed = df['door_is_open_Hst'].astype(float) == 0 pc = True else: pc = False if precip&irga&sonic&pc: Good = Door_Closed &Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength elif precip&irga&sonic&~pc: Good = Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength elif precip&~irga&~sonic&~pc: Good = Precip&w_sig_strength&c_sig_strength elif precip&~irga&sonic&~pc: Good = Samp_Good_Sonic&Precip&w_sig_strength&c_sig_strength elif ~precip&~irga&sonic&~pc: Good = Samp_Good_Sonic&w_sig_strength&c_sig_strength elif ~precip&irga&sonic&pc: Good = Samp_Good_Sonic&Samp_Good_IRGA&w_sig_strength&c_sig_strength if Good is not None: if cls[3] in df.columns: df[cls[3]][~Good] = np.NaN if cls[2] in df.columns: df[cls[2]][~Good] = np.NaN if cls[1] in df.columns: df[cls[1]][~Good] = np.NaN if cls[0] in df.columns: df[cls[0]][~Good] = np.NaN return df, data #Fills in the blanks spaces with NaN's so the time index is continuous def indx_fill(df, time): df.index = pd.to_datetime(df.index) # Sort index in case it came in out of order, a possibility depending on filenames and naming scheme df = df.sort_index() # Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints df = df[~df.index.duplicated(keep='first')] for k in range (0,len(df)): if str(df.index[k])=='NaT': df = df.drop(df.index[k]) # Fill in missing times due to tower being down and pad dataframe to midnight of the first and last day idx = 
pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = time) df = df.reindex(idx, fill_value=np.NaN) return df # Used to format EddyPro data by combining the date and time into a common index and dropping the filename column def format_ep(df): df.index = df['date']+' '+df['time'] df = df.drop(['filename'],1) df.index = pd.to_datetime(df.index) return df # This function not used in main script; potential to be used with QC function def ReadIn_Initial(info): # Values pulled in from a separate *.csv file because easier and flexible grade = int(info['Val_L']['grade']) LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])] H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])] F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])] gg = [(info['Val_L']['gg']),(info['Val_U']['gg']),(info['Val_3']['gg'])] cls = [(info['Val_L']['cls']),(info['Val_U']['cls']),(info['Val_3']['cls']), (info['Val_4']['cls'])] return grade, LE_B,H_B,F_B,gg,cls # Reads in a directory of files based on the format for either EddyPro or EasyFlux def Fast_Read(filenames, time, form): if len(filenames) == 0: print('No Files in directory, check the path name.') return # 'exit' function and return error else: #Initialize dataframe used within function Final = [];Final = pd.DataFrame(Final) if form == 'EF': for k in range (0,len(filenames)): df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False) Final = pd.concat([Final,df], sort = False) elif form == 'EP': for k in range (0,len(filenames)): df = pd.read_csv(filenames[k],header= 1,skiprows=[2],sep=',',low_memory=False) Final = pd.concat([Final,df]) Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns Final =Final.drop(['filename'],1) # not needed string-based column; gets in the way of converting to floating point elif form == 'Biomet': for k in range (0,len(filenames)): df = pd.read_csv(filenames[k],header= 
0,skiprows=[1],sep=',',low_memory=False) Final = pd.concat([Final,df]) Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns else: print('Format must be either EF or EP') return # Convert time index Final = Final.sort_index() Out = indx_fill(Final, time) return Out # Return dataframe to main function. def Despike_7(s,ss,x,lab,delta_time, multi): an,Tim = [],[] while ss < x.index[-1]: x_m = np.nanmean(x[ss:s]) x_s = np.nanstd(x[ss:s]) x_d = x[ss:s] an.append((x_d > (x_m-(multi*x_s))) & (x_d < (x_m+(multi*x_s)))) ss+= datetime.timedelta(days=delta_time) Tim.append((x_d.index)) s+= datetime.timedelta(days=delta_time) qq = np.hstack(an) an = pd.DataFrame(qq, columns = [lab]) an.index = np.hstack(Tim) an = an[~an.index.duplicated(keep='first')] # x[an[lab]==False] = np.NaN return an def Met_QAQC(**kwargs): Q = None if 'Tair' in kwargs.keys(): Tair = pd.DataFrame(kwargs['Tair']) Q = Tair; Q = pd.DataFrame(Q); Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40) Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff() >= 25)) & (np.abs(Q[Tair.columns[0]].diff() != 0)) # (~np.isnan(Q[Tair.columns[0]].diff())) & Q['Tair_Day_Change'] = (Tair.resample('D').mean().diff !=0) Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']] else: print('**** Temperature not present ****') if 'RH' in kwargs.keys(): RH = pd.DataFrame(kwargs['RH']) if Q is None: Q = RH; Q = pd.DataFrame(Q) else: Q= Q.join(RH) Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 100) & (Q[RH.columns[0]].astype(float) >= 0) Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 110) Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff() <= 50)) & (np.abs(Q[RH.columns[0]].diff() != 0)) # & (~np.isnan(Q[RH.columns[0]].astype(float).diff())) Q['RH_Day_Change'] = (RH.resample('D').mean().diff !=0) Q['RH_Filtered'] = 
Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']] Q['RH_Filtered'] = Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100) # Q['RH_Filtered'][Q['RH_gt_100']]=100 else: print('**** RH not present ****') if 'P' in kwargs.keys(): P = pd.DataFrame(kwargs['P']); if Q is None: Q = P; Q = pd.DataFrame(Q) else: Q= Q.join(P) Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 70) Q['P_Change'] = (np.abs(Q[P.columns[0]].diff() <= 3.1)) & (np.abs(Q[P.columns[0]].diff() != 0)) # & (~np.isnan(Q[P.columns[0]].diff())) Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']] if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()): MSLP = []; H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) # Scale height x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]); MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) # Mean Sea Level Pressure MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:"MSLP"}) Q= Q.join(MSLP) Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80) Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff() <= 31)) & (np.abs(Q[MSLP.columns[0]].diff() != 0)) #& (~np.isnan(Q[MSLP.columns[0]].diff())) Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']] else: print('**** Mean sea level pressure not present ****') else: print('**** Pressure not present ****') if 'WS' in kwargs.keys(): WS = pd.DataFrame(kwargs['WS']) if Q is None: Q = WS; Q = pd.DataFrame(Q) else: Q= Q.join(WS) Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0) Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff() <= 15)) & (np.abs(Q[WS.columns[0]].diff() != 0)) #& (~np.isnan(Q[WS.columns[0]].diff())) Q['WS_Day_Change'] = (WS.resample('D').mean().diff !=0) Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']] else: 
print('**** Wind Speed not present ****') if 'WD' in kwargs.keys(): WD = pd.DataFrame(kwargs['WD']) if Q is None: Q = WD; Q = pd.DataFrame(Q) else: Q= Q.join(WD) Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0) Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff() != 0)) # (~np.isnan(Q[WD.columns[0]].diff())) & Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']] else: print('**** Wind Direction not present ****') if 'PAR' in kwargs.keys(): PAR = pd.DataFrame(kwargs['PAR']); if Q is None: Q = PAR; Q = pd.DataFrame(Q) else: Q= Q.join(PAR) Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000) Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff() <= 1500))# & (~np.isnan(Q[PAR.columns[0]].diff())) Q['PAR_Day_Change'] = (PAR.resample('D').mean().diff != 0) # Causing problems for some reason Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']] else: print('**** PAR not present ****') if 'Rn' in kwargs.keys(): Rn = pd.DataFrame(kwargs['Rn']) if Q is None: Q = Rn; Q = pd.DataFrame(Q) else: Q= Q.join(Rn) Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500) Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff() <= 500)) & (np.abs(Q[Rn.columns[0]].diff() != 0)) #& (~np.isnan(Q[Rn.columns[0]].astype(float).diff())) Q['Rn_Day_Change'] = (Rn.resample('D').mean().diff !=0) Q['Rn_Filtered'] = Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']] else: print('**** Net Radiations not present ****') if 'Precip' in kwargs.keys(): Precip = pd.DataFrame(kwargs['Precip']) if Q is None: Q = P; Q = pd.DataFrame(Q) else: Q= Q.join(Precip) Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0) Z_Precip = Q[Precip.columns[0]].astype(float) ==0 # if ('RH' in kwargs.keys()) & ('Tair' in kwargs.keys()): # 
Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90) # Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0) # Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH_gt_90']&~Q['Precip_Tair_lt_Zero']] # Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) # elif ('RH' in kwargs.keys()) & ('Tair' not in kwargs.keys()): # Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90) # Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH']] # Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) if 'Tair' in kwargs.keys(): Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0) Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) else: Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) else: print('**** Precipitation not present ****') if 'VPD' in kwargs.keys(): VPD = pd.DataFrame(kwargs['VPD']) if Q is None: Q = VPD; Q = pd.DataFrame(Q) else: Q= Q.join(VPD) Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0) Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[VPD.columns[0]].diff() != 0)) Q['VPD_Day_Change'] = (VPD.resample('D').mean().diff !=0) Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']] if 'e' in kwargs.keys(): e = pd.DataFrame(kwargs['e']) if Q is None: Q = e; Q = pd.DataFrame(Q) else: Q= Q.join(e) Q['e_Hard_Limit'] = 
(Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0) Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e.columns[0]].diff() != 0)) Q['e_Day_Change'] = (e.resample('D').mean().diff !=0) Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']] if 'e_s' in kwargs.keys(): e_s = pd.DataFrame(kwargs['e_s']) if Q is None: Q = e_s; Q = pd.DataFrame(Q) else: Q= Q.join(e_s) Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0) Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e_s.columns[0]].diff() != 0)) Q['e_s_Day_Change'] = (e_s.resample('D').mean().diff !=0) Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']] return Q
50.832461
177
0.575136
import numpy as np import pandas as pd import datetime def Grade_cs(df,info, Site, site=False): if site == True: grade = int(info['grade'][Site]) LE_B = [float(info['LEL'][Site]),float(info['LEU'][Site])] H_B = [float(info['HL'][Site]),float(info['HU'][Site])] F_B = [float(info['FCL'][Site]),float(info['FCU'][Site])] T_B = [float(info['TL'][Site]),float(info['TU'][Site])] elif site == False: grade = int(info['Val_L']['grade']) LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])] H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])] F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])] T_B = [float(info['Val_L']['T_B']),float(info['Val_U']['T_B'])] gg = ['H_SSITC_TEST','LE_SSITC_TEST','FC_SSITC_TEST','TAU_SSITC_TEST'] cls =['H','LE','FC', 'TAU'] # var = ['H_Flags','LE_Flags','Fc_Flags'] Needs flagging system for QC pd.options.mode.chained_assignment = None if (grade >9) | (grade<1): print('Grade number must be between 0-9.') return # 'exit' function and return error Good = None data = []; data=pd.DataFrame(data,index=df.index) if cls[1] in df.columns: HL = (df[cls[1]].astype(float) < LE_B[0]) | (df[cls[1]].astype(float)>LE_B[1]) | df[cls[1]].astype(float).isnull() if gg[1] in df.columns: Grade = (df[gg[1]].astype(float) <= grade) & (~HL) else: Grade = ~HL df[cls[1]][~Grade] = np.NaN data[cls[1]+'_Flag'] = 0 data[cls[1]+'_Flag'][~Grade] = 1 if cls[0] in df.columns: HL = (df[cls[0]].astype(float) < H_B[0]) | (df[cls[0]].astype(float)> H_B[1]) | df[cls[0]].astype(float).isnull() if gg[0] in df.columns: Grade = (df[gg[0]].astype(float) <= grade) & (~HL) else: Grade = ~HL df[cls[0]][~Grade] = np.NaN data[cls[0]+'_Flag'] = 0 data[cls[0]+'_Flag'][~Grade] = 1 if cls[2] in df.columns: HL = (df[cls[2]].astype(float) < F_B[0])|(df[cls[2]].astype(float) > F_B[1]) | df[cls[2]].astype(float).isnull() if gg[2] in df.columns: Grade = (df[gg[2]].astype(float) <= grade) & (~HL) else: Grade = ~HL df[cls[2]][~Grade] = np.NaN data[cls[2]+'_Flag'] = 0 
data[cls[2]+'_Flag'][~Grade] = 1 if cls[3] in df.columns: HL = (df[cls[3]].astype(float) < T_B[0])|(df[cls[3]].astype(float) > T_B[1]) | df[cls[3]].astype(float).isnull() if gg[3] in df.columns: Grade = (df[gg[3]].astype(float) <= grade) & (~HL) else: Grade = ~HL data[cls[3]+'_Flag'] = 0 data[cls[3]+'_Flag'][~Grade] = 1 # Rain Mask if 'P' in df.columns: Precip = (df['P'].astype(float) == 0) | (df['P'].astype(float) == -9999) precip = True data['P_Flag'] = 0 data['P_Flag'][~Precip] = 1 else: precip = False if 'CO2_sig_strgth_Min' in df.columns: c_sig_strength = df['CO2_sig_strgth_Min'] > 0.7 data['CO2_Signal_Strength'] = 0 data['CO2_Signal_Strength'][~c_sig_strength] = 1 if 'H2O_sig_strgth_Min' in df.columns: w_sig_strength = df['H2O_sig_strgth_Min'] > 0.7 data['H2O_Signal_Strength'] = 0 data['H2O_Signal_Strength'][~w_sig_strength] = 1 if 'CO2_samples_Tot' in df.columns: Samp_Good_IRGA = df['CO2_samples_Tot'].astype(float)>14400 data['CO2_Samples_Flag'] = 0 data['CO2_Samples_Flag'][~Samp_Good_IRGA] = 1 irga = True else: irga=False if 'sonic_samples_Tot' in df.columns: Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400 data['Sonic_Samples_Flag'] = 0 data['Sonic_Samples_Flag'][~Samp_Good_Sonic] = 1 sonic = True else: sonic=False if 'used_records' in df.columns: Samp_Good_Sonic = df['used_records'].astype(float)>14400 sonic = True else: sonic=False if 'door_is_open_Hst' in df.columns: Door_Closed = df['door_is_open_Hst'].astype(float) == 0 pc = True else: pc = False if precip&irga&sonic&pc: Good = Door_Closed &Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength elif precip&irga&sonic&~pc: Good = Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength elif precip&~irga&~sonic&~pc: Good = Precip&w_sig_strength&c_sig_strength elif precip&~irga&sonic&~pc: Good = Samp_Good_Sonic&Precip&w_sig_strength&c_sig_strength elif ~precip&~irga&sonic&~pc: Good = Samp_Good_Sonic&w_sig_strength&c_sig_strength elif ~precip&irga&sonic&pc: Good = 
Samp_Good_Sonic&Samp_Good_IRGA&w_sig_strength&c_sig_strength if Good is not None: if cls[3] in df.columns: df[cls[3]][~Good] = np.NaN if cls[2] in df.columns: df[cls[2]][~Good] = np.NaN if cls[1] in df.columns: df[cls[1]][~Good] = np.NaN if cls[0] in df.columns: df[cls[0]][~Good] = np.NaN return df, data #Fills in the blanks spaces with NaN's so the time index is continuous def indx_fill(df, time): df.index = pd.to_datetime(df.index) df = df.sort_index() df = df[~df.index.duplicated(keep='first')] for k in range (0,len(df)): if str(df.index[k])=='NaT': df = df.drop(df.index[k]) idx = pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = time) df = df.reindex(idx, fill_value=np.NaN) return df def format_ep(df): df.index = df['date']+' '+df['time'] df = df.drop(['filename'],1) df.index = pd.to_datetime(df.index) return df def ReadIn_Initial(info): grade = int(info['Val_L']['grade']) LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])] H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])] F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])] gg = [(info['Val_L']['gg']),(info['Val_U']['gg']),(info['Val_3']['gg'])] cls = [(info['Val_L']['cls']),(info['Val_U']['cls']),(info['Val_3']['cls']), (info['Val_4']['cls'])] return grade, LE_B,H_B,F_B,gg,cls def Fast_Read(filenames, time, form): if len(filenames) == 0: print('No Files in directory, check the path name.') return else: Final = [];Final = pd.DataFrame(Final) if form == 'EF': for k in range (0,len(filenames)): df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False) Final = pd.concat([Final,df], sort = False) elif form == 'EP': for k in range (0,len(filenames)): df = pd.read_csv(filenames[k],header= 1,skiprows=[2],sep=',',low_memory=False) Final = pd.concat([Final,df]) Final.index = Final['date']+' '+Final['time'] Final =Final.drop(['filename'],1) elif form == 'Biomet': for k in range (0,len(filenames)): df = 
pd.read_csv(filenames[k],header= 0,skiprows=[1],sep=',',low_memory=False) Final = pd.concat([Final,df]) Final.index = Final['date']+' '+Final['time'] else: print('Format must be either EF or EP') return Final = Final.sort_index() Out = indx_fill(Final, time) return Out def Despike_7(s,ss,x,lab,delta_time, multi): an,Tim = [],[] while ss < x.index[-1]: x_m = np.nanmean(x[ss:s]) x_s = np.nanstd(x[ss:s]) x_d = x[ss:s] an.append((x_d > (x_m-(multi*x_s))) & (x_d < (x_m+(multi*x_s)))) ss+= datetime.timedelta(days=delta_time) Tim.append((x_d.index)) s+= datetime.timedelta(days=delta_time) qq = np.hstack(an) an = pd.DataFrame(qq, columns = [lab]) an.index = np.hstack(Tim) an = an[~an.index.duplicated(keep='first')] return an def Met_QAQC(**kwargs): Q = None if 'Tair' in kwargs.keys(): Tair = pd.DataFrame(kwargs['Tair']) Q = Tair; Q = pd.DataFrame(Q); Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40) Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff() >= 25)) & (np.abs(Q[Tair.columns[0]].diff() != 0)) Q['Tair_Day_Change'] = (Tair.resample('D').mean().diff !=0) Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']] else: print('**** Temperature not present ****') if 'RH' in kwargs.keys(): RH = pd.DataFrame(kwargs['RH']) if Q is None: Q = RH; Q = pd.DataFrame(Q) else: Q= Q.join(RH) Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 100) & (Q[RH.columns[0]].astype(float) >= 0) Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 110) Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff() <= 50)) & (np.abs(Q[RH.columns[0]].diff() != 0)) Q['RH_Day_Change'] = (RH.resample('D').mean().diff !=0) Q['RH_Filtered'] = Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']] Q['RH_Filtered'] = Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100) else: print('**** RH not present 
****') if 'P' in kwargs.keys(): P = pd.DataFrame(kwargs['P']); if Q is None: Q = P; Q = pd.DataFrame(Q) else: Q= Q.join(P) Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 70) Q['P_Change'] = (np.abs(Q[P.columns[0]].diff() <= 3.1)) & (np.abs(Q[P.columns[0]].diff() != 0)) Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']] if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()): MSLP = []; H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]); MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:"MSLP"}) Q= Q.join(MSLP) Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80) Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff() <= 31)) & (np.abs(Q[MSLP.columns[0]].diff() != 0)) Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']] else: print('**** Mean sea level pressure not present ****') else: print('**** Pressure not present ****') if 'WS' in kwargs.keys(): WS = pd.DataFrame(kwargs['WS']) if Q is None: Q = WS; Q = pd.DataFrame(Q) else: Q= Q.join(WS) Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0) Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff() <= 15)) & (np.abs(Q[WS.columns[0]].diff() != 0)) Q['WS_Day_Change'] = (WS.resample('D').mean().diff !=0) Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']] else: print('**** Wind Speed not present ****') if 'WD' in kwargs.keys(): WD = pd.DataFrame(kwargs['WD']) if Q is None: Q = WD; Q = pd.DataFrame(Q) else: Q= Q.join(WD) Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0) Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff() != 0)) Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']] else: print('**** Wind 
Direction not present ****') if 'PAR' in kwargs.keys(): PAR = pd.DataFrame(kwargs['PAR']); if Q is None: Q = PAR; Q = pd.DataFrame(Q) else: Q= Q.join(PAR) Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000) Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff() <= 1500)) Q['PAR_Day_Change'] = (PAR.resample('D').mean().diff != 0) Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']] else: print('**** PAR not present ****') if 'Rn' in kwargs.keys(): Rn = pd.DataFrame(kwargs['Rn']) if Q is None: Q = Rn; Q = pd.DataFrame(Q) else: Q= Q.join(Rn) Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500) Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff() <= 500)) & (np.abs(Q[Rn.columns[0]].diff() != 0)) Q['Rn_Day_Change'] = (Rn.resample('D').mean().diff !=0) Q['Rn_Filtered'] = Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']] else: print('**** Net Radiations not present ****') if 'Precip' in kwargs.keys(): Precip = pd.DataFrame(kwargs['Precip']) if Q is None: Q = P; Q = pd.DataFrame(Q) else: Q= Q.join(Precip) Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0) Z_Precip = Q[Precip.columns[0]].astype(float) ==0 if 'Tair' in kwargs.keys(): Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0) Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) else: Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']] Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0) else: print('**** Precipitation not present ****') if 'VPD' in kwargs.keys(): VPD = pd.DataFrame(kwargs['VPD']) if Q is None: Q = VPD; Q = 
pd.DataFrame(Q) else: Q= Q.join(VPD) Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0) Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[VPD.columns[0]].diff() != 0)) Q['VPD_Day_Change'] = (VPD.resample('D').mean().diff !=0) Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']] if 'e' in kwargs.keys(): e = pd.DataFrame(kwargs['e']) if Q is None: Q = e; Q = pd.DataFrame(Q) else: Q= Q.join(e) Q['e_Hard_Limit'] = (Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0) Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e.columns[0]].diff() != 0)) Q['e_Day_Change'] = (e.resample('D').mean().diff !=0) Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']] if 'e_s' in kwargs.keys(): e_s = pd.DataFrame(kwargs['e_s']) if Q is None: Q = e_s; Q = pd.DataFrame(Q) else: Q= Q.join(e_s) Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0) Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e_s.columns[0]].diff() != 0)) Q['e_s_Day_Change'] = (e_s.resample('D').mean().diff !=0) Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']] return Q
true
true
f70ee2f8b953fcf7d3eace5aa1239816e97a391b
102
py
Python
ch_05/if.py
berchev/python_learning
5b99065cade53c64b4ede3d0e583c58573ca654c
[ "MIT" ]
null
null
null
ch_05/if.py
berchev/python_learning
5b99065cade53c64b4ede3d0e583c58573ca654c
[ "MIT" ]
null
null
null
ch_05/if.py
berchev/python_learning
5b99065cade53c64b4ede3d0e583c58573ca654c
[ "MIT" ]
null
null
null
#!/usr/bin/env python ph = float(input('enter pH level: ')) if ph < 7.0: print(ph, "is acidic")
14.571429
37
0.588235
ph = float(input('enter pH level: ')) if ph < 7.0: print(ph, "is acidic")
true
true
f70ee3ce2302cf60a5ab6d4a9e15b021694ced9e
1,791
py
Python
tests/cli_tests/test_command_input.py
itamarhaber/iredis
61208aab34c731f88232abd2cacdf0e075e701f2
[ "BSD-3-Clause" ]
1,857
2019-08-09T02:36:13.000Z
2022-03-31T05:53:09.000Z
tests/cli_tests/test_command_input.py
itamarhaber/iredis
61208aab34c731f88232abd2cacdf0e075e701f2
[ "BSD-3-Clause" ]
281
2019-08-08T16:00:59.000Z
2022-03-07T14:05:36.000Z
tests/cli_tests/test_command_input.py
itamarhaber/iredis
61208aab34c731f88232abd2cacdf0e075e701f2
[ "BSD-3-Clause" ]
105
2019-08-09T04:36:12.000Z
2022-02-03T13:27:29.000Z
import os import pytest def test_wrong_select_db_index(cli): cli.sendline("select 1") cli.expect(["OK", "127.0.0.1"]) cli.sendline("select 128") cli.expect(["DB index is out of range", "127.0.0.1:6379[1]>"]) if int(os.environ["REDIS_VERSION"]) > 5: text = "value is not an integer or out of range" else: text = "invalid DB index" cli.sendline("select abc") cli.expect([text, "127.0.0.1:6379[1]>"]) cli.sendline("select 15") cli.expect("OK") def test_set_command_with_shash(clean_redis, cli): cli.sendline("set a \\hello\\") # legal redis command cli.expect("OK") cli.sendline("get a") cli.expect(r"hello") def test_enter_key_binding(clean_redis, cli): cli.send("set") cli.expect("set") cli.send("\033[B") # down cli.sendline() # enter cli.sendline(" a 'hello'") cli.expect("OK") cli.sendline("get a") cli.expect(r"hello") @pytest.mark.skipif("int(os.environ['REDIS_VERSION']) < 6") def test_auth_hidden_password_with_username(clean_redis, cli): cli.send("auth default hello-world") cli.expect("default") cli.expect(r"\*{11}") @pytest.mark.skipif("int(os.environ['REDIS_VERSION']) > 5") def test_auth_hidden_password(clean_redis, cli): cli.send("auth hello-world") cli.expect("auth") cli.expect(r"\*{11}") def test_hello_command_is_not_supported(cli): cli.sendline("hello 3") cli.expect("IRedis currently not support RESP3") def test_abort_reading_connection(cli): cli.sendline("blpop mylist 30") cli.send(chr(3)) cli.expect( r"KeyboardInterrupt received! User canceled reading response!", timeout=10 ) cli.sendline("set foo bar") cli.expect("OK") cli.sendline("get foo") cli.expect("bar")
23.88
82
0.647125
import os import pytest def test_wrong_select_db_index(cli): cli.sendline("select 1") cli.expect(["OK", "127.0.0.1"]) cli.sendline("select 128") cli.expect(["DB index is out of range", "127.0.0.1:6379[1]>"]) if int(os.environ["REDIS_VERSION"]) > 5: text = "value is not an integer or out of range" else: text = "invalid DB index" cli.sendline("select abc") cli.expect([text, "127.0.0.1:6379[1]>"]) cli.sendline("select 15") cli.expect("OK") def test_set_command_with_shash(clean_redis, cli): cli.sendline("set a \\hello\\") cli.expect("OK") cli.sendline("get a") cli.expect(r"hello") def test_enter_key_binding(clean_redis, cli): cli.send("set") cli.expect("set") cli.send("\033[B") cli.sendline() cli.sendline(" a 'hello'") cli.expect("OK") cli.sendline("get a") cli.expect(r"hello") @pytest.mark.skipif("int(os.environ['REDIS_VERSION']) < 6") def test_auth_hidden_password_with_username(clean_redis, cli): cli.send("auth default hello-world") cli.expect("default") cli.expect(r"\*{11}") @pytest.mark.skipif("int(os.environ['REDIS_VERSION']) > 5") def test_auth_hidden_password(clean_redis, cli): cli.send("auth hello-world") cli.expect("auth") cli.expect(r"\*{11}") def test_hello_command_is_not_supported(cli): cli.sendline("hello 3") cli.expect("IRedis currently not support RESP3") def test_abort_reading_connection(cli): cli.sendline("blpop mylist 30") cli.send(chr(3)) cli.expect( r"KeyboardInterrupt received! User canceled reading response!", timeout=10 ) cli.sendline("set foo bar") cli.expect("OK") cli.sendline("get foo") cli.expect("bar")
true
true
f70ee48502d9ed246520453354000d57c64db59a
1,103
py
Python
src/check_setup.py
imagexdsearch/imagesearch
7f4d18906d6ebd9f5d7b4e0db4bc6c7e675fbb1d
[ "BSD-2-Clause" ]
4
2018-05-15T10:57:49.000Z
2019-11-18T23:00:42.000Z
src/check_setup.py
imagexdsearch/imagesearch
7f4d18906d6ebd9f5d7b4e0db4bc6c7e675fbb1d
[ "BSD-2-Clause" ]
null
null
null
src/check_setup.py
imagexdsearch/imagesearch
7f4d18906d6ebd9f5d7b4e0db4bc6c7e675fbb1d
[ "BSD-2-Clause" ]
2
2018-05-16T20:20:02.000Z
2021-11-24T16:14:38.000Z
import sys from distutils.version import LooseVersion if sys.version_info.major < 3: print('[!] You are running an old version of Python. ' 'This tutorial requires Python 3.') sys.exit(1) with open('requirements.txt') as f: reqs = f.readlines() reqs = [(pkg, ver) for (pkg, _, ver) in (req.split() for req in reqs if req.strip())] pkg_names = { 'scikit-image': 'skimage', 'scikit-learn': 'sklearn' } for (pkg, version_wanted) in reqs: module_name = pkg_names.get(pkg, pkg) try: m = __import__(module_name) status = '✓' except ImportError as e: m = None if (pkg != 'numpy' and 'numpy' in str(e)): status = '?' version_installed = 'Needs NumPy' else: version_installed = 'Not installed' status = 'X' if m is not None: version_installed = m.__version__ if LooseVersion(version_wanted) > LooseVersion(version_installed): status = 'X' print('[{}] {:<11} {}'.format( status, pkg.ljust(13), version_installed) )
25.651163
74
0.579329
import sys from distutils.version import LooseVersion if sys.version_info.major < 3: print('[!] You are running an old version of Python. ' 'This tutorial requires Python 3.') sys.exit(1) with open('requirements.txt') as f: reqs = f.readlines() reqs = [(pkg, ver) for (pkg, _, ver) in (req.split() for req in reqs if req.strip())] pkg_names = { 'scikit-image': 'skimage', 'scikit-learn': 'sklearn' } for (pkg, version_wanted) in reqs: module_name = pkg_names.get(pkg, pkg) try: m = __import__(module_name) status = '✓' except ImportError as e: m = None if (pkg != 'numpy' and 'numpy' in str(e)): status = '?' version_installed = 'Needs NumPy' else: version_installed = 'Not installed' status = 'X' if m is not None: version_installed = m.__version__ if LooseVersion(version_wanted) > LooseVersion(version_installed): status = 'X' print('[{}] {:<11} {}'.format( status, pkg.ljust(13), version_installed) )
true
true
f70ee59ad2fff7af06efafd5608c2cdd3cb0a977
299
py
Python
contests/2020.1/rinha_de_calouros/pacotes/M/solver/solver.py
bkpedrosuper/brute_class
fa41e9206d74de394e2dd69f0afa556d1630a39d
[ "MIT" ]
null
null
null
contests/2020.1/rinha_de_calouros/pacotes/M/solver/solver.py
bkpedrosuper/brute_class
fa41e9206d74de394e2dd69f0afa556d1630a39d
[ "MIT" ]
null
null
null
contests/2020.1/rinha_de_calouros/pacotes/M/solver/solver.py
bkpedrosuper/brute_class
fa41e9206d74de394e2dd69f0afa556d1630a39d
[ "MIT" ]
null
null
null
from math import ceil S, D = [int(i) for i in input().split()] cont = [0 for i in range(S)] for d in range(D): t = [int(i) for i in input().split()] for i in range(S): cont[i] += t[i] media = ceil(sum(cont) / D) pref = cont.index(max(cont)) print(str(media)) print(str(pref + 1))
19.933333
41
0.575251
from math import ceil S, D = [int(i) for i in input().split()] cont = [0 for i in range(S)] for d in range(D): t = [int(i) for i in input().split()] for i in range(S): cont[i] += t[i] media = ceil(sum(cont) / D) pref = cont.index(max(cont)) print(str(media)) print(str(pref + 1))
true
true
f70ee767186d8fcd993c43b14c72310227172b6e
2,246
py
Python
voxelgrid/tsdf/run_tsdf_fusion.py
n1ckfg/RoutedFusion
1733911c7fe025b461b75e48461658709996e39c
[ "BSD-3-Clause" ]
null
null
null
voxelgrid/tsdf/run_tsdf_fusion.py
n1ckfg/RoutedFusion
1733911c7fe025b461b75e48461658709996e39c
[ "BSD-3-Clause" ]
null
null
null
voxelgrid/tsdf/run_tsdf_fusion.py
n1ckfg/RoutedFusion
1733911c7fe025b461b75e48461658709996e39c
[ "BSD-3-Clause" ]
null
null
null
#!/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin/python import os app_path = '/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin' os.environ["PATH"] = app_path + os.pathsep + os.environ["PATH"] from TSDFHandle import * import numpy as np import cv2 from utils import extract_mesh_marching_cubes from visualization import plot_mesh import plyfile from sys import argv import pathlib if (len(argv) < 3): print('Usage: {0} <name of depth directory> <save mode>'.format(argv[0])) exit(0) CURRENT_DIR = str(pathlib.Path().absolute()) depth_path = CURRENT_DIR + '/' + argv[1] campose_path = CURRENT_DIR + '/' + 'left_camera_matrix' box = np.array([[-4,4],[-4,4],[-4,4]]) # each cell depicts the interval where we will reconstruct the shape i.e. # [[-xmin,xmax],[-ymin,ymax],[-zmin,zmax]] tsdf = TSDF(bbox=box, resolution=0.025, resolution_factor=1) depth_dir = os.listdir(depth_path) sortOrder_depth = [int(x[:-4]) for x in depth_dir] depth_dir = [x for _, x in sorted(zip(sortOrder_depth, depth_dir))] campose_dir = os.listdir(campose_path) sortOrder_pose = [int(x[:-4]) for x in campose_dir] campose_dir = [x for _, x in sorted(zip(sortOrder_pose, campose_dir))] camera_intrinsics = np.array([[256, 0, 256], [0, 256, 256], [0, 0, 1]]).astype(np.float32) # apparently, the tsdf fusion code expects that the camera coordinate system is such that z is in the # camera viewing direction, y is down and x is to the right. 
This is achieved by a serie of rotations rot_180_around_y = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]).astype(np.float32) rot_180_around_z = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]).astype(np.float32) rotation = np.matmul(rot_180_around_z, rot_180_around_y) for i in range(len(depth_dir)): depth = cv2.imread(depth_path + '/' + depth_dir[i], -1) depth = depth / 1000 weight_map = np.ones(depth.shape) campose = np.linalg.inv(np.loadtxt(campose_path + '/' + campose_dir[i]).astype(np.float32)) campose = np.matmul(camera_intrinsics, np.matmul(rotation,campose[0:3, 0:4])) tsdf.fuse(campose, depth.astype(np.float32), weight_map.astype(np.float32)) mesh = extract_mesh_marching_cubes(tsdf.get_volume()[:, :, :, 0]) if argv[2]: mesh.write('tsdf_fusion_' + argv[1] + '.ply') plot_mesh(mesh)
39.403509
113
0.714604
import os app_path = '/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin' os.environ["PATH"] = app_path + os.pathsep + os.environ["PATH"] from TSDFHandle import * import numpy as np import cv2 from utils import extract_mesh_marching_cubes from visualization import plot_mesh import plyfile from sys import argv import pathlib if (len(argv) < 3): print('Usage: {0} <name of depth directory> <save mode>'.format(argv[0])) exit(0) CURRENT_DIR = str(pathlib.Path().absolute()) depth_path = CURRENT_DIR + '/' + argv[1] campose_path = CURRENT_DIR + '/' + 'left_camera_matrix' box = np.array([[-4,4],[-4,4],[-4,4]]) tsdf = TSDF(bbox=box, resolution=0.025, resolution_factor=1) depth_dir = os.listdir(depth_path) sortOrder_depth = [int(x[:-4]) for x in depth_dir] depth_dir = [x for _, x in sorted(zip(sortOrder_depth, depth_dir))] campose_dir = os.listdir(campose_path) sortOrder_pose = [int(x[:-4]) for x in campose_dir] campose_dir = [x for _, x in sorted(zip(sortOrder_pose, campose_dir))] camera_intrinsics = np.array([[256, 0, 256], [0, 256, 256], [0, 0, 1]]).astype(np.float32) rot_180_around_y = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]).astype(np.float32) rot_180_around_z = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]).astype(np.float32) rotation = np.matmul(rot_180_around_z, rot_180_around_y) for i in range(len(depth_dir)): depth = cv2.imread(depth_path + '/' + depth_dir[i], -1) depth = depth / 1000 weight_map = np.ones(depth.shape) campose = np.linalg.inv(np.loadtxt(campose_path + '/' + campose_dir[i]).astype(np.float32)) campose = np.matmul(camera_intrinsics, np.matmul(rotation,campose[0:3, 0:4])) tsdf.fuse(campose, depth.astype(np.float32), weight_map.astype(np.float32)) mesh = extract_mesh_marching_cubes(tsdf.get_volume()[:, :, :, 0]) if argv[2]: mesh.write('tsdf_fusion_' + argv[1] + '.ply') plot_mesh(mesh)
true
true
f70eeb6bd74cdee12b83b651d78589e6981087b7
3,266
py
Python
tests/util/test_get_descendants.py
xiangze/edward
6419751d1d849c84c502e5ff3f7249b9bbc7b3aa
[ "Apache-2.0" ]
5,200
2016-05-03T04:59:01.000Z
2022-03-31T03:32:26.000Z
tests/util/test_get_descendants.py
xiangze/edward
6419751d1d849c84c502e5ff3f7249b9bbc7b3aa
[ "Apache-2.0" ]
724
2016-05-04T09:04:37.000Z
2022-02-28T02:41:12.000Z
tests/util/test_get_descendants.py
xiangze/edward
6419751d1d849c84c502e5ff3f7249b9bbc7b3aa
[ "Apache-2.0" ]
1,004
2016-05-03T22:45:14.000Z
2022-03-25T00:08:08.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from edward.models import Bernoulli, Normal from edward.util import get_descendants class test_get_descendants_class(tf.test.TestCase): def test_v_structure(self): """a -> b -> e <- d <- c""" with self.test_session(): a = Normal(0.0, 1.0) b = Normal(a, 1.0) c = Normal(0.0, 1.0) d = Normal(c, 1.0) e = Normal(b * d, 1.0) self.assertEqual(set(get_descendants(a)), set([b, e])) self.assertEqual(get_descendants(b), [e]) self.assertEqual(set(get_descendants(c)), set([d, e])) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_a_structure(self): """e <- d <- a -> b -> c""" with self.test_session(): a = Normal(0.0, 1.0) b = Normal(a, 1.0) c = Normal(b, 1.0) d = Normal(a, 1.0) e = Normal(d, 1.0) self.assertEqual(set(get_descendants(a)), set([b, c, d, e])) self.assertEqual(get_descendants(b), [c]) self.assertEqual(get_descendants(c), []) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_chain_structure(self): """a -> b -> c -> d -> e""" with self.test_session(): a = Normal(0.0, 1.0) b = Normal(a, 1.0) c = Normal(b, 1.0) d = Normal(c, 1.0) e = Normal(d, 1.0) self.assertEqual(set(get_descendants(a)), set([b, c, d, e])) self.assertEqual(set(get_descendants(b)), set([c, d, e])) self.assertEqual(set(get_descendants(c)), set([d, e])) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_tensor(self): with self.test_session(): a = Normal(0.0, 1.0) b = tf.constant(2.0) c = a + b d = Normal(c, 1.0) self.assertEqual(get_descendants(a), [d]) self.assertEqual(get_descendants(b), [d]) self.assertEqual(get_descendants(c), [d]) self.assertEqual(get_descendants(d), []) def test_control_flow(self): with self.test_session(): a = Bernoulli(0.5) b = Normal(0.0, 1.0) c = tf.constant(0.0) d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c) e = 
Normal(d, 1.0) self.assertEqual(get_descendants(a), [e]) self.assertEqual(get_descendants(b), [e]) self.assertEqual(get_descendants(c), [e]) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_scan(self): """copied from test_chain_structure""" def cumsum(x): return tf.scan(lambda a, x: a + x, x) with self.test_session(): a = Normal(tf.ones([3]), tf.ones([3])) b = Normal(cumsum(a), tf.ones([3])) c = Normal(cumsum(b), tf.ones([3])) d = Normal(cumsum(c), tf.ones([3])) e = Normal(cumsum(d), tf.ones([3])) self.assertEqual(set(get_descendants(a)), set([b, c, d, e])) self.assertEqual(set(get_descendants(b)), set([c, d, e])) self.assertEqual(set(get_descendants(c)), set([d, e])) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) if __name__ == '__main__': tf.test.main()
33.326531
66
0.600122
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from edward.models import Bernoulli, Normal from edward.util import get_descendants class test_get_descendants_class(tf.test.TestCase): def test_v_structure(self): with self.test_session(): a = Normal(0.0, 1.0) b = Normal(a, 1.0) c = Normal(0.0, 1.0) d = Normal(c, 1.0) e = Normal(b * d, 1.0) self.assertEqual(set(get_descendants(a)), set([b, e])) self.assertEqual(get_descendants(b), [e]) self.assertEqual(set(get_descendants(c)), set([d, e])) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_a_structure(self): with self.test_session(): a = Normal(0.0, 1.0) b = Normal(a, 1.0) c = Normal(b, 1.0) d = Normal(a, 1.0) e = Normal(d, 1.0) self.assertEqual(set(get_descendants(a)), set([b, c, d, e])) self.assertEqual(get_descendants(b), [c]) self.assertEqual(get_descendants(c), []) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_chain_structure(self): with self.test_session(): a = Normal(0.0, 1.0) b = Normal(a, 1.0) c = Normal(b, 1.0) d = Normal(c, 1.0) e = Normal(d, 1.0) self.assertEqual(set(get_descendants(a)), set([b, c, d, e])) self.assertEqual(set(get_descendants(b)), set([c, d, e])) self.assertEqual(set(get_descendants(c)), set([d, e])) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_tensor(self): with self.test_session(): a = Normal(0.0, 1.0) b = tf.constant(2.0) c = a + b d = Normal(c, 1.0) self.assertEqual(get_descendants(a), [d]) self.assertEqual(get_descendants(b), [d]) self.assertEqual(get_descendants(c), [d]) self.assertEqual(get_descendants(d), []) def test_control_flow(self): with self.test_session(): a = Bernoulli(0.5) b = Normal(0.0, 1.0) c = tf.constant(0.0) d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c) e = Normal(d, 1.0) self.assertEqual(get_descendants(a), [e]) 
self.assertEqual(get_descendants(b), [e]) self.assertEqual(get_descendants(c), [e]) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) def test_scan(self): def cumsum(x): return tf.scan(lambda a, x: a + x, x) with self.test_session(): a = Normal(tf.ones([3]), tf.ones([3])) b = Normal(cumsum(a), tf.ones([3])) c = Normal(cumsum(b), tf.ones([3])) d = Normal(cumsum(c), tf.ones([3])) e = Normal(cumsum(d), tf.ones([3])) self.assertEqual(set(get_descendants(a)), set([b, c, d, e])) self.assertEqual(set(get_descendants(b)), set([c, d, e])) self.assertEqual(set(get_descendants(c)), set([d, e])) self.assertEqual(get_descendants(d), [e]) self.assertEqual(get_descendants(e), []) if __name__ == '__main__': tf.test.main()
true
true
f70eed1e1375e4432d0c54f717dd6b0c81d354b9
2,024
py
Python
deepvariant/core/proto_utils.py
ishandutta2007/deepvariant
f1684281b3ded6c68a1d4bc89f7848b7cedac548
[ "BSD-3-Clause" ]
1
2019-05-09T21:56:48.000Z
2019-05-09T21:56:48.000Z
deepvariant/core/proto_utils.py
Joyvalley/deepvariant
b3a5bced5720f8a27080a5330e64295f16bbd46c
[ "BSD-3-Clause" ]
null
null
null
deepvariant/core/proto_utils.py
Joyvalley/deepvariant
b3a5bced5720f8a27080a5330e64295f16bbd46c
[ "BSD-3-Clause" ]
1
2017-12-23T04:27:06.000Z
2017-12-23T04:27:06.000Z
# Copyright 2017 Google Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Utility library for working with protobufs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from google.protobuf.internal import api_implementation def uses_fast_cpp_protos_or_die(): if api_implementation.Type() != 'cpp': raise ValueError('Expected to be using C++ protobuf implementation ' '(api_implementation.Type() == "cpp") but it is {}'.format( api_implementation.Type()))
47.069767
80
0.764822
from __future__ import absolute_import from __future__ import division from __future__ import print_function from google.protobuf.internal import api_implementation def uses_fast_cpp_protos_or_die(): if api_implementation.Type() != 'cpp': raise ValueError('Expected to be using C++ protobuf implementation ' '(api_implementation.Type() == "cpp") but it is {}'.format( api_implementation.Type()))
true
true
f70eedf59fe8f614e603477963743fd9b0c5b712
1,764
py
Python
libweasyl/libweasyl/conftest.py
hyena/weasyl
a43ad885eb07ae89d6639f289a5b95f3a177439c
[ "Apache-2.0" ]
1
2019-02-15T04:21:48.000Z
2019-02-15T04:21:48.000Z
libweasyl/libweasyl/conftest.py
hyena/weasyl
a43ad885eb07ae89d6639f289a5b95f3a177439c
[ "Apache-2.0" ]
254
2017-12-23T19:36:43.000Z
2020-04-14T21:46:13.000Z
libweasyl/libweasyl/conftest.py
hyena/weasyl
a43ad885eb07ae89d6639f289a5b95f3a177439c
[ "Apache-2.0" ]
1
2017-12-23T18:42:16.000Z
2017-12-23T18:42:16.000Z
import os import pytest import sqlalchemy as sa from libweasyl.configuration import configure_libweasyl from libweasyl.models.meta import registry from libweasyl.models.tables import metadata from libweasyl.test.common import NotFound from libweasyl.test.common import media_link_formatter from libweasyl import cache engine = sa.create_engine(os.environ.get('WEASYL_TEST_SQLALCHEMY_URL', 'postgresql+psycopg2cffi:///weasyl_test')) sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine)) @pytest.fixture(scope='session', autouse=True) def setup(request): db = sessionmaker() db.execute('DROP SCHEMA public CASCADE') db.execute('CREATE SCHEMA public') db.execute('CREATE EXTENSION HSTORE') db.commit() metadata.create_all(engine) cache.region.configure('dogpile.cache.memory') @pytest.fixture(autouse=True) def staticdir(tmpdir): tmpdir = tmpdir.join('libweasyl-staticdir') configure_libweasyl( dbsession=sessionmaker, not_found_exception=NotFound, base_file_path=tmpdir.strpath, staff_config_dict={}, media_link_formatter_callback=media_link_formatter.format_media_link, ) return tmpdir @pytest.fixture def db(request): db = sessionmaker() # If a previous test has failed due to an SQL problem, the session will be # in a broken state, requiring a rollback. It's not harmful to # unconditionally rollback, so just do that. db.rollback() def tear_down(): "Clears all rows from the test database." for k, cls in registry.items(): if not k[0].isupper(): continue db.query(cls).delete() db.flush() db.commit() request.addfinalizer(tear_down) return db
28.451613
113
0.713719
import os import pytest import sqlalchemy as sa from libweasyl.configuration import configure_libweasyl from libweasyl.models.meta import registry from libweasyl.models.tables import metadata from libweasyl.test.common import NotFound from libweasyl.test.common import media_link_formatter from libweasyl import cache engine = sa.create_engine(os.environ.get('WEASYL_TEST_SQLALCHEMY_URL', 'postgresql+psycopg2cffi:///weasyl_test')) sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine)) @pytest.fixture(scope='session', autouse=True) def setup(request): db = sessionmaker() db.execute('DROP SCHEMA public CASCADE') db.execute('CREATE SCHEMA public') db.execute('CREATE EXTENSION HSTORE') db.commit() metadata.create_all(engine) cache.region.configure('dogpile.cache.memory') @pytest.fixture(autouse=True) def staticdir(tmpdir): tmpdir = tmpdir.join('libweasyl-staticdir') configure_libweasyl( dbsession=sessionmaker, not_found_exception=NotFound, base_file_path=tmpdir.strpath, staff_config_dict={}, media_link_formatter_callback=media_link_formatter.format_media_link, ) return tmpdir @pytest.fixture def db(request): db = sessionmaker() # unconditionally rollback, so just do that. db.rollback() def tear_down(): for k, cls in registry.items(): if not k[0].isupper(): continue db.query(cls).delete() db.flush() db.commit() request.addfinalizer(tear_down) return db
true
true
f70eee367e175b617f0f5ce115780567e62dbda0
2,081
py
Python
device.py
seank-com/iot-device-python
f862a2b4bf5a6e2eee0546c287fc0ffdfbd08945
[ "MIT" ]
null
null
null
device.py
seank-com/iot-device-python
f862a2b4bf5a6e2eee0546c287fc0ffdfbd08945
[ "MIT" ]
null
null
null
device.py
seank-com/iot-device-python
f862a2b4bf5a6e2eee0546c287fc0ffdfbd08945
[ "MIT" ]
null
null
null
#!/usr/bin/env python import time import sys import iothub_client from iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError # String containing Hostname, Device Id & Device Key in the format: # "HostName=<host_name>;DeviceId=<device_id>;SharedAccessKey=<device_key>" CONNECTION_STRING = "[Device Connection String]" MSG_TXT = "{\"msg\": \"%s\"}" RECEIVE_CONTEXT = 0 SEND_CONTEXT = 0 SLEEP_TIME = 15 def receive_message_callback(message, user_context): message_buffer = message.get_bytearray() size = len(message_buffer) print ( "Received Message: data = \"%s\" size=%d" % (message_buffer[:size].decode('utf-8'), size) ) return IoTHubMessageDispositionResult.ACCEPTED def send_confirmation_callback(message, result, user_context): print ( "Confirmation received for message with result = %s" % result ) def iothub_client_sample_run(): try: # prepare iothub client client = IoTHubClient(CONNECTION_STRING, IoTHubTransportProvider.MQTT) # to enable MQTT logging set to 1 client.set_option("logtrace", 0) client.set_message_callback(receive_message_callback, RECEIVE_CONTEXT) while True: # send a few messages every minute print ( "IoTHubClient sending message" ) msg_txt_formatted = MSG_TXT % "This is a test" message = IoTHubMessage(msg_txt_formatted) client.send_event_async(message, send_confirmation_callback, SEND_CONTEXT) print ( "IoTHubClient.send_event_async accepted message for transmission to IoT Hub." ) time.sleep(SLEEP_TIME) except IoTHubError as iothub_error: print ( "Unexpected error %s from IoTHub" % iothub_error ) return except KeyboardInterrupt: print ( "IoTHubClient sample stopped" ) if __name__ == '__main__': print ( "\nPython %s" % sys.version ) print ( "IoT Hub Client for Python" ) iothub_client_sample_run()
34.114754
104
0.713119
import time import sys import iothub_client from iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError CONNECTION_STRING = "[Device Connection String]" MSG_TXT = "{\"msg\": \"%s\"}" RECEIVE_CONTEXT = 0 SEND_CONTEXT = 0 SLEEP_TIME = 15 def receive_message_callback(message, user_context): message_buffer = message.get_bytearray() size = len(message_buffer) print ( "Received Message: data = \"%s\" size=%d" % (message_buffer[:size].decode('utf-8'), size) ) return IoTHubMessageDispositionResult.ACCEPTED def send_confirmation_callback(message, result, user_context): print ( "Confirmation received for message with result = %s" % result ) def iothub_client_sample_run(): try: client = IoTHubClient(CONNECTION_STRING, IoTHubTransportProvider.MQTT) client.set_option("logtrace", 0) client.set_message_callback(receive_message_callback, RECEIVE_CONTEXT) while True: print ( "IoTHubClient sending message" ) msg_txt_formatted = MSG_TXT % "This is a test" message = IoTHubMessage(msg_txt_formatted) client.send_event_async(message, send_confirmation_callback, SEND_CONTEXT) print ( "IoTHubClient.send_event_async accepted message for transmission to IoT Hub." ) time.sleep(SLEEP_TIME) except IoTHubError as iothub_error: print ( "Unexpected error %s from IoTHub" % iothub_error ) return except KeyboardInterrupt: print ( "IoTHubClient sample stopped" ) if __name__ == '__main__': print ( "\nPython %s" % sys.version ) print ( "IoT Hub Client for Python" ) iothub_client_sample_run()
true
true
f70eee72ebaef7bb1e9932648eb79dff211b82c2
390
py
Python
algo/implementation/easy/time.py
rivergillis/hackerrank-practice
16b1d448c011f22b202b1ccadac09c71f646aa5e
[ "MIT" ]
null
null
null
algo/implementation/easy/time.py
rivergillis/hackerrank-practice
16b1d448c011f22b202b1ccadac09c71f646aa5e
[ "MIT" ]
null
null
null
algo/implementation/easy/time.py
rivergillis/hackerrank-practice
16b1d448c011f22b202b1ccadac09c71f646aa5e
[ "MIT" ]
null
null
null
import sys time = input().strip() splitted = time.split(':') hours_12 = int(splitted[0]) mins = splitted[1] secs = splitted[2][:2] is_pm = splitted[2].endswith("PM") if (is_pm): if (hours_12 >= 1 and hours_12 < 12): # between 1pm and 11:59pm hours_12 += 12 else: if (hours_12 == 12): hours_12 -= 12 print(':'.join(list((str(hours_12).zfill(2), mins, secs))))
18.571429
68
0.602564
import sys time = input().strip() splitted = time.split(':') hours_12 = int(splitted[0]) mins = splitted[1] secs = splitted[2][:2] is_pm = splitted[2].endswith("PM") if (is_pm): if (hours_12 >= 1 and hours_12 < 12): hours_12 += 12 else: if (hours_12 == 12): hours_12 -= 12 print(':'.join(list((str(hours_12).zfill(2), mins, secs))))
true
true
f70eefb7329c2c9517976ab1418f9cdd7cacbee9
14,654
py
Python
parlai/agents/transformer/mixer.py
dongfangyixi/ParlAI
424a2b3c7086593f699c76612dffd1d925986177
[ "MIT" ]
null
null
null
parlai/agents/transformer/mixer.py
dongfangyixi/ParlAI
424a2b3c7086593f699c76612dffd1d925986177
[ "MIT" ]
null
null
null
parlai/agents/transformer/mixer.py
dongfangyixi/ParlAI
424a2b3c7086593f699c76612dffd1d925986177
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Transformer Agents. """ from typing import Optional from parlai.core.params import ParlaiParser from parlai.core.opt import Opt from parlai.core.agents import Agent from parlai.utils.torch import padded_3d from parlai.core.torch_classifier_agent import TorchClassifierAgent from parlai.core.torch_ranker_agent import TorchRankerAgent from parlai.core.torch_generator_agent import TorchGeneratorAgent from parlai.utils.misc import recursive_getattr from parlai.utils.logging import logging from .modules import ( TransformerMemNetModel, TransformerGeneratorModel, TransformerLinearWrapper, MixerModel, MixerGeneratorModel, ) import torch def add_common_cmdline_args(parser): """ Add common command line args. """ parser.add_argument( '-esz', '--embedding-size', type=int, default=300, help='Size of all embedding layers. Must be a multiple of --n-heads.', ) parser.add_argument( '-nl', '--n-layers', type=int, default=2, help='Number of transformer layers.' ) parser.add_argument( '-hid', '--ffn-size', type=int, default=300, help='Hidden size of the FFN layers', ) parser.add_argument( '--dropout', type=float, default=0.0, help='Dropout used around embeddings and before layer layer normalizations. ' 'This is used in Vaswani 2017 and works well on large datasets.', ) parser.add_argument( '--attention-dropout', type=float, default=0.0, help='Dropout used after attention softmax. This is not used in Vaswani 2017.', ) parser.add_argument( '--relu-dropout', type=float, default=0.0, help='Dropout used after the ReLU in the FFN. 
Not used in Vaswani 2017, ' 'but used in Tensor2Tensor.', ) parser.add_argument( '--n-heads', type=int, default=2, help='Number of multihead attention heads' ) parser.add_argument( '--learn-positional-embeddings', type='bool', default=False, help='If off, sinusoidal embeddings are used. If on, position embeddings are ' 'learned from scratch.', ) parser.add_argument('--embeddings-scale', type='bool', default=True) parser.add_argument( '--n-positions', type=int, default=None, hidden=True, help='Number of positional embeddings to learn. Defaults ' 'to truncate or 1024 if not provided.', ) parser.add_argument( '--n-segments', type=int, default=0, help='The number of segments that support the model. ' 'If zero no segment and no langs_embedding.', ) parser.add_argument( '--variant', choices={'aiayn', 'xlm', 'prelayernorm', 'bart'}, default='aiayn', help='Chooses locations of layer norms, etc. prelayernorm ' 'is used to match some fairseq models', recommended='xlm', ) parser.add_argument( '--activation', choices={'relu', 'gelu'}, default='relu', help='Nonlinear activation to use. AIAYN uses relu, but ' 'more recent papers prefer gelu.', recommended='gelu', ) parser.add_argument( '--output-scaling', type=float, default=1.0, help='scale the output of every transformer by this quantity.', ) parser.add_argument( '--share-word-embeddings', type='bool', default=True, help='Share word embeddings table for candidate and context' 'in the memory network', ) parser.add_argument( '-nel', '--n-encoder-layers', type=int, default=-1, help='This will overide the n-layers for asymmetrical transformers', ) parser.add_argument( '-ndl', '--n-decoder-layers', type=int, default=-1, help='This will overide the n-layers for asymmetrical transformers', ) parser.add_argument( '--model-parallel', type='bool', default=False, help='Shard the layers across multiple GPUs.', ) class Transformer(Agent): """ Placeholder Transformer Agent. 
Placeholder class, which just throws an error telling the user to specify whether they want the ranker or the generator. """ def __init__(self, opt, shared=None): raise RuntimeError( "`--model transformer` is not a valid choice. Please select either " "`--model transformer/ranker` or `--model transformer/generator" ) class TransformerRankerAgent(TorchRankerAgent): """ Transformer Ranker Agent. Implementation of a TorchRankerAgent, where the model is a Transformer """ @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command-line arguments specifically for this agent. """ super().add_cmdline_args(parser, partial_opt=partial_opt) agent = parser.add_argument_group('Transformer Arguments') add_common_cmdline_args(agent) # memory and knowledge arguments agent.add_argument( '--use-memories', type='bool', default=False, help='use memories: must implement the function ' '`_vectorize_memories` to use this', ) agent.add_argument( '--wrap-memory-encoder', type='bool', default=False, help='wrap memory encoder with MLP', ) agent.add_argument( '--memory-attention', type=str, default='sqrt', choices=['cosine', 'dot', 'sqrt'], help='similarity for basic attention mechanism ' 'when using transformer to encode memories', ) # model specific arguments agent.add_argument('--normalize-sent-emb', type='bool', default=False) agent.add_argument('--share-encoders', type='bool', default=True) parser.add_argument( '--share-word-embeddings', type='bool', default=True, help='Share word embeddings table for candidate and context' 'in the memory network', ) agent.add_argument( '--learn-embeddings', type='bool', default=True, help='learn embeddings' ) agent.add_argument( '--data-parallel', type='bool', default=False, help='use model in data parallel, requires ' 'multiple gpus', ) agent.add_argument( '--reduction-type', type=str, default='mean', choices=['first', 'max', 'mean'], help='Type of reduction at the end of 
transformer', ) parser.set_defaults(learningrate=0.0001, optimizer='adamax', truncate=1024) cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt) return agent def _score(self, output, cands): if cands.dim() == 2: return torch.matmul(output, cands.t()) elif cands.dim() == 3: return torch.bmm(output.unsqueeze(1), cands.transpose(1, 2)).squeeze(1) else: raise RuntimeError( 'Unexpected candidate dimensions {}' ''.format(cands.dim()) ) def build_model(self, states=None): """ Build and return model. """ model = MixerModel(self.opt, self.dict) if self.opt['embedding_type'] != 'random': self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type']) return model def batchify(self, obs_batch, sort=False): """ Override so that we can add memories to the Batch object. """ batch = super().batchify(obs_batch, sort) if self.opt['use_memories']: valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)] valid_inds, exs = zip(*valid_obs) mems = None if any('memory_vecs' in ex for ex in exs): mems = [ex.get('memory_vecs', None) for ex in exs] batch.memory_vecs = mems return batch def _vectorize_memories(self, obs): # TODO: move this to Torch Ranker Agent raise NotImplementedError( 'Abstract class: user must implement this function to use memories' ) def vectorize(self, *args, **kwargs): """ Override to include vectorization of memories. """ kwargs['add_start'] = False kwargs['add_end'] = False obs = super().vectorize(*args, **kwargs) if self.opt['use_memories']: obs = self._vectorize_memories(obs) return obs def encode_candidates(self, padded_cands): """ Encode candidates. """ _, cands = self.model(xs=None, mems=None, cands=padded_cands) return cands def score_candidates(self, batch, cand_vecs, cand_encs=None): """ Score candidates. 
""" # convoluted check that not all memories are empty if ( self.opt['use_memories'] and batch.memory_vecs is not None and sum(len(m) for m in batch.memory_vecs) ): mems = padded_3d(batch.memory_vecs, pad_idx=self.NULL_IDX) else: mems = None if cand_encs is not None: # we pre-encoded the candidates, do not re-encode here cand_vecs = None context_h, cands_h = self.model(xs=batch.text_vec, mems=mems, cands=cand_vecs) if cand_encs is not None: cands_h = cand_encs scores = self._score(context_h, cands_h) return scores class TransformerGeneratorAgent(TorchGeneratorAgent): """ TransformerGeneratorAgent. Implementation of TorchGeneratorAgent, where the model is a Transformer """ @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command-line arguments specifically for this agent. """ agent = parser.add_argument_group('Transformer Arguments') add_common_cmdline_args(agent) cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt) super().add_cmdline_args(parser, partial_opt=partial_opt) return agent def build_model(self, states=None): """ Build and return model. """ model = MixerGeneratorModel(self.opt, self.dict) if self.opt['embedding_type'] != 'random': self._copy_embeddings( model.encoder.embeddings.weight, self.opt['embedding_type'] ) return model def _resize_token_embeddings(self, state_dict, msg=None): """ Resize the token embeddings when are adding extra special tokens. 
""" # map extra special tokens carefully new_size = self.model.embeddings.weight.size()[0] orig_size = state_dict['embeddings.weight'].size()[0] logging.info(f'Resizing token embeddings from {orig_size} to {new_size}') if new_size <= orig_size: # new size should be greater than original size, # as we are adding special tokens raise RuntimeError(msg) for emb_weights in [ 'embeddings.weight', 'encoder.embeddings.weight', 'decoder.embeddings.weight', ]: # get new_embs old_embs = state_dict[emb_weights] new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device) # copy over old weights new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :] # reset in state dict state_dict[emb_weights] = new_embs return state_dict class TransformerClassifierAgent(TorchClassifierAgent): """ Classifier based on Transformer. """ @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: TransformerRankerAgent.add_cmdline_args( parser, partial_opt=partial_opt ) # add transformer args super().add_cmdline_args(parser, partial_opt=partial_opt) parser.add_argument( '--load-from-pretrained-ranker', type='bool', default=False, help='load model from base transformer ranking model ' '(used for pretraining)', ) parser.set_defaults(reduction_type='first') return parser def build_model(self): num_classes = len(self.class_list) self.base_model = MixerModel(self.opt, self.dict) return TransformerLinearWrapper(self.base_model.context_encoder, num_classes) def vectorize(self, *args, **kwargs): """ Add the start and end token to the text. """ kwargs['add_start'] = True kwargs['add_end'] = True obs = super().vectorize(*args, **kwargs) return obs def _set_text_vec(self, *args, **kwargs): """ Add the start and end token to the text. 
""" obs = super()._set_text_vec(*args, **kwargs) if 'text_vec' in obs and 'added_start_end' not in obs: obs.force_set( 'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True) ) obs['added_start_end'] = True # check truncation after adding start end tokens if obs.get('text_vec') is not None: truncated_vec = self._check_truncate( obs['text_vec'], self.text_truncate, True ) obs.force_set('text_vec', torch.LongTensor(truncated_vec)) return obs def score(self, batch): return self.model(batch.text_vec) def load_state_dict(self, state_dict): """ Load the state dict into model. This is easily overridable to facilitate transfer of state dicts. """ if self.is_finetune and self.opt['load_from_pretrained_ranker']: self.base_model.load_state_dict(state_dict, strict=False) else: self.model.load_state_dict(state_dict)
32.348786
88
0.602702
from typing import Optional from parlai.core.params import ParlaiParser from parlai.core.opt import Opt from parlai.core.agents import Agent from parlai.utils.torch import padded_3d from parlai.core.torch_classifier_agent import TorchClassifierAgent from parlai.core.torch_ranker_agent import TorchRankerAgent from parlai.core.torch_generator_agent import TorchGeneratorAgent from parlai.utils.misc import recursive_getattr from parlai.utils.logging import logging from .modules import ( TransformerMemNetModel, TransformerGeneratorModel, TransformerLinearWrapper, MixerModel, MixerGeneratorModel, ) import torch def add_common_cmdline_args(parser): parser.add_argument( '-esz', '--embedding-size', type=int, default=300, help='Size of all embedding layers. Must be a multiple of --n-heads.', ) parser.add_argument( '-nl', '--n-layers', type=int, default=2, help='Number of transformer layers.' ) parser.add_argument( '-hid', '--ffn-size', type=int, default=300, help='Hidden size of the FFN layers', ) parser.add_argument( '--dropout', type=float, default=0.0, help='Dropout used around embeddings and before layer layer normalizations. ' 'This is used in Vaswani 2017 and works well on large datasets.', ) parser.add_argument( '--attention-dropout', type=float, default=0.0, help='Dropout used after attention softmax. This is not used in Vaswani 2017.', ) parser.add_argument( '--relu-dropout', type=float, default=0.0, help='Dropout used after the ReLU in the FFN. Not used in Vaswani 2017, ' 'but used in Tensor2Tensor.', ) parser.add_argument( '--n-heads', type=int, default=2, help='Number of multihead attention heads' ) parser.add_argument( '--learn-positional-embeddings', type='bool', default=False, help='If off, sinusoidal embeddings are used. 
If on, position embeddings are ' 'learned from scratch.', ) parser.add_argument('--embeddings-scale', type='bool', default=True) parser.add_argument( '--n-positions', type=int, default=None, hidden=True, help='Number of positional embeddings to learn. Defaults ' 'to truncate or 1024 if not provided.', ) parser.add_argument( '--n-segments', type=int, default=0, help='The number of segments that support the model. ' 'If zero no segment and no langs_embedding.', ) parser.add_argument( '--variant', choices={'aiayn', 'xlm', 'prelayernorm', 'bart'}, default='aiayn', help='Chooses locations of layer norms, etc. prelayernorm ' 'is used to match some fairseq models', recommended='xlm', ) parser.add_argument( '--activation', choices={'relu', 'gelu'}, default='relu', help='Nonlinear activation to use. AIAYN uses relu, but ' 'more recent papers prefer gelu.', recommended='gelu', ) parser.add_argument( '--output-scaling', type=float, default=1.0, help='scale the output of every transformer by this quantity.', ) parser.add_argument( '--share-word-embeddings', type='bool', default=True, help='Share word embeddings table for candidate and context' 'in the memory network', ) parser.add_argument( '-nel', '--n-encoder-layers', type=int, default=-1, help='This will overide the n-layers for asymmetrical transformers', ) parser.add_argument( '-ndl', '--n-decoder-layers', type=int, default=-1, help='This will overide the n-layers for asymmetrical transformers', ) parser.add_argument( '--model-parallel', type='bool', default=False, help='Shard the layers across multiple GPUs.', ) class Transformer(Agent): def __init__(self, opt, shared=None): raise RuntimeError( "`--model transformer` is not a valid choice. 
Please select either " "`--model transformer/ranker` or `--model transformer/generator" ) class TransformerRankerAgent(TorchRankerAgent): @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: super().add_cmdline_args(parser, partial_opt=partial_opt) agent = parser.add_argument_group('Transformer Arguments') add_common_cmdline_args(agent) agent.add_argument( '--use-memories', type='bool', default=False, help='use memories: must implement the function ' '`_vectorize_memories` to use this', ) agent.add_argument( '--wrap-memory-encoder', type='bool', default=False, help='wrap memory encoder with MLP', ) agent.add_argument( '--memory-attention', type=str, default='sqrt', choices=['cosine', 'dot', 'sqrt'], help='similarity for basic attention mechanism ' 'when using transformer to encode memories', ) agent.add_argument('--normalize-sent-emb', type='bool', default=False) agent.add_argument('--share-encoders', type='bool', default=True) parser.add_argument( '--share-word-embeddings', type='bool', default=True, help='Share word embeddings table for candidate and context' 'in the memory network', ) agent.add_argument( '--learn-embeddings', type='bool', default=True, help='learn embeddings' ) agent.add_argument( '--data-parallel', type='bool', default=False, help='use model in data parallel, requires ' 'multiple gpus', ) agent.add_argument( '--reduction-type', type=str, default='mean', choices=['first', 'max', 'mean'], help='Type of reduction at the end of transformer', ) parser.set_defaults(learningrate=0.0001, optimizer='adamax', truncate=1024) cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt) return agent def _score(self, output, cands): if cands.dim() == 2: return torch.matmul(output, cands.t()) elif cands.dim() == 3: return torch.bmm(output.unsqueeze(1), cands.transpose(1, 2)).squeeze(1) else: raise RuntimeError( 'Unexpected candidate dimensions {}' ''.format(cands.dim()) ) def 
build_model(self, states=None): model = MixerModel(self.opt, self.dict) if self.opt['embedding_type'] != 'random': self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type']) return model def batchify(self, obs_batch, sort=False): batch = super().batchify(obs_batch, sort) if self.opt['use_memories']: valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)] valid_inds, exs = zip(*valid_obs) mems = None if any('memory_vecs' in ex for ex in exs): mems = [ex.get('memory_vecs', None) for ex in exs] batch.memory_vecs = mems return batch def _vectorize_memories(self, obs): raise NotImplementedError( 'Abstract class: user must implement this function to use memories' ) def vectorize(self, *args, **kwargs): kwargs['add_start'] = False kwargs['add_end'] = False obs = super().vectorize(*args, **kwargs) if self.opt['use_memories']: obs = self._vectorize_memories(obs) return obs def encode_candidates(self, padded_cands): _, cands = self.model(xs=None, mems=None, cands=padded_cands) return cands def score_candidates(self, batch, cand_vecs, cand_encs=None): if ( self.opt['use_memories'] and batch.memory_vecs is not None and sum(len(m) for m in batch.memory_vecs) ): mems = padded_3d(batch.memory_vecs, pad_idx=self.NULL_IDX) else: mems = None if cand_encs is not None: cand_vecs = None context_h, cands_h = self.model(xs=batch.text_vec, mems=mems, cands=cand_vecs) if cand_encs is not None: cands_h = cand_encs scores = self._score(context_h, cands_h) return scores class TransformerGeneratorAgent(TorchGeneratorAgent): @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: agent = parser.add_argument_group('Transformer Arguments') add_common_cmdline_args(agent) cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt) super().add_cmdline_args(parser, partial_opt=partial_opt) return agent def build_model(self, states=None): model = MixerGeneratorModel(self.opt, self.dict) if 
self.opt['embedding_type'] != 'random': self._copy_embeddings( model.encoder.embeddings.weight, self.opt['embedding_type'] ) return model def _resize_token_embeddings(self, state_dict, msg=None): new_size = self.model.embeddings.weight.size()[0] orig_size = state_dict['embeddings.weight'].size()[0] logging.info(f'Resizing token embeddings from {orig_size} to {new_size}') if new_size <= orig_size: raise RuntimeError(msg) for emb_weights in [ 'embeddings.weight', 'encoder.embeddings.weight', 'decoder.embeddings.weight', ]: old_embs = state_dict[emb_weights] new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device) new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :] state_dict[emb_weights] = new_embs return state_dict class TransformerClassifierAgent(TorchClassifierAgent): @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: TransformerRankerAgent.add_cmdline_args( parser, partial_opt=partial_opt ) super().add_cmdline_args(parser, partial_opt=partial_opt) parser.add_argument( '--load-from-pretrained-ranker', type='bool', default=False, help='load model from base transformer ranking model ' '(used for pretraining)', ) parser.set_defaults(reduction_type='first') return parser def build_model(self): num_classes = len(self.class_list) self.base_model = MixerModel(self.opt, self.dict) return TransformerLinearWrapper(self.base_model.context_encoder, num_classes) def vectorize(self, *args, **kwargs): kwargs['add_start'] = True kwargs['add_end'] = True obs = super().vectorize(*args, **kwargs) return obs def _set_text_vec(self, *args, **kwargs): obs = super()._set_text_vec(*args, **kwargs) if 'text_vec' in obs and 'added_start_end' not in obs: obs.force_set( 'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True) ) obs['added_start_end'] = True if obs.get('text_vec') is not None: truncated_vec = self._check_truncate( obs['text_vec'], self.text_truncate, True ) 
obs.force_set('text_vec', torch.LongTensor(truncated_vec)) return obs def score(self, batch): return self.model(batch.text_vec) def load_state_dict(self, state_dict): if self.is_finetune and self.opt['load_from_pretrained_ranker']: self.base_model.load_state_dict(state_dict, strict=False) else: self.model.load_state_dict(state_dict)
true
true
f70ef0ac0372c717352edce2b5da38e908ee6060
31,508
py
Python
Keras_tensorflow/source/tensorflow/core/protobuf/config_pb2.py
Con-Mi/lambda-packs
b23a8464abdd88050b83310e1d0e99c54dac28ab
[ "MIT" ]
60
2017-08-05T21:47:56.000Z
2022-03-08T21:46:29.000Z
Keras_tensorflow/source/tensorflow/core/protobuf/config_pb2.py
Con-Mi/lambda-packs
b23a8464abdd88050b83310e1d0e99c54dac28ab
[ "MIT" ]
1
2017-08-22T07:17:47.000Z
2017-09-24T22:04:19.000Z
Keras_tensorflow/source/tensorflow/core/protobuf/config_pb2.py
Con-Mi/lambda-packs
b23a8464abdd88050b83310e1d0e99c54dac28ab
[ "MIT" ]
11
2017-09-10T16:22:21.000Z
2021-08-09T09:24:50.000Z
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: tensorflow/core/protobuf/config.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tensorflow.core.framework import cost_graph_pb2 as tensorflow_dot_core_dot_framework_dot_cost__graph__pb2 from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2 from tensorflow.core.framework import step_stats_pb2 as tensorflow_dot_core_dot_framework_dot_step__stats__pb2 from tensorflow.core.protobuf import debug_pb2 as tensorflow_dot_core_dot_protobuf_dot_debug__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow/core/protobuf/config.proto', package='tensorflow', syntax='proto3', serialized_pb=_b('\n%tensorflow/core/protobuf/config.proto\x12\ntensorflow\x1a*tensorflow/core/framework/cost_graph.proto\x1a%tensorflow/core/framework/graph.proto\x1a*tensorflow/core/framework/step_stats.proto\x1a$tensorflow/core/protobuf/debug.proto\"\xa1\x01\n\nGPUOptions\x12\'\n\x1fper_process_gpu_memory_fraction\x18\x01 \x01(\x01\x12\x16\n\x0e\x61llocator_type\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65\x66\x65rred_deletion_bytes\x18\x03 \x01(\x03\x12\x14\n\x0c\x61llow_growth\x18\x04 \x01(\x08\x12\x1b\n\x13visible_device_list\x18\x05 \x01(\t\"\xdf\x02\n\x10OptimizerOptions\x12+\n#do_common_subexpression_elimination\x18\x01 \x01(\x08\x12\x1b\n\x13\x64o_constant_folding\x18\x02 \x01(\x08\x12\x1c\n\x14\x64o_function_inlining\x18\x04 \x01(\x08\x12\x35\n\topt_level\x18\x03 \x01(\x0e\x32\".tensorflow.OptimizerOptions.Level\x12\x45\n\x10global_jit_level\x18\x05 \x01(\x0e\x32+.tensorflow.OptimizerOptions.GlobalJitLevel\" 
\n\x05Level\x12\x06\n\x02L1\x10\x00\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"C\n\x0eGlobalJitLevel\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x10\n\x03OFF\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\x04ON_1\x10\x01\x12\x08\n\x04ON_2\x10\x02\"\xb9\x02\n\x0cGraphOptions\x12\x1e\n\x16\x65nable_recv_scheduling\x18\x02 \x01(\x08\x12\x37\n\x11optimizer_options\x18\x03 \x01(\x0b\x32\x1c.tensorflow.OptimizerOptions\x12\x18\n\x10\x62uild_cost_model\x18\x04 \x01(\x03\x12\x1e\n\x16\x62uild_cost_model_after\x18\t \x01(\x03\x12\x14\n\x0cinfer_shapes\x18\x05 \x01(\x08\x12\x1a\n\x12place_pruned_graph\x18\x06 \x01(\x08\x12 \n\x18\x65nable_bfloat16_sendrecv\x18\x07 \x01(\x08\x12\x15\n\rtimeline_step\x18\x08 \x01(\x05J\x04\x08\x01\x10\x02R%skip_common_subexpression_elimination\",\n\x15ThreadPoolOptionProto\x12\x13\n\x0bnum_threads\x18\x01 \x01(\x05\"2\n\nRPCOptions\x12$\n\x1cuse_rpc_for_inprocess_master\x18\x01 \x01(\x08\"\xd1\x04\n\x0b\x43onfigProto\x12>\n\x0c\x64\x65vice_count\x18\x01 \x03(\x0b\x32(.tensorflow.ConfigProto.DeviceCountEntry\x12$\n\x1cintra_op_parallelism_threads\x18\x02 \x01(\x05\x12$\n\x1cinter_op_parallelism_threads\x18\x05 \x01(\x05\x12\x1f\n\x17use_per_session_threads\x18\t \x01(\x08\x12G\n\x1csession_inter_op_thread_pool\x18\x0c \x03(\x0b\x32!.tensorflow.ThreadPoolOptionProto\x12\x18\n\x10placement_period\x18\x03 \x01(\x05\x12\x16\n\x0e\x64\x65vice_filters\x18\x04 \x03(\t\x12+\n\x0bgpu_options\x18\x06 \x01(\x0b\x32\x16.tensorflow.GPUOptions\x12\x1c\n\x14\x61llow_soft_placement\x18\x07 \x01(\x08\x12\x1c\n\x14log_device_placement\x18\x08 \x01(\x08\x12/\n\rgraph_options\x18\n \x01(\x0b\x32\x18.tensorflow.GraphOptions\x12\x1f\n\x17operation_timeout_in_ms\x18\x0b \x01(\x03\x12+\n\x0brpc_options\x18\r \x01(\x0b\x32\x16.tensorflow.RPCOptions\x1a\x32\n\x10\x44\x65viceCountEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\xa5\x02\n\nRunOptions\x12\x36\n\x0btrace_level\x18\x01 
\x01(\x0e\x32!.tensorflow.RunOptions.TraceLevel\x12\x15\n\rtimeout_in_ms\x18\x02 \x01(\x03\x12\x1c\n\x14inter_op_thread_pool\x18\x03 \x01(\x05\x12\x1f\n\x17output_partition_graphs\x18\x05 \x01(\x08\x12/\n\rdebug_options\x18\x06 \x01(\x0b\x32\x18.tensorflow.DebugOptions\"R\n\nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\x00\x12\x12\n\x0eSOFTWARE_TRACE\x10\x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\x04\x08\x04\x10\x05\"\x96\x01\n\x0bRunMetadata\x12)\n\nstep_stats\x18\x01 \x01(\x0b\x32\x15.tensorflow.StepStats\x12,\n\ncost_graph\x18\x02 \x01(\x0b\x32\x18.tensorflow.CostGraphDef\x12.\n\x10partition_graphs\x18\x03 \x03(\x0b\x32\x14.tensorflow.GraphDefB-\n\x18org.tensorflow.frameworkB\x0c\x43onfigProtosP\x01\xf8\x01\x01\x62\x06proto3') , dependencies=[tensorflow_dot_core_dot_framework_dot_cost__graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_step__stats__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _OPTIMIZEROPTIONS_LEVEL = _descriptor.EnumDescriptor( name='Level', full_name='tensorflow.OptimizerOptions.Level', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='L1', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='L0', index=1, number=-1, options=None, type=None), ], containing_type=None, options=None, serialized_start=633, serialized_end=665, ) _sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_LEVEL) _OPTIMIZEROPTIONS_GLOBALJITLEVEL = _descriptor.EnumDescriptor( name='GlobalJitLevel', full_name='tensorflow.OptimizerOptions.GlobalJitLevel', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='OFF', index=1, number=-1, options=None, type=None), _descriptor.EnumValueDescriptor( name='ON_1', index=2, number=1, options=None, 
type=None), _descriptor.EnumValueDescriptor( name='ON_2', index=3, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=667, serialized_end=734, ) _sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_GLOBALJITLEVEL) _RUNOPTIONS_TRACELEVEL = _descriptor.EnumDescriptor( name='TraceLevel', full_name='tensorflow.RunOptions.TraceLevel', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='NO_TRACE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SOFTWARE_TRACE', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='HARDWARE_TRACE', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='FULL_TRACE', index=3, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=1952, serialized_end=2034, ) _sym_db.RegisterEnumDescriptor(_RUNOPTIONS_TRACELEVEL) _GPUOPTIONS = _descriptor.Descriptor( name='GPUOptions', full_name='tensorflow.GPUOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='per_process_gpu_memory_fraction', full_name='tensorflow.GPUOptions.per_process_gpu_memory_fraction', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='allocator_type', full_name='tensorflow.GPUOptions.allocator_type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='deferred_deletion_bytes', full_name='tensorflow.GPUOptions.deferred_deletion_bytes', index=2, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='allow_growth', full_name='tensorflow.GPUOptions.allow_growth', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='visible_device_list', full_name='tensorflow.GPUOptions.visible_device_list', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=219, serialized_end=380, ) _OPTIMIZEROPTIONS = _descriptor.Descriptor( name='OptimizerOptions', full_name='tensorflow.OptimizerOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='do_common_subexpression_elimination', full_name='tensorflow.OptimizerOptions.do_common_subexpression_elimination', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='do_constant_folding', full_name='tensorflow.OptimizerOptions.do_constant_folding', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='do_function_inlining', full_name='tensorflow.OptimizerOptions.do_function_inlining', index=2, number=4, type=8, cpp_type=7, label=1, has_default_value=False, 
default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='opt_level', full_name='tensorflow.OptimizerOptions.opt_level', index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='global_jit_level', full_name='tensorflow.OptimizerOptions.global_jit_level', index=4, number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _OPTIMIZEROPTIONS_LEVEL, _OPTIMIZEROPTIONS_GLOBALJITLEVEL, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=383, serialized_end=734, ) _GRAPHOPTIONS = _descriptor.Descriptor( name='GraphOptions', full_name='tensorflow.GraphOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='enable_recv_scheduling', full_name='tensorflow.GraphOptions.enable_recv_scheduling', index=0, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='optimizer_options', full_name='tensorflow.GraphOptions.optimizer_options', index=1, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='build_cost_model', full_name='tensorflow.GraphOptions.build_cost_model', index=2, number=4, type=3, cpp_type=2, label=1, has_default_value=False, 
default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='build_cost_model_after', full_name='tensorflow.GraphOptions.build_cost_model_after', index=3, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='infer_shapes', full_name='tensorflow.GraphOptions.infer_shapes', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='place_pruned_graph', full_name='tensorflow.GraphOptions.place_pruned_graph', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='enable_bfloat16_sendrecv', full_name='tensorflow.GraphOptions.enable_bfloat16_sendrecv', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='timeline_step', full_name='tensorflow.GraphOptions.timeline_step', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=737, serialized_end=1050, ) _THREADPOOLOPTIONPROTO = _descriptor.Descriptor( name='ThreadPoolOptionProto', 
full_name='tensorflow.ThreadPoolOptionProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='num_threads', full_name='tensorflow.ThreadPoolOptionProto.num_threads', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1052, serialized_end=1096, ) _RPCOPTIONS = _descriptor.Descriptor( name='RPCOptions', full_name='tensorflow.RPCOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='use_rpc_for_inprocess_master', full_name='tensorflow.RPCOptions.use_rpc_for_inprocess_master', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1098, serialized_end=1148, ) _CONFIGPROTO_DEVICECOUNTENTRY = _descriptor.Descriptor( name='DeviceCountEntry', full_name='tensorflow.ConfigProto.DeviceCountEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='tensorflow.ConfigProto.DeviceCountEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='tensorflow.ConfigProto.DeviceCountEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1694, serialized_end=1744, ) _CONFIGPROTO = _descriptor.Descriptor( name='ConfigProto', full_name='tensorflow.ConfigProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='device_count', full_name='tensorflow.ConfigProto.device_count', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='intra_op_parallelism_threads', full_name='tensorflow.ConfigProto.intra_op_parallelism_threads', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='inter_op_parallelism_threads', full_name='tensorflow.ConfigProto.inter_op_parallelism_threads', index=2, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='use_per_session_threads', full_name='tensorflow.ConfigProto.use_per_session_threads', index=3, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='session_inter_op_thread_pool', full_name='tensorflow.ConfigProto.session_inter_op_thread_pool', index=4, number=12, type=11, cpp_type=10, 
label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='placement_period', full_name='tensorflow.ConfigProto.placement_period', index=5, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='device_filters', full_name='tensorflow.ConfigProto.device_filters', index=6, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='gpu_options', full_name='tensorflow.ConfigProto.gpu_options', index=7, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='allow_soft_placement', full_name='tensorflow.ConfigProto.allow_soft_placement', index=8, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='log_device_placement', full_name='tensorflow.ConfigProto.log_device_placement', index=9, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='graph_options', full_name='tensorflow.ConfigProto.graph_options', index=10, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='operation_timeout_in_ms', full_name='tensorflow.ConfigProto.operation_timeout_in_ms', index=11, number=11, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rpc_options', full_name='tensorflow.ConfigProto.rpc_options', index=12, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_CONFIGPROTO_DEVICECOUNTENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1151, serialized_end=1744, ) _RUNOPTIONS = _descriptor.Descriptor( name='RunOptions', full_name='tensorflow.RunOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='trace_level', full_name='tensorflow.RunOptions.trace_level', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='timeout_in_ms', full_name='tensorflow.RunOptions.timeout_in_ms', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='inter_op_thread_pool', full_name='tensorflow.RunOptions.inter_op_thread_pool', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), 
_descriptor.FieldDescriptor( name='output_partition_graphs', full_name='tensorflow.RunOptions.output_partition_graphs', index=3, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='debug_options', full_name='tensorflow.RunOptions.debug_options', index=4, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _RUNOPTIONS_TRACELEVEL, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1747, serialized_end=2040, ) _RUNMETADATA = _descriptor.Descriptor( name='RunMetadata', full_name='tensorflow.RunMetadata', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='step_stats', full_name='tensorflow.RunMetadata.step_stats', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='cost_graph', full_name='tensorflow.RunMetadata.cost_graph', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='partition_graphs', full_name='tensorflow.RunMetadata.partition_graphs', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, 
is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2043, serialized_end=2193, ) _OPTIMIZEROPTIONS.fields_by_name['opt_level'].enum_type = _OPTIMIZEROPTIONS_LEVEL _OPTIMIZEROPTIONS.fields_by_name['global_jit_level'].enum_type = _OPTIMIZEROPTIONS_GLOBALJITLEVEL _OPTIMIZEROPTIONS_LEVEL.containing_type = _OPTIMIZEROPTIONS _OPTIMIZEROPTIONS_GLOBALJITLEVEL.containing_type = _OPTIMIZEROPTIONS _GRAPHOPTIONS.fields_by_name['optimizer_options'].message_type = _OPTIMIZEROPTIONS _CONFIGPROTO_DEVICECOUNTENTRY.containing_type = _CONFIGPROTO _CONFIGPROTO.fields_by_name['device_count'].message_type = _CONFIGPROTO_DEVICECOUNTENTRY _CONFIGPROTO.fields_by_name['session_inter_op_thread_pool'].message_type = _THREADPOOLOPTIONPROTO _CONFIGPROTO.fields_by_name['gpu_options'].message_type = _GPUOPTIONS _CONFIGPROTO.fields_by_name['graph_options'].message_type = _GRAPHOPTIONS _CONFIGPROTO.fields_by_name['rpc_options'].message_type = _RPCOPTIONS _RUNOPTIONS.fields_by_name['trace_level'].enum_type = _RUNOPTIONS_TRACELEVEL _RUNOPTIONS.fields_by_name['debug_options'].message_type = tensorflow_dot_core_dot_protobuf_dot_debug__pb2._DEBUGOPTIONS _RUNOPTIONS_TRACELEVEL.containing_type = _RUNOPTIONS _RUNMETADATA.fields_by_name['step_stats'].message_type = tensorflow_dot_core_dot_framework_dot_step__stats__pb2._STEPSTATS _RUNMETADATA.fields_by_name['cost_graph'].message_type = tensorflow_dot_core_dot_framework_dot_cost__graph__pb2._COSTGRAPHDEF _RUNMETADATA.fields_by_name['partition_graphs'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF DESCRIPTOR.message_types_by_name['GPUOptions'] = _GPUOPTIONS DESCRIPTOR.message_types_by_name['OptimizerOptions'] = _OPTIMIZEROPTIONS DESCRIPTOR.message_types_by_name['GraphOptions'] = _GRAPHOPTIONS DESCRIPTOR.message_types_by_name['ThreadPoolOptionProto'] = _THREADPOOLOPTIONPROTO DESCRIPTOR.message_types_by_name['RPCOptions'] = _RPCOPTIONS DESCRIPTOR.message_types_by_name['ConfigProto'] = _CONFIGPROTO 
DESCRIPTOR.message_types_by_name['RunOptions'] = _RUNOPTIONS DESCRIPTOR.message_types_by_name['RunMetadata'] = _RUNMETADATA GPUOptions = _reflection.GeneratedProtocolMessageType('GPUOptions', (_message.Message,), dict( DESCRIPTOR = _GPUOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.GPUOptions) )) _sym_db.RegisterMessage(GPUOptions) OptimizerOptions = _reflection.GeneratedProtocolMessageType('OptimizerOptions', (_message.Message,), dict( DESCRIPTOR = _OPTIMIZEROPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions) )) _sym_db.RegisterMessage(OptimizerOptions) GraphOptions = _reflection.GeneratedProtocolMessageType('GraphOptions', (_message.Message,), dict( DESCRIPTOR = _GRAPHOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.GraphOptions) )) _sym_db.RegisterMessage(GraphOptions) ThreadPoolOptionProto = _reflection.GeneratedProtocolMessageType('ThreadPoolOptionProto', (_message.Message,), dict( DESCRIPTOR = _THREADPOOLOPTIONPROTO, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.ThreadPoolOptionProto) )) _sym_db.RegisterMessage(ThreadPoolOptionProto) RPCOptions = _reflection.GeneratedProtocolMessageType('RPCOptions', (_message.Message,), dict( DESCRIPTOR = _RPCOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.RPCOptions) )) _sym_db.RegisterMessage(RPCOptions) ConfigProto = _reflection.GeneratedProtocolMessageType('ConfigProto', (_message.Message,), dict( DeviceCountEntry = _reflection.GeneratedProtocolMessageType('DeviceCountEntry', (_message.Message,), dict( DESCRIPTOR = _CONFIGPROTO_DEVICECOUNTENTRY, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.DeviceCountEntry) )) , DESCRIPTOR = _CONFIGPROTO, 
__module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.ConfigProto) )) _sym_db.RegisterMessage(ConfigProto) _sym_db.RegisterMessage(ConfigProto.DeviceCountEntry) RunOptions = _reflection.GeneratedProtocolMessageType('RunOptions', (_message.Message,), dict( DESCRIPTOR = _RUNOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.RunOptions) )) _sym_db.RegisterMessage(RunOptions) RunMetadata = _reflection.GeneratedProtocolMessageType('RunMetadata', (_message.Message,), dict( DESCRIPTOR = _RUNMETADATA, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.RunMetadata) )) _sym_db.RegisterMessage(RunMetadata) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014ConfigProtosP\001\370\001\001')) _CONFIGPROTO_DEVICECOUNTENTRY.has_options = True _CONFIGPROTO_DEVICECOUNTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) # @@protoc_insertion_point(module_scope)
43.161644
3,662
0.758601
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 _sym_db = _symbol_database.Default() from tensorflow.core.framework import cost_graph_pb2 as tensorflow_dot_core_dot_framework_dot_cost__graph__pb2 from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2 from tensorflow.core.framework import step_stats_pb2 as tensorflow_dot_core_dot_framework_dot_step__stats__pb2 from tensorflow.core.protobuf import debug_pb2 as tensorflow_dot_core_dot_protobuf_dot_debug__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow/core/protobuf/config.proto', package='tensorflow', syntax='proto3', serialized_pb=_b('\n%tensorflow/core/protobuf/config.proto\x12\ntensorflow\x1a*tensorflow/core/framework/cost_graph.proto\x1a%tensorflow/core/framework/graph.proto\x1a*tensorflow/core/framework/step_stats.proto\x1a$tensorflow/core/protobuf/debug.proto\"\xa1\x01\n\nGPUOptions\x12\'\n\x1fper_process_gpu_memory_fraction\x18\x01 \x01(\x01\x12\x16\n\x0e\x61llocator_type\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65\x66\x65rred_deletion_bytes\x18\x03 \x01(\x03\x12\x14\n\x0c\x61llow_growth\x18\x04 \x01(\x08\x12\x1b\n\x13visible_device_list\x18\x05 \x01(\t\"\xdf\x02\n\x10OptimizerOptions\x12+\n , dependencies=[tensorflow_dot_core_dot_framework_dot_cost__graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_step__stats__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _OPTIMIZEROPTIONS_LEVEL = _descriptor.EnumDescriptor( name='Level', full_name='tensorflow.OptimizerOptions.Level', filename=None, file=DESCRIPTOR, values=[ 
_descriptor.EnumValueDescriptor( name='L1', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='L0', index=1, number=-1, options=None, type=None), ], containing_type=None, options=None, serialized_start=633, serialized_end=665, ) _sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_LEVEL) _OPTIMIZEROPTIONS_GLOBALJITLEVEL = _descriptor.EnumDescriptor( name='GlobalJitLevel', full_name='tensorflow.OptimizerOptions.GlobalJitLevel', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='OFF', index=1, number=-1, options=None, type=None), _descriptor.EnumValueDescriptor( name='ON_1', index=2, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='ON_2', index=3, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=667, serialized_end=734, ) _sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_GLOBALJITLEVEL) _RUNOPTIONS_TRACELEVEL = _descriptor.EnumDescriptor( name='TraceLevel', full_name='tensorflow.RunOptions.TraceLevel', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='NO_TRACE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SOFTWARE_TRACE', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='HARDWARE_TRACE', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='FULL_TRACE', index=3, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=1952, serialized_end=2034, ) _sym_db.RegisterEnumDescriptor(_RUNOPTIONS_TRACELEVEL) _GPUOPTIONS = _descriptor.Descriptor( name='GPUOptions', full_name='tensorflow.GPUOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='per_process_gpu_memory_fraction', full_name='tensorflow.GPUOptions.per_process_gpu_memory_fraction', 
index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='allocator_type', full_name='tensorflow.GPUOptions.allocator_type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='deferred_deletion_bytes', full_name='tensorflow.GPUOptions.deferred_deletion_bytes', index=2, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='allow_growth', full_name='tensorflow.GPUOptions.allow_growth', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='visible_device_list', full_name='tensorflow.GPUOptions.visible_device_list', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=219, serialized_end=380, ) _OPTIMIZEROPTIONS = _descriptor.Descriptor( name='OptimizerOptions', full_name='tensorflow.OptimizerOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='do_common_subexpression_elimination', 
full_name='tensorflow.OptimizerOptions.do_common_subexpression_elimination', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='do_constant_folding', full_name='tensorflow.OptimizerOptions.do_constant_folding', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='do_function_inlining', full_name='tensorflow.OptimizerOptions.do_function_inlining', index=2, number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='opt_level', full_name='tensorflow.OptimizerOptions.opt_level', index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='global_jit_level', full_name='tensorflow.OptimizerOptions.global_jit_level', index=4, number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _OPTIMIZEROPTIONS_LEVEL, _OPTIMIZEROPTIONS_GLOBALJITLEVEL, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=383, serialized_end=734, ) _GRAPHOPTIONS = _descriptor.Descriptor( name='GraphOptions', full_name='tensorflow.GraphOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='enable_recv_scheduling', full_name='tensorflow.GraphOptions.enable_recv_scheduling', index=0, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='optimizer_options', full_name='tensorflow.GraphOptions.optimizer_options', index=1, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='build_cost_model', full_name='tensorflow.GraphOptions.build_cost_model', index=2, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='build_cost_model_after', full_name='tensorflow.GraphOptions.build_cost_model_after', index=3, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='infer_shapes', full_name='tensorflow.GraphOptions.infer_shapes', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='place_pruned_graph', full_name='tensorflow.GraphOptions.place_pruned_graph', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='enable_bfloat16_sendrecv', 
full_name='tensorflow.GraphOptions.enable_bfloat16_sendrecv', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='timeline_step', full_name='tensorflow.GraphOptions.timeline_step', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=737, serialized_end=1050, ) _THREADPOOLOPTIONPROTO = _descriptor.Descriptor( name='ThreadPoolOptionProto', full_name='tensorflow.ThreadPoolOptionProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='num_threads', full_name='tensorflow.ThreadPoolOptionProto.num_threads', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1052, serialized_end=1096, ) _RPCOPTIONS = _descriptor.Descriptor( name='RPCOptions', full_name='tensorflow.RPCOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='use_rpc_for_inprocess_master', full_name='tensorflow.RPCOptions.use_rpc_for_inprocess_master', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, 
is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1098, serialized_end=1148, ) _CONFIGPROTO_DEVICECOUNTENTRY = _descriptor.Descriptor( name='DeviceCountEntry', full_name='tensorflow.ConfigProto.DeviceCountEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='tensorflow.ConfigProto.DeviceCountEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='tensorflow.ConfigProto.DeviceCountEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1694, serialized_end=1744, ) _CONFIGPROTO = _descriptor.Descriptor( name='ConfigProto', full_name='tensorflow.ConfigProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='device_count', full_name='tensorflow.ConfigProto.device_count', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='intra_op_parallelism_threads', full_name='tensorflow.ConfigProto.intra_op_parallelism_threads', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), 
_descriptor.FieldDescriptor( name='inter_op_parallelism_threads', full_name='tensorflow.ConfigProto.inter_op_parallelism_threads', index=2, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='use_per_session_threads', full_name='tensorflow.ConfigProto.use_per_session_threads', index=3, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='session_inter_op_thread_pool', full_name='tensorflow.ConfigProto.session_inter_op_thread_pool', index=4, number=12, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='placement_period', full_name='tensorflow.ConfigProto.placement_period', index=5, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='device_filters', full_name='tensorflow.ConfigProto.device_filters', index=6, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='gpu_options', full_name='tensorflow.ConfigProto.gpu_options', index=7, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='allow_soft_placement', 
full_name='tensorflow.ConfigProto.allow_soft_placement', index=8, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='log_device_placement', full_name='tensorflow.ConfigProto.log_device_placement', index=9, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='graph_options', full_name='tensorflow.ConfigProto.graph_options', index=10, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='operation_timeout_in_ms', full_name='tensorflow.ConfigProto.operation_timeout_in_ms', index=11, number=11, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rpc_options', full_name='tensorflow.ConfigProto.rpc_options', index=12, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_CONFIGPROTO_DEVICECOUNTENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1151, serialized_end=1744, ) _RUNOPTIONS = _descriptor.Descriptor( name='RunOptions', full_name='tensorflow.RunOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='trace_level', full_name='tensorflow.RunOptions.trace_level', 
index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='timeout_in_ms', full_name='tensorflow.RunOptions.timeout_in_ms', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='inter_op_thread_pool', full_name='tensorflow.RunOptions.inter_op_thread_pool', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='output_partition_graphs', full_name='tensorflow.RunOptions.output_partition_graphs', index=3, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='debug_options', full_name='tensorflow.RunOptions.debug_options', index=4, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _RUNOPTIONS_TRACELEVEL, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1747, serialized_end=2040, ) _RUNMETADATA = _descriptor.Descriptor( name='RunMetadata', full_name='tensorflow.RunMetadata', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='step_stats', full_name='tensorflow.RunMetadata.step_stats', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, 
default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='cost_graph', full_name='tensorflow.RunMetadata.cost_graph', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='partition_graphs', full_name='tensorflow.RunMetadata.partition_graphs', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2043, serialized_end=2193, ) _OPTIMIZEROPTIONS.fields_by_name['opt_level'].enum_type = _OPTIMIZEROPTIONS_LEVEL _OPTIMIZEROPTIONS.fields_by_name['global_jit_level'].enum_type = _OPTIMIZEROPTIONS_GLOBALJITLEVEL _OPTIMIZEROPTIONS_LEVEL.containing_type = _OPTIMIZEROPTIONS _OPTIMIZEROPTIONS_GLOBALJITLEVEL.containing_type = _OPTIMIZEROPTIONS _GRAPHOPTIONS.fields_by_name['optimizer_options'].message_type = _OPTIMIZEROPTIONS _CONFIGPROTO_DEVICECOUNTENTRY.containing_type = _CONFIGPROTO _CONFIGPROTO.fields_by_name['device_count'].message_type = _CONFIGPROTO_DEVICECOUNTENTRY _CONFIGPROTO.fields_by_name['session_inter_op_thread_pool'].message_type = _THREADPOOLOPTIONPROTO _CONFIGPROTO.fields_by_name['gpu_options'].message_type = _GPUOPTIONS _CONFIGPROTO.fields_by_name['graph_options'].message_type = _GRAPHOPTIONS _CONFIGPROTO.fields_by_name['rpc_options'].message_type = _RPCOPTIONS _RUNOPTIONS.fields_by_name['trace_level'].enum_type = _RUNOPTIONS_TRACELEVEL _RUNOPTIONS.fields_by_name['debug_options'].message_type = tensorflow_dot_core_dot_protobuf_dot_debug__pb2._DEBUGOPTIONS 
_RUNOPTIONS_TRACELEVEL.containing_type = _RUNOPTIONS _RUNMETADATA.fields_by_name['step_stats'].message_type = tensorflow_dot_core_dot_framework_dot_step__stats__pb2._STEPSTATS _RUNMETADATA.fields_by_name['cost_graph'].message_type = tensorflow_dot_core_dot_framework_dot_cost__graph__pb2._COSTGRAPHDEF _RUNMETADATA.fields_by_name['partition_graphs'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF DESCRIPTOR.message_types_by_name['GPUOptions'] = _GPUOPTIONS DESCRIPTOR.message_types_by_name['OptimizerOptions'] = _OPTIMIZEROPTIONS DESCRIPTOR.message_types_by_name['GraphOptions'] = _GRAPHOPTIONS DESCRIPTOR.message_types_by_name['ThreadPoolOptionProto'] = _THREADPOOLOPTIONPROTO DESCRIPTOR.message_types_by_name['RPCOptions'] = _RPCOPTIONS DESCRIPTOR.message_types_by_name['ConfigProto'] = _CONFIGPROTO DESCRIPTOR.message_types_by_name['RunOptions'] = _RUNOPTIONS DESCRIPTOR.message_types_by_name['RunMetadata'] = _RUNMETADATA GPUOptions = _reflection.GeneratedProtocolMessageType('GPUOptions', (_message.Message,), dict( DESCRIPTOR = _GPUOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.GPUOptions) )) _sym_db.RegisterMessage(GPUOptions) OptimizerOptions = _reflection.GeneratedProtocolMessageType('OptimizerOptions', (_message.Message,), dict( DESCRIPTOR = _OPTIMIZEROPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions) )) _sym_db.RegisterMessage(OptimizerOptions) GraphOptions = _reflection.GeneratedProtocolMessageType('GraphOptions', (_message.Message,), dict( DESCRIPTOR = _GRAPHOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.GraphOptions) )) _sym_db.RegisterMessage(GraphOptions) ThreadPoolOptionProto = _reflection.GeneratedProtocolMessageType('ThreadPoolOptionProto', (_message.Message,), dict( DESCRIPTOR = _THREADPOOLOPTIONPROTO, __module__ = 
'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.ThreadPoolOptionProto) )) _sym_db.RegisterMessage(ThreadPoolOptionProto) RPCOptions = _reflection.GeneratedProtocolMessageType('RPCOptions', (_message.Message,), dict( DESCRIPTOR = _RPCOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.RPCOptions) )) _sym_db.RegisterMessage(RPCOptions) ConfigProto = _reflection.GeneratedProtocolMessageType('ConfigProto', (_message.Message,), dict( DeviceCountEntry = _reflection.GeneratedProtocolMessageType('DeviceCountEntry', (_message.Message,), dict( DESCRIPTOR = _CONFIGPROTO_DEVICECOUNTENTRY, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.DeviceCountEntry) )) , DESCRIPTOR = _CONFIGPROTO, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.ConfigProto) )) _sym_db.RegisterMessage(ConfigProto) _sym_db.RegisterMessage(ConfigProto.DeviceCountEntry) RunOptions = _reflection.GeneratedProtocolMessageType('RunOptions', (_message.Message,), dict( DESCRIPTOR = _RUNOPTIONS, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.RunOptions) )) _sym_db.RegisterMessage(RunOptions) RunMetadata = _reflection.GeneratedProtocolMessageType('RunMetadata', (_message.Message,), dict( DESCRIPTOR = _RUNMETADATA, __module__ = 'tensorflow.core.protobuf.config_pb2' # @@protoc_insertion_point(class_scope:tensorflow.RunMetadata) )) _sym_db.RegisterMessage(RunMetadata) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014ConfigProtosP\001\370\001\001')) _CONFIGPROTO_DEVICECOUNTENTRY.has_options = True _CONFIGPROTO_DEVICECOUNTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) # @@protoc_insertion_point(module_scope)
true
true
f70ef0f412e5276c5b8da11a1ad63834bedea5f9
593
py
Python
venv/lib/python3.6/site-packages/gensim/__init__.py
bopopescu/wired_cli
844b5c2bf32c95ad2974663f0501a85ff6134bd4
[ "MIT" ]
2
2021-06-09T20:55:17.000Z
2021-11-03T03:07:37.000Z
venv/lib/python3.6/site-packages/gensim/__init__.py
bopopescu/wired_cli
844b5c2bf32c95ad2974663f0501a85ff6134bd4
[ "MIT" ]
4
2020-07-26T02:10:42.000Z
2021-03-31T18:48:58.000Z
venv/lib/python3.6/site-packages/gensim/__init__.py
bopopescu/wired_cli
844b5c2bf32c95ad2974663f0501a85ff6134bd4
[ "MIT" ]
1
2020-07-25T23:57:23.000Z
2020-07-25T23:57:23.000Z
"""This package contains interfaces and functionality to compute pair-wise document similarities within a corpus of documents. """ from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401 import logging __version__ = '3.5.0' class NullHandler(logging.Handler): """For python versions <= 2.6; same as `logging.NullHandler` in 2.7.""" def emit(self, record): pass logger = logging.getLogger('gensim') if len(logger.handlers) == 0: # To ensure reload() doesn't add another one logger.addHandler(NullHandler())
28.238095
114
0.726813
from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils import logging __version__ = '3.5.0' class NullHandler(logging.Handler): def emit(self, record): pass logger = logging.getLogger('gensim') if len(logger.handlers) == 0: logger.addHandler(NullHandler())
true
true
f70ef1c1fcfaa212ff2a8e28ee9bafc3b2b10d8d
3,514
py
Python
intersight/models/boot_san_ref.py
ategaw-cisco/intersight-python
9d6476620507281b1dc358e29ac452d56081bbb0
[ "Apache-2.0" ]
null
null
null
intersight/models/boot_san_ref.py
ategaw-cisco/intersight-python
9d6476620507281b1dc358e29ac452d56081bbb0
[ "Apache-2.0" ]
null
null
null
intersight/models/boot_san_ref.py
ategaw-cisco/intersight-python
9d6476620507281b1dc358e29ac452d56081bbb0
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Intersight REST API This is Intersight REST API OpenAPI spec version: 1.0.9-262 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class BootSanRef(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'moid': 'str', 'object_type': 'str' } attribute_map = { 'moid': 'Moid', 'object_type': 'ObjectType' } def __init__(self, moid=None, object_type=None): """ BootSanRef - a model defined in Swagger """ self._moid = None self._object_type = None if moid is not None: self.moid = moid if object_type is not None: self.object_type = object_type @property def moid(self): """ Gets the moid of this BootSanRef. :return: The moid of this BootSanRef. :rtype: str """ return self._moid @moid.setter def moid(self, moid): """ Sets the moid of this BootSanRef. :param moid: The moid of this BootSanRef. :type: str """ self._moid = moid @property def object_type(self): """ Gets the object_type of this BootSanRef. :return: The object_type of this BootSanRef. :rtype: str """ return self._object_type @object_type.setter def object_type(self, object_type): """ Sets the object_type of this BootSanRef. :param object_type: The object_type of this BootSanRef. 
:type: str """ self._object_type = object_type def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, BootSanRef): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
23.426667
77
0.529027
from pprint import pformat from six import iteritems import re class BootSanRef(object): swagger_types = { 'moid': 'str', 'object_type': 'str' } attribute_map = { 'moid': 'Moid', 'object_type': 'ObjectType' } def __init__(self, moid=None, object_type=None): self._moid = None self._object_type = None if moid is not None: self.moid = moid if object_type is not None: self.object_type = object_type @property def moid(self): return self._moid @moid.setter def moid(self, moid): self._moid = moid @property def object_type(self): return self._object_type @object_type.setter def object_type(self, object_type): self._object_type = object_type def to_dict(self): result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, BootSanRef): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f70ef231f97b1c04885674e7f2bc0a4b3632a20f
1,296
py
Python
modules/file_utils.py
Efemache/Mercenaries-Hearthstone-game-bot
a243275dddc484f09fd87272277e9d9baca5f3ee
[ "Apache-2.0" ]
6
2022-03-03T03:41:00.000Z
2022-03-25T23:08:12.000Z
modules/file_utils.py
Efemache/Mercenaries-Farm-bot
0eb6bdb10e282606570456f33c071b488eaf02cf
[ "Apache-2.0" ]
37
2021-11-25T16:14:20.000Z
2022-03-30T21:12:05.000Z
modules/file_utils.py
Efemache/Mercenaries-Hearthstone-game-bot
a243275dddc484f09fd87272277e9d9baca5f3ee
[ "Apache-2.0" ]
3
2022-02-18T03:41:22.000Z
2022-03-22T02:11:41.000Z
import json import configparser import re import logging from modules.exceptions import SettingsError log = logging.getLogger(__name__) def readjson(jfile): """... just for reading json file and return data :)""" with open(jfile) as descriptor: data = json.load(descriptor) return data def read_ini_to_dict(inifile): """read ini file to parsed dictionary""" log.debug("Reading %s", inifile) return parseINI(readINI(inifile)) def parseINI(inidict): """... just for transform value into right type""" initype = {} for k in inidict.keys(): i = inidict[k].split("#")[0] if i in ["True", "False"]: initype[k] = i == "True" elif re.match("^[0-9]+$", i): initype[k] = int(i) elif re.match("^[0-9]+\.[0-9]+$", i): initype[k] = float(i) else: initype[k] = str(i) return initype def readINI(inifile): """... just for reading .ini file and return data""" config = configparser.ConfigParser() try: config.read(inifile) except configparser.DuplicateOptionError as err: log.error("Error while reading ini file %s", err) raise SettingsError(f"Duplicate Option in Settings File: {err}") from err return config._sections
25.411765
81
0.611111
import json import configparser import re import logging from modules.exceptions import SettingsError log = logging.getLogger(__name__) def readjson(jfile): with open(jfile) as descriptor: data = json.load(descriptor) return data def read_ini_to_dict(inifile): log.debug("Reading %s", inifile) return parseINI(readINI(inifile)) def parseINI(inidict): initype = {} for k in inidict.keys(): i = inidict[k].split("#")[0] if i in ["True", "False"]: initype[k] = i == "True" elif re.match("^[0-9]+$", i): initype[k] = int(i) elif re.match("^[0-9]+\.[0-9]+$", i): initype[k] = float(i) else: initype[k] = str(i) return initype def readINI(inifile): config = configparser.ConfigParser() try: config.read(inifile) except configparser.DuplicateOptionError as err: log.error("Error while reading ini file %s", err) raise SettingsError(f"Duplicate Option in Settings File: {err}") from err return config._sections
true
true
f70ef372c774d66de4efbfa2589caab790ff166a
156
py
Python
python/pictures/base64_to_pic.py
livejq/AutoScripts
b801e68515c6cdb6ddadc9c499a21276774d3d74
[ "MIT" ]
11
2021-05-21T16:12:21.000Z
2022-02-03T10:21:13.000Z
python/pictures/base64_to_pic.py
livejq/AutoScripts
b801e68515c6cdb6ddadc9c499a21276774d3d74
[ "MIT" ]
null
null
null
python/pictures/base64_to_pic.py
livejq/AutoScripts
b801e68515c6cdb6ddadc9c499a21276774d3d74
[ "MIT" ]
5
2021-05-21T16:30:19.000Z
2022-01-06T16:15:08.000Z
# -*- coding:utf-8 -*- import base64 bs='iVBORw0KGgoAAAANSUhEUg....' imgdata=base64.b64decode(bs) file=open('2.jpg','wb') file.write(imgdata) file.close()
17.333333
31
0.698718
import base64 bs='iVBORw0KGgoAAAANSUhEUg....' imgdata=base64.b64decode(bs) file=open('2.jpg','wb') file.write(imgdata) file.close()
true
true
f70ef3994d20c14340d01f4ee1f72727c196fd1b
233,632
py
Python
sdk/python/pulumi_azure_native/network/v20160901/outputs.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_native/network/v20160901/outputs.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_native/network/v20160901/outputs.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * __all__ = [ 'AddressSpaceResponse', 'ApplicationGatewayAuthenticationCertificateResponse', 'ApplicationGatewayBackendAddressPoolResponse', 'ApplicationGatewayBackendAddressResponse', 'ApplicationGatewayBackendHttpSettingsResponse', 'ApplicationGatewayFrontendIPConfigurationResponse', 'ApplicationGatewayFrontendPortResponse', 'ApplicationGatewayHttpListenerResponse', 'ApplicationGatewayIPConfigurationResponse', 'ApplicationGatewayPathRuleResponse', 'ApplicationGatewayProbeResponse', 'ApplicationGatewayRequestRoutingRuleResponse', 'ApplicationGatewaySkuResponse', 'ApplicationGatewaySslCertificateResponse', 'ApplicationGatewaySslPolicyResponse', 'ApplicationGatewayUrlPathMapResponse', 'ApplicationGatewayWebApplicationFirewallConfigurationResponse', 'BackendAddressPoolResponse', 'BgpPeerStatusResponseResult', 'BgpSettingsResponse', 'DhcpOptionsResponse', 'ExpressRouteCircuitAuthorizationResponse', 'ExpressRouteCircuitPeeringConfigResponse', 'ExpressRouteCircuitPeeringResponse', 'ExpressRouteCircuitServiceProviderPropertiesResponse', 'ExpressRouteCircuitSkuResponse', 'ExpressRouteCircuitStatsResponse', 'FrontendIPConfigurationResponse', 'GatewayRouteResponseResult', 'IPConfigurationResponse', 'InboundNatPoolResponse', 'InboundNatRuleResponse', 'LoadBalancingRuleResponse', 'LocalNetworkGatewayResponse', 'NetworkInterfaceDnsSettingsResponse', 'NetworkInterfaceIPConfigurationResponse', 'NetworkInterfaceResponse', 'NetworkSecurityGroupResponse', 'OutboundNatRuleResponse', 'PacketCaptureFilterResponse', 'PacketCaptureStorageLocationResponse', 'ProbeResponse', 'PublicIPAddressDnsSettingsResponse', 'PublicIPAddressResponse', 
'ResourceNavigationLinkResponse', 'RouteResponse', 'RouteTableResponse', 'SecurityRuleResponse', 'SubResourceResponse', 'SubnetResponse', 'TunnelConnectionHealthResponse', 'VirtualNetworkGatewayIPConfigurationResponse', 'VirtualNetworkGatewayResponse', 'VirtualNetworkGatewaySkuResponse', 'VirtualNetworkPeeringResponse', 'VpnClientConfigurationResponse', 'VpnClientRevokedCertificateResponse', 'VpnClientRootCertificateResponse', ] @pulumi.output_type class AddressSpaceResponse(dict): """ AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network. """ def __init__(__self__, *, address_prefixes: Optional[Sequence[str]] = None): """ AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network. :param Sequence[str] address_prefixes: A list of address blocks reserved for this virtual network in CIDR notation. """ if address_prefixes is not None: pulumi.set(__self__, "address_prefixes", address_prefixes) @property @pulumi.getter(name="addressPrefixes") def address_prefixes(self) -> Optional[Sequence[str]]: """ A list of address blocks reserved for this virtual network in CIDR notation. """ return pulumi.get(self, "address_prefixes") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayAuthenticationCertificateResponse(dict): """ Authentication certificates of an application gateway. """ def __init__(__self__, *, data: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): """ Authentication certificates of an application gateway. :param str data: Certificate public data. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. 
:param str provisioning_state: Provisioning state of the authentication certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ if data is not None: pulumi.set(__self__, "data", data) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter def data(self) -> Optional[str]: """ Certificate public data. """ return pulumi.get(self, "data") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the authentication certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayBackendAddressPoolResponse(dict): """ Backend Address Pool of an application gateway. """ def __init__(__self__, *, backend_addresses: Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']] = None, backend_ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): """ Backend Address Pool of an application gateway. 
:param Sequence['ApplicationGatewayBackendAddressResponseArgs'] backend_addresses: Backend addresses :param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] backend_ip_configurations: Collection of references to IPs defined in network interfaces. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Resource that is unique within a resource group. This name can be used to access the resource. :param str provisioning_state: Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ if backend_addresses is not None: pulumi.set(__self__, "backend_addresses", backend_addresses) if backend_ip_configurations is not None: pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendAddresses") def backend_addresses(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']]: """ Backend addresses """ return pulumi.get(self, "backend_addresses") @property @pulumi.getter(name="backendIPConfigurations") def backend_ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]: """ Collection of references to IPs defined in network interfaces. """ return pulumi.get(self, "backend_ip_configurations") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
""" return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayBackendAddressResponse(dict): """ Backend address of an application gateway. """ def __init__(__self__, *, fqdn: Optional[str] = None, ip_address: Optional[str] = None): """ Backend address of an application gateway. :param str fqdn: Fully qualified domain name (FQDN). :param str ip_address: IP address """ if fqdn is not None: pulumi.set(__self__, "fqdn", fqdn) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) @property @pulumi.getter def fqdn(self) -> Optional[str]: """ Fully qualified domain name (FQDN). """ return pulumi.get(self, "fqdn") @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[str]: """ IP address """ return pulumi.get(self, "ip_address") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayBackendHttpSettingsResponse(dict): """ Backend address pool settings of an application gateway. 
""" def __init__(__self__, *, authentication_certificates: Optional[Sequence['outputs.SubResourceResponse']] = None, cookie_based_affinity: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, port: Optional[int] = None, probe: Optional['outputs.SubResourceResponse'] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None, request_timeout: Optional[int] = None): """ Backend address pool settings of an application gateway. :param Sequence['SubResourceResponseArgs'] authentication_certificates: Array of references to application gateway authentication certificates. :param str cookie_based_affinity: Cookie based affinity. Possible values are: 'Enabled' and 'Disabled'. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param int port: Port :param 'SubResourceResponseArgs' probe: Probe resource of an application gateway. :param str protocol: Protocol. Possible values are: 'Http' and 'Https'. :param str provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param int request_timeout: Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable values are from 1 second to 86400 seconds. 
""" if authentication_certificates is not None: pulumi.set(__self__, "authentication_certificates", authentication_certificates) if cookie_based_affinity is not None: pulumi.set(__self__, "cookie_based_affinity", cookie_based_affinity) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if port is not None: pulumi.set(__self__, "port", port) if probe is not None: pulumi.set(__self__, "probe", probe) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if request_timeout is not None: pulumi.set(__self__, "request_timeout", request_timeout) @property @pulumi.getter(name="authenticationCertificates") def authentication_certificates(self) -> Optional[Sequence['outputs.SubResourceResponse']]: """ Array of references to application gateway authentication certificates. """ return pulumi.get(self, "authentication_certificates") @property @pulumi.getter(name="cookieBasedAffinity") def cookie_based_affinity(self) -> Optional[str]: """ Cookie based affinity. Possible values are: 'Enabled' and 'Disabled'. """ return pulumi.get(self, "cookie_based_affinity") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. 
""" return pulumi.get(self, "name") @property @pulumi.getter def port(self) -> Optional[int]: """ Port """ return pulumi.get(self, "port") @property @pulumi.getter def probe(self) -> Optional['outputs.SubResourceResponse']: """ Probe resource of an application gateway. """ return pulumi.get(self, "probe") @property @pulumi.getter def protocol(self) -> Optional[str]: """ Protocol. Possible values are: 'Http' and 'Https'. """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="requestTimeout") def request_timeout(self) -> Optional[int]: """ Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable values are from 1 second to 86400 seconds. """ return pulumi.get(self, "request_timeout") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayFrontendIPConfigurationResponse(dict): """ Frontend IP configuration of an application gateway. """ def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_address: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.SubResourceResponse'] = None, subnet: Optional['outputs.SubResourceResponse'] = None): """ Frontend IP configuration of an application gateway. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. 
:param str private_ip_address: PrivateIPAddress of the network interface IP Configuration. :param str private_ip_allocation_method: PrivateIP allocation method. Possible values are: 'Static' and 'Dynamic'. :param str provisioning_state: Provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param 'SubResourceResponseArgs' public_ip_address: Reference of the PublicIP resource. :param 'SubResourceResponseArgs' subnet: Reference of the subnet resource. """ if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: """ PrivateIPAddress of the network interface IP Configuration. """ return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: """ PrivateIP allocation method. 
Possible values are: 'Static' and 'Dynamic'. """ return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.SubResourceResponse']: """ Reference of the PublicIP resource. """ return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubResourceResponse']: """ Reference of the subnet resource. """ return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayFrontendPortResponse(dict): """ Frontend port of an application gateway. """ def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, port: Optional[int] = None, provisioning_state: Optional[str] = None): """ Frontend port of an application gateway. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param int port: Frontend port :param str provisioning_state: Provisioning state of the frontend port resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if port is not None: pulumi.set(__self__, "port", port) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def port(self) -> Optional[int]: """ Frontend port """ return pulumi.get(self, "port") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the frontend port resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayHttpListenerResponse(dict): """ Http listener of an application gateway. """ def __init__(__self__, *, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, frontend_port: Optional['outputs.SubResourceResponse'] = None, host_name: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None, require_server_name_indication: Optional[bool] = None, ssl_certificate: Optional['outputs.SubResourceResponse'] = None): """ Http listener of an application gateway. 
:param str etag: A unique read-only string that changes whenever the resource is updated. :param 'SubResourceResponseArgs' frontend_ip_configuration: Frontend IP configuration resource of an application gateway. :param 'SubResourceResponseArgs' frontend_port: Frontend port resource of an application gateway. :param str host_name: Host name of HTTP listener. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param str protocol: Protocol. Possible values are: 'Http' and 'Https'. :param str provisioning_state: Provisioning state of the HTTP listener resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param bool require_server_name_indication: Applicable only if protocol is https. Enables SNI for multi-hosting. :param 'SubResourceResponseArgs' ssl_certificate: SSL certificate resource of an application gateway. """ if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if frontend_port is not None: pulumi.set(__self__, "frontend_port", frontend_port) if host_name is not None: pulumi.set(__self__, "host_name", host_name) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if require_server_name_indication is not None: pulumi.set(__self__, "require_server_name_indication", require_server_name_indication) if ssl_certificate is not None: pulumi.set(__self__, "ssl_certificate", ssl_certificate) @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: """ Frontend IP configuration resource of an application gateway. """ return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter(name="frontendPort") def frontend_port(self) -> Optional['outputs.SubResourceResponse']: """ Frontend port resource of an application gateway. """ return pulumi.get(self, "frontend_port") @property @pulumi.getter(name="hostName") def host_name(self) -> Optional[str]: """ Host name of HTTP listener. """ return pulumi.get(self, "host_name") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def protocol(self) -> Optional[str]: """ Protocol. Possible values are: 'Http' and 'Https'. """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the HTTP listener resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="requireServerNameIndication") def require_server_name_indication(self) -> Optional[bool]: """ Applicable only if protocol is https. Enables SNI for multi-hosting. """ return pulumi.get(self, "require_server_name_indication") @property @pulumi.getter(name="sslCertificate") def ssl_certificate(self) -> Optional['outputs.SubResourceResponse']: """ SSL certificate resource of an application gateway. 
""" return pulumi.get(self, "ssl_certificate") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayIPConfigurationResponse(dict): """ IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed. """ def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None, subnet: Optional['outputs.SubResourceResponse'] = None): """ IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param str provisioning_state: Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param 'SubResourceResponseArgs' subnet: Reference of the subnet resource. A subnet from where application gateway gets its private address. """ if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. 
""" return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubResourceResponse']: """ Reference of the subnet resource. A subnet from where application gateway gets its private address. """ return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayPathRuleResponse(dict): """ Path rule of URL path map of an application gateway. """ def __init__(__self__, *, backend_address_pool: Optional['outputs.SubResourceResponse'] = None, backend_http_settings: Optional['outputs.SubResourceResponse'] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, paths: Optional[Sequence[str]] = None, provisioning_state: Optional[str] = None): """ Path rule of URL path map of an application gateway. :param 'SubResourceResponseArgs' backend_address_pool: Backend address pool resource of URL path map. :param 'SubResourceResponseArgs' backend_http_settings: Backend http settings resource of URL path map. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param Sequence[str] paths: Path rules of URL path map. :param str provisioning_state: Path rule of URL path map resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" if backend_address_pool is not None: pulumi.set(__self__, "backend_address_pool", backend_address_pool) if backend_http_settings is not None: pulumi.set(__self__, "backend_http_settings", backend_http_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if paths is not None: pulumi.set(__self__, "paths", paths) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: """ Backend address pool resource of URL path map. """ return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="backendHttpSettings") def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']: """ Backend http settings resource of URL path map. """ return pulumi.get(self, "backend_http_settings") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def paths(self) -> Optional[Sequence[str]]: """ Path rules of URL path map. """ return pulumi.get(self, "paths") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Path rule of URL path map resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayProbeResponse(dict): """ Probe of the application gateway. """ def __init__(__self__, *, etag: Optional[str] = None, host: Optional[str] = None, id: Optional[str] = None, interval: Optional[int] = None, name: Optional[str] = None, path: Optional[str] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None, timeout: Optional[int] = None, unhealthy_threshold: Optional[int] = None): """ Probe of the application gateway. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str host: Host name to send the probe to. :param str id: Resource ID. :param int interval: The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param str path: Relative path of probe. Valid path starts from '/'. Probe is sent to <Protocol>://<host>:<port><path> :param str protocol: Protocol. Possible values are: 'Http' and 'Https'. :param str provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param int timeout: the probe timeout in seconds. Probe marked as failed if valid response is not received with this timeout period. Acceptable values are from 1 second to 86400 seconds. :param int unhealthy_threshold: The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 second to 20. 
""" if etag is not None: pulumi.set(__self__, "etag", etag) if host is not None: pulumi.set(__self__, "host", host) if id is not None: pulumi.set(__self__, "id", id) if interval is not None: pulumi.set(__self__, "interval", interval) if name is not None: pulumi.set(__self__, "name", name) if path is not None: pulumi.set(__self__, "path", path) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if timeout is not None: pulumi.set(__self__, "timeout", timeout) if unhealthy_threshold is not None: pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold) @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def host(self) -> Optional[str]: """ Host name to send the probe to. """ return pulumi.get(self, "host") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def interval(self) -> Optional[int]: """ The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds. """ return pulumi.get(self, "interval") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def path(self) -> Optional[str]: """ Relative path of probe. Valid path starts from '/'. Probe is sent to <Protocol>://<host>:<port><path> """ return pulumi.get(self, "path") @property @pulumi.getter def protocol(self) -> Optional[str]: """ Protocol. Possible values are: 'Http' and 'Https'. 
""" return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def timeout(self) -> Optional[int]: """ the probe timeout in seconds. Probe marked as failed if valid response is not received with this timeout period. Acceptable values are from 1 second to 86400 seconds. """ return pulumi.get(self, "timeout") @property @pulumi.getter(name="unhealthyThreshold") def unhealthy_threshold(self) -> Optional[int]: """ The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 second to 20. """ return pulumi.get(self, "unhealthy_threshold") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayRequestRoutingRuleResponse(dict): """ Request routing rule of an application gateway. """ def __init__(__self__, *, backend_address_pool: Optional['outputs.SubResourceResponse'] = None, backend_http_settings: Optional['outputs.SubResourceResponse'] = None, etag: Optional[str] = None, http_listener: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None, rule_type: Optional[str] = None, url_path_map: Optional['outputs.SubResourceResponse'] = None): """ Request routing rule of an application gateway. :param 'SubResourceResponseArgs' backend_address_pool: Backend address pool resource of the application gateway. :param 'SubResourceResponseArgs' backend_http_settings: Frontend port resource of the application gateway. :param str etag: A unique read-only string that changes whenever the resource is updated. 
:param 'SubResourceResponseArgs' http_listener: Http listener resource of the application gateway. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param str provisioning_state: Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str rule_type: Rule type. Possible values are: 'Basic' and 'PathBasedRouting'. :param 'SubResourceResponseArgs' url_path_map: URL path map resource of the application gateway. """ if backend_address_pool is not None: pulumi.set(__self__, "backend_address_pool", backend_address_pool) if backend_http_settings is not None: pulumi.set(__self__, "backend_http_settings", backend_http_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if http_listener is not None: pulumi.set(__self__, "http_listener", http_listener) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if rule_type is not None: pulumi.set(__self__, "rule_type", rule_type) if url_path_map is not None: pulumi.set(__self__, "url_path_map", url_path_map) @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: """ Backend address pool resource of the application gateway. """ return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="backendHttpSettings") def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']: """ Frontend port resource of the application gateway. """ return pulumi.get(self, "backend_http_settings") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter(name="httpListener") def http_listener(self) -> Optional['outputs.SubResourceResponse']: """ Http listener resource of the application gateway. """ return pulumi.get(self, "http_listener") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="ruleType") def rule_type(self) -> Optional[str]: """ Rule type. Possible values are: 'Basic' and 'PathBasedRouting'. """ return pulumi.get(self, "rule_type") @property @pulumi.getter(name="urlPathMap") def url_path_map(self) -> Optional['outputs.SubResourceResponse']: """ URL path map resource of the application gateway. """ return pulumi.get(self, "url_path_map") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewaySkuResponse(dict): """ SKU of an application gateway """ def __init__(__self__, *, capacity: Optional[int] = None, name: Optional[str] = None, tier: Optional[str] = None): """ SKU of an application gateway :param int capacity: Capacity (instance count) of an application gateway. :param str name: Name of an application gateway SKU. Possible values are: 'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', and 'WAF_Large'. :param str tier: Tier of an application gateway. Possible values are: 'Standard' and 'WAF'. 
""" if capacity is not None: pulumi.set(__self__, "capacity", capacity) if name is not None: pulumi.set(__self__, "name", name) if tier is not None: pulumi.set(__self__, "tier", tier) @property @pulumi.getter def capacity(self) -> Optional[int]: """ Capacity (instance count) of an application gateway. """ return pulumi.get(self, "capacity") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of an application gateway SKU. Possible values are: 'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', and 'WAF_Large'. """ return pulumi.get(self, "name") @property @pulumi.getter def tier(self) -> Optional[str]: """ Tier of an application gateway. Possible values are: 'Standard' and 'WAF'. """ return pulumi.get(self, "tier") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewaySslCertificateResponse(dict): """ SSL certificates of an application gateway. """ def __init__(__self__, *, data: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, password: Optional[str] = None, provisioning_state: Optional[str] = None, public_cert_data: Optional[str] = None): """ SSL certificates of an application gateway. :param str data: Base-64 encoded pfx certificate. Only applicable in PUT Request. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param str password: Password for the pfx file specified in data. Only applicable in PUT request. :param str provisioning_state: Provisioning state of the SSL certificate resource Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str public_cert_data: Base-64 encoded Public cert data corresponding to pfx specified in data. Only applicable in GET request. 
""" if data is not None: pulumi.set(__self__, "data", data) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if password is not None: pulumi.set(__self__, "password", password) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_cert_data is not None: pulumi.set(__self__, "public_cert_data", public_cert_data) @property @pulumi.getter def data(self) -> Optional[str]: """ Base-64 encoded pfx certificate. Only applicable in PUT Request. """ return pulumi.get(self, "data") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def password(self) -> Optional[str]: """ Password for the pfx file specified in data. Only applicable in PUT request. """ return pulumi.get(self, "password") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the SSL certificate resource Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicCertData") def public_cert_data(self) -> Optional[str]: """ Base-64 encoded Public cert data corresponding to pfx specified in data. Only applicable in GET request. 
""" return pulumi.get(self, "public_cert_data") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewaySslPolicyResponse(dict): """ Application gateway SSL policy. """ def __init__(__self__, *, disabled_ssl_protocols: Optional[Sequence[str]] = None): """ Application gateway SSL policy. :param Sequence[str] disabled_ssl_protocols: SSL protocols to be disabled on application gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'. """ if disabled_ssl_protocols is not None: pulumi.set(__self__, "disabled_ssl_protocols", disabled_ssl_protocols) @property @pulumi.getter(name="disabledSslProtocols") def disabled_ssl_protocols(self) -> Optional[Sequence[str]]: """ SSL protocols to be disabled on application gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'. """ return pulumi.get(self, "disabled_ssl_protocols") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayUrlPathMapResponse(dict): """ UrlPathMaps give a url path to the backend mapping information for PathBasedRouting. """ def __init__(__self__, *, default_backend_address_pool: Optional['outputs.SubResourceResponse'] = None, default_backend_http_settings: Optional['outputs.SubResourceResponse'] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, path_rules: Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']] = None, provisioning_state: Optional[str] = None): """ UrlPathMaps give a url path to the backend mapping information for PathBasedRouting. :param 'SubResourceResponseArgs' default_backend_address_pool: Default backend address pool resource of URL path map. :param 'SubResourceResponseArgs' default_backend_http_settings: Default backend http settings resource of URL path map. :param str etag: A unique read-only string that changes whenever the resource is updated. 
:param str id: Resource ID. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :param Sequence['ApplicationGatewayPathRuleResponseArgs'] path_rules: Path rule of URL path map resource. :param str provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ if default_backend_address_pool is not None: pulumi.set(__self__, "default_backend_address_pool", default_backend_address_pool) if default_backend_http_settings is not None: pulumi.set(__self__, "default_backend_http_settings", default_backend_http_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if path_rules is not None: pulumi.set(__self__, "path_rules", path_rules) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="defaultBackendAddressPool") def default_backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: """ Default backend address pool resource of URL path map. """ return pulumi.get(self, "default_backend_address_pool") @property @pulumi.getter(name="defaultBackendHttpSettings") def default_backend_http_settings(self) -> Optional['outputs.SubResourceResponse']: """ Default backend http settings resource of URL path map. """ return pulumi.get(self, "default_backend_http_settings") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. 
""" return pulumi.get(self, "name") @property @pulumi.getter(name="pathRules") def path_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']]: """ Path rule of URL path map resource. """ return pulumi.get(self, "path_rules") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayWebApplicationFirewallConfigurationResponse(dict): """ Application gateway web application firewall configuration. """ def __init__(__self__, *, enabled: bool, firewall_mode: Optional[str] = None): """ Application gateway web application firewall configuration. :param bool enabled: Whether the web application firewall is enabled. :param str firewall_mode: Web application firewall mode. Possible values are: 'Detection' and 'Prevention'. """ pulumi.set(__self__, "enabled", enabled) if firewall_mode is not None: pulumi.set(__self__, "firewall_mode", firewall_mode) @property @pulumi.getter def enabled(self) -> bool: """ Whether the web application firewall is enabled. """ return pulumi.get(self, "enabled") @property @pulumi.getter(name="firewallMode") def firewall_mode(self) -> Optional[str]: """ Web application firewall mode. Possible values are: 'Detection' and 'Prevention'. """ return pulumi.get(self, "firewall_mode") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class BackendAddressPoolResponse(dict): """ Pool of backend IP addresses. 
""" def __init__(__self__, *, backend_ip_configurations: Sequence['outputs.NetworkInterfaceIPConfigurationResponse'], load_balancing_rules: Sequence['outputs.SubResourceResponse'], outbound_nat_rule: 'outputs.SubResourceResponse', etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): """ Pool of backend IP addresses. :param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] backend_ip_configurations: Gets collection of references to IP addresses defined in network interfaces. :param Sequence['SubResourceResponseArgs'] load_balancing_rules: Gets load balancing rules that use this backend address pool. :param 'SubResourceResponseArgs' outbound_nat_rule: Gets outbound rules that use this backend address pool. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :param str provisioning_state: Get provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations) pulumi.set(__self__, "load_balancing_rules", load_balancing_rules) pulumi.set(__self__, "outbound_nat_rule", outbound_nat_rule) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendIPConfigurations") def backend_ip_configurations(self) -> Sequence['outputs.NetworkInterfaceIPConfigurationResponse']: """ Gets collection of references to IP addresses defined in network interfaces. 
""" return pulumi.get(self, "backend_ip_configurations") @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']: """ Gets load balancing rules that use this backend address pool. """ return pulumi.get(self, "load_balancing_rules") @property @pulumi.getter(name="outboundNatRule") def outbound_nat_rule(self) -> 'outputs.SubResourceResponse': """ Gets outbound rules that use this backend address pool. """ return pulumi.get(self, "outbound_nat_rule") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Get provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class BgpPeerStatusResponseResult(dict): def __init__(__self__, *, asn: int, connected_duration: str, local_address: str, messages_received: float, messages_sent: float, neighbor: str, routes_received: float, state: str): """ :param int asn: The autonomous system number of the remote BGP peer :param str connected_duration: For how long the peering has been up :param str local_address: The virtual network gateway's local address :param float messages_received: The number of BGP messages received :param float messages_sent: The number of BGP messages sent :param str neighbor: The remote BGP peer :param float routes_received: The number of routes learned from this peer :param str state: The BGP peer state """ pulumi.set(__self__, "asn", asn) pulumi.set(__self__, "connected_duration", connected_duration) pulumi.set(__self__, "local_address", local_address) pulumi.set(__self__, "messages_received", messages_received) pulumi.set(__self__, "messages_sent", messages_sent) pulumi.set(__self__, "neighbor", neighbor) pulumi.set(__self__, "routes_received", routes_received) pulumi.set(__self__, "state", state) @property @pulumi.getter def asn(self) -> int: """ The autonomous system number of the remote BGP peer """ return pulumi.get(self, "asn") @property @pulumi.getter(name="connectedDuration") def connected_duration(self) -> str: """ For how long the peering has been up """ return pulumi.get(self, "connected_duration") @property @pulumi.getter(name="localAddress") def local_address(self) -> str: """ The virtual network gateway's local address """ return pulumi.get(self, "local_address") @property @pulumi.getter(name="messagesReceived") def messages_received(self) -> float: """ The number of BGP messages received """ return pulumi.get(self, "messages_received") @property @pulumi.getter(name="messagesSent") def 
@pulumi.output_type
class BgpSettingsResponse(dict):
    """
    BGP speaker settings: ASN, peering address/identifier, and route weight.
    """
    def __init__(__self__, *,
                 asn: Optional[float] = None,
                 bgp_peering_address: Optional[str] = None,
                 peer_weight: Optional[int] = None):
        """
        BGP speaker settings.

        :param float asn: The BGP speaker's ASN.
        :param str bgp_peering_address: The BGP peering address and BGP identifier of this BGP speaker.
        :param int peer_weight: The weight added to routes learned from this BGP speaker.
        """
        # Only explicitly supplied fields are stored; None means "unset".
        if asn is not None:
            pulumi.set(__self__, "asn", asn)
        if bgp_peering_address is not None:
            pulumi.set(__self__, "bgp_peering_address", bgp_peering_address)
        if peer_weight is not None:
            pulumi.set(__self__, "peer_weight", peer_weight)

    @property
    @pulumi.getter
    def asn(self) -> Optional[float]:
        """
        The BGP speaker's ASN.
        """
        return pulumi.get(self, "asn")

    @property
    @pulumi.getter(name="bgpPeeringAddress")
    def bgp_peering_address(self) -> Optional[str]:
        """
        The BGP peering address and BGP identifier of this BGP speaker.
        """
        return pulumi.get(self, "bgp_peering_address")

    @property
    @pulumi.getter(name="peerWeight")
    def peer_weight(self) -> Optional[int]:
        """
        The weight added to routes learned from this BGP speaker.
        """
        return pulumi.get(self, "peer_weight")

    def _translate_property(self, prop):
        # Maps camelCase wire-format names to snake_case attribute names via
        # the shared lookup table; unknown names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
""" def __init__(__self__, *, dns_servers: Optional[Sequence[str]] = None): """ DhcpOptions contains an array of DNS servers available to VMs deployed in the virtual network. Standard DHCP option for a subnet overrides VNET DHCP options. :param Sequence[str] dns_servers: The list of DNS servers IP addresses. """ if dns_servers is not None: pulumi.set(__self__, "dns_servers", dns_servers) @property @pulumi.getter(name="dnsServers") def dns_servers(self) -> Optional[Sequence[str]]: """ The list of DNS servers IP addresses. """ return pulumi.get(self, "dns_servers") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitAuthorizationResponse(dict): """ Authorization in an ExpressRouteCircuit resource. """ def __init__(__self__, *, authorization_key: Optional[str] = None, authorization_use_status: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): """ Authorization in an ExpressRouteCircuit resource. :param str authorization_key: The authorization key. :param str authorization_use_status: AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" if authorization_key is not None: pulumi.set(__self__, "authorization_key", authorization_key) if authorization_use_status is not None: pulumi.set(__self__, "authorization_use_status", authorization_use_status) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="authorizationKey") def authorization_key(self) -> Optional[str]: """ The authorization key. """ return pulumi.get(self, "authorization_key") @property @pulumi.getter(name="authorizationUseStatus") def authorization_use_status(self) -> Optional[str]: """ AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'. """ return pulumi.get(self, "authorization_use_status") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitPeeringConfigResponse(dict): """ Specifies the peering configuration. 
""" def __init__(__self__, *, advertised_public_prefixes: Optional[Sequence[str]] = None, advertised_public_prefixes_state: Optional[str] = None, customer_asn: Optional[int] = None, routing_registry_name: Optional[str] = None): """ Specifies the peering configuration. :param Sequence[str] advertised_public_prefixes: The reference of AdvertisedPublicPrefixes. :param str advertised_public_prefixes_state: AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured', 'Configuring', 'Configured', and 'ValidationNeeded'. :param int customer_asn: The CustomerASN of the peering. :param str routing_registry_name: The RoutingRegistryName of the configuration. """ if advertised_public_prefixes is not None: pulumi.set(__self__, "advertised_public_prefixes", advertised_public_prefixes) if advertised_public_prefixes_state is not None: pulumi.set(__self__, "advertised_public_prefixes_state", advertised_public_prefixes_state) if customer_asn is not None: pulumi.set(__self__, "customer_asn", customer_asn) if routing_registry_name is not None: pulumi.set(__self__, "routing_registry_name", routing_registry_name) @property @pulumi.getter(name="advertisedPublicPrefixes") def advertised_public_prefixes(self) -> Optional[Sequence[str]]: """ The reference of AdvertisedPublicPrefixes. """ return pulumi.get(self, "advertised_public_prefixes") @property @pulumi.getter(name="advertisedPublicPrefixesState") def advertised_public_prefixes_state(self) -> Optional[str]: """ AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured', 'Configuring', 'Configured', and 'ValidationNeeded'. """ return pulumi.get(self, "advertised_public_prefixes_state") @property @pulumi.getter(name="customerASN") def customer_asn(self) -> Optional[int]: """ The CustomerASN of the peering. 
""" return pulumi.get(self, "customer_asn") @property @pulumi.getter(name="routingRegistryName") def routing_registry_name(self) -> Optional[str]: """ The RoutingRegistryName of the configuration. """ return pulumi.get(self, "routing_registry_name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitPeeringResponse(dict): """ Peering in an ExpressRouteCircuit resource. """ def __init__(__self__, *, azure_asn: Optional[int] = None, etag: Optional[str] = None, gateway_manager_etag: Optional[str] = None, id: Optional[str] = None, last_modified_by: Optional[str] = None, microsoft_peering_config: Optional['outputs.ExpressRouteCircuitPeeringConfigResponse'] = None, name: Optional[str] = None, peer_asn: Optional[int] = None, peering_type: Optional[str] = None, primary_azure_port: Optional[str] = None, primary_peer_address_prefix: Optional[str] = None, provisioning_state: Optional[str] = None, secondary_azure_port: Optional[str] = None, secondary_peer_address_prefix: Optional[str] = None, shared_key: Optional[str] = None, state: Optional[str] = None, stats: Optional['outputs.ExpressRouteCircuitStatsResponse'] = None, vlan_id: Optional[int] = None): """ Peering in an ExpressRouteCircuit resource. :param int azure_asn: The Azure ASN. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str gateway_manager_etag: The GatewayManager Etag. :param str id: Resource ID. :param str last_modified_by: Gets whether the provider or the customer last modified the peering. :param 'ExpressRouteCircuitPeeringConfigResponseArgs' microsoft_peering_config: The Microsoft peering configuration. :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :param int peer_asn: The peer ASN. :param str peering_type: The PeeringType. 
Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'. :param str primary_azure_port: The primary port. :param str primary_peer_address_prefix: The primary address prefix. :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str secondary_azure_port: The secondary port. :param str secondary_peer_address_prefix: The secondary address prefix. :param str shared_key: The shared key. :param str state: The state of peering. Possible values are: 'Disabled' and 'Enabled' :param 'ExpressRouteCircuitStatsResponseArgs' stats: Gets peering stats. :param int vlan_id: The VLAN ID. """ if azure_asn is not None: pulumi.set(__self__, "azure_asn", azure_asn) if etag is not None: pulumi.set(__self__, "etag", etag) if gateway_manager_etag is not None: pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag) if id is not None: pulumi.set(__self__, "id", id) if last_modified_by is not None: pulumi.set(__self__, "last_modified_by", last_modified_by) if microsoft_peering_config is not None: pulumi.set(__self__, "microsoft_peering_config", microsoft_peering_config) if name is not None: pulumi.set(__self__, "name", name) if peer_asn is not None: pulumi.set(__self__, "peer_asn", peer_asn) if peering_type is not None: pulumi.set(__self__, "peering_type", peering_type) if primary_azure_port is not None: pulumi.set(__self__, "primary_azure_port", primary_azure_port) if primary_peer_address_prefix is not None: pulumi.set(__self__, "primary_peer_address_prefix", primary_peer_address_prefix) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if secondary_azure_port is not None: pulumi.set(__self__, "secondary_azure_port", secondary_azure_port) if secondary_peer_address_prefix is not None: pulumi.set(__self__, "secondary_peer_address_prefix", secondary_peer_address_prefix) if shared_key is not None: 
pulumi.set(__self__, "shared_key", shared_key) if state is not None: pulumi.set(__self__, "state", state) if stats is not None: pulumi.set(__self__, "stats", stats) if vlan_id is not None: pulumi.set(__self__, "vlan_id", vlan_id) @property @pulumi.getter(name="azureASN") def azure_asn(self) -> Optional[int]: """ The Azure ASN. """ return pulumi.get(self, "azure_asn") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="gatewayManagerEtag") def gateway_manager_etag(self) -> Optional[str]: """ The GatewayManager Etag. """ return pulumi.get(self, "gateway_manager_etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="lastModifiedBy") def last_modified_by(self) -> Optional[str]: """ Gets whether the provider or the customer last modified the peering. """ return pulumi.get(self, "last_modified_by") @property @pulumi.getter(name="microsoftPeeringConfig") def microsoft_peering_config(self) -> Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']: """ The Microsoft peering configuration. """ return pulumi.get(self, "microsoft_peering_config") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="peerASN") def peer_asn(self) -> Optional[int]: """ The peer ASN. """ return pulumi.get(self, "peer_asn") @property @pulumi.getter(name="peeringType") def peering_type(self) -> Optional[str]: """ The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'. 
""" return pulumi.get(self, "peering_type") @property @pulumi.getter(name="primaryAzurePort") def primary_azure_port(self) -> Optional[str]: """ The primary port. """ return pulumi.get(self, "primary_azure_port") @property @pulumi.getter(name="primaryPeerAddressPrefix") def primary_peer_address_prefix(self) -> Optional[str]: """ The primary address prefix. """ return pulumi.get(self, "primary_peer_address_prefix") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="secondaryAzurePort") def secondary_azure_port(self) -> Optional[str]: """ The secondary port. """ return pulumi.get(self, "secondary_azure_port") @property @pulumi.getter(name="secondaryPeerAddressPrefix") def secondary_peer_address_prefix(self) -> Optional[str]: """ The secondary address prefix. """ return pulumi.get(self, "secondary_peer_address_prefix") @property @pulumi.getter(name="sharedKey") def shared_key(self) -> Optional[str]: """ The shared key. """ return pulumi.get(self, "shared_key") @property @pulumi.getter def state(self) -> Optional[str]: """ The state of peering. Possible values are: 'Disabled' and 'Enabled' """ return pulumi.get(self, "state") @property @pulumi.getter def stats(self) -> Optional['outputs.ExpressRouteCircuitStatsResponse']: """ Gets peering stats. """ return pulumi.get(self, "stats") @property @pulumi.getter(name="vlanId") def vlan_id(self) -> Optional[int]: """ The VLAN ID. """ return pulumi.get(self, "vlan_id") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitServiceProviderPropertiesResponse(dict): """ Contains ServiceProviderProperties in an ExpressRouteCircuit. 
""" def __init__(__self__, *, bandwidth_in_mbps: Optional[int] = None, peering_location: Optional[str] = None, service_provider_name: Optional[str] = None): """ Contains ServiceProviderProperties in an ExpressRouteCircuit. :param int bandwidth_in_mbps: The BandwidthInMbps. :param str peering_location: The peering location. :param str service_provider_name: The serviceProviderName. """ if bandwidth_in_mbps is not None: pulumi.set(__self__, "bandwidth_in_mbps", bandwidth_in_mbps) if peering_location is not None: pulumi.set(__self__, "peering_location", peering_location) if service_provider_name is not None: pulumi.set(__self__, "service_provider_name", service_provider_name) @property @pulumi.getter(name="bandwidthInMbps") def bandwidth_in_mbps(self) -> Optional[int]: """ The BandwidthInMbps. """ return pulumi.get(self, "bandwidth_in_mbps") @property @pulumi.getter(name="peeringLocation") def peering_location(self) -> Optional[str]: """ The peering location. """ return pulumi.get(self, "peering_location") @property @pulumi.getter(name="serviceProviderName") def service_provider_name(self) -> Optional[str]: """ The serviceProviderName. """ return pulumi.get(self, "service_provider_name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitSkuResponse(dict): """ Contains SKU in an ExpressRouteCircuit. """ def __init__(__self__, *, family: Optional[str] = None, name: Optional[str] = None, tier: Optional[str] = None): """ Contains SKU in an ExpressRouteCircuit. :param str family: The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'. :param str name: The name of the SKU. :param str tier: The tier of the SKU. Possible values are 'Standard' and 'Premium'. 
""" if family is not None: pulumi.set(__self__, "family", family) if name is not None: pulumi.set(__self__, "name", name) if tier is not None: pulumi.set(__self__, "tier", tier) @property @pulumi.getter def family(self) -> Optional[str]: """ The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'. """ return pulumi.get(self, "family") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the SKU. """ return pulumi.get(self, "name") @property @pulumi.getter def tier(self) -> Optional[str]: """ The tier of the SKU. Possible values are 'Standard' and 'Premium'. """ return pulumi.get(self, "tier") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitStatsResponse(dict): """ Contains stats associated with the peering. """ def __init__(__self__, *, primarybytes_in: Optional[float] = None, primarybytes_out: Optional[float] = None, secondarybytes_in: Optional[float] = None, secondarybytes_out: Optional[float] = None): """ Contains stats associated with the peering. :param float primarybytes_in: Gets BytesIn of the peering. :param float primarybytes_out: Gets BytesOut of the peering. :param float secondarybytes_in: Gets BytesIn of the peering. :param float secondarybytes_out: Gets BytesOut of the peering. """ if primarybytes_in is not None: pulumi.set(__self__, "primarybytes_in", primarybytes_in) if primarybytes_out is not None: pulumi.set(__self__, "primarybytes_out", primarybytes_out) if secondarybytes_in is not None: pulumi.set(__self__, "secondarybytes_in", secondarybytes_in) if secondarybytes_out is not None: pulumi.set(__self__, "secondarybytes_out", secondarybytes_out) @property @pulumi.getter(name="primarybytesIn") def primarybytes_in(self) -> Optional[float]: """ Gets BytesIn of the peering. 
""" return pulumi.get(self, "primarybytes_in") @property @pulumi.getter(name="primarybytesOut") def primarybytes_out(self) -> Optional[float]: """ Gets BytesOut of the peering. """ return pulumi.get(self, "primarybytes_out") @property @pulumi.getter(name="secondarybytesIn") def secondarybytes_in(self) -> Optional[float]: """ Gets BytesIn of the peering. """ return pulumi.get(self, "secondarybytes_in") @property @pulumi.getter(name="secondarybytesOut") def secondarybytes_out(self) -> Optional[float]: """ Gets BytesOut of the peering. """ return pulumi.get(self, "secondarybytes_out") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class FrontendIPConfigurationResponse(dict): """ Frontend IP address of the load balancer. """ def __init__(__self__, *, inbound_nat_pools: Sequence['outputs.SubResourceResponse'], inbound_nat_rules: Sequence['outputs.SubResourceResponse'], load_balancing_rules: Sequence['outputs.SubResourceResponse'], outbound_nat_rules: Sequence['outputs.SubResourceResponse'], etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_address: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None, subnet: Optional['outputs.SubnetResponse'] = None): """ Frontend IP address of the load balancer. :param Sequence['SubResourceResponseArgs'] inbound_nat_pools: Read only. Inbound pools URIs that use this frontend IP. :param Sequence['SubResourceResponseArgs'] inbound_nat_rules: Read only. Inbound rules URIs that use this frontend IP. :param Sequence['SubResourceResponseArgs'] load_balancing_rules: Gets load balancing rules URIs that use this frontend IP. :param Sequence['SubResourceResponseArgs'] outbound_nat_rules: Read only. Outbound rules URIs that use this frontend IP. 
:param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str private_ip_address: The private IP address of the IP configuration. :param str private_ip_allocation_method: The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'. :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param 'PublicIPAddressResponseArgs' public_ip_address: The reference of the Public IP resource. :param 'SubnetResponseArgs' subnet: The reference of the subnet resource. """ pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools) pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules) pulumi.set(__self__, "load_balancing_rules", load_balancing_rules) pulumi.set(__self__, "outbound_nat_rules", outbound_nat_rules) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter(name="inboundNatPools") def inbound_nat_pools(self) -> Sequence['outputs.SubResourceResponse']: """ Read only. Inbound pools URIs that use this frontend IP. 
""" return pulumi.get(self, "inbound_nat_pools") @property @pulumi.getter(name="inboundNatRules") def inbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']: """ Read only. Inbound rules URIs that use this frontend IP. """ return pulumi.get(self, "inbound_nat_rules") @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']: """ Gets load balancing rules URIs that use this frontend IP. """ return pulumi.get(self, "load_balancing_rules") @property @pulumi.getter(name="outboundNatRules") def outbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']: """ Read only. Outbound rules URIs that use this frontend IP. """ return pulumi.get(self, "outbound_nat_rules") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: """ The private IP address of the IP configuration. """ return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: """ The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'. """ return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']: """ The reference of the Public IP resource. """ return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubnetResponse']: """ The reference of the subnet resource. """ return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class GatewayRouteResponseResult(dict): def __init__(__self__, *, as_path: str, local_address: str, network: str, next_hop: str, origin: str, source_peer: str, weight: int): """ :param str as_path: The route's AS path sequence :param str local_address: The gateway's local address :param str network: The route's network prefix :param str next_hop: The route's next hop :param str origin: The source this route was learned from :param str source_peer: The peer this route was learned from :param int weight: The route's weight """ pulumi.set(__self__, "as_path", as_path) pulumi.set(__self__, "local_address", local_address) pulumi.set(__self__, "network", network) pulumi.set(__self__, "next_hop", next_hop) pulumi.set(__self__, "origin", origin) pulumi.set(__self__, "source_peer", source_peer) pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="asPath") def as_path(self) -> str: """ The route's AS path sequence """ return pulumi.get(self, "as_path") @property @pulumi.getter(name="localAddress") def local_address(self) -> str: """ The gateway's local address """ return pulumi.get(self, "local_address") @property @pulumi.getter def network(self) -> str: """ The route's network prefix """ return pulumi.get(self, "network") @property @pulumi.getter(name="nextHop") def next_hop(self) -> str: """ The route's next hop """ return pulumi.get(self, "next_hop") @property @pulumi.getter def origin(self) -> str: """ The source this 
route was learned from """ return pulumi.get(self, "origin") @property @pulumi.getter(name="sourcePeer") def source_peer(self) -> str: """ The peer this route was learned from """ return pulumi.get(self, "source_peer") @property @pulumi.getter def weight(self) -> int: """ The route's weight """ return pulumi.get(self, "weight") @pulumi.output_type class IPConfigurationResponse(dict): """ IPConfiguration """ def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_address: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None, subnet: Optional['outputs.SubnetResponse'] = None): """ IPConfiguration :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str private_ip_address: The private IP address of the IP configuration. :param str private_ip_allocation_method: The private IP allocation method. Possible values are 'Static' and 'Dynamic'. :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param 'PublicIPAddressResponseArgs' public_ip_address: The reference of the public IP resource. :param 'SubnetResponseArgs' subnet: The reference of the subnet resource. 
""" if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: """ The private IP address of the IP configuration. """ return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: """ The private IP allocation method. Possible values are 'Static' and 'Dynamic'. """ return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']: """ The reference of the public IP resource. """ return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubnetResponse']: """ The reference of the subnet resource. """ return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class InboundNatPoolResponse(dict): """ Inbound NAT pool of the load balancer. """ def __init__(__self__, *, backend_port: int, frontend_port_range_end: int, frontend_port_range_start: int, protocol: str, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): """ Inbound NAT pool of the load balancer. :param int backend_port: The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535. :param int frontend_port_range_end: The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535. :param int frontend_port_range_start: The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534. :param str protocol: The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'. :param str etag: A unique read-only string that changes whenever the resource is updated. :param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. 
:param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ pulumi.set(__self__, "backend_port", backend_port) pulumi.set(__self__, "frontend_port_range_end", frontend_port_range_end) pulumi.set(__self__, "frontend_port_range_start", frontend_port_range_start) pulumi.set(__self__, "protocol", protocol) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendPort") def backend_port(self) -> int: """ The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535. """ return pulumi.get(self, "backend_port") @property @pulumi.getter(name="frontendPortRangeEnd") def frontend_port_range_end(self) -> int: """ The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535. """ return pulumi.get(self, "frontend_port_range_end") @property @pulumi.getter(name="frontendPortRangeStart") def frontend_port_range_start(self) -> int: """ The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534. """ return pulumi.get(self, "frontend_port_range_start") @property @pulumi.getter def protocol(self) -> str: """ The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'. """ return pulumi.get(self, "protocol") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: """ A reference to frontend IP addresses. """ return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class InboundNatRuleResponse(dict): """ Inbound NAT rule of the load balancer. """ def __init__(__self__, *, backend_ip_configuration: 'outputs.NetworkInterfaceIPConfigurationResponse', backend_port: Optional[int] = None, enable_floating_ip: Optional[bool] = None, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, frontend_port: Optional[int] = None, id: Optional[str] = None, idle_timeout_in_minutes: Optional[int] = None, name: Optional[str] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None): """ Inbound NAT rule of the load balancer. :param 'NetworkInterfaceIPConfigurationResponseArgs' backend_ip_configuration: A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backed IP. :param int backend_port: The port used for the internal endpoint. Acceptable values range from 1 to 65535. 
:param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. :param str etag: A unique read-only string that changes whenever the resource is updated. :param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses. :param int frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534. :param str id: Resource ID. :param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :param str protocol: The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp' :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" pulumi.set(__self__, "backend_ip_configuration", backend_ip_configuration) if backend_port is not None: pulumi.set(__self__, "backend_port", backend_port) if enable_floating_ip is not None: pulumi.set(__self__, "enable_floating_ip", enable_floating_ip) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if frontend_port is not None: pulumi.set(__self__, "frontend_port", frontend_port) if id is not None: pulumi.set(__self__, "id", id) if idle_timeout_in_minutes is not None: pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes) if name is not None: pulumi.set(__self__, "name", name) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendIPConfiguration") def backend_ip_configuration(self) -> 'outputs.NetworkInterfaceIPConfigurationResponse': """ A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backed IP. """ return pulumi.get(self, "backend_ip_configuration") @property @pulumi.getter(name="backendPort") def backend_port(self) -> Optional[int]: """ The port used for the internal endpoint. Acceptable values range from 1 to 65535. """ return pulumi.get(self, "backend_port") @property @pulumi.getter(name="enableFloatingIP") def enable_floating_ip(self) -> Optional[bool]: """ Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. 
""" return pulumi.get(self, "enable_floating_ip") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: """ A reference to frontend IP addresses. """ return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter(name="frontendPort") def frontend_port(self) -> Optional[int]: """ The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534. """ return pulumi.get(self, "frontend_port") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="idleTimeoutInMinutes") def idle_timeout_in_minutes(self) -> Optional[int]: """ The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. """ return pulumi.get(self, "idle_timeout_in_minutes") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def protocol(self) -> Optional[str]: """ The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp' """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class LoadBalancingRuleResponse(dict): """ A load balancing rule for a load balancer. """ def __init__(__self__, *, frontend_port: int, protocol: str, backend_address_pool: Optional['outputs.SubResourceResponse'] = None, backend_port: Optional[int] = None, enable_floating_ip: Optional[bool] = None, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, idle_timeout_in_minutes: Optional[int] = None, load_distribution: Optional[str] = None, name: Optional[str] = None, probe: Optional['outputs.SubResourceResponse'] = None, provisioning_state: Optional[str] = None): """ A load balancing rule for a load balancer. :param int frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534. :param str protocol: The transport protocol for the external endpoint. Possible values are 'Udp' or 'Tcp' :param 'SubResourceResponseArgs' backend_address_pool: A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs. :param int backend_port: The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535. :param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. :param str etag: A unique read-only string that changes whenever the resource is updated. :param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses. :param str id: Resource ID. 
:param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. :param str load_distribution: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param 'SubResourceResponseArgs' probe: The reference of the load balancer probe used by the load balancing rule. :param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ pulumi.set(__self__, "frontend_port", frontend_port) pulumi.set(__self__, "protocol", protocol) if backend_address_pool is not None: pulumi.set(__self__, "backend_address_pool", backend_address_pool) if backend_port is not None: pulumi.set(__self__, "backend_port", backend_port) if enable_floating_ip is not None: pulumi.set(__self__, "enable_floating_ip", enable_floating_ip) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if id is not None: pulumi.set(__self__, "id", id) if idle_timeout_in_minutes is not None: pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes) if load_distribution is not None: pulumi.set(__self__, "load_distribution", load_distribution) if name is not None: pulumi.set(__self__, "name", name) if probe is not None: pulumi.set(__self__, "probe", probe) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="frontendPort") def frontend_port(self) -> int: """ The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534. 
""" return pulumi.get(self, "frontend_port") @property @pulumi.getter def protocol(self) -> str: """ The transport protocol for the external endpoint. Possible values are 'Udp' or 'Tcp' """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: """ A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs. """ return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="backendPort") def backend_port(self) -> Optional[int]: """ The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535. """ return pulumi.get(self, "backend_port") @property @pulumi.getter(name="enableFloatingIP") def enable_floating_ip(self) -> Optional[bool]: """ Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. """ return pulumi.get(self, "enable_floating_ip") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: """ A reference to frontend IP addresses. """ return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="idleTimeoutInMinutes") def idle_timeout_in_minutes(self) -> Optional[int]: """ The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. 
""" return pulumi.get(self, "idle_timeout_in_minutes") @property @pulumi.getter(name="loadDistribution") def load_distribution(self) -> Optional[str]: """ The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'. """ return pulumi.get(self, "load_distribution") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def probe(self) -> Optional['outputs.SubResourceResponse']: """ The reference of the load balancer probe used by the load balancing rule. """ return pulumi.get(self, "probe") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class LocalNetworkGatewayResponse(dict): """ A common class for general resource information """ def __init__(__self__, *, local_network_address_space: 'outputs.AddressSpaceResponse', name: str, provisioning_state: str, type: str, bgp_settings: Optional['outputs.BgpSettingsResponse'] = None, etag: Optional[str] = None, gateway_ip_address: Optional[str] = None, id: Optional[str] = None, location: Optional[str] = None, resource_guid: Optional[str] = None, tags: Optional[Mapping[str, str]] = None): """ A common class for general resource information :param 'AddressSpaceResponseArgs' local_network_address_space: Local network site address space. :param str name: Resource name. :param str provisioning_state: The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str type: Resource type. 
:param 'BgpSettingsResponseArgs' bgp_settings: Local network gateway's BGP speaker settings. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str gateway_ip_address: IP address of local network gateway. :param str id: Resource ID. :param str location: Resource location. :param str resource_guid: The resource GUID property of the LocalNetworkGateway resource. :param Mapping[str, str] tags: Resource tags. """ pulumi.set(__self__, "local_network_address_space", local_network_address_space) pulumi.set(__self__, "name", name) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "type", type) if bgp_settings is not None: pulumi.set(__self__, "bgp_settings", bgp_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if gateway_ip_address is not None: pulumi.set(__self__, "gateway_ip_address", gateway_ip_address) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="localNetworkAddressSpace") def local_network_address_space(self) -> 'outputs.AddressSpaceResponse': """ Local network site address space. """ return pulumi.get(self, "local_network_address_space") @property @pulumi.getter def name(self) -> str: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: """ Resource type. 
""" return pulumi.get(self, "type") @property @pulumi.getter(name="bgpSettings") def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']: """ Local network gateway's BGP speaker settings. """ return pulumi.get(self, "bgp_settings") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="gatewayIpAddress") def gateway_ip_address(self) -> Optional[str]: """ IP address of local network gateway. """ return pulumi.get(self, "gateway_ip_address") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: """ The resource GUID property of the LocalNetworkGateway resource. """ return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkInterfaceDnsSettingsResponse(dict): """ DNS settings of a network interface. """ def __init__(__self__, *, applied_dns_servers: Optional[Sequence[str]] = None, dns_servers: Optional[Sequence[str]] = None, internal_dns_name_label: Optional[str] = None, internal_domain_name_suffix: Optional[str] = None, internal_fqdn: Optional[str] = None): """ DNS settings of a network interface. :param Sequence[str] applied_dns_servers: If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs. 
:param Sequence[str] dns_servers: List of DNS servers IP addresses. Use 'AzureProvidedDNS' to switch to azure provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in dnsServers collection. :param str internal_dns_name_label: Relative DNS name for this NIC used for internal communications between VMs in the same virtual network. :param str internal_domain_name_suffix: Even if internalDnsNameLabel is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of internalDomainNameSuffix. :param str internal_fqdn: Fully qualified DNS name supporting internal communications between VMs in the same virtual network. """ if applied_dns_servers is not None: pulumi.set(__self__, "applied_dns_servers", applied_dns_servers) if dns_servers is not None: pulumi.set(__self__, "dns_servers", dns_servers) if internal_dns_name_label is not None: pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label) if internal_domain_name_suffix is not None: pulumi.set(__self__, "internal_domain_name_suffix", internal_domain_name_suffix) if internal_fqdn is not None: pulumi.set(__self__, "internal_fqdn", internal_fqdn) @property @pulumi.getter(name="appliedDnsServers") def applied_dns_servers(self) -> Optional[Sequence[str]]: """ If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs. """ return pulumi.get(self, "applied_dns_servers") @property @pulumi.getter(name="dnsServers") def dns_servers(self) -> Optional[Sequence[str]]: """ List of DNS servers IP addresses. Use 'AzureProvidedDNS' to switch to azure provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in dnsServers collection. 
""" return pulumi.get(self, "dns_servers") @property @pulumi.getter(name="internalDnsNameLabel") def internal_dns_name_label(self) -> Optional[str]: """ Relative DNS name for this NIC used for internal communications between VMs in the same virtual network. """ return pulumi.get(self, "internal_dns_name_label") @property @pulumi.getter(name="internalDomainNameSuffix") def internal_domain_name_suffix(self) -> Optional[str]: """ Even if internalDnsNameLabel is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of internalDomainNameSuffix. """ return pulumi.get(self, "internal_domain_name_suffix") @property @pulumi.getter(name="internalFqdn") def internal_fqdn(self) -> Optional[str]: """ Fully qualified DNS name supporting internal communications between VMs in the same virtual network. """ return pulumi.get(self, "internal_fqdn") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkInterfaceIPConfigurationResponse(dict): """ IPConfiguration in a network interface. """ def __init__(__self__, *, application_gateway_backend_address_pools: Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']] = None, etag: Optional[str] = None, id: Optional[str] = None, load_balancer_backend_address_pools: Optional[Sequence['outputs.BackendAddressPoolResponse']] = None, load_balancer_inbound_nat_rules: Optional[Sequence['outputs.InboundNatRuleResponse']] = None, name: Optional[str] = None, primary: Optional[bool] = None, private_ip_address: Optional[str] = None, private_ip_address_version: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None, subnet: Optional['outputs.SubnetResponse'] = None): """ IPConfiguration in a network interface. 
:param Sequence['ApplicationGatewayBackendAddressPoolResponseArgs'] application_gateway_backend_address_pools: The reference of ApplicationGatewayBackendAddressPool resource. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param Sequence['BackendAddressPoolResponseArgs'] load_balancer_backend_address_pools: The reference of LoadBalancerBackendAddressPool resource. :param Sequence['InboundNatRuleResponseArgs'] load_balancer_inbound_nat_rules: A list of references of LoadBalancerInboundNatRules. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param bool primary: Gets whether this is a primary customer address on the network interface. :param str private_ip_address_version: Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. :param str private_ip_allocation_method: Defines how a private IP address is assigned. Possible values are: 'Static' and 'Dynamic'. :param 'PublicIPAddressResponseArgs' public_ip_address: Public IP address resource. :param 'SubnetResponseArgs' subnet: Subnet in a virtual network resource. 
""" if application_gateway_backend_address_pools is not None: pulumi.set(__self__, "application_gateway_backend_address_pools", application_gateway_backend_address_pools) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if load_balancer_backend_address_pools is not None: pulumi.set(__self__, "load_balancer_backend_address_pools", load_balancer_backend_address_pools) if load_balancer_inbound_nat_rules is not None: pulumi.set(__self__, "load_balancer_inbound_nat_rules", load_balancer_inbound_nat_rules) if name is not None: pulumi.set(__self__, "name", name) if primary is not None: pulumi.set(__self__, "primary", primary) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_address_version is not None: pulumi.set(__self__, "private_ip_address_version", private_ip_address_version) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter(name="applicationGatewayBackendAddressPools") def application_gateway_backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]: """ The reference of ApplicationGatewayBackendAddressPool resource. """ return pulumi.get(self, "application_gateway_backend_address_pools") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
""" return pulumi.get(self, "id") @property @pulumi.getter(name="loadBalancerBackendAddressPools") def load_balancer_backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]: """ The reference of LoadBalancerBackendAddressPool resource. """ return pulumi.get(self, "load_balancer_backend_address_pools") @property @pulumi.getter(name="loadBalancerInboundNatRules") def load_balancer_inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]: """ A list of references of LoadBalancerInboundNatRules. """ return pulumi.get(self, "load_balancer_inbound_nat_rules") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def primary(self) -> Optional[bool]: """ Gets whether this is a primary customer address on the network interface. """ return pulumi.get(self, "primary") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAddressVersion") def private_ip_address_version(self) -> Optional[str]: """ Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. """ return pulumi.get(self, "private_ip_address_version") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: """ Defines how a private IP address is assigned. Possible values are: 'Static' and 'Dynamic'. 
""" return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']: """ Public IP address resource. """ return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubnetResponse']: """ Subnet in a virtual network resource. """ return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkInterfaceResponse(dict): """ A network interface in a resource group. """ def __init__(__self__, *, name: str, type: str, dns_settings: Optional['outputs.NetworkInterfaceDnsSettingsResponse'] = None, enable_accelerated_networking: Optional[bool] = None, enable_ip_forwarding: Optional[bool] = None, etag: Optional[str] = None, id: Optional[str] = None, ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None, location: Optional[str] = None, mac_address: Optional[str] = None, network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None, primary: Optional[bool] = None, provisioning_state: Optional[str] = None, resource_guid: Optional[str] = None, tags: Optional[Mapping[str, str]] = None, virtual_machine: Optional['outputs.SubResourceResponse'] = None): """ A network interface in a resource group. :param str name: Resource name. :param str type: Resource type. :param 'NetworkInterfaceDnsSettingsResponseArgs' dns_settings: The DNS settings in network interface. :param bool enable_accelerated_networking: If the network interface is accelerated networking enabled. :param bool enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network interface. 
:param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] ip_configurations: A list of IPConfigurations of the network interface. :param str location: Resource location. :param str mac_address: The MAC address of the network interface. :param 'NetworkSecurityGroupResponseArgs' network_security_group: The reference of the NetworkSecurityGroup resource. :param bool primary: Gets whether this is a primary network interface on a virtual machine. :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str resource_guid: The resource GUID property of the network interface resource. :param Mapping[str, str] tags: Resource tags. :param 'SubResourceResponseArgs' virtual_machine: The reference of a virtual machine. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "type", type) if dns_settings is not None: pulumi.set(__self__, "dns_settings", dns_settings) if enable_accelerated_networking is not None: pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking) if enable_ip_forwarding is not None: pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if ip_configurations is not None: pulumi.set(__self__, "ip_configurations", ip_configurations) if location is not None: pulumi.set(__self__, "location", location) if mac_address is not None: pulumi.set(__self__, "mac_address", mac_address) if network_security_group is not None: pulumi.set(__self__, "network_security_group", network_security_group) if primary is not None: pulumi.set(__self__, "primary", primary) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if resource_guid is not None: pulumi.set(__self__, 
"resource_guid", resource_guid) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_machine is not None: pulumi.set(__self__, "virtual_machine", virtual_machine) @property @pulumi.getter def name(self) -> str: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="dnsSettings") def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']: """ The DNS settings in network interface. """ return pulumi.get(self, "dns_settings") @property @pulumi.getter(name="enableAcceleratedNetworking") def enable_accelerated_networking(self) -> Optional[bool]: """ If the network interface is accelerated networking enabled. """ return pulumi.get(self, "enable_accelerated_networking") @property @pulumi.getter(name="enableIPForwarding") def enable_ip_forwarding(self) -> Optional[bool]: """ Indicates whether IP forwarding is enabled on this network interface. """ return pulumi.get(self, "enable_ip_forwarding") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="ipConfigurations") def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]: """ A list of IPConfigurations of the network interface. """ return pulumi.get(self, "ip_configurations") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter(name="macAddress") def mac_address(self) -> Optional[str]: """ The MAC address of the network interface. 
""" return pulumi.get(self, "mac_address") @property @pulumi.getter(name="networkSecurityGroup") def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']: """ The reference of the NetworkSecurityGroup resource. """ return pulumi.get(self, "network_security_group") @property @pulumi.getter def primary(self) -> Optional[bool]: """ Gets whether this is a primary network interface on a virtual machine. """ return pulumi.get(self, "primary") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: """ The resource GUID property of the network interface resource. """ return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter(name="virtualMachine") def virtual_machine(self) -> Optional['outputs.SubResourceResponse']: """ The reference of a virtual machine. """ return pulumi.get(self, "virtual_machine") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkSecurityGroupResponse(dict): """ NetworkSecurityGroup resource. 
""" def __init__(__self__, *, name: str, network_interfaces: Sequence['outputs.NetworkInterfaceResponse'], subnets: Sequence['outputs.SubnetResponse'], type: str, default_security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None, etag: Optional[str] = None, id: Optional[str] = None, location: Optional[str] = None, provisioning_state: Optional[str] = None, resource_guid: Optional[str] = None, security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None, tags: Optional[Mapping[str, str]] = None): """ NetworkSecurityGroup resource. :param str name: Resource name. :param Sequence['NetworkInterfaceResponseArgs'] network_interfaces: A collection of references to network interfaces. :param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets. :param str type: Resource type. :param Sequence['SecurityRuleResponseArgs'] default_security_rules: The default security rules of network security group. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str location: Resource location. :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str resource_guid: The resource GUID property of the network security group resource. :param Sequence['SecurityRuleResponseArgs'] security_rules: A collection of security rules of the network security group. :param Mapping[str, str] tags: Resource tags. 
""" pulumi.set(__self__, "name", name) pulumi.set(__self__, "network_interfaces", network_interfaces) pulumi.set(__self__, "subnets", subnets) pulumi.set(__self__, "type", type) if default_security_rules is not None: pulumi.set(__self__, "default_security_rules", default_security_rules) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if security_rules is not None: pulumi.set(__self__, "security_rules", security_rules) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def name(self) -> str: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkInterfaces") def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']: """ A collection of references to network interfaces. """ return pulumi.get(self, "network_interfaces") @property @pulumi.getter def subnets(self) -> Sequence['outputs.SubnetResponse']: """ A collection of references to subnets. """ return pulumi.get(self, "subnets") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="defaultSecurityRules") def default_security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]: """ The default security rules of network security group. """ return pulumi.get(self, "default_security_rules") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
""" return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: """ The resource GUID property of the network security group resource. """ return pulumi.get(self, "resource_guid") @property @pulumi.getter(name="securityRules") def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]: """ A collection of security rules of the network security group. """ return pulumi.get(self, "security_rules") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class OutboundNatRuleResponse(dict): """ Outbound NAT pool of the load balancer. """ def __init__(__self__, *, backend_address_pool: 'outputs.SubResourceResponse', allocated_outbound_ports: Optional[int] = None, etag: Optional[str] = None, frontend_ip_configurations: Optional[Sequence['outputs.SubResourceResponse']] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): """ Outbound NAT pool of the load balancer. :param 'SubResourceResponseArgs' backend_address_pool: A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs. :param int allocated_outbound_ports: The number of outbound ports to be used for NAT. :param str etag: A unique read-only string that changes whenever the resource is updated. 
:param Sequence['SubResourceResponseArgs'] frontend_ip_configurations: The Frontend IP addresses of the load balancer. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ pulumi.set(__self__, "backend_address_pool", backend_address_pool) if allocated_outbound_ports is not None: pulumi.set(__self__, "allocated_outbound_ports", allocated_outbound_ports) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configurations is not None: pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> 'outputs.SubResourceResponse': """ A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs. """ return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="allocatedOutboundPorts") def allocated_outbound_ports(self) -> Optional[int]: """ The number of outbound ports to be used for NAT. """ return pulumi.get(self, "allocated_outbound_ports") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfigurations") def frontend_ip_configurations(self) -> Optional[Sequence['outputs.SubResourceResponse']]: """ The Frontend IP addresses of the load balancer. """ return pulumi.get(self, "frontend_ip_configurations") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
""" return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class PacketCaptureFilterResponse(dict): """ Filter that is applied to packet capture request. Multiple filters can be applied. """ def __init__(__self__, *, local_ip_address: Optional[str] = None, local_port: Optional[str] = None, protocol: Optional[str] = None, remote_ip_address: Optional[str] = None, remote_port: Optional[str] = None): """ Filter that is applied to packet capture request. Multiple filters can be applied. :param str local_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5"? for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. :param str local_port: Local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. :param str protocol: Protocol to be filtered on. :param str remote_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. 
:param str remote_port: Remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. """ if local_ip_address is not None: pulumi.set(__self__, "local_ip_address", local_ip_address) if local_port is not None: pulumi.set(__self__, "local_port", local_port) if protocol is None: protocol = 'Any' if protocol is not None: pulumi.set(__self__, "protocol", protocol) if remote_ip_address is not None: pulumi.set(__self__, "remote_ip_address", remote_ip_address) if remote_port is not None: pulumi.set(__self__, "remote_port", remote_port) @property @pulumi.getter(name="localIPAddress") def local_ip_address(self) -> Optional[str]: """ Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5"? for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. """ return pulumi.get(self, "local_ip_address") @property @pulumi.getter(name="localPort") def local_port(self) -> Optional[str]: """ Local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. """ return pulumi.get(self, "local_port") @property @pulumi.getter def protocol(self) -> Optional[str]: """ Protocol to be filtered on. """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="remoteIPAddress") def remote_ip_address(self) -> Optional[str]: """ Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. 
Default = null. """ return pulumi.get(self, "remote_ip_address") @property @pulumi.getter(name="remotePort") def remote_port(self) -> Optional[str]: """ Remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null. """ return pulumi.get(self, "remote_port") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class PacketCaptureStorageLocationResponse(dict): """ Describes the storage location for a packet capture session. """ def __init__(__self__, *, file_path: Optional[str] = None, storage_id: Optional[str] = None, storage_path: Optional[str] = None): """ Describes the storage location for a packet capture session. :param str file_path: A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For linux virtual machine it must start with /var/captures. Required if no storage ID is provided, otherwise optional. :param str storage_id: The ID of the storage account to save the packet capture session. Required if no local file path is provided. :param str storage_path: The URI of the storage path to save the packet capture. Must be a well-formed URI describing the location to save the packet capture. """ if file_path is not None: pulumi.set(__self__, "file_path", file_path) if storage_id is not None: pulumi.set(__self__, "storage_id", storage_id) if storage_path is not None: pulumi.set(__self__, "storage_path", storage_path) @property @pulumi.getter(name="filePath") def file_path(self) -> Optional[str]: """ A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For linux virtual machine it must start with /var/captures. Required if no storage ID is provided, otherwise optional. 
""" return pulumi.get(self, "file_path") @property @pulumi.getter(name="storageId") def storage_id(self) -> Optional[str]: """ The ID of the storage account to save the packet capture session. Required if no local file path is provided. """ return pulumi.get(self, "storage_id") @property @pulumi.getter(name="storagePath") def storage_path(self) -> Optional[str]: """ The URI of the storage path to save the packet capture. Must be a well-formed URI describing the location to save the packet capture. """ return pulumi.get(self, "storage_path") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ProbeResponse(dict): """ A load balancer probe. """ def __init__(__self__, *, load_balancing_rules: Sequence['outputs.SubResourceResponse'], port: int, protocol: str, etag: Optional[str] = None, id: Optional[str] = None, interval_in_seconds: Optional[int] = None, name: Optional[str] = None, number_of_probes: Optional[int] = None, provisioning_state: Optional[str] = None, request_path: Optional[str] = None): """ A load balancer probe. :param Sequence['SubResourceResponseArgs'] load_balancing_rules: The load balancer rules that use this probe. :param int port: The port for communicating the probe. Possible values range from 1 to 65535, inclusive. :param str protocol: The protocol of the end point. Possible values are: 'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the specifies URI is required for the probe to be successful. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param int interval_in_seconds: The interval, in seconds, for how frequently to probe the endpoint for health status. 
Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5. :param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :param int number_of_probes: The number of probes where if no response, will result in stopping further traffic from being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure. :param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str request_path: The URI used for requesting health status from the VM. Path is required if a protocol is set to http. Otherwise, it is not allowed. There is no default value. """ pulumi.set(__self__, "load_balancing_rules", load_balancing_rules) pulumi.set(__self__, "port", port) pulumi.set(__self__, "protocol", protocol) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if interval_in_seconds is not None: pulumi.set(__self__, "interval_in_seconds", interval_in_seconds) if name is not None: pulumi.set(__self__, "name", name) if number_of_probes is not None: pulumi.set(__self__, "number_of_probes", number_of_probes) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if request_path is not None: pulumi.set(__self__, "request_path", request_path) @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']: """ The load balancer rules that use this probe. """ return pulumi.get(self, "load_balancing_rules") @property @pulumi.getter def port(self) -> int: """ The port for communicating the probe. Possible values range from 1 to 65535, inclusive. 
""" return pulumi.get(self, "port") @property @pulumi.getter def protocol(self) -> str: """ The protocol of the end point. Possible values are: 'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the specifies URI is required for the probe to be successful. """ return pulumi.get(self, "protocol") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="intervalInSeconds") def interval_in_seconds(self) -> Optional[int]: """ The interval, in seconds, for how frequently to probe the endpoint for health status. Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5. """ return pulumi.get(self, "interval_in_seconds") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="numberOfProbes") def number_of_probes(self) -> Optional[int]: """ The number of probes where if no response, will result in stopping further traffic from being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure. """ return pulumi.get(self, "number_of_probes") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="requestPath") def request_path(self) -> Optional[str]: """ The URI used for requesting health status from the VM. Path is required if a protocol is set to http. Otherwise, it is not allowed. There is no default value. """ return pulumi.get(self, "request_path") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class PublicIPAddressDnsSettingsResponse(dict): """ Contains FQDN of the DNS record associated with the public IP address """ def __init__(__self__, *, domain_name_label: Optional[str] = None, fqdn: Optional[str] = None, reverse_fqdn: Optional[str] = None): """ Contains FQDN of the DNS record associated with the public IP address :param str domain_name_label: Gets or sets the Domain name label.The concatenation of the domain name label and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system. :param str fqdn: Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone. :param str reverse_fqdn: Gets or Sets the Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN. 
""" if domain_name_label is not None: pulumi.set(__self__, "domain_name_label", domain_name_label) if fqdn is not None: pulumi.set(__self__, "fqdn", fqdn) if reverse_fqdn is not None: pulumi.set(__self__, "reverse_fqdn", reverse_fqdn) @property @pulumi.getter(name="domainNameLabel") def domain_name_label(self) -> Optional[str]: """ Gets or sets the Domain name label.The concatenation of the domain name label and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system. """ return pulumi.get(self, "domain_name_label") @property @pulumi.getter def fqdn(self) -> Optional[str]: """ Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone. """ return pulumi.get(self, "fqdn") @property @pulumi.getter(name="reverseFqdn") def reverse_fqdn(self) -> Optional[str]: """ Gets or Sets the Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN. """ return pulumi.get(self, "reverse_fqdn") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class PublicIPAddressResponse(dict): """ Public IP address resource. 
    """
    def __init__(__self__, *,
                 ip_configuration: 'outputs.IPConfigurationResponse',
                 name: str,
                 type: str,
                 dns_settings: Optional['outputs.PublicIPAddressDnsSettingsResponse'] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 ip_address: Optional[str] = None,
                 location: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address_version: Optional[str] = None,
                 public_ip_allocation_method: Optional[str] = None,
                 resource_guid: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Public IP address resource.
        :param 'IPConfigurationResponseArgs' ip_configuration: IPConfiguration
        :param str name: Resource name.
        :param str type: Resource type.
        :param 'PublicIPAddressDnsSettingsResponseArgs' dns_settings: The FQDN of the DNS record associated with the public IP address.
        :param str etag: A unique read-only string that changes whenever the resource is updated.
        :param str id: Resource ID.
        :param int idle_timeout_in_minutes: The idle timeout of the public IP address.
        :param str ip_address: The IP address associated with the public IP address resource.
        :param str location: Resource location.
        :param str provisioning_state: The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param str public_ip_address_version: The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
        :param str public_ip_allocation_method: The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
        :param str resource_guid: The resource GUID property of the public IP resource.
        :param Mapping[str, str] tags: Resource tags.
""" pulumi.set(__self__, "ip_configuration", ip_configuration) pulumi.set(__self__, "name", name) pulumi.set(__self__, "type", type) if dns_settings is not None: pulumi.set(__self__, "dns_settings", dns_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if idle_timeout_in_minutes is not None: pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if location is not None: pulumi.set(__self__, "location", location) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address_version is not None: pulumi.set(__self__, "public_ip_address_version", public_ip_address_version) if public_ip_allocation_method is not None: pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="ipConfiguration") def ip_configuration(self) -> 'outputs.IPConfigurationResponse': """ IPConfiguration """ return pulumi.get(self, "ip_configuration") @property @pulumi.getter def name(self) -> str: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="dnsSettings") def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']: """ The FQDN of the DNS record associated with the public IP address. """ return pulumi.get(self, "dns_settings") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """
        The idle timeout of the public IP address.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """
        The IP address associated with the public IP address resource.
        """
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicIPAddressVersion")
    def public_ip_address_version(self) -> Optional[str]:
        """
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
        """
        return pulumi.get(self, "public_ip_address_version")

    @property
    @pulumi.getter(name="publicIPAllocationMethod")
    def public_ip_allocation_method(self) -> Optional[str]:
        """
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
        """
        return pulumi.get(self, "public_ip_allocation_method")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the public IP resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class ResourceNavigationLinkResponse(dict):
    """
    ResourceNavigationLink resource.
    """
    def __init__(__self__, *,
                 etag: str,
                 provisioning_state: str,
                 id: Optional[str] = None,
                 link: Optional[str] = None,
                 linked_resource_type: Optional[str] = None,
                 name: Optional[str] = None):
        """
        ResourceNavigationLink resource.
:param str etag: A unique read-only string that changes whenever the resource is updated. :param str provisioning_state: Provisioning state of the ResourceNavigationLink resource. :param str id: Resource ID. :param str link: Link to the external resource :param str linked_resource_type: Resource type of the linked resource. :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource. """ pulumi.set(__self__, "etag", etag) pulumi.set(__self__, "provisioning_state", provisioning_state) if id is not None: pulumi.set(__self__, "id", id) if link is not None: pulumi.set(__self__, "link", link) if linked_resource_type is not None: pulumi.set(__self__, "linked_resource_type", linked_resource_type) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def etag(self) -> str: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ Provisioning state of the ResourceNavigationLink resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def link(self) -> Optional[str]: """ Link to the external resource """ return pulumi.get(self, "link") @property @pulumi.getter(name="linkedResourceType") def linked_resource_type(self) -> Optional[str]: """ Resource type of the linked resource. """ return pulumi.get(self, "linked_resource_type") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the resource that is unique within a resource group. This name can be used to access the resource. 
""" return pulumi.get(self, "name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class RouteResponse(dict): """ Route resource """ def __init__(__self__, *, next_hop_type: str, address_prefix: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, next_hop_ip_address: Optional[str] = None, provisioning_state: Optional[str] = None): """ Route resource :param str next_hop_type: The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None' :param str address_prefix: The destination CIDR to which the route applies. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance. :param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ pulumi.set(__self__, "next_hop_type", next_hop_type) if address_prefix is not None: pulumi.set(__self__, "address_prefix", address_prefix) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if next_hop_ip_address is not None: pulumi.set(__self__, "next_hop_ip_address", next_hop_ip_address) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="nextHopType") def next_hop_type(self) -> str: """ The type of Azure hop the packet should be sent to. 
Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None' """ return pulumi.get(self, "next_hop_type") @property @pulumi.getter(name="addressPrefix") def address_prefix(self) -> Optional[str]: """ The destination CIDR to which the route applies. """ return pulumi.get(self, "address_prefix") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="nextHopIpAddress") def next_hop_ip_address(self) -> Optional[str]: """ The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance. """ return pulumi.get(self, "next_hop_ip_address") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class RouteTableResponse(dict): """ Route table resource. """ def __init__(__self__, *, name: str, subnets: Sequence['outputs.SubnetResponse'], type: str, etag: Optional[str] = None, id: Optional[str] = None, location: Optional[str] = None, provisioning_state: Optional[str] = None, routes: Optional[Sequence['outputs.RouteResponse']] = None, tags: Optional[Mapping[str, str]] = None): """ Route table resource. :param str name: Resource name. 
:param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets. :param str type: Resource type. :param str etag: Gets a unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str location: Resource location. :param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param Sequence['RouteResponseArgs'] routes: Collection of routes contained within a route table. :param Mapping[str, str] tags: Resource tags. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "subnets", subnets) pulumi.set(__self__, "type", type) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if routes is not None: pulumi.set(__self__, "routes", routes) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def name(self) -> str: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter def subnets(self) -> Sequence['outputs.SubnetResponse']: """ A collection of references to subnets. """ return pulumi.get(self, "subnets") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter def etag(self) -> Optional[str]: """ Gets a unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the resource. 
Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def routes(self) -> Optional[Sequence['outputs.RouteResponse']]: """ Collection of routes contained within a route table. """ return pulumi.get(self, "routes") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class SecurityRuleResponse(dict): """ Network security rule. """ def __init__(__self__, *, access: str, destination_address_prefix: str, direction: str, protocol: str, source_address_prefix: str, description: Optional[str] = None, destination_port_range: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, priority: Optional[int] = None, provisioning_state: Optional[str] = None, source_port_range: Optional[str] = None): """ Network security rule. :param str access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'. :param str destination_address_prefix: The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. :param str direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'. :param str protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'. :param str source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. 
:param str description: A description for this rule. Restricted to 140 chars. :param str destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param int priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. :param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. """ pulumi.set(__self__, "access", access) pulumi.set(__self__, "destination_address_prefix", destination_address_prefix) pulumi.set(__self__, "direction", direction) pulumi.set(__self__, "protocol", protocol) pulumi.set(__self__, "source_address_prefix", source_address_prefix) if description is not None: pulumi.set(__self__, "description", description) if destination_port_range is not None: pulumi.set(__self__, "destination_port_range", destination_port_range) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if priority is not None: pulumi.set(__self__, "priority", priority) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if source_port_range is not None: pulumi.set(__self__, "source_port_range", source_port_range) @property @pulumi.getter def access(self) -> str: """ The network traffic is allowed or denied. 
Possible values are: 'Allow' and 'Deny'. """ return pulumi.get(self, "access") @property @pulumi.getter(name="destinationAddressPrefix") def destination_address_prefix(self) -> str: """ The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. """ return pulumi.get(self, "destination_address_prefix") @property @pulumi.getter def direction(self) -> str: """ The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'. """ return pulumi.get(self, "direction") @property @pulumi.getter def protocol(self) -> str: """ Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'. """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="sourceAddressPrefix") def source_address_prefix(self) -> str: """ The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. """ return pulumi.get(self, "source_address_prefix") @property @pulumi.getter def description(self) -> Optional[str]: """ A description for this rule. Restricted to 140 chars. """ return pulumi.get(self, "description") @property @pulumi.getter(name="destinationPortRange") def destination_port_range(self) -> Optional[str]: """ The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. """ return pulumi.get(self, "destination_port_range") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
""" return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def priority(self) -> Optional[int]: """ The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. """ return pulumi.get(self, "priority") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="sourcePortRange") def source_port_range(self) -> Optional[str]: """ The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. """ return pulumi.get(self, "source_port_range") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class SubResourceResponse(dict): def __init__(__self__, *, id: Optional[str] = None): """ :param str id: Resource ID. """ if id is not None: pulumi.set(__self__, "id", id) @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class SubnetResponse(dict): """ Subnet in a virtual network resource. 
""" def __init__(__self__, *, ip_configurations: Sequence['outputs.IPConfigurationResponse'], address_prefix: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None, provisioning_state: Optional[str] = None, resource_navigation_links: Optional[Sequence['outputs.ResourceNavigationLinkResponse']] = None, route_table: Optional['outputs.RouteTableResponse'] = None): """ Subnet in a virtual network resource. :param Sequence['IPConfigurationResponseArgs'] ip_configurations: Gets an array of references to the network interface IP configurations using subnet. :param str address_prefix: The address prefix for the subnet. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param 'NetworkSecurityGroupResponseArgs' network_security_group: The reference of the NetworkSecurityGroup resource. :param str provisioning_state: The provisioning state of the resource. :param Sequence['ResourceNavigationLinkResponseArgs'] resource_navigation_links: Gets an array of references to the external resources using subnet. :param 'RouteTableResponseArgs' route_table: The reference of the RouteTable resource. 
""" pulumi.set(__self__, "ip_configurations", ip_configurations) if address_prefix is not None: pulumi.set(__self__, "address_prefix", address_prefix) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if network_security_group is not None: pulumi.set(__self__, "network_security_group", network_security_group) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if resource_navigation_links is not None: pulumi.set(__self__, "resource_navigation_links", resource_navigation_links) if route_table is not None: pulumi.set(__self__, "route_table", route_table) @property @pulumi.getter(name="ipConfigurations") def ip_configurations(self) -> Sequence['outputs.IPConfigurationResponse']: """ Gets an array of references to the network interface IP configurations using subnet. """ return pulumi.get(self, "ip_configurations") @property @pulumi.getter(name="addressPrefix") def address_prefix(self) -> Optional[str]: """ The address prefix for the subnet. """ return pulumi.get(self, "address_prefix") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkSecurityGroup") def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']: """ The reference of the NetworkSecurityGroup resource. 
""" return pulumi.get(self, "network_security_group") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceNavigationLinks") def resource_navigation_links(self) -> Optional[Sequence['outputs.ResourceNavigationLinkResponse']]: """ Gets an array of references to the external resources using subnet. """ return pulumi.get(self, "resource_navigation_links") @property @pulumi.getter(name="routeTable") def route_table(self) -> Optional['outputs.RouteTableResponse']: """ The reference of the RouteTable resource. """ return pulumi.get(self, "route_table") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class TunnelConnectionHealthResponse(dict): """ VirtualNetworkGatewayConnection properties """ def __init__(__self__, *, connection_status: str, egress_bytes_transferred: float, ingress_bytes_transferred: float, last_connection_established_utc_time: str, tunnel: str): """ VirtualNetworkGatewayConnection properties :param str connection_status: Virtual network Gateway connection status :param float egress_bytes_transferred: The Egress Bytes Transferred in this connection :param float ingress_bytes_transferred: The Ingress Bytes Transferred in this connection :param str last_connection_established_utc_time: The time at which connection was established in Utc format. :param str tunnel: Tunnel name. 
""" pulumi.set(__self__, "connection_status", connection_status) pulumi.set(__self__, "egress_bytes_transferred", egress_bytes_transferred) pulumi.set(__self__, "ingress_bytes_transferred", ingress_bytes_transferred) pulumi.set(__self__, "last_connection_established_utc_time", last_connection_established_utc_time) pulumi.set(__self__, "tunnel", tunnel) @property @pulumi.getter(name="connectionStatus") def connection_status(self) -> str: """ Virtual network Gateway connection status """ return pulumi.get(self, "connection_status") @property @pulumi.getter(name="egressBytesTransferred") def egress_bytes_transferred(self) -> float: """ The Egress Bytes Transferred in this connection """ return pulumi.get(self, "egress_bytes_transferred") @property @pulumi.getter(name="ingressBytesTransferred") def ingress_bytes_transferred(self) -> float: """ The Ingress Bytes Transferred in this connection """ return pulumi.get(self, "ingress_bytes_transferred") @property @pulumi.getter(name="lastConnectionEstablishedUtcTime") def last_connection_established_utc_time(self) -> str: """ The time at which connection was established in Utc format. """ return pulumi.get(self, "last_connection_established_utc_time") @property @pulumi.getter def tunnel(self) -> str: """ Tunnel name. """ return pulumi.get(self, "tunnel") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkGatewayIPConfigurationResponse(dict): """ IP configuration for virtual network gateway """ def __init__(__self__, *, provisioning_state: str, public_ip_address: 'outputs.SubResourceResponse', subnet: 'outputs.SubResourceResponse', etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_allocation_method: Optional[str] = None): """ IP configuration for virtual network gateway :param str provisioning_state: The provisioning state of the public IP resource. 
Possible values are: 'Updating', 'Deleting', and 'Failed'. :param 'SubResourceResponseArgs' public_ip_address: The reference of the public IP resource. :param 'SubResourceResponseArgs' subnet: The reference of the subnet resource. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str private_ip_allocation_method: The private IP allocation method. Possible values are: 'Static' and 'Dynamic'. """ pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "public_ip_address", public_ip_address) pulumi.set(__self__, "subnet", subnet) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> 'outputs.SubResourceResponse': """ The reference of the public IP resource. """ return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> 'outputs.SubResourceResponse': """ The reference of the subnet resource. """ return pulumi.get(self, "subnet") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. 
""" return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: """ The private IP allocation method. Possible values are: 'Static' and 'Dynamic'. """ return pulumi.get(self, "private_ip_allocation_method") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkGatewayResponse(dict): """ A common class for general resource information """ def __init__(__self__, *, gateway_type: str, ip_configurations: Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse'], name: str, provisioning_state: str, type: str, vpn_type: str, active_active: Optional[bool] = None, bgp_settings: Optional['outputs.BgpSettingsResponse'] = None, enable_bgp: Optional[bool] = None, etag: Optional[str] = None, gateway_default_site: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, location: Optional[str] = None, resource_guid: Optional[str] = None, sku: Optional['outputs.VirtualNetworkGatewaySkuResponse'] = None, tags: Optional[Mapping[str, str]] = None, vpn_client_configuration: Optional['outputs.VpnClientConfigurationResponse'] = None): """ A common class for general resource information :param str gateway_type: The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'. :param Sequence['VirtualNetworkGatewayIPConfigurationResponseArgs'] ip_configurations: IP configurations for virtual network gateway. :param str name: Resource name. :param str provisioning_state: The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str type: Resource type. 
:param str vpn_type: The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'. :param bool active_active: ActiveActive flag :param 'BgpSettingsResponseArgs' bgp_settings: Virtual network gateway's BGP speaker settings. :param bool enable_bgp: Whether BGP is enabled for this virtual network gateway or not. :param str etag: Gets a unique read-only string that changes whenever the resource is updated. :param 'SubResourceResponseArgs' gateway_default_site: The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting. :param str id: Resource ID. :param str location: Resource location. :param str resource_guid: The resource GUID property of the VirtualNetworkGateway resource. :param 'VirtualNetworkGatewaySkuResponseArgs' sku: The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway. :param Mapping[str, str] tags: Resource tags. :param 'VpnClientConfigurationResponseArgs' vpn_client_configuration: The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations. 
""" pulumi.set(__self__, "gateway_type", gateway_type) pulumi.set(__self__, "ip_configurations", ip_configurations) pulumi.set(__self__, "name", name) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "type", type) pulumi.set(__self__, "vpn_type", vpn_type) if active_active is not None: pulumi.set(__self__, "active_active", active_active) if bgp_settings is not None: pulumi.set(__self__, "bgp_settings", bgp_settings) if enable_bgp is not None: pulumi.set(__self__, "enable_bgp", enable_bgp) if etag is not None: pulumi.set(__self__, "etag", etag) if gateway_default_site is not None: pulumi.set(__self__, "gateway_default_site", gateway_default_site) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if sku is not None: pulumi.set(__self__, "sku", sku) if tags is not None: pulumi.set(__self__, "tags", tags) if vpn_client_configuration is not None: pulumi.set(__self__, "vpn_client_configuration", vpn_client_configuration) @property @pulumi.getter(name="gatewayType") def gateway_type(self) -> str: """ The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'. """ return pulumi.get(self, "gateway_type") @property @pulumi.getter(name="ipConfigurations") def ip_configurations(self) -> Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']: """ IP configurations for virtual network gateway. """ return pulumi.get(self, "ip_configurations") @property @pulumi.getter def name(self) -> str: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
""" return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="vpnType") def vpn_type(self) -> str: """ The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'. """ return pulumi.get(self, "vpn_type") @property @pulumi.getter(name="activeActive") def active_active(self) -> Optional[bool]: """ ActiveActive flag """ return pulumi.get(self, "active_active") @property @pulumi.getter(name="bgpSettings") def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']: """ Virtual network gateway's BGP speaker settings. """ return pulumi.get(self, "bgp_settings") @property @pulumi.getter(name="enableBgp") def enable_bgp(self) -> Optional[bool]: """ Whether BGP is enabled for this virtual network gateway or not. """ return pulumi.get(self, "enable_bgp") @property @pulumi.getter def etag(self) -> Optional[str]: """ Gets a unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="gatewayDefaultSite") def gateway_default_site(self) -> Optional['outputs.SubResourceResponse']: """ The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting. """ return pulumi.get(self, "gateway_default_site") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: """ The resource GUID property of the VirtualNetworkGateway resource. 
""" return pulumi.get(self, "resource_guid") @property @pulumi.getter def sku(self) -> Optional['outputs.VirtualNetworkGatewaySkuResponse']: """ The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway. """ return pulumi.get(self, "sku") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter(name="vpnClientConfiguration") def vpn_client_configuration(self) -> Optional['outputs.VpnClientConfigurationResponse']: """ The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations. """ return pulumi.get(self, "vpn_client_configuration") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkGatewaySkuResponse(dict): """ VirtualNetworkGatewaySku details """ def __init__(__self__, *, name: str, tier: str, capacity: Optional[int] = None): """ VirtualNetworkGatewaySku details :param str name: Gateway SKU name. Possible values are: 'Basic', 'HighPerformance','Standard', and 'UltraPerformance'. :param str tier: Gateway SKU tier. Possible values are: 'Basic', 'HighPerformance','Standard', and 'UltraPerformance'. :param int capacity: The capacity. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "tier", tier) if capacity is not None: pulumi.set(__self__, "capacity", capacity) @property @pulumi.getter def name(self) -> str: """ Gateway SKU name. Possible values are: 'Basic', 'HighPerformance','Standard', and 'UltraPerformance'. """ return pulumi.get(self, "name") @property @pulumi.getter def tier(self) -> str: """ Gateway SKU tier. Possible values are: 'Basic', 'HighPerformance','Standard', and 'UltraPerformance'. """ return pulumi.get(self, "tier") @property @pulumi.getter def capacity(self) -> Optional[int]: """ The capacity. 
""" return pulumi.get(self, "capacity") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkPeeringResponse(dict): """ Peerings in a virtual network resource. """ def __init__(__self__, *, allow_forwarded_traffic: Optional[bool] = None, allow_gateway_transit: Optional[bool] = None, allow_virtual_network_access: Optional[bool] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, peering_state: Optional[str] = None, provisioning_state: Optional[str] = None, remote_virtual_network: Optional['outputs.SubResourceResponse'] = None, use_remote_gateways: Optional[bool] = None): """ Peerings in a virtual network resource. :param bool allow_forwarded_traffic: Whether the forwarded traffic from the VMs in the remote virtual network will be allowed/disallowed. :param bool allow_gateway_transit: If gateway links can be used in remote virtual networking to link to this virtual network. :param bool allow_virtual_network_access: Whether the VMs in the linked virtual network space would be able to access all the VMs in local Virtual network space. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str peering_state: The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and 'Disconnected'. :param str provisioning_state: The provisioning state of the resource. :param 'SubResourceResponseArgs' remote_virtual_network: The reference of the remote virtual network. :param bool use_remote_gateways: If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. 
Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway. """ if allow_forwarded_traffic is not None: pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic) if allow_gateway_transit is not None: pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit) if allow_virtual_network_access is not None: pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if peering_state is not None: pulumi.set(__self__, "peering_state", peering_state) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if remote_virtual_network is not None: pulumi.set(__self__, "remote_virtual_network", remote_virtual_network) if use_remote_gateways is not None: pulumi.set(__self__, "use_remote_gateways", use_remote_gateways) @property @pulumi.getter(name="allowForwardedTraffic") def allow_forwarded_traffic(self) -> Optional[bool]: """ Whether the forwarded traffic from the VMs in the remote virtual network will be allowed/disallowed. """ return pulumi.get(self, "allow_forwarded_traffic") @property @pulumi.getter(name="allowGatewayTransit") def allow_gateway_transit(self) -> Optional[bool]: """ If gateway links can be used in remote virtual networking to link to this virtual network. """ return pulumi.get(self, "allow_gateway_transit") @property @pulumi.getter(name="allowVirtualNetworkAccess") def allow_virtual_network_access(self) -> Optional[bool]: """ Whether the VMs in the linked virtual network space would be able to access all the VMs in local Virtual network space. """ return pulumi.get(self, "allow_virtual_network_access") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="peeringState") def peering_state(self) -> Optional[str]: """ The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and 'Disconnected'. """ return pulumi.get(self, "peering_state") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ The provisioning state of the resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="remoteVirtualNetwork") def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']: """ The reference of the remote virtual network. """ return pulumi.get(self, "remote_virtual_network") @property @pulumi.getter(name="useRemoteGateways") def use_remote_gateways(self) -> Optional[bool]: """ If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway. """ return pulumi.get(self, "use_remote_gateways") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VpnClientConfigurationResponse(dict): """ VpnClientConfiguration for P2S client. 
""" def __init__(__self__, *, vpn_client_address_pool: Optional['outputs.AddressSpaceResponse'] = None, vpn_client_revoked_certificates: Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']] = None, vpn_client_root_certificates: Optional[Sequence['outputs.VpnClientRootCertificateResponse']] = None): """ VpnClientConfiguration for P2S client. :param 'AddressSpaceResponseArgs' vpn_client_address_pool: The reference of the address space resource which represents Address space for P2S VpnClient. :param Sequence['VpnClientRevokedCertificateResponseArgs'] vpn_client_revoked_certificates: VpnClientRevokedCertificate for Virtual network gateway. :param Sequence['VpnClientRootCertificateResponseArgs'] vpn_client_root_certificates: VpnClientRootCertificate for virtual network gateway. """ if vpn_client_address_pool is not None: pulumi.set(__self__, "vpn_client_address_pool", vpn_client_address_pool) if vpn_client_revoked_certificates is not None: pulumi.set(__self__, "vpn_client_revoked_certificates", vpn_client_revoked_certificates) if vpn_client_root_certificates is not None: pulumi.set(__self__, "vpn_client_root_certificates", vpn_client_root_certificates) @property @pulumi.getter(name="vpnClientAddressPool") def vpn_client_address_pool(self) -> Optional['outputs.AddressSpaceResponse']: """ The reference of the address space resource which represents Address space for P2S VpnClient. """ return pulumi.get(self, "vpn_client_address_pool") @property @pulumi.getter(name="vpnClientRevokedCertificates") def vpn_client_revoked_certificates(self) -> Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']]: """ VpnClientRevokedCertificate for Virtual network gateway. """ return pulumi.get(self, "vpn_client_revoked_certificates") @property @pulumi.getter(name="vpnClientRootCertificates") def vpn_client_root_certificates(self) -> Optional[Sequence['outputs.VpnClientRootCertificateResponse']]: """ VpnClientRootCertificate for virtual network gateway. 
""" return pulumi.get(self, "vpn_client_root_certificates") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VpnClientRevokedCertificateResponse(dict): """ VPN client revoked certificate of virtual network gateway. """ def __init__(__self__, *, provisioning_state: str, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, thumbprint: Optional[str] = None): """ VPN client revoked certificate of virtual network gateway. :param str provisioning_state: The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :param str thumbprint: The revoked VPN client certificate thumbprint. """ pulumi.set(__self__, "provisioning_state", provisioning_state) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if thumbprint is not None: pulumi.set(__self__, "thumbprint", thumbprint) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. 
This name can be used to access the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def thumbprint(self) -> Optional[str]: """ The revoked VPN client certificate thumbprint. """ return pulumi.get(self, "thumbprint") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VpnClientRootCertificateResponse(dict): """ VPN client root certificate of virtual network gateway """ def __init__(__self__, *, provisioning_state: str, public_cert_data: str, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None): """ VPN client root certificate of virtual network gateway :param str provisioning_state: The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param str public_cert_data: The certificate public data. :param str etag: A unique read-only string that changes whenever the resource is updated. :param str id: Resource ID. :param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource. """ pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "public_cert_data", public_cert_data) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicCertData") def public_cert_data(self) -> str: """ The certificate public data. """ return pulumi.get(self, "public_cert_data") @property @pulumi.getter def etag(self) -> Optional[str]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: """ The name of the resource that is unique within a resource group. This name can be used to access the resource. """ return pulumi.get(self, "name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
40.674095
368
0.647077
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * __all__ = [ 'AddressSpaceResponse', 'ApplicationGatewayAuthenticationCertificateResponse', 'ApplicationGatewayBackendAddressPoolResponse', 'ApplicationGatewayBackendAddressResponse', 'ApplicationGatewayBackendHttpSettingsResponse', 'ApplicationGatewayFrontendIPConfigurationResponse', 'ApplicationGatewayFrontendPortResponse', 'ApplicationGatewayHttpListenerResponse', 'ApplicationGatewayIPConfigurationResponse', 'ApplicationGatewayPathRuleResponse', 'ApplicationGatewayProbeResponse', 'ApplicationGatewayRequestRoutingRuleResponse', 'ApplicationGatewaySkuResponse', 'ApplicationGatewaySslCertificateResponse', 'ApplicationGatewaySslPolicyResponse', 'ApplicationGatewayUrlPathMapResponse', 'ApplicationGatewayWebApplicationFirewallConfigurationResponse', 'BackendAddressPoolResponse', 'BgpPeerStatusResponseResult', 'BgpSettingsResponse', 'DhcpOptionsResponse', 'ExpressRouteCircuitAuthorizationResponse', 'ExpressRouteCircuitPeeringConfigResponse', 'ExpressRouteCircuitPeeringResponse', 'ExpressRouteCircuitServiceProviderPropertiesResponse', 'ExpressRouteCircuitSkuResponse', 'ExpressRouteCircuitStatsResponse', 'FrontendIPConfigurationResponse', 'GatewayRouteResponseResult', 'IPConfigurationResponse', 'InboundNatPoolResponse', 'InboundNatRuleResponse', 'LoadBalancingRuleResponse', 'LocalNetworkGatewayResponse', 'NetworkInterfaceDnsSettingsResponse', 'NetworkInterfaceIPConfigurationResponse', 'NetworkInterfaceResponse', 'NetworkSecurityGroupResponse', 'OutboundNatRuleResponse', 'PacketCaptureFilterResponse', 'PacketCaptureStorageLocationResponse', 'ProbeResponse', 'PublicIPAddressDnsSettingsResponse', 'PublicIPAddressResponse', 'ResourceNavigationLinkResponse', 'RouteResponse', 'RouteTableResponse', 'SecurityRuleResponse', 'SubResourceResponse', 'SubnetResponse', 
'TunnelConnectionHealthResponse', 'VirtualNetworkGatewayIPConfigurationResponse', 'VirtualNetworkGatewayResponse', 'VirtualNetworkGatewaySkuResponse', 'VirtualNetworkPeeringResponse', 'VpnClientConfigurationResponse', 'VpnClientRevokedCertificateResponse', 'VpnClientRootCertificateResponse', ] @pulumi.output_type class AddressSpaceResponse(dict): def __init__(__self__, *, address_prefixes: Optional[Sequence[str]] = None): if address_prefixes is not None: pulumi.set(__self__, "address_prefixes", address_prefixes) @property @pulumi.getter(name="addressPrefixes") def address_prefixes(self) -> Optional[Sequence[str]]: return pulumi.get(self, "address_prefixes") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayAuthenticationCertificateResponse(dict): def __init__(__self__, *, data: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): if data is not None: pulumi.set(__self__, "data", data) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter def data(self) -> Optional[str]: return pulumi.get(self, "data") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class 
ApplicationGatewayBackendAddressPoolResponse(dict): def __init__(__self__, *, backend_addresses: Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']] = None, backend_ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): if backend_addresses is not None: pulumi.set(__self__, "backend_addresses", backend_addresses) if backend_ip_configurations is not None: pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendAddresses") def backend_addresses(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']]: return pulumi.get(self, "backend_addresses") @property @pulumi.getter(name="backendIPConfigurations") def backend_ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]: return pulumi.get(self, "backend_ip_configurations") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayBackendAddressResponse(dict): def __init__(__self__, *, fqdn: Optional[str] = None, ip_address: Optional[str] = None): if fqdn is not None: 
pulumi.set(__self__, "fqdn", fqdn) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) @property @pulumi.getter def fqdn(self) -> Optional[str]: return pulumi.get(self, "fqdn") @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[str]: return pulumi.get(self, "ip_address") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayBackendHttpSettingsResponse(dict): def __init__(__self__, *, authentication_certificates: Optional[Sequence['outputs.SubResourceResponse']] = None, cookie_based_affinity: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, port: Optional[int] = None, probe: Optional['outputs.SubResourceResponse'] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None, request_timeout: Optional[int] = None): if authentication_certificates is not None: pulumi.set(__self__, "authentication_certificates", authentication_certificates) if cookie_based_affinity is not None: pulumi.set(__self__, "cookie_based_affinity", cookie_based_affinity) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if port is not None: pulumi.set(__self__, "port", port) if probe is not None: pulumi.set(__self__, "probe", probe) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if request_timeout is not None: pulumi.set(__self__, "request_timeout", request_timeout) @property @pulumi.getter(name="authenticationCertificates") def authentication_certificates(self) -> Optional[Sequence['outputs.SubResourceResponse']]: return pulumi.get(self, "authentication_certificates") @property @pulumi.getter(name="cookieBasedAffinity") def cookie_based_affinity(self) -> 
Optional[str]: return pulumi.get(self, "cookie_based_affinity") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def port(self) -> Optional[int]: return pulumi.get(self, "port") @property @pulumi.getter def probe(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "probe") @property @pulumi.getter def protocol(self) -> Optional[str]: return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="requestTimeout") def request_timeout(self) -> Optional[int]: return pulumi.get(self, "request_timeout") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayFrontendIPConfigurationResponse(dict): def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_address: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.SubResourceResponse'] = None, subnet: Optional['outputs.SubResourceResponse'] = None): if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: 
pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayFrontendPortResponse(dict): def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, port: Optional[int] = None, provisioning_state: Optional[str] = None): if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if port is not None: pulumi.set(__self__, "port", port) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter 
def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def port(self) -> Optional[int]: return pulumi.get(self, "port") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayHttpListenerResponse(dict): def __init__(__self__, *, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, frontend_port: Optional['outputs.SubResourceResponse'] = None, host_name: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None, require_server_name_indication: Optional[bool] = None, ssl_certificate: Optional['outputs.SubResourceResponse'] = None): if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if frontend_port is not None: pulumi.set(__self__, "frontend_port", frontend_port) if host_name is not None: pulumi.set(__self__, "host_name", host_name) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if require_server_name_indication is not None: pulumi.set(__self__, "require_server_name_indication", require_server_name_indication) if ssl_certificate is not None: pulumi.set(__self__, "ssl_certificate", ssl_certificate) @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: return 
pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter(name="frontendPort") def frontend_port(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "frontend_port") @property @pulumi.getter(name="hostName") def host_name(self) -> Optional[str]: return pulumi.get(self, "host_name") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def protocol(self) -> Optional[str]: return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="requireServerNameIndication") def require_server_name_indication(self) -> Optional[bool]: return pulumi.get(self, "require_server_name_indication") @property @pulumi.getter(name="sslCertificate") def ssl_certificate(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "ssl_certificate") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayIPConfigurationResponse(dict): def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None, subnet: Optional['outputs.SubResourceResponse'] = None): if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> 
Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayPathRuleResponse(dict): def __init__(__self__, *, backend_address_pool: Optional['outputs.SubResourceResponse'] = None, backend_http_settings: Optional['outputs.SubResourceResponse'] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, paths: Optional[Sequence[str]] = None, provisioning_state: Optional[str] = None): if backend_address_pool is not None: pulumi.set(__self__, "backend_address_pool", backend_address_pool) if backend_http_settings is not None: pulumi.set(__self__, "backend_http_settings", backend_http_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if paths is not None: pulumi.set(__self__, "paths", paths) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="backendHttpSettings") def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "backend_http_settings") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def paths(self) -> 
Optional[Sequence[str]]: return pulumi.get(self, "paths") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayProbeResponse(dict): def __init__(__self__, *, etag: Optional[str] = None, host: Optional[str] = None, id: Optional[str] = None, interval: Optional[int] = None, name: Optional[str] = None, path: Optional[str] = None, protocol: Optional[str] = None, provisioning_state: Optional[str] = None, timeout: Optional[int] = None, unhealthy_threshold: Optional[int] = None): if etag is not None: pulumi.set(__self__, "etag", etag) if host is not None: pulumi.set(__self__, "host", host) if id is not None: pulumi.set(__self__, "id", id) if interval is not None: pulumi.set(__self__, "interval", interval) if name is not None: pulumi.set(__self__, "name", name) if path is not None: pulumi.set(__self__, "path", path) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if timeout is not None: pulumi.set(__self__, "timeout", timeout) if unhealthy_threshold is not None: pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold) @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def host(self) -> Optional[str]: return pulumi.get(self, "host") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def interval(self) -> Optional[int]: return pulumi.get(self, "interval") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def path(self) -> Optional[str]: return pulumi.get(self, "path") @property @pulumi.getter def protocol(self) -> Optional[str]: return 
pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def timeout(self) -> Optional[int]: return pulumi.get(self, "timeout") @property @pulumi.getter(name="unhealthyThreshold") def unhealthy_threshold(self) -> Optional[int]: return pulumi.get(self, "unhealthy_threshold") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayRequestRoutingRuleResponse(dict): def __init__(__self__, *, backend_address_pool: Optional['outputs.SubResourceResponse'] = None, backend_http_settings: Optional['outputs.SubResourceResponse'] = None, etag: Optional[str] = None, http_listener: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None, rule_type: Optional[str] = None, url_path_map: Optional['outputs.SubResourceResponse'] = None): if backend_address_pool is not None: pulumi.set(__self__, "backend_address_pool", backend_address_pool) if backend_http_settings is not None: pulumi.set(__self__, "backend_http_settings", backend_http_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if http_listener is not None: pulumi.set(__self__, "http_listener", http_listener) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if rule_type is not None: pulumi.set(__self__, "rule_type", rule_type) if url_path_map is not None: pulumi.set(__self__, "url_path_map", url_path_map) @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="backendHttpSettings") def 
backend_http_settings(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "backend_http_settings") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="httpListener") def http_listener(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "http_listener") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="ruleType") def rule_type(self) -> Optional[str]: return pulumi.get(self, "rule_type") @property @pulumi.getter(name="urlPathMap") def url_path_map(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "url_path_map") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewaySkuResponse(dict): def __init__(__self__, *, capacity: Optional[int] = None, name: Optional[str] = None, tier: Optional[str] = None): if capacity is not None: pulumi.set(__self__, "capacity", capacity) if name is not None: pulumi.set(__self__, "name", name) if tier is not None: pulumi.set(__self__, "tier", tier) @property @pulumi.getter def capacity(self) -> Optional[int]: return pulumi.get(self, "capacity") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def tier(self) -> Optional[str]: return pulumi.get(self, "tier") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewaySslCertificateResponse(dict): def __init__(__self__, *, data: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, 
password: Optional[str] = None, provisioning_state: Optional[str] = None, public_cert_data: Optional[str] = None): if data is not None: pulumi.set(__self__, "data", data) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if password is not None: pulumi.set(__self__, "password", password) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_cert_data is not None: pulumi.set(__self__, "public_cert_data", public_cert_data) @property @pulumi.getter def data(self) -> Optional[str]: return pulumi.get(self, "data") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def password(self) -> Optional[str]: return pulumi.get(self, "password") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicCertData") def public_cert_data(self) -> Optional[str]: return pulumi.get(self, "public_cert_data") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewaySslPolicyResponse(dict): def __init__(__self__, *, disabled_ssl_protocols: Optional[Sequence[str]] = None): if disabled_ssl_protocols is not None: pulumi.set(__self__, "disabled_ssl_protocols", disabled_ssl_protocols) @property @pulumi.getter(name="disabledSslProtocols") def disabled_ssl_protocols(self) -> Optional[Sequence[str]]: return pulumi.get(self, "disabled_ssl_protocols") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class 
ApplicationGatewayUrlPathMapResponse(dict): def __init__(__self__, *, default_backend_address_pool: Optional['outputs.SubResourceResponse'] = None, default_backend_http_settings: Optional['outputs.SubResourceResponse'] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, path_rules: Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']] = None, provisioning_state: Optional[str] = None): if default_backend_address_pool is not None: pulumi.set(__self__, "default_backend_address_pool", default_backend_address_pool) if default_backend_http_settings is not None: pulumi.set(__self__, "default_backend_http_settings", default_backend_http_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if path_rules is not None: pulumi.set(__self__, "path_rules", path_rules) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="defaultBackendAddressPool") def default_backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "default_backend_address_pool") @property @pulumi.getter(name="defaultBackendHttpSettings") def default_backend_http_settings(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "default_backend_http_settings") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="pathRules") def path_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']]: return pulumi.get(self, "path_rules") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, 
"provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApplicationGatewayWebApplicationFirewallConfigurationResponse(dict): def __init__(__self__, *, enabled: bool, firewall_mode: Optional[str] = None): pulumi.set(__self__, "enabled", enabled) if firewall_mode is not None: pulumi.set(__self__, "firewall_mode", firewall_mode) @property @pulumi.getter def enabled(self) -> bool: return pulumi.get(self, "enabled") @property @pulumi.getter(name="firewallMode") def firewall_mode(self) -> Optional[str]: return pulumi.get(self, "firewall_mode") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class BackendAddressPoolResponse(dict): def __init__(__self__, *, backend_ip_configurations: Sequence['outputs.NetworkInterfaceIPConfigurationResponse'], load_balancing_rules: Sequence['outputs.SubResourceResponse'], outbound_nat_rule: 'outputs.SubResourceResponse', etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations) pulumi.set(__self__, "load_balancing_rules", load_balancing_rules) pulumi.set(__self__, "outbound_nat_rule", outbound_nat_rule) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendIPConfigurations") def backend_ip_configurations(self) -> Sequence['outputs.NetworkInterfaceIPConfigurationResponse']: return pulumi.get(self, "backend_ip_configurations") @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']: return pulumi.get(self, "load_balancing_rules") @property 
@pulumi.getter(name="outboundNatRule") def outbound_nat_rule(self) -> 'outputs.SubResourceResponse': return pulumi.get(self, "outbound_nat_rule") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class BgpPeerStatusResponseResult(dict): def __init__(__self__, *, asn: int, connected_duration: str, local_address: str, messages_received: float, messages_sent: float, neighbor: str, routes_received: float, state: str): pulumi.set(__self__, "asn", asn) pulumi.set(__self__, "connected_duration", connected_duration) pulumi.set(__self__, "local_address", local_address) pulumi.set(__self__, "messages_received", messages_received) pulumi.set(__self__, "messages_sent", messages_sent) pulumi.set(__self__, "neighbor", neighbor) pulumi.set(__self__, "routes_received", routes_received) pulumi.set(__self__, "state", state) @property @pulumi.getter def asn(self) -> int: return pulumi.get(self, "asn") @property @pulumi.getter(name="connectedDuration") def connected_duration(self) -> str: return pulumi.get(self, "connected_duration") @property @pulumi.getter(name="localAddress") def local_address(self) -> str: return pulumi.get(self, "local_address") @property @pulumi.getter(name="messagesReceived") def messages_received(self) -> float: return pulumi.get(self, "messages_received") @property @pulumi.getter(name="messagesSent") def messages_sent(self) -> float: return pulumi.get(self, "messages_sent") @property @pulumi.getter def neighbor(self) -> str: return pulumi.get(self, "neighbor") @property 
@pulumi.getter(name="routesReceived") def routes_received(self) -> float: return pulumi.get(self, "routes_received") @property @pulumi.getter def state(self) -> str: return pulumi.get(self, "state") @pulumi.output_type class BgpSettingsResponse(dict): def __init__(__self__, *, asn: Optional[float] = None, bgp_peering_address: Optional[str] = None, peer_weight: Optional[int] = None): if asn is not None: pulumi.set(__self__, "asn", asn) if bgp_peering_address is not None: pulumi.set(__self__, "bgp_peering_address", bgp_peering_address) if peer_weight is not None: pulumi.set(__self__, "peer_weight", peer_weight) @property @pulumi.getter def asn(self) -> Optional[float]: return pulumi.get(self, "asn") @property @pulumi.getter(name="bgpPeeringAddress") def bgp_peering_address(self) -> Optional[str]: return pulumi.get(self, "bgp_peering_address") @property @pulumi.getter(name="peerWeight") def peer_weight(self) -> Optional[int]: return pulumi.get(self, "peer_weight") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class DhcpOptionsResponse(dict): def __init__(__self__, *, dns_servers: Optional[Sequence[str]] = None): if dns_servers is not None: pulumi.set(__self__, "dns_servers", dns_servers) @property @pulumi.getter(name="dnsServers") def dns_servers(self) -> Optional[Sequence[str]]: return pulumi.get(self, "dns_servers") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitAuthorizationResponse(dict): def __init__(__self__, *, authorization_key: Optional[str] = None, authorization_use_status: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): if authorization_key is not None: pulumi.set(__self__, "authorization_key", authorization_key) if authorization_use_status is not None: pulumi.set(__self__, 
"authorization_use_status", authorization_use_status) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="authorizationKey") def authorization_key(self) -> Optional[str]: return pulumi.get(self, "authorization_key") @property @pulumi.getter(name="authorizationUseStatus") def authorization_use_status(self) -> Optional[str]: return pulumi.get(self, "authorization_use_status") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitPeeringConfigResponse(dict): def __init__(__self__, *, advertised_public_prefixes: Optional[Sequence[str]] = None, advertised_public_prefixes_state: Optional[str] = None, customer_asn: Optional[int] = None, routing_registry_name: Optional[str] = None): if advertised_public_prefixes is not None: pulumi.set(__self__, "advertised_public_prefixes", advertised_public_prefixes) if advertised_public_prefixes_state is not None: pulumi.set(__self__, "advertised_public_prefixes_state", advertised_public_prefixes_state) if customer_asn is not None: pulumi.set(__self__, "customer_asn", customer_asn) if routing_registry_name is not None: pulumi.set(__self__, "routing_registry_name", routing_registry_name) @property @pulumi.getter(name="advertisedPublicPrefixes") def advertised_public_prefixes(self) -> Optional[Sequence[str]]: return 
pulumi.get(self, "advertised_public_prefixes") @property @pulumi.getter(name="advertisedPublicPrefixesState") def advertised_public_prefixes_state(self) -> Optional[str]: return pulumi.get(self, "advertised_public_prefixes_state") @property @pulumi.getter(name="customerASN") def customer_asn(self) -> Optional[int]: return pulumi.get(self, "customer_asn") @property @pulumi.getter(name="routingRegistryName") def routing_registry_name(self) -> Optional[str]: return pulumi.get(self, "routing_registry_name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitPeeringResponse(dict): def __init__(__self__, *, azure_asn: Optional[int] = None, etag: Optional[str] = None, gateway_manager_etag: Optional[str] = None, id: Optional[str] = None, last_modified_by: Optional[str] = None, microsoft_peering_config: Optional['outputs.ExpressRouteCircuitPeeringConfigResponse'] = None, name: Optional[str] = None, peer_asn: Optional[int] = None, peering_type: Optional[str] = None, primary_azure_port: Optional[str] = None, primary_peer_address_prefix: Optional[str] = None, provisioning_state: Optional[str] = None, secondary_azure_port: Optional[str] = None, secondary_peer_address_prefix: Optional[str] = None, shared_key: Optional[str] = None, state: Optional[str] = None, stats: Optional['outputs.ExpressRouteCircuitStatsResponse'] = None, vlan_id: Optional[int] = None): if azure_asn is not None: pulumi.set(__self__, "azure_asn", azure_asn) if etag is not None: pulumi.set(__self__, "etag", etag) if gateway_manager_etag is not None: pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag) if id is not None: pulumi.set(__self__, "id", id) if last_modified_by is not None: pulumi.set(__self__, "last_modified_by", last_modified_by) if microsoft_peering_config is not None: pulumi.set(__self__, "microsoft_peering_config", microsoft_peering_config) if name is not None: pulumi.set(__self__, "name", 
name) if peer_asn is not None: pulumi.set(__self__, "peer_asn", peer_asn) if peering_type is not None: pulumi.set(__self__, "peering_type", peering_type) if primary_azure_port is not None: pulumi.set(__self__, "primary_azure_port", primary_azure_port) if primary_peer_address_prefix is not None: pulumi.set(__self__, "primary_peer_address_prefix", primary_peer_address_prefix) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if secondary_azure_port is not None: pulumi.set(__self__, "secondary_azure_port", secondary_azure_port) if secondary_peer_address_prefix is not None: pulumi.set(__self__, "secondary_peer_address_prefix", secondary_peer_address_prefix) if shared_key is not None: pulumi.set(__self__, "shared_key", shared_key) if state is not None: pulumi.set(__self__, "state", state) if stats is not None: pulumi.set(__self__, "stats", stats) if vlan_id is not None: pulumi.set(__self__, "vlan_id", vlan_id) @property @pulumi.getter(name="azureASN") def azure_asn(self) -> Optional[int]: return pulumi.get(self, "azure_asn") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="gatewayManagerEtag") def gateway_manager_etag(self) -> Optional[str]: return pulumi.get(self, "gateway_manager_etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="lastModifiedBy") def last_modified_by(self) -> Optional[str]: return pulumi.get(self, "last_modified_by") @property @pulumi.getter(name="microsoftPeeringConfig") def microsoft_peering_config(self) -> Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']: return pulumi.get(self, "microsoft_peering_config") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="peerASN") def peer_asn(self) -> Optional[int]: return pulumi.get(self, "peer_asn") @property 
@pulumi.getter(name="peeringType") def peering_type(self) -> Optional[str]: return pulumi.get(self, "peering_type") @property @pulumi.getter(name="primaryAzurePort") def primary_azure_port(self) -> Optional[str]: return pulumi.get(self, "primary_azure_port") @property @pulumi.getter(name="primaryPeerAddressPrefix") def primary_peer_address_prefix(self) -> Optional[str]: return pulumi.get(self, "primary_peer_address_prefix") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="secondaryAzurePort") def secondary_azure_port(self) -> Optional[str]: return pulumi.get(self, "secondary_azure_port") @property @pulumi.getter(name="secondaryPeerAddressPrefix") def secondary_peer_address_prefix(self) -> Optional[str]: return pulumi.get(self, "secondary_peer_address_prefix") @property @pulumi.getter(name="sharedKey") def shared_key(self) -> Optional[str]: return pulumi.get(self, "shared_key") @property @pulumi.getter def state(self) -> Optional[str]: return pulumi.get(self, "state") @property @pulumi.getter def stats(self) -> Optional['outputs.ExpressRouteCircuitStatsResponse']: return pulumi.get(self, "stats") @property @pulumi.getter(name="vlanId") def vlan_id(self) -> Optional[int]: return pulumi.get(self, "vlan_id") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitServiceProviderPropertiesResponse(dict): def __init__(__self__, *, bandwidth_in_mbps: Optional[int] = None, peering_location: Optional[str] = None, service_provider_name: Optional[str] = None): if bandwidth_in_mbps is not None: pulumi.set(__self__, "bandwidth_in_mbps", bandwidth_in_mbps) if peering_location is not None: pulumi.set(__self__, "peering_location", peering_location) if service_provider_name is not None: pulumi.set(__self__, "service_provider_name", service_provider_name) @property 
@pulumi.getter(name="bandwidthInMbps") def bandwidth_in_mbps(self) -> Optional[int]: return pulumi.get(self, "bandwidth_in_mbps") @property @pulumi.getter(name="peeringLocation") def peering_location(self) -> Optional[str]: return pulumi.get(self, "peering_location") @property @pulumi.getter(name="serviceProviderName") def service_provider_name(self) -> Optional[str]: return pulumi.get(self, "service_provider_name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitSkuResponse(dict): def __init__(__self__, *, family: Optional[str] = None, name: Optional[str] = None, tier: Optional[str] = None): if family is not None: pulumi.set(__self__, "family", family) if name is not None: pulumi.set(__self__, "name", name) if tier is not None: pulumi.set(__self__, "tier", tier) @property @pulumi.getter def family(self) -> Optional[str]: return pulumi.get(self, "family") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def tier(self) -> Optional[str]: return pulumi.get(self, "tier") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ExpressRouteCircuitStatsResponse(dict): def __init__(__self__, *, primarybytes_in: Optional[float] = None, primarybytes_out: Optional[float] = None, secondarybytes_in: Optional[float] = None, secondarybytes_out: Optional[float] = None): if primarybytes_in is not None: pulumi.set(__self__, "primarybytes_in", primarybytes_in) if primarybytes_out is not None: pulumi.set(__self__, "primarybytes_out", primarybytes_out) if secondarybytes_in is not None: pulumi.set(__self__, "secondarybytes_in", secondarybytes_in) if secondarybytes_out is not None: pulumi.set(__self__, "secondarybytes_out", secondarybytes_out) @property @pulumi.getter(name="primarybytesIn") def primarybytes_in(self) -> Optional[float]: return pulumi.get(self, 
"primarybytes_in") @property @pulumi.getter(name="primarybytesOut") def primarybytes_out(self) -> Optional[float]: return pulumi.get(self, "primarybytes_out") @property @pulumi.getter(name="secondarybytesIn") def secondarybytes_in(self) -> Optional[float]: return pulumi.get(self, "secondarybytes_in") @property @pulumi.getter(name="secondarybytesOut") def secondarybytes_out(self) -> Optional[float]: return pulumi.get(self, "secondarybytes_out") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class FrontendIPConfigurationResponse(dict): def __init__(__self__, *, inbound_nat_pools: Sequence['outputs.SubResourceResponse'], inbound_nat_rules: Sequence['outputs.SubResourceResponse'], load_balancing_rules: Sequence['outputs.SubResourceResponse'], outbound_nat_rules: Sequence['outputs.SubResourceResponse'], etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_address: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None, subnet: Optional['outputs.SubnetResponse'] = None): pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools) pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules) pulumi.set(__self__, "load_balancing_rules", load_balancing_rules) pulumi.set(__self__, "outbound_nat_rules", outbound_nat_rules) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: 
pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter(name="inboundNatPools") def inbound_nat_pools(self) -> Sequence['outputs.SubResourceResponse']: return pulumi.get(self, "inbound_nat_pools") @property @pulumi.getter(name="inboundNatRules") def inbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']: return pulumi.get(self, "inbound_nat_rules") @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']: return pulumi.get(self, "load_balancing_rules") @property @pulumi.getter(name="outboundNatRules") def outbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']: return pulumi.get(self, "outbound_nat_rules") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']: return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubnetResponse']: return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class GatewayRouteResponseResult(dict): def __init__(__self__, *, as_path: 
str, local_address: str, network: str, next_hop: str, origin: str, source_peer: str, weight: int): pulumi.set(__self__, "as_path", as_path) pulumi.set(__self__, "local_address", local_address) pulumi.set(__self__, "network", network) pulumi.set(__self__, "next_hop", next_hop) pulumi.set(__self__, "origin", origin) pulumi.set(__self__, "source_peer", source_peer) pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="asPath") def as_path(self) -> str: return pulumi.get(self, "as_path") @property @pulumi.getter(name="localAddress") def local_address(self) -> str: return pulumi.get(self, "local_address") @property @pulumi.getter def network(self) -> str: return pulumi.get(self, "network") @property @pulumi.getter(name="nextHop") def next_hop(self) -> str: return pulumi.get(self, "next_hop") @property @pulumi.getter def origin(self) -> str: return pulumi.get(self, "origin") @property @pulumi.getter(name="sourcePeer") def source_peer(self) -> str: return pulumi.get(self, "source_peer") @property @pulumi.getter def weight(self) -> int: return pulumi.get(self, "weight") @pulumi.output_type class IPConfigurationResponse(dict): def __init__(__self__, *, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_address: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None, subnet: Optional['outputs.SubnetResponse'] = None): if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", 
provisioning_state) if public_ip_address is not None: pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']: return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubnetResponse']: return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class InboundNatPoolResponse(dict): def __init__(__self__, *, backend_port: int, frontend_port_range_end: int, frontend_port_range_start: int, protocol: str, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, name: Optional[str] = None, provisioning_state: Optional[str] = None): pulumi.set(__self__, "backend_port", backend_port) pulumi.set(__self__, "frontend_port_range_end", frontend_port_range_end) pulumi.set(__self__, "frontend_port_range_start", frontend_port_range_start) pulumi.set(__self__, "protocol", protocol) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not 
None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendPort") def backend_port(self) -> int: return pulumi.get(self, "backend_port") @property @pulumi.getter(name="frontendPortRangeEnd") def frontend_port_range_end(self) -> int: return pulumi.get(self, "frontend_port_range_end") @property @pulumi.getter(name="frontendPortRangeStart") def frontend_port_range_start(self) -> int: return pulumi.get(self, "frontend_port_range_start") @property @pulumi.getter def protocol(self) -> str: return pulumi.get(self, "protocol") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class InboundNatRuleResponse(dict): def __init__(__self__, *, backend_ip_configuration: 'outputs.NetworkInterfaceIPConfigurationResponse', backend_port: Optional[int] = None, enable_floating_ip: Optional[bool] = None, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, frontend_port: Optional[int] = None, id: Optional[str] = None, idle_timeout_in_minutes: Optional[int] = None, name: Optional[str] = None, protocol: Optional[str] = None, 
provisioning_state: Optional[str] = None): pulumi.set(__self__, "backend_ip_configuration", backend_ip_configuration) if backend_port is not None: pulumi.set(__self__, "backend_port", backend_port) if enable_floating_ip is not None: pulumi.set(__self__, "enable_floating_ip", enable_floating_ip) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if frontend_port is not None: pulumi.set(__self__, "frontend_port", frontend_port) if id is not None: pulumi.set(__self__, "id", id) if idle_timeout_in_minutes is not None: pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes) if name is not None: pulumi.set(__self__, "name", name) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="backendIPConfiguration") def backend_ip_configuration(self) -> 'outputs.NetworkInterfaceIPConfigurationResponse': return pulumi.get(self, "backend_ip_configuration") @property @pulumi.getter(name="backendPort") def backend_port(self) -> Optional[int]: return pulumi.get(self, "backend_port") @property @pulumi.getter(name="enableFloatingIP") def enable_floating_ip(self) -> Optional[bool]: return pulumi.get(self, "enable_floating_ip") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter(name="frontendPort") def frontend_port(self) -> Optional[int]: return pulumi.get(self, "frontend_port") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="idleTimeoutInMinutes") def idle_timeout_in_minutes(self) -> 
Optional[int]: return pulumi.get(self, "idle_timeout_in_minutes") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def protocol(self) -> Optional[str]: return pulumi.get(self, "protocol") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class LoadBalancingRuleResponse(dict): def __init__(__self__, *, frontend_port: int, protocol: str, backend_address_pool: Optional['outputs.SubResourceResponse'] = None, backend_port: Optional[int] = None, enable_floating_ip: Optional[bool] = None, etag: Optional[str] = None, frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, idle_timeout_in_minutes: Optional[int] = None, load_distribution: Optional[str] = None, name: Optional[str] = None, probe: Optional['outputs.SubResourceResponse'] = None, provisioning_state: Optional[str] = None): pulumi.set(__self__, "frontend_port", frontend_port) pulumi.set(__self__, "protocol", protocol) if backend_address_pool is not None: pulumi.set(__self__, "backend_address_pool", backend_address_pool) if backend_port is not None: pulumi.set(__self__, "backend_port", backend_port) if enable_floating_ip is not None: pulumi.set(__self__, "enable_floating_ip", enable_floating_ip) if etag is not None: pulumi.set(__self__, "etag", etag) if frontend_ip_configuration is not None: pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration) if id is not None: pulumi.set(__self__, "id", id) if idle_timeout_in_minutes is not None: pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes) if load_distribution is not None: pulumi.set(__self__, "load_distribution", load_distribution) if name is not None: pulumi.set(__self__, "name", name) if probe is not None: 
pulumi.set(__self__, "probe", probe) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="frontendPort") def frontend_port(self) -> int: return pulumi.get(self, "frontend_port") @property @pulumi.getter def protocol(self) -> str: return pulumi.get(self, "protocol") @property @pulumi.getter(name="backendAddressPool") def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "backend_address_pool") @property @pulumi.getter(name="backendPort") def backend_port(self) -> Optional[int]: return pulumi.get(self, "backend_port") @property @pulumi.getter(name="enableFloatingIP") def enable_floating_ip(self) -> Optional[bool]: return pulumi.get(self, "enable_floating_ip") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfiguration") def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "frontend_ip_configuration") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="idleTimeoutInMinutes") def idle_timeout_in_minutes(self) -> Optional[int]: return pulumi.get(self, "idle_timeout_in_minutes") @property @pulumi.getter(name="loadDistribution") def load_distribution(self) -> Optional[str]: return pulumi.get(self, "load_distribution") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def probe(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "probe") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class LocalNetworkGatewayResponse(dict): def __init__(__self__, *, 
local_network_address_space: 'outputs.AddressSpaceResponse', name: str, provisioning_state: str, type: str, bgp_settings: Optional['outputs.BgpSettingsResponse'] = None, etag: Optional[str] = None, gateway_ip_address: Optional[str] = None, id: Optional[str] = None, location: Optional[str] = None, resource_guid: Optional[str] = None, tags: Optional[Mapping[str, str]] = None): pulumi.set(__self__, "local_network_address_space", local_network_address_space) pulumi.set(__self__, "name", name) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "type", type) if bgp_settings is not None: pulumi.set(__self__, "bgp_settings", bgp_settings) if etag is not None: pulumi.set(__self__, "etag", etag) if gateway_ip_address is not None: pulumi.set(__self__, "gateway_ip_address", gateway_ip_address) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="localNetworkAddressSpace") def local_network_address_space(self) -> 'outputs.AddressSpaceResponse': return pulumi.get(self, "local_network_address_space") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter(name="bgpSettings") def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']: return pulumi.get(self, "bgp_settings") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="gatewayIpAddress") def gateway_ip_address(self) -> Optional[str]: return pulumi.get(self, "gateway_ip_address") @property @pulumi.getter def id(self) -> 
Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: return pulumi.get(self, "location") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkInterfaceDnsSettingsResponse(dict): def __init__(__self__, *, applied_dns_servers: Optional[Sequence[str]] = None, dns_servers: Optional[Sequence[str]] = None, internal_dns_name_label: Optional[str] = None, internal_domain_name_suffix: Optional[str] = None, internal_fqdn: Optional[str] = None): if applied_dns_servers is not None: pulumi.set(__self__, "applied_dns_servers", applied_dns_servers) if dns_servers is not None: pulumi.set(__self__, "dns_servers", dns_servers) if internal_dns_name_label is not None: pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label) if internal_domain_name_suffix is not None: pulumi.set(__self__, "internal_domain_name_suffix", internal_domain_name_suffix) if internal_fqdn is not None: pulumi.set(__self__, "internal_fqdn", internal_fqdn) @property @pulumi.getter(name="appliedDnsServers") def applied_dns_servers(self) -> Optional[Sequence[str]]: return pulumi.get(self, "applied_dns_servers") @property @pulumi.getter(name="dnsServers") def dns_servers(self) -> Optional[Sequence[str]]: return pulumi.get(self, "dns_servers") @property @pulumi.getter(name="internalDnsNameLabel") def internal_dns_name_label(self) -> Optional[str]: return pulumi.get(self, "internal_dns_name_label") @property @pulumi.getter(name="internalDomainNameSuffix") def internal_domain_name_suffix(self) -> Optional[str]: return pulumi.get(self, "internal_domain_name_suffix") @property @pulumi.getter(name="internalFqdn") def internal_fqdn(self) -> 
Optional[str]: return pulumi.get(self, "internal_fqdn") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkInterfaceIPConfigurationResponse(dict): def __init__(__self__, *, application_gateway_backend_address_pools: Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']] = None, etag: Optional[str] = None, id: Optional[str] = None, load_balancer_backend_address_pools: Optional[Sequence['outputs.BackendAddressPoolResponse']] = None, load_balancer_inbound_nat_rules: Optional[Sequence['outputs.InboundNatRuleResponse']] = None, name: Optional[str] = None, primary: Optional[bool] = None, private_ip_address: Optional[str] = None, private_ip_address_version: Optional[str] = None, private_ip_allocation_method: Optional[str] = None, provisioning_state: Optional[str] = None, public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None, subnet: Optional['outputs.SubnetResponse'] = None): if application_gateway_backend_address_pools is not None: pulumi.set(__self__, "application_gateway_backend_address_pools", application_gateway_backend_address_pools) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if load_balancer_backend_address_pools is not None: pulumi.set(__self__, "load_balancer_backend_address_pools", load_balancer_backend_address_pools) if load_balancer_inbound_nat_rules is not None: pulumi.set(__self__, "load_balancer_inbound_nat_rules", load_balancer_inbound_nat_rules) if name is not None: pulumi.set(__self__, "name", name) if primary is not None: pulumi.set(__self__, "primary", primary) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if private_ip_address_version is not None: pulumi.set(__self__, "private_ip_address_version", private_ip_address_version) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", 
private_ip_allocation_method) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address is not None: pulumi.set(__self__, "public_ip_address", public_ip_address) if subnet is not None: pulumi.set(__self__, "subnet", subnet) @property @pulumi.getter(name="applicationGatewayBackendAddressPools") def application_gateway_backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]: return pulumi.get(self, "application_gateway_backend_address_pools") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="loadBalancerBackendAddressPools") def load_balancer_backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]: return pulumi.get(self, "load_balancer_backend_address_pools") @property @pulumi.getter(name="loadBalancerInboundNatRules") def load_balancer_inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]: return pulumi.get(self, "load_balancer_inbound_nat_rules") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def primary(self) -> Optional[bool]: return pulumi.get(self, "primary") @property @pulumi.getter(name="privateIPAddress") def private_ip_address(self) -> Optional[str]: return pulumi.get(self, "private_ip_address") @property @pulumi.getter(name="privateIPAddressVersion") def private_ip_address_version(self) -> Optional[str]: return pulumi.get(self, "private_ip_address_version") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: return pulumi.get(self, "private_ip_allocation_method") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") 
@property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']: return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> Optional['outputs.SubnetResponse']: return pulumi.get(self, "subnet") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkInterfaceResponse(dict): def __init__(__self__, *, name: str, type: str, dns_settings: Optional['outputs.NetworkInterfaceDnsSettingsResponse'] = None, enable_accelerated_networking: Optional[bool] = None, enable_ip_forwarding: Optional[bool] = None, etag: Optional[str] = None, id: Optional[str] = None, ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None, location: Optional[str] = None, mac_address: Optional[str] = None, network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None, primary: Optional[bool] = None, provisioning_state: Optional[str] = None, resource_guid: Optional[str] = None, tags: Optional[Mapping[str, str]] = None, virtual_machine: Optional['outputs.SubResourceResponse'] = None): pulumi.set(__self__, "name", name) pulumi.set(__self__, "type", type) if dns_settings is not None: pulumi.set(__self__, "dns_settings", dns_settings) if enable_accelerated_networking is not None: pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking) if enable_ip_forwarding is not None: pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if ip_configurations is not None: pulumi.set(__self__, "ip_configurations", ip_configurations) if location is not None: pulumi.set(__self__, "location", location) if mac_address is not None: pulumi.set(__self__, "mac_address", mac_address) if network_security_group is not None: pulumi.set(__self__, "network_security_group", 
network_security_group) if primary is not None: pulumi.set(__self__, "primary", primary) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_machine is not None: pulumi.set(__self__, "virtual_machine", virtual_machine) @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter(name="dnsSettings") def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']: return pulumi.get(self, "dns_settings") @property @pulumi.getter(name="enableAcceleratedNetworking") def enable_accelerated_networking(self) -> Optional[bool]: return pulumi.get(self, "enable_accelerated_networking") @property @pulumi.getter(name="enableIPForwarding") def enable_ip_forwarding(self) -> Optional[bool]: return pulumi.get(self, "enable_ip_forwarding") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="ipConfigurations") def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]: return pulumi.get(self, "ip_configurations") @property @pulumi.getter def location(self) -> Optional[str]: return pulumi.get(self, "location") @property @pulumi.getter(name="macAddress") def mac_address(self) -> Optional[str]: return pulumi.get(self, "mac_address") @property @pulumi.getter(name="networkSecurityGroup") def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']: return pulumi.get(self, "network_security_group") @property @pulumi.getter def primary(self) -> Optional[bool]: return pulumi.get(self, "primary") @property 
@pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") @property @pulumi.getter(name="virtualMachine") def virtual_machine(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "virtual_machine") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class NetworkSecurityGroupResponse(dict): def __init__(__self__, *, name: str, network_interfaces: Sequence['outputs.NetworkInterfaceResponse'], subnets: Sequence['outputs.SubnetResponse'], type: str, default_security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None, etag: Optional[str] = None, id: Optional[str] = None, location: Optional[str] = None, provisioning_state: Optional[str] = None, resource_guid: Optional[str] = None, security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None, tags: Optional[Mapping[str, str]] = None): pulumi.set(__self__, "name", name) pulumi.set(__self__, "network_interfaces", network_interfaces) pulumi.set(__self__, "subnets", subnets) pulumi.set(__self__, "type", type) if default_security_rules is not None: pulumi.set(__self__, "default_security_rules", default_security_rules) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if security_rules is not None: pulumi.set(__self__, "security_rules", security_rules) if tags is not None: pulumi.set(__self__, "tags", tags) 
    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the network security group resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
        """Network interfaces the security group is associated with."""
        return pulumi.get(self, "network_interfaces")

    @property
    @pulumi.getter
    def subnets(self) -> Sequence['outputs.SubnetResponse']:
        """Subnets the security group is associated with."""
        return pulumi.get(self, "subnets")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type string."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="defaultSecurityRules")
    def default_security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
        """Default security rules; absent when not returned by the service."""
        return pulumi.get(self, "default_security_rules")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Opaque change-tracking tag, if provided."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID, if provided."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource location, if provided."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state reported by the service, if provided."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """Resource GUID, if provided."""
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter(name="securityRules")
    def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
        """User-defined security rules, if provided."""
        return pulumi.get(self, "security_rules")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags, if provided."""
        return pulumi.get(self, "tags")

    def _translate_property(self, prop):
        # Map the camelCase wire name back to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class OutboundNatRuleResponse(dict):
    """Auto-generated output type for an outbound NAT rule.

    Only ``backend_address_pool`` is required; optional fields are stored
    only when a value was supplied.
    """
    def __init__(__self__, *,
                 backend_address_pool: 'outputs.SubResourceResponse',
                 allocated_outbound_ports: Optional[int] = None,
                 etag: Optional[str] = None,
                 frontend_ip_configurations: Optional[Sequence['outputs.SubResourceResponse']] = None,
                 id: Optional[str] = None,
                 name: Optional[str] = None,
                 provisioning_state: Optional[str] = None):
        pulumi.set(__self__, "backend_address_pool", backend_address_pool)
        if allocated_outbound_ports is not None:
            pulumi.set(__self__, "allocated_outbound_ports", allocated_outbound_ports)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if frontend_ip_configurations is not None:
            pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)

    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> 'outputs.SubResourceResponse':
        """Reference to the backend address pool the rule applies to."""
        return pulumi.get(self, "backend_address_pool")

    @property
    @pulumi.getter(name="allocatedOutboundPorts")
    def allocated_outbound_ports(self) -> Optional[int]:
        """Number of allocated outbound ports, if provided."""
        return pulumi.get(self, "allocated_outbound_ports")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Opaque change-tracking tag, if provided."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="frontendIPConfigurations")
    def frontend_ip_configurations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
        """Frontend IP configuration references, if provided."""
        return pulumi.get(self, "frontend_ip_configurations")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID, if provided."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Rule name, if provided."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state reported by the service, if provided."""
        return pulumi.get(self, "provisioning_state")

    def _translate_property(self, prop):
        # Map the camelCase wire name back to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class PacketCaptureFilterResponse(dict):
    """Auto-generated output type for a packet-capture filter.

    ``protocol`` defaults to ``'Any'`` when not supplied; all other
    fields are stored only when a value was supplied.
    """
    def __init__(__self__, *,
                 local_ip_address: Optional[str] = None,
                 local_port: Optional[str] = None,
                 protocol: Optional[str] = None,
                 remote_ip_address: Optional[str] = None,
                 remote_port: Optional[str] = None):
        if local_ip_address is not None:
            pulumi.set(__self__, "local_ip_address", local_ip_address)
        if local_port is not None:
            pulumi.set(__self__, "local_port", local_port)
        # Generator-supplied default: an omitted protocol means "Any".
        if protocol is None:
            protocol = 'Any'
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if remote_ip_address is not None:
            pulumi.set(__self__, "remote_ip_address", remote_ip_address)
        if remote_port is not None:
            pulumi.set(__self__, "remote_port", remote_port)

    @property
    @pulumi.getter(name="localIPAddress")
    def local_ip_address(self) -> Optional[str]:
        """Local IP address filter, if provided."""
        return pulumi.get(self, "local_ip_address")

    @property
    @pulumi.getter(name="localPort")
    def local_port(self) -> Optional[str]:
        """Local port filter, if provided."""
        return pulumi.get(self, "local_port")

    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """Protocol filter; defaults to 'Any' when constructed without one."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="remoteIPAddress")
    def remote_ip_address(self) -> Optional[str]:
        """Remote IP address filter, if provided."""
        return pulumi.get(self, "remote_ip_address")

    @property
    @pulumi.getter(name="remotePort")
    def remote_port(self) -> Optional[str]:
        """Remote port filter, if provided."""
        return pulumi.get(self, "remote_port")

    def _translate_property(self, prop):
        # Map the camelCase wire name back to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class PacketCaptureStorageLocationResponse(dict):
    """Auto-generated output type describing where a packet capture is stored.

    All fields are optional and stored only when a value was supplied.
    """
    def __init__(__self__, *,
                 file_path: Optional[str] = None,
                 storage_id: Optional[str] = None,
                 storage_path: Optional[str] = None):
        if file_path is not None:
            pulumi.set(__self__, "file_path", file_path)
        if storage_id is not None:
            pulumi.set(__self__, "storage_id", storage_id)
        if storage_path is not None:
            pulumi.set(__self__, "storage_path", storage_path)

    @property
    @pulumi.getter(name="filePath")
    def file_path(self) -> Optional[str]:
        """Local file path for the capture, if provided."""
        return pulumi.get(self, "file_path")

    @property
    @pulumi.getter(name="storageId")
    def storage_id(self) -> Optional[str]:
        """Storage account resource ID, if provided."""
        return pulumi.get(self, "storage_id")

    @property
    @pulumi.getter(name="storagePath")
    def storage_path(self) -> Optional[str]:
        """Storage URI path for the capture, if provided."""
        return pulumi.get(self, "storage_path")

    def _translate_property(self, prop):
        # Map the camelCase wire name back to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class ProbeResponse(dict):
    def __init__(__self__, *,
                 load_balancing_rules:
                 Sequence['outputs.SubResourceResponse'],
                 port: int,
                 protocol: str,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 interval_in_seconds: Optional[int] = None,
                 name: Optional[str] = None,
                 number_of_probes: Optional[int] = None,
                 provisioning_state: Optional[str] = None,
                 request_path: Optional[str] = None):
        # Required fields are always set; optional fields only when supplied.
        pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
        pulumi.set(__self__, "port", port)
        pulumi.set(__self__, "protocol", protocol)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if interval_in_seconds is not None:
            pulumi.set(__self__, "interval_in_seconds", interval_in_seconds)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if number_of_probes is not None:
            pulumi.set(__self__, "number_of_probes", number_of_probes)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if request_path is not None:
            pulumi.set(__self__, "request_path", request_path)

    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
        """Load-balancing rule references that use this probe."""
        return pulumi.get(self, "load_balancing_rules")

    @property
    @pulumi.getter
    def port(self) -> int:
        """Port the probe targets."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def protocol(self) -> str:
        """Probe protocol."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Opaque change-tracking tag, if provided."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID, if provided."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="intervalInSeconds")
    def interval_in_seconds(self) -> Optional[int]:
        """Probe interval in seconds, if provided."""
        return pulumi.get(self, "interval_in_seconds")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Probe name, if provided."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="numberOfProbes")
    def number_of_probes(self) -> Optional[int]:
        """Number of probes before marking unhealthy, if provided."""
        return pulumi.get(self, "number_of_probes")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state reported by the service, if provided."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="requestPath")
    def request_path(self) -> Optional[str]:
        """HTTP request path used by the probe, if provided."""
        return pulumi.get(self, "request_path")

    def _translate_property(self, prop):
        # Map the camelCase wire name back to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class PublicIPAddressDnsSettingsResponse(dict):
    """Auto-generated output type for public-IP DNS settings.

    All fields are optional and stored only when a value was supplied.
    """
    def __init__(__self__, *,
                 domain_name_label: Optional[str] = None,
                 fqdn: Optional[str] = None,
                 reverse_fqdn: Optional[str] = None):
        if domain_name_label is not None:
            pulumi.set(__self__, "domain_name_label", domain_name_label)
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if reverse_fqdn is not None:
            pulumi.set(__self__, "reverse_fqdn", reverse_fqdn)

    @property
    @pulumi.getter(name="domainNameLabel")
    def domain_name_label(self) -> Optional[str]:
        """DNS domain name label, if provided."""
        return pulumi.get(self, "domain_name_label")

    @property
    @pulumi.getter
    def fqdn(self) -> Optional[str]:
        """Fully qualified domain name, if provided."""
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter(name="reverseFqdn")
    def reverse_fqdn(self) -> Optional[str]:
        """Reverse FQDN, if provided."""
        return pulumi.get(self, "reverse_fqdn")

    def _translate_property(self, prop):
        # Map the camelCase wire name back to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop


@pulumi.output_type
class PublicIPAddressResponse(dict):
    """Auto-generated output type for a public IP address resource.

    ``ip_configuration``, ``name`` and ``type`` are required; optional
    fields are stored only when a value was supplied.
    """
    def __init__(__self__, *,
                 ip_configuration: 'outputs.IPConfigurationResponse',
                 name: str,
                 type: str,
                 dns_settings: Optional['outputs.PublicIPAddressDnsSettingsResponse'] = None,
                 etag: Optional[str] = None,
                 id: Optional[str] = None,
                 idle_timeout_in_minutes: Optional[int] = None,
                 ip_address: Optional[str] = None,
                 location: Optional[str] = None,
                 provisioning_state: Optional[str] = None,
                 public_ip_address_version: Optional[str] = None,
                 public_ip_allocation_method: Optional[str] = None,
                 resource_guid: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        pulumi.set(__self__, "ip_configuration", ip_configuration)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
        if dns_settings is not None:
            pulumi.set(__self__, "dns_settings", dns_settings)
        if etag is not None:
pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if idle_timeout_in_minutes is not None: pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if location is not None: pulumi.set(__self__, "location", location) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if public_ip_address_version is not None: pulumi.set(__self__, "public_ip_address_version", public_ip_address_version) if public_ip_allocation_method is not None: pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="ipConfiguration") def ip_configuration(self) -> 'outputs.IPConfigurationResponse': return pulumi.get(self, "ip_configuration") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter(name="dnsSettings") def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']: return pulumi.get(self, "dns_settings") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="idleTimeoutInMinutes") def idle_timeout_in_minutes(self) -> Optional[int]: return pulumi.get(self, "idle_timeout_in_minutes") @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[str]: return pulumi.get(self, "ip_address") @property @pulumi.getter def location(self) -> Optional[str]: return pulumi.get(self, "location") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, 
"provisioning_state") @property @pulumi.getter(name="publicIPAddressVersion") def public_ip_address_version(self) -> Optional[str]: return pulumi.get(self, "public_ip_address_version") @property @pulumi.getter(name="publicIPAllocationMethod") def public_ip_allocation_method(self) -> Optional[str]: return pulumi.get(self, "public_ip_allocation_method") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ResourceNavigationLinkResponse(dict): def __init__(__self__, *, etag: str, provisioning_state: str, id: Optional[str] = None, link: Optional[str] = None, linked_resource_type: Optional[str] = None, name: Optional[str] = None): pulumi.set(__self__, "etag", etag) pulumi.set(__self__, "provisioning_state", provisioning_state) if id is not None: pulumi.set(__self__, "id", id) if link is not None: pulumi.set(__self__, "link", link) if linked_resource_type is not None: pulumi.set(__self__, "linked_resource_type", linked_resource_type) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def etag(self) -> str: return pulumi.get(self, "etag") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def link(self) -> Optional[str]: return pulumi.get(self, "link") @property @pulumi.getter(name="linkedResourceType") def linked_resource_type(self) -> Optional[str]: return pulumi.get(self, "linked_resource_type") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") def _translate_property(self, prop): return 
_tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class RouteResponse(dict): def __init__(__self__, *, next_hop_type: str, address_prefix: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, next_hop_ip_address: Optional[str] = None, provisioning_state: Optional[str] = None): pulumi.set(__self__, "next_hop_type", next_hop_type) if address_prefix is not None: pulumi.set(__self__, "address_prefix", address_prefix) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if next_hop_ip_address is not None: pulumi.set(__self__, "next_hop_ip_address", next_hop_ip_address) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) @property @pulumi.getter(name="nextHopType") def next_hop_type(self) -> str: return pulumi.get(self, "next_hop_type") @property @pulumi.getter(name="addressPrefix") def address_prefix(self) -> Optional[str]: return pulumi.get(self, "address_prefix") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="nextHopIpAddress") def next_hop_ip_address(self) -> Optional[str]: return pulumi.get(self, "next_hop_ip_address") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class RouteTableResponse(dict): def __init__(__self__, *, name: str, subnets: Sequence['outputs.SubnetResponse'], type: str, etag: Optional[str] = None, id: Optional[str] = None, location: Optional[str] = None, 
provisioning_state: Optional[str] = None, routes: Optional[Sequence['outputs.RouteResponse']] = None, tags: Optional[Mapping[str, str]] = None): pulumi.set(__self__, "name", name) pulumi.set(__self__, "subnets", subnets) pulumi.set(__self__, "type", type) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if routes is not None: pulumi.set(__self__, "routes", routes) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def subnets(self) -> Sequence['outputs.SubnetResponse']: return pulumi.get(self, "subnets") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: return pulumi.get(self, "location") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def routes(self) -> Optional[Sequence['outputs.RouteResponse']]: return pulumi.get(self, "routes") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class SecurityRuleResponse(dict): def __init__(__self__, *, access: str, destination_address_prefix: str, direction: str, protocol: str, source_address_prefix: str, description: Optional[str] = None, destination_port_range: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: 
Optional[str] = None, priority: Optional[int] = None, provisioning_state: Optional[str] = None, source_port_range: Optional[str] = None): pulumi.set(__self__, "access", access) pulumi.set(__self__, "destination_address_prefix", destination_address_prefix) pulumi.set(__self__, "direction", direction) pulumi.set(__self__, "protocol", protocol) pulumi.set(__self__, "source_address_prefix", source_address_prefix) if description is not None: pulumi.set(__self__, "description", description) if destination_port_range is not None: pulumi.set(__self__, "destination_port_range", destination_port_range) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if priority is not None: pulumi.set(__self__, "priority", priority) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if source_port_range is not None: pulumi.set(__self__, "source_port_range", source_port_range) @property @pulumi.getter def access(self) -> str: return pulumi.get(self, "access") @property @pulumi.getter(name="destinationAddressPrefix") def destination_address_prefix(self) -> str: return pulumi.get(self, "destination_address_prefix") @property @pulumi.getter def direction(self) -> str: return pulumi.get(self, "direction") @property @pulumi.getter def protocol(self) -> str: return pulumi.get(self, "protocol") @property @pulumi.getter(name="sourceAddressPrefix") def source_address_prefix(self) -> str: return pulumi.get(self, "source_address_prefix") @property @pulumi.getter def description(self) -> Optional[str]: return pulumi.get(self, "description") @property @pulumi.getter(name="destinationPortRange") def destination_port_range(self) -> Optional[str]: return pulumi.get(self, "destination_port_range") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return 
pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def priority(self) -> Optional[int]: return pulumi.get(self, "priority") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="sourcePortRange") def source_port_range(self) -> Optional[str]: return pulumi.get(self, "source_port_range") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class SubResourceResponse(dict): def __init__(__self__, *, id: Optional[str] = None): if id is not None: pulumi.set(__self__, "id", id) @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class SubnetResponse(dict): def __init__(__self__, *, ip_configurations: Sequence['outputs.IPConfigurationResponse'], address_prefix: Optional[str] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None, provisioning_state: Optional[str] = None, resource_navigation_links: Optional[Sequence['outputs.ResourceNavigationLinkResponse']] = None, route_table: Optional['outputs.RouteTableResponse'] = None): pulumi.set(__self__, "ip_configurations", ip_configurations) if address_prefix is not None: pulumi.set(__self__, "address_prefix", address_prefix) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if network_security_group is not None: pulumi.set(__self__, "network_security_group", network_security_group) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if 
resource_navigation_links is not None: pulumi.set(__self__, "resource_navigation_links", resource_navigation_links) if route_table is not None: pulumi.set(__self__, "route_table", route_table) @property @pulumi.getter(name="ipConfigurations") def ip_configurations(self) -> Sequence['outputs.IPConfigurationResponse']: return pulumi.get(self, "ip_configurations") @property @pulumi.getter(name="addressPrefix") def address_prefix(self) -> Optional[str]: return pulumi.get(self, "address_prefix") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="networkSecurityGroup") def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']: return pulumi.get(self, "network_security_group") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceNavigationLinks") def resource_navigation_links(self) -> Optional[Sequence['outputs.ResourceNavigationLinkResponse']]: return pulumi.get(self, "resource_navigation_links") @property @pulumi.getter(name="routeTable") def route_table(self) -> Optional['outputs.RouteTableResponse']: return pulumi.get(self, "route_table") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class TunnelConnectionHealthResponse(dict): def __init__(__self__, *, connection_status: str, egress_bytes_transferred: float, ingress_bytes_transferred: float, last_connection_established_utc_time: str, tunnel: str): pulumi.set(__self__, "connection_status", connection_status) pulumi.set(__self__, "egress_bytes_transferred", egress_bytes_transferred) pulumi.set(__self__, "ingress_bytes_transferred", 
ingress_bytes_transferred) pulumi.set(__self__, "last_connection_established_utc_time", last_connection_established_utc_time) pulumi.set(__self__, "tunnel", tunnel) @property @pulumi.getter(name="connectionStatus") def connection_status(self) -> str: return pulumi.get(self, "connection_status") @property @pulumi.getter(name="egressBytesTransferred") def egress_bytes_transferred(self) -> float: return pulumi.get(self, "egress_bytes_transferred") @property @pulumi.getter(name="ingressBytesTransferred") def ingress_bytes_transferred(self) -> float: return pulumi.get(self, "ingress_bytes_transferred") @property @pulumi.getter(name="lastConnectionEstablishedUtcTime") def last_connection_established_utc_time(self) -> str: return pulumi.get(self, "last_connection_established_utc_time") @property @pulumi.getter def tunnel(self) -> str: return pulumi.get(self, "tunnel") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkGatewayIPConfigurationResponse(dict): def __init__(__self__, *, provisioning_state: str, public_ip_address: 'outputs.SubResourceResponse', subnet: 'outputs.SubResourceResponse', etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, private_ip_allocation_method: Optional[str] = None): pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "public_ip_address", public_ip_address) pulumi.set(__self__, "subnet", subnet) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if private_ip_allocation_method is not None: pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicIPAddress") def public_ip_address(self) -> 
'outputs.SubResourceResponse': return pulumi.get(self, "public_ip_address") @property @pulumi.getter def subnet(self) -> 'outputs.SubResourceResponse': return pulumi.get(self, "subnet") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="privateIPAllocationMethod") def private_ip_allocation_method(self) -> Optional[str]: return pulumi.get(self, "private_ip_allocation_method") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkGatewayResponse(dict): def __init__(__self__, *, gateway_type: str, ip_configurations: Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse'], name: str, provisioning_state: str, type: str, vpn_type: str, active_active: Optional[bool] = None, bgp_settings: Optional['outputs.BgpSettingsResponse'] = None, enable_bgp: Optional[bool] = None, etag: Optional[str] = None, gateway_default_site: Optional['outputs.SubResourceResponse'] = None, id: Optional[str] = None, location: Optional[str] = None, resource_guid: Optional[str] = None, sku: Optional['outputs.VirtualNetworkGatewaySkuResponse'] = None, tags: Optional[Mapping[str, str]] = None, vpn_client_configuration: Optional['outputs.VpnClientConfigurationResponse'] = None): pulumi.set(__self__, "gateway_type", gateway_type) pulumi.set(__self__, "ip_configurations", ip_configurations) pulumi.set(__self__, "name", name) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "type", type) pulumi.set(__self__, "vpn_type", vpn_type) if active_active is not None: pulumi.set(__self__, "active_active", active_active) if bgp_settings is not None: pulumi.set(__self__, "bgp_settings", bgp_settings) if enable_bgp is not None: pulumi.set(__self__, 
"enable_bgp", enable_bgp) if etag is not None: pulumi.set(__self__, "etag", etag) if gateway_default_site is not None: pulumi.set(__self__, "gateway_default_site", gateway_default_site) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if resource_guid is not None: pulumi.set(__self__, "resource_guid", resource_guid) if sku is not None: pulumi.set(__self__, "sku", sku) if tags is not None: pulumi.set(__self__, "tags", tags) if vpn_client_configuration is not None: pulumi.set(__self__, "vpn_client_configuration", vpn_client_configuration) @property @pulumi.getter(name="gatewayType") def gateway_type(self) -> str: return pulumi.get(self, "gateway_type") @property @pulumi.getter(name="ipConfigurations") def ip_configurations(self) -> Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']: return pulumi.get(self, "ip_configurations") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter(name="vpnType") def vpn_type(self) -> str: return pulumi.get(self, "vpn_type") @property @pulumi.getter(name="activeActive") def active_active(self) -> Optional[bool]: return pulumi.get(self, "active_active") @property @pulumi.getter(name="bgpSettings") def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']: return pulumi.get(self, "bgp_settings") @property @pulumi.getter(name="enableBgp") def enable_bgp(self) -> Optional[bool]: return pulumi.get(self, "enable_bgp") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter(name="gatewayDefaultSite") def gateway_default_site(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "gateway_default_site") @property 
@pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: return pulumi.get(self, "location") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> Optional[str]: return pulumi.get(self, "resource_guid") @property @pulumi.getter def sku(self) -> Optional['outputs.VirtualNetworkGatewaySkuResponse']: return pulumi.get(self, "sku") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") @property @pulumi.getter(name="vpnClientConfiguration") def vpn_client_configuration(self) -> Optional['outputs.VpnClientConfigurationResponse']: return pulumi.get(self, "vpn_client_configuration") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkGatewaySkuResponse(dict): def __init__(__self__, *, name: str, tier: str, capacity: Optional[int] = None): pulumi.set(__self__, "name", name) pulumi.set(__self__, "tier", tier) if capacity is not None: pulumi.set(__self__, "capacity", capacity) @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def tier(self) -> str: return pulumi.get(self, "tier") @property @pulumi.getter def capacity(self) -> Optional[int]: return pulumi.get(self, "capacity") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VirtualNetworkPeeringResponse(dict): def __init__(__self__, *, allow_forwarded_traffic: Optional[bool] = None, allow_gateway_transit: Optional[bool] = None, allow_virtual_network_access: Optional[bool] = None, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None, peering_state: Optional[str] = None, provisioning_state: Optional[str] = None, remote_virtual_network: Optional['outputs.SubResourceResponse'] = None, use_remote_gateways: Optional[bool] = None): if 
allow_forwarded_traffic is not None: pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic) if allow_gateway_transit is not None: pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit) if allow_virtual_network_access is not None: pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if peering_state is not None: pulumi.set(__self__, "peering_state", peering_state) if provisioning_state is not None: pulumi.set(__self__, "provisioning_state", provisioning_state) if remote_virtual_network is not None: pulumi.set(__self__, "remote_virtual_network", remote_virtual_network) if use_remote_gateways is not None: pulumi.set(__self__, "use_remote_gateways", use_remote_gateways) @property @pulumi.getter(name="allowForwardedTraffic") def allow_forwarded_traffic(self) -> Optional[bool]: return pulumi.get(self, "allow_forwarded_traffic") @property @pulumi.getter(name="allowGatewayTransit") def allow_gateway_transit(self) -> Optional[bool]: return pulumi.get(self, "allow_gateway_transit") @property @pulumi.getter(name="allowVirtualNetworkAccess") def allow_virtual_network_access(self) -> Optional[bool]: return pulumi.get(self, "allow_virtual_network_access") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="peeringState") def peering_state(self) -> Optional[str]: return pulumi.get(self, "peering_state") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="remoteVirtualNetwork") def 
remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']: return pulumi.get(self, "remote_virtual_network") @property @pulumi.getter(name="useRemoteGateways") def use_remote_gateways(self) -> Optional[bool]: return pulumi.get(self, "use_remote_gateways") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VpnClientConfigurationResponse(dict): def __init__(__self__, *, vpn_client_address_pool: Optional['outputs.AddressSpaceResponse'] = None, vpn_client_revoked_certificates: Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']] = None, vpn_client_root_certificates: Optional[Sequence['outputs.VpnClientRootCertificateResponse']] = None): if vpn_client_address_pool is not None: pulumi.set(__self__, "vpn_client_address_pool", vpn_client_address_pool) if vpn_client_revoked_certificates is not None: pulumi.set(__self__, "vpn_client_revoked_certificates", vpn_client_revoked_certificates) if vpn_client_root_certificates is not None: pulumi.set(__self__, "vpn_client_root_certificates", vpn_client_root_certificates) @property @pulumi.getter(name="vpnClientAddressPool") def vpn_client_address_pool(self) -> Optional['outputs.AddressSpaceResponse']: return pulumi.get(self, "vpn_client_address_pool") @property @pulumi.getter(name="vpnClientRevokedCertificates") def vpn_client_revoked_certificates(self) -> Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']]: return pulumi.get(self, "vpn_client_revoked_certificates") @property @pulumi.getter(name="vpnClientRootCertificates") def vpn_client_root_certificates(self) -> Optional[Sequence['outputs.VpnClientRootCertificateResponse']]: return pulumi.get(self, "vpn_client_root_certificates") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VpnClientRevokedCertificateResponse(dict): def __init__(__self__, *, provisioning_state: str, etag: Optional[str] = None, 
id: Optional[str] = None, name: Optional[str] = None, thumbprint: Optional[str] = None): pulumi.set(__self__, "provisioning_state", provisioning_state) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) if thumbprint is not None: pulumi.set(__self__, "thumbprint", thumbprint) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: return pulumi.get(self, "provisioning_state") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def thumbprint(self) -> Optional[str]: return pulumi.get(self, "thumbprint") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class VpnClientRootCertificateResponse(dict): def __init__(__self__, *, provisioning_state: str, public_cert_data: str, etag: Optional[str] = None, id: Optional[str] = None, name: Optional[str] = None): pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "public_cert_data", public_cert_data) if etag is not None: pulumi.set(__self__, "etag", etag) if id is not None: pulumi.set(__self__, "id", id) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicCertData") def public_cert_data(self) -> str: return pulumi.get(self, "public_cert_data") @property @pulumi.getter def etag(self) -> Optional[str]: return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> Optional[str]: return 
pulumi.get(self, "name") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
true
true
f70ef43cb2465a8a7aace46d8fa63d92a517dfb1
591
py
Python
webware/MiddleKit/Core/StringAttr.py
PeaceWorksTechnologySolutions/w4py3-middlekit
a9554e20c47010e7b0c0deee63e1786482c59a1c
[ "MIT" ]
2
2020-10-31T09:12:58.000Z
2021-02-20T13:52:14.000Z
webware/MiddleKit/Core/StringAttr.py
WebwareForPython/w4py3-middlekit
f740e2d2d3a5c225d6b8f9eb27ac08f8deed47e6
[ "MIT" ]
2
2020-01-07T15:24:09.000Z
2020-01-08T15:39:57.000Z
webware/MiddleKit/Core/StringAttr.py
PeaceWorksTechnologySolutions/w4py3-middlekit
a9554e20c47010e7b0c0deee63e1786482c59a1c
[ "MIT" ]
1
2021-09-27T21:04:18.000Z
2021-09-27T21:04:18.000Z
from .BasicTypeAttr import BasicTypeAttr class StringAttr(BasicTypeAttr): def __init__(self, attr): BasicTypeAttr.__init__(self, attr) if self.get('Max') is not None: self['Max'] = int(self['Max']) if self.get('Min') is not None: self['Min'] = int(self['Min']) def printWarnings(self, out): if self.get('Max') in (None, '') and not self.get('SQLType'): out.write('warning: model %s: class %s: attr %s: max string length unspecified\n' % ( self.model().name(), self.klass().name(), self.name()))
34.764706
97
0.57868
from .BasicTypeAttr import BasicTypeAttr class StringAttr(BasicTypeAttr): def __init__(self, attr): BasicTypeAttr.__init__(self, attr) if self.get('Max') is not None: self['Max'] = int(self['Max']) if self.get('Min') is not None: self['Min'] = int(self['Min']) def printWarnings(self, out): if self.get('Max') in (None, '') and not self.get('SQLType'): out.write('warning: model %s: class %s: attr %s: max string length unspecified\n' % ( self.model().name(), self.klass().name(), self.name()))
true
true
f70ef508b4fa96a799589b3ae368486bd1d19c73
184
py
Python
src/naive/Python/Numba/fib.py
juliancoffee/fib
c337ccc570dfb692d015ff4f1155d44925f8f47f
[ "MIT" ]
4
2019-07-29T22:19:23.000Z
2021-01-22T05:27:39.000Z
src/naive/Python/Numba/fib.py
juliancoffee/fib
c337ccc570dfb692d015ff4f1155d44925f8f47f
[ "MIT" ]
2
2019-06-13T18:14:53.000Z
2019-06-14T17:24:37.000Z
src/naive/Python/Numba/fib.py
juliancoffee/fib
c337ccc570dfb692d015ff4f1155d44925f8f47f
[ "MIT" ]
8
2019-06-10T09:37:25.000Z
2019-06-30T07:55:02.000Z
from numba import jit import sys @jit def fib(n): return 1 if n < 3 else fib(n-1) + fib(n-2) if __name__ == "__main__": n = int(sys.argv[1]) print("{}".format(fib(n)))
14.153846
46
0.581522
from numba import jit import sys @jit def fib(n): return 1 if n < 3 else fib(n-1) + fib(n-2) if __name__ == "__main__": n = int(sys.argv[1]) print("{}".format(fib(n)))
true
true
f70ef56c468100d0c372ae156de34620904d237e
12,703
py
Python
tests/generate_go_ethereum_fixture.py
happyuc-project/webu.py
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
[ "MIT" ]
null
null
null
tests/generate_go_ethereum_fixture.py
happyuc-project/webu.py
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
[ "MIT" ]
null
null
null
tests/generate_go_ethereum_fixture.py
happyuc-project/webu.py
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
[ "MIT" ]
null
null
null
import contextlib import json import os import pprint import shutil import signal import socket import subprocess import sys import tempfile import time from cytoolz import ( merge, valmap, ) from eth_utils.curried import ( apply_formatter_if, is_bytes, is_checksum_address, is_dict, is_same_address, remove_0x_prefix, to_hex, to_text, to_wei, ) from webu import Webu from webu.utils.module_testing.emitter_contract import ( EMITTER_ABI, EMITTER_BYTECODE, EMITTER_ENUM, ) from webu.utils.module_testing.math_contract import ( MATH_ABI, MATH_BYTECODE, ) COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d' KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501 KEYFILE_PW = 'webupy-test' KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501 RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6' UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01' UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13' UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW GENESIS_DATA = { "nonce": "0xdeadbeefdeadbeef", "timestamp": "0x0", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501 "extraData": "0x7765623370792d746573742d636861696e", "gasLimit": "0x47d5cc", "difficulty": "0x01", "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501 "coinbase": 
"0x3333333333333333333333333333333333333333", "alloc": { remove_0x_prefix(COINBASE): { 'balance': str(to_wei(1000000000, 'huc')), }, remove_0x_prefix(RAW_TXN_ACCOUNT): { 'balance': str(to_wei(10, 'huc')), }, remove_0x_prefix(UNLOCKABLE_ACCOUNT): { 'balance': str(to_wei(10, 'huc')), }, }, "config": { "chainId": 131277322940537, # the string 'webupy' as an integer "homesteadBlock": 0, "eip155Block": 0, "eip158Block": 0 }, } def ensure_path_exists(dir_path): """ Make sure that a path exists """ if not os.path.exists(dir_path): os.makedirs(dir_path) return True return False @contextlib.contextmanager def tempdir(): dir_path = tempfile.mkdtemp() try: yield dir_path finally: shutil.rmtree(dir_path) def get_open_port(): sock = socket.socket() sock.bind(('127.0.0.1', 0)) port = sock.getsockname()[1] sock.close() return str(port) def get_ghuc_binary(): from ghuc.install import ( get_executable_path, install_ghuc, ) if 'GETH_BINARY' in os.environ: return os.environ['GETH_BINARY'] elif 'GETH_VERSION' in os.environ: ghuc_version = os.environ['GETH_VERSION'] _ghuc_binary = get_executable_path(ghuc_version) if not os.path.exists(_ghuc_binary): install_ghuc(ghuc_version) assert os.path.exists(_ghuc_binary) return _ghuc_binary else: return 'ghuc' def wait_for_popen(proc, timeout): start = time.time() while time.time() < start + timeout: if proc.poll() is None: time.sleep(0.01) else: break def kill_proc_gracefully(proc): if proc.poll() is None: proc.send_signal(signal.SIGINT) wait_for_popen(proc, 13) if proc.poll() is None: proc.terminate() wait_for_popen(proc, 5) if proc.poll() is None: proc.kill() wait_for_popen(proc, 2) def wait_for_socket(ipc_path, timeout=30): start = time.time() while time.time() < start + timeout: try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(ipc_path) sock.settimeout(timeout) except (FileNotFoundError, socket.error): time.sleep(0.01) else: break @contextlib.contextmanager def graceful_kill_on_exit(proc): try: yield proc finally: 
kill_proc_gracefully(proc) @contextlib.contextmanager def get_ghuc_process(ghuc_binary, datadir, genesis_file_path, ghuc_ipc_path, ghuc_port): init_datadir_command = ( ghuc_binary, '--datadir', datadir, 'init', genesis_file_path, ) subprocess.check_output( init_datadir_command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, ) run_ghuc_command = ( ghuc_binary, '--datadir', datadir, '--ipcpath', ghuc_ipc_path, '--ethash.dagsondisk', '1', '--gcmode', 'archive', '--nodiscover', '--port', ghuc_port, '--coinbase', COINBASE[2:], ) popen_proc = subprocess.Popen( run_ghuc_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, ) with popen_proc as proc: with graceful_kill_on_exit(proc) as graceful_proc: yield graceful_proc output, errors = proc.communicate() print( "Ghuc Process Exited:\n" "stdout:{0}\n\n" "stderr:{1}\n\n".format( to_text(output), to_text(errors), ) ) def write_config_json(config, datadir): bytes_to_hex = apply_formatter_if(is_bytes, to_hex) config_json_dict = valmap(bytes_to_hex, config) config_path = os.path.join(datadir, 'config.json') with open(config_path, 'w') as config_file: config_file.write(json.dumps(config_json_dict)) config_file.write('\n') def generate_go_happyuc_fixture(destination_dir): with contextlib.ExitStack() as stack: datadir = stack.enter_context(tempdir()) keystore_dir = os.path.join(datadir, 'keystore') ensure_path_exists(keystore_dir) keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME) with open(keyfile_path, 'w') as keyfile: keyfile.write(KEYFILE_DATA) genesis_file_path = os.path.join(datadir, 'genesis.json') with open(genesis_file_path, 'w') as genesis_file: genesis_file.write(json.dumps(GENESIS_DATA)) ghuc_ipc_path_dir = stack.enter_context(tempdir()) ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc') ghuc_port = get_open_port() ghuc_binary = get_ghuc_binary() with get_ghuc_process( ghuc_binary=ghuc_binary, datadir=datadir, genesis_file_path=genesis_file_path, 
ghuc_ipc_path=ghuc_ipc_path, ghuc_port=ghuc_port): wait_for_socket(ghuc_ipc_path) webu = Webu(Webu.IPCProvider(ghuc_ipc_path)) chain_data = setup_chain_state(webu) # close ghuc by exiting context # must be closed before copying data dir verify_chain_state(webu, chain_data) # verify that chain state is still valid after closing # and re-opening ghuc with get_ghuc_process( ghuc_binary=ghuc_binary, datadir=datadir, genesis_file_path=genesis_file_path, ghuc_ipc_path=ghuc_ipc_path, ghuc_port=ghuc_port): wait_for_socket(ghuc_ipc_path) webu = Webu(Webu.IPCProvider(ghuc_ipc_path)) verify_chain_state(webu, chain_data) static_data = { 'raw_txn_account': RAW_TXN_ACCOUNT, 'keyfile_pw': KEYFILE_PW, } config = merge(chain_data, static_data) pprint.pprint(config) write_config_json(config, datadir) shutil.copytree(datadir, destination_dir) def verify_chain_state(webu, chain_data): receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash']) latest = webu.eth.getBlock('latest') assert receipt.blockNumber <= latest.number def mine_transaction_hash(webu, txn_hash): start_time = time.time() webu.miner.start(1) while time.time() < start_time + 60: receipt = webu.eth.getTransactionReceipt(txn_hash) if receipt is not None: webu.miner.stop() return receipt else: time.sleep(0.1) else: raise ValueError("Math contract deploy transaction not mined during wait period") def mine_block(webu): origin_block_number = webu.eth.blockNumber start_time = time.time() webu.miner.start(1) while time.time() < start_time + 60: block_number = webu.eth.blockNumber if block_number > origin_block_number: webu.miner.stop() return block_number else: time.sleep(0.1) else: raise ValueError("No block mined during wait period") def deploy_contract(webu, name, factory): webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW) deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase}) print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash) deploy_receipt = mine_transaction_hash(webu, 
deploy_txn_hash) print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper())) contract_address = deploy_receipt['contractAddress'] assert is_checksum_address(contract_address) print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address) return deploy_receipt def setup_chain_state(webu): coinbase = webu.eth.coinbase assert is_same_address(coinbase, COINBASE) # # Math Contract # math_contract_factory = webu.eth.contract( abi=MATH_ABI, bytecode=MATH_BYTECODE, ) math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory) assert is_dict(math_deploy_receipt) # # Emitter Contract # emitter_contract_factory = webu.eth.contract( abi=EMITTER_ABI, bytecode=EMITTER_BYTECODE, ) emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory) emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress']) txn_hash_with_log = emitter_contract.transact({ 'from': webu.eth.coinbase, }).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321) print('TXN_HASH_WITH_LOG:', txn_hash_with_log) txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log) block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash']) print('BLOCK_HASH_WITH_LOG:', block_with_log['hash']) # # Empty Block # empty_block_number = mine_block(webu) print('MINED_EMPTY_BLOCK') empty_block = webu.eth.getBlock(empty_block_number) assert is_dict(empty_block) assert not empty_block['transactions'] print('EMPTY_BLOCK_HASH:', empty_block['hash']) # # Block with Transaction # webu.personal.unlockAccount(coinbase, KEYFILE_PW) webu.miner.start(1) mined_txn_hash = webu.eth.sendTransaction({ 'from': coinbase, 'to': coinbase, 'value': 1, 'gas': 21000, 'gas_price': webu.eth.gasPrice, }) mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash) print('MINED_TXN_HASH:', mined_txn_hash) block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash']) print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash']) ghuc_fixture = 
{ 'math_deploy_txn_hash': math_deploy_receipt['transactionHash'], 'math_address': math_deploy_receipt['contractAddress'], 'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'], 'emitter_address': emitter_deploy_receipt['contractAddress'], 'txn_hash_with_log': txn_hash_with_log, 'block_hash_with_log': block_with_log['hash'], 'empty_block_hash': empty_block['hash'], 'mined_txn_hash': mined_txn_hash, 'block_with_txn_hash': block_with_txn['hash'], } return ghuc_fixture if __name__ == '__main__': fixture_dir = sys.argv[1] generate_go_happyuc_fixture(fixture_dir)
30.389952
522
0.664646
import contextlib import json import os import pprint import shutil import signal import socket import subprocess import sys import tempfile import time from cytoolz import ( merge, valmap, ) from eth_utils.curried import ( apply_formatter_if, is_bytes, is_checksum_address, is_dict, is_same_address, remove_0x_prefix, to_hex, to_text, to_wei, ) from webu import Webu from webu.utils.module_testing.emitter_contract import ( EMITTER_ABI, EMITTER_BYTECODE, EMITTER_ENUM, ) from webu.utils.module_testing.math_contract import ( MATH_ABI, MATH_BYTECODE, ) COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d' KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' KEYFILE_PW = 'webupy-test' KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6' UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01' UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13' UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW GENESIS_DATA = { "nonce": "0xdeadbeefdeadbeef", "timestamp": "0x0", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "extraData": "0x7765623370792d746573742d636861696e", "gasLimit": "0x47d5cc", "difficulty": "0x01", "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", "coinbase": "0x3333333333333333333333333333333333333333", "alloc": { 
remove_0x_prefix(COINBASE): { 'balance': str(to_wei(1000000000, 'huc')), }, remove_0x_prefix(RAW_TXN_ACCOUNT): { 'balance': str(to_wei(10, 'huc')), }, remove_0x_prefix(UNLOCKABLE_ACCOUNT): { 'balance': str(to_wei(10, 'huc')), }, }, "config": { "chainId": 131277322940537, "homesteadBlock": 0, "eip155Block": 0, "eip158Block": 0 }, } def ensure_path_exists(dir_path): if not os.path.exists(dir_path): os.makedirs(dir_path) return True return False @contextlib.contextmanager def tempdir(): dir_path = tempfile.mkdtemp() try: yield dir_path finally: shutil.rmtree(dir_path) def get_open_port(): sock = socket.socket() sock.bind(('127.0.0.1', 0)) port = sock.getsockname()[1] sock.close() return str(port) def get_ghuc_binary(): from ghuc.install import ( get_executable_path, install_ghuc, ) if 'GETH_BINARY' in os.environ: return os.environ['GETH_BINARY'] elif 'GETH_VERSION' in os.environ: ghuc_version = os.environ['GETH_VERSION'] _ghuc_binary = get_executable_path(ghuc_version) if not os.path.exists(_ghuc_binary): install_ghuc(ghuc_version) assert os.path.exists(_ghuc_binary) return _ghuc_binary else: return 'ghuc' def wait_for_popen(proc, timeout): start = time.time() while time.time() < start + timeout: if proc.poll() is None: time.sleep(0.01) else: break def kill_proc_gracefully(proc): if proc.poll() is None: proc.send_signal(signal.SIGINT) wait_for_popen(proc, 13) if proc.poll() is None: proc.terminate() wait_for_popen(proc, 5) if proc.poll() is None: proc.kill() wait_for_popen(proc, 2) def wait_for_socket(ipc_path, timeout=30): start = time.time() while time.time() < start + timeout: try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(ipc_path) sock.settimeout(timeout) except (FileNotFoundError, socket.error): time.sleep(0.01) else: break @contextlib.contextmanager def graceful_kill_on_exit(proc): try: yield proc finally: kill_proc_gracefully(proc) @contextlib.contextmanager def get_ghuc_process(ghuc_binary, datadir, genesis_file_path, 
ghuc_ipc_path, ghuc_port): init_datadir_command = ( ghuc_binary, '--datadir', datadir, 'init', genesis_file_path, ) subprocess.check_output( init_datadir_command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, ) run_ghuc_command = ( ghuc_binary, '--datadir', datadir, '--ipcpath', ghuc_ipc_path, '--ethash.dagsondisk', '1', '--gcmode', 'archive', '--nodiscover', '--port', ghuc_port, '--coinbase', COINBASE[2:], ) popen_proc = subprocess.Popen( run_ghuc_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, ) with popen_proc as proc: with graceful_kill_on_exit(proc) as graceful_proc: yield graceful_proc output, errors = proc.communicate() print( "Ghuc Process Exited:\n" "stdout:{0}\n\n" "stderr:{1}\n\n".format( to_text(output), to_text(errors), ) ) def write_config_json(config, datadir): bytes_to_hex = apply_formatter_if(is_bytes, to_hex) config_json_dict = valmap(bytes_to_hex, config) config_path = os.path.join(datadir, 'config.json') with open(config_path, 'w') as config_file: config_file.write(json.dumps(config_json_dict)) config_file.write('\n') def generate_go_happyuc_fixture(destination_dir): with contextlib.ExitStack() as stack: datadir = stack.enter_context(tempdir()) keystore_dir = os.path.join(datadir, 'keystore') ensure_path_exists(keystore_dir) keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME) with open(keyfile_path, 'w') as keyfile: keyfile.write(KEYFILE_DATA) genesis_file_path = os.path.join(datadir, 'genesis.json') with open(genesis_file_path, 'w') as genesis_file: genesis_file.write(json.dumps(GENESIS_DATA)) ghuc_ipc_path_dir = stack.enter_context(tempdir()) ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc') ghuc_port = get_open_port() ghuc_binary = get_ghuc_binary() with get_ghuc_process( ghuc_binary=ghuc_binary, datadir=datadir, genesis_file_path=genesis_file_path, ghuc_ipc_path=ghuc_ipc_path, ghuc_port=ghuc_port): wait_for_socket(ghuc_ipc_path) webu = Webu(Webu.IPCProvider(ghuc_ipc_path)) 
chain_data = setup_chain_state(webu) verify_chain_state(webu, chain_data) with get_ghuc_process( ghuc_binary=ghuc_binary, datadir=datadir, genesis_file_path=genesis_file_path, ghuc_ipc_path=ghuc_ipc_path, ghuc_port=ghuc_port): wait_for_socket(ghuc_ipc_path) webu = Webu(Webu.IPCProvider(ghuc_ipc_path)) verify_chain_state(webu, chain_data) static_data = { 'raw_txn_account': RAW_TXN_ACCOUNT, 'keyfile_pw': KEYFILE_PW, } config = merge(chain_data, static_data) pprint.pprint(config) write_config_json(config, datadir) shutil.copytree(datadir, destination_dir) def verify_chain_state(webu, chain_data): receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash']) latest = webu.eth.getBlock('latest') assert receipt.blockNumber <= latest.number def mine_transaction_hash(webu, txn_hash): start_time = time.time() webu.miner.start(1) while time.time() < start_time + 60: receipt = webu.eth.getTransactionReceipt(txn_hash) if receipt is not None: webu.miner.stop() return receipt else: time.sleep(0.1) else: raise ValueError("Math contract deploy transaction not mined during wait period") def mine_block(webu): origin_block_number = webu.eth.blockNumber start_time = time.time() webu.miner.start(1) while time.time() < start_time + 60: block_number = webu.eth.blockNumber if block_number > origin_block_number: webu.miner.stop() return block_number else: time.sleep(0.1) else: raise ValueError("No block mined during wait period") def deploy_contract(webu, name, factory): webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW) deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase}) print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash) deploy_receipt = mine_transaction_hash(webu, deploy_txn_hash) print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper())) contract_address = deploy_receipt['contractAddress'] assert is_checksum_address(contract_address) print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address) return deploy_receipt def 
setup_chain_state(webu): coinbase = webu.eth.coinbase assert is_same_address(coinbase, COINBASE) math_contract_factory = webu.eth.contract( abi=MATH_ABI, bytecode=MATH_BYTECODE, ) math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory) assert is_dict(math_deploy_receipt) emitter_contract_factory = webu.eth.contract( abi=EMITTER_ABI, bytecode=EMITTER_BYTECODE, ) emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory) emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress']) txn_hash_with_log = emitter_contract.transact({ 'from': webu.eth.coinbase, }).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321) print('TXN_HASH_WITH_LOG:', txn_hash_with_log) txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log) block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash']) print('BLOCK_HASH_WITH_LOG:', block_with_log['hash']) empty_block_number = mine_block(webu) print('MINED_EMPTY_BLOCK') empty_block = webu.eth.getBlock(empty_block_number) assert is_dict(empty_block) assert not empty_block['transactions'] print('EMPTY_BLOCK_HASH:', empty_block['hash']) webu.personal.unlockAccount(coinbase, KEYFILE_PW) webu.miner.start(1) mined_txn_hash = webu.eth.sendTransaction({ 'from': coinbase, 'to': coinbase, 'value': 1, 'gas': 21000, 'gas_price': webu.eth.gasPrice, }) mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash) print('MINED_TXN_HASH:', mined_txn_hash) block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash']) print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash']) ghuc_fixture = { 'math_deploy_txn_hash': math_deploy_receipt['transactionHash'], 'math_address': math_deploy_receipt['contractAddress'], 'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'], 'emitter_address': emitter_deploy_receipt['contractAddress'], 'txn_hash_with_log': txn_hash_with_log, 'block_hash_with_log': block_with_log['hash'], 'empty_block_hash': 
empty_block['hash'], 'mined_txn_hash': mined_txn_hash, 'block_with_txn_hash': block_with_txn['hash'], } return ghuc_fixture if __name__ == '__main__': fixture_dir = sys.argv[1] generate_go_happyuc_fixture(fixture_dir)
true
true
f70ef5c61dda827100835b501bf9c0da123c3708
4,333
py
Python
bayesopt/graph_features.py
xingchenwan/nasbowl
0abaa91b6ce436655a7488f75ed5aeca8df71246
[ "MIT" ]
18
2020-12-29T13:00:19.000Z
2022-03-30T08:34:47.000Z
bayesopt/graph_features.py
xingchenwan/nasbowl
0abaa91b6ce436655a7488f75ed5aeca8df71246
[ "MIT" ]
1
2021-07-16T18:03:26.000Z
2021-08-30T09:20:29.000Z
bayesopt/graph_features.py
xingchenwan/nasbowl
0abaa91b6ce436655a7488f75ed5aeca8df71246
[ "MIT" ]
4
2021-03-22T06:18:34.000Z
2022-03-11T16:00:03.000Z
import networkx as nx class FeatureExtractor: """ Extracting some hand-crafted x1_features for the x1_graphs - Number of (effective nodes) - Average """ def __init__(self, g: nx.Graph, node_attr_name='op_name', s='input', t='output'): """ g: a valid networkx graph node_attr_name: the tag of the node attribute. default is 'op_name' s, t: the tag of the two special input and output nodes. Note that there can be more than one input node (s), but only one output node (t) """ self.g = g self.input_index = [] self.output_index = None for n in range(g.number_of_nodes()): assert node_attr_name in list(dict(g.nodes[n]).keys()), node_attr_name + " is not found in " + str( g.nodes[n]) if str(g.nodes[n][node_attr_name]) == str(s): self.input_index.append(n) elif str(g.nodes[n][node_attr_name]) == str(t): self.output_index = n self.node_attr_name = node_attr_name if len(self.input_index) == 0: raise ValueError("Unknown input node!") elif self.output_index is None: raise ValueError("Unknown output node!") # Specify the special nodes (i.e. the input and output, source and sink) if isinstance(self.g, nx.DiGraph): self.undirected_g = self.g.to_undirected() else: self.undirected_g = self.g def __getattr__(self, item): """Identify the feature already implemented in the graph class""" try: res = getattr(self.g, item) except AttributeError: raise AttributeError("Item" + str(item) + ' is not found either in the feature extractor nor the graph' 'instance!') if callable(res): return res() return res def _paths(self) -> list: """Enumerate all paths from input to output. 
Return a list of lists with each sub-list the node indices from the input to output Data shape: (N_input x2 N_path x2 length of each path) for SISO graph, the data shape is (1 x2 N_path x2 length of each path) """ if not isinstance(self.g, nx.DiGraph): raise TypeError("Longest path is only applicable for directed graph!") result = [] for i in self.input_index: result.append(list(nx.all_simple_paths(self.g, i, self.output_index))) return result @property def number_of_paths(self): paths = self._paths() if len(paths) == 1: return len(paths[0]) return [len(i) for i in paths] @property def longest_path(self): """Return the longest path from input to output. the return type is a list in case when there is more than one input node.""" paths = self._paths() if len(paths) == 1: # if the list is a singlet (i.e. the S-T style graph), then return a scalar output only return len(max(paths[0], key=lambda x: len(x))) return [len(max(i, key=lambda x: len(x))) for i in paths] @property def degree_distribution(self, normalize=False): """ return the degree distribution of the *undirected* counterpart of the graph, if the graph is directed. return a dictionary in the form of ((D1, N1), (D2, N2)... ) where Di is the degree and Ni is the frequency """ from collections import Counter degree_seq = sorted([d for d, n in dict(self.undirected_g.degree)], reverse=True) degree_count = Counter(degree_seq) deg, cnt = zip(*degree_count.items()) if normalize: n = self.undirected_g.number_of_nodes() cnt //= n return deg, cnt @property def laplacian_spectrum(self, ): return nx.normalized_laplacian_spectrum(self.undirected_g) @property def average_undirected_degree(self): return sum(dict(self.undirected_g.degree).values()) / (self.undirected_g.number_of_nodes() + 0.0) @property def number_of_conv3x3(self): i = 0 for node, attr in self.g.nodes(data=True): if attr['op_name'] == 'conv3x3-bn-relu': i += 1 return i
38.6875
121
0.596354
import networkx as nx class FeatureExtractor: def __init__(self, g: nx.Graph, node_attr_name='op_name', s='input', t='output'): self.g = g self.input_index = [] self.output_index = None for n in range(g.number_of_nodes()): assert node_attr_name in list(dict(g.nodes[n]).keys()), node_attr_name + " is not found in " + str( g.nodes[n]) if str(g.nodes[n][node_attr_name]) == str(s): self.input_index.append(n) elif str(g.nodes[n][node_attr_name]) == str(t): self.output_index = n self.node_attr_name = node_attr_name if len(self.input_index) == 0: raise ValueError("Unknown input node!") elif self.output_index is None: raise ValueError("Unknown output node!") if isinstance(self.g, nx.DiGraph): self.undirected_g = self.g.to_undirected() else: self.undirected_g = self.g def __getattr__(self, item): try: res = getattr(self.g, item) except AttributeError: raise AttributeError("Item" + str(item) + ' is not found either in the feature extractor nor the graph' 'instance!') if callable(res): return res() return res def _paths(self) -> list: if not isinstance(self.g, nx.DiGraph): raise TypeError("Longest path is only applicable for directed graph!") result = [] for i in self.input_index: result.append(list(nx.all_simple_paths(self.g, i, self.output_index))) return result @property def number_of_paths(self): paths = self._paths() if len(paths) == 1: return len(paths[0]) return [len(i) for i in paths] @property def longest_path(self): paths = self._paths() if len(paths) == 1: return len(max(paths[0], key=lambda x: len(x))) return [len(max(i, key=lambda x: len(x))) for i in paths] @property def degree_distribution(self, normalize=False): from collections import Counter degree_seq = sorted([d for d, n in dict(self.undirected_g.degree)], reverse=True) degree_count = Counter(degree_seq) deg, cnt = zip(*degree_count.items()) if normalize: n = self.undirected_g.number_of_nodes() cnt //= n return deg, cnt @property def laplacian_spectrum(self, ): return 
nx.normalized_laplacian_spectrum(self.undirected_g) @property def average_undirected_degree(self): return sum(dict(self.undirected_g.degree).values()) / (self.undirected_g.number_of_nodes() + 0.0) @property def number_of_conv3x3(self): i = 0 for node, attr in self.g.nodes(data=True): if attr['op_name'] == 'conv3x3-bn-relu': i += 1 return i
true
true
f70ef64b5fe5985fc2fd93a3b677151ca2461f5b
3,423
py
Python
src/datasets/hits_dataset.py
ReyesDeJong/Deep-SVDD-PyTorch
1fc7eae1474556f869d5c5422da74fd4fe2f1aed
[ "MIT" ]
null
null
null
src/datasets/hits_dataset.py
ReyesDeJong/Deep-SVDD-PyTorch
1fc7eae1474556f869d5c5422da74fd4fe2f1aed
[ "MIT" ]
null
null
null
src/datasets/hits_dataset.py
ReyesDeJong/Deep-SVDD-PyTorch
1fc7eae1474556f869d5c5422da74fd4fe2f1aed
[ "MIT" ]
null
null
null
import os import sys import numpy as np import pandas as pd from torch.utils.data import Subset from torch.utils.data.dataset import Dataset # For custom datasets from torchvision import transforms PROJECT_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) sys.path.append(PROJECT_PATH) from src.base.torchvision_dataset import TorchvisionDataset from src.datasets.preprocessing import get_target_label_idx from src.datasets.data_splitter import DatasetDivider from src.datasets.data_set_generic import Dataset class HitsDataset(TorchvisionDataset): def __init__(self, root: str, normal_class=1): super().__init__(root) self.n_classes = 2 # 0: normal, 1: outlier self.normal_classes = tuple([normal_class]) self.outlier_classes = list(range(0, 2)) self.outlier_classes.remove(normal_class) self.data_dict = pd.read_pickle(self.root) # hardcoded selected channel images = self.normalize_by_image(self.data_dict['images'])[..., 3][ ..., np.newaxis] labels = np.array(self.data_dict['labels']) dataset = Dataset(data_array=images, data_label=labels, batch_size=50) data_splitter = DatasetDivider(test_size=0.3, validation_size=0.1) data_splitter.set_dataset_obj(dataset) train_dataset, test_dataset, val_dataset = \ data_splitter.get_train_test_val_set_objs() transform = transforms.Compose([transforms.ToTensor()]) target_transform = transforms.Lambda( lambda x: int(x in self.outlier_classes)) train_set = Hits(train_dataset.data_array, train_dataset.data_label, transform=transform, target_transform=target_transform) train_idx_normal = get_target_label_idx( np.array(train_set.label_arr), self.normal_classes) self.train_set = Subset(train_set, train_idx_normal) print(self.train_set.__len__()) self.val_all_set = Hits(val_dataset.data_array, val_dataset.data_label, transform=transform, target_transform=target_transform) val_idx_normal = get_target_label_idx( np.array(self.val_all_set.label_arr), self.normal_classes) self.val_normal_set = Subset(self.val_all_set, 
val_idx_normal) print(self.val_normal_set.__len__()) self.test_set = Hits(test_dataset.data_array, test_dataset.data_label, transform=transform, target_transform=target_transform) def normalize_by_image(self, images): images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :] images = images / np.nanmax(images, axis=(1, 2))[ :, np.newaxis, np.newaxis, :] return images class Hits(Dataset): def __init__(self, images, labels, transform, target_transform): """ """ # Transforms self.transform = transform self.target_transform = target_transform self.image_arr = images self.label_arr = labels print(self.image_arr.shape) self.data_len = self.label_arr.shape[0] def __getitem__(self, index): single_image = self.image_arr[index] single_image_label = self.label_arr[index] if self.transform is not None: img = self.transform(single_image) if self.target_transform is not None: target = self.target_transform(single_image_label) return img, target, index # only line changed def __len__(self): return self.data_len
34.928571
76
0.71078
import os import sys import numpy as np import pandas as pd from torch.utils.data import Subset from torch.utils.data.dataset import Dataset from torchvision import transforms PROJECT_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) sys.path.append(PROJECT_PATH) from src.base.torchvision_dataset import TorchvisionDataset from src.datasets.preprocessing import get_target_label_idx from src.datasets.data_splitter import DatasetDivider from src.datasets.data_set_generic import Dataset class HitsDataset(TorchvisionDataset): def __init__(self, root: str, normal_class=1): super().__init__(root) self.n_classes = 2 self.normal_classes = tuple([normal_class]) self.outlier_classes = list(range(0, 2)) self.outlier_classes.remove(normal_class) self.data_dict = pd.read_pickle(self.root) images = self.normalize_by_image(self.data_dict['images'])[..., 3][ ..., np.newaxis] labels = np.array(self.data_dict['labels']) dataset = Dataset(data_array=images, data_label=labels, batch_size=50) data_splitter = DatasetDivider(test_size=0.3, validation_size=0.1) data_splitter.set_dataset_obj(dataset) train_dataset, test_dataset, val_dataset = \ data_splitter.get_train_test_val_set_objs() transform = transforms.Compose([transforms.ToTensor()]) target_transform = transforms.Lambda( lambda x: int(x in self.outlier_classes)) train_set = Hits(train_dataset.data_array, train_dataset.data_label, transform=transform, target_transform=target_transform) train_idx_normal = get_target_label_idx( np.array(train_set.label_arr), self.normal_classes) self.train_set = Subset(train_set, train_idx_normal) print(self.train_set.__len__()) self.val_all_set = Hits(val_dataset.data_array, val_dataset.data_label, transform=transform, target_transform=target_transform) val_idx_normal = get_target_label_idx( np.array(self.val_all_set.label_arr), self.normal_classes) self.val_normal_set = Subset(self.val_all_set, val_idx_normal) print(self.val_normal_set.__len__()) self.test_set = 
Hits(test_dataset.data_array, test_dataset.data_label, transform=transform, target_transform=target_transform) def normalize_by_image(self, images): images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :] images = images / np.nanmax(images, axis=(1, 2))[ :, np.newaxis, np.newaxis, :] return images class Hits(Dataset): def __init__(self, images, labels, transform, target_transform): self.transform = transform self.target_transform = target_transform self.image_arr = images self.label_arr = labels print(self.image_arr.shape) self.data_len = self.label_arr.shape[0] def __getitem__(self, index): single_image = self.image_arr[index] single_image_label = self.label_arr[index] if self.transform is not None: img = self.transform(single_image) if self.target_transform is not None: target = self.target_transform(single_image_label) return img, target, index def __len__(self): return self.data_len
true
true
f70ef760aea72079f6af72ed3c3cacc7a63ab723
8,321
py
Python
sdk/python/pulumi_mongodbatlas/teams.py
pulumi/pulumi-mongodbatlas
0d5c085dcfd871b56fb4cf582620260b70caa07a
[ "ECL-2.0", "Apache-2.0" ]
9
2020-04-28T19:12:30.000Z
2022-03-22T23:04:46.000Z
sdk/python/pulumi_mongodbatlas/teams.py
pulumi/pulumi-mongodbatlas
0d5c085dcfd871b56fb4cf582620260b70caa07a
[ "ECL-2.0", "Apache-2.0" ]
59
2020-06-12T12:12:52.000Z
2022-03-28T18:14:50.000Z
sdk/python/pulumi_mongodbatlas/teams.py
pulumi/pulumi-mongodbatlas
0d5c085dcfd871b56fb4cf582620260b70caa07a
[ "ECL-2.0", "Apache-2.0" ]
2
2020-09-25T21:22:08.000Z
2021-08-30T20:06:18.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['TeamsArgs', 'Teams'] @pulumi.input_type class TeamsArgs: def __init__(__self__, *, org_id: pulumi.Input[str], usernames: pulumi.Input[Sequence[pulumi.Input[str]]], name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Teams resource. """ pulumi.set(__self__, "org_id", org_id) pulumi.set(__self__, "usernames", usernames) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="orgId") def org_id(self) -> pulumi.Input[str]: return pulumi.get(self, "org_id") @org_id.setter def org_id(self, value: pulumi.Input[str]): pulumi.set(self, "org_id", value) @property @pulumi.getter def usernames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: return pulumi.get(self, "usernames") @usernames.setter def usernames(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "usernames", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _TeamsState: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, team_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering Teams resources. 
""" if name is not None: pulumi.set(__self__, "name", name) if org_id is not None: pulumi.set(__self__, "org_id", org_id) if team_id is not None: pulumi.set(__self__, "team_id", team_id) if usernames is not None: pulumi.set(__self__, "usernames", usernames) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="orgId") def org_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "org_id") @org_id.setter def org_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "org_id", value) @property @pulumi.getter(name="teamId") def team_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "team_id") @team_id.setter def team_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "team_id", value) @property @pulumi.getter def usernames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "usernames") @usernames.setter def usernames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "usernames", value) class Teams(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): """ Create a Teams resource with the given unique name, props, and options. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. """ ... @overload def __init__(__self__, resource_name: str, args: TeamsArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Create a Teams resource with the given unique name, props, and options. :param str resource_name: The name of the resource. 
:param TeamsArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TeamsArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TeamsArgs.__new__(TeamsArgs) __props__.__dict__["name"] = name if org_id is None and not opts.urn: raise TypeError("Missing required property 'org_id'") __props__.__dict__["org_id"] = org_id if usernames is None and not opts.urn: raise TypeError("Missing required property 'usernames'") __props__.__dict__["usernames"] = usernames __props__.__dict__["team_id"] = None super(Teams, __self__).__init__( 'mongodbatlas:index/teams:Teams', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, team_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Teams': """ Get an existing Teams 
resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TeamsState.__new__(_TeamsState) __props__.__dict__["name"] = name __props__.__dict__["org_id"] = org_id __props__.__dict__["team_id"] = team_id __props__.__dict__["usernames"] = usernames return Teams(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="orgId") def org_id(self) -> pulumi.Output[str]: return pulumi.get(self, "org_id") @property @pulumi.getter(name="teamId") def team_id(self) -> pulumi.Output[str]: return pulumi.get(self, "team_id") @property @pulumi.getter def usernames(self) -> pulumi.Output[Sequence[str]]: return pulumi.get(self, "usernames")
37.313901
134
0.619277
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['TeamsArgs', 'Teams'] @pulumi.input_type class TeamsArgs: def __init__(__self__, *, org_id: pulumi.Input[str], usernames: pulumi.Input[Sequence[pulumi.Input[str]]], name: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "org_id", org_id) pulumi.set(__self__, "usernames", usernames) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="orgId") def org_id(self) -> pulumi.Input[str]: return pulumi.get(self, "org_id") @org_id.setter def org_id(self, value: pulumi.Input[str]): pulumi.set(self, "org_id", value) @property @pulumi.getter def usernames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: return pulumi.get(self, "usernames") @usernames.setter def usernames(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "usernames", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _TeamsState: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, team_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): if name is not None: pulumi.set(__self__, "name", name) if org_id is not None: pulumi.set(__self__, "org_id", org_id) if team_id is not None: pulumi.set(__self__, "team_id", team_id) if usernames is not None: pulumi.set(__self__, "usernames", usernames) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="orgId") def org_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, 
"org_id") @org_id.setter def org_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "org_id", value) @property @pulumi.getter(name="teamId") def team_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "team_id") @team_id.setter def team_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "team_id", value) @property @pulumi.getter def usernames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "usernames") @usernames.setter def usernames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "usernames", value) class Teams(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): ... @overload def __init__(__self__, resource_name: str, args: TeamsArgs, opts: Optional[pulumi.ResourceOptions] = None): ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TeamsArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TeamsArgs.__new__(TeamsArgs) __props__.__dict__["name"] = name if org_id is None and not opts.urn: raise TypeError("Missing required property 'org_id'") __props__.__dict__["org_id"] = org_id if usernames is None and not opts.urn: raise TypeError("Missing required property 'usernames'") __props__.__dict__["usernames"] = usernames __props__.__dict__["team_id"] = None super(Teams, __self__).__init__( 'mongodbatlas:index/teams:Teams', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, team_id: Optional[pulumi.Input[str]] = None, usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Teams': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TeamsState.__new__(_TeamsState) __props__.__dict__["name"] = name __props__.__dict__["org_id"] = 
org_id __props__.__dict__["team_id"] = team_id __props__.__dict__["usernames"] = usernames return Teams(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="orgId") def org_id(self) -> pulumi.Output[str]: return pulumi.get(self, "org_id") @property @pulumi.getter(name="teamId") def team_id(self) -> pulumi.Output[str]: return pulumi.get(self, "team_id") @property @pulumi.getter def usernames(self) -> pulumi.Output[Sequence[str]]: return pulumi.get(self, "usernames")
true
true
f70ef76b75487b358e5b02d032d6a1bf46a704d9
3,741
py
Python
faker/providers/lorem/hy_AM/__init__.py
tristanHdez18/faker
14cb25712e6efcb7bf8d9f30f404a7304722af6d
[ "MIT" ]
1
2022-02-16T23:14:19.000Z
2022-02-16T23:14:19.000Z
faker/providers/lorem/hy_AM/__init__.py
tristanHdez18/faker
14cb25712e6efcb7bf8d9f30f404a7304722af6d
[ "MIT" ]
33
2020-12-09T16:49:15.000Z
2022-01-04T22:03:10.000Z
faker/providers/lorem/hy_AM/__init__.py
tristanHdez18/faker
14cb25712e6efcb7bf8d9f30f404a7304722af6d
[ "MIT" ]
3
2022-02-07T18:18:54.000Z
2022-03-11T22:09:01.000Z
from .. import Provider as LoremProvider class Provider(LoremProvider): """Implement lorem provider for ``hy_AM`` locale. Sources: - https://www.101languages.net/armenian/armenian-word-list """ word_list = ( "ես", "դու", "նա", "մենք", "դուք", "նրանք", "այս", "այն", "այստեղ", "այնտեղ", "ով", "ինչ", "որտեղ", "ուր", "երբ", "ինչպես", "ոչ", "բոլոր", "շատ", "որոշ", "քիչ", "այլ", "ուրիշ", "մեկ", "երկու", "երեք", "չորս", "հինգ", "մեծ", "երկար", "լայն", "հաստ", "ծանր", "փոքր", "կարճ", "նեղ", "բարակ", "կին", "տղամարդ", "մարդ", "երեխա", "կին", "ամուսին", "մայր", "հայր", "կենդանի", "ձուկ", "թռչուն", "շուն", "ոջիլ", "օձ", "ճիճու", "ծառ", "անտառ", "փայտ", "պտուղ", "սերմ", "տերև", "արմատ", "կեղև", "ծաղիկ", "խոտ", "պարան", "մաշկ", "կաշի", "միս", "արյուն", "ոսկոր", "ճարպ", "ձու", "եղջյուր", "պոզ", "պոչ", "փետուր", "մազ", "գլուխ", "ականջ", "աչք", "քիթ", "բերան", "ատամ", "լեզու", "եղունգ", "ոտք", "ծունկ", "ձեռք", "թև", "փոր", "փորոտիք", "աղիք", "վիզ", "մեջք", "կուրծք", "սիրտ", "լյարդ", "խմել", "ուտել", "կծել", "ծծել", "թքել", "ործկալ", "փչել", "շնչել", "ծիծաղել", "տեսնել", "լսել", "իմանալ", "գիտենալ", "մտածել", "զգալ", "վախենալ", "քնել", "ապրել", "մեռնել", "սպանել", "կռվել", "որսալ", "խփել", "հարվածել", "կտրել", "բաժանել", "խոցել", "քերծել", "քորել", "փորել", "լողալ", "թռչել", "քայլել", "գալ", "պառկել", "նստել", "կանգնել", "շրջվել", "ընկնել", "տալ", "պահել", "բռնել", "սեղմել", "շփել", "լվալ", "սրբել", "ձգել", "քաշել", "հրել", "նետել", "կապել", "կարել", "հաշվել", "ասել", "երգել", "խաղալ", "լողալ", "հոսել", "սառչել", "ուռել", "արև", "լուսին", "աստղ", "ջուր", "անձրև", "գետ", "լիճ", "ծով", "աղ", "քար", "ավազ", "փոշի", "հող", "ամպ", "մառախուղ", "մշուշ", "երկինք", "քամի", "ձյուն", "սառույց", "ծուխ", "հուր", "կրակ", "մոխիր", "վառվել", "այրվել", "ճամփա", "ճանապարհ", "լեռ", "սար", "կարմիր", "կանաչ", "դեղին", "սպիտակ", "սև", "գիշեր", "օր", "տարի", "տաք", "ցուրտ", "լիքը", "նոր", "հին", "լավ", "վատ", "փտած", "կեղտոտ", "ուղիղ", "կլոր", "սուր", "բութ", "հարթ", "թաց", "չոր", "ճիշտ", "մոտ", "հեռու", 
"աջ", )
16.480176
62
0.295108
from .. import Provider as LoremProvider class Provider(LoremProvider): word_list = ( "ես", "դու", "նա", "մենք", "դուք", "նրանք", "այս", "այն", "այստեղ", "այնտեղ", "ով", "ինչ", "որտեղ", "ուր", "երբ", "ինչպես", "ոչ", "բոլոր", "շատ", "որոշ", "քիչ", "այլ", "ուրիշ", "մեկ", "երկու", "երեք", "չորս", "հինգ", "մեծ", "երկար", "լայն", "հաստ", "ծանր", "փոքր", "կարճ", "նեղ", "բարակ", "կին", "տղամարդ", "մարդ", "երեխա", "կին", "ամուսին", "մայր", "հայր", "կենդանի", "ձուկ", "թռչուն", "շուն", "ոջիլ", "օձ", "ճիճու", "ծառ", "անտառ", "փայտ", "պտուղ", "սերմ", "տերև", "արմատ", "կեղև", "ծաղիկ", "խոտ", "պարան", "մաշկ", "կաշի", "միս", "արյուն", "ոսկոր", "ճարպ", "ձու", "եղջյուր", "պոզ", "պոչ", "փետուր", "մազ", "գլուխ", "ականջ", "աչք", "քիթ", "բերան", "ատամ", "լեզու", "եղունգ", "ոտք", "ծունկ", "ձեռք", "թև", "փոր", "փորոտիք", "աղիք", "վիզ", "մեջք", "կուրծք", "սիրտ", "լյարդ", "խմել", "ուտել", "կծել", "ծծել", "թքել", "ործկալ", "փչել", "շնչել", "ծիծաղել", "տեսնել", "լսել", "իմանալ", "գիտենալ", "մտածել", "զգալ", "վախենալ", "քնել", "ապրել", "մեռնել", "սպանել", "կռվել", "որսալ", "խփել", "հարվածել", "կտրել", "բաժանել", "խոցել", "քերծել", "քորել", "փորել", "լողալ", "թռչել", "քայլել", "գալ", "պառկել", "նստել", "կանգնել", "շրջվել", "ընկնել", "տալ", "պահել", "բռնել", "սեղմել", "շփել", "լվալ", "սրբել", "ձգել", "քաշել", "հրել", "նետել", "կապել", "կարել", "հաշվել", "ասել", "երգել", "խաղալ", "լողալ", "հոսել", "սառչել", "ուռել", "արև", "լուսին", "աստղ", "ջուր", "անձրև", "գետ", "լիճ", "ծով", "աղ", "քար", "ավազ", "փոշի", "հող", "ամպ", "մառախուղ", "մշուշ", "երկինք", "քամի", "ձյուն", "սառույց", "ծուխ", "հուր", "կրակ", "մոխիր", "վառվել", "այրվել", "ճամփա", "ճանապարհ", "լեռ", "սար", "կարմիր", "կանաչ", "դեղին", "սպիտակ", "սև", "գիշեր", "օր", "տարի", "տաք", "ցուրտ", "լիքը", "նոր", "հին", "լավ", "վատ", "փտած", "կեղտոտ", "ուղիղ", "կլոր", "սուր", "բութ", "հարթ", "թաց", "չոր", "ճիշտ", "մոտ", "հեռու", "աջ", )
true
true
f70ef7f591cfdde370c54c465c9581237183a58d
6,238
py
Python
src/_pytest/skipping.py
rosemichaele/pytest
1c0ab3c2a32f7932378a1c37106d082784cb4700
[ "MIT" ]
3
2019-11-26T02:30:12.000Z
2020-04-15T17:49:07.000Z
src/_pytest/skipping.py
rosemichaele/pytest
1c0ab3c2a32f7932378a1c37106d082784cb4700
[ "MIT" ]
59
2019-10-22T04:34:22.000Z
2021-11-27T18:23:11.000Z
src/_pytest/skipping.py
rosemichaele/pytest
1c0ab3c2a32f7932378a1c37106d082784cb4700
[ "MIT" ]
17
2019-11-21T14:11:29.000Z
2019-11-21T15:26:23.000Z
""" support for skip/xfail functions and markers. """ from _pytest.config import hookimpl from _pytest.mark.evaluate import MarkEvaluator from _pytest.outcomes import fail from _pytest.outcomes import skip from _pytest.outcomes import xfail def pytest_addoption(parser): group = parser.getgroup("general") group.addoption( "--runxfail", action="store_true", dest="runxfail", default=False, help="report the results of xfail tests as if they were not marked", ) parser.addini( "xfail_strict", "default for the strict parameter of xfail " "markers when not given explicitly (default: False)", default=False, type="bool", ) def pytest_configure(config): if config.option.runxfail: # yay a hack import pytest old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) def nop(*args, **kwargs): pass nop.Exception = xfail.Exception setattr(pytest, "xfail", nop) config.addinivalue_line( "markers", "skip(reason=None): skip the given test function with an optional reason. " 'Example: skip(reason="no way of currently testing this") skips the ' "test.", ) config.addinivalue_line( "markers", "skipif(condition): skip the given test function if eval(condition) " "results in a True value. Evaluation happens within the " "module global context. Example: skipif('sys.platform == \"win32\"') " "skips the test if we are on the win32 platform. see " "https://docs.pytest.org/en/latest/skipping.html", ) config.addinivalue_line( "markers", "xfail(condition, reason=None, run=True, raises=None, strict=False): " "mark the test function as an expected failure if eval(condition) " "has a True value. Optionally specify a reason for better reporting " "and run=False if you don't even want to execute the test function. " "If only specific exception(s) are expected, you can list them in " "raises, and if the test fails in other ways, it will be reported as " "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html", ) @hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks item._skipped_by_mark = False eval_skipif = MarkEvaluator(item, "skipif") if eval_skipif.istrue(): item._skipped_by_mark = True skip(eval_skipif.getexplanation()) for skip_info in item.iter_markers(name="skip"): item._skipped_by_mark = True if "reason" in skip_info.kwargs: skip(skip_info.kwargs["reason"]) elif skip_info.args: skip(skip_info.args[0]) else: skip("unconditional skip") item._evalxfail = MarkEvaluator(item, "xfail") check_xfail_no_run(item) @hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): check_xfail_no_run(pyfuncitem) outcome = yield passed = outcome.excinfo is None if passed: check_strict_xfail(pyfuncitem) def check_xfail_no_run(item): """check xfail(run=False)""" if not item.config.option.runxfail: evalxfail = item._evalxfail if evalxfail.istrue(): if not evalxfail.get("run", True): xfail("[NOTRUN] " + evalxfail.getexplanation()) def check_strict_xfail(pyfuncitem): """check xfail(strict=True) for the given PASSING test""" evalxfail = pyfuncitem._evalxfail if evalxfail.istrue(): strict_default = pyfuncitem.config.getini("xfail_strict") is_strict_xfail = evalxfail.get("strict", strict_default) if is_strict_xfail: del pyfuncitem._evalxfail explanation = evalxfail.getexplanation() fail("[XPASS(strict)] " + explanation, pytrace=False) @hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() evalxfail = getattr(item, "_evalxfail", None) # unittest special case, see setting of _unexpectedsuccess if hasattr(item, "_unexpectedsuccess") and rep.when == "call": if item._unexpectedsuccess: rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) else: rep.longrepr = "Unexpected success" rep.outcome = "failed" elif item.config.option.runxfail: pass # don't interfere elif call.excinfo and 
call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): if call.excinfo: if evalxfail.invalidraise(call.excinfo.value): rep.outcome = "failed" else: rep.outcome = "skipped" rep.wasxfail = evalxfail.getexplanation() elif call.when == "call": strict_default = item.config.getini("xfail_strict") is_strict_xfail = evalxfail.get("strict", strict_default) explanation = evalxfail.getexplanation() if is_strict_xfail: rep.outcome = "failed" rep.longrepr = "[XPASS(strict)] {}".format(explanation) else: rep.outcome = "passed" rep.wasxfail = explanation elif ( getattr(item, "_skipped_by_mark", False) and rep.skipped and type(rep.longrepr) is tuple ): # skipped by mark.skipif; change the location of the failure # to point to the item definition, otherwise it will display # the location of where the skip exception was raised within pytest _, _, reason = rep.longrepr filename, line = item.location[:2] rep.longrepr = filename, line + 1, reason # called by terminalreporter progress reporting def pytest_report_teststatus(report): if hasattr(report, "wasxfail"): if report.skipped: return "xfailed", "x", "XFAIL" elif report.passed: return "xpassed", "X", "XPASS"
35.044944
87
0.639147
from _pytest.config import hookimpl from _pytest.mark.evaluate import MarkEvaluator from _pytest.outcomes import fail from _pytest.outcomes import skip from _pytest.outcomes import xfail def pytest_addoption(parser): group = parser.getgroup("general") group.addoption( "--runxfail", action="store_true", dest="runxfail", default=False, help="report the results of xfail tests as if they were not marked", ) parser.addini( "xfail_strict", "default for the strict parameter of xfail " "markers when not given explicitly (default: False)", default=False, type="bool", ) def pytest_configure(config): if config.option.runxfail: import pytest old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) def nop(*args, **kwargs): pass nop.Exception = xfail.Exception setattr(pytest, "xfail", nop) config.addinivalue_line( "markers", "skip(reason=None): skip the given test function with an optional reason. " 'Example: skip(reason="no way of currently testing this") skips the ' "test.", ) config.addinivalue_line( "markers", "skipif(condition): skip the given test function if eval(condition) " "results in a True value. Evaluation happens within the " "module global context. Example: skipif('sys.platform == \"win32\"') " "skips the test if we are on the win32 platform. see " "https://docs.pytest.org/en/latest/skipping.html", ) config.addinivalue_line( "markers", "xfail(condition, reason=None, run=True, raises=None, strict=False): " "mark the test function as an expected failure if eval(condition) " "has a True value. Optionally specify a reason for better reporting " "and run=False if you don't even want to execute the test function. " "If only specific exception(s) are expected, you can list them in " "raises, and if the test fails in other ways, it will be reported as " "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html", ) @hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks item._skipped_by_mark = False eval_skipif = MarkEvaluator(item, "skipif") if eval_skipif.istrue(): item._skipped_by_mark = True skip(eval_skipif.getexplanation()) for skip_info in item.iter_markers(name="skip"): item._skipped_by_mark = True if "reason" in skip_info.kwargs: skip(skip_info.kwargs["reason"]) elif skip_info.args: skip(skip_info.args[0]) else: skip("unconditional skip") item._evalxfail = MarkEvaluator(item, "xfail") check_xfail_no_run(item) @hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): check_xfail_no_run(pyfuncitem) outcome = yield passed = outcome.excinfo is None if passed: check_strict_xfail(pyfuncitem) def check_xfail_no_run(item): if not item.config.option.runxfail: evalxfail = item._evalxfail if evalxfail.istrue(): if not evalxfail.get("run", True): xfail("[NOTRUN] " + evalxfail.getexplanation()) def check_strict_xfail(pyfuncitem): evalxfail = pyfuncitem._evalxfail if evalxfail.istrue(): strict_default = pyfuncitem.config.getini("xfail_strict") is_strict_xfail = evalxfail.get("strict", strict_default) if is_strict_xfail: del pyfuncitem._evalxfail explanation = evalxfail.getexplanation() fail("[XPASS(strict)] " + explanation, pytrace=False) @hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() evalxfail = getattr(item, "_evalxfail", None) # unittest special case, see setting of _unexpectedsuccess if hasattr(item, "_unexpectedsuccess") and rep.when == "call": if item._unexpectedsuccess: rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) else: rep.longrepr = "Unexpected success" rep.outcome = "failed" elif item.config.option.runxfail: pass # don't interfere elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = 
"skipped" elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): if call.excinfo: if evalxfail.invalidraise(call.excinfo.value): rep.outcome = "failed" else: rep.outcome = "skipped" rep.wasxfail = evalxfail.getexplanation() elif call.when == "call": strict_default = item.config.getini("xfail_strict") is_strict_xfail = evalxfail.get("strict", strict_default) explanation = evalxfail.getexplanation() if is_strict_xfail: rep.outcome = "failed" rep.longrepr = "[XPASS(strict)] {}".format(explanation) else: rep.outcome = "passed" rep.wasxfail = explanation elif ( getattr(item, "_skipped_by_mark", False) and rep.skipped and type(rep.longrepr) is tuple ): _, _, reason = rep.longrepr filename, line = item.location[:2] rep.longrepr = filename, line + 1, reason def pytest_report_teststatus(report): if hasattr(report, "wasxfail"): if report.skipped: return "xfailed", "x", "XFAIL" elif report.passed: return "xpassed", "X", "XPASS"
true
true
f70ef8dc5d4a4a748215573850421d4fa74c60d1
3,661
py
Python
generate_embeddings.py
Amitdedhia6/DrugDiscovery
c70dec96cee4d0d643a8b9de30530b6871fdf05e
[ "Apache-2.0" ]
null
null
null
generate_embeddings.py
Amitdedhia6/DrugDiscovery
c70dec96cee4d0d643a8b9de30530b6871fdf05e
[ "Apache-2.0" ]
null
null
null
generate_embeddings.py
Amitdedhia6/DrugDiscovery
c70dec96cee4d0d643a8b9de30530b6871fdf05e
[ "Apache-2.0" ]
null
null
null
import torch import torch.nn as nn import os from common import base_data_path from typing import List import pandas as pd CONTEXT_SIZE = 1 # 1 words to the left, 1 to the right EMDEDDING_DIM = 3 word_to_ix = {} ix_to_word = {} def make_context_vector(context, word_to_ix): idxs = [word_to_ix[w] for w in context] return torch.tensor(idxs, dtype=torch.long) def get_index_of_max(input): index = 0 for i in range(1, len(input)): if input[i] > input[index]: index = i return index def get_max_prob_result(input, ix_to_word): return ix_to_word[get_index_of_max(input)] def split_smiles_repr(smile_repr: str) -> List[str]: element_list = [] skip_next = False for i in range(len(smile_repr)): if skip_next: skip_next = False continue element = smile_repr[i] if (i < (len(smile_repr) - 1)) and (smile_repr[i].isalpha()): possible_element = element + smile_repr[i+1] if possible_element in word_to_ix: element = possible_element skip_next = True if element in word_to_ix: element_list.append(element) else: raise ValueError('Inappropriate argument to function get_elements_from_smiles_data of Vocab class') return element_list def get_data(sequence_list: List[str]): _sequence_list = [] sequence_elements_list = [] for s in sequence_list: split_elements = split_smiles_repr(s) _sequence_list.append(s) sequence_elements_list.append(split_elements) return sequence_elements_list filepath = os.path.join(base_data_path, "vocab.txt") f = open(filepath, "r") elements_list = f.read().splitlines() elements_list.append(' ') f.close() vocab = elements_list vocab_size = len(elements_list) for i, word in enumerate(vocab): word_to_ix[word] = i ix_to_word[i] = word filepath = os.path.join(base_data_path, "dataset_v1.csv") df = pd.read_csv(filepath, sep=",", header=0) smiles_data = get_data(df.SMILES.tolist()) class CBOW(torch.nn.Module): def __init__(self, vocab_size, embedding_dim): super(CBOW, self).__init__() self.embeddings = nn.Embedding(vocab_size, embedding_dim) self.linear1 = 
nn.Linear(embedding_dim, 128) self.activation_function1 = nn.ReLU() self.linear2 = nn.Linear(128, vocab_size) self.activation_function2 = nn.LogSoftmax(dim=-1) def forward(self, inputs): embeds = sum(self.embeddings(inputs)).view(1, -1) out = self.linear1(embeds) out = self.activation_function1(out) out = self.linear2(out) out = self.activation_function2(out) return out def get_word_emdedding(self, word): word = torch.LongTensor([word_to_ix[word]]) return self.embeddings(word).view(1, -1) model = CBOW(vocab_size, EMDEDDING_DIM) loss_function = nn.NLLLoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.001) for epoch in range(50): total_loss = 0 for smiles_element_list in smiles_data: for i in range(1, len(smiles_element_list) - 1): context = [smiles_element_list[i - 1], smiles_element_list[i + 1]] target = smiles_element_list[i] context_vector = make_context_vector(context, word_to_ix) model.zero_grad() log_probs = model(context_vector) loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long)) total_loss += loss.item() loss.backward() optimizer.step() print(f"Epoch - {epoch}, Loss - {total_loss}")
29.055556
111
0.659383
import torch import torch.nn as nn import os from common import base_data_path from typing import List import pandas as pd CONTEXT_SIZE = 1 EMDEDDING_DIM = 3 word_to_ix = {} ix_to_word = {} def make_context_vector(context, word_to_ix): idxs = [word_to_ix[w] for w in context] return torch.tensor(idxs, dtype=torch.long) def get_index_of_max(input): index = 0 for i in range(1, len(input)): if input[i] > input[index]: index = i return index def get_max_prob_result(input, ix_to_word): return ix_to_word[get_index_of_max(input)] def split_smiles_repr(smile_repr: str) -> List[str]: element_list = [] skip_next = False for i in range(len(smile_repr)): if skip_next: skip_next = False continue element = smile_repr[i] if (i < (len(smile_repr) - 1)) and (smile_repr[i].isalpha()): possible_element = element + smile_repr[i+1] if possible_element in word_to_ix: element = possible_element skip_next = True if element in word_to_ix: element_list.append(element) else: raise ValueError('Inappropriate argument to function get_elements_from_smiles_data of Vocab class') return element_list def get_data(sequence_list: List[str]): _sequence_list = [] sequence_elements_list = [] for s in sequence_list: split_elements = split_smiles_repr(s) _sequence_list.append(s) sequence_elements_list.append(split_elements) return sequence_elements_list filepath = os.path.join(base_data_path, "vocab.txt") f = open(filepath, "r") elements_list = f.read().splitlines() elements_list.append(' ') f.close() vocab = elements_list vocab_size = len(elements_list) for i, word in enumerate(vocab): word_to_ix[word] = i ix_to_word[i] = word filepath = os.path.join(base_data_path, "dataset_v1.csv") df = pd.read_csv(filepath, sep=",", header=0) smiles_data = get_data(df.SMILES.tolist()) class CBOW(torch.nn.Module): def __init__(self, vocab_size, embedding_dim): super(CBOW, self).__init__() self.embeddings = nn.Embedding(vocab_size, embedding_dim) self.linear1 = nn.Linear(embedding_dim, 128) self.activation_function1 = 
nn.ReLU() self.linear2 = nn.Linear(128, vocab_size) self.activation_function2 = nn.LogSoftmax(dim=-1) def forward(self, inputs): embeds = sum(self.embeddings(inputs)).view(1, -1) out = self.linear1(embeds) out = self.activation_function1(out) out = self.linear2(out) out = self.activation_function2(out) return out def get_word_emdedding(self, word): word = torch.LongTensor([word_to_ix[word]]) return self.embeddings(word).view(1, -1) model = CBOW(vocab_size, EMDEDDING_DIM) loss_function = nn.NLLLoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.001) for epoch in range(50): total_loss = 0 for smiles_element_list in smiles_data: for i in range(1, len(smiles_element_list) - 1): context = [smiles_element_list[i - 1], smiles_element_list[i + 1]] target = smiles_element_list[i] context_vector = make_context_vector(context, word_to_ix) model.zero_grad() log_probs = model(context_vector) loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long)) total_loss += loss.item() loss.backward() optimizer.step() print(f"Epoch - {epoch}, Loss - {total_loss}")
true
true
f70ef90f5e9f2e939fd227f2c3e143a931c5319a
5,947
py
Python
scripts/functionality/solarsystem.py
Whykiller/Final_Project-Python_Class
29cb388883e19476efe6d96803075b72b79a5aff
[ "MIT" ]
null
null
null
scripts/functionality/solarsystem.py
Whykiller/Final_Project-Python_Class
29cb388883e19476efe6d96803075b72b79a5aff
[ "MIT" ]
8
2020-06-20T16:53:26.000Z
2020-06-26T13:08:53.000Z
scripts/functionality/solarsystem.py
Whykiller/Fortgeschrittener_Python_Kurs
29cb388883e19476efe6d96803075b72b79a5aff
[ "MIT" ]
1
2021-07-19T17:40:11.000Z
2021-07-19T17:40:11.000Z
import functionality.planets as planets import assets.tools as tools from assets.variables import * # TODO: Also add logger to code and display errors correctly # TODO: Make one pixel correspond to 1/10 au so that acceleration works more realistic class SolarSystem(metaclass=tools.Singleton): """This creates the space in which the planets interact with each other. It is a singleton so it can be used in different functions and keeps all its information Attributes ----------- planets_list: list this list contains all planet class objects max_objects: int this sets the max object count allowed in the planets list system_time: int the starting time for the simulation error: bool set to True, if error occurs Methods ---------- add_planet(*args): Adds planets to the planet list remove_planet(planet): Removes planet from the solar system get_planet(planet): Gets a specific planet from planet list number_of_intrastellar_objects(): Returns the number of objects in list planetary_interaction(): Calculates the new accelerations of each of the planets planetary_position(): Utilizes verlet integration to get the next positions of all planets for a certain time step period update(): Updates the calculated data and stores it inside the planets itself reset(): This resets the class back to its empty state """ def __init__(self) -> None: """Initializes the intra-stellar objects and the class attributes Returns ---------- None """ self.planets_list = [] self.max_objects = 10 self.system_time = 0 # To display error messages self.error = False def add_planet(self, *args) -> None: """Adds planets to the planet list Parameters ---------- *args: Planet takes in Planet class objects Returns ---------- None """ # TODO: Display some error message if self.number_of_intrastellar_objects() < self.max_objects: for i in args: self.planets_list.append(i) else: self.error = True def remove_planet(self, planet) -> None: """Removes planet from the solar system Parameters ---------- planet: Planet 
planet to be removed from the planet list Returns ---------- None """ self.planets_list.remove(planet) def get_planet(self, planet): """Gets a specific planet from planet list Parameters ---------- planet: Planet planet to be get from planet list Returns ---------- planet: Planet planet from planet list or None if not found """ return planet if planet in self.planets_list else None def number_of_intrastellar_objects(self): """Returns the number of objects in list Returns ---------- planet_list_length: int the length of the planet list """ return len(self.planets_list) def planetary_interaction(self): """Calculates the new accelerations of each of the planets Returns ---------- acceleration_list: list the calculated acceleration of all the planets in the planet list """ acceleration_list = [] for i in self.planets_list: a_x, a_y = 0, 0 for j in self.planets_list: if i != j: a_x += i.alien_acceleration(j)[0] a_y += i.alien_acceleration(j)[1] acceleration_list.append([a_x, a_y]) return acceleration_list def planetary_positions(self): """Utilizes verlet integration to get the next positions of all planets for a certain time step period Returns ----------- temp_pos_list: list the positions of the planets after the verlet integration temp_vel_list: list the velocity of the planets after the verlet integration """ temp_pos_list, temp_vel_list = [], [] for i, o in enumerate(self.planets_list): # Calculates the position and velocity for each step and saves it to the planet temp_pos_x, temp_v_x = tools.verlet_algorithm(o.pos_x_real, o.v_x, self.planetary_interaction()[i][0]) temp_pos_y, temp_v_y = tools.verlet_algorithm(o.pos_y_real, o.v_y, self.planetary_interaction()[i][1]) # Saves it to a temporary list as to not corrupt positional data for each step temp_pos_list.append([temp_pos_x, temp_pos_y]) temp_vel_list.append([temp_v_x, temp_v_y]) return temp_pos_list, temp_vel_list def update(self) -> None: """"Updates the calculated data and stores it inside the planets itself 
Returns ---------- None """ for i, o in enumerate(self.planets_list): o.pos_x_real, o.pos_y_real = self.planetary_positions()[0][i] o.v_x, o.v_y = self.planetary_positions()[1][i] # o.trace.append(o.rect.center) def reset(self) -> None: """This resets the class back to its empty state Returns ----------- None """ self.planets_list = [] self.system_time = 0 if __name__ == "__main__": earth = planets.Planet(1e20, 5, 0) satellite = planets.Planet(1e20, 10, 0) moon = planets.Planet(1e20, 15, 0) ss = SolarSystem() ss.add_planet(earth, satellite, moon) print(ss.planetary_interaction()) print(ss.planets_list) for i in ss.planets_list: print(i.pos_x, i.pos_y) for i in ss.planets_list: print(i.pos_x, i.pos_y)
28.868932
114
0.599798
import functionality.planets as planets import assets.tools as tools from assets.variables import * class SolarSystem(metaclass=tools.Singleton): def __init__(self) -> None: self.planets_list = [] self.max_objects = 10 self.system_time = 0 self.error = False def add_planet(self, *args) -> None: if self.number_of_intrastellar_objects() < self.max_objects: for i in args: self.planets_list.append(i) else: self.error = True def remove_planet(self, planet) -> None: self.planets_list.remove(planet) def get_planet(self, planet): return planet if planet in self.planets_list else None def number_of_intrastellar_objects(self): return len(self.planets_list) def planetary_interaction(self): acceleration_list = [] for i in self.planets_list: a_x, a_y = 0, 0 for j in self.planets_list: if i != j: a_x += i.alien_acceleration(j)[0] a_y += i.alien_acceleration(j)[1] acceleration_list.append([a_x, a_y]) return acceleration_list def planetary_positions(self): temp_pos_list, temp_vel_list = [], [] for i, o in enumerate(self.planets_list): temp_pos_x, temp_v_x = tools.verlet_algorithm(o.pos_x_real, o.v_x, self.planetary_interaction()[i][0]) temp_pos_y, temp_v_y = tools.verlet_algorithm(o.pos_y_real, o.v_y, self.planetary_interaction()[i][1]) temp_pos_list.append([temp_pos_x, temp_pos_y]) temp_vel_list.append([temp_v_x, temp_v_y]) return temp_pos_list, temp_vel_list def update(self) -> None: for i, o in enumerate(self.planets_list): o.pos_x_real, o.pos_y_real = self.planetary_positions()[0][i] o.v_x, o.v_y = self.planetary_positions()[1][i] def reset(self) -> None: self.planets_list = [] self.system_time = 0 if __name__ == "__main__": earth = planets.Planet(1e20, 5, 0) satellite = planets.Planet(1e20, 10, 0) moon = planets.Planet(1e20, 15, 0) ss = SolarSystem() ss.add_planet(earth, satellite, moon) print(ss.planetary_interaction()) print(ss.planets_list) for i in ss.planets_list: print(i.pos_x, i.pos_y) for i in ss.planets_list: print(i.pos_x, i.pos_y)
true
true
f70efa147c6f9c7ee90e557fe0740d068a1ce522
213
py
Python
tests/test_ai.py
divanorama/katrain
dc22aa88526fb6446f908259f06020d649a2d0a9
[ "MIT" ]
null
null
null
tests/test_ai.py
divanorama/katrain
dc22aa88526fb6446f908259f06020d649a2d0a9
[ "MIT" ]
null
null
null
tests/test_ai.py
divanorama/katrain
dc22aa88526fb6446f908259f06020d649a2d0a9
[ "MIT" ]
null
null
null
import pytest from katrain.core.constants import AI_STRATEGIES_RECOMMENDED_ORDER, AI_STRATEGIES class TestAI: def test_order(self): assert set(AI_STRATEGIES_RECOMMENDED_ORDER) == set(AI_STRATEGIES)
23.666667
81
0.798122
import pytest from katrain.core.constants import AI_STRATEGIES_RECOMMENDED_ORDER, AI_STRATEGIES class TestAI: def test_order(self): assert set(AI_STRATEGIES_RECOMMENDED_ORDER) == set(AI_STRATEGIES)
true
true
f70efd874fd49a156b8dd23f6d6d90fc73d34d8f
1,341
py
Python
matterapi/enums.py
gmerz/MatterApi
b116da58d3a4ca77739970a28e30672e0e611705
[ "MIT" ]
3
2022-01-26T23:31:01.000Z
2022-03-01T13:07:26.000Z
matterapi/enums.py
gmerz/MatterApi
b116da58d3a4ca77739970a28e30672e0e611705
[ "MIT" ]
null
null
null
matterapi/enums.py
gmerz/MatterApi
b116da58d3a4ca77739970a28e30672e0e611705
[ "MIT" ]
null
null
null
""" Enums used in different API endpoints """ from enum import Enum class PluginStatusState(str, Enum): """State of the plugin""" NOTRUNNING = "NotRunning" STARTING = "Starting" RUNNING = "Running" FAILEDTOSTART = "FailedToStart" FAILEDTOSTAYRUNNING = "FailedToStayRunning" STOPPING = "Stopping" def __str__(self) -> str: return str(self.value) class SidebarCategoryType(str, Enum): """None""" CHANNELS = "channels" CUSTOM = "custom" DIRECT_MESSAGES = "direct_messages" FAVORITES = "favorites" def __str__(self) -> str: return str(self.value) class SidebarCategoryWithChannelsType(str, Enum): """None""" CHANNELS = "channels" CUSTOM = "custom" DIRECT_MESSAGES = "direct_messages" FAVORITES = "favorites" def __str__(self) -> str: return str(self.value) class UploadSessionType(str, Enum): """The type of the upload.""" ATTACHMENT = "attachment" IMPORT_ = "import" def __str__(self) -> str: return str(self.value) class PostMetadataEmbedsItemType(str, Enum): """The type of content that is embedded in this point.""" IMAGE = "image" MESSAGE_ATTACHMENT = "message_attachment" OPENGRAPH = "opengraph" LINK = "link" def __str__(self) -> str: return str(self.value)
21.285714
61
0.645041
from enum import Enum class PluginStatusState(str, Enum): NOTRUNNING = "NotRunning" STARTING = "Starting" RUNNING = "Running" FAILEDTOSTART = "FailedToStart" FAILEDTOSTAYRUNNING = "FailedToStayRunning" STOPPING = "Stopping" def __str__(self) -> str: return str(self.value) class SidebarCategoryType(str, Enum): CHANNELS = "channels" CUSTOM = "custom" DIRECT_MESSAGES = "direct_messages" FAVORITES = "favorites" def __str__(self) -> str: return str(self.value) class SidebarCategoryWithChannelsType(str, Enum): CHANNELS = "channels" CUSTOM = "custom" DIRECT_MESSAGES = "direct_messages" FAVORITES = "favorites" def __str__(self) -> str: return str(self.value) class UploadSessionType(str, Enum): ATTACHMENT = "attachment" IMPORT_ = "import" def __str__(self) -> str: return str(self.value) class PostMetadataEmbedsItemType(str, Enum): IMAGE = "image" MESSAGE_ATTACHMENT = "message_attachment" OPENGRAPH = "opengraph" LINK = "link" def __str__(self) -> str: return str(self.value)
true
true
f70efda32ce2d0e30af7d589d3a939cff26d40b4
5,492
py
Python
heuristic/train/nn/train-nn.py
ehsanul/brick
291c0783f3b062cf73887cb3581dd92342891165
[ "MIT" ]
5
2018-06-08T01:32:26.000Z
2019-05-14T15:30:02.000Z
heuristic/train/nn/train-nn.py
ehsanul/brick
291c0783f3b062cf73887cb3581dd92342891165
[ "MIT" ]
1
2018-07-16T11:52:30.000Z
2018-07-25T06:51:10.000Z
heuristic/train/nn/train-nn.py
ehsanul/brick
291c0783f3b062cf73887cb3581dd92342891165
[ "MIT" ]
null
null
null
from __future__ import absolute_import, division, print_function import sys import pathlib import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers EPOCHS = 1000 # The patience parameter is the amount of epochs to check for improvement EARLY_STOP = keras.callbacks.EarlyStopping(monitor='val_loss', patience=30) class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print('') print('.', end='') def plot_history(history): hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Abs Error [cost]') plt.plot(hist['epoch'], hist['mean_absolute_error'], label='Train Error') plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label = 'Val Error') plt.ylim([0,5]) plt.legend() plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Square Error [$cost^2$]') plt.plot(hist['epoch'], hist['mean_squared_error'], label='Train Error') plt.plot(hist['epoch'], hist['val_mean_squared_error'], label = 'Val Error') plt.ylim([0,20]) plt.legend() plt.show() # we hard-code the values instead of using stats so that integration with # predictor using the model is easier scaling = pd.DataFrame(data={ 'min': [-10000, -10000, -10000, -2300, -2300, -2300, -6.0, -6.0, -6.0, -3.2, -3.2, -3.2], 'max': [ 10000, 10000, 10000, 2300, 2300, 2300, 6.0, 6.0, 6.0, 3.2, 3.2, 3.2], }, index=[ 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw']) # scale to range [0, 1] # TODO try polar coordinates. 
for velocity: https://math.stackexchange.com/questions/2444965/relationship-between-cartesian-velocity-and-polar-velocity def scale(x): return (x - scaling['min']) / (scaling['max'] - scaling['min']) def build_model(): model = keras.Sequential([ layers.Dense(128, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]), layers.Dense(128, activation=tf.nn.relu), # these extra layers seem to hurt more than they help! #layers.Dropout(0.01), #layers.Dense(64, activation=tf.nn.relu), # this doesn't work as well as a single 64-wide layer #layers.Dense(12, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]), #layers.Dense(12, activation=tf.nn.relu), #layers.Dense(12, activation=tf.nn.relu), #layers.Dense(12, activation=tf.nn.relu), #layers.Dense(12, activation=tf.nn.relu), layers.Dense(1) ]) #optimizer = tf.keras.optimizers.RMSprop(0.001) optimizer = tf.train.AdamOptimizer(0.001) model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mean_absolute_error', 'mean_squared_error']) return model # should be the time.csv from generate-data's time binary dataset_path = sys.argv[1] column_names = ['cost', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw'] raw_dataset = pd.read_csv(dataset_path, names=column_names, na_values = "", #comment='\t', sep=",", skipinitialspace=True) # visualize the data! 
pos_plot = sns.pairplot(raw_dataset[["cost", "x", "y", "z"]], diag_kind="kde") pos_plot.savefig("./pos.fig.png") vel_plot = sns.pairplot(raw_dataset[["cost", "vx", "vy", "vz"]], diag_kind="kde") vel_plot.savefig("./vel.fig.png") avel_plot = sns.pairplot(raw_dataset[["cost", "avx", "avy", "avz"]], diag_kind="kde") avel_plot.savefig("./avel.fig.png") rot_plot = sns.pairplot(raw_dataset[["cost", "roll", "pitch", "yaw"]], diag_kind="kde") rot_plot.savefig("./rot.fig.png") pos_rot_plot = sns.pairplot(raw_dataset[["cost", "x", "y", "yaw"]], diag_kind="kde") pos_rot_plot.savefig("./pos_rot.fig.png") dataset = raw_dataset.copy() dataset.tail() # we don't have missing data # dataset.isna().sum() # dataset = dataset.dropna() # split into training vs test datasets train_dataset = dataset.sample(frac=0.95,random_state=0) test_dataset = dataset.drop(train_dataset.index) # using stats from full dataset stats = raw_dataset.describe() stats.pop("cost") stats = stats.transpose() stats train_labels = train_dataset.pop('cost') test_labels = test_dataset.pop('cost') scaled_train_dataset = scale(train_dataset) scaled_test_dataset = scale(test_dataset) # build and train moddel model = build_model() model.summary() history = model.fit(scaled_train_dataset, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[EARLY_STOP, PrintDot()]) plot_history(history) # check against test set loss, mae, mse = model.evaluate(scaled_test_dataset, test_labels, verbose=0) print("Testing set Mean Abs Error: {:5.2f} cost".format(mae)) # plot all test predictions test_predictions = model.predict(scaled_test_dataset).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [cost]') plt.ylabel('Predictions [cost]') plt.axis('equal') plt.axis('square') plt.xlim([0,plt.xlim()[1]]) plt.ylim([0,plt.ylim()[1]]) plt.plot([-100, 100], [-100, 100]) plt.show() # error distribution error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error 
[cost]") plt.ylabel("Count") plt.show() model.save('./simple_throttle_cost_model.h5') saved_model_path = tf.contrib.saved_model.save_keras_model(model, "./simple_throttle_cost_saved_model")
33.901235
151
0.682629
"""Train a dense Keras regressor that predicts a throttle "cost" from a
12-dimensional vehicle state (position, velocity, angular velocity, attitude).

Usage: python <script> <path/to/time.csv>   (CSV produced by generate-data's
time binary; see comment at dataset_path below).

NOTE(review): uses TF 1.x APIs (tf.train.AdamOptimizer, tf.contrib.saved_model)
— presumably targets TensorFlow 1.13/1.14; confirm before upgrading.
"""
from __future__ import absolute_import, division, print_function

import sys
import pathlib

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Training budget; EARLY_STOP usually halts well before EPOCHS is reached.
EPOCHS = 1000
EARLY_STOP = keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)


class PrintDot(keras.callbacks.Callback):
    """Progress indicator: one dot per epoch, newline every 100 epochs."""

    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0: print('')
        print('.', end='')


def plot_history(history):
    """Plot train/validation MAE and MSE curves from a keras History object."""
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [cost]')
    plt.plot(hist['epoch'], hist['mean_absolute_error'],
             label='Train Error')
    plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
             label = 'Val Error')
    plt.ylim([0,5])
    plt.legend()

    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Square Error [$cost^2$]')
    plt.plot(hist['epoch'], hist['mean_squared_error'],
             label='Train Error')
    plt.plot(hist['epoch'], hist['val_mean_squared_error'],
             label = 'Val Error')
    plt.ylim([0,20])
    plt.legend()
    plt.show()


# Fixed min/max per feature used for min-max normalization; rows are indexed
# by feature name so `scale` broadcasts across DataFrame columns.
scaling = pd.DataFrame(data={
    'min': [-10000, -10000, -10000, -2300, -2300, -2300, -6.0, -6.0, -6.0, -3.2, -3.2, -3.2],
    'max': [ 10000, 10000, 10000, 2300, 2300, 2300, 6.0, 6.0, 6.0, 3.2, 3.2, 3.2],
}, index=[
    'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw'])


def scale(x):
    """Min-max scale a feature DataFrame into [0, 1] using the fixed bounds."""
    return (x - scaling['min']) / (scaling['max'] - scaling['min'])


def build_model():
    """Build and compile the 128-128-1 dense regression model.

    NOTE: closes over the module-level `train_dataset` (defined below) for the
    input shape, so this must be called after the train/test split.
    """
    model = keras.Sequential([
        layers.Dense(128, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
        layers.Dense(128, activation=tf.nn.relu),
        #layers.Dense(12, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
        #layers.Dense(12, activation=tf.nn.relu),
        #layers.Dense(12, activation=tf.nn.relu),
        #layers.Dense(12, activation=tf.nn.relu),
        #layers.Dense(12, activation=tf.nn.relu),
        layers.Dense(1)
    ])

    #optimizer = tf.keras.optimizers.RMSprop(0.001)
    optimizer = tf.train.AdamOptimizer(0.001)

    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mean_absolute_error', 'mean_squared_error'])
    return model


# should be the time.csv from generate-data's time binary
dataset_path = sys.argv[1]

# First column is the regression target; the rest are the 12 state features.
column_names = ['cost', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                          na_values = "", sep=",",
                          skipinitialspace=True)

# Exploratory pairplots of cost vs. each feature group, saved to disk.
pos_plot = sns.pairplot(raw_dataset[["cost", "x", "y", "z"]], diag_kind="kde")
pos_plot.savefig("./pos.fig.png")
vel_plot = sns.pairplot(raw_dataset[["cost", "vx", "vy", "vz"]], diag_kind="kde")
vel_plot.savefig("./vel.fig.png")
avel_plot = sns.pairplot(raw_dataset[["cost", "avx", "avy", "avz"]], diag_kind="kde")
avel_plot.savefig("./avel.fig.png")
rot_plot = sns.pairplot(raw_dataset[["cost", "roll", "pitch", "yaw"]], diag_kind="kde")
rot_plot.savefig("./rot.fig.png")
pos_rot_plot = sns.pairplot(raw_dataset[["cost", "x", "y", "yaw"]], diag_kind="kde")
pos_rot_plot.savefig("./pos_rot.fig.png")

dataset = raw_dataset.copy()
dataset.tail()

# No missing data expected in this CSV, so NA handling stays disabled:
# dataset.isna().sum()
# dataset = dataset.dropna()

# split into training vs test datasets
train_dataset = dataset.sample(frac=0.95,random_state=0)
test_dataset = dataset.drop(train_dataset.index)

# using stats from full dataset
stats = raw_dataset.describe()
stats.pop("cost")
stats = stats.transpose()
stats

# Separate labels (cost) from features, then normalize the features.
train_labels = train_dataset.pop('cost')
test_labels = test_dataset.pop('cost')

scaled_train_dataset = scale(train_dataset)
scaled_test_dataset = scale(test_dataset)

# build and train model
model = build_model()
model.summary()

history = model.fit(scaled_train_dataset, train_labels, epochs=EPOCHS,
                    validation_split = 0.2, verbose=0,
                    callbacks=[EARLY_STOP, PrintDot()])
plot_history(history)

# check against test set
loss, mae, mse = model.evaluate(scaled_test_dataset, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} cost".format(mae))

# plot all test predictions
test_predictions = model.predict(scaled_test_dataset).flatten()

plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [cost]')
plt.ylabel('Predictions [cost]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
plt.plot([-100, 100], [-100, 100])
plt.show()

# error distribution
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [cost]")
plt.ylabel("Count")
plt.show()

# Persist both the HDF5 model and a TF SavedModel export.
model.save('./simple_throttle_cost_model.h5')
saved_model_path = tf.contrib.saved_model.save_keras_model(model, "./simple_throttle_cost_saved_model")
true
true
f70efeb1edf8afda9c8ef1c5b5a73e8cf2c23017
1,514
py
Python
pirate_control/python-rtmidi-0.3.1a/tests/test_probe_ports.py
bopopescu/pco-pirate
b8836870254992b4cb316fe29805e914a6e9910f
[ "MIT" ]
1
2019-12-27T11:45:11.000Z
2019-12-27T11:45:11.000Z
pirate_control/python-rtmidi-0.3.1a/tests/test_probe_ports.py
bopopescu/pco-pirate
b8836870254992b4cb316fe29805e914a6e9910f
[ "MIT" ]
1
2020-07-24T15:59:56.000Z
2020-07-24T15:59:56.000Z
pirate_control/python-rtmidi-0.3.1a/tests/test_probe_ports.py
bopopescu/pco-pirate
b8836870254992b4cb316fe29805e914a6e9910f
[ "MIT" ]
1
2020-07-24T12:54:33.000Z
2020-07-24T12:54:33.000Z
#!/usr/bin/env python # # test_probe_ports.py # """Shows how to probe for available MIDI input and output ports.""" import sys from rtmidi import * try: raw_input except NameError: # Python 3 raw_input = input apis = { API_MACOSX_CORE: "OS X CoreMIDI", API_LINUX_ALSA: "Linux ALSA", API_UNIX_JACK: "Jack Client", API_WINDOWS_MM: "Windows MultiMedia", API_WINDOWS_KS: "Windows Kernel Streaming", API_RTMIDI_DUMMY: "RtMidi Dummy" } available_apis = get_compiled_api() for api, desc in sorted(apis.items()): if api in available_apis: try: r = raw_input("Probe ports using the %s API? (Y/n) " % desc).strip() if r and r.lower() != "y": continue except (KeyboardInterrupt, EOFError): print('') break for name, class_ in (("input", MidiIn), ("output", MidiOut)): try: midi = class_(api) ports = midi.get_ports(encoding='latin1' if sys.platform.startswith('win') else 'utf-8') except RuntimeError as exc: print("Could not probe MIDI %s ports: %s" % (name, exc)) continue if not ports: print("No MIDI %s ports found." % name) else: print("Available MIDI %s ports:\n" % name) for port, name in enumerate(ports): print("[%i] %s" % (port, name)) print('') del midi
26.561404
80
0.543593
import sys from rtmidi import * try: raw_input except NameError: raw_input = input apis = { API_MACOSX_CORE: "OS X CoreMIDI", API_LINUX_ALSA: "Linux ALSA", API_UNIX_JACK: "Jack Client", API_WINDOWS_MM: "Windows MultiMedia", API_WINDOWS_KS: "Windows Kernel Streaming", API_RTMIDI_DUMMY: "RtMidi Dummy" } available_apis = get_compiled_api() for api, desc in sorted(apis.items()): if api in available_apis: try: r = raw_input("Probe ports using the %s API? (Y/n) " % desc).strip() if r and r.lower() != "y": continue except (KeyboardInterrupt, EOFError): print('') break for name, class_ in (("input", MidiIn), ("output", MidiOut)): try: midi = class_(api) ports = midi.get_ports(encoding='latin1' if sys.platform.startswith('win') else 'utf-8') except RuntimeError as exc: print("Could not probe MIDI %s ports: %s" % (name, exc)) continue if not ports: print("No MIDI %s ports found." % name) else: print("Available MIDI %s ports:\n" % name) for port, name in enumerate(ports): print("[%i] %s" % (port, name)) print('') del midi
true
true
f70eff5e37149022cec92c5d324368e5cbb343fc
4,569
py
Python
web3tools/web3wallet.py
LinuxIsCool/tokenspice2
aeca439b4beb1462e1f988f5d8eac60c2c5e8cbf
[ "Apache-2.0" ]
null
null
null
web3tools/web3wallet.py
LinuxIsCool/tokenspice2
aeca439b4beb1462e1f988f5d8eac60c2c5e8cbf
[ "Apache-2.0" ]
null
null
null
web3tools/web3wallet.py
LinuxIsCool/tokenspice2
aeca439b4beb1462e1f988f5d8eac60c2c5e8cbf
[ "Apache-2.0" ]
1
2021-09-05T22:34:29.000Z
2021-09-05T22:34:29.000Z
"""Web3 wallet helper: holds a private key, signs transactions/messages,
tracks nonces per address, and sends ETH / contract transactions."""
import logging
import typing

import web3

from util import constants
from web3tools import web3util, account

logger = logging.getLogger(__name__)


def randomWeb3Wallet():
    """Return a Web3Wallet backed by a freshly generated random private key."""
    private_key = account.randomPrivateKey()
    return Web3Wallet(private_key=private_key)


class Web3Wallet:
    """Signs txs and msgs with an account's private key."""

    # Class-level cache of the last nonce used per address; see _get_nonce().
    _last_tx_count = dict()

    # Floor for gasPrice (in wei) so txs aren't priced below network minimums.
    MIN_GAS_PRICE = 1000000000

    def __init__(self, private_key: str):
        self._private_key = private_key
        self._address = account.privateKeyToAddress(self._private_key)
        # give this wallet a bunch of ETH for gas fees

    @property
    def address(self):
        """Checksummed address derived from the private key."""
        return self._address

    @property
    def private_key(self):
        return self._private_key

    @property
    def account(self):
        """Account object wrapping this wallet's private key."""
        return account.Account(private_key=self.private_key)

    @staticmethod
    def reset_tx_count():
        """Forget all cached nonces (e.g. between test runs / chain resets)."""
        Web3Wallet._last_tx_count = dict()

    def __get_key(self):
        return self._private_key

    def validate(self):
        """Return True iff the stored private key really derives self.address."""
        _web3 = web3util.get_web3()
        key = self.__get_key()
        account = _web3.eth.account.from_key(key)
        return account.address == self._address

    @staticmethod
    def _get_nonce(address):
        # We cannot rely on `web3.eth.getTransactionCount` because when sending multiple
        # transactions in a row without wait in between the network may not get the chance to
        # update the transaction count for the account address in time.
        # So we have to manage this internally per account address.
        _web3 = web3util.get_web3()
        if address not in Web3Wallet._last_tx_count:
            Web3Wallet._last_tx_count[address] = _web3.eth.getTransactionCount(address)
        else:
            Web3Wallet._last_tx_count[address] += 1
        return Web3Wallet._last_tx_count[address]

    def sign_tx(self, tx):
        """Fill in nonce/gasPrice on `tx`, sign it, and return the raw tx bytes."""
        _web3 = web3util.get_web3()
        account = _web3.eth.account.from_key(self._private_key)
        nonce = Web3Wallet._get_nonce(account.address)
        gas_price = int(_web3.eth.gasPrice / 100)
        gas_price = max(gas_price, self.MIN_GAS_PRICE)
        tx['nonce'] = nonce
        tx['gasPrice'] = gas_price
        # BUG FIX: the original passed the undefined name `private_key`
        # (NameError on every call); use this wallet's own key.
        signed_tx = _web3.eth.account.sign_transaction(tx, self._private_key)
        return signed_tx.rawTransaction

    def sign(self, msg_hash):
        """Sign a 32-byte message hash; returns the SignedMessage."""
        # NOTE(review): this reads `web3.eth.account` off the imported module,
        # not off a connected Web3 instance — confirm this attribute exists in
        # the pinned web3 version (web3.Account would be the usual spelling).
        account = web3.eth.account.from_key(self._private_key)
        return account.signHash(msg_hash)

    def ETH_base(self) -> int:  # returns ETH, in base 18 (i.e. num wei)
        _web3 = web3util.get_web3()
        return _web3.eth.getBalance(self._address)

    def fundFromAbove(self, num_wei: int):
        # Give the this wallet ETH to pay gas fees
        # Use funds given to 'TEST_PRIVATE_KEY1' from ganache (see deploy.py)
        network = web3util.get_network()
        god_key = web3util.confFileValue(network, 'TEST_PRIVATE_KEY1')
        god_wallet = Web3Wallet(god_key)
        god_wallet.sendEth(self.address, num_wei)

    def sendEth(self, to_address: str, num_wei: int):
        """Send plain ETH (no contract call) from this wallet to `to_address`."""
        return buildAndSendTx(
            function=None,
            from_wallet=self,
            num_wei=num_wei,
            to_address=to_address)


def buildAndSendTx(function,
                   from_wallet: Web3Wallet,
                   gaslimit: int = constants.GASLIMIT_DEFAULT,
                   num_wei: int = 0,
                   to_address=None):
    """Build, sign, send a tx and wait for its receipt.

    :param function: bound contract function (buildTransaction-capable), or
        None to send plain ETH to `to_address`.
    :param from_wallet: wallet that signs and pays for the tx.
    :param gaslimit: gas limit for the tx.
    :param num_wei: ETH value (in wei) attached to the tx.
    :param to_address: recipient; required iff `function` is None.
    :return: (tx_hash, tx_receipt) tuple.
    :raises Exception: if the mined receipt reports status == 0 (tx failed).
    """
    assert isinstance(from_wallet.address, str)
    #assert isinstance(from_wallet.private_key, str)
    _web3 = web3util.get_web3()

    # NOTE(review): nonce comes straight from getTransactionCount here, not
    # from Web3Wallet._get_nonce — rapid back-to-back sends may reuse a nonce;
    # confirm whether this path should share the cached-nonce scheme.
    nonce = _web3.eth.getTransactionCount(from_wallet.address)
    network = web3util.get_network()
    gas_price = int(web3util.confFileValue(network, 'GAS_PRICE'))
    tx_params = {
        "from": from_wallet.address,
        "value": num_wei,
        "nonce": nonce,
        "gas": gaslimit,
        "gasPrice": gas_price,
    }

    if function is None:  # just send ETH, versus smart contract call?
        assert to_address is not None
        assert isinstance(to_address, str)
        tx = tx_params
        tx["to"] = to_address
    else:
        assert to_address is None
        tx = function.buildTransaction(tx_params)

    signed_tx = _web3.eth.account.sign_transaction(
        tx, private_key=from_wallet.private_key)
    tx_hash = _web3.eth.sendRawTransaction(signed_tx.rawTransaction)

    tx_receipt = _web3.eth.waitForTransactionReceipt(tx_hash)
    if tx_receipt['status'] == 0:  # did tx fail?
        # BUG FIX: the original message was a plain string with a literal
        # "{tx_receipt}" — missing f-prefix, so the receipt never appeared.
        raise Exception(f"The tx failed. tx_receipt: {tx_receipt}")
    return (tx_hash, tx_receipt)
33.595588
93
0.663821
import logging import typing import web3 from util import constants from web3tools import web3util, account logger = logging.getLogger(__name__) def randomWeb3Wallet(): private_key = account.randomPrivateKey() return Web3Wallet(private_key=private_key) class Web3Wallet: _last_tx_count = dict() MIN_GAS_PRICE = 1000000000 def __init__(self, private_key:str): self._private_key = private_key self._address = account.privateKeyToAddress(self._private_key) @property def address(self): return self._address @property def private_key(self): return self._private_key @property def account(self): return account.Account(private_key=self.private_key) @staticmethod def reset_tx_count(): Web3Wallet._last_tx_count = dict() def __get_key(self): return self._private_key def validate(self): _web3 = web3util.get_web3() key = self.__get_key() account = _web3.eth.account.from_key(key) return account.address == self._address @staticmethod def _get_nonce(address): _web3 = web3util.get_web3() if address not in Web3Wallet._last_tx_count: Web3Wallet._last_tx_count[address] = _web3.eth.getTransactionCount(address) else: Web3Wallet._last_tx_count[address] += 1 return Web3Wallet._last_tx_count[address] def sign_tx(self, tx): _web3 = web3util.get_web3() account = _web3.eth.account.from_key(self._private_key) nonce = Web3Wallet._get_nonce(account.address) gas_price = int(_web3.eth.gasPrice / 100) gas_price = max(gas_price, self.MIN_GAS_PRICE) tx['nonce'] = nonce tx['gasPrice'] = gas_price signed_tx = _web3.eth.account.sign_transaction(tx, private_key) return signed_tx.rawTransaction def sign(self, msg_hash): account = web3.eth.account.from_key(self._private_key) return account.signHash(msg_hash) def ETH_base(self) -> int: _web3 = web3util.get_web3() return _web3.eth.getBalance(self._address) def fundFromAbove(self, num_wei: int): network = web3util.get_network() god_key = web3util.confFileValue(network, 'TEST_PRIVATE_KEY1') god_wallet = Web3Wallet(god_key) god_wallet.sendEth(self.address, num_wei) 
def sendEth(self, to_address:str, num_wei:int): return buildAndSendTx( function=None, from_wallet=self, num_wei=num_wei, to_address=to_address) def buildAndSendTx(function, from_wallet: Web3Wallet, gaslimit: int = constants.GASLIMIT_DEFAULT, num_wei: int = 0, to_address=None): assert isinstance(from_wallet.address, str) _web3 = web3util.get_web3() nonce = _web3.eth.getTransactionCount(from_wallet.address) network = web3util.get_network() gas_price = int(web3util.confFileValue(network, 'GAS_PRICE')) tx_params = { "from": from_wallet.address, "value": num_wei, "nonce": nonce, "gas": gaslimit, "gasPrice": gas_price, } if function is None: assert to_address is not None assert isinstance(to_address, str) tx = tx_params tx["to"] = to_address else: assert to_address is None tx = function.buildTransaction(tx_params) signed_tx = _web3.eth.account.sign_transaction( tx, private_key=from_wallet.private_key) tx_hash = _web3.eth.sendRawTransaction(signed_tx.rawTransaction) tx_receipt = _web3.eth.waitForTransactionReceipt(tx_hash) if tx_receipt['status'] == 0: raise Exception("The tx failed. tx_receipt: {tx_receipt}") return (tx_hash, tx_receipt)
true
true
f70efff899fc1223c7b3fa5193ad0a13d4e23777
34,673
py
Python
aiohttp/client_reqrep.py
loven-doo/aiohttp
01ef966b261bc6a8934b3c53c79c92f019b404a7
[ "Apache-2.0" ]
2
2021-02-04T10:49:55.000Z
2021-02-04T10:50:31.000Z
aiohttp/client_reqrep.py
loven-doo/aiohttp
01ef966b261bc6a8934b3c53c79c92f019b404a7
[ "Apache-2.0" ]
228
2020-10-17T22:31:34.000Z
2022-03-28T18:13:31.000Z
aiohttp/client_reqrep.py
loven-doo/aiohttp
01ef966b261bc6a8934b3c53c79c92f019b404a7
[ "Apache-2.0" ]
1
2021-07-22T04:21:08.000Z
2021-07-22T04:21:08.000Z
import asyncio import codecs import dataclasses import functools import io import re import sys import traceback import warnings from hashlib import md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union, cast, ) from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy from yarl import URL from . import hdrs, helpers, http, multipart, payload from .abc import AbstractStreamWriter from .client_exceptions import ( ClientConnectionError, ClientOSError, ClientResponseError, ContentTypeError, InvalidURL, ServerFingerprintMismatch, ) from .formdata import FormData from .hdrs import CONTENT_TYPE from .helpers import ( BaseTimerContext, BasicAuth, HeadersMixin, TimerNoop, is_expected_content_type, noop, parse_mimetype, reify, set_result, ) from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter from .http_parser import HAS_BROTLI from .log import client_logger from .streams import StreamReader from .typedefs import ( DEFAULT_JSON_DECODER, JSONDecoder, LooseCookies, LooseHeaders, RawHeaders, ) try: import ssl from ssl import SSLContext except ImportError: # pragma: no cover ssl = None # type: ignore[assignment] SSLContext = object # type: ignore[misc,assignment] try: import cchardet as chardet except ImportError: # pragma: no cover import charset_normalizer as chardet # type: ignore[no-redef] __all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint") if TYPE_CHECKING: # pragma: no cover from .client import ClientSession from .connector import Connection from .tracing import Trace def _gen_default_accept_encoding() -> str: return "gzip, deflate, br" if HAS_BROTLI else "gzip, deflate" @dataclasses.dataclass(frozen=True) class ContentDisposition: type: Optional[str] parameters: "MappingProxyType[str, str]" filename: Optional[str] 
@dataclasses.dataclass(frozen=True) class RequestInfo: url: URL method: str headers: "CIMultiDictProxy[str]" real_url: URL class Fingerprint: HASHFUNC_BY_DIGESTLEN = { 16: md5, 20: sha1, 32: sha256, } def __init__(self, fingerprint: bytes) -> None: digestlen = len(fingerprint) hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen) if not hashfunc: raise ValueError("fingerprint has invalid length") elif hashfunc is md5 or hashfunc is sha1: raise ValueError( "md5 and sha1 are insecure and " "not supported. Use sha256." ) self._hashfunc = hashfunc self._fingerprint = fingerprint @property def fingerprint(self) -> bytes: return self._fingerprint def check(self, transport: asyncio.Transport) -> None: if not transport.get_extra_info("sslcontext"): return sslobj = transport.get_extra_info("ssl_object") cert = sslobj.getpeercert(binary_form=True) got = self._hashfunc(cert).digest() if got != self._fingerprint: host, port, *_ = transport.get_extra_info("peername") raise ServerFingerprintMismatch(self._fingerprint, got, host, port) if ssl is not None: SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None)) else: # pragma: no cover SSL_ALLOWED_TYPES = type(None) @dataclasses.dataclass(frozen=True) class ConnectionKey: # the key should contain an information about used proxy / TLS # to prevent reusing wrong connections from a pool host: str port: Optional[int] is_ssl: bool ssl: Union[SSLContext, None, bool, Fingerprint] proxy: Optional[URL] proxy_auth: Optional[BasicAuth] proxy_headers_hash: Optional[int] # hash(CIMultiDict) class ClientRequest: GET_METHODS = { hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS, hdrs.METH_TRACE, } POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT} ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE}) DEFAULT_HEADERS = { hdrs.ACCEPT: "*/*", hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(), } body = b"" auth = None response = None _writer = None # async task for streaming data _continue = None # waiter 
future for '100 Continue' response # N.B. # Adding __del__ method with self._writer closing doesn't make sense # because _writer is instance method, thus it keeps a reference to self. # Until writer has finished finalizer will not be called. def __init__( self, method: str, url: URL, *, params: Optional[Mapping[str, str]] = None, headers: Optional[LooseHeaders] = None, skip_auto_headers: Iterable[str] = frozenset(), data: Any = None, cookies: Optional[LooseCookies] = None, auth: Optional[BasicAuth] = None, version: http.HttpVersion = http.HttpVersion11, compress: Optional[str] = None, chunked: Optional[bool] = None, expect100: bool = False, loop: asyncio.AbstractEventLoop, response_class: Optional[Type["ClientResponse"]] = None, proxy: Optional[URL] = None, proxy_auth: Optional[BasicAuth] = None, timer: Optional[BaseTimerContext] = None, session: Optional["ClientSession"] = None, ssl: Union[SSLContext, bool, Fingerprint, None] = None, proxy_headers: Optional[LooseHeaders] = None, traces: Optional[List["Trace"]] = None, ): assert isinstance(url, URL), url assert isinstance(proxy, (URL, type(None))), proxy # FIXME: session is None in tests only, need to fix tests # assert session is not None self._session = cast("ClientSession", session) if params: q = MultiDict(url.query) url2 = url.with_query(params) q.extend(url2.query) url = url.with_query(q) self.original_url = url self.url = url.with_fragment(None) self.method = method.upper() self.chunked = chunked self.compress = compress self.loop = loop self.length = None if response_class is None: real_response_class = ClientResponse else: real_response_class = response_class self.response_class = real_response_class # type: Type[ClientResponse] self._timer = timer if timer is not None else TimerNoop() self._ssl = ssl if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.update_version(version) self.update_host(url) self.update_headers(headers) 
self.update_auto_headers(skip_auto_headers) self.update_cookies(cookies) self.update_content_encoding(data) self.update_auth(auth) self.update_proxy(proxy, proxy_auth, proxy_headers) self.update_body_from_data(data) if data is not None or self.method not in self.GET_METHODS: self.update_transfer_encoding() self.update_expect_continue(expect100) if traces is None: traces = [] self._traces = traces def is_ssl(self) -> bool: return self.url.scheme in ("https", "wss") @property def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]: return self._ssl @property def connection_key(self) -> ConnectionKey: proxy_headers = self.proxy_headers if proxy_headers: h = hash( tuple((k, v) for k, v in proxy_headers.items()) ) # type: Optional[int] else: h = None return ConnectionKey( self.host, self.port, self.is_ssl(), self.ssl, self.proxy, self.proxy_auth, h, ) @property def host(self) -> str: ret = self.url.raw_host assert ret is not None return ret @property def port(self) -> Optional[int]: return self.url.port @property def request_info(self) -> RequestInfo: headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str] return RequestInfo(self.url, self.method, headers, self.original_url) def update_host(self, url: URL) -> None: """Update destination host, port and connection type (ssl).""" # get host/port if not url.raw_host: raise InvalidURL(url) # basic auth info username, password = url.user, url.password if username: self.auth = helpers.BasicAuth(username, password or "") def update_version(self, version: Union[http.HttpVersion, str]) -> None: """Convert request version to two elements tuple. 
parser HTTP version '1.1' => (1, 1) """ if isinstance(version, str): v = [part.strip() for part in version.split(".", 1)] try: version = http.HttpVersion(int(v[0]), int(v[1])) except ValueError: raise ValueError( f"Can not parse http version number: {version}" ) from None self.version = version def update_headers(self, headers: Optional[LooseHeaders]) -> None: """Update request headers.""" self.headers = CIMultiDict() # type: CIMultiDict[str] # add host netloc = cast(str, self.url.raw_host) if helpers.is_ipv6_address(netloc): netloc = f"[{netloc}]" if self.url.port is not None and not self.url.is_default_port(): netloc += ":" + str(self.url.port) self.headers[hdrs.HOST] = netloc if headers: if isinstance(headers, (dict, MultiDictProxy, MultiDict)): headers = headers.items() # type: ignore[assignment] for key, value in headers: # type: ignore[misc] # A special case for Host header if key.lower() == "host": self.headers[key] = value else: self.headers.add(key, value) def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None: self.skip_auto_headers = CIMultiDict( (hdr, None) for hdr in sorted(skip_auto_headers) ) used_headers = self.headers.copy() used_headers.extend(self.skip_auto_headers) # type: ignore[arg-type] for hdr, val in self.DEFAULT_HEADERS.items(): if hdr not in used_headers: self.headers.add(hdr, val) if hdrs.USER_AGENT not in used_headers: self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE def update_cookies(self, cookies: Optional[LooseCookies]) -> None: """Update request cookies header.""" if not cookies: return c = SimpleCookie() # type: SimpleCookie[str] if hdrs.COOKIE in self.headers: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] if isinstance(cookies, Mapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] for name, value in iter_cookies: if isinstance(value, Morsel): # Preserve coded_value mrsl_val = value.get(value.key, Morsel()) mrsl_val.set(value.key, value.value, 
value.coded_value) c[name] = mrsl_val else: c[name] = value # type: ignore[assignment] self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip() def update_content_encoding(self, data: Any) -> None: """Set request content encoding.""" if data is None: return enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower() if enc: if self.compress: raise ValueError( "compress can not be set " "if Content-Encoding header is set" ) elif self.compress: if not isinstance(self.compress, str): self.compress = "deflate" self.headers[hdrs.CONTENT_ENCODING] = self.compress self.chunked = True # enable chunked, no need to deal with length def update_transfer_encoding(self) -> None: """Analyze transfer-encoding header.""" te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower() if "chunked" in te: if self.chunked: raise ValueError( "chunked can not be set " 'if "Transfer-Encoding: chunked" header is set' ) elif self.chunked: if hdrs.CONTENT_LENGTH in self.headers: raise ValueError( "chunked can not be set " "if Content-Length header is set" ) self.headers[hdrs.TRANSFER_ENCODING] = "chunked" else: if hdrs.CONTENT_LENGTH not in self.headers: self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body)) def update_auth(self, auth: Optional[BasicAuth]) -> None: """Set basic auth.""" if auth is None: auth = self.auth if auth is None: return if not isinstance(auth, helpers.BasicAuth): raise TypeError("BasicAuth() tuple is required instead") self.headers[hdrs.AUTHORIZATION] = auth.encode() def update_body_from_data(self, body: Any) -> None: if body is None: return # FormData if isinstance(body, FormData): body = body() try: body = payload.PAYLOAD_REGISTRY.get(body, disposition=None) except payload.LookupError: boundary = None if CONTENT_TYPE in self.headers: boundary = parse_mimetype(self.headers[CONTENT_TYPE]).parameters.get( "boundary" ) body = FormData(body, boundary=boundary)() self.body = body # enable chunked encoding if needed if not self.chunked: if hdrs.CONTENT_LENGTH not in 
self.headers: size = body.size if size is None: self.chunked = True else: if hdrs.CONTENT_LENGTH not in self.headers: self.headers[hdrs.CONTENT_LENGTH] = str(size) # copy payload headers assert body.headers for (key, value) in body.headers.items(): if key in self.headers: continue if key in self.skip_auto_headers: continue self.headers[key] = value def update_expect_continue(self, expect: bool = False) -> None: if expect: self.headers[hdrs.EXPECT] = "100-continue" elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue": expect = True if expect: self._continue = self.loop.create_future() def update_proxy( self, proxy: Optional[URL], proxy_auth: Optional[BasicAuth], proxy_headers: Optional[LooseHeaders], ) -> None: if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth): raise ValueError("proxy_auth must be None or BasicAuth() tuple") self.proxy = proxy self.proxy_auth = proxy_auth self.proxy_headers = proxy_headers def keep_alive(self) -> bool: if self.version < HttpVersion10: # keep alive not supported at all return False if self.version == HttpVersion10: if self.headers.get(hdrs.CONNECTION) == "keep-alive": return True else: # no headers means we close for Http 1.0 return False elif self.headers.get(hdrs.CONNECTION) == "close": return False return True async def write_bytes( self, writer: AbstractStreamWriter, conn: "Connection" ) -> None: """Support coroutines that yields bytes objects.""" # 100 response if self._continue is not None: await writer.drain() await self._continue protocol = conn.protocol assert protocol is not None try: if isinstance(self.body, payload.Payload): await self.body.write(writer) else: if isinstance(self.body, (bytes, bytearray)): self.body = (self.body,) # type: ignore[assignment] for chunk in self.body: await writer.write(chunk) # type: ignore[arg-type] await writer.write_eof() except OSError as exc: new_exc = ClientOSError( exc.errno, "Can not write request body for %s" % self.url ) new_exc.__context__ = exc 
new_exc.__cause__ = exc protocol.set_exception(new_exc) except asyncio.CancelledError as exc: if not conn.closed: protocol.set_exception(exc) except Exception as exc: protocol.set_exception(exc) finally: self._writer = None async def send(self, conn: "Connection") -> "ClientResponse": # Specify request target: # - CONNECT request must send authority form URI # - not CONNECT proxy must send absolute form URI # - most common is origin form URI if self.method == hdrs.METH_CONNECT: connect_host = self.url.raw_host assert connect_host is not None if helpers.is_ipv6_address(connect_host): connect_host = f"[{connect_host}]" path = f"{connect_host}:{self.url.port}" elif self.proxy and not self.is_ssl(): path = str(self.url) else: path = self.url.raw_path if self.url.raw_query_string: path += "?" + self.url.raw_query_string protocol = conn.protocol assert protocol is not None writer = StreamWriter( protocol, self.loop, on_chunk_sent=functools.partial( self._on_chunk_request_sent, self.method, self.url ), on_headers_sent=functools.partial( self._on_headers_request_sent, self.method, self.url ), ) if self.compress: writer.enable_compression(self.compress) if self.chunked is not None: writer.enable_chunking() # set default content-type if ( self.method in self.POST_METHODS and hdrs.CONTENT_TYPE not in self.skip_auto_headers and hdrs.CONTENT_TYPE not in self.headers ): self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream" # set the connection header connection = self.headers.get(hdrs.CONNECTION) if not connection: if self.keep_alive(): if self.version == HttpVersion10: connection = "keep-alive" else: if self.version == HttpVersion11: connection = "close" if connection is not None: self.headers[hdrs.CONNECTION] = connection # status + headers status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format( self.method, path, self.version ) await writer.write_headers(status_line, self.headers) self._writer = self.loop.create_task(self.write_bytes(writer, conn)) response_class = 
self.response_class assert response_class is not None self.response = response_class( self.method, self.original_url, writer=self._writer, continue100=self._continue, timer=self._timer, request_info=self.request_info, traces=self._traces, loop=self.loop, session=self._session, ) return self.response async def close(self) -> None: if self._writer is not None: try: await self._writer finally: self._writer = None def terminate(self) -> None: if self._writer is not None: if not self.loop.is_closed(): self._writer.cancel() self._writer = None async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None: for trace in self._traces: await trace.send_request_chunk_sent(method, url, chunk) async def _on_headers_request_sent( self, method: str, url: URL, headers: "CIMultiDict[str]" ) -> None: for trace in self._traces: await trace.send_request_headers(method, url, headers) class ClientResponse(HeadersMixin): # from the Status-Line of the response version = None # HTTP-Version status = None # type: int # Status-Code reason = None # Reason-Phrase content = None # type: StreamReader # Payload stream _headers = None # type: CIMultiDictProxy[str] # Response headers _raw_headers = None # type: RawHeaders # Response raw headers _connection = None # current connection _source_traceback = None # setted up by ClientRequest after ClientResponse object creation # post-init stage allows to not change ctor signature _closed = True # to allow __del__ for non-initialized properly response _released = False def __init__( self, method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession", ) -> None: assert isinstance(url, URL) super().__init__() self.method = method self.cookies = SimpleCookie() # type: SimpleCookie[str] self._real_url = url self._url = url.with_fragment(None) self._body = None # 
type: Optional[bytes] self._writer = writer # type: Optional[asyncio.Task[None]] self._continue = continue100 # None by default self._closed = True self._history = () # type: Tuple[ClientResponse, ...] self._request_info = request_info self._timer = timer if timer is not None else TimerNoop() self._cache = {} # type: Dict[str, Any] self._traces = traces self._loop = loop # store a reference to session #1985 self._session = session # type: Optional[ClientSession] if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) @reify def url(self) -> URL: return self._url @reify def real_url(self) -> URL: return self._real_url @reify def host(self) -> str: assert self._url.host is not None return self._url.host @reify def headers(self) -> "CIMultiDictProxy[str]": return self._headers @reify def raw_headers(self) -> RawHeaders: return self._raw_headers @reify def request_info(self) -> RequestInfo: return self._request_info @reify def content_disposition(self) -> Optional[ContentDisposition]: raw = self._headers.get(hdrs.CONTENT_DISPOSITION) if raw is None: return None disposition_type, params_dct = multipart.parse_content_disposition(raw) params = MappingProxyType(params_dct) filename = multipart.content_disposition_filename(params) return ContentDisposition(disposition_type, params, filename) def __del__(self, _warnings: Any = warnings) -> None: if self._closed: return if self._connection is not None: self._connection.release() self._cleanup_writer() if self._loop.get_debug(): _warnings.warn( f"Unclosed response {self!r}", ResourceWarning, source=self ) context = {"client_response": self, "message": "Unclosed response"} if self._source_traceback: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) def __repr__(self) -> str: out = io.StringIO() ascii_encodable_url = str(self.url) if self.reason: ascii_encodable_reason = self.reason.encode( "ascii", "backslashreplace" ).decode("ascii") else: 
ascii_encodable_reason = self.reason print( "<ClientResponse({}) [{} {}]>".format( ascii_encodable_url, self.status, ascii_encodable_reason ), file=out, ) print(self.headers, file=out) return out.getvalue() @property def connection(self) -> Optional["Connection"]: return self._connection @reify def history(self) -> Tuple["ClientResponse", ...]: """A sequence of responses, if redirects occurred.""" return self._history @reify def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]": links_str = ", ".join(self.headers.getall("link", [])) if not links_str: return MultiDictProxy(MultiDict()) links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]] for val in re.split(r",(?=\s*<)", links_str): match = re.match(r"\s*<(.*)>(.*)", val) if match is None: # pragma: no cover # the check exists to suppress mypy error continue url, params_str = match.groups() params = params_str.split(";")[1:] link = MultiDict() # type: MultiDict[Union[str, URL]] for param in params: match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M) if match is None: # pragma: no cover # the check exists to suppress mypy error continue key, _, value, _ = match.groups() link.add(key, value) key = link.get("rel", url) # type: ignore[assignment] link.add("url", self.url.join(URL(url))) links.add(key, MultiDictProxy(link)) return MultiDictProxy(links) async def start(self, connection: "Connection") -> "ClientResponse": """Start response processing.""" self._closed = False self._protocol = connection.protocol self._connection = connection with self._timer: while True: # read response try: protocol = self._protocol message, payload = await protocol.read() # type: ignore[union-attr] except http.HttpProcessingError as exc: raise ClientResponseError( self.request_info, self.history, status=exc.code, message=exc.message, headers=exc.headers, ) from exc if message.code < 100 or message.code > 199 or message.code == 101: break if self._continue is not None: 
set_result(self._continue, True) self._continue = None # payload eof handler payload.on_eof(self._response_eof) # response status self.version = message.version self.status = message.code self.reason = message.reason # headers self._headers = message.headers # type is CIMultiDictProxy self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes] # payload self.content = payload # cookies for hdr in self.headers.getall(hdrs.SET_COOKIE, ()): try: self.cookies.load(hdr) except CookieError as exc: client_logger.warning("Can not load response cookies: %s", exc) return self def _response_eof(self) -> None: if self._closed: return if self._connection is not None: # websocket, protocol could be None because # connection could be detached if ( self._connection.protocol is not None and self._connection.protocol.upgraded ): return self._connection.release() self._connection = None self._closed = True self._cleanup_writer() @property def closed(self) -> bool: return self._closed def close(self) -> None: if not self._released: self._notify_content() if self._closed: return self._closed = True if self._loop is None or self._loop.is_closed(): return if self._connection is not None: self._connection.close() self._connection = None self._cleanup_writer() def release(self) -> Any: if not self._released: self._notify_content() if self._closed: return noop() self._closed = True if self._connection is not None: self._connection.release() self._connection = None self._cleanup_writer() return noop() @property def ok(self) -> bool: """Returns ``True`` if ``status`` is less than ``400``, ``False`` if not. This is **not** a check for ``200 OK`` but a check that the response status is under 400. 
""" return 400 > self.status def raise_for_status(self) -> None: if not self.ok: # reason should always be not None for a started response assert self.reason is not None self.release() raise ClientResponseError( self.request_info, self.history, status=self.status, message=self.reason, headers=self.headers, ) def _cleanup_writer(self) -> None: if self._writer is not None: self._writer.cancel() self._writer = None self._session = None def _notify_content(self) -> None: content = self.content if content and content.exception() is None: content.set_exception(ClientConnectionError("Connection closed")) self._released = True async def wait_for_close(self) -> None: if self._writer is not None: try: await self._writer finally: self._writer = None self.release() async def read(self) -> bytes: """Read response payload.""" if self._body is None: try: self._body = await self.content.read() for trace in self._traces: await trace.send_response_chunk_received( self.method, self.url, self._body ) except BaseException: self.close() raise elif self._released: raise ClientConnectionError("Connection closed") return self._body def get_encoding(self) -> str: ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() mimetype = helpers.parse_mimetype(ctype) encoding = mimetype.parameters.get("charset") if encoding: try: codecs.lookup(encoding) except LookupError: encoding = None if not encoding: if mimetype.type == "application" and ( mimetype.subtype == "json" or mimetype.subtype == "rdap" ): # RFC 7159 states that the default encoding is UTF-8. 
# RFC 7483 defines application/rdap+json encoding = "utf-8" elif self._body is None: raise RuntimeError( "Cannot guess the encoding of " "a not yet read body" ) else: encoding = chardet.detect(self._body)["encoding"] if not encoding: encoding = "utf-8" return encoding async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str: """Read response payload and decode.""" if self._body is None: await self.read() if encoding is None: encoding = self.get_encoding() return self._body.decode(encoding, errors=errors) # type: ignore[union-attr] async def json( self, *, encoding: Optional[str] = None, loads: JSONDecoder = DEFAULT_JSON_DECODER, content_type: Optional[str] = "application/json", ) -> Any: """Read and decodes JSON response.""" if self._body is None: await self.read() if content_type: ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() if not is_expected_content_type(ctype, content_type): raise ContentTypeError( self.request_info, self.history, message=( "Attempt to decode JSON with " "unexpected mimetype: %s" % ctype ), headers=self.headers, ) if encoding is None: encoding = self.get_encoding() return loads(self._body.decode(encoding)) # type: ignore[union-attr] async def __aenter__(self) -> "ClientResponse": return self async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: # similar to _RequestContextManager, we do not need to check # for exceptions, response object can close connection # if state is broken self.release()
32.618062
88
0.582124
import asyncio import codecs import dataclasses import functools import io import re import sys import traceback import warnings from hashlib import md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union, cast, ) from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy from yarl import URL from . import hdrs, helpers, http, multipart, payload from .abc import AbstractStreamWriter from .client_exceptions import ( ClientConnectionError, ClientOSError, ClientResponseError, ContentTypeError, InvalidURL, ServerFingerprintMismatch, ) from .formdata import FormData from .hdrs import CONTENT_TYPE from .helpers import ( BaseTimerContext, BasicAuth, HeadersMixin, TimerNoop, is_expected_content_type, noop, parse_mimetype, reify, set_result, ) from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter from .http_parser import HAS_BROTLI from .log import client_logger from .streams import StreamReader from .typedefs import ( DEFAULT_JSON_DECODER, JSONDecoder, LooseCookies, LooseHeaders, RawHeaders, ) try: import ssl from ssl import SSLContext except ImportError: ssl = None SSLContext = object try: import cchardet as chardet except ImportError: import charset_normalizer as chardet __all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint") if TYPE_CHECKING: from .client import ClientSession from .connector import Connection from .tracing import Trace def _gen_default_accept_encoding() -> str: return "gzip, deflate, br" if HAS_BROTLI else "gzip, deflate" @dataclasses.dataclass(frozen=True) class ContentDisposition: type: Optional[str] parameters: "MappingProxyType[str, str]" filename: Optional[str] @dataclasses.dataclass(frozen=True) class RequestInfo: url: URL method: str headers: "CIMultiDictProxy[str]" real_url: URL class Fingerprint: 
HASHFUNC_BY_DIGESTLEN = { 16: md5, 20: sha1, 32: sha256, } def __init__(self, fingerprint: bytes) -> None: digestlen = len(fingerprint) hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen) if not hashfunc: raise ValueError("fingerprint has invalid length") elif hashfunc is md5 or hashfunc is sha1: raise ValueError( "md5 and sha1 are insecure and " "not supported. Use sha256." ) self._hashfunc = hashfunc self._fingerprint = fingerprint @property def fingerprint(self) -> bytes: return self._fingerprint def check(self, transport: asyncio.Transport) -> None: if not transport.get_extra_info("sslcontext"): return sslobj = transport.get_extra_info("ssl_object") cert = sslobj.getpeercert(binary_form=True) got = self._hashfunc(cert).digest() if got != self._fingerprint: host, port, *_ = transport.get_extra_info("peername") raise ServerFingerprintMismatch(self._fingerprint, got, host, port) if ssl is not None: SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None)) else: SSL_ALLOWED_TYPES = type(None) @dataclasses.dataclass(frozen=True) class ConnectionKey: host: str port: Optional[int] is_ssl: bool ssl: Union[SSLContext, None, bool, Fingerprint] proxy: Optional[URL] proxy_auth: Optional[BasicAuth] proxy_headers_hash: Optional[int] class ClientRequest: GET_METHODS = { hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS, hdrs.METH_TRACE, } POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT} ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE}) DEFAULT_HEADERS = { hdrs.ACCEPT: "*/*", hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(), } body = b"" auth = None response = None _writer = None _continue = None # because _writer is instance method, thus it keeps a reference to self. # Until writer has finished finalizer will not be called. 
def __init__( self, method: str, url: URL, *, params: Optional[Mapping[str, str]] = None, headers: Optional[LooseHeaders] = None, skip_auto_headers: Iterable[str] = frozenset(), data: Any = None, cookies: Optional[LooseCookies] = None, auth: Optional[BasicAuth] = None, version: http.HttpVersion = http.HttpVersion11, compress: Optional[str] = None, chunked: Optional[bool] = None, expect100: bool = False, loop: asyncio.AbstractEventLoop, response_class: Optional[Type["ClientResponse"]] = None, proxy: Optional[URL] = None, proxy_auth: Optional[BasicAuth] = None, timer: Optional[BaseTimerContext] = None, session: Optional["ClientSession"] = None, ssl: Union[SSLContext, bool, Fingerprint, None] = None, proxy_headers: Optional[LooseHeaders] = None, traces: Optional[List["Trace"]] = None, ): assert isinstance(url, URL), url assert isinstance(proxy, (URL, type(None))), proxy # FIXME: session is None in tests only, need to fix tests # assert session is not None self._session = cast("ClientSession", session) if params: q = MultiDict(url.query) url2 = url.with_query(params) q.extend(url2.query) url = url.with_query(q) self.original_url = url self.url = url.with_fragment(None) self.method = method.upper() self.chunked = chunked self.compress = compress self.loop = loop self.length = None if response_class is None: real_response_class = ClientResponse else: real_response_class = response_class self.response_class = real_response_class # type: Type[ClientResponse] self._timer = timer if timer is not None else TimerNoop() self._ssl = ssl if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.update_version(version) self.update_host(url) self.update_headers(headers) self.update_auto_headers(skip_auto_headers) self.update_cookies(cookies) self.update_content_encoding(data) self.update_auth(auth) self.update_proxy(proxy, proxy_auth, proxy_headers) self.update_body_from_data(data) if data is not None or self.method not in self.GET_METHODS: 
self.update_transfer_encoding() self.update_expect_continue(expect100) if traces is None: traces = [] self._traces = traces def is_ssl(self) -> bool: return self.url.scheme in ("https", "wss") @property def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]: return self._ssl @property def connection_key(self) -> ConnectionKey: proxy_headers = self.proxy_headers if proxy_headers: h = hash( tuple((k, v) for k, v in proxy_headers.items()) ) # type: Optional[int] else: h = None return ConnectionKey( self.host, self.port, self.is_ssl(), self.ssl, self.proxy, self.proxy_auth, h, ) @property def host(self) -> str: ret = self.url.raw_host assert ret is not None return ret @property def port(self) -> Optional[int]: return self.url.port @property def request_info(self) -> RequestInfo: headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str] return RequestInfo(self.url, self.method, headers, self.original_url) def update_host(self, url: URL) -> None: # get host/port if not url.raw_host: raise InvalidURL(url) # basic auth info username, password = url.user, url.password if username: self.auth = helpers.BasicAuth(username, password or "") def update_version(self, version: Union[http.HttpVersion, str]) -> None: if isinstance(version, str): v = [part.strip() for part in version.split(".", 1)] try: version = http.HttpVersion(int(v[0]), int(v[1])) except ValueError: raise ValueError( f"Can not parse http version number: {version}" ) from None self.version = version def update_headers(self, headers: Optional[LooseHeaders]) -> None: self.headers = CIMultiDict() # type: CIMultiDict[str] # add host netloc = cast(str, self.url.raw_host) if helpers.is_ipv6_address(netloc): netloc = f"[{netloc}]" if self.url.port is not None and not self.url.is_default_port(): netloc += ":" + str(self.url.port) self.headers[hdrs.HOST] = netloc if headers: if isinstance(headers, (dict, MultiDictProxy, MultiDict)): headers = headers.items() # type: ignore[assignment] for key, value in 
headers: # type: ignore[misc] # A special case for Host header if key.lower() == "host": self.headers[key] = value else: self.headers.add(key, value) def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None: self.skip_auto_headers = CIMultiDict( (hdr, None) for hdr in sorted(skip_auto_headers) ) used_headers = self.headers.copy() used_headers.extend(self.skip_auto_headers) # type: ignore[arg-type] for hdr, val in self.DEFAULT_HEADERS.items(): if hdr not in used_headers: self.headers.add(hdr, val) if hdrs.USER_AGENT not in used_headers: self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE def update_cookies(self, cookies: Optional[LooseCookies]) -> None: if not cookies: return c = SimpleCookie() # type: SimpleCookie[str] if hdrs.COOKIE in self.headers: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] if isinstance(cookies, Mapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] for name, value in iter_cookies: if isinstance(value, Morsel): # Preserve coded_value mrsl_val = value.get(value.key, Morsel()) mrsl_val.set(value.key, value.value, value.coded_value) c[name] = mrsl_val else: c[name] = value # type: ignore[assignment] self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip() def update_content_encoding(self, data: Any) -> None: if data is None: return enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower() if enc: if self.compress: raise ValueError( "compress can not be set " "if Content-Encoding header is set" ) elif self.compress: if not isinstance(self.compress, str): self.compress = "deflate" self.headers[hdrs.CONTENT_ENCODING] = self.compress self.chunked = True # enable chunked, no need to deal with length def update_transfer_encoding(self) -> None: te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower() if "chunked" in te: if self.chunked: raise ValueError( "chunked can not be set " 'if "Transfer-Encoding: chunked" header is set' ) elif self.chunked: if 
hdrs.CONTENT_LENGTH in self.headers: raise ValueError( "chunked can not be set " "if Content-Length header is set" ) self.headers[hdrs.TRANSFER_ENCODING] = "chunked" else: if hdrs.CONTENT_LENGTH not in self.headers: self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body)) def update_auth(self, auth: Optional[BasicAuth]) -> None: if auth is None: auth = self.auth if auth is None: return if not isinstance(auth, helpers.BasicAuth): raise TypeError("BasicAuth() tuple is required instead") self.headers[hdrs.AUTHORIZATION] = auth.encode() def update_body_from_data(self, body: Any) -> None: if body is None: return # FormData if isinstance(body, FormData): body = body() try: body = payload.PAYLOAD_REGISTRY.get(body, disposition=None) except payload.LookupError: boundary = None if CONTENT_TYPE in self.headers: boundary = parse_mimetype(self.headers[CONTENT_TYPE]).parameters.get( "boundary" ) body = FormData(body, boundary=boundary)() self.body = body # enable chunked encoding if needed if not self.chunked: if hdrs.CONTENT_LENGTH not in self.headers: size = body.size if size is None: self.chunked = True else: if hdrs.CONTENT_LENGTH not in self.headers: self.headers[hdrs.CONTENT_LENGTH] = str(size) # copy payload headers assert body.headers for (key, value) in body.headers.items(): if key in self.headers: continue if key in self.skip_auto_headers: continue self.headers[key] = value def update_expect_continue(self, expect: bool = False) -> None: if expect: self.headers[hdrs.EXPECT] = "100-continue" elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue": expect = True if expect: self._continue = self.loop.create_future() def update_proxy( self, proxy: Optional[URL], proxy_auth: Optional[BasicAuth], proxy_headers: Optional[LooseHeaders], ) -> None: if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth): raise ValueError("proxy_auth must be None or BasicAuth() tuple") self.proxy = proxy self.proxy_auth = proxy_auth self.proxy_headers = proxy_headers def 
keep_alive(self) -> bool: if self.version < HttpVersion10: # keep alive not supported at all return False if self.version == HttpVersion10: if self.headers.get(hdrs.CONNECTION) == "keep-alive": return True else: # no headers means we close for Http 1.0 return False elif self.headers.get(hdrs.CONNECTION) == "close": return False return True async def write_bytes( self, writer: AbstractStreamWriter, conn: "Connection" ) -> None: # 100 response if self._continue is not None: await writer.drain() await self._continue protocol = conn.protocol assert protocol is not None try: if isinstance(self.body, payload.Payload): await self.body.write(writer) else: if isinstance(self.body, (bytes, bytearray)): self.body = (self.body,) # type: ignore[assignment] for chunk in self.body: await writer.write(chunk) # type: ignore[arg-type] await writer.write_eof() except OSError as exc: new_exc = ClientOSError( exc.errno, "Can not write request body for %s" % self.url ) new_exc.__context__ = exc new_exc.__cause__ = exc protocol.set_exception(new_exc) except asyncio.CancelledError as exc: if not conn.closed: protocol.set_exception(exc) except Exception as exc: protocol.set_exception(exc) finally: self._writer = None async def send(self, conn: "Connection") -> "ClientResponse": # Specify request target: # - CONNECT request must send authority form URI # - not CONNECT proxy must send absolute form URI # - most common is origin form URI if self.method == hdrs.METH_CONNECT: connect_host = self.url.raw_host assert connect_host is not None if helpers.is_ipv6_address(connect_host): connect_host = f"[{connect_host}]" path = f"{connect_host}:{self.url.port}" elif self.proxy and not self.is_ssl(): path = str(self.url) else: path = self.url.raw_path if self.url.raw_query_string: path += "?" 
+ self.url.raw_query_string protocol = conn.protocol assert protocol is not None writer = StreamWriter( protocol, self.loop, on_chunk_sent=functools.partial( self._on_chunk_request_sent, self.method, self.url ), on_headers_sent=functools.partial( self._on_headers_request_sent, self.method, self.url ), ) if self.compress: writer.enable_compression(self.compress) if self.chunked is not None: writer.enable_chunking() # set default content-type if ( self.method in self.POST_METHODS and hdrs.CONTENT_TYPE not in self.skip_auto_headers and hdrs.CONTENT_TYPE not in self.headers ): self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream" # set the connection header connection = self.headers.get(hdrs.CONNECTION) if not connection: if self.keep_alive(): if self.version == HttpVersion10: connection = "keep-alive" else: if self.version == HttpVersion11: connection = "close" if connection is not None: self.headers[hdrs.CONNECTION] = connection # status + headers status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format( self.method, path, self.version ) await writer.write_headers(status_line, self.headers) self._writer = self.loop.create_task(self.write_bytes(writer, conn)) response_class = self.response_class assert response_class is not None self.response = response_class( self.method, self.original_url, writer=self._writer, continue100=self._continue, timer=self._timer, request_info=self.request_info, traces=self._traces, loop=self.loop, session=self._session, ) return self.response async def close(self) -> None: if self._writer is not None: try: await self._writer finally: self._writer = None def terminate(self) -> None: if self._writer is not None: if not self.loop.is_closed(): self._writer.cancel() self._writer = None async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None: for trace in self._traces: await trace.send_request_chunk_sent(method, url, chunk) async def _on_headers_request_sent( self, method: str, url: URL, headers: "CIMultiDict[str]" ) 
-> None: for trace in self._traces: await trace.send_request_headers(method, url, headers) class ClientResponse(HeadersMixin): # from the Status-Line of the response version = None # HTTP-Version status = None # type: int # Status-Code reason = None # Reason-Phrase content = None # type: StreamReader # Payload stream _headers = None # type: CIMultiDictProxy[str] # Response headers _raw_headers = None # type: RawHeaders # Response raw headers _connection = None # current connection _source_traceback = None # setted up by ClientRequest after ClientResponse object creation # post-init stage allows to not change ctor signature _closed = True # to allow __del__ for non-initialized properly response _released = False def __init__( self, method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession", ) -> None: assert isinstance(url, URL) super().__init__() self.method = method self.cookies = SimpleCookie() # type: SimpleCookie[str] self._real_url = url self._url = url.with_fragment(None) self._body = None # type: Optional[bytes] self._writer = writer # type: Optional[asyncio.Task[None]] self._continue = continue100 # None by default self._closed = True self._history = () # type: Tuple[ClientResponse, ...] 
self._request_info = request_info self._timer = timer if timer is not None else TimerNoop() self._cache = {} # type: Dict[str, Any] self._traces = traces self._loop = loop # store a reference to session #1985 self._session = session # type: Optional[ClientSession] if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) @reify def url(self) -> URL: return self._url @reify def real_url(self) -> URL: return self._real_url @reify def host(self) -> str: assert self._url.host is not None return self._url.host @reify def headers(self) -> "CIMultiDictProxy[str]": return self._headers @reify def raw_headers(self) -> RawHeaders: return self._raw_headers @reify def request_info(self) -> RequestInfo: return self._request_info @reify def content_disposition(self) -> Optional[ContentDisposition]: raw = self._headers.get(hdrs.CONTENT_DISPOSITION) if raw is None: return None disposition_type, params_dct = multipart.parse_content_disposition(raw) params = MappingProxyType(params_dct) filename = multipart.content_disposition_filename(params) return ContentDisposition(disposition_type, params, filename) def __del__(self, _warnings: Any = warnings) -> None: if self._closed: return if self._connection is not None: self._connection.release() self._cleanup_writer() if self._loop.get_debug(): _warnings.warn( f"Unclosed response {self!r}", ResourceWarning, source=self ) context = {"client_response": self, "message": "Unclosed response"} if self._source_traceback: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) def __repr__(self) -> str: out = io.StringIO() ascii_encodable_url = str(self.url) if self.reason: ascii_encodable_reason = self.reason.encode( "ascii", "backslashreplace" ).decode("ascii") else: ascii_encodable_reason = self.reason print( "<ClientResponse({}) [{} {}]>".format( ascii_encodable_url, self.status, ascii_encodable_reason ), file=out, ) print(self.headers, file=out) return out.getvalue() 
@property def connection(self) -> Optional["Connection"]: return self._connection @reify def history(self) -> Tuple["ClientResponse", ...]: return self._history @reify def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]": links_str = ", ".join(self.headers.getall("link", [])) if not links_str: return MultiDictProxy(MultiDict()) links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]] for val in re.split(r",(?=\s*<)", links_str): match = re.match(r"\s*<(.*)>(.*)", val) if match is None: # pragma: no cover # the check exists to suppress mypy error continue url, params_str = match.groups() params = params_str.split(";")[1:] link = MultiDict() # type: MultiDict[Union[str, URL]] for param in params: match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M) if match is None: # pragma: no cover # the check exists to suppress mypy error continue key, _, value, _ = match.groups() link.add(key, value) key = link.get("rel", url) # type: ignore[assignment] link.add("url", self.url.join(URL(url))) links.add(key, MultiDictProxy(link)) return MultiDictProxy(links) async def start(self, connection: "Connection") -> "ClientResponse": self._closed = False self._protocol = connection.protocol self._connection = connection with self._timer: while True: # read response try: protocol = self._protocol message, payload = await protocol.read() # type: ignore[union-attr] except http.HttpProcessingError as exc: raise ClientResponseError( self.request_info, self.history, status=exc.code, message=exc.message, headers=exc.headers, ) from exc if message.code < 100 or message.code > 199 or message.code == 101: break if self._continue is not None: set_result(self._continue, True) self._continue = None # payload eof handler payload.on_eof(self._response_eof) # response status self.version = message.version self.status = message.code self.reason = message.reason # headers self._headers = message.headers # type is CIMultiDictProxy self._raw_headers = 
message.raw_headers # type is Tuple[bytes, bytes] # payload self.content = payload # cookies for hdr in self.headers.getall(hdrs.SET_COOKIE, ()): try: self.cookies.load(hdr) except CookieError as exc: client_logger.warning("Can not load response cookies: %s", exc) return self def _response_eof(self) -> None: if self._closed: return if self._connection is not None: # websocket, protocol could be None because # connection could be detached if ( self._connection.protocol is not None and self._connection.protocol.upgraded ): return self._connection.release() self._connection = None self._closed = True self._cleanup_writer() @property def closed(self) -> bool: return self._closed def close(self) -> None: if not self._released: self._notify_content() if self._closed: return self._closed = True if self._loop is None or self._loop.is_closed(): return if self._connection is not None: self._connection.close() self._connection = None self._cleanup_writer() def release(self) -> Any: if not self._released: self._notify_content() if self._closed: return noop() self._closed = True if self._connection is not None: self._connection.release() self._connection = None self._cleanup_writer() return noop() @property def ok(self) -> bool: return 400 > self.status def raise_for_status(self) -> None: if not self.ok: # reason should always be not None for a started response assert self.reason is not None self.release() raise ClientResponseError( self.request_info, self.history, status=self.status, message=self.reason, headers=self.headers, ) def _cleanup_writer(self) -> None: if self._writer is not None: self._writer.cancel() self._writer = None self._session = None def _notify_content(self) -> None: content = self.content if content and content.exception() is None: content.set_exception(ClientConnectionError("Connection closed")) self._released = True async def wait_for_close(self) -> None: if self._writer is not None: try: await self._writer finally: self._writer = None self.release() 
async def read(self) -> bytes: if self._body is None: try: self._body = await self.content.read() for trace in self._traces: await trace.send_response_chunk_received( self.method, self.url, self._body ) except BaseException: self.close() raise elif self._released: raise ClientConnectionError("Connection closed") return self._body def get_encoding(self) -> str: ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() mimetype = helpers.parse_mimetype(ctype) encoding = mimetype.parameters.get("charset") if encoding: try: codecs.lookup(encoding) except LookupError: encoding = None if not encoding: if mimetype.type == "application" and ( mimetype.subtype == "json" or mimetype.subtype == "rdap" ): # RFC 7159 states that the default encoding is UTF-8. # RFC 7483 defines application/rdap+json encoding = "utf-8" elif self._body is None: raise RuntimeError( "Cannot guess the encoding of " "a not yet read body" ) else: encoding = chardet.detect(self._body)["encoding"] if not encoding: encoding = "utf-8" return encoding async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str: if self._body is None: await self.read() if encoding is None: encoding = self.get_encoding() return self._body.decode(encoding, errors=errors) # type: ignore[union-attr] async def json( self, *, encoding: Optional[str] = None, loads: JSONDecoder = DEFAULT_JSON_DECODER, content_type: Optional[str] = "application/json", ) -> Any: if self._body is None: await self.read() if content_type: ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() if not is_expected_content_type(ctype, content_type): raise ContentTypeError( self.request_info, self.history, message=( "Attempt to decode JSON with " "unexpected mimetype: %s" % ctype ), headers=self.headers, ) if encoding is None: encoding = self.get_encoding() return loads(self._body.decode(encoding)) # type: ignore[union-attr] async def __aenter__(self) -> "ClientResponse": return self async def __aexit__( self, exc_type: 
Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: # similar to _RequestContextManager, we do not need to check # for exceptions, response object can close connection # if state is broken self.release()
true
true
f70f0001824e96892d2caeddbb3fede6f0d4c87a
95
py
Python
file/admin.py
simonprast/bestconnect-backend
fa19998e72941454c8f99246e37ba3d1810f3010
[ "BSD-Source-Code" ]
null
null
null
file/admin.py
simonprast/bestconnect-backend
fa19998e72941454c8f99246e37ba3d1810f3010
[ "BSD-Source-Code" ]
2
2022-01-07T07:27:21.000Z
2022-01-07T08:34:54.000Z
file/admin.py
simonprast/django-entrypoint
b32377dea0f367be598bc851bc0fdacc2b74a496
[ "CNRI-Python" ]
null
null
null
from django.contrib import admin from .models import Document admin.site.register(Document)
13.571429
32
0.810526
from django.contrib import admin from .models import Document admin.site.register(Document)
true
true
f70f000a0c7c5ab0c91dcb60e9125ca81979a3b3
112
py
Python
python/reportlab/rml2pdf.py
trammell/test
ccac5e1dac947032e64d813e53cb961417a58d05
[ "Artistic-2.0" ]
null
null
null
python/reportlab/rml2pdf.py
trammell/test
ccac5e1dac947032e64d813e53cb961417a58d05
[ "Artistic-2.0" ]
null
null
null
python/reportlab/rml2pdf.py
trammell/test
ccac5e1dac947032e64d813e53cb961417a58d05
[ "Artistic-2.0" ]
null
null
null
#!/usr/bin/env python2.4 import sys from z3c.rml import rml2pdf for arg in sys.argv[1:]: rml2pdf.go(arg)
12.444444
27
0.6875
import sys from z3c.rml import rml2pdf for arg in sys.argv[1:]: rml2pdf.go(arg)
true
true
f70f001340f4fac68545626ee014b6ff5aef8fbd
228
py
Python
backend/cost_claimer/admin.py
Trevor-Mansfield/WalmartReceiptSplitter
db85eef9210f177eaa298969ba64ff916d4a9b27
[ "MIT" ]
null
null
null
backend/cost_claimer/admin.py
Trevor-Mansfield/WalmartReceiptSplitter
db85eef9210f177eaa298969ba64ff916d4a9b27
[ "MIT" ]
1
2020-11-01T01:54:42.000Z
2020-11-01T01:54:42.000Z
backend/cost_claimer/admin.py
Trevor-Mansfield/WalmartReceiptSplitter
db85eef9210f177eaa298969ba64ff916d4a9b27
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Receipt, Item, User, Cover, Payment admin.site.register(Receipt) admin.site.register(Item) admin.site.register(User) admin.site.register(Cover) admin.site.register(Payment)
22.8
55
0.802632
from django.contrib import admin from .models import Receipt, Item, User, Cover, Payment admin.site.register(Receipt) admin.site.register(Item) admin.site.register(User) admin.site.register(Cover) admin.site.register(Payment)
true
true
f70f00182433c7a822fe1637de1c29153c5dcf36
820
py
Python
docs/conf.py
hat-open/hat-stc
8edf9e23f9f2d8e581f844f96e7a38d02ef4f403
[ "Apache-2.0" ]
1
2022-02-01T13:42:25.000Z
2022-02-01T13:42:25.000Z
docs/conf.py
hat-open/hat-stc
8edf9e23f9f2d8e581f844f96e7a38d02ef4f403
[ "Apache-2.0" ]
null
null
null
docs/conf.py
hat-open/hat-stc
8edf9e23f9f2d8e581f844f96e7a38d02ef4f403
[ "Apache-2.0" ]
null
null
null
from pathlib import Path import subprocess root_path = Path(__file__).parent.parent.resolve() extensions = [ 'sphinx.ext.todo', 'sphinxcontrib.drawio', ] version = (root_path / 'VERSION').read_text().strip() project = 'hat-stc' copyright = '2020-2021, Hat Open AUTHORS' master_doc = 'index' html_theme = 'furo' html_static_path = ['static'] html_css_files = ['custom.css'] html_use_index = False html_show_sourcelink = False html_show_sphinx = False html_sidebars = {'**': ["sidebar/brand.html", "sidebar/scroll-start.html", "sidebar/navigation.html", "sidebar/scroll-end.html"]} todo_include_todos = True p = subprocess.run(['which', 'drawio'], capture_output=True, check=True) drawio_binary_path = p.stdout.decode('utf-8').strip()
25.625
72
0.664634
from pathlib import Path import subprocess root_path = Path(__file__).parent.parent.resolve() extensions = [ 'sphinx.ext.todo', 'sphinxcontrib.drawio', ] version = (root_path / 'VERSION').read_text().strip() project = 'hat-stc' copyright = '2020-2021, Hat Open AUTHORS' master_doc = 'index' html_theme = 'furo' html_static_path = ['static'] html_css_files = ['custom.css'] html_use_index = False html_show_sourcelink = False html_show_sphinx = False html_sidebars = {'**': ["sidebar/brand.html", "sidebar/scroll-start.html", "sidebar/navigation.html", "sidebar/scroll-end.html"]} todo_include_todos = True p = subprocess.run(['which', 'drawio'], capture_output=True, check=True) drawio_binary_path = p.stdout.decode('utf-8').strip()
true
true
f70f010c765e7d5b11c81cbf2a8a5f6563f97562
1,657
py
Python
pyrasterframes/python/geomesa_pyspark/types.py
jdenisgiguere/rasterframes
4226cde5223e67ab0e9f27c98bc1053d0a4b7a4e
[ "Apache-2.0" ]
null
null
null
pyrasterframes/python/geomesa_pyspark/types.py
jdenisgiguere/rasterframes
4226cde5223e67ab0e9f27c98bc1053d0a4b7a4e
[ "Apache-2.0" ]
null
null
null
pyrasterframes/python/geomesa_pyspark/types.py
jdenisgiguere/rasterframes
4226cde5223e67ab0e9f27c98bc1053d0a4b7a4e
[ "Apache-2.0" ]
null
null
null
"""*********************************************************************** This file was created by Astraea, Inc., 2018 from an excerpt of the original: Copyright (c) 2013-2018 Commonwealth Computer Research, Inc. All rights reserved. This program and the accompanying materials are made available under the terms of the Apache License, Version 2.0 which accompanies this distribution and is available at http://www.opensource.org/licenses/apache2.0.php. + ***********************************************************************/""" from pyspark.sql.types import UserDefinedType from pyspark.sql import Row from pyspark.sql.types import * from pyrasterframes.context import RFContext class GeometryUDT(UserDefinedType): @classmethod def sqlType(self): # return StructField("wkb", BinaryType(), False) return StructType([StructField("wkb", BinaryType(), True)]) @classmethod def module(cls): return 'geomesa_pyspark.types' @classmethod def scalaUDT(cls): return 'org.apache.spark.sql.jts.' + cls.__name__ def serialize(self, obj): if (obj is None): return None return Row(obj.toBytes) def deserialize(self, datum): return RFContext._jvm_mirror().generate_geometry(datum[0]) class PointUDT(GeometryUDT): pass class LineStringUDT(GeometryUDT): pass class PolygonUDT(GeometryUDT): pass class MultiPointUDT(GeometryUDT): pass class MultiLineStringUDT(GeometryUDT): pass class MultiPolygonUDT(GeometryUDT): pass class GeometryUDT(GeometryUDT): pass class GeometryCollectionUDT(GeometryUDT): pass
24.014493
77
0.652384
from pyspark.sql.types import UserDefinedType from pyspark.sql import Row from pyspark.sql.types import * from pyrasterframes.context import RFContext class GeometryUDT(UserDefinedType): @classmethod def sqlType(self): return StructType([StructField("wkb", BinaryType(), True)]) @classmethod def module(cls): return 'geomesa_pyspark.types' @classmethod def scalaUDT(cls): return 'org.apache.spark.sql.jts.' + cls.__name__ def serialize(self, obj): if (obj is None): return None return Row(obj.toBytes) def deserialize(self, datum): return RFContext._jvm_mirror().generate_geometry(datum[0]) class PointUDT(GeometryUDT): pass class LineStringUDT(GeometryUDT): pass class PolygonUDT(GeometryUDT): pass class MultiPointUDT(GeometryUDT): pass class MultiLineStringUDT(GeometryUDT): pass class MultiPolygonUDT(GeometryUDT): pass class GeometryUDT(GeometryUDT): pass class GeometryCollectionUDT(GeometryUDT): pass
true
true
f70f016add7378486fc5701457b5b6ce3da7e9e2
3,181
py
Python
moog/action_spaces/joystick.py
juanpablordz/moog.github.io
d7995d3563492378d0877ce8d16f5ca9a8031794
[ "Apache-2.0", "MIT" ]
22
2021-02-26T18:19:35.000Z
2022-03-05T19:01:00.000Z
moog/action_spaces/joystick.py
juanpablordz/moog.github.io
d7995d3563492378d0877ce8d16f5ca9a8031794
[ "Apache-2.0", "MIT" ]
1
2021-04-01T06:15:02.000Z
2021-04-23T13:14:12.000Z
moog/action_spaces/joystick.py
juanpablordz/moog.github.io
d7995d3563492378d0877ce8d16f5ca9a8031794
[ "Apache-2.0", "MIT" ]
2
2021-05-02T02:20:39.000Z
2021-05-06T16:24:35.000Z
"""Joystick action space for controlling agent avatars.""" from . import abstract_action_space from dm_env import specs import numpy as np class Joystick(abstract_action_space.AbstractActionSpace): """Joystick action space.""" def __init__(self, scaling_factor=1., action_layers='agent', constrained_lr=False, control_velocity=False, momentum=0.): """Constructor. Args: scaling_factor: Scalar. Scaling factor multiplied to the action. agent_layer: String or iterable of strings. Elements (or itself if string) must be keys in the environment state. All sprites in these layers will be acted upon by this action space. control_velocity: Bool. Whether to control velocity (True) or force (False). constrained_lr: Bool. If True, joystick is contrained to actions parallel to the x-axis, by zeroing out the y-axis (component 1) of the action. momentum: Float in [0, 1]. Discount factor for previous action. This should be zero if control_velocity is False, because imparting forces automatically gives momentum to the agent(s) being controlled. If control_velocity is True, setting this greater than zero gives the controlled agent(s) momentum. However, the velocity is clipped at scaling_factor, so the agent only retains momentum when stopping or changing direction and does not accelerate. """ self._scaling_factor = scaling_factor if not isinstance(action_layers, (list, tuple)): action_layers = (action_layers,) self._action_layers = action_layers self._constrained_lr = constrained_lr self._control_velocity = control_velocity self._momentum = momentum self._action_spec = specs.BoundedArray( shape=(2,), dtype=np.float32, minimum=-1, maximum=1) def step(self, state, action): """Apply action to environment state. Args: state: OrderedDict. Environment state. action: Numpy float array of size (2) in [-1, 1]. Force to apply. """ if self._constrained_lr: action[1] = 0. 
self._action *= self._momentum self._action += self._scaling_factor * action self._action = np.clip( self._action, -self._scaling_factor, self._scaling_factor) for action_layer in self._action_layers: for sprite in state[action_layer]: if self._control_velocity: sprite.velocity = self._action / sprite.mass else: sprite.velocity += self._action / sprite.mass def reset(self, state): """Reset action space at start of new episode.""" del state self._action = np.zeros(2) def random_action(self): """Return randomly sampled action.""" return np.random.uniform(-1., 1., size=(2,)) def action_spec(self): return self._action_spec
40.782051
80
0.617102
from . import abstract_action_space from dm_env import specs import numpy as np class Joystick(abstract_action_space.AbstractActionSpace): def __init__(self, scaling_factor=1., action_layers='agent', constrained_lr=False, control_velocity=False, momentum=0.): self._scaling_factor = scaling_factor if not isinstance(action_layers, (list, tuple)): action_layers = (action_layers,) self._action_layers = action_layers self._constrained_lr = constrained_lr self._control_velocity = control_velocity self._momentum = momentum self._action_spec = specs.BoundedArray( shape=(2,), dtype=np.float32, minimum=-1, maximum=1) def step(self, state, action): if self._constrained_lr: action[1] = 0. self._action *= self._momentum self._action += self._scaling_factor * action self._action = np.clip( self._action, -self._scaling_factor, self._scaling_factor) for action_layer in self._action_layers: for sprite in state[action_layer]: if self._control_velocity: sprite.velocity = self._action / sprite.mass else: sprite.velocity += self._action / sprite.mass def reset(self, state): del state self._action = np.zeros(2) def random_action(self): return np.random.uniform(-1., 1., size=(2,)) def action_spec(self): return self._action_spec
true
true
f70f0196bf302980bb4ad2f3eb39b8628d0c4555
5,380
py
Python
tests/test_data.py
pedromachados/pygeocodio
5ff11edd46e749a9236078f9a207cdf501b08b58
[ "BSD-3-Clause" ]
null
null
null
tests/test_data.py
pedromachados/pygeocodio
5ff11edd46e749a9236078f9a207cdf501b08b58
[ "BSD-3-Clause" ]
null
null
null
tests/test_data.py
pedromachados/pygeocodio
5ff11edd46e749a9236078f9a207cdf501b08b58
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_geocodio ---------------------------------- Tests for `geocodio.data` module. """ import json import os import unittest from geocodio.data import Address from geocodio.data import Location from geocodio.data import LocationCollection class TestDataTypes(unittest.TestCase): def setUp(self): """ Read the test data from JSON files which are modified from actual service response only for formatting. This makes this file much easier to read, the data easier to inspect, and ensures that the data matches what the service actually replies with. """ fixtures = os.path.join(os.path.dirname(os.path.abspath(__file__)), "response/") with open(os.path.join(fixtures, "single.json"), "r") as single_json: self.single_response = json.loads(single_json.read()) with open(os.path.join(fixtures, "batch.json"), "r") as batch_json: self.batch_response = json.loads(batch_json.read()) with open(os.path.join(fixtures, "address.json"), "r") as address_json: self.address_response = json.loads(address_json.read()) with open(os.path.join(fixtures, "missing_results.json"), "r") as missing_json: self.missing_results = json.loads(missing_json.read()) with open( os.path.join(fixtures, "batch_reverse.json"), "r" ) as batch_reverse_json: self.batch_reverse_response = json.loads(batch_reverse_json.read()) def test_address_coords(self): """Ensure Address.coords property returns None when no location""" x = Address(self.address_response) self.assertEqual(None, x.coords) def test_address_accuracy(self): """Ensure Address.accuracy property returns None when no location""" x = Address(self.address_response) self.assertEqual(None, x.accuracy) def test_location_coords(self): """Ensure Location.coords property returns a suitable tuple""" x = Location(self.single_response) self.assertEqual(x.coords, (37.554895702703, -77.457561054054)) # Do the same with the order changed x = Location(self.single_response, order="lng") self.assertEqual(x.coords, 
(-77.457561054054, 37.554895702703)) def test_location_results_missing(self): """Ensure empty results are processed as a missing address""" bad_results = Location(self.missing_results) self.assertEqual(bad_results.coords, None) def test_collection(self): """Ensure that the LocationCollection stores as a list of Locations""" self.assertTrue(isinstance(self.batch_response, dict)) locations = LocationCollection(self.batch_response["results"]) self.assertTrue(isinstance(locations[0], Location)) locations = LocationCollection(self.batch_reverse_response["results"]) self.assertTrue(isinstance(locations[0], Location)) def test_collection_coords(self): """Ensure the coords property returns a list of suitable tuples""" locations = LocationCollection(self.batch_response["results"]) self.assertEqual( locations.coords, [ (37.560890255102, -77.477400571429), (37.554895702703, -77.457561054054), None, ], ) # Do the same with the order changed locations = LocationCollection(self.batch_response["results"], order="lng") self.assertEqual( locations.coords, [ (-77.477400571429, 37.560890255102), (-77.457561054054, 37.554895702703), None, ], ) def test_collection_addresses(self): """Ensure that formatted addresses are returned""" locations = LocationCollection(self.batch_response["results"]) self.assertEqual( locations.formatted_addresses, [ "3101 Patterson Ave, Richmond VA, 23221", "1657 W Broad St, Richmond VA, 23220", "", ], ) def test_collection_get(self): """Ensure 'get' performs a key based lookup""" locations = LocationCollection(self.batch_response["results"]) self.assertEqual( locations.get("3101 patterson ave, richmond, va").coords, (37.560890255102, -77.477400571429), ) # Case sensitive on the specific query self.assertRaises(KeyError, locations.get, "3101 Patterson Ave, richmond, va") locations = LocationCollection(self.batch_reverse_response["results"]) # The rendred query string value is acceptable self.assertEqual( locations.get("37.538758,-77.433594").coords, 
(37.538758, -77.433594) ) # A tuple of floats is acceptable self.assertEqual( locations.get((37.538758, -77.433594)).coords, (37.538758, -77.433594) ) # If it can be coerced to a float it is acceptable self.assertEqual( locations.get(("37.538758", "-77.433594")).coords, (37.538758, -77.433594) ) # This is unacceptable self.assertRaises(ValueError, locations.get, ("37.538758 N", "-77.433594 W")) if __name__ == "__main__": unittest.main()
37.361111
88
0.631227
import json import os import unittest from geocodio.data import Address from geocodio.data import Location from geocodio.data import LocationCollection class TestDataTypes(unittest.TestCase): def setUp(self): fixtures = os.path.join(os.path.dirname(os.path.abspath(__file__)), "response/") with open(os.path.join(fixtures, "single.json"), "r") as single_json: self.single_response = json.loads(single_json.read()) with open(os.path.join(fixtures, "batch.json"), "r") as batch_json: self.batch_response = json.loads(batch_json.read()) with open(os.path.join(fixtures, "address.json"), "r") as address_json: self.address_response = json.loads(address_json.read()) with open(os.path.join(fixtures, "missing_results.json"), "r") as missing_json: self.missing_results = json.loads(missing_json.read()) with open( os.path.join(fixtures, "batch_reverse.json"), "r" ) as batch_reverse_json: self.batch_reverse_response = json.loads(batch_reverse_json.read()) def test_address_coords(self): x = Address(self.address_response) self.assertEqual(None, x.coords) def test_address_accuracy(self): x = Address(self.address_response) self.assertEqual(None, x.accuracy) def test_location_coords(self): x = Location(self.single_response) self.assertEqual(x.coords, (37.554895702703, -77.457561054054)) x = Location(self.single_response, order="lng") self.assertEqual(x.coords, (-77.457561054054, 37.554895702703)) def test_location_results_missing(self): bad_results = Location(self.missing_results) self.assertEqual(bad_results.coords, None) def test_collection(self): self.assertTrue(isinstance(self.batch_response, dict)) locations = LocationCollection(self.batch_response["results"]) self.assertTrue(isinstance(locations[0], Location)) locations = LocationCollection(self.batch_reverse_response["results"]) self.assertTrue(isinstance(locations[0], Location)) def test_collection_coords(self): locations = LocationCollection(self.batch_response["results"]) self.assertEqual( locations.coords, [ (37.560890255102, 
-77.477400571429), (37.554895702703, -77.457561054054), None, ], ) locations = LocationCollection(self.batch_response["results"], order="lng") self.assertEqual( locations.coords, [ (-77.477400571429, 37.560890255102), (-77.457561054054, 37.554895702703), None, ], ) def test_collection_addresses(self): locations = LocationCollection(self.batch_response["results"]) self.assertEqual( locations.formatted_addresses, [ "3101 Patterson Ave, Richmond VA, 23221", "1657 W Broad St, Richmond VA, 23220", "", ], ) def test_collection_get(self): locations = LocationCollection(self.batch_response["results"]) self.assertEqual( locations.get("3101 patterson ave, richmond, va").coords, (37.560890255102, -77.477400571429), ) self.assertRaises(KeyError, locations.get, "3101 Patterson Ave, richmond, va") locations = LocationCollection(self.batch_reverse_response["results"]) self.assertEqual( locations.get("37.538758,-77.433594").coords, (37.538758, -77.433594) ) self.assertEqual( locations.get((37.538758, -77.433594)).coords, (37.538758, -77.433594) ) self.assertEqual( locations.get(("37.538758", "-77.433594")).coords, (37.538758, -77.433594) ) self.assertRaises(ValueError, locations.get, ("37.538758 N", "-77.433594 W")) if __name__ == "__main__": unittest.main()
true
true
f70f020b7929b4915c2fffab4ea28ab2f15b1866
11,795
py
Python
docs/source/conf.py
nkeilbart/aiida-nwchem
d5199b8a94fc2ec8ed30d3370ceac3826312b757
[ "MIT" ]
1
2019-12-12T15:54:58.000Z
2019-12-12T15:54:58.000Z
docs/source/conf.py
nkeilbart/aiida-nwchem
d5199b8a94fc2ec8ed30d3370ceac3826312b757
[ "MIT" ]
10
2017-11-16T15:53:39.000Z
2021-12-07T16:34:18.000Z
docs/source/conf.py
nkeilbart/aiida-nwchem
d5199b8a94fc2ec8ed30d3370ceac3826312b757
[ "MIT" ]
6
2018-08-14T13:26:30.000Z
2021-12-31T14:37:31.000Z
# -*- coding: utf-8 -*- # # Sphinx configuration # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import time import aiida_nwchem from aiida.manage.configuration import load_documentation_profile # -- AiiDA-related setup -------------------------------------------------- # Load the dummy profile even if we are running locally, this way the documentation will succeed even if the current # default profile of the AiiDA installation does not use a Django backend. load_documentation_profile() # If we are not on READTHEDOCS load the Sphinx theme manually if not os.environ.get('READTHEDOCS', None): import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # -- General configuration ------------------------------------------------ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.5' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinxcontrib.contentui', 'aiida.sphinxext', 'sphinxcontrib.napoleon', ] intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'aiida': ('https://aiida-core.readthedocs.io/en/latest', None), } nitpick_ignore = [('py:obj', 'module')] # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. #~ master_doc = 'index' master_doc = 'index' # General information about the project. project = u'aiida-nwchem' copyright_first_year = "2021" copyright_owners = "The AiiDA Team" current_year = str(time.localtime().tm_year) copyright_year_string = current_year if current_year == copyright_first_year else "{}-{}".format( copyright_first_year, current_year) # pylint: disable=redefined-builtin copyright = u'{}, {}. All rights reserved'.format(copyright_year_string, copyright_owners) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = aiida_nwchem.__version__ # The short X.Y version. version = '.'.join(release.split('.')[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # exclude_patterns = ['doc.rst'] #~ exclude_patterns = ['index.rst'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. #~ html_theme = 'basicstrap' ## SET BELOW # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'display_version': True, } # Add any paths that contain custom themes here, relative to this directory. #~ html_theme_path = ["."] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "images/.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = "images/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. 
#html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #~ html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. html_use_opensearch = 'http://aiida-nwchem.readthedocs.io' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. 
#html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'aiida-nwchem-doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). # latex_documents = [ # ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # NOTE: Diabling API docs # def run_apidoc(_): # """Runs sphinx-apidoc when building the documentation. # Needs to be done in conf.py in order to include the APIdoc in the # build on readthedocs. 
# See also https://github.com/rtfd/readthedocs.org/issues/1139 # """ # source_dir = os.path.abspath(os.path.dirname(__file__)) # apidoc_dir = os.path.join(source_dir, 'apidoc') # package_dir = os.path.join(source_dir, os.pardir, os.pardir, 'aiida_nwchem') # # In #1139, they suggest the route below, but this ended up # # calling sphinx-build, not sphinx-apidoc # #from sphinx.apidoc import main # #main([None, '-e', '-o', apidoc_dir, package_dir, '--force']) # import subprocess # cmd_path = 'sphinx-apidoc' # if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv # # If we are, assemble the path manually # cmd_path = os.path.abspath( # os.path.join(sys.prefix, 'bin', 'sphinx-apidoc')) # options = [ # '-o', # apidoc_dir, # package_dir, # '--private', # '--force', # '--no-toc', # ] # # See https://stackoverflow.com/a/30144019 # env = os.environ.copy() # env["SPHINX_APIDOC_OPTIONS"] = 'members,special-members,private-members,undoc-members,show-inheritance' # subprocess.check_call([cmd_path] + options, env=env) # def setup(app): # app.connect('builder-inited', run_apidoc) # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). # man_pages = [ # ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) # texinfo_documents = [ # ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
#texinfo_no_detailmenu = False # Warnings to ignore when using the -n (nitpicky) option # We should ignore any python built-in exception, for instance nitpick_ignore = []
33.039216
116
0.701145
import os import sys import time import aiida_nwchem from aiida.manage.configuration import load_documentation_profile load_documentation_profile() if not os.environ.get('READTHEDOCS', None): import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] needs_sphinx = '1.5' extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinxcontrib.contentui', 'aiida.sphinxext', 'sphinxcontrib.napoleon', ] intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'aiida': ('https://aiida-core.readthedocs.io/en/latest', None), } nitpick_ignore = [('py:obj', 'module')] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'aiida-nwchem' copyright_first_year = "2021" copyright_owners = "The AiiDA Team" current_year = str(time.localtime().tm_year) copyright_year_string = current_year if current_year == copyright_first_year else "{}-{}".format( copyright_first_year, current_year) copyright = u'{}, {}. All rights reserved'.format(copyright_year_string, copyright_owners) # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = aiida_nwchem.__version__ # The short X.Y version. version = '.'.join(release.split('.')[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# exclude_patterns = ['doc.rst'] #~ exclude_patterns = ['index.rst'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. #~ html_theme = 'basicstrap' ## SET BELOW # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'display_version': True, } # Add any paths that contain custom themes here, relative to this directory. #~ html_theme_path = ["."] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "images/.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
# html_favicon = "images/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #~ html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. html_use_opensearch = 'http://aiida-nwchem.readthedocs.io' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'aiida-nwchem-doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). # latex_documents = [ # ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # NOTE: Diabling API docs # def run_apidoc(_): # """Runs sphinx-apidoc when building the documentation. # Needs to be done in conf.py in order to include the APIdoc in the # build on readthedocs. 
# See also https://github.com/rtfd/readthedocs.org/issues/1139 # """ # source_dir = os.path.abspath(os.path.dirname(__file__)) # apidoc_dir = os.path.join(source_dir, 'apidoc') # package_dir = os.path.join(source_dir, os.pardir, os.pardir, 'aiida_nwchem') # # In #1139, they suggest the route below, but this ended up # # calling sphinx-build, not sphinx-apidoc # #from sphinx.apidoc import main # #main([None, '-e', '-o', apidoc_dir, package_dir, '--force']) # import subprocess # cmd_path = 'sphinx-apidoc' # if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv # # If we are, assemble the path manually # cmd_path = os.path.abspath( # os.path.join(sys.prefix, 'bin', 'sphinx-apidoc')) # options = [ # '-o', # apidoc_dir, # package_dir, # '--private', # '--force', # '--no-toc', # ] # # See https://stackoverflow.com/a/30144019 # env = os.environ.copy() # env["SPHINX_APIDOC_OPTIONS"] = 'members,special-members,private-members,undoc-members,show-inheritance' # subprocess.check_call([cmd_path] + options, env=env) # def setup(app): # app.connect('builder-inited', run_apidoc) # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). # man_pages = [ # ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) # texinfo_documents = [ # ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. nitpick_ignore = []
true
true
f70f028521e5de5b0a466d4f2b82c44f85164074
769
py
Python
final_project/server.py
jozuk/xzceb-flask_eng_fr
c5c89f9662c112698582a798560a97efec98993b
[ "Apache-2.0" ]
null
null
null
final_project/server.py
jozuk/xzceb-flask_eng_fr
c5c89f9662c112698582a798560a97efec98993b
[ "Apache-2.0" ]
null
null
null
final_project/server.py
jozuk/xzceb-flask_eng_fr
c5c89f9662c112698582a798560a97efec98993b
[ "Apache-2.0" ]
null
null
null
from machinetranslation import translator from flask import Flask, render_template, request import json app = Flask("Web Translator") @app.route("/englishToFrench") def englishToFrench(): textToTranslate = request.args.get('textToTranslate') translation = translator.englishToFrench(englishText=textToTranslate) return f"{translation} (Translated text to French)" @app.route("/frenchToEnglish") def frenchToEnglish(): textToTranslate = request.args.get('textToTranslate') translation = translator.frenchToEnglish(frenchText=textToTranslate) return f"{translation} (Translated text to English)" @app.route("/") def renderIndexPage(): return render_template('index.html') if __name__ == "__main__": app.run(host="0.0.0.0", port=8080)
30.76
73
0.755527
from machinetranslation import translator from flask import Flask, render_template, request import json app = Flask("Web Translator") @app.route("/englishToFrench") def englishToFrench(): textToTranslate = request.args.get('textToTranslate') translation = translator.englishToFrench(englishText=textToTranslate) return f"{translation} (Translated text to French)" @app.route("/frenchToEnglish") def frenchToEnglish(): textToTranslate = request.args.get('textToTranslate') translation = translator.frenchToEnglish(frenchText=textToTranslate) return f"{translation} (Translated text to English)" @app.route("/") def renderIndexPage(): return render_template('index.html') if __name__ == "__main__": app.run(host="0.0.0.0", port=8080)
true
true
f70f035e63d0185dcc7a6309281b612f988cd503
3,602
py
Python
bindings/python/ensmallen/datasets/string/lachnospiraceaebacteriumnk4a144.py
AnacletoLAB/ensmallen_graph
b2c1b18fb1e5801712852bcc239f239e03076f09
[ "MIT" ]
5
2021-02-17T00:44:45.000Z
2021-08-09T16:41:47.000Z
bindings/python/ensmallen/datasets/string/lachnospiraceaebacteriumnk4a144.py
AnacletoLAB/ensmallen_graph
b2c1b18fb1e5801712852bcc239f239e03076f09
[ "MIT" ]
18
2021-01-07T16:47:39.000Z
2021-08-12T21:51:32.000Z
bindings/python/ensmallen/datasets/string/lachnospiraceaebacteriumnk4a144.py
AnacletoLAB/ensmallen
b2c1b18fb1e5801712852bcc239f239e03076f09
[ "MIT" ]
3
2021-01-14T02:20:59.000Z
2021-08-04T19:09:52.000Z
""" This file offers the methods to automatically retrieve the graph Lachnospiraceae bacterium NK4A144. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def LachnospiraceaeBacteriumNk4a144( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Lachnospiraceae bacterium NK4A144 graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. 
version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.0 - homology.v11.5 - physical.links.v11.0 - physical.links.v11.5 - links.v11.0 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Lachnospiraceae bacterium NK4A144 graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="LachnospiraceaeBacteriumNk4a144", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
33.351852
223
0.681566
from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph def LachnospiraceaeBacteriumNk4a144( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: return AutomaticallyRetrievedGraph( graph_name="LachnospiraceaeBacteriumNk4a144", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
true
true
f70f03937dd486d609695c5a1f3d24ea0a9fafa6
2,141
py
Python
tools/Vitis-AI-Library/xmodel_image/models/efficientNet-edgetpu-S/efficientNet-edgetpu-S.py
bluetiger9/Vitis-AI
a7728733bbcfc292ff3afa46b9c8b03e94b740b3
[ "Apache-2.0" ]
848
2019-12-03T00:16:17.000Z
2022-03-31T22:53:17.000Z
tools/Vitis-AI-Library/xmodel_image/models/efficientNet-edgetpu-S/efficientNet-edgetpu-S.py
wangyifan778/Vitis-AI
f61061eef7550d98bf02a171604c9a9f283a7c47
[ "Apache-2.0" ]
656
2019-12-03T00:48:46.000Z
2022-03-31T18:41:54.000Z
tools/Vitis-AI-Library/xmodel_image/models/efficientNet-edgetpu-S/efficientNet-edgetpu-S.py
wangyifan778/Vitis-AI
f61061eef7550d98bf02a171604c9a9f283a7c47
[ "Apache-2.0" ]
506
2019-12-03T00:46:26.000Z
2022-03-30T10:34:56.000Z
# # Copyright 2019 Xilinx Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import xir_extra_ops def jit(graph): graph.set_attr("xmodel_preprocessor", "libxmodel_preprocessor_efficientnet.so.1") graph.set_attr("need_preprocess", True) graph.set_attr("mean", [127.0, 127.0, 127.0]) graph.set_attr("scale", [0.0078125, 0.0078125, 0.0078125]) graph.set_attr("is_rgb_input", True) labels_list = open(os.path.join( graph.get_attr("__dir__"), "word_list.txt"), "r").read().splitlines() labels_list_1001 = ["background,"] labels_list_1001.extend(labels_list) graph.set_attr("labels", labels_list_1001) xir_extra_ops.set_postprocessor( graph, "libxmodel_postprocessor_classification.so.1", {"input": ["my_topk"]}) graph.remove_op(graph.get_op( "logits_fix")) conf_op = graph.get_op("efficientnet-edgetpu-S/model/head/dense/BiasAdd/aquant_logits") graph.create_op("aquant_logits_softmax", "softmax", attrs={"axis": -1}, input_ops={"input": [conf_op]}, subgraph=graph.get_leaf_subgraph(conf_op)) graph.create_op("my_topk", "topk", attrs={"K": 5}, input_ops={"input": [graph.get_op("aquant_logits_softmax")]}, subgraph=graph.get_leaf_subgraph(graph.get_op("aquant_logits_softmax"))) #graph.save_as_image(os.path.join(graph.get_attr("__dir__"), graph.get_attr("__basename__") + ".jit.svg"), "svg") #graph.serialize(os.path.join(graph.get_attr("__dir__"), graph.get_attr("__basename__") + ".jit.xmodel")) print(graph.get_name())
46.543478
117
0.685194
import os import xir_extra_ops def jit(graph): graph.set_attr("xmodel_preprocessor", "libxmodel_preprocessor_efficientnet.so.1") graph.set_attr("need_preprocess", True) graph.set_attr("mean", [127.0, 127.0, 127.0]) graph.set_attr("scale", [0.0078125, 0.0078125, 0.0078125]) graph.set_attr("is_rgb_input", True) labels_list = open(os.path.join( graph.get_attr("__dir__"), "word_list.txt"), "r").read().splitlines() labels_list_1001 = ["background,"] labels_list_1001.extend(labels_list) graph.set_attr("labels", labels_list_1001) xir_extra_ops.set_postprocessor( graph, "libxmodel_postprocessor_classification.so.1", {"input": ["my_topk"]}) graph.remove_op(graph.get_op( "logits_fix")) conf_op = graph.get_op("efficientnet-edgetpu-S/model/head/dense/BiasAdd/aquant_logits") graph.create_op("aquant_logits_softmax", "softmax", attrs={"axis": -1}, input_ops={"input": [conf_op]}, subgraph=graph.get_leaf_subgraph(conf_op)) graph.create_op("my_topk", "topk", attrs={"K": 5}, input_ops={"input": [graph.get_op("aquant_logits_softmax")]}, subgraph=graph.get_leaf_subgraph(graph.get_op("aquant_logits_softmax"))) print(graph.get_name())
true
true
f70f040ea176c1df0334a1bc712c28b2bf4eae04
845
py
Python
plugins/recreations/plugins/nonebot_plugin_picsearcher/formdata.py
liangzimiao/miyubot
c2788712255e39348c8980c8ace2f6f75fb6621c
[ "Apache-2.0" ]
null
null
null
plugins/recreations/plugins/nonebot_plugin_picsearcher/formdata.py
liangzimiao/miyubot
c2788712255e39348c8980c8ace2f6f75fb6621c
[ "Apache-2.0" ]
null
null
null
plugins/recreations/plugins/nonebot_plugin_picsearcher/formdata.py
liangzimiao/miyubot
c2788712255e39348c8980c8ace2f6f75fb6621c
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from typing import Any, Iterable, List, Optional from aiohttp import FormData as _FormData import aiohttp.multipart as multipart class FormData(_FormData): def __init__( self, fields: Iterable[Any] = (), quote_fields: bool = True, charset: Optional[str] = None, boundary: Optional[str] = None ) -> None: self._writer = multipart.MultipartWriter("form-data", boundary=boundary) self._fields = [] # type: List[Any] self._is_multipart = False self._is_processed = False self._quote_fields = quote_fields self._charset = charset if isinstance(fields, dict): fields = list(fields.items()) elif not isinstance(fields, (list, tuple)): fields = (fields,) self.add_fields(*fields)
30.178571
80
0.616568
from typing import Any, Iterable, List, Optional from aiohttp import FormData as _FormData import aiohttp.multipart as multipart class FormData(_FormData): def __init__( self, fields: Iterable[Any] = (), quote_fields: bool = True, charset: Optional[str] = None, boundary: Optional[str] = None ) -> None: self._writer = multipart.MultipartWriter("form-data", boundary=boundary) self._fields = [] self._is_multipart = False self._is_processed = False self._quote_fields = quote_fields self._charset = charset if isinstance(fields, dict): fields = list(fields.items()) elif not isinstance(fields, (list, tuple)): fields = (fields,) self.add_fields(*fields)
true
true
f70f053b6cba750dfb3d76ebc766809958971f63
10,714
py
Python
site_scons/podd_util.py
leadmocha/analyzer
ab26d755dbe816975e4165479c9fbefc4c4ca069
[ "BSD-3-Clause" ]
null
null
null
site_scons/podd_util.py
leadmocha/analyzer
ab26d755dbe816975e4165479c9fbefc4c4ca069
[ "BSD-3-Clause" ]
null
null
null
site_scons/podd_util.py
leadmocha/analyzer
ab26d755dbe816975e4165479c9fbefc4c4ca069
[ "BSD-3-Clause" ]
null
null
null
# podd_utils.py # Utility functions for Podd SCons build import os from SCons.Action import ActionFactory from SCons.Script.SConscript import SConsEnvironment SConsEnvironment.OSCommand = ActionFactory(os.system, lambda command : 'os.system("%s")' % command) import SCons.Util def list_to_path(lst): result = '' if SCons.Util.is_List(lst): for element in lst: result += str(element) if len(lst) > 1 and element != lst[-1]: result += ':' else: result = lst return result def InstallWithRPATH(env, dest, files, rpath): obj = env.Install(dest, files) if env['PLATFORM'] == 'posix': rpathstr = list_to_path(rpath) if env.WhereIs('patchelf'): if env.subst('$ADD_INSTALL_RPATH') and rpathstr: patch_cmd = "patchelf --force-rpath --set-rpath '"+rpathstr+"' " else: patch_cmd = "patchelf --remove-rpath " for i in obj: env.AddPostAction(i, env.OSCommand(patch_cmd+str(i))) elif env.WhereIs('chrpath') \ and not (env.subst('$ADD_INSTALL_RPATH') and rpathstr): # chrpath can only reliably delete RPATH for i in obj: env.AddPostAction(i, env.OSCommand('chrpath -d '+str(i))) else: print('WARNING: patchelf not found, cannot set RPATH') elif env['PLATFORM'] == 'darwin': for i in obj: env.AddPostAction(i, env.OSCommand("site_scons/clear_macos_rpath.sh "+str(i))) if env.subst('$ADD_INSTALL_RPATH') and rpath: tool_cmd = "install_name_tool" # rpath could contain empty strings or be [''] add_to_cmd = '' for rp in rpath: if rp: add_to_cmd += " -add_rpath "+str(rp) if add_to_cmd: tool_cmd += add_to_cmd+" "+str(i) env.AddPostAction(i, env.OSCommand(tool_cmd)) return obj def create_uninstall_target(env, path, is_glob = False): if is_glob: all_files = env.Glob(path,strings=True) for filei in all_files: print('Delete(%s)' % filei) env.Alias("uninstall", os.remove(filei)) else: print('Delete(%s)' % path) if os.path.exists(path): env.Alias("uninstall", os.remove(path)) import SCons.Script import re def build_library(env, sotarget, src, extrahdrs = [], extradicthdrs = [], dictname = None, useenv = True, 
versioned = False, install_rpath = []): """ Build shared library lib<sotarget> of ROOT classes from given sources "src" (space separated string). For each .cxx source file, a .h header file is expected. A ROOT dictionary is generated from the headers and compiled into the library. The otional "extradicthdrs" headers will be added to the list of headers used for generating the dictionary. If the dictionary source file should have a different name than <sotarget>Dict.cxx, specify that name as "dictname". The dictionary will expect a link definition file <dictname>_LinkDef.h. All sources and corresponding headers and the "extradicthdrs" will be installed in the source and include file installation locations. If any additional headers should be installed, specify them in "extrahdrs". "extrahdrs" will not passed to the ROOT dictionary generator, which is useful e.g. for non-ROOT class standalone headers such as local utility classes or global definitions. If "useenv" is True, the library will link against libraries in $LIB and search for libraires in $LIBPATH. Otherwise, no external libraries will be linked. Other environment variables (compiler flags, include directives, RPATH etc.) are not affected by this flag "install_rpath" is a list of directories that will be set on the installed library if RPATH installation is globally enabled with $ADD_INSTALL_RPATH. Literal "$" signs in any of these list elements (e.g. $ORIGIN) need to be given as "$$" (e.g. $$ORIGIN). 
""" # Current location relative to top directory thisdir_fullpath = env.Dir('.').path thisdir = os.path.basename(os.path.normpath(thisdir_fullpath)) if not dictname: dictname = sotarget if useenv: linklibs = env['LIBS'] linklibpath = env['LIBPATH'] else: linklibs = [''] linklibpath = [''] # Sources and headers srclist = env.Split(src) hdr = re.sub(r'\.cxx','.h',src) installhdrs = env.Split(hdr) installhdrs.extend(extradicthdrs) installhdrs.extend(extrahdrs) dicthdrs = env.Split(hdr) dicthdrs.extend(extradicthdrs) dicthdrs.append(dictname+'_LinkDef.h') # ROOT dictionary for this library rootdict = dictname+'Dict.cxx' libbase = env.subst('$SHLIBPREFIX')+sotarget thedict = env.RootCint(rootdict, dicthdrs, PCMNAME=libbase) # Versioned shared library symlink names libname_so = libbase+env.subst('$SHLIBSUFFIX') if env['PLATFORM'] == 'posix': # Linux if versioned: libname_soname = libname_so+'.'+env.subst('$SOVERSION') libname_versioned = libname_so+'.'+env.subst('$VERSION') shlibsuffix = env.subst('$SHLIBSUFFIX')+'.'+env.subst('$VERSION') else: libname_soname = libname_so shlibsuffix = env.subst('$SHLIBSUFFIX') shlinkflags = ['-Wl,-soname='+libname_soname] elif env['PLATFORM'] == 'darwin': # macOS if versioned: libname_soname = libbase+'.'+env.subst('$SOVERSION')+env.subst('$SHLIBSUFFIX') libname_versioned = libbase+'.'+env.subst('$VERSION')+env.subst('$SHLIBSUFFIX') shlibsuffix = '.'+env.subst('$VERSION')+env.subst('$SHLIBSUFFIX') else: libname_soname = libname_so shlibsuffix = env.subst('$SHLIBSUFFIX') shlinkflags = ['-Wl,-install_name,'+'@rpath/'+libname_soname] if versioned: shlinkflags.append(['-Wl,-compatibility_version,'+env.subst('$SOVERSION'), '-Wl,-current_version,'+env.subst('$VERSION')]) try: for rp in env['RPATH']: shlinkflags.append('-Wl,-rpath,'+rp) except KeyError: pass else: print('build_library: Error: unsupported platform') Exit(3) # Build the library thislib = env.SharedLibrary(target = sotarget, source = srclist+[rootdict], LIBS = linklibs, LIBPATH = 
linklibpath, SHLIBSUFFIX = shlibsuffix, SONAME = libname_soname, SHLINKFLAGS = env['SHLINKFLAGS']+shlinkflags) if versioned: # Create symlinks env.SymLink(libname_soname,thislib) env.SymLink(libname_so,libname_soname) # Installation install_prefix = env.subst('$INSTALLDIR') lib_dir = os.path.join(install_prefix,env.subst('$LIBSUBDIR')) #bin_dir = os.path.join(install_prefix,'bin') inc_dir = os.path.join(install_prefix,'include') src_dir = os.path.join(install_prefix,'src',thisdir) InstallWithRPATH(env,lib_dir,thislib,install_rpath) # Install PCM file generated by RootCint, if any if len(thedict) > 1: env.Install(lib_dir,thedict[1]) env.Install(inc_dir,installhdrs) env.Install(src_dir,srclist) libname_so_installpath = os.path.join(lib_dir,libname_so) if versioned: libname_soname_installpath = os.path.join(lib_dir,libname_soname) libname_versioned_installpath = os.path.join(lib_dir,libname_versioned) #Kludge for SCons's inability to install symlinks env.SymLink(libname_soname_installpath,libname_versioned_installpath) env.SymLink(libname_so_installpath,libname_soname_installpath) if 'uninstall' in SCons.Script.COMMAND_LINE_TARGETS: create_uninstall_target(env, libname_so_installpath) create_uninstall_target(env, libname_soname_installpath) return thislib import sys import subprocess import platform import time def write_compiledata(env, compiledata): if sys.version_info >= (2, 7): try: cmd = "git rev-parse HEAD 2>/dev/null" gitrev = subprocess.check_output(cmd, shell=True).rstrip() except: gitrev = '' try: cmd = env.subst('$CXX') + " --version 2>/dev/null | head -1" cxxver = subprocess.check_output(cmd, shell=True).rstrip() except: cxxver = '' # subprocess gives us byte string literals in Python 3, but we'd like # Unicode strings if sys.version_info >= (3, 0): gitrev = gitrev.decode() cxxver = cxxver.decode() else: fnull = open(os.devnull, 'w') try: gitrev = subprocess.Popen(['git', 'rev-parse', 'HEAD', '2>dev/null'], stdout=subprocess.PIPE, 
stderr=fnull).communicate()[0].rstrip() except: gitrev ='' try: outp = subprocess.Popen([env.subst('$CXX'), '--version'], stdout=subprocess.PIPE, stderr=fnull).communicate()[0] lines = outp.splitlines() cxxver = lines[0] except: cxxver = '' f=open(compiledata,'w') f.write('#ifndef ANALYZER_COMPILEDATA_H\n') f.write('#define ANALYZER_COMPILEDATA_H\n') f.write('\n') f.write('#define HA_INCLUDEPATH "%s %s %s"\n' % (env.subst('$HA_HallA'), env.subst('$HA_Podd'), env.subst('$HA_DC'))) f.write('#define HA_VERSION "%s"\n' % env.subst('$HA_VERSION')) f.write('#define HA_DATE "%s"\n' % time.strftime("%b %d %Y")) f.write('#define HA_DATETIME "%s"\n' % time.strftime("%a %b %d %Y")) #f.write('#define HA_DATETIME "%s"\n' % time.strftime("%a %b %d %H:%M:%S %Z %Y")) f.write('#define HA_PLATFORM "%s"\n' % platform.platform()) f.write('#define HA_BUILDNODE "%s"\n' % platform.node()) f.write('#define HA_BUILDDIR "%s"\n' % os.getcwd()) try: builduser = env['ENV']['LOGNAME'] except: builduser = '' f.write('#define HA_BUILDUSER "%s"\n' % builduser) f.write('#define HA_GITREV "%s"\n' % gitrev[:7]) f.write('#define HA_CXXVERS "%s"\n' % cxxver) f.write('#define HA_ROOTVERS "%s"\n' % env.subst('$ROOTVERS')) f.write('#define ANALYZER_VERSION_CODE %s\n' % env.subst('$VERCODE')) f.write('#define ANALYZER_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n') f.write('\n') f.write('#endif\n') f.close()
39.389706
91
0.612936
import os from SCons.Action import ActionFactory from SCons.Script.SConscript import SConsEnvironment SConsEnvironment.OSCommand = ActionFactory(os.system, lambda command : 'os.system("%s")' % command) import SCons.Util def list_to_path(lst): result = '' if SCons.Util.is_List(lst): for element in lst: result += str(element) if len(lst) > 1 and element != lst[-1]: result += ':' else: result = lst return result def InstallWithRPATH(env, dest, files, rpath): obj = env.Install(dest, files) if env['PLATFORM'] == 'posix': rpathstr = list_to_path(rpath) if env.WhereIs('patchelf'): if env.subst('$ADD_INSTALL_RPATH') and rpathstr: patch_cmd = "patchelf --force-rpath --set-rpath '"+rpathstr+"' " else: patch_cmd = "patchelf --remove-rpath " for i in obj: env.AddPostAction(i, env.OSCommand(patch_cmd+str(i))) elif env.WhereIs('chrpath') \ and not (env.subst('$ADD_INSTALL_RPATH') and rpathstr): for i in obj: env.AddPostAction(i, env.OSCommand('chrpath -d '+str(i))) else: print('WARNING: patchelf not found, cannot set RPATH') elif env['PLATFORM'] == 'darwin': for i in obj: env.AddPostAction(i, env.OSCommand("site_scons/clear_macos_rpath.sh "+str(i))) if env.subst('$ADD_INSTALL_RPATH') and rpath: tool_cmd = "install_name_tool" add_to_cmd = '' for rp in rpath: if rp: add_to_cmd += " -add_rpath "+str(rp) if add_to_cmd: tool_cmd += add_to_cmd+" "+str(i) env.AddPostAction(i, env.OSCommand(tool_cmd)) return obj def create_uninstall_target(env, path, is_glob = False): if is_glob: all_files = env.Glob(path,strings=True) for filei in all_files: print('Delete(%s)' % filei) env.Alias("uninstall", os.remove(filei)) else: print('Delete(%s)' % path) if os.path.exists(path): env.Alias("uninstall", os.remove(path)) import SCons.Script import re def build_library(env, sotarget, src, extrahdrs = [], extradicthdrs = [], dictname = None, useenv = True, versioned = False, install_rpath = []): thisdir_fullpath = env.Dir('.').path thisdir = os.path.basename(os.path.normpath(thisdir_fullpath)) if not 
dictname: dictname = sotarget if useenv: linklibs = env['LIBS'] linklibpath = env['LIBPATH'] else: linklibs = [''] linklibpath = [''] srclist = env.Split(src) hdr = re.sub(r'\.cxx','.h',src) installhdrs = env.Split(hdr) installhdrs.extend(extradicthdrs) installhdrs.extend(extrahdrs) dicthdrs = env.Split(hdr) dicthdrs.extend(extradicthdrs) dicthdrs.append(dictname+'_LinkDef.h') rootdict = dictname+'Dict.cxx' libbase = env.subst('$SHLIBPREFIX')+sotarget thedict = env.RootCint(rootdict, dicthdrs, PCMNAME=libbase) libname_so = libbase+env.subst('$SHLIBSUFFIX') if env['PLATFORM'] == 'posix': if versioned: libname_soname = libname_so+'.'+env.subst('$SOVERSION') libname_versioned = libname_so+'.'+env.subst('$VERSION') shlibsuffix = env.subst('$SHLIBSUFFIX')+'.'+env.subst('$VERSION') else: libname_soname = libname_so shlibsuffix = env.subst('$SHLIBSUFFIX') shlinkflags = ['-Wl,-soname='+libname_soname] elif env['PLATFORM'] == 'darwin': if versioned: libname_soname = libbase+'.'+env.subst('$SOVERSION')+env.subst('$SHLIBSUFFIX') libname_versioned = libbase+'.'+env.subst('$VERSION')+env.subst('$SHLIBSUFFIX') shlibsuffix = '.'+env.subst('$VERSION')+env.subst('$SHLIBSUFFIX') else: libname_soname = libname_so shlibsuffix = env.subst('$SHLIBSUFFIX') shlinkflags = ['-Wl,-install_name,'+'@rpath/'+libname_soname] if versioned: shlinkflags.append(['-Wl,-compatibility_version,'+env.subst('$SOVERSION'), '-Wl,-current_version,'+env.subst('$VERSION')]) try: for rp in env['RPATH']: shlinkflags.append('-Wl,-rpath,'+rp) except KeyError: pass else: print('build_library: Error: unsupported platform') Exit(3) thislib = env.SharedLibrary(target = sotarget, source = srclist+[rootdict], LIBS = linklibs, LIBPATH = linklibpath, SHLIBSUFFIX = shlibsuffix, SONAME = libname_soname, SHLINKFLAGS = env['SHLINKFLAGS']+shlinkflags) if versioned: env.SymLink(libname_soname,thislib) env.SymLink(libname_so,libname_soname) install_prefix = env.subst('$INSTALLDIR') lib_dir = 
os.path.join(install_prefix,env.subst('$LIBSUBDIR')) inc_dir = os.path.join(install_prefix,'include') src_dir = os.path.join(install_prefix,'src',thisdir) InstallWithRPATH(env,lib_dir,thislib,install_rpath) if len(thedict) > 1: env.Install(lib_dir,thedict[1]) env.Install(inc_dir,installhdrs) env.Install(src_dir,srclist) libname_so_installpath = os.path.join(lib_dir,libname_so) if versioned: libname_soname_installpath = os.path.join(lib_dir,libname_soname) libname_versioned_installpath = os.path.join(lib_dir,libname_versioned) env.SymLink(libname_soname_installpath,libname_versioned_installpath) env.SymLink(libname_so_installpath,libname_soname_installpath) if 'uninstall' in SCons.Script.COMMAND_LINE_TARGETS: create_uninstall_target(env, libname_so_installpath) create_uninstall_target(env, libname_soname_installpath) return thislib import sys import subprocess import platform import time def write_compiledata(env, compiledata): if sys.version_info >= (2, 7): try: cmd = "git rev-parse HEAD 2>/dev/null" gitrev = subprocess.check_output(cmd, shell=True).rstrip() except: gitrev = '' try: cmd = env.subst('$CXX') + " --version 2>/dev/null | head -1" cxxver = subprocess.check_output(cmd, shell=True).rstrip() except: cxxver = '' # subprocess gives us byte string literals in Python 3, but we'd like if sys.version_info >= (3, 0): gitrev = gitrev.decode() cxxver = cxxver.decode() else: fnull = open(os.devnull, 'w') try: gitrev = subprocess.Popen(['git', 'rev-parse', 'HEAD', '2>dev/null'], stdout=subprocess.PIPE, stderr=fnull).communicate()[0].rstrip() except: gitrev ='' try: outp = subprocess.Popen([env.subst('$CXX'), '--version'], stdout=subprocess.PIPE, stderr=fnull).communicate()[0] lines = outp.splitlines() cxxver = lines[0] except: cxxver = '' f=open(compiledata,'w') f.write('#ifndef ANALYZER_COMPILEDATA_H\n') f.write('#define ANALYZER_COMPILEDATA_H\n') f.write('\n') f.write('#define HA_INCLUDEPATH "%s %s %s"\n' % (env.subst('$HA_HallA'), env.subst('$HA_Podd'), 
env.subst('$HA_DC'))) f.write('#define HA_VERSION "%s"\n' % env.subst('$HA_VERSION')) f.write('#define HA_DATE "%s"\n' % time.strftime("%b %d %Y")) f.write('#define HA_DATETIME "%s"\n' % time.strftime("%a %b %d %Y")) f.write('#define HA_PLATFORM "%s"\n' % platform.platform()) f.write('#define HA_BUILDNODE "%s"\n' % platform.node()) f.write('#define HA_BUILDDIR "%s"\n' % os.getcwd()) try: builduser = env['ENV']['LOGNAME'] except: builduser = '' f.write('#define HA_BUILDUSER "%s"\n' % builduser) f.write('#define HA_GITREV "%s"\n' % gitrev[:7]) f.write('#define HA_CXXVERS "%s"\n' % cxxver) f.write('#define HA_ROOTVERS "%s"\n' % env.subst('$ROOTVERS')) f.write('#define ANALYZER_VERSION_CODE %s\n' % env.subst('$VERCODE')) f.write('#define ANALYZER_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n') f.write('\n') f.write('#endif\n') f.close()
true
true
f70f0585c7ebf9f69ae33d24a7e5ee609fe1149b
7,087
py
Python
Code/VnetLA/validate.py
yyliu01/TraCoCo
eecbc92c961d393deaa31726739a94b7f495d893
[ "MIT" ]
4
2022-03-29T04:19:02.000Z
2022-03-31T07:48:24.000Z
Code/VnetLA/validate.py
yyliu01/TraCoCo
eecbc92c961d393deaa31726739a94b7f495d893
[ "MIT" ]
null
null
null
Code/VnetLA/validate.py
yyliu01/TraCoCo
eecbc92c961d393deaa31726739a94b7f495d893
[ "MIT" ]
null
null
null
import os import math import torch import argparse import numpy as np from tqdm import tqdm from medpy import metric import torch.nn.functional as F from Configs.config import config from Model.Vnet import VNet as Vnet from cc3d import connected_components from Dataloader.dataset import LAHeartDataset """ # https://github.com/kleinzcy/SASSnet/blob/master/code/test_util.py def getLargestCC(segmentation): # from skimage.measure import label as sm_label labels = sm_label(segmentation) assert (labels.max() != 0) # assume at least 1 CC largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1 return largestCC """ def cct(pseudo_label): labels_out, N = connected_components(pseudo_label, connectivity=26, return_N=True) for segid in range(1, N + 1): extracted_image = labels_out * (labels_out == segid) if extracted_image.sum() < 8000: pseudo_label[labels_out == segid] = 0 return pseudo_label def test_all_case(net, val_set, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, post_process=False, visual=False): total_metric = 0.0 assert val_set.aug is False, ">> no augmentation for test set" dataloader = iter(val_set) tbar = range(len(val_set)) tbar = tqdm(tbar, ncols=135) for (idx, _) in enumerate(tbar): image, label = next(dataloader) prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=num_classes, post_process=post_process) if np.sum(prediction) == 0: single_metric = (0, 0, 0, 0) else: single_metric = calculate_metric_percase(np.array(prediction), np.array(label[:])) total_metric += np.asarray(single_metric) if visual: # import nibabel as nib # struggle for where to save; modify it if you need. 
raise NotImplementedError avg_metric = total_metric / len(val_set) print("|dice={:.4f}|mIoU={:.4f}|95HD={:.4f}|ASD={:.4f}|".format(avg_metric[0], avg_metric[1], avg_metric[3], avg_metric[2])) return avg_metric def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1, post_process=False): image = image.squeeze() w, h, d = image.shape # if the size of image is less than patch_size, then padding it add_pad = False if w < patch_size[0]: w_pad = patch_size[0] - w add_pad = True else: w_pad = 0 if h < patch_size[1]: h_pad = patch_size[1] - h add_pad = True else: h_pad = 0 if d < patch_size[2]: d_pad = patch_size[2] - d add_pad = True else: d_pad = 0 wl_pad, wr_pad = w_pad // 2, w_pad - w_pad // 2 hl_pad, hr_pad = h_pad // 2, h_pad - h_pad // 2 dl_pad, dr_pad = d_pad // 2, d_pad - d_pad // 2 if add_pad: image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad), (dl_pad, dr_pad)], mode='constant', constant_values=0) ww, hh, dd = image.shape sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1 sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1 sz = math.ceil((dd - patch_size[2]) / stride_z) + 1 score_map = np.zeros((num_classes,) + image.shape).astype(np.float32) cnt = np.zeros(image.shape).astype(np.float32) for x in range(0, sx): xs = min(stride_xy * x, ww - patch_size[0]) for y in range(0, sy): ys = min(stride_xy * y, hh - patch_size[1]) for z in range(0, sz): zs = min(stride_z * z, dd - patch_size[2]) test_patch = image[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] test_patch = np.expand_dims(np.expand_dims(test_patch, axis=0), axis=0).astype(np.float32) test_patch = torch.from_numpy(test_patch).cuda(non_blocking=True) y1, _ = net(test_patch) y = F.softmax(y1, dim=1) y = y.cpu().data.numpy() y = y[0, :, :, :, :] score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \ = score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + y cnt[xs:xs + patch_size[0], ys:ys + 
patch_size[1], zs:zs + patch_size[2]] \ = cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + 1 score_map = score_map / np.expand_dims(cnt, axis=0) label_map = np.argmax(score_map, axis=0) if post_process: label_map = cct(label_map) # label_map = getLargestCC(label_map) feel free to change the post-process approach if add_pad: label_map = label_map[wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d] score_map = score_map[:, wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d] return label_map, score_map def calculate_metric_percase(pred, gt): dice = metric.binary.dc(pred, gt) jc = metric.binary.jc(pred, gt) hd = metric.binary.hd95(pred, gt) asd = metric.binary.asd(pred, gt) return dice, jc, hd, asd def test_calculate_metric(ckpt_path, vis=False, post=False): net = Vnet(n_channels=1, n_classes=2, normalization='batchnorm', has_dropout=True).cuda() net.load_state_dict(torch.load(ckpt_path)) net.eval() val_dataset = LAHeartDataset(os.path.join(config.code_path, "Dataloader"), config.data_path, split="eval", config=config) # follows the previous works' setting avg_metric = test_all_case(net, val_dataset, num_classes=2, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, post_process=post, visual=vis) return avg_metric if __name__ == '__main__': parser = argparse.ArgumentParser(description='Medical Semi-supervised Semantic Segmentation (valid)') parser.add_argument("--env_name", default="traCoCo(8-label,spatial_weight(kl)=0.3,hyp=0.1,iters=9000)", type=str, help="your environment folder name for training") parser.add_argument("--visual", action="store_true", help="your environment folder name for training") parser.add_argument("--post", action="store_true", help="implement post process or not") cmd_line = parser.parse_args() default_path = os.path.join(config.code_path, "saved", cmd_line.env_name) ckpt = os.listdir(default_path) ckpt = [i for i in ckpt if ".pth" in str(i)][0] print("validate {} for LA dataset ...".format(str(ckpt))) 
metric = test_calculate_metric(os.path.join(default_path, ckpt), vis=cmd_line.visual, post=cmd_line.post)
39.372222
107
0.597291
import os import math import torch import argparse import numpy as np from tqdm import tqdm from medpy import metric import torch.nn.functional as F from Configs.config import config from Model.Vnet import VNet as Vnet from cc3d import connected_components from Dataloader.dataset import LAHeartDataset def cct(pseudo_label): labels_out, N = connected_components(pseudo_label, connectivity=26, return_N=True) for segid in range(1, N + 1): extracted_image = labels_out * (labels_out == segid) if extracted_image.sum() < 8000: pseudo_label[labels_out == segid] = 0 return pseudo_label def test_all_case(net, val_set, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, post_process=False, visual=False): total_metric = 0.0 assert val_set.aug is False, ">> no augmentation for test set" dataloader = iter(val_set) tbar = range(len(val_set)) tbar = tqdm(tbar, ncols=135) for (idx, _) in enumerate(tbar): image, label = next(dataloader) prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=num_classes, post_process=post_process) if np.sum(prediction) == 0: single_metric = (0, 0, 0, 0) else: single_metric = calculate_metric_percase(np.array(prediction), np.array(label[:])) total_metric += np.asarray(single_metric) if visual: raise NotImplementedError avg_metric = total_metric / len(val_set) print("|dice={:.4f}|mIoU={:.4f}|95HD={:.4f}|ASD={:.4f}|".format(avg_metric[0], avg_metric[1], avg_metric[3], avg_metric[2])) return avg_metric def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1, post_process=False): image = image.squeeze() w, h, d = image.shape add_pad = False if w < patch_size[0]: w_pad = patch_size[0] - w add_pad = True else: w_pad = 0 if h < patch_size[1]: h_pad = patch_size[1] - h add_pad = True else: h_pad = 0 if d < patch_size[2]: d_pad = patch_size[2] - d add_pad = True else: d_pad = 0 wl_pad, wr_pad = w_pad // 2, w_pad - w_pad // 2 hl_pad, hr_pad = h_pad // 2, h_pad - h_pad // 2 dl_pad, 
dr_pad = d_pad // 2, d_pad - d_pad // 2 if add_pad: image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad), (dl_pad, dr_pad)], mode='constant', constant_values=0) ww, hh, dd = image.shape sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1 sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1 sz = math.ceil((dd - patch_size[2]) / stride_z) + 1 score_map = np.zeros((num_classes,) + image.shape).astype(np.float32) cnt = np.zeros(image.shape).astype(np.float32) for x in range(0, sx): xs = min(stride_xy * x, ww - patch_size[0]) for y in range(0, sy): ys = min(stride_xy * y, hh - patch_size[1]) for z in range(0, sz): zs = min(stride_z * z, dd - patch_size[2]) test_patch = image[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] test_patch = np.expand_dims(np.expand_dims(test_patch, axis=0), axis=0).astype(np.float32) test_patch = torch.from_numpy(test_patch).cuda(non_blocking=True) y1, _ = net(test_patch) y = F.softmax(y1, dim=1) y = y.cpu().data.numpy() y = y[0, :, :, :, :] score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \ = score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + y cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \ = cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + 1 score_map = score_map / np.expand_dims(cnt, axis=0) label_map = np.argmax(score_map, axis=0) if post_process: label_map = cct(label_map) if add_pad: label_map = label_map[wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d] score_map = score_map[:, wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d] return label_map, score_map def calculate_metric_percase(pred, gt): dice = metric.binary.dc(pred, gt) jc = metric.binary.jc(pred, gt) hd = metric.binary.hd95(pred, gt) asd = metric.binary.asd(pred, gt) return dice, jc, hd, asd def test_calculate_metric(ckpt_path, vis=False, post=False): net = Vnet(n_channels=1, n_classes=2, normalization='batchnorm', 
has_dropout=True).cuda() net.load_state_dict(torch.load(ckpt_path)) net.eval() val_dataset = LAHeartDataset(os.path.join(config.code_path, "Dataloader"), config.data_path, split="eval", config=config) avg_metric = test_all_case(net, val_dataset, num_classes=2, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, post_process=post, visual=vis) return avg_metric if __name__ == '__main__': parser = argparse.ArgumentParser(description='Medical Semi-supervised Semantic Segmentation (valid)') parser.add_argument("--env_name", default="traCoCo(8-label,spatial_weight(kl)=0.3,hyp=0.1,iters=9000)", type=str, help="your environment folder name for training") parser.add_argument("--visual", action="store_true", help="your environment folder name for training") parser.add_argument("--post", action="store_true", help="implement post process or not") cmd_line = parser.parse_args() default_path = os.path.join(config.code_path, "saved", cmd_line.env_name) ckpt = os.listdir(default_path) ckpt = [i for i in ckpt if ".pth" in str(i)][0] print("validate {} for LA dataset ...".format(str(ckpt))) metric = test_calculate_metric(os.path.join(default_path, ckpt), vis=cmd_line.visual, post=cmd_line.post)
true
true
f70f05ab41aae7df827738907b865086531ef156
15,255
py
Python
sdk/python/pulumi_azure_nextgen/network/v20170301/load_balancer.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_nextgen/network/v20170301/load_balancer.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_nextgen/network/v20170301/load_balancer.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['LoadBalancer'] class LoadBalancer(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, backend_address_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]]] = None, etag: Optional[pulumi.Input[str]] = None, frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]]] = None, id: Optional[pulumi.Input[str]] = None, inbound_nat_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]]] = None, inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]]] = None, load_balancer_name: Optional[pulumi.Input[str]] = None, load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]]] = None, location: Optional[pulumi.Input[str]] = None, outbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundNatRuleArgs']]]]] = None, probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]]] = None, provisioning_state: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, resource_guid: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ LoadBalancer resource :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]] backend_address_pools: Collection of backend address pools used by a load balancer :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]] frontend_ip_configurations: Object representing the frontend IPs to be used for the load balancer :param pulumi.Input[str] id: Resource ID. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]] inbound_nat_pools: Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]] inbound_nat_rules: Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules. :param pulumi.Input[str] load_balancer_name: The name of the load balancer. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]] load_balancing_rules: Object collection representing the load balancing rules Gets the provisioning :param pulumi.Input[str] location: Resource location. 
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundNatRuleArgs']]]] outbound_nat_rules: The outbound NAT rules. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]] probes: Collection of probe objects used in the load balancer :param pulumi.Input[str] provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[str] resource_guid: The resource GUID property of the load balancer resource. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['backend_address_pools'] = backend_address_pools __props__['etag'] = etag __props__['frontend_ip_configurations'] = frontend_ip_configurations __props__['id'] = id __props__['inbound_nat_pools'] = inbound_nat_pools __props__['inbound_nat_rules'] = inbound_nat_rules __props__['load_balancer_name'] = load_balancer_name __props__['load_balancing_rules'] = load_balancing_rules __props__['location'] = location __props__['outbound_nat_rules'] = outbound_nat_rules __props__['probes'] = probes __props__['provisioning_state'] = provisioning_state if resource_group_name is None and not opts.urn: raise TypeError("Missing 
required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['resource_guid'] = resource_guid __props__['tags'] = tags __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/latest:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150615:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160330:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20161201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190601:LoadBalancer"), 
pulumi.Alias(type_="azure-nextgen:network/v20190701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200301:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200501:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200801:LoadBalancer")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(LoadBalancer, __self__).__init__( 'azure-nextgen:network/v20170301:LoadBalancer', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'LoadBalancer': """ Get an existing LoadBalancer resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return LoadBalancer(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="backendAddressPools") def backend_address_pools(self) -> pulumi.Output[Optional[Sequence['outputs.BackendAddressPoolResponse']]]: """ Collection of backend address pools used by a load balancer """ return pulumi.get(self, "backend_address_pools") @property @pulumi.getter def etag(self) -> pulumi.Output[Optional[str]]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfigurations") def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FrontendIPConfigurationResponse']]]: """ Object representing the frontend IPs to be used for the load balancer """ return pulumi.get(self, "frontend_ip_configurations") @property @pulumi.getter(name="inboundNatPools") def inbound_nat_pools(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatPoolResponse']]]: """ Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules. """ return pulumi.get(self, "inbound_nat_pools") @property @pulumi.getter(name="inboundNatRules") def inbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatRuleResponse']]]: """ Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules. 
""" return pulumi.get(self, "inbound_nat_rules") @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> pulumi.Output[Optional[Sequence['outputs.LoadBalancingRuleResponse']]]: """ Object collection representing the load balancing rules Gets the provisioning """ return pulumi.get(self, "load_balancing_rules") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="outboundNatRules") def outbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundNatRuleResponse']]]: """ The outbound NAT rules. """ return pulumi.get(self, "outbound_nat_rules") @property @pulumi.getter def probes(self) -> pulumi.Output[Optional[Sequence['outputs.ProbeResponse']]]: """ Collection of probe objects used in the load balancer """ return pulumi.get(self, "probes") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[Optional[str]]: """ Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> pulumi.Output[Optional[str]]: """ The resource GUID property of the load balancer resource. """ return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
63.5625
2,495
0.708686
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['LoadBalancer'] class LoadBalancer(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, backend_address_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]]] = None, etag: Optional[pulumi.Input[str]] = None, frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]]] = None, id: Optional[pulumi.Input[str]] = None, inbound_nat_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]]] = None, inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]]] = None, load_balancer_name: Optional[pulumi.Input[str]] = None, load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]]] = None, location: Optional[pulumi.Input[str]] = None, outbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundNatRuleArgs']]]]] = None, probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]]] = None, provisioning_state: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, resource_guid: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, 
pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['backend_address_pools'] = backend_address_pools __props__['etag'] = etag __props__['frontend_ip_configurations'] = frontend_ip_configurations __props__['id'] = id __props__['inbound_nat_pools'] = inbound_nat_pools __props__['inbound_nat_rules'] = inbound_nat_rules __props__['load_balancer_name'] = load_balancer_name __props__['load_balancing_rules'] = load_balancing_rules __props__['location'] = location __props__['outbound_nat_rules'] = outbound_nat_rules __props__['probes'] = probes __props__['provisioning_state'] = provisioning_state if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['resource_guid'] = resource_guid __props__['tags'] = tags __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/latest:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150615:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160330:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20161201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170901:LoadBalancer"), 
pulumi.Alias(type_="azure-nextgen:network/v20171001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200301:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200501:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200801:LoadBalancer")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(LoadBalancer, __self__).__init__( 'azure-nextgen:network/v20170301:LoadBalancer', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 
'LoadBalancer': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return LoadBalancer(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="backendAddressPools") def backend_address_pools(self) -> pulumi.Output[Optional[Sequence['outputs.BackendAddressPoolResponse']]]: return pulumi.get(self, "backend_address_pools") @property @pulumi.getter def etag(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "etag") @property @pulumi.getter(name="frontendIPConfigurations") def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FrontendIPConfigurationResponse']]]: return pulumi.get(self, "frontend_ip_configurations") @property @pulumi.getter(name="inboundNatPools") def inbound_nat_pools(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatPoolResponse']]]: return pulumi.get(self, "inbound_nat_pools") @property @pulumi.getter(name="inboundNatRules") def inbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatRuleResponse']]]: return pulumi.get(self, "inbound_nat_rules") @property @pulumi.getter(name="loadBalancingRules") def load_balancing_rules(self) -> pulumi.Output[Optional[Sequence['outputs.LoadBalancingRuleResponse']]]: return pulumi.get(self, "load_balancing_rules") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="outboundNatRules") def outbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundNatRuleResponse']]]: return pulumi.get(self, "outbound_nat_rules") @property @pulumi.getter def probes(self) -> pulumi.Output[Optional[Sequence['outputs.ProbeResponse']]]: return pulumi.get(self, "probes") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, 
"provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "resource_guid") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
true
true
f70f06341c00091fa3e07ab8cacd2d2f5aa1ebc3
19,241
py
Python
airmozilla/search/views.py
mozilla/airmozilla
fa6acbbbacc1e22553457807bcea7ce7a9ef6fe3
[ "BSD-3-Clause" ]
115
2015-01-06T18:45:39.000Z
2022-02-07T10:56:49.000Z
airmozilla/search/views.py
april/airmozilla
ee357f5396cdcb50147c72ff1e81a610f9cb292c
[ "BSD-3-Clause" ]
321
2015-01-02T15:19:25.000Z
2018-07-05T14:58:50.000Z
airmozilla/search/views.py
april/airmozilla
ee357f5396cdcb50147c72ff1e81a610f9cb292c
[ "BSD-3-Clause" ]
101
2015-01-13T17:59:15.000Z
2020-12-15T02:58:38.000Z
import re import urllib import time from django.shortcuts import render, redirect, get_object_or_404 from django import http from django.db.utils import DatabaseError from django.db import transaction from django.db.models import Count from django.conf import settings from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from django.contrib import messages from django.views.decorators.http import require_POST from jsonview.decorators import json_view from airmozilla.main.models import Event, Tag, Channel, get_profile_safely from airmozilla.main.views import is_contributor from airmozilla.base.utils import paginator from airmozilla.main.utils import get_event_channels from . import forms from . import utils from .models import LoggedSearch, SavedSearch from .split_search import split_search @transaction.atomic def home(request): context = { 'q': None, 'events_found': None, 'search_error': None, 'tags': None, 'possible_tags': None, 'channels': None, 'possible_channels': None, 'found_channels': [], 'found_channels_count': 0, } if request.GET.get('q'): form = forms.SearchForm(request.GET) else: form = forms.SearchForm() if request.GET.get('q') and form.is_valid(): context['q'] = form.cleaned_data['q'] privacy_filter = {} privacy_exclude = {} qs = Event.objects.scheduled_or_processing() if request.user.is_active: if is_contributor(request.user): privacy_exclude = {'privacy': Event.PRIVACY_COMPANY} else: # privacy_filter = {'privacy': Event.PRIVACY_PUBLIC} privacy_exclude = {'privacy': Event.PRIVACY_COMPANY} qs = qs.approved() extra = {} rest, params = split_search(context['q'], ('tag', 'channel')) if params.get('tag'): tags = Tag.objects.filter(name__iexact=params['tag']) if tags: context['q'] = rest context['tags'] = extra['tags'] = tags else: # is the search term possibly a tag? 
all_tag_names = Tag.objects.all().values_list('name', flat=True) tags_regex = re.compile( r'\b(%s)\b' % ('|'.join(re.escape(x) for x in all_tag_names),), re.I ) # next we need to turn all of these into a Tag QuerySet # because we can't do `filter(name__in=tags_regex.findall(...))` # because that case sensitive. tag_ids = [] for match in tags_regex.findall(rest): tag_ids.extend( Tag.objects.filter(name__iexact=match) .values_list('id', flat=True) ) possible_tags = Tag.objects.filter( id__in=tag_ids ) for tag in possible_tags: regex = re.compile(re.escape(tag.name), re.I) tag._query_string = regex.sub( '', context['q'], ) tag._query_string += ' tag: %s' % tag.name # reduce all excess whitespace into 1 tag._query_string = re.sub( '\s\s+', ' ', tag._query_string ) tag._query_string = tag._query_string.strip() context['possible_tags'] = possible_tags if params.get('channel'): channels = Channel.objects.filter(name__iexact=params['channel']) if channels: context['q'] = rest context['channels'] = extra['channels'] = channels else: # is the search term possibly a channel? 
all_channel_names = ( Channel.objects.all().values_list('name', flat=True) ) channels_regex = re.compile( r'\b(%s)\b' % ('|'.join(re.escape(x) for x in all_channel_names),), re.I ) channel_ids = [] for match in channels_regex.findall(rest): channel_ids.extend( Channel.objects .filter(name__iexact=match).values_list('id', flat=True) ) possible_channels = Channel.objects.filter( id__in=channel_ids ) for channel in possible_channels: regex = re.compile(re.escape(channel.name), re.I) channel._query_string = regex.sub( '', context['q'], ) channel._query_string += ' channel: %s' % channel.name # reduce all excess whitespace into 1 channel._query_string = re.sub( '\s\s+', ' ', channel._query_string ) channel._query_string = channel._query_string.strip() context['possible_channels'] = possible_channels events = _search( qs, context['q'], privacy_filter=privacy_filter, privacy_exclude=privacy_exclude, sort=request.GET.get('sort'), **extra ) if not events.count() and utils.possible_to_or_query(context['q']): events = _search( qs, context['q'], privacy_filter=privacy_filter, privacy_exclude=privacy_exclude, sort=request.GET.get('sort'), fuzzy=True ) found_channels = _find_channels(context['q']) context['found_channels'] = found_channels # it's a list context['found_channels_count'] = len(found_channels) elif request.GET.get('ss'): savedsearch = get_object_or_404( SavedSearch, id=request.GET.get('ss') ) context['savedsearch'] = savedsearch events = savedsearch.get_events() # But if you're just browsing we want to make sure you don't # see anything you're not supposed to see. if request.user.is_active: if is_contributor(request.user): events = events.exclude(privacy=Event.PRIVACY_COMPANY) else: events = events.filter(privacy=Event.PRIVACY_PUBLIC) # It's not obvious how to sort these. They all match the saved # search. 
# Let's keep it simple and sort by start time for now events = events.order_by('-start_time') else: events = None if events is not None: try: page = int(request.GET.get('page', 1)) if page < 1: raise ValueError except ValueError: return http.HttpResponseBadRequest('Invalid page') # we use the paginator() function to get the Paginator # instance so we can avoid calling `events.count()` for the # header of the page where it says "XX events found" try: with transaction.atomic(): pager, events_paged = paginator(events, page, 10) _database_error_happened = False except DatabaseError: _database_error_happened = True # don't feed the trolls, just return nothing found pager, events_paged = paginator(Event.objects.none(), 1, 10) next_page_url = prev_page_url = None def url_maker(page): querystring = {'page': page} if context.get('savedsearch'): querystring['ss'] = context['savedsearch'].id else: querystring['q'] = context['q'].encode('utf-8') querystring = urllib.urlencode(querystring) return '%s?%s' % (reverse('search:home'), querystring) if events_paged.has_next(): next_page_url = url_maker(events_paged.next_page_number()) if events_paged.has_previous(): prev_page_url = url_maker(events_paged.previous_page_number()) context['events_paged'] = events_paged context['next_page_url'] = next_page_url context['prev_page_url'] = prev_page_url context['events_found'] = pager.count context['channels'] = get_event_channels(events_paged) log_searches = settings.LOG_SEARCHES and '_nolog' not in request.GET if ( log_searches and not _database_error_happened and request.GET.get('q', '').strip() ): logged_search = LoggedSearch.objects.create( term=request.GET['q'][:200], results=events.count(), page=page, user=request.user.is_authenticated() and request.user or None ) request.session['logged_search'] = ( logged_search.pk, time.time() ) elif request.GET.get('q'): context['search_error'] = form.errors['q'] else: context['events'] = [] context['form'] = form return render(request, 
'search/home.html', context) def _find_channels(q): search_escaped = utils.make_or_query(q) sql = """ to_tsvector('english', name) @@ plainto_tsquery('english', %s) OR slug ILIKE %s """ channels_qs = Channel.objects.all().extra( where=[sql], params=[ search_escaped, search_escaped, ], select={ 'name_highlit': ( "ts_headline('english', name, " "plainto_tsquery('english', %s))" ), 'rank_name': ( "ts_rank_cd(to_tsvector('english', name), " "plainto_tsquery('english', %s))" ), }, select_params=[ search_escaped, search_escaped, ] ) # make a dict of parental counts subchannel_counts = {} qs = ( Channel.objects .filter(parent__isnull=False) .values('parent_id') .order_by() # necessary because the model has a default ordering .annotate(Count('parent')) ) for each in qs: subchannel_counts[each['parent_id']] = each['parent__count'] # make a dict of events counts by channel event_counts = {} qs = ( Event.channels.through.objects.filter(channel__in=channels_qs) .values('channel_id') .annotate(Count('channel')) ) for each in qs: event_counts[each['channel_id']] = each['channel__count'] channels = [] for channel in channels_qs[:5]: channel._event_count = event_counts.get(channel.id, 0) channel._subchannel_count = subchannel_counts.get(channel.id, 0) channels.append(channel) return channels def _search(qs, q, **options): # we only want to find upcoming or archived events # some optional filtering if 'tags' in options: qs = qs.filter(tags__in=options['tags']) if 'channels' in options: qs = qs.filter(channels__in=options['channels']) if options.get('privacy_filter'): qs = qs.filter(**options['privacy_filter']) elif options.get('privacy_exclude'): qs = qs.exclude(**options['privacy_exclude']) if q and options.get('fuzzy'): sql = """ ( to_tsvector('english', title) @@ to_tsquery('english', %s) OR to_tsvector('english', description || ' ' || short_description) @@ to_tsquery('english', %s) OR to_tsvector('english', transcript) @@ to_tsquery('english', %s) ) """ search_escaped = 
utils.make_or_query(q) elif q: sql = """ ( to_tsvector('english', title) @@ plainto_tsquery('english', %s) OR to_tsvector('english', description || ' ' || short_description) @@ plainto_tsquery('english', %s) OR to_tsvector('english', transcript) @@ plainto_tsquery('english', %s) ) """ search_escaped = q if q: qs = qs.extra( where=[sql], params=[search_escaped, search_escaped, search_escaped], select={ 'title_highlit': ( "ts_headline('english', title, " "plainto_tsquery('english', %s))" ), 'desc_highlit': ( "ts_headline('english', short_description, " "plainto_tsquery('english', %s))" ), 'transcript_highlit': ( "ts_headline('english', transcript, " "plainto_tsquery('english', %s))" ), 'rank_title': ( "ts_rank_cd(to_tsvector('english', title), " "plainto_tsquery('english', %s))" ), 'rank_desc': ( "ts_rank_cd(to_tsvector('english', description " "|| ' ' || short_description), " "plainto_tsquery('english', %s))" ), 'rank_transcript': ( "ts_rank_cd(to_tsvector('english', transcript), " "plainto_tsquery('english', %s))" ), }, select_params=[ search_escaped, search_escaped, search_escaped, search_escaped, search_escaped, search_escaped ], ) qs = qs.order_by('-rank_title', '-start_time', '-rank_desc') else: qs = qs.order_by('-start_time') return qs @require_POST @login_required @transaction.atomic() def savesearch(request): q = request.POST.get('q', '').strip() if not q: return http.HttpResponseBadRequest('no q') form = forms.SearchForm(request.POST) if not form.is_valid(): return http.HttpResponseBadRequest(form.errors) title = form.cleaned_data['q'] rest, params = split_search(title, ('tag', 'channel')) tags = None channels = None if params.get('tag'): tags = Tag.objects.filter(name__iexact=params['tag']) if tags: title = rest if params.get('channel'): channels = Channel.objects.filter( name__iexact=params['channel'] ) if channels: title = rest filters = {} if q: filters['title'] = { 'include': title } if tags: filters['tags'] = { 'include': [tag.id for tag in tags], } 
if channels: filters['channels'] = { 'include': [channel.id for channel in channels], } for other in SavedSearch.objects.filter(user=request.user): if other.filters == filters: return redirect('search:savedsearch', id=other.id) savedsearch = SavedSearch.objects.create( user=request.user, filters=filters, ) messages.success( request, 'Search saved' ) return redirect('search:savedsearch', id=savedsearch.id) @login_required @transaction.atomic() def savedsearch(request, id=None): savedsearch = get_object_or_404(SavedSearch, id=id) if request.method == 'POST': forked = False if savedsearch.user != request.user: # fork the saved search forked = True savedsearch = SavedSearch.objects.create( user=request.user, name=savedsearch.name, filters=savedsearch.filters, ) form = forms.SavedSearchForm(request.POST) if form.is_valid(): data = form.export_filters() savedsearch.name = form.cleaned_data['name'] savedsearch.filters = data savedsearch.save() if forked: messages.success( request, 'Saved Search forked and saved' ) else: messages.success( request, 'Saved Search saved' ) return redirect('search:savedsearch', id=savedsearch.id) elif request.GET.get('sample'): events = savedsearch.get_events() return http.JsonResponse({'events': events.count()}) else: data = forms.SavedSearchForm.convert_filters( savedsearch.filters, pks=True ) data['name'] = savedsearch.name form = forms.SavedSearchForm(data) context = { 'savedsearch': savedsearch, 'form': form, 'use_findable': True, } return render(request, 'search/savesearch.html', context) @login_required @transaction.atomic() def new_savedsearch(request): if request.method == 'POST': form = forms.SavedSearchForm(request.POST) if form.is_valid(): data = form.export_filters() SavedSearch.objects.create( user=request.user, filters=data, name=form.cleaned_data['name'], ) messages.success( request, 'Saved Search saved' ) return redirect('search:savedsearches') else: form = forms.SavedSearchForm() context = { 'form': form, 'use_findable': 
False, } return render(request, 'search/savesearch.html', context) @login_required def savedsearches(request): context = {} return render(request, 'search/savedsearches.html', context) @login_required @json_view def savedsearches_data(request): context = {} qs = SavedSearch.objects.filter( user=request.user ).order_by('-created') searches = [] for savedsearch in qs: item = { 'id': savedsearch.id, 'name': savedsearch.name, 'summary': savedsearch.summary, 'modified': savedsearch.modified.isoformat(), } searches.append(item) # We need a general Feed URL that is tailored to this user from airmozilla.main.context_processors import base feed = base(request)['get_feed_data']() if request.user.is_active: profile = get_profile_safely(request.user) if profile and profile.contributor: calendar_privacy = 'contributors' else: calendar_privacy = 'company' else: calendar_privacy = 'public' context['savedsearches'] = searches context['urls'] = { 'search:savedsearch': reverse('search:savedsearch', args=(0,)), 'search:home': reverse('search:home'), 'feed': feed['url'], 'ical': reverse('main:calendar_ical', args=(calendar_privacy,)), } return context @login_required @json_view def delete_savedsearch(request, id): savedsearch = get_object_or_404(SavedSearch, id=id) if savedsearch.user != request.user: return http.HttpResponseForbidden('Not yours to delete') savedsearch.delete() return {'ok': True}
32.611864
78
0.551427
import re import urllib import time from django.shortcuts import render, redirect, get_object_or_404 from django import http from django.db.utils import DatabaseError from django.db import transaction from django.db.models import Count from django.conf import settings from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from django.contrib import messages from django.views.decorators.http import require_POST from jsonview.decorators import json_view from airmozilla.main.models import Event, Tag, Channel, get_profile_safely from airmozilla.main.views import is_contributor from airmozilla.base.utils import paginator from airmozilla.main.utils import get_event_channels from . import forms from . import utils from .models import LoggedSearch, SavedSearch from .split_search import split_search @transaction.atomic def home(request): context = { 'q': None, 'events_found': None, 'search_error': None, 'tags': None, 'possible_tags': None, 'channels': None, 'possible_channels': None, 'found_channels': [], 'found_channels_count': 0, } if request.GET.get('q'): form = forms.SearchForm(request.GET) else: form = forms.SearchForm() if request.GET.get('q') and form.is_valid(): context['q'] = form.cleaned_data['q'] privacy_filter = {} privacy_exclude = {} qs = Event.objects.scheduled_or_processing() if request.user.is_active: if is_contributor(request.user): privacy_exclude = {'privacy': Event.PRIVACY_COMPANY} else: privacy_exclude = {'privacy': Event.PRIVACY_COMPANY} qs = qs.approved() extra = {} rest, params = split_search(context['q'], ('tag', 'channel')) if params.get('tag'): tags = Tag.objects.filter(name__iexact=params['tag']) if tags: context['q'] = rest context['tags'] = extra['tags'] = tags else: all_tag_names = Tag.objects.all().values_list('name', flat=True) tags_regex = re.compile( r'\b(%s)\b' % ('|'.join(re.escape(x) for x in all_tag_names),), re.I ) # because that case sensitive. 
tag_ids = [] for match in tags_regex.findall(rest): tag_ids.extend( Tag.objects.filter(name__iexact=match) .values_list('id', flat=True) ) possible_tags = Tag.objects.filter( id__in=tag_ids ) for tag in possible_tags: regex = re.compile(re.escape(tag.name), re.I) tag._query_string = regex.sub( '', context['q'], ) tag._query_string += ' tag: %s' % tag.name # reduce all excess whitespace into 1 tag._query_string = re.sub( '\s\s+', ' ', tag._query_string ) tag._query_string = tag._query_string.strip() context['possible_tags'] = possible_tags if params.get('channel'): channels = Channel.objects.filter(name__iexact=params['channel']) if channels: context['q'] = rest context['channels'] = extra['channels'] = channels else: # is the search term possibly a channel? all_channel_names = ( Channel.objects.all().values_list('name', flat=True) ) channels_regex = re.compile( r'\b(%s)\b' % ('|'.join(re.escape(x) for x in all_channel_names),), re.I ) channel_ids = [] for match in channels_regex.findall(rest): channel_ids.extend( Channel.objects .filter(name__iexact=match).values_list('id', flat=True) ) possible_channels = Channel.objects.filter( id__in=channel_ids ) for channel in possible_channels: regex = re.compile(re.escape(channel.name), re.I) channel._query_string = regex.sub( '', context['q'], ) channel._query_string += ' channel: %s' % channel.name # reduce all excess whitespace into 1 channel._query_string = re.sub( '\s\s+', ' ', channel._query_string ) channel._query_string = channel._query_string.strip() context['possible_channels'] = possible_channels events = _search( qs, context['q'], privacy_filter=privacy_filter, privacy_exclude=privacy_exclude, sort=request.GET.get('sort'), **extra ) if not events.count() and utils.possible_to_or_query(context['q']): events = _search( qs, context['q'], privacy_filter=privacy_filter, privacy_exclude=privacy_exclude, sort=request.GET.get('sort'), fuzzy=True ) found_channels = _find_channels(context['q']) context['found_channels'] = 
found_channels # it's a list context['found_channels_count'] = len(found_channels) elif request.GET.get('ss'): savedsearch = get_object_or_404( SavedSearch, id=request.GET.get('ss') ) context['savedsearch'] = savedsearch events = savedsearch.get_events() if request.user.is_active: if is_contributor(request.user): events = events.exclude(privacy=Event.PRIVACY_COMPANY) else: events = events.filter(privacy=Event.PRIVACY_PUBLIC) # It's not obvious how to sort these. They all match the saved events = events.order_by('-start_time') else: events = None if events is not None: try: page = int(request.GET.get('page', 1)) if page < 1: raise ValueError except ValueError: return http.HttpResponseBadRequest('Invalid page') # we use the paginator() function to get the Paginator # instance so we can avoid calling `events.count()` for the # header of the page where it says "XX events found" try: with transaction.atomic(): pager, events_paged = paginator(events, page, 10) _database_error_happened = False except DatabaseError: _database_error_happened = True # don't feed the trolls, just return nothing found pager, events_paged = paginator(Event.objects.none(), 1, 10) next_page_url = prev_page_url = None def url_maker(page): querystring = {'page': page} if context.get('savedsearch'): querystring['ss'] = context['savedsearch'].id else: querystring['q'] = context['q'].encode('utf-8') querystring = urllib.urlencode(querystring) return '%s?%s' % (reverse('search:home'), querystring) if events_paged.has_next(): next_page_url = url_maker(events_paged.next_page_number()) if events_paged.has_previous(): prev_page_url = url_maker(events_paged.previous_page_number()) context['events_paged'] = events_paged context['next_page_url'] = next_page_url context['prev_page_url'] = prev_page_url context['events_found'] = pager.count context['channels'] = get_event_channels(events_paged) log_searches = settings.LOG_SEARCHES and '_nolog' not in request.GET if ( log_searches and not 
_database_error_happened and request.GET.get('q', '').strip() ): logged_search = LoggedSearch.objects.create( term=request.GET['q'][:200], results=events.count(), page=page, user=request.user.is_authenticated() and request.user or None ) request.session['logged_search'] = ( logged_search.pk, time.time() ) elif request.GET.get('q'): context['search_error'] = form.errors['q'] else: context['events'] = [] context['form'] = form return render(request, 'search/home.html', context) def _find_channels(q): search_escaped = utils.make_or_query(q) sql = """ to_tsvector('english', name) @@ plainto_tsquery('english', %s) OR slug ILIKE %s """ channels_qs = Channel.objects.all().extra( where=[sql], params=[ search_escaped, search_escaped, ], select={ 'name_highlit': ( "ts_headline('english', name, " "plainto_tsquery('english', %s))" ), 'rank_name': ( "ts_rank_cd(to_tsvector('english', name), " "plainto_tsquery('english', %s))" ), }, select_params=[ search_escaped, search_escaped, ] ) subchannel_counts = {} qs = ( Channel.objects .filter(parent__isnull=False) .values('parent_id') .order_by() .annotate(Count('parent')) ) for each in qs: subchannel_counts[each['parent_id']] = each['parent__count'] event_counts = {} qs = ( Event.channels.through.objects.filter(channel__in=channels_qs) .values('channel_id') .annotate(Count('channel')) ) for each in qs: event_counts[each['channel_id']] = each['channel__count'] channels = [] for channel in channels_qs[:5]: channel._event_count = event_counts.get(channel.id, 0) channel._subchannel_count = subchannel_counts.get(channel.id, 0) channels.append(channel) return channels def _search(qs, q, **options): if 'tags' in options: qs = qs.filter(tags__in=options['tags']) if 'channels' in options: qs = qs.filter(channels__in=options['channels']) if options.get('privacy_filter'): qs = qs.filter(**options['privacy_filter']) elif options.get('privacy_exclude'): qs = qs.exclude(**options['privacy_exclude']) if q and options.get('fuzzy'): sql = """ ( 
to_tsvector('english', title) @@ to_tsquery('english', %s) OR to_tsvector('english', description || ' ' || short_description) @@ to_tsquery('english', %s) OR to_tsvector('english', transcript) @@ to_tsquery('english', %s) ) """ search_escaped = utils.make_or_query(q) elif q: sql = """ ( to_tsvector('english', title) @@ plainto_tsquery('english', %s) OR to_tsvector('english', description || ' ' || short_description) @@ plainto_tsquery('english', %s) OR to_tsvector('english', transcript) @@ plainto_tsquery('english', %s) ) """ search_escaped = q if q: qs = qs.extra( where=[sql], params=[search_escaped, search_escaped, search_escaped], select={ 'title_highlit': ( "ts_headline('english', title, " "plainto_tsquery('english', %s))" ), 'desc_highlit': ( "ts_headline('english', short_description, " "plainto_tsquery('english', %s))" ), 'transcript_highlit': ( "ts_headline('english', transcript, " "plainto_tsquery('english', %s))" ), 'rank_title': ( "ts_rank_cd(to_tsvector('english', title), " "plainto_tsquery('english', %s))" ), 'rank_desc': ( "ts_rank_cd(to_tsvector('english', description " "|| ' ' || short_description), " "plainto_tsquery('english', %s))" ), 'rank_transcript': ( "ts_rank_cd(to_tsvector('english', transcript), " "plainto_tsquery('english', %s))" ), }, select_params=[ search_escaped, search_escaped, search_escaped, search_escaped, search_escaped, search_escaped ], ) qs = qs.order_by('-rank_title', '-start_time', '-rank_desc') else: qs = qs.order_by('-start_time') return qs @require_POST @login_required @transaction.atomic() def savesearch(request): q = request.POST.get('q', '').strip() if not q: return http.HttpResponseBadRequest('no q') form = forms.SearchForm(request.POST) if not form.is_valid(): return http.HttpResponseBadRequest(form.errors) title = form.cleaned_data['q'] rest, params = split_search(title, ('tag', 'channel')) tags = None channels = None if params.get('tag'): tags = Tag.objects.filter(name__iexact=params['tag']) if tags: title = rest if 
params.get('channel'): channels = Channel.objects.filter( name__iexact=params['channel'] ) if channels: title = rest filters = {} if q: filters['title'] = { 'include': title } if tags: filters['tags'] = { 'include': [tag.id for tag in tags], } if channels: filters['channels'] = { 'include': [channel.id for channel in channels], } for other in SavedSearch.objects.filter(user=request.user): if other.filters == filters: return redirect('search:savedsearch', id=other.id) savedsearch = SavedSearch.objects.create( user=request.user, filters=filters, ) messages.success( request, 'Search saved' ) return redirect('search:savedsearch', id=savedsearch.id) @login_required @transaction.atomic() def savedsearch(request, id=None): savedsearch = get_object_or_404(SavedSearch, id=id) if request.method == 'POST': forked = False if savedsearch.user != request.user: forked = True savedsearch = SavedSearch.objects.create( user=request.user, name=savedsearch.name, filters=savedsearch.filters, ) form = forms.SavedSearchForm(request.POST) if form.is_valid(): data = form.export_filters() savedsearch.name = form.cleaned_data['name'] savedsearch.filters = data savedsearch.save() if forked: messages.success( request, 'Saved Search forked and saved' ) else: messages.success( request, 'Saved Search saved' ) return redirect('search:savedsearch', id=savedsearch.id) elif request.GET.get('sample'): events = savedsearch.get_events() return http.JsonResponse({'events': events.count()}) else: data = forms.SavedSearchForm.convert_filters( savedsearch.filters, pks=True ) data['name'] = savedsearch.name form = forms.SavedSearchForm(data) context = { 'savedsearch': savedsearch, 'form': form, 'use_findable': True, } return render(request, 'search/savesearch.html', context) @login_required @transaction.atomic() def new_savedsearch(request): if request.method == 'POST': form = forms.SavedSearchForm(request.POST) if form.is_valid(): data = form.export_filters() SavedSearch.objects.create( user=request.user, 
filters=data, name=form.cleaned_data['name'], ) messages.success( request, 'Saved Search saved' ) return redirect('search:savedsearches') else: form = forms.SavedSearchForm() context = { 'form': form, 'use_findable': False, } return render(request, 'search/savesearch.html', context) @login_required def savedsearches(request): context = {} return render(request, 'search/savedsearches.html', context) @login_required @json_view def savedsearches_data(request): context = {} qs = SavedSearch.objects.filter( user=request.user ).order_by('-created') searches = [] for savedsearch in qs: item = { 'id': savedsearch.id, 'name': savedsearch.name, 'summary': savedsearch.summary, 'modified': savedsearch.modified.isoformat(), } searches.append(item) from airmozilla.main.context_processors import base feed = base(request)['get_feed_data']() if request.user.is_active: profile = get_profile_safely(request.user) if profile and profile.contributor: calendar_privacy = 'contributors' else: calendar_privacy = 'company' else: calendar_privacy = 'public' context['savedsearches'] = searches context['urls'] = { 'search:savedsearch': reverse('search:savedsearch', args=(0,)), 'search:home': reverse('search:home'), 'feed': feed['url'], 'ical': reverse('main:calendar_ical', args=(calendar_privacy,)), } return context @login_required @json_view def delete_savedsearch(request, id): savedsearch = get_object_or_404(SavedSearch, id=id) if savedsearch.user != request.user: return http.HttpResponseForbidden('Not yours to delete') savedsearch.delete() return {'ok': True}
true
true
f70f069823fd5ef6630117d9dc3a0f6b3c733dac
6,184
py
Python
aspect_sentiment.py
dcstrandberg/aspect-sentiment
0177888d4fe96d49b78e44f5bd24be619c93bf00
[ "MIT" ]
null
null
null
aspect_sentiment.py
dcstrandberg/aspect-sentiment
0177888d4fe96d49b78e44f5bd24be619c93bf00
[ "MIT" ]
null
null
null
aspect_sentiment.py
dcstrandberg/aspect-sentiment
0177888d4fe96d49b78e44f5bd24be619c93bf00
[ "MIT" ]
null
null
null
import spacy from textblob import TextBlob import pandas as pd # Import functions from other files from tweet_handlers import pullTweetsFromCSV, tweetPulls ### Declare functions to standardize, identify, and analyze input text # Will ultimately take in a list of tweets and return: # - Word counts # - Split of positive / negative aspects # - Brand identification? #visualizeText() is a funtion to diagram sentences for help troubleshooting # Inputs: # - nlp: an NLP object, # - txt = a string containing the sentence to be diagramed, # - writeFilename: a string containing the filename to write the HTML diagram to # Returns: # - writeFilename: the path of the file that contains the HTML diagram def visualizeText(nlp, txt, writeFilename): doc = nlp(txt) html = spacy.displacy.render(doc, style='dep') filePath = './' + writeFilename + '.html' with open(filePath, 'w') as f: f.write(html) return filePath #extractDescriptors() is a funtion to pull aspects and descriptors from a list of sentences # Inputs: # - nlp: an NLP object, # - sentenceList: a list of strinsg containing the sentences to be analyzed # Outputs: # - list of dictionaries containing 'aspect' and 'description' -- not broken by tweet def extractDescriptors(nlp, sentenceList): #We'll ultimately return this aspects list aspects = [] aspects_lemma = [] attributes = [] attributes_lemma = [] #We will iterate through the sentences for i, aSentence in enumerate( sentenceList ): if i % 100 == 0: print("Tweet# ", str(i)) doc = nlp(aSentence) for token in doc: ###TODO: # Currently there's no standardization that makes it a 1:1 Noun + Adjective, so that needs to be fixed # Also need to add in a case that checks for pronoun resolution and sees what we can do about that # We need to identify each noun, and find its descendants that are (pos_ == 'ADJ' or pos_ == 'VERB') and (dep_ == 'amod' or dep_ == 'acl') # Modifying rule to examine ALL nouns, not just the subject of the sentence #if token.dep_ == 'nsubj' and token.pos_ == 
'NOUN': if (token.pos_ == 'ADJ' or token.pos_ == 'VERB') and (token.dep_ == 'amod' or token.dep_ == 'acl'): #Now append the things aspects.append (token.head.text) aspects_lemma.append(token.head.lemma_) attributes.append( token.text ) attributes_lemma.append( token.lemma_ ) return ( aspects , attributes, aspects_lemma, attributes_lemma ) # Need a function that pulls attributes for each keyword in the tweet DF, since we need them to be kept separate # extractTweetAttributes: # Takes a DF of tweets, keywords, etc. and pulls out adjectives for each # Inputs: # - nlp: an NLP object, # - tweet_df: pandas dataframe containing colums: # - Tweet # - Keyword # - Spanish # - Date # Returns: # - attribute_df: dataframe containing the list of... # ...aspects & attributes for each keyword / spanish pair def extractTweetAttributes(nlp, tweet_df): #define return df attribute_df = pd.DataFrame( columns = [ 'Keyword', 'Spanish', 'aspect', 'attribute', 'aspect_lemma', 'attribute_lemma' ]) # Now create a set for the different keywords and spanish words keySet = set( tweet_df['Keyword'] ) for aKey in keySet: print("Extracting ", aKey) spanishWord = tweet_df.loc[ tweet_df['Keyword'] == aKey ]['Spanish'].iloc[0] # And this is where we actually add the various analyses ( aspectList , attributeList, aspectList_lemma, attributeList_lemma ) = extractDescriptors( nlp, tweet_df[ tweet_df['Keyword'] == aKey ]['tweet'] ) # Now that we've got the data, create lookup lists for the Keyword & Spanish words keyList = [aKey] * len(aspectList) spanishList = [spanishWord] * len(aspectList) temp_df = pd.DataFrame({ 'Keyword': keyList, 'Spanish': spanishList, 'aspect': aspectList, 'attribute': attributeList, 'aspect_lemma': aspectList_lemma, 'attribute_lemma': attributeList_lemma }) # Finally, append the data for this keyword to the attribute dataframe attribute_df = attribute_df.append( temp_df ) return attribute_df def countAttributes( aspect_df ): temp_df = pd.DataFrame({ 'Keyword': 
aspect_df['Keyword'], 'Spanish': aspect_df['Spanish'], 'aspect': aspect_df['aspect_lemma'], 'attribute': aspect_df['attribute_lemma'] }) return temp_df.value_counts() # In the main, this is where the tweet files are loaded... # ...and routed through the analysis functions if __name__ == "__main__": print("In the main") # Create the NLP object that will be used for all the text processing #nlp = spacy.load("en_core_web_sm") # We're actually using a spanish NLP object instead of an English one nlp = spacy.load("es_core_news_sm") # Pull in CSV files that hold all the tweets tweetFileList = [ './tweet_data/tweet_db_08.27.2021.csv' ] # Create the DF of tweets from the CSV File tweet_df = pullTweetsFromCSV( tweetFileList )#, fileEncoding='ANSI' ) # Instead of pulling tweets from a file, we're going to get new tweets # First we need to designate a list of english + spanish keywords to search for keyword_df = pd.read_csv('./keyword_list.csv') #tweet_df = tweetPulls( keyword_df ) #Save the tweet-df because of errors #tweet_df.to_csv('./tweet_data/tweet_db_08.27.2021.csv')#, encoding='ANSI') # Run the tweets through the attribute extractor aspect_df = extractTweetAttributes ( nlp, tweet_df) # Run the aspects & attributes through a modified version of the wordcount function count_df = countAttributes( aspect_df ) # - Not to mention run some sort of pronoun resolution count_df.to_csv('./tweet_data/aspect_count_08.27.2021.csv')
35.54023
159
0.656856
import spacy from textblob import TextBlob import pandas as pd from tweet_handlers import pullTweetsFromCSV, tweetPulls ath = './' + writeFilename + '.html' with open(filePath, 'w') as f: f.write(html) return filePath def extractDescriptors(nlp, sentenceList): aspects = [] aspects_lemma = [] attributes = [] attributes_lemma = [] #We will iterate through the sentences for i, aSentence in enumerate( sentenceList ): if i % 100 == 0: print("Tweet# ", str(i)) doc = nlp(aSentence) for token in doc: ###TODO: # Currently there's no standardization that makes it a 1:1 Noun + Adjective, so that needs to be fixed if (token.pos_ == 'ADJ' or token.pos_ == 'VERB') and (token.dep_ == 'amod' or token.dep_ == 'acl'): aspects.append (token.head.text) aspects_lemma.append(token.head.lemma_) attributes.append( token.text ) attributes_lemma.append( token.lemma_ ) return ( aspects , attributes, aspects_lemma, attributes_lemma ) def extractTweetAttributes(nlp, tweet_df): attribute_df = pd.DataFrame( columns = [ 'Keyword', 'Spanish', 'aspect', 'attribute', 'aspect_lemma', 'attribute_lemma' ]) keySet = set( tweet_df['Keyword'] ) for aKey in keySet: print("Extracting ", aKey) spanishWord = tweet_df.loc[ tweet_df['Keyword'] == aKey ]['Spanish'].iloc[0] ( aspectList , attributeList, aspectList_lemma, attributeList_lemma ) = extractDescriptors( nlp, tweet_df[ tweet_df['Keyword'] == aKey ]['tweet'] ) keyList = [aKey] * len(aspectList) spanishList = [spanishWord] * len(aspectList) temp_df = pd.DataFrame({ 'Keyword': keyList, 'Spanish': spanishList, 'aspect': aspectList, 'attribute': attributeList, 'aspect_lemma': aspectList_lemma, 'attribute_lemma': attributeList_lemma }) # Finally, append the data for this keyword to the attribute dataframe attribute_df = attribute_df.append( temp_df ) return attribute_df def countAttributes( aspect_df ): temp_df = pd.DataFrame({ 'Keyword': aspect_df['Keyword'], 'Spanish': aspect_df['Spanish'], 'aspect': aspect_df['aspect_lemma'], 'attribute': 
aspect_df['attribute_lemma'] }) return temp_df.value_counts() # In the main, this is where the tweet files are loaded... # ...and routed through the analysis functions if __name__ == "__main__": print("In the main") # Create the NLP object that will be used for all the text processing #nlp = spacy.load("en_core_web_sm") # We're actually using a spanish NLP object instead of an English one nlp = spacy.load("es_core_news_sm") tweetFileList = [ './tweet_data/tweet_db_08.27.2021.csv' ] tweet_df = pullTweetsFromCSV( tweetFileList ) # First we need to designate a list of english + spanish keywords to search for keyword_df = pd.read_csv('./keyword_list.csv') #tweet_df = tweetPulls( keyword_df ) #Save the tweet-df because of errors #tweet_df.to_csv('./tweet_data/tweet_db_08.27.2021.csv')#, encoding='ANSI') # Run the tweets through the attribute extractor aspect_df = extractTweetAttributes ( nlp, tweet_df) # Run the aspects & attributes through a modified version of the wordcount function count_df = countAttributes( aspect_df ) # - Not to mention run some sort of pronoun resolution count_df.to_csv('./tweet_data/aspect_count_08.27.2021.csv')
true
true
f70f0787a0e23d8cb0acd7856c1396ee2bb4208f
61,694
py
Python
utils.py
williamhowardsnyder/OnClass
07b2917dbdf01a1de54771de3383bbaa4bb2f283
[ "MIT" ]
33
2019-10-22T17:46:21.000Z
2022-01-31T15:38:54.000Z
utils.py
williamhowardsnyder/OnClass
07b2917dbdf01a1de54771de3383bbaa4bb2f283
[ "MIT" ]
9
2019-11-20T21:58:49.000Z
2022-03-24T17:32:18.000Z
utils.py
williamhowardsnyder/OnClass
07b2917dbdf01a1de54771de3383bbaa4bb2f283
[ "MIT" ]
11
2019-10-30T22:42:53.000Z
2022-03-16T06:35:37.000Z
from anndata import read_h5ad import sys from time import time from scipy import stats, sparse import numpy as np import collections import pickle from sklearn.preprocessing import normalize import os from collections import Counter import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve import time import umap import copy from sklearn import preprocessing from fbpca import pca from sklearn.metrics import roc_auc_score, roc_curve from sklearn.metrics.pairwise import cosine_similarity from scanorama import VERBOSE, KNN, ALPHA, APPROX, SIGMA #from libs import * from scanorama import find_alignments,merge_datasets,process_data,transform,vstack from sklearn.utils.graph_shortest_path import graph_shortest_path from scipy.sparse.linalg import svds, eigs nn_nhidden = [1000] rsts = [0.5,0.6,0.7,0.8] dfs_depth = 1 co_dim = 5 keep_prob = 1.0 use_diagonal = True max_iter = 20 niter = 5 def translate_paramter(ps): s = [] for p in ps: if isinstance(p, list): p = [str(i) for i in p] p = '.'.join(p) s.append(p) else: s.append(str(p)) s = '_'.join(s) return s pname = translate_paramter([max_iter]) def make_folder(folder): if not os.path.exists(folder): os.makedirs(folder) return folder def create_propagate_networks(dname, l2i, onto_net, cls2cls, ontology_nlp_file, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]): ncls = np.shape(cls2cls)[0] if dname != 'allen': onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file) #network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls) network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls, diss = diss, thress = thress) else: stack_net_bin = np.zeros((ncls,ncls)) for n1 in onto_net: for n2 in 
onto_net[n1]: if n1==n2: continue stack_net_bin[n1,n2] = 1 stack_net_bin[n2,n1] = 1 network = [RandomWalkRestart(stack_net_bin, rst) for rst in rsts] return network def fine_nearest_co_using_nlp(sentences,co2emb,obo_file,nlp_mapping_cutoff=0.8): co2name, name2co = get_ontology_name(obo_file = obo_file) from sentence_transformers import SentenceTransformer model = SentenceTransformer('bert-base-nli-mean-tokens') sentences = np.array([sentence.lower() for sentence in sentences]) sentence_embeddings = model.encode(sentences) co_embeddings = [] cos = [] for co in co2emb: co_embeddings.append(co2emb[co]) cos.append(co) co_embeddings = np.array(co_embeddings) sent2co = {} for sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))): scs = cosine_similarity(co_embeddings, embedding.reshape(1,-1)) co_id = np.argmax(scs) sc = scs[co_id] if sc>nlp_mapping_cutoff: sent2co[sentence.lower()] = cos[co_id] names = set() for name in name2co: if name2co[name].upper() == cos[co_id]: names.add(name) #print (sentence, cos[co_id], sc, co2name[cos[co_id]],names) return sent2co def ImputeUnseenCls(y_vec, y_raw, cls2cls, nseen, knn=1): nclass = np.shape(cls2cls)[0] seen2unseen_sim = cls2cls[:nseen, nseen:] nngh = np.argsort(seen2unseen_sim*-1, axis = 0)[0,:] ncell = len(y_vec) y_mat = np.zeros((ncell, nclass)) y_mat[:,:nseen] = y_raw[:, :nseen] for i in range(ncell): if y_vec[i] == -1: #kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn] #if len(kngh) == 0: # continue y_mat[i,nseen:] = y_mat[i,nngh] y_mat[i,:nseen] -= 1000000 return y_mat def ImputeUnseenCls_Backup(y_vec, y_raw, cls2cls, nseen, knn=1): nclass = np.shape(cls2cls)[0] seen2unseen_sim = cls2cls[:nseen, nseen:] ncell = len(y_vec) y_mat = np.zeros((ncell, nclass)) y_mat[:,:nseen] = y_raw[:, :nseen] for i in range(ncell): if y_vec[i] == -1: kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn] if len(kngh) == 0: continue y_mat[i,:nseen] -= 1000000 y_mat[i,nseen:] = np.dot(y_raw[i,kngh], seen2unseen_sim[kngh,:]) 
return y_mat def find_gene_ind(genes, common_genes): gid = [] for g in common_genes: gid.append(np.where(genes == g)[0][0]) gid = np.array(gid) return gid def RandomWalkOntology(onto_net, l2i, ontology_nlp_file, ontology_nlp_emb_file, rst = 0.7): ncls = len(l2i) onto_net_nlp, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file) onto_net_nlp = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb) onto_net_mat = np.zeros((ncls, ncls)) for n1 in onto_net: for n2 in onto_net[n1]: if n1==n2: continue onto_net_mat[n1,n2] = onto_net_nlp[n1, n2] onto_net_mat[n2,n1] = onto_net_nlp[n2, n1] onto_net_rwr = RandomWalkRestart(onto_net_mat, rst) return onto_net_rwr def process_expression(c2g_list): #this data process function is motivated by ACTINN, please check ACTINN for more information. c2g = np.vstack(c2g_list) c2g = c2g.T #print ('onclass d0',np.shape(c2g)) c2g = c2g[np.sum(c2g, axis=1)>0, :] #print (c2g) #print ('onclass d1',np.shape(c2g)) c2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000 c2g = np.log2(c2g+1) expr = np.sum(c2g, axis=1) #total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),] c2g = c2g[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),] #print (c2g) #print ('onclass d2',np.shape(c2g)) cv = np.std(c2g, axis=1) / np.mean(c2g, axis=1) c2g = c2g[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),] #print (c2g) #print ('onclass d3',np.shape(c2g)) c2g = c2g.T #print (c2g) #print ('onclass d4',np.shape(c2g)) c2g_list_new = [] index = 0 for c in c2g_list: ncell = np.shape(c)[0] c2g_list_new.append(c2g[index:index+ncell,:]) index = ncell return c2g_list_new def read_ontology_file(dname, data_folder): if 'allen' in dname: cell_type_network_file = data_folder + 'allen.ontology' cell_type_nlp_emb_file = None cl_obo_file = None if not os.path.isfile(cell_type_network_file): 
sys.error(cell_type_network_file + ' not found!') else: cell_type_network_file = data_folder + 'cl.ontology' cell_type_nlp_emb_file = data_folder + 'cl.ontology.nlp.emb' cl_obo_file = data_folder + 'cl.obo' if not os.path.isfile(cell_type_nlp_emb_file): sys.exit(cell_type_nlp_emb_file + ' not found!') if not os.path.isfile(cell_type_network_file): sys.exit(cell_type_network_file + ' not found!') if not os.path.isfile(cl_obo_file): sys.exit(cl_obo_file + ' not found!') return cell_type_nlp_emb_file, cell_type_network_file, cl_obo_file def read_data_file(dname, data_dir): if 'microcebus' in dname: tech = '10x' feature_file = data_dir + 'Lemur/' + dname +'.h5ad' filter_key={'method':tech } label_file = None gene_file = '' label_key = 'cell_ontology_class' elif 'muris' in dname: tech = dname.split('_')[1] feature_file = data_dir + 'Tabula_Muris_Senis/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad' filter_key = {} label_file = None gene_file = '' batch_key = '' label_key = 'cell_ontology_class' elif 'sapiens' in dname: feature_file = data_dir + 'sapiens/' + 'Pilot1_Pilot2_decontX_Oct2020.h5ad' filter_key = {} label_file = None gene_file = '' batch_key = '' label_key = 'cell_ontology_type' elif 'allen' in dname: feature_file = data_dir + '/Allen_Brain/features.pkl' label_file = data_dir + '/Allen_Brain/labels.pkl' gene_file = data_dir + '/Allen_Brain/genes.pkl' label_key = '' filter_key = {} elif 'krasnow' in dname: tech = dname.split('_')[1] feature_file = data_dir + '/HLCA/'+tech+'_features.pkl' label_file = data_dir + '/HLCA/'+tech+'_labels.pkl' gene_file = data_dir + '/HLCA/'+tech+'_genes.pkl' label_key = '' filter_key = {} else: sys.exit('wrong dname '+dname) if feature_file.endswith('.pkl'): return feature_file, filter_key, label_key, label_file, gene_file elif feature_file.endswith('.h5ad'): return feature_file, filter_key, label_key, label_file, gene_file sys.exit('wrong file suffix') def read_singlecell_data(dname, data_dir, ontology_dir, nsample = 
500000000, read_tissue = False, exclude_non_leaf_ontology = True): if 'microcebus' in dname: tech = '10x' #file = data_dir + 'TMS_official_060520/' + 'tabula-microcebus_smartseq2-10x_combined_annotated_filtered_gene-labels-correct.h5ad' file = data_dir + 'TMS_official_060520/' + dname +'.h5ad' filter_key={'method':tech } batch_key = ''#original_channel ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp' ontology_file = ontology_dir + '/cell_ontology/cl.ontology' cl_obo_file = ontology_dir + '/cell_ontology/cl.obo' if not read_tissue: feature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) else: feature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) elif 'muris' in dname: tech = dname.split('_')[1] file = data_dir + 'TMS_official_060520/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad' filter_key = {} batch_key = '' ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp' ontology_file = ontology_dir + '/cell_ontology/cl.ontology' cl_obo_file = ontology_dir + '/cell_ontology/cl.obo' if not read_tissue: feature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) else: feature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, 
label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) elif 'allen_part' in dname: feature_file = data_dir + 'Allen/matrix_part.csv' label_file = data_dir + 'Allen/metadata.csv' ontology_file = data_dir + 'Allen/cell_type_ontology' ontology_nlp_file = None feature, label, genes = parse_csv(feature_file, label_file, nsample = nsample, label_key='cell_type_accession_label', exclude_non_ontology = True, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file) elif 'allen' in dname: feature_file = data_dir + 'Allen/features.pkl' label_file = data_dir + 'Allen/labels.pkl' gene_file = data_dir + 'Allen/genes.pkl' ontology_file = data_dir + 'Allen/cell_type_ontology' ontology_nlp_file = None feature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file) elif 'krasnow' in dname: tech = dname.split('_')[1] feature_file = data_dir + 'Krasnow/'+tech+'_features.pkl' label_file = data_dir + 'Krasnow/'+tech+'_labels.pkl' gene_file = data_dir + 'Krasnow/'+tech+'_genes.pkl' ontology_file = ontology_dir + '/cell_ontology/cl.ontology' ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp' cl_obo_file = ontology_dir + '/cell_ontology/cl.obo' feature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file) else: sys.exit('wrong dname '+dname) if read_tissue: return feature, label, genes, tissues, ontology_nlp_file, ontology_file else: return feature, label, genes, ontology_nlp_file, ontology_file def parse_krasnow(feature_file, label_file, gene_file, seed = 1, nsample = 1000,exclude_non_leaf_ontology = True, exclude_non_ontology = True, cell_ontology_file=None): np.random.seed(seed) if 
feature_file.endswith('.pkl'): features = pickle.load(open(feature_file, 'rb')) labels = pickle.load(open(label_file, 'rb')) genes = pickle.load(open(gene_file, 'rb')) ncell, ngene = np.shape(features) assert(ncell == len(labels)) assert(ngene == len(genes)) index = np.random.choice(ncell,min(nsample,ncell),replace=False) features = features[index, :] labels = labels[index] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) #print (len(exclude_terms),'non leaf terms are excluded') features = features[new_ids, :] labels = labels[new_ids] genes = [x.upper() for x in genes] genes = np.array(genes) return features, labels, genes def parse_pkl(feature_file, label_file, gene_file, seed = 1, nsample = 10000000,exclude_non_leaf_ontology = True, cell_ontology_file=None): np.random.seed(seed) if feature_file.endswith('.pkl'): features = pickle.load(open(feature_file, 'rb')) labels = pickle.load(open(label_file, 'rb')) genes = pickle.load(open(gene_file, 'rb')) ncell, ngene = np.shape(features) assert(ncell == len(labels)) assert(ngene == len(genes)) index = np.random.choice(ncell,ncell,replace=False) features = features[index, :] labels = labels[index] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) #print (len(exclude_terms),'non leaf terms are excluded') features = features[new_ids, :] labels = labels[new_ids] genes = [x.upper() for x in genes] genes = np.array(genes) return features, labels, genes def select_high_var_genes(train_X, test_X, ngene = 200): mat = np.vstack((train_X, test_X)) #mat = mat.todense() gstd = np.std(mat, axis=0) best_genes = np.argsort(gstd*-1) best_genes = best_genes[:ngene] return train_X[:, best_genes], test_X[:, best_genes] def emb_cells(train_X, test_X, dim=20): if dim==-1: return np.log1p(train_X.todense()), np.log1p(test_X.todense()) train_X = np.log1p(train_X) test_X = np.log1p(test_X) train_X = 
preprocessing.normalize(train_X, axis=1) test_X = preprocessing.normalize(test_X, axis=1) ntrain = np.shape(train_X)[0] mat = sparse.vstack((train_X, test_X)) U, s, Vt = pca(mat, k=dim) # Automatically centers. X = U[:, range(dim)] * s[range(dim)] return X[:ntrain,:], X[ntrain:,:] def write_markers(fname, markers): ## Write marker genes to file fmarker_genes = open(fname,'w') for t in markers: fmarker_genes.write(t+'\t') g2pv = sorted(markers[t].items(), key=lambda item: item[1]) for g,pv in g2pv: fmarker_genes.write(g+'(pv:'+'{:.2e}'.format(pv)+')\t') fmarker_genes.write('\n') fmarker_genes.close() def calculate_markers(cell2term, cell2gene, genes, terms, topk_cells=500, only_over_expressed = True, return_k_genes = 100): ncell, nterm = np.shape(cell2term) ngene = np.shape(cell2gene)[1] assert(ncell == np.shape(cell2gene)[0]) markers = collections.defaultdict(dict) for t in range(nterm): scs = np.argsort(cell2term[:,t]) k_bot_cells = scs[:topk_cells] k_top_cells = scs[ncell-topk_cells:] pv = scipy.stats.ttest_ind(cell2gene[k_top_cells,:], cell2gene[k_bot_cells,:], axis=0)[1] #* ngene top_mean = np.mean(cell2gene[k_top_cells,:],axis=0) bot_mean = np.mean(cell2gene[k_bot_cells,:],axis=0) if only_over_expressed: for g in range(ngene): if top_mean[g] < bot_mean[g]: pv[g] = 1. pv_sort = list(np.argsort(pv)) #for i in range(return_k_genes): #markers[terms[t]][genes[pv_sort[i]]] = pv[pv_sort[i]] markers[terms[t]] = pv for i,p in enumerate(pv): if np.isnan(p): pv[i] = 1. #markers[terms[t]][str(pv_sort[i])] = pv[pv_sort[i]] return markers def peak_h5ad(file): ''' peak the number of cells, classes, genes in h5ad file ''' x = read_h5ad(file) #print (np.shape(x.X)) #print (x.X[:10][:10]) #print (x.obs.keys()) ncell, ngene = np.shape(x.X) nclass = len(np.unique(x.obs['free_annotation'])) #print (np.unique(x.obs['free_annotation'])) f2name = {} sel_cell = 0. 
for i in range(ncell): if x.obs['method'][i]!='10x': continue free = x.obs['free_annotation'][i] name = x.obs['cell_ontology_class'][i] f2name[free] = name sel_cell += 1 #return f2name #for key in x.obs.keys(): # print (key, np.unique(x.obs[key])) return sel_cell, ngene, nclass #for i in range(10): # print (x.obs['method'][i], x.obs['channel_no_10x'][i]) #for key in x.obs.keys(): # print (key, np.unique(x.obs[key])) #return index def get_onotlogy_parents(GO_net, g): term_valid = set() ngh_GO = set() ngh_GO.add(g) while len(ngh_GO) > 0: for GO in list(ngh_GO): for GO1 in GO_net[GO]: ngh_GO.add(GO1) ngh_GO.remove(GO) term_valid.add(GO) return term_valid def exclude_non_ontology_term(cl_obo_file, labels, label_key): co2name, name2co = get_ontology_name(cl_obo_file) new_labs = [] new_ids = [] if label_key!='cell_ontology_class' and label_key!='cell_ontology_id': use_co = False for kk in np.unique(labels): if kk.lower().startswith('cl:'): use_co = True break else: if label_key == 'cell_ontology_class': use_co = False else: use_co = True for i in range(len(labels)): l = labels[i] if not use_co: if l.lower() in name2co.keys(): new_labs.append(name2co[l.lower()]) new_ids.append(i) else: if l.lower() in co2name.keys(): new_labs.append(l.lower()) new_ids.append(i) new_labs = np.array(new_labs) new_ids = np.array(new_ids) return new_ids, new_labs def parse_raw_h5ad(file,seed=1,nsample=1e10,tissue_key='tissue',label_key='cell_ontology_class', read_tissue = True, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None): np.random.seed(seed) x = read_h5ad(file) ncell = np.shape(x.raw.X)[0] select_cells = set(range(ncell)) for key in filter_key: value = filter_key[key] select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0]) select_cells = sorted(select_cells) feature = x.raw.X[select_cells, :] labels = np.array(x.obs[label_key].tolist())[select_cells] if read_tissue: tissues = 
np.array(x.obs[tissue_key].tolist())[select_cells] if batch_key=='' or batch_key not in x.obs.keys(): batch_labels = np.ones(len(labels)) else: batch_labels = np.array(x.obs[batch_key].tolist())[select_cells] genes = x.var.index ncell = len(select_cells) if exclude_non_ontology: new_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key) feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) #print (len(exclude_terms),'non leaf terms are excluded') feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] labels = labels[new_ids] if read_tissue: tissues = tissues[new_ids] ncell = len(labels) index = np.random.choice(ncell,min(nsample,ncell),replace=False) batch_labels = batch_labels[index] feature = feature[index, :] # cell by gene matrix labels = labels[index] if read_tissue: tissues = tissues[index] genes = x.var.index corrected_feature = run_scanorama_same_genes(feature, batch_labels) corrected_feature = corrected_feature.toarray() genes = [x.upper() for x in genes] genes = np.array(genes) if read_tissue: assert(len(tissues) == len(labels)) return corrected_feature, labels, genes, tissues else: return corrected_feature, labels, genes def select_cells_based_on_keys(x, features, tissues = None, labels = None, filter_key = None): ncell = np.shape(x.X)[0] select_cells = set(range(ncell)) for key in filter_key: value = filter_key[key] select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0]) select_cells = sorted(select_cells) features = features[select_cells,: ] if labels is not None: labels = labels[select_cells] if tissues is not None: tissues = tissues[select_cells] x = x[select_cells,:] return features, labels, tissues, x def find_marker_genes(train_X, pred_Y_all, genes, i2l, topk = 50): cor = corr2_coeff(pred_Y_all[:,:].T, train_X[:,:].T) cor = np.nan_to_num(cor) # cell type to gene nl = len(i2l) 
c2g = {} for i in range(nl): gl = np.argsort(cor[i,:]*-1) c2g[i2l[i]] = {} for j in range(topk): c2g[i2l[i]][genes[gl[j]]] = cor[i, gl[j]] return c2g, cor def use_pretrained_model(OnClass, genes, test_X, models = []): last_l2i = {} last_i2l = {} pred_Y_all_models = 0. ngene = len(genes) for model in models: OnClass.BuildModel(OnClass.co2emb, ngene = ngene, use_pretrain = model) print ('Build model finished for ',model) pred_Y_seen, pred_Y_all, pred_label = OnClass.Predict(test_X, test_genes = genes) print ('Predict for ',model) pred_Y_all = pred_Y_all.T / (pred_Y_all.T.sum(axis=1)[:, np.newaxis] + 1) pred_Y_all = pred_Y_all.T if len(last_l2i)>0: new_ct_ind = [] for i in range(len(last_i2l)): l = last_i2l[i] new_ct_ind.append(OnClass.co2i[l]) pred_Y_all = pred_Y_all[:, np.array(new_ct_ind)] pred_Y_all_models += pred_Y_all else: last_l2i = OnClass.co2i last_i2l = OnClass.i2co pred_Y_all_models = pred_Y_all return pred_Y_all_models def read_data(feature_file, cell_ontology_ids, exclude_non_leaf_ontology = False, ct_mapping_key = {}, tissue_key = None, seed = 1, filter_key = None, AnnData_label_key=None, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, label_file=None, cl_obo_file = None, cell_ontology_file = None): np.random.seed(seed) x = read_h5ad(feature_file) ncell = np.shape(x.X)[0] dataset = x.X.toarray() genes = np.array([x.upper() for x in x.var.index]) if tissue_key is not None: tissues = np.array(x.obs[tissue_key].tolist()) else: tissues = None if AnnData_label_key is None and label_file is None: print ('no label file is provided') labels = None dataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key) return dataset, genes, labels, tissues, x if AnnData_label_key is not None: labels = x.obs[AnnData_label_key].tolist() else: fin = open(label_file) labels = [] for line in fin: labels.append(line.strip()) fin.close() labels = np.array(labels) dataset, labels, tissues, x = 
select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key) ind, labels, unfound_labs = map_and_select_labels(labels, cell_ontology_ids, cl_obo_file, ct_mapping_key = ct_mapping_key, nlp_mapping = nlp_mapping, co2emb = co2emb, nlp_mapping_cutoff = nlp_mapping_cutoff, cl_obo_file = cl_obo_file) if tissue_key is not None: tissues = tissues[ind] dataset = dataset[ind, :] x = x[ind, :] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) tissues = tissues[new_ids] dataset = dataset[new_ids, :] labels = labels[new_ids] x = x[new_ids, :] ncell = np.shape(dataset)[0] index = np.random.choice(ncell,ncell,replace=False) dataset = dataset[index, :] # cell by gene matrix labels = labels[index] if tissue_key is not None: tissues = tissues[index] return dataset, genes, labels, tissues, x def exact_match_co_name_2_co_id(labels, lab2co, cl_obo_file = None): if cl_obo_file is None: return lab2co co2name, name2co = get_ontology_name(obo_file = cl_obo_file) for label in labels: if label.lower() in name2co: lab2co[label.lower()] = name2co[label.lower()] for name in name2co: lab2co[name.lower()] = name2co[name] return lab2co def map_and_select_labels(labels, cell_ontology_ids, obo_file, ct_mapping_key = {}, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, cl_obo_file = None): lab2co = {} if nlp_mapping: if co2emb is None: sys.exit('Please provide cell type embedding to do NLP-based mapping.') lab2co = fine_nearest_co_using_nlp(np.unique(labels), co2emb, obo_file,nlp_mapping_cutoff = nlp_mapping_cutoff) lab2co = exact_match_co_name_2_co_id(np.unique(labels), lab2co, cl_obo_file = cl_obo_file) for ct in ct_mapping_key: lab2co[ct_mapping_key[ct]] = lab2co[ct] ind = [] lab_id = [] unfound_labs = set() for i,l in enumerate(labels): if l in cell_ontology_ids: ind.append(i) lab_id.append(l) elif l.lower() in lab2co: ind.append(i) lab_id.append(lab2co[l.lower()]) else: 
unfound_labs.add(l) frac = len(ind) * 1. / len(labels) ind = np.array(ind) labels = np.array(lab_id) unfound_labs = set(unfound_labs) warn_message = 'Warning: Only: %f precentage of labels are in the Cell Ontology. The remaining cells are excluded! Consider using NLP mapping and choose a small mapping cutoff (nlp_mapping_cutoff)' % (frac * 100) if frac < 0.5: print (warn_message) print ('Here are unfound labels:',unfound_labs) return ind, labels, unfound_labs def parse_h5ad(file,seed=1,nsample=1e10,label_key='cell_ontology_class', read_tissue = False, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None): ''' read h5ad file feature: cell by gene expression label: cell ontology class genes: gene names HGNC ''' np.random.seed(seed) x = read_h5ad(file) ncell = np.shape(x.X)[0] select_cells = set(range(ncell)) for key in filter_key: value = filter_key[key] select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0]) select_cells = sorted(select_cells) feature = x.X[select_cells, :] labels = np.array(x.obs[label_key].tolist())[select_cells] if read_tissue: tissues = np.array(x.obs['tissue'].tolist())[select_cells] if batch_key=='' or batch_key not in x.obs.keys(): batch_labels = np.ones(len(labels)) else: batch_labels = np.array(x.obs[batch_key].tolist())[select_cells] genes = x.var.index ncell = len(select_cells) if exclude_non_ontology: new_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key) feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) #print (len(exclude_terms),'non leaf terms are excluded') feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] labels = labels[new_ids] if read_tissue: tissues = tissues[new_ids] ncell = len(labels) index = np.random.choice(ncell,min(nsample,ncell),replace=False) batch_labels 
= batch_labels[index] feature = feature[index, :] # cell by gene matrix labels = labels[index] if read_tissue: tissues = tissues[index] genes = x.var.index #corrected_feature = run_scanorama_same_genes(feature, batch_labels) corrected_feature = feature.toarray() genes = [x.upper() for x in genes] genes = np.array(genes) if read_tissue: assert(len(tissues) == len(labels)) return corrected_feature, labels, genes, tissues else: return corrected_feature, labels, genes def exclude_parent_child_nodes(cell_ontology_file,labels): uniq_labels = np.unique(labels) excludes = set() net = collections.defaultdict(dict) fin = open(cell_ontology_file) for line in fin: s,p = line.strip().split('\t') net[s][p] = 1 #p is parent fin.close() for n in list(net.keys()): ngh = get_ontology_parents(net, n) for n1 in ngh: net[n][n1] = 1 for l1 in uniq_labels: for l2 in uniq_labels: if l1 in net[l2] and l1!=l2: #l1 is l2 parent excludes.add(l1) #print (excludes) new_ids = [] for i in range(len(labels)): if labels[i] not in excludes: new_ids.append(i) new_ids = np.array(new_ids) return new_ids, excludes def corr2_coeff(A, B): # Rowwise mean of input arrays & subtract from input arrays themeselves A_mA = A - A.mean(1)[:, None] B_mB = B - B.mean(1)[:, None] # Sum of squares across rows ssA = (A_mA**2).sum(1) ssB = (B_mB**2).sum(1) # Finally get corr coeff return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None],ssB[None])) def extract_data_based_on_class(feats, labels, sel_labels): ind = [] for l in sel_labels: id = np.where(labels == l)[0] ind.extend(id) np.random.shuffle(ind) X = feats[ind,:] Y = labels[ind] return X, Y, ind def SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10): np.random.seed(random_state) cls = np.unique(all_Y) cls2ct = Counter(all_Y) ncls = len(cls) test_cls = list(np.random.choice(cls, int(ncls * nfold_cls), replace=False)) for c in cls2ct: if cls2ct[c] < nmin_size: test_cls.append(c) test_cls = 
np.unique(test_cls) #add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True) train_cls = [x for x in cls if x not in test_cls] train_cls = np.array(train_cls) train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls) test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls) if all_tissues is not None: train_tissues = all_tissues[train_ind] test_tissues = all_tissues[test_ind] train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split( train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_tissues = np.concatenate((test_tissues, train_tissues_test)) train_tissues = train_tissues_train else: train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split( train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_X = np.vstack((test_X, train_X_test)) test_Y = np.concatenate((test_Y, train_Y_test)) train_X = train_X_train train_Y = train_Y_train if all_tissues is not None: return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues else: return train_X, train_Y, test_X, test_Y ''' def SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10): np.random.seed(random_state) cls = np.unique(all_Y) cls2ct = Counter(all_Y) ncls = len(cls) rare_cls = [] not_rare_cls = [] for c in cls2ct: if cls2ct[c] < 2: continue elif cls2ct[c] < nmin_size: rare_cls.append(c) else: not_rare_cls.append(c) cls = np.concatenate((rare_cls, not_rare_cls)) ncls = len(cls) rare_cls = np.array(rare_cls) not_rare_cls = np.array(not_rare_cls) train_non_rare_cls = list(np.random.choice(not_rare_cls, int(len(not_rare_cls) * (1 - nfold_cls)), replace=False)) train_cls = np.concatenate((train_non_rare_cls, rare_cls)) test_cls = [x for x in cls if x not in train_cls] test_cls = 
np.array(test_cls) assert(len(test_cls) + len(train_cls) == ncls) assert(len(set(test_cls) & set(train_cls)) == 0) #add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True) train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls) test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls) if all_tissues is not None: train_tissues = all_tissues[train_ind] test_tissues = all_tissues[test_ind] train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split( train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_tissues = np.concatenate((test_tissues, train_tissues_test)) train_tissues = train_tissues_train else: train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split( train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_X = np.vstack((test_X, train_X_test)) test_Y = np.concatenate((test_Y, train_Y_test)) train_X = train_X_train train_Y = train_Y_train if all_tissues is not None: return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues else: return train_X, train_Y, test_X, test_Y ''' def LeaveOneOutTrainTest(all_X, all_Y, test_Y, all_tissues = None, random_state=10, nfold_sample = 0.2, nmin_size=10): np.random.seed(random_state) cls = np.unique(all_Y) cls2ct = Counter(all_Y) ncls = len(cls) test_cls = [test_Y] test_cls = np.unique(test_cls) #add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True) train_cls = [x for x in cls if x not in test_cls] train_cls = np.array(train_cls) train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls) test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls) if all_tissues is not None: train_tissues = all_tissues[train_ind] test_tissues = all_tissues[test_ind] 
train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split( train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_tissues = np.concatenate((test_tissues, train_tissues_test)) train_tissues = train_tissues_train else: train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split( train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_X = np.vstack((test_X, train_X_test)) test_Y = np.concatenate((test_Y, train_Y_test)) train_X = train_X_train train_Y = train_Y_train if all_tissues is not None: return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues else: return train_X, train_Y, test_X, test_Y def renorm(X): Y = X.copy() Y = Y.astype(float) ngene,nsample = Y.shape s = np.sum(Y, axis=0) #print s.shape() for i in range(nsample): if s[i]==0: s[i] = 1 if i < ngene: Y[i,i] = 1 else: for j in range(ngene): Y[j,i] = 1. / ngene Y[:,i] = Y[:,i]/s[i] return Y def RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False): if use_torch: device = torch.device("cuda:0") nnode = A.shape[0] #print nnode if reset is None: reset = np.eye(nnode) nsample,nnode = reset.shape #print nsample,nnode P = renorm(A) P = P.T norm_reset = renorm(reset.T) norm_reset = norm_reset.T if use_torch: norm_reset = torch.from_numpy(norm_reset).float().to(device) P = torch.from_numpy(P).float().to(device) Q = norm_reset for i in range(1,max_iter): #Q = gnp.garray(Q) #P = gnp.garray(P) if use_torch: Q_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P)#.as_numpy_array() delta = torch.norm(Q-Q_new, 2) else: Q_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P)#.as_numpy_array() delta = np.linalg.norm(Q-Q_new, 'fro') Q = Q_new #print (i,Q) sys.stdout.flush() if delta < 1e-4: break if use_torch and not return_torch: Q = Q.cpu().numpy() return Q def DCA_vector(Q, dim): nnode = 
Q.shape[0] alpha = 1. / (nnode **2) Q = np.log(Q + alpha) - np.log(alpha); #Q = Q * Q'; [U, S, V] = svds(Q, dim); S = np.diag(S) X = np.dot(U, np.sqrt(S)) Y = np.dot(np.sqrt(S), V) Y = np.transpose(Y) return X,U,S,V,Y def read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file): ncls = len(l2i) net = np.zeros((ncls, ncls)) bin_net = np.zeros((ncls, ncls)) fin = open(ontology_nlp_file) for line in fin: s,p,wt = line.upper().strip().split('\t') wt = float(wt) net[l2i[s], l2i[p]] = np.exp(wt) net[l2i[p], l2i[s]] = np.exp(wt) bin_net[l2i[s], l2i[p]] = 1 bin_net[l2i[p], l2i[s]] = 1 fin.close() l2vec = {} fin = open(ontology_nlp_emb_file) for line in fin: w = line.upper().strip().split('\t') l2vec[w[0]] = [] dim = len(w)-1 for i in range(1,len(w)): l2vec[w[0]].append(float(w[i])) fin.close() l2vec_mat = np.zeros((ncls, dim)) for l in l2vec: if l.upper() not in l2i: continue l2vec_mat[l2i[l.upper()],:] = l2vec[l] ''' net_sum = np.sum(net,axis=0) for i in range(ncls): if net_sum[i] == 0: net[i,i] = 1. 
net[:,i] /= np.sum(net[:,i]) #net = net / net.sum(axis=1)[:, np.newaxis] ''' return net, bin_net, l2vec_mat def GetReverseNet(onto_net): onto_net_rev = collections.defaultdict(dict) for a in onto_net: for b in onto_net[a]: onto_net_rev[b][a] = 1 return onto_net_rev def ParseCLOnto(train_Y, ontology_nlp_file, ontology_file, co_dim=5, co_mi=3, dfs_depth = 1, combine_unseen = False, add_emb_diagonal = True, use_pretrain = None, use_seen_only = True):# unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat = create_labels(train_Y, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth, combine_unseen = combine_unseen) Y_emb = emb_ontology(i2l, ontology_nlp_file, ontology_file, dim = co_dim, mi=co_mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l) if add_emb_diagonal: Y_emb = np.column_stack((np.eye(len(i2l)), Y_emb)) return unseen_l, l2i, i2l, onto_net, Y_emb, onto_net_mat def graph_embedding(A, i2l, mi=0, dim=20,use_seen_only=True,unseen_l=None): nl = np.shape(A)[0] if use_seen_only: seen_ind = [] unseen_ind = [] for i in range(nl): if i2l[i] in unseen_l: unseen_ind.append(i) else: seen_ind.append(i) seen_ind = np.array(seen_ind) unseen_ind = np.array(unseen_ind) #if len(seen_ind) * 0.8 < dim: # dim = int(len(seen_ind) * 0.8) if mi==0 or mi == 1: sp = graph_shortest_path(A,method='FW',directed =False) else: sp = RandomWalkRestart(A, 0.8) if use_seen_only: sp = sp[seen_ind, :] sp = sp[:,seen_ind] X = np.zeros((np.shape(sp)[0],dim)) svd_dim = min(dim, np.shape(sp)[0]-1) if mi==0 or mi == 2: X[:,:svd_dim] = svd_emb(sp, dim=svd_dim) else: X[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0] if use_seen_only: X_ret = np.zeros((nl, dim)) X_ret[seen_ind,:] = X else: X_ret = X if mi==2 or mi == 3: sp *= -1 return sp, X_ret def cal_ontology_emb(ontology_nlp_file, ontology_file, dim=20, mi=3, use_pretrain = None, use_seen_only = True, unseen_l = None): if use_pretrain is None or not os.path.isfile(use_pretrain+'X.npy') or not 
os.path.isfile(use_pretrain+'sp.npy'): cl_nlp = collections.defaultdict(dict) if ontology_nlp_file is not None: fin = open(ontology_nlp_file) for line in fin: s,p,wt = line.upper().strip().split('\t') cl_nlp[s][p] = float(wt) cl_nlp[p][s] = float(wt) fin.close() fin = open(ontology_file) lset = set() s2p = {} for line in fin: w = line.strip().split('\t') s = w[0] p = w[1] if len(w)==2: if p in cl_nlp and s in cl_nlp[p]: wt = cl_nlp[p][s] else: wt = 1. else: wt = float(w[2]) if s not in s2p: s2p[s] = {} s2p[s][p] = wt lset.add(s) lset.add(p) fin.close() lset = np.sort(list(lset)) nl = len(lset) l2i = dict(zip(lset, range(nl))) i2l = dict(zip(range(nl), lset)) A = np.zeros((nl, nl)) for s in s2p: for p in s2p[s]: A[l2i[s], l2i[p]] = s2p[s][p] A[l2i[p], l2i[s]] = s2p[s][p] sp, X = graph_embedding(A, i2l, mi=mi, dim=dim, use_seen_only=use_seen_only, unseen_l=unseen_l) if use_pretrain is not None: i2l_file = use_pretrain+'i2l.npy' l2i_file = use_pretrain+'l2i.npy' X_file = use_pretrain+'X.npy' sp_file = use_pretrain+'sp.npy' np.save(X_file, X) np.save(i2l_file, i2l) np.save(l2i_file, l2i) np.save(sp_file, sp) else: i2l_file = use_pretrain+'i2l.npy' l2i_file = use_pretrain+'l2i.npy' X_file = use_pretrain+'X.npy' sp_file = use_pretrain+'sp.npy' X = np.load(X_file) i2l = np.load(i2l_file,allow_pickle=True).item() l2i = np.load(l2i_file,allow_pickle=True).item() sp = np.load(sp_file,allow_pickle=True) return X, l2i, i2l, sp def merge_26_datasets(datanames_26datasets, scan_dim = 50): datasets, genes_list, n_cells = load_names(datanames_26datasets,verbose=False,log1p=True) datasets, genes = merge_datasets(datasets, genes_list) datasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim) datasets_dimred, expr_datasets = my_assemble(datasets_dimred, ds_names=datanames_26datasets, expr_datasets = datasets, sigma=150) datasets_dimred = sparse.vstack(expr_datasets).toarray() return datasets_dimred, genes def emb_ontology(i2l, ontology_nlp_file, ontology_file, dim=20, 
mi=0, use_pretrain = None, use_seen_only = True, unseen_l = None): X, ont_l2i, ont_i2l, A = cal_ontology_emb( ontology_nlp_file, ontology_file, dim=dim, mi=mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l) i2emb = np.zeros((len(i2l),dim)) nl = len(i2l) for i in range(nl): ant = i2l[i] if ant not in ont_l2i: print (ant, ont_l2i) assert('xxx' in ant.lower() or 'nan' in ant.lower()) continue i2emb[i,:] = X[ont_l2i[ant],:] ''' AA = np.zeros((nl, nl)) for i in range(nl): for j in range(nl): anti, antj = i2l[i], i2l[j] if anti in ont_l2i and antj in ont_l2i: AA[i,j] = A[ont_l2i[anti],ont_l2i[antj]] ''' return i2emb ''' def get_ontology_parents(GO_net, g): term_valid = set() ngh_GO = set() ngh_GO.add(g) while len(ngh_GO) > 0: for GO in list(ngh_GO): for GO1 in GO_net[GO]: ngh_GO.add(GO1) ngh_GO.remove(GO) term_valid.add(GO) return term_valid ''' def get_ontology_parents(GO_net, g, dfs_depth=100): term_valid = set() ngh_GO = set() ngh_GO.add(g) depth = {} depth[g] = 0 while len(ngh_GO) > 0: for GO in list(ngh_GO): for GO1 in GO_net[GO]: ngh_GO.add(GO1) depth[GO1] = depth[GO] + 1 ngh_GO.remove(GO) if depth[GO] < dfs_depth: term_valid.add(GO) return term_valid def create_labels(train_Y, ontology_nlp_file, ontology_file, combine_unseen = False, dfs_depth = 1000): fin = open(ontology_file) lset = set() for line in fin: s,p = line.strip().split('\t') lset.add(s) lset.add(p) fin.close() seen_l = sorted(np.unique(train_Y)) unseen_l = sorted(lset - set(train_Y)) ys = np.concatenate((seen_l, unseen_l)) i2l = {} l2i = {} for l in ys: nl = len(i2l) col = l if combine_unseen and l in unseen_l: nl = len(seen_l) l2i[col] = nl i2l[nl] = col continue l2i[col] = nl i2l[nl] = col train_Y = [l2i[y] for y in train_Y] train_X2Y = ConvertLabels(train_Y, ncls = len(i2l)) onto_net, onto_net_mat = read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth) return unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat def query_depth_ontology(net, node, 
root='cl:0000000'): depth = 0 while node != root: if len(net[node]) == 0: print (node) node = sorted(list(net[node].keys()))[0] depth += 1 if depth>100: sys.error('root not found') return depth def read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = 1000): nl = len(l2i) net = collections.defaultdict(dict) net_mat = np.zeros((nl,nl)) fin = open(ontology_file) for line in fin: s,p = line.strip().split('\t') si = l2i[s] pi = l2i[p] net[si][pi] = 1 net_mat[si][pi] = 1 fin.close() for n in range(nl): ngh = get_ontology_parents(net, n, dfs_depth = dfs_depth) net[n][n] = 1 for n1 in ngh: net[n][n1] = 1 return net, net_mat def extract_label_propagate_tree(onto_net, ncls): tree = np.zeros((ncls,ncls)) for n1 in onto_net: for n2 in onto_net[n1]: tree[n1,n2] = 1 return tree def ConvertLabels(labels, ncls=-1): ncell = np.shape(labels)[0] if len(np.shape(labels)) ==1 : #bin to mat if ncls == -1: ncls = np.max(labels) mat = np.zeros((ncell, ncls)) for i in range(ncell): mat[i, labels[i]] = 1 return mat else: if ncls == -1: ncls = np.shape(labels)[1] vec = np.zeros(ncell) for i in range(ncell): ind = np.where(labels[i,:]!=0)[0] assert(len(ind)<=1) # not multlabel classification if len(ind)==0: vec[i] = -1 else: vec[i] = ind[0] return vec def MapLabel2CL(test_Y, l2i): test_Y_new = np.array([l2i[y] for y in test_Y]) return test_Y_new def get_ontology_name(obo_file, lower=True): fin = open(obo_file) co2name = {} name2co = {} tag_is_syn = {} for line in fin: if line.startswith('id: '): co = line.strip().split('id: ')[1] if line.startswith('name: '): if lower: name = line.strip().lower().split('name: ')[1] else: name = line.strip().split('name: ')[1] co2name[co] = name name2co[name] = co if line.startswith('synonym: '): if lower: syn = line.strip().lower().split('synonym: "')[1].split('" ')[0] else: syn = line.strip().split('synonym: "')[1].split('" ')[0] if syn in name2co: continue name2co[syn] = co fin.close() return co2name, name2co def knn_ngh(Y2Y): ind = 
np.argsort(Y2Y*-1, axis=1) return ind def extend_prediction_2unseen_normalize(pred_Y_seen, onto_net_rwr, nseen, ratio=200): sys.exit(-1)#NOT USED ncls = np.shape(onto_net_rwr)[0] onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (ncls, 1)) pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis] pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:]) pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1) pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio return pred_Y_all def create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file): ncls = np.shape(cls2cls)[0] _, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file = ontology_nlp_file, ontology_nlp_emb_file = ontology_nlp_emb_file) onto_net_nlp_all_pairs = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb) onto_net_nlp = np.zeros((ncls, ncls)) onto_net_bin = np.zeros((ncls, ncls)) stack_net_bin = np.zeros((ncls, ncls)) stack_net_nlp = np.zeros((ncls, ncls)) for n1 in onto_net: for n2 in onto_net[n1]: if n1==n2: continue stack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1] stack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2] stack_net_bin[n1,n2] = 1 stack_net_bin[n2,n1] = 1 for n1 in range(ncls): for n2 in range(ncls): if cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1: onto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2] onto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1] onto_net_bin[n1,n2] = 1 onto_net_bin[n2,n1] = 1 return onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs def create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]): cls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False) ncls = np.shape(onto_net_mat)[0] networks = [] for rst in rsts: for dis in diss: for thres in thress: use_net = np.copy(onto_net_mat) 
use_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] onto_net_rwr = RandomWalkRestart(use_net, rst) networks.append(onto_net_rwr) return networks def extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False): if not isinstance(networks, list): networks = [networks] pred_Y_all_totoal = 0. for onto_net_rwr in networks: if use_normalize: onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1)) pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis] pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:]) pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1) pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio pred_Y_all_totoal += pred_Y_all return pred_Y_all_totoal def my_auprc(y_true, y_pred): precision, recall, thresholds = precision_recall_curve(y_true, y_pred) area = auc(recall, precision) return area def sampled_auprc(truths,preds): pos = np.where(truths == 1)[0] neg = np.where(truths == 0)[0] assert(len(pos) + len(neg) == len(truths)) nneg = len(neg) npos = len(pos) select_neg = np.random.choice(nneg, npos*3, replace = True) select_ind = np.concatenate((pos, select_neg)) return average_precision_score(truths[select_ind], preds[select_ind]) def evaluate(Y_pred_mat, Y_truth_vec, unseen_l, nseen, Y_truth_bin_mat = None, Y_pred_vec = None, Y_ind=None, Y_net = None, Y_net_mat = None, write_screen = True, write_to_file = None, combine_unseen = False, prefix='', metrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']): #preprocess scores unseen_l = np.array(list(unseen_l)) ncell,nclass = np.shape(Y_pred_mat) nseen = nclass - len(unseen_l) if Y_ind is not None: non_Y_ind = np.array(list(set(range(nclass)) - set(Y_ind))) if len(non_Y_ind)>0: Y_pred_mat[:,non_Y_ind] = -1 * np.inf if 
Y_pred_vec is None: Y_pred_vec = np.argmax(Y_pred_mat, axis=1) if Y_truth_bin_mat is None: Y_truth_bin_mat = ConvertLabels(Y_truth_vec, nclass) Y_pred_bin_mat = ConvertLabels(Y_pred_vec, nclass) #class-based metrics class_auc_macro = np.full(nclass, np.nan) class_auprc_macro = np.full(nclass, np.nan) class_f1 = np.full(nclass, np.nan) for i in range(nclass): if len(np.unique(Y_truth_bin_mat[:,i]))==2 and np.sum(Y_truth_bin_mat[:,i])>=10: class_auc_macro[i] = roc_auc_score(Y_truth_bin_mat[:,i], Y_pred_mat[:,i]) class_auprc_macro[i] = sampled_auprc(Y_truth_bin_mat[:,i], Y_pred_mat[:,i]) class_f1[i] = f1_score(Y_truth_bin_mat[:,i], Y_pred_bin_mat[:,i]) #sample-based metrics extend_acc, extend_Y = extend_accuracy(Y_truth_vec, Y_pred_vec, Y_net, unseen_l) kappa = cohen_kappa_score(Y_pred_vec, Y_truth_vec) extend_kappa = cohen_kappa_score(extend_Y, Y_truth_vec) accuracy = accuracy_score(Y_truth_vec, Y_pred_vec) prec_at_k_3 = precision_at_k(Y_pred_mat, Y_truth_vec, 3) prec_at_k_5 = precision_at_k(Y_pred_mat, Y_truth_vec, 5) #print ([(x,np.sum(Y_truth_bin_mat[:,unseen_l[i]])) for i,x in enumerate(class_auprc_macro[unseen_l]) if not np.isnan(x)]) seen_auc_macro = np.nanmean(class_auc_macro[:nseen]) seen_auprc_macro = np.nanmean(class_auprc_macro[:nseen]) seen_f1 = np.nanmean(class_f1[:nseen]) if len(unseen_l) == 0: unseen_auc_macro = 0 unseen_auprc_macro = 0 unseen_f1 = 0 else: unseen_auc_macro = np.nanmean(class_auc_macro[unseen_l]) #unseen_auprc_macro = np.nanmean([x for i,x in enumerate(class_auprc_macro[unseen_l]) if np.sum(Y_truth_bin_mat[:,unseen_l[i]])>100])# unseen_auprc_macro = np.nanmean(class_auprc_macro[unseen_l]) unseen_f1 = np.nanmean(class_f1[unseen_l]) #metrics = ['AUROC','AUPRC','unseen_AUROC', 'unseen_AUPRC','Cohens Kappa','Accuracy@3','Accuracy@5'] #res_v = [seen_auc_macro, seen_auprc_macro, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), extend_kappa, prec_at_k_3, prec_at_k_5, unseen_auc_macro, unseen_auprc_macro] all_v = 
{'AUROC':np.nanmean(class_auc_macro), 'AUPRC': np.nanmean(class_auprc_macro), 'AUROC(seen)':seen_auc_macro, 'AUPRC(seen)': seen_auprc_macro, 'AUROC(unseen)':unseen_auc_macro, 'AUPRC(unseen)': unseen_auprc_macro, 'Cohens Kappa':extend_kappa, 'Accuracy@3':prec_at_k_3, 'Accuracy@5':prec_at_k_5} res_v = {} for metric in metrics: res_v[metric] = all_v[metric] #res_v = [seen_auc_macro, seen_auprc_macro, seen_f1, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), np.nanmean(class_f1), unseen_auc_macro, unseen_auprc_macro, unseen_f1] if write_screen: print (prefix, end='\t') for v in metrics: print ('%.4f'%res_v[v], end='\t') print ('') sys.stdout.flush() if write_to_file is not None: write_to_file.write(prefix+'\t') for v in metrics: write_to_file.write('%.2f\t'%res_v[v]) write_to_file.write('\n') write_to_file.flush() return res_v def precision_at_k(pred,truth,k): ncell, nclass = np.shape(pred) hit = 0. for i in range(ncell): x = np.argsort(pred[i,:]*-1) rank = np.where(x==truth[i])[0][0] if rank < k: hit += 1. 
prec = hit / ncell return prec def write_anndata_data(test_label, test_AnnData, cl_obo_file, label_name): if len(np.shape(test_label))==2: test_label = np.argmax(test_label, axis = 1) co2name, name2co = get_ontology_name(cl_obo_file) x = test_AnnData ncell = np.shape(x.X)[0] print (ncell, len(test_label)) assert(ncell == len(test_label)) test_name = [] test_label_id = [] for i in range(ncell): xx = i2tp[test_label[i]] test_label_id.append(xx) test_name.append(co2name[xx]) test_name = np.array(test_name) test_label_id = np.array(test_label_id) x.obs['OnClass_annotation_ontology_ID'] = test_label x.obs['OnClass_annotation_ontology_name'] = test_name return x def read_type2genes(g2i, marker_gene,cl_obo_file): co2name, name2co = get_ontology_name(cl_obo_file) c2cnew = {} c2cnew['cd4+ t cell'] = 'CD4-positive, CXCR3-negative, CCR6-negative, alpha-beta T cell'.lower() c2cnew['chromaffin cells (enterendocrine)'] = 'chromaffin cell'.lower() c2cnew['mature NK T cell'] = 'mature NK T cell'.lower() c2cnew['cd8+ t cell'] = 'CD8-positive, alpha-beta cytotoxic T cell'.lower() fin = open(marker_gene) fin.readline() tp2genes = {} unfound = set() for line in fin: w = line.strip().split('\t') c1 = w[1].lower() c2 = w[2].lower() genes = [] for ww in w[8:]: if ww.upper() in g2i: genes.append(ww.upper()) if len(genes)==0: continue if c1.endswith('s') and c1[:-1] in name2co: c1 = c1[:-1] if c2.endswith('s') and c2[:-1] in name2co: c2 = c2[:-1] if c1 + ' cell' in name2co: c1 +=' cell' if c2 + ' cell' in name2co: c2 +=' cell' if c1 in c2cnew: c1 = c2cnew[c1] if c2 in c2cnew: c2 = c2cnew[c2] if c1 in name2co: tp2genes[name2co[c1]] = genes else: unfound.add(c1) if c2 in name2co: tp2genes[name2co[c2]] = genes else: unfound.add(c2) fin.close() return tp2genes def extend_accuracy(test_Y, test_Y_pred_vec, Y_net, unseen_l): unseen_l = set(unseen_l) n = len(test_Y) acc = 0. ntmp = 0. 
new_pred = [] for i in range(n): if test_Y[i] in unseen_l and test_Y_pred_vec[i] in unseen_l: if test_Y_pred_vec[i] in Y_net[test_Y[i]] and Y_net[test_Y[i]][test_Y_pred_vec[i]] == 1: acc += 1 ntmp += 1 new_pred.append(test_Y[i]) else: new_pred.append(test_Y_pred_vec[i]) else: if test_Y[i] == test_Y_pred_vec[i]: acc += 1 new_pred.append(test_Y_pred_vec[i]) new_pred = np.array(new_pred) return acc/n, new_pred def run_scanorama_multiply_datasets(datasets, genes, scan_dim = 100): sparse_datasets = [] for dataset in datasets: sparse_datasets.append(sparse.csr_matrix(dataset)) datasets, genes = merge_datasets(sparse_datasets, genes) datasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim) datasets_dimred, sparse_dataset_correct = my_assemble(datasets_dimred, expr_datasets = datasets, sigma=150) dataset_correct = [] for sp in sparse_dataset_correct: dataset_correct.append(np.power(sp.todense(), 2)) return datasets_dimred, dataset_correct def run_scanorama_same_genes(features, batch_labels, scan_dim = 100): batchs = np.unique(batch_labels) nbatch = len(batchs) if nbatch == 1: return features ncell, ngene = np.shape(features) assert(ncell == len(batch_labels)) genes = [] datasets = [] indexs = [] for i in range(nbatch): genes.append(np.array(range(ngene))) index = np.where(batch_labels == batchs[i])[0] dataset = features[index,:] print (batchs[i], np.shape(dataset)) datasets.append(dataset) indexs.append(index) _, dataset_correct = run_scanorama_multiply_datasets(datasets, genes, scan_dim = scan_dim) assert(len(dataset_correct)) == nbatch for i in range(nbatch): features[indexs[i],:] = dataset_correct[i] return features def my_assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN, sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None, ds_names=None, batch_size=None, geosketch=False, geosketch_max=20000, alignments=None, matches=None): # reimplement part of scanorama to return the corrected expression (instead of low-d vectors) #this code is copy 
and paste from scanorama in order to output the expression. Please check their tool and cite their paper if you used this function. if len(datasets) == 1: return datasets if alignments is None and matches is None: alignments, matches = find_alignments( datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose, ) ds_assembled = {} panoramas = [] ct = 0 for i, j in alignments: ct += 1 print (ct) sys.stdout.flush() if verbose: if ds_names is None: print('Processing datasets {}'.format((i, j))) else: print('Processing datasets {} <=> {}'. format(ds_names[i], ds_names[j])) # Only consider a dataset a fixed amount of times. if not i in ds_assembled: ds_assembled[i] = 0 ds_assembled[i] += 1 if not j in ds_assembled: ds_assembled[j] = 0 ds_assembled[j] += 1 if ds_assembled[i] > 3 and ds_assembled[j] > 3: continue # See if datasets are involved in any current panoramas. panoramas_i = [ panoramas[p] for p in range(len(panoramas)) if i in panoramas[p] ] assert(len(panoramas_i) <= 1) panoramas_j = [ panoramas[p] for p in range(len(panoramas)) if j in panoramas[p] ] assert(len(panoramas_j) <= 1) if len(panoramas_i) == 0 and len(panoramas_j) == 0: if datasets[i].shape[0] < datasets[j].shape[0]: i, j = j, i panoramas.append([ i ]) panoramas_i = [ panoramas[-1] ] # Map dataset i to panorama j. 
if len(panoramas_i) == 0: curr_ds = datasets[i] curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ]) match = [] base = 0 for p in panoramas_j[0]: if i < p and (i, p) in matches: match.extend([ (a, b + base) for a, b in matches[(i, p)] ]) elif i > p and (p, i) in matches: match.extend([ (b, a + base) for a, b in matches[(p, i)] ]) base += datasets[p].shape[0] ds_ind = [ a for a, _ in match ] ref_ind = [ b for _, b in match ] bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, batch_size=batch_size) datasets[i] = curr_ds + bias if expr_datasets: curr_ds = expr_datasets[i] curr_ref = vstack([ expr_datasets[p] for p in panoramas_j[0] ]) bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, cn=True, batch_size=batch_size) expr_datasets[i] = curr_ds + bias panoramas_j[0].append(i) # Map dataset j to panorama i. elif len(panoramas_j) == 0: curr_ds = datasets[j] curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ]) match = [] base = 0 for p in panoramas_i[0]: if j < p and (j, p) in matches: match.extend([ (a, b + base) for a, b in matches[(j, p)] ]) elif j > p and (p, j) in matches: match.extend([ (b, a + base) for a, b in matches[(p, j)] ]) base += datasets[p].shape[0] ds_ind = [ a for a, _ in match ] ref_ind = [ b for _, b in match ] bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, batch_size=batch_size) datasets[j] = curr_ds + bias if expr_datasets: curr_ds = expr_datasets[j] curr_ref = vstack([ expr_datasets[p] for p in panoramas_i[0] ]) bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, cn=True, batch_size=batch_size) expr_datasets[j] = curr_ds + bias panoramas_i[0].append(j) # Merge two panoramas together. else: curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ]) curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ]) # Find base indices into each panorama. 
base_i = 0 for p in panoramas_i[0]: if p == i: break base_i += datasets[p].shape[0] base_j = 0 for p in panoramas_j[0]: if p == j: break base_j += datasets[p].shape[0] # Find matching indices. match = [] base = 0 for p in panoramas_i[0]: if p == i and j < p and (j, p) in matches: match.extend([ (b + base, a + base_j) for a, b in matches[(j, p)] ]) elif p == i and j > p and (p, j) in matches: match.extend([ (a + base, b + base_j) for a, b in matches[(p, j)] ]) base += datasets[p].shape[0] base = 0 for p in panoramas_j[0]: if p == j and i < p and (i, p) in matches: match.extend([ (a + base_i, b + base) for a, b in matches[(i, p)] ]) elif p == j and i > p and (p, i) in matches: match.extend([ (b + base_i, a + base) for a, b in matches[(p, i)] ]) base += datasets[p].shape[0] ds_ind = [ a for a, _ in match ] ref_ind = [ b for _, b in match ] # Apply transformation to entire panorama. bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, batch_size=batch_size) curr_ds += bias base = 0 for p in panoramas_i[0]: n_cells = datasets[p].shape[0] datasets[p] = curr_ds[base:(base + n_cells), :] base += n_cells if not expr_datasets is None: curr_ds = vstack([ expr_datasets[p] for p in panoramas_i[0] ]) curr_ref = vstack([ expr_datasets[p] for p in panoramas_j[0] ]) bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, cn=True, batch_size=batch_size) curr_ds += bias base = 0 for p in panoramas_i[0]: n_cells = expr_datasets[p].shape[0] expr_datasets[p] = curr_ds[base:(base + n_cells), :] base += n_cells # Merge panoramas i and j and delete one. if panoramas_i[0] != panoramas_j[0]: panoramas_i[0] += panoramas_j[0] panoramas.remove(panoramas_j[0]) # Visualize. if view_match: plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind) return datasets, expr_datasets
34.64009
336
0.705903
from anndata import read_h5ad import sys from time import time from scipy import stats, sparse import numpy as np import collections import pickle from sklearn.preprocessing import normalize import os from collections import Counter import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve import time import umap import copy from sklearn import preprocessing from fbpca import pca from sklearn.metrics import roc_auc_score, roc_curve from sklearn.metrics.pairwise import cosine_similarity from scanorama import VERBOSE, KNN, ALPHA, APPROX, SIGMA from scanorama import find_alignments,merge_datasets,process_data,transform,vstack from sklearn.utils.graph_shortest_path import graph_shortest_path from scipy.sparse.linalg import svds, eigs nn_nhidden = [1000] rsts = [0.5,0.6,0.7,0.8] dfs_depth = 1 co_dim = 5 keep_prob = 1.0 use_diagonal = True max_iter = 20 niter = 5 def translate_paramter(ps): s = [] for p in ps: if isinstance(p, list): p = [str(i) for i in p] p = '.'.join(p) s.append(p) else: s.append(str(p)) s = '_'.join(s) return s pname = translate_paramter([max_iter]) def make_folder(folder): if not os.path.exists(folder): os.makedirs(folder) return folder def create_propagate_networks(dname, l2i, onto_net, cls2cls, ontology_nlp_file, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]): ncls = np.shape(cls2cls)[0] if dname != 'allen': onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file) network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls, diss = diss, thress = thress) else: stack_net_bin = np.zeros((ncls,ncls)) for n1 in onto_net: for n2 in onto_net[n1]: if n1==n2: continue stack_net_bin[n1,n2] = 1 stack_net_bin[n2,n1] = 1 network = 
[RandomWalkRestart(stack_net_bin, rst) for rst in rsts] return network def fine_nearest_co_using_nlp(sentences,co2emb,obo_file,nlp_mapping_cutoff=0.8): co2name, name2co = get_ontology_name(obo_file = obo_file) from sentence_transformers import SentenceTransformer model = SentenceTransformer('bert-base-nli-mean-tokens') sentences = np.array([sentence.lower() for sentence in sentences]) sentence_embeddings = model.encode(sentences) co_embeddings = [] cos = [] for co in co2emb: co_embeddings.append(co2emb[co]) cos.append(co) co_embeddings = np.array(co_embeddings) sent2co = {} for sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))): scs = cosine_similarity(co_embeddings, embedding.reshape(1,-1)) co_id = np.argmax(scs) sc = scs[co_id] if sc>nlp_mapping_cutoff: sent2co[sentence.lower()] = cos[co_id] names = set() for name in name2co: if name2co[name].upper() == cos[co_id]: names.add(name) return sent2co def ImputeUnseenCls(y_vec, y_raw, cls2cls, nseen, knn=1): nclass = np.shape(cls2cls)[0] seen2unseen_sim = cls2cls[:nseen, nseen:] nngh = np.argsort(seen2unseen_sim*-1, axis = 0)[0,:] ncell = len(y_vec) y_mat = np.zeros((ncell, nclass)) y_mat[:,:nseen] = y_raw[:, :nseen] for i in range(ncell): if y_vec[i] == -1: y_mat[i,nseen:] = y_mat[i,nngh] y_mat[i,:nseen] -= 1000000 return y_mat def ImputeUnseenCls_Backup(y_vec, y_raw, cls2cls, nseen, knn=1): nclass = np.shape(cls2cls)[0] seen2unseen_sim = cls2cls[:nseen, nseen:] ncell = len(y_vec) y_mat = np.zeros((ncell, nclass)) y_mat[:,:nseen] = y_raw[:, :nseen] for i in range(ncell): if y_vec[i] == -1: kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn] if len(kngh) == 0: continue y_mat[i,:nseen] -= 1000000 y_mat[i,nseen:] = np.dot(y_raw[i,kngh], seen2unseen_sim[kngh,:]) return y_mat def find_gene_ind(genes, common_genes): gid = [] for g in common_genes: gid.append(np.where(genes == g)[0][0]) gid = np.array(gid) return gid def RandomWalkOntology(onto_net, l2i, ontology_nlp_file, ontology_nlp_emb_file, 
rst = 0.7): ncls = len(l2i) onto_net_nlp, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file) onto_net_nlp = (cosine_similarity(onto_nlp_emb) + 1 ) /2 onto_net_mat = np.zeros((ncls, ncls)) for n1 in onto_net: for n2 in onto_net[n1]: if n1==n2: continue onto_net_mat[n1,n2] = onto_net_nlp[n1, n2] onto_net_mat[n2,n1] = onto_net_nlp[n2, n1] onto_net_rwr = RandomWalkRestart(onto_net_mat, rst) return onto_net_rwr def process_expression(c2g_list): c2g = np.vstack(c2g_list) c2g = c2g.T c2g = c2g[np.sum(c2g, axis=1)>0, :] c2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000 c2g = np.log2(c2g+1) expr = np.sum(c2g, axis=1) c2g = c2g[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),] cv = np.std(c2g, axis=1) / np.mean(c2g, axis=1) c2g = c2g[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),] c2g = c2g.T c2g_list_new = [] index = 0 for c in c2g_list: ncell = np.shape(c)[0] c2g_list_new.append(c2g[index:index+ncell,:]) index = ncell return c2g_list_new def read_ontology_file(dname, data_folder): if 'allen' in dname: cell_type_network_file = data_folder + 'allen.ontology' cell_type_nlp_emb_file = None cl_obo_file = None if not os.path.isfile(cell_type_network_file): sys.error(cell_type_network_file + ' not found!') else: cell_type_network_file = data_folder + 'cl.ontology' cell_type_nlp_emb_file = data_folder + 'cl.ontology.nlp.emb' cl_obo_file = data_folder + 'cl.obo' if not os.path.isfile(cell_type_nlp_emb_file): sys.exit(cell_type_nlp_emb_file + ' not found!') if not os.path.isfile(cell_type_network_file): sys.exit(cell_type_network_file + ' not found!') if not os.path.isfile(cl_obo_file): sys.exit(cl_obo_file + ' not found!') return cell_type_nlp_emb_file, cell_type_network_file, cl_obo_file def read_data_file(dname, data_dir): if 'microcebus' in dname: tech = '10x' feature_file = data_dir + 'Lemur/' + dname +'.h5ad' filter_key={'method':tech } label_file = None gene_file = '' 
label_key = 'cell_ontology_class' elif 'muris' in dname: tech = dname.split('_')[1] feature_file = data_dir + 'Tabula_Muris_Senis/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad' filter_key = {} label_file = None gene_file = '' batch_key = '' label_key = 'cell_ontology_class' elif 'sapiens' in dname: feature_file = data_dir + 'sapiens/' + 'Pilot1_Pilot2_decontX_Oct2020.h5ad' filter_key = {} label_file = None gene_file = '' batch_key = '' label_key = 'cell_ontology_type' elif 'allen' in dname: feature_file = data_dir + '/Allen_Brain/features.pkl' label_file = data_dir + '/Allen_Brain/labels.pkl' gene_file = data_dir + '/Allen_Brain/genes.pkl' label_key = '' filter_key = {} elif 'krasnow' in dname: tech = dname.split('_')[1] feature_file = data_dir + '/HLCA/'+tech+'_features.pkl' label_file = data_dir + '/HLCA/'+tech+'_labels.pkl' gene_file = data_dir + '/HLCA/'+tech+'_genes.pkl' label_key = '' filter_key = {} else: sys.exit('wrong dname '+dname) if feature_file.endswith('.pkl'): return feature_file, filter_key, label_key, label_file, gene_file elif feature_file.endswith('.h5ad'): return feature_file, filter_key, label_key, label_file, gene_file sys.exit('wrong file suffix') def read_singlecell_data(dname, data_dir, ontology_dir, nsample = 500000000, read_tissue = False, exclude_non_leaf_ontology = True): if 'microcebus' in dname: tech = '10x' file = data_dir + 'TMS_official_060520/' + dname +'.h5ad' filter_key={'method':tech } batch_key = '' ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp' ontology_file = ontology_dir + '/cell_ontology/cl.ontology' cl_obo_file = ontology_dir + '/cell_ontology/cl.obo' if not read_tissue: feature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) else: 
feature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) elif 'muris' in dname: tech = dname.split('_')[1] file = data_dir + 'TMS_official_060520/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad' filter_key = {} batch_key = '' ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp' ontology_file = ontology_dir + '/cell_ontology/cl.ontology' cl_obo_file = ontology_dir + '/cell_ontology/cl.obo' if not read_tissue: feature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) else: feature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file) elif 'allen_part' in dname: feature_file = data_dir + 'Allen/matrix_part.csv' label_file = data_dir + 'Allen/metadata.csv' ontology_file = data_dir + 'Allen/cell_type_ontology' ontology_nlp_file = None feature, label, genes = parse_csv(feature_file, label_file, nsample = nsample, label_key='cell_type_accession_label', exclude_non_ontology = True, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file) elif 'allen' in dname: feature_file = data_dir + 'Allen/features.pkl' label_file = data_dir + 'Allen/labels.pkl' gene_file = data_dir + 'Allen/genes.pkl' ontology_file = data_dir + 'Allen/cell_type_ontology' ontology_nlp_file = None 
feature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file) elif 'krasnow' in dname: tech = dname.split('_')[1] feature_file = data_dir + 'Krasnow/'+tech+'_features.pkl' label_file = data_dir + 'Krasnow/'+tech+'_labels.pkl' gene_file = data_dir + 'Krasnow/'+tech+'_genes.pkl' ontology_file = ontology_dir + '/cell_ontology/cl.ontology' ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp' cl_obo_file = ontology_dir + '/cell_ontology/cl.obo' feature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file) else: sys.exit('wrong dname '+dname) if read_tissue: return feature, label, genes, tissues, ontology_nlp_file, ontology_file else: return feature, label, genes, ontology_nlp_file, ontology_file def parse_krasnow(feature_file, label_file, gene_file, seed = 1, nsample = 1000,exclude_non_leaf_ontology = True, exclude_non_ontology = True, cell_ontology_file=None): np.random.seed(seed) if feature_file.endswith('.pkl'): features = pickle.load(open(feature_file, 'rb')) labels = pickle.load(open(label_file, 'rb')) genes = pickle.load(open(gene_file, 'rb')) ncell, ngene = np.shape(features) assert(ncell == len(labels)) assert(ngene == len(genes)) index = np.random.choice(ncell,min(nsample,ncell),replace=False) features = features[index, :] labels = labels[index] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) features = features[new_ids, :] labels = labels[new_ids] genes = [x.upper() for x in genes] genes = np.array(genes) return features, labels, genes def parse_pkl(feature_file, label_file, gene_file, seed = 1, nsample = 10000000,exclude_non_leaf_ontology = True, cell_ontology_file=None): np.random.seed(seed) if feature_file.endswith('.pkl'): features = pickle.load(open(feature_file, 'rb')) labels = 
pickle.load(open(label_file, 'rb')) genes = pickle.load(open(gene_file, 'rb')) ncell, ngene = np.shape(features) assert(ncell == len(labels)) assert(ngene == len(genes)) index = np.random.choice(ncell,ncell,replace=False) features = features[index, :] labels = labels[index] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) features = features[new_ids, :] labels = labels[new_ids] genes = [x.upper() for x in genes] genes = np.array(genes) return features, labels, genes def select_high_var_genes(train_X, test_X, ngene = 200): mat = np.vstack((train_X, test_X)) gstd = np.std(mat, axis=0) best_genes = np.argsort(gstd*-1) best_genes = best_genes[:ngene] return train_X[:, best_genes], test_X[:, best_genes] def emb_cells(train_X, test_X, dim=20): if dim==-1: return np.log1p(train_X.todense()), np.log1p(test_X.todense()) train_X = np.log1p(train_X) test_X = np.log1p(test_X) train_X = preprocessing.normalize(train_X, axis=1) test_X = preprocessing.normalize(test_X, axis=1) ntrain = np.shape(train_X)[0] mat = sparse.vstack((train_X, test_X)) U, s, Vt = pca(mat, k=dim) X = U[:, range(dim)] * s[range(dim)] return X[:ntrain,:], X[ntrain:,:] def write_markers(fname, markers): ,'w') for t in markers: fmarker_genes.write(t+'\t') g2pv = sorted(markers[t].items(), key=lambda item: item[1]) for g,pv in g2pv: fmarker_genes.write(g+'(pv:'+'{:.2e}'.format(pv)+')\t') fmarker_genes.write('\n') fmarker_genes.close() def calculate_markers(cell2term, cell2gene, genes, terms, topk_cells=500, only_over_expressed = True, return_k_genes = 100): ncell, nterm = np.shape(cell2term) ngene = np.shape(cell2gene)[1] assert(ncell == np.shape(cell2gene)[0]) markers = collections.defaultdict(dict) for t in range(nterm): scs = np.argsort(cell2term[:,t]) k_bot_cells = scs[:topk_cells] k_top_cells = scs[ncell-topk_cells:] pv = scipy.stats.ttest_ind(cell2gene[k_top_cells,:], cell2gene[k_bot_cells,:], axis=0)[1] top_mean = 
np.mean(cell2gene[k_top_cells,:],axis=0) bot_mean = np.mean(cell2gene[k_bot_cells,:],axis=0) if only_over_expressed: for g in range(ngene): if top_mean[g] < bot_mean[g]: pv[g] = 1. pv_sort = list(np.argsort(pv)) markers[terms[t]] = pv for i,p in enumerate(pv): if np.isnan(p): pv[i] = 1. return markers def peak_h5ad(file): x = read_h5ad(file) ncell, ngene = np.shape(x.X) nclass = len(np.unique(x.obs['free_annotation'])) f2name = {} sel_cell = 0. for i in range(ncell): if x.obs['method'][i]!='10x': continue free = x.obs['free_annotation'][i] name = x.obs['cell_ontology_class'][i] f2name[free] = name sel_cell += 1 return sel_cell, ngene, nclass def get_onotlogy_parents(GO_net, g): term_valid = set() ngh_GO = set() ngh_GO.add(g) while len(ngh_GO) > 0: for GO in list(ngh_GO): for GO1 in GO_net[GO]: ngh_GO.add(GO1) ngh_GO.remove(GO) term_valid.add(GO) return term_valid def exclude_non_ontology_term(cl_obo_file, labels, label_key): co2name, name2co = get_ontology_name(cl_obo_file) new_labs = [] new_ids = [] if label_key!='cell_ontology_class' and label_key!='cell_ontology_id': use_co = False for kk in np.unique(labels): if kk.lower().startswith('cl:'): use_co = True break else: if label_key == 'cell_ontology_class': use_co = False else: use_co = True for i in range(len(labels)): l = labels[i] if not use_co: if l.lower() in name2co.keys(): new_labs.append(name2co[l.lower()]) new_ids.append(i) else: if l.lower() in co2name.keys(): new_labs.append(l.lower()) new_ids.append(i) new_labs = np.array(new_labs) new_ids = np.array(new_ids) return new_ids, new_labs def parse_raw_h5ad(file,seed=1,nsample=1e10,tissue_key='tissue',label_key='cell_ontology_class', read_tissue = True, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None): np.random.seed(seed) x = read_h5ad(file) ncell = np.shape(x.raw.X)[0] select_cells = set(range(ncell)) for key in filter_key: value = filter_key[key] select_cells = 
select_cells & set(np.where(np.array(x.obs[key])==value)[0]) select_cells = sorted(select_cells) feature = x.raw.X[select_cells, :] labels = np.array(x.obs[label_key].tolist())[select_cells] if read_tissue: tissues = np.array(x.obs[tissue_key].tolist())[select_cells] if batch_key=='' or batch_key not in x.obs.keys(): batch_labels = np.ones(len(labels)) else: batch_labels = np.array(x.obs[batch_key].tolist())[select_cells] genes = x.var.index ncell = len(select_cells) if exclude_non_ontology: new_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key) feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] labels = labels[new_ids] if read_tissue: tissues = tissues[new_ids] ncell = len(labels) index = np.random.choice(ncell,min(nsample,ncell),replace=False) batch_labels = batch_labels[index] feature = feature[index, :] labels = labels[index] if read_tissue: tissues = tissues[index] genes = x.var.index corrected_feature = run_scanorama_same_genes(feature, batch_labels) corrected_feature = corrected_feature.toarray() genes = [x.upper() for x in genes] genes = np.array(genes) if read_tissue: assert(len(tissues) == len(labels)) return corrected_feature, labels, genes, tissues else: return corrected_feature, labels, genes def select_cells_based_on_keys(x, features, tissues = None, labels = None, filter_key = None): ncell = np.shape(x.X)[0] select_cells = set(range(ncell)) for key in filter_key: value = filter_key[key] select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0]) select_cells = sorted(select_cells) features = features[select_cells,: ] if labels is not None: labels = labels[select_cells] if tissues is not None: tissues = tissues[select_cells] x = x[select_cells,:] return features, labels, tissues, x def find_marker_genes(train_X, pred_Y_all, 
genes, i2l, topk = 50): cor = corr2_coeff(pred_Y_all[:,:].T, train_X[:,:].T) cor = np.nan_to_num(cor) nl = len(i2l) c2g = {} for i in range(nl): gl = np.argsort(cor[i,:]*-1) c2g[i2l[i]] = {} for j in range(topk): c2g[i2l[i]][genes[gl[j]]] = cor[i, gl[j]] return c2g, cor def use_pretrained_model(OnClass, genes, test_X, models = []): last_l2i = {} last_i2l = {} pred_Y_all_models = 0. ngene = len(genes) for model in models: OnClass.BuildModel(OnClass.co2emb, ngene = ngene, use_pretrain = model) print ('Build model finished for ',model) pred_Y_seen, pred_Y_all, pred_label = OnClass.Predict(test_X, test_genes = genes) print ('Predict for ',model) pred_Y_all = pred_Y_all.T / (pred_Y_all.T.sum(axis=1)[:, np.newaxis] + 1) pred_Y_all = pred_Y_all.T if len(last_l2i)>0: new_ct_ind = [] for i in range(len(last_i2l)): l = last_i2l[i] new_ct_ind.append(OnClass.co2i[l]) pred_Y_all = pred_Y_all[:, np.array(new_ct_ind)] pred_Y_all_models += pred_Y_all else: last_l2i = OnClass.co2i last_i2l = OnClass.i2co pred_Y_all_models = pred_Y_all return pred_Y_all_models def read_data(feature_file, cell_ontology_ids, exclude_non_leaf_ontology = False, ct_mapping_key = {}, tissue_key = None, seed = 1, filter_key = None, AnnData_label_key=None, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, label_file=None, cl_obo_file = None, cell_ontology_file = None): np.random.seed(seed) x = read_h5ad(feature_file) ncell = np.shape(x.X)[0] dataset = x.X.toarray() genes = np.array([x.upper() for x in x.var.index]) if tissue_key is not None: tissues = np.array(x.obs[tissue_key].tolist()) else: tissues = None if AnnData_label_key is None and label_file is None: print ('no label file is provided') labels = None dataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key) return dataset, genes, labels, tissues, x if AnnData_label_key is not None: labels = x.obs[AnnData_label_key].tolist() else: fin = open(label_file) labels = [] 
for line in fin: labels.append(line.strip()) fin.close() labels = np.array(labels) dataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key) ind, labels, unfound_labs = map_and_select_labels(labels, cell_ontology_ids, cl_obo_file, ct_mapping_key = ct_mapping_key, nlp_mapping = nlp_mapping, co2emb = co2emb, nlp_mapping_cutoff = nlp_mapping_cutoff, cl_obo_file = cl_obo_file) if tissue_key is not None: tissues = tissues[ind] dataset = dataset[ind, :] x = x[ind, :] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) tissues = tissues[new_ids] dataset = dataset[new_ids, :] labels = labels[new_ids] x = x[new_ids, :] ncell = np.shape(dataset)[0] index = np.random.choice(ncell,ncell,replace=False) dataset = dataset[index, :] labels = labels[index] if tissue_key is not None: tissues = tissues[index] return dataset, genes, labels, tissues, x def exact_match_co_name_2_co_id(labels, lab2co, cl_obo_file = None): if cl_obo_file is None: return lab2co co2name, name2co = get_ontology_name(obo_file = cl_obo_file) for label in labels: if label.lower() in name2co: lab2co[label.lower()] = name2co[label.lower()] for name in name2co: lab2co[name.lower()] = name2co[name] return lab2co def map_and_select_labels(labels, cell_ontology_ids, obo_file, ct_mapping_key = {}, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, cl_obo_file = None): lab2co = {} if nlp_mapping: if co2emb is None: sys.exit('Please provide cell type embedding to do NLP-based mapping.') lab2co = fine_nearest_co_using_nlp(np.unique(labels), co2emb, obo_file,nlp_mapping_cutoff = nlp_mapping_cutoff) lab2co = exact_match_co_name_2_co_id(np.unique(labels), lab2co, cl_obo_file = cl_obo_file) for ct in ct_mapping_key: lab2co[ct_mapping_key[ct]] = lab2co[ct] ind = [] lab_id = [] unfound_labs = set() for i,l in enumerate(labels): if l in cell_ontology_ids: ind.append(i) lab_id.append(l) elif 
l.lower() in lab2co: ind.append(i) lab_id.append(lab2co[l.lower()]) else: unfound_labs.add(l) frac = len(ind) * 1. / len(labels) ind = np.array(ind) labels = np.array(lab_id) unfound_labs = set(unfound_labs) warn_message = 'Warning: Only: %f precentage of labels are in the Cell Ontology. The remaining cells are excluded! Consider using NLP mapping and choose a small mapping cutoff (nlp_mapping_cutoff)' % (frac * 100) if frac < 0.5: print (warn_message) print ('Here are unfound labels:',unfound_labs) return ind, labels, unfound_labs def parse_h5ad(file,seed=1,nsample=1e10,label_key='cell_ontology_class', read_tissue = False, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None): np.random.seed(seed) x = read_h5ad(file) ncell = np.shape(x.X)[0] select_cells = set(range(ncell)) for key in filter_key: value = filter_key[key] select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0]) select_cells = sorted(select_cells) feature = x.X[select_cells, :] labels = np.array(x.obs[label_key].tolist())[select_cells] if read_tissue: tissues = np.array(x.obs['tissue'].tolist())[select_cells] if batch_key=='' or batch_key not in x.obs.keys(): batch_labels = np.ones(len(labels)) else: batch_labels = np.array(x.obs[batch_key].tolist())[select_cells] genes = x.var.index ncell = len(select_cells) if exclude_non_ontology: new_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key) feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] if exclude_non_leaf_ontology: new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels) feature = feature[new_ids, :] batch_labels = batch_labels[new_ids] labels = labels[new_ids] if read_tissue: tissues = tissues[new_ids] ncell = len(labels) index = np.random.choice(ncell,min(nsample,ncell),replace=False) batch_labels = batch_labels[index] feature = feature[index, :] labels = labels[index] if read_tissue: 
tissues = tissues[index] genes = x.var.index corrected_feature = feature.toarray() genes = [x.upper() for x in genes] genes = np.array(genes) if read_tissue: assert(len(tissues) == len(labels)) return corrected_feature, labels, genes, tissues else: return corrected_feature, labels, genes def exclude_parent_child_nodes(cell_ontology_file,labels): uniq_labels = np.unique(labels) excludes = set() net = collections.defaultdict(dict) fin = open(cell_ontology_file) for line in fin: s,p = line.strip().split('\t') net[s][p] = 1 fin.close() for n in list(net.keys()): ngh = get_ontology_parents(net, n) for n1 in ngh: net[n][n1] = 1 for l1 in uniq_labels: for l2 in uniq_labels: if l1 in net[l2] and l1!=l2: excludes.add(l1) new_ids = [] for i in range(len(labels)): if labels[i] not in excludes: new_ids.append(i) new_ids = np.array(new_ids) return new_ids, excludes def corr2_coeff(A, B): A_mA = A - A.mean(1)[:, None] B_mB = B - B.mean(1)[:, None] ssA = (A_mA**2).sum(1) ssB = (B_mB**2).sum(1) return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None],ssB[None])) def extract_data_based_on_class(feats, labels, sel_labels): ind = [] for l in sel_labels: id = np.where(labels == l)[0] ind.extend(id) np.random.shuffle(ind) X = feats[ind,:] Y = labels[ind] return X, Y, ind def SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10): np.random.seed(random_state) cls = np.unique(all_Y) cls2ct = Counter(all_Y) ncls = len(cls) test_cls = list(np.random.choice(cls, int(ncls * nfold_cls), replace=False)) for c in cls2ct: if cls2ct[c] < nmin_size: test_cls.append(c) test_cls = np.unique(test_cls) train_cls = [x for x in cls if x not in test_cls] train_cls = np.array(train_cls) train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls) test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls) if all_tissues is not None: train_tissues = all_tissues[train_ind] test_tissues = 
all_tissues[test_ind] train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split( train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_tissues = np.concatenate((test_tissues, train_tissues_test)) train_tissues = train_tissues_train else: train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split( train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_X = np.vstack((test_X, train_X_test)) test_Y = np.concatenate((test_Y, train_Y_test)) train_X = train_X_train train_Y = train_Y_train if all_tissues is not None: return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues else: return train_X, train_Y, test_X, test_Y def LeaveOneOutTrainTest(all_X, all_Y, test_Y, all_tissues = None, random_state=10, nfold_sample = 0.2, nmin_size=10): np.random.seed(random_state) cls = np.unique(all_Y) cls2ct = Counter(all_Y) ncls = len(cls) test_cls = [test_Y] test_cls = np.unique(test_cls) train_cls = [x for x in cls if x not in test_cls] train_cls = np.array(train_cls) train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls) test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls) if all_tissues is not None: train_tissues = all_tissues[train_ind] test_tissues = all_tissues[test_ind] train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split( train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_tissues = np.concatenate((test_tissues, train_tissues_test)) train_tissues = train_tissues_train else: train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split( train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state) test_X = np.vstack((test_X, train_X_test)) test_Y = np.concatenate((test_Y, train_Y_test)) 
train_X = train_X_train train_Y = train_Y_train if all_tissues is not None: return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues else: return train_X, train_Y, test_X, test_Y def renorm(X): Y = X.copy() Y = Y.astype(float) ngene,nsample = Y.shape s = np.sum(Y, axis=0) for i in range(nsample): if s[i]==0: s[i] = 1 if i < ngene: Y[i,i] = 1 else: for j in range(ngene): Y[j,i] = 1. / ngene Y[:,i] = Y[:,i]/s[i] return Y def RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False): if use_torch: device = torch.device("cuda:0") nnode = A.shape[0] if reset is None: reset = np.eye(nnode) nsample,nnode = reset.shape P = renorm(A) P = P.T norm_reset = renorm(reset.T) norm_reset = norm_reset.T if use_torch: norm_reset = torch.from_numpy(norm_reset).float().to(device) P = torch.from_numpy(P).float().to(device) Q = norm_reset for i in range(1,max_iter): if use_torch: Q_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P) delta = torch.norm(Q-Q_new, 2) else: Q_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P) delta = np.linalg.norm(Q-Q_new, 'fro') Q = Q_new sys.stdout.flush() if delta < 1e-4: break if use_torch and not return_torch: Q = Q.cpu().numpy() return Q def DCA_vector(Q, dim): nnode = Q.shape[0] alpha = 1. 
/ (nnode **2) Q = np.log(Q + alpha) - np.log(alpha); [U, S, V] = svds(Q, dim); S = np.diag(S) X = np.dot(U, np.sqrt(S)) Y = np.dot(np.sqrt(S), V) Y = np.transpose(Y) return X,U,S,V,Y def read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file): ncls = len(l2i) net = np.zeros((ncls, ncls)) bin_net = np.zeros((ncls, ncls)) fin = open(ontology_nlp_file) for line in fin: s,p,wt = line.upper().strip().split('\t') wt = float(wt) net[l2i[s], l2i[p]] = np.exp(wt) net[l2i[p], l2i[s]] = np.exp(wt) bin_net[l2i[s], l2i[p]] = 1 bin_net[l2i[p], l2i[s]] = 1 fin.close() l2vec = {} fin = open(ontology_nlp_emb_file) for line in fin: w = line.upper().strip().split('\t') l2vec[w[0]] = [] dim = len(w)-1 for i in range(1,len(w)): l2vec[w[0]].append(float(w[i])) fin.close() l2vec_mat = np.zeros((ncls, dim)) for l in l2vec: if l.upper() not in l2i: continue l2vec_mat[l2i[l.upper()],:] = l2vec[l] return net, bin_net, l2vec_mat def GetReverseNet(onto_net): onto_net_rev = collections.defaultdict(dict) for a in onto_net: for b in onto_net[a]: onto_net_rev[b][a] = 1 return onto_net_rev def ParseCLOnto(train_Y, ontology_nlp_file, ontology_file, co_dim=5, co_mi=3, dfs_depth = 1, combine_unseen = False, add_emb_diagonal = True, use_pretrain = None, use_seen_only = True):# unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat = create_labels(train_Y, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth, combine_unseen = combine_unseen) Y_emb = emb_ontology(i2l, ontology_nlp_file, ontology_file, dim = co_dim, mi=co_mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l) if add_emb_diagonal: Y_emb = np.column_stack((np.eye(len(i2l)), Y_emb)) return unseen_l, l2i, i2l, onto_net, Y_emb, onto_net_mat def graph_embedding(A, i2l, mi=0, dim=20,use_seen_only=True,unseen_l=None): nl = np.shape(A)[0] if use_seen_only: seen_ind = [] unseen_ind = [] for i in range(nl): if i2l[i] in unseen_l: unseen_ind.append(i) else: seen_ind.append(i) seen_ind = np.array(seen_ind) 
unseen_ind = np.array(unseen_ind) #if len(seen_ind) * 0.8 < dim: # dim = int(len(seen_ind) * 0.8) if mi==0 or mi == 1: sp = graph_shortest_path(A,method='FW',directed =False) else: sp = RandomWalkRestart(A, 0.8) if use_seen_only: sp = sp[seen_ind, :] sp = sp[:,seen_ind] X = np.zeros((np.shape(sp)[0],dim)) svd_dim = min(dim, np.shape(sp)[0]-1) if mi==0 or mi == 2: X[:,:svd_dim] = svd_emb(sp, dim=svd_dim) else: X[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0] if use_seen_only: X_ret = np.zeros((nl, dim)) X_ret[seen_ind,:] = X else: X_ret = X if mi==2 or mi == 3: sp *= -1 return sp, X_ret def cal_ontology_emb(ontology_nlp_file, ontology_file, dim=20, mi=3, use_pretrain = None, use_seen_only = True, unseen_l = None): if use_pretrain is None or not os.path.isfile(use_pretrain+'X.npy') or not os.path.isfile(use_pretrain+'sp.npy'): cl_nlp = collections.defaultdict(dict) if ontology_nlp_file is not None: fin = open(ontology_nlp_file) for line in fin: s,p,wt = line.upper().strip().split('\t') cl_nlp[s][p] = float(wt) cl_nlp[p][s] = float(wt) fin.close() fin = open(ontology_file) lset = set() s2p = {} for line in fin: w = line.strip().split('\t') s = w[0] p = w[1] if len(w)==2: if p in cl_nlp and s in cl_nlp[p]: wt = cl_nlp[p][s] else: wt = 1. 
else: wt = float(w[2]) if s not in s2p: s2p[s] = {} s2p[s][p] = wt lset.add(s) lset.add(p) fin.close() lset = np.sort(list(lset)) nl = len(lset) l2i = dict(zip(lset, range(nl))) i2l = dict(zip(range(nl), lset)) A = np.zeros((nl, nl)) for s in s2p: for p in s2p[s]: A[l2i[s], l2i[p]] = s2p[s][p] A[l2i[p], l2i[s]] = s2p[s][p] sp, X = graph_embedding(A, i2l, mi=mi, dim=dim, use_seen_only=use_seen_only, unseen_l=unseen_l) if use_pretrain is not None: i2l_file = use_pretrain+'i2l.npy' l2i_file = use_pretrain+'l2i.npy' X_file = use_pretrain+'X.npy' sp_file = use_pretrain+'sp.npy' np.save(X_file, X) np.save(i2l_file, i2l) np.save(l2i_file, l2i) np.save(sp_file, sp) else: i2l_file = use_pretrain+'i2l.npy' l2i_file = use_pretrain+'l2i.npy' X_file = use_pretrain+'X.npy' sp_file = use_pretrain+'sp.npy' X = np.load(X_file) i2l = np.load(i2l_file,allow_pickle=True).item() l2i = np.load(l2i_file,allow_pickle=True).item() sp = np.load(sp_file,allow_pickle=True) return X, l2i, i2l, sp def merge_26_datasets(datanames_26datasets, scan_dim = 50): datasets, genes_list, n_cells = load_names(datanames_26datasets,verbose=False,log1p=True) datasets, genes = merge_datasets(datasets, genes_list) datasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim) datasets_dimred, expr_datasets = my_assemble(datasets_dimred, ds_names=datanames_26datasets, expr_datasets = datasets, sigma=150) datasets_dimred = sparse.vstack(expr_datasets).toarray() return datasets_dimred, genes def emb_ontology(i2l, ontology_nlp_file, ontology_file, dim=20, mi=0, use_pretrain = None, use_seen_only = True, unseen_l = None): X, ont_l2i, ont_i2l, A = cal_ontology_emb( ontology_nlp_file, ontology_file, dim=dim, mi=mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l) i2emb = np.zeros((len(i2l),dim)) nl = len(i2l) for i in range(nl): ant = i2l[i] if ant not in ont_l2i: print (ant, ont_l2i) assert('xxx' in ant.lower() or 'nan' in ant.lower()) continue i2emb[i,:] = X[ont_l2i[ant],:] return 
i2emb def get_ontology_parents(GO_net, g, dfs_depth=100): term_valid = set() ngh_GO = set() ngh_GO.add(g) depth = {} depth[g] = 0 while len(ngh_GO) > 0: for GO in list(ngh_GO): for GO1 in GO_net[GO]: ngh_GO.add(GO1) depth[GO1] = depth[GO] + 1 ngh_GO.remove(GO) if depth[GO] < dfs_depth: term_valid.add(GO) return term_valid def create_labels(train_Y, ontology_nlp_file, ontology_file, combine_unseen = False, dfs_depth = 1000): fin = open(ontology_file) lset = set() for line in fin: s,p = line.strip().split('\t') lset.add(s) lset.add(p) fin.close() seen_l = sorted(np.unique(train_Y)) unseen_l = sorted(lset - set(train_Y)) ys = np.concatenate((seen_l, unseen_l)) i2l = {} l2i = {} for l in ys: nl = len(i2l) col = l if combine_unseen and l in unseen_l: nl = len(seen_l) l2i[col] = nl i2l[nl] = col continue l2i[col] = nl i2l[nl] = col train_Y = [l2i[y] for y in train_Y] train_X2Y = ConvertLabels(train_Y, ncls = len(i2l)) onto_net, onto_net_mat = read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth) return unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat def query_depth_ontology(net, node, root='cl:0000000'): depth = 0 while node != root: if len(net[node]) == 0: print (node) node = sorted(list(net[node].keys()))[0] depth += 1 if depth>100: sys.error('root not found') return depth def read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = 1000): nl = len(l2i) net = collections.defaultdict(dict) net_mat = np.zeros((nl,nl)) fin = open(ontology_file) for line in fin: s,p = line.strip().split('\t') si = l2i[s] pi = l2i[p] net[si][pi] = 1 net_mat[si][pi] = 1 fin.close() for n in range(nl): ngh = get_ontology_parents(net, n, dfs_depth = dfs_depth) net[n][n] = 1 for n1 in ngh: net[n][n1] = 1 return net, net_mat def extract_label_propagate_tree(onto_net, ncls): tree = np.zeros((ncls,ncls)) for n1 in onto_net: for n2 in onto_net[n1]: tree[n1,n2] = 1 return tree def ConvertLabels(labels, ncls=-1): ncell = np.shape(labels)[0] if len(np.shape(labels)) 
==1 : #bin to mat if ncls == -1: ncls = np.max(labels) mat = np.zeros((ncell, ncls)) for i in range(ncell): mat[i, labels[i]] = 1 return mat else: if ncls == -1: ncls = np.shape(labels)[1] vec = np.zeros(ncell) for i in range(ncell): ind = np.where(labels[i,:]!=0)[0] assert(len(ind)<=1) # not multlabel classification if len(ind)==0: vec[i] = -1 else: vec[i] = ind[0] return vec def MapLabel2CL(test_Y, l2i): test_Y_new = np.array([l2i[y] for y in test_Y]) return test_Y_new def get_ontology_name(obo_file, lower=True): fin = open(obo_file) co2name = {} name2co = {} tag_is_syn = {} for line in fin: if line.startswith('id: '): co = line.strip().split('id: ')[1] if line.startswith('name: '): if lower: name = line.strip().lower().split('name: ')[1] else: name = line.strip().split('name: ')[1] co2name[co] = name name2co[name] = co if line.startswith('synonym: '): if lower: syn = line.strip().lower().split('synonym: "')[1].split('" ')[0] else: syn = line.strip().split('synonym: "')[1].split('" ')[0] if syn in name2co: continue name2co[syn] = co fin.close() return co2name, name2co def knn_ngh(Y2Y): ind = np.argsort(Y2Y*-1, axis=1) return ind def extend_prediction_2unseen_normalize(pred_Y_seen, onto_net_rwr, nseen, ratio=200): sys.exit(-1)#NOT USED ncls = np.shape(onto_net_rwr)[0] onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (ncls, 1)) pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis] pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:]) pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1) pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio return pred_Y_all def create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file): ncls = np.shape(cls2cls)[0] _, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file = ontology_nlp_file, ontology_nlp_emb_file = ontology_nlp_emb_file) onto_net_nlp_all_pairs = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 
- spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb) onto_net_nlp = np.zeros((ncls, ncls)) onto_net_bin = np.zeros((ncls, ncls)) stack_net_bin = np.zeros((ncls, ncls)) stack_net_nlp = np.zeros((ncls, ncls)) for n1 in onto_net: for n2 in onto_net[n1]: if n1==n2: continue stack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1] stack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2] stack_net_bin[n1,n2] = 1 stack_net_bin[n2,n1] = 1 for n1 in range(ncls): for n2 in range(ncls): if cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1: onto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2] onto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1] onto_net_bin[n1,n2] = 1 onto_net_bin[n2,n1] = 1 return onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs def create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]): cls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False) ncls = np.shape(onto_net_mat)[0] networks = [] for rst in rsts: for dis in diss: for thres in thress: use_net = np.copy(onto_net_mat) use_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] onto_net_rwr = RandomWalkRestart(use_net, rst) networks.append(onto_net_rwr) return networks def extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False): if not isinstance(networks, list): networks = [networks] pred_Y_all_totoal = 0. 
for onto_net_rwr in networks: if use_normalize: onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1)) pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis] pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:]) pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1) pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio pred_Y_all_totoal += pred_Y_all return pred_Y_all_totoal def my_auprc(y_true, y_pred): precision, recall, thresholds = precision_recall_curve(y_true, y_pred) area = auc(recall, precision) return area def sampled_auprc(truths,preds): pos = np.where(truths == 1)[0] neg = np.where(truths == 0)[0] assert(len(pos) + len(neg) == len(truths)) nneg = len(neg) npos = len(pos) select_neg = np.random.choice(nneg, npos*3, replace = True) select_ind = np.concatenate((pos, select_neg)) return average_precision_score(truths[select_ind], preds[select_ind]) def evaluate(Y_pred_mat, Y_truth_vec, unseen_l, nseen, Y_truth_bin_mat = None, Y_pred_vec = None, Y_ind=None, Y_net = None, Y_net_mat = None, write_screen = True, write_to_file = None, combine_unseen = False, prefix='', metrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']): #preprocess scores unseen_l = np.array(list(unseen_l)) ncell,nclass = np.shape(Y_pred_mat) nseen = nclass - len(unseen_l) if Y_ind is not None: non_Y_ind = np.array(list(set(range(nclass)) - set(Y_ind))) if len(non_Y_ind)>0: Y_pred_mat[:,non_Y_ind] = -1 * np.inf if Y_pred_vec is None: Y_pred_vec = np.argmax(Y_pred_mat, axis=1) if Y_truth_bin_mat is None: Y_truth_bin_mat = ConvertLabels(Y_truth_vec, nclass) Y_pred_bin_mat = ConvertLabels(Y_pred_vec, nclass) #class-based metrics class_auc_macro = np.full(nclass, np.nan) class_auprc_macro = np.full(nclass, np.nan) class_f1 = np.full(nclass, np.nan) for i in range(nclass): if len(np.unique(Y_truth_bin_mat[:,i]))==2 and 
np.sum(Y_truth_bin_mat[:,i])>=10: class_auc_macro[i] = roc_auc_score(Y_truth_bin_mat[:,i], Y_pred_mat[:,i]) class_auprc_macro[i] = sampled_auprc(Y_truth_bin_mat[:,i], Y_pred_mat[:,i]) class_f1[i] = f1_score(Y_truth_bin_mat[:,i], Y_pred_bin_mat[:,i]) #sample-based metrics extend_acc, extend_Y = extend_accuracy(Y_truth_vec, Y_pred_vec, Y_net, unseen_l) kappa = cohen_kappa_score(Y_pred_vec, Y_truth_vec) extend_kappa = cohen_kappa_score(extend_Y, Y_truth_vec) accuracy = accuracy_score(Y_truth_vec, Y_pred_vec) prec_at_k_3 = precision_at_k(Y_pred_mat, Y_truth_vec, 3) prec_at_k_5 = precision_at_k(Y_pred_mat, Y_truth_vec, 5) #print ([(x,np.sum(Y_truth_bin_mat[:,unseen_l[i]])) for i,x in enumerate(class_auprc_macro[unseen_l]) if not np.isnan(x)]) seen_auc_macro = np.nanmean(class_auc_macro[:nseen]) seen_auprc_macro = np.nanmean(class_auprc_macro[:nseen]) seen_f1 = np.nanmean(class_f1[:nseen]) if len(unseen_l) == 0: unseen_auc_macro = 0 unseen_auprc_macro = 0 unseen_f1 = 0 else: unseen_auc_macro = np.nanmean(class_auc_macro[unseen_l]) #unseen_auprc_macro = np.nanmean([x for i,x in enumerate(class_auprc_macro[unseen_l]) if np.sum(Y_truth_bin_mat[:,unseen_l[i]])>100])# unseen_auprc_macro = np.nanmean(class_auprc_macro[unseen_l]) unseen_f1 = np.nanmean(class_f1[unseen_l]) #metrics = ['AUROC','AUPRC','unseen_AUROC', 'unseen_AUPRC','Cohens Kappa','Accuracy@3','Accuracy@5'] #res_v = [seen_auc_macro, seen_auprc_macro, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), extend_kappa, prec_at_k_3, prec_at_k_5, unseen_auc_macro, unseen_auprc_macro] all_v = {'AUROC':np.nanmean(class_auc_macro), 'AUPRC': np.nanmean(class_auprc_macro), 'AUROC(seen)':seen_auc_macro, 'AUPRC(seen)': seen_auprc_macro, 'AUROC(unseen)':unseen_auc_macro, 'AUPRC(unseen)': unseen_auprc_macro, 'Cohens Kappa':extend_kappa, 'Accuracy@3':prec_at_k_3, 'Accuracy@5':prec_at_k_5} res_v = {} for metric in metrics: res_v[metric] = all_v[metric] #res_v = [seen_auc_macro, seen_auprc_macro, seen_f1, 
np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), np.nanmean(class_f1), unseen_auc_macro, unseen_auprc_macro, unseen_f1] if write_screen: print (prefix, end='\t') for v in metrics: print ('%.4f'%res_v[v], end='\t') print ('') sys.stdout.flush() if write_to_file is not None: write_to_file.write(prefix+'\t') for v in metrics: write_to_file.write('%.2f\t'%res_v[v]) write_to_file.write('\n') write_to_file.flush() return res_v def precision_at_k(pred,truth,k): ncell, nclass = np.shape(pred) hit = 0. for i in range(ncell): x = np.argsort(pred[i,:]*-1) rank = np.where(x==truth[i])[0][0] if rank < k: hit += 1. prec = hit / ncell return prec def write_anndata_data(test_label, test_AnnData, cl_obo_file, label_name): if len(np.shape(test_label))==2: test_label = np.argmax(test_label, axis = 1) co2name, name2co = get_ontology_name(cl_obo_file) x = test_AnnData ncell = np.shape(x.X)[0] print (ncell, len(test_label)) assert(ncell == len(test_label)) test_name = [] test_label_id = [] for i in range(ncell): xx = i2tp[test_label[i]] test_label_id.append(xx) test_name.append(co2name[xx]) test_name = np.array(test_name) test_label_id = np.array(test_label_id) x.obs['OnClass_annotation_ontology_ID'] = test_label x.obs['OnClass_annotation_ontology_name'] = test_name return x def read_type2genes(g2i, marker_gene,cl_obo_file): co2name, name2co = get_ontology_name(cl_obo_file) c2cnew = {} c2cnew['cd4+ t cell'] = 'CD4-positive, CXCR3-negative, CCR6-negative, alpha-beta T cell'.lower() c2cnew['chromaffin cells (enterendocrine)'] = 'chromaffin cell'.lower() c2cnew['mature NK T cell'] = 'mature NK T cell'.lower() c2cnew['cd8+ t cell'] = 'CD8-positive, alpha-beta cytotoxic T cell'.lower() fin = open(marker_gene) fin.readline() tp2genes = {} unfound = set() for line in fin: w = line.strip().split('\t') c1 = w[1].lower() c2 = w[2].lower() genes = [] for ww in w[8:]: if ww.upper() in g2i: genes.append(ww.upper()) if len(genes)==0: continue if c1.endswith('s') and c1[:-1] in name2co: c1 
= c1[:-1] if c2.endswith('s') and c2[:-1] in name2co: c2 = c2[:-1] if c1 + ' cell' in name2co: c1 +=' cell' if c2 + ' cell' in name2co: c2 +=' cell' if c1 in c2cnew: c1 = c2cnew[c1] if c2 in c2cnew: c2 = c2cnew[c2] if c1 in name2co: tp2genes[name2co[c1]] = genes else: unfound.add(c1) if c2 in name2co: tp2genes[name2co[c2]] = genes else: unfound.add(c2) fin.close() return tp2genes def extend_accuracy(test_Y, test_Y_pred_vec, Y_net, unseen_l): unseen_l = set(unseen_l) n = len(test_Y) acc = 0. ntmp = 0. new_pred = [] for i in range(n): if test_Y[i] in unseen_l and test_Y_pred_vec[i] in unseen_l: if test_Y_pred_vec[i] in Y_net[test_Y[i]] and Y_net[test_Y[i]][test_Y_pred_vec[i]] == 1: acc += 1 ntmp += 1 new_pred.append(test_Y[i]) else: new_pred.append(test_Y_pred_vec[i]) else: if test_Y[i] == test_Y_pred_vec[i]: acc += 1 new_pred.append(test_Y_pred_vec[i]) new_pred = np.array(new_pred) return acc/n, new_pred def run_scanorama_multiply_datasets(datasets, genes, scan_dim = 100): sparse_datasets = [] for dataset in datasets: sparse_datasets.append(sparse.csr_matrix(dataset)) datasets, genes = merge_datasets(sparse_datasets, genes) datasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim) datasets_dimred, sparse_dataset_correct = my_assemble(datasets_dimred, expr_datasets = datasets, sigma=150) dataset_correct = [] for sp in sparse_dataset_correct: dataset_correct.append(np.power(sp.todense(), 2)) return datasets_dimred, dataset_correct def run_scanorama_same_genes(features, batch_labels, scan_dim = 100): batchs = np.unique(batch_labels) nbatch = len(batchs) if nbatch == 1: return features ncell, ngene = np.shape(features) assert(ncell == len(batch_labels)) genes = [] datasets = [] indexs = [] for i in range(nbatch): genes.append(np.array(range(ngene))) index = np.where(batch_labels == batchs[i])[0] dataset = features[index,:] print (batchs[i], np.shape(dataset)) datasets.append(dataset) indexs.append(index) _, dataset_correct = 
run_scanorama_multiply_datasets(datasets, genes, scan_dim = scan_dim) assert(len(dataset_correct)) == nbatch for i in range(nbatch): features[indexs[i],:] = dataset_correct[i] return features def my_assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN, sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None, ds_names=None, batch_size=None, geosketch=False, geosketch_max=20000, alignments=None, matches=None): # reimplement part of scanorama to return the corrected expression (instead of low-d vectors) #this code is copy and paste from scanorama in order to output the expression. Please check their tool and cite their paper if you used this function. if len(datasets) == 1: return datasets if alignments is None and matches is None: alignments, matches = find_alignments( datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose, ) ds_assembled = {} panoramas = [] ct = 0 for i, j in alignments: ct += 1 print (ct) sys.stdout.flush() if verbose: if ds_names is None: print('Processing datasets {}'.format((i, j))) else: print('Processing datasets {} <=> {}'. format(ds_names[i], ds_names[j])) # Only consider a dataset a fixed amount of times. if not i in ds_assembled: ds_assembled[i] = 0 ds_assembled[i] += 1 if not j in ds_assembled: ds_assembled[j] = 0 ds_assembled[j] += 1 if ds_assembled[i] > 3 and ds_assembled[j] > 3: continue # See if datasets are involved in any current panoramas. panoramas_i = [ panoramas[p] for p in range(len(panoramas)) if i in panoramas[p] ] assert(len(panoramas_i) <= 1) panoramas_j = [ panoramas[p] for p in range(len(panoramas)) if j in panoramas[p] ] assert(len(panoramas_j) <= 1) if len(panoramas_i) == 0 and len(panoramas_j) == 0: if datasets[i].shape[0] < datasets[j].shape[0]: i, j = j, i panoramas.append([ i ]) panoramas_i = [ panoramas[-1] ] # Map dataset i to panorama j. 
if len(panoramas_i) == 0: curr_ds = datasets[i] curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ]) match = [] base = 0 for p in panoramas_j[0]: if i < p and (i, p) in matches: match.extend([ (a, b + base) for a, b in matches[(i, p)] ]) elif i > p and (p, i) in matches: match.extend([ (b, a + base) for a, b in matches[(p, i)] ]) base += datasets[p].shape[0] ds_ind = [ a for a, _ in match ] ref_ind = [ b for _, b in match ] bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, batch_size=batch_size) datasets[i] = curr_ds + bias if expr_datasets: curr_ds = expr_datasets[i] curr_ref = vstack([ expr_datasets[p] for p in panoramas_j[0] ]) bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, cn=True, batch_size=batch_size) expr_datasets[i] = curr_ds + bias panoramas_j[0].append(i) # Map dataset j to panorama i. elif len(panoramas_j) == 0: curr_ds = datasets[j] curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ]) match = [] base = 0 for p in panoramas_i[0]: if j < p and (j, p) in matches: match.extend([ (a, b + base) for a, b in matches[(j, p)] ]) elif j > p and (p, j) in matches: match.extend([ (b, a + base) for a, b in matches[(p, j)] ]) base += datasets[p].shape[0] ds_ind = [ a for a, _ in match ] ref_ind = [ b for _, b in match ] bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, batch_size=batch_size) datasets[j] = curr_ds + bias if expr_datasets: curr_ds = expr_datasets[j] curr_ref = vstack([ expr_datasets[p] for p in panoramas_i[0] ]) bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, cn=True, batch_size=batch_size) expr_datasets[j] = curr_ds + bias panoramas_i[0].append(j) # Merge two panoramas together. else: curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ]) curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ]) # Find base indices into each panorama. 
base_i = 0 for p in panoramas_i[0]: if p == i: break base_i += datasets[p].shape[0] base_j = 0 for p in panoramas_j[0]: if p == j: break base_j += datasets[p].shape[0] # Find matching indices. match = [] base = 0 for p in panoramas_i[0]: if p == i and j < p and (j, p) in matches: match.extend([ (b + base, a + base_j) for a, b in matches[(j, p)] ]) elif p == i and j > p and (p, j) in matches: match.extend([ (a + base, b + base_j) for a, b in matches[(p, j)] ]) base += datasets[p].shape[0] base = 0 for p in panoramas_j[0]: if p == j and i < p and (i, p) in matches: match.extend([ (a + base_i, b + base) for a, b in matches[(i, p)] ]) elif p == j and i > p and (p, i) in matches: match.extend([ (b + base_i, a + base) for a, b in matches[(p, i)] ]) base += datasets[p].shape[0] ds_ind = [ a for a, _ in match ] ref_ind = [ b for _, b in match ] # Apply transformation to entire panorama. bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, batch_size=batch_size) curr_ds += bias base = 0 for p in panoramas_i[0]: n_cells = datasets[p].shape[0] datasets[p] = curr_ds[base:(base + n_cells), :] base += n_cells if not expr_datasets is None: curr_ds = vstack([ expr_datasets[p] for p in panoramas_i[0] ]) curr_ref = vstack([ expr_datasets[p] for p in panoramas_j[0] ]) bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma, cn=True, batch_size=batch_size) curr_ds += bias base = 0 for p in panoramas_i[0]: n_cells = expr_datasets[p].shape[0] expr_datasets[p] = curr_ds[base:(base + n_cells), :] base += n_cells # Merge panoramas i and j and delete one. if panoramas_i[0] != panoramas_j[0]: panoramas_i[0] += panoramas_j[0] panoramas.remove(panoramas_j[0]) # Visualize. if view_match: plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind) return datasets, expr_datasets
true
true
f70f08db465afe069cfaa7e1cf2eaa71195adbb9
8,661
py
Python
Offline/dataset.py
HongLabTHU/dual-mVEPs
f387584865a45a7257d8203fcb9522820e1311de
[ "Apache-2.0" ]
7
2019-11-29T07:16:55.000Z
2021-12-21T02:13:26.000Z
Offline/dataset.py
HongLabTHU/dual-mVEPs
f387584865a45a7257d8203fcb9522820e1311de
[ "Apache-2.0" ]
null
null
null
Offline/dataset.py
HongLabTHU/dual-mVEPs
f387584865a45a7257d8203fcb9522820e1311de
[ "Apache-2.0" ]
4
2020-04-28T08:02:44.000Z
2021-12-21T02:13:27.000Z
import glob import os import warnings from datetime import datetime from copy import deepcopy import numpy as np import pyedflib import scipy.io as sio from config import cfg from thirdparty.cerebus import NsxFile, NevFile from thirdparty.nex import Reader as NexReader from .utils import find_nearest_time def _load_neuracle(data_dir): """ neuracle file loader :param data_dir: root data dir for the experiment :return: data: ndarray, (channels, timesteps) ch_name: list, name of channels timestamp: list, index of trigger """ f = { 'data': os.path.join(data_dir, 'data.bdf'), 'evt': os.path.join(data_dir, 'evt.bdf') } # read data f_data = pyedflib.EdfReader(f['data']) ch_names = f_data.getSignalLabels() data = np.array([f_data.readSignal(i) for i in range(f_data.signals_in_file)]) # sample frequiencies sfreq = f_data.getSampleFrequencies() assert np.unique(sfreq).size == 1 if cfg.amp_info.samplerate != sfreq[0]: warnings.warn('Samplerate in config file does not equal to data file record') cfg.amp_info.samplerate = int(sfreq[0]) # read event f_evt = pyedflib.EdfReader(f['evt']) event, _, _ = f_evt.readAnnotations() event = list(map(lambda x: int(x * cfg.amp_info.samplerate), event)) return data, ch_names, event def _load_usbamp(data_dir): """ USBAmp file loader :param data_dir: root dir :return: data: ndarray, (channels, timesteps) ch_name: list, name of channels timestamp: list, index of trigger """ # edf USBAmp files = glob.glob(os.path.join(data_dir, '*.edf')) assert len(files) == 1 f = pyedflib.EdfReader(files[0]) ch_names = f.getSignalLabels() # filter channel # find trigger channel triggers = [] sig = [] for i, chan in enumerate(ch_names): if 'trigger' in chan: triggers.append(i) else: sig.append(i) sigbuf = np.array([f.readSignal(i) for i in range(len(ch_names))]) ch_names = [ch_names[i] for i in sig] trigger = -1 for ch_ind in triggers: if not np.allclose(np.diff(sigbuf[ch_ind]), 0): trigger = ch_ind break diff = np.diff(sigbuf[trigger]) timestamp = 
np.nonzero(np.logical_and(diff <= 1, diff >= 0.2))[0].tolist() data = sigbuf[sig] return data, ch_names, timestamp def _load_nex(data_dir): """ nex file loader :param data_dir: :return: data: ndarray, shape (ch, timesteps) ch_names: list, name of each channel timestamps: list, stimulation onset """ files = glob.glob(os.path.join(data_dir, '*.nex')) assert len(files) == 1 reader = NexReader(useNumpy=True) data = reader.ReadNexFile(files[0]) var = data['Variables'] ch_names = [] trigger_ch = None con_data = [] samplerate = cfg.amp_info.samplerate for i, ch in enumerate(var): if 'CH' in ch['Header']['Name']: ch_names.append(ch['Header']['Name']) con_data.append(ch['ContinuousValues']) samplerate = ch['Header']['SamplingRate'] if 'digin' == ch['Header']['Name']: trigger_ch = i if samplerate != cfg.amp_info.samplerate: warnings.warn('Samplerate in config file does not equal to data file record, recorded value is %d' % samplerate) assert trigger_ch is not None timestamp = np.round(data['Variables'][trigger_ch]['Timestamps'] * samplerate).astype(np.int32).tolist() con_data = np.array(con_data) return con_data, ch_names, timestamp def _load_cerebus(data_dir): # search data_dir nsx_files = glob.glob(os.path.join(data_dir, '*.ns*')) nev_files = glob.glob(os.path.join(data_dir, '*.nev')) assert len(nsx_files) == len(nev_files) == 1 # loading f_data = NsxFile(nsx_files[0]) f_evt = NevFile(nev_files[0]) data = f_data.getdata() evt = f_evt.getdata() f_data.close() f_evt.close() # some basic information samplerate = data['samp_per_s'] if cfg.amp_info.samplerate != samplerate: warnings.warn('Samplerate in config file does not equal to data file record') cfg.amp_info.samplerate = samplerate timestampresolution = f_evt.basic_header['TimeStampResolution'] ch_names = [] for info in f_data.extended_headers: ch_names.append(info['ElectrodeLabel']) event = evt['dig_events']['TimeStamps'][0] event = list(map(lambda x: int(x / timestampresolution * cfg.amp_info.samplerate), event)) return 
data['data'], ch_names, event class Dataset: """ for loading data and event order. """ data_format = { 'nex': _load_nex, 'ns3': _load_cerebus, 'nev': _load_cerebus, 'edf': _load_usbamp, 'bdf': _load_neuracle } def __init__(self, subject, date=None, loaddata=True): self.subject = subject self._subj_path = os.path.dirname(__file__) + '/../data/' + subject if date is None: self._date = find_nearest_time(self._subj_path) else: if isinstance(date, datetime): # convert datetime to str self._date = date.strftime("%Y-%m-%d-%H-%M-%S") else: self._date = date print(self._date) self.root_dir = os.path.join(self._subj_path, self._date) # self.montage = OrderedSet(cfg.subj_info.montage) self.montage = deepcopy(cfg.subj_info.montage) # load stim order self.events = self.load_event() if loaddata: self.load_all() else: self.data, self.ch_names, self.timestamp, self.montage_indices, self.events_backup = [None] * 5 def load_all(self): # load data and timestamps dataarray, ch_names, timestamp = self._load_data() timestamp = Dataset.ts_check(timestamp) self.data = dataarray # list to set self.ch_names = ch_names self.timestamp = timestamp self.montage_indices = self.get_channel_indices(self.montage, self.ch_names) self.events_backup = self.events.copy() if cfg.exp_config.bidir: assert 2 * len(timestamp) == self.events.size, print('Dual-directional: ', len(timestamp), self.events.size) self.events = self.events[:, ::2] else: assert len(timestamp) == self.events.size, print('Unidirectional: ', len(timestamp), self.events.size) def _load_data(self): """ Read data according to file format :return: dataext: str, data file name """ walk_path = self.root_dir loader = None for f in os.listdir(walk_path): _ext = f.split('.')[-1] try: loader = Dataset.data_format[_ext] break except KeyError: pass if loader is None: raise FileNotFoundError('No matching data format found') return loader(walk_path) def load_event(self): walk_path = self.root_dir file = glob.glob(os.path.join(walk_path, 
self.subject) + '*') assert len(file) == 1 file = file[0] if file.endswith('.mat'): raw = sio.loadmat(file) order = raw['stim_order'] order -= 1 return order.reshape((-1, 12)) else: with open(file) as f: stim_order = [[int(x) for x in line.split()] for line in f if len(line) > 1] return np.array(stim_order) @staticmethod def get_channel_indices(target_channels, channels_in_data): """ Get corresponding index number for channels in target channels :param target_channels: list, target channel names :param channels_in_data: list, all channel names in data source. :return: """ indices = [] # build a dictionary for indexing channel_book = {name: i for i, name in enumerate(channels_in_data)} for ch in target_channels: try: indices.append(channel_book[ch]) except ValueError as err: print(err) return indices @staticmethod def ts_check(ts): # check time stamp intervals. # In our experience, sometimes an accidental wrong trigger may appear at the beginning during recording. fs = cfg.amp_info.samplerate while len(ts) % 12 and (not (fs * 0.1 <= ts[1] - ts[0] <= fs * 0.3)): del ts[0] return ts
32.317164
120
0.607551
import glob import os import warnings from datetime import datetime from copy import deepcopy import numpy as np import pyedflib import scipy.io as sio from config import cfg from thirdparty.cerebus import NsxFile, NevFile from thirdparty.nex import Reader as NexReader from .utils import find_nearest_time def _load_neuracle(data_dir): f = { 'data': os.path.join(data_dir, 'data.bdf'), 'evt': os.path.join(data_dir, 'evt.bdf') } f_data = pyedflib.EdfReader(f['data']) ch_names = f_data.getSignalLabels() data = np.array([f_data.readSignal(i) for i in range(f_data.signals_in_file)]) sfreq = f_data.getSampleFrequencies() assert np.unique(sfreq).size == 1 if cfg.amp_info.samplerate != sfreq[0]: warnings.warn('Samplerate in config file does not equal to data file record') cfg.amp_info.samplerate = int(sfreq[0]) f_evt = pyedflib.EdfReader(f['evt']) event, _, _ = f_evt.readAnnotations() event = list(map(lambda x: int(x * cfg.amp_info.samplerate), event)) return data, ch_names, event def _load_usbamp(data_dir): files = glob.glob(os.path.join(data_dir, '*.edf')) assert len(files) == 1 f = pyedflib.EdfReader(files[0]) ch_names = f.getSignalLabels() triggers = [] sig = [] for i, chan in enumerate(ch_names): if 'trigger' in chan: triggers.append(i) else: sig.append(i) sigbuf = np.array([f.readSignal(i) for i in range(len(ch_names))]) ch_names = [ch_names[i] for i in sig] trigger = -1 for ch_ind in triggers: if not np.allclose(np.diff(sigbuf[ch_ind]), 0): trigger = ch_ind break diff = np.diff(sigbuf[trigger]) timestamp = np.nonzero(np.logical_and(diff <= 1, diff >= 0.2))[0].tolist() data = sigbuf[sig] return data, ch_names, timestamp def _load_nex(data_dir): files = glob.glob(os.path.join(data_dir, '*.nex')) assert len(files) == 1 reader = NexReader(useNumpy=True) data = reader.ReadNexFile(files[0]) var = data['Variables'] ch_names = [] trigger_ch = None con_data = [] samplerate = cfg.amp_info.samplerate for i, ch in enumerate(var): if 'CH' in ch['Header']['Name']: 
ch_names.append(ch['Header']['Name']) con_data.append(ch['ContinuousValues']) samplerate = ch['Header']['SamplingRate'] if 'digin' == ch['Header']['Name']: trigger_ch = i if samplerate != cfg.amp_info.samplerate: warnings.warn('Samplerate in config file does not equal to data file record, recorded value is %d' % samplerate) assert trigger_ch is not None timestamp = np.round(data['Variables'][trigger_ch]['Timestamps'] * samplerate).astype(np.int32).tolist() con_data = np.array(con_data) return con_data, ch_names, timestamp def _load_cerebus(data_dir): nsx_files = glob.glob(os.path.join(data_dir, '*.ns*')) nev_files = glob.glob(os.path.join(data_dir, '*.nev')) assert len(nsx_files) == len(nev_files) == 1 f_data = NsxFile(nsx_files[0]) f_evt = NevFile(nev_files[0]) data = f_data.getdata() evt = f_evt.getdata() f_data.close() f_evt.close() samplerate = data['samp_per_s'] if cfg.amp_info.samplerate != samplerate: warnings.warn('Samplerate in config file does not equal to data file record') cfg.amp_info.samplerate = samplerate timestampresolution = f_evt.basic_header['TimeStampResolution'] ch_names = [] for info in f_data.extended_headers: ch_names.append(info['ElectrodeLabel']) event = evt['dig_events']['TimeStamps'][0] event = list(map(lambda x: int(x / timestampresolution * cfg.amp_info.samplerate), event)) return data['data'], ch_names, event class Dataset: data_format = { 'nex': _load_nex, 'ns3': _load_cerebus, 'nev': _load_cerebus, 'edf': _load_usbamp, 'bdf': _load_neuracle } def __init__(self, subject, date=None, loaddata=True): self.subject = subject self._subj_path = os.path.dirname(__file__) + '/../data/' + subject if date is None: self._date = find_nearest_time(self._subj_path) else: if isinstance(date, datetime): self._date = date.strftime("%Y-%m-%d-%H-%M-%S") else: self._date = date print(self._date) self.root_dir = os.path.join(self._subj_path, self._date) self.montage = deepcopy(cfg.subj_info.montage) self.events = self.load_event() if loaddata: 
self.load_all() else: self.data, self.ch_names, self.timestamp, self.montage_indices, self.events_backup = [None] * 5 def load_all(self): dataarray, ch_names, timestamp = self._load_data() timestamp = Dataset.ts_check(timestamp) self.data = dataarray self.ch_names = ch_names self.timestamp = timestamp self.montage_indices = self.get_channel_indices(self.montage, self.ch_names) self.events_backup = self.events.copy() if cfg.exp_config.bidir: assert 2 * len(timestamp) == self.events.size, print('Dual-directional: ', len(timestamp), self.events.size) self.events = self.events[:, ::2] else: assert len(timestamp) == self.events.size, print('Unidirectional: ', len(timestamp), self.events.size) def _load_data(self): walk_path = self.root_dir loader = None for f in os.listdir(walk_path): _ext = f.split('.')[-1] try: loader = Dataset.data_format[_ext] break except KeyError: pass if loader is None: raise FileNotFoundError('No matching data format found') return loader(walk_path) def load_event(self): walk_path = self.root_dir file = glob.glob(os.path.join(walk_path, self.subject) + '*') assert len(file) == 1 file = file[0] if file.endswith('.mat'): raw = sio.loadmat(file) order = raw['stim_order'] order -= 1 return order.reshape((-1, 12)) else: with open(file) as f: stim_order = [[int(x) for x in line.split()] for line in f if len(line) > 1] return np.array(stim_order) @staticmethod def get_channel_indices(target_channels, channels_in_data): indices = [] channel_book = {name: i for i, name in enumerate(channels_in_data)} for ch in target_channels: try: indices.append(channel_book[ch]) except ValueError as err: print(err) return indices @staticmethod def ts_check(ts): fs = cfg.amp_info.samplerate while len(ts) % 12 and (not (fs * 0.1 <= ts[1] - ts[0] <= fs * 0.3)): del ts[0] return ts
true
true
f70f091487d65dd2f5ff52861bbd8b9a57e1dcd8
1,719
py
Python
tools/bitesize.py
jerr/bcc
f3fc87aab83ce3e4f1ca227e33853df21147255a
[ "Apache-2.0" ]
5
2018-11-01T12:17:32.000Z
2021-06-14T10:56:53.000Z
tools/bitesize.py
gdankel/bcc
2cc96a8c17b9b7059883627ea211f30a77061b2b
[ "ECL-2.0", "Apache-2.0" ]
13
2018-02-09T22:24:29.000Z
2018-06-18T22:33:29.000Z
tools/bitesize.py
gdankel/bcc
2cc96a8c17b9b7059883627ea211f30a77061b2b
[ "ECL-2.0", "Apache-2.0" ]
5
2018-01-31T05:04:19.000Z
2018-06-12T00:45:21.000Z
#!/usr/bin/python # # bitehist.py Block I/O size histogram. # For Linux, uses BCC, eBPF. See .c file. # # USAGE: bitesize # # Ctrl-C will print the partially gathered histogram then exit. # # Copyright (c) 2016 Allan McAleavy # Licensed under the Apache License, Version 2.0 (the "License") # # 05-Feb-2016 Allan McAleavy ran pep8 against file from bcc import BPF from time import sleep bpf_text = """ #include <uapi/linux/ptrace.h> #include <linux/blkdev.h> struct proc_key_t { char name[TASK_COMM_LEN]; u64 slot; }; struct val_t { char name[TASK_COMM_LEN]; }; BPF_HISTOGRAM(dist, struct proc_key_t); BPF_HASH(commbyreq, struct request *, struct val_t); int trace_pid_start(struct pt_regs *ctx, struct request *req) { struct val_t val = {}; if (bpf_get_current_comm(&val.name, sizeof(val.name)) == 0) { commbyreq.update(&req, &val); } return 0; } int do_count(struct pt_regs *ctx, struct request *req) { struct val_t *valp; valp = commbyreq.lookup(&req); if (valp == 0) { return 0; } if (req->__data_len > 0) { struct proc_key_t key = {.slot = bpf_log2l(req->__data_len / 1024)}; bpf_probe_read(&key.name, sizeof(key.name),valp->name); dist.increment(key); } return 0; } """ # load BPF program b = BPF(text=bpf_text) b.attach_kprobe(event="blk_account_io_start", fn_name="trace_pid_start") b.attach_kprobe(event="blk_account_io_completion", fn_name="do_count") print("Tracing... Hit Ctrl-C to end.") # trace until Ctrl-C dist = b.get_table("dist") try: sleep(99999999) except KeyboardInterrupt: dist.print_log2_hist("Kbytes", "Process Name", section_print_fn=bytes.decode)
22.324675
76
0.670157
from bcc import BPF from time import sleep bpf_text = """ #include <uapi/linux/ptrace.h> #include <linux/blkdev.h> struct proc_key_t { char name[TASK_COMM_LEN]; u64 slot; }; struct val_t { char name[TASK_COMM_LEN]; }; BPF_HISTOGRAM(dist, struct proc_key_t); BPF_HASH(commbyreq, struct request *, struct val_t); int trace_pid_start(struct pt_regs *ctx, struct request *req) { struct val_t val = {}; if (bpf_get_current_comm(&val.name, sizeof(val.name)) == 0) { commbyreq.update(&req, &val); } return 0; } int do_count(struct pt_regs *ctx, struct request *req) { struct val_t *valp; valp = commbyreq.lookup(&req); if (valp == 0) { return 0; } if (req->__data_len > 0) { struct proc_key_t key = {.slot = bpf_log2l(req->__data_len / 1024)}; bpf_probe_read(&key.name, sizeof(key.name),valp->name); dist.increment(key); } return 0; } """ b = BPF(text=bpf_text) b.attach_kprobe(event="blk_account_io_start", fn_name="trace_pid_start") b.attach_kprobe(event="blk_account_io_completion", fn_name="do_count") print("Tracing... Hit Ctrl-C to end.") dist = b.get_table("dist") try: sleep(99999999) except KeyboardInterrupt: dist.print_log2_hist("Kbytes", "Process Name", section_print_fn=bytes.decode)
true
true
f70f0b4b935ab084e2e02ffe3c58b6c6932fdad3
1,115
py
Python
train/model_stack.py
pabloserna/SentimentAnalysisinAWS
d94572665442ef6f49deb07ed78f8104654fefc3
[ "MIT" ]
null
null
null
train/model_stack.py
pabloserna/SentimentAnalysisinAWS
d94572665442ef6f49deb07ed78f8104654fefc3
[ "MIT" ]
null
null
null
train/model_stack.py
pabloserna/SentimentAnalysisinAWS
d94572665442ef6f49deb07ed78f8104654fefc3
[ "MIT" ]
null
null
null
import torch.nn as nn class LSTMClassifier(nn.Module): """ This is the simple RNN model we will be using to perform Sentiment Analysis. """ def __init__(self, embedding_dim, hidden_dim, vocab_size): """ Initialize the model by settingg up the various layers. """ super(LSTMClassifier, self).__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0) self.lstmA = nn.LSTM(embedding_dim, hidden_dim) self.lstmB = nn.LSTM(hidden_dim, hidden_dim) self.dense = nn.Linear(in_features=hidden_dim, out_features=1) self.sig = nn.Sigmoid() self.word_dict = None def forward(self, x): """ Perform a forward pass of our model on some input. """ x = x.t() lengths = x[0,:] reviews = x[1:,:] embeds = self.embedding(reviews) lstm_out1, _ = self.lstmA(embeds) lstm_out, _ = self.lstmB(lstm_out1) out = self.dense(lstm_out) out = out[lengths - 1, range(len(lengths))] return self.sig(out.squeeze())
32.794118
80
0.603587
import torch.nn as nn class LSTMClassifier(nn.Module): def __init__(self, embedding_dim, hidden_dim, vocab_size): super(LSTMClassifier, self).__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0) self.lstmA = nn.LSTM(embedding_dim, hidden_dim) self.lstmB = nn.LSTM(hidden_dim, hidden_dim) self.dense = nn.Linear(in_features=hidden_dim, out_features=1) self.sig = nn.Sigmoid() self.word_dict = None def forward(self, x): x = x.t() lengths = x[0,:] reviews = x[1:,:] embeds = self.embedding(reviews) lstm_out1, _ = self.lstmA(embeds) lstm_out, _ = self.lstmB(lstm_out1) out = self.dense(lstm_out) out = out[lengths - 1, range(len(lengths))] return self.sig(out.squeeze())
true
true
f70f0bc03e2af0f6fa2ecb979b20813cc105e3d8
4,419
py
Python
applications/RomApplication/python_scripts/structural_mechanics_analysis_rom.py
KlausBSautter/Kratos
245b30e38497a242bbdf999278e9c1b6175a573a
[ "BSD-4-Clause" ]
778
2017-01-27T16:29:17.000Z
2022-03-30T03:01:51.000Z
applications/RomApplication/python_scripts/structural_mechanics_analysis_rom.py
KlausBSautter/Kratos
245b30e38497a242bbdf999278e9c1b6175a573a
[ "BSD-4-Clause" ]
6,634
2017-01-15T22:56:13.000Z
2022-03-31T15:03:36.000Z
applications/RomApplication/python_scripts/structural_mechanics_analysis_rom.py
philbucher/Kratos
1ceb900dbacfab344e27e32285250eafc52093ec
[ "BSD-4-Clause" ]
224
2017-02-07T14:12:49.000Z
2022-03-06T23:09:34.000Z
import KratosMultiphysics import KratosMultiphysics.RomApplication as romapp import KratosMultiphysics.StructuralMechanicsApplication from KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod from KratosMultiphysics.RomApplication import python_solvers_wrapper_rom as solver_wrapper from KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_analysis import StructuralMechanicsAnalysis import json import numpy as np class StructuralMechanicsAnalysisROM(StructuralMechanicsAnalysis): def __init__(self,model,project_parameters, hyper_reduction_element_selector = None): super().__init__(model,project_parameters) if hyper_reduction_element_selector != None : if hyper_reduction_element_selector == "EmpiricalCubature": self.hyper_reduction_element_selector = EmpiricalCubatureMethod() self.time_step_residual_matrix_container = [] else: err_msg = "The requested element selection method \"" + hyper_reduction_element_selector + "\" is not in the rom application\n" err_msg += "Available options are: \"EmpiricalCubature\"" raise Exception(err_msg) else: self.hyper_reduction_element_selector = None #### Internal functions #### def _CreateSolver(self): """ Create the Solver (and create and import the ModelPart if it is not alread in the model) """ ## Solver construction with open('RomParameters.json') as rom_parameters: rom_settings = KratosMultiphysics.Parameters(rom_parameters.read()) self.project_parameters["solver_settings"].AddValue("rom_settings", rom_settings["rom_settings"]) return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters["solver_settings"],self.project_parameters["problem_data"]["parallel_type"].GetString()) def _GetSimulationName(self): return "::[ROM Simulation]:: " def ModifyAfterSolverInitialize(self): """Here is where the ROM_BASIS is imposed to each node""" super().ModifyAfterSolverInitialize() computing_model_part = self._solver.GetComputingModelPart() with 
open('RomParameters.json') as f: data = json.load(f) nodal_dofs = len(data["rom_settings"]["nodal_unknowns"]) nodal_modes = data["nodal_modes"] counter = 0 rom_dofs= self.project_parameters["solver_settings"]["rom_settings"]["number_of_rom_dofs"].GetInt() for node in computing_model_part.Nodes: aux = KratosMultiphysics.Matrix(nodal_dofs, rom_dofs) for j in range(nodal_dofs): Counter=str(node.Id) for i in range(rom_dofs): aux[j,i] = nodal_modes[Counter][j][i] node.SetValue(romapp.ROM_BASIS, aux ) # ROM basis counter+=1 if self.hyper_reduction_element_selector != None: if self.hyper_reduction_element_selector.Name == "EmpiricalCubature": self.ResidualUtilityObject = romapp.RomResidualsUtility(self._GetSolver().GetComputingModelPart(), self.project_parameters["solver_settings"]["rom_settings"], self._GetSolver().get_solution_scheme()) def FinalizeSolutionStep(self): if self.hyper_reduction_element_selector != None: if self.hyper_reduction_element_selector.Name == "EmpiricalCubature": print('\n\n\n\nGenerating matrix of residuals') ResMat = self.ResidualUtilityObject.GetResiduals() NP_ResMat = np.array(ResMat, copy=False) self.time_step_residual_matrix_container.append(NP_ResMat) super().FinalizeSolutionStep() def Finalize(self): super().Finalize() if self.hyper_reduction_element_selector != None: if self.hyper_reduction_element_selector.Name == "EmpiricalCubature": OriginalNumberOfElements = self._GetSolver().GetComputingModelPart().NumberOfElements() ModelPartName = self._GetSolver().settings["model_import_settings"]["input_filename"].GetString() self. hyper_reduction_element_selector.SetUp(self.time_step_residual_matrix_container, OriginalNumberOfElements, ModelPartName) self.hyper_reduction_element_selector.Run()
53.890244
215
0.699253
import KratosMultiphysics import KratosMultiphysics.RomApplication as romapp import KratosMultiphysics.StructuralMechanicsApplication from KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod from KratosMultiphysics.RomApplication import python_solvers_wrapper_rom as solver_wrapper from KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_analysis import StructuralMechanicsAnalysis import json import numpy as np class StructuralMechanicsAnalysisROM(StructuralMechanicsAnalysis): def __init__(self,model,project_parameters, hyper_reduction_element_selector = None): super().__init__(model,project_parameters) if hyper_reduction_element_selector != None : if hyper_reduction_element_selector == "EmpiricalCubature": self.hyper_reduction_element_selector = EmpiricalCubatureMethod() self.time_step_residual_matrix_container = [] else: err_msg = "The requested element selection method \"" + hyper_reduction_element_selector + "\" is not in the rom application\n" err_msg += "Available options are: \"EmpiricalCubature\"" raise Exception(err_msg) else: self.hyper_reduction_element_selector = None rom_settings = KratosMultiphysics.Parameters(rom_parameters.read()) self.project_parameters["solver_settings"].AddValue("rom_settings", rom_settings["rom_settings"]) return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters["solver_settings"],self.project_parameters["problem_data"]["parallel_type"].GetString()) def _GetSimulationName(self): return "::[ROM Simulation]:: " def ModifyAfterSolverInitialize(self): super().ModifyAfterSolverInitialize() computing_model_part = self._solver.GetComputingModelPart() with open('RomParameters.json') as f: data = json.load(f) nodal_dofs = len(data["rom_settings"]["nodal_unknowns"]) nodal_modes = data["nodal_modes"] counter = 0 rom_dofs= self.project_parameters["solver_settings"]["rom_settings"]["number_of_rom_dofs"].GetInt() for node in computing_model_part.Nodes: aux = 
KratosMultiphysics.Matrix(nodal_dofs, rom_dofs) for j in range(nodal_dofs): Counter=str(node.Id) for i in range(rom_dofs): aux[j,i] = nodal_modes[Counter][j][i] node.SetValue(romapp.ROM_BASIS, aux ) counter+=1 if self.hyper_reduction_element_selector != None: if self.hyper_reduction_element_selector.Name == "EmpiricalCubature": self.ResidualUtilityObject = romapp.RomResidualsUtility(self._GetSolver().GetComputingModelPart(), self.project_parameters["solver_settings"]["rom_settings"], self._GetSolver().get_solution_scheme()) def FinalizeSolutionStep(self): if self.hyper_reduction_element_selector != None: if self.hyper_reduction_element_selector.Name == "EmpiricalCubature": print('\n\n\n\nGenerating matrix of residuals') ResMat = self.ResidualUtilityObject.GetResiduals() NP_ResMat = np.array(ResMat, copy=False) self.time_step_residual_matrix_container.append(NP_ResMat) super().FinalizeSolutionStep() def Finalize(self): super().Finalize() if self.hyper_reduction_element_selector != None: if self.hyper_reduction_element_selector.Name == "EmpiricalCubature": OriginalNumberOfElements = self._GetSolver().GetComputingModelPart().NumberOfElements() ModelPartName = self._GetSolver().settings["model_import_settings"]["input_filename"].GetString() self. hyper_reduction_element_selector.SetUp(self.time_step_residual_matrix_container, OriginalNumberOfElements, ModelPartName) self.hyper_reduction_element_selector.Run()
true
true
f70f0bd663cf1f4669ac616148ad30bc07b5bb89
1,491
py
Python
examples/JoystickButton.py
rwarren/pyqtgraph
e5d28ad79d0fe1c335e7f98a5d9e7ac0b22f0a2b
[ "MIT" ]
null
null
null
examples/JoystickButton.py
rwarren/pyqtgraph
e5d28ad79d0fe1c335e7f98a5d9e7ac0b22f0a2b
[ "MIT" ]
null
null
null
examples/JoystickButton.py
rwarren/pyqtgraph
e5d28ad79d0fe1c335e7f98a5d9e7ac0b22f0a2b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ JoystickButton is a button with x/y values. When the button is depressed and the mouse dragged, the x/y values change to follow the mouse. When the mouse button is released, the x/y values change to 0,0 (rather like letting go of the joystick). """ import initExample ## Add path to library (just for examples; you do not need this) from pyqtgraph.Qt import QtGui, QtCore import pyqtgraph as pg app = QtGui.QApplication([]) mw = QtGui.QMainWindow() mw.resize(300,50) mw.setWindowTitle('pyqtgraph example: JoystickButton') cw = QtGui.QWidget() mw.setCentralWidget(cw) layout = QtGui.QGridLayout() cw.setLayout(layout) l1 = pg.ValueLabel(siPrefix=True, suffix='m') l2 = pg.ValueLabel(siPrefix=True, suffix='m') jb = pg.JoystickButton() jb.setFixedWidth(30) jb.setFixedHeight(30) layout.addWidget(l1, 0, 0) layout.addWidget(l2, 0, 1) layout.addWidget(jb, 0, 2) x = 0 y = 0 def update(): global x, y, l1, l2, jb dx, dy = jb.getState() x += dx * 1e-3 y += dy * 1e-3 l1.setValue(x) l2.setValue(y) timer = QtCore.QTimer() timer.timeout.connect(update) timer.start(30) #show() moved to end of file to get around this bug: # https://bugreports.qt-project.org/browse/QTBUG-39019 mw.show() ## Start Qt event loop unless running in interactive mode or using pyside. if __name__ == '__main__': import sys if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): QtGui.QApplication.instance().exec_()
25.706897
83
0.701543
import initExample app = QtGui.QApplication([]) mw = QtGui.QMainWindow() mw.resize(300,50) mw.setWindowTitle('pyqtgraph example: JoystickButton') cw = QtGui.QWidget() mw.setCentralWidget(cw) layout = QtGui.QGridLayout() cw.setLayout(layout) l1 = pg.ValueLabel(siPrefix=True, suffix='m') l2 = pg.ValueLabel(siPrefix=True, suffix='m') jb = pg.JoystickButton() jb.setFixedWidth(30) jb.setFixedHeight(30) layout.addWidget(l1, 0, 0) layout.addWidget(l2, 0, 1) layout.addWidget(jb, 0, 2) x = 0 y = 0 def update(): global x, y, l1, l2, jb dx, dy = jb.getState() x += dx * 1e-3 y += dy * 1e-3 l1.setValue(x) l2.setValue(y) timer = QtCore.QTimer() timer.timeout.connect(update) timer.start(30) mw.show() != 1) or not hasattr(QtCore, 'PYQT_VERSION'): QtGui.QApplication.instance().exec_()
true
true
f70f0ccfe19d98fa8a0385a18e2bb067db61ce6e
252
py
Python
texar/utils/exceptions.py
Holmeswww/Text_Infilling
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
[ "Apache-2.0" ]
25
2019-01-03T09:15:20.000Z
2022-02-12T04:20:59.000Z
texar/utils/exceptions.py
Holmeswww/Text_Infilling
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
[ "Apache-2.0" ]
4
2019-03-28T11:02:20.000Z
2022-02-15T04:57:33.000Z
texar/utils/exceptions.py
Holmeswww/Text_Infilling
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
[ "Apache-2.0" ]
9
2019-01-03T02:20:37.000Z
2022-02-12T04:20:50.000Z
# """ Texar defined exceptions. """ from __future__ import absolute_import from __future__ import print_function from __future__ import division __all__ = [ "TexarError" ] class TexarError(Exception): """ Texar error. """ pass
12
38
0.690476
from __future__ import absolute_import from __future__ import print_function from __future__ import division __all__ = [ "TexarError" ] class TexarError(Exception): pass
true
true
f70f0db28b86eda5c789b0a6396d94cc6bd56a59
1,781
py
Python
problems/EE/auto/problem149_EE.py
sunandita/ICAPS_Summer_School_RAE_2020
a496b62185bcfdd2c76eb7986ae99cfa85708d28
[ "BSD-3-Clause" ]
5
2020-10-15T14:40:03.000Z
2021-08-20T17:45:41.000Z
problems/EE/auto/problem149_EE.py
sunandita/ICAPS_Summer_School_RAE_2020
a496b62185bcfdd2c76eb7986ae99cfa85708d28
[ "BSD-3-Clause" ]
null
null
null
problems/EE/auto/problem149_EE.py
sunandita/ICAPS_Summer_School_RAE_2020
a496b62185bcfdd2c76eb7986ae99cfa85708d28
[ "BSD-3-Clause" ]
2
2020-10-15T07:06:14.000Z
2020-10-15T17:33:01.000Z
__author__ = 'patras' from domain_exploreEnv import * from timer import DURATION from state import state, rv DURATION.TIME = { 'survey': 5, 'monitor': 5, 'screen': 5, 'sample': 5, 'process': 5, 'fly': 3, 'deposit': 1, 'transferData': 1, 'take': 2, 'put': 2, 'move': 10, 'charge': 5, 'negotiate': 5, 'handleAlien': 5, } DURATION.COUNTER = { 'survey': 5, 'monitor': 5, 'screen': 5, 'sample': 5, 'process': 5, 'fly': 3, 'deposit': 1, 'transferData': 1, 'take': 2, 'put': 2, 'move': 10, 'charge': 5, 'negotiate': 5, 'handleAlien': 5, } rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'} rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'} rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'} rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4'] rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}} def ResetState(): state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'} state.charge = { 'UAV': 80, 'r1': 50, 'r2': 50} state.data = { 'UAV': 1, 'r1': 3, 'r2': 1} state.pos = {'c1': 'base', 'e1': 'r2', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'} state.load = {'r1': NIL, 'r2': 'e1', 'UAV': 'o1'} state.storm = {'active': False} tasks = { 3: [['doActivities', 'UAV', [['survey', 'z3'], ['survey', 'z4'], ['survey', 'base']]]], 5: [['handleEmergency', 'r2', 'z4']], } eventsEnv = { 5: [alienSpotted, ['z2']] }
29.196721
221
0.485121
__author__ = 'patras' from domain_exploreEnv import * from timer import DURATION from state import state, rv DURATION.TIME = { 'survey': 5, 'monitor': 5, 'screen': 5, 'sample': 5, 'process': 5, 'fly': 3, 'deposit': 1, 'transferData': 1, 'take': 2, 'put': 2, 'move': 10, 'charge': 5, 'negotiate': 5, 'handleAlien': 5, } DURATION.COUNTER = { 'survey': 5, 'monitor': 5, 'screen': 5, 'sample': 5, 'process': 5, 'fly': 3, 'deposit': 1, 'transferData': 1, 'take': 2, 'put': 2, 'move': 10, 'charge': 5, 'negotiate': 5, 'handleAlien': 5, } rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'} rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'} rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'} rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4'] rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}} def ResetState(): state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'} state.charge = { 'UAV': 80, 'r1': 50, 'r2': 50} state.data = { 'UAV': 1, 'r1': 3, 'r2': 1} state.pos = {'c1': 'base', 'e1': 'r2', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'} state.load = {'r1': NIL, 'r2': 'e1', 'UAV': 'o1'} state.storm = {'active': False} tasks = { 3: [['doActivities', 'UAV', [['survey', 'z3'], ['survey', 'z4'], ['survey', 'base']]]], 5: [['handleEmergency', 'r2', 'z4']], } eventsEnv = { 5: [alienSpotted, ['z2']] }
true
true
f70f0eaeb83c3a6275f22f288f61376329b14e9c
2,132
py
Python
google/cloud/managedidentities/v1beta1/managedidentities-v1beta1-py/google/cloud/managedidentities_v1beta1/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
7
2021-02-21T10:39:41.000Z
2021-12-07T07:31:28.000Z
google/cloud/managedidentities/v1/managedidentities-v1-py/google/cloud/managedidentities_v1/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
6
2021-02-02T23:46:11.000Z
2021-11-15T01:46:02.000Z
google/cloud/managedidentities/v1beta1/managedidentities-v1beta1-py/google/cloud/managedidentities_v1beta1/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
4
2021-01-28T23:25:45.000Z
2021-08-30T01:55:16.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .services.managed_identities_service import ManagedIdentitiesServiceClient from .services.managed_identities_service import ManagedIdentitiesServiceAsyncClient from .types.managed_identities_service import AttachTrustRequest from .types.managed_identities_service import CreateMicrosoftAdDomainRequest from .types.managed_identities_service import DeleteDomainRequest from .types.managed_identities_service import DetachTrustRequest from .types.managed_identities_service import GetDomainRequest from .types.managed_identities_service import ListDomainsRequest from .types.managed_identities_service import ListDomainsResponse from .types.managed_identities_service import OpMetadata from .types.managed_identities_service import ReconfigureTrustRequest from .types.managed_identities_service import ResetAdminPasswordRequest from .types.managed_identities_service import ResetAdminPasswordResponse from .types.managed_identities_service import UpdateDomainRequest from .types.managed_identities_service import ValidateTrustRequest from .types.resource import Domain from .types.resource import Trust __all__ = ( 'ManagedIdentitiesServiceAsyncClient', 'AttachTrustRequest', 'CreateMicrosoftAdDomainRequest', 'DeleteDomainRequest', 'DetachTrustRequest', 'Domain', 'GetDomainRequest', 'ListDomainsRequest', 'ListDomainsResponse', 'ManagedIdentitiesServiceClient', 'OpMetadata', 
'ReconfigureTrustRequest', 'ResetAdminPasswordRequest', 'ResetAdminPasswordResponse', 'Trust', 'UpdateDomainRequest', 'ValidateTrustRequest', )
38.763636
84
0.842402
from .services.managed_identities_service import ManagedIdentitiesServiceClient from .services.managed_identities_service import ManagedIdentitiesServiceAsyncClient from .types.managed_identities_service import AttachTrustRequest from .types.managed_identities_service import CreateMicrosoftAdDomainRequest from .types.managed_identities_service import DeleteDomainRequest from .types.managed_identities_service import DetachTrustRequest from .types.managed_identities_service import GetDomainRequest from .types.managed_identities_service import ListDomainsRequest from .types.managed_identities_service import ListDomainsResponse from .types.managed_identities_service import OpMetadata from .types.managed_identities_service import ReconfigureTrustRequest from .types.managed_identities_service import ResetAdminPasswordRequest from .types.managed_identities_service import ResetAdminPasswordResponse from .types.managed_identities_service import UpdateDomainRequest from .types.managed_identities_service import ValidateTrustRequest from .types.resource import Domain from .types.resource import Trust __all__ = ( 'ManagedIdentitiesServiceAsyncClient', 'AttachTrustRequest', 'CreateMicrosoftAdDomainRequest', 'DeleteDomainRequest', 'DetachTrustRequest', 'Domain', 'GetDomainRequest', 'ListDomainsRequest', 'ListDomainsResponse', 'ManagedIdentitiesServiceClient', 'OpMetadata', 'ReconfigureTrustRequest', 'ResetAdminPasswordRequest', 'ResetAdminPasswordResponse', 'Trust', 'UpdateDomainRequest', 'ValidateTrustRequest', )
true
true
f70f0ef293c12a1c4bf47d9eeda991b5c840f5d7
20,689
py
Python
sr/data_loader.py
sentinel-hub/multi-temporal-super-resolution
5ef642304a980db87bdb935a7a7450bd649f8912
[ "MIT" ]
34
2021-05-18T09:04:17.000Z
2022-02-17T09:34:20.000Z
sr/data_loader.py
kurkutesa/multi-temporal-super-resolution
5ef642304a980db87bdb935a7a7450bd649f8912
[ "MIT" ]
1
2021-05-24T13:44:54.000Z
2021-05-25T13:04:02.000Z
sr/data_loader.py
kurkutesa/multi-temporal-super-resolution
5ef642304a980db87bdb935a7a7450bd649f8912
[ "MIT" ]
4
2021-05-25T18:51:57.000Z
2021-06-28T15:57:21.000Z
import os from collections import OrderedDict from typing import Tuple, List, Callable from fs_s3fs import S3FS import numpy as np import pandas as pd import torch from torch.utils.data import Dataset from skimage.exposure import match_histograms from datetime import datetime from eolearn.core import EOPatch def augment( lr: np.ndarray, hr: np.ndarray, flip: bool = True, rotate: bool = True, distribution_shift: bool = False, distribution_scale: bool = False, permute_timestamps: bool = True, max_distribution_shift: float = 0.25, max_distribution_scale_diff: float = 0.25, proba_of_original: float = 0.67 ) -> Tuple[np.ndarray, np.ndarray]: """ Performs a series of image augmentations with specified probability. :param lr: array of low-resolution images, shape is `CxTxHxW` :param hr: array of high-resolution images, shape is `CxHxW` :param flip: whether to randomly flip height or width of arrays :param rotate: whether to randomly rotate the arrays :param distribution_shift: add an offset to the distribution :param distribution_scale: scale the channels distribution :param permute_timestamps: permute timestamps (not desired for HRN) :param max_distribution_shift: set max distribution shift used in distribution shift augmentation :param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation :param proba_of_original: set probability of not modifying original patch, e.g. 1 means no augmetnations :returns: augmented lr and hr arrays """ # Base probability which, after `n_aug_conditions`, reduces to `proba_of_original` n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps) if aug_op) rng_threshold = proba_of_original ** (1. 
/ n_aug_conditions) if flip and np.random.random() > rng_threshold: flip_axis = np.random.choice([-2, -1]) lr = np.flip(lr, axis=flip_axis) hr = np.flip(hr, axis=flip_axis) if rotate and np.random.random() > rng_threshold: k = np.random.choice(np.arange(-2, 3)) lr = np.rot90(lr, k=k, axes=(-2, -1)) hr = np.rot90(hr, k=k, axes=(-2, -1)) if distribution_shift and np.random.random() > rng_threshold: d_shift = (np.random.random() - 0.5) * max_distribution_shift lr = lr + d_shift hr = hr + d_shift if distribution_scale and np.random.random() > rng_threshold: d_scale = 1. + (np.random.random() - 0.5) * max_distribution_scale_diff lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None] hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None] lr = (lr - lr_mean) * d_scale + lr_mean hr = (hr - hr_mean) * d_scale + hr_mean if permute_timestamps and np.random.random() > rng_threshold: # expects lr in `CxTxHxW` shape indices = np.random.permutation(lr.shape[1]) lr = lr[:, indices] return lr, hr def pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray: """ Create an array with first dimension equal to k, filling with 0s in front or at back """ n_pad = k - len(feat) if n_pad < 0: raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}') (_, h, w, c) = feat.shape if pad_to_front: feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat)) else: feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c)))) return feat class ImageSet(OrderedDict): """ An OrderedDict derived class to group the assets of an imageset, with a pretty-print functionality. 
""" def __init__(self, *args, **kwargs): super(ImageSet, self).__init__(*args, **kwargs) def __repr__(self): dict_info = f"{'name':>10} : {self['name']}" for name, v in self.items(): if hasattr(v, 'shape'): dict_info += f"\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})" else: dict_info += f"\n{name:>10} : {v.__class__.__name__} ({v})" return dict_info def read_imageset(imset_file: str, filesystem: S3FS = None, normalize: bool = True, country_norm_df: pd.DataFrame = None, norm_deimos_npz: np.lib.npyio.NpzFile = None, norm_s2_npz: np.lib.npyio.NpzFile = None, n_views: int = 16, padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet: """ Retrieves all assets from the given directory. :param imset_file: name of npz file with sample imageset :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk :param normalize: whether to normalize data or not :param country_norm_df: S2 median/std normalization factors stored per country :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS :param norm_s2_npz: 1st and 99th percentile normalization factors for S2 :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of timeframes are taken :param histogram_matching: whether to match the histogram between the HR and the corresponding LR image """ assert padding in ['zeros', 'repeat'] # Read asset names npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file, allow_pickle=True) features = npz['features'] hr = npz['labels'] if normalize: country = npz['countries'] country_stats = country_norm_df[country_norm_df.country == str(country)] norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values features = (features - norm_median) / norm_std deimos_p1 = norm_deimos_npz['p1'] deimos_p99 = norm_deimos_npz['p99'] s2_p1 = norm_s2_npz['p1'] s2_p99 = norm_s2_npz['p99'] hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1) features = (features - s2_p1) / (s2_p99 - s2_p1) alphas = np.ones(n_views) if histogram_matching: hr = match_histograms(hr, features[-1], multichannel=True) n_feature_timestamps = len(features) if n_feature_timestamps < n_views: if padding == 'zeros': features = pad_to_k(features, n_views, pad_to_front=False) alphas[n_feature_timestamps:] = 0 elif padding == 'repeat': n_pad = n_views - n_feature_timestamps padded = features[-1:].repeat(n_pad, axis=0) features = np.concatenate((features, padded)) else: features = features[-n_views:, ...] # Tensor is `CxTxHxW` features = np.moveaxis(features, -1, 0) hr = np.moveaxis(hr, 2, 0) imageset = ImageSet(name=os.path.basename(imset_file), timestamp_deimos=str(npz['timetamps_deimos'].item()), lr=features, hr=hr, alphas=alphas) return imageset class ImagesetDataset(Dataset): """ Derived Dataset class for loading many imagesets from a list of directories. 
:param imset_dir: name of directory containing files :param imset_npz_files: list of filenames that constitute the dataset :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True` if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS (`BxTxCxHxW`) :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk :param normalize: whether to normalize data or not :param country_norm_df: S2 median/std normalization factors stored per country :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS :param norm_s2_npz: 1st and 99th percentile normalization factors for S2 :param channels_feats: which channels (i.e. indices) are extracted from lrs sequence :param channels_labels: which channels (i.e. indices) are extracted from hr image :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of timeframes are taken :param transform: function executed on lr and hr arrays as augmentation :param histogram_matching: whether to match the histogram between the HR and the corresponding LR image """ def __init__( self, imset_dir: str, imset_npz_files: list, time_first: bool, filesystem: object = None, normalize: bool = True, country_norm_df: object = None, norm_deimos_npz: np.ndarray = None, norm_s2_npz: np.ndarray = None, channels_feats: List[int] = [0, 1, 2, 3], channels_labels: List[int] = [0, 1, 2, 3], n_views: int = 16, padding: str = 'zeros', transform: Callable = None, histogram_matching: bool = False ): super().__init__() self.imset_dir = imset_dir self.filesystem = filesystem self.imset_npz_files = imset_npz_files self.time_first = time_first self.normalize = normalize self.country_norm_df = country_norm_df self.norm_deimos_npz = norm_deimos_npz self.norm_s2_npz = norm_s2_npz self.channels_feats = channels_feats self.channels_labels = channels_labels self.n_views = n_views self.padding = padding self.transform = transform self.histogram_matching = histogram_matching def __len__(self): return len(self.imset_npz_files) def __getitem__(self, index: int) -> ImageSet: """ Returns an ImageSet dict of all assets in the directory of the given index.""" if isinstance(index, int): imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index]) else: raise KeyError('Index must be of type `int`.') imset = read_imageset( imset_file=imset_file, filesystem=self.filesystem, normalize=self.normalize, country_norm_df=self.country_norm_df, norm_deimos_npz=self.norm_deimos_npz, norm_s2_npz=self.norm_s2_npz, n_views=self.n_views, padding=self.padding, histogram_matching=self.histogram_matching ) lr = imset['lr'][self.channels_feats] hr = imset['hr'][self.channels_labels] if self.transform is not None: lr, hr = self.transform(lr, hr) if self.time_first: lr = 
np.swapaxes(lr, 0, 1) imset['lr'] = torch.from_numpy(lr.copy()) imset['hr'] = torch.from_numpy(hr.copy()) imset['alphas'] = torch.from_numpy(imset['alphas']) return imset def filter_cloudy_s2(eop, max_cc): idxs = [] for i, _ in enumerate(eop.timestamp): if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1): idxs.append(i) eop.data['BANDS'] = eop.data['BANDS'][idxs, ...] eop.data['CLP'] = eop.data['CLP'][idxs, ...] eop.mask['CLM'] = eop.mask['CLM'][idxs, ...] eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...] eop.timestamp = list(np.array(eop.timestamp)[idxs]) return eop def timestamps_within_date(timestamps, start_date, end_date): timestamps = [ts.replace(tzinfo=None) for ts in timestamps] # Remove TZINfo that is present in batch return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date] def read_imageset_eopatch(imset_file: str, start_date: datetime, end_date: datetime, country: str, filesystem: S3FS = None, normalize: bool = True, country_norm_df: pd.DataFrame = None, norm_s2_npz: np.lib.npyio.NpzFile = None, n_views: int = 16, padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet: """ Retrieves all assets from the given directory. :param imset_file: name of npz file with sample imageset :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk :param start_date: specifies the start of the temporal range of the stack of images used for prediction :param end_date: specifies the end of the temporal range of the stack of images used for prediction :param country: specifies the name of the country so it can be matched with the country_norm_df :param normalize: whether to normalize data or not :param country_norm_df: S2 median/std normalization factors stored per country :param norm_s2_npz: 1st and 99th percentile normalization factors for S2 :param n_views: number of time frames to consider in lrs sequence. 
If n_views is smaller than the available time frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of timeframes are taken """ assert padding in ['zeros', 'repeat'] eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True) noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1) ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date) features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000 filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs] if normalize: country_stats = country_norm_df[country_norm_df.country == str(country)] norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values features = (features - norm_median) / norm_std s2_p1 = norm_s2_npz['p1'] s2_p99 = norm_s2_npz['p99'] features = (features - s2_p1) / (s2_p99 - s2_p1) alphas = np.ones(n_views) if histogram_matching: hr = match_histograms(hr, features[-1], multichannel=True) n_feature_timestamps = len(features) if n_feature_timestamps < n_views: if padding == 'zeros': features = pad_to_k(features, n_views, pad_to_front=False) alphas[n_feature_timestamps:] = 0 elif padding == 'repeat': n_pad = n_views - n_feature_timestamps padded = features[-1:].repeat(n_pad, axis=0) features = np.concatenate((features, padded)) else: features = features[-n_views:, ...] # Tensor is `CxTxHxW` features = np.moveaxis(features, -1, 0) imageset = ImageSet(name=os.path.basename(imset_file), lr=features, alphas=alphas, ts=filtered_ts[::-1]) return imageset class EopatchPredictionDataset(Dataset): """ Derived Dataset class for loading many imagesets from a list of directories. 
:param imset_dir: name of directory containing files :param imset_npz_files: list of filenames that constitute the dataset :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True` if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS (`BxTxCxHxW`) :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk :param start_date: specifies the start of the temporal range of the stack of images used for prediction :param end_date: specifies the end of the temporal range of the stack of images used for prediction :param country: specifies the name of the country so it can be matched with the country_norm_df :param normalize: whether to normalize data or not :param country_norm_df: S2 median/std normalization factors stored per country :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS :param norm_s2_npz: 1st and 99th percentile normalization factors for S2 :param channels_feats: which channels (i.e. indices) are extracted from lrs sequence :param channels_labels: which channels (i.e. indices) are extracted from hr image :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of timeframes are taken :param transform: function executed on lr and hr arrays as augmentation """ def __init__( self, imset_dir: str, imset_npz_files: list, time_first: bool, start_date: datetime, end_date: datetime, country: str, filesystem: object = None, normalize: bool = True, country_norm_df: object = None, norm_deimos_npz: np.ndarray = None, norm_s2_npz: np.ndarray = None, channels_feats: List[int] = [0, 1, 2, 3], n_views: int = 16, padding: str = 'zeros', histogram_matching: bool = False ): super().__init__() self.imset_dir = imset_dir self.filesystem = filesystem self.imset_npz_files = imset_npz_files self.time_first = time_first self.normalize = normalize self.country_norm_df = country_norm_df self.norm_deimos_npz = norm_deimos_npz self.norm_s2_npz = norm_s2_npz self.channels_feats = channels_feats self.n_views = n_views self.padding = padding self.start_date = start_date self.end_date = end_date self.histogram_matching = histogram_matching self.country = country def __len__(self): return len(self.imset_npz_files) def __getitem__(self, index: int) -> ImageSet: """ Returns an ImageSet dict of all assets in the directory of the given index.""" if isinstance(index, int): imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index]) else: raise KeyError('Index must be of type `int`.') imset = read_imageset_eopatch( imset_file=imset_file, filesystem=self.filesystem, normalize=self.normalize, country_norm_df=self.country_norm_df, norm_deimos_npz=self.norm_deimos_npz, norm_s2_npz=self.norm_s2_npz, n_views=self.n_views, padding=self.padding, start_date=self.start_date, end_date=self.end_date, country=self.country, histogram_matching=self.histogram_matching, ) lr = imset['lr'][self.channels_feats] if self.time_first: lr = np.swapaxes(lr, 0, 1) imset['lr'] = torch.from_numpy(lr.copy()) imset['alphas'] = torch.from_numpy(imset['alphas']) return imset
42.657732
120
0.64353
import os from collections import OrderedDict from typing import Tuple, List, Callable from fs_s3fs import S3FS import numpy as np import pandas as pd import torch from torch.utils.data import Dataset from skimage.exposure import match_histograms from datetime import datetime from eolearn.core import EOPatch def augment( lr: np.ndarray, hr: np.ndarray, flip: bool = True, rotate: bool = True, distribution_shift: bool = False, distribution_scale: bool = False, permute_timestamps: bool = True, max_distribution_shift: float = 0.25, max_distribution_scale_diff: float = 0.25, proba_of_original: float = 0.67 ) -> Tuple[np.ndarray, np.ndarray]: n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps) if aug_op) rng_threshold = proba_of_original ** (1. / n_aug_conditions) if flip and np.random.random() > rng_threshold: flip_axis = np.random.choice([-2, -1]) lr = np.flip(lr, axis=flip_axis) hr = np.flip(hr, axis=flip_axis) if rotate and np.random.random() > rng_threshold: k = np.random.choice(np.arange(-2, 3)) lr = np.rot90(lr, k=k, axes=(-2, -1)) hr = np.rot90(hr, k=k, axes=(-2, -1)) if distribution_shift and np.random.random() > rng_threshold: d_shift = (np.random.random() - 0.5) * max_distribution_shift lr = lr + d_shift hr = hr + d_shift if distribution_scale and np.random.random() > rng_threshold: d_scale = 1. 
+ (np.random.random() - 0.5) * max_distribution_scale_diff lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None] hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None] lr = (lr - lr_mean) * d_scale + lr_mean hr = (hr - hr_mean) * d_scale + hr_mean if permute_timestamps and np.random.random() > rng_threshold: indices = np.random.permutation(lr.shape[1]) lr = lr[:, indices] return lr, hr def pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray: n_pad = k - len(feat) if n_pad < 0: raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}') (_, h, w, c) = feat.shape if pad_to_front: feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat)) else: feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c)))) return feat class ImageSet(OrderedDict): def __init__(self, *args, **kwargs): super(ImageSet, self).__init__(*args, **kwargs) def __repr__(self): dict_info = f"{'name':>10} : {self['name']}" for name, v in self.items(): if hasattr(v, 'shape'): dict_info += f"\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})" else: dict_info += f"\n{name:>10} : {v.__class__.__name__} ({v})" return dict_info def read_imageset(imset_file: str, filesystem: S3FS = None, normalize: bool = True, country_norm_df: pd.DataFrame = None, norm_deimos_npz: np.lib.npyio.NpzFile = None, norm_s2_npz: np.lib.npyio.NpzFile = None, n_views: int = 16, padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet: assert padding in ['zeros', 'repeat'] npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file, allow_pickle=True) features = npz['features'] hr = npz['labels'] if normalize: country = npz['countries'] country_stats = country_norm_df[country_norm_df.country == str(country)] norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values features = (features - norm_median) / 
norm_std deimos_p1 = norm_deimos_npz['p1'] deimos_p99 = norm_deimos_npz['p99'] s2_p1 = norm_s2_npz['p1'] s2_p99 = norm_s2_npz['p99'] hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1) features = (features - s2_p1) / (s2_p99 - s2_p1) alphas = np.ones(n_views) if histogram_matching: hr = match_histograms(hr, features[-1], multichannel=True) n_feature_timestamps = len(features) if n_feature_timestamps < n_views: if padding == 'zeros': features = pad_to_k(features, n_views, pad_to_front=False) alphas[n_feature_timestamps:] = 0 elif padding == 'repeat': n_pad = n_views - n_feature_timestamps padded = features[-1:].repeat(n_pad, axis=0) features = np.concatenate((features, padded)) else: features = features[-n_views:, ...] features = np.moveaxis(features, -1, 0) hr = np.moveaxis(hr, 2, 0) imageset = ImageSet(name=os.path.basename(imset_file), timestamp_deimos=str(npz['timetamps_deimos'].item()), lr=features, hr=hr, alphas=alphas) return imageset class ImagesetDataset(Dataset): def __init__( self, imset_dir: str, imset_npz_files: list, time_first: bool, filesystem: object = None, normalize: bool = True, country_norm_df: object = None, norm_deimos_npz: np.ndarray = None, norm_s2_npz: np.ndarray = None, channels_feats: List[int] = [0, 1, 2, 3], channels_labels: List[int] = [0, 1, 2, 3], n_views: int = 16, padding: str = 'zeros', transform: Callable = None, histogram_matching: bool = False ): super().__init__() self.imset_dir = imset_dir self.filesystem = filesystem self.imset_npz_files = imset_npz_files self.time_first = time_first self.normalize = normalize self.country_norm_df = country_norm_df self.norm_deimos_npz = norm_deimos_npz self.norm_s2_npz = norm_s2_npz self.channels_feats = channels_feats self.channels_labels = channels_labels self.n_views = n_views self.padding = padding self.transform = transform self.histogram_matching = histogram_matching def __len__(self): return len(self.imset_npz_files) def __getitem__(self, index: int) -> ImageSet: if isinstance(index, 
int): imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index]) else: raise KeyError('Index must be of type `int`.') imset = read_imageset( imset_file=imset_file, filesystem=self.filesystem, normalize=self.normalize, country_norm_df=self.country_norm_df, norm_deimos_npz=self.norm_deimos_npz, norm_s2_npz=self.norm_s2_npz, n_views=self.n_views, padding=self.padding, histogram_matching=self.histogram_matching ) lr = imset['lr'][self.channels_feats] hr = imset['hr'][self.channels_labels] if self.transform is not None: lr, hr = self.transform(lr, hr) if self.time_first: lr = np.swapaxes(lr, 0, 1) imset['lr'] = torch.from_numpy(lr.copy()) imset['hr'] = torch.from_numpy(hr.copy()) imset['alphas'] = torch.from_numpy(imset['alphas']) return imset def filter_cloudy_s2(eop, max_cc): idxs = [] for i, _ in enumerate(eop.timestamp): if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1): idxs.append(i) eop.data['BANDS'] = eop.data['BANDS'][idxs, ...] eop.data['CLP'] = eop.data['CLP'][idxs, ...] eop.mask['CLM'] = eop.mask['CLM'][idxs, ...] eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...] 
eop.timestamp = list(np.array(eop.timestamp)[idxs]) return eop def timestamps_within_date(timestamps, start_date, end_date): timestamps = [ts.replace(tzinfo=None) for ts in timestamps] return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date] def read_imageset_eopatch(imset_file: str, start_date: datetime, end_date: datetime, country: str, filesystem: S3FS = None, normalize: bool = True, country_norm_df: pd.DataFrame = None, norm_s2_npz: np.lib.npyio.NpzFile = None, n_views: int = 16, padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet: assert padding in ['zeros', 'repeat'] eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True) noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1) ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date) features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000 filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs] if normalize: country_stats = country_norm_df[country_norm_df.country == str(country)] norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values features = (features - norm_median) / norm_std s2_p1 = norm_s2_npz['p1'] s2_p99 = norm_s2_npz['p99'] features = (features - s2_p1) / (s2_p99 - s2_p1) alphas = np.ones(n_views) if histogram_matching: hr = match_histograms(hr, features[-1], multichannel=True) n_feature_timestamps = len(features) if n_feature_timestamps < n_views: if padding == 'zeros': features = pad_to_k(features, n_views, pad_to_front=False) alphas[n_feature_timestamps:] = 0 elif padding == 'repeat': n_pad = n_views - n_feature_timestamps padded = features[-1:].repeat(n_pad, axis=0) features = np.concatenate((features, padded)) else: features = features[-n_views:, ...] 
features = np.moveaxis(features, -1, 0) imageset = ImageSet(name=os.path.basename(imset_file), lr=features, alphas=alphas, ts=filtered_ts[::-1]) return imageset class EopatchPredictionDataset(Dataset): def __init__( self, imset_dir: str, imset_npz_files: list, time_first: bool, start_date: datetime, end_date: datetime, country: str, filesystem: object = None, normalize: bool = True, country_norm_df: object = None, norm_deimos_npz: np.ndarray = None, norm_s2_npz: np.ndarray = None, channels_feats: List[int] = [0, 1, 2, 3], n_views: int = 16, padding: str = 'zeros', histogram_matching: bool = False ): super().__init__() self.imset_dir = imset_dir self.filesystem = filesystem self.imset_npz_files = imset_npz_files self.time_first = time_first self.normalize = normalize self.country_norm_df = country_norm_df self.norm_deimos_npz = norm_deimos_npz self.norm_s2_npz = norm_s2_npz self.channels_feats = channels_feats self.n_views = n_views self.padding = padding self.start_date = start_date self.end_date = end_date self.histogram_matching = histogram_matching self.country = country def __len__(self): return len(self.imset_npz_files) def __getitem__(self, index: int) -> ImageSet: if isinstance(index, int): imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index]) else: raise KeyError('Index must be of type `int`.') imset = read_imageset_eopatch( imset_file=imset_file, filesystem=self.filesystem, normalize=self.normalize, country_norm_df=self.country_norm_df, norm_deimos_npz=self.norm_deimos_npz, norm_s2_npz=self.norm_s2_npz, n_views=self.n_views, padding=self.padding, start_date=self.start_date, end_date=self.end_date, country=self.country, histogram_matching=self.histogram_matching, ) lr = imset['lr'][self.channels_feats] if self.time_first: lr = np.swapaxes(lr, 0, 1) imset['lr'] = torch.from_numpy(lr.copy()) imset['alphas'] = torch.from_numpy(imset['alphas']) return imset
true
true
f70f10e88059a23ec5dc7d82b780582041e1ec1f
5,119
py
Python
cost.py
Inspirateur/PitchAssignment
a79f830ea91ef51cb2c8fd59f99ae66c6ba0df08
[ "MIT" ]
null
null
null
cost.py
Inspirateur/PitchAssignment
a79f830ea91ef51cb2c8fd59f99ae66c6ba0df08
[ "MIT" ]
null
null
null
cost.py
Inspirateur/PitchAssignment
a79f830ea91ef51cb2c8fd59f99ae66c6ba0df08
[ "MIT" ]
1
2021-09-20T07:48:36.000Z
2021-09-20T07:48:36.000Z
from collections import defaultdict from itertools import product MULTITASK_PENALTY = 1 AUTHOR_PENALTY = 2 RELATION_COST = .05 DEFAULT_FLEXIBILITY = .1 OVERREQ_PENALTY = 0.5 def workload_diff(target, proposed): """ Helper for pitches_cost :param target: <role, load> :param proposed: <role, load> :return: float """ total = 0 for role in target: # flat penalty of -1 if no students are on a target role diff = target[role] - (proposed[role] if role in proposed else -1) # a negative diff means too much student were assigned on the role if diff < 0: # the penalty for going over requirements can be softened diff *= OVERREQ_PENALTY # the squared diff is added to the cost (so that greater discrepencies cost more) total += diff ** 2 return total def author_tasks(pitches, wishes): tasks = {} for pitch in pitches: author = pitches[pitch]["author"] for wpitch, role in wishes[author]: if wpitch == pitch: tasks[(wpitch, role)] = author return tasks class Cost: def __init__(self, pitches, wishes, relations=None, flexibility=DEFAULT_FLEXIBILITY): """ :param pitches: <pitch, <role, load>> :param wishes: <student, [(pitch, role)]> :param relations: <student, <student, cost>> :param flexibility: float in [0, 1] """ self.pitches = pitches self.wishes = wishes self.relations = relations if relations else {} self.flexibility = flexibility self.author_tasks = author_tasks(pitches, wishes) def __call__(self, solution): return ( (1 - self.flexibility) * self.pitches_cost(solution) + self.flexibility * (self.wishes_cost(solution) + RELATION_COST*self.relations_cost(solution)) ) def author_constraint(self, solution): """ cost of the authors not getting their roles on their pitch :param solution: [student, wish index] :return: float """ # <(pitch, role), author> tasks_solution = {task: None for task in self.author_tasks} for student, i in solution: pitch, role = self.wishes[student][i] if (pitch, role) in self.author_tasks: if student == self.author_tasks[(pitch, role)] or 
tasks_solution[(pitch, role)] is None: tasks_solution[(pitch, role)] = student author_cost = 0 for task, student in tasks_solution.items(): if student != self.author_tasks[task]: author_cost += 1 return author_cost def pitches_cost(self, solution): """ cost of the pitches workload not being respected :param solution: [student, wish index] :return: float """ tasks_per_students = defaultdict(int) for student, _ in solution: tasks_per_students[student] += 1 workloads = defaultdict(lambda: defaultdict(float)) for student, i in solution: pitch, role = self.wishes[student][i] workloads[pitch][role] += 1/tasks_per_students[student] # a penalty per additionnal task per student is added to avoid students multitasking too much return ( # cost of workload diff between requirements and solution sum( workload_diff(self.pitches[pitch] ["workload"], workloads[pitch]) for pitch in self.pitches if pitch in workloads ) # cost of multitasking + MULTITASK_PENALTY * \ sum(tasks-1 for tasks in tasks_per_students.values()) # cost of author not having their roles + AUTHOR_PENALTY*self.author_constraint(solution) ) def wishes_cost(self, solution): """ cost of the wishes not being respected :param solution: [student, wish index] :return: float """ return sum( ((i+1)/len(self.wishes[student]))**2 for student, i in solution ) def relations_cost(self, solution): """ cost of the relations between students :param solution: [student, wish index] :return: float """ groups = defaultdict(list) for student, i in solution: pitch, role = self.wishes[student][i] groups[pitch].append(student) total = 0 for group in groups.values(): for student, other in product(filter(self.relations.__contains__, group), group): if student != other: if other not in self.relations[student]: total += .5 elif self.relations[student][other] == -1: total += 1 return total def cost(pitches, wishes, solution, relations=None, flexibility=DEFAULT_FLEXIBILITY): return Cost(pitches, wishes, relations, flexibility)(solution)
35.797203
104
0.586247
from collections import defaultdict from itertools import product MULTITASK_PENALTY = 1 AUTHOR_PENALTY = 2 RELATION_COST = .05 DEFAULT_FLEXIBILITY = .1 OVERREQ_PENALTY = 0.5 def workload_diff(target, proposed): total = 0 for role in target: diff = target[role] - (proposed[role] if role in proposed else -1) if diff < 0: diff *= OVERREQ_PENALTY total += diff ** 2 return total def author_tasks(pitches, wishes): tasks = {} for pitch in pitches: author = pitches[pitch]["author"] for wpitch, role in wishes[author]: if wpitch == pitch: tasks[(wpitch, role)] = author return tasks class Cost: def __init__(self, pitches, wishes, relations=None, flexibility=DEFAULT_FLEXIBILITY): self.pitches = pitches self.wishes = wishes self.relations = relations if relations else {} self.flexibility = flexibility self.author_tasks = author_tasks(pitches, wishes) def __call__(self, solution): return ( (1 - self.flexibility) * self.pitches_cost(solution) + self.flexibility * (self.wishes_cost(solution) + RELATION_COST*self.relations_cost(solution)) ) def author_constraint(self, solution): tasks_solution = {task: None for task in self.author_tasks} for student, i in solution: pitch, role = self.wishes[student][i] if (pitch, role) in self.author_tasks: if student == self.author_tasks[(pitch, role)] or tasks_solution[(pitch, role)] is None: tasks_solution[(pitch, role)] = student author_cost = 0 for task, student in tasks_solution.items(): if student != self.author_tasks[task]: author_cost += 1 return author_cost def pitches_cost(self, solution): tasks_per_students = defaultdict(int) for student, _ in solution: tasks_per_students[student] += 1 workloads = defaultdict(lambda: defaultdict(float)) for student, i in solution: pitch, role = self.wishes[student][i] workloads[pitch][role] += 1/tasks_per_students[student] return ( sum( workload_diff(self.pitches[pitch] ["workload"], workloads[pitch]) for pitch in self.pitches if pitch in workloads ) + MULTITASK_PENALTY * \ sum(tasks-1 for tasks in 
tasks_per_students.values()) + AUTHOR_PENALTY*self.author_constraint(solution) ) def wishes_cost(self, solution): return sum( ((i+1)/len(self.wishes[student]))**2 for student, i in solution ) def relations_cost(self, solution): groups = defaultdict(list) for student, i in solution: pitch, role = self.wishes[student][i] groups[pitch].append(student) total = 0 for group in groups.values(): for student, other in product(filter(self.relations.__contains__, group), group): if student != other: if other not in self.relations[student]: total += .5 elif self.relations[student][other] == -1: total += 1 return total def cost(pitches, wishes, solution, relations=None, flexibility=DEFAULT_FLEXIBILITY): return Cost(pitches, wishes, relations, flexibility)(solution)
true
true
f70f1152cc44929634c97df0c7fa4576d6daf8fb
590
py
Python
DynamicProgramming/MaximumTradingProfit.py
kopok2/algorithms
efb6a423a8447d99584335e9fef8d9b3c74e2ad8
[ "MIT" ]
null
null
null
DynamicProgramming/MaximumTradingProfit.py
kopok2/algorithms
efb6a423a8447d99584335e9fef8d9b3c74e2ad8
[ "MIT" ]
null
null
null
DynamicProgramming/MaximumTradingProfit.py
kopok2/algorithms
efb6a423a8447d99584335e9fef8d9b3c74e2ad8
[ "MIT" ]
null
null
null
# coding=utf-8 """Maximum trade profit problem dynamic programming solution Python implementation.""" def mx_profit(prices): n = len(prices) profit = [0] * n mxp = prices[n - 1] for i in range(n - 2, -1, -1): mxp = max(mxp, prices[i]) profit[i] = max(profit[i + 1], mxp - prices[i]) mnp = prices[0] for i in range(1, n): mnp = min(mnp, prices[i]) profit[i] = max(profit[i - 1], profit[i] + (prices[i] - mnp)) return profit[n - 1] if __name__ == "__main__": prices = [2, 30, 15, 10, 8, 25, 80] print(mx_profit(prices))
26.818182
86
0.561017
def mx_profit(prices): n = len(prices) profit = [0] * n mxp = prices[n - 1] for i in range(n - 2, -1, -1): mxp = max(mxp, prices[i]) profit[i] = max(profit[i + 1], mxp - prices[i]) mnp = prices[0] for i in range(1, n): mnp = min(mnp, prices[i]) profit[i] = max(profit[i - 1], profit[i] + (prices[i] - mnp)) return profit[n - 1] if __name__ == "__main__": prices = [2, 30, 15, 10, 8, 25, 80] print(mx_profit(prices))
true
true
f70f116ad6d2403ad613a608d5c4ed8b8378c8de
9,419
py
Python
tests/end_to_end/test_target_redshift.py
danielerapati/pipelinewise
d36d93c504e40d101e0af61f2aa6a827c3c267b8
[ "Apache-2.0" ]
2
2020-10-05T07:41:36.000Z
2020-10-05T07:41:37.000Z
tests/end_to_end/test_target_redshift.py
danielerapati/pipelinewise
d36d93c504e40d101e0af61f2aa6a827c3c267b8
[ "Apache-2.0" ]
37
2021-06-07T07:12:23.000Z
2022-03-28T23:08:04.000Z
tests/end_to_end/test_target_redshift.py
danielerapati/pipelinewise
d36d93c504e40d101e0af61f2aa6a827c3c267b8
[ "Apache-2.0" ]
1
2020-11-13T20:43:50.000Z
2020-11-13T20:43:50.000Z
import os from datetime import datetime import pytest from .helpers import tasks from .helpers import assertions from .helpers.env import E2EEnv DIR = os.path.dirname(__file__) TAP_MARIADB_ID = 'mariadb_to_rs' TAP_MARIADB_BUFFERED_STREAM_ID = 'mariadb_to_rs_buffered_stream' TAP_POSTGRES_ID = 'postgres_to_rs' TAP_S3_CSV_ID = 's3_csv_to_rs' TARGET_ID = 'redshift' # pylint: disable=attribute-defined-outside-init class TestTargetRedshift: """ End to end tests for Target Redshift """ def setup_method(self): """Initialise test project by generating YAML files from templates for all the configured connectors""" self.project_dir = os.path.join(DIR, 'test-project') # Init query runner methods self.e2e = E2EEnv(self.project_dir) self.run_query_tap_mysql = self.e2e.run_query_tap_mysql self.run_query_tap_postgres = self.e2e.run_query_tap_postgres self.run_query_target_redshift = self.e2e.run_query_target_redshift def teardown_method(self): """Delete test directories and database objects""" @pytest.mark.dependency(name='import_config') def test_import_project(self): """Import the YAML project with taps and target and do discovery mode to write the JSON files for singer connectors """ # Skip every target_postgres related test if env vars not provided if not self.e2e.env['TARGET_REDSHIFT']['is_configured']: pytest.skip('Target Redshift environment variables are not provided') # Setup and clean source and target databases self.e2e.setup_tap_mysql() self.e2e.setup_tap_postgres() if self.e2e.env['TAP_S3_CSV']['is_configured']: self.e2e.setup_tap_s3_csv() self.e2e.setup_target_redshift() # Import project [return_code, stdout, stderr] = tasks.run_command(f'pipelinewise import_config --dir {self.project_dir}') assertions.assert_command_success(return_code, stdout, stderr) @pytest.mark.dependency(depends=['import_config']) def test_replicate_mariadb_to_rs(self, tap_mariadb_id=TAP_MARIADB_ID): """Replicate data from Postgres to Redshift DWH""" # 1. 
Run tap first time - both fastsync and a singer should be triggered assertions.assert_run_tap_success(tap_mariadb_id, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift) #assertions.assert_all_columns_exist(self.run_query_tap_mysql, self.run_query_target_redshift, # mysql_to_redshift.tap_type_to_target_type) # 2. Make changes in MariaDB source database # LOG_BASED self.run_query_tap_mysql('UPDATE weight_unit SET isactive = 0 WHERE weight_unit_id IN (2, 3, 4)') self.run_query_tap_mysql('UPDATE all_datatypes SET c_point = NULL') # INCREMENTAL self.run_query_tap_mysql('INSERT INTO address(isactive, street_number, date_created, date_updated,' ' supplier_supplier_id, zip_code_zip_code_id)' 'VALUES (1, 1234, NOW(), NOW(), 0, 1234)') self.run_query_tap_mysql('UPDATE address SET street_number = 9999, date_updated = NOW()' ' WHERE address_id = 1') # FULL_TABLE self.run_query_tap_mysql('DELETE FROM no_pk_table WHERE id > 10') # 3. 
Run tap second time - both fastsync and a singer should be triggered, there are some FULL_TABLE assertions.assert_run_tap_success(tap_mariadb_id, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift) #assertions.assert_all_columns_exist(self.run_query_tap_mysql, self.run_query_target_redshift, # mysql_to_redshift.tap_type_to_target_type) @pytest.mark.dependency(depends=['import_config']) def test_resync_mariadb_to_rs(self, tap_mariadb_id=TAP_MARIADB_ID): """Resync tables from MariaDB to Redshift DWH""" assertions.assert_resync_tables_success(tap_mariadb_id, TARGET_ID) assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift) # assert_all_columns_exist currently not working on Redshift #assertions.assert_all_columns_exist(self.run_query_tap_mysql, self.run_query_target_redshift, # mysql_to_redshift.tap_type_to_target_type) # pylint: disable=invalid-name @pytest.mark.dependency(depends=['import_config']) def test_replicate_mariadb_to_pg_with_custom_buffer_size(self): """Replicate data from MariaDB to Redshift DWH with custom buffer size Same tests cases as test_replicate_mariadb_to_pg but using another tap with custom stream buffer size""" self.test_replicate_mariadb_to_rs(tap_mariadb_id=TAP_MARIADB_BUFFERED_STREAM_ID) @pytest.mark.dependency(depends=['import_config']) def test_replicate_pg_to_rs(self): """Replicate data from Postgres to Redshift DWH""" # 1. 
Run tap first time - both fastsync and a singer should be triggered assertions.assert_run_tap_success(TAP_POSTGRES_ID, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_postgres, self.run_query_target_redshift) # assert_all_columns_exist currently not working on Redshift #assertions.assert_all_columns_exist(self.run_query_tap_postgres, self.run_query_target_redshift) assertions.assert_date_column_naive_in_target(self.run_query_target_redshift, 'updated_at', 'ppw_e2e_tap_postgres."table_with_space and uppercase"') # 2. Make changes in MariaDB source database # LOG_BASED self.run_query_tap_postgres('insert into public."table_with_space and UPPERCase" (cvarchar, updated_at) values ' "('M', '2020-01-01 08:53:56.8+10')," "('N', '2020-12-31 12:59:00.148+00')," "('O', null)," "('P', '2020-03-03 12:30:00');") # INCREMENTAL self.run_query_tap_postgres('INSERT INTO public.city (id, name, countrycode, district, population) ' "VALUES (4080, 'Bath', 'GBR', 'England', 88859)") self.run_query_tap_postgres('UPDATE public.edgydata SET ' "cjson = json '{\"data\": 1234}', " "cjsonb = jsonb '{\"data\": 2345}', " "cvarchar = 'Liewe Maatjies UPDATED' WHERE cid = 23") # FULL_TABLE self.run_query_tap_postgres("DELETE FROM public.country WHERE code = 'UMI'") # 3. 
Run tap second time - both fastsync and a singer should be triggered, there are some FULL_TABLE assertions.assert_run_tap_success(TAP_POSTGRES_ID, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_postgres, self.run_query_target_redshift) # assert_all_columns_exist currently not working on Redshift #assertions.assert_all_columns_exist(self.run_query_tap_postgres, self.run_query_target_redshift) assertions.assert_date_column_naive_in_target(self.run_query_target_redshift, 'updated_at', 'ppw_e2e_tap_postgres."table_with_space and uppercase"') result = self.run_query_target_redshift( 'SELECT updated_at FROM ppw_e2e_tap_postgres."table_with_space and uppercase" where cvarchar=\'M\';')[0][0] assert result == datetime(2019, 12, 31, 22, 53, 56, 800000) @pytest.mark.dependency(depends=['import_config']) def test_replicate_s3_to_rs(self): """Replicate csv files from s3 to Redshift, check if return code is zero and success log file created""" # Skip tap_s3_csv related test if required env vars not provided if not self.e2e.env['TAP_S3_CSV']['is_configured']: pytest.skip('Tap S3 CSV environment variables are not provided') def assert_columns_exist(): """Helper inner function to test if every table and column exists in target snowflake""" assertions.assert_cols_in_table(self.run_query_target_redshift, 'ppw_e2e_tap_s3_csv', 'countries', ['city', 'country', 'currency', 'id', 'language']) assertions.assert_cols_in_table(self.run_query_target_redshift, 'ppw_e2e_tap_s3_csv', 'people', ['birth_date', 'email', 'first_name', 'gender', 'group', 'id', 'ip_address', 'is_pensioneer', 'last_name']) # 1. Run tap first time - both fastsync and a singer should be triggered assertions.assert_run_tap_success(TAP_S3_CSV_ID, TARGET_ID, ['fastsync', 'singer']) # 2. Run tap second time - both fastsync and a singer should be triggered assertions.assert_run_tap_success(TAP_S3_CSV_ID, TARGET_ID, ['fastsync', 'singer']) assert_columns_exist()
56.740964
120
0.664826
import os from datetime import datetime import pytest from .helpers import tasks from .helpers import assertions from .helpers.env import E2EEnv DIR = os.path.dirname(__file__) TAP_MARIADB_ID = 'mariadb_to_rs' TAP_MARIADB_BUFFERED_STREAM_ID = 'mariadb_to_rs_buffered_stream' TAP_POSTGRES_ID = 'postgres_to_rs' TAP_S3_CSV_ID = 's3_csv_to_rs' TARGET_ID = 'redshift' class TestTargetRedshift: def setup_method(self): self.project_dir = os.path.join(DIR, 'test-project') self.e2e = E2EEnv(self.project_dir) self.run_query_tap_mysql = self.e2e.run_query_tap_mysql self.run_query_tap_postgres = self.e2e.run_query_tap_postgres self.run_query_target_redshift = self.e2e.run_query_target_redshift def teardown_method(self): @pytest.mark.dependency(name='import_config') def test_import_project(self): if not self.e2e.env['TARGET_REDSHIFT']['is_configured']: pytest.skip('Target Redshift environment variables are not provided') self.e2e.setup_tap_mysql() self.e2e.setup_tap_postgres() if self.e2e.env['TAP_S3_CSV']['is_configured']: self.e2e.setup_tap_s3_csv() self.e2e.setup_target_redshift() [return_code, stdout, stderr] = tasks.run_command(f'pipelinewise import_config --dir {self.project_dir}') assertions.assert_command_success(return_code, stdout, stderr) @pytest.mark.dependency(depends=['import_config']) def test_replicate_mariadb_to_rs(self, tap_mariadb_id=TAP_MARIADB_ID): assertions.assert_run_tap_success(tap_mariadb_id, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift) self.run_query_tap_mysql('UPDATE weight_unit SET isactive = 0 WHERE weight_unit_id IN (2, 3, 4)') self.run_query_tap_mysql('UPDATE all_datatypes SET c_point = NULL') self.run_query_tap_mysql('INSERT INTO address(isactive, street_number, date_created, date_updated,' ' supplier_supplier_id, zip_code_zip_code_id)' 'VALUES (1, 1234, NOW(), NOW(), 0, 1234)') self.run_query_tap_mysql('UPDATE address SET street_number = 9999, date_updated = NOW()' 
' WHERE address_id = 1') self.run_query_tap_mysql('DELETE FROM no_pk_table WHERE id > 10') assertions.assert_run_tap_success(tap_mariadb_id, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift) @pytest.mark.dependency(depends=['import_config']) def test_resync_mariadb_to_rs(self, tap_mariadb_id=TAP_MARIADB_ID): assertions.assert_resync_tables_success(tap_mariadb_id, TARGET_ID) assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift) @pytest.mark.dependency(depends=['import_config']) def test_replicate_mariadb_to_pg_with_custom_buffer_size(self): self.test_replicate_mariadb_to_rs(tap_mariadb_id=TAP_MARIADB_BUFFERED_STREAM_ID) @pytest.mark.dependency(depends=['import_config']) def test_replicate_pg_to_rs(self): assertions.assert_run_tap_success(TAP_POSTGRES_ID, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_postgres, self.run_query_target_redshift) assertions.assert_date_column_naive_in_target(self.run_query_target_redshift, 'updated_at', 'ppw_e2e_tap_postgres."table_with_space and uppercase"') self.run_query_tap_postgres('insert into public."table_with_space and UPPERCase" (cvarchar, updated_at) values ' "('M', '2020-01-01 08:53:56.8+10')," "('N', '2020-12-31 12:59:00.148+00')," "('O', null)," "('P', '2020-03-03 12:30:00');") self.run_query_tap_postgres('INSERT INTO public.city (id, name, countrycode, district, population) ' "VALUES (4080, 'Bath', 'GBR', 'England', 88859)") self.run_query_tap_postgres('UPDATE public.edgydata SET ' "cjson = json '{\"data\": 1234}', " "cjsonb = jsonb '{\"data\": 2345}', " "cvarchar = 'Liewe Maatjies UPDATED' WHERE cid = 23") self.run_query_tap_postgres("DELETE FROM public.country WHERE code = 'UMI'") assertions.assert_run_tap_success(TAP_POSTGRES_ID, TARGET_ID, ['fastsync', 'singer']) assertions.assert_row_counts_equal(self.run_query_tap_postgres, self.run_query_target_redshift) 
assertions.assert_date_column_naive_in_target(self.run_query_target_redshift, 'updated_at', 'ppw_e2e_tap_postgres."table_with_space and uppercase"') result = self.run_query_target_redshift( 'SELECT updated_at FROM ppw_e2e_tap_postgres."table_with_space and uppercase" where cvarchar=\'M\';')[0][0] assert result == datetime(2019, 12, 31, 22, 53, 56, 800000) @pytest.mark.dependency(depends=['import_config']) def test_replicate_s3_to_rs(self): if not self.e2e.env['TAP_S3_CSV']['is_configured']: pytest.skip('Tap S3 CSV environment variables are not provided') def assert_columns_exist(): assertions.assert_cols_in_table(self.run_query_target_redshift, 'ppw_e2e_tap_s3_csv', 'countries', ['city', 'country', 'currency', 'id', 'language']) assertions.assert_cols_in_table(self.run_query_target_redshift, 'ppw_e2e_tap_s3_csv', 'people', ['birth_date', 'email', 'first_name', 'gender', 'group', 'id', 'ip_address', 'is_pensioneer', 'last_name']) assertions.assert_run_tap_success(TAP_S3_CSV_ID, TARGET_ID, ['fastsync', 'singer']) assertions.assert_run_tap_success(TAP_S3_CSV_ID, TARGET_ID, ['fastsync', 'singer']) assert_columns_exist()
true
true
f70f126e896590c2b01b095451c40ff7f2026770
2,737
py
Python
donkeycar/templates/config_defaults.py
samvorg/donkey
156f0fcf9a752c9930e2ce5832454e5c5ad4ea87
[ "MIT" ]
null
null
null
donkeycar/templates/config_defaults.py
samvorg/donkey
156f0fcf9a752c9930e2ce5832454e5c5ad4ea87
[ "MIT" ]
null
null
null
donkeycar/templates/config_defaults.py
samvorg/donkey
156f0fcf9a752c9930e2ce5832454e5c5ad4ea87
[ "MIT" ]
1
2018-05-24T17:01:36.000Z
2018-05-24T17:01:36.000Z
""" CAR CONFIG This file is read by your car application's manage.py script to change the car performance. EXMAPLE ----------- import dk cfg = dk.load_config(config_path='~/d2/config.py') print(cfg.CAMERA_RESOLUTION) """ import os #pi information PI_USERNAME = "pi" PI_PASSWD = "raspberry" PI_HOSTNAME = "raspberrypi.local" PI_DONKEY_ROOT = "/home/pi/d2" #PATHS CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__)) DATA_PATH = os.path.join(CAR_PATH, 'data') MODELS_PATH = os.path.join(CAR_PATH, 'models') #VEHICLE DRIVE_LOOP_HZ = 20 MAX_LOOPS = 100000 #CAMERA CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM) IMAGE_W = 160 IMAGE_H = 120 IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono CAMERA_FRAMERATE = DRIVE_LOOP_HZ #9865, over rides only if needed, ie. TX2.. PCA9685_I2C_ADDR = 0x40 PCA9685_I2C_BUSNUM = None #drivetrain DRIVE_TRAIN_TYPE = "SERVO_ESC" # SERVO_ESC|DC_STEER_THROTTLE|DC_TWO_WHEEL|SERVO_HBRIDGE_PWM #STEERING STEERING_CHANNEL = 1 STEERING_LEFT_PWM = 460 STEERING_RIGHT_PWM = 290 #THROTTLE THROTTLE_CHANNEL = 0 THROTTLE_FORWARD_PWM = 500 THROTTLE_STOPPED_PWM = 370 THROTTLE_REVERSE_PWM = 220 #DC_STEER_THROTTLE with one motor as steering, one as drive HBRIDGE_PIN_LEFT = 18 HBRIDGE_PIN_RIGHT = 16 HBRIDGE_PIN_FWD = 15 HBRIDGE_PIN_BWD = 13 #DC_TWO_WHEEL - with two wheels as drive, left and right. HBRIDGE_PIN_LEFT_FWD = 18 HBRIDGE_PIN_LEFT_BWD = 16 HBRIDGE_PIN_RIGHT_FWD = 15 HBRIDGE_PIN_RIGHT_BWD = 13 #TRAINING BATCH_SIZE = 128 TRAIN_TEST_SPLIT = 0.8 MAX_EPOCHS = 100 SHOW_PLOT = True VEBOSE_TRAIN = True USE_EARLY_STOP = True EARLY_STOP_PATIENCE = 5 MIN_DELTA = .0005 PRINT_MODEL_SUMMARY = True #print layers and weights to stdout OPTIMIZER = None #adam, sgd, rmsprop, etc.. 
None accepts default LEARNING_RATE = 0.001 #only used when OPTIMIZER specified LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training #model transfer options FREEZE_LAYERS = False NUM_LAST_LAYERS_TO_TRAIN = 7 #JOYSTICK USE_JOYSTICK_AS_DEFAULT = True JOYSTICK_MAX_THROTTLE = 0.3 JOYSTICK_STEERING_SCALE = 1.0 AUTO_RECORD_ON_THROTTLE = True CONTROLLER_TYPE='ps3' #(ps3|ps4) USE_NETWORKED_JS = False NETWORK_JS_SERVER_IP = "192.168.0.1" #RNN or 3D SEQUENCE_LENGTH = 3 #IMU HAVE_IMU = False #LED HAVE_RGB_LED = False LED_INVERT = False #COMMON ANNODE? #board pin number for pwm outputs LED_PIN_R = 12 LED_PIN_G = 10 LED_PIN_B = 16 #LED status color, 0-100 LED_R = 0 LED_G = 0 LED_B = 1 #BEHAVIORS TRAIN_BEHAVIORS = False BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"] BEHAVIOR_LED_COLORS =[ (0, 10, 0), (10, 0, 0) ] #RGB tuples 0-100 per chanel
22.252033
96
0.756303
import os PI_USERNAME = "pi" PI_PASSWD = "raspberry" PI_HOSTNAME = "raspberrypi.local" PI_DONKEY_ROOT = "/home/pi/d2" CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__)) DATA_PATH = os.path.join(CAR_PATH, 'data') MODELS_PATH = os.path.join(CAR_PATH, 'models') DRIVE_LOOP_HZ = 20 MAX_LOOPS = 100000 CAMERA_TYPE = "PICAM" IMAGE_W = 160 IMAGE_H = 120 IMAGE_DEPTH = 3 CAMERA_FRAMERATE = DRIVE_LOOP_HZ PCA9685_I2C_ADDR = 0x40 PCA9685_I2C_BUSNUM = None DRIVE_TRAIN_TYPE = "SERVO_ESC" STEERING_CHANNEL = 1 STEERING_LEFT_PWM = 460 STEERING_RIGHT_PWM = 290 THROTTLE_CHANNEL = 0 THROTTLE_FORWARD_PWM = 500 THROTTLE_STOPPED_PWM = 370 THROTTLE_REVERSE_PWM = 220 HBRIDGE_PIN_LEFT = 18 HBRIDGE_PIN_RIGHT = 16 HBRIDGE_PIN_FWD = 15 HBRIDGE_PIN_BWD = 13 HBRIDGE_PIN_LEFT_FWD = 18 HBRIDGE_PIN_LEFT_BWD = 16 HBRIDGE_PIN_RIGHT_FWD = 15 HBRIDGE_PIN_RIGHT_BWD = 13 BATCH_SIZE = 128 TRAIN_TEST_SPLIT = 0.8 MAX_EPOCHS = 100 SHOW_PLOT = True VEBOSE_TRAIN = True USE_EARLY_STOP = True EARLY_STOP_PATIENCE = 5 MIN_DELTA = .0005 PRINT_MODEL_SUMMARY = True OPTIMIZER = None LEARNING_RATE = 0.001 LEARNING_RATE_DECAY = 0.0 SEND_BEST_MODEL_TO_PI = False FREEZE_LAYERS = False NUM_LAST_LAYERS_TO_TRAIN = 7 USE_JOYSTICK_AS_DEFAULT = True JOYSTICK_MAX_THROTTLE = 0.3 JOYSTICK_STEERING_SCALE = 1.0 AUTO_RECORD_ON_THROTTLE = True CONTROLLER_TYPE='ps3' USE_NETWORKED_JS = False NETWORK_JS_SERVER_IP = "192.168.0.1" SEQUENCE_LENGTH = 3 HAVE_IMU = False HAVE_RGB_LED = False LED_INVERT = False LED_PIN_R = 12 LED_PIN_G = 10 LED_PIN_B = 16 LED_R = 0 LED_G = 0 LED_B = 1 TRAIN_BEHAVIORS = False BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"] BEHAVIOR_LED_COLORS =[ (0, 10, 0), (10, 0, 0) ]
true
true
f70f13d73c9188fa81bf8e7e7ab80aefc3027697
14,758
py
Python
fugue/execution/factory.py
fugue-project/fugue
838fdaa794c62e8bdc7f1474818d9491d5d39ed7
[ "Apache-2.0" ]
547
2020-09-22T08:30:14.000Z
2022-03-30T23:11:05.000Z
fugue/execution/factory.py
fugue-project/fugue
838fdaa794c62e8bdc7f1474818d9491d5d39ed7
[ "Apache-2.0" ]
196
2020-09-22T23:08:26.000Z
2022-03-26T21:22:48.000Z
fugue/execution/factory.py
fugue-project/fugue
838fdaa794c62e8bdc7f1474818d9491d5d39ed7
[ "Apache-2.0" ]
37
2020-09-23T17:05:00.000Z
2022-03-29T18:26:52.000Z
from typing import Any, Callable, Dict, Optional, Type, Union from fugue.execution.execution_engine import ExecutionEngine, SQLEngine from fugue.execution.native_execution_engine import NativeExecutionEngine from triad.utils.convert import to_instance from triad import assert_or_throw class _ExecutionEngineFactory(object): def __init__(self): self._funcs: Dict[str, Callable] = {} self._type_funcs: Dict[Type, Callable] = {} self._sql_funcs: Dict[str, Callable] = {} self.register_default(lambda conf, **kwargs: NativeExecutionEngine(conf=conf)) self.register_default_sql_engine(lambda engine, **kwargs: engine.sql_engine) def register( self, name_or_type: Union[str, Type], func: Callable, on_dup="overwrite" ) -> None: if isinstance(name_or_type, str): self._register(self._funcs, name=name_or_type, func=func, on_dup=on_dup) else: self._register( self._type_funcs, name=name_or_type, func=func, on_dup=on_dup ) def register_default(self, func: Callable, on_dup="overwrite") -> None: self.register("", func, on_dup) def register_sql_engine( self, name: str, func: Callable, on_dup="overwrite" ) -> None: self._register(self._sql_funcs, name=name, func=func, on_dup=on_dup) def register_default_sql_engine(self, func: Callable, on_dup="overwrite") -> None: self.register_sql_engine("", func, on_dup) def make( self, engine: Any = None, conf: Any = None, **kwargs: Any ) -> ExecutionEngine: if isinstance(engine, tuple): execution_engine = self.make_execution_engine( engine[0], conf=conf, **kwargs ) sql_engine = self.make_sql_engine(engine[1], execution_engine) execution_engine.set_sql_engine(sql_engine) return execution_engine else: return self.make((engine, None), conf=conf, **kwargs) def make_execution_engine( self, engine: Any = None, conf: Any = None, **kwargs: Any ) -> ExecutionEngine: def make_engine(engine: Any) -> ExecutionEngine: if isinstance(engine, str) and engine in self._funcs: return self._funcs[engine](conf, **kwargs) for k, f in self._type_funcs.items(): if 
isinstance(engine, k): return f(engine, conf, **kwargs) if isinstance(engine, ExecutionEngine): if conf is not None: engine.compile_conf.update(conf) engine.compile_conf.update(kwargs) return engine return to_instance( engine, ExecutionEngine, kwargs=dict(conf=conf, **kwargs) ) result = make_engine(engine or "") result.compile_conf.update(result.conf) result.compile_conf.update(conf) result.compile_conf.update(kwargs) return result def make_sql_engine( self, engine: Any = None, execution_engine: Optional[ExecutionEngine] = None, **kwargs: Any, ) -> SQLEngine: if engine is None: engine = "" if isinstance(engine, str) and engine in self._sql_funcs: return self._sql_funcs[engine](execution_engine, **kwargs) if isinstance(engine, SQLEngine): assert_or_throw( execution_engine is None and len(kwargs) == 0, lambda: ValueError( f"{engine} is an instance, can't take arguments " f"execution_engine={execution_engine}, kwargs={kwargs}" ), ) return engine return to_instance( engine, SQLEngine, kwargs=dict(execution_engine=execution_engine, **kwargs) ) def _register( self, callables: Dict[Any, Callable], name: Any, func: Callable, on_dup="overwrite", ) -> None: if name not in callables: callables[name] = func if on_dup in ["raise", "throw"]: raise KeyError(f"{name} is already registered") if on_dup == "overwrite": callables[name] = func return if on_dup == "ignore": return raise ValueError(on_dup) _EXECUTION_ENGINE_FACTORY = _ExecutionEngineFactory() def register_execution_engine( name_or_type: Union[str, Type], func: Callable, on_dup="overwrite" ) -> None: """Register :class:`~fugue.execution.execution_engine.ExecutionEngine` with a given name. :param name_or_type: alias of the execution engine, or type of an object that can be converted to an execution engine :param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an :class:`~fugue.execution.execution_engine.ExecutionEngine` instance :param on_dup: action on duplicated ``name``. 
It can be "overwrite", "ignore" (not overwriting) or "throw" (throw exception), defaults to "overwrite". :raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists .. admonition:: Examples Alias registration examples: .. code-block:: python # create a new engine with name my (overwrites if existed) register_execution_engine("my", lambda conf: MyExecutionEngine(conf)) # 0 make_execution_engine("my") make_execution_engine("my", {"myconfig":"value}) # 1 with FugueWorkflow("my") as dag: dag.create([[0]],"a:int").show() # 2 dag = FugueWorkflow() dag.create([[0]],"a:int").show() dag.run("my", {"myconfig":"value}) # 3 fsql(''' CREATE [[0]] SCHEMA a:int PRINT ''').run("my") Type registration examples: .. code-block:: python from pyspark.sql import SparkSession from fugue_spark import SparkExecutionEngine from fugue_sql import fsql register_execution_engine( SparkSession, lambda session, conf: SparkExecutionEngine(session, conf)) spark_session = SparkSession.builder.getOrCreate() fsql(''' CREATE [[0]] SCHEMA a:int PRINT ''').run(spark_session) """ _EXECUTION_ENGINE_FACTORY.register(name_or_type, func, on_dup) def register_default_execution_engine(func: Callable, on_dup="overwrite") -> None: """Register :class:`~fugue.execution.execution_engine.ExecutionEngine` as the default engine. :param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an :class:`~fugue.execution.execution_engine.ExecutionEngine` instance :param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore" (not overwriting) or "throw" (throw exception), defaults to "overwrite". :raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists .. admonition:: Examples .. 
code-block:: python # create a new engine with name my (overwrites if existed) register_default_execution_engine(lambda conf: MyExecutionEngine(conf)) # the following examples will use MyExecutionEngine # 0 make_execution_engine() make_execution_engine(None, {"myconfig":"value}) # 1 with FugueWorkflow() as dag: dag.create([[0]],"a:int").show() # 2 dag = FugueWorkflow() dag.create([[0]],"a:int").show() dag.run(None, {"myconfig":"value}) # 3 fsql(''' CREATE [[0]] SCHEMA a:int PRINT ''').run("", {"myconfig":"value}) """ _EXECUTION_ENGINE_FACTORY.register_default(func, on_dup) def register_sql_engine(name: str, func: Callable, on_dup="overwrite") -> None: """Register :class:`~fugue.execution.execution_engine.SQLEngine` with a given name. :param name: name of the SQL engine :param func: a callable taking :class:`~fugue.execution.execution_engine.ExecutionEngine` and ``**kwargs`` and returning a :class:`~fugue.execution.execution_engine.SQLEngine` instance :param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore" (not overwriting) or "throw" (throw exception), defaults to "overwrite". :raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists .. admonition:: Examples .. 
code-block:: python # create a new engine with name my (overwrites if existed) register_sql_engine("mysql", lambda engine: MySQLEngine(engine)) # create execution engine with MySQLEngine as the default make_execution_engine(("", "mysql")) # create DaskExecutionEngine with MySQLEngine as the default make_execution_engine(("dask", "mysql")) # default execution engine + MySQLEngine with FugueWorkflow(("","mysql")) as dag: dag.create([[0]],"a:int").show() """ _EXECUTION_ENGINE_FACTORY.register_sql_engine(name, func, on_dup) def register_default_sql_engine(func: Callable, on_dup="overwrite") -> None: """Register :class:`~fugue.execution.execution_engine.SQLEngine` as the default engine :param func: a callable taking :class:`~fugue.execution.execution_engine.ExecutionEngine` and ``**kwargs`` and returning a :class:`~fugue.execution.execution_engine.SQLEngine` instance :param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore" (not overwriting) or "throw" (throw exception), defaults to "overwrite". :raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists .. note:: You should be careful to use this function, because when you set a custom SQL engine as default, all execution engines you create will use this SQL engine unless you are explicit. For example if you set the default SQL engine to be a Spark specific one, then if you start a NativeExecutionEngine, it will try to use it and will throw exceptions. So it's always a better idea to use ``register_sql_engine`` instead .. admonition:: Examples .. 
code-block:: python # create a new engine with name my (overwrites if existed) register_default_sql_engine(lambda engine: MySQLEngine(engine)) # create NativeExecutionEngine with MySQLEngine as the default make_execution_engine() # create SparkExecutionEngine with MySQLEngine instead of SparkSQLEngine make_execution_engine("spark") # NativeExecutionEngine with MySQLEngine with FugueWorkflow() as dag: dag.create([[0]],"a:int").show() """ _EXECUTION_ENGINE_FACTORY.register_default_sql_engine(func, on_dup) def make_execution_engine( engine: Any = None, conf: Any = None, **kwargs: Any ) -> ExecutionEngine: """Create :class:`~fugue.execution.execution_engine.ExecutionEngine` with specified ``engine`` :param engine: it can be empty string or null (use the default execution engine), a string (use the registered execution engine), an :class:`~fugue.execution.execution_engine.ExecutionEngine` type, or the :class:`~fugue.execution.execution_engine.ExecutionEngine` instance , or a tuple of two values where the first value represents execution engine and the second value represents the sql engine (you can use ``None`` for either of them to use the default one), defaults to None :param conf: |ParamsLikeObject|, defaults to None :param kwargs: additional parameters to initialize the execution engine :return: the :class:`~fugue.execution.execution_engine.ExecutionEngine` instance .. admonition:: Examples .. 
code-block:: python register_default_execution_engine(lambda conf: E1(conf)) register_execution_engine("e2", lambda conf, **kwargs: E2(conf, **kwargs)) register_sql_engine("s", lambda conf: S2(conf)) # E1 + E1.default_sql_engine make_execution_engine() # E2 + E2.default_sql_engine make_execution_engine(e2) # E1 + S2 make_execution_engine((None, "s")) # E2(conf, a=1, b=2) + S2 make_execution_engine(("e2", "s"), conf, a=1, b=2) # SparkExecutionEngine + SparkSQLEngine make_execution_engine(SparkExecutionEngine) make_execution_engine(SparkExecutionEngine(spark_session, conf)) # SparkExecutionEngine + S2 make_execution_engine((SparkExecutionEngine, "s")) """ return _EXECUTION_ENGINE_FACTORY.make(engine, conf, **kwargs) def make_sql_engine( engine: Any = None, execution_engine: Optional[ExecutionEngine] = None, **kwargs: Any, ) -> SQLEngine: """Create :class:`~fugue.execution.execution_engine.SQLEngine` with specified ``engine`` :param engine: it can be empty string or null (use the default SQL engine), a string (use the registered SQL engine), an :class:`~fugue.execution.execution_engine.SQLEngine` type, or the :class:`~fugue.execution.execution_engine.SQLEngine` instance (you can use ``None`` to use the default one), defaults to None :param execution_engine: the :class:`~fugue.execution.execution_engine.ExecutionEngine` instance to create the :class:`~fugue.execution.execution_engine.SQLEngine`. Normally you should always provide this value. :param kwargs: additional parameters to initialize the sql engine :return: the :class:`~fugue.execution.execution_engine.SQLEngine` instance .. note:: For users, you normally don't need to call this function directly. Use ``make_execution_engine`` instead .. admonition:: Examples .. 
code-block:: python register_default_sql_engine(lambda conf: S1(conf)) register_sql_engine("s2", lambda conf: S2(conf)) engine = NativeExecutionEngine() # S1(engine) make_sql_engine(None, engine) # S1(engine, a=1) make_sql_engine(None, engine, a=1) # S2(engine) make_sql_engine("s2", engine) # SqliteEngine(engine) make_sql_engine(SqliteEngine) """ return _EXECUTION_ENGINE_FACTORY.make_sql_engine(engine, execution_engine, **kwargs)
36.529703
88
0.629896
from typing import Any, Callable, Dict, Optional, Type, Union from fugue.execution.execution_engine import ExecutionEngine, SQLEngine from fugue.execution.native_execution_engine import NativeExecutionEngine from triad.utils.convert import to_instance from triad import assert_or_throw class _ExecutionEngineFactory(object): def __init__(self): self._funcs: Dict[str, Callable] = {} self._type_funcs: Dict[Type, Callable] = {} self._sql_funcs: Dict[str, Callable] = {} self.register_default(lambda conf, **kwargs: NativeExecutionEngine(conf=conf)) self.register_default_sql_engine(lambda engine, **kwargs: engine.sql_engine) def register( self, name_or_type: Union[str, Type], func: Callable, on_dup="overwrite" ) -> None: if isinstance(name_or_type, str): self._register(self._funcs, name=name_or_type, func=func, on_dup=on_dup) else: self._register( self._type_funcs, name=name_or_type, func=func, on_dup=on_dup ) def register_default(self, func: Callable, on_dup="overwrite") -> None: self.register("", func, on_dup) def register_sql_engine( self, name: str, func: Callable, on_dup="overwrite" ) -> None: self._register(self._sql_funcs, name=name, func=func, on_dup=on_dup) def register_default_sql_engine(self, func: Callable, on_dup="overwrite") -> None: self.register_sql_engine("", func, on_dup) def make( self, engine: Any = None, conf: Any = None, **kwargs: Any ) -> ExecutionEngine: if isinstance(engine, tuple): execution_engine = self.make_execution_engine( engine[0], conf=conf, **kwargs ) sql_engine = self.make_sql_engine(engine[1], execution_engine) execution_engine.set_sql_engine(sql_engine) return execution_engine else: return self.make((engine, None), conf=conf, **kwargs) def make_execution_engine( self, engine: Any = None, conf: Any = None, **kwargs: Any ) -> ExecutionEngine: def make_engine(engine: Any) -> ExecutionEngine: if isinstance(engine, str) and engine in self._funcs: return self._funcs[engine](conf, **kwargs) for k, f in self._type_funcs.items(): if 
isinstance(engine, k): return f(engine, conf, **kwargs) if isinstance(engine, ExecutionEngine): if conf is not None: engine.compile_conf.update(conf) engine.compile_conf.update(kwargs) return engine return to_instance( engine, ExecutionEngine, kwargs=dict(conf=conf, **kwargs) ) result = make_engine(engine or "") result.compile_conf.update(result.conf) result.compile_conf.update(conf) result.compile_conf.update(kwargs) return result def make_sql_engine( self, engine: Any = None, execution_engine: Optional[ExecutionEngine] = None, **kwargs: Any, ) -> SQLEngine: if engine is None: engine = "" if isinstance(engine, str) and engine in self._sql_funcs: return self._sql_funcs[engine](execution_engine, **kwargs) if isinstance(engine, SQLEngine): assert_or_throw( execution_engine is None and len(kwargs) == 0, lambda: ValueError( f"{engine} is an instance, can't take arguments " f"execution_engine={execution_engine}, kwargs={kwargs}" ), ) return engine return to_instance( engine, SQLEngine, kwargs=dict(execution_engine=execution_engine, **kwargs) ) def _register( self, callables: Dict[Any, Callable], name: Any, func: Callable, on_dup="overwrite", ) -> None: if name not in callables: callables[name] = func if on_dup in ["raise", "throw"]: raise KeyError(f"{name} is already registered") if on_dup == "overwrite": callables[name] = func return if on_dup == "ignore": return raise ValueError(on_dup) _EXECUTION_ENGINE_FACTORY = _ExecutionEngineFactory() def register_execution_engine( name_or_type: Union[str, Type], func: Callable, on_dup="overwrite" ) -> None: _EXECUTION_ENGINE_FACTORY.register(name_or_type, func, on_dup) def register_default_execution_engine(func: Callable, on_dup="overwrite") -> None: _EXECUTION_ENGINE_FACTORY.register_default(func, on_dup) def register_sql_engine(name: str, func: Callable, on_dup="overwrite") -> None: _EXECUTION_ENGINE_FACTORY.register_sql_engine(name, func, on_dup) def register_default_sql_engine(func: Callable, on_dup="overwrite") -> None: 
_EXECUTION_ENGINE_FACTORY.register_default_sql_engine(func, on_dup) def make_execution_engine( engine: Any = None, conf: Any = None, **kwargs: Any ) -> ExecutionEngine: return _EXECUTION_ENGINE_FACTORY.make(engine, conf, **kwargs) def make_sql_engine( engine: Any = None, execution_engine: Optional[ExecutionEngine] = None, **kwargs: Any, ) -> SQLEngine: return _EXECUTION_ENGINE_FACTORY.make_sql_engine(engine, execution_engine, **kwargs)
true
true
f70f158e9a7b259d04251a187e3972e2d88d6610
2,151
py
Python
ranndom_m.py
aaparikh/Intermediate-Python-Practice
6f49bea8f677e7ed500cd1ec91df4c8531832abb
[ "Apache-2.0" ]
1
2022-03-08T16:35:51.000Z
2022-03-08T16:35:51.000Z
ranndom_m.py
aaparikh/Intermediate-Python-Practice
6f49bea8f677e7ed500cd1ec91df4c8531832abb
[ "Apache-2.0" ]
null
null
null
ranndom_m.py
aaparikh/Intermediate-Python-Practice
6f49bea8f677e7ed500cd1ec91df4c8531832abb
[ "Apache-2.0" ]
null
null
null
#there are many ways we can do random numbers #1. import random #used to produce pseudo-random numbers. # They are called pseudo-random because they are not truly random and can be reproduced. import random a = random.random() #random float between 0 and 1 b = random.uniform(1,10) #random float between 1 and 10 c = random.randrange(1,10) #random integer between 1 and 10 (not including 10) d = random.randint(1,10) #random integer between 1 and 10 (including 10) e = random.choice(['a','b','c']) #random element from a list #sample picks one element one time and choices may pick one element multiple times f = random.sample(range(1,10),3) #3 random elements from a list g = random.choices(range(1,10),k=3) #3 random elements from a list h = random.normalvariate(0,1) #random float from normal distribution with mean 0 and standard deviation 1 random.shuffle(['a','b','c']) #shuffle a list in place random.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated) import secrets #secrets — Generate secure random numbers for managing secrets (True randomness) # https://docs.python.org/3/library/secrets.html #But this is slower than random module as more complex algorithms are used. a = secrets.randbelow(10) #random integer between 0 and 9 b = secrets.randbits(10) #random integer between 0 and 2**10-1 c = secrets.choice(['a','b','c']) #random element from a list d = secrets.sample(range(1,10),3) #3 random elements from a list #2. 
import numpy import numpy as np #numpy random generator uses a different generator than random module and also has a different seed np.random.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated) a = np.random.random() #random float between 0 and 1 b = np.random.uniform(1,10) #random float between 1 and 10 c = np.random.randrange(1,10) #random integer between 1 and 10 (not including 10) d = np.random.randint(1,10) #random integer between 1 and 10 (including 10) e = np.random.choice(['a','b','c']) #random element from a list f = np.random.randn(3) #list of 3 random elements
53.775
127
0.748024
import random a = random.random() b = random.uniform(1,10) c = random.randrange(1,10) d = random.randint(1,10) e = random.choice(['a','b','c']) f = random.sample(range(1,10),3) g = random.choices(range(1,10),k=3) h = random.normalvariate(0,1) random.shuffle(['a','b','c']) random.seed(10) import secrets a = secrets.randbelow(10) b = secrets.randbits(10) c = secrets.choice(['a','b','c']) d = secrets.sample(range(1,10),3) import numpy as np np.random.seed(10) a = np.random.random() b = np.random.uniform(1,10) c = np.random.randrange(1,10) d = np.random.randint(1,10) e = np.random.choice(['a','b','c']) f = np.random.randn(3)
true
true
f70f16405f2a7091f370917811545289d3bd69fa
2,873
py
Python
core/migrations/0001_initial.py
uadson/django-consumindo-api
21213205d9dfb816f729a15ba3623724d97fd4ce
[ "MIT" ]
1
2021-11-17T02:41:38.000Z
2021-11-17T02:41:38.000Z
core/migrations/0001_initial.py
uadson/django-consumindo-api
21213205d9dfb816f729a15ba3623724d97fd4ce
[ "MIT" ]
1
2021-11-16T15:38:06.000Z
2021-11-16T15:38:06.000Z
core/migrations/0001_initial.py
uadson/django-consumindo-api
21213205d9dfb816f729a15ba3623724d97fd4ce
[ "MIT" ]
null
null
null
# Generated by Django 3.2.9 on 2021-11-16 11:37 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0012_alter_user_first_name_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': 'user', 'verbose_name_plural': 'users', 'abstract': False, }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
63.844444
329
0.663766
import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0012_alter_user_first_name_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': 'user', 'verbose_name_plural': 'users', 'abstract': False, }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
true
true
f70f16af7eacc72139cb3aa69abab59899a7fc3b
1,183
py
Python
tests/aa_pbs_exporter/test_aa_pbs_exporter.py
DonalChilde/aa_pbs_exporter
a627cd9fbd76f4cf27e83e0867074e9d2091dbd9
[ "MIT" ]
null
null
null
tests/aa_pbs_exporter/test_aa_pbs_exporter.py
DonalChilde/aa_pbs_exporter
a627cd9fbd76f4cf27e83e0867074e9d2091dbd9
[ "MIT" ]
null
null
null
tests/aa_pbs_exporter/test_aa_pbs_exporter.py
DonalChilde/aa_pbs_exporter
a627cd9fbd76f4cf27e83e0867074e9d2091dbd9
[ "MIT" ]
null
null
null
# #!/usr/bin/env python # """Tests for `aa_pbs_exporter` package.""" # from click.testing import CliRunner # from aa_pbs_exporter.cli import aa_pbs_exporter_cli as cli # def test_content(response): # """Sample pytest test function with the pytest fixture as an argument.""" # # from bs4 import BeautifulSoup # # assert 'GitHub' in BeautifulSoup(response.content).title.string # def test_command_line_interface(): # """Test the CLI.""" # runner = CliRunner() # result = runner.invoke(cli.main) # assert result.exit_code == 0 # assert "Console script for aa_pbs_exporter" in result.output # help_result = runner.invoke(cli.main, ["--help"]) # assert help_result.exit_code == 0 # assert "--help Show this message and exit." in help_result.output # def test_hello(): # """Test the hello command.""" # runner = CliRunner() # result = runner.invoke(cli.main, ["hello", "Foo"]) # assert result.exit_code == 0 # assert "Hello Foo" in result.output # help_result = runner.invoke(cli.main, ["--help"]) # assert help_result.exit_code == 0 # assert "--help Show this message and exit." in help_result.output
32.861111
79
0.67033
true
true
f70f18cff050b3c4ad8472311641f95c9352b029
3,185
py
Python
test_runner.py
dbulka/pytests
d2658cff3832293cb1b8abcd970f7df83a2b5035
[ "MIT" ]
null
null
null
test_runner.py
dbulka/pytests
d2658cff3832293cb1b8abcd970f7df83a2b5035
[ "MIT" ]
null
null
null
test_runner.py
dbulka/pytests
d2658cff3832293cb1b8abcd970f7df83a2b5035
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import json import os import sys import time from echopy import Echo from project import RESOURCES_DIR, BLOCK_RELEASE_INTERVAL if "BASE_URL" not in os.environ: BASE_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BASE_URL"] else: BASE_URL = os.environ["BASE_URL"] categories = [ # API SECTION 'api', 'login_api', 'asset_api', 'history_api', 'network_broadcast_api', 'registration_api', 'database_api', 'connection_to_apis', # database_api section 'database_api_objects', 'database_api_subscriptions', 'database_api_blocks_transactions', 'database_api_globals', 'database_api_keys', 'database_api_accounts', 'database_api_contracts', 'database_api_balances', 'database_api_assets', 'database_api_committee_members', 'database_api_votes', 'database_api_authority_validation', 'database_api_proposed_transactions', 'database_api_sidechain_ethereum', 'database_api_sidechain_erc20', 'database_api_contract_fee_pool', # OPERATIONS SECTION 'operations', 'account_management_operations', 'assert_conditions_operations', 'asset_management_operations', 'balance_object_operations', 'committee_members_operations', 'contract_operations', 'sidechain_operations', 'custom_extension_operations', 'assets_market_operations', 'proposal_operations', 'asset_transfer_operations', 'vesting_balances_operations', 'withdrawal_permissions_operations', 'sidechain', 'sidechain_ethereum', 'sidechain_erc20', 'scenarios', ] types = [ # TEST TYPES "main", "positive", "negative" ] def process_filters(filters): category_filters = [] type_filters = [] for pytests_filter in filters: if pytests_filter in types: type_filters.append(pytests_filter) else: category_filters.append(pytests_filter) command = "" if len(category_filters): command = "{}-a ".format(command) for category_filter in category_filters: command = "{}{} ".format(command, category_filter) if len(type_filters): command = "{}-m ".format(command) for type_filter in type_filters: command = "{}{}:type 
".format(command, type_filter) return command PYTESTS_FILTERS = "" if "PYTESTS_FILTERS" not in os.environ else os.environ["PYTESTS_FILTERS"].lower().split(":") PYTESTS_FILTER_COMMAND = process_filters(PYTESTS_FILTERS) def get_head_block_num(echo_connection): return echo_connection.api.database.get_dynamic_global_properties()["head_block_number"] def run(echo_connection, filter_command): if get_head_block_num(echo_connection): execution_status = os.system("if ! lcc run {}--exit-error-on-failure; then lcc report --failed; exit 1; fi" .format(filter_command)) sys.exit(1 if execution_status > 1 else execution_status) else: time.sleep(BLOCK_RELEASE_INTERVAL) run(echo_connection, filter_command) echo = Echo() echo.connect(BASE_URL) run(echo, PYTESTS_FILTER_COMMAND)
26.322314
115
0.69325
import json import os import sys import time from echopy import Echo from project import RESOURCES_DIR, BLOCK_RELEASE_INTERVAL if "BASE_URL" not in os.environ: BASE_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BASE_URL"] else: BASE_URL = os.environ["BASE_URL"] categories = [ 'api', 'login_api', 'asset_api', 'history_api', 'network_broadcast_api', 'registration_api', 'database_api', 'connection_to_apis', 'database_api_objects', 'database_api_subscriptions', 'database_api_blocks_transactions', 'database_api_globals', 'database_api_keys', 'database_api_accounts', 'database_api_contracts', 'database_api_balances', 'database_api_assets', 'database_api_committee_members', 'database_api_votes', 'database_api_authority_validation', 'database_api_proposed_transactions', 'database_api_sidechain_ethereum', 'database_api_sidechain_erc20', 'database_api_contract_fee_pool', 'operations', 'account_management_operations', 'assert_conditions_operations', 'asset_management_operations', 'balance_object_operations', 'committee_members_operations', 'contract_operations', 'sidechain_operations', 'custom_extension_operations', 'assets_market_operations', 'proposal_operations', 'asset_transfer_operations', 'vesting_balances_operations', 'withdrawal_permissions_operations', 'sidechain', 'sidechain_ethereum', 'sidechain_erc20', 'scenarios', ] types = [ "main", "positive", "negative" ] def process_filters(filters): category_filters = [] type_filters = [] for pytests_filter in filters: if pytests_filter in types: type_filters.append(pytests_filter) else: category_filters.append(pytests_filter) command = "" if len(category_filters): command = "{}-a ".format(command) for category_filter in category_filters: command = "{}{} ".format(command, category_filter) if len(type_filters): command = "{}-m ".format(command) for type_filter in type_filters: command = "{}{}:type ".format(command, type_filter) return command PYTESTS_FILTERS = "" if "PYTESTS_FILTERS" not in os.environ else 
os.environ["PYTESTS_FILTERS"].lower().split(":") PYTESTS_FILTER_COMMAND = process_filters(PYTESTS_FILTERS) def get_head_block_num(echo_connection): return echo_connection.api.database.get_dynamic_global_properties()["head_block_number"] def run(echo_connection, filter_command): if get_head_block_num(echo_connection): execution_status = os.system("if ! lcc run {}--exit-error-on-failure; then lcc report --failed; exit 1; fi" .format(filter_command)) sys.exit(1 if execution_status > 1 else execution_status) else: time.sleep(BLOCK_RELEASE_INTERVAL) run(echo_connection, filter_command) echo = Echo() echo.connect(BASE_URL) run(echo, PYTESTS_FILTER_COMMAND)
true
true
f70f19d000d6f8359c77e2784b7e70d5352e4de1
11,800
py
Python
pythainlp/transliterate/thai2rom.py
Subarna578/pythainlp
9650a40396719284add17bb09f50e948dea41053
[ "Apache-2.0" ]
null
null
null
pythainlp/transliterate/thai2rom.py
Subarna578/pythainlp
9650a40396719284add17bb09f50e948dea41053
[ "Apache-2.0" ]
null
null
null
pythainlp/transliterate/thai2rom.py
Subarna578/pythainlp
9650a40396719284add17bb09f50e948dea41053
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Romanization of Thai words based on machine-learnt engine ("thai2rom") """ import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from pythainlp.corpus import download, get_corpus_path device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class ThaiTransliterator: def __init__(self): """ Transliteration of Thai words Now supports Thai to Latin (romanization) """ # Download the model, if it's not on your machine. self.__filemodel = get_corpus_path("thai2rom-pytorch-attn") if not self.__filemodel: download("thai2rom-pytorch-attn") self.__filemodel = get_corpus_path("thai2rom-pytorch-attn") loader = torch.load(self.__filemodel, map_location=device) INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"] OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"] self._maxlength = 100 self._char_to_ix = loader["char_to_ix"] self._ix_to_char = loader["ix_to_char"] self._target_char_to_ix = loader["target_char_to_ix"] self._ix_to_target_char = loader["ix_to_target_char"] # encoder/ decoder # Restore the model and construct the encoder and decoder. 
self._encoder = Encoder( INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT) self._decoder = AttentionDecoder( OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT ) self._network = Seq2Seq( self._encoder, self._decoder, self._target_char_to_ix["<start>"], self._target_char_to_ix["<end>"], self._maxlength, ).to(device) self._network.load_state_dict(loader["model_state_dict"]) self._network.eval() def _prepare_sequence_in(self, text: str): """ Prepare input sequence for PyTorch """ idxs = [] for ch in text: if ch in self._char_to_ix: idxs.append(self._char_to_ix[ch]) else: idxs.append(self._char_to_ix["<UNK>"]) idxs.append(self._char_to_ix["<end>"]) tensor = torch.tensor(idxs, dtype=torch.long) return tensor.to(device) def romanize(self, text: str) -> str: """ :param str text: Thai text to be romanized :return: English (more or less) text that spells out how the Thai text should be pronounced. """ input_tensor = self._prepare_sequence_in(text).view(1, -1) input_length = [len(text) + 1] target_tensor_logits = self._network(input_tensor, input_length, None, 0) # Seq2seq model returns <END> as the first token, # As a result, target_tensor_logits.size() is torch.Size([0]) if target_tensor_logits.size(0) == 0: target = ["<PAD>"] else: target_tensor = ( torch.argmax( target_tensor_logits.squeeze(1), 1).cpu().numpy() ) target = [self._ix_to_target_char[t] for t in target_tensor] return "".join(target) class Encoder(nn.Module): def __init__(self, vocabulary_size, embedding_size, hidden_size, dropout=0.5): """Constructor""" super(Encoder, self).__init__() self.hidden_size = hidden_size self.character_embedding = nn.Embedding(vocabulary_size, embedding_size) self.rnn = nn.LSTM( input_size=embedding_size, hidden_size=hidden_size // 2, bidirectional=True, batch_first=True, ) self.dropout = nn.Dropout(dropout) def forward(self, sequences, sequences_lengths): # sequences: (batch_size, sequence_length=MAX_LENGTH) # sequences_lengths: (batch_size) batch_size = sequences.size(0) self.hidden = 
self.init_hidden(batch_size) sequences_lengths = np.sort(sequences_lengths)[::-1] index_sorted = np.argsort( -sequences_lengths ) # use negation in sort in descending order index_unsort = np.argsort(index_sorted) # to unsorted sequence index_sorted = torch.from_numpy(index_sorted) sequences = sequences.index_select(0, index_sorted.to(device)) sequences = self.character_embedding(sequences) sequences = self.dropout(sequences) sequences_packed = nn.utils.rnn.pack_padded_sequence( sequences, sequences_lengths.copy(), batch_first=True ) sequences_output, self.hidden = self.rnn(sequences_packed, self.hidden) sequences_output, _ = nn.utils.rnn.pad_packed_sequence( sequences_output, batch_first=True ) index_unsort = torch.from_numpy(index_unsort).to(device) sequences_output = sequences_output.index_select( 0, index_unsort.clone().detach() ) return sequences_output, self.hidden def init_hidden(self, batch_size): h_0 = torch.zeros( [2, batch_size, self.hidden_size // 2], requires_grad=True ).to(device) c_0 = torch.zeros( [2, batch_size, self.hidden_size // 2], requires_grad=True ).to(device) return (h_0, c_0) class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size if self.method == "general": self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == "concat": self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.other = nn.Parameter(torch.FloatTensor(1, hidden_size)) def forward(self, hidden, encoder_outputs, mask): # Calculate energies for each encoder output if self.method == "dot": attn_energies = torch.bmm(encoder_outputs, hidden.transpose(1, 2)).squeeze(2) elif self.method == "general": attn_energies = self.attn( encoder_outputs.view(-1, encoder_outputs.size(-1)) ) # (batch_size * sequence_len, hidden_size) attn_energies = torch.bmm( attn_energies.view( *encoder_outputs.size()), hidden.transpose(1, 2) ).squeeze(2) # (batch_size, sequence_len) elif self.method 
== "concat": attn_energies = self.attn( torch.cat(( hidden.expand(*encoder_outputs.size()), encoder_outputs ), 2) ) # (batch_size, sequence_len, hidden_size) attn_energies = torch.bmm( attn_energies, self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2), ).squeeze(2) attn_energies = attn_energies.masked_fill(mask == 0, -1e10) # Normalize energies to weights in range 0 to 1 return F.softmax(attn_energies, 1) class AttentionDecoder(nn.Module): def __init__(self, vocabulary_size, embedding_size, hidden_size, dropout=0.5): """Constructor""" super(AttentionDecoder, self).__init__() self.vocabulary_size = vocabulary_size self.hidden_size = hidden_size self.character_embedding = nn.Embedding(vocabulary_size, embedding_size) self.rnn = nn.LSTM( input_size=embedding_size + self.hidden_size, hidden_size=hidden_size, bidirectional=False, batch_first=True, ) self.attn = Attn(method="general", hidden_size=self.hidden_size) self.linear = nn.Linear(hidden_size, vocabulary_size) self.dropout = nn.Dropout(dropout) def forward(self, input, last_hidden, encoder_outputs, mask): """"Defines the forward computation of the decoder""" # input: (batch_size, 1) # last_hidden: (batch_size, hidden_dim) # encoder_outputs: (batch_size, sequence_len, hidden_dim) # mask: (batch_size, sequence_len) hidden = last_hidden.permute(1, 0, 2) attn_weights = self.attn(hidden, encoder_outputs, mask) context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs) context_vector = torch.sum(context_vector, dim=1) context_vector = context_vector.unsqueeze(1) embedded = self.character_embedding(input) embedded = self.dropout(embedded) rnn_input = torch.cat((context_vector, embedded), -1) output, hidden = self.rnn(rnn_input) output = output.view(-1, output.size(2)) x = self.linear(output) return x, hidden[0], attn_weights class Seq2Seq(nn.Module): def __init__( self, encoder, decoder, target_start_token, target_end_token, max_length ): super().__init__() self.encoder = encoder self.decoder = decoder 
self.pad_idx = 0 self.target_start_token = target_start_token self.target_end_token = target_end_token self.max_length = max_length assert encoder.hidden_size == decoder.hidden_size def create_mask(self, source_seq): mask = source_seq != self.pad_idx return mask def forward( self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5 ): # source_seq: (batch_size, MAX_LENGTH) # source_seq_len: (batch_size, 1) # target_seq: (batch_size, MAX_LENGTH) batch_size = source_seq.size(0) start_token = self.target_start_token end_token = self.target_end_token max_len = self.max_length target_vocab_size = self.decoder.vocabulary_size outputs = torch.zeros(max_len, batch_size, target_vocab_size).to(device) if target_seq is None: assert teacher_forcing_ratio == 0, "Must be zero during inference" inference = True else: inference = False encoder_outputs, encoder_hidden = self.encoder(source_seq, source_seq_len) decoder_input = ( torch.tensor([[start_token] * batch_size]).view(batch_size, 1).to(device) ) encoder_hidden_h_t = torch.cat( [encoder_hidden[0][0], encoder_hidden[0][1]], dim=1 ).unsqueeze(dim=0) decoder_hidden = encoder_hidden_h_t max_source_len = encoder_outputs.size(1) mask = self.create_mask(source_seq[:, 0:max_source_len]) for di in range(max_len): decoder_output, decoder_hidden, _ = self.decoder( decoder_input, decoder_hidden, encoder_outputs, mask ) topv, topi = decoder_output.topk(1) outputs[di] = decoder_output.to(device) teacher_force = random.random() < teacher_forcing_ratio decoder_input = ( target_seq[:, di].reshape(batch_size, 1) if teacher_force else topi.detach() ) if inference and decoder_input == end_token: return outputs[:di] return outputs _THAI_TO_ROM = ThaiTransliterator() def romanize(text: str) -> str: return _THAI_TO_ROM.romanize(text)
33.810888
79
0.592373
import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from pythainlp.corpus import download, get_corpus_path device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class ThaiTransliterator: def __init__(self): self.__filemodel = get_corpus_path("thai2rom-pytorch-attn") if not self.__filemodel: download("thai2rom-pytorch-attn") self.__filemodel = get_corpus_path("thai2rom-pytorch-attn") loader = torch.load(self.__filemodel, map_location=device) INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"] OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"] self._maxlength = 100 self._char_to_ix = loader["char_to_ix"] self._ix_to_char = loader["ix_to_char"] self._target_char_to_ix = loader["target_char_to_ix"] self._ix_to_target_char = loader["ix_to_target_char"] # encoder/ decoder # Restore the model and construct the encoder and decoder. self._encoder = Encoder( INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT) self._decoder = AttentionDecoder( OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT ) self._network = Seq2Seq( self._encoder, self._decoder, self._target_char_to_ix["<start>"], self._target_char_to_ix["<end>"], self._maxlength, ).to(device) self._network.load_state_dict(loader["model_state_dict"]) self._network.eval() def _prepare_sequence_in(self, text: str): idxs = [] for ch in text: if ch in self._char_to_ix: idxs.append(self._char_to_ix[ch]) else: idxs.append(self._char_to_ix["<UNK>"]) idxs.append(self._char_to_ix["<end>"]) tensor = torch.tensor(idxs, dtype=torch.long) return tensor.to(device) def romanize(self, text: str) -> str: input_tensor = self._prepare_sequence_in(text).view(1, -1) input_length = [len(text) + 1] target_tensor_logits = self._network(input_tensor, input_length, None, 0) # Seq2seq model returns <END> as the first token, # As a result, target_tensor_logits.size() is torch.Size([0]) if target_tensor_logits.size(0) == 0: target = ["<PAD>"] else: target_tensor 
= ( torch.argmax( target_tensor_logits.squeeze(1), 1).cpu().numpy() ) target = [self._ix_to_target_char[t] for t in target_tensor] return "".join(target) class Encoder(nn.Module): def __init__(self, vocabulary_size, embedding_size, hidden_size, dropout=0.5): super(Encoder, self).__init__() self.hidden_size = hidden_size self.character_embedding = nn.Embedding(vocabulary_size, embedding_size) self.rnn = nn.LSTM( input_size=embedding_size, hidden_size=hidden_size // 2, bidirectional=True, batch_first=True, ) self.dropout = nn.Dropout(dropout) def forward(self, sequences, sequences_lengths): # sequences: (batch_size, sequence_length=MAX_LENGTH) # sequences_lengths: (batch_size) batch_size = sequences.size(0) self.hidden = self.init_hidden(batch_size) sequences_lengths = np.sort(sequences_lengths)[::-1] index_sorted = np.argsort( -sequences_lengths ) # use negation in sort in descending order index_unsort = np.argsort(index_sorted) # to unsorted sequence index_sorted = torch.from_numpy(index_sorted) sequences = sequences.index_select(0, index_sorted.to(device)) sequences = self.character_embedding(sequences) sequences = self.dropout(sequences) sequences_packed = nn.utils.rnn.pack_padded_sequence( sequences, sequences_lengths.copy(), batch_first=True ) sequences_output, self.hidden = self.rnn(sequences_packed, self.hidden) sequences_output, _ = nn.utils.rnn.pad_packed_sequence( sequences_output, batch_first=True ) index_unsort = torch.from_numpy(index_unsort).to(device) sequences_output = sequences_output.index_select( 0, index_unsort.clone().detach() ) return sequences_output, self.hidden def init_hidden(self, batch_size): h_0 = torch.zeros( [2, batch_size, self.hidden_size // 2], requires_grad=True ).to(device) c_0 = torch.zeros( [2, batch_size, self.hidden_size // 2], requires_grad=True ).to(device) return (h_0, c_0) class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size if 
self.method == "general": self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == "concat": self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.other = nn.Parameter(torch.FloatTensor(1, hidden_size)) def forward(self, hidden, encoder_outputs, mask): # Calculate energies for each encoder output if self.method == "dot": attn_energies = torch.bmm(encoder_outputs, hidden.transpose(1, 2)).squeeze(2) elif self.method == "general": attn_energies = self.attn( encoder_outputs.view(-1, encoder_outputs.size(-1)) ) # (batch_size * sequence_len, hidden_size) attn_energies = torch.bmm( attn_energies.view( *encoder_outputs.size()), hidden.transpose(1, 2) ).squeeze(2) # (batch_size, sequence_len) elif self.method == "concat": attn_energies = self.attn( torch.cat(( hidden.expand(*encoder_outputs.size()), encoder_outputs ), 2) ) # (batch_size, sequence_len, hidden_size) attn_energies = torch.bmm( attn_energies, self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2), ).squeeze(2) attn_energies = attn_energies.masked_fill(mask == 0, -1e10) # Normalize energies to weights in range 0 to 1 return F.softmax(attn_energies, 1) class AttentionDecoder(nn.Module): def __init__(self, vocabulary_size, embedding_size, hidden_size, dropout=0.5): super(AttentionDecoder, self).__init__() self.vocabulary_size = vocabulary_size self.hidden_size = hidden_size self.character_embedding = nn.Embedding(vocabulary_size, embedding_size) self.rnn = nn.LSTM( input_size=embedding_size + self.hidden_size, hidden_size=hidden_size, bidirectional=False, batch_first=True, ) self.attn = Attn(method="general", hidden_size=self.hidden_size) self.linear = nn.Linear(hidden_size, vocabulary_size) self.dropout = nn.Dropout(dropout) def forward(self, input, last_hidden, encoder_outputs, mask): # input: (batch_size, 1) # last_hidden: (batch_size, hidden_dim) # encoder_outputs: (batch_size, sequence_len, hidden_dim) # mask: (batch_size, sequence_len) hidden = last_hidden.permute(1, 0, 2) 
attn_weights = self.attn(hidden, encoder_outputs, mask) context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs) context_vector = torch.sum(context_vector, dim=1) context_vector = context_vector.unsqueeze(1) embedded = self.character_embedding(input) embedded = self.dropout(embedded) rnn_input = torch.cat((context_vector, embedded), -1) output, hidden = self.rnn(rnn_input) output = output.view(-1, output.size(2)) x = self.linear(output) return x, hidden[0], attn_weights class Seq2Seq(nn.Module): def __init__( self, encoder, decoder, target_start_token, target_end_token, max_length ): super().__init__() self.encoder = encoder self.decoder = decoder self.pad_idx = 0 self.target_start_token = target_start_token self.target_end_token = target_end_token self.max_length = max_length assert encoder.hidden_size == decoder.hidden_size def create_mask(self, source_seq): mask = source_seq != self.pad_idx return mask def forward( self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5 ): # source_seq: (batch_size, MAX_LENGTH) # source_seq_len: (batch_size, 1) # target_seq: (batch_size, MAX_LENGTH) batch_size = source_seq.size(0) start_token = self.target_start_token end_token = self.target_end_token max_len = self.max_length target_vocab_size = self.decoder.vocabulary_size outputs = torch.zeros(max_len, batch_size, target_vocab_size).to(device) if target_seq is None: assert teacher_forcing_ratio == 0, "Must be zero during inference" inference = True else: inference = False encoder_outputs, encoder_hidden = self.encoder(source_seq, source_seq_len) decoder_input = ( torch.tensor([[start_token] * batch_size]).view(batch_size, 1).to(device) ) encoder_hidden_h_t = torch.cat( [encoder_hidden[0][0], encoder_hidden[0][1]], dim=1 ).unsqueeze(dim=0) decoder_hidden = encoder_hidden_h_t max_source_len = encoder_outputs.size(1) mask = self.create_mask(source_seq[:, 0:max_source_len]) for di in range(max_len): decoder_output, decoder_hidden, _ = self.decoder( 
decoder_input, decoder_hidden, encoder_outputs, mask ) topv, topi = decoder_output.topk(1) outputs[di] = decoder_output.to(device) teacher_force = random.random() < teacher_forcing_ratio decoder_input = ( target_seq[:, di].reshape(batch_size, 1) if teacher_force else topi.detach() ) if inference and decoder_input == end_token: return outputs[:di] return outputs _THAI_TO_ROM = ThaiTransliterator() def romanize(text: str) -> str: return _THAI_TO_ROM.romanize(text)
true
true
f70f1afff68e5a2398c858fe97997b9eef497a0d
19,508
py
Python
sphinxcontrib/doxylink/doxylink.py
sphinx-contrib/doxylink
0c8bda7504e483b527bd8f516a240547caa01646
[ "BSD-2-Clause" ]
9
2017-12-04T14:05:55.000Z
2021-08-04T12:28:36.000Z
sphinxcontrib/doxylink/doxylink.py
sphinx-contrib/doxylink
0c8bda7504e483b527bd8f516a240547caa01646
[ "BSD-2-Clause" ]
27
2017-12-02T13:39:17.000Z
2021-09-10T09:57:57.000Z
sphinxcontrib/doxylink/doxylink.py
sphinx-contrib/doxylink
0c8bda7504e483b527bd8f516a240547caa01646
[ "BSD-2-Clause" ]
14
2017-11-27T08:41:14.000Z
2022-02-02T08:35:55.000Z
# -*- coding: utf-8 -*- import os import re import requests import shutil import time import xml.etree.ElementTree as ET import urllib.parse from collections import namedtuple from dateutil.parser import parse as parsedate from docutils import nodes, utils from sphinx.util.nodes import split_explicit_title from sphinx.util.console import bold, standout from sphinx import __version__ as sphinx_version if sphinx_version >= '1.6.0': from sphinx.util.logging import getLogger from ..doxylink import __version__ from .parsing import normalise, ParseException Entry = namedtuple('Entry', ['kind', 'file']) def report_info(env, msg, docname=None, lineno=None): '''Convenience function for logging an informational Args: msg (str): Message of the warning docname (str): Name of the document on which the error occured lineno (str): Line number in the document on which the error occured ''' if sphinx_version >= '1.6.0': logger = getLogger(__name__) if lineno is not None: logger.info(msg, location=(docname, lineno)) else: logger.info(msg, location=docname) else: env.info(docname, msg, lineno=lineno) def report_warning(env, msg, docname=None, lineno=None): '''Convenience function for logging a warning Args: msg (str): Message of the warning docname (str): Name of the document on which the error occured lineno (str): Line number in the document on which the error occured ''' if sphinx_version >= '1.6.0': logger = getLogger(__name__) if lineno is not None: logger.warning(msg, location=(docname, lineno)) else: logger.warning(msg, location=docname) else: env.warn(docname, msg, lineno=lineno) def is_url(str_to_validate): ''' Helper function to check if string contains URL Args: str_to_validate (str): String to validate as URL Returns: bool: True if given string is a URL, False otherwise ''' regex = re.compile( r'^(?:http|ftp)s?://' # http:// or https:// r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain... r'localhost|' #localhost... 
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip r'(?::\d+)?' # optional port r'(?:/?|[/?]\S+)$', re.IGNORECASE) return bool(re.match(regex, str_to_validate)) class FunctionList: """A FunctionList maps argument lists to specific entries""" def __init__(self): self.kind = 'function_list' self._arglist = {} # type: MutableMapping[str, str] def __getitem__(self, arglist: str) -> Entry: # If the user has requested a specific function through specifying an arglist then get the right anchor if arglist: try: filename = self._arglist[arglist] except KeyError: # TODO Offer fuzzy suggestion raise LookupError('Argument list match not found') else: # Otherwise just return the first entry (if they don't care they get whatever comes first) filename = list(self._arglist.values())[0] return Entry(kind='function', file=filename) def add_overload(self, arglist: str, file: str) -> None: self._arglist[arglist] = file class SymbolMap: """A SymbolMap maps symbols to Entries or FunctionLists""" def __init__(self, xml_doc: ET.ElementTree) -> None: self._mapping = parse_tag_file(xml_doc) def _get_symbol_match(self, symbol: str) -> str: if self._mapping.get(symbol): return symbol piecewise_list = match_piecewise(self._mapping.keys(), symbol) # If there is only one match, return it. if len(piecewise_list) == 1: return list(piecewise_list)[0] # If there is more than one item in piecewise_list then there is an ambiguity # Often this is due to the symbol matching the name of the constructor as well as the class name itself # We will prefer the class classes_list = {s for s in piecewise_list if self._mapping[s].kind == 'class'} # If there is only one by here we return it. 
if len(classes_list) == 1: return list(classes_list)[0] # Now, to disambiguate between ``PolyVox::Array< 1, ElementType >::operator[]`` and ``PolyVox::Array::operator[]`` matching ``operator[]``, # we will ignore templated (as in C++ templates) tag names by removing names containing ``<`` no_templates_list = {s for s in piecewise_list if '<' not in s} if len(no_templates_list) == 1: return list(no_templates_list)[0] # If not found by now, return the shortest match, assuming that's the most specific if no_templates_list: # TODO return a warning here? return min(no_templates_list, key=len) # TODO Offer fuzzy suggestion raise LookupError('Could not find a match') def __getitem__(self, item: str) -> Entry: symbol, normalised_arglist = normalise(item) matched_symbol = self._get_symbol_match(symbol) entry = self._mapping[matched_symbol] if isinstance(entry, FunctionList): entry = entry[normalised_arglist] return entry def parse_tag_file(doc: ET.ElementTree) -> dict: """ Takes in an XML tree from a Doxygen tag file and returns a dictionary that looks something like: .. code-block:: python {'PolyVox': Entry(...), 'PolyVox::Array': Entry(...), 'PolyVox::Array1DDouble': Entry(...), 'PolyVox::Array1DFloat': Entry(...), 'PolyVox::Array1DInt16': Entry(...), 'QScriptContext::throwError': FunctionList(...), 'QScriptContext::toString': FunctionList(...) } Note the different form for functions. This is required to allow for 'overloading by argument type'. :Parameters: doc : xml.etree.ElementTree The XML DOM object :return: a dictionary mapping fully qualified symbols to files """ mapping = {} # type: MutableMapping[str, Union[Entry, FunctionList]] function_list = [] # This is a list of function to be parsed and inserted into mapping at the end of the function. 
for compound in doc.findall('./compound'): compound_kind = compound.get('kind') if compound_kind not in {'namespace', 'class', 'struct', 'file', 'define', 'group', 'page'}: continue compound_name = compound.findtext('name') compound_filename = compound.findtext('filename') # TODO The following is a hack bug fix I think # Doxygen doesn't seem to include the file extension to <compound kind="file"><filename> entries # If it's a 'file' type, check if it _does_ have an extension, if not append '.html' if compound_kind in ('file', 'page') and not os.path.splitext(compound_filename)[1]: compound_filename = compound_filename + '.html' # If it's a compound we can simply add it mapping[compound_name] = Entry(kind=compound_kind, file=compound_filename) for member in compound.findall('member'): # If the member doesn't have an <anchorfile> element, use the parent compounds <filename> instead # This is the way it is in the qt.tag and is perhaps an artefact of old Doxygen anchorfile = member.findtext('anchorfile') or compound_filename member_symbol = compound_name + '::' + member.findtext('name') member_kind = member.get('kind') arglist_text = member.findtext('./arglist') # If it has an <arglist> then we assume it's a function. Empty <arglist> returns '', not None. Things like typedefs and enums can have empty arglists if arglist_text and member_kind not in {'variable', 'typedef', 'enumeration'}: function_list.append((member_symbol, arglist_text, member_kind, join(anchorfile, '#', member.findtext('anchor')))) else: mapping[member_symbol] = Entry(kind=member.get('kind'), file=join(anchorfile, '#', member.findtext('anchor'))) for member_symbol, arglist, kind, anchor_link in function_list: try: normalised_arglist = normalise(member_symbol + arglist)[1] except ParseException as e: print('Skipping %s %s%s. 
Error reported from parser was: %s' % (kind, member_symbol, arglist, e)) else: if mapping.get(member_symbol) and isinstance(mapping[member_symbol], FunctionList): mapping[member_symbol].add_overload(normalised_arglist, anchor_link) else: mapping[member_symbol] = FunctionList() mapping[member_symbol].add_overload(normalised_arglist, anchor_link) return mapping def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set: """ Match the requested symbol reverse piecewise (split on ``::``) against the candidates. This allows you to under-specify the base namespace so that ``"MyClass"`` can match ``my_namespace::MyClass`` Args: candidates: set of possible matches for symbol symbol: the symbol to match against sep: the separator between identifier elements Returns: set of matches """ piecewise_list = set() for item in candidates: split_symbol = symbol.split(sep) split_item = item.split(sep) split_symbol.reverse() split_item.reverse() min_length = len(split_symbol) split_item = split_item[:min_length] if split_symbol == split_item: piecewise_list.add(item) return piecewise_list def join(*args): return ''.join(args) def create_role(app, tag_filename, rootdir, cache_name, pdf=""): # Tidy up the root directory path if not rootdir.endswith(('/', '\\')): rootdir = join(rootdir, os.sep) try: if is_url(tag_filename): hresponse = requests.head(tag_filename, allow_redirects=True) if hresponse.status_code != 200: raise FileNotFoundError try: modification_time = parsedate(hresponse.headers['last-modified']).timestamp() except KeyError: # no last-modified header from server modification_time = time.time() def _parse(): response = requests.get(tag_filename, allow_redirects=True) if response.status_code != 200: raise FileNotFoundError return ET.fromstring(response.text) else: modification_time = os.path.getmtime(tag_filename) def _parse(): return ET.parse(tag_filename) report_info(app.env, bold('Checking tag file cache for %s: ' % cache_name)) if not hasattr(app.env, 
'doxylink_cache'): # no cache present at all, initialise it report_info(app.env, 'No cache at all, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache = {cache_name: {'mapping': mapping, 'mtime': modification_time}} elif not app.env.doxylink_cache.get(cache_name): # Main cache is there but the specific sub-cache for this tag file is not report_info(app.env, 'Sub cache is missing, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time} elif app.env.doxylink_cache[cache_name]['mtime'] < modification_time: # tag file has been modified since sub-cache creation report_info(app.env, 'Sub-cache is out of date, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time} elif not app.env.doxylink_cache[cache_name].get('version') or app.env.doxylink_cache[cache_name].get('version') != __version__: # sub-cache doesn't have a version or the version doesn't match report_info(app.env, 'Sub-cache schema version doesn\'t match, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time} else: # The cache is up to date report_info(app.env, 'Sub-cache is up-to-date') except FileNotFoundError: tag_file_found = False report_warning(app.env, standout('Could not find tag file %s. Make sure your `doxylink` config variable is set correctly.' 
% tag_filename)) else: tag_file_found = True def find_doxygen_link(name, rawtext, text, lineno, inliner, options={}, content=[]): # from :name:`title <part>` has_explicit_title, title, part = split_explicit_title(text) part = utils.unescape(part) warning_messages = [] if not tag_file_found: warning_messages.append('Could not find match for `%s` because tag file not found' % part) return [nodes.inline(title, title)], [] try: url = app.env.doxylink_cache[cache_name]['mapping'][part] except LookupError as error: inliner.reporter.warning('Could not find match for `%s` in `%s` tag file. Error reported was %s' % (part, tag_filename, error), line=lineno) return [nodes.inline(title, title)], [] except ParseException as error: inliner.reporter.warning('Error while parsing `%s`. Is not a well-formed C++ function call or symbol.' 'If this is not the case, it is a doxylink bug so please report it.' 'Error reported was: %s' % (part, error), line=lineno) return [nodes.inline(title, title)], [] if pdf: full_url = join(pdf, '#', url.file) full_url = full_url.replace('.html#', '_') # for links to variables and functions full_url = full_url.replace('.html', '') # for links to files # If it's an absolute path then the link will work regardless of the document directory # Also check if it is a URL (i.e. 
it has a 'scheme' like 'http' or 'file') elif os.path.isabs(rootdir) or urllib.parse.urlparse(rootdir).scheme: full_url = join(rootdir, url.file) # But otherwise we need to add the relative path of the current document to the root source directory to the link else: relative_path_to_docsrc = os.path.relpath(app.env.srcdir, os.path.dirname(inliner.document.attributes['source'])) full_url = join(relative_path_to_docsrc, '/', rootdir, url.file) # We always use the '/' here rather than os.sep since this is a web link avoids problems like documentation/.\../library/doc/ (mixed slashes) if url.kind == 'function' and app.config.add_function_parentheses and normalise(title)[1] == '' and not has_explicit_title: title = join(title, '()') pnode = nodes.reference(title, title, internal=False, refuri=full_url) return [pnode], [] return find_doxygen_link def extract_configuration(values): if len(values) == 3: tag_filename, rootdir, pdf_filename = values elif len(values) == 2: tag_filename = values[0] if values[1].endswith('.pdf'): pdf_filename = values[1] rootdir = "" else: rootdir = values[1] pdf_filename = "" else: raise ValueError("Config variable `doxylink` is incorrectly configured. Expected a tuple with 2 to 3 " "elements; got %s" % values) return tag_filename, rootdir, pdf_filename def fetch_file(app, source, output_path): """Fetches file and puts it in the desired location if it does not exist yet. Local files will be copied and remote files will be downloaded. Directories in the ``output_path`` get created if needed. 
Args: app: Sphinx' application instance source (str): Path to local file or URL to remote file output_path (str): Path with filename to copy/download the source to, relative to Sphinx' output directory """ if not os.path.isabs(output_path): output_path = os.path.join(app.outdir, output_path) if os.path.exists(output_path): return os.makedirs(os.path.dirname(output_path), exist_ok=True) if is_url(source): response = requests.get(source, allow_redirects=True) if response.status_code != 200: report_warning(app.env, standout("Could not find file %r. Make sure your `doxylink_pdf_files` config variable is " "set correctly." % source)) return with open(output_path, 'wb') as file: file.write(response.content) else: if not os.path.isabs(source): source = os.path.join(app.outdir, source) if os.path.exists(source): shutil.copy(source, output_path) else: report_warning(app.env, standout("Expected a URL or a path that exists as value for `doxylink_pdf_files` " "config variable; got %r" % source)) def process_configuration(app, tag_filename, rootdir, pdf_filename): """Processes the configured values for ``doxylink`` and ``doxylink_pdf_files`` and warns about potential issues. The type of builder decides which values shall be used. Args: app: Sphinx' application instance tag_filename (str): Path to the Doxygen tag file rootdir (str): Path to the root directory of Doxygen HTML documentation pdf_filename (str): Path to the pdf file; may be empty when LaTeX builder is not used """ if app.builder.format == 'latex': if not pdf_filename: if is_url(rootdir): report_warning(app.env, "Linking from PDF to remote Doxygen html is not supported yet; got %r." "Consider linking to a Doxygen pdf file instead as " "third element of the tuple in the `doxylink` config variable." % rootdir) else: report_warning(app.env, "Linking from PDF to local Doxygen html is not possible; got %r." "Consider linking to a Doxygen pdf file instead as third element of the tuple in the " "`doxylink` config variable." 
% rootdir) elif pdf_filename in app.config.doxylink_pdf_files: source = app.config.doxylink_pdf_files[pdf_filename] fetch_file(app, source, pdf_filename) elif pdf_filename and not rootdir: report_warning(app.env, "Linking from HTML to Doxygen pdf (%r) is not supported. Consider setting " "the root directory of Doxygen's HTML output as value instead." % pdf_filename) def setup_doxylink_roles(app): for name, values in app.config.doxylink.items(): tag_filename, rootdir, pdf_filename = extract_configuration(values) process_configuration(app, tag_filename, rootdir, pdf_filename) app.add_role(name, create_role(app, tag_filename, rootdir, name, pdf=pdf_filename))
43.544643
219
0.629947
import os import re import requests import shutil import time import xml.etree.ElementTree as ET import urllib.parse from collections import namedtuple from dateutil.parser import parse as parsedate from docutils import nodes, utils from sphinx.util.nodes import split_explicit_title from sphinx.util.console import bold, standout from sphinx import __version__ as sphinx_version if sphinx_version >= '1.6.0': from sphinx.util.logging import getLogger from ..doxylink import __version__ from .parsing import normalise, ParseException Entry = namedtuple('Entry', ['kind', 'file']) def report_info(env, msg, docname=None, lineno=None): if sphinx_version >= '1.6.0': logger = getLogger(__name__) if lineno is not None: logger.info(msg, location=(docname, lineno)) else: logger.info(msg, location=docname) else: env.info(docname, msg, lineno=lineno) def report_warning(env, msg, docname=None, lineno=None): if sphinx_version >= '1.6.0': logger = getLogger(__name__) if lineno is not None: logger.warning(msg, location=(docname, lineno)) else: logger.warning(msg, location=docname) else: env.warn(docname, msg, lineno=lineno) def is_url(str_to_validate): regex = re.compile( r'^(?:http|ftp)s?://' r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' r'localhost|' r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' r'(?::\d+)?' 
r'(?:/?|[/?]\S+)$', re.IGNORECASE) return bool(re.match(regex, str_to_validate)) class FunctionList: def __init__(self): self.kind = 'function_list' self._arglist = {} def __getitem__(self, arglist: str) -> Entry: if arglist: try: filename = self._arglist[arglist] except KeyError: raise LookupError('Argument list match not found') else: filename = list(self._arglist.values())[0] return Entry(kind='function', file=filename) def add_overload(self, arglist: str, file: str) -> None: self._arglist[arglist] = file class SymbolMap: def __init__(self, xml_doc: ET.ElementTree) -> None: self._mapping = parse_tag_file(xml_doc) def _get_symbol_match(self, symbol: str) -> str: if self._mapping.get(symbol): return symbol piecewise_list = match_piecewise(self._mapping.keys(), symbol) # If there is only one match, return it. if len(piecewise_list) == 1: return list(piecewise_list)[0] # If there is more than one item in piecewise_list then there is an ambiguity # Often this is due to the symbol matching the name of the constructor as well as the class name itself # We will prefer the class classes_list = {s for s in piecewise_list if self._mapping[s].kind == 'class'} # If there is only one by here we return it. 
if len(classes_list) == 1: return list(classes_list)[0] # Now, to disambiguate between ``PolyVox::Array< 1, ElementType >::operator[]`` and ``PolyVox::Array::operator[]`` matching ``operator[]``, # we will ignore templated (as in C++ templates) tag names by removing names containing ``<`` no_templates_list = {s for s in piecewise_list if '<' not in s} if len(no_templates_list) == 1: return list(no_templates_list)[0] # If not found by now, return the shortest match, assuming that's the most specific if no_templates_list: return min(no_templates_list, key=len) raise LookupError('Could not find a match') def __getitem__(self, item: str) -> Entry: symbol, normalised_arglist = normalise(item) matched_symbol = self._get_symbol_match(symbol) entry = self._mapping[matched_symbol] if isinstance(entry, FunctionList): entry = entry[normalised_arglist] return entry def parse_tag_file(doc: ET.ElementTree) -> dict: mapping = {} function_list = [] for compound in doc.findall('./compound'): compound_kind = compound.get('kind') if compound_kind not in {'namespace', 'class', 'struct', 'file', 'define', 'group', 'page'}: continue compound_name = compound.findtext('name') compound_filename = compound.findtext('filename') # If it's a 'file' type, check if it _does_ have an extension, if not append '.html' if compound_kind in ('file', 'page') and not os.path.splitext(compound_filename)[1]: compound_filename = compound_filename + '.html' mapping[compound_name] = Entry(kind=compound_kind, file=compound_filename) for member in compound.findall('member'): # If the member doesn't have an <anchorfile> element, use the parent compounds <filename> instead anchorfile = member.findtext('anchorfile') or compound_filename member_symbol = compound_name + '::' + member.findtext('name') member_kind = member.get('kind') arglist_text = member.findtext('./arglist') if arglist_text and member_kind not in {'variable', 'typedef', 'enumeration'}: function_list.append((member_symbol, arglist_text, 
member_kind, join(anchorfile, ' else: mapping[member_symbol] = Entry(kind=member.get('kind'), file=join(anchorfile, ' for member_symbol, arglist, kind, anchor_link in function_list: try: normalised_arglist = normalise(member_symbol + arglist)[1] except ParseException as e: print('Skipping %s %s%s. Error reported from parser was: %s' % (kind, member_symbol, arglist, e)) else: if mapping.get(member_symbol) and isinstance(mapping[member_symbol], FunctionList): mapping[member_symbol].add_overload(normalised_arglist, anchor_link) else: mapping[member_symbol] = FunctionList() mapping[member_symbol].add_overload(normalised_arglist, anchor_link) return mapping def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set: piecewise_list = set() for item in candidates: split_symbol = symbol.split(sep) split_item = item.split(sep) split_symbol.reverse() split_item.reverse() min_length = len(split_symbol) split_item = split_item[:min_length] if split_symbol == split_item: piecewise_list.add(item) return piecewise_list def join(*args): return ''.join(args) def create_role(app, tag_filename, rootdir, cache_name, pdf=""): # Tidy up the root directory path if not rootdir.endswith(('/', '\\')): rootdir = join(rootdir, os.sep) try: if is_url(tag_filename): hresponse = requests.head(tag_filename, allow_redirects=True) if hresponse.status_code != 200: raise FileNotFoundError try: modification_time = parsedate(hresponse.headers['last-modified']).timestamp() except KeyError: # no last-modified header from server modification_time = time.time() def _parse(): response = requests.get(tag_filename, allow_redirects=True) if response.status_code != 200: raise FileNotFoundError return ET.fromstring(response.text) else: modification_time = os.path.getmtime(tag_filename) def _parse(): return ET.parse(tag_filename) report_info(app.env, bold('Checking tag file cache for %s: ' % cache_name)) if not hasattr(app.env, 'doxylink_cache'): # no cache present at all, initialise it 
report_info(app.env, 'No cache at all, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache = {cache_name: {'mapping': mapping, 'mtime': modification_time}} elif not app.env.doxylink_cache.get(cache_name): # Main cache is there but the specific sub-cache for this tag file is not report_info(app.env, 'Sub cache is missing, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time} elif app.env.doxylink_cache[cache_name]['mtime'] < modification_time: # tag file has been modified since sub-cache creation report_info(app.env, 'Sub-cache is out of date, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time} elif not app.env.doxylink_cache[cache_name].get('version') or app.env.doxylink_cache[cache_name].get('version') != __version__: # sub-cache doesn't have a version or the version doesn't match report_info(app.env, 'Sub-cache schema version doesn\'t match, rebuilding...') mapping = SymbolMap(_parse()) app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time} else: report_info(app.env, 'Sub-cache is up-to-date') except FileNotFoundError: tag_file_found = False report_warning(app.env, standout('Could not find tag file %s. Make sure your `doxylink` config variable is set correctly.' % tag_filename)) else: tag_file_found = True def find_doxygen_link(name, rawtext, text, lineno, inliner, options={}, content=[]): has_explicit_title, title, part = split_explicit_title(text) part = utils.unescape(part) warning_messages = [] if not tag_file_found: warning_messages.append('Could not find match for `%s` because tag file not found' % part) return [nodes.inline(title, title)], [] try: url = app.env.doxylink_cache[cache_name]['mapping'][part] except LookupError as error: inliner.reporter.warning('Could not find match for `%s` in `%s` tag file. 
Error reported was %s' % (part, tag_filename, error), line=lineno) return [nodes.inline(title, title)], [] except ParseException as error: inliner.reporter.warning('Error while parsing `%s`. Is not a well-formed C++ function call or symbol.' 'If this is not the case, it is a doxylink bug so please report it.' 'Error reported was: %s' % (part, error), line=lineno) return [nodes.inline(title, title)], [] if pdf: full_url = join(pdf, '#', url.file) full_url = full_url.replace('.html#', '_') full_url = full_url.replace('.html', '') # Also check if it is a URL (i.e. it has a 'scheme' like 'http' or 'file') elif os.path.isabs(rootdir) or urllib.parse.urlparse(rootdir).scheme: full_url = join(rootdir, url.file) # But otherwise we need to add the relative path of the current document to the root source directory to the link else: relative_path_to_docsrc = os.path.relpath(app.env.srcdir, os.path.dirname(inliner.document.attributes['source'])) full_url = join(relative_path_to_docsrc, '/', rootdir, url.file) # We always use the '/' here rather than os.sep since this is a web link avoids problems like documentation/.\../library/doc/ (mixed slashes) if url.kind == 'function' and app.config.add_function_parentheses and normalise(title)[1] == '' and not has_explicit_title: title = join(title, '()') pnode = nodes.reference(title, title, internal=False, refuri=full_url) return [pnode], [] return find_doxygen_link def extract_configuration(values): if len(values) == 3: tag_filename, rootdir, pdf_filename = values elif len(values) == 2: tag_filename = values[0] if values[1].endswith('.pdf'): pdf_filename = values[1] rootdir = "" else: rootdir = values[1] pdf_filename = "" else: raise ValueError("Config variable `doxylink` is incorrectly configured. 
Expected a tuple with 2 to 3 " "elements; got %s" % values) return tag_filename, rootdir, pdf_filename def fetch_file(app, source, output_path): if not os.path.isabs(output_path): output_path = os.path.join(app.outdir, output_path) if os.path.exists(output_path): return os.makedirs(os.path.dirname(output_path), exist_ok=True) if is_url(source): response = requests.get(source, allow_redirects=True) if response.status_code != 200: report_warning(app.env, standout("Could not find file %r. Make sure your `doxylink_pdf_files` config variable is " "set correctly." % source)) return with open(output_path, 'wb') as file: file.write(response.content) else: if not os.path.isabs(source): source = os.path.join(app.outdir, source) if os.path.exists(source): shutil.copy(source, output_path) else: report_warning(app.env, standout("Expected a URL or a path that exists as value for `doxylink_pdf_files` " "config variable; got %r" % source)) def process_configuration(app, tag_filename, rootdir, pdf_filename): if app.builder.format == 'latex': if not pdf_filename: if is_url(rootdir): report_warning(app.env, "Linking from PDF to remote Doxygen html is not supported yet; got %r." "Consider linking to a Doxygen pdf file instead as " "third element of the tuple in the `doxylink` config variable." % rootdir) else: report_warning(app.env, "Linking from PDF to local Doxygen html is not possible; got %r." "Consider linking to a Doxygen pdf file instead as third element of the tuple in the " "`doxylink` config variable." % rootdir) elif pdf_filename in app.config.doxylink_pdf_files: source = app.config.doxylink_pdf_files[pdf_filename] fetch_file(app, source, pdf_filename) elif pdf_filename and not rootdir: report_warning(app.env, "Linking from HTML to Doxygen pdf (%r) is not supported. Consider setting " "the root directory of Doxygen's HTML output as value instead." 
% pdf_filename) def setup_doxylink_roles(app): for name, values in app.config.doxylink.items(): tag_filename, rootdir, pdf_filename = extract_configuration(values) process_configuration(app, tag_filename, rootdir, pdf_filename) app.add_role(name, create_role(app, tag_filename, rootdir, name, pdf=pdf_filename))
true
true
f70f1b00fd0ae86be320e4714bb9c7b34baf8028
15,022
py
Python
speechbrain/lobes/models/huggingface_wav2vec.py
RaphaelOlivier/speechbrain
142dc6caa4b46ca4c9341b0cd39627f489808749
[ "Apache-2.0" ]
null
null
null
speechbrain/lobes/models/huggingface_wav2vec.py
RaphaelOlivier/speechbrain
142dc6caa4b46ca4c9341b0cd39627f489808749
[ "Apache-2.0" ]
null
null
null
speechbrain/lobes/models/huggingface_wav2vec.py
RaphaelOlivier/speechbrain
142dc6caa4b46ca4c9341b0cd39627f489808749
[ "Apache-2.0" ]
null
null
null
"""This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models. Reference: https://arxiv.org/abs/2006.11477 Reference: https://arxiv.org/abs/1904.05862 Reference: https://arxiv.org/abs/2110.13900 Transformer from HuggingFace needs to be installed: https://huggingface.co/transformers/installation.html Authors * Titouan Parcollet 2021 * Boumadane Abdelmoumene 2021 """ import os import torch import logging import pathlib import numpy as np import torch.nn.functional as F from torch import nn from huggingface_hub import model_info from speechbrain.pretrained.fetching import fetch # We check if transformers is installed. try: import transformers from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig from transformers import Wav2Vec2FeatureExtractor from transformers import Wav2Vec2ForPreTraining from transformers.models.wav2vec2.modeling_wav2vec2 import ( _compute_mask_indices, ) except ImportError: MSG = "Please install transformers from HuggingFace to use wav2vec2 / Hubert\n" MSG += "E.G. run: pip install transformers" raise ImportError(MSG) logger = logging.getLogger(__name__) HF_models = { "wav2vec2": Wav2Vec2Model, "hubert": HubertModel, "wavlm": WavLMModel, "data2vec": Data2VecAudioModel } HF_config = { "wav2vec2": Wav2Vec2Config, "hubert": HubertConfig, "wavlm": WavLMConfig, "data2vec": Data2VecAudioConfig } class HuggingFaceWav2Vec2(nn.Module): """This lobe enables the integration of HuggingFace and SpeechBrain pretrained wav2vec2.0/Hubert models. Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477 Source paper Hubert: https://arxiv.org/abs/2106.07447 Transformer from HuggingFace needs to be installed: https://huggingface.co/transformers/installation.html The model can be used as a fixed feature extractor or can be finetuned. It will download automatically the model from HuggingFace or use a local path. 
Arguments --------- source : str HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60" save_path : str Path (dir) of the downloaded model. output_norm : bool (default: True) If True, a layer_norm (affine) will be applied to the output obtained from the wav2vec model. freeze : bool (default: True) If True, the model is frozen. If False, the model will be trained alongside with the rest of the pipeline. freeze_feature_extractor : bool (default: False) When freeze = False and freeze_feature_extractor True, the featue_extractor module of the model is Frozen. If False all the wav2vec model will be trained including featue_extractor module. apply_spec_augment : bool (default: False) If True, the model will apply spec augment on the output of feature extractor (inside huggingface Wav2VecModel() class). If False, the model will not apply spec augment. We set this to false to prevent from doing it twice. Example ------- >>> inputs = torch.rand([10, 600]) >>> model_hub = "facebook/wav2vec2-base-960h" >>> save_path = "savedir" >>> model = HuggingFaceWav2Vec2(model_hub, save_path) >>> outputs = model(inputs) """ def __init__( self, source, save_path, output_norm=True, freeze=True, freeze_feature_extractor=False, apply_spec_augment=False, load_pretrained_weights=True, ): super().__init__() # Download the extractor from HuggingFace. # The extractor is only used to retrieve the normalisation information self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( source, cache_dir=save_path ) # Select specific self-supervised loader (eg. 
Wav2Vec2, Hubert) if "hubert" in source: config = HF_config.get("hubert") model = HF_models.get("hubert") elif "wavlm" in source: config = HF_config.get("wavlm") model = HF_models.get("wavlm") elif "data2vec" in source: config = HF_config.get("data2vec") model = HF_models.get("data2vec") else: config = HF_config.get("wav2vec2") model = HF_models.get("wav2vec2") # Download and load the model self._from_pretrained( source, config=config, model=model, save_path=save_path, load_weights=load_pretrained_weights ) # set apply_spec_augment self.model.config.apply_spec_augment = apply_spec_augment # We check if inputs need to be normalized w.r.t pretrained wav2vec2 self.normalize_wav = self.feature_extractor.do_normalize self.freeze = freeze self.freeze_feature_extractor = freeze_feature_extractor self.output_norm = output_norm if self.freeze: logger.warning( "speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen." ) self.model.eval() for param in self.model.parameters(): param.requires_grad = False else: self.model.train() if self.freeze_feature_extractor: self.model.feature_extractor._freeze_parameters() def _from_pretrained(self, source, config, model, save_path, load_weights): """This function manages the source checking and loading of the params. # 1. Is the model from HF or a local path # 2. Is the model pretrained with HF or SpeechBrain # 3. Download (if appropriate) and load with respect to 1. and 2. """ is_sb, ckpt_file = self._check_model_source(source) if not load_weights: config = config.from_pretrained(source, cache_dir=save_path) self.model = model(config) elif is_sb: config = config.from_pretrained(source, cache_dir=save_path) self.model = model(config) self.model.gradient_checkpointing_disable() # Required by DDP # fetch the checkpoint file ckpt_full_path = fetch( filename=ckpt_file, source=source, savedir=save_path ) # We transfer the parameters from the checkpoint. 
self._load_sb_pretrained_w2v2_parameters(ckpt_full_path) else: if load_weights: self.model = model.from_pretrained(source, cache_dir=save_path) else: self.model=model() def _load_sb_pretrained_w2v2_parameters(self, path): """Loads the parameter of a w2v2 model pretrained with SpeechBrain and the HuggingFaceWav2Vec2Pretrain Object. It is necessary to perform a custom loading because HuggingFace adds a level to the checkpoint when storing the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain and HuggingFaceWav2Vec2. In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading). """ modified_state_dict = {} orig_state_dict = torch.load(path, map_location="cpu") # We remove the .wav2vec2 in the state dict. for key, params in orig_state_dict.items(): if "wav2vec2." in key: save_key = key.replace("model.wav2vec2.", "") modified_state_dict[save_key] = params incompatible_keys = self.model.load_state_dict( modified_state_dict, strict=False ) for missing_key in incompatible_keys.missing_keys: logger.warning( f"During parameter transfer to {self.model} loading from " + f"{path}, the transferred parameters did not have " + f"parameters for the key: {missing_key}" ) for unexpected_key in incompatible_keys.unexpected_keys: logger.warning( f"The param with the key: {unexpected_key} is discarded as it " + "is useless for wav2vec 2.0 finetuning." ) def _check_model_source(self, path): """Checks if the pretrained model has been trained with SpeechBrain and is hosted locally or on a HuggingFace hub. """ checkpoint_filename = "" source = pathlib.Path(path) is_local = True is_sb = True # If path is a huggingface hub. 
if not source.exists(): is_local = False if is_local: # Test for HuggingFace model if any(File.endswith(".bin") for File in os.listdir(path)): is_sb = False return is_sb, checkpoint_filename # Test for SpeechBrain model and get the filename. for File in os.listdir(path): if File.endswith(".ckpt"): checkpoint_filename = os.path.join(path, File) is_sb = True return is_sb, checkpoint_filename else: files = model_info( path ).siblings # get the list of files of the Hub # Test if it's an HuggingFace model or a SB one for File in files: if File.rfilename.endswith(".ckpt"): checkpoint_filename = File.rfilename is_sb = True return is_sb, checkpoint_filename for File in files: if File.rfilename.endswith(".bin"): checkpoint_filename = File.rfilename is_sb = False return is_sb, checkpoint_filename err_msg = f"{path} does not contain a .bin or .ckpt checkpoint !" raise FileNotFoundError(err_msg) def forward(self, wav): """Takes an input waveform and return its corresponding wav2vec encoding. Arguments --------- wav : torch.Tensor (signal) A batch of audio signals to transform to features. """ # If we freeze, we simply remove all grads and features from the graph. if self.freeze: with torch.no_grad(): return self.extract_features(wav).detach() return self.extract_features(wav) def extract_features(self, wav): """Takes an input waveform and return its corresponding wav2vec encoding. Arguments --------- wav : torch.Tensor (signal) A batch of audio signals to transform to features. """ if self.normalize_wav: wav = F.layer_norm(wav, wav.shape) # Extract wav2vec output out = self.model(wav)[0] # We normalize the output if required if self.output_norm: out = F.layer_norm(out, out.shape) return out class HuggingFaceWav2Vec2Pretrain(nn.Module): """This lobe enables the integration of HuggingFace wav2vec2.0 models to be pretrained. 
Source paper: https://arxiv.org/abs/2006.11477 Transformer from HuggingFace needs to be installed: https://huggingface.co/transformers/installation.html The return is an HuggingFace format and the mask indices that contains: https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining For instance, it returns the loss that can be accessed with .loss Arguments --------- source : str HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60" save_path : str Path (dir) of the downloaded model. mask_prob : float (default: 0.65) Probability of masking a given frame. Default is taken from the paper. mask_length : float (default: 10) Length (i.e. number of consecutive masked frames). Default is taken from the paper. Example ------- >>> inputs = torch.rand([10, 32000]) >>> model_hub = "facebook/wav2vec2-base-960h" >>> save_path = "savedir" >>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path) >>> outputs, _ = model(inputs) """ def __init__( self, source, save_path, mask_prob=0.65, mask_length=10, normalize_wav=True, ): super().__init__() self.mask_prob = mask_prob self.mask_length = mask_length self.normalize_wav = normalize_wav # Download the config of the model from HuggingFace. self.config = Wav2Vec2Config.from_pretrained( source, cache_dir=save_path ) self.config.output_hidden_states = ( True # We want the hidden states as well! ) self.model = Wav2Vec2ForPreTraining(self.config) self.model.gradient_checkpointing_disable() # Required by DDP self.model.train() # We check if inputs need to be normalized w.r.t pretrained wav2vec2 def forward(self, wav): """Takes an input waveform and return its corresponding wav2vec encoding. Arguments --------- wav : torch.Tensor (signal) A batch of audio signals to transform to features. """ batch_size, raw_sequence_length = wav.shape if self.normalize_wav: wav = F.layer_norm(wav, wav.shape) sequence_length = self.model._get_feat_extract_output_lengths( raw_sequence_length ) # 1. 
Compute the indices that will be masked mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.mask_prob, mask_length=self.mask_length, ) torch_mask_time_indices = torch.tensor( mask_time_indices, device=wav.device, dtype=torch.long, ) # 2. Sample the negative samples from the entire sequence. # Fairseq does it only on the masked indices, but this only work if you # have long sentences. For more versatily, we sample on the entire sequence. # value. full_sentence_indices = np.ones((batch_size, sequence_length)) # print(np.sum(mask_time_indices, axis=1)) negative_sample_indices = torch.tensor( transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices( (batch_size, sequence_length), num_negatives=self.config.num_negatives, mask_time_indices=full_sentence_indices, ), device=wav.device, dtype=torch.long, ) return ( self.model( wav, mask_time_indices=torch_mask_time_indices, sampled_negative_indices=negative_sample_indices, ), torch_mask_time_indices, )
36.19759
123
0.637598
import os import torch import logging import pathlib import numpy as np import torch.nn.functional as F from torch import nn from huggingface_hub import model_info from speechbrain.pretrained.fetching import fetch try: import transformers from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig from transformers import Wav2Vec2FeatureExtractor from transformers import Wav2Vec2ForPreTraining from transformers.models.wav2vec2.modeling_wav2vec2 import ( _compute_mask_indices, ) except ImportError: MSG = "Please install transformers from HuggingFace to use wav2vec2 / Hubert\n" MSG += "E.G. run: pip install transformers" raise ImportError(MSG) logger = logging.getLogger(__name__) HF_models = { "wav2vec2": Wav2Vec2Model, "hubert": HubertModel, "wavlm": WavLMModel, "data2vec": Data2VecAudioModel } HF_config = { "wav2vec2": Wav2Vec2Config, "hubert": HubertConfig, "wavlm": WavLMConfig, "data2vec": Data2VecAudioConfig } class HuggingFaceWav2Vec2(nn.Module): def __init__( self, source, save_path, output_norm=True, freeze=True, freeze_feature_extractor=False, apply_spec_augment=False, load_pretrained_weights=True, ): super().__init__() self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( source, cache_dir=save_path ) if "hubert" in source: config = HF_config.get("hubert") model = HF_models.get("hubert") elif "wavlm" in source: config = HF_config.get("wavlm") model = HF_models.get("wavlm") elif "data2vec" in source: config = HF_config.get("data2vec") model = HF_models.get("data2vec") else: config = HF_config.get("wav2vec2") model = HF_models.get("wav2vec2") self._from_pretrained( source, config=config, model=model, save_path=save_path, load_weights=load_pretrained_weights ) self.model.config.apply_spec_augment = apply_spec_augment self.normalize_wav = self.feature_extractor.do_normalize self.freeze = freeze self.freeze_feature_extractor = 
freeze_feature_extractor self.output_norm = output_norm if self.freeze: logger.warning( "speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen." ) self.model.eval() for param in self.model.parameters(): param.requires_grad = False else: self.model.train() if self.freeze_feature_extractor: self.model.feature_extractor._freeze_parameters() def _from_pretrained(self, source, config, model, save_path, load_weights): is_sb, ckpt_file = self._check_model_source(source) if not load_weights: config = config.from_pretrained(source, cache_dir=save_path) self.model = model(config) elif is_sb: config = config.from_pretrained(source, cache_dir=save_path) self.model = model(config) self.model.gradient_checkpointing_disable() ckpt_full_path = fetch( filename=ckpt_file, source=source, savedir=save_path ) self._load_sb_pretrained_w2v2_parameters(ckpt_full_path) else: if load_weights: self.model = model.from_pretrained(source, cache_dir=save_path) else: self.model=model() def _load_sb_pretrained_w2v2_parameters(self, path): modified_state_dict = {} orig_state_dict = torch.load(path, map_location="cpu") for key, params in orig_state_dict.items(): if "wav2vec2." in key: save_key = key.replace("model.wav2vec2.", "") modified_state_dict[save_key] = params incompatible_keys = self.model.load_state_dict( modified_state_dict, strict=False ) for missing_key in incompatible_keys.missing_keys: logger.warning( f"During parameter transfer to {self.model} loading from " + f"{path}, the transferred parameters did not have " + f"parameters for the key: {missing_key}" ) for unexpected_key in incompatible_keys.unexpected_keys: logger.warning( f"The param with the key: {unexpected_key} is discarded as it " + "is useless for wav2vec 2.0 finetuning." 
) def _check_model_source(self, path): checkpoint_filename = "" source = pathlib.Path(path) is_local = True is_sb = True if not source.exists(): is_local = False if is_local: if any(File.endswith(".bin") for File in os.listdir(path)): is_sb = False return is_sb, checkpoint_filename for File in os.listdir(path): if File.endswith(".ckpt"): checkpoint_filename = os.path.join(path, File) is_sb = True return is_sb, checkpoint_filename else: files = model_info( path ).siblings for File in files: if File.rfilename.endswith(".ckpt"): checkpoint_filename = File.rfilename is_sb = True return is_sb, checkpoint_filename for File in files: if File.rfilename.endswith(".bin"): checkpoint_filename = File.rfilename is_sb = False return is_sb, checkpoint_filename err_msg = f"{path} does not contain a .bin or .ckpt checkpoint !" raise FileNotFoundError(err_msg) def forward(self, wav): # If we freeze, we simply remove all grads and features from the graph. if self.freeze: with torch.no_grad(): return self.extract_features(wav).detach() return self.extract_features(wav) def extract_features(self, wav): if self.normalize_wav: wav = F.layer_norm(wav, wav.shape) # Extract wav2vec output out = self.model(wav)[0] # We normalize the output if required if self.output_norm: out = F.layer_norm(out, out.shape) return out class HuggingFaceWav2Vec2Pretrain(nn.Module): def __init__( self, source, save_path, mask_prob=0.65, mask_length=10, normalize_wav=True, ): super().__init__() self.mask_prob = mask_prob self.mask_length = mask_length self.normalize_wav = normalize_wav # Download the config of the model from HuggingFace. self.config = Wav2Vec2Config.from_pretrained( source, cache_dir=save_path ) self.config.output_hidden_states = ( True # We want the hidden states as well! 
) self.model = Wav2Vec2ForPreTraining(self.config) self.model.gradient_checkpointing_disable() # Required by DDP self.model.train() # We check if inputs need to be normalized w.r.t pretrained wav2vec2 def forward(self, wav): batch_size, raw_sequence_length = wav.shape if self.normalize_wav: wav = F.layer_norm(wav, wav.shape) sequence_length = self.model._get_feat_extract_output_lengths( raw_sequence_length ) # 1. Compute the indices that will be masked mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.mask_prob, mask_length=self.mask_length, ) torch_mask_time_indices = torch.tensor( mask_time_indices, device=wav.device, dtype=torch.long, ) # 2. Sample the negative samples from the entire sequence. # Fairseq does it only on the masked indices, but this only work if you # have long sentences. For more versatily, we sample on the entire sequence. # value. full_sentence_indices = np.ones((batch_size, sequence_length)) # print(np.sum(mask_time_indices, axis=1)) negative_sample_indices = torch.tensor( transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices( (batch_size, sequence_length), num_negatives=self.config.num_negatives, mask_time_indices=full_sentence_indices, ), device=wav.device, dtype=torch.long, ) return ( self.model( wav, mask_time_indices=torch_mask_time_indices, sampled_negative_indices=negative_sample_indices, ), torch_mask_time_indices, )
true
true
f70f1b1d9f8febeb246974b7a40d95a388399f2c
318
py
Python
Python/Whatsapp_Sms_Bomber/xpath.py
CharvyJain/Rotten-Scripts
c9b8f7dde378620e4a82eae7aacec53f1eeea3c5
[ "MIT" ]
3
2021-08-19T08:38:10.000Z
2022-01-03T14:37:50.000Z
Python/Whatsapp_Sms_Bomber/xpath.py
SKAUL05/Rotten-Scripts
c44e69754bbecb8a547fe2cc3a29be5acf97c46a
[ "MIT" ]
3
2022-01-15T07:33:28.000Z
2022-03-24T04:23:03.000Z
Python/Whatsapp_Sms_Bomber/xpath.py
SKAUL05/Rotten-Scripts
c44e69754bbecb8a547fe2cc3a29be5acf97c46a
[ "MIT" ]
1
2020-12-25T18:42:30.000Z
2020-12-25T18:42:30.000Z
newchat_xpath= "//*[@id='side']/header/div[2]/div/span/div[2]" search_xpath= "//*[@id='app']/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/div/div[2]" #user_xpath= "//span[@title='{}']" message_xpath= "//*[@id='main']/footer/div[1]/div[2]/div/div[2]" sendbutton_xpath= "//*[@id='main']/footer/div[1]/div[3]"
63.6
98
0.625786
newchat_xpath= "//*[@id='side']/header/div[2]/div/span/div[2]" search_xpath= "//*[@id='app']/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/div/div[2]" message_xpath= "//*[@id='main']/footer/div[1]/div[2]/div/div[2]" sendbutton_xpath= "//*[@id='main']/footer/div[1]/div[3]"
true
true
f70f1b4158f8ca00b905ad9334de0f38c58821fd
15,602
py
Python
vrchatapi/model/create_world_request.py
vrchatapi/vrchatapi-python
afe5ec9fda298723e7408358473aafe343e27d18
[ "MIT" ]
8
2021-08-25T02:35:30.000Z
2022-03-28T18:11:58.000Z
vrchatapi/model/create_world_request.py
vrchatapi/vrchatapi-python
afe5ec9fda298723e7408358473aafe343e27d18
[ "MIT" ]
1
2022-03-18T20:29:30.000Z
2022-03-18T20:35:05.000Z
vrchatapi/model/create_world_request.py
vrchatapi/vrchatapi-python
afe5ec9fda298723e7408358473aafe343e27d18
[ "MIT" ]
1
2022-01-11T10:49:12.000Z
2022-01-11T10:49:12.000Z
""" VRChat API Documentation The version of the OpenAPI document: 1.6.8 Contact: me@ruby.js.org Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from vrchatapi.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from ..model_utils import OpenApiModel from vrchatapi.exceptions import ApiAttributeError def lazy_import(): from vrchatapi.model.release_status import ReleaseStatus from vrchatapi.model.tag import Tag from vrchatapi.model.world_id import WorldID globals()['ReleaseStatus'] = ReleaseStatus globals()['Tag'] = Tag globals()['WorldID'] = WorldID class CreateWorldRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. 
""" allowed_values = { } validations = { ('asset_url',): { 'min_length': 1, }, ('image_url',): { 'min_length': 1, }, ('name',): { 'min_length': 1, }, ('asset_version',): { 'inclusive_minimum': 0, }, ('author_name',): { 'min_length': 1, }, ('capacity',): { 'inclusive_maximum': 40, 'inclusive_minimum': 0, }, ('unity_package_url',): { 'min_length': 1, }, ('unity_version',): { 'min_length': 1, }, } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'asset_url': (str,), # noqa: E501 'image_url': (str,), # noqa: E501 'name': (str,), # noqa: E501 'asset_version': (int,), # noqa: E501 'author_id': (str,), # noqa: E501 'author_name': (str,), # noqa: E501 'capacity': (int,), # noqa: E501 'description': (str,), # noqa: E501 'id': (WorldID,), # noqa: E501 'platform': (str,), # noqa: E501 'release_status': (ReleaseStatus,), # noqa: E501 'tags': ([Tag],), # noqa: E501 'unity_package_url': (str,), # noqa: E501 'unity_version': (str,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'asset_url': 'assetUrl', # noqa: E501 'image_url': 'imageUrl', # noqa: E501 'name': 'name', # noqa: E501 'asset_version': 'assetVersion', # noqa: E501 'author_id': 'authorId', # noqa: E501 'author_name': 'authorName', # noqa: E501 'capacity': 'capacity', # noqa: E501 'description': 'description', # noqa: E501 'id': 'id', # noqa: E501 'platform': 'platform', # noqa: E501 'release_status': 'releaseStatus', # noqa: E501 'tags': 'tags', # noqa: E501 
'unity_package_url': 'unityPackageUrl', # noqa: E501 'unity_version': 'unityVersion', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, asset_url, image_url, name, *args, **kwargs): # noqa: E501 """CreateWorldRequest - a model defined in OpenAPI Args: asset_url (str): image_url (str): name (str): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) asset_version (int): [optional] # noqa: E501 author_id (str): A users unique ID, usually in the form of `usr_c1644b5b-3ca4-45b4-97c6-a2a0de70d469`. Legacy players can have old IDs in the form of `8JoV9XEdpo`. The ID can never be changed.. 
[optional] # noqa: E501 author_name (str): [optional] # noqa: E501 capacity (int): [optional] # noqa: E501 description (str): [optional] # noqa: E501 id (WorldID): [optional] # noqa: E501 platform (str): This can be `standalonewindows` or `android`, but can also pretty much be any random Unity verison such as `2019.2.4-801-Release` or `2019.2.2-772-Release` or even `unknownplatform`.. [optional] # noqa: E501 release_status (ReleaseStatus): [optional] # noqa: E501 tags ([Tag]): [optional] # noqa: E501 unity_package_url (str): [optional] # noqa: E501 unity_version (str): [optional] if omitted the server will use the default value of "5.3.4p1" # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.asset_url = asset_url self.image_url = image_url self.name = name for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. 
continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, asset_url, image_url, name, *args, **kwargs): # noqa: E501 """CreateWorldRequest - a model defined in OpenAPI Args: asset_url (str): image_url (str): name (str): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) asset_version (int): [optional] # noqa: E501 author_id (str): A users unique ID, usually in the form of `usr_c1644b5b-3ca4-45b4-97c6-a2a0de70d469`. Legacy players can have old IDs in the form of `8JoV9XEdpo`. 
The ID can never be changed.. [optional] # noqa: E501 author_name (str): [optional] # noqa: E501 capacity (int): [optional] # noqa: E501 description (str): [optional] # noqa: E501 id (WorldID): [optional] # noqa: E501 platform (str): This can be `standalonewindows` or `android`, but can also pretty much be any random Unity verison such as `2019.2.4-801-Release` or `2019.2.2-772-Release` or even `unknownplatform`.. [optional] # noqa: E501 release_status (ReleaseStatus): [optional] # noqa: E501 tags ([Tag]): [optional] # noqa: E501 unity_package_url (str): [optional] # noqa: E501 unity_version (str): [optional] if omitted the server will use the default value of "5.3.4p1" # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.asset_url = asset_url self.image_url = image_url self.name = name for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
44.1983
236
0.563966
import re import sys from vrchatapi.model_utils import ( ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from ..model_utils import OpenApiModel from vrchatapi.exceptions import ApiAttributeError def lazy_import(): from vrchatapi.model.release_status import ReleaseStatus from vrchatapi.model.tag import Tag from vrchatapi.model.world_id import WorldID globals()['ReleaseStatus'] = ReleaseStatus globals()['Tag'] = Tag globals()['WorldID'] = WorldID class CreateWorldRequest(ModelNormal): allowed_values = { } validations = { ('asset_url',): { 'min_length': 1, }, ('image_url',): { 'min_length': 1, }, ('name',): { 'min_length': 1, }, ('asset_version',): { 'inclusive_minimum': 0, }, ('author_name',): { 'min_length': 1, }, ('capacity',): { 'inclusive_maximum': 40, 'inclusive_minimum': 0, }, ('unity_package_url',): { 'min_length': 1, }, ('unity_version',): { 'min_length': 1, }, } @cached_property def additional_properties_type(): lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) _nullable = False @cached_property def openapi_types(): lazy_import() return { 'asset_url': (str,), 'image_url': (str,), 'name': (str,), 'asset_version': (int,), 'author_id': (str,), 'author_name': (str,), 'capacity': (int,), 'description': (str,), 'id': (WorldID,), 'platform': (str,), 'release_status': (ReleaseStatus,), 'tags': ([Tag],), 'unity_package_url': (str,), 'unity_version': (str,), } @cached_property def discriminator(): return None attribute_map = { 'asset_url': 'assetUrl', 'image_url': 'imageUrl', 'name': 'name', 'asset_version': 'assetVersion', 'author_id': 'authorId', 'author_name': 'authorName', 'capacity': 'capacity', 'description': 'description', 'id': 'id', 'platform': 'platform', 'release_status': 'releaseStatus', 'tags': 'tags', 'unity_package_url': 'unityPackageUrl', 'unity_version': 'unityVersion', 
} read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, asset_url, image_url, name, *args, **kwargs): _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.asset_url = asset_url self.image_url = image_url self.name = name for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, asset_url, image_url, name, *args, **kwargs): _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.asset_url = asset_url self.image_url = image_url self.name = name for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
true
true
f70f1b515751dfaf4638ec73883e1665c80bf12e
67,139
py
Python
poky/bitbake/lib/bb/fetch2/__init__.py
buildlinux/unityos
dcbe232d0589013d77a62c33959d6a69f9bfbc5e
[ "Apache-2.0" ]
null
null
null
poky/bitbake/lib/bb/fetch2/__init__.py
buildlinux/unityos
dcbe232d0589013d77a62c33959d6a69f9bfbc5e
[ "Apache-2.0" ]
null
null
null
poky/bitbake/lib/bb/fetch2/__init__.py
buildlinux/unityos
dcbe232d0589013d77a62c33959d6a69f9bfbc5e
[ "Apache-2.0" ]
null
null
null
# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- """ BitBake 'Fetch' implementations Classes for obtaining upstream sources for the BitBake build tools. """ # Copyright (C) 2003, 2004 Chris Larson # Copyright (C) 2012 Intel Corporation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Based on functions from the base bb module, Copyright 2003 Holger Schurig import os, re import signal import logging import urllib.request, urllib.parse, urllib.error if 'git' not in urllib.parse.uses_netloc: urllib.parse.uses_netloc.append('git') import operator import collections import subprocess import pickle import errno import bb.persist_data, bb.utils import bb.checksum import bb.process import bb.event __version__ = "2" _checksum_cache = bb.checksum.FileChecksumCache() logger = logging.getLogger("BitBake.Fetcher") class BBFetchException(Exception): """Class all fetch exceptions inherit from""" def __init__(self, message): self.msg = message Exception.__init__(self, message) def __str__(self): return self.msg class UntrustedUrl(BBFetchException): """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS""" def __init__(self, url, message=''): if message: msg = message else: msg = "The URL: '%s' is not trusted and cannot be used" % url self.url = url BBFetchException.__init__(self, msg) self.args = (url,) class 
MalformedUrl(BBFetchException): """Exception raised when encountering an invalid url""" def __init__(self, url, message=''): if message: msg = message else: msg = "The URL: '%s' is invalid and cannot be interpreted" % url self.url = url BBFetchException.__init__(self, msg) self.args = (url,) class FetchError(BBFetchException): """General fetcher exception when something happens incorrectly""" def __init__(self, message, url = None): if url: msg = "Fetcher failure for URL: '%s'. %s" % (url, message) else: msg = "Fetcher failure: %s" % message self.url = url BBFetchException.__init__(self, msg) self.args = (message, url) class ChecksumError(FetchError): """Exception when mismatched checksum encountered""" def __init__(self, message, url = None, checksum = None): self.checksum = checksum FetchError.__init__(self, message, url) class NoChecksumError(FetchError): """Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set""" class UnpackError(BBFetchException): """General fetcher exception when something happens incorrectly when unpacking""" def __init__(self, message, url): msg = "Unpack failure for URL: '%s'. 
%s" % (url, message) self.url = url BBFetchException.__init__(self, msg) self.args = (message, url) class NoMethodError(BBFetchException): """Exception raised when there is no method to obtain a supplied url or set of urls""" def __init__(self, url): msg = "Could not find a fetcher which supports the URL: '%s'" % url self.url = url BBFetchException.__init__(self, msg) self.args = (url,) class MissingParameterError(BBFetchException): """Exception raised when a fetch method is missing a critical parameter in the url""" def __init__(self, missing, url): msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) self.url = url self.missing = missing BBFetchException.__init__(self, msg) self.args = (missing, url) class ParameterError(BBFetchException): """Exception raised when a url cannot be proccessed due to invalid parameters.""" def __init__(self, message, url): msg = "URL: '%s' has invalid parameters. %s" % (url, message) self.url = url BBFetchException.__init__(self, msg) self.args = (message, url) class NetworkAccess(BBFetchException): """Exception raised when network access is disabled but it is required.""" def __init__(self, url, cmd): msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) self.url = url self.cmd = cmd BBFetchException.__init__(self, msg) self.args = (url, cmd) class NonLocalMethod(Exception): def __init__(self): Exception.__init__(self) class MissingChecksumEvent(bb.event.Event): def __init__(self, url, md5sum, sha256sum): self.url = url self.checksums = {'md5sum': md5sum, 'sha256sum': sha256sum} bb.event.Event.__init__(self) class URI(object): """ A class representing a generic URI, with methods for accessing the URI components, and stringifies to the URI. 

    It is constructed by calling it with a URI, or setting the attributes
    manually:

        uri = URI("http://example.com/")

        uri = URI()
        uri.scheme = 'http'
        uri.hostname = 'example.com'
        uri.path = '/'

    It has the following attributes:

        * scheme (read/write)
        * userinfo (authentication information) (read/write)
          * username (read/write)
          * password (read/write)

          Note, password is deprecated as of RFC 3986.

        * hostname (read/write)
        * port (read/write)
        * hostport (read only)
          "hostname:port", if both are set, otherwise just "hostname"
        * path (read/write)
        * path_quoted (read/write)
          A URI quoted version of path
        * params (dict) (read/write)
        * query (dict) (read/write)
        * relative (bool) (read only)
          True if this is a "relative URI", (e.g. file:foo.diff)

    It stringifies to the URI itself.

    Some notes about relative URIs: while it's specified that a URI beginning
    with <scheme>:// should either be directly followed by a hostname or a /,
    the old URI handling of the fetch2 library did not conform to this.
    Therefore, this URI class has some kludges to make sure that URIs are
    parsed in a way conforming to bitbake's current usage. This URI class
    supports the following:

     file:relative/path.diff (IETF compliant)
     git:relative/path.git (IETF compliant)
     git:///absolute/path.git (IETF compliant)
     file:///absolute/path.diff (IETF compliant)

     file://relative/path.diff (not IETF compliant)

    But it does not support the following:

     file://hostname/absolute/path.diff (would be IETF compliant)

    Note that the last case only applies to a list of "whitelisted" schemes
    (currently only file://), that requires its URIs to not have a network
    location.
    """

    # Schemes for which a URI without a netloc is treated as relative
    _relative_schemes = ['file', 'git']
    # Schemes that must never carry a network location (see class docstring)
    _netloc_forbidden = ['file']

    def __init__(self, uri=None):
        self.scheme = ''
        self.userinfo = ''
        self.hostname = ''
        self.port = None
        self._path = ''
        self.params = {}
        self.query = {}
        self.relative = False

        if not uri:
            return

        # We hijack the URL parameters, since the way bitbake uses
        # them are not quite RFC compliant.
        uri, param_str = (uri.split(";", 1) + [None])[:2]

        urlp = urllib.parse.urlparse(uri)
        self.scheme = urlp.scheme

        reparse = 0

        # Coerce urlparse to make URI scheme use netloc
        if not self.scheme in urllib.parse.uses_netloc:
            urllib.parse.uses_params.append(self.scheme)
            reparse = 1

        # Make urlparse happy(/ier) by converting local resources
        # to RFC compliant URL format. E.g.:
        #   file://foo.diff -> file:foo.diff
        if urlp.scheme in self._netloc_forbidden:
            uri = re.sub("(?<=:)//(?!/)", "", uri, 1)
            reparse = 1

        if reparse:
            urlp = urllib.parse.urlparse(uri)

        # Identify if the URI is relative or not
        if urlp.scheme in self._relative_schemes and \
           re.compile("^\w+:(?!//)").match(uri):
            self.relative = True

        if not self.relative:
            self.hostname = urlp.hostname or ''
            self.port = urlp.port

            self.userinfo += urlp.username or ''

            if urlp.password:
                self.userinfo += ':%s' % urlp.password

        self.path = urllib.parse.unquote(urlp.path)

        if param_str:
            self.params = self._param_str_split(param_str, ";")
        if urlp.query:
            self.query = self._param_str_split(urlp.query, "&")

    def __str__(self):
        userinfo = self.userinfo
        if userinfo:
            userinfo += '@'

        return "%s:%s%s%s%s%s%s" % (
            self.scheme,
            '' if self.relative else '//',
            userinfo,
            self.hostport,
            self.path_quoted,
            self._query_str(),
            self._param_str())

    def _param_str(self):
        # ";key=value;..." suffix, or '' when there are no params
        return (
            ''.join([';', self._param_str_join(self.params, ";")])
            if self.params else '')

    def _query_str(self):
        # "?key=value&..." suffix, or '' when there is no query
        return (
            ''.join(['?', self._param_str_join(self.query, "&")])
            if self.query else '')

    def _param_str_split(self, string, elmdelim, kvdelim="="):
        # Parse "k=v<elmdelim>k=v..." into an ordered dict, preserving order
        ret = collections.OrderedDict()
        for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]:
            ret[k] = v
        return ret

    def _param_str_join(self, dict_, elmdelim, kvdelim="="):
        # Inverse of _param_str_split
        return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])

    @property
    def hostport(self):
        if not self.port:
            return self.hostname
        return "%s:%d" % (self.hostname, self.port)

    @property
    def path_quoted(self):
        return urllib.parse.quote(self.path)

    @path_quoted.setter
    def path_quoted(self, path):
        self.path = urllib.parse.unquote(path)

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        self._path = path

        # An absolute (or empty) path makes the URI non-relative
        if not path or re.compile("^/").match(path):
            self.relative = False
        else:
            self.relative = True

    @property
    def username(self):
        if self.userinfo:
            return (self.userinfo.split(":", 1))[0]
        return ''

    @username.setter
    def username(self, username):
        password = self.password
        self.userinfo = username
        if password:
            self.userinfo += ":%s" % password

    @property
    def password(self):
        if self.userinfo and ":" in self.userinfo:
            return (self.userinfo.split(":", 1))[1]
        return ''

    @password.setter
    def password(self, password):
        self.userinfo = "%s:%s" % (self.username, password)

def decodeurl(url):
    """Decodes an URL into the tokens (scheme, network location, path,
    user, password, parameters).
    """

    m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
    if not m:
        raise MalformedUrl(url)

    type = m.group('type')
    location = m.group('location')
    if not location:
        raise MalformedUrl(url)
    user = m.group('user')
    parm = m.group('parm')

    # For non-file URLs everything up to the first '/' is the host;
    # file URLs have no host and the whole location is the path.
    locidx = location.find('/')
    if locidx != -1 and type.lower() != 'file':
        host = location[:locidx]
        path = location[locidx:]
    elif type.lower() == 'file':
        host = ""
        path = location
    else:
        host = location
        path = ""
    if user:
        m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
        if m:
            user = m.group('user')
            pswd = m.group('pswd')
    else:
        user = ''
        pswd = ''

    p = collections.OrderedDict()
    if parm:
        for s in parm.split(';'):
            if s:
                if not '=' in s:
                    raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
                s1, s2 = s.split('=')
                p[s1] = s2

    return type, host, urllib.parse.unquote(path), user, pswd, p

def encodeurl(decoded):
    """Encodes a URL from tokens (scheme, network location, path,
    user, password, parameters).
""" type, host, path, user, pswd, p = decoded if not type: raise MissingParameterError('type', "encoded from the data %s" % str(decoded)) url = '%s://' % type if user and type != "file": url += "%s" % user if pswd: url += ":%s" % pswd url += "@" if host and type != "file": url += "%s" % host if path: # Standardise path to ensure comparisons work while '//' in path: path = path.replace("//", "/") url += "%s" % urllib.parse.quote(path) if p: for parm in p: url += ";%s=%s" % (parm, p[parm]) return url def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None): if not ud.url or not uri_find or not uri_replace: logger.error("uri_replace: passed an undefined value, not replacing") return None uri_decoded = list(decodeurl(ud.url)) uri_find_decoded = list(decodeurl(uri_find)) uri_replace_decoded = list(decodeurl(uri_replace)) logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded)) result_decoded = ['', '', '', '', '', {}] for loc, i in enumerate(uri_find_decoded): result_decoded[loc] = uri_decoded[loc] regexp = i if loc == 0 and regexp and not regexp.endswith("$"): # Leaving the type unanchored can mean "https" matching "file" can become "files" # which is clearly undesirable. 
regexp += "$" if loc == 5: # Handle URL parameters if i: # Any specified URL parameters must match for k in uri_replace_decoded[loc]: if uri_decoded[loc][k] != uri_replace_decoded[loc][k]: return None # Overwrite any specified replacement parameters for k in uri_replace_decoded[loc]: for l in replacements: uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l]) result_decoded[loc][k] = uri_replace_decoded[loc][k] elif (re.match(regexp, uri_decoded[loc])): if not uri_replace_decoded[loc]: result_decoded[loc] = "" else: for k in replacements: uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k]) #bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc])) result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1) if loc == 2: # Handle path manipulations basename = None if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball: # If the source and destination url types differ, must be a mirrortarball mapping basename = os.path.basename(mirrortarball) # Kill parameters, they make no sense for mirror tarballs uri_decoded[5] = {} elif ud.localpath and ud.method.supports_checksum(ud): basename = os.path.basename(ud.localpath) if basename and not result_decoded[loc].endswith(basename): result_decoded[loc] = os.path.join(result_decoded[loc], basename) else: return None result = encodeurl(result_decoded) if result == ud.url: return None logger.debug(2, "For url %s returning %s" % (ud.url, result)) return result methods = [] urldata_cache = {} saved_headrevs = {} def fetcher_init(d): """ Called to initialize the fetchers once the configuration data is known. Calls before this must not hit the cache. 
""" # When to drop SCM head revisions controlled by user policy srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear" if srcrev_policy == "cache": logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy) elif srcrev_policy == "clear": logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy) revs = bb.persist_data.persist('BB_URI_HEADREVS', d) try: bb.fetch2.saved_headrevs = revs.items() except: pass revs.clear() else: raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy) _checksum_cache.init_cache(d) for m in methods: if hasattr(m, "init"): m.init(d) def fetcher_parse_save(): _checksum_cache.save_extras() def fetcher_parse_done(): _checksum_cache.save_merge() def fetcher_compare_revisions(): """ Compare the revisions in the persistant cache with current values and return true/false on whether they've changed. """ data = bb.persist_data.persist('BB_URI_HEADREVS', d).items() data2 = bb.fetch2.saved_headrevs changed = False for key in data: if key not in data2 or data2[key] != data[key]: logger.debug(1, "%s changed", key) changed = True return True else: logger.debug(2, "%s did not change", key) return False def mirror_from_string(data): mirrors = (data or "").replace('\\n',' ').split() # Split into pairs if len(mirrors) % 2 != 0: bb.warn('Invalid mirror data %s, should have paired members.' % data) return list(zip(*[iter(mirrors)]*2)) def verify_checksum(ud, d, precomputed={}): """ verify the MD5 and SHA256 checksum for downloaded src Raises a FetchError if one or both of the SRC_URI checksums do not match the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no checksums specified. Returns a dict of checksums that can be stored in a done stamp file and passed in as precomputed parameter in a later call to avoid re-computing the checksums from the file. This allows verifying the checksums of the file against those in the recipe each time, rather than only after downloading. 
    See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571.
    """

    _MD5_KEY = "md5"
    _SHA256_KEY = "sha256"

    if ud.ignore_checksums or not ud.method.supports_checksum(ud):
        return {}

    # Re-use precomputed sums when available; hashing large files is costly
    if _MD5_KEY in precomputed:
        md5data = precomputed[_MD5_KEY]
    else:
        md5data = bb.utils.md5_file(ud.localpath)

    if _SHA256_KEY in precomputed:
        sha256data = precomputed[_SHA256_KEY]
    else:
        sha256data = bb.utils.sha256_file(ud.localpath)

    if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected:
        # If strict checking enabled and neither sum defined, raise error
        strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
        if strict == "1":
            logger.error('No checksum specified for %s, please add at least one to the recipe:\n'
                         'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
                         (ud.localpath, ud.md5_name, md5data,
                          ud.sha256_name, sha256data))
            raise NoChecksumError('Missing SRC_URI checksum', ud.url)

        bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d)

        if strict == "ignore":
            return {
                _MD5_KEY: md5data,
                _SHA256_KEY: sha256data
            }

        # Log missing sums so user can more easily add them
        logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
                       'SRC_URI[%s] = "%s"',
                       ud.localpath, ud.md5_name, md5data)
        logger.warning('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n'
                       'SRC_URI[%s] = "%s"',
                       ud.localpath, ud.sha256_name, sha256data)

    # We want to alert the user if a checksum is defined in the recipe but
    # it does not match.
    msg = ""
    mismatch = False
    if ud.md5_expected and ud.md5_expected != md5data:
        msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected)
        mismatch = True;

    if ud.sha256_expected and ud.sha256_expected != sha256data:
        msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected)
        mismatch = True;

    if mismatch:
        msg = msg + '\nIf this change is expected (e.g. you have upgraded to a new version without updating the checksums) then you can use these lines within the recipe:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"\nOtherwise you should retry the download and/or check with upstream to determine if the file has become corrupted or otherwise unexpectedly modified.\n' % (ud.md5_name, md5data, ud.sha256_name, sha256data)

    if len(msg):
        raise ChecksumError('Checksum mismatch!%s' % msg, ud.url, md5data)

    return {
        _MD5_KEY: md5data,
        _SHA256_KEY: sha256data
    }

def verify_donestamp(ud, d, origud=None):
    """
    Check whether the done stamp file has the right checksums (if the fetch
    method supports them).

    If it doesn't, delete the done stamp and force a re-download.

    Returns True, if the donestamp exists and is valid, False otherwise. When
    returning False, any existing done stamps are removed.
    """
    if not ud.needdonestamp or (origud and not origud.needdonestamp):
        return True

    if not os.path.exists(ud.donestamp):
        return False

    if (not ud.method.supports_checksum(ud) or
        (origud and not origud.method.supports_checksum(origud))):
        # done stamp exists, checksums not supported; assume the local file is
        # current
        return True

    if not os.path.exists(ud.localpath):
        # done stamp exists, but the downloaded file does not; the done stamp
        # must be incorrect, re-trigger the download
        bb.utils.remove(ud.donestamp)
        return False

    precomputed_checksums = {}
    # Only re-use the precomputed checksums if the donestamp is newer than the
    # file. Do not rely on the mtime of directories, though. If ud.localpath is
    # a directory, there will probably not be any checksums anyway.
    if (os.path.isdir(ud.localpath) or
            os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)):
        try:
            with open(ud.donestamp, "rb") as cachefile:
                pickled = pickle.Unpickler(cachefile)
                precomputed_checksums.update(pickled.load())
        except Exception as e:
            # Avoid the warnings on the upgrade path from empty done stamp
            # files to those containing the checksums.
            if not isinstance(e, EOFError):
                # Ignore errors, they aren't fatal
                logger.warning("Couldn't load checksums from donestamp %s: %s "
                               "(msg: %s)" % (ud.donestamp, type(e).__name__,
                                              str(e)))

    try:
        checksums = verify_checksum(ud, d, precomputed_checksums)
        # If the cache file did not have the checksums, compute and store them
        # as an upgrade path from the previous done stamp file format.
        if checksums != precomputed_checksums:
            with open(ud.donestamp, "wb") as cachefile:
                p = pickle.Pickler(cachefile, 2)
                p.dump(checksums)
        return True
    except ChecksumError as e:
        # Checksums failed to verify, trigger re-download and remove the
        # incorrect stamp file.
        logger.warning("Checksum mismatch for local file %s\n"
                       "Cleaning and trying again." % ud.localpath)
        if os.path.exists(ud.localpath):
            rename_bad_checksum(ud, e.checksum)
        bb.utils.remove(ud.donestamp)
    return False

def update_stamp(ud, d):
    """
        donestamp is file stamp indicating the whole fetching is done
        this function update the stamp after verifying the checksum
    """
    if not ud.needdonestamp:
        return

    if os.path.exists(ud.donestamp):
        # Touch the done stamp file to show active use of the download
        try:
            os.utime(ud.donestamp, None)
        except:
            # Errors aren't fatal here
            pass
    else:
        try:
            checksums = verify_checksum(ud, d)
            # Store the checksums for later re-verification against the recipe
            with open(ud.donestamp, "wb") as cachefile:
                p = pickle.Pickler(cachefile, 2)
                p.dump(checksums)
        except ChecksumError as e:
            # Checksums failed to verify, trigger re-download and remove the
            # incorrect stamp file.
            logger.warning("Checksum mismatch for local file %s\n"
                           "Cleaning and trying again." % ud.localpath)
            if os.path.exists(ud.localpath):
                rename_bad_checksum(ud, e.checksum)
            bb.utils.remove(ud.donestamp)
            raise

def subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
# SIGPIPE errors are known issues with gzip/bash signal.signal(signal.SIGPIPE, signal.SIG_DFL) def get_autorev(d): # only not cache src rev in autorev case if d.getVar('BB_SRCREV_POLICY') != "cache": d.setVar('BB_DONT_CACHE', '1') return "AUTOINC" def get_srcrev(d, method_name='sortable_revision'): """ Return the revision string, usually for use in the version string (PV) of the current package Most packages usually only have one SCM so we just pass on the call. In the multi SCM case, we build a value based on SRCREV_FORMAT which must have been set. The idea here is that we put the string "AUTOINC+" into return value if the revisions are not incremental, other code is then responsible for turning that into an increasing value (if needed) A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if that fetcher provides a method with the given name and the same signature as sortable_revision. """ scms = [] fetcher = Fetch(d.getVar('SRC_URI').split(), d) urldata = fetcher.ud for u in urldata: if urldata[u].method.supports_srcrev(): scms.append(u) if len(scms) == 0: raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI") if len(scms) == 1 and len(urldata[scms[0]].names) == 1: autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0]) if len(rev) > 10: rev = rev[:10] if autoinc: return "AUTOINC+" + rev return rev # # Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT # format = d.getVar('SRCREV_FORMAT') if not format: raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.") name_to_rev = {} seenautoinc = False for scm in scms: ud = urldata[scm] for name in ud.names: autoinc, rev = getattr(ud.method, method_name)(ud, d, name) seenautoinc = seenautoinc or autoinc if len(rev) > 10: rev = rev[:10] name_to_rev[name] = rev # Replace names by revisions in the SRCREV_FORMAT string. 
The approach used # here can handle names being prefixes of other names and names appearing # as substrings in revisions (in which case the name should not be # expanded). The '|' regular expression operator tries matches from left to # right, so we need to sort the names with the longest ones first. names_descending_len = sorted(name_to_rev, key=len, reverse=True) name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len) format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format) if seenautoinc: format = "AUTOINC+" + format return format def localpath(url, d): fetcher = bb.fetch2.Fetch([url], d) return fetcher.localpath(url) def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None): """ Run cmd returning the command output Raise an error if interrupted or cmd fails Optionally echo command output to stdout Optionally remove the files/directories listed in cleanup upon failure """ # Need to export PATH as binary could be in metadata paths # rather than host provided # Also include some other variables. # FIXME: Should really include all export varaiables? exportvars = ['HOME', 'PATH', 'HTTP_PROXY', 'http_proxy', 'HTTPS_PROXY', 'https_proxy', 'FTP_PROXY', 'ftp_proxy', 'FTPS_PROXY', 'ftps_proxy', 'NO_PROXY', 'no_proxy', 'ALL_PROXY', 'all_proxy', 'GIT_PROXY_COMMAND', 'GIT_SSL_CAINFO', 'GIT_SMART_HTTP', 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', 'SOCKS5_USER', 'SOCKS5_PASSWD', 'DBUS_SESSION_BUS_ADDRESS', 'P4CONFIG'] if not cleanup: cleanup = [] # If PATH contains WORKDIR which contains PV which contains SRCPV we # can end up in circular recursion here so give the option of breaking it # in a data store copy. 
try: d.getVar("PV") except bb.data_smart.ExpansionError: d = bb.data.createCopy(d) d.setVar("PV", "fetcheravoidrecurse") origenv = d.getVar("BB_ORIGENV", False) for var in exportvars: val = d.getVar(var) or (origenv and origenv.getVar(var)) if val: cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd) logger.debug(1, "Running %s", cmd) success = False error_message = "" try: (output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir) success = True except bb.process.NotFoundError as e: error_message = "Fetch command %s" % (e.command) except bb.process.ExecutionError as e: if e.stdout: output = "output:\n%s\n%s" % (e.stdout, e.stderr) elif e.stderr: output = "output:\n%s" % e.stderr else: output = "no output" error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output) except bb.process.CmdError as e: error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg) if not success: for f in cleanup: try: bb.utils.remove(f, True) except OSError: pass raise FetchError(error_message) return output def check_network_access(d, info, url): """ log remote network access, and error if BB_NO_NETWORK is set or the given URI is untrusted """ if d.getVar("BB_NO_NETWORK") == "1": raise NetworkAccess(url, info) elif not trusted_network(d, url): raise UntrustedUrl(url, info) else: logger.debug(1, "Fetcher accessed the network with the command %s" % info) def build_mirroruris(origud, mirrors, ld): uris = [] uds = [] replacements = {} replacements["TYPE"] = origud.type replacements["HOST"] = origud.host replacements["PATH"] = origud.path replacements["BASENAME"] = origud.path.split("/")[-1] replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.') def adduri(ud, uris, uds, mirrors, tarballs): for line in mirrors: try: (find, replace) = line except ValueError: continue for tarball in tarballs: newuri = uri_replace(ud, find, replace, replacements, ld, 
tarball) if not newuri or newuri in uris or newuri == origud.url: continue if not trusted_network(ld, newuri): logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri)) continue # Create a local copy of the mirrors minus the current line # this will prevent us from recursively processing the same line # as well as indirect recursion A -> B -> C -> A localmirrors = list(mirrors) localmirrors.remove(line) try: newud = FetchData(newuri, ld) newud.setup_localpath(ld) except bb.fetch2.BBFetchException as e: logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) logger.debug(1, str(e)) try: # setup_localpath of file:// urls may fail, we should still see # if mirrors of the url exist adduri(newud, uris, uds, localmirrors, tarballs) except UnboundLocalError: pass continue uris.append(newuri) uds.append(newud) adduri(newud, uris, uds, localmirrors, tarballs) adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None]) return uris, uds def rename_bad_checksum(ud, suffix): """ Renames files to have suffix from parameter """ if ud.localpath is None: return new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix) bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath)) bb.utils.movefile(ud.localpath, new_localpath) def try_mirror_url(fetch, origud, ud, ld, check = False): # Return of None or a value means we're finished # False means try another url if ud.lockfile and ud.lockfile != origud.lockfile: lf = bb.utils.lockfile(ud.lockfile) try: if check: found = ud.method.checkstatus(fetch, ud, ld) if found: return found return False if not verify_donestamp(ud, ld, origud) or ud.method.need_update(ud, ld): ud.method.download(ud, ld) if hasattr(ud.method,"build_mirror_data"): ud.method.build_mirror_data(ud, ld) if not ud.localpath or not os.path.exists(ud.localpath): return False if ud.localpath == origud.localpath: return ud.localpath # We may be obtaining a mirror tarball which needs further 
processing by the real fetcher # If that tarball is a local file:// we need to provide a symlink to it dldir = ld.getVar("DL_DIR") if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): # Create donestamp in old format to avoid triggering a re-download if ud.donestamp: bb.utils.mkdirhier(os.path.dirname(ud.donestamp)) open(ud.donestamp, 'w').close() dest = os.path.join(dldir, os.path.basename(ud.localpath)) if not os.path.exists(dest): # In case this is executing without any file locks held (as is # the case for file:// URLs), two tasks may end up here at the # same time, in which case we do not want the second task to # fail when the link has already been created by the first task. try: os.symlink(ud.localpath, dest) except FileExistsError: pass if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld): origud.method.download(origud, ld) if hasattr(origud.method, "build_mirror_data"): origud.method.build_mirror_data(origud, ld) return origud.localpath # Otherwise the result is a local file:// and we symlink to it if not os.path.exists(origud.localpath): if os.path.islink(origud.localpath): # Broken symbolic link os.unlink(origud.localpath) # As per above, in case two tasks end up here simultaneously. try: os.symlink(ud.localpath, origud.localpath) except FileExistsError: pass update_stamp(origud, ld) return ud.localpath except bb.fetch2.NetworkAccess: raise except IOError as e: if e.errno in [os.errno.ESTALE]: logger.warning("Stale Error Observed %s." % ud.url) return False raise except bb.fetch2.BBFetchException as e: if isinstance(e, ChecksumError): logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." 
% (ud.url, origud.url)) logger.warning(str(e)) if os.path.exists(ud.localpath): rename_bad_checksum(ud, e.checksum) elif isinstance(e, NoChecksumError): raise else: logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url)) logger.debug(1, str(e)) try: ud.method.clean(ud, ld) except UnboundLocalError: pass return False finally: if ud.lockfile and ud.lockfile != origud.lockfile: bb.utils.unlockfile(lf) def try_mirrors(fetch, d, origud, mirrors, check = False): """ Try to use a mirrored version of the sources. This method will be automatically called before the fetchers go. d Is a bb.data instance uri is the original uri we're trying to download mirrors is the list of mirrors we're going to try """ ld = d.createCopy() uris, uds = build_mirroruris(origud, mirrors, ld) for index, uri in enumerate(uris): ret = try_mirror_url(fetch, origud, uds[index], ld, check) if ret != False: return ret return None def trusted_network(d, url): """ Use a trusted url during download if networking is enabled and BB_ALLOWED_NETWORKS is set globally or for a specific recipe. Note: modifies SRC_URI & mirrors. """ if d.getVar('BB_NO_NETWORK') == "1": return True pkgname = d.expand(d.getVar('PN', False)) trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False) if not trusted_hosts: trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS') # Not enabled. if not trusted_hosts: return True scheme, network, path, user, passwd, param = decodeurl(url) if not network: return True network = network.split(':')[0] network = network.lower() for host in trusted_hosts.split(" "): host = host.lower() if host.startswith("*.") and ("." 
+ network).endswith(host[1:]): return True if host == network: return True return False def srcrev_internal_helper(ud, d, name): """ Return: a) a source revision if specified b) latest revision if SRCREV="AUTOINC" c) None if not specified """ srcrev = None pn = d.getVar("PN") attempts = [] if name != '' and pn: attempts.append("SRCREV_%s_pn-%s" % (name, pn)) if name != '': attempts.append("SRCREV_%s" % name) if pn: attempts.append("SRCREV_pn-%s" % pn) attempts.append("SRCREV") for a in attempts: srcrev = d.getVar(a) if srcrev and srcrev != "INVALID": break if 'rev' in ud.parm and 'tag' in ud.parm: raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." % (ud.url)) if 'rev' in ud.parm or 'tag' in ud.parm: if 'rev' in ud.parm: parmrev = ud.parm['rev'] else: parmrev = ud.parm['tag'] if srcrev == "INVALID" or not srcrev: return parmrev if srcrev != parmrev: raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev)) return parmrev if srcrev == "INVALID" or not srcrev: raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url) if srcrev == "AUTOINC": srcrev = ud.method.latest_revision(ud, d, name) return srcrev def get_checksum_file_list(d): """ Get a list of files checksum in SRC_URI Returns the resolved local paths of all local file entries in SRC_URI as a space-separated string """ fetch = Fetch([], d, cache = False, localonly = True) dl_dir = d.getVar('DL_DIR') filelist = [] for u in fetch.urls: ud = fetch.ud[u] if ud and isinstance(ud.method, local.Local): paths = ud.method.localpaths(ud, d) for f in paths: pth = ud.decodedurl if '*' in pth: f = os.path.join(os.path.abspath(f), pth) if f.startswith(dl_dir): # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else if os.path.exists(f): bb.warn("Getting 
checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f))) else: bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f))) filelist.append(f + ":" + str(os.path.exists(f))) return " ".join(filelist) def get_file_checksums(filelist, pn): """Get a list of the checksums for a list of local files Returns the checksums for a list of local files, caching the results as it proceeds """ return _checksum_cache.get_checksums(filelist, pn) class FetchData(object): """ A class which represents the fetcher state for a given URI. """ def __init__(self, url, d, localonly = False): # localpath is the location of a downloaded result. If not set, the file is local. self.donestamp = None self.needdonestamp = True self.localfile = "" self.localpath = None self.lockfile = None self.mirrortarballs = [] self.basename = None self.basepath = None (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url)) self.date = self.getSRCDate(d) self.url = url if not self.user and "user" in self.parm: self.user = self.parm["user"] if not self.pswd and "pswd" in self.parm: self.pswd = self.parm["pswd"] self.setup = False if "name" in self.parm: self.md5_name = "%s.md5sum" % self.parm["name"] self.sha256_name = "%s.sha256sum" % self.parm["name"] else: self.md5_name = "md5sum" self.sha256_name = "sha256sum" if self.md5_name in self.parm: self.md5_expected = self.parm[self.md5_name] elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]: self.md5_expected = None else: self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name) if self.sha256_name in self.parm: self.sha256_expected = self.parm[self.sha256_name] elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]: self.sha256_expected = None else: self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name) self.ignore_checksums = False self.names = 
self.parm.get("name",'default').split(',') self.method = None for m in methods: if m.supports(self, d): self.method = m break if not self.method: raise NoMethodError(url) if localonly and not isinstance(self.method, local.Local): raise NonLocalMethod() if self.parm.get("proto", None) and "protocol" not in self.parm: logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN')) self.parm["protocol"] = self.parm.get("proto", None) if hasattr(self.method, "urldata_init"): self.method.urldata_init(self, d) if "localpath" in self.parm: # if user sets localpath for file, use it instead. self.localpath = self.parm["localpath"] self.basename = os.path.basename(self.localpath) elif self.localfile: self.localpath = self.method.localpath(self, d) dldir = d.getVar("DL_DIR") if not self.needdonestamp: return # Note: .done and .lock files should always be in DL_DIR whereas localpath may not be. if self.localpath and self.localpath.startswith(dldir): basepath = self.localpath elif self.localpath: basepath = dldir + os.sep + os.path.basename(self.localpath) elif self.basepath or self.basename: basepath = dldir + os.sep + (self.basepath or self.basename) else: bb.fatal("Can't determine lock path for url %s" % url) self.donestamp = basepath + '.done' self.lockfile = basepath + '.lock' def setup_revisions(self, d): self.revisions = {} for name in self.names: self.revisions[name] = srcrev_internal_helper(self, d, name) # add compatibility code for non name specified case if len(self.names) == 1: self.revision = self.revisions[self.names[0]] def setup_localpath(self, d): if not self.localpath: self.localpath = self.method.localpath(self, d) def getSRCDate(self, d): """ Return the SRC Date for the component d the bb.data module """ if "srcdate" in self.parm: return self.parm['srcdate'] pn = d.getVar("PN") if pn: return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE") return d.getVar("SRCDATE") or d.getVar("DATE") class 
FetchMethod(object):
    """Base class for 'fetch'ing data"""

    def __init__(self, urls=None):
        # NOTE(review): the 'urls' argument is accepted but ignored — the
        # list always starts empty and is populated via the 'urls' property.
        # Confirm whether any caller relies on passing urls here.
        self.urls = []

    def supports(self, urldata, d):
        """
        Check to see if this fetch class supports a given url.
        """
        return 0

    def localpath(self, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        Can also setup variables in urldata for use in go (saving code duplication
        and duplicate code execution)
        """
        return os.path.join(d.getVar("DL_DIR"), urldata.localfile)

    def supports_checksum(self, urldata):
        """
        Is localpath something that can be represented by a checksum?
        """

        # We cannot compute checksums for directories
        if os.path.isdir(urldata.localpath) == True:
            return False
        # Glob patterns may expand to several files; no single checksum applies
        if urldata.localpath.find("*") != -1:
            return False

        return True

    def recommends_checksum(self, urldata):
        """
        Is the backend on where checksumming is recommended (should warnings
        be displayed if there is no checksum)?
        """
        return False

    def _strip_leading_slashes(self, relpath):
        """
        Remove leading slash as os.path.join can't cope
        """
        while os.path.isabs(relpath):
            relpath = relpath[1:]
        return relpath

    def setUrls(self, urls):
        self.__urls = urls

    def getUrls(self):
        return self.__urls

    urls = property(getUrls, setUrls, None, "Urls property")

    def need_update(self, ud, d):
        """
        Force a fetch, even if localpath exists?
""" if os.path.exists(ud.localpath): return False return True def supports_srcrev(self): """ The fetcher supports auto source revisions (SRCREV) """ return False def download(self, urldata, d): """ Fetch urls Assumes localpath was called first """ raise NoMethodError(url) def unpack(self, urldata, rootdir, data): iterate = False file = urldata.localpath # Localpath can't deal with 'dir/*' entries, so it converts them to '.', # but it must be corrected back for local files copying if urldata.basename == '*' and file.endswith('/.'): file = '%s/%s' % (file.rstrip('/.'), urldata.path) try: unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True) except ValueError as exc: bb.fatal("Invalid value for 'unpack' parameter for %s: %s" % (file, urldata.parm.get('unpack'))) base, ext = os.path.splitext(file) if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']: efile = os.path.join(rootdir, os.path.basename(base)) else: efile = file cmd = None if unpack: if file.endswith('.tar'): cmd = 'tar x --no-same-owner -f %s' % file elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'): cmd = 'tar xz --no-same-owner -f %s' % file elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'): cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'): cmd = 'gzip -dc %s > %s' % (file, efile) elif file.endswith('.bz2'): cmd = 'bzip2 -dc %s > %s' % (file, efile) elif file.endswith('.tar.xz'): cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file elif file.endswith('.xz'): cmd = 'xz -dc %s > %s' % (file, efile) elif file.endswith('.tar.lz'): cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file elif file.endswith('.lz'): cmd = 'lzip -dc %s > %s' % (file, efile) elif file.endswith('.tar.7z'): cmd = '7z x -so %s | tar x --no-same-owner -f -' % file elif file.endswith('.7z'): cmd = '7za x -y %s 1>/dev/null' % file elif file.endswith('.zip') or file.endswith('.jar'): try: dos = 
bb.utils.to_boolean(urldata.parm.get('dos'), False) except ValueError as exc: bb.fatal("Invalid value for 'dos' parameter for %s: %s" % (file, urldata.parm.get('dos'))) cmd = 'unzip -q -o' if dos: cmd = '%s -a' % cmd cmd = "%s '%s'" % (cmd, file) elif file.endswith('.rpm') or file.endswith('.srpm'): if 'extract' in urldata.parm: unpack_file = urldata.parm.get('extract') cmd = 'rpm2cpio.sh %s | cpio -id %s' % (file, unpack_file) iterate = True iterate_file = unpack_file else: cmd = 'rpm2cpio.sh %s | cpio -id' % (file) elif file.endswith('.deb') or file.endswith('.ipk'): output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True) datafile = None if output: for line in output.decode().splitlines(): if line.startswith('data.tar.'): datafile = line break else: raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar.* file", urldata.url) else: raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url) cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile) # If 'subdir' param exists, create a dir and use it as destination for unpack cmd if 'subdir' in urldata.parm: subdir = urldata.parm.get('subdir') if os.path.isabs(subdir): if not os.path.realpath(subdir).startswith(os.path.realpath(rootdir)): raise UnpackError("subdir argument isn't a subdirectory of unpack root %s" % rootdir, urldata.url) unpackdir = subdir else: unpackdir = os.path.join(rootdir, subdir) bb.utils.mkdirhier(unpackdir) else: unpackdir = rootdir if not unpack or not cmd: # If file == dest, then avoid any copies, as we already put the file into dest! dest = os.path.join(unpackdir, os.path.basename(file)) if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)): destdir = '.' 
# For file:// entries all intermediate dirs in path must be created at destination if urldata.type == "file": # Trailing '/' does a copying to wrong place urlpath = urldata.path.rstrip('/') # Want files places relative to cwd so no leading '/' urlpath = urlpath.lstrip('/') if urlpath.find("/") != -1: destdir = urlpath.rsplit("/", 1)[0] + '/' bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir)) cmd = 'cp -fpPRH %s %s' % (file, destdir) if not cmd: return path = data.getVar('PATH') if path: cmd = "PATH=\"%s\" %s" % (path, cmd) bb.note("Unpacking %s to %s/" % (file, unpackdir)) ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=unpackdir) if ret != 0: raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url) if iterate is True: iterate_urldata = urldata iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file) self.unpack(urldata, rootdir, data) return def clean(self, urldata, d): """ Clean any existing full or partial download """ bb.utils.remove(urldata.localpath) def try_premirror(self, urldata, d): """ Should premirrors be used? """ return True def checkstatus(self, fetch, urldata, d): """ Check the status of a URL Assumes localpath was called first """ logger.info("URL %s could not be checked for status since no method exists.", url) return True def latest_revision(self, ud, d, name): """ Look in the cache for the latest revision, if not present ask the SCM. 
""" if not hasattr(self, "_latest_revision"): raise ParameterError("The fetcher for this URL does not support _latest_revision", url) revs = bb.persist_data.persist('BB_URI_HEADREVS', d) key = self.generate_revision_key(ud, d, name) try: return revs[key] except KeyError: revs[key] = rev = self._latest_revision(ud, d, name) return rev def sortable_revision(self, ud, d, name): latest_rev = self._build_revision(ud, d, name) return True, str(latest_rev) def generate_revision_key(self, ud, d, name): key = self._revision_key(ud, d, name) return "%s-%s" % (key, d.getVar("PN") or "") def latest_versionstring(self, ud, d): """ Compute the latest release name like "x.y.x" in "x.y.x+gitHASH" by searching through the tags output of ls-remote, comparing versions and returning the highest match as a (version, revision) pair. """ return ('', '') class Fetch(object): def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None): if localonly and cache: raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time") if len(urls) == 0: urls = d.getVar("SRC_URI").split() self.urls = urls self.d = d self.ud = {} self.connection_cache = connection_cache fn = d.getVar('FILE') mc = d.getVar('__BBMULTICONFIG') or "" if cache and fn and mc + fn in urldata_cache: self.ud = urldata_cache[mc + fn] for url in urls: if url not in self.ud: try: self.ud[url] = FetchData(url, d, localonly) except NonLocalMethod: if localonly: self.ud[url] = None pass if fn and cache: urldata_cache[mc + fn] = self.ud def localpath(self, url): if url not in self.urls: self.ud[url] = FetchData(url, self.d) self.ud[url].setup_localpath(self.d) return self.d.expand(self.ud[url].localpath) def localpaths(self): """ Return a list of the local filenames, assuming successful fetch """ local = [] for u in self.urls: ud = self.ud[u] ud.setup_localpath(self.d) local.append(ud.localpath) return local def download(self, urls=None): """ Fetch all urls """ if not urls: urls = 
self.urls network = self.d.getVar("BB_NO_NETWORK") premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1") for u in urls: ud = self.ud[u] ud.setup_localpath(self.d) m = ud.method localpath = "" if ud.lockfile: lf = bb.utils.lockfile(ud.lockfile) try: self.d.setVar("BB_NO_NETWORK", network) if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d): localpath = ud.localpath elif m.try_premirror(ud, self.d): logger.debug(1, "Trying PREMIRRORS") mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) localpath = try_mirrors(self, self.d, ud, mirrors, False) if localpath: try: # early checksum verification so that if the checksum of the premirror # contents mismatch the fetcher can still try upstream and mirrors update_stamp(ud, self.d) except ChecksumError as e: logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u) logger.debug(1, str(e)) localpath = "" if premirroronly: self.d.setVar("BB_NO_NETWORK", "1") firsterr = None verified_stamp = verify_donestamp(ud, self.d) if not localpath and (not verified_stamp or m.need_update(ud, self.d)): try: if not trusted_network(self.d, ud.url): raise UntrustedUrl(ud.url) logger.debug(1, "Trying Upstream") m.download(ud, self.d) if hasattr(m, "build_mirror_data"): m.build_mirror_data(ud, self.d) localpath = ud.localpath # early checksum verify, so that if checksum mismatched, # fetcher still have chance to fetch from mirror update_stamp(ud, self.d) except bb.fetch2.NetworkAccess: raise except BBFetchException as e: if isinstance(e, ChecksumError): logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u) logger.debug(1, str(e)) if os.path.exists(ud.localpath): rename_bad_checksum(ud, e.checksum) elif isinstance(e, NoChecksumError): raise else: logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u) logger.debug(1, str(e)) firsterr = e # Remove any incomplete fetch if not 
verified_stamp: m.clean(ud, self.d) logger.debug(1, "Trying MIRRORS") mirrors = mirror_from_string(self.d.getVar('MIRRORS')) localpath = try_mirrors(self, self.d, ud, mirrors) if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1): if firsterr: logger.error(str(firsterr)) raise FetchError("Unable to fetch URL from any source.", u) update_stamp(ud, self.d) except IOError as e: if e.errno in [os.errno.ESTALE]: logger.error("Stale Error Observed %s." % u) raise ChecksumError("Stale Error Detected") except BBFetchException as e: if isinstance(e, ChecksumError): logger.error("Checksum failure fetching %s" % u) raise finally: if ud.lockfile: bb.utils.unlockfile(lf) def checkstatus(self, urls=None): """ Check all urls exist upstream """ if not urls: urls = self.urls for u in urls: ud = self.ud[u] ud.setup_localpath(self.d) m = ud.method logger.debug(1, "Testing URL %s", u) # First try checking uri, u, from PREMIRRORS mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) ret = try_mirrors(self, self.d, ud, mirrors, True) if not ret: # Next try checking from the original uri, u ret = m.checkstatus(self, ud, self.d) if not ret: # Finally, try checking uri, u, from MIRRORS mirrors = mirror_from_string(self.d.getVar('MIRRORS')) ret = try_mirrors(self, self.d, ud, mirrors, True) if not ret: raise FetchError("URL %s doesn't work" % u, u) def unpack(self, root, urls=None): """ Unpack urls to root """ if not urls: urls = self.urls for u in urls: ud = self.ud[u] ud.setup_localpath(self.d) if ud.lockfile: lf = bb.utils.lockfile(ud.lockfile) ud.method.unpack(ud, root, self.d) if ud.lockfile: bb.utils.unlockfile(lf) def clean(self, urls=None): """ Clean files that the fetcher gets or places """ if not urls: urls = self.urls for url in urls: if url not in self.ud: self.ud[url] = FetchData(url, d) ud = self.ud[url] ud.setup_localpath(self.d) if not ud.localfile and ud.localpath is None: continue if ud.lockfile: lf = bb.utils.lockfile(ud.lockfile) 
ud.method.clean(ud, self.d) if ud.donestamp: bb.utils.remove(ud.donestamp) if ud.lockfile: bb.utils.unlockfile(lf) class FetchConnectionCache(object): """ A class which represents an container for socket connections. """ def __init__(self): self.cache = {} def get_connection_name(self, host, port): return host + ':' + str(port) def add_connection(self, host, port, connection): cn = self.get_connection_name(host, port) if cn not in self.cache: self.cache[cn] = connection def get_connection(self, host, port): connection = None cn = self.get_connection_name(host, port) if cn in self.cache: connection = self.cache[cn] return connection def remove_connection(self, host, port): cn = self.get_connection_name(host, port) if cn in self.cache: self.cache[cn].close() del self.cache[cn] def close_connections(self): for cn in list(self.cache.keys()): self.cache[cn].close() del self.cache[cn] from . import cvs from . import git from . import gitsm from . import gitannex from . import local from . import svn from . import wget from . import ssh from . import sftp from . import s3 from . import perforce from . import bzr from . import hg from . import osc from . import repo from . import clearcase from . import npm methods.append(local.Local()) methods.append(wget.Wget()) methods.append(svn.Svn()) methods.append(git.Git()) methods.append(gitsm.GitSM()) methods.append(gitannex.GitANNEX()) methods.append(cvs.Cvs()) methods.append(ssh.SSH()) methods.append(sftp.SFTP()) methods.append(s3.S3()) methods.append(perforce.Perforce()) methods.append(bzr.Bzr()) methods.append(hg.Hg()) methods.append(osc.Osc()) methods.append(repo.Repo()) methods.append(clearcase.ClearCase()) methods.append(npm.Npm())
36.038111
416
0.579574
import os, re import signal import logging import urllib.request, urllib.parse, urllib.error if 'git' not in urllib.parse.uses_netloc: urllib.parse.uses_netloc.append('git') import operator import collections import subprocess import pickle import errno import bb.persist_data, bb.utils import bb.checksum import bb.process import bb.event __version__ = "2" _checksum_cache = bb.checksum.FileChecksumCache() logger = logging.getLogger("BitBake.Fetcher") class BBFetchException(Exception): def __init__(self, message): self.msg = message Exception.__init__(self, message) def __str__(self): return self.msg class UntrustedUrl(BBFetchException): def __init__(self, url, message=''): if message: msg = message else: msg = "The URL: '%s' is not trusted and cannot be used" % url self.url = url BBFetchException.__init__(self, msg) self.args = (url,) class MalformedUrl(BBFetchException): def __init__(self, url, message=''): if message: msg = message else: msg = "The URL: '%s' is invalid and cannot be interpreted" % url self.url = url BBFetchException.__init__(self, msg) self.args = (url,) class FetchError(BBFetchException): def __init__(self, message, url = None): if url: msg = "Fetcher failure for URL: '%s'. %s" % (url, message) else: msg = "Fetcher failure: %s" % message self.url = url BBFetchException.__init__(self, msg) self.args = (message, url) class ChecksumError(FetchError): def __init__(self, message, url = None, checksum = None): self.checksum = checksum FetchError.__init__(self, message, url) class NoChecksumError(FetchError): class UnpackError(BBFetchException): def __init__(self, message, url): msg = "Unpack failure for URL: '%s'. 
%s" % (url, message) self.url = url BBFetchException.__init__(self, msg) self.args = (message, url) class NoMethodError(BBFetchException): def __init__(self, url): msg = "Could not find a fetcher which supports the URL: '%s'" % url self.url = url BBFetchException.__init__(self, msg) self.args = (url,) class MissingParameterError(BBFetchException): def __init__(self, missing, url): msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) self.url = url self.missing = missing BBFetchException.__init__(self, msg) self.args = (missing, url) class ParameterError(BBFetchException): def __init__(self, message, url): msg = "URL: '%s' has invalid parameters. %s" % (url, message) self.url = url BBFetchException.__init__(self, msg) self.args = (message, url) class NetworkAccess(BBFetchException): def __init__(self, url, cmd): msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) self.url = url self.cmd = cmd BBFetchException.__init__(self, msg) self.args = (url, cmd) class NonLocalMethod(Exception): def __init__(self): Exception.__init__(self) class MissingChecksumEvent(bb.event.Event): def __init__(self, url, md5sum, sha256sum): self.url = url self.checksums = {'md5sum': md5sum, 'sha256sum': sha256sum} bb.event.Event.__init__(self) class URI(object): _relative_schemes = ['file', 'git'] _netloc_forbidden = ['file'] def __init__(self, uri=None): self.scheme = '' self.userinfo = '' self.hostname = '' self.port = None self._path = '' self.params = {} self.query = {} self.relative = False if not uri: return uri, param_str = (uri.split(";", 1) + [None])[:2] urlp = urllib.parse.urlparse(uri) self.scheme = urlp.scheme reparse = 0 if not self.scheme in urllib.parse.uses_netloc: urllib.parse.uses_params.append(self.scheme) reparse = 1 if urlp.scheme in self._netloc_forbidden: uri = re.sub("(?<=:)//(?!/)", "", uri, 1) reparse = 1 if reparse: urlp = 
urllib.parse.urlparse(uri) if urlp.scheme in self._relative_schemes and \ re.compile("^\w+:(?!//)").match(uri): self.relative = True if not self.relative: self.hostname = urlp.hostname or '' self.port = urlp.port self.userinfo += urlp.username or '' if urlp.password: self.userinfo += ':%s' % urlp.password self.path = urllib.parse.unquote(urlp.path) if param_str: self.params = self._param_str_split(param_str, ";") if urlp.query: self.query = self._param_str_split(urlp.query, "&") def __str__(self): userinfo = self.userinfo if userinfo: userinfo += '@' return "%s:%s%s%s%s%s%s" % ( self.scheme, '' if self.relative else '//', userinfo, self.hostport, self.path_quoted, self._query_str(), self._param_str()) def _param_str(self): return ( ''.join([';', self._param_str_join(self.params, ";")]) if self.params else '') def _query_str(self): return ( ''.join(['?', self._param_str_join(self.query, "&")]) if self.query else '') def _param_str_split(self, string, elmdelim, kvdelim="="): ret = collections.OrderedDict() for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]: ret[k] = v return ret def _param_str_join(self, dict_, elmdelim, kvdelim="="): return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()]) @property def hostport(self): if not self.port: return self.hostname return "%s:%d" % (self.hostname, self.port) @property def path_quoted(self): return urllib.parse.quote(self.path) @path_quoted.setter def path_quoted(self, path): self.path = urllib.parse.unquote(path) @property def path(self): return self._path @path.setter def path(self, path): self._path = path if not path or re.compile("^/").match(path): self.relative = False else: self.relative = True @property def username(self): if self.userinfo: return (self.userinfo.split(":", 1))[0] return '' @username.setter def username(self, username): password = self.password self.userinfo = username if password: self.userinfo += ":%s" % password @property def password(self): if self.userinfo and ":" in 
self.userinfo: return (self.userinfo.split(":", 1))[1] return '' @password.setter def password(self, password): self.userinfo = "%s:%s" % (self.username, password) def decodeurl(url): m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url) if not m: raise MalformedUrl(url) type = m.group('type') location = m.group('location') if not location: raise MalformedUrl(url) user = m.group('user') parm = m.group('parm') locidx = location.find('/') if locidx != -1 and type.lower() != 'file': host = location[:locidx] path = location[locidx:] elif type.lower() == 'file': host = "" path = location else: host = location path = "" if user: m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user) if m: user = m.group('user') pswd = m.group('pswd') else: user = '' pswd = '' p = collections.OrderedDict() if parm: for s in parm.split(';'): if s: if not '=' in s: raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s)) s1, s2 = s.split('=') p[s1] = s2 return type, host, urllib.parse.unquote(path), user, pswd, p def encodeurl(decoded): type, host, path, user, pswd, p = decoded if not type: raise MissingParameterError('type', "encoded from the data %s" % str(decoded)) url = '%s://' % type if user and type != "file": url += "%s" % user if pswd: url += ":%s" % pswd url += "@" if host and type != "file": url += "%s" % host if path: while '//' in path: path = path.replace("//", "/") url += "%s" % urllib.parse.quote(path) if p: for parm in p: url += ";%s=%s" % (parm, p[parm]) return url def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None): if not ud.url or not uri_find or not uri_replace: logger.error("uri_replace: passed an undefined value, not replacing") return None uri_decoded = list(decodeurl(ud.url)) uri_find_decoded = list(decodeurl(uri_find)) uri_replace_decoded = list(decodeurl(uri_replace)) logger.debug(2, "For url %s comparing %s to %s" % 
(uri_decoded, uri_find_decoded, uri_replace_decoded)) result_decoded = ['', '', '', '', '', {}] for loc, i in enumerate(uri_find_decoded): result_decoded[loc] = uri_decoded[loc] regexp = i if loc == 0 and regexp and not regexp.endswith("$"): regexp += "$" if loc == 5: if i: for k in uri_replace_decoded[loc]: if uri_decoded[loc][k] != uri_replace_decoded[loc][k]: return None for k in uri_replace_decoded[loc]: for l in replacements: uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l]) result_decoded[loc][k] = uri_replace_decoded[loc][k] elif (re.match(regexp, uri_decoded[loc])): if not uri_replace_decoded[loc]: result_decoded[loc] = "" else: for k in replacements: uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k]) result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1) if loc == 2: basename = None if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball: basename = os.path.basename(mirrortarball) uri_decoded[5] = {} elif ud.localpath and ud.method.supports_checksum(ud): basename = os.path.basename(ud.localpath) if basename and not result_decoded[loc].endswith(basename): result_decoded[loc] = os.path.join(result_decoded[loc], basename) else: return None result = encodeurl(result_decoded) if result == ud.url: return None logger.debug(2, "For url %s returning %s" % (ud.url, result)) return result methods = [] urldata_cache = {} saved_headrevs = {} def fetcher_init(d): srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear" if srcrev_policy == "cache": logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy) elif srcrev_policy == "clear": logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy) revs = bb.persist_data.persist('BB_URI_HEADREVS', d) try: bb.fetch2.saved_headrevs = revs.items() except: pass revs.clear() else: raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy) _checksum_cache.init_cache(d) for m 
in methods: if hasattr(m, "init"): m.init(d) def fetcher_parse_save(): _checksum_cache.save_extras() def fetcher_parse_done(): _checksum_cache.save_merge() def fetcher_compare_revisions(): data = bb.persist_data.persist('BB_URI_HEADREVS', d).items() data2 = bb.fetch2.saved_headrevs changed = False for key in data: if key not in data2 or data2[key] != data[key]: logger.debug(1, "%s changed", key) changed = True return True else: logger.debug(2, "%s did not change", key) return False def mirror_from_string(data): mirrors = (data or "").replace('\\n',' ').split() if len(mirrors) % 2 != 0: bb.warn('Invalid mirror data %s, should have paired members.' % data) return list(zip(*[iter(mirrors)]*2)) def verify_checksum(ud, d, precomputed={}): _MD5_KEY = "md5" _SHA256_KEY = "sha256" if ud.ignore_checksums or not ud.method.supports_checksum(ud): return {} if _MD5_KEY in precomputed: md5data = precomputed[_MD5_KEY] else: md5data = bb.utils.md5_file(ud.localpath) if _SHA256_KEY in precomputed: sha256data = precomputed[_SHA256_KEY] else: sha256data = bb.utils.sha256_file(ud.localpath) if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected: strict = d.getVar("BB_STRICT_CHECKSUM") or "0" if strict == "1": logger.error('No checksum specified for %s, please add at least one to the recipe:\n' 'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' % (ud.localpath, ud.md5_name, md5data, ud.sha256_name, sha256data)) raise NoChecksumError('Missing SRC_URI checksum', ud.url) bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d) if strict == "ignore": return { _MD5_KEY: md5data, _SHA256_KEY: sha256data } logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n' 'SRC_URI[%s] = "%s"', ud.localpath, ud.md5_name, md5data) logger.warning('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n' 'SRC_URI[%s] = "%s"', ud.localpath, ud.sha256_name, sha256data) msg = "" mismatch = False if ud.md5_expected and 
ud.md5_expected != md5data: msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected) mismatch = True; if ud.sha256_expected and ud.sha256_expected != sha256data: msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected) mismatch = True; if mismatch: msg = msg + '\nIf this change is expected (e.g. you have upgraded to a new version without updating the checksums) then you can use these lines within the recipe:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"\nOtherwise you should retry the download and/or check with upstream to determine if the file has become corrupted or otherwise unexpectedly modified.\n' % (ud.md5_name, md5data, ud.sha256_name, sha256data) if len(msg): raise ChecksumError('Checksum mismatch!%s' % msg, ud.url, md5data) return { _MD5_KEY: md5data, _SHA256_KEY: sha256data } def verify_donestamp(ud, d, origud=None): if not ud.needdonestamp or (origud and not origud.needdonestamp): return True if not os.path.exists(ud.donestamp): return False if (not ud.method.supports_checksum(ud) or (origud and not origud.method.supports_checksum(origud))): return True if not os.path.exists(ud.localpath): bb.utils.remove(ud.donestamp) return False precomputed_checksums = {} if (os.path.isdir(ud.localpath) or os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)): try: with open(ud.donestamp, "rb") as cachefile: pickled = pickle.Unpickler(cachefile) precomputed_checksums.update(pickled.load()) except Exception as e: if not isinstance(e, EOFError): logger.warning("Couldn't load checksums from donestamp %s: %s " "(msg: %s)" % (ud.donestamp, type(e).__name__, str(e))) try: checksums = verify_checksum(ud, d, precomputed_checksums) if checksums != precomputed_checksums: with open(ud.donestamp, "wb") as cachefile: p = pickle.Pickler(cachefile, 2) p.dump(checksums) return True except ChecksumError as e: logger.warning("Checksum mismatch for local 
file %s\n" "Cleaning and trying again." % ud.localpath) if os.path.exists(ud.localpath): rename_bad_checksum(ud, e.checksum) bb.utils.remove(ud.donestamp) return False def update_stamp(ud, d): if not ud.needdonestamp: return if os.path.exists(ud.donestamp): try: os.utime(ud.donestamp, None) except: pass else: try: checksums = verify_checksum(ud, d) # Store the checksums for later re-verification against the recipe with open(ud.donestamp, "wb") as cachefile: p = pickle.Pickler(cachefile, 2) p.dump(checksums) except ChecksumError as e: # Checksums failed to verify, trigger re-download and remove the # incorrect stamp file. logger.warning("Checksum mismatch for local file %s\n" "Cleaning and trying again." % ud.localpath) if os.path.exists(ud.localpath): rename_bad_checksum(ud, e.checksum) bb.utils.remove(ud.donestamp) raise def subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. # SIGPIPE errors are known issues with gzip/bash signal.signal(signal.SIGPIPE, signal.SIG_DFL) def get_autorev(d): # only not cache src rev in autorev case if d.getVar('BB_SRCREV_POLICY') != "cache": d.setVar('BB_DONT_CACHE', '1') return "AUTOINC" def get_srcrev(d, method_name='sortable_revision'): scms = [] fetcher = Fetch(d.getVar('SRC_URI').split(), d) urldata = fetcher.ud for u in urldata: if urldata[u].method.supports_srcrev(): scms.append(u) if len(scms) == 0: raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI") if len(scms) == 1 and len(urldata[scms[0]].names) == 1: autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0]) if len(rev) > 10: rev = rev[:10] if autoinc: return "AUTOINC+" + rev return rev # # Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT # format = d.getVar('SRCREV_FORMAT') if not format: raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.") name_to_rev = {} seenautoinc = False for 
scm in scms: ud = urldata[scm] for name in ud.names: autoinc, rev = getattr(ud.method, method_name)(ud, d, name) seenautoinc = seenautoinc or autoinc if len(rev) > 10: rev = rev[:10] name_to_rev[name] = rev # Replace names by revisions in the SRCREV_FORMAT string. The approach used # here can handle names being prefixes of other names and names appearing # as substrings in revisions (in which case the name should not be # expanded). The '|' regular expression operator tries matches from left to # right, so we need to sort the names with the longest ones first. names_descending_len = sorted(name_to_rev, key=len, reverse=True) name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len) format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format) if seenautoinc: format = "AUTOINC+" + format return format def localpath(url, d): fetcher = bb.fetch2.Fetch([url], d) return fetcher.localpath(url) def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None): # Need to export PATH as binary could be in metadata paths # rather than host provided # Also include some other variables. # FIXME: Should really include all export varaiables? exportvars = ['HOME', 'PATH', 'HTTP_PROXY', 'http_proxy', 'HTTPS_PROXY', 'https_proxy', 'FTP_PROXY', 'ftp_proxy', 'FTPS_PROXY', 'ftps_proxy', 'NO_PROXY', 'no_proxy', 'ALL_PROXY', 'all_proxy', 'GIT_PROXY_COMMAND', 'GIT_SSL_CAINFO', 'GIT_SMART_HTTP', 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', 'SOCKS5_USER', 'SOCKS5_PASSWD', 'DBUS_SESSION_BUS_ADDRESS', 'P4CONFIG'] if not cleanup: cleanup = [] # If PATH contains WORKDIR which contains PV which contains SRCPV we # can end up in circular recursion here so give the option of breaking it # in a data store copy. 
try: d.getVar("PV") except bb.data_smart.ExpansionError: d = bb.data.createCopy(d) d.setVar("PV", "fetcheravoidrecurse") origenv = d.getVar("BB_ORIGENV", False) for var in exportvars: val = d.getVar(var) or (origenv and origenv.getVar(var)) if val: cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd) logger.debug(1, "Running %s", cmd) success = False error_message = "" try: (output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir) success = True except bb.process.NotFoundError as e: error_message = "Fetch command %s" % (e.command) except bb.process.ExecutionError as e: if e.stdout: output = "output:\n%s\n%s" % (e.stdout, e.stderr) elif e.stderr: output = "output:\n%s" % e.stderr else: output = "no output" error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output) except bb.process.CmdError as e: error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg) if not success: for f in cleanup: try: bb.utils.remove(f, True) except OSError: pass raise FetchError(error_message) return output def check_network_access(d, info, url): if d.getVar("BB_NO_NETWORK") == "1": raise NetworkAccess(url, info) elif not trusted_network(d, url): raise UntrustedUrl(url, info) else: logger.debug(1, "Fetcher accessed the network with the command %s" % info) def build_mirroruris(origud, mirrors, ld): uris = [] uds = [] replacements = {} replacements["TYPE"] = origud.type replacements["HOST"] = origud.host replacements["PATH"] = origud.path replacements["BASENAME"] = origud.path.split("/")[-1] replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.') def adduri(ud, uris, uds, mirrors, tarballs): for line in mirrors: try: (find, replace) = line except ValueError: continue for tarball in tarballs: newuri = uri_replace(ud, find, replace, replacements, ld, tarball) if not newuri or newuri in uris or newuri == origud.url: continue if not trusted_network(ld, 
newuri): logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri)) continue # Create a local copy of the mirrors minus the current line # this will prevent us from recursively processing the same line # as well as indirect recursion A -> B -> C -> A localmirrors = list(mirrors) localmirrors.remove(line) try: newud = FetchData(newuri, ld) newud.setup_localpath(ld) except bb.fetch2.BBFetchException as e: logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) logger.debug(1, str(e)) try: # setup_localpath of file:// urls may fail, we should still see # if mirrors of the url exist adduri(newud, uris, uds, localmirrors, tarballs) except UnboundLocalError: pass continue uris.append(newuri) uds.append(newud) adduri(newud, uris, uds, localmirrors, tarballs) adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None]) return uris, uds def rename_bad_checksum(ud, suffix): if ud.localpath is None: return new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix) bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath)) bb.utils.movefile(ud.localpath, new_localpath) def try_mirror_url(fetch, origud, ud, ld, check = False): # Return of None or a value means we're finished if ud.lockfile and ud.lockfile != origud.lockfile: lf = bb.utils.lockfile(ud.lockfile) try: if check: found = ud.method.checkstatus(fetch, ud, ld) if found: return found return False if not verify_donestamp(ud, ld, origud) or ud.method.need_update(ud, ld): ud.method.download(ud, ld) if hasattr(ud.method,"build_mirror_data"): ud.method.build_mirror_data(ud, ld) if not ud.localpath or not os.path.exists(ud.localpath): return False if ud.localpath == origud.localpath: return ud.localpath dldir = ld.getVar("DL_DIR") if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): if ud.donestamp: 
bb.utils.mkdirhier(os.path.dirname(ud.donestamp)) open(ud.donestamp, 'w').close() dest = os.path.join(dldir, os.path.basename(ud.localpath)) if not os.path.exists(dest): try: os.symlink(ud.localpath, dest) except FileExistsError: pass if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld): origud.method.download(origud, ld) if hasattr(origud.method, "build_mirror_data"): origud.method.build_mirror_data(origud, ld) return origud.localpath if not os.path.exists(origud.localpath): if os.path.islink(origud.localpath): os.unlink(origud.localpath) try: os.symlink(ud.localpath, origud.localpath) except FileExistsError: pass update_stamp(origud, ld) return ud.localpath except bb.fetch2.NetworkAccess: raise except IOError as e: if e.errno in [os.errno.ESTALE]: logger.warning("Stale Error Observed %s." % ud.url) return False raise except bb.fetch2.BBFetchException as e: if isinstance(e, ChecksumError): logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." 
% (ud.url, origud.url)) logger.warning(str(e)) if os.path.exists(ud.localpath): rename_bad_checksum(ud, e.checksum) elif isinstance(e, NoChecksumError): raise else: logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url)) logger.debug(1, str(e)) try: ud.method.clean(ud, ld) except UnboundLocalError: pass return False finally: if ud.lockfile and ud.lockfile != origud.lockfile: bb.utils.unlockfile(lf) def try_mirrors(fetch, d, origud, mirrors, check = False): ld = d.createCopy() uris, uds = build_mirroruris(origud, mirrors, ld) for index, uri in enumerate(uris): ret = try_mirror_url(fetch, origud, uds[index], ld, check) if ret != False: return ret return None def trusted_network(d, url): if d.getVar('BB_NO_NETWORK') == "1": return True pkgname = d.expand(d.getVar('PN', False)) trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False) if not trusted_hosts: trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS') if not trusted_hosts: return True scheme, network, path, user, passwd, param = decodeurl(url) if not network: return True network = network.split(':')[0] network = network.lower() for host in trusted_hosts.split(" "): host = host.lower() if host.startswith("*.") and ("." + network).endswith(host[1:]): return True if host == network: return True return False def srcrev_internal_helper(ud, d, name): srcrev = None pn = d.getVar("PN") attempts = [] if name != '' and pn: attempts.append("SRCREV_%s_pn-%s" % (name, pn)) if name != '': attempts.append("SRCREV_%s" % name) if pn: attempts.append("SRCREV_pn-%s" % pn) attempts.append("SRCREV") for a in attempts: srcrev = d.getVar(a) if srcrev and srcrev != "INVALID": break if 'rev' in ud.parm and 'tag' in ud.parm: raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." 
% (ud.url)) if 'rev' in ud.parm or 'tag' in ud.parm: if 'rev' in ud.parm: parmrev = ud.parm['rev'] else: parmrev = ud.parm['tag'] if srcrev == "INVALID" or not srcrev: return parmrev if srcrev != parmrev: raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev)) return parmrev if srcrev == "INVALID" or not srcrev: raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url) if srcrev == "AUTOINC": srcrev = ud.method.latest_revision(ud, d, name) return srcrev def get_checksum_file_list(d): fetch = Fetch([], d, cache = False, localonly = True) dl_dir = d.getVar('DL_DIR') filelist = [] for u in fetch.urls: ud = fetch.ud[u] if ud and isinstance(ud.method, local.Local): paths = ud.method.localpaths(ud, d) for f in paths: pth = ud.decodedurl if '*' in pth: f = os.path.join(os.path.abspath(f), pth) if f.startswith(dl_dir): if os.path.exists(f): bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f))) else: bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f))) filelist.append(f + ":" + str(os.path.exists(f))) return " ".join(filelist) def get_file_checksums(filelist, pn): return _checksum_cache.get_checksums(filelist, pn) class FetchData(object): def __init__(self, url, d, localonly = False): self.donestamp = None self.needdonestamp = True self.localfile = "" self.localpath = None self.lockfile = None self.mirrortarballs = [] self.basename = None self.basepath = None (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url)) self.date = self.getSRCDate(d) self.url = url if not self.user and "user" in self.parm: self.user = self.parm["user"] if not self.pswd and "pswd" in self.parm: self.pswd = self.parm["pswd"] self.setup = False if "name" 
in self.parm: self.md5_name = "%s.md5sum" % self.parm["name"] self.sha256_name = "%s.sha256sum" % self.parm["name"] else: self.md5_name = "md5sum" self.sha256_name = "sha256sum" if self.md5_name in self.parm: self.md5_expected = self.parm[self.md5_name] elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]: self.md5_expected = None else: self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name) if self.sha256_name in self.parm: self.sha256_expected = self.parm[self.sha256_name] elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]: self.sha256_expected = None else: self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name) self.ignore_checksums = False self.names = self.parm.get("name",'default').split(',') self.method = None for m in methods: if m.supports(self, d): self.method = m break if not self.method: raise NoMethodError(url) if localonly and not isinstance(self.method, local.Local): raise NonLocalMethod() if self.parm.get("proto", None) and "protocol" not in self.parm: logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN')) self.parm["protocol"] = self.parm.get("proto", None) if hasattr(self.method, "urldata_init"): self.method.urldata_init(self, d) if "localpath" in self.parm: self.localpath = self.parm["localpath"] self.basename = os.path.basename(self.localpath) elif self.localfile: self.localpath = self.method.localpath(self, d) dldir = d.getVar("DL_DIR") if not self.needdonestamp: return if self.localpath and self.localpath.startswith(dldir): basepath = self.localpath elif self.localpath: basepath = dldir + os.sep + os.path.basename(self.localpath) elif self.basepath or self.basename: basepath = dldir + os.sep + (self.basepath or self.basename) else: bb.fatal("Can't determine lock path for url %s" % url) self.donestamp = basepath + '.done' self.lockfile = basepath + '.lock' def setup_revisions(self, d): self.revisions = {} for name in self.names: self.revisions[name] = 
srcrev_internal_helper(self, d, name) # add compatibility code for non name specified case if len(self.names) == 1: self.revision = self.revisions[self.names[0]] def setup_localpath(self, d): if not self.localpath: self.localpath = self.method.localpath(self, d) def getSRCDate(self, d): if "srcdate" in self.parm: return self.parm['srcdate'] pn = d.getVar("PN") if pn: return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE") return d.getVar("SRCDATE") or d.getVar("DATE") class FetchMethod(object): def __init__(self, urls=None): self.urls = [] def supports(self, urldata, d): return 0 def localpath(self, urldata, d): return os.path.join(d.getVar("DL_DIR"), urldata.localfile) def supports_checksum(self, urldata): # We cannot compute checksums for directories if os.path.isdir(urldata.localpath) == True: return False if urldata.localpath.find("*") != -1: return False return True def recommends_checksum(self, urldata): return False def _strip_leading_slashes(self, relpath): while os.path.isabs(relpath): relpath = relpath[1:] return relpath def setUrls(self, urls): self.__urls = urls def getUrls(self): return self.__urls urls = property(getUrls, setUrls, None, "Urls property") def need_update(self, ud, d): if os.path.exists(ud.localpath): return False return True def supports_srcrev(self): return False def download(self, urldata, d): raise NoMethodError(url) def unpack(self, urldata, rootdir, data): iterate = False file = urldata.localpath # Localpath can't deal with 'dir/*' entries, so it converts them to '.', if urldata.basename == '*' and file.endswith('/.'): file = '%s/%s' % (file.rstrip('/.'), urldata.path) try: unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True) except ValueError as exc: bb.fatal("Invalid value for 'unpack' parameter for %s: %s" % (file, urldata.parm.get('unpack'))) base, ext = os.path.splitext(file) if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']: efile = os.path.join(rootdir, os.path.basename(base)) else: efile = file 
cmd = None if unpack: if file.endswith('.tar'): cmd = 'tar x --no-same-owner -f %s' % file elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'): cmd = 'tar xz --no-same-owner -f %s' % file elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'): cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'): cmd = 'gzip -dc %s > %s' % (file, efile) elif file.endswith('.bz2'): cmd = 'bzip2 -dc %s > %s' % (file, efile) elif file.endswith('.tar.xz'): cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file elif file.endswith('.xz'): cmd = 'xz -dc %s > %s' % (file, efile) elif file.endswith('.tar.lz'): cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file elif file.endswith('.lz'): cmd = 'lzip -dc %s > %s' % (file, efile) elif file.endswith('.tar.7z'): cmd = '7z x -so %s | tar x --no-same-owner -f -' % file elif file.endswith('.7z'): cmd = '7za x -y %s 1>/dev/null' % file elif file.endswith('.zip') or file.endswith('.jar'): try: dos = bb.utils.to_boolean(urldata.parm.get('dos'), False) except ValueError as exc: bb.fatal("Invalid value for 'dos' parameter for %s: %s" % (file, urldata.parm.get('dos'))) cmd = 'unzip -q -o' if dos: cmd = '%s -a' % cmd cmd = "%s '%s'" % (cmd, file) elif file.endswith('.rpm') or file.endswith('.srpm'): if 'extract' in urldata.parm: unpack_file = urldata.parm.get('extract') cmd = 'rpm2cpio.sh %s | cpio -id %s' % (file, unpack_file) iterate = True iterate_file = unpack_file else: cmd = 'rpm2cpio.sh %s | cpio -id' % (file) elif file.endswith('.deb') or file.endswith('.ipk'): output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True) datafile = None if output: for line in output.decode().splitlines(): if line.startswith('data.tar.'): datafile = line break else: raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar.* file", urldata.url) else: raise UnpackError("Unable 
to unpack deb/ipk package - could not list contents", urldata.url) cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile) if 'subdir' in urldata.parm: subdir = urldata.parm.get('subdir') if os.path.isabs(subdir): if not os.path.realpath(subdir).startswith(os.path.realpath(rootdir)): raise UnpackError("subdir argument isn't a subdirectory of unpack root %s" % rootdir, urldata.url) unpackdir = subdir else: unpackdir = os.path.join(rootdir, subdir) bb.utils.mkdirhier(unpackdir) else: unpackdir = rootdir if not unpack or not cmd: # If file == dest, then avoid any copies, as we already put the file into dest! dest = os.path.join(unpackdir, os.path.basename(file)) if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)): destdir = '.' # For file:// entries all intermediate dirs in path must be created at destination if urldata.type == "file": # Trailing '/' does a copying to wrong place urlpath = urldata.path.rstrip('/') # Want files places relative to cwd so no leading '/' urlpath = urlpath.lstrip('/') if urlpath.find("/") != -1: destdir = urlpath.rsplit("/", 1)[0] + '/' bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir)) cmd = 'cp -fpPRH %s %s' % (file, destdir) if not cmd: return path = data.getVar('PATH') if path: cmd = "PATH=\"%s\" %s" % (path, cmd) bb.note("Unpacking %s to %s/" % (file, unpackdir)) ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=unpackdir) if ret != 0: raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url) if iterate is True: iterate_urldata = urldata iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file) self.unpack(urldata, rootdir, data) return def clean(self, urldata, d): bb.utils.remove(urldata.localpath) def try_premirror(self, urldata, d): return True def checkstatus(self, fetch, urldata, d): logger.info("URL %s could not be checked for status since no method exists.", url) return True def 
latest_revision(self, ud, d, name): if not hasattr(self, "_latest_revision"): raise ParameterError("The fetcher for this URL does not support _latest_revision", url) revs = bb.persist_data.persist('BB_URI_HEADREVS', d) key = self.generate_revision_key(ud, d, name) try: return revs[key] except KeyError: revs[key] = rev = self._latest_revision(ud, d, name) return rev def sortable_revision(self, ud, d, name): latest_rev = self._build_revision(ud, d, name) return True, str(latest_rev) def generate_revision_key(self, ud, d, name): key = self._revision_key(ud, d, name) return "%s-%s" % (key, d.getVar("PN") or "") def latest_versionstring(self, ud, d): return ('', '') class Fetch(object): def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None): if localonly and cache: raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time") if len(urls) == 0: urls = d.getVar("SRC_URI").split() self.urls = urls self.d = d self.ud = {} self.connection_cache = connection_cache fn = d.getVar('FILE') mc = d.getVar('__BBMULTICONFIG') or "" if cache and fn and mc + fn in urldata_cache: self.ud = urldata_cache[mc + fn] for url in urls: if url not in self.ud: try: self.ud[url] = FetchData(url, d, localonly) except NonLocalMethod: if localonly: self.ud[url] = None pass if fn and cache: urldata_cache[mc + fn] = self.ud def localpath(self, url): if url not in self.urls: self.ud[url] = FetchData(url, self.d) self.ud[url].setup_localpath(self.d) return self.d.expand(self.ud[url].localpath) def localpaths(self): local = [] for u in self.urls: ud = self.ud[u] ud.setup_localpath(self.d) local.append(ud.localpath) return local def download(self, urls=None): if not urls: urls = self.urls network = self.d.getVar("BB_NO_NETWORK") premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1") for u in urls: ud = self.ud[u] ud.setup_localpath(self.d) m = ud.method localpath = "" if ud.lockfile: lf = bb.utils.lockfile(ud.lockfile) try: 
self.d.setVar("BB_NO_NETWORK", network) if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d): localpath = ud.localpath elif m.try_premirror(ud, self.d): logger.debug(1, "Trying PREMIRRORS") mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) localpath = try_mirrors(self, self.d, ud, mirrors, False) if localpath: try: # early checksum verification so that if the checksum of the premirror # contents mismatch the fetcher can still try upstream and mirrors update_stamp(ud, self.d) except ChecksumError as e: logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u) logger.debug(1, str(e)) localpath = "" if premirroronly: self.d.setVar("BB_NO_NETWORK", "1") firsterr = None verified_stamp = verify_donestamp(ud, self.d) if not localpath and (not verified_stamp or m.need_update(ud, self.d)): try: if not trusted_network(self.d, ud.url): raise UntrustedUrl(ud.url) logger.debug(1, "Trying Upstream") m.download(ud, self.d) if hasattr(m, "build_mirror_data"): m.build_mirror_data(ud, self.d) localpath = ud.localpath # early checksum verify, so that if checksum mismatched, # fetcher still have chance to fetch from mirror update_stamp(ud, self.d) except bb.fetch2.NetworkAccess: raise except BBFetchException as e: if isinstance(e, ChecksumError): logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u) logger.debug(1, str(e)) if os.path.exists(ud.localpath): rename_bad_checksum(ud, e.checksum) elif isinstance(e, NoChecksumError): raise else: logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u) logger.debug(1, str(e)) firsterr = e # Remove any incomplete fetch if not verified_stamp: m.clean(ud, self.d) logger.debug(1, "Trying MIRRORS") mirrors = mirror_from_string(self.d.getVar('MIRRORS')) localpath = try_mirrors(self, self.d, ud, mirrors) if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1): if 
firsterr: logger.error(str(firsterr)) raise FetchError("Unable to fetch URL from any source.", u) update_stamp(ud, self.d) except IOError as e: if e.errno in [os.errno.ESTALE]: logger.error("Stale Error Observed %s." % u) raise ChecksumError("Stale Error Detected") except BBFetchException as e: if isinstance(e, ChecksumError): logger.error("Checksum failure fetching %s" % u) raise finally: if ud.lockfile: bb.utils.unlockfile(lf) def checkstatus(self, urls=None): if not urls: urls = self.urls for u in urls: ud = self.ud[u] ud.setup_localpath(self.d) m = ud.method logger.debug(1, "Testing URL %s", u) # First try checking uri, u, from PREMIRRORS mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) ret = try_mirrors(self, self.d, ud, mirrors, True) if not ret: # Next try checking from the original uri, u ret = m.checkstatus(self, ud, self.d) if not ret: # Finally, try checking uri, u, from MIRRORS mirrors = mirror_from_string(self.d.getVar('MIRRORS')) ret = try_mirrors(self, self.d, ud, mirrors, True) if not ret: raise FetchError("URL %s doesn't work" % u, u) def unpack(self, root, urls=None): if not urls: urls = self.urls for u in urls: ud = self.ud[u] ud.setup_localpath(self.d) if ud.lockfile: lf = bb.utils.lockfile(ud.lockfile) ud.method.unpack(ud, root, self.d) if ud.lockfile: bb.utils.unlockfile(lf) def clean(self, urls=None): if not urls: urls = self.urls for url in urls: if url not in self.ud: self.ud[url] = FetchData(url, d) ud = self.ud[url] ud.setup_localpath(self.d) if not ud.localfile and ud.localpath is None: continue if ud.lockfile: lf = bb.utils.lockfile(ud.lockfile) ud.method.clean(ud, self.d) if ud.donestamp: bb.utils.remove(ud.donestamp) if ud.lockfile: bb.utils.unlockfile(lf) class FetchConnectionCache(object): def __init__(self): self.cache = {} def get_connection_name(self, host, port): return host + ':' + str(port) def add_connection(self, host, port, connection): cn = self.get_connection_name(host, port) if cn not in self.cache: 
self.cache[cn] = connection def get_connection(self, host, port): connection = None cn = self.get_connection_name(host, port) if cn in self.cache: connection = self.cache[cn] return connection def remove_connection(self, host, port): cn = self.get_connection_name(host, port) if cn in self.cache: self.cache[cn].close() del self.cache[cn] def close_connections(self): for cn in list(self.cache.keys()): self.cache[cn].close() del self.cache[cn] from . import cvs from . import git from . import gitsm from . import gitannex from . import local from . import svn from . import wget from . import ssh from . import sftp from . import s3 from . import perforce from . import bzr from . import hg from . import osc from . import repo from . import clearcase from . import npm methods.append(local.Local()) methods.append(wget.Wget()) methods.append(svn.Svn()) methods.append(git.Git()) methods.append(gitsm.GitSM()) methods.append(gitannex.GitANNEX()) methods.append(cvs.Cvs()) methods.append(ssh.SSH()) methods.append(sftp.SFTP()) methods.append(s3.S3()) methods.append(perforce.Perforce()) methods.append(bzr.Bzr()) methods.append(hg.Hg()) methods.append(osc.Osc()) methods.append(repo.Repo()) methods.append(clearcase.ClearCase()) methods.append(npm.Npm())
true
true
f70f1b99f377edef719842ceaad9281b62f33dae
1,801
py
Python
src/summ/rank_sent.py
yumoxu/querysum
924c3a0789a50d82f23bcc8faf95407739c0b171
[ "MIT" ]
18
2020-11-20T03:11:42.000Z
2021-11-02T10:42:15.000Z
src/summ/rank_sent.py
yumoxu/querysum
924c3a0789a50d82f23bcc8faf95407739c0b171
[ "MIT" ]
15
2020-12-28T16:41:12.000Z
2021-12-16T16:56:24.000Z
src/summ/rank_sent.py
yumoxu/querysum
924c3a0789a50d82f23bcc8faf95407739c0b171
[ "MIT" ]
2
2021-01-05T13:24:43.000Z
2021-06-07T10:54:19.000Z
# -*- coding: utf-8 -*- from os.path import dirname, abspath import sys sys.path.insert(0, dirname(dirname(abspath(__file__)))) import utils.config_loader as config import utils.config_loader as config import utils.tools as tools import torch import shutil versions = ['sl', 'alpha'] para_org = True for vv in versions: if config.meta_model_name.endswith(vv): para_org = False def sort_sid2score(sid2score): sid_score_list = sorted(sid2score.items(), key=lambda item: item[1], reverse=True) return sid_score_list def get_rank_records(sid_score_list, sents=None, flat_sents=False): """ optional: display sentence in record :param sid_score_list: :param sents: :param flat_sents: if True, iterate sent directly; if False, need use sid to get doc_idx and sent_idx. :return: """ rank_records = [] for sid, score in sid_score_list: items = [sid, str(score)] if sents: if flat_sents: sent = sents[len(rank_records)] # the current point else: doc_idx, sent_idx = tools.get_sent_info(sid) sent = sents[doc_idx][sent_idx] items.append(sent) record = '\t'.join(items) rank_records.append(record) return rank_records def dump_rank_records(rank_records, out_fp, with_rank_idx): """ each line is ranking sid score sid: config.SEP.join((doc_idx, para_idx, sent_idx)) :param sid_score_list: :param out_fp: :return: """ lines = rank_records if with_rank_idx: lines = ['\t'.join((str(rank), record)) for rank, record in enumerate(rank_records)] with open(out_fp, mode='a', encoding='utf-8') as f: f.write('\n'.join(lines)) return len(lines)
26.880597
106
0.642976
from os.path import dirname, abspath import sys sys.path.insert(0, dirname(dirname(abspath(__file__)))) import utils.config_loader as config import utils.config_loader as config import utils.tools as tools import torch import shutil versions = ['sl', 'alpha'] para_org = True for vv in versions: if config.meta_model_name.endswith(vv): para_org = False def sort_sid2score(sid2score): sid_score_list = sorted(sid2score.items(), key=lambda item: item[1], reverse=True) return sid_score_list def get_rank_records(sid_score_list, sents=None, flat_sents=False): rank_records = [] for sid, score in sid_score_list: items = [sid, str(score)] if sents: if flat_sents: sent = sents[len(rank_records)] else: doc_idx, sent_idx = tools.get_sent_info(sid) sent = sents[doc_idx][sent_idx] items.append(sent) record = '\t'.join(items) rank_records.append(record) return rank_records def dump_rank_records(rank_records, out_fp, with_rank_idx): lines = rank_records if with_rank_idx: lines = ['\t'.join((str(rank), record)) for rank, record in enumerate(rank_records)] with open(out_fp, mode='a', encoding='utf-8') as f: f.write('\n'.join(lines)) return len(lines)
true
true
f70f1c03d635f43dee9281507f189ddd0c443a45
2,237
py
Python
mars/web/dashboard.py
HarshCasper/mars
4c12c968414d666c7a10f497bc22de90376b1932
[ "Apache-2.0" ]
2
2019-03-29T04:11:10.000Z
2020-07-08T10:19:54.000Z
mars/web/dashboard.py
HarshCasper/mars
4c12c968414d666c7a10f497bc22de90376b1932
[ "Apache-2.0" ]
null
null
null
mars/web/dashboard.py
HarshCasper/mars
4c12c968414d666c7a10f497bc22de90376b1932
[ "Apache-2.0" ]
null
null
null
# Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .server import MarsWebAPI, MarsRequestHandler, register_web_handler, get_jinja_env _jinja_env = get_jinja_env() class DashboardHandler(MarsRequestHandler): def get(self): web_api = MarsWebAPI(self._scheduler) scheduler_infos = web_api.get_schedulers_info() worker_infos = web_api.get_workers_meta() scheduler_summary = { 'count': len(scheduler_infos), 'cpu_used': sum(si['cpu_used'] for si in scheduler_infos.values()), 'cpu_total': sum(si['cpu_total'] for si in scheduler_infos.values()), 'memory_used': sum(si['memory_used'] for si in scheduler_infos.values()), 'memory_total': sum(si['memory_total'] for si in scheduler_infos.values()), 'git_branches': set(si['git_info'] for si in scheduler_infos.values()), } worker_summary = { 'count': len(worker_infos), 'cpu_used': sum(wi['hardware']['cpu_used'] for wi in worker_infos.values()), 'cpu_total': sum(wi['hardware']['cpu_total'] for wi in worker_infos.values()), 'memory_used': sum(wi['hardware']['memory_used'] for wi in worker_infos.values()), 'memory_total': sum(wi['hardware']['memory_total'] for wi in worker_infos.values()), 'git_branches': set(wi['details']['git_info'] for wi in worker_infos.values()), } template = _jinja_env.get_template('dashboard.html') self.write_rendered( template, scheduler_summary=scheduler_summary, worker_summary=worker_summary ) register_web_handler('/', DashboardHandler)
43.019231
96
0.673223
from .server import MarsWebAPI, MarsRequestHandler, register_web_handler, get_jinja_env _jinja_env = get_jinja_env() class DashboardHandler(MarsRequestHandler): def get(self): web_api = MarsWebAPI(self._scheduler) scheduler_infos = web_api.get_schedulers_info() worker_infos = web_api.get_workers_meta() scheduler_summary = { 'count': len(scheduler_infos), 'cpu_used': sum(si['cpu_used'] for si in scheduler_infos.values()), 'cpu_total': sum(si['cpu_total'] for si in scheduler_infos.values()), 'memory_used': sum(si['memory_used'] for si in scheduler_infos.values()), 'memory_total': sum(si['memory_total'] for si in scheduler_infos.values()), 'git_branches': set(si['git_info'] for si in scheduler_infos.values()), } worker_summary = { 'count': len(worker_infos), 'cpu_used': sum(wi['hardware']['cpu_used'] for wi in worker_infos.values()), 'cpu_total': sum(wi['hardware']['cpu_total'] for wi in worker_infos.values()), 'memory_used': sum(wi['hardware']['memory_used'] for wi in worker_infos.values()), 'memory_total': sum(wi['hardware']['memory_total'] for wi in worker_infos.values()), 'git_branches': set(wi['details']['git_info'] for wi in worker_infos.values()), } template = _jinja_env.get_template('dashboard.html') self.write_rendered( template, scheduler_summary=scheduler_summary, worker_summary=worker_summary ) register_web_handler('/', DashboardHandler)
true
true
f70f1e3812bc6ff84fd875a359dea536690df0d9
5,576
py
Python
pilco/environments/custom/continuous_mountaincar.py
sbrml/pilco
77b6d8b9033ffdb23cae4936b028f42144f37846
[ "MIT" ]
null
null
null
pilco/environments/custom/continuous_mountaincar.py
sbrml/pilco
77b6d8b9033ffdb23cae4936b028f42144f37846
[ "MIT" ]
4
2020-11-13T18:43:28.000Z
2022-02-10T01:17:03.000Z
pilco/environments/custom/continuous_mountaincar.py
sbrml/pilco
77b6d8b9033ffdb23cae4936b028f42144f37846
[ "MIT" ]
1
2020-03-22T10:14:21.000Z
2020-03-22T10:14:21.000Z
""" Our modification of the OpenAI Gym Continuous Mountain Car by Olivier Sigaud: https://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py which was (ultimately) based on Sutton's implementation: http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp """ from pilco.errors import EnvironmentError import gym from gym import spaces from gym.utils import seeding import numpy as np class MountainCar(gym.Env): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30} def __init__(self): # State and action bounds self.min_action = -1.0 self.max_action = 1.0 self.min_position = - 3.0 self.max_position = 3.0 self.max_speed = 0.07 self.goal_position = 0.5 # Force per mass the car can output self.power = 0.0015 self.low_state = np.array([self.min_position, -self.max_speed], dtype=np.float32) self.high_state = np.array([self.max_position, self.max_speed], dtype=np.float32) self.viewer = None # Allowed action space self.action_space = spaces.Box(low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32) self.seed() # Temporary hack to work with rest of library self.env = self def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, action): # Check if action is in permissible space if not self.action_space.contains(action): raise EnvironmentError(f'Expected action in the range of [-1., 1.] 
' f'got action {action}.') # Unpack positiion and valocity position, velocity = self.state # Increment position by velocity position_ = position + velocity # Increment velocity by Euler rule and clip velocity_ = velocity + action * self.power - 0.0025 * np.cos(3 * position) velocity_ = np.clip(velocity_, - self.max_speed, self.max_speed) self.state = np.array([position_, velocity_]) return self.state, None, False, {} def reset(self): self.state = np.array([-0.5, 0.]) return np.array(self.state) def _height(self, xs): return 0.55 + 0.45 * np.sin(3 * xs) def render(self, mode='human'): # Set picture size screen_width = 600 screen_height = 400 world_width = self.max_position - self.min_position scale = screen_width/world_width # Set car size carwidth = 40 carheight = 20 if self.viewer is None: from gym.envs.classic_control import rendering # Car constants clearance = 10 # Overall viewer self.viewer = rendering.Viewer(screen_width, screen_height) # Track on which the car moves xs = np.linspace(self.min_position, self.max_position, 200) ys = self._height(xs) xys = list(zip((xs - self.min_position) * scale, ys * scale)) # Add car self.track = rendering.make_polyline(xys) self.track.set_linewidth(4) self.viewer.add_geom(self.track) self.cartrans = rendering.Transform() # Car chasis l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0 car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]) car.add_attr(rendering.Transform(translation=(0, clearance))) car.add_attr(self.cartrans) self.viewer.add_geom(car) # Front wheel frontwheel = rendering.make_circle(carheight / 2.5) frontwheel.set_color(.5, .5, .5) frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance))) frontwheel.add_attr(self.cartrans) self.viewer.add_geom(frontwheel) # Back wheel backwheel = rendering.make_circle(carheight / 2.5) backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance))) backwheel.add_attr(self.cartrans) backwheel.set_color(.5, .5, .5) 
self.viewer.add_geom(backwheel) # Flagpole on mountain peak flagx = scale * (0.5 - self.min_position) flagy1 = scale * self._height(self.goal_position) flagy2 = flagy1 + 50 flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2)) self.viewer.add_geom(flagpole) # Flag on flagpole flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]) flag.set_color(.8, .8, 0) self.viewer.add_geom(flag) # Translate and rotate car self.cartrans.set_translation(scale * (self.state[0] - self.min_position), scale * self._height(self.state[0])) self.cartrans.set_rotation(np.cos(3 * self.state[0])) return self.viewer.render(return_rgb_array=mode=='rgb_array') def close(self): if self.viewer: self.viewer.close() self.viewer = None
31.862857
93
0.565638
from pilco.errors import EnvironmentError import gym from gym import spaces from gym.utils import seeding import numpy as np class MountainCar(gym.Env): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30} def __init__(self): self.min_action = -1.0 self.max_action = 1.0 self.min_position = - 3.0 self.max_position = 3.0 self.max_speed = 0.07 self.goal_position = 0.5 self.power = 0.0015 self.low_state = np.array([self.min_position, -self.max_speed], dtype=np.float32) self.high_state = np.array([self.max_position, self.max_speed], dtype=np.float32) self.viewer = None self.action_space = spaces.Box(low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32) self.seed() self.env = self def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, action): if not self.action_space.contains(action): raise EnvironmentError(f'Expected action in the range of [-1., 1.] ' f'got action {action}.') position, velocity = self.state position_ = position + velocity velocity_ = velocity + action * self.power - 0.0025 * np.cos(3 * position) velocity_ = np.clip(velocity_, - self.max_speed, self.max_speed) self.state = np.array([position_, velocity_]) return self.state, None, False, {} def reset(self): self.state = np.array([-0.5, 0.]) return np.array(self.state) def _height(self, xs): return 0.55 + 0.45 * np.sin(3 * xs) def render(self, mode='human'): screen_width = 600 screen_height = 400 world_width = self.max_position - self.min_position scale = screen_width/world_width carwidth = 40 carheight = 20 if self.viewer is None: from gym.envs.classic_control import rendering clearance = 10 self.viewer = rendering.Viewer(screen_width, screen_height) xs = np.linspace(self.min_position, self.max_position, 200) ys = self._height(xs) xys = list(zip((xs - self.min_position) * scale, ys * scale)) self.track = rendering.make_polyline(xys) self.track.set_linewidth(4) self.viewer.add_geom(self.track) self.cartrans = 
rendering.Transform() l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0 car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]) car.add_attr(rendering.Transform(translation=(0, clearance))) car.add_attr(self.cartrans) self.viewer.add_geom(car) frontwheel = rendering.make_circle(carheight / 2.5) frontwheel.set_color(.5, .5, .5) frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance))) frontwheel.add_attr(self.cartrans) self.viewer.add_geom(frontwheel) backwheel = rendering.make_circle(carheight / 2.5) backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance))) backwheel.add_attr(self.cartrans) backwheel.set_color(.5, .5, .5) self.viewer.add_geom(backwheel) flagx = scale * (0.5 - self.min_position) flagy1 = scale * self._height(self.goal_position) flagy2 = flagy1 + 50 flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2)) self.viewer.add_geom(flagpole) flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]) flag.set_color(.8, .8, 0) self.viewer.add_geom(flag) self.cartrans.set_translation(scale * (self.state[0] - self.min_position), scale * self._height(self.state[0])) self.cartrans.set_rotation(np.cos(3 * self.state[0])) return self.viewer.render(return_rgb_array=mode=='rgb_array') def close(self): if self.viewer: self.viewer.close() self.viewer = None
true
true
f70f1eaceada771991938b17b192bade1dc91d80
14,849
py
Python
release/scripts/modules/bpy_extras/mesh_utils.py
rbabari/blender
6daa85f14b2974abfc3d0f654c5547f487bb3b74
[ "Naumen", "Condor-1.1", "MS-PL" ]
116
2015-11-02T16:36:53.000Z
2021-06-08T20:36:18.000Z
release/scripts/modules/bpy_extras/mesh_utils.py
rbabari/blender
6daa85f14b2974abfc3d0f654c5547f487bb3b74
[ "Naumen", "Condor-1.1", "MS-PL" ]
39
2016-04-25T12:18:34.000Z
2021-03-01T19:06:36.000Z
release/scripts/modules/bpy_extras/mesh_utils.py
rbabari/blender
6daa85f14b2974abfc3d0f654c5547f487bb3b74
[ "Naumen", "Condor-1.1", "MS-PL" ]
19
2016-01-24T14:24:00.000Z
2020-07-19T05:26:24.000Z
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8-80 compliant> __all__ = ( "mesh_linked_uv_islands", "mesh_linked_triangles", "edge_face_count_dict", "edge_face_count", "edge_loops_from_edges", "ngon_tessellate", "triangle_random_points", ) def mesh_linked_uv_islands(mesh): """ Splits the mesh into connected polygons, use this for separating cubes from other mesh elements within 1 mesh datablock. :arg mesh: the mesh used to group with. 
:type mesh: :class:`bpy.types.Mesh` :return: lists of lists containing polygon indices :rtype: list """ uv_loops = [luv.uv[:] for luv in mesh.uv_layers.active.data] poly_loops = [poly.loop_indices for poly in mesh.polygons] luv_hash = {} luv_hash_get = luv_hash.get luv_hash_ls = [None] * len(uv_loops) for pi, poly_indices in enumerate(poly_loops): for li in poly_indices: uv = uv_loops[li] uv_hub = luv_hash_get(uv) if uv_hub is None: uv_hub = luv_hash[uv] = [pi] else: uv_hub.append(pi) luv_hash_ls[li] = uv_hub poly_islands = [] # 0 = none, 1 = added, 2 = searched poly_tag = [0] * len(poly_loops) while True: poly_index = -1 for i in range(len(poly_loops)): if poly_tag[i] == 0: poly_index = i break if poly_index != -1: island = [poly_index] poly_tag[poly_index] = 1 poly_islands.append(island) else: break # we're done added = True while added: added = False for poly_index in island[:]: if poly_tag[poly_index] == 1: for li in poly_loops[poly_index]: for poly_index_shared in luv_hash_ls[li]: if poly_tag[poly_index_shared] == 0: added = True poly_tag[poly_index_shared] = 1 island.append(poly_index_shared) poly_tag[poly_index] = 2 return poly_islands def mesh_linked_triangles(mesh): """ Splits the mesh into connected triangles, use this for separating cubes from other mesh elements within 1 mesh datablock. :arg mesh: the mesh used to group with. :type mesh: :class:`bpy.types.Mesh` :return: lists of lists containing triangles. 
:rtype: list """ # Build vert face connectivity vert_tris = [[] for i in range(len(mesh.vertices))] for t in mesh.loop_triangles: for v in t.vertices: vert_tris[v].append(t) # sort triangles into connectivity groups tri_groups = [[t] for t in mesh.loop_triangles] # map old, new tri location tri_mapping = list(range(len(mesh.loop_triangles))) # Now clump triangles iteratively ok = True while ok: ok = False for t in mesh.loop_triangles: mapped_index = tri_mapping[t.index] mapped_group = tri_groups[mapped_index] for v in t.vertices: for nxt_t in vert_tris[v]: if nxt_t != t: nxt_mapped_index = tri_mapping[nxt_t.index] # We are not a part of the same group if mapped_index != nxt_mapped_index: ok = True # Assign mapping to this group so they # all map to this group for grp_t in tri_groups[nxt_mapped_index]: tri_mapping[grp_t.index] = mapped_index # Move triangles into this group mapped_group.extend(tri_groups[nxt_mapped_index]) # remove reference to the list tri_groups[nxt_mapped_index] = None # return all tri groups that are not null # this is all the triangles that are connected in their own lists. return [tg for tg in tri_groups if tg] def edge_face_count_dict(mesh): """ :return: dict of edge keys with their value set to the number of faces using each edge. :rtype: dict """ face_edge_count = {} loops = mesh.loops edges = mesh.edges for poly in mesh.polygons: for i in poly.loop_indices: key = edges[loops[i].edge_index].key try: face_edge_count[key] += 1 except: face_edge_count[key] = 1 return face_edge_count def edge_face_count(mesh): """ :return: list face users for each item in mesh.edges. :rtype: list """ edge_face_count = edge_face_count_dict(mesh) get = dict.get return [get(edge_face_count, ed.key, 0) for ed in mesh.edges] def edge_loops_from_edges(mesh, edges=None): """ Edge loops defined by edges Takes me.edges or a list of edges and returns the edge loops return a list of vertex indices. [ [1, 6, 7, 2], ...] closed loops have matching start and end values. 
""" line_polys = [] # Get edges not used by a face if edges is None: edges = mesh.edges if not hasattr(edges, "pop"): edges = edges[:] while edges: current_edge = edges.pop() vert_end, vert_start = current_edge.vertices[:] line_poly = [vert_start, vert_end] ok = True while ok: ok = False # for i, ed in enumerate(edges): i = len(edges) while i: i -= 1 ed = edges[i] v1, v2 = ed.vertices if v1 == vert_end: line_poly.append(v2) vert_end = line_poly[-1] ok = 1 del edges[i] # break elif v2 == vert_end: line_poly.append(v1) vert_end = line_poly[-1] ok = 1 del edges[i] # break elif v1 == vert_start: line_poly.insert(0, v2) vert_start = line_poly[0] ok = 1 del edges[i] # break elif v2 == vert_start: line_poly.insert(0, v1) vert_start = line_poly[0] ok = 1 del edges[i] # break line_polys.append(line_poly) return line_polys def ngon_tessellate(from_data, indices, fix_loops=True, debug_print=True): """ Takes a polyline of indices (ngon) and returns a list of face index lists. Designed to be used for importers that need indices for an ngon to create from existing verts. :arg from_data: either a mesh, or a list/tuple of vectors. :type from_data: list or :class:`bpy.types.Mesh` :arg indices: a list of indices to use this list is the ordered closed polyline to fill, and can be a subset of the data given. :type indices: list :arg fix_loops: If this is enabled polylines that use loops to make multiple polylines are delt with correctly. :type fix_loops: bool """ from mathutils.geometry import tessellate_polygon from mathutils import Vector vector_to_tuple = Vector.to_tuple if not indices: return [] def mlen(co): # Manhatten length of a vector, faster then length. return abs(co[0]) + abs(co[1]) + abs(co[2]) def vert_treplet(v, i): return v, vector_to_tuple(v, 6), i, mlen(v) def ed_key_mlen(v1, v2): if v1[3] > v2[3]: return v2[1], v1[1] else: return v1[1], v2[1] if not fix_loops: # Normal single concave loop filling. 
if type(from_data) in {tuple, list}: verts = [Vector(from_data[i]) for ii, i in enumerate(indices)] else: verts = [from_data.vertices[i].co for ii, i in enumerate(indices)] # same as reversed(range(1, len(verts))): for i in range(len(verts) - 1, 0, -1): if verts[i][1] == verts[i - 1][0]: verts.pop(i - 1) fill = tessellate_polygon([verts]) else: # Separate this loop into multiple loops be finding edges that are # used twice. This is used by Light-Wave LWO files a lot. if type(from_data) in {tuple, list}: verts = [ vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices) ] else: verts = [ vert_treplet(from_data.vertices[i].co, ii) for ii, i in enumerate(indices) ] edges = [(i, i - 1) for i in range(len(verts))] if edges: edges[0] = (0, len(verts) - 1) if not verts: return [] edges_used = set() edges_doubles = set() # We need to check if any edges are used twice location based. for ed in edges: edkey = ed_key_mlen(verts[ed[0]], verts[ed[1]]) if edkey in edges_used: edges_doubles.add(edkey) else: edges_used.add(edkey) # Store a list of unconnected loop segments split by double edges. # will join later loop_segments = [] v_prev = verts[0] context_loop = [v_prev] loop_segments = [context_loop] for v in verts: if v != v_prev: # Are we crossing an edge we removed? if ed_key_mlen(v, v_prev) in edges_doubles: context_loop = [v] loop_segments.append(context_loop) else: if context_loop and context_loop[-1][1] == v[1]: pass else: context_loop.append(v) v_prev = v # Now join loop segments def join_seg(s1, s2): if s2[-1][1] == s1[0][1]: s1, s2 = s2, s1 elif s1[-1][1] == s2[0][1]: pass else: return False # If were still here s1 and s2 are 2 segments in the same poly-line. s1.pop() # remove the last vert from s1 s1.extend(s2) # add segment 2 to segment 1 if s1[0][1] == s1[-1][1]: # remove endpoints double s1.pop() del s2[:] # Empty this segment s2 so we don't use it again. 
return True joining_segments = True while joining_segments: joining_segments = False segcount = len(loop_segments) for j in range(segcount - 1, -1, -1): # reversed(range(segcount)): seg_j = loop_segments[j] if seg_j: for k in range(j - 1, -1, -1): # reversed(range(j)): if not seg_j: break seg_k = loop_segments[k] if seg_k and join_seg(seg_j, seg_k): joining_segments = True loop_list = loop_segments for verts in loop_list: while verts and verts[0][1] == verts[-1][1]: verts.pop() loop_list = [verts for verts in loop_list if len(verts) > 2] # DONE DEALING WITH LOOP FIXING # vert mapping vert_map = [None] * len(indices) ii = 0 for verts in loop_list: if len(verts) > 2: for i, vert in enumerate(verts): vert_map[i + ii] = vert[2] ii += len(verts) fill = tessellate_polygon([[v[0] for v in loop] for loop in loop_list]) # draw_loops(loop_list) #raise Exception("done loop") # map to original indices fill = [[vert_map[i] for i in f] for f in fill] if not fill: if debug_print: print('Warning Cannot scanfill, fallback on a triangle fan.') fill = [[0, i - 1, i] for i in range(2, len(indices))] else: # Use real scan-fill. # See if its flipped the wrong way. flip = None for fi in fill: if flip is not None: break for i, vi in enumerate(fi): if vi == 0 and fi[i - 1] == 1: flip = False break elif vi == 1 and fi[i - 1] == 0: flip = True break if not flip: for i, fi in enumerate(fill): fill[i] = tuple([ii for ii in reversed(fi)]) return fill def triangle_random_points(num_points, loop_triangles): """ Generates a list of random points over mesh loop triangles. :arg num_points: the number of random points to generate on each triangle. :type int: :arg loop_triangles: list of the triangles to generate points on. :type loop_triangles: :class:`bpy.types.MeshLoopTriangle`, sequence :return: list of random points over all triangles. 
:rtype: list """ from random import random # For each triangle, generate the required number of random points sampled_points = [None] * (num_points * len(loop_triangles)) for i, lt in enumerate(loop_triangles): # Get triangle vertex coordinates verts = lt.id_data.vertices ltv = lt.vertices[:] tv = (verts[ltv[0]].co, verts[ltv[1]].co, verts[ltv[2]].co) for k in range(num_points): u1 = random() u2 = random() u_tot = u1 + u2 if u_tot > 1: u1 = 1.0 - u1 u2 = 1.0 - u2 side1 = tv[1] - tv[0] side2 = tv[2] - tv[0] p = tv[0] + u1 * side1 + u2 * side2 sampled_points[num_points * i + k] = p return sampled_points
31.393235
80
0.536534
loop_indices for poly in mesh.polygons] luv_hash = {} luv_hash_get = luv_hash.get luv_hash_ls = [None] * len(uv_loops) for pi, poly_indices in enumerate(poly_loops): for li in poly_indices: uv = uv_loops[li] uv_hub = luv_hash_get(uv) if uv_hub is None: uv_hub = luv_hash[uv] = [pi] else: uv_hub.append(pi) luv_hash_ls[li] = uv_hub poly_islands = [] poly_tag = [0] * len(poly_loops) while True: poly_index = -1 for i in range(len(poly_loops)): if poly_tag[i] == 0: poly_index = i break if poly_index != -1: island = [poly_index] poly_tag[poly_index] = 1 poly_islands.append(island) else: break added = True while added: added = False for poly_index in island[:]: if poly_tag[poly_index] == 1: for li in poly_loops[poly_index]: for poly_index_shared in luv_hash_ls[li]: if poly_tag[poly_index_shared] == 0: added = True poly_tag[poly_index_shared] = 1 island.append(poly_index_shared) poly_tag[poly_index] = 2 return poly_islands def mesh_linked_triangles(mesh): # Build vert face connectivity vert_tris = [[] for i in range(len(mesh.vertices))] for t in mesh.loop_triangles: for v in t.vertices: vert_tris[v].append(t) # sort triangles into connectivity groups tri_groups = [[t] for t in mesh.loop_triangles] # map old, new tri location tri_mapping = list(range(len(mesh.loop_triangles))) # Now clump triangles iteratively ok = True while ok: ok = False for t in mesh.loop_triangles: mapped_index = tri_mapping[t.index] mapped_group = tri_groups[mapped_index] for v in t.vertices: for nxt_t in vert_tris[v]: if nxt_t != t: nxt_mapped_index = tri_mapping[nxt_t.index] # We are not a part of the same group if mapped_index != nxt_mapped_index: ok = True # Assign mapping to this group so they # all map to this group for grp_t in tri_groups[nxt_mapped_index]: tri_mapping[grp_t.index] = mapped_index # Move triangles into this group mapped_group.extend(tri_groups[nxt_mapped_index]) # remove reference to the list tri_groups[nxt_mapped_index] = None # return all tri groups that are not null # this is 
all the triangles that are connected in their own lists. return [tg for tg in tri_groups if tg] def edge_face_count_dict(mesh): face_edge_count = {} loops = mesh.loops edges = mesh.edges for poly in mesh.polygons: for i in poly.loop_indices: key = edges[loops[i].edge_index].key try: face_edge_count[key] += 1 except: face_edge_count[key] = 1 return face_edge_count def edge_face_count(mesh): edge_face_count = edge_face_count_dict(mesh) get = dict.get return [get(edge_face_count, ed.key, 0) for ed in mesh.edges] def edge_loops_from_edges(mesh, edges=None): line_polys = [] # Get edges not used by a face if edges is None: edges = mesh.edges if not hasattr(edges, "pop"): edges = edges[:] while edges: current_edge = edges.pop() vert_end, vert_start = current_edge.vertices[:] line_poly = [vert_start, vert_end] ok = True while ok: ok = False # for i, ed in enumerate(edges): i = len(edges) while i: i -= 1 ed = edges[i] v1, v2 = ed.vertices if v1 == vert_end: line_poly.append(v2) vert_end = line_poly[-1] ok = 1 del edges[i] # break elif v2 == vert_end: line_poly.append(v1) vert_end = line_poly[-1] ok = 1 del edges[i] # break elif v1 == vert_start: line_poly.insert(0, v2) vert_start = line_poly[0] ok = 1 del edges[i] # break elif v2 == vert_start: line_poly.insert(0, v1) vert_start = line_poly[0] ok = 1 del edges[i] # break line_polys.append(line_poly) return line_polys def ngon_tessellate(from_data, indices, fix_loops=True, debug_print=True): from mathutils.geometry import tessellate_polygon from mathutils import Vector vector_to_tuple = Vector.to_tuple if not indices: return [] def mlen(co): # Manhatten length of a vector, faster then length. return abs(co[0]) + abs(co[1]) + abs(co[2]) def vert_treplet(v, i): return v, vector_to_tuple(v, 6), i, mlen(v) def ed_key_mlen(v1, v2): if v1[3] > v2[3]: return v2[1], v1[1] else: return v1[1], v2[1] if not fix_loops: # Normal single concave loop filling. 
if type(from_data) in {tuple, list}: verts = [Vector(from_data[i]) for ii, i in enumerate(indices)] else: verts = [from_data.vertices[i].co for ii, i in enumerate(indices)] # same as reversed(range(1, len(verts))): for i in range(len(verts) - 1, 0, -1): if verts[i][1] == verts[i - 1][0]: verts.pop(i - 1) fill = tessellate_polygon([verts]) else: # Separate this loop into multiple loops be finding edges that are # used twice. This is used by Light-Wave LWO files a lot. if type(from_data) in {tuple, list}: verts = [ vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices) ] else: verts = [ vert_treplet(from_data.vertices[i].co, ii) for ii, i in enumerate(indices) ] edges = [(i, i - 1) for i in range(len(verts))] if edges: edges[0] = (0, len(verts) - 1) if not verts: return [] edges_used = set() edges_doubles = set() # We need to check if any edges are used twice location based. for ed in edges: edkey = ed_key_mlen(verts[ed[0]], verts[ed[1]]) if edkey in edges_used: edges_doubles.add(edkey) else: edges_used.add(edkey) # Store a list of unconnected loop segments split by double edges. # will join later loop_segments = [] v_prev = verts[0] context_loop = [v_prev] loop_segments = [context_loop] for v in verts: if v != v_prev: # Are we crossing an edge we removed? if ed_key_mlen(v, v_prev) in edges_doubles: context_loop = [v] loop_segments.append(context_loop) else: if context_loop and context_loop[-1][1] == v[1]: pass else: context_loop.append(v) v_prev = v # Now join loop segments def join_seg(s1, s2): if s2[-1][1] == s1[0][1]: s1, s2 = s2, s1 elif s1[-1][1] == s2[0][1]: pass else: return False # If were still here s1 and s2 are 2 segments in the same poly-line. s1.pop() # remove the last vert from s1 s1.extend(s2) # add segment 2 to segment 1 if s1[0][1] == s1[-1][1]: # remove endpoints double s1.pop() del s2[:] # Empty this segment s2 so we don't use it again. 
return True joining_segments = True while joining_segments: joining_segments = False segcount = len(loop_segments) for j in range(segcount - 1, -1, -1): seg_j = loop_segments[j] if seg_j: for k in range(j - 1, -1, -1): if not seg_j: break seg_k = loop_segments[k] if seg_k and join_seg(seg_j, seg_k): joining_segments = True loop_list = loop_segments for verts in loop_list: while verts and verts[0][1] == verts[-1][1]: verts.pop() loop_list = [verts for verts in loop_list if len(verts) > 2] vert_map = [None] * len(indices) ii = 0 for verts in loop_list: if len(verts) > 2: for i, vert in enumerate(verts): vert_map[i + ii] = vert[2] ii += len(verts) fill = tessellate_polygon([[v[0] for v in loop] for loop in loop_list]) fill = [[vert_map[i] for i in f] for f in fill] if not fill: if debug_print: print('Warning Cannot scanfill, fallback on a triangle fan.') fill = [[0, i - 1, i] for i in range(2, len(indices))] else: flip = None for fi in fill: if flip is not None: break for i, vi in enumerate(fi): if vi == 0 and fi[i - 1] == 1: flip = False break elif vi == 1 and fi[i - 1] == 0: flip = True break if not flip: for i, fi in enumerate(fill): fill[i] = tuple([ii for ii in reversed(fi)]) return fill def triangle_random_points(num_points, loop_triangles): from random import random sampled_points = [None] * (num_points * len(loop_triangles)) for i, lt in enumerate(loop_triangles): verts = lt.id_data.vertices ltv = lt.vertices[:] tv = (verts[ltv[0]].co, verts[ltv[1]].co, verts[ltv[2]].co) for k in range(num_points): u1 = random() u2 = random() u_tot = u1 + u2 if u_tot > 1: u1 = 1.0 - u1 u2 = 1.0 - u2 side1 = tv[1] - tv[0] side2 = tv[2] - tv[0] p = tv[0] + u1 * side1 + u2 * side2 sampled_points[num_points * i + k] = p return sampled_points
true
true
f70f1f28974c7dca57cac919fb7d82881164e5fb
983
py
Python
src/notif/urls.py
triump0870/notif
ccc36ce85b3721ea85bf22038541d375c2ff5996
[ "MIT" ]
null
null
null
src/notif/urls.py
triump0870/notif
ccc36ce85b3721ea85bf22038541d375c2ff5996
[ "MIT" ]
null
null
null
src/notif/urls.py
triump0870/notif
ccc36ce85b3721ea85bf22038541d375c2ff5996
[ "MIT" ]
null
null
null
from django.conf.urls import include, url from django.contrib import admin from django.conf import settings from django.conf.urls.static import static import profiles.urls import accounts.urls from . import views urlpatterns = [ url(r'^$', views.HomePage.as_view(), name='home'), url(r'^about/$', views.AboutPage.as_view(), name='about'), url(r'^users/', include(profiles.urls, namespace='profiles')), url(r'^admin/', include(admin.site.urls)), url(r'^', include(accounts.urls, namespace='accounts')), url(r'^apis/', include('apis.urls', namespace='apis')), url(r'^weather/', include('secondhome.urls', namespace='pets')), ] # User-uploaded files like profile pics need to be served in development urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # Include django debug toolbar if DEBUG is on if settings.DEBUG: import debug_toolbar urlpatterns += [ url(r'^__debug__/', include(debug_toolbar.urls)), ]
32.766667
76
0.708037
from django.conf.urls import include, url from django.contrib import admin from django.conf import settings from django.conf.urls.static import static import profiles.urls import accounts.urls from . import views urlpatterns = [ url(r'^$', views.HomePage.as_view(), name='home'), url(r'^about/$', views.AboutPage.as_view(), name='about'), url(r'^users/', include(profiles.urls, namespace='profiles')), url(r'^admin/', include(admin.site.urls)), url(r'^', include(accounts.urls, namespace='accounts')), url(r'^apis/', include('apis.urls', namespace='apis')), url(r'^weather/', include('secondhome.urls', namespace='pets')), ] urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) if settings.DEBUG: import debug_toolbar urlpatterns += [ url(r'^__debug__/', include(debug_toolbar.urls)), ]
true
true
f70f1fa11ff388ced5d59e8a7ef2a1be228de8c6
516
py
Python
vunit/verilog/check/run.py
ThomasWismer/vunit
3dd2d34e72c21d7a5b23cbf5a775ea28923a7718
[ "Artistic-2.0", "Apache-2.0" ]
2
2021-09-24T01:44:27.000Z
2022-01-18T19:42:16.000Z
vunit/verilog/check/run.py
ThomasWismer/vunit
3dd2d34e72c21d7a5b23cbf5a775ea28923a7718
[ "Artistic-2.0", "Apache-2.0" ]
1
2021-09-12T01:03:44.000Z
2021-09-19T15:28:12.000Z
vunit/verilog/check/run.py
ThomasWismer/vunit
3dd2d34e72c21d7a5b23cbf5a775ea28923a7718
[ "Artistic-2.0", "Apache-2.0" ]
null
null
null
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Copyright (c) 2014-2021, Lars Asplund lars.anders.asplund@gmail.com from pathlib import Path from vunit.verilog import VUnit ROOT = Path(__file__).parent VU = VUnit.from_argv() VU.add_library("lib").add_source_files(ROOT / "test" / "*.sv") VU.set_sim_option("modelsim.vsim_flags.gui", ["-novopt"]) VU.main()
28.666667
75
0.728682
from pathlib import Path from vunit.verilog import VUnit ROOT = Path(__file__).parent VU = VUnit.from_argv() VU.add_library("lib").add_source_files(ROOT / "test" / "*.sv") VU.set_sim_option("modelsim.vsim_flags.gui", ["-novopt"]) VU.main()
true
true
f70f1fee1810154c5a52377ec53945a73677dcdc
1,294
py
Python
stubs/System/Drawing/Configuration.py
ricardyn/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2021-02-02T13:39:16.000Z
2021-02-02T13:39:16.000Z
stubs/System/Drawing/Configuration.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs/System/Drawing/Configuration.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
# encoding: utf-8 # module System.Drawing.Configuration calls itself Configuration # from System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a # by generator 1.145 # no doc # no imports # no functions # classes class SystemDrawingSection(ConfigurationSection): """ SystemDrawingSection() """ BitmapSuffix = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Get: BitmapSuffix(self: SystemDrawingSection) -> str Set: BitmapSuffix(self: SystemDrawingSection) = value """ ElementProperty = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Gets the System.Configuration.ConfigurationElementProperty object that represents the System.Configuration.ConfigurationElement object itself. """ EvaluationContext = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Gets the System.Configuration.ContextInformation object for the System.Configuration.ConfigurationElement object. """ HasContext = property(lambda self: object(), lambda self, v: None, lambda self: None) # default Properties = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
36.971429
151
0.719474
class SystemDrawingSection(ConfigurationSection): BitmapSuffix = property(lambda self: object(), lambda self, v: None, lambda self: None) """ HasContext = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
true
true
f70f210275c45c6bc3b24ff9b6183cf804f8307c
411
py
Python
18_greatest.py
hritxvij/python-basic-practise
42f55320ff55346558ecc164967cbe399060247c
[ "Apache-2.0" ]
null
null
null
18_greatest.py
hritxvij/python-basic-practise
42f55320ff55346558ecc164967cbe399060247c
[ "Apache-2.0" ]
null
null
null
18_greatest.py
hritxvij/python-basic-practise
42f55320ff55346558ecc164967cbe399060247c
[ "Apache-2.0" ]
null
null
null
num1 = input("Enter the first number:\n ") num2 = input("Enter the second number:\n ") num3 = input("Enter the third number:\n ") num4 = input("Enter the fourth number:\n ") if (num1>num2) and (num2>num3): print("The greatest number is:", num1) elif (num2>num1) and (num1>num3): print("The greatest nymber is:", num2) else: print("The greatest number is:", num3)
31.615385
55
0.608273
num1 = input("Enter the first number:\n ") num2 = input("Enter the second number:\n ") num3 = input("Enter the third number:\n ") num4 = input("Enter the fourth number:\n ") if (num1>num2) and (num2>num3): print("The greatest number is:", num1) elif (num2>num1) and (num1>num3): print("The greatest nymber is:", num2) else: print("The greatest number is:", num3)
true
true
f70f2224b7c29a2a61b7f2a64979a1761e77d84f
11,600
py
Python
code/python/IRNContacts/v1/fds/sdk/IRNContacts/model/operation_type.py
factset/enterprise-sdk
3fd4d1360756c515c9737a0c9a992c7451d7de7e
[ "Apache-2.0" ]
6
2022-02-07T16:34:18.000Z
2022-03-30T08:04:57.000Z
code/python/IRNContacts/v1/fds/sdk/IRNContacts/model/operation_type.py
factset/enterprise-sdk
3fd4d1360756c515c9737a0c9a992c7451d7de7e
[ "Apache-2.0" ]
2
2022-02-07T05:25:57.000Z
2022-03-07T14:18:04.000Z
code/python/IRNContacts/v1/fds/sdk/IRNContacts/model/operation_type.py
factset/enterprise-sdk
3fd4d1360756c515c9737a0c9a992c7451d7de7e
[ "Apache-2.0" ]
null
null
null
""" IRN API v1 Allows users to extract, create, update and configure IRN data. # noqa: E501 The version of the OpenAPI document: 1 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from fds.sdk.IRNContacts.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from fds.sdk.IRNContacts.exceptions import ApiAttributeError class OperationType(ModelSimple): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { ('value',): { '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, }, } validations = { } additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. 
""" return { 'value': (int,), } @cached_property def discriminator(): return None attribute_map = {} read_only_vars = set() _composed_schemas = None required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): """OperationType - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. Args: args[0] (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501 Keyword Args: value (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): """OperationType - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. 
Args: args[0] (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501 Keyword Args: value (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) self = super(OpenApiModel, cls).__new__(cls) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) return self
40.277778
110
0.547845
import re import sys from fds.sdk.IRNContacts.model_utils import ( ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from fds.sdk.IRNContacts.exceptions import ApiAttributeError class OperationType(ModelSimple): allowed_values = { ('value',): { '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, }, } validations = { } additional_properties_type = None _nullable = False @cached_property def openapi_types(): return { 'value': (int,), } @cached_property def discriminator(): return None attribute_map = {} read_only_vars = set() _composed_schemas = None required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): _path_to_item = kwargs.pop('_path_to_item', ()) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) self = super(OpenApiModel, cls).__new__(cls) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." 
% ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) return self
true
true
f70f2240675cbbff5008d275512cd6a5bb90088b
2,087
py
Python
dvc/dependency/repo.py
drorata/dvc
b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0
[ "Apache-2.0" ]
null
null
null
dvc/dependency/repo.py
drorata/dvc
b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0
[ "Apache-2.0" ]
null
null
null
dvc/dependency/repo.py
drorata/dvc
b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0
[ "Apache-2.0" ]
null
null
null
from __future__ import unicode_literals import copy from funcy import merge from schema import Optional from contextlib import contextmanager from dvc.external_repo import external_repo from dvc.utils.compat import str from .local import DependencyLOCAL class DependencyREPO(DependencyLOCAL): PARAM_REPO = "repo" PARAM_URL = "url" PARAM_REV = "rev" PARAM_REV_LOCK = "rev_lock" REPO_SCHEMA = { Optional(PARAM_URL): str, Optional(PARAM_REV): str, Optional(PARAM_REV_LOCK): str, } def __init__(self, def_repo, stage, *args, **kwargs): self.def_repo = def_repo super(DependencyREPO, self).__init__(stage, *args, **kwargs) def _parse_path(self, remote, path): return None @property def is_in_repo(self): return False def __str__(self): return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL]) @contextmanager def _make_repo(self, **overrides): with external_repo(**merge(self.def_repo, overrides)) as repo: yield repo def status(self): with self._make_repo() as repo: current = repo.find_out_by_relpath(self.def_path).info with self._make_repo(rev_lock=None) as repo: updated = repo.find_out_by_relpath(self.def_path).info if current != updated: return {str(self): "update available"} return {} def save(self): pass def dumpd(self): return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo} def download(self, to, resume=False): with self._make_repo( cache_dir=self.repo.cache.local.cache_dir ) as repo: self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev() out = repo.find_out_by_relpath(self.def_path) repo.fetch(out.stage.path) to.info = copy.copy(out.info) to.checkout() def update(self): with self._make_repo(rev_lock=None) as repo: self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
26.75641
79
0.643987
from __future__ import unicode_literals import copy from funcy import merge from schema import Optional from contextlib import contextmanager from dvc.external_repo import external_repo from dvc.utils.compat import str from .local import DependencyLOCAL class DependencyREPO(DependencyLOCAL): PARAM_REPO = "repo" PARAM_URL = "url" PARAM_REV = "rev" PARAM_REV_LOCK = "rev_lock" REPO_SCHEMA = { Optional(PARAM_URL): str, Optional(PARAM_REV): str, Optional(PARAM_REV_LOCK): str, } def __init__(self, def_repo, stage, *args, **kwargs): self.def_repo = def_repo super(DependencyREPO, self).__init__(stage, *args, **kwargs) def _parse_path(self, remote, path): return None @property def is_in_repo(self): return False def __str__(self): return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL]) @contextmanager def _make_repo(self, **overrides): with external_repo(**merge(self.def_repo, overrides)) as repo: yield repo def status(self): with self._make_repo() as repo: current = repo.find_out_by_relpath(self.def_path).info with self._make_repo(rev_lock=None) as repo: updated = repo.find_out_by_relpath(self.def_path).info if current != updated: return {str(self): "update available"} return {} def save(self): pass def dumpd(self): return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo} def download(self, to, resume=False): with self._make_repo( cache_dir=self.repo.cache.local.cache_dir ) as repo: self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev() out = repo.find_out_by_relpath(self.def_path) repo.fetch(out.stage.path) to.info = copy.copy(out.info) to.checkout() def update(self): with self._make_repo(rev_lock=None) as repo: self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
true
true
f70f22e7a983dcb07f28f40547898d34230c6980
843
py
Python
examples/custom_problem.py
eltociear/NiaPy
7884aefec8f013d9f8db5c1af7080a61dd19a31d
[ "MIT" ]
null
null
null
examples/custom_problem.py
eltociear/NiaPy
7884aefec8f013d9f8db5c1af7080a61dd19a31d
[ "MIT" ]
null
null
null
examples/custom_problem.py
eltociear/NiaPy
7884aefec8f013d9f8db5c1af7080a61dd19a31d
[ "MIT" ]
null
null
null
# encoding=utf8 # This is temporary fix to import module from parent folder # It will be removed when package is published on PyPI import sys sys.path.append('../') import numpy as np from niapy.task import StoppingTask from niapy.problems import Problem from niapy.algorithms.basic import ParticleSwarmAlgorithm class MyProblem(Problem): def __init__(self, dimension, lower=-10, upper=10, *args, **kwargs): super().__init__(dimension, lower, upper, *args, **kwargs) def _evaluate(self, x): return np.sum(x ** 2) # we will run Particle Swarm Algorithm on custom problem task = StoppingTask(problem=MyProblem(dimension=10), max_iters=1000) algo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7, min_velocity=-4, max_velocity=4) best = algo.run(task=task) print('%s -> %s ' % (best[0], best[1]))
31.222222
105
0.725979
import sys sys.path.append('../') import numpy as np from niapy.task import StoppingTask from niapy.problems import Problem from niapy.algorithms.basic import ParticleSwarmAlgorithm class MyProblem(Problem): def __init__(self, dimension, lower=-10, upper=10, *args, **kwargs): super().__init__(dimension, lower, upper, *args, **kwargs) def _evaluate(self, x): return np.sum(x ** 2) task = StoppingTask(problem=MyProblem(dimension=10), max_iters=1000) algo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7, min_velocity=-4, max_velocity=4) best = algo.run(task=task) print('%s -> %s ' % (best[0], best[1]))
true
true
f70f2304c5bc68c201e9756c24ea51014e1794d0
8,831
py
Python
datahub/search/elasticsearch.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
6
2019-12-02T16:11:24.000Z
2022-03-18T10:02:02.000Z
datahub/search/elasticsearch.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
1,696
2019-10-31T14:08:37.000Z
2022-03-29T12:35:57.000Z
datahub/search/elasticsearch.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
9
2019-11-22T12:42:03.000Z
2021-09-03T14:25:05.000Z
from contextlib import contextmanager from logging import getLogger from django.conf import settings from elasticsearch.helpers import bulk as es_bulk from elasticsearch_dsl import analysis, Index from elasticsearch_dsl.connections import connections logger = getLogger(__name__) # Normalises values to improve sorting (by keeping e, E, è, ê etc. together) lowercase_asciifolding_normalizer = analysis.normalizer( 'lowercase_asciifolding_normalizer', filter=('lowercase', 'asciifolding'), ) # Trigram tokenizer enables us to support partial matching trigram = analysis.tokenizer( 'trigram', 'nGram', min_gram=3, max_gram=3, token_chars=('letter', 'digit'), ) # Filters out "-" so that t-shirt and tshirt can be matched special_chars = analysis.char_filter('special_chars', 'mapping', mappings=('-=>',)) trigram_analyzer = analysis.CustomAnalyzer( 'trigram_analyzer', tokenizer=trigram, char_filter=special_chars, filter=('lowercase',), ) space_remover = analysis.token_filter( 'space_remover', type='pattern_replace', pattern=' ', replacement='', ) AREA_REGEX = r'[a-z]{1,2}' DISTRICT_REGEX = r'(?:[0-9][a-z]|[0-9]{1,2})' SECTOR_REGEX = r'[0-9]' UNIT_REGEX = r'[a-z]{2}' postcode_filter = analysis.token_filter( 'postcode_filter', type='pattern_capture', # Index whole postcode (with space) preserve_original=True, patterns=[ # Index postcode area # See the Royal Mail programmer's guide for the exact definitions rf'^({AREA_REGEX}){DISTRICT_REGEX} {SECTOR_REGEX}{UNIT_REGEX}', # Index postcode district (with sub-district code ignored) # This is so `wc1` query would match `wc1ab` and `wc1a1ab`, but not `wc111ab` # Area + one or two digits rf'^(({AREA_REGEX}[0-9]) {SECTOR_REGEX}{UNIT_REGEX}|' rf'({AREA_REGEX}[0-9]{{2}}) {SECTOR_REGEX}{UNIT_REGEX}|' rf'({AREA_REGEX}[0-9])[a-z]? 
{SECTOR_REGEX}{UNIT_REGEX})', # Index postcode district (including sub-district) rf'^({AREA_REGEX}{DISTRICT_REGEX}) {SECTOR_REGEX}{UNIT_REGEX}', # Index postcode sector rf'^({AREA_REGEX}{DISTRICT_REGEX} {SECTOR_REGEX}){UNIT_REGEX}', ], ) # Token filter that adds a space to well-formed UK postcodes that don't have one. normalise_postcode_filter = analysis.token_filter( 'normalise_postcode_filter', type='pattern_replace', pattern=rf'^' rf'(?<area>{AREA_REGEX})' rf'(?<district>{DISTRICT_REGEX})' rf'(?<sector>{SECTOR_REGEX})' rf'(?<unit>{UNIT_REGEX})' rf'$', replacement=r'${area}${district} ${sector}${unit}', ) postcode_analyzer = analysis.CustomAnalyzer( 'postcode_analyzer_v2', type='custom', tokenizer='keyword', filter=(space_remover, 'lowercase', normalise_postcode_filter, postcode_filter), ) postcode_search_analyzer = analysis.CustomAnalyzer( 'postcode_search_analyzer_v2', type='custom', tokenizer='keyword', filter=('lowercase', normalise_postcode_filter), ) english_possessive_stemmer = analysis.token_filter( 'english_possessive_stemmer', type='stemmer', language='possessive_english', ) english_stemmer = analysis.token_filter( 'english_stemmer', type='stemmer', language='english', ) english_stop = analysis.token_filter( 'english_stop', type='stop', stopwords='_english_', ) english_analyzer = analysis.CustomAnalyzer( 'english_analyzer', tokenizer='standard', filter=[ english_possessive_stemmer, 'lowercase', english_stop, english_stemmer, ], ) ANALYZERS = ( trigram_analyzer, english_analyzer, ) def configure_connection(): """Configure Elasticsearch default connection.""" connections_default = { 'hosts': [settings.ES_URL], 'verify_certs': settings.ES_VERIFY_CERTS, } connections.configure(default=connections_default) def get_client(): """Gets an instance of the Elasticsearch client from the connection cache.""" return connections.get_connection() def index_exists(index_name): """Checks if an index exists.""" client = get_client() return 
client.indices.exists(index_name) def create_index(index_name, mapping, alias_names=()): """ Creates an index, initialises it with a mapping, and optionally associates aliases with it. Note: If you need to perform multiple alias operations atomically, you should use start_alias_transaction() instead of specifying aliases when creating an index. """ index = Index(index_name) for analyzer in ANALYZERS: index.analyzer(analyzer) index.settings(**settings.ES_INDEX_SETTINGS) index.mapping(mapping) # ES allows you to specify filter criteria for aliases but we don't make use of that – # hence the empty dict for each alias alias_mapping = {alias_name: {} for alias_name in alias_names} index.aliases(**alias_mapping) index.create() def delete_index(index_name): """Deletes an index.""" logger.info(f'Deleting the {index_name} index...') client = get_client() client.indices.delete(index_name) def get_indices_for_aliases(*alias_names): """Gets the indices referenced by one or more aliases.""" client = get_client() alias_to_index_mapping = {alias_name: set() for alias_name in alias_names} index_to_alias_mapping = client.indices.get_alias(name=alias_names) for index_name, index_properties in index_to_alias_mapping.items(): for alias_name in index_properties['aliases']: alias_to_index_mapping[alias_name].add(index_name) return [alias_to_index_mapping[alias_name] for alias_name in alias_names] def get_aliases_for_index(index_name): """Gets the aliases referencing an index.""" client = get_client() alias_response = client.indices.get_alias(index=index_name) return alias_response[index_name]['aliases'].keys() def alias_exists(alias_name): """Checks if an alias exists.""" client = get_client() return client.indices.exists_alias(name=alias_name) def delete_alias(alias_name): """Deletes an alias entirely (dissociating it from all indices).""" logger.info(f'Deleting the {alias_name} alias...') client = get_client() client.indices.delete_alias('_all', alias_name) class _AliasUpdater: 
"""Helper class for making multiple alias updates atomically.""" def __init__(self): """Initialises the instance with an empty list of pending operations.""" self.actions = [] def associate_indices_with_alias(self, alias_name, index_names): """Adds a pending operation to associate a new or existing alias with a set of indices.""" self.actions.append({ 'add': { 'alias': alias_name, 'indices': list(index_names), }, }) def dissociate_indices_from_alias(self, alias_name, index_names): """Adds a pending operation to dissociate an existing alias from a set of indices.""" self.actions.append({ 'remove': { 'alias': alias_name, 'indices': list(index_names), }, }) def commit(self): """Commits (flushes) pending operations.""" client = get_client() client.indices.update_aliases(body={ 'actions': self.actions, }) self.actions = [] @contextmanager def start_alias_transaction(): """ Returns a context manager that can be used to create and update aliases atomically. Changes are committed when the context manager exits. Usage example: with start_alias_transaction() as alias_transaction: alias_transaction.dissociate_indices_from_alias( 'some-alias', ['an-index', 'another-index], ) alias_transaction.associate_indices_with_alias( 'another-alias', ['new-index], ) """ alias_updater = _AliasUpdater() yield alias_updater alias_updater.commit() def associate_index_with_alias(alias_name, index_name): """ Associates a new or existing alias with an index. This is only intended to be a convenience function for simple operations. For more complex operations, use start_alias_transaction(). """ client = get_client() client.indices.put_alias(index_name, alias_name) def bulk( actions=None, chunk_size=500, max_chunk_bytes=settings.ES_BULK_MAX_CHUNK_BYTES, **kwargs, ): """Send data in bulk to Elasticsearch.""" return es_bulk( get_client(), actions=actions, chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, **kwargs, )
29.049342
98
0.674895
from contextlib import contextmanager from logging import getLogger from django.conf import settings from elasticsearch.helpers import bulk as es_bulk from elasticsearch_dsl import analysis, Index from elasticsearch_dsl.connections import connections logger = getLogger(__name__) lowercase_asciifolding_normalizer = analysis.normalizer( 'lowercase_asciifolding_normalizer', filter=('lowercase', 'asciifolding'), ) trigram = analysis.tokenizer( 'trigram', 'nGram', min_gram=3, max_gram=3, token_chars=('letter', 'digit'), ) special_chars = analysis.char_filter('special_chars', 'mapping', mappings=('-=>',)) trigram_analyzer = analysis.CustomAnalyzer( 'trigram_analyzer', tokenizer=trigram, char_filter=special_chars, filter=('lowercase',), ) space_remover = analysis.token_filter( 'space_remover', type='pattern_replace', pattern=' ', replacement='', ) AREA_REGEX = r'[a-z]{1,2}' DISTRICT_REGEX = r'(?:[0-9][a-z]|[0-9]{1,2})' SECTOR_REGEX = r'[0-9]' UNIT_REGEX = r'[a-z]{2}' postcode_filter = analysis.token_filter( 'postcode_filter', type='pattern_capture', preserve_original=True, patterns=[ rf'^({AREA_REGEX}){DISTRICT_REGEX} {SECTOR_REGEX}{UNIT_REGEX}', # Index postcode district (with sub-district code ignored) # This is so `wc1` query would match `wc1ab` and `wc1a1ab`, but not `wc111ab` # Area + one or two digits rf'^(({AREA_REGEX}[0-9]) {SECTOR_REGEX}{UNIT_REGEX}|' rf'({AREA_REGEX}[0-9]{{2}}) {SECTOR_REGEX}{UNIT_REGEX}|' rf'({AREA_REGEX}[0-9])[a-z]? {SECTOR_REGEX}{UNIT_REGEX})', # Index postcode district (including sub-district) rf'^({AREA_REGEX}{DISTRICT_REGEX}) {SECTOR_REGEX}{UNIT_REGEX}', # Index postcode sector rf'^({AREA_REGEX}{DISTRICT_REGEX} {SECTOR_REGEX}){UNIT_REGEX}', ], ) # Token filter that adds a space to well-formed UK postcodes that don't have one. 
normalise_postcode_filter = analysis.token_filter( 'normalise_postcode_filter', type='pattern_replace', pattern=rf'^' rf'(?<area>{AREA_REGEX})' rf'(?<district>{DISTRICT_REGEX})' rf'(?<sector>{SECTOR_REGEX})' rf'(?<unit>{UNIT_REGEX})' rf'$', replacement=r'${area}${district} ${sector}${unit}', ) postcode_analyzer = analysis.CustomAnalyzer( 'postcode_analyzer_v2', type='custom', tokenizer='keyword', filter=(space_remover, 'lowercase', normalise_postcode_filter, postcode_filter), ) postcode_search_analyzer = analysis.CustomAnalyzer( 'postcode_search_analyzer_v2', type='custom', tokenizer='keyword', filter=('lowercase', normalise_postcode_filter), ) english_possessive_stemmer = analysis.token_filter( 'english_possessive_stemmer', type='stemmer', language='possessive_english', ) english_stemmer = analysis.token_filter( 'english_stemmer', type='stemmer', language='english', ) english_stop = analysis.token_filter( 'english_stop', type='stop', stopwords='_english_', ) english_analyzer = analysis.CustomAnalyzer( 'english_analyzer', tokenizer='standard', filter=[ english_possessive_stemmer, 'lowercase', english_stop, english_stemmer, ], ) ANALYZERS = ( trigram_analyzer, english_analyzer, ) def configure_connection(): connections_default = { 'hosts': [settings.ES_URL], 'verify_certs': settings.ES_VERIFY_CERTS, } connections.configure(default=connections_default) def get_client(): return connections.get_connection() def index_exists(index_name): client = get_client() return client.indices.exists(index_name) def create_index(index_name, mapping, alias_names=()): index = Index(index_name) for analyzer in ANALYZERS: index.analyzer(analyzer) index.settings(**settings.ES_INDEX_SETTINGS) index.mapping(mapping) # hence the empty dict for each alias alias_mapping = {alias_name: {} for alias_name in alias_names} index.aliases(**alias_mapping) index.create() def delete_index(index_name): logger.info(f'Deleting the {index_name} index...') client = get_client() 
client.indices.delete(index_name) def get_indices_for_aliases(*alias_names): client = get_client() alias_to_index_mapping = {alias_name: set() for alias_name in alias_names} index_to_alias_mapping = client.indices.get_alias(name=alias_names) for index_name, index_properties in index_to_alias_mapping.items(): for alias_name in index_properties['aliases']: alias_to_index_mapping[alias_name].add(index_name) return [alias_to_index_mapping[alias_name] for alias_name in alias_names] def get_aliases_for_index(index_name): client = get_client() alias_response = client.indices.get_alias(index=index_name) return alias_response[index_name]['aliases'].keys() def alias_exists(alias_name): client = get_client() return client.indices.exists_alias(name=alias_name) def delete_alias(alias_name): logger.info(f'Deleting the {alias_name} alias...') client = get_client() client.indices.delete_alias('_all', alias_name) class _AliasUpdater: def __init__(self): self.actions = [] def associate_indices_with_alias(self, alias_name, index_names): self.actions.append({ 'add': { 'alias': alias_name, 'indices': list(index_names), }, }) def dissociate_indices_from_alias(self, alias_name, index_names): self.actions.append({ 'remove': { 'alias': alias_name, 'indices': list(index_names), }, }) def commit(self): client = get_client() client.indices.update_aliases(body={ 'actions': self.actions, }) self.actions = [] @contextmanager def start_alias_transaction(): alias_updater = _AliasUpdater() yield alias_updater alias_updater.commit() def associate_index_with_alias(alias_name, index_name): client = get_client() client.indices.put_alias(index_name, alias_name) def bulk( actions=None, chunk_size=500, max_chunk_bytes=settings.ES_BULK_MAX_CHUNK_BYTES, **kwargs, ): return es_bulk( get_client(), actions=actions, chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, **kwargs, )
true
true