id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3279143 |
import exc
from bisect import bisect_right
from functools import reduce
__all__ = ['Source', 'Location', 'merge_locations']
class Source(object):
    """Wrapper around a piece of source text.

    Pre-computes the offset of every line start so positions can be
    translated to 1-based (line, column) pairs via binary search.
    """

    def __init__(self, text, url=None):
        self.text = text
        self.url = url
        # Offsets (0-based) at which each line begins; line 0 starts at 0.
        self.lines = [0]
        newline_at = text.find("\n")
        while newline_at != -1:
            self.lines.append(newline_at + 1)
            newline_at = text.find("\n", newline_at + 1)

    def linecol(self, pos):
        """Translate an absolute offset into a 1-based (line, column) pair."""
        if pos < 0 or pos > len(self.text):
            raise exc.IndexError['sourcepos'](dict(pos = pos,
                                                   source = self,
                                                   length = len(self.text)))
        row = bisect_right(self.lines, pos) - 1
        return (row + 1, pos - self.lines[row] + 1)

    def substring(self, start, end):
        """Return the excerpt of the text between the two offsets."""
        return self.text[start:end]

    def __descr__(self, recurse):
        """Describe this source: bare text, or tagged with its file url."""
        return [self.text] if self.url is None else [{"file"}, self.url, self.text]
class Location(object):
    """
    Location object - meant to represent some code excerpt. It
    contains a pointer to the source and a (start, end) tuple
    representing the extent of the excerpt in the source.
    Methods are provided to get line/columns for the excerpt, raw or
    formatted.
    """
    def __init__(self, source, span, tokens = None):
        """
        Args:
            source: the Source object (anything with linecol/substring).
            span: (start, end) tuple of absolute offsets into the source.
            tokens: optional list of tokens covered by the span.
        """
        self.source = source
        self.span = span
        self.start = span[0]
        self.end = span[1]
        # BUG FIX: the default used to be a shared mutable list (tokens = []),
        # so every Location built without tokens aliased the same list.
        self.tokens = [] if tokens is None else tokens
        self._linecol = None  # lazily-computed cache for linecol()
    def get(self):
        """Return the raw text of the excerpt."""
        return self.source.substring(self.start, self.end)
    def linecol(self):
        """Return ((l1, c1), lc2): 1-based line/column bounds of the excerpt.
        lc2 is None for a zero-length excerpt. Cached after the first call."""
        def helper(source, start, end, promote_zerolength = False):
            end -= 1 # end position is now inclusive
            l1, c1 = source.linecol(start)
            if start > end:
                # Zero-length excerpt: no second endpoint unless promoted.
                return ((l1, c1), (l1, c1) if promote_zerolength else None)
            l2, c2 = source.linecol(end)
            return ((l1, c1), (l2, c2))
        if self._linecol is not None:
            return self._linecol
        self._linecol = helper(self.source, self.start, self.end)
        return self._linecol
    def ref(self):
        """
        Returns a string representing the location of the excerpt. If
        the excerpt is only one character, it will format the location
        as "line:column". If it is on a single line, the format will
        be "line:colstart-colend". Else,
        "linestart:colstart-lineend:colend". In the special case where
        the excerpt is a token not in the source text (e.g. one that
        was inserted by the parser), "<" will be appended to the end.
        """
        ((l1, c1), lc2) = self.linecol()
        if lc2 is not None:
            l2, c2 = lc2
        if lc2 is None or l1 == l2 and c1 == c2:
            return ("%s:%s" % (l1, c1)) + ("<" if lc2 is None else "")
        elif l1 == l2:
            return "%s:%s-%s" % (l1, c1, c2)
        else:
            return "%s:%s-%s:%s" % (l1, c1, l2, c2)
    def change_start(self, n):
        """Return a new Location with the start offset shifted by n."""
        return Location(self.source, (self.start + n, self.end))
    def change_end(self, n):
        """Return a new Location with the end offset shifted by n."""
        return Location(self.source, (self.start, self.end + n))
    def at_start(self):
        """Return the zero-length Location at this excerpt's start."""
        return Location(self.source, (self.start, self.start))
    def at_end(self):
        """Return the zero-length Location at this excerpt's end."""
        return Location(self.source, (self.end, self.end))
    def __add__(self, loc):
        return merge_locations([self, loc])
    def __radd__(self, loc):
        return merge_locations([loc, self])
    # Ordering comparisons look at the start offset only.
    def __gt__(self, loc):
        return loc.start < self.start
    def __lt__(self, loc):
        return loc.start > self.start
    def __ge__(self, loc):
        return loc.start <= self.start
    def __le__(self, loc):
        return loc.start >= self.start
    def __str__(self):
        return self.ref()
    def __repr__(self):
        return self.ref()
    def __descr__(self, recurse):
        return [{"location"},
                recurse(self.source),
                (self.start, self.end, {"hl2"})]
class Locations:
    """A group of locations that share one source, each rendered with a
    highlight class assigned cyclically from ``__hls__``."""

    __hls__ = ["hl1", "hl2", "hl3", "hlE"]

    def __init__(self, locations):
        self.locations = locations

    def get_hl(self, i):
        """Return the highlight class for the i-th location (cycles)."""
        hls = self.__hls__
        return hls[i % len(hls)]

    def __descr__(self, recurse):
        """Describe every span against the (shared) first source."""
        if not self.locations:
            return []
        spans = []
        for index, loc in enumerate(self.locations):
            spans.append((loc.start, loc.end, {self.get_hl(index)}))
        return [{"location"}, recurse(self.locations[0].source)] + spans
def merge_locations(locations):
    """
    Handy function to merge *contiguous* locations. (note: assuming
    that you gave a, b, c in the right order, merge_locations(a, b, c)
    does the same thing as merge_locations(a, c). However, a future
    version of the function might differentiate them, so *don't do
    it*)
    TODO: it'd be nice to have a class for discontinuous locations, so
    that you could highlight two tokens on the same line that are not
    next to each other. Do it if a good use case arise.
    """
    # Drop falsy entries (e.g. None placeholders).
    locations = [loc for loc in locations if loc]
    if not locations:
        # NOTE(review): the source here is a plain string, not a Source
        # instance -- calling .get()/.linecol() on the result would fail;
        # presumably callers treat this as a null location. Confirm.
        return Location("", (0, 0), [])
    # Merged span runs from the earliest start to the latest end, anchored
    # to the first location's source (all are assumed to share one source).
    return Location(source = locations[0].source,
                    span = (min(l.start for l in locations),
                            max(l.end for l in locations)))
| StarcoderdataPython |
31746 | <reponame>embiem/chia-blockchain<gh_stars>1-10
import io
from typing import Any, List, Set
from src.types.sized_bytes import bytes32
from src.util.clvm import run_program, sexp_from_stream, sexp_to_stream
from clvm import SExp
from src.util.hash import std_hash
from clvm_tools.curry import curry
class Program(SExp): # type: ignore # noqa
    """
    A thin wrapper around s-expression data intended to be invoked with "eval".
    """
    def __init__(self, v):
        # Unwrap a nested SExp so we always store the underlying raw value.
        if isinstance(v, SExp):
            v = v.v
        super(Program, self).__init__(v)
    @classmethod
    def parse(cls, f):
        """Deserialize a Program from the binary stream ``f``."""
        return sexp_from_stream(f, cls.to)
    def stream(self, f):
        """Serialize this Program onto the binary stream ``f``."""
        sexp_to_stream(self, f)
    @classmethod
    def from_bytes(cls, blob: bytes) -> Any:
        """Deserialize a Program from its canonical byte encoding."""
        f = io.BytesIO(blob)
        return cls.parse(f)  # type: ignore # noqa
    def __bytes__(self) -> bytes:
        """Return the canonical byte encoding of this Program."""
        f = io.BytesIO()
        self.stream(f)  # type: ignore # noqa
        return f.getvalue()
    def __str__(self) -> str:
        """Hex string of the canonical byte encoding."""
        return bytes(self).hex()
    def _tree_hash(self, precalculated: Set[bytes32]) -> bytes32:
        """
        Hash values in `precalculated` are presumed to have been hashed already.
        """
        if self.listp():
            # Pairs hash as std_hash(0x02 || hash(left) || hash(right)).
            left = self.to(self.first())._tree_hash(precalculated)
            right = self.to(self.rest())._tree_hash(precalculated)
            s = b"\2" + left + right
        else:
            atom = self.as_atom()
            if atom in precalculated:
                # Caller vouches that this atom already is a tree hash.
                return bytes32(atom)
            # Atoms hash as std_hash(0x01 || atom).
            s = b"\1" + atom
        return bytes32(std_hash(s))
    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        return self._tree_hash(set(args))
    def run(self, args) -> "Program":
        """Evaluate this program against ``args``; the reported cost is discarded."""
        prog_args = Program.to(args)
        cost, r = run_program(self, prog_args)
        return Program.to(r)
    def curry(self, *args) -> "Program":
        """Return a new Program with ``args`` pre-bound; cost is discarded."""
        cost, r = curry(self, list(args))
        return Program.to(r)
    def __deepcopy__(self, memo):
        # Round-trip through the byte encoding to produce an independent copy.
        return type(self).from_bytes(bytes(self))
| StarcoderdataPython |
3290960 | <gh_stars>1-10
#!/usr/bin/env python
"""Interactive test of clock impulses on the configured slave pin."""
#External settings
import settings
#External modules
import time

print("Use this to test clock impulses on pin "+str(settings.slavePin)+" (per settings.py)")
if settings.piMode:
    import RPi.GPIO as GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(settings.slavePin, GPIO.OUT)
else:
    print('Please enable piMode in settings.py, if this is indeed running on a Pi.')
    exit()
try:
    print("Type Ctrl+C to exit.")
    impDurLast = 0.2
    while True:
        # BUG FIX: input() returns a string on Python 3 (the original code
        # relied on Python 2's eval-ing input() and caught SyntaxError for
        # an empty line). Parse it explicitly so the numeric comparisons
        # below receive a float.
        raw = input("Duration of impulse in seconds (Enter for "+str(impDurLast)+"): ")
        try:
            impDur = float(raw)
        except ValueError: # empty or non-numeric input -> reuse last value
            impDur = impDurLast
        # Clamp the duration to a safe range.
        if impDur > 1:
            impDur = 1
        if impDur < 0.05:
            impDur = 0.05
        impDurLast = impDur
        print("Waiting then impulsing for "+str(impDur)+" seconds")
        time.sleep(impDur) #so it will crash before setting the pin high, if it's going to crash
        GPIO.output(settings.slavePin, GPIO.HIGH)
        time.sleep(impDur)
        GPIO.output(settings.slavePin, GPIO.LOW)
except AttributeError: #Easier to ask forgiveness than permission (EAFP) - http://stackoverflow.com/a/610923
    print("\r\nAttributeError. Please ensure your settings.py includes all items from settings-sample.py.")
except KeyboardInterrupt:
    print("\r\nBye!")
finally:
    # Always release the GPIO pins, even after an error.
    if settings.piMode:
        GPIO.cleanup()
#end try/except/finally | StarcoderdataPython |
1122 | #! /usr/bin/env python3
"""
constants.py - Contains all constants used by the device manager
Author:
- <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>)
Date: 12/3/2016
"""
# Cluster sizing constants consumed by the device manager.
number_of_rows = 3 # total number of rows of Index Servers
number_of_links = 5 # number of links to be sent to Crawler
number_of_chunks = 5 # number of chunks to be sent to Index Builder
number_of_comps = 10 # number of components managed by each watchdog
| StarcoderdataPython |
3277355 | import pandas as pd
import csv
from sklearn.model_selection import train_test_split
# Path templates for the same-side-classification corpora; {} is the split name.
data_cross_path = '~/Code/data/argmining19-same-side-classification/data/same-side-classification/cross-topic/{}.csv'
data_within_path = '~/Code/data/argmining19-same-side-classification/data/same-side-classification/within-topic/{}.csv'
# The CSVs are fully quoted and backslash-escaped; 'id' is the index column.
cross_traindev_df = pd.read_csv(data_cross_path.format('training'), quotechar='"',quoting=csv.QUOTE_ALL,encoding='utf-8',escapechar='\\',doublequote=False,index_col='id')
# cross_test_df = pd.read_csv(data_cross_path.format('test'), quotechar='"',quoting=csv.QUOTE_ALL,encoding='utf-8',escapechar='\\',doublequote=False,index_col='id')
# NOTE(review): within_traindev_df is loaded but only used by the
# commented-out within-topic split below -- confirm it is still needed.
within_traindev_df = pd.read_csv(data_within_path.format('training'),quotechar='"',quoting=csv.QUOTE_ALL,encoding='utf-8',escapechar='\\',doublequote=False,index_col='id')
# within_test_df = pd.read_csv(data_within_path.format('test'), quotechar='"',quoting=csv.QUOTE_ALL,encoding='utf-8',escapechar='\\',doublequote=False,index_col='id')
# train, test = train_test_split(within_traindev_df, test_size=0.1, shuffle=True)
#
# train.to_csv('~/Code/data/argmining19-same-side-classification/data/same-side-classification/within-topic2/training.csv')
# test.to_csv('~/Code/data/argmining19-same-side-classification/data/same-side-classification/within-topic2/test.csv')
# Hold out 10% of the cross-topic training data as a test split.
# NOTE(review): no random_state is set, so the split is not reproducible.
train, test = train_test_split(cross_traindev_df, test_size=0.1, shuffle=True)
train.to_csv('~/Code/data/argmining19-same-side-classification/data/same-side-classification/cross-topic2/training.csv')
test.to_csv('~/Code/data/argmining19-same-side-classification/data/same-side-classification/cross-topic2/test.csv')
| StarcoderdataPython |
180366 | <filename>task2.py
from random import randint
def main():
    """Pick a random inclusive range, then prompt until the user enters an
    integer inside it."""
    min_value = 1
    max_value = 0
    # Re-draw the endpoints until they form a non-empty range.
    while min_value > max_value:
        min_value = randint(0, 150)
        max_value = randint(75, 200)
    while True:
        user_input = input(f'Input number in range {min_value} - {max_value}\n')
        try:
            n = int(user_input)
            if n < min_value:
                print(f'Input number is lesser than min value!!!\n')
            elif n > max_value:
                print(f'Input number is greater than max value!!!\n')
            else:
                # Valid input: stop prompting. (The original stored the
                # value in an unused local ``input_value`` before breaking.)
                break
        except ValueError:
            print("Input is in wrong format!!!\n")
if __name__ == '__main__':
main()
| StarcoderdataPython |
3339412 | <filename>news/urls.py
from django.urls import path
from .views import NewsListView,SportsListView,EconomyListView,PoliticsListView,LifestyleListView,EntertainmentListView
from . import views
urlpatterns = [
    # Scraper endpoints.
    # NOTE(review): scrape..scrape5 presumably each target a different
    # source/category -- confirm against views.py.
    path('scrape/', views.scrape, name="scrape"),
    path('scrape1/', views.scrape1, name="scrape1"),
    path('scrape2/', views.scrape2, name="scrape2"),
    path('scrape3/', views.scrape3, name="scrape3"),
    path('scrape4/', views.scrape4, name="scrape4"),
    path('scrape5/', views.scrape5, name="scrape5"),
    # Per-category list views (class-based).
    path('getnews/', NewsListView.as_view(), name='home'),
    path('geteconomynews/', EconomyListView.as_view(), name='economy_home'),
    path('getsportsnews/', SportsListView.as_view(), name='sports_home'),
    path('getpoliticsnews/', PoliticsListView.as_view(), name='politics_home'),
    path('getlifestylenews/', LifestyleListView.as_view(), name='lifestyle_home'),
    path('getentertainmentnews/', EntertainmentListView.as_view(), name='entertainment_home'),
    # Misc pages.
    path('menu/', views.menu_list, name='menu'),
    path('', views.home1, name="starter"),
] | StarcoderdataPython |
1775032 | <reponame>RaitzeR/VAL
import random, sys, math, pygame
from pygame.locals import *
from Helpers.Helpers import *
from Helpers.IntersectingLineDetection import *
from Robot.Robot import Robot
from Robot.Sensors.Simulated.Ultrasonic import Ultrasonic
from Robot.Sensors.Simulated.Hall import Hall
from Robot.Sensors.Simulated.Magnetometer import Magnetometer
from Environment.Environment import Environment
from Helpers.Colors import getColor
from Handlers.EventHandler import TerminateHandler,RobotActionHandler
from Render.RobotViewRenderer import RobotViewRenderer
from Render.EnvironmentRenderer import EnvironmentRenderer
from Render.RandomRenderer import RandomRenderer
from Render.Camera import Camera
from Render.Display import Display
#1 pixel is 1cm
FPS = 30 #Frames per second to update the screen
WINWIDTH = 1000 #Window width
WINHEIGHT = 480 #Window height
ROBOSIZE = 25 #Diameter of the robot in CM
HALF_WINWIDTH = int(WINWIDTH / 2)
HALF_WINHEIGHT = int(WINHEIGHT / 2)
CAMERA_SLACK = 90
SCAN_ROTATION_SPEED = 100 #how many milliseconds does it take to turn 1 degree
FRICTION = 2
def main() -> None:
    """Initialize pygame, create the shared clock/display, and run the
    simulation loop."""
    global FPSCLOCK, DISPLAY
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAY = Display(WINWIDTH,WINHEIGHT)
    pygame.display.set_caption('ScanSimulator')
    # Restart the simulation whenever runSimulation() returns.
    while True:
        runSimulation()
def runSimulation() -> None:
    """Build the environment, robot, sensors and renderers, then run the
    per-frame event/render loop until the user quits (terminate() exits
    the process)."""
    camera = Camera(CAMERA_SLACK)
    environment = Environment()
    environment.buildWalls()
    robot = Robot(ROBOSIZE,HALF_WINWIDTH,HALF_WINHEIGHT)
    ##Init Renderers
    environmentRenderer = EnvironmentRenderer(environment,camera,DISPLAY)
    randomRenderer = RandomRenderer(environment,camera,robot,DISPLAY)
    robotViewRenderer = RobotViewRenderer(robot,camera,DISPLAY)
    ##
    ##Add sensors to the robot
    # NOTE(review): 350 is presumably the ultrasonic range in cm
    # (1 pixel == 1 cm per the header comment) -- confirm.
    ultrasonic = Ultrasonic(350)
    robot.attachUltrasonic(ultrasonic)
    hall = Hall()
    robot.attachHall(hall)
    magnetometer = Magnetometer()
    robot.attachMagnetometer(magnetometer)
    ##
    ##Add robot to the environment
    environment.addRobot(robot)
    ##
    while True:
        camera.adjustCamera(DISPLAY,robot)
        ## Render Stuff
        environmentRenderer.render(getColor('white'),getColor('white'),getColor('red'))
        randomRenderer.render()
        robotViewRenderer.render(getColor('blue'),getColor('white'),getColor('black'),True)
        ##
        ##Handle key presses
        for event in pygame.event.get():
            # RobotActionHandler maps one pygame event to the robot's
            # movement/rotation/servo flags plus a scan request flag.
            robot.moveDown,robot.moveUp,robot.rotateRight,robot.rotateLeft,robot.rotate,doScan,robot.servoLeft,robot.servoRight = RobotActionHandler(event,robot)
            doTerminate = TerminateHandler(event)
            if doTerminate:
                terminate()
            if doScan:
                robot.scan(environment)
        ##
        ##handle robot movement
        robot.handleMovementCommands()
        environment.triggerRobotRotation()
        environment.triggerRobotMovement()
        ##
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def terminate() -> None:
    """Shut pygame down and exit the process."""
    pygame.quit()
    sys.exit()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1753227 | from flask import Flask, render_template, request
import plotly
import plotly.graph_objs as go
import plotly.express as px
import json
import io
import base64
import sys
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# sys.path.append('/Desktop/myproject')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
app = Flask(__name__)
sns.set_style(style='dark')
sns.color_palette("rocket")
@app.route("/")
def homepage():
return 'Go to /home'
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/home')
def imlucky():
    """Dashboard view: headline totals, global and per-country trend charts
    (rendered server-side to base64 PNG strings) plus a plotly choropleth."""
    # Worldwide cumulative numbers, one row per day; last row is the latest.
    data = pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/master/data/worldwide-aggregate.csv')
    deaths = np.array(data['Deaths'].values)
    confirmed = np.array(data['Confirmed'].values)
    basic_data = { 'dead' :deaths[-1], 'conf' : confirmed[-1] }
    graph_data = pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv')
    graph_data['Date'] = pd.to_datetime(graph_data['Date'].astype(str), format = '%Y-%m-%d')
    # Aggregate all countries per day, then scale to millions for the y axis.
    global_graph = graph_data.groupby(['Date'])['Confirmed','Recovered','Deaths'].sum()
    global_graph.reset_index(inplace = True)
    global_graph[['Confirmed','Recovered','Deaths']] = (global_graph[['Confirmed','Recovered','Deaths']].values)/1e6
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    axis.set_title("Global Spread of the Virus")
    axis.set_xlabel("Time")
    axis.set_ylabel("Cases (in mil)")
    sns.lineplot(x = 'Date', y = 'Confirmed' , data = global_graph, ax =axis)
    sns.lineplot(x = 'Date', y = 'Deaths' , data = global_graph, ax =axis, color = 'red')
    sns.lineplot(x = 'Date', y = 'Recovered' , data = global_graph, ax =axis, color = 'green')
    axis.legend(['confirmed', 'deaths', 'recovered'], loc = 'upper left')
    #Convert plot to PNG image
    pngImage = io.BytesIO()
    FigureCanvas(fig).print_png(pngImage)
    # Encode PNG image to base64 string
    pngImageB64String = "data:image/png;base64,"
    pngImageB64String += base64.b64encode(pngImage.getvalue()).decode('utf8')
    # Ten highlighted countries; everything else is folded into 'Other'
    # and then dropped for the per-country chart.
    top_c = ['US', 'India', 'Brazil', 'Russia', 'Colombia', 'Peru', 'Mexico',
       'South Africa', 'Spain', 'Argentina']
    countries = graph_data.groupby(['Date', 'Country'])['Confirmed','Recovered','Deaths'].sum()
    countries.reset_index(inplace = True)
    countries['Country'] = [x if x in top_c else 'Other' for x in countries['Country']]
    top_countries = countries[countries['Country']!= 'Other']
    # Scale to hundreds of thousands for the per-country chart.
    top_countries[['Confirmed','Recovered','Deaths']] = (top_countries[['Confirmed','Recovered','Deaths']].values)/1e5
    fig1 = Figure()
    axis = fig1.add_subplot(1, 1, 1)
    axis.set_title("Country-wise Spread of the Virus")
    axis.set_xlabel("Time")
    axis.set_ylabel("Cases (in 100k)")
    axis.legend(loc = 'upper left')
    sns.lineplot(x = 'Date', y = 'Confirmed' , hue = 'Country', hue_order = top_c, palette = 'husl', data = top_countries, estimator = 'sum', ax = axis)
    #Convert plot to PNG image
    pngImage = io.BytesIO()
    FigureCanvas(fig1).print_png(pngImage)
    # Encode PNG image to base64 string
    pngImageB64String1 = "data:image/png;base64,"
    pngImageB64String1 += base64.b64encode(pngImage.getvalue()).decode('utf8')
    # Choropleth JSON (data + layout) for the embedded plotly map.
    bar, scatter = create_plot('conf')
    return render_template('index.html', basic = basic_data, image = pngImageB64String, image1 = pngImageB64String1, plot = {'data': bar,'layout': scatter })
def create_plot(feature):
    """Build the plotly choropleth for the latest day of data.

    Args:
        feature: 'conf' for confirmed cases, 'dead' for deaths; any other
            value falls back to a Viridis-coloured deaths map.
    Returns:
        (dataJSON, layoutJSON): the figure's data and layout serialized
        with the plotly JSON encoder.
    """
    counties = json.load(open('world-countries.json', 'r'))
    graph_data = pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/master/data/time-series-19-covid-combined.csv')
    graph_data['Date'] = pd.to_datetime(graph_data['Date'].astype(str), format = '%Y-%m-%d')
    # Keep only the most recent day of data.
    df = graph_data[graph_data['Date']== max(graph_data['Date'])]
    df['Country'] = df['Country/Region']
    df = df.drop(['Province/State', 'Country/Region'], axis = 1)
    # Map country names to geojson feature ids.
    # BUG FIX: the loop variable was previously also named ``feature``,
    # clobbering the function parameter so the branches below never matched.
    state_map_id = {}
    for geo_feature in counties['features']:
        geo_feature['iso_code'] = geo_feature['id']
        state_map_id[geo_feature['properties']['name']] = geo_feature['iso_code']
    # BUG FIX: this previously built a list of lambda objects instead of the
    # looked-up codes; unknown countries now map to None.
    df['iso_code'] = [state_map_id.get(x) for x in df['Country']]
    if feature == 'conf':
        data = [px.choropleth(
            data_frame=df, locations='Country', locationmode='country names',
            geojson=counties, featureidkey='iso_code' , color='Confirmed', color_continuous_scale=px.colors.sequential.Plasma,
            projection='equirectangular' )
        ]
    elif feature == 'dead':
        data = [px.choropleth(
            data_frame=df, locations='Country', locationmode='country names',
            geojson=counties, featureidkey='iso_code' , color='Deaths', color_continuous_scale=px.colors.sequential.Plasma,
            projection='equirectangular' )
        ]
    else:
        fig = px.choropleth(
            data_frame=df, locations='Country', locationmode='country names',
            geojson=counties, featureidkey='iso_code' , color='Deaths', color_continuous_scale="Viridis",
            projection='equirectangular' )
        fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
        data = [fig]
    dataJSON = data[0]['data']
    layoutJSON = data[0]['layout']
    dataJSON = json.dumps(dataJSON, cls=plotly.utils.PlotlyJSONEncoder)
    layoutJSON = json.dumps(layoutJSON, cls=plotly.utils.PlotlyJSONEncoder)
    return dataJSON, layoutJSON
@app.route('/bar', methods=['GET', 'POST'])
def change_features():
    """AJAX endpoint: rebuild the choropleth for the selected feature."""
    feature = request.args.get('selected')
    dataJSON, layoutJSON = create_plot(feature)
    # NOTE(review): returning a 2-tuple makes Flask treat the second element
    # as the HTTP status code; presumably a single JSON payload was intended
    # -- confirm against the front-end JavaScript.
    return dataJSON, layoutJSON
if '__main__' == __name__ :
app.run(host= 'localhost', port=8000, debug= True)
| StarcoderdataPython |
3240284 | <filename>src/gedml/launcher/trainers/__init__.py
"""
This module takes charge of training.
"""
from .base_trainer import BaseTrainer | StarcoderdataPython |
3222941 | from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
import json
class LobbyConsumer(WebsocketConsumer):
    """Websocket consumer for the shared 'lobby' room: fans each received
    frame out to the 'chat_lobby' channel-layer group and relays group
    events back down the socket."""
    def connect(self):
        self.room_name = 'lobby'
        self.room_group_name = 'chat_%s' % self.room_name
        # Join room group
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()
    def disconnect(self, close_code):
        # Leave room group
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )
    # Receive message from WebSocket
    def receive(self, text_data):
        """Parse a client JSON frame and broadcast it to the room group."""
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        name = text_data_json['name']
        colour = text_data_json['colour']
        prev_name = text_data_json['prev']
        # Send message to room group
        async_to_sync(self.channel_layer.group_send)(
            self.room_group_name,
            {
                'type': 'chat_message', # dispatched to chat_message() below
                'message': message,
                'name': name,
                'colour': colour,
                'prev': prev_name
            }
        )
    # Receive message from room group
    def chat_message(self, event):
        """Relay a group event down this consumer's websocket."""
        message = event['message']
        name = event['name']
        colour = event['colour']
        prev_name = event['prev']
        # Send message to WebSocket
        self.send(text_data=json.dumps({
            'name': name,
            'colour': colour,
            'message': message,
            'prev': prev_name
        }))
class ChatConsumer(WebsocketConsumer):
    """Websocket consumer for a per-channel room; the room id comes from the
    'channel_id' URL kwarg. Same relay behaviour as LobbyConsumer."""
    def connect(self):
        self.room_name = self.scope['url_route']['kwargs']['channel_id']
        self.room_group_name = 'chat_%s' % self.room_name
        # Join room group
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()
    def disconnect(self, close_code):
        # Leave room group
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )
    # Receive message from WebSocket
    def receive(self, text_data):
        """Parse a client JSON frame and broadcast it to the room group."""
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        name = text_data_json['name']
        colour = text_data_json['colour']
        prev_name = text_data_json['prev']
        # Send message to room group
        async_to_sync(self.channel_layer.group_send)(
            self.room_group_name,
            {
                'type': 'chat_message', # dispatched to chat_message() below
                'message': message,
                'name': name,
                'colour': colour,
                'prev': prev_name
            }
        )
    # Receive message from room group
    def chat_message(self, event):
        """Relay a group event down this consumer's websocket."""
        message = event['message']
        name = event['name']
        colour = event['colour']
        prev_name = event['prev']
        # Send message to WebSocket
        self.send(text_data=json.dumps({
            'name': name,
            'colour': colour,
            'message': message,
            'prev': prev_name
})) | StarcoderdataPython |
3386477 | #
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
Mesos network manager
"""
# Standard library import
import gevent
import sys
from gevent.queue import Queue
# Application library import
import common.args as mesos_args
import common.logger as logger
from cfgm_common import vnc_cgitb
import vnc.vnc_mesos as vnc_mesos
import mesos_server as mserver
class MesosNetworkManager(object):
    '''Starts all background process'''
    # Singleton instance, managed via get_instance()/destroy_instance().
    _mesos_network_manager = None
    def __init__(self, args=None, mesos_api_connected=False, queue=None):
        # NOTE(review): ``mesos_api_connected`` is accepted but never used
        # here -- confirm whether it is still needed.
        self.args = args
        # The server side produces events onto this queue; the VNC side
        # consumes them.
        if queue:
            self.queue = queue
        else:
            self.queue = Queue()
        self.logger = logger.MesosManagerLogger(args)
        self.vnc = vnc_mesos.VncMesos(args=self.args,
                                      logger=self.logger,
                                      queue=self.queue)
        self.mserver = mserver.MesosServer(args=self.args,
                                           logger=self.logger,
                                           queue=self.queue)
    # end __init__
    def start_tasks(self):
        """Spawn the VNC processor and the Mesos server as greenlets and
        block until both finish."""
        self.logger.info("Starting all tasks.")
        gevent.joinall([
            gevent.spawn(self.vnc.vnc_process),
            gevent.spawn(self.mserver.start_server),
        ])
    # end start_tasks
    def reset(self):
        # NOTE(review): ``DBBaseMM`` is neither defined nor imported in this
        # module, so calling reset() would raise NameError -- confirm the
        # intended import.
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()
    @classmethod
    def get_instance(cls):
        """Return the singleton instance (None if not created yet)."""
        return MesosNetworkManager._mesos_network_manager
    @classmethod
    def destroy_instance(cls):
        """Tear down and forget the singleton instance, if any."""
        inst = cls.get_instance()
        if inst is None:
            return
        inst.vnc = None
        # NOTE(review): __init__ names this attribute ``queue``, not ``q``;
        # this assignment creates a new attribute instead -- confirm intent.
        inst.q = None
        MesosNetworkManager._mesos_network_manager = None
# end class MesosNetworkManager
def main(args_str=None, mesos_api_skip=False, event_queue=None):
    """Entry point: enable tracebacks, parse args, build the singleton
    manager and run its tasks (blocks)."""
    vnc_cgitb.enable(format='text')
    args = mesos_args.parse_args(args_str)
    mesos_nw_mgr = MesosNetworkManager(args,
                                       mesos_api_connected=mesos_api_skip,
                                       queue=event_queue)
    # Register the instance for get_instance() before starting the loops.
    MesosNetworkManager._mesos_network_manager = mesos_nw_mgr
    mesos_nw_mgr.start_tasks()
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
3202000 | print("blah blah blah\n")
#This is a comment
| StarcoderdataPython |
4805040 | <filename>test/integration_test.py
import unittest
import luigi
from netCDF4 import Dataset
from iasi import DecompressDataset
from iasi.file import MoveVariables
from test_precision import TestCompareDecompressionResult
class IntegrationTest(TestCompareDecompressionResult):
    """End-to-end test: run the luigi (de)compression pipeline on a sample
    IASI granule and expose the two resulting datasets to the inherited
    comparison tests."""
    @classmethod
    def setUpClass(cls):
        # Sample granule; must exist in the test environment.
        file = '/tmp/data/METOPA_20160625001453_50240_20190209151722.nc'
        compression = DecompressDataset(
            file=file,
            dst='/tmp/iasi',
            force_upstream=True,
            compress_upstream=True
        )
        uncompressed = MoveVariables(
            file=file,
            dst='/tmp/iasi',
            force_upstream=True
        )
        # Run both tasks synchronously; fail fast if the pipeline breaks.
        assert luigi.build([compression, uncompressed], local_scheduler=True)
        cls.compressed = Dataset(compression.output().path)
        cls.uncompressed = Dataset(uncompressed.output().path)
| StarcoderdataPython |
4817792 | import numpy as np
import urllib.request, json, time, os, copy, sys
from scipy.optimize import linprog
global penguin_url
penguin_url = 'https://penguin-stats.io/PenguinStats/api/'
class MaterialPlanning(object):
def __init__(self,
filter_freq=20,
filter_stages=[],
url_stats='result/matrix?show_stage_details=true&show_item_details=true',
url_rules='formula',
path_stats='data/matrix.json',
path_rules='data/formula.json'):
"""
Object initialization.
Args:
filter_freq: int or None. The lowest frequence that we consider.
No filter will be applied if None.
url_stats: string. url to the dropping rate stats data.
url_rules: string. url to the composing rules data.
path_stats: string. local path to the dropping rate stats data.
path_rules: string. local path to the composing rules data.
"""
try:
material_probs, convertion_rules = load_data(path_stats, path_rules)
except:
print('Requesting data from web resources (i.e., penguin-stats.io)...', end=' ')
material_probs, convertion_rules = request_data(penguin_url+url_stats, penguin_url+url_rules, path_stats, path_rules)
print('done.')
if filter_freq:
filtered_probs = []
for dct in material_probs['matrix']:
if dct['times']>=filter_freq and dct['stage']['code'] not in filter_stages:
filtered_probs.append(dct)
material_probs['matrix'] = filtered_probs
self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))
def _pre_processing(self, material_probs, convertion_rules):
"""
Compute costs, convertion rules and items probabilities from requested dictionaries.
Args:
material_probs: List of dictionaries recording the dropping info per stage per item.
Keys of instances: ["itemID", "times", "itemName", "quantity", "apCost", "stageCode", "stageID"].
convertion_rules: List of dictionaries recording the rules of composing.
Keys of instances: ["id", "name", "level", "source", "madeof"].
"""
# To count items and stages.
additional_items = {'30135': u'D32钢', '30125': u'双极纳米片', '30115': u'聚合剂'}
item_dct = {}
stage_dct = {}
for dct in material_probs['matrix']:
item_dct[dct['item']['itemId']]=dct['item']['name']
stage_dct[dct['stage']['code']]=dct['stage']['code']
item_dct.update(additional_items)
# To construct mapping from id to item names.
item_array = []
item_id_array = []
for k,v in item_dct.items():
try:
float(k)
item_array.append(v)
item_id_array.append(k)
except:
pass
self.item_array = np.array(item_array)
self.item_id_array = np.array(item_id_array)
self.item_dct_rv = {v:k for k,v in enumerate(item_array)}
# To construct mapping from stage id to stage names and vice versa.
stage_array = []
for k,v in stage_dct.items():
stage_array.append(v)
self.stage_array = np.array(stage_array)
self.stage_dct_rv = {v:k for k,v in enumerate(self.stage_array)}
# To format dropping records into sparse probability matrix
probs_matrix = np.zeros([len(stage_array), len(item_array)])
cost_lst = np.zeros(len(stage_array))
for dct in material_probs['matrix']:
try:
float(dct['item']['itemId'])
probs_matrix[self.stage_dct_rv[dct['stage']['code']], self.item_dct_rv[dct['item']['name']]] = dct['quantity']/float(dct['times'])
cost_lst[self.stage_dct_rv[dct['stage']['code']]] = dct['stage']['apCost']
except:
pass
cost_lst[self.stage_dct_rv['S4-6']] -= 3228 * 0.004
# To build equavalence relationship from convert_rule_dct.
self.convertions_dct = {}
convertion_matrix = []
convertion_outc_matrix = []
convertion_cost_lst = []
for rule in convertion_rules:
convertion = np.zeros(len(self.item_array))
convertion[self.item_dct_rv[rule['name']]] = 1
comp_dct = {comp['name']:comp['count'] for comp in rule['costs']}
self.convertions_dct[rule['name']] = comp_dct
for iname in comp_dct:
convertion[self.item_dct_rv[iname]] -= comp_dct[iname]
convertion_matrix.append(copy.deepcopy(convertion))
outc_dct = {outc['name']:outc['count'] for outc in rule['extraOutcome']}
outc_wgh = {outc['name']:outc['weight'] for outc in rule['extraOutcome']}
weight_sum = float(sum(outc_wgh.values()))
for iname in outc_dct:
convertion[self.item_dct_rv[iname]] += outc_dct[iname]*0.175*outc_wgh[iname]/weight_sum
convertion_outc_matrix.append(convertion)
convertion_cost_lst.append(rule['goldCost']*0.004)
convertion_matrix = np.array(convertion_matrix)
convertion_outc_matrix = np.array(convertion_outc_matrix)
convertion_cost_lst = np.array(convertion_cost_lst)
return convertion_matrix, convertion_outc_matrix, convertion_cost_lst, probs_matrix, cost_lst
    def _set_lp_parameters(self, convertion_matrix,
                                 convertion_outc_matrix,
                                 convertion_cost_lst,
                                 probs_matrix, cost_lst):
        """
        Store the matrices/costs and build the combined LP coefficients.
        Args:
            convertion_matrix: matrix of shape [n_rules, n_items].
                Each row represent a rule.
            convertion_outc_matrix: like convertion_matrix, but including
                the expected extra outcome of composing.
            convertion_cost_lst: list. Cost in equal value to the currency spent in convertion.
            probs_matrix: sparse matrix of shape [n_stages, n_items].
                Items per clear (probabilities) at each stage.
            cost_lst: list. Costs per clear at each stage.
        """
        self.convertion_matrix = convertion_matrix
        self.convertion_outc_matrix = convertion_outc_matrix
        self.convertion_cost_lst = convertion_cost_lst
        self.probs_matrix = probs_matrix
        self.cost_lst = cost_lst
        # Sanity checks: one cost per stage/rule, consistent item dimension.
        assert len(self.probs_matrix)==len(self.cost_lst)
        assert len(self.convertion_matrix)==len(self.convertion_cost_lst)
        assert self.probs_matrix.shape[1]==self.convertion_matrix.shape[1]
        # Stages and convertion rules are treated uniformly as "actions":
        # stack them into single cost/outcome arrays for the LP solver.
        self.equav_cost_lst = np.hstack([cost_lst, convertion_cost_lst])
        self.equav_matrix = np.vstack([probs_matrix, convertion_matrix])
        self.equav_matrix_outc = np.vstack([probs_matrix, convertion_outc_matrix])
def update(self,
filter_freq=20,
filter_stages=[],
url_stats='result/matrix?show_stage_details=true&show_item_details=true',
url_rules='formula',
path_stats='data/matrix.json',
path_rules='data/formula.json'):
"""
To update parameters when probabilities change or new items added.
Args:
url_stats: string. url to the dropping rate stats data.
url_rules: string. url to the composing rules data.
path_stats: string. local path to the dropping rate stats data.
path_rules: string. local path to the composing rules data.
"""
print('Requesting data from web resources (i.e., penguin-stats.io)...', end=' ')
material_probs, convertion_rules = request_data(penguin_url+url_stats, penguin_url+url_rules, path_stats, path_rules)
print('done.')
if filter_freq:
filtered_probs = []
for dct in material_probs['matrix']:
if dct['times']>=filter_freq and dct['stage']['code'] not in filter_stages:
filtered_probs.append(dct)
material_probs['matrix'] = filtered_probs
self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))
def _get_plan_no_prioties(self, demand_lst, outcome=False):
"""
To solve linear programming problem without prioties.
Args:
demand_lst: list of materials demand. Should include all items (zero if not required).
Returns:
strategy: list of required clear times for each stage.
fun: estimated total cost.
"""
A_ub = self.equav_matrix_outc if outcome else self.equav_matrix
excp_factor = 1.0
dual_factor = 1.0
while excp_factor>1e-5:
solution = linprog(c=np.array(self.equav_cost_lst),
A_ub=-A_ub.T,
b_ub=-np.array(demand_lst)*excp_factor,
method='interior-point')
if solution.status != 4:
break
excp_factor /= 10.0
while dual_factor>1e-5:
dual_solution = linprog(c=-np.array(demand_lst)*excp_factor*dual_factor,
A_ub=A_ub,
b_ub=np.array(self.equav_cost_lst),
method='interior-point')
if solution.status != 4:
break
dual_factor /= 10.0
return solution, dual_solution.x, excp_factor
def get_plan(self, requirement_dct, deposited_dct={}, print_output=True, prioty_dct=None, outcome=False):
"""
User API. Computing the material plan given requirements and owned items.
Args:
requirement_dct: dictionary. Contain only required items with their numbers.
deposit_dct: dictionary. Contain only owned items with their numbers.
"""
status_dct = {0: 'Optimization terminated successfully. ',
1: 'Iteration limit reached. ',
2: 'Problem appears to be infeasible. ',
3: 'Problem appears to be unbounded. ',
4: 'Numerical difficulties encountered.'}
demand_lst = np.zeros(len(self.item_array))
for k, v in requirement_dct.items():
demand_lst[self.item_dct_rv[k]] = v
for k, v in deposited_dct.items():
demand_lst[self.item_dct_rv[k]] -= v
stt = time.time()
solution, dual_solution, excp_factor = self._get_plan_no_prioties(demand_lst, outcome)
correction_factor = 1/excp_factor
x, cost, status = solution.x*correction_factor, solution.fun*correction_factor, solution.status
n_looting = x[:len(self.cost_lst)]
n_convertion = x[len(self.cost_lst):]
if print_output:
print(status_dct[status]+(' Computed in %.4f seconds,' %(time.time()-stt)))
if status != 0:
raise ValueError(status_dct[status])
stages = []
for i,t in enumerate(n_looting):
if t >= 0.1:
target_items = np.where(self.probs_matrix[i]>=0.05)[0]
items = {self.item_array[idx]: float2str(self.probs_matrix[i, idx]*t)
for idx in target_items if len(self.item_id_array[idx])==5}
stage = {
"stage": self.stage_array[i],
"count": float2str(t),
"items": items
}
stages.append(stage)
syntheses = []
for i,t in enumerate(n_convertion):
if t >= 0.1:
target_item = self.item_array[np.argmax(self.convertion_matrix[i])]
materials = { k: str(v*int(t+0.9)) for k,v in self.convertions_dct[target_item].items() }
synthesis = {
"target": target_item,
"count": str(int(t+0.9)),
"materials": materials
}
syntheses.append(synthesis)
elif t >= 0.01:
target_item = self.item_array[np.argmax(self.convertion_matrix[i])]
materials = { k: '%.1f'%(v*t) for k,v in self.convertions_dct[target_item].items() }
synthesis = {
"target": target_item,
"count": '%.1f'%t,
"materials": materials
}
syntheses.append(synthesis)
values = []
for i,item in enumerate(self.item_array):
if len(self.item_id_array[i])==5:
item_value = {
"item": item,
"value": '%.2f'%dual_solution[i]
}
values.append(item_value)
res = {
"cost": int(cost),
"stages": stages,
"syntheses": syntheses
}
if print_output:
print('Estimated total cost', res['cost'])
print('Loot at following stages:')
for stage in stages:
display_lst = [k + '(%s) '%stage['items'][k] for k in stage['items']]
print('Stage ' + stage['stage'] + '(%s times) ===> '%stage['count']
+ ', '.join(display_lst))
print('Synthesize following items:')
for synthesis in syntheses:
display_lst = [k + '(%s) '%synthesis['materials'][k] for k in synthesis['materials']]
print(synthesis['target'] + '(%s) <=== '%synthesis['count']
+ ', '.join(display_lst))
print('Items Values:')
for value in values:
print(value['item'] + ': ' + value['value'])
return res
def Cartesian_sum(arr1, arr2):
    """
    Row-wise Cartesian sum: for every row of arr1, broadcast-add arr2 and
    stack the results vertically.
    Args:
        arr1: 2-D ndarray of shape (n, k).
        arr2: 2-D ndarray of shape (m, k).
    Returns:
        ndarray of shape (n*m, k) where block i holds arr1[i] + arr2.
    """
    # Comprehension replaces the manual append loop; behavior is unchanged.
    return np.vstack([row + arr2 for row in arr1])
def float2str(x, offset=0.5):
    """Format a count: one decimal place below 1.0, otherwise an integer
    obtained by adding *offset* and truncating."""
    if x < 1.0:
        return '%.1f' % x
    return '%d' % int(x + offset)
def request_data(url_stats, url_rules, save_path_stats, save_path_rules):
    """
    To request probability and convertion rules from web resources and store at local.
    Args:
        url_stats: string. url to the dropping rate stats data.
        url_rules: string. url to the composing rules data.
        save_path_stats: string. local path for storing the stats data.
        save_path_rules: string. local path for storing the composing rules data.
    Returns:
        material_probs: dictionary. Content of the stats json file.
        convertion_rules: dictionary. Content of the rules json file.
    """
    # Create the target directories if needed. exist_ok replaces the former
    # bare "except: pass", which also swallowed unrelated OS errors, and
    # makedirs handles nested paths that os.mkdir could not create.
    for path in (save_path_stats, save_path_rules):
        directory = os.path.dirname(path)
        if directory:  # a bare filename has no directory component
            os.makedirs(directory, exist_ok=True)
    with urllib.request.urlopen(url_stats) as url:
        material_probs = json.loads(url.read().decode())
        with open(save_path_stats, 'w') as outfile:
            json.dump(material_probs, outfile)
    with urllib.request.urlopen(url_rules) as url:
        convertion_rules = json.loads(url.read().decode())
        with open(save_path_rules, 'w') as outfile:
            json.dump(convertion_rules, outfile)
    return material_probs, convertion_rules
def load_data(path_stats, path_rules):
    """
    Load the drop-rate stats and composing rules from local JSON files.
    Args:
        path_stats: string. local path to the stats data.
        path_rules: string. local path to the composing rules data.
    Returns:
        material_probs: dictionary. Content of the stats json file.
        convertion_rules: dictionary. Content of the rules json file.
    """
    def _read_json(path):
        # Parse a single JSON document from disk.
        with open(path) as handle:
            return json.load(handle)
    return _read_json(path_stats), _read_json(path_rules)
| StarcoderdataPython |
1749244 | from pywps import Process, LiteralInput, LiteralOutput
from pywps.app.Common import Metadata
class Nap(Process):
    """Demo WPS process that naps in four steps; synchronous requests only."""

    def __init__(self):
        # Declare the single float input and single string output up front.
        delay_input = LiteralInput('delay', 'Delay between every update',
                                   default='1', data_type='float')
        nap_output = LiteralOutput('output', 'Nap Output', data_type='string')
        super(Nap, self).__init__(
            self._handler,
            identifier='nap',
            version='1.0',
            title='Afternoon Nap (supports sync calls only)',
            abstract='This process will have a short nap for a given delay or 1 second if not a valid value.\
            This procces only supports synchronous WPS requests ... \
            so, make sure the nap does not take to long.',
            profile='',
            metadata=[
                Metadata('Birdhouse', 'http://bird-house.github.io/'),
                Metadata('User Guide', 'http://emu.readthedocs.io/en/latest/')],
            inputs=[delay_input],
            outputs=[nap_output],
            store_supported=False,
            status_supported=False
        )

    @staticmethod
    def _handler(request, response):
        import time
        raw_delay = request.inputs['delay'][0].data
        # Fall back to a one-second nap when no usable delay was supplied.
        nap_delay = float(raw_delay) if raw_delay else 1
        response.update_status('PyWPS Process started.', 0)
        time.sleep(nap_delay)
        # Same status/sleep sequence as before, expressed as a loop.
        for percent in (25, 50, 75):
            response.update_status('PyWPS Process started. Waiting...', percent)
            time.sleep(nap_delay)
        response.outputs['output'].data = 'done sleeping'
        response.update_status('PyWPS Process completed.', 100)
        return response
| StarcoderdataPython |
184153 | from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import sys
import codecs
import re
#remove empty lines
#tokenize and remove stopwords
def preprocess(doc):
    """Preprocess a document: tokenize words, lowercase, remove stopwords,
    non-alphabetic characters and empty/too-short tokens; return the
    remaining tokens joined by single spaces."""
    # A set gives O(1) stopword membership tests instead of scanning a list
    # for every token.
    sw = set(stopwords.words('english'))
    # Stopwords collapse to "" so the filter below drops them together with
    # tokens that were punctuation-only.
    tokenized = [re.sub('[^A-Za-z0-9]+', '', word.lower()) if word.lower() not in sw else "" for word in word_tokenize(doc)] #nltk tokenizer
    specialChars = ["",'', ' ', '\t']
    tokenized[:] = (char for char in tokenized if char not in specialChars and len(char)>2)
    return ' '.join(tokenized)
if __name__=="__main__":
    # Usage: python script.py <input-file>; writes the result to <input-file>.tok
    inFile = sys.argv[1]
    # Context managers guarantee the handles are closed even on error
    # (the original closed them manually and could leak on exceptions).
    with codecs.open(inFile, "r", "utf8") as opened:
        text = opened.read()
    preprocessed = preprocess(text)
    outFile = inFile + ".tok"
    with codecs.open(outFile, "w", "utf8") as openedOut:
        openedOut.write(preprocessed)
    # Parenthesized single-argument print works under both Python 2 and 3;
    # the bare print statement was Python-2-only.
    print("Preprocessed %s and wrote to file %s" % (inFile, outFile))
| StarcoderdataPython |
3257867 | <reponame>pawamoy/woof<filename>src/failprint/process.py
"""Functions related to subprocesses."""
import contextlib
import subprocess # noqa: S404 (we don't mind the security implication)
from typing import List, Optional, Tuple
from failprint import WINDOWS
from failprint.capture import Capture
from failprint.formats import printable_command
from failprint.types import CmdType
if not WINDOWS:
from ptyprocess import PtyProcessUnicode
def run_subprocess(
    cmd: CmdType,
    capture: Capture = Capture.BOTH,
    shell: bool = False,
    stdin: Optional[str] = None,
) -> Tuple[int, str]:
    """
    Run a command in a subprocess.
    Arguments:
        cmd: The command to run.
        capture: The output to capture.
        shell: Whether to run the command in a shell.
        stdin: String to use as standard input.
    Returns:
        The exit code and the command raw output.
    """
    # Translate the capture mode into stdout/stderr redirections.
    if capture == Capture.NONE:
        stdout_opt, stderr_opt = None, None
    elif capture == Capture.BOTH:
        # Fold stderr into stdout so a single stream holds everything.
        stdout_opt, stderr_opt = subprocess.PIPE, subprocess.STDOUT
    else:
        stdout_opt, stderr_opt = subprocess.PIPE, subprocess.PIPE
    if shell and not isinstance(cmd, str):
        cmd = printable_command(cmd)
    process = subprocess.run(  # noqa: S603,W1510 (we trust the input, and don't want to "check")
        cmd,
        input=stdin,
        stdout=stdout_opt,
        stderr=stderr_opt,
        shell=shell,  # noqa: S602 (shell=True)
        universal_newlines=True,
        encoding="utf8",
    )
    # Pick the stream the caller asked for; empty string when nothing captured.
    if capture == Capture.NONE:
        captured = ""
    elif capture == Capture.STDERR:
        captured = process.stderr
    else:
        captured = process.stdout
    return process.returncode, captured
def run_pty_subprocess(cmd: List[str], capture: Capture = Capture.BOTH, stdin: Optional[str] = None) -> Tuple[int, str]:
    """
    Run a command in a PTY subprocess.
    Arguments:
        cmd: The command to run.
        capture: The output to capture.
        stdin: String to use as standard input.
    Returns:
        The exit code and the command output.
    """
    process = PtyProcessUnicode.spawn(cmd)
    pty_output: List[str] = []
    if stdin is not None:
        # Disable echo before writing so the stdin text is not mixed back
        # into the captured output.
        process.setecho(False)
        process.waitnoecho()
        process.write(stdin)
        process.sendeof()
        # not sure why but sending only one eof is not always enough,
        # so we send a second one and ignore any IO error
        with contextlib.suppress(OSError):
            process.sendeof()
    # Drain the PTY until the child closes it (signalled by EOFError).
    while True:
        try:
            output_data = process.read()
        except EOFError:
            break
        if capture == Capture.NONE:
            # Not capturing: stream straight through to our own stdout.
            print(output_data, end="", flush=True)  # noqa: WPS421 (print)
        else:
            pty_output.append(output_data)
    # PTYs emit CRLF line endings; normalize them back to plain newlines.
    output = "".join(pty_output).replace("\r\n", "\n")
    return process.wait(), output
| StarcoderdataPython |
3371910 | <reponame>opennms-forge/report-aux<gh_stars>0
# export_all.py
# Easy access to generate PDFs for all node pairs
import export
# Delegate the whole run to the project's export module: render a PDF
# report for every node pair.
export.render_all_nodes_pdf()
| StarcoderdataPython |
3371253 | <reponame>ralexrivero/python_fundation
#
# Hello World program in Python
# take info from input and display
def main():
    """Greet the world, then ask for the user's name and greet them too."""
    print("Hellow World")
    user_name = input("What is your name? ")
    print("Hello ", user_name)
# Run the interactive greeting only when executed as a script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
79076 | <reponame>BrendanFrick/great_expectations
import os
from collections import OrderedDict
import pytest
from ruamel.yaml import YAML, YAMLError
import great_expectations as ge
from great_expectations.data_context.types.base import (
DataContextConfig,
DataContextConfigSchema,
DatasourceConfig,
DatasourceConfigSchema,
)
from great_expectations.data_context.util import (
file_relative_path,
substitute_config_variable,
)
from great_expectations.exceptions import InvalidConfigError, MissingConfigVariableError
from tests.data_context.conftest import create_data_context_files
# Shared ruamel YAML round-tripper configured to match the project's
# great_expectations.yml formatting conventions.
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
# Reusable schema instance for (de)serializing DataContextConfig objects.
dataContextConfigSchema = DataContextConfigSchema()
def test_config_variables_on_context_without_config_variables_filepath_configured(
    data_context_without_config_variables_filepath_configured,
):
    """A context lacking config_variables_file_path cannot persist config
    variables: saving one must raise InvalidConfigError with a clear message."""
    context = data_context_without_config_variables_filepath_configured
    with pytest.raises(InvalidConfigError) as exc:
        context.save_config_variable("var_name_1", {"n1": "v1"})
    expected_fragment = (
        "'config_variables_file_path' property is not found in config"
    )
    assert expected_fragment in exc.value.message
def test_setting_config_variables_is_visible_immediately(
    data_context_with_variables_in_config,
):
    """Saving a config variable must be reflected in substituted config
    immediately, escaping of `$` must round-trip, and OS env vars must take
    precedence over the config variables file."""
    context = data_context_with_variables_in_config
    assert type(context.get_config()) == DataContextConfig
    config_variables_file_path = context.get_config()["config_variables_file_path"]
    assert config_variables_file_path == "uncommitted/config_variables.yml"
    # The config variables must have been present to instantiate the config
    assert os.path.isfile(
        os.path.join(context._context_root_directory, config_variables_file_path)
    )
    # the context's config has two config variables - one using the ${} syntax and the other - $.
    assert (
        context.get_config()["datasources"]["mydatasource"]["batch_kwargs_generators"][
            "mygenerator"
        ]["reader_options"]["test_variable_sub1"]
        == "${replace_me}"
    )
    assert (
        context.get_config()["datasources"]["mydatasource"]["batch_kwargs_generators"][
            "mygenerator"
        ]["reader_options"]["test_variable_sub2"]
        == "$replace_me"
    )
    config_variables = context._load_config_variables_file()
    assert config_variables["replace_me"] == {"n1": "v1"}
    # the context's config has two config variables - one using the ${} syntax and the other - $.
    assert context.get_config_with_variables_substituted().datasources["mydatasource"][
        "batch_kwargs_generators"
    ]["mygenerator"]["reader_options"]["test_variable_sub1"] == {"n1": "v1"}
    assert context.get_config_with_variables_substituted().datasources["mydatasource"][
        "batch_kwargs_generators"
    ]["mygenerator"]["reader_options"]["test_variable_sub2"] == {"n1": "v1"}
    # verify that we can save a config variable in the config variables file
    # and the value is retrievable
    context.save_config_variable("replace_me_2", {"n2": "v2"})
    # Update the config itself
    context._project_config["datasources"]["mydatasource"]["batch_kwargs_generators"][
        "mygenerator"
    ]["reader_options"]["test_variable_sub1"] = "${replace_me_2}"
    # verify that the value of the config variable is immediately updated.
    # verify that the config variable will be substituted with the value from the file if the
    # env variable is not set (for both ${} and $ syntax variations)
    assert context.get_config_with_variables_substituted().datasources["mydatasource"][
        "batch_kwargs_generators"
    ]["mygenerator"]["reader_options"]["test_variable_sub1"] == {"n2": "v2"}
    assert context.get_config_with_variables_substituted().datasources["mydatasource"][
        "batch_kwargs_generators"
    ]["mygenerator"]["reader_options"]["test_variable_sub2"] == {"n1": "v1"}
    # verify the same for escaped variables
    context.save_config_variable(
        "escaped_password", "<PASSWORD>"
    )
    dict_to_escape = {
        "drivername": "po$tgresql",
        "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "port": "5432",
        "username": "postgres",
        "password": "<PASSWORD>$",
        "database": "postgres",
    }
    context.save_config_variable(
        "escaped_password_dict",
        dict_to_escape,
    )
    context._project_config["datasources"]["mydatasource"]["batch_kwargs_generators"][
        "mygenerator"
    ]["reader_options"]["test_variable_sub_escaped"] = "${escaped_password}"
    context._project_config["datasources"]["mydatasource"]["batch_kwargs_generators"][
        "mygenerator"
    ]["reader_options"]["test_variable_sub_escaped_dict"] = "${escaped_password_dict}"
    assert (
        context.get_config().datasources["mydatasource"]["batch_kwargs_generators"][
            "mygenerator"
        ]["reader_options"]["test_variable_sub_escaped"]
        == "${escaped_password}"
    )
    assert (
        context.get_config().datasources["mydatasource"]["batch_kwargs_generators"][
            "mygenerator"
        ]["reader_options"]["test_variable_sub_escaped_dict"]
        == "${escaped_password_dict}"
    )
    # Ensure that the value saved in config variables has escaped the $
    config_variables_with_escaped_vars = context._load_config_variables_file()
    assert (
        config_variables_with_escaped_vars["escaped_password"]
        == r"<PASSWORD>"
    )
    assert config_variables_with_escaped_vars["escaped_password_dict"] == {
        "drivername": r"po\$tgresql",
        "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "port": "5432",
        "username": "postgres",
        "password": r"<PASSWORD>\$",
        "database": "postgres",
    }
    # Ensure that when reading the escaped config variable, the escaping should be removed
    assert (
        context.get_config_with_variables_substituted().datasources["mydatasource"][
            "batch_kwargs_generators"
        ]["mygenerator"]["reader_options"]["test_variable_sub_escaped"]
        == "this_is_$mypassword_escape_the_$signs"
    )
    assert (
        context.get_config_with_variables_substituted().datasources["mydatasource"][
            "batch_kwargs_generators"
        ]["mygenerator"]["reader_options"]["test_variable_sub_escaped_dict"]
        == dict_to_escape
    )
    assert (
        context.get_config_with_variables_substituted().datasources["mydatasource"][
            "batch_kwargs_generators"
        ]["mygenerator"]["reader_options"][
            "test_escaped_manually_entered_value_from_config"
        ]
        == "correct_hor$e_battery_$taple"
    )
    try:
        # verify that the value of the env var takes precedence over the one from the config variables file
        os.environ["replace_me_2"] = "value_from_env_var"
        assert (
            context.get_config_with_variables_substituted().datasources["mydatasource"][
                "batch_kwargs_generators"
            ]["mygenerator"]["reader_options"]["test_variable_sub1"]
            == "value_from_env_var"
        )
    except Exception:
        raise
    finally:
        # Always remove the env var so it cannot leak into other tests.
        del os.environ["replace_me_2"]
def test_substituted_config_variables_not_written_to_file(tmp_path_factory):
    """Round-tripping a config through load/save must keep the raw `${...}`
    placeholders on disk rather than persisting the substituted values."""
    # this test uses a great_expectations.yml with almost all values replaced
    # with substitution variables
    project_path = str(tmp_path_factory.mktemp("data_context"))
    context_path = os.path.join(project_path, "great_expectations")
    asset_config_path = os.path.join(context_path, "expectations")
    create_data_context_files(
        context_path,
        asset_config_path,
        ge_config_fixture_filename="great_expectations_v013_basic_with_exhaustive_variables.yml",
        config_variables_fixture_filename="config_variables_exhaustive.yml",
    )
    # load ge config fixture for expected
    path_to_yml = (
        "../test_fixtures/great_expectations_v013_basic_with_exhaustive_variables.yml"
    )
    path_to_yml = file_relative_path(__file__, path_to_yml)
    with open(path_to_yml) as data:
        config_commented_map_from_yaml = yaml.load(data)
    expected_config = DataContextConfig.from_commented_map(
        config_commented_map_from_yaml
    )
    expected_config_commented_map = dataContextConfigSchema.dump(expected_config)
    expected_config_commented_map.pop("anonymous_usage_statistics")
    # instantiate data_context twice to go through cycle of loading config from file then saving
    context = ge.data_context.DataContext(context_path)
    context._save_project_config()
    context_config_commented_map = dataContextConfigSchema.dump(
        ge.data_context.DataContext(context_path)._project_config
    )
    context_config_commented_map.pop("anonymous_usage_statistics")
    assert context_config_commented_map == expected_config_commented_map
def test_runtime_environment_are_used_preferentially(tmp_path_factory, monkeypatch):
    """Runtime-environment overrides must win over OS environment variables
    when substituting config variables."""
    monkeypatch.setenv("FOO", "BAR")
    monkeypatch.setenv("REPLACE_ME_ESCAPED_ENV", r"ive_been_\$replaced")
    value_from_environment = "from_environment"
    # Use monkeypatch instead of mutating os.environ directly (as sibling
    # tests do): the variable is removed automatically even if an assertion
    # fails early, so no try/finally bookkeeping is needed.
    monkeypatch.setenv("replace_me", value_from_environment)
    value_from_runtime_override = "runtime_var"
    runtime_environment = {"replace_me": value_from_runtime_override}
    project_path = str(tmp_path_factory.mktemp("data_context"))
    context_path = os.path.join(project_path, "great_expectations")
    asset_config_path = os.path.join(context_path, "expectations")
    create_data_context_files(
        context_path,
        asset_config_path,
        ge_config_fixture_filename="great_expectations_basic_with_variables.yml",
        config_variables_fixture_filename="config_variables.yml",
    )
    data_context = ge.data_context.DataContext(
        context_path, runtime_environment=runtime_environment
    )
    config = data_context.get_config_with_variables_substituted()
    # Both `${}` and `$` syntax variants must resolve to the runtime value,
    # not the environment value.
    assert (
        config.datasources["mydatasource"]["batch_kwargs_generators"]["mygenerator"][
            "reader_options"
        ]["test_variable_sub1"]
        == value_from_runtime_override
    )
    assert (
        config.datasources["mydatasource"]["batch_kwargs_generators"]["mygenerator"][
            "reader_options"
        ]["test_variable_sub2"]
        == value_from_runtime_override
    )
def test_substitute_config_variable():
    """Exercise substitute_config_variable: `${}` and `$` syntax, missing
    variables, non-string values, mixed/upper case keys, and escaped `$`."""
    config_variables_dict = {
        "arg0": "val_of_arg_0",
        "arg2": {"v1": 2},
        "aRg3": "val_of_aRg_3",
        "ARG4": "val_of_ARG_4",
    }
    assert (
        substitute_config_variable("abc${arg0}", config_variables_dict)
        == "abcval_of_arg_0"
    )
    assert (
        substitute_config_variable("abc$arg0", config_variables_dict)
        == "abcval_of_arg_0"
    )
    assert (
        substitute_config_variable("${arg0}", config_variables_dict) == "val_of_arg_0"
    )
    assert substitute_config_variable("hhhhhhh", config_variables_dict) == "hhhhhhh"
    with pytest.raises(MissingConfigVariableError) as exc:
        substitute_config_variable(
            "abc${arg1} def${foo}", config_variables_dict
        )  # does NOT equal "abc${arg1}"
    assert (
        """Unable to find a match for config substitution variable: `arg1`.
    Please add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.
    See https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets"""
        in exc.value.message
    )
    assert (
        substitute_config_variable("${arg2}", config_variables_dict)
        == config_variables_dict["arg2"]
    )
    assert exc.value.missing_config_variable == "arg1"
    # Null cases
    assert substitute_config_variable("", config_variables_dict) == ""
    # Identity check with `is None` is the idiomatic form (`== None` is E711).
    assert substitute_config_variable(None, config_variables_dict) is None
    # Test with mixed case
    assert (
        substitute_config_variable("prefix_${aRg3}_suffix", config_variables_dict)
        == "prefix_val_of_aRg_3_suffix"
    )
    assert (
        substitute_config_variable("${aRg3}", config_variables_dict) == "val_of_aRg_3"
    )
    # Test with upper case
    assert (
        substitute_config_variable("prefix_$ARG4/suffix", config_variables_dict)
        == "prefix_val_of_ARG_4/suffix"
    )
    assert substitute_config_variable("$ARG4", config_variables_dict) == "val_of_ARG_4"
    # Test with multiple substitutions
    assert (
        substitute_config_variable("prefix${arg0}$aRg3", config_variables_dict)
        == "prefixval_of_arg_0val_of_aRg_3"
    )
    # Escaped `$` (don't substitute, but return un-escaped string)
    assert (
        substitute_config_variable(r"abc\${arg0}\$aRg3", config_variables_dict)
        == "abc${arg0}$aRg3"
    )
    # Multiple configurations together
    assert (
        substitute_config_variable(
            r"prefix$ARG4.$arg0/$aRg3:${ARG4}/\$dontsub${arg0}:${aRg3}.suffix",
            config_variables_dict,
        )
        == "prefixval_of_ARG_4.val_of_arg_0/val_of_aRg_3:val_of_ARG_4/$dontsubval_of_arg_0:val_of_aRg_3.suffix"
    )
def test_substitute_env_var_in_config_variable_file(
    monkeypatch, empty_data_context_with_config_variables
):
    """Env-var references inside the config variables file itself must be
    substituted, including nested dict values and escaped `$` sequences."""
    monkeypatch.setenv("FOO", "correct_val_of_replace_me")
    monkeypatch.setenv("REPLACE_ME_ESCAPED_ENV", r"ive_been_\$replaced")
    context = empty_data_context_with_config_variables
    context_config = context.get_config_with_variables_substituted()
    my_generator = context_config["datasources"]["mydatasource"][
        "batch_kwargs_generators"
    ]["mygenerator"]
    reader_options = my_generator["reader_options"]
    assert reader_options["test_variable_sub3"] == "correct_val_of_replace_me"
    assert reader_options["test_variable_sub4"] == {
        "inner_env_sub": "correct_val_of_replace_me"
    }
    assert reader_options["password"] == "<PASSWORD>"
    # Escaped variables (variables containing `$` that have been escaped)
    assert (
        reader_options["test_escaped_env_var_from_config"]
        == "prefixive_been_$replaced/suffix"
    )
    assert (
        my_generator["test_variable_escaped"]
        == "dont$replace$me$please$$$$thanksive_been_$replaced"
    )
def test_escape_all_config_variables(empty_data_context_with_config_variables):
    """
    Make sure that all types of input to escape_all_config_variables are escaped properly: str, dict, OrderedDict, list
    Make sure that changing the escape string works as expected.
    """
    context = empty_data_context_with_config_variables
    # str
    value_str = "pas$word1"
    escaped_value_str = r"pas\$word1"
    assert context.escape_all_config_variables(value=value_str) == escaped_value_str
    value_str2 = "pas$wor$d1$"
    escaped_value_str2 = r"pas\$wor\$d1\$"
    assert context.escape_all_config_variables(value=value_str2) == escaped_value_str2
    # dict — every `$` inside values must be escaped; keys are untouched.
    value_dict = {
        "drivername": "postgresql",
        "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "port": "5432",
        "username": "postgres",
        "password": "<PASSWORD>",
        "database": "postgres",
    }
    escaped_value_dict = {
        "drivername": "postgresql",
        "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "port": "5432",
        "username": "postgres",
        "password": r"<PASSWORD>",
        "database": "postgres",
    }
    assert context.escape_all_config_variables(value=value_dict) == escaped_value_dict
    # OrderedDict — nested containers must be escaped recursively.
    value_ordered_dict = OrderedDict(
        [
            ("UNCOMMITTED", "uncommitted"),
            ("docs_test_folder", "test$folder"),
            (
                "test_db",
                {
                    "drivername": "postgresql",
                    "host": "some_host",
                    "port": "5432",
                    "username": "postgres",
                    "password": "<PASSWORD>",
                    "database": "postgres",
                },
            ),
        ]
    )
    escaped_value_ordered_dict = OrderedDict(
        [
            ("UNCOMMITTED", "uncommitted"),
            ("docs_test_folder", r"test\$folder"),
            (
                "test_db",
                {
                    "drivername": "postgresql",
                    "host": "some_host",
                    "port": "5432",
                    "username": "postgres",
                    "password": r"<PASSWORD>",
                    "database": "postgres",
                },
            ),
        ]
    )
    assert (
        context.escape_all_config_variables(value=value_ordered_dict)
        == escaped_value_ordered_dict
    )
    # list
    value_list = [
        "postgresql",
        os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "5432",
        "postgres",
        "<PASSWORD>$<PASSWORD>",
        "postgres",
    ]
    escaped_value_list = [
        "postgresql",
        os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "5432",
        "postgres",
        r"<PASSWORD>",
        "postgres",
    ]
    assert context.escape_all_config_variables(value=value_list) == escaped_value_list
    # Custom escape string — every `$` is replaced by the supplied sequence.
    value_str_custom_escape_string = "pas$word1"
    escaped_value_str_custom_escape_string = "pas@*&$word1"
    assert (
        context.escape_all_config_variables(
            value=value_str_custom_escape_string, dollar_sign_escape_string="@*&$"
        )
        == escaped_value_str_custom_escape_string
    )
    value_str_custom_escape_string2 = "pas$wor$d1$"
    escaped_value_str_custom_escape_string2 = "pas@*&$wor@*&$d1@*&$"
    assert (
        context.escape_all_config_variables(
            value=value_str_custom_escape_string2, dollar_sign_escape_string="@*&$"
        )
        == escaped_value_str_custom_escape_string2
    )
def test_escape_all_config_variables_skip_substitution_vars(
    empty_data_context_with_config_variables,
):
    """
    What does this test and why?
    escape_all_config_variables(skip_if_substitution_variable=True/False) should function as documented.
    """
    context = empty_data_context_with_config_variables
    # str — a pure substitution variable (`$VAR` / `${VAR}`) is left intact
    # when skip_if_substitution_variable=True, but escaped when False.
    value_str = "$VALUE_STR"
    escaped_value_str = r"\$VALUE_STR"
    assert (
        context.escape_all_config_variables(
            value=value_str, skip_if_substitution_variable=True
        )
        == value_str
    )
    assert (
        context.escape_all_config_variables(
            value=value_str, skip_if_substitution_variable=False
        )
        == escaped_value_str
    )
    # A `$` in the middle of a token is NOT a substitution variable, so it is
    # escaped in both modes.
    value_str2 = "VALUE_$TR"
    escaped_value_str2 = r"VALUE_\$TR"
    assert (
        context.escape_all_config_variables(
            value=value_str2, skip_if_substitution_variable=True
        )
        == escaped_value_str2
    )
    assert (
        context.escape_all_config_variables(
            value=value_str2, skip_if_substitution_variable=False
        )
        == escaped_value_str2
    )
    multi_value_str = "${USER}:pas$word@${HOST}:${PORT}/${DATABASE}"
    escaped_multi_value_str = r"\${USER}:pas\$word@\${HOST}:\${PORT}/\${DATABASE}"
    assert (
        context.escape_all_config_variables(
            value=multi_value_str, skip_if_substitution_variable=True
        )
        == multi_value_str
    )
    assert (
        context.escape_all_config_variables(
            value=multi_value_str, skip_if_substitution_variable=False
        )
        == escaped_multi_value_str
    )
    multi_value_str2 = "$USER:pas$word@$HOST:${PORT}/${DATABASE}"
    escaped_multi_value_str2 = r"\$USER:pas\$word@\$HOST:\${PORT}/\${DATABASE}"
    assert (
        context.escape_all_config_variables(
            value=multi_value_str2, skip_if_substitution_variable=True
        )
        == multi_value_str2
    )
    assert (
        context.escape_all_config_variables(
            value=multi_value_str2, skip_if_substitution_variable=False
        )
        == escaped_multi_value_str2
    )
    multi_value_str3 = "USER:pas$word@$HOST:${PORT}/${DATABASE}"
    escaped_multi_value_str3 = r"USER:pas\$word@\$HOST:\${PORT}/\${DATABASE}"
    assert (
        context.escape_all_config_variables(
            value=multi_value_str3, skip_if_substitution_variable=True
        )
        == escaped_multi_value_str3
    )
    assert (
        context.escape_all_config_variables(
            value=multi_value_str3, skip_if_substitution_variable=False
        )
        == escaped_multi_value_str3
    )
    # dict — the skip flag applies per value, including nested dicts.
    value_dict = {
        "drivername": "postgresql",
        "host": "${HOST}",
        "port": "<PASSWORD>",
        "username": "postgres",
        "password": "<PASSWORD>",
        "database": "$postgres",
        "sub_dict": {
            "test_val_no_escaping": "test_val",
            "test_val_escaping": "te$t_val",
            "test_val_substitution": "$test_val",
            "test_val_substitution_braces": "${test_val}",
        },
    }
    escaped_value_dict = {
        "drivername": "postgresql",
        "host": r"\${HOST}",
        "port": "5432",
        "username": "postgres",
        "password": r"<PASSWORD>",
        "database": r"\$postgres",
        "sub_dict": {
            "test_val_no_escaping": "test_val",
            "test_val_escaping": r"te\$t_val",
            "test_val_substitution": r"\$test_val",
            "test_val_substitution_braces": r"\${test_val}",
        },
    }
    escaped_value_dict_skip_substitution_variables = {
        "drivername": "postgresql",
        "host": "${HOST}",
        "port": "5432",
        "username": "postgres",
        "password": r"<PASSWORD>",
        "database": "$postgres",
        "sub_dict": {
            "test_val_no_escaping": "test_val",
            "test_val_escaping": r"te\$t_val",
            "test_val_substitution": "$test_val",
            "test_val_substitution_braces": "${test_val}",
        },
    }
    assert (
        context.escape_all_config_variables(
            value=value_dict, skip_if_substitution_variable=False
        )
        == escaped_value_dict
    )
    assert (
        context.escape_all_config_variables(
            value=value_dict, skip_if_substitution_variable=True
        )
        == escaped_value_dict_skip_substitution_variables
    )
    # OrderedDict
    value_ordered_dict = OrderedDict(
        [
            ("UNCOMMITTED", "uncommitted"),
            ("docs_test_folder", "test$folder"),
            (
                "test_db",
                {
                    "drivername": "$postgresql",
                    "host": "some_host",
                    "port": "5432",
                    "username": "${USERNAME}",
                    "password": "<PASSWORD>",
                    "database": "postgres",
                },
            ),
        ]
    )
    escaped_value_ordered_dict = OrderedDict(
        [
            ("UNCOMMITTED", "uncommitted"),
            ("docs_test_folder", r"test\$folder"),
            (
                "test_db",
                {
                    "drivername": r"\$postgresql",
                    "host": "some_host",
                    "port": "5432",
                    "username": r"\${USERNAME}",
                    "password": r"<PASSWORD>",
                    "database": "postgres",
                },
            ),
        ]
    )
    escaped_value_ordered_dict_skip_substitution_variables = OrderedDict(
        [
            ("UNCOMMITTED", "uncommitted"),
            ("docs_test_folder", r"test\$folder"),
            (
                "test_db",
                {
                    "drivername": "$postgresql",
                    "host": "some_host",
                    "port": "5432",
                    "username": "${USERNAME}",
                    "password": r"<PASSWORD>",
                    "database": "postgres",
                },
            ),
        ]
    )
    assert (
        context.escape_all_config_variables(
            value=value_ordered_dict, skip_if_substitution_variable=False
        )
        == escaped_value_ordered_dict
    )
    assert (
        context.escape_all_config_variables(
            value=value_ordered_dict, skip_if_substitution_variable=True
        )
        == escaped_value_ordered_dict_skip_substitution_variables
    )
    # list
    value_list = [
        "postgresql",
        os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "5432",
        "$postgres",
        "pass$<PASSWORD>",
        "${POSTGRES}",
    ]
    escaped_value_list = [
        "postgresql",
        os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "5432",
        r"\$postgres",
        r"<PASSWORD>\$<PASSWORD>",
        r"\${POSTGRES}",
    ]
    escaped_value_list_skip_substitution_variables = [
        "postgresql",
        os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "5432",
        "$postgres",
        r"<PASSWORD>",
        "${POSTGRES}",
    ]
    assert (
        context.escape_all_config_variables(
            value=value_list, skip_if_substitution_variable=False
        )
        == escaped_value_list
    )
    assert (
        context.escape_all_config_variables(
            value=value_list, skip_if_substitution_variable=True
        )
        == escaped_value_list_skip_substitution_variables
    )
def test_create_data_context_and_config_vars_in_code(tmp_path_factory, monkeypatch):
    """
    What does this test and why?
    Creating a DataContext via .create(), then using .save_config_variable() to save a variable that will eventually be substituted (e.g. ${SOME_VAR}) should result in the proper escaping of $.
    This is in response to issue #2196
    """
    project_path = str(tmp_path_factory.mktemp("data_context"))
    context = ge.DataContext.create(
        project_root_dir=project_path,
        usage_statistics_enabled=False,
    )
    CONFIG_VARS = {
        "DB_HOST": "${DB_HOST_FROM_ENV_VAR}",
        "DB_NAME": "DB_NAME",
        "DB_USER": "DB_USER",
        # Plain value containing "$" - requires escaping when saved.
        "DB_PWD": "pas$word",
    }
    for k, v in CONFIG_VARS.items():
        context.save_config_variable(k, v)
    config_vars_file_contents = context._load_config_variables_file()
    # Add escaping for DB_PWD since it is not of the form ${SOMEVAR} or $SOMEVAR
    CONFIG_VARS_WITH_ESCAPING = CONFIG_VARS.copy()
    CONFIG_VARS_WITH_ESCAPING["DB_PWD"] = r"pas\$word"
    # Ensure all config vars saved are in the config_variables.yml file
    # and that escaping was added for "pas$word" -> "pas\$word"
    assert all(
        item in config_vars_file_contents.items()
        for item in CONFIG_VARS_WITH_ESCAPING.items()
    )
    # The raw (unescaped) values must NOT all be present, proving that the
    # "$"-containing value was escaped on save.
    assert not all(
        item in config_vars_file_contents.items() for item in CONFIG_VARS.items()
    )
    # Add env var for substitution
    monkeypatch.setenv("DB_HOST_FROM_ENV_VAR", "DB_HOST_FROM_ENV_VAR_VALUE")
    datasource_config = DatasourceConfig(
        class_name="SqlAlchemyDatasource",
        credentials={
            "drivername": "postgresql",
            "host": "$DB_HOST",
            "port": "65432",
            "database": "${DB_NAME}",
            "username": "${DB_USER}",
            "password": "${DB_PWD}",
        },
    )
    datasource_config_schema = DatasourceConfigSchema()
    # use context.add_datasource to test this by adding a datasource with values to substitute.
    context.add_datasource(
        initialize=False,
        name="test_datasource",
        **datasource_config_schema.dump(datasource_config)
    )
    assert context.list_datasources()[0]["credentials"] == {
        "drivername": "postgresql",
        "host": "DB_HOST_FROM_ENV_VAR_VALUE",
        "port": "65432",
        "database": "DB_NAME",
        "username": "DB_USER",
        # Note masking of "password" field
        "password": "***",
    }
    # Check context substitutes escaped variables appropriately
    data_context_config_schema = DataContextConfigSchema()
    context_with_variables_substituted_dict = data_context_config_schema.dump(
        context.get_config_with_variables_substituted()
    )
    test_datasource_credentials = context_with_variables_substituted_dict[
        "datasources"
    ]["test_datasource"]["credentials"]
    assert test_datasource_credentials["host"] == "DB_HOST_FROM_ENV_VAR_VALUE"
    assert test_datasource_credentials["username"] == "DB_USER"
    # The escaped "pas\$word" must round-trip back to the raw "pas$word".
    assert test_datasource_credentials["password"] == "pas$word"
    assert test_datasource_credentials["database"] == "DB_NAME"
    # Ensure skip_if_substitution_variable=False works as documented
    context.save_config_variable(
        "escaped", "$SOME_VAR", skip_if_substitution_variable=False
    )
    context.save_config_variable(
        "escaped_curly", "${SOME_VAR}", skip_if_substitution_variable=False
    )
    config_vars_file_contents = context._load_config_variables_file()
    assert config_vars_file_contents["escaped"] == r"\$SOME_VAR"
    assert config_vars_file_contents["escaped_curly"] == r"\${SOME_VAR}"
| StarcoderdataPython |
3370682 | <reponame>liwt31/Renormalizer<filename>renormalizer/mps/mpdm.py
# -*- coding: utf-8 -*-
import logging
from typing import List
import numpy as np
import scipy.linalg
from renormalizer.model import MolList, MolList2, ModelTranslator
from renormalizer.mps.backend import xp
from renormalizer.mps.matrix import tensordot, asnumpy
from renormalizer.mps import Mpo, Mps
from renormalizer.mps.tdh import unitary_propagation
from renormalizer.utils import Op
logger = logging.getLogger(__name__)
# MPS first. `digest`, `metacopy`
class MpDmBase(Mps, Mpo):
    """Common base class for matrix product density matrices (MPDM).

    Inherits from both ``Mps`` and ``Mpo``: the density matrix reuses the
    MPS machinery (evolution, compression, canonicalisation) while each
    local tensor carries two physical indices like an MPO.
    """
    @classmethod
    def random(cls, mpo, nexciton, m_max, percent=0):
        """Disabled on purpose: random states are an MPS-only concept."""
        # avoid misuse to produce mps
        raise ValueError("MpDm don't have to produce random state")
    @classmethod
    def gs(cls, mol_list, max_entangled):
        """Disabled on purpose: use ``MpDm.max_entangled_gs``/``max_entangled_ex``."""
        raise ValueError(
            "Use max_entangled_ex or max_entangled_gs for matrix product density matrix"
        )
    @property
    def is_mps(self):
        # Not a pure state, even though MPS machinery is reused.
        return False
    @property
    def is_mpo(self):
        # Not a plain operator either.
        return False
    @property
    def is_mpdm(self):
        return True
    def _expectation_path(self):
        """Pairwise contraction order for <MPDM| O |MPDM> expectation values."""
        # e
        # |
        # S--a--S--f--S
        # |     |     |
        # |     d     |
        # |     |     |
        # O--b--O--h--O
        # |     |     |
        # |     g     |
        # |     |     |
        # S--c--S--j--S
        #       |
        #       e
        path = [
            ([0, 1], "abc, cgej -> abgej"),
            ([3, 0], "abgej, bdgh -> aejdh"),
            ([2, 0], "aejdh, adef -> jhf"),
            ([1, 0], "jhf, fhj -> "),
        ]
        return path
    def conj_trans(self):
        """Return the conjugate transpose of this MPDM.

        The resulting object carries dummy quantum numbers (a warning is
        logged), and the hybrid TDH wavefunctions are conjugate-transposed
        alongside the matrix product part.
        """
        logger.warning("use conj_trans on mpdm leads to dummy qn")
        new_mpdm: "MpDmBase" = super().conj_trans()
        new_mpdm.use_dummy_qn = True
        for idx, wfn in enumerate(new_mpdm.tdh_wfns):
            new_mpdm.tdh_wfns[idx] = np.conj(wfn).T
        return new_mpdm
    def apply(self, mp, canonicalise=False) -> "MpDmBase":
        """Contract ``mp`` (usually an MPO) onto this MPDM, site by site.

        The result's bond dimensions are the products of the operands' bond
        dimensions; quantum numbers are combined by outer addition.
        """
        # Note usually mp is an mpo
        assert not mp.is_mps
        new_mpdm = self.metacopy()
        if mp.is_complex:
            new_mpdm.to_complex(inplace=True)
        # todo: also duplicate with MPO apply. What to do???
        for i, (mt_self, mt_other) in enumerate(zip(self, mp)):
            # Physical index of `self` must match the input index of `mp`.
            assert mt_self.shape[2] == mt_other.shape[1]
            # mt=np.einsum("apqb,cqrd->acprbd",mt_s,mt_o)
            mt = xp.moveaxis(
                tensordot(mt_self.array, mt_other.array, axes=([2], [1])),
                [-3, -2],
                [1, 3],
            )
            # Merge the two bond indices on each side into a single bond.
            mt = mt.reshape(
                (
                    mt_self.shape[0] * mt_other.shape[0],
                    mt_self.shape[1],
                    mt_other.shape[2],
                    mt_self.shape[-1] * mt_other.shape[-1],
                )
            )
            new_mpdm[i] = mt
        # NOTE(review): qn is taken from `mp.dummy_qn` rather than `mp.qn`,
        # so effectively only this MPDM's qn survive the outer addition --
        # presumably intended because `mp` is an operator; confirm.
        qn = mp.dummy_qn
        new_mpdm.qn = [
            np.add.outer(np.array(qn_o), np.array(qn_m)).ravel().tolist()
            for qn_o, qn_m in zip(self.qn, qn)
        ]
        if canonicalise:
            new_mpdm.canonicalise()
        return new_mpdm
    def dot(self, other: "MpDmBase", with_hartree=True):
        """Inner product with ``other``, optionally including the TDH part.

        The TDH contribution multiplies in one factor per (non-terminal)
        hybrid wavefunction, computed without complex conjugation.
        """
        e = super().dot(other, with_hartree=False)
        if with_hartree:
            assert len(self.tdh_wfns) == len(other.tdh_wfns)
            for wfn1, wfn2 in zip(self.tdh_wfns[:-1], other.tdh_wfns[:-1]):
                # using vdot is buggy here, because vdot will take conjugation automatically
                # note the difference between np.dot(wfn1, wfn2).trace()
                # probably the wfn part should be better wrapped?
                e *= np.dot(wfn1.flatten(), wfn2.flatten())
        return e
class MpDm(MpDmBase):
    """Matrix product density matrix including the auxiliary space."""
    @classmethod
    def from_mps(cls, mps: Mps):
        """Promote an MPS to an MPDM by diagonal embedding of each tensor.

        For each local tensor ``ms[a, p, b]`` of the MPS, the MPDM tensor is
        ``mo[a, p, q, b]`` which is zero except on the physical diagonal
        ``p == q``. Configuration objects and quantum-number bookkeeping are
        carried over from ``mps``.
        """
        mpo = cls()
        mpo.mol_list = mps.mol_list
        for ms in mps:
            mo = np.zeros(tuple([ms.shape[0]] + [ms.shape[1]] * 2 + [ms.shape[2]]))
            for iaxis in range(ms.shape[1]):
                mo[:, iaxis, iaxis, :] = ms[:, iaxis, :].array
            mpo.append(mo)
        for wfn in mps.tdh_wfns[:-1]:
            assert wfn.ndim == 2
        # Carry over the hybrid TDH wavefunctions and configuration objects.
        mpo.tdh_wfns = mps.tdh_wfns
        mpo.optimize_config = mps.optimize_config
        mpo.evolve_config = mps.evolve_config
        mpo.compress_add = mps.compress_add
        # Copy qn lists so the new MPDM's bookkeeping is independent.
        mpo.qn = [qn.copy() for qn in mps.qn]
        mpo.qntot = mps.qntot
        mpo.qnidx = mps.qnidx
        mpo.to_right = mps.to_right
        mpo.compress_config = mps.compress_config.copy()
        return mpo
    @classmethod
    def max_entangled_ex(cls, mol_list, normalize=True):
        """
        T = \\infty locally maximal entangled EX state.

        Built by applying the total creation operator to the maximally
        entangled ground state and (optionally) normalizing the result.
        """
        mps = Mps.gs(mol_list, max_entangled=True)
        # the creation operator \\sum_i a^\\dagger_i
        if isinstance(mol_list, MolList):
            ex_mpo = Mpo.onsite(mol_list, r"a^\dagger")
        else:
            # MolList2: assemble the same operator via the general model.
            model = {}
            for dof in mol_list.e_dofs:
                model[(dof,)] = [(Op("a^\dagger", 1), 1.0)]
            ex_mpo = Mpo.general_mpo(mol_list, model=model, model_translator=ModelTranslator.general_model)
        ex_mps = ex_mpo @ mps
        if normalize:
            ex_mps.normalize(1.0)
        return cls.from_mps(ex_mps)
    @classmethod
    def max_entangled_gs(cls, mol_list):
        """T = \\infty locally maximal entangled ground state."""
        return cls.from_mps(Mps.gs(mol_list, max_entangled=True))
    def _get_sigmaqn(self, idx):
        """Quantum numbers for site ``idx``; the auxiliary index carries 0."""
        if isinstance(self.mol_list, MolList2):
            array_up = self.mol_list.basis[idx].sigmaqn
            array_down = np.zeros_like(array_up)
            return np.add.outer(array_up, array_down)
        else:
            if self.ephtable.is_phonon(idx):
                return np.zeros((self.pbond_list[idx],self.pbond_list[idx]), dtype=np.int32)
            # for electron: auxiliary space all 0.
            if self.mol_list.scheme < 4 and self.ephtable.is_electron(idx):
                return np.add.outer(np.array([0, 1]), np.array([0, 0]))
            elif self.mol_list.scheme == 4 and self.ephtable.is_electrons(idx):
                n = self.pbond_list[idx]
                return np.add.outer(np.array([0]+[1]*(n-1)), np.array([0]*n))
            else:
                assert False
    def calc_reduced_density_matrix(self) -> np.ndarray:
        """Compute the reduced density matrix via the base-class helper."""
        if isinstance(self.mol_list, MolList):
            return self._calc_reduced_density_matrix(self, self.conj_trans())
        elif isinstance(self.mol_list, MolList2):
            # MolList2 variant: the helper derives what it needs internally.
            return self._calc_reduced_density_matrix(None, None)
        else:
            assert False
    def evolve_exact(self, h_mpo, evolve_dt, space):
        """Hybrid exact propagation of the MPDM over ``evolve_dt``."""
        MPOprop, ham, Etot = self.hybrid_exact_propagator(
            h_mpo, -1.0j * evolve_dt, space
        )
        # Mpdm is applied on the propagator, different from base method
        new_mpdm = self.apply(MPOprop, canonicalise=True)
        # NOTE(review): the loop variable shadows `ham`; works, but fragile.
        for iham, ham in enumerate(ham):
            w, v = scipy.linalg.eigh(ham)
            # Propagate each TDH wavefunction in the eigenbasis of its
            # Hamiltonian: wfn <- wfn . V exp(-i dt w) V^T.
            new_mpdm.tdh_wfns[iham] = (
                new_mpdm.tdh_wfns[iham]
                .dot(v)
                .dot(np.diag(np.exp(-1.0j * evolve_dt * w)))
                .dot(v.T)
            )
        # Global phase from the total energy.
        new_mpdm.tdh_wfns[-1] *= np.exp(-1.0j * Etot * evolve_dt)
        # unitary_propagation(new_mpdm.tdh_wfns, HAM, Etot, evolve_dt)
        return new_mpdm
    def full_wfn(self):
        """Disabled: a density matrix has no wavefunction representation."""
        raise NotImplementedError("Use full_operator on Matrix Product Density Matrix")
# MpDm without the auxiliary space.
class MpDmFull(MpDmBase):
    """Matrix product density matrix without the auxiliary space."""

    @classmethod
    def from_mpdm(cls, mpdm: MpDm):
        """Build the full density matrix as ``mpdm @ mpdm^dagger``."""
        full = cls(mpdm.mol_list)
        rho = mpdm.apply(mpdm.conj_trans())
        rho.build_empty_qn()
        rho.use_dummy_qn = True
        # this normalization actually makes the mpdm not normalized.
        # The real norm is `mpdm_norm`. Use this "fake" norm so that previous codes can be utilized
        rho.normalize(1)
        rho.canonicalise()
        rho.compress()
        # qn not implemented
        full.use_dummy_qn = True
        if rho.is_complex:
            full.to_complex(inplace=True)
        for matrix in rho:
            full.append(matrix)
        full.build_empty_qn()
        return full

    def __init__(self, mol_list):
        super().__init__()
        self.mol_list = mol_list

    def _get_sigmaqn(self, idx):
        # Quantum numbers are not tracked here: every entry of the doubled
        # physical index gets a dummy qn of zero.
        return np.array([0] * self.pbond_list[idx] ** 2)

    # `_expectation_conj` and `mpdm_norm` could be cached if they are proved to be bottlenecks
    def _expectation_conj(self):
        identity = Mpo.identity(self.mol_list)
        identity.scale(1 / self.mpdm_norm(), inplace=True)
        return identity

    def mpdm_norm(self):
        # The trace of the density matrix.
        identity = Mpo.identity(self.mol_list)
        return self.expectation(identity, identity)

    def full_operator(self, normalize=False):
        """Dense operator form, optionally divided by the trace norm."""
        dense = super().full_operator()
        return dense / self.mpdm_norm() if normalize else dense

    # tdvp can't be used in this representation
    def _evolve_dmrg_tdvp_fixed_gauge(self, mpo, evolve_dt):
        raise NotImplementedError

    def _evolve_dmrg_tdvp_mu_switch_gauge(self, mpo, evolve_dt):
        raise NotImplementedError

    def _evolve_dmrg_tdvp_ps(self, mpo, evolve_dt):
        raise NotImplementedError

    # todo: implement this
    def calc_reduced_density_matrix(self) -> np.ndarray:
        raise NotImplementedError
| StarcoderdataPython |
1612309 | <filename>tensorflow_mri/python/ops/recon_ops.py
# Copyright 2021 University College London. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image reconstruction operations.
This module contains functions for MR image reconstruction.
"""
import collections
import tensorflow as tf
import tensorflow_nufft as tfft
from tensorflow_mri.python.ops import array_ops
from tensorflow_mri.python.ops import coil_ops
from tensorflow_mri.python.ops import fft_ops
from tensorflow_mri.python.ops import image_ops
from tensorflow_mri.python.ops import linalg_ops
from tensorflow_mri.python.ops import math_ops
from tensorflow_mri.python.ops import optimizer_ops
from tensorflow_mri.python.ops import traj_ops
from tensorflow_mri.python.util import check_util
from tensorflow_mri.python.util import tensor_util
def reconstruct(kspace,
                mask=None,
                trajectory=None,
                density=None,
                calib=None,
                sensitivities=None,
                method=None,
                **kwargs):
  """MR image reconstruction gateway.

  Reconstructs an image given the corresponding *k*-space measurements.

  This is a gateway function to different image reconstruction methods. The
  reconstruction method can be selected with the `method` argument. If the
  `method` argument is not specified, a method is automatically selected based
  on the input arguments.

  Supported methods are:

  * **fft**: Simple fast Fourier transform (FFT) reconstruction for Cartesian
    *k*-space data. This is the default method if only a `kspace` argument is
    given.
  * **nufft**: Non-uniform fast Fourier transform (NUFFT) reconstruction for
    non-Cartesian *k*-space data. Uses the adjoint NUFFT operator with density
    compensation. This is the default method if `kspace`, `trajectory` and
    (optionally) `density` are given.
  * **inufft**: Non-uniform fast Fourier transform (NUFFT) reconstruction for
    non-Cartesian *k*-space data. Uses the inverse NUFFT, calculated
    iteratively. This method is never selected by default.
  * **sense**: SENSitivity Encoding (SENSE) [1]_ reconstruction for Cartesian
    *k*-space data. This is the default method if `kspace` and `sensitivities`
    are given.
  * **cg_sense**: Conjugate gradient SENSE (CG-SENSE) [2]_ reconstruction for
    non-Cartesian *k*-space data. This is the default method if `kspace`,
    `trajectory`, `sensitivities` and (optionally) `density` are given.
  * **grappa**: Generalized autocalibrating partially parallel acquisitions [3]_
    reconstruction for Cartesian *k*-space data. This is the default method if
    `kspace`, `calib` and (optionally) `sensitivities` are given.
  * **pics**: Combined parallel imaging and compressed sensing (PICS)
    reconstruction. Accepts Cartesian and non-Cartesian *k*-space data. Supply
    `mask` and `sensitivities` in combination with a Cartesian `kspace`, or
    `trajectory`, `sensitivities` and (optionally) `density` in combination with
    a non-Cartesian `kspace`. If `sensitivities` is not provided, a compressed
    sensing reconstruction is performed. This method is never selected by
    default.

  .. note::
    This function supports CPU and GPU computation.

  .. note::
    This function supports batches of inputs, which are processed in parallel
    whenever possible.

  See also `tfmr.estimate_coil_sensitivities` and `tfmr.combine_coils`.

  Args:
    kspace: A `Tensor`. The *k*-space samples. Must have type `complex64` or
      `complex128`. `kspace` can be either Cartesian or non-Cartesian. A
      Cartesian `kspace` must have shape `[..., C, *K]`, where `K` is the shape
      of the spatial frequency dimensions, `C` is the number of coils and `...`
      is the batch shape, which can have any rank. Note that `K` should be the
      reduced or undersampled shape, i.e., no zero-filling of any kind should be
      included. A non-Cartesian `kspace` must have shape `[..., C, M]`, where
      `M` is the number of samples, `C` is the number of coils and `...` is the
      batch shape, which can have any rank.
    mask: A `Tensor`. The sampling mask. Must have type `bool`. Must have shape
      `S`, where `S` is the shape of the spatial dimensions. In other words,
      `mask` should have the shape of a fully sampled *k*-space. For each point,
      `mask` should be `True` if the corresponding *k*-space sample was measured
      and `False` otherwise. `True` entries should correspond to the data in
      `kspace`, and the result of dropping all `False` entries from `mask`
      should have shape `K`. `mask` is required if `method` is `"grappa"`, or
      if `method` is `"pics"` and `kspace` is Cartesian. For other methods or
      for non-Cartesian `kspace`, this parameter is not relevant.
    trajectory: A `Tensor`. The *k*-space trajectory. Must have type `float32`
      or `float64`. Must have shape `[..., M, N]`, where `N` is the number of
      spatial dimensions, `M` is the number of *k*-space samples and `...` is
      the batch shape, which can have any rank and must be broadcastable to the
      batch shape of `kspace`. `trajectory` is required when `method` is
      `"nufft"`, `"inufft"` or `"cg_sense"`, or if `method` is `"pics"` and
      `kspace` is non-Cartesian. For other methods or for Cartesian `kspace`,
      this parameter is not relevant.
    density: A `Tensor`. The sampling density. Must have type `float32` or
      `float64`. Must have shape `[..., M]`, where `M` is the number of
      *k*-space samples and `...` is the batch shape, which can have any rank
      and must be broadcastable to the batch shape of `kspace`. `density` is
      optional when `method` is `"nufft"` or `"cg_sense"`, or if `method` is
      `"pics"` and `kspace` is non-Cartesian. In these cases, `density` will be
      estimated from the given `trajectory` if not provided. For other methods
      or for Cartesian `kspace`, this parameter is not relevant.
    calib: A `Tensor`. The calibration data. Must have type `complex64` or
      `complex128`. Must have shape `[..., C, *R]`, where `R` is the shape of
      the calibration region, `C` is the number of coils and `...` is the batch
      shape, which can have any rank and must be broadcastable to the batch
      shape of `kspace`. `calib` is required when `method` is `"grappa"`. For
      other methods, this parameter is not relevant.
    sensitivities: A `Tensor`. The coil sensitivity maps. Must have type
      `complex64` or `complex128`. Must have shape `[..., C, *S]`, where `S` is
      shape of the spatial dimensions, `C` is the number of coils and `...` is
      the batch shape, which can have any rank and must be broadcastable to the
      batch shape of `kspace`. `sensitivities` is required when `method` is
      `"sense"` or `"cg_sense"`. For other methods, this parameter is not
      relevant.
    method: A `string`. The reconstruction method. Must be one of `"fft"`,
      `"nufft"`, `"inufft"`, `"sense"`, `"cg_sense"`, `"grappa"` or `"pics"`.
    **kwargs: Additional method-specific keyword arguments. See Notes for the
      method-specific arguments.

  Notes:

    This function accepts several method dependent arguments:

    * For `method="fft"`, provide `kspace` and, optionally, `sensitivities`.
      If provided, `sensitivities` are used for adaptive coil combination (see
      `tfmr.combine_coils`). If not provided, multi-coil inputs are combined
      using the sum of squares method. In addition, the following keyword
      arguments are accepted:

      * **rank**: An optional `int`. The rank (in the sense of spatial
        dimensionality) of this operation. Defaults to `kspace.shape.rank` if
        `multicoil` is `False` and `kspace.shape.rank - 1` if `multicoil` is
        `True`.
      * **multicoil**: An optional `bool`. Whether the input *k*-space has a
        coil dimension. Defaults to `True` if `sensitivities` were specified,
        `False` otherwise.
      * **combine_coils**: An optional `bool`. If `True`, multi-coil images
        are combined. Otherwise, the uncombined images are returned. Defaults to
        `True`.

    * For `method="nufft"`, provide `kspace`, `trajectory` and, optionally,
      `density` and `sensitivities`. If `density` is not provided, an estimate
      will be used (see `tfmr.estimate_density`). If provided, `sensitivities`
      are used for adaptive coil combination (see `tfmr.combine_coils`). If not
      provided, multi-coil inputs are combined using the sum of squares method.
      In addition, the following keyword arguments are accepted:

      * **image_shape**: A `TensorShape` or list of `ints`. The shape of the
        output images. This parameter must be provided.
      * **multicoil**: An optional `bool`. Whether the input *k*-space has a
        coil dimension. Defaults to `True` if `sensitivities` were specified,
        `False` otherwise.
      * **combine_coils**: An optional `bool`. If `True`, multi-coil images
        are combined. Otherwise, the uncombined images are returned. Defaults to
        `True`.

    * For `method="inufft"`, provide `kspace`, `trajectory` and, optionally,
      `sensitivities`. If provided, `sensitivities` are used for adaptive coil
      combination (see `tfmr.combine_coils`). If not provided, multi-coil inputs
      are combined using the sum of squares method. In addition, the following
      arguments are accepted:

      * **image_shape**: A `TensorShape` or list of `ints`. The shape of the
        output images. This parameter must be provided.
      * **tol**: An optional `float`. The convergence tolerance for the
        conjugate gradient iteration. Defaults to 1e-05.
      * **max_iter**: An optional `int`. The maximum number of iterations for
        the conjugate gradient iteration. Defaults to 10.
      * **return_cg_state**: An optional `bool`. Defaults to `False`. If `True`,
        return a tuple containing the image and an object describing the final
        state of the CG iteration. For more details about the CG state, see
        `tfmr.conjugate_gradient`. If `False`, only the image is returned.
      * **multicoil**: An optional `bool`. Whether the input *k*-space has a
        coil dimension. Defaults to `True` if `sensitivities` were specified,
        `False` otherwise.
      * **combine_coils**: An optional `bool`. If `True`, multi-coil images
        are combined. Otherwise, the uncombined images are returned. Defaults to
        `True`.

    * For `method="sense"`, provide `kspace` and `sensitivities`. In addition,
      the following keyword arguments are accepted:

      * **reduction_axis**: An `int` or a list of `ints`. The reduced axes. This
        parameter must be provided.
      * **reduction_factor**: An `int` or a list of `ints`. The reduction
        factors corresponding to each reduction axis. The output image will have
        dimension `kspace.shape[ax] * r` for each pair `ax` and `r` in
        `reduction_axis` and `reduction_factor`. This parameter must be
        provided.
      * **rank**: An optional `int`. The rank (in the sense of spatial
        dimensionality) of this operation. Defaults to `kspace.shape.rank - 1`.
        Therefore, if `rank` is not specified, axis 0 is interpreted to be the
        coil axis and the remaining dimensions are interpreted to be spatial
        dimensions. You must specify `rank` if you intend to provide any batch
        dimensions in `kspace` and/or `sensitivities`.
      * **l2_regularizer**: An optional `float`. The L2 regularization factor
        used when solving the linear least-squares problem. Ignored if
        `fast=False`. Defaults to 0.0.
      * **fast**: An optional `bool`. Defaults to `True`. If `False`, use a
        numerically robust orthogonal decomposition method to solve the linear
        least-squares. This algorithm finds the solution even for rank deficient
        matrices, but is significantly slower. For more details, see
        `tf.linalg.lstsq`.

    * For `method="cg_sense"`, provide `kspace`, `trajectory`, `density`
      (optional) and `sensitivities`. If `density` is not provided, an estimate
      will be used (see `tfmr.estimate_density`). In addition, the following
      keyword arguments are accepted:

      * **tol**: An optional `float`. The convergence tolerance for the
        conjugate gradient iteration. Defaults to 1e-05.
      * **max_iter**: An optional `int`. The maximum number of iterations for
        the conjugate gradient iteration. Defaults to 10.
      * **return_cg_state**: An optional `bool`. Defaults to `False`. If `True`,
        return a tuple containing the image and an object describing the final
        state of the CG iteration. For more details about the CG state, see
        `tfmr.conjugate_gradient`. If `False`, only the image is returned.

    * For `method="grappa"`, provide `kspace`, `mask` and `calib`. Optionally,
      you can also provide `sensitivities` (note that `sensitivities` are not
      used for the GRAPPA computation, but they are used for adaptive coil
      combination). If `sensitivities` are not provided, coil combination will
      be performed using the sum of squares method. Additionally, the following
      keyword arguments are accepted:

      * **kernel_size**: An `int` or list of `ints`. The size of the GRAPPA
        kernel. Must have length equal to the image rank or number of spatial
        dimensions. If a scalar `int` is provided, the same size is used in all
        dimensions.
      * **weights_l2_regularizer**: An optional `float`. The regularization
        factor for the L2 regularization term used to fit the GRAPPA weights.
        If 0.0, no regularization is applied.
      * **combine_coils**: An optional `bool`. If `True`, multi-coil images
        are combined. Otherwise, the uncombined images are returned. Defaults to
        `True`.
      * **return_kspace**: An optional `bool`. If `True`, returns the filled
        *k*-space without performing the Fourier transform. In this case, coils
        are not combined regardless of the value of `combine_coils`.

    * For `method="pics"`, provide `kspace`, `mask` (Cartesian only),
      `trajectory` (non-Cartesian only), `density` (non-Cartesian only,
      optional) and `sensitivities` (optional). Additionally, the following
      keyword arguments are accepted:

      * **recon_shape**: A `tf.TensorShape` or a list of `int`. The shape of
        the reconstructed image, including temporal dimensions but not batch
        dimensions. This argument must be provided.
      * **rank**: An `int`. The number of spatial dimensions.
      * **regularizers**: A list of `tfmr.Regularizer`. The regularizers to be
        used in the iterative reconstruction.
      * **optimizer**: The optimizer. Must be `None` or `"lbfgs"`. If `None`,
        the optimizer will be selected automatically.
      * **initial_image**: A `Tensor`. The initial estimate for the iterative
        reconstruction. Must have shape `recon_shape`.
      * **max_iterations**: An `int`. The maximum number of iterations.
      * **use_density_compensation**: A `bool`. If `True`, adds an explicit
        density compensation step to the encoding operator.

  Returns:
    A `Tensor`. The reconstructed images. Has the same type as `kspace`. Has
    shape `[..., S]`, where `...` is the batch shape of `kspace` and `S` is the
    spatial shape.

  References:
    .. [1] <NAME>., <NAME>., <NAME>. and <NAME>.
      (1999), SENSE: Sensitivity encoding for fast MRI. Magn. Reson. Med.,
      42: 952-962.
      https://doi.org/10.1002/(SICI)1522-2594(199911)42:5<952::AID-MRM16>3.0.CO;2-S
    .. [2] <NAME>., <NAME>., <NAME>. and <NAME>. (2001),
      Advances in sensitivity encoding with arbitrary k-space trajectories.
      Magn. Reson. Med., 46: 638-651. https://doi.org/10.1002/mrm.1241
    .. [3] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
      <NAME>., <NAME>. and <NAME>. (2002), Generalized autocalibrating
      partially parallel acquisitions (GRAPPA). Magn. Reson. Med., 47:
      1202-1210. https://doi.org/10.1002/mrm.10171
    .. [4] <NAME>., <NAME>. and <NAME>. (2007), Undersampled radial MRI
      with multiple coils. Iterative image reconstruction using a total
      variation constraint. Magn. Reson. Med., 57: 1086-1098.
      https://doi.org/10.1002/mrm.21236
    .. [5] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
      <NAME>., <NAME>. and <NAME>. (2014), Golden-angle radial sparse
      parallel MRI: Combination of compressed sensing, parallel imaging, and
      golden-angle radial sampling for fast and flexible dynamic volumetric MRI.
      Magn. Reson. Med., 72: 707-717. https://doi.org/10.1002/mrm.24980
  """
  # Validate the user's choice of method, or infer one from the combination
  # of inputs provided.
  method = _select_reconstruction_method(
      kspace, mask, trajectory, density, calib, sensitivities, method)
  kspace = tf.convert_to_tensor(kspace)
  if mask is not None:
    mask = tf.convert_to_tensor(mask)
  if trajectory is not None:
    trajectory = tf.convert_to_tensor(trajectory)
  if density is not None:
    density = tf.convert_to_tensor(density)
  if sensitivities is not None:
    sensitivities = tf.convert_to_tensor(sensitivities)
  # Forward only the arguments that were actually given; each method
  # validates the ones it requires.
  args = {'mask': mask,
          'trajectory': trajectory,
          'density': density,
          'calib': calib,
          'sensitivities': sensitivities}
  args = {name: arg for name, arg in args.items() if arg is not None}
  return _MR_RECON_METHODS[method](kspace, **{**args, **kwargs})
def _fft(kspace,
         sensitivities=None,
         rank=None,
         multicoil=None,
         combine_coils=True):
  """MR image reconstruction using an inverse FFT (Cartesian data).

  For the parameters, see `tfmr.reconstruct`.
  """
  kspace = tf.convert_to_tensor(kspace)
  if sensitivities is not None:
    sensitivities = tf.convert_to_tensor(sensitivities)

  # Without an explicit flag, the presence of sensitivity maps decides
  # whether a coil dimension is expected.
  if multicoil is None:
    multicoil = sensitivities is not None

  if rank is not None:
    # Explicit rank: just validate it.
    rank = check_util.validate_type(rank, int, "rank")
    if rank > 3:
      raise ValueError(f"Argument `rank` must be <= 3, but got: {rank}")
  else:
    # Assume there are no leading batch dimensions, so every dimension is
    # spatial except possibly one coil dimension.
    rank = kspace.shape.rank - 1 if multicoil else kspace.shape.rank
    if rank > 3:
      raise ValueError(
          f"Can only reconstruct images up to rank 3, but `kspace` has "
          f"{rank} spatial dimensions. If `kspace` has any leading batch "
          f"dimensions, please set the argument `rank` explicitly.")

  # Inverse FFT along the trailing `rank` (spatial) axes.
  spatial_axes = list(range(-rank, 0))  # pylint: disable=invalid-unary-operand-type
  image = fft_ops.ifftn(kspace, axes=spatial_axes, shift=True)

  # Adaptive combination when maps are available; sum of squares otherwise.
  if multicoil and combine_coils:
    image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)  # pylint: disable=invalid-unary-operand-type
  return image
def _nufft(kspace,
           trajectory,
           density=None,
           sensitivities=None,
           image_shape=None,
           multicoil=None,
           combine_coils=True):
  """MR image reconstruction using density-compensated adjoint NUFFT.

  For the parameters, see `tfmr.reconstruct`.
  """
  kspace = tf.convert_to_tensor(kspace)
  trajectory = tf.convert_to_tensor(trajectory)
  if sensitivities is not None:
    sensitivities = tf.convert_to_tensor(sensitivities)

  # Insert a singleton coil axis so the trajectory broadcasts against the
  # multi-coil k-space.
  trajectory = tf.expand_dims(trajectory, -3)

  # The size of the last trajectory axis is the spatial rank.
  rank = trajectory.shape[-1]
  if rank > 3:
    raise ValueError(
        f"Can only reconstruct images up to rank 3, but `trajectory` implies "
        f"rank {rank}.")

  if image_shape is None:
    # `image_shape` is required.
    raise ValueError("Argument `image_shape` must be provided for NUFFT.")
  image_shape = tf.TensorShape(image_shape)
  image_shape.assert_has_rank(rank)

  # Without an explicit flag, the presence of sensitivity maps decides
  # whether a coil dimension is expected.
  multicoil = (sensitivities is not None) if multicoil is None else multicoil

  # Density compensation: use the caller's density (with a singleton coil
  # axis inserted) or estimate one from the trajectory.
  if density is None:
    density = traj_ops.estimate_density(trajectory, image_shape)
  else:
    density = tf.expand_dims(tf.convert_to_tensor(density), -2)
  kspace = tf.math.divide_no_nan(kspace, tensor_util.cast_to_complex(density))

  # Adjoint (type-1) NUFFT maps the non-uniform samples onto the image grid.
  image = tfft.nufft(kspace, trajectory,
                     grid_shape=image_shape,
                     transform_type='type_1',
                     fft_direction='backward')

  if multicoil and combine_coils:
    image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)
  return image
def _inufft(kspace,
            trajectory,
            sensitivities=None,
            image_shape=None,
            tol=1e-5,
            max_iter=10,
            return_cg_state=False,
            multicoil=None,
            combine_coils=True):
  """MR image reconstruction using iterative inverse NUFFT.

  Solves the normal equations ``A^H A x = A^H b`` for the NUFFT operator
  ``A`` by conjugate gradient iteration.

  For the parameters, see `tfmr.reconstruct`.
  """
  kspace = tf.convert_to_tensor(kspace)
  trajectory = tf.convert_to_tensor(trajectory)
  if sensitivities is not None:
    sensitivities = tf.convert_to_tensor(sensitivities)
  # Infer rank from number of dimensions in trajectory.
  rank = trajectory.shape[-1]
  if rank > 3:
    raise ValueError(
        f"Can only reconstruct images up to rank 3, but `trajectory` implies "
        f"rank {rank}.")
  # Check inputs and set defaults.
  if image_shape is None:
    # `image_shape` is required.
    raise ValueError("Argument `image_shape` must be provided for NUFFT.")
  image_shape = tf.TensorShape(image_shape)
  image_shape.assert_has_rank(rank)
  if multicoil is None:
    # `multicoil` defaults to True if sensitivities were passed; False
    # otherwise.
    multicoil = sensitivities is not None
  # All leading axes of `kspace` (including any coil axis) are treated as
  # batch axes for the linear solve.
  batch_shape = tf.shape(kspace)[:-1]
  # Set up system operator and right hand side.
  linop_nufft = linalg_ops.LinearOperatorNUFFT(image_shape, trajectory)
  # A^H A is self-adjoint positive definite, as CG requires.
  operator = tf.linalg.LinearOperatorComposition(
      [linop_nufft.H, linop_nufft],
      is_self_adjoint=True, is_positive_definite=True)
  # Compute right hand side.
  rhs = tf.linalg.matvec(linop_nufft.H, kspace)
  # Solve linear system using conjugate gradient iteration.
  result = linalg_ops.conjugate_gradient(operator, rhs, x=None,
                                         tol=tol, max_iter=max_iter)
  # Restore image shape.
  image = tf.reshape(result.x, tf.concat([batch_shape, image_shape], 0))
  # Do coil combination.
  if multicoil and combine_coils:
    image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)
  return (image, result) if return_cg_state else image
def _sense(kspace,
           sensitivities,
           reduction_axis,
           reduction_factor,
           rank=None,
           l2_regularizer=0.0,
           fast=True):
  """MR image reconstruction using SENSitivity Encoding (SENSE).

  Unfolds a regularly undersampled Cartesian acquisition: for each pixel of
  the aliased (reduced-FOV) image, a small least-squares system built from
  the coil sensitivities at the overlapping full-FOV locations is solved to
  recover the unaliased pixel values.

  For the parameters, see `tfmr.reconstruct`.
  """
  # Parse inputs.
  kspace = tf.convert_to_tensor(kspace)
  sensitivities = tf.convert_to_tensor(sensitivities)
  # Rank or spatial dimensionality.
  rank = rank or kspace.shape.rank - 1
  reduced_shape = kspace.shape[-rank:]
  reduction_axis = check_util.validate_list(
      reduction_axis, element_type=int, name='reduction_axis')
  reduction_factor = check_util.validate_list(
      reduction_factor, element_type=int, length=len(reduction_axis),
      name='reduction_factor')
  # Canonicalize negative axes to the [0, rank) range.
  reduction_axis = [ax + rank if ax < 0 else ax for ax in reduction_axis]
  # One reduction factor per spatial axis (1 for non-reduced axes).
  canonical_reduction = [1] * rank
  for ax, r in zip(reduction_axis, reduction_factor):
    canonical_reduction[ax] = r
  # Full-FOV image shape: reduced shape scaled up by the reduction factors.
  image_shape = tf.TensorShape(
      [s * r for s, r in zip(reduced_shape.as_list(), canonical_reduction)])
  # Compute batch shapes. `batch_shape` is the output batch shape.
  kspace_rank = kspace.shape.rank
  kspace_batch_shape = kspace.shape[:-rank-1]
  sens_rank = sensitivities.shape.rank
  sens_batch_shape = sensitivities.shape[:-rank-1]
  batch_shape = tf.broadcast_static_shape(kspace_batch_shape, sens_batch_shape)
  # We do not broadcast the k-space, by design.
  if batch_shape != kspace_batch_shape:
    raise ValueError(
        f"`kspace` and `sensitivities` have incompatible batch shapes: "
        f"{kspace_batch_shape}, {sens_batch_shape}")
  # Rearrange dimensions. Put spatial dimensions first, then coil dimension,
  # then batch dimensions.
  kspace_perm = list(range(-rank, 0)) + [-rank-1]
  kspace_perm = [ax + kspace_rank for ax in kspace_perm]
  kspace_perm += list(range(0, kspace_rank - rank - 1))
  sens_perm = list(range(-rank, 0)) + [-rank-1]
  sens_perm = [ax + sens_rank for ax in sens_perm]
  sens_perm += list(range(0, sens_rank - rank - 1))
  kspace = tf.transpose(kspace, kspace_perm)
  sensitivities = tf.transpose(sensitivities, sens_perm)
  # Compute aliased images and shift along the reduced dimensions.
  aliased_images = fft_ops.ifftn(kspace, axes=list(range(rank)), shift=True)
  aliased_images = tf.signal.ifftshift(aliased_images, axes=reduction_axis)
  # Create a grid of indices into the reduced FOV image.
  reduced_indices = tf.stack(tf.meshgrid(*[tf.range(s) for s in reduced_shape]))
  reduced_indices = tf.transpose(tf.reshape(reduced_indices, [rank, -1]))
  # Compute corresponding indices into the full FOV image: each reduced-FOV
  # pixel maps to `prod(canonical_reduction)` overlapping full-FOV pixels.
  offsets = [tf.range(r) * s for s, r in zip(
      reduced_shape.as_list(), canonical_reduction)]
  offsets = tf.transpose(tf.reshape(
      tf.stack(tf.meshgrid(*offsets)), [rank, -1]))
  indices = tf.expand_dims(reduced_indices, -2) + offsets
  # Compute the system matrices, ie, pixel-wise sensitivity matrices folding the
  # full FOV image into a reduced FOV image.
  sens_matrix = tf.gather_nd(sensitivities, indices)
  sens_matrix = tf.transpose(
      sens_matrix, [0, 2, 1] + list(range(3, 3 + sens_batch_shape.rank)))
  # Compute the right hand sides for the set of linear systems.
  rhs = tf.gather_nd(aliased_images, reduced_indices)
  # Remove any pixels known to have zero signal, with no contributions from any
  # of the aliases. Currently we can't do this for batched sensitivities, so it
  # is disabled in that case.
  if sens_batch_shape.rank == 0:
    mask = tf.reduce_sum(tf.math.square(tf.math.abs(sens_matrix)), -2) > 0
    mask = tf.math.reduce_any(mask, axis=-1)
    sens_matrix = tf.boolean_mask(sens_matrix, mask, axis=0)
    rhs = tf.boolean_mask(rhs, mask, axis=0)
    indices = tf.boolean_mask(indices, mask, axis=0)
  # Move batch dimensions to the beginning.
  sens_matrix = tf.transpose(
      sens_matrix, list(range(3, sens_matrix.shape.rank)) + [0, 1, 2])
  rhs = tf.transpose(rhs, list(range(2, rhs.shape.rank)) + [0, 1])
  rhs = tf.expand_dims(rhs, -1)
  # Broadcast the sensitivity matrix as necessary.
  sens_matrix = tf.broadcast_to(
      sens_matrix, batch_shape + sens_matrix.shape[-3:])
  # Solve the pixel-wise linear least-squares problems.
  unfolded_values = tf.linalg.lstsq(sens_matrix, rhs,
                                    l2_regularizer=l2_regularizer,
                                    fast=fast)
  unfolded_values = tf.reshape(unfolded_values, [-1])
  output_indices = tf.reshape(indices, [-1, rank])
  # For batch mode we need to do some additional indexing calculations.
  if batch_shape.rank > 0:
    batch_size = batch_shape.num_elements()
    element_size = unfolded_values.shape[0] // batch_size
    batch_indices = tf.stack(tf.meshgrid(*[tf.range(s) for s in batch_shape]))
    batch_indices = tf.transpose(
        tf.reshape(batch_indices, [batch_shape.rank, -1]))
    batch_indices = tf.expand_dims(batch_indices, -2)
    batch_indices = tf.tile(
        batch_indices, [1] * batch_shape.rank + [element_size, 1])
    batch_indices = tf.reshape(batch_indices, [-1, batch_shape.rank])
    output_indices = tf.tile(output_indices, [batch_size, 1])
    output_indices = tf.concat([batch_indices, output_indices], -1)
  # Scatter the unfolded values into the reconstructed image.
  image = tf.scatter_nd(output_indices, unfolded_values,
                        batch_shape + image_shape)
  return image
def _cg_sense(kspace,
              trajectory,
              density=None,
              sensitivities=None,
              tol=1e-5,
              max_iter=10,
              return_cg_state=False):
  """MR image reconstruction using conjugate gradient SENSE (CG-SENSE).

  Solves the preconditioned normal equations of non-Cartesian parallel
  imaging with conjugate gradient iteration, using sampling density
  compensation and coil intensity correction as preconditioners.

  For the parameters, see `tfmr.reconstruct`.
  """
  if sensitivities is None:
    raise ValueError("Argument `sensitivities` must be specified for CG-SENSE.")
  # Inputs.
  kspace = tf.convert_to_tensor(kspace)
  sensitivities = tf.convert_to_tensor(sensitivities)
  trajectory = tf.convert_to_tensor(trajectory)
  # Infer rank from the number of spatial coordinates in the trajectory.
  rank = trajectory.shape[-1]
  num_points = kspace.shape[-1]
  num_coils = kspace.shape[-2]
  image_shape = sensitivities.shape[-rank:]
  # Check some inputs.
  tf.debugging.assert_equal(
      tf.shape(kspace)[-1], tf.shape(trajectory)[-2], message=(
          f"The number of samples in `kspace` (axis -1) and `trajectory` "
          f"(axis -2) must match, but got: {tf.shape(kspace)[-1]}, "
          f"{tf.shape(trajectory)[-2]}"))
  # Fix: the message previously reported `tf.shape(kspace)[-1]` (the sample
  # count) instead of the coil count on axis -2.
  tf.debugging.assert_equal(
      tf.shape(kspace)[-2], tf.shape(sensitivities)[-rank-1], message=(
          f"The number of coils in `kspace` (axis -2) and `sensitivities` "
          f"(axis {-rank-1}) must match, but got: {tf.shape(kspace)[-2]}, "
          f"{tf.shape(sensitivities)[-rank-1]}"))
  # Check batch shapes.
  kspace_batch_shape = kspace.shape[:-2]
  sens_batch_shape = sensitivities.shape[:-rank-1]
  traj_batch_shape = trajectory.shape[:-2]
  batch_shape = tf.broadcast_static_shape(kspace_batch_shape, sens_batch_shape)
  # We do not broadcast the k-space input, by design.
  if batch_shape != kspace_batch_shape:
    raise ValueError(
        f"`kspace` and `sensitivities` have incompatible batch shapes: "
        f"{kspace_batch_shape}, {sens_batch_shape}")
  batch_shape = tf.broadcast_static_shape(kspace_batch_shape, traj_batch_shape)
  if batch_shape != kspace_batch_shape:
    raise ValueError(
        f"`kspace` and `trajectory` have incompatible batch shapes: "
        f"{kspace_batch_shape}, {traj_batch_shape}")
  # For sampling density correction.
  if density is None:
    # Sampling density not provided, so estimate from trajectory.
    density = traj_ops.estimate_density(trajectory, image_shape)
  else:
    # Use the provided sampling density.
    density = tf.convert_to_tensor(density)
    density = tf.expand_dims(density, -2) # Add coil dimension.
  # For intensity correction: sum-of-squares of the coil sensitivities.
  intensity = tf.math.reduce_sum(tf.math.square(tf.math.abs(sensitivities)),
                                 axis=-rank-1)
  # Prepare intensity correction linear operator.
  intensity_weights = tf.math.reciprocal_no_nan(intensity)
  linop_intensity = linalg_ops.LinearOperatorRealWeighting(
      tf.math.sqrt(intensity_weights),
      arg_shape=intensity_weights.shape[-rank:],
      dtype=kspace.dtype)
  # Prepare density compensation linear operator.
  density_weights = tf.math.reciprocal_no_nan(density)
  linop_density = linalg_ops.LinearOperatorRealWeighting(
      tf.math.sqrt(density_weights),
      arg_shape=[num_coils, num_points],
      dtype=kspace.dtype)
  # Get non-Cartesian parallel MRI operator.
  linop_parallel_mri = linalg_ops.LinearOperatorParallelMRI(
      sensitivities, trajectory=trajectory)
  # Calculate the right half of the system operator. Then, the left half is the
  # adjoint of the right half.
  linop_right = tf.linalg.LinearOperatorComposition(
      [linop_density, linop_parallel_mri, linop_intensity])
  linop_left = linop_right.H
  # Finally, make system operator. We know this to be self-adjoint and positive
  # definite, as required for CG.
  operator = tf.linalg.LinearOperatorComposition(
      [linop_left, linop_right],
      is_self_adjoint=True, is_positive_definite=True)
  # Step 1. Compute the right hand side of the linear system.
  kspace_vec = tf.reshape(kspace, batch_shape.as_list() + [-1])
  rhs = tf.linalg.matvec(linop_left,
                         tf.linalg.matvec(linop_density, kspace_vec))
  # Step 2. Perform CG iteration to solve modified system.
  result = linalg_ops.conjugate_gradient(operator, rhs,
                                         tol=tol, max_iter=max_iter)
  # Step 3. Correct intensity to obtain solution to original system.
  image_vec = tf.linalg.matvec(linop_intensity, result.x)
  # Restore image shape.
  image = tf.reshape(image_vec, batch_shape.as_list() + image_shape)
  return (image, result) if return_cg_state else image
def _grappa(kspace,
            mask=None,
            calib=None,
            sensitivities=None,
            kernel_size=5,
            weights_l2_regularizer=0.0,
            combine_coils=True,
            return_kspace=False):
  """MR image reconstruction using GRAPPA.

  Fills unmeasured k-space samples by linear combination of neighbouring
  measured samples across all coils, with interpolation weights estimated
  by least squares from a fully-sampled calibration region.

  For the parameters, see `tfmr.reconstruct`.
  """
  if mask is None:
    raise ValueError("Argument `mask` must be provided.")
  if calib is None:
    raise ValueError("Argument `calib` must be provided.")
  kspace = tf.convert_to_tensor(kspace)
  calib = tf.convert_to_tensor(calib)
  mask = tf.convert_to_tensor(mask)
  # If mask has no holes, there is nothing to do.
  if tf.math.count_nonzero(tf.math.logical_not(mask)) == 0:
    return kspace
  # Use `mask` to infer rank.
  rank = mask.shape.rank
  # If an `int` was given for the kernel size, use isotropic kernel in all
  # dimensions.
  if isinstance(kernel_size, int):
    kernel_size = [kernel_size] * rank
  # Get multi-dimensional and flat indices for kernel center, e.g. [2, 2]
  # (multi), 12 (flat) for [5, 5] kernel. `kernel_center` is also used as half
  # the size of the kernel.
  kernel_center = [ks // 2 for ks in kernel_size]
  kernel_center_index = array_ops.ravel_multi_index(kernel_center, kernel_size)
  # Save batch shape for later, broadcast `calib` to match `kspace` and reshape
  # inputs to a single batch axis (except `mask`, which should have no batch
  # dimensions).
  kspace_shape = tf.shape(kspace)[-rank-1:] # No batch dims.
  calib_shape = tf.shape(calib)[-rank-1:] # No batch dims.
  batch_shape = tf.shape(kspace)[:-rank-1]
  if tf.math.reduce_prod(tf.shape(calib)[:-rank-1]) == 1:
    # Shared calibration. Do not broadcast, but maybe add batch dimension.
    calib = tf.reshape(calib, tf.concat([[1], calib_shape], 0))
  else:
    # General case. Calibration may not be shared for all inputs.
    calib = tf.broadcast_to(calib, tf.concat([batch_shape, calib_shape], 0))
  kspace = tf.reshape(kspace, tf.concat([[-1], kspace_shape], 0))
  calib = tf.reshape(calib, tf.concat([[-1], calib_shape], 0))
  batch_size = tf.shape(kspace)[0]
  num_coils = tf.shape(kspace)[1]
  # Move coil axis to the end, i.e. [batch, coil, *dims] -> [batch, *dims, coil]
  perm = [0, *list(range(2, rank + 2)), 1]
  kspace = tf.transpose(kspace, perm)
  calib = tf.transpose(calib, perm)
  # Initialize output tensor and fill with the measured values.
  full_shape = tf.concat([[batch_size], tf.shape(mask), [num_coils]], 0)
  measured_indices = tf.cast(tf.where(mask), tf.int32)
  measured_indices = _insert_batch_indices(measured_indices, batch_size)
  full_kspace = tf.scatter_nd(measured_indices,
                              tf.reshape(kspace, [-1, num_coils]),
                              full_shape)
  # Pad arrays so we can slide the kernel in the edges.
  paddings = tf.concat([[0], kernel_center, [0]], 0)
  paddings = tf.expand_dims(paddings, -1)
  paddings = tf.tile(paddings, [1, 2])
  full_kspace = tf.pad(full_kspace, paddings) # pylint:disable=no-value-for-parameter
  calib = tf.pad(calib, paddings) # pylint:disable=no-value-for-parameter
  mask = tf.pad(mask, paddings[1:-1, :], constant_values=False)
  # Extract all patches from the mask. We cast to `float32` because `bool` is
  # not currently supported in all devices for `_extract_patches` (TF v2.6).
  mask_patches = _extract_patches(
      tf.cast(mask[tf.newaxis, ..., tf.newaxis], tf.float32), kernel_size) > 0.5
  # Find the unique patterns among all the mask patches. `unique_inverse` are
  # the indices that reconstruct `mask_patches` from `unique_patches`.
  patch_array_shape = tf.shape(mask_patches, out_type=tf.int64)[1:-1]
  mask_patches = tf.reshape(
      mask_patches, [-1, tf.math.reduce_prod(kernel_size)])
  unique_patches, unique_inverse = tf.raw_ops.UniqueV2(x=mask_patches, axis=[0])
  unique_inverse = tf.cast(unique_inverse, tf.int64)
  unique_inverse = tf.reshape(unique_inverse, patch_array_shape)
  # Select only patches that:
  # - Have a hole in the center. Otherwise job is done!
  # - Are not empty. Otherwise there is nothing we can do!
  valid_patch_indices = tf.where(tf.math.logical_and(
      tf.math.logical_not(unique_patches[:, kernel_center_index]),
      tf.math.reduce_any(unique_patches, axis=-1)))
  valid_patch_indices = tf.squeeze(valid_patch_indices, axis=-1)
  # Get all overlapping patches of ACS.
  calib_patches = _extract_patches(calib, kernel_size)
  calib_patches = _flatten_spatial_axes(calib_patches)
  calib_patches = _split_last_dimension(calib_patches, num_coils)
  # For each geometry (unique sampling pattern around a hole).
  for patch_index in valid_patch_indices:
    # Estimate the GRAPPA weights for current geometry. Get all possible
    # calibration patches with current geometry: sources (available data) and
    # targets (holes to fill). Given known sources and targets, estimate weights
    # using (possibly regularized) least squares.
    sources = tf.boolean_mask(calib_patches,
                              unique_patches[patch_index, :], axis=-2)
    sources = _flatten_last_dimensions(sources)
    targets = calib_patches[..., kernel_center_index, :]
    weights = tf.linalg.lstsq(sources, targets,
                              l2_regularizer=weights_l2_regularizer)
    # Now find all patch offsets (upper-left corners) and centers for current
    # geometry.
    patch_offsets = tf.where(unique_inverse == patch_index)
    patch_centers = tf.cast(patch_offsets + kernel_center, tf.int32)
    patch_centers = _insert_batch_indices(patch_centers, batch_size)
    # Collect all sources from partially measured `kspace` (all patches with
    # current geometry are pulled at the same time here).
    sources = image_ops.extract_glimpses(
        full_kspace, kernel_size, patch_offsets)
    sources = _split_last_dimension(sources, num_coils)
    sources = tf.boolean_mask(sources, unique_patches[patch_index, :], axis=-2)
    sources = _flatten_last_dimensions(sources)
    # Compute targets using the previously estimated weights.
    targets = tf.linalg.matmul(sources, weights)
    targets = tf.reshape(targets, [-1, num_coils])
    # Fill the holes.
    full_kspace = tf.tensor_scatter_nd_update(full_kspace,
                                              patch_centers,
                                              targets)
  # `full_kspace` was zero-padded at the beginning. Crop it to correct shape.
  full_kspace = image_ops.central_crop(
      full_kspace, tf.concat([[-1], full_shape[1:-1], [-1]], 0))
  # Move coil axis back. [batch, *dims, coil] -> [batch, coil, *dims]
  inv_perm = tf.math.invert_permutation(perm)
  full_kspace = tf.transpose(full_kspace, inv_perm)
  # Restore batch shape.
  result = tf.reshape(
      full_kspace, tf.concat([batch_shape, tf.shape(full_kspace)[1:]], 0))
  if return_kspace:
    return result
  # Inverse FFT to image domain.
  result = fft_ops.ifftn(result, axes=list(range(-rank, 0)), shift=True)
  # Combine coils if requested.
  if combine_coils:
    result = coil_ops.combine_coils(result,
                                    maps=sensitivities,
                                    coil_axis=-rank-1)
  return result
def _pics(kspace,
          mask=None,
          trajectory=None,
          density=None,
          sensitivities=None,
          recon_shape=None,
          rank=None,
          regularizers=None,
          optimizer=None,
          initial_image=None,
          max_iterations=50,
          use_density_compensation=True):
  """MR image reconstruction using parallel imaging and compressed sensing.

  Minimizes an L2 data-consistency term plus optional regularization terms
  with an iterative optimizer (currently only L-BFGS).

  For the parameters, see `tfmr.reconstruct`.
  """
  # Check reconstruction shape.
  if recon_shape is None:
    raise ValueError(
        "Input `recon_shape` must be provided for CS.")
  recon_shape = tf.TensorShape(recon_shape)
  # Check regularizers.
  if regularizers is None:
    regularizers = []
  # Check optimizer.
  if optimizer is None:
    optimizer = 'lbfgs' # Default optimizer.
  optimizer = check_util.validate_enum(optimizer, {'lbfgs'}, name='optimizer')
  # Check what kind of reconstruction this is.
  is_cartesian = trajectory is None
  is_multicoil = sensitivities is not None
  if is_cartesian: # Cartesian imaging.
    # Number of spatial dimensions. Use `rank` parameter. If `rank` was not
    # provided, assume all dimensions are spatial dimensions.
    rank = rank or recon_shape.rank
    # Number of dimensions in reconstruction (spatial dimensions plus other
    # potentially regularized dimensions such as time).
    recon_dims = recon_shape.rank
    time_dims = recon_dims - rank
    # Shape of `kspace` (encoding dimensions only). Shape has length N for
    # N-dimensional imaging, or N + 1 for multicoil imaging.
    kspace_encoding_shape = kspace.shape[-(rank + is_multicoil):]
    # The batch shape. The shape of `kspace` without the encoding dimensions,
    # time dimensions or coil dimension.
    batch_shape = kspace.shape[:-(recon_dims + is_multicoil)]
  else: # Non-Cartesian imaging.
    # Infer rank from trajectory. Parameter `rank` is ignored for non-Cartesian
    # imaging.
    rank = trajectory.shape[-1]
    # Number of dimensions in reconstruction (spatial dimensions plus other
    # potentially regularized dimensions such as time).
    recon_dims = recon_shape.rank
    time_dims = recon_dims - rank
    # Shape of `kspace` (encoding dimensions only). Shape has length 1, or 2 for
    # multicoil imaging.
    kspace_encoding_shape = kspace.shape[-(1 + is_multicoil):]
    # The batch shape. The shape of `kspace` without the single encoding
    # dimension, time dimensions or coil dimension.
    batch_shape = kspace.shape[:-(time_dims + 1 + is_multicoil)]
  # Subshapes of reconstruction shape. `image_shape` has the spatial dimensions,
  # while `time_shape` has the time dimensions (or any other non-spatial
  # dimensions).
  image_shape = recon_shape[-rank:] # pylint: disable=invalid-unary-operand-type
  time_shape = recon_shape[:-rank] # pylint: disable=invalid-unary-operand-type
  # The solution `x` should have shape `recon_shape` plus the additional batch
  # dimensions. The measurements `y` should be the flattened encoding
  # dimension/s plus the time dimensions plus the batch dimensions.
  x_shape = batch_shape + recon_shape
  y_shape_tensor = tf.concat([batch_shape, time_shape, [-1]], 0)
  # Estimate density if it was not provided.
  if not is_cartesian and density is None and use_density_compensation:
    density = traj_ops.estimate_density(trajectory, image_shape)
  # Compute and apply weights.
  if not is_cartesian and use_density_compensation:
    weights = tf.math.sqrt(tf.math.reciprocal_no_nan(density))
    if is_multicoil:
      weights = tf.expand_dims(weights, -2) # Add the channel dimension.
    kspace *= tf.cast(weights, kspace.dtype)
  # Flatten `kspace` to a single encoding dimension.
  y = tf.reshape(kspace, y_shape_tensor)
  # Select encoding operator.
  if is_multicoil:
    e = linalg_ops.LinearOperatorParallelMRI(
        sensitivities,
        mask=mask,
        trajectory=trajectory,
        rank=recon_shape.rank,
        norm='ortho')
  else:
    if is_cartesian:
      e = linalg_ops.LinearOperatorFFT(recon_shape, mask=mask, norm='ortho')
    else:
      e = linalg_ops.LinearOperatorNUFFT(recon_shape, trajectory, norm='ortho')
  # Add density compensation to encoding operator.
  if not is_cartesian and use_density_compensation:
    linop_dens = linalg_ops.LinearOperatorRealWeighting(
        weights,
        arg_shape=kspace_encoding_shape,
        dtype=kspace.dtype)
    e = tf.linalg.LinearOperatorComposition([linop_dens, e])
  @tf.function
  @math_ops.make_val_and_grad_fn
  def _objective(x):
    # Reinterpret real input as complex and reshape to correct shape.
    x = math_ops.view_as_complex(x, stacked=False)
    # NOTE(review): the candidate image is reshaped with `y_shape_tensor`
    # before applying `e`; this assumes the operator's domain flattens to the
    # same `batch + time + [-1]` pattern as the measurements — confirm.
    x = tf.reshape(x, y_shape_tensor)
    # Compute data consistency terms.
    value = tf.math.abs(tf.norm(y - tf.linalg.matvec(e, x), ord=2))
    # Add regularization term[s].
    x = tf.reshape(x, x_shape)
    for reg in regularizers:
      value += reg(x)
    return value
  # Prepare initial estimate.
  if initial_image is None:
    # Default initial estimate: adjoint reconstruction of the measurements.
    initial_image = tf.linalg.matvec(e.H, y)
  initial_image = tf.reshape(initial_image,
                             tf.concat([batch_shape, [-1]], 0))
  # The optimizer works on real variables: view complex image as real pairs.
  initial_image = math_ops.view_as_real(initial_image, stacked=False)
  # Perform optimization.
  if optimizer == 'lbfgs':
    result = optimizer_ops.lbfgs_minimize(_objective, initial_image,
                                          max_iterations=max_iterations)
  else:
    raise ValueError(f"Unknown optimizer: {optimizer}")
  # Image to correct shape and type.
  recon = tf.reshape(
      math_ops.view_as_complex(result.position, stacked=False), recon_shape)
  return recon
def _extract_patches(images, sizes):
  """Extract patches from N-D image.

  Args:
    images: A `Tensor` of shape `[batch_size, *spatial_dims, channels]`.
      `spatial_dims` must have rank 2 or 3.
    sizes: A list of `ints`. The size of the patches. Must have the same length
      as `spatial_dims`.

  Returns:
    A `Tensor` containing the extracted patches.

  Raises:
    ValueError: If rank is not 2 or 3.
  """
  # Spatial rank is given by the number of patch sizes.
  rank = len(sizes)
  if rank == 2:
    patches = tf.image.extract_patches(
        images,
        sizes=[1, *sizes, 1],
        strides=[1, 1, 1, 1],
        rates=[1, 1, 1, 1],
        padding='VALID')
  elif rank == 3:
    # `tf.extract_volume_patches` does not support complex tensors, so we do the
    # extraction for real and imaginary separately and then combine.
    if images.dtype.is_complex:
      patches_real = tf.extract_volume_patches(
          tf.math.real(images),
          ksizes=[1, *sizes, 1],
          strides=[1, 1, 1, 1, 1],
          padding='VALID')
      patches_imag = tf.extract_volume_patches(
          tf.math.imag(images),
          ksizes=[1, *sizes, 1],
          strides=[1, 1, 1, 1, 1],
          padding='VALID')
      patches = tf.dtypes.complex(patches_real, patches_imag)
    else:
      patches = tf.extract_volume_patches(
          images,
          ksizes=[1, *sizes, 1],
          strides=[1, 1, 1, 1, 1],
          padding='VALID')
  else:
    raise ValueError(f"Unsupported rank: {rank}")
  return patches
def _insert_batch_indices(indices, batch_size): # pylint: disable=missing-param-doc
  """Inserts batch indices into an array of indices.

  Given an array of indices with shape `[M, N]` which indexes into a tensor
  `x`, returns a new array with shape `[batch_size * M, N + 1]` which indexes
  into a tensor of shape `[batch_size] + x.shape`.
  """
  num_rows = tf.shape(indices)[0]
  # One batch number per replicated row: [0, ..., 0, 1, ..., 1, ...].
  batch_column = tf.expand_dims(
      tf.repeat(tf.range(batch_size), num_rows), -1)
  replicated = tf.tile(indices, [batch_size, 1])
  return tf.concat([batch_column, replicated], -1)
def _flatten_spatial_axes(images): # pylint: disable=missing-param-doc
  """Flattens the spatial axes of an image.

  If `images` has shape `[batch_size, *spatial_dims, channels]`, returns a
  `Tensor` with shape `[batch_size, prod(spatial_dims), channels]`.
  """
  batch_dim = tf.shape(images)[0]
  channel_dim = tf.shape(images)[-1]
  return tf.reshape(images, [batch_dim, -1, channel_dim])
def _split_last_dimension(x, size):
  """Splits the last dimension of `x` into two dimensions.

  Returns a tensor of rank `tf.rank(x) + 1` whose last dimension has size
  `size`.
  """
  kept_dims = tf.shape(x)[:-1]
  new_shape = tf.concat([kept_dims, [-1, size]], 0)
  return tf.reshape(x, new_shape)
def _flatten_last_dimensions(x):
  """Collapses the last two dimensions of `x` into one.

  Returns a tensor of rank `tf.rank(x) - 1`.
  """
  kept_dims = tf.shape(x)[:-2]
  flat_shape = tf.concat([kept_dims, [-1]], 0)
  return tf.reshape(x, flat_shape)
def _select_reconstruction_method(kspace, # pylint: disable=unused-argument
mask,
trajectory,
density,
calib,
sensitivities,
method):
"""Select an appropriate reconstruction method based on user inputs.
For the parameters, see `tfmr.reconstruct`.
"""
# If user selected a method, use it. We do not check that inputs are valid
# here, this will be done by the methods themselves.
if method is not None:
if method not in _MR_RECON_METHODS:
return ValueError(
f"Could not find a reconstruction method named: `{method}`")
return method
# No method was specified: choose a default one.
if (sensitivities is None and
trajectory is None and
density is None and
calib is None and
mask is None):
return 'fft'
if (sensitivities is None and
trajectory is not None and
calib is None and
mask is None):
return 'nufft'
if (sensitivities is not None and
trajectory is None and
density is None and
calib is None and
mask is None):
return 'sense'
if (sensitivities is not None and
trajectory is not None and
calib is None and
mask is None):
return 'cg_sense'
if (trajectory is None and
density is None and
calib is not None and
mask is not None):
return 'grappa'
# Nothing worked.
raise ValueError(
"Could not find any reconstruction method that supports the specified "
"combination of inputs.")
def reconstruct_partial_kspace(kspace,
                               factors,
                               return_complex=False,
                               return_kspace=False,
                               method='zerofill',
                               **kwargs):
  """Partial Fourier image reconstruction.

  Args:
    kspace: A `Tensor`. The *k*-space data. Must have type `complex64` or
      `complex128`. Must have shape `[..., *K]`, where `K` are the spatial
      frequency dimensions. `kspace` should only contain the observed data,
      without zero-filling of any kind.
    factors: A list of `floats`. The partial Fourier factors. There must be a
      factor for each spatial frequency dimension. Each factor must be between
      0.5 and 1.0 and indicates the proportion of observed *k*-space values
      along the specified dimensions.
    return_complex: A `bool`. If `True`, returns complex instead of real-valued
      images. Note that partial Fourier reconstruction assumes that images are
      real, and the returned complex values may not be valid in all contexts.
    return_kspace: A `bool`. If `True`, returns the filled *k*-space instead of
      the reconstructed images. This is always complex-valued.
    method: A `string`. The partial Fourier reconstruction algorithm. Must be
      one of `"zerofill"`, `"homodyne"` (homodyne detection method) or `"pocs"`
      (projection onto convex sets method).
    **kwargs: Additional method-specific keyword arguments. See Notes for
      details.

  Returns:
    A `Tensor` with shape `[..., *S]` where `S = K / factors`. Has type
    `kspace.dtype` if either `return_complex` or `return_kspace` is `True`, and
    type `kspace.dtype.real_dtype` otherwise.

  Notes:
    This function accepts some method-specific arguments:

    * `method="zerofill"` accepts no additional arguments.
    * `method="homodyne"` accepts the following additional keyword arguments:
      * **weighting_fn**: An optional `string`. The weighting function. Must be
        one of `"step"`, `"ramp"`. Defaults to `"ramp"`. `"ramp"` helps
        mitigate Gibbs artifact, while `"step"` has better SNR properties.
    * `method="pocs"` accepts the following additional keyword arguments:
      * **tol**: An optional `float`. The convergence tolerance. Defaults to
        `1e-5`.
      * **max_iter**: An optional `int`. The maximum number of iterations of the
        POCS algorithm. Defaults to `10`.

  References:
    .. [1] Noll, D. C., Nishimura, D. G., & Macovski, A. (1991). Homodyne
      detection in magnetic resonance imaging. IEEE transactions on medical
      imaging, 10(2), 154-163.
    .. [2] Haacke, E. M., Lindskog, E. D., & Lin, W. (1991). A fast, iterative,
      partial-Fourier technique capable of local phase recovery. Journal of
      Magnetic Resonance (1969), 92(1), 126-145.
  """
  kspace = tf.convert_to_tensor(kspace)
  factors = tf.convert_to_tensor(factors)
  # Validate inputs.
  method = check_util.validate_enum(method, {'zerofill', 'homodyne', 'pocs'})
  tf.debugging.assert_greater_equal(factors, 0.5, message=(
      f"`factors` must be greater than or equal to 0.5, but got: {factors}"))
  tf.debugging.assert_less_equal(factors, 1.0, message=(
      f"`factors` must be less than or equal to 1.0, but got: {factors}"))
  # Dispatch to the implementation for the selected method.
  func = {'zerofill': _pf_zerofill,
          'homodyne': _pf_homodyne,
          'pocs': _pf_pocs}
  return func[method](kspace, factors,
                      return_complex=return_complex,
                      return_kspace=return_kspace,
                      **kwargs)
def _pf_zerofill(kspace, factors, return_complex=False, return_kspace=False):
  """Partial Fourier reconstruction by zero-filling the unmeasured samples.

  The measured k-space is padded with trailing zeros along each partial
  dimension up to the full grid size, then inverse-transformed.

  For the parameters, see `reconstruct_partial_kspace`.
  """
  measured_shape = tf.shape(kspace)
  full_shape = _scale_shape(measured_shape, 1.0 / factors)
  # Amount of trailing zero-padding needed along each axis.
  pad_amounts = tf.expand_dims(full_shape - measured_shape, -1)
  pad_amounts = tf.pad(pad_amounts, [[0, 0], [1, 0]]) # pylint: disable=no-value-for-parameter
  full_kspace = tf.pad(kspace, pad_amounts) # pylint: disable=no-value-for-parameter
  if return_kspace:
    return full_kspace
  image = _ifftn(full_kspace, tf.size(factors))
  return image if return_complex else tf.math.abs(image)
def _pf_homodyne(kspace,
                 factors,
                 return_complex=False,
                 return_kspace=False,
                 weighting_fn='ramp'):
  """Partial Fourier reconstruction using homodyne detection.

  Weights the measured asymmetric k-space so that the conjugate symmetry of
  real-valued images compensates for the unmeasured samples, then
  demodulates with a low-resolution phase estimate.

  For the parameters, see `reconstruct_partial_kspace`.
  """
  # Data type of the input k-space (used for the weighting function).
  dtype = kspace.dtype
  # Create zero-filled k-space.
  full_kspace = _pf_zerofill(kspace, factors, return_kspace=True)
  full_shape = tf.shape(full_kspace)
  # Shape of the symmetric region.
  shape_sym = _scale_shape(full_shape, 2.0 * (factors - 0.5))
  # Compute weighting function. Weighting function is:
  # - 2.0 for the asymmetric part of the measured k-space.
  # - A ramp from 2.0 to 0.0 for the symmetric part of the measured k-space.
  # - 0.0 for the part of k-space that was not measured.
  weights = tf.constant(1.0, dtype=kspace.dtype)
  # Build the weighting one axis at a time, innermost axis first.
  for i in range(len(factors)):
    dim_sym = shape_sym[-i-1]
    dim_asym = (full_shape[-i-1] - dim_sym) // 2
    # Weighting for symmetric part of k-space.
    if weighting_fn == 'step':
      weights_sym = tf.ones([dim_sym], dtype=dtype)
    elif weighting_fn == 'ramp':
      weights_sym = tf.cast(tf.linspace(2.0, 0.0, dim_sym), dtype)
    else:
      raise ValueError(f"Unknown `weighting_fn`: {weighting_fn}")
    # Reshape so the 1-D weighting broadcasts along axis `-i-1`.
    weights *= tf.reshape(tf.concat(
        [2.0 * tf.ones([dim_asym], dtype=dtype),
         weights_sym,
         tf.zeros([dim_asym], dtype=dtype)], 0), [-1] + [1] * i)
  # Phase correction. Estimate a phase modulator from low resolution image using
  # symmetric part of k-space.
  phase_modulator = _estimate_phase_modulator(full_kspace, factors)
  # Compute image with following steps.
  # 1. Apply weighting function.
  # 2. Convert to image domain.
  # 3. Apply phase correction.
  full_kspace *= weights
  image = _ifftn(full_kspace, tf.size(factors))
  image *= tf.math.conj(phase_modulator)
  if return_kspace:
    return _fftn(image, tf.size(factors))
  if return_complex:
    return image
  return _real_non_negative(image)
def _pf_pocs(kspace,
             factors,
             return_complex=False,
             return_kspace=False,
             max_iter=10,
             tol=1e-5):
  """Partial Fourier reconstruction using projection onto convex sets (POCS).

  Alternates between enforcing the estimated image phase and restoring the
  measured k-space samples until the relative change of the image falls
  below `tol` or `max_iter` iterations are reached.

  For the parameters, see `reconstruct_partial_kspace`.
  """
  # Zero-filled k-space.
  full_kspace = _pf_zerofill(kspace, factors, return_kspace=True)
  # Generate a k-space mask which is True for measured samples, False otherwise.
  kspace_mask = tf.constant(True)
  for i in tf.range(tf.size(factors)):
    dim_partial = kspace.shape[-i-1]
    dim_full = full_kspace.shape[-i-1]
    # 1-D measured/unmeasured pattern reshaped to broadcast along axis `-i-1`.
    kspace_mask = tf.math.logical_and(kspace_mask, tf.reshape(tf.concat(
        [tf.fill([dim_partial], True),
         tf.fill([dim_full - dim_partial], False)], 0),
        tf.concat([[-1], tf.repeat([1], [i])], 0)))
  # Estimate the phase modulator from central symmetric region of k-space.
  phase_modulator = _estimate_phase_modulator(full_kspace, factors)
  # Initial estimate of the solution.
  image = tf.zeros_like(full_kspace)
  # Type to hold state of the iteration.
  pocs_state = collections.namedtuple('pocs_state', ['i', 'x', 'r'])
  def stopping_criterion(i, state):
    # Continue while under the iteration budget and above tolerance.
    return tf.math.logical_and(i < max_iter,
                               state.r > tol)
  def pocs_step(i, state):
    prev = state.x
    # Set the estimated phase.
    image = tf.cast(tf.math.abs(prev), prev.dtype) * phase_modulator
    # Data consistency. Replace estimated k-space values by measured ones if
    # available.
    kspace = _fftn(image, tf.size(factors))
    kspace = tf.where(kspace_mask, full_kspace, kspace)
    image = _ifftn(kspace, tf.size(factors))
    # Phase demodulation.
    image *= tf.math.conj(phase_modulator)
    # Calculate the relative difference.
    diff = tf.math.abs(tf.norm(image - prev) / tf.norm(prev))
    return i + 1, pocs_state(i=i + 1, x=image, r=diff)
  i = tf.constant(0, dtype=tf.int32)
  state = pocs_state(i=0, x=image, r=1.0)
  _, state = tf.while_loop(stopping_criterion, pocs_step, [i, state])
  image = state.x
  if return_kspace:
    return _fftn(image, tf.size(factors))
  if return_complex:
    return image
  return _real_non_negative(image)
def _estimate_phase_modulator(full_kspace, factors): # pylint: disable=missing-param-doc
  """Estimate a phase modulator from central region of k-space."""
  full_shape = tf.shape(full_kspace)
  # Extent of the symmetrically-sampled central region in each dimension.
  sym_shape = _scale_shape(full_shape, 2.0 * (factors - 0.5))
  # Symmetric zero-padding that re-centres a ones-block of `sym_shape`
  # inside the full k-space grid.
  pad_amount = tf.tile(tf.expand_dims((full_shape - sym_shape) // 2, -1), [1, 2])
  center_mask = tf.pad(tf.ones(sym_shape, dtype=full_kspace.dtype), pad_amount) # pylint: disable=no-value-for-parameter
  # Low-resolution reference image from the masked (central) k-space.
  low_res_image = _ifftn(full_kspace * center_mask, tf.size(factors))
  # exp(i * angle): unit-magnitude phase map of the reference image.
  zero = tf.constant(0.0, dtype=low_res_image.dtype.real_dtype)
  return tf.math.exp(tf.dtypes.complex(zero, tf.math.angle(low_res_image)))
def _scale_shape(shape, factors):
  """Scale the last dimensions of `shape` by `factors`.

  Dimensions without a corresponding factor are left unchanged
  (their factor defaults to 1.0); results are rounded to nearest.
  """
  num_leading = tf.size(shape) - tf.size(factors)
  padded_factors = tf.pad(factors, [[num_leading, 0]], constant_values=1.0)
  scaled = tf.cast(shape, tf.float32) * padded_factors
  return tf.cast(scaled + 0.5, tf.int32)  # +0.5 then truncate == round half up
def _real_non_negative(x):
  """Return the real part of `x`, clipped below at zero."""
  return tf.math.maximum(0.0, tf.math.real(x))


def _fftn(x, rank):
  """Centered forward FFT over the last `rank` axes of `x`."""
  return fft_ops.fftn(x, axes=tf.range(-rank, 0), shift=True)


def _ifftn(x, rank):
  """Centered inverse FFT over the last `rank` axes of `x`."""
  return fft_ops.ifftn(x, axes=tf.range(-rank, 0), shift=True)
# Dispatch table: user-facing reconstruction method name -> handler.
# All handlers are defined earlier in this module.
_MR_RECON_METHODS = {
    'fft': _fft,
    'nufft': _nufft,
    'inufft': _inufft,
    'sense': _sense,
    'cg_sense': _cg_sense,
    'grappa': _grappa,
    'pics': _pics
}
| StarcoderdataPython |
1728141 | <reponame>bookRa/q-tutorials
import graphene
import resolvers
# GraphQL type exposing basic service metadata.
# (Comments, not docstrings: graphene publishes class docstrings as
# schema descriptions, which would change the schema.)
class Info(graphene.ObjectType):
    id = graphene.ID(required=True)
    name = graphene.String(required=True)
    description = graphene.String()  # optional free-text description
# GraphQL type for a stored sentence.
class Sentence(graphene.ObjectType):
    id = graphene.ID(required=True)
    text = graphene.String(required=True)
# GraphQL type for a person entity.
class Person(graphene.ObjectType):
    id = graphene.ID(required=True)
    name = graphene.String(required=True)
# Root query type; every resolver delegates to the `resolvers` module.
class Query(graphene.ObjectType):
    info = graphene.Field(Info)
    # `id` mirrors the GraphQL argument name (intentionally shadows the builtin).
    sentence = graphene.Field(Sentence, id=graphene.ID())
    all_sentences = graphene.Field(graphene.List(Sentence))
    person = graphene.Field(Person, id=graphene.ID())
    all_people = graphene.Field(graphene.List(Person))
    # NOTE(review): sync while the other resolvers are async — presumably
    # resolvers.info() does no I/O; confirm.
    def resolve_info(self, _):
        return resolvers.info()
    async def resolve_sentence(self, info, id):
        # Fetch a single sentence by id.
        return await resolvers.sentence(id)
    async def resolve_all_sentences(self, _):
        return await resolvers.all_sentences()
    async def resolve_person(self, info, id):
        # Fetch a single person by id.
        return await resolvers.person(id)
    async def resolve_all_people(self, _):
        return await resolvers.all_people()
# Input payload for the addSentence mutation.
class AddSentenceInput(graphene.InputObjectType):
    id = graphene.ID()  # optional: client may supply an id
    text = graphene.String(required=True)
# Mutation: persist a new sentence; resolves to the created Sentence.
class AddSentence(graphene.Mutation):
    class Arguments:
        input = AddSentenceInput(required=True)
    Output = Sentence  # return type of mutate()
    async def mutate(self, _, input):
        return await resolvers.add_sentence(input)
# Input payload for the extractAndLink mutation.
class ExtractAndLinkInput(graphene.InputObjectType):
    text = graphene.String(required=True)
# Mutation: run extraction/linking over the given text.
class ExtractAndLink(graphene.Mutation):
    class Arguments:
        input = ExtractAndLinkInput(required=True)
    # Resolves to a list of IDs — presumably the linked/created entities;
    # verify against resolvers.extract_and_link.
    Output = graphene.List(graphene.ID)
    async def mutate(self, _, input):
        return await resolvers.extract_and_link(input)
# Root mutation type aggregating all mutations.
class Mutation(graphene.ObjectType):
    add_sentence = AddSentence.Field()
    extract_and_link = ExtractAndLink.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| StarcoderdataPython |
153888 | <reponame>P5-G6/graph-tool<filename>backend/tests/integration_tests.py<gh_stars>0
"""Integration tests file."""
import urllib3
import json
import sys
# Make the backend package importable when tests run from this directory.
sys.path.append('../')
class TestClass(object):
    """Integration tests for the graph-tool backend REST API.

    Requires the backend server to be running at ``BASE_URL``. The tests
    are order-dependent: vertices and edges are created first, queried,
    and finally deleted.
    """

    # Base endpoint of the running backend server.
    BASE_URL = 'http://127.0.0.1:5000'

    # NOTE(review): attribute name keeps the original 'vetex' typo so any
    # external references remain valid.
    mock_vetex = ["1", "2", "3", "4", "5", "6"]
    mock_edges = [["1", "4", 3, True],
                  ["4", "2", 2, True],
                  ["2", "1", 4, False],
                  ["3", "1", 4, False],
                  ["5", "6", 5, True],
                  ["6", "3", 0, True]]
    mock_adjacency_list = {
        "1": [["4", 3, True],
              ["2", 4, False],
              ["3", 4, False]],
        "2": [["1", 4, False]],
        "3": [["1", 4, False]],
        "4": [["2", 2, True]],
        "5": [["6", 5, True]],
        "6": [["3", 0, True]]
    }
    mock_graph_size = 6
    mock_graph_order = 6
    mock_are_adjacents_input = ["2", "1"]
    mock_are_adjacents = True
    mock_vertex_degree_input = "1"
    mock_vertex_degree = {'edges': 2, 'in': 0, 'out': 1}
    mock_vertex_adjacent_list_input = "1"
    mock_vertex_adjacent_list = {
        'edges': [['2', 4, False], ['3', 4, False]],
        'in': [],
        'out': [['4', 3, True]]
    }
    mock_delete_vertex = "1"
    mock_delete_edge = ["6", "3", 0, True]

    # ------------------------------------------------------------------
    # HTTP helpers shared by every test (avoids repeating boilerplate)

    def _get(self, path):
        """Send a GET request to BASE_URL + path; return the decoded JSON."""
        http = urllib3.PoolManager()
        request = http.request('GET', self.BASE_URL + path,
                               headers={'Content-Type': 'application/json'})
        return json.loads(request.data)

    def _post(self, path, payload):
        """POST `payload` as JSON to BASE_URL + path; return the decoded JSON."""
        http = urllib3.PoolManager()
        request = http.request('POST', self.BASE_URL + path,
                               headers={'Content-Type': 'application/json'},
                               body=json.dumps(payload))
        return json.loads(request.data)

    # ------------------------------------------------------------------
    # Tests

    def test_post_add_vertex(self):
        """Add every mock vertex; the API must echo each label back."""
        vertex_responses = []
        for vertex in self.mock_vetex:
            response = self._post('/graph/add-vertex', {"vertex_label": vertex})
            vertex_responses.append(response['body']["added_vertex_label"])
        assert vertex_responses == self.mock_vetex

    def test_post_add_edge(self):
        """Add every mock edge; the API must echo each edge back."""
        edge_responses = []
        for edge in self.mock_edges:
            # NOTE(review): double slash kept from the original URL — the
            # server apparently accepts it; confirm before normalizing.
            response = self._post('//graph/add-edge', {"edge": edge})
            edge_responses.append(response['body']['added_edge'])
        assert edge_responses == self.mock_edges

    def test_get_adjacency_list(self):
        """The full adjacency list must match the mock."""
        response = self._get('/adjacency-list')
        assert self.mock_adjacency_list == response['body']['adjacency_list']

    def test_get_graph_order(self):
        """Graph order (vertex count) must match the mock."""
        response = self._get('/graph-order')
        assert self.mock_graph_order == response['body']['graph_order']

    def test_get_graph_size(self):
        """Graph size (edge count) must match the mock."""
        response = self._get('/graph-size')
        assert self.mock_graph_size == response['body']['graph_size']

    def test_get_check_if_are_adjacents(self):
        """Adjacency check between two vertices must match the mock."""
        vertex_1, vertex_2 = self.mock_are_adjacents_input
        response = self._get('/check-if-are-adjacents'
                             '?vertex_1={}&vertex_2={}'.format(vertex_1,
                                                               vertex_2))
        assert self.mock_are_adjacents == response['body']['are_adjacents']

    def test_get_vertex_degree(self):
        """Degree of a single vertex must match the mock."""
        # Fixed: the original indexed the input string ([0]), which only
        # worked because the label is a single character.
        response = self._get('/vertex-degree'
                             '?vertex={}'.format(self.mock_vertex_degree_input))
        assert self.mock_vertex_degree == response['body']['vertex_degree']

    def test_get_vertex_adjacent_list(self):
        """Adjacency list of a single vertex must match the mock."""
        response = self._get(
            '/vertex-adjacent-list'
            '?vertex={}'.format(self.mock_vertex_adjacent_list_input))
        assert self.mock_vertex_adjacent_list == \
            response['body']['vertex_adjacent_list']

    def test_post_delete_vertex(self):
        """Delete a vertex; the API must echo the deleted label."""
        response = self._post('/graph/delete_vertex',
                              {"vertex_label": self.mock_delete_vertex})
        assert self.mock_delete_vertex == response['body']['deleted_vertex']

    def test_post_delete_edge(self):
        """Delete an edge; the API must echo the deleted edge."""
        response = self._post('/graph/delete_edge',
                              {"edge": self.mock_delete_edge})
        assert self.mock_delete_edge == response['body']['deleted_edge']
if __name__ == '__main__':
    # Ad-hoc manual run of a single test; use pytest for the full suite.
    test = TestClass()
    test.test_get_vertex_adjacent_list()
| StarcoderdataPython |
1680629 | <gh_stars>0
from barbearia.barbearia import app
from flask import render_template
from barbearia.barbearia import db
from barbearia.main import main_bp
# db.create_all()
# Registered twice: '/home' on the blueprint and '/' on the app itself.
# Stacking works because route decorators return the original function.
@main_bp.route('/home')
@app.route('/')
def home_page():
    # Render the landing page.
    return render_template('home.html')
# Contact page, served via the blueprint only.
@main_bp.route("/contact")
def contact_page():
    # Render the contact page.
    return render_template("contact.html")
3354483 | <filename>generators/app/templates/environment.py
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from config import db_url
# Engine created from the configured database URL.
engine = create_engine(db_url)
# Thread-local session factory (one session per thread/request scope).
Session = scoped_session(sessionmaker(bind=engine))
# Declarative base class for ORM models, bound to the engine.
# NOTE(review): declarative_base(bind=...) is deprecated as of SQLAlchemy 1.4;
# confirm the pinned version before upgrading.
Base = declarative_base(bind=engine)
| StarcoderdataPython |
3385953 | # tree.py
# pylint: disable=no-member
r'''
The main LatexTree class
Initialized from a string or file name.
Methods
tree.write_chars() Recover Latex source code
tree.write_pretty() Native output format (see node.py)
tree.write_xml() Uses the `lxml` package
tree.write_bbq()
'''
import sys, os
# base_dir = os.path.dirname(os.path.abspath(__file__)) # this directory
# pars_dir = os.path.join(base_dir, 'parser')
# sys.path.insert(0, pars_dir)
# print ('sys.path is: {}'.format(sys.path))
from latextree.settings import LATEX_ROOT, EXTENSIONS_ROOT, LOG_ROOT, mathjax_source
import logging
log = logging.getLogger(__name__)
# NOTE(review): configuring the root logger at import time is a module-level
# side effect; consider moving basicConfig into the application entry point.
logging.basicConfig(
    filename = os.path.join(LOG_ROOT, 'latextree.log'),
    # format = '%(asctime)s:%(levelname)8s:%(name)8s:%(funcName)20s:%(lineno)4s %(message)s',
    format = '%(levelname)8s:%(name)8s:%(funcName)20s:%(lineno)4s %(message)s',
    level = logging.DEBUG,
)
# This module's logger emits INFO and above.
log.setLevel(logging.INFO)
from latextree.reader import read_latex_file
from latextree.parser import Parser
from latextree.parser.misc import parse_kv_opt_args, parse_length
import pprint as pp
class LatexTree():
    r'''
    Document object model for Latex.

    The root node is accessed via the `root' attribute.

    Fields:
        `tex_main': main input file (absolute path)
        `root': Node type
        `preamble': Nodes extracted from preamble {title: Group(), ...}
        `static': dict in the format {key: file}

    The name of an image object is its file name
        - \includegraphics{mypic.png}
    is recorded as
        - key='mypic.png'
    If \graphicspath{{./figures/}} is set
        - key='./figures/mypic.png'
    '''

    def __init__(self, tex_main=None):
        """Create an empty tree and load extension definitions.

        Note: `tex_main` is accepted for API compatibility but not parsed
        here; call `parse_file` explicitly.
        """
        # built-in parser
        self.parser = Parser()

        # tree state (populated by parse/parse_file and pp_* methods).
        # Fix: these were previously left undefined until parsing, so
        # pretty_print/pp_sections raised AttributeError on a fresh tree
        # or on a source without a {document} environment.
        self.tex_main = None    # record main file name
        self.root = None        # root Node of the parsed document
        self.doc_root = None    # the {document} node, if any

        # for passing to templates
        self.preamble = {}      # document properties extracted from root node
        # NOTE(review): `images` appears unused — pp_image_files fills
        # `image_files` instead; confirm before removing.
        self.images = {}        # image source files (for copying to webserver)

        # read extension definition files (json) from EXTENSIONS_ROOT
        for ext_file in os.listdir(EXTENSIONS_ROOT):
            if ext_file.endswith(".json"):
                log.info('Reading definitions from {}'.format(ext_file))
                ext_file = os.path.join(EXTENSIONS_ROOT, ext_file)
                self.read_defs_file(ext_file)

    def read_defs_file(self, defs_file):
        """Load command/environment definitions from a json file."""
        self.parser.read_defs_file(defs_file)

    def pretty_print(self):
        """Print the tree between separator rules (debug helper)."""
        print('--------------------')
        print(self)
        print('--------------------')

    # ------------------------------------------------------------------
    # parse functions
    def parse(self, s):
        """Parse a Latex string and post-process the resulting tree."""
        self.root = self.parser.parse(s)
        self.registry = self.parser.registry
        self.pp_tree()

    def parse_file(self, tex_main):
        """Parse a Latex file (resolved against LATEX_ROOT) and post-process."""
        self.tex_main = os.path.join(LATEX_ROOT, tex_main)
        self.root = self.parser.parse_file(tex_main)
        self.registry = self.parser.registry
        self.pp_tree()

    # ------------------------------------------------------------------
    # post-processing functions
    def pp_tree(self):
        """Extract information for passing to write functions (templates)."""
        if not self.root:
            return
        self.pp_document()
        self.pp_preamble()
        self.pp_sections()
        self.pp_labels()
        self.pp_toc()
        self.pp_image_files()
        self.pp_widths()

    def pp_document(self):
        '''Extract document element (if any); leaves doc_root as None otherwise.'''
        self.doc_root = next(
            (child for child in self.root.children
             if child.species == 'document'), None)

    def pp_sections(self):
        """Tree search (DFS) for Level-1 chapters or sections."""
        if self.doc_root:
            self.chapters = self.get_phenotypes('chapter')
            self.sections = []
            # documents without chapters fall back to sections
            if not self.chapters:
                self.sections = self.get_phenotypes('section')

    def pp_preamble(self):
        """Extract document properties (title, author, ...) from the preamble."""
        for node in [child for child in self.root.children
                     if not child.species == 'document']:
            if node.species == 'title' and 'title' in node.args:
                self.preamble['title'] = node.args['title']
            if node.species == 'author' and 'names' in node.args:
                self.preamble['author'] = node.args['names']
            if node.species == 'date' and 'date' in node.args:
                self.preamble['date'] = node.args['date']
            if node.species == 'documentclass' and 'name' in node.args:
                self.preamble['documentclass'] = node.args['name']
            if node.species == 'graphicspath' and 'paths' in node.args:
                self.preamble['graphicspath'] = node.args['paths']

    def pp_labels(self):
        """Create map of label keys to Node objects (the labelled nodes)."""
        self.labels = {}
        for node in self.get_phenotypes('label'):
            key = node.args['key'].chars(nobrackets=True)
            # climb to the nearest numbered or block-command ancestor
            while node.parent:
                if hasattr(node, 'number'):
                    break
                if node.species in self.registry.block_commands:
                    break
                node = node.parent
            self.labels[key] = node
        # bibitems label themselves directly
        for node in self.get_phenotypes('bibitem'):
            key = node.args['key'].chars(nobrackets=True)
            self.labels[key] = node

    def pp_image_files(self):
        r"""Create a map of `includegraphics' objects onto file names.

        The filenames are relative to LATEX_ROOT and possibly `graphicspath'.
        The table is used by write functions to copy image files to the
        server and to emit <img src="..."> elements in templates.
        """
        self.image_files = {}
        for node in self.get_phenotypes('includegraphics'):
            fname_str = node.args['file'].children[0].content
            self.image_files[node] = fname_str
        self.video_urls = {}
        for video in self.get_phenotypes('includevideo'):
            url_str = video.args['arg1'].chars(nobrackets=True)  # defined with \newfloat
            self.video_urls[video] = url_str

    def pp_widths(self):
        """Set width attributes for minipages and images."""
        for minipage in self.get_phenotypes('minipage'):
            width = minipage.args['width'].chars(nobrackets=True)  # mandatory arg
            minipage.width = parse_length(width)
        for image in self.get_phenotypes('includegraphics'):
            if 'options' in image.args:  # optional arg
                opt_arg_str = image.args['options'].chars(nobrackets=True)
                kw = parse_kv_opt_args(opt_arg_str)[1]
                if 'scale' in kw:
                    # scale factor -> percentage (capped just under 100%)
                    image.width = str(int(99 * float(kw['scale']))) + '%'
                elif 'width' in kw:
                    image.width = parse_length(kw['width']) + '%'

    def pp_toc(self):
        """Experimental: create table of contents as a nested dict."""
        # recursive helper (local)
        def _pp_toc(node):
            # guard against 'None' (e.g. from optional arguments)
            if not node:
                return {}
            # hack for input/include (file contents parsed into children)
            if node.genus == 'Input':
                tt = {}
                for child in node.children:
                    tt.update(_pp_toc(child))
                return tt
            # collect subsections etc. from children
            subs = []
            for child in node.children:
                s = _pp_toc(child)
                if s:
                    subs.append(s)
            # hack: include the root document node to start things off
            if node.genus == 'Section' or node.species == 'document':
                return {node: subs}
            return {}

        self.toc = None
        if self.doc_root:
            self.toc = _pp_toc(self.doc_root)

    # ------------------------------------------------------------------
    # search functions
    def get_container(self, node):
        """Get nearest container of the node (environment or numbered species)."""
        cont = node
        while cont.parent:
            if cont.family == 'Environment':
                return cont
            if cont.species in self.registry.numbered:
                return cont
            cont = cont.parent
        return None

    def get_phenotypes(self, species):
        """Retrieve all nodes of the given species (depth-first)."""
        def _get_phenotypes(node, species):
            # guard against 'None' (e.g. from optional arguments)
            if not node:
                return []
            phenotypes = []
            if node.species == species:
                phenotypes.append(node)
            # check arguments (if any)
            if hasattr(node, 'args'):
                for arg in node.args.values():
                    phenotypes.extend(_get_phenotypes(arg, species))
            # check children
            for child in node.children:
                phenotypes.extend(_get_phenotypes(child, species))
            return phenotypes
        return _get_phenotypes(self.root, species)

    # ------------------------------------------------------------------
    # write functions
    def write_chars(self):
        """Write tree as Latex source."""
        return self.root.chars()

    def write_xml(self):
        """Write tree in XML format."""
        return self.root.xml_print()

    def write_pretty(self):
        """Write in native LatexTree format (verbose)."""
        return self.root.pretty_print()

    def write_bbq(self, include_mathjax_header=False):
        """Extract MC/MA questions and typeset for Blackboard (bbq format).

        Only the first `questions` environment is processed; within each
        question, content after the choices block is ignored (bbq format
        does not allow it).
        """
        # question pool: one tab-separated line per question
        pool = []

        # extract `questions` environments
        question_sets = self.get_phenotypes('questions')
        if not question_sets:
            return ''.join(pool)

        for question_set in question_sets:

            for question in question_set.children:

                # ignore non-question objects (e.g. leading whitespace
                # between \begin{questions} and the first \item, which the
                # parser stores as a Text node)
                if not question.genus == 'Item':
                    continue

                # find the choices environment (if any); MC/MA only.
                # TODO: true/false, fill-the-blank, ...
                choices_block = next(
                    (child for child in question.children
                     if child.species in ['choices', 'checkboxes']), None)
                if not choices_block:
                    continue

                # record question type (MC or MA)
                output = []
                if choices_block.species == 'choices':
                    output.append('MC')
                elif choices_block.species == 'checkboxes':
                    output.append('MA')
                else:
                    continue

                # init question text
                qu_text = ''
                if include_mathjax_header:
                    qu_text += '<script type="text/javascript" src="%s"/>' % mathjax_source

                for child in question.children:
                    # accumulate question text up to the choices block
                    if not child == choices_block:
                        qu_text += child.chars(non_breaking_spaces=True).rstrip('\n')
                    else:
                        output.append(qu_text.strip())
                        # iterate over choices, skipping leading whitespace
                        for choice in child.children:
                            if not choice.genus == 'Item':
                                continue
                            option = ''.join(
                                [cc.chars(non_breaking_spaces=True)
                                 for cc in choice.children]).strip()
                            output.append(option)
                            status = ('CORRECT'
                                      if choice.species == 'correctchoice'
                                      else 'INCORRECT')
                            output.append(status)
                        # nothing after the choices block is allowed
                        break

                pool.append('\t'.join(output))

            # Fix: this break previously sat inside the per-question loop,
            # truncating output to one question per set; it now matches the
            # stated intent of ignoring additional question sets.
            break

        return '\n'.join(pool)
| StarcoderdataPython |
3353141 | <reponame>mailslurp/mailslurp-client-python<gh_stars>1-10
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailslurp_client.api_client import ApiClient
from mailslurp_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SentEmailsControllerApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_all_sent_tracking_pixels(self, **kwargs): # noqa: E501
"""Get all sent email tracking pixels in paginated form # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_sent_tracking_pixels(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param datetime before: Filter by created at before the given timestamp
:param int page: Optional page index in sent email tracking pixel list pagination
:param str search_filter: Optional search filter
:param datetime since: Filter by created at after the given timestamp
:param int size: Optional page size in sent email tracking pixel list pagination
:param str sort: Optional createdAt sort direction ASC or DESC
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PageTrackingPixelProjection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_all_sent_tracking_pixels_with_http_info(**kwargs) # noqa: E501
def get_all_sent_tracking_pixels_with_http_info(self, **kwargs): # noqa: E501
"""Get all sent email tracking pixels in paginated form # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_sent_tracking_pixels_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param datetime before: Filter by created at before the given timestamp
:param int page: Optional page index in sent email tracking pixel list pagination
:param str search_filter: Optional search filter
:param datetime since: Filter by created at after the given timestamp
:param int size: Optional page size in sent email tracking pixel list pagination
:param str sort: Optional createdAt sort direction ASC or DESC
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(PageTrackingPixelProjection, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'before',
'page',
'search_filter',
'since',
'size',
'sort'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_sent_tracking_pixels" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'before' in local_var_params and local_var_params['before'] is not None: # noqa: E501
query_params.append(('before', local_var_params['before'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None: # noqa: E501
query_params.append(('searchFilter', local_var_params['search_filter'])) # noqa: E501
if 'since' in local_var_params and local_var_params['since'] is not None: # noqa: E501
query_params.append(('since', local_var_params['since'])) # noqa: E501
if 'size' in local_var_params and local_var_params['size'] is not None: # noqa: E501
query_params.append(('size', local_var_params['size'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/sent/tracking-pixels', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageTrackingPixelProjection', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_sent_email(self, id, **kwargs): # noqa: E501
"""Get sent email receipt # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sent_email(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: id (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SentEmailDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_sent_email_with_http_info(id, **kwargs) # noqa: E501
def get_sent_email_with_http_info(self, id, **kwargs): # noqa: E501
"""Get sent email receipt # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sent_email_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: id (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SentEmailDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sent_email" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_sent_email`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/sent/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SentEmailDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_sent_email_html_content(self, id, **kwargs): # noqa: E501
"""Get sent email HTML content # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sent_email_html_content(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: id (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_sent_email_html_content_with_http_info(id, **kwargs) # noqa: E501
def get_sent_email_html_content_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get sent email HTML content  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_email_html_content_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: id (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured before any other local is created so that it
    # holds exactly the call arguments (self, id, kwargs).
    local_var_params = locals()

    all_params = [
        'id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold accepted ones into
    # local_var_params so everything below can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sent_email_html_content" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_sent_email_html_content`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept` -- this endpoint returns raw HTML, not JSON.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/html'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_KEY']  # noqa: E501

    return self.api_client.call_api(
        '/sent/{id}/html', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_sent_email_tracking_pixels(self, id, **kwargs):  # noqa: E501
    """Get all tracking pixels for a sent email in paginated form  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_email_tracking_pixels(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: id (required)
    :param datetime before: Filter by created at before the given timestamp
    :param int page: Optional page index in sent email tracking pixel list pagination
    :param str search_filter: Optional search filter
    :param datetime since: Filter by created at after the given timestamp
    :param int size: Optional page size in sent email tracking pixel list pagination
    :param str sort: Optional createdAt sort direction ASC or DESC
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: PageTrackingPixelProjection
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper (codegen-style): force "data only" responses and delegate
    # to the *_with_http_info variant, which can also return status/headers.
    kwargs['_return_http_data_only'] = True
    return self.get_sent_email_tracking_pixels_with_http_info(id, **kwargs)  # noqa: E501
def get_sent_email_tracking_pixels_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get all tracking pixels for a sent email in paginated form  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_email_tracking_pixels_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: id (required)
    :param datetime before: Filter by created at before the given timestamp
    :param int page: Optional page index in sent email tracking pixel list pagination
    :param str search_filter: Optional search filter
    :param datetime since: Filter by created at after the given timestamp
    :param int size: Optional page size in sent email tracking pixel list pagination
    :param str sort: Optional createdAt sort direction ASC or DESC
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(PageTrackingPixelProjection, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured before any other local is created so that it
    # holds exactly the call arguments (self, id, kwargs).
    local_var_params = locals()

    all_params = [
        'id',
        'before',
        'page',
        'search_filter',
        'since',
        'size',
        'sort'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold accepted ones into
    # local_var_params so everything below can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sent_email_tracking_pixels" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_sent_email_tracking_pixels`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    # Optional filters: note the snake_case -> camelCase rename for
    # search_filter, matching the server-side query parameter names.
    query_params = []
    if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
        query_params.append(('before', local_var_params['before']))  # noqa: E501
    if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
        query_params.append(('page', local_var_params['page']))  # noqa: E501
    if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None:  # noqa: E501
        query_params.append(('searchFilter', local_var_params['search_filter']))  # noqa: E501
    if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
        query_params.append(('since', local_var_params['since']))  # noqa: E501
    if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
        query_params.append(('size', local_var_params['size']))  # noqa: E501
    if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
        query_params.append(('sort', local_var_params['sort']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_KEY']  # noqa: E501

    return self.api_client.call_api(
        '/sent/{id}/tracking-pixels', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageTrackingPixelProjection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_sent_emails(self, **kwargs):  # noqa: E501
    """Get all sent emails in paginated form  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_emails(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param datetime before: Filter by created at before the given timestamp
    :param str inbox_id: Optional inboxId to filter sender of sent emails by
    :param int page: Optional page index in inbox sent email list pagination
    :param str search_filter: Optional search filter
    :param datetime since: Filter by created at after the given timestamp
    :param int size: Optional page size in inbox sent email list pagination
    :param str sort: Optional createdAt sort direction ASC or DESC
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: PageSentEmailProjection
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper (codegen-style): force "data only" responses and delegate
    # to the *_with_http_info variant, which can also return status/headers.
    kwargs['_return_http_data_only'] = True
    return self.get_sent_emails_with_http_info(**kwargs)  # noqa: E501
def get_sent_emails_with_http_info(self, **kwargs):  # noqa: E501
    """Get all sent emails in paginated form  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_emails_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param datetime before: Filter by created at before the given timestamp
    :param str inbox_id: Optional inboxId to filter sender of sent emails by
    :param int page: Optional page index in inbox sent email list pagination
    :param str search_filter: Optional search filter
    :param datetime since: Filter by created at after the given timestamp
    :param int size: Optional page size in inbox sent email list pagination
    :param str sort: Optional createdAt sort direction ASC or DESC
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(PageSentEmailProjection, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured before any other local is created so that it
    # holds exactly the call arguments (self, kwargs).
    local_var_params = locals()

    all_params = [
        'before',
        'inbox_id',
        'page',
        'search_filter',
        'since',
        'size',
        'sort'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold accepted ones into
    # local_var_params so everything below can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sent_emails" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Optional filters: note the snake_case -> camelCase renames
    # (inbox_id -> inboxId, search_filter -> searchFilter) to match the
    # server-side query parameter names.
    query_params = []
    if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
        query_params.append(('before', local_var_params['before']))  # noqa: E501
    if 'inbox_id' in local_var_params and local_var_params['inbox_id'] is not None:  # noqa: E501
        query_params.append(('inboxId', local_var_params['inbox_id']))  # noqa: E501
    if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
        query_params.append(('page', local_var_params['page']))  # noqa: E501
    if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None:  # noqa: E501
        query_params.append(('searchFilter', local_var_params['search_filter']))  # noqa: E501
    if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
        query_params.append(('since', local_var_params['since']))  # noqa: E501
    if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
        query_params.append(('size', local_var_params['size']))  # noqa: E501
    if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
        query_params.append(('sort', local_var_params['sort']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_KEY']  # noqa: E501

    return self.api_client.call_api(
        '/sent', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageSentEmailProjection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_sent_organization_emails(self, **kwargs):  # noqa: E501
    """Get all sent organization emails in paginated form  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_organization_emails(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param datetime before: Filter by created at before the given timestamp
    :param str inbox_id: Optional inboxId to filter sender of sent emails by
    :param int page: Optional page index in sent email list pagination
    :param str search_filter: Optional search filter
    :param datetime since: Filter by created at after the given timestamp
    :param int size: Optional page size in sent email list pagination
    :param str sort: Optional createdAt sort direction ASC or DESC
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: PageSentEmailProjection
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper (codegen-style): force "data only" responses and delegate
    # to the *_with_http_info variant, which can also return status/headers.
    kwargs['_return_http_data_only'] = True
    return self.get_sent_organization_emails_with_http_info(**kwargs)  # noqa: E501
def get_sent_organization_emails_with_http_info(self, **kwargs):  # noqa: E501
    """Get all sent organization emails in paginated form  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_sent_organization_emails_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param datetime before: Filter by created at before the given timestamp
    :param str inbox_id: Optional inboxId to filter sender of sent emails by
    :param int page: Optional page index in sent email list pagination
    :param str search_filter: Optional search filter
    :param datetime since: Filter by created at after the given timestamp
    :param int size: Optional page size in sent email list pagination
    :param str sort: Optional createdAt sort direction ASC or DESC
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(PageSentEmailProjection, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured before any other local is created so that it
    # holds exactly the call arguments (self, kwargs).
    local_var_params = locals()

    all_params = [
        'before',
        'inbox_id',
        'page',
        'search_filter',
        'since',
        'size',
        'sort'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold accepted ones into
    # local_var_params so everything below can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sent_organization_emails" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Optional filters: note the snake_case -> camelCase renames
    # (inbox_id -> inboxId, search_filter -> searchFilter) to match the
    # server-side query parameter names.
    query_params = []
    if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
        query_params.append(('before', local_var_params['before']))  # noqa: E501
    if 'inbox_id' in local_var_params and local_var_params['inbox_id'] is not None:  # noqa: E501
        query_params.append(('inboxId', local_var_params['inbox_id']))  # noqa: E501
    if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
        query_params.append(('page', local_var_params['page']))  # noqa: E501
    if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None:  # noqa: E501
        query_params.append(('searchFilter', local_var_params['search_filter']))  # noqa: E501
    if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
        query_params.append(('since', local_var_params['since']))  # noqa: E501
    if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
        query_params.append(('size', local_var_params['size']))  # noqa: E501
    if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
        query_params.append(('sort', local_var_params['sort']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_KEY']  # noqa: E501

    return self.api_client.call_api(
        '/sent/organization', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageSentEmailProjection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
| StarcoderdataPython |
32717 | <reponame>Jwomers/trinity
from typing import (
Dict,
Sequence,
Tuple,
Type,
)
from eth2.beacon.on_startup import (
get_genesis_block,
get_initial_beacon_state,
)
from eth2.beacon.state_machines.configs import BeaconConfig
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
)
from eth2.beacon.types.deposits import Deposit
from eth2.beacon.types.deposit_data import DepositData
from eth2.beacon.types.deposit_input import DepositInput
from eth2.beacon.types.eth1_data import Eth1Data
from eth2.beacon.types.forks import Fork
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import (
BLSPubkey,
Timestamp,
)
from eth2.beacon.tools.builder.validator import (
sign_proof_of_possession,
)
def create_mock_initial_validator_deposits(
        num_validators: int,
        config: BeaconConfig,
        pubkeys: Sequence[BLSPubkey],
        keymap: Dict[BLSPubkey, int]) -> Tuple[Deposit, ...]:
    """Build ``num_validators`` mock ``Deposit`` objects for genesis testing.

    :param num_validators: number of deposits to create (one per validator).
    :param config: beacon chain config supplying fork version, slot/epoch and
        deposit constants.
    :param pubkeys: public keys, indexed per validator.
    :param keymap: mapping from public key to the private key used to sign
        each deposit's proof of possession.
    :return: a tuple of fully-signed mock deposits.
    """
    # Fixed mock values shared by every deposit.
    withdrawal_credentials = b'\x22' * 32
    randao_commitment = b'\x33' * 32
    deposit_timestamp = 0
    fork = Fork(
        previous_version=config.GENESIS_FORK_VERSION,
        current_version=config.GENESIS_FORK_VERSION,
        epoch=config.GENESIS_EPOCH,
    )
    # A real Merkle branch is not needed for mocks; use a fixed 10-deep
    # branch. FIX: this must be a materialized tuple, not a generator
    # expression as before -- a generator is lazy and single-use, so any
    # consumer iterating the branch more than once would see it exhausted.
    branch = tuple(b'\x11' * 32 for _ in range(10))

    def _build_deposit(index: int) -> Deposit:
        """Build one signed mock deposit for validator ``index``."""
        # Build the unsigned input once and sign it; the original constructed
        # an identical DepositInput twice (once inline in the sign call).
        unsigned_deposit_input = DepositInput(
            pubkey=pubkeys[index],
            withdrawal_credentials=withdrawal_credentials,
            randao_commitment=randao_commitment,
        )
        proof_of_possession = sign_proof_of_possession(
            deposit_input=unsigned_deposit_input,
            privkey=keymap[pubkeys[index]],
            fork=fork,
            slot=config.GENESIS_SLOT,
            epoch_length=config.EPOCH_LENGTH,
        )
        return Deposit(
            branch=branch,
            index=index,
            deposit_data=DepositData(
                deposit_input=DepositInput(
                    pubkey=pubkeys[index],
                    withdrawal_credentials=withdrawal_credentials,
                    randao_commitment=randao_commitment,
                    proof_of_possession=proof_of_possession,
                ),
                amount=config.MAX_DEPOSIT_AMOUNT,
                timestamp=deposit_timestamp,
            ),
        )

    return tuple(_build_deposit(i) for i in range(num_validators))
def create_mock_genesis(
        num_validators: int,
        config: BeaconConfig,
        keymap: Dict[BLSPubkey, int],
        genesis_block_class: Type[BaseBeaconBlock],
        genesis_time: Timestamp=0) -> Tuple[BeaconState, BaseBeaconBlock]:
    """Create a mock genesis ``(state, block)`` pair for testing.

    Builds mock deposits from the first ``num_validators`` keys of ``keymap``,
    derives the initial beacon state from them, then wraps the state root in
    a genesis block of ``genesis_block_class``.
    """
    latest_eth1_data = Eth1Data.create_empty_data()

    # Need at least num_validators known private keys to sign deposits.
    assert num_validators <= len(keymap)

    # dict preserves insertion order, so this selects the first
    # num_validators pubkeys as provided by the caller.
    pubkeys = list(keymap)[:num_validators]

    initial_validator_deposits = create_mock_initial_validator_deposits(
        num_validators=num_validators,
        config=config,
        pubkeys=pubkeys,
        keymap=keymap,
    )
    # Every genesis-relevant constant is forwarded explicitly from the config.
    state = get_initial_beacon_state(
        initial_validator_deposits=initial_validator_deposits,
        genesis_time=genesis_time,
        latest_eth1_data=latest_eth1_data,
        genesis_slot=config.GENESIS_SLOT,
        genesis_epoch=config.GENESIS_EPOCH,
        genesis_fork_version=config.GENESIS_FORK_VERSION,
        genesis_start_shard=config.GENESIS_START_SHARD,
        shard_count=config.SHARD_COUNT,
        seed_lookahead=config.SEED_LOOKAHEAD,
        latest_block_roots_length=config.LATEST_BLOCK_ROOTS_LENGTH,
        latest_index_roots_length=config.LATEST_INDEX_ROOTS_LENGTH,
        epoch_length=config.EPOCH_LENGTH,
        max_deposit_amount=config.MAX_DEPOSIT_AMOUNT,
        latest_penalized_exit_length=config.LATEST_PENALIZED_EXIT_LENGTH,
        latest_randao_mixes_length=config.LATEST_RANDAO_MIXES_LENGTH,
        entry_exit_delay=config.ENTRY_EXIT_DELAY,
    )
    block = get_genesis_block(
        startup_state_root=state.root,
        genesis_slot=config.GENESIS_SLOT,
        block_class=genesis_block_class,
    )
    assert len(state.validator_registry) == num_validators
    return state, block
| StarcoderdataPython |
157980 | <reponame>robertispas/django-debug-toolbar
import functools
from django.http import Http404, HttpResponseBadRequest
def require_show_toolbar(view):
    """View decorator: respond with 404 unless the debug toolbar is enabled
    for the incoming request."""
    @functools.wraps(view)
    def wrapper(request, *args, **kwargs):
        # Imported lazily to avoid importing the middleware at module load.
        from debug_toolbar.middleware import get_show_toolbar

        if get_show_toolbar()(request):
            return view(request, *args, **kwargs)
        raise Http404

    return wrapper
def signed_data_view(view):
    """Decorator that handles unpacking a signed data form"""
    @functools.wraps(view)
    def wrapper(request, *args, **kwargs):
        # Imported lazily to avoid importing the forms module at module load.
        from debug_toolbar.forms import SignedDataForm

        payload = request.POST if request.method != "GET" else request.GET
        form = SignedDataForm(payload)
        if not form.is_valid():
            return HttpResponseBadRequest("Invalid signature")
        # Pass the verified payload through to the wrapped view.
        return view(request, *args, verified_data=form.verified_data(), **kwargs)

    return wrapper
| StarcoderdataPython |
144043 | from typing import List
from flask import request
from flask_restx import Namespace, Resource
from CTFd.api.v1.helpers.request import validate_args
from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
from CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse
from CTFd.constants import RawEnum
from CTFd.models import db, UserRights
from CTFd.schemas.user_rights import UserRightsSchema
from CTFd.utils.decorators import access_granted_only
from CTFd.utils.helpers.models import build_model_filters
# flask-restx namespace grouping all /user_rights endpoints.
user_rights_namespace = Namespace("user_rights", description="Endpoint to retrieve UserRights")

# Pydantic model auto-derived from the SQLAlchemy UserRights model.
UserRightsModel = sqlalchemy_to_pydantic(UserRights)
class UserRightsDetailedSuccessResponse(APIDetailedSuccessResponse):
    # Response envelope for a single UserRights object.
    data: UserRightsModel
class UserRightsListSuccessResponse(APIListSuccessResponse):
    # Response envelope for a list of UserRights objects.
    data: List[UserRightsModel]
# Register the response envelopes with flask-restx so the @doc decorators
# below can reference them by name in the generated API documentation.
user_rights_namespace.schema_model(
    "UserRightsDetailedSuccessResponse", UserRightsDetailedSuccessResponse.apidoc()
)
user_rights_namespace.schema_model("UserRightsListSuccessResponse", UserRightsListSuccessResponse.apidoc())
@user_rights_namespace.route("")
class UserRightsList(Resource):
    """Collection endpoint: bulk listing and creation of UserRights rows."""

    @access_granted_only("api_user_rights_list_get")
    @user_rights_namespace.doc(
        description="Endpoint to list UserRights objects in bulk",
        responses={
            200: ("Success", "UserRightsListSuccessResponse"),
            400: (
                "An error occurred processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    @validate_args(
        {
            "user_id": (int, None),
            "right_id": (int, None),
            "q": (str, None),
            "field": (
                RawEnum(
                    "UserRightsFields",
                    {
                        "user_id": "user_id",
                        "right_id": "right_id"
                    }
                ),
                None,
            ),
        },
        location="query",
    )
    def get(self, query_args):
        """List UserRights rows, optionally filtered by exact columns and a
        free-text search (``q`` against ``field``)."""
        # q/field drive the free-text filter; the remaining query_args are
        # exact-match column filters.
        q = query_args.pop("q", None)
        field = str(query_args.pop("field", None))
        filters = build_model_filters(model=UserRights, query=q, field=field)

        user_rights = UserRights.query.filter_by(**query_args).filter(*filters).all()
        schema = UserRightsSchema(many=True)
        response = schema.dump(user_rights)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}

    @access_granted_only("api_user_rights_list_post")
    @user_rights_namespace.doc(
        description="Endpoint to create a UserRights object",
        responses={
            200: ("Success", "UserRightsDetailedSuccessResponse"),
            400: (
                "An error occurred processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    def post(self):
        """Create a UserRights row from the JSON request body."""
        req = request.get_json()
        schema = UserRightsSchema()
        # load() validates and deserializes into a model instance.
        response = schema.load(req, session=db.session)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        db.session.add(response.data)
        db.session.commit()

        response = schema.dump(response.data)
        db.session.close()

        return {"success": True, "data": response.data}
@user_rights_namespace.route("/<user_id>/<right_id>")
@user_rights_namespace.param("user_id", "A User ID")
@user_rights_namespace.param("right_id", "A Right ID")
class UserRightsDetail(Resource):
    """Detail endpoint for a single (user_id, right_id) association.

    Renamed from ``UserRights``: the previous class name rebound the module
    global ``UserRights`` and shadowed the SQLAlchemy model imported from
    ``CTFd.models``, so every later ``UserRights.query`` lookup (in these
    methods and in ``UserRightsList``) resolved to this Resource class at
    request time instead of the model. The registered route is unchanged.
    """

    @access_granted_only("api_user_rights_get")
    @user_rights_namespace.doc(
        description="Endpoint to get a specific UserRights object",
        responses={
            200: ("Success", "UserRightsDetailedSuccessResponse"),
            400: (
                "An error occurred processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    def get(self, user_id, right_id):
        """Return the serialized UserRights row, or 404 if it does not exist."""
        user_rights = UserRights.query.filter_by(user_id=user_id, right_id=right_id).first_or_404()
        response = UserRightsSchema().dump(user_rights)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}

    @access_granted_only("api_user_rights_delete")
    @user_rights_namespace.doc(
        description="Endpoint to delete a specific UserRights object",
        responses={200: ("Success", "APISimpleSuccessResponse")},
    )
    def delete(self, user_id, right_id):
        """Delete the UserRights row (404 if absent) and commit."""
        user_rights = UserRights.query.filter_by(user_id=user_id, right_id=right_id).first_or_404()
        db.session.delete(user_rights)
        db.session.commit()
        db.session.close()

        return {"success": True}
| StarcoderdataPython |
141837 | <gh_stars>10-100
from typing import TypeVar, Callable
from injectable.container.injection_container import InjectionContainer
from injectable.errors.injectable_load_error import InjectableLoadError
from injectable.common_utils import get_caller_filepath
T = TypeVar("T")
def injectable_factory(
    dependency: T = None,
    *,
    qualifier: str = None,
    primary: bool = False,
    namespace: str = None,
    group: str = None,
    singleton: bool = False,
) -> Callable[..., Callable[..., T]]:
    """
    Function decorator to mark it as a injectable factory for the dependency.

    At least one of ``dependency`` or ``qualifier`` parameters need to be defined. An
    :class:`InjectableLoadError <injectable.errors.InjectableLoadError>` will be raised
    if none are defined.

    .. note::

        This decorator shall be the first decorator of the function since only the
        received function will be registered as an injectable factory

    .. note::

        All files using this decorator will be executed when
        :meth:`load_injection_container <injectable.load_injection_container>` is
        invoked.

    :param dependency: (optional) the dependency class for which the factory will be
        registered to. Defaults to None.
    :param qualifier: (optional) string qualifier for which the factory will be
        registered to. Defaults to None.
    :param primary: (optional) marks the factory as primary for the dependency
        resolution in ambiguous cases. Defaults to False.
    :param namespace: (optional) namespace in which the factory will be registered.
        Defaults to :const:`injectable.constants.DEFAULT_NAMESPACE`.
    :param group: (optional) group to be assigned to the factory. Defaults to None.
    :param singleton: (optional) when True the factory will be used to instantiate a
        singleton, i.e. only one call to the factory will be made and the created
        instance will be shared globally. Defaults to False.

    Usage::

      >>> from injectable import injectable_factory
      >>> from foo import Foo
      >>>
      >>> @injectable_factory(Foo)
      ... def foo_factory() -> Foo:
      ...     return Foo(...)
    """
    # Fail fast at decoration time: a factory with neither a dependency class
    # nor a qualifier could never be resolved.
    if not dependency and not qualifier:
        raise InjectableLoadError("No dependency class nor a qualifier were specified")

    def decorator(fn: Callable[..., T]) -> Callable[..., T]:
        caller_filepath = get_caller_filepath()
        # Only register when this file is currently being scanned by
        # load_injection_container; a plain import of the module is a no-op.
        if caller_filepath == InjectionContainer.LOADING_FILEPATH:
            InjectionContainer._register_factory(
                fn,
                caller_filepath,
                dependency,
                qualifier,
                primary,
                namespace,
                group,
                singleton,
            )
        # The function itself is returned unwrapped: decoration only has the
        # registration side effect.
        return fn

    return decorator
| StarcoderdataPython |
154608 | <filename>nr_all/search.py
from elasticsearch_dsl.query import Term, Bool
from nr_common.search import NRRecordsSearch
class AllRecordsSearch(NRRecordsSearch):
    """Search class over all records, restricting list responses to a fixed
    set of source fields."""

    # Fields returned by list endpoints; keeps search responses small by
    # excluding the full record body.
    LIST_SOURCE_FIELDS = [
        'control_number', 'oarepo:validity.valid', 'oarepo:draft', 'title',
        'dateIssued', 'creator', 'creators', 'resource_type', 'contributors', 'keywords',
        'subject', 'abstract', 'state', 'accessRights', '_files',
        'languages', 'id', '_primary_community', 'communities',
        '_administration.primaryCommunity', 'publication_date',
        '_administration.communities', 'rights',
        '$schema'
    ]
class AllRecordsDraftSearch(AllRecordsSearch):
    """Search restricted to draft records (``oarepo:draft == True``)."""

    class ActualMeta(NRRecordsSearch.ActualMeta):
        @classmethod
        def default_filter_factory(cls, search=None, **kwargs):
            # Combine the base default filter with a draft-only term filter.
            qs = NRRecordsSearch.Meta.default_filter_factory(search=search, **kwargs)
            return Bool(must=[
                qs,
                Term(**{'oarepo:draft': True})
            ])
class AllRecordsPublishedSearch(AllRecordsSearch):
    """Search restricted to published records (``oarepo:draft == False``)."""

    class ActualMeta(NRRecordsSearch.ActualMeta):
        @classmethod
        def default_filter_factory(cls, search=None, **kwargs):
            # Combine the base default filter with a published-only term filter.
            qs = NRRecordsSearch.Meta.default_filter_factory(search=search, **kwargs)
            return Bool(must=[
                qs,
                Term(**{'oarepo:draft': False})
            ])
| StarcoderdataPython |
3371824 | <filename>2017/day11.py<gh_stars>1-10
# Advent of Code 2017, day 11.
# The hexagons ("hexes") in this grid are aligned such that adjacent hexes can
# be found to the north, northeast, southeast, south, southwest, and northwest.
# Given a path of steps, determine the fewest number of steps required to reach
# the end hex. (A "step" means to move from the hex you are in to any adjacent
# hex.)

# Cube-coordinate delta for each of the six hex directions; every delta sums
# to zero, so (x + y + z == 0) is an invariant of the walk.
VAL = {
    'n': (0, 1, -1),
    's': (0, -1, 1),
    'nw': (-1, 1, 0),
    'ne': (1, 0, -1),
    'sw': (-1, 0, 1),
    'se': (1, -1, 0),
}


def hex_distance(directions):
    """Return the fewest steps from the origin to the hex reached by
    following *directions* (an iterable of 'n', 'ne', 'se', 's', 'sw', 'nw').

    In cube coordinates the distance between two hexes is
    (|x| + |y| + |z|) / 2.
    """
    x = y = z = 0
    for d in directions:
        dx, dy, dz = VAL[d]
        x += dx
        y += dy
        z += dz
    return (abs(x) + abs(y) + abs(z)) // 2


if __name__ == "__main__":
    # Same behavior as the original script: read a comma-separated path from
    # stdin and print the final distance.
    print(hex_distance(input().split(',')))
| StarcoderdataPython |
120565 | <gh_stars>0
#!/usr/bin/python
# <NAME> 10/30/2020
#
#
#
# - Gets interface stats from an IOS-XE device
#
import requests
import json
# Welcome
print("Welcome to the Netconf_IOS-XE_BGP.py Script!")
print("*" * 80)

# Variable collection: interactively prompt for device connection details.
# NOTE(review): the password is echoed to the terminal here; consider
# getpass.getpass for interactive use.
host_value = input("Host: ")
port_value = input("Port: ")
username = input("Username: ")
password = input("Password: ")

# Define the device and pull vars from user input
router = {
    "host":host_value,
    "port": port_value,
    "username": username,
    "password": password,
}

# Define the headers for the HTTP request (RESTCONF YANG-data JSON encoding).
headers = {
    "Accept": "application/yang-data+json",
    "Content-type": "application/yang-data+json",
}

# Define the URL - Get stats for a specific Interface
# url = f"https://{router['host']}:{router['port']}/restconf/data/Cisco-IOS-XE-interfaces-oper:interfaces/interface=GigabitEthernet1"
# Define the URL - Get stats for all interfaces
url = f"https://{router['host']}:{router['port']}/restconf/data/Cisco-IOS-XE-interfaces-oper:interfaces/"

# Form our request and assign the output to response.
# SECURITY: verify=False disables TLS certificate validation -- acceptable
# only against lab devices with self-signed certificates, never in production.
response = requests.get(url=url, headers=headers, auth=(
    router['username'], router['password']), verify=False).json()
print(json.dumps(response,indent=2)) | StarcoderdataPython |
4812814 | """
A frameless window widget
"""
from AnyQt.QtWidgets import QWidget, QStyleOption
from AnyQt.QtGui import QPalette, QPainter, QBitmap
from AnyQt.QtCore import Qt, pyqtProperty as Property
from .utils import is_transparency_supported, StyledWidget_paintEvent
class FramelessWindow(QWidget):
    """
    A basic frameless window widget with rounded corners (if supported by
    the windowing system).

    When the windowing system cannot composite translucent windows, the
    rounded corners are approximated with a 1-bit widget mask instead of
    true alpha-blended painting.
    """

    def __init__(self, parent=None, **kwargs):
        QWidget.__init__(self, parent, **kwargs)
        self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
        # Corner radius in pixels; exposed as the ``radius_`` Qt property.
        self.__radius = 6
        # Query compositing support once; this flag selects between true
        # translucency and the bitmap-mask fallback everywhere below.
        self.__isTransparencySupported = is_transparency_supported()
        self.setAttribute(Qt.WA_TranslucentBackground, self.__isTransparencySupported)

    def setRadius(self, radius):
        """
        Set the window rounded border radius.
        """
        if self.__radius != radius:
            self.__radius = radius
            # Without compositing the rounding is realized by the widget
            # mask, which must be rebuilt whenever the radius changes.
            if not self.__isTransparencySupported:
                self.__updateMask()
            self.update()

    def radius(self):
        """
        Return the border radius.
        """
        return self.__radius

    # Qt property exposing the radius (usable from stylesheets/Designer).
    radius_ = Property(
        int, fget=radius, fset=setRadius, designable=True, doc="Window border radius"
    )

    def resizeEvent(self, event):
        QWidget.resizeEvent(self, event)
        # The mask is in widget coordinates, so it must track the new size.
        if not self.__isTransparencySupported:
            self.__updateMask()

    def __updateMask(self):
        # Build a 1-bit bitmap of the rounded-rect shape and install it as
        # the widget mask (fallback path when translucency is unavailable).
        opt = QStyleOption()
        opt.initFrom(self)
        rect = opt.rect
        size = rect.size()
        mask = QBitmap(size)
        p = QPainter(mask)
        p.setRenderHint(QPainter.Antialiasing)
        p.setBrush(Qt.black)
        p.setPen(Qt.NoPen)
        p.drawRoundedRect(rect, self.__radius, self.__radius)
        p.end()
        self.setMask(mask)

    def paintEvent(self, event):
        if self.__isTransparencySupported:
            # Compositing available: paint the rounded window background
            # ourselves on the translucent surface.
            opt = QStyleOption()
            opt.initFrom(self)
            rect = opt.rect
            p = QPainter(self)
            p.setRenderHint(QPainter.Antialiasing, True)
            p.setBrush(opt.palette.brush(QPalette.Window))
            p.setPen(Qt.NoPen)
            p.drawRoundedRect(rect, self.__radius, self.__radius)
            p.end()
        else:
            # Masked fallback: defer to the default styled painting.
            StyledWidget_paintEvent(self, event)
| StarcoderdataPython |
#settest
# Python 2 smoke-test script for mgrslib: builds a 10 km buffer of MGRS
# cells around the 1 km cell at (73, -43), samples 15 of them, and prints
# the results of the mgrsSet geometry helpers for manual inspection.
from mgrslib import *
import random
# g: buffered set of cells; gg: a random 15-cell subset of it.
g=Grid(73,-43).mgrs1000.buffer(10000)
gg=mgrsSet(random.sample(g,15))
print len(g)
# Extremes and aggregate geometry of the buffered set.
print g.northernmost()
print g.southernmost()
print g.westernmost()
print g.easternmost()
print g.centeroid()
print g.exterior()
print g.interior()
# Nearest-cell lookup for a point inside the set...
z=Grid(73,-43)
k=g.nearestTo(z)
print z,z.latitude,z.longitude
print k, k.latitude,k.longitude
# ...and for a point far outside the set.
z=Grid(20,20)
k=g.nearestTo(z)
print z,z.latitude,z.longitude
print k, k.latitude,k.longitude
# Compare centroids of the full set and the random subset.
print g.centeroid()
print gg.centeroid()
print g.centeroid().distance(gg.centeroid())
print g.nearestTo(gg.centeroid())
| StarcoderdataPython |
36250 | <gh_stars>1-10
import pandas as pd
import mapping_module as mm
import multiprocessing as mp
from sqlalchemy import create_engine
from sys import argv

# Command-line configuration.
# NOTE(review): the DB password is taken from argv and is therefore visible
# in the process list -- confirm this is acceptable in the deployment.
user_name = argv[1]
password = argv[2]
data_type = argv[3]
start_year = int(argv[4])
end_year = int(argv[5])
leiden_input = argv[6]  # quality_func_Res --> CPM_R001
schema = argv[7]
rootdir = argv[8]  # "/erniedev_data3/theta_plus/Leiden/"

sql_scheme = 'postgresql://' + user_name + ':' + password + '@localhost:5432/ernie'
engine = create_engine(sql_scheme)

data_name = data_type + str(start_year) + '_' + str(end_year)

# Read the MCL cluster/scp assignments from Postgres.
mcl_name = data_name + '_cluster_scp_list_unshuffled'
mcl = pd.read_sql_table(table_name=mcl_name, schema=schema, con=engine)
# # Read directly
# mcl_name = data_name + '_cluster_scp_list_unshuffled.csv'
# mcl = pd.read_csv(mcl_name)

leiden_name = data_name + '_cluster_scp_list_leiden_' + leiden_input + '.csv'
leiden = pd.read_csv(leiden_name)

# Per-cluster member counts, ordered by cluster id.
mcl_grouped = mcl.groupby(by='cluster_no',
                          as_index=False).agg('count').sort_values(by='cluster_no', ascending=True)
# To match clusters between size 30 and 350 only:
mcl_grouped = mcl_grouped[(mcl_grouped['scp'] >= 30) & (mcl_grouped['scp'] <= 350)]
mcl_cluster_list = mcl_grouped['cluster_no'].tolist()

print("Running...")
# Submit all clusters to the pool at once: the original called starmap with a
# single-element list per cluster inside the loop, which serialised the work.
# The context manager also guarantees the pool is torn down (it was never
# closed before).
with mp.Pool(6) as pool:
    results = pool.starmap(
        mm.match_mcl_to_leiden,
        [(cluster_no, mcl, leiden) for cluster_no in mcl_cluster_list])

# DataFrame.append is deprecated/removed in modern pandas; build one frame per
# result (preserving the original per-item from_dict conversion) and
# concatenate once.
frames = [pd.DataFrame.from_dict([result]) for result in results]
final_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

save_name = rootdir + '/' + data_name + '_match_to_leiden_' + leiden_input + '.csv'
final_df.to_csv(save_name, index=None, header=True, encoding='utf-8')

# In case the connection times out:
engine = create_engine(sql_scheme)
save_name_sql = data_name + '_match_to_leiden_' + leiden_input
final_df.to_sql(save_name_sql, con=engine, schema=schema, index=False, if_exists='fail')

print("")
print("All Completed.")
192337 | <reponame>CoSandu/PythonCourses
# Python 2 script: fetch a JSON document from a user-supplied URL and sum
# the integer "count" field of every entry under its "comments" key.
import urllib
import json
url_in = raw_input("Enter site here: ")
# Download the raw document (also echoed for inspection).
html = urllib.urlopen(url_in).read()
print html
info = json.loads(html)
# Accumulate the counts.
s = 0
for v in info["comments"]:
    s = s + int(v["count"])
print s
| StarcoderdataPython |
50353 | <reponame>TheAnybodys/statistics-projects
# -*- coding: utf-8 -*-
"""
Monte Carlo estimate of P(|X - Y| <= 0.2) for independent X, Y ~ U(0, 1).
The exact value is 1 - 0.8**2 = 0.36.

Created on Wed Jan 26 20:23:06 2022

@author: olivi
"""
import random


def estimate_probability(trials=1000000, threshold=0.2, rng=random):
    """Estimate P(|X - Y| <= threshold) for X, Y ~ U(0, 1) by simulation.

    Parameters
    ----------
    trials : int
        Number of (X, Y) samples to draw.
    threshold : float
        Maximum allowed absolute difference.
    rng : random.Random or module
        Source of randomness; pass a seeded ``random.Random`` for
        reproducible results.

    Returns
    -------
    float
        Fraction of samples with ``abs(X - Y) <= threshold``.
    """
    hits = 0
    for _ in range(trials):
        x = rng.uniform(0.0, 1.0)
        y = rng.uniform(0.0, 1.0)
        if abs(x - y) <= threshold:
            hits += 1
    return hits / trials


if __name__ == "__main__":
    # Same output as the original script: one million trials, threshold 0.2.
    print(estimate_probability())
| StarcoderdataPython |
3211470 |
__all__ = ["Electron_v1"]
#
# electron struct
#
def Electron_v1():
  """Create and return a fresh ``edm::Electron_v1`` branch struct instance.

  The struct mirrors the ntuple branch layout for egamma trigger studies
  (offline electron quantities, L1/L2/EF trigger steps and Monte-Carlo
  truth flags).  The C++ definition is injected into ROOT's cling
  interpreter on the first call only and reused afterwards: re-declaring
  the same struct would make cling raise a redefinition error.
  """
  code = """
  namespace edm{
  struct Electron_v1{
  /* Branch variables */
  uint32_t RunNumber{};
  unsigned long long EventNumber{};
  float avgmu{};
  float LumiBlock{};
  /* Egamma */
  bool el_hasCalo ;
  bool el_hasTrack ;
  float el_e;
  float el_et;
  float el_eta;
  float el_phi;
  float el_ethad1;
  float el_ehad1;
  float el_f1;
  float el_f3;
  float el_f1core;
  float el_f3core;
  float el_weta1;
  float el_weta2;
  float el_wtots1;
  float el_fracs1;
  float el_Reta;
  float el_Rphi;
  float el_Eratio;
  float el_Rhad;
  float el_Rhad1;
  float el_deta1;
  float el_deta2;
  float el_dphi2;
  float el_dphiresc;
  float el_deltaPhiRescaled2;
  float el_deltaEta1;
  float el_deltaE;
  float el_e277;
  std::vector<float> *el_etCone;
  std::vector<float> *el_ptCone;
  float el_tap_deltaR;
  float el_tap_mass;
  float el_trk_pt;
  float el_trk_eta;
  float el_trk_charge;
  float el_trk_sigd0;
  float el_trk_d0;
  float el_trk_eProbabilityHT;
  float el_trk_transformed_eProbabilityHT;
  float el_trk_d0significance;
  float el_trk_deltaPOverP;
  float el_trk_qOverP;
  std::vector<uint8_t> *el_trk_summaryValues;
  bool el_loose{};
  bool el_medium{};
  bool el_tight{};
  bool el_lhvloose{};
  bool el_lhloose{};
  bool el_lhmedium{};
  bool el_lhtight{};
  bool el_multiLepton{};
  std::vector<float> *el_ringsE;
  int el_nGoodVtx{};
  int el_nPileupPrimaryVtx{};
  ///Egamma Calo
  float el_calo_et{};
  float el_calo_eta{};
  float el_calo_phi{};
  float el_calo_etaBE2{};
  float el_calo_e{};
  // Level 1
  float trig_L1_eta{};
  float trig_L1_phi{};
  float trig_L1_emClus{};
  float trig_L1_tauClus{};
  float trig_L1_emIsol{};
  float trig_L1_hadIsol{};
  float trig_L1_hadCore{};
  std::vector<std::string> *m_trig_L1_thrNames;
  // Level 2 Calo
  float trig_L2_calo_et{};
  float trig_L2_calo_eta{};
  float trig_L2_calo_phi{};
  float trig_L2_calo_e237{};
  float trig_L2_calo_e277{};
  float trig_L2_calo_fracs1{};
  float trig_L2_calo_weta2{};
  float trig_L2_calo_ehad1{};
  float trig_L2_calo_emaxs1{};
  float trig_L2_calo_e2tsts1{};
  float trig_L2_calo_wstot{};
  float trig_L2_calo_nnOutput{};
  std::vector<float> *trig_L2_calo_energySample;
  std::vector<float> *trig_L2_calo_rings;
  std::vector<float> *trig_L2_calo_rnnOutput;
  // level 2 id
  std::vector<int> *trig_L2_el_trackAlgID;
  std::vector<float> *trig_L2_el_pt;
  std::vector<float> *trig_L2_el_caloEta;
  std::vector<float> *trig_L2_el_eta;
  std::vector<float> *trig_L2_el_phi;
  std::vector<float> *trig_L2_el_charge;
  std::vector<float> *trig_L2_el_nTRTHits;
  std::vector<float> *trig_L2_el_nTRTHiThresholdHits;
  std::vector<float> *trig_L2_el_etOverPt;
  std::vector<float> *trig_L2_el_trkClusDeta;
  std::vector<float> *trig_L2_el_trkClusDphi;
  // EFCalo and HLT steps
  std::vector<float> *trig_EF_calo_e;
  std::vector<float> *trig_EF_calo_et;
  std::vector<float> *trig_EF_calo_eta;
  std::vector<float> *trig_EF_calo_phi;
  std::vector<float> *trig_EF_calo_etaBE2;
  std::vector<float> *trig_EF_el_calo_e;
  std::vector<float> *trig_EF_el_calo_et;
  std::vector<float> *trig_EF_el_calo_eta;
  std::vector<float> *trig_EF_el_calo_phi;
  std::vector<float> *trig_EF_el_calo_etaBE2;
  std::vector<float> *trig_EF_el_e;
  std::vector<float> *trig_EF_el_et;
  std::vector<float> *trig_EF_el_eta;
  std::vector<float> *trig_EF_el_phi;
  std::vector<float> *trig_EF_el_ethad1;
  std::vector<float> *trig_EF_el_ehad1;
  std::vector<float> *trig_EF_el_f1;
  std::vector<float> *trig_EF_el_f3;
  std::vector<float> *trig_EF_el_f1core;
  std::vector<float> *trig_EF_el_f3core;
  std::vector<float> *trig_EF_el_weta1;
  std::vector<float> *trig_EF_el_weta2;
  std::vector<float> *trig_EF_el_wtots1;
  std::vector<float> *trig_EF_el_fracs1;
  std::vector<float> *trig_EF_el_Reta;
  std::vector<float> *trig_EF_el_Rphi;
  std::vector<float> *trig_EF_el_Eratio;
  std::vector<float> *trig_EF_el_Rhad;
  std::vector<float> *trig_EF_el_Rhad1;
  std::vector<float> *trig_EF_el_deta2;
  std::vector<float> *trig_EF_el_dphi2;
  std::vector<float> *trig_EF_el_dphiresc;
  std::vector<float> *trig_EF_el_e277;
  std::vector<float> *trig_EF_el_deltaPhiRescaled2;
  std::vector<float> *trig_EF_el_deltaEta1;
  std::vector<float> *trig_EF_el_deltaE;
  std::vector<float> *trig_EF_el_etCone;
  std::vector<float> *trig_EF_el_ptCone;
  std::vector<float> *trig_EF_el_trk_pt;
  std::vector<float> *trig_EF_el_trk_eta;
  std::vector<float> *trig_EF_el_trk_charge;
  std::vector<float> *trig_EF_el_trk_sigd0;
  std::vector<float> *trig_EF_el_trk_d0;
  std::vector<float> *trig_EF_el_trk_eProbabilityHT;
  std::vector<float> *trig_EF_el_trk_transformed_eProbabilityHT;
  std::vector<float> *trig_EF_el_trk_d0significance;
  std::vector<float> *trig_EF_el_trk_deltaPOverP;
  std::vector<float> *trig_EF_el_trk_qOverP;
  std::vector<uint8_t> *trig_EF_el_trk_summaryValues;
  std::vector<bool> *trig_EF_el_hasCalo ;
  std::vector<bool> *trig_EF_el_hasTrack ;
  std::vector<bool> *trig_EF_el_loose;
  std::vector<bool> *trig_EF_el_medium;
  std::vector<bool> *trig_EF_el_tight;
  std::vector<bool> *trig_EF_el_lhvloose;
  std::vector<bool> *trig_EF_el_lhloose;
  std::vector<bool> *trig_EF_el_lhmedium;
  std::vector<bool> *trig_EF_el_lhtight;
  std::vector<bool> *trig_EF_calo_loose;
  std::vector<bool> *trig_EF_calo_medium;
  std::vector<bool> *trig_EF_calo_tight;
  std::vector<bool> *trig_EF_calo_lhvloose;
  std::vector<bool> *trig_EF_calo_lhloose;
  std::vector<bool> *trig_EF_calo_lhmedium;
  std::vector<bool> *trig_EF_calo_lhtight;
  std::vector<int> *trig_tdt_L1_calo_accept;
  std::vector<int> *trig_tdt_L2_calo_accept;
  std::vector<int> *trig_tdt_L2_el_accept ;
  std::vector<int> *trig_tdt_EF_calo_accept;
  std::vector<int> *trig_tdt_EF_el_accept ;
  std::vector<int> *trig_tdt_emu_L1_calo_accept;
  std::vector<int> *trig_tdt_emu_L2_calo_accept;
  std::vector<int> *trig_tdt_emu_L2_el_accept ;
  std::vector<int> *trig_tdt_emu_EF_calo_accept;
  std::vector<int> *trig_tdt_emu_EF_el_accept ;
  // Monte Carlo
  bool mc_hasMC{} ;
  float mc_pt{} ;
  float mc_eta{} ;
  float mc_phi{} ;
  bool mc_isTop{} ;
  bool mc_isParton{} ;
  bool mc_isMeson{} ;
  bool mc_isQuark{} ;
  bool mc_isTau{} ;
  bool mc_isMuon{} ;
  bool mc_isPhoton{} ;
  bool mc_isElectron{};
  int mc_type{};
  int mc_origin{};
  bool mc_isTruthElectronFromZ{};
  bool mc_isTruthElectronFromW{};
  bool mc_isTruthElectronFromJpsi{};
  bool mc_isTruthElectronAny{};
  };
  }
  """
  import ROOT
  # Fix: declare the struct only once per process.  Re-running ProcessLine
  # with the same definition triggers a cling redefinition error, so repeat
  # calls to Electron_v1() previously could not safely create new instances.
  if not getattr(Electron_v1, "_declared", False):
    ROOT.gInterpreter.ProcessLine(code)
    Electron_v1._declared = True
  from ROOT import edm
  return edm.Electron_v1()
| StarcoderdataPython |
146664 | <filename>2016/12/monorail.py<gh_stars>1-10
# Parse the assembunny program (AoC 2016 day 12): one instruction per line,
# e.g. "cpy 41 a" or "jnz c 2".
instructions = []
# Register file; every name that appears as a non-numeric first operand is
# registered with an initial value of 0.
regs = {}
with open('input.txt') as f:
    for line in f:
        element = line.strip().split()
        instructions.append({'operation': element[0],
                             'operands': tuple(element[1:])})
        # If the first operand is not an integer literal it must be a
        # register name -- record it.
        try:
            int(element[1])
        except ValueError:
            regs.update({element[1]: 0})
def get_value(token):
    """Resolve *token*: an integer literal evaluates to itself; anything
    else is treated as a register name and looked up in ``regs``."""
    try:
        return int(token)
    except ValueError:
        return regs[token]
def run(ignition = False):
    """Execute the parsed assembunny program and return register ``a``.

    All registers are reset to zero first; when *ignition* is true,
    register ``c`` starts at 1 instead (part 2 of the puzzle).
    """
    pc = 0
    for name in regs:
        regs[name] = 0
    if ignition:
        regs['c'] = 1
    while 0 <= pc < len(instructions):
        op = instructions[pc]['operation']
        args = instructions[pc]['operands']
        # jnz with a non-zero test value is the only instruction that moves
        # the program counter by something other than +1.
        if op == 'jnz' and get_value(args[0]) != 0:
            pc += get_value(args[1])
            continue
        if op == 'cpy':
            regs[args[1]] = get_value(args[0])
        elif op == 'dec':
            regs[args[0]] -= 1
        elif op == 'inc':
            regs[args[0]] += 1
        pc += 1
    return regs['a']
# Part 1: all registers start at zero.
print "Register a holds the value %d" % run()
# Part 2: register c starts at 1 (the "ignition key").
print "With ignition key, register a holds the value %d" % run(True)
| StarcoderdataPython |
3281314 | import json
import logging
import os
import traceback
from abc import abstractmethod, ABC
from collections import Iterator, Iterable
from logging import Logger
from typing import Union, Any, Callable
from commentjson import commentjson
from rx import Observable, from_
from rx.core.abc import Observer
from faddist.reflection import load_class, create_instance
_logger = Logger(__file__)
class Pipeline(object):
    """Wires an iterator source, a chain of rx operators ("ducts") and an
    observer into a runnable data pipeline."""

    def __init__(self, iterator: Iterator, observer: Observer = None):
        self.__iterator = iterator
        self.__observer = observer
        # Operators applied, in order, between the source and the observer.
        self.__ducts = []

    @property
    def iterator(self) -> Iterator:
        """The source iterator feeding the pipeline."""
        return self.__iterator

    @property
    def observer(self) -> Observer:
        """The observer that receives pipeline output (may be None)."""
        return self.__observer

    def append(self, duct: Callable[[Any], Any]):
        """Append an rx operator to the processing chain."""
        self.__ducts.append(duct)

    def operate(self, callable_: Callable[[Any], Any] = None):
        """Build the observable from the source and ducts and subscribe the
        observer (and, optionally, *callable_*).

        NOTE(review): when both an observer and *callable_* are given the
        observable is subscribed twice; with an iterator source the second
        subscription may see an already-exhausted iterator -- confirm this
        is the intended behaviour.
        """
        def on_error(error):
            # Fix: traceback.print_exc() only works while an exception is
            # actively being handled; rx hands us the error object instead,
            # so render that object's own traceback explicitly.
            traceback.print_exception(type(error), error, error.__traceback__)

        observable: Observable = from_(self.__iterator).pipe(*self.__ducts)
        # Give initializing observers a chance to see the pipeline before
        # the subscription starts pushing data.
        if isinstance(self.__observer, InitializingObserver):
            self.__observer.initialize(self)
        observable.subscribe(self.__observer, on_error=on_error)
        if callable_ is not None:
            observable.subscribe(callable_)
class InitializingObserver(Observer):
    """Observer base class that is handed the owning Pipeline (via
    :meth:`initialize`) before the subscription starts, and logs the
    overall outcome when the stream terminates."""

    def __init__(self) -> None:
        super().__init__()
        # Set as soon as any element fails; decides the completion log line.
        self.__failed = False

    def initialize(self, pipeline: Pipeline):
        """Hook invoked by Pipeline.operate() before subscribing; override
        to capture the pipeline."""
        _logger.info('Pipeline is initialized.')

    @abstractmethod
    def on_next(self, value: Any) -> None:
        pass

    def on_error(self, error):
        self.__failed = True
        _logger.error(error, exc_info=True)

    def on_completed(self):
        if self.__failed:
            _logger.warning('Ended with errors.')
        else:
            _logger.info('Successfully finished.')
class Assembler(object):
    """Builds :class:`Pipeline` objects (and their collaborators) from json
    descriptor dictionaries.

    Descriptor values may use three special forms:

    * ``"$var:NAME"``   -- replaced by the variable registered under NAME,
    * ``"$lambda ..."`` -- evaluated to a python lambda, with all registered
      variables and alias classes in scope,
    * ``{"__type__": ...}`` / ``{"__alias__": ...}`` -- instantiated via
      reflection with resolved ``arguments``.
    """

    def __init__(self, working_dir: str = os.path.abspath(os.getcwd())):
        # NOTE(review): the default is evaluated once at class-definition
        # time, so it is the cwd of the importing process -- confirm intended.
        self.__working_dir = working_dir
        # Alias name -> class object (filled by the "alias" section).
        self.__named_classes = {}
        # Variable name -> value (filled by the "variables" section).
        self.__variables = {"working_dir": working_dir}

    def __prepare_value(self, value: Any):
        """Resolve one descriptor value ($var / $lambda); pass others through."""
        if isinstance(value, str) and value.startswith('$var:'):
            variable_name = value[5:]
            return self.get_variable(variable_name)
        elif isinstance(value, str) and value.startswith('$lambda'):
            try:
                script = self.__create_lambda(value)
            except Exception as error:
                # Fix: the original formatted an undefined name ``descriptor``
                # here, so every broken lambda surfaced as a NameError instead
                # of this warning.  Report the offending value and chain the
                # original failure as the cause.
                raise SyntaxWarning(
                    f"Check the code of the descriptor '{json.dumps(value)}'."
                ) from error
            return script
        return value

    def __resolve_argument_from_list(self, arguments: Union[Iterable, Iterator, list, tuple]):
        """Resolve every positional argument in *arguments*."""
        result = []
        for value in arguments:
            result.append(self.__prepare_value(value))
        return result

    def __resolve_argument_from_dict(self, arguments: dict):
        """Resolve every keyword argument in *arguments*."""
        result = {}
        for key, value in arguments.items():
            result[key] = self.__prepare_value(value)
        return result

    def __resolve_arguments(self, descriptor: dict):
        """Return the resolved constructor arguments of *descriptor*:
        a list (positional), a dict (keyword), or [] when absent."""
        if 'arguments' in descriptor:
            arguments = descriptor['arguments']
            if isinstance(arguments, str):
                arguments = [arguments]
            if isinstance(arguments, (Iterator, list, tuple)):
                return self.__resolve_argument_from_list(arguments)
            elif isinstance(arguments, dict):
                return self.__resolve_argument_from_dict(arguments)
        return []

    def __create_lambda(self, value: str):
        """Compile a ``$lambda ...`` descriptor string into a callable.

        SECURITY NOTE: this evaluates descriptor text with ``eval`` -- only
        load pipeline definitions from trusted sources.
        """
        scope = {}
        scope.update(self.__variables)
        scope.update(self.__named_classes)
        compiled = eval(value[1:], scope)  # strip the leading '$'

        def isolation(data: Any) -> Any:
            # Log the offending input before a lambda failure propagates.
            try:
                return compiled(data)
            except Exception:
                logging.critical(f"Failed executing lambda function '{value}' with input data {repr(data)}.",
                                 exc_info=True)
                raise

        return isolation

    def __bootstrap_alias(self, definitions: list[dict]):
        """Register short alias names for classes (the "alias" section)."""
        if isinstance(definitions, (Iterable, Iterator, list, tuple)):
            for descriptor in definitions:
                if 'name' not in descriptor:
                    raise ResourceWarning('An alias descriptor needs a name definition.')
                if '__type__' not in descriptor:
                    raise ResourceWarning('An alias descriptor needs a __type__ definition.')
                name = descriptor['name']
                type_ = descriptor['__type__']
                self.__named_classes[name] = load_class(type_)

    def __bootstrapp_variables(self, definitions: list[dict]):
        """Instantiate and register variables (the "variables" section)."""
        if isinstance(definitions, (Iterable, Iterator, list, tuple)):
            for descriptor in definitions:
                if 'name' not in descriptor:
                    raise ResourceWarning('A variable descriptor needs a name definition.')
                name = descriptor['name']
                self.__variables[name] = self.instance_from_descriptor(descriptor)

    def __bootstrap_include(self, path: Union[list, str]):
        """Recursively bootstrap additional definition files (the "include"
        section); relative paths are resolved against the working dir."""
        if isinstance(path, str):
            path = [path]
        for p in path:
            if os.path.isabs(p):
                include_path = p
            else:
                include_path = os.path.join(self.__working_dir, p)
            with open(include_path, 'r') as fd:
                # NOTE(review): includes are parsed with plain ``json`` while
                # load_json() uses commentjson -- confirm comments are not
                # expected inside included files.
                self.bootstrap(json.load(fd))

    def instance_from_descriptor(self, descriptor: dict) -> Any:
        """Instantiate the object described by *descriptor* (class or lambda)."""
        if isinstance(descriptor, str) and descriptor.startswith('$lambda'):
            return self.__create_lambda(descriptor)
        if '__type__' not in descriptor and '__alias__' not in descriptor:
            raise ResourceWarning('An instance descriptor needs a __type__ or __alias__ definition.')
        type_ = descriptor.get('__type__')
        alias = descriptor.get('__alias__')
        arguments = self.__resolve_arguments(descriptor)
        if type_:
            clazz = load_class(type_)
        elif alias:
            clazz = self.get_class(alias)
        else:
            raise ResourceWarning('An instance descriptor needs a __type__ or __alias__ definition.')
        return create_instance(clazz, arguments)

    def bootstrap(self, definitions: dict):
        """Process the include/alias/variables sections of *definitions*."""
        if 'include' in definitions:
            self.__bootstrap_include(definitions['include'])
        if 'alias' in definitions:
            self.__bootstrap_alias(definitions['alias'])
        if 'variables' in definitions:
            self.__bootstrapp_variables(definitions['variables'])

    def has_class(self, classname: str):
        """Return True when *classname* was registered as an alias."""
        return classname in self.__named_classes

    def get_class(self, alias: str):
        """Return the class registered under *alias* (KeyError when absent)."""
        return self.__named_classes[alias]

    def new_instance(self, alias: str, arguments: Union[dict, list]):
        """Instantiate the aliased class with already-resolved arguments."""
        clazz = self.get_class(alias)
        return create_instance(clazz, arguments)

    def has_variable(self, name: str):
        """Return True when a variable *name* is registered."""
        return name in self.__variables

    def get_variable(self, name: str):
        """Return the registered variable *name* (KeyError when absent)."""
        return self.__variables[name]

    def set_variable(self, name: str, value: Any, force: bool = False):
        """Register variable *name*; refuses to overwrite unless *force*."""
        if self.has_variable(name) and not force:
            # Fix: corrected the "Yoe" typo in the user-facing message.
            raise ValueError(f"Variable '{name}' is already set. You can use the 'force' argument to override.")
        self.__variables[name] = value

    def build_pipeline(self, definitions: dict) -> "Pipeline":
        """Bootstrap *definitions* and assemble the Pipeline it describes."""
        self.bootstrap(definitions)
        if 'iterator' not in definitions:
            # Fix: the original raised a message copy-pasted from the
            # variables section; report the actual problem.
            raise ResourceWarning('A pipeline definition needs an iterator definition.')
        iterator = self.instance_from_descriptor(definitions['iterator'])
        observer = None
        if 'observer' in definitions:
            observer = self.instance_from_descriptor(definitions['observer'])
        pipeline = Pipeline(iterator, observer)
        if 'pipe' in definitions:
            pipe_definitions = definitions['pipe']
            for operator_descriptor in pipe_definitions:
                operator = self.instance_from_descriptor(operator_descriptor)
                # OperatorBuilder entries construct their operator lazily,
                # after receiving a reference to this assembler.
                if isinstance(operator, OperatorBuilder):
                    operator.assembler = self
                    operator = operator.build()
                pipeline.append(operator)
        return pipeline

    def load_json_file(self, file_path, **kwargs):
        """Load a pipeline from a json file (relative to the working dir)."""
        if not os.path.isabs(file_path):
            file_path = os.path.join(self.__working_dir, file_path)
        with open(file_path, 'r') as fp:
            return self.load_json(fp, **kwargs)

    def load_json(self, fp, **kwargs):
        """Load a pipeline from an open file object (commented json allowed)."""
        pipeline_configuration = commentjson.load(fp, **kwargs)
        return self.build_pipeline(pipeline_configuration)
class OperatorBuilder(ABC):
    """Deferred factory for a pipe operator.

    ``Assembler.build_pipeline`` attaches itself through the ``assembler``
    property before calling :meth:`build`, so subclasses can resolve
    further descriptors while constructing their operator.
    """

    def __init__(self):
        self.__owner = None

    @property
    def assembler(self):
        """The Assembler driving the current pipeline construction."""
        return self.__owner

    @assembler.setter
    def assembler(self, value: Assembler):
        self.__owner = value

    @abstractmethod
    def build(self) -> Callable[[Observable], Observable]:
        """Create and return the rx pipe operator."""
        pass
| StarcoderdataPython |
1785697 | """Resolwe models hydrate utils."""
import copy
import os
import re
from pathlib import Path
from django.core.exceptions import ValidationError
from resolwe.flow.utils import iterate_fields
def _hydrate_values(output, output_schema, data):
    """Hydrate basic:file and basic:json values.

    Find fields with basic:file type and assign a full path to the file.
    Find fields with basic:json type and assign a JSON object from storage.
    """

    def hydrate_path(file_name):
        """Hydrate file paths."""
        from resolwe.flow.managers import manager

        class HydratedPath(str):
            """String wrapper, which also stores the original filename."""

            __slots__ = ("data_id", "file_name")

            def __new__(cls, value=""):
                """Initialize hydrated path."""
                hydrated = str.__new__(cls, value)
                hydrated.data_id = data.id
                hydrated.file_name = file_name
                return hydrated

        return HydratedPath(manager.get_executor().resolve_data_path(data, file_name))

    def hydrate_storage(storage_id):
        """Hydrate storage fields."""
        from ..storage import LazyStorageJSON  # Prevent circular import.

        return LazyStorageJSON(pk=storage_id)

    for field_schema, fields in iterate_fields(output, output_schema):
        name = field_schema["name"]
        value = fields[name]
        if "type" in field_schema:
            # The type prefixes below are mutually exclusive, so the whole
            # dispatch is a single if/elif chain.  (The original had a stray
            # second ``if`` before the basic:dir branch, which only worked by
            # accident of that exclusivity.)
            if field_schema["type"].startswith("basic:file:"):
                value["file"] = hydrate_path(value["file"])
                value["refs"] = [hydrate_path(ref) for ref in value.get("refs", [])]
            elif field_schema["type"].startswith("list:basic:file:"):
                for obj in value:
                    obj["file"] = hydrate_path(obj["file"])
                    obj["refs"] = [hydrate_path(ref) for ref in obj.get("refs", [])]
            elif field_schema["type"].startswith("basic:dir:"):
                value["dir"] = hydrate_path(value["dir"])
                value["refs"] = [hydrate_path(ref) for ref in value.get("refs", [])]
            elif field_schema["type"].startswith("list:basic:dir:"):
                for obj in value:
                    obj["dir"] = hydrate_path(obj["dir"])
                    obj["refs"] = [hydrate_path(ref) for ref in obj.get("refs", [])]
            elif field_schema["type"].startswith("basic:json:"):
                fields[name] = hydrate_storage(value)
            elif field_schema["type"].startswith("list:basic:json:"):
                fields[name] = [hydrate_storage(storage_id) for storage_id in value]
def _hydrated_data_output(data_id, hydrate_values):
    """Return a deep copy of the (recursively hydrated) output of the Data
    object with primary key *data_id*, annotated with ``__``-prefixed
    metadata keys.

    Returns ``{}`` when the referenced Data object no longer exists.
    """
    from ..data import Data  # prevent circular import

    try:
        data = Data.objects.get(id=data_id)
    except Data.DoesNotExist:
        return {}

    output = copy.deepcopy(data.output)
    # Hydrate nested data references first, then (optionally) file/json values.
    hydrate_input_references(output, data.process.output_schema)
    if hydrate_values:
        _hydrate_values(output, data.process.output_schema, data)
    output["__id"] = data.id
    output["__type"] = data.process.type
    output["__descriptor"] = data.descriptor
    output["__name"] = getattr(data, "name", None)
    output["__entity_id"] = getattr(data.entity, "id", None)
    output["__entity_name"] = getattr(data.entity, "name", None)
    output["__output_schema"] = data.process.output_schema
    return output


def hydrate_input_references(input_, input_schema, hydrate_values=True):
    """Hydrate ``input_`` with linked data.

    Find fields with complex data:<...> types in ``input_`` and replace each
    data object id with the hydrated output of that object (see
    :func:`_hydrated_data_output`).
    """
    for field_schema, fields in iterate_fields(input_, input_schema):
        name = field_schema["name"]
        value = fields[name]
        if "type" in field_schema:
            if field_schema["type"].startswith("data:"):
                # A None reference is left untouched.
                if value is None:
                    continue
                fields[name] = _hydrated_data_output(value, hydrate_values)
            elif field_schema["type"].startswith("list:data:"):
                # Note: None entries are dropped from the list (matching the
                # original behaviour, which skipped them without appending).
                fields[name] = [
                    _hydrated_data_output(val, hydrate_values)
                    for val in value
                    if val is not None
                ]
def hydrate_input_uploads(input_, input_schema, hydrate_values=True):
    """Hydrate input basic:upload types with upload location.

    Collects every ``basic:file:`` / ``list:basic:file:`` field in the input
    and rewrites each relative ``file_temp`` entry to its resolved upload
    path; URLs are left untouched.
    """
    from resolwe.flow.managers import manager

    # Gather all file descriptors, flattening list fields.
    upload_entries = []
    for field_schema, fields in iterate_fields(input_, input_schema):
        value = fields[field_schema["name"]]
        field_type = field_schema.get("type")
        if field_type == "basic:file:":
            upload_entries.append(value)
        elif field_type == "list:basic:file:":
            upload_entries.extend(value)

    url_pattern = re.compile(
        r"^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]"
    )
    for entry in upload_entries:
        if "file_temp" not in entry:
            continue
        file_temp = entry["file_temp"]
        if not isinstance(file_temp, str):
            # Something very strange happened.
            entry["file_temp"] = "Invalid value for file_temp in DB"
        elif not url_pattern.search(file_temp):
            # Not a URL: treat it as a relative path and resolve it.
            entry["file_temp"] = manager.get_executor().resolve_upload_path(file_temp)
def hydrate_size(data, force=False):
    """Add file and dir sizes.

    Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:``
    and ``list:basic:dir:`` fields.

    ``force`` parameter is used to recompute file sizes also on objects
    that already have these values, e.g. in migrations.
    """
    from ..data import Data  # prevent circular import

    def get_dir_size(path):
        """Get directory size."""
        # Sum regular files only; directory entries themselves add no size.
        return sum(
            file_.stat().st_size for file_ in Path(path).rglob("*") if file_.is_file()
        )

    def get_refs_size(obj, obj_path):
        """Calculate size of all references of ``obj``.

        :param dict obj: Data object's output field (of type file/dir).
        :param Path obj_path: Path to ``obj``.
        """
        total_size = 0
        for ref in obj.get("refs", []):
            ref_path = data.location.get_path(filename=ref)
            # NOTE(review): this is a plain substring test, not a
            # path-component prefix test -- confirm it cannot wrongly skip a
            # ref whose path merely contains/overlaps the object's path text.
            if ref_path in os.fspath(obj_path):
                # It is a common case that ``obj['file']`` is also contained in
                # one of obj['ref']. In that case, we need to make sure that it's
                # size is not counted twice:
                continue
            ref_path: Path = Path(ref_path)
            if ref_path.is_file():
                total_size += ref_path.stat().st_size
            elif ref_path.is_dir():
                total_size += get_dir_size(ref_path)
        return total_size

    def add_file_size(obj):
        """Add file size to the basic:file field."""
        # Skip recomputation on already-sized, finished objects unless forced.
        if (
            data.status in [Data.STATUS_DONE, Data.STATUS_ERROR]
            and "size" in obj
            and not force
        ):
            return
        path = Path(data.location.get_path(filename=obj["file"]))
        if not path.is_file():
            raise ValidationError("Referenced file does not exist ({})".format(path))
        obj["size"] = path.stat().st_size
        # total_size also accounts for every referenced file/dir of this field.
        obj["total_size"] = obj["size"] + get_refs_size(obj, path)

    def add_dir_size(obj):
        """Add directory size to the basic:dir field."""
        # Skip recomputation on already-sized, finished objects unless forced.
        if (
            data.status in [Data.STATUS_DONE, Data.STATUS_ERROR]
            and "size" in obj
            and not force
        ):
            return
        path = Path(data.location.get_path(filename=obj["dir"]))
        if not path.is_dir():
            raise ValidationError("Referenced dir does not exist ({})".format(path))
        obj["size"] = get_dir_size(path)
        obj["total_size"] = obj["size"] + get_refs_size(obj, path)

    # Walk all sized field types, fill per-field sizes and accumulate the
    # object-level total into ``data.size``.
    data_size = 0
    for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
        name = field_schema["name"]
        value = fields[name]
        if "type" in field_schema:
            if field_schema["type"].startswith("basic:file:"):
                add_file_size(value)
                data_size += value.get("total_size", 0)
            elif field_schema["type"].startswith("list:basic:file:"):
                for obj in value:
                    add_file_size(obj)
                    data_size += obj.get("total_size", 0)
            elif field_schema["type"].startswith("basic:dir:"):
                add_dir_size(value)
                data_size += value.get("total_size", 0)
            elif field_schema["type"].startswith("list:basic:dir:"):
                for obj in value:
                    add_dir_size(obj)
                    data_size += obj.get("total_size", 0)
    data.size = data_size
| StarcoderdataPython |
62959 | <filename>backend/tests/improvements/test_fishing_boats.py
from backend.improvements.fishing_boats import FishingBoats
import pytest
@pytest.fixture(scope="function")
def setup_improvement():
    """Provide a fresh FishingBoats improvement for every test."""
    return FishingBoats()
# (attribute name, expected default value) pairs for a freshly constructed
# FishingBoats improvement; consumed by the parametrized test below.
testdata = [
    ('food', 1),
    ('production', 0),
    ('gold', 0),
    ('science', 0),
    ('culture', 0),
    ('faith', 0),
    ('housing', .5),
    ('appeal', 0),
    ('power', 0),
    ('acceptable_terrain', None),
    ('acceptable_features', None),
    ('resources', [
        'fish',
        'crabs',
        'whales',
        'pearls',
        'amber',
        'truffles',
    ]),
]
@pytest.mark.parametrize("resource, value", testdata)
def test_init(setup_improvement, resource, value):
    """Every FishingBoats attribute carries its documented default value."""
    assert getattr(setup_improvement, resource) == value
| StarcoderdataPython |
135548 | import json
import os
class JsonLoader:
    """
    JsonLoader is used to load the data from all structured json files associated with the DeepInterpolation package.
    """

    def __init__(self, path):
        # Path to the json file; kept so callers can re-load if needed.
        self.path = path
        self.load_json()

    def load_json(self):
        """
        Load the json file from the path recorded in the class instance
        into ``self.json_data``.

        Parameters:
            None

        Returns:
            None
        """
        with open(self.path, "r") as read_file:
            self.json_data = json.load(read_file)
        # Fix: removed a leftover debug print("done") that polluted stdout
        # on every load.

    def set_default(self, parameter_name, default_value):
        """
        set default forces the initialization of a parameter if it was not present in
        the json file. If the parameter is already present in the json file, nothing
        will be changed.

        Parameters:
            parameter_name (str): name of the parameter to initialize
            default_value (Any): default parameter value

        Returns:
            None
        """
        if parameter_name not in self.json_data:
            self.json_data[parameter_name] = default_value

    def get_type(self):
        """
        json types define the general category of the object the json file applies to.
        For instance, the json can apply to a data Generator type

        Parameters:
            None

        Returns:
            str: Description of the json type
        """
        return self.json_data["type"]

    def get_name(self):
        """
        Each json type is sub-divided into different names. The name defines the exact construction logic of the object and how the
        parameters json data is used. For instance, a json file can apply to a Generator type using the AudioGenerator name when
        generating data from an audio source. Type and Name fully defines the object logic.

        Parameters:
            None

        Returns:
            str: Description of the json name
        """
        return self.json_data["name"]
class JsonSaver:
    """
    JsonSaver is used to save dict data into individual file.
    """

    def __init__(self, dict_save):
        self.dict = dict_save

    def save_json(self, path):
        """
        Serialize the stored dict as json into *path*.

        Parameters:
            path (str): destination file path

        Returns:
            None
        """
        with open(path, "w") as handle:
            json.dump(self.dict, handle)
        # Make the file accessible to every user/process.
        # NOTE(review): 0o777 is very permissive -- confirm this is required.
        os.chmod(path, 0o777)
class ClassLoader:
    """
    ClassLoader allows to select and create a specific Type and Name object from the available library of objects. It then
    uses the parameters in the json file to create a specific instance of that object.
    It returns that object and the ClassLoader object should then be deleted.
    """

    from deepinterpolation import network_collection
    from deepinterpolation import generator_collection
    from deepinterpolation import trainor_collection
    from deepinterpolation import inferrence_collection

    def __init__(self, json_path):
        descriptor = JsonLoader(json_path)
        self.json_path = json_path
        self.local_type = descriptor.get_type()
        self.local_name = descriptor.get_name()

    def find_and_build(self):
        """
        Look up the class matching the recorded object 'type' and 'name' and
        return a callback to instantiate it.

        Parameters:
            None

        Returns:
            obj: an instantiation callback of the object requested when creating ClassLoader with a json file
        """
        collection_by_type = {
            "network": self.network_collection,
            "generator": self.generator_collection,
            "trainer": self.trainor_collection,
            "inferrence": self.inferrence_collection,
        }
        collection = collection_by_type.get(self.local_type)
        if collection is None:
            # Unknown type: mirror the original's implicit None return.
            return None
        return getattr(collection, self.local_name)
| StarcoderdataPython |
#!/usr/bin/env python
"""Packaging script for the python3_gearman distribution."""

from setuptools import setup

from python3_gearman import __version__ as version

# The README doubles as the PyPI long description.
with open("README.md", "r") as readme:
    long_description = readme.read()

setup(
    name='python3_gearman',
    version=version,
    description=(
        'Python 3 Gearman API - Client, worker, and admin client interfaces'
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/josiahmwalton/python3-gearman',
    packages=['python3_gearman'],
    python_requires='>=3',
    license='Apache',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| StarcoderdataPython |
3211091 | # Importar spacy e criar o objeto nlp do Português
# Fill-in-the-blank exercise: the ____ placeholders are meant to be
# completed by the student.
# Import spaCy and create the Portuguese nlp object
import ____
nlp = ____
# Process the text
doc = ____("Eu gosto de gatos e cachorros.")
# Select the first token
first_token = doc[____]
# Print the first token's text
print(first_token.____)
| StarcoderdataPython |
3379522 | <gh_stars>10-100
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def rotateRight(self, head, k):
        """Rotate a singly-linked list to the right by k places.

        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        if not head:
            # Fix: an empty list rotates to an empty list. The original
            # returned [] here, which is the wrong type for a linked list.
            return head
        # One pass to find the length and the current tail node.
        length = 1
        tail = head
        while tail.next:
            length += 1
            tail = tail.next
        k %= length  # rotating by a multiple of the length is a no-op
        # Temporarily close the list into a ring, then advance
        # (length - k) steps from the old tail to reach the new tail.
        tail.next = head
        for _ in range(length - k):
            tail = tail.next
        new_head = tail.next
        tail.next = None  # break the ring
        return new_head
| StarcoderdataPython |
3297425 | <gh_stars>0
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
def function(n=1):
    """Echo *n* to stdout, then return its double."""
    print(n)
    return 2 * n
# Demo: prints 69 (from inside function) and then the doubled value, 138.
x = function(69)
print(x)
| StarcoderdataPython |
1768892 | <filename>Bag.py
import matplotlib
matplotlib.use('Agg')  # must select the non-interactive backend before pyplot is imported
import argparse
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from glob import glob
from sklearn.cluster import DBSCAN  # added: used in BOV.cluster() but was never imported
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors

from helpers import *
class BOV:
    # Bag-of-Visual-Words image classifier.
    # NOTE: this is Python 2 code (print statements, dict.iteritems()).
    def __init__(self, no_clusters):
        # no_clusters == size of the visual vocabulary (k for k-means).
        self.no_clusters = no_clusters
        self.train_path = None
        self.test_path = None
        self.im_helper = ImageHelpers()
        self.bov_helper = BOVHelpers(no_clusters)
        self.file_helper = FileHelpers()
        self.images = None
        self.trainImageCount = 0
        self.train_labels = np.array([])
        self.name_dict = {}       # maps str(label index) -> class name
        self.descriptor_list = [] # per-image SIFT descriptor arrays
        self.name = []
        self.labelList = []       # per-image class name, filled by trainModel

    def trainModel(self):
        """
        This method contains the entire module
        required for training the bag of visual words model
        Use of helper functions will be extensive.
        """
        # read file. prepare file lists.
        self.images, self.trainImageCount, self.names = self.file_helper.getFiles(self.train_path)
        # extract SIFT Features from each image
        label_count = 0
        counter = 0
        featuresCount = []  # number of descriptors found per image
        labelList = []
        for word, imlist in self.images.iteritems():
            self.name_dict[str(label_count)] = word
            print "Computing Features for ", word
            for im in imlist:
                # cv2.imshow("im", im)
                # cv2.waitKey()
                self.train_labels = np.append(self.train_labels, label_count)
                kp, des = self.im_helper.features(im)
                print(self.names[counter] + " count " + str(np.shape(des)))
                num, features = np.shape(des)
                featuresCount.append(num)
                labelList.append(word)
                self.descriptor_list.append(des)
                counter += 1
            label_count += 1
        self.labelList = labelList
        # perform clustering
        bov_descriptor_stack = self.bov_helper.formatND(self.descriptor_list)
        self.bov_helper.cluster(featuresCount, labelList)
        self.bov_helper.developVocabulary(n_images = self.trainImageCount, descriptor_list=self.descriptor_list, labelList = labelList)
        # show vocabulary trained
        self.bov_helper.standardize()
        #sys.exit(0)
        self.bov_helper.train(self.train_labels)
        self.bov_helper.plotHist()

    def recognize(self,test_img, test_image_path=None):
        """
        This method recognizes a single image
        It can be utilized individually as well.
        """
        print("Reconociendo " + test_image_path)
        kp, des = self.im_helper.features(test_img)
        # print kp
        print des.shape
        # generate vocab for test image
        vocab = np.array( [[ 0 for i in range(self.no_clusters)]])
        # locate nearest clusters for each of
        # the visual word (feature) present in the image
        # test_ret =<> return of kmeans nearest clusters for N features
        test_ret = self.bov_helper.kmeans_obj.predict(des)
        print "Prediccion de clases para cada descriptor"
        print test_ret
        # print vocab
        # Build the histogram of visual-word occurrences for this image.
        for each in test_ret:
            vocab[0][each] += 1
        # Scale the features
        vocab = self.bov_helper.scale.transform(vocab)
        print "Vocabulario normalizado"
        print vocab
        # predict the class of the image
        lb = self.bov_helper.clf.predict(vocab)
        print "Image belongs to class : ", self.name_dict[str(int(lb[0]))]
        # Also report the 10 most similar training histograms (kNN).
        neighbor = NearestNeighbors(n_neighbors = 10)
        neighbor.fit(self.bov_helper.mega_histogram)
        dist, result = neighbor.kneighbors(vocab)
        print "kNN:"
        # print(dist)
        # print(result[0])
        for i in result[0]:
            print("label: "+self.labelList[i])
        return lb

    def testModel(self):
        """
        This method is to test the trained classifier
        read all images from testing path
        use BOVHelpers.predict() function to obtain classes of each image
        """
        self.testImages, self.testImageCount, nameList = self.file_helper.getFiles(self.test_path)
        predictions = []
        counter = 0
        for word, imlist in self.testImages.iteritems():
            print "processing " ,word
            for im in imlist:
                # print imlist[0].shape, imlist[1].shape
                print im.shape
                cl = self.recognize(im, nameList[counter])
                print cl
                predictions.append({
                    'image':im,
                    'class':cl,
                    'object_name':self.name_dict[str(int(cl[0]))]
                    })
                counter += 1
        # Save each prediction as result_<n>.png with the predicted
        # class name as the plot title.
        num = 0
        #print predictions
        for each in predictions:
            # cv2.imshow(each['object_name'], each['image'])
            # cv2.waitKey()
            # cv2.destroyWindow(each['object_name'])
            #
            plt.imshow(cv2.cvtColor(each['image'], cv2.COLOR_GRAY2RGB))
            #plt.title(each['object_name'])
            #plt.show()
            plt.title(each['object_name'])
            name = 'result_' + str(num) + '.png'
            plt.savefig(name)
            num = num + 1;

    def cluster(self):
        # Unsupervised clustering of the trained histograms: project to 2-D
        # with PCA, cluster with DBSCAN, and plot the result.
        # NOTE(review): DBSCAN is used below but was not imported at the top
        # of the file -- it must come from sklearn.cluster.
        print("Clustering con DBSCAN")
        #DIFICIL
        mega_histogram = self.bov_helper.mega_histogram
        #print(mega_histogram)
        #db = DBSCAN(eps=5, min_samples=3).fit(mega_histogram)
        #core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        #core_samples_mask[db.core_sample_indices_] = True
        #labels = db.labels_
        #last = 0
        #count = 0
        #print labels
        ##for k in featuresCount:
        ##    print('Etiquetas(' +str(count)+') : ' + str(k) + ' Label: ' + labelList[count])
        ##    new = labels[last:last+k-1]
        ##    new = new[new != -1]
        ##    print(new)
        ##    moda = stats.mode(new)
        ##    print('Moda: ')
        ##    print(moda[0])
        ##    hist = np.histogram(new)
        ##    print(hist)
        ##    last = last+k
        ##    count += 1
        #n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        #n_noise_ = list(labels).count(-1)
        #print('Estimated number of clusters: %d' % n_clusters_)
        #print('Estimated number of noise points: %d' % n_noise_)
        print('PCA::')
        pca = PCA(n_components=2)
        principalComponents = pca.fit_transform(mega_histogram)
        principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
        finalDf = pd.concat([principalDf, pd.Series(self.labelList)], axis = 1)
        print(finalDf)
        db = DBSCAN(eps=1, min_samples=2).fit(principalComponents)
        #db = DBSCAN(eps=1.2, min_samples=2).fit(principalComponents) #6 clusters mas feo
        #db = DBSCAN(eps=1, min_samples=2).fit(principalComponents) #6 clusters
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        last = 0
        print labels
        # DBSCAN labels noise points as -1; exclude them from the count.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        n_noise_ = list(labels).count(-1)
        print('Estimated number of clusters: %d' % n_clusters_)
        print('Estimated number of noise points: %d' % n_noise_)
        # Black removed and is used for noise instead.
        unique_labels = set(labels)
        contador = 0
        for i in labels:
            if(i == -1):
                print("label: None")
            else:
                print("i: " +str(contador) +"label: "+str(i))
            contador += 1
        colors = [plt.cm.Spectral(each)
                  for each in np.linspace(0, 1, len(unique_labels))]
        for k, col in zip(unique_labels, colors):
            if k == -1:
                col = [0, 0, 0, 1]
            class_member_mask = (labels == k)
            # Core samples drawn large, border samples drawn small.
            xy = principalComponents[class_member_mask & core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col), markeredgecolor='k', markersize=14)
            xy = principalComponents[class_member_mask & ~core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col), markeredgecolor='k', markersize=6)
        plt.title('Estimated number of clusters: %d' % n_clusters_)
        plt.savefig('pepe.png')

    def print_vars(self):
        # Placeholder; intentionally does nothing.
        pass
if __name__ == '__main__':
    # parse cmd args
    parser = argparse.ArgumentParser(
        description=" Bag of visual words example"
        )
    parser.add_argument('--train_path', action="store", dest="train_path", required=True)
    parser.add_argument('--test_path', action="store", dest="test_path", required=True)

    args = vars(parser.parse_args())
    print args

    bov = BOV(no_clusters=20)
    # set training paths
    bov.train_path = args['train_path']
    # set testing paths
    bov.test_path = args['test_path']
    # train the model
    bov.trainModel()
    # test model
    #bov.testModel()
    bov.cluster()
| StarcoderdataPython |
1724827 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Generated: 10/24/2021(m/d/y) 01:41:23 utc
leagues = ['Standard', 'Hardcore', 'Scourge', 'Hardcore Scourge'] | StarcoderdataPython |
3287399 | <reponame>mpuheim/Various<filename>University - Team Projects/Technicom APG/modules/utils.py<gh_stars>0
from os import mkdir
from sys import exc_clear
# make directory
# make directory
def makedir(name):
    """Create directory *name*; silently succeed if it already exists.

    Unlike the previous blanket ``except OSError`` (which also hid
    permission errors and bad paths), only the already-exists case is
    suppressed; any other OSError is re-raised.
    """
    import errno  # local import keeps the module's import header unchanged
    try:
        mkdir(name)  # try to make directory
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
1748382 | import os
from enum import Enum
from pathlib import Path
from os.path import join, exists
import argparse
import pathlib
import click
import numpy as np
import pandas as pd
import download_data
import dataframe
import plotter
from matplotlib import pyplot as plt
import seaborn as sns
import dataframe
import plotter
# Module-level plotting tweaks applied at import time.
plotter.legend_fontsize = 20
alpha = 0.7  # NOTE(review): not referenced in this file; presumably used by plotter helpers
class ModelTypes(Enum):
    # Models plotted in order (so first enum value is plotted first)
    # Each value is (legend label, matplotlib color, marker-size spec).
    # NOTE(review): the third field is an int for some members and a
    # (size, alpha) tuple for others -- presumably interpreted by
    # plotter.model_scatter_plot; confirm against that helper.
    STANDARD = ('Standard training', (0.5, 0.5, 0.5, 0.5), 50)
    MORE_DATA = ('Trained with more data', 'tab:green', 100)
    NO_SUBSAMPLE = ('1000 classes', 'tab:blue', (300, 1.0))
    SUBSAMPLE_500C = ('500 classes', 'tab:purple', (300, 1.0))
    SUBSAMPLE_250C = ('250 classes', 'tab:brown', (300, 1.0))
    SUBSAMPLE_125C = ('125 classes', 'tab:olive', (300, 1.0))
# Map from model identifier (the dataframe index) to its plotting bucket.
# Models missing from this map get model_type None (see get_model_type).
model_types_map = {
    'resnet50_imagenet_100percent_batch64_original_images': ModelTypes.NO_SUBSAMPLE,
    'resnet50_imagenet_subsample_500_classes_batch64_original_images': ModelTypes.SUBSAMPLE_500C,
    'resnet50_imagenet_subsample_250_classes_batch64_original_images': ModelTypes.SUBSAMPLE_250C,
    'resnet50_imagenet_subsample_125_classes_batch64_original_images': ModelTypes.SUBSAMPLE_125C,
    'FixPNASNet': ModelTypes.STANDARD,
    'FixResNeXt101_32x48d': ModelTypes.MORE_DATA,
    'FixResNeXt101_32x48d_v2': ModelTypes.MORE_DATA,
    'FixResNet50': ModelTypes.STANDARD,
    'FixResNet50_no_adaptation': ModelTypes.STANDARD,
    'FixResNet50_v2': ModelTypes.STANDARD,
    'alexnet': ModelTypes.STANDARD,
    'bninception': ModelTypes.STANDARD,
    'bninception-imagenet21k': ModelTypes.MORE_DATA,
    'cafferesnet101': ModelTypes.STANDARD,
    'densenet121': ModelTypes.STANDARD,
    'densenet161': ModelTypes.STANDARD,
    'densenet169': ModelTypes.STANDARD,
    'densenet201': ModelTypes.STANDARD,
    'dpn107': ModelTypes.MORE_DATA,
    'dpn131': ModelTypes.STANDARD,
    'dpn68': ModelTypes.STANDARD,
    'dpn68b': ModelTypes.MORE_DATA,
    'dpn92': ModelTypes.MORE_DATA,
    'dpn98': ModelTypes.STANDARD,
    'efficientnet-b0': ModelTypes.STANDARD,
    'efficientnet-b0-autoaug': ModelTypes.STANDARD,
    'efficientnet-b1': ModelTypes.STANDARD,
    'efficientnet-b1-autoaug': ModelTypes.STANDARD,
    'efficientnet-b2': ModelTypes.STANDARD,
    'efficientnet-b2-autoaug': ModelTypes.STANDARD,
    'efficientnet-b3': ModelTypes.STANDARD,
    'efficientnet-b3-autoaug': ModelTypes.STANDARD,
    'efficientnet-b4': ModelTypes.STANDARD,
    'efficientnet-b4-autoaug': ModelTypes.STANDARD,
    'efficientnet-b5': ModelTypes.STANDARD,
    'efficientnet-b5-autoaug': ModelTypes.STANDARD,
    'efficientnet-b5-randaug': ModelTypes.STANDARD,
    'efficientnet-b6-autoaug': ModelTypes.STANDARD,
    'efficientnet-b7-autoaug': ModelTypes.STANDARD,
    'efficientnet-b7-randaug': ModelTypes.STANDARD,
    'efficientnet-l2-noisystudent': ModelTypes.MORE_DATA,
    'fbresnet152': ModelTypes.STANDARD,
    'google_resnet101_jft-300M': ModelTypes.MORE_DATA,
    'googlenet/inceptionv1': ModelTypes.STANDARD,
    'inceptionresnetv2': ModelTypes.STANDARD,
    'inceptionv3': ModelTypes.STANDARD,
    'inceptionv4': ModelTypes.STANDARD,
    'instagram-resnext101_32x16d': ModelTypes.MORE_DATA,
    'instagram-resnext101_32x32d': ModelTypes.MORE_DATA,
    'instagram-resnext101_32x48d': ModelTypes.MORE_DATA,
    'instagram-resnext101_32x8d': ModelTypes.MORE_DATA,
    'mnasnet0_5': ModelTypes.STANDARD,
    'mnasnet1_0': ModelTypes.STANDARD,
    'mobilenet_v2': ModelTypes.STANDARD,
    'nasnetalarge': ModelTypes.STANDARD,
    'nasnetamobile': ModelTypes.STANDARD,
    'pnasnet5large': ModelTypes.STANDARD,
    'polynet': ModelTypes.STANDARD,
    'resnet101': ModelTypes.STANDARD,
    'resnet101-tencent-ml-images': ModelTypes.MORE_DATA,
    'resnet152': ModelTypes.STANDARD,
    'resnet152-imagenet11k': ModelTypes.MORE_DATA,
    'resnet18': ModelTypes.STANDARD,
    'resnet18_ssl': ModelTypes.MORE_DATA,
    'resnet18_swsl': ModelTypes.MORE_DATA,
    'resnet34': ModelTypes.STANDARD,
    'resnet50': ModelTypes.STANDARD,
    'resnet50-vtab': ModelTypes.STANDARD,
    'resnet50-vtab-exemplar': ModelTypes.STANDARD,
    'resnet50-vtab-rotation': ModelTypes.STANDARD,
    'resnet50-vtab-semi-exemplar': ModelTypes.STANDARD,
    'resnet50-vtab-semi-rotation': ModelTypes.STANDARD,
    'resnet50_aws_baseline': ModelTypes.STANDARD,
    'resnet50_ssl': ModelTypes.MORE_DATA,
    'resnet50_swsl': ModelTypes.MORE_DATA,
    'resnext101_32x16d_ssl': ModelTypes.MORE_DATA,
    'resnext101_32x4d': ModelTypes.STANDARD,
    'resnext101_32x4d_ssl': ModelTypes.MORE_DATA,
    'resnext101_32x4d_swsl': ModelTypes.MORE_DATA,
    'resnext101_32x8d': ModelTypes.STANDARD,
    'resnext101_32x8d_ssl': ModelTypes.MORE_DATA,
    'resnext101_32x8d_swsl': ModelTypes.MORE_DATA,
    'resnext101_64x4d': ModelTypes.STANDARD,
    'resnext50_32x4d': ModelTypes.STANDARD,
    'resnext50_32x4d_ssl': ModelTypes.MORE_DATA,
    'resnext50_32x4d_swsl': ModelTypes.MORE_DATA,
    'se_resnet101': ModelTypes.STANDARD,
    'se_resnet152': ModelTypes.STANDARD,
    'se_resnet50': ModelTypes.STANDARD,
    'se_resnext101_32x4d': ModelTypes.STANDARD,
    'se_resnext50_32x4d': ModelTypes.STANDARD,
    'senet154': ModelTypes.STANDARD,
    'shufflenet_v2_x0_5': ModelTypes.STANDARD,
    'shufflenet_v2_x1_0': ModelTypes.STANDARD,
    'squeezenet1_0': ModelTypes.STANDARD,
    'squeezenet1_1': ModelTypes.STANDARD,
    'vgg11': ModelTypes.STANDARD,
    'vgg11_bn': ModelTypes.STANDARD,
    'vgg13': ModelTypes.STANDARD,
    'vgg13_bn': ModelTypes.STANDARD,
    'vgg16': ModelTypes.STANDARD,
    'vgg16_bn': ModelTypes.STANDARD,
    'vgg19': ModelTypes.STANDARD,
    'vgg19_bn': ModelTypes.STANDARD,
    'wide_resnet101_2': ModelTypes.STANDARD,
    'wide_resnet50_2': ModelTypes.STANDARD,
    'xception': ModelTypes.STANDARD,
}
def get_model_type(df_row):
    """Return the plotting bucket for a model row, or None when unknown."""
    return model_types_map.get(df_row.name)
def show_in_plot(df_row):
    """Every model row is rendered in the scatter plot (no filtering)."""
    return True
def use_for_line_fit(df_row):
    """Only standard-training models anchor the fitted trend line.

    Visibility (``show_in_plot``) deliberately does not affect fit
    membership; the original unpacked it into an unused local.
    """
    return df_row.model_type is ModelTypes.STANDARD
def prepare_df_for_plotting(df, df_metadata, columns):
    """Join the accuracy columns with their dataset-size metadata and tag
    each model row with plotting attributes (model_type, show_in_plot,
    use_for_line_fit). Rows with any missing value are dropped."""
    assert set(columns).issubset(set(df.columns))
    df = df[columns]
    # Metadata columns follow the '<column>_dataset_size' naming convention.
    df_metadata = df_metadata[[x+'_dataset_size' for x in columns]]
    df = df.merge(df_metadata, right_index=True, left_index=True)
    df = df.dropna()

    df['model_type'] = df.apply(get_model_type, axis=1)
    df['show_in_plot'] = df.apply(show_in_plot, axis=1)
    df['use_for_line_fit'] = df.apply(use_for_line_fit, axis=1)
    return df
@click.command()
@click.option('--x_axis', type=str, default='val_subsampled_class_1_8')
@click.option('--y_axis', type=str, default='imagenetv2-matched-frequency-format-val_subsampled_class_1_8')
@click.option('--transform', type=str, default='logit')
@click.option('--num_bootstrap_samples', type=int, default=1000)
@click.option('--output_dir', type=str, default=str((pathlib.Path(__file__).parent / '../outputs').resolve()))
@click.option('--output_file_dir', type=str, default=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()))
@click.option('--skip_download', is_flag=True, type=bool)
def generate_xy_plot(x_axis, y_axis, transform, num_bootstrap_samples, output_dir, output_file_dir, skip_download):
    """Build the class-subsampled ImageNet vs. ImageNetV2 scatter plot and
    save it as subsample_classes.pdf under ``output_file_dir``."""
    if skip_download:
        filename = join(output_dir, 'grid_df.pkl')
        if not exists(filename):
            # Fix: interpolate the actual path; the message previously
            # contained the literal placeholder text "(unknown)".
            raise Exception(f'Downloaded data not found at {filename}. Please run python src/plotting/download_data.py first')
        df = pd.read_pickle(filename)
    else:
        df = download_data.download_plotting_data(output_dir, store_data=True, verbose=True)

    # Massage the raw grid into plotting form.
    df, df_metadata = dataframe.extract_metadata(df)
    df, df_metadata = dataframe.replace_10percent_with_metadata(df, df_metadata)
    df, df_metadata = dataframe.aggregate_corruptions_with_metadata(df, df_metadata)

    df = prepare_df_for_plotting(df, df_metadata, [x_axis, y_axis])
    df = plotter.add_plotting_data(df, [x_axis, y_axis])
    df = df.dropna()

    # Axis limits are padded slightly around the visible points.
    df_visible = df[df.show_in_plot == True]
    xlim = [df_visible[x_axis].min() - 1, df_visible[x_axis].max() + 0.5]
    ylim = [df_visible[y_axis].min() - 2, df_visible[y_axis].values.max() + 2]

    fig, ax = plotter.model_scatter_plot(df, x_axis, y_axis, xlim, ylim, ModelTypes,
                                         transform=transform, tick_multiplier=5, num_bootstrap_samples=num_bootstrap_samples,
                                         title='Robustness for Subsampling ImageNet', x_label='ImageNet (class-subsampled)', y_label='ImageNetV2 (class-\nsubsampled)',
                                         figsize=(12, 8), include_legend=True, return_separate_legend=False)

    # Compact two-column legend with uniform marker size/alpha.
    l = ax.legend(loc='upper left',
                  ncol=2,
                  bbox_to_anchor=(0, 1),
                  fontsize=plotter.legend_fontsize,
                  scatterpoints=1,
                  columnspacing=0,
                  handlelength=1.5,
                  borderpad=0.2)
    for x in l.legendHandles:
        x._sizes = [100]
        x.set_alpha(0.8)

    os.makedirs(output_file_dir, exist_ok=True)
    fig.savefig(join(output_file_dir, f'subsample_classes.pdf'), dpi='figure', bbox_inches='tight', pad_inches=0.1)
    print(f"Plot saved to {join(output_file_dir, f'subsample_classes.pdf')}")
if __name__ == '__main__':
    # click parses the CLI options from sys.argv.
    generate_xy_plot()
| StarcoderdataPython |
3252755 | <gh_stars>1-10
import numpy as np
class Loader(dict):
    """Mini-batch data loader over array-like storage (e.g. numpy or HDF5).

    For an instance ``L``:
      * ``len(L)`` is the number of samples.
      * ``iter(L)`` yields mini-batches as numpy arrays — ``X`` alone when
        no labels were given, otherwise ``(X, Y)`` pairs.
    """

    def __init__(self, batch_size, X, Y=None, shuffle=True, name=None):
        """``X`` and ``Y`` may be any array-likes (numpy arrays, HDF5, ...)."""
        if name is not None:
            self.name = name
        # Materialize the storage into numpy arrays up front.
        self.X = np.asanyarray(X[:])
        self.Y = None if Y is None else np.asanyarray(Y[:])
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.nrows = self.X.shape[0]

    def __iter__(self):
        order = np.arange(self.nrows)
        if self.shuffle:
            np.random.shuffle(order)
        for start in range(0, self.nrows, self.batch_size):
            stop = min(start + self.batch_size, self.nrows)
            picked = order[start:stop]
            batch_x = np.take(self.X, picked, 0)
            if self.Y is None:
                yield batch_x
            else:
                yield batch_x, np.take(self.Y, picked, 0)

    def __len__(self):
        return self.nrows
28248 | <gh_stars>100-1000
#!/usr/bin/env python3
"""
Generate the numeric limits for a given radix.
This is used for the fast-path algorithms, to calculate the
maximum number of digits or exponent bits that can be exactly
represented as a native value.
"""
import math
def is_pow2(value):
    '''Return True when *value* is an exact power of 2.'''
    return value == 2 ** int(math.log2(value))
def remove_pow2(value):
    '''Strip every factor of 2 from *value* and return the odd remainder.'''
    while value % 2 == 0:
        value //= 2
    return value
def feature(radix):
    '''Return the Rust cfg! feature-gate prefix guarding this radix.

    Decimal needs no gate; power-of-two radixes sit behind
    "power-of-two"; everything else requires the full "radix" feature.
    '''
    if radix == 10:
        return ''
    if is_pow2(radix):
        return 'if cfg!(feature = "power-of-two") '
    return 'if cfg!(feature = "radix") '
def exponent_limit(radix, mantissa_size, max_exp):
    '''
    Return the (min, max) exponent limits a float can represent exactly
    in base `radix`, where `mantissa_size` is the mantissa length in bits
    and `max_exp` is the maximum binary exponent
    (2**(exponent_size - 1) - 1).
    '''
    if is_pow2(radix):
        # A power-of-two radix maps directly onto the binary exponent, so
        # every exponent up to the scaled limit is exact (denormal floats
        # are not handled).
        bound = int(max_exp / math.log2(radix))
        return (-bound, bound)
    # Otherwise the limits are symmetric: find how many digits of the odd
    # part of the radix fit in the mantissa. The power-of-two factor is
    # absorbed by the binary exponent bits.
    odd_base = remove_pow2(radix)
    digits = int((mantissa_size + 1) / math.log2(odd_base))
    return (-digits, digits)
def mantissa_limit(radix, mantissa_size):
    '''
    Return how many base-`radix` digits are exactly representable in a
    mantissa of `mantissa_size` bits (plus the implicit leading bit).
    '''
    return int((mantissa_size + 1) / math.log2(radix))
def all_limits(mantissa_size, exponent_size, type_name):
    '''Print limits for all radixes.'''
    # Largest binary exponent with all-but-the-lowest bits set.
    max_exp = 2**(exponent_size - 1) - 1
    # Emit a Rust const fn mapping radix -> (min, max) exponent limit.
    print('/// Get the exponent limit as a const fn.')
    print('#[inline(always)]')
    print(f'pub const fn {type_name}_exponent_limit(radix: u32) -> (i64, i64) {{')
    print('    match radix {')
    for radix in range(2, 37):
        exp_limit = exponent_limit(radix, mantissa_size, max_exp)
        print(f'        {radix} {feature(radix)}=> {exp_limit},')
    print('        _ => (0, 0),')
    print('    }')
    print('}')
    print('')
    # Emit a Rust const fn mapping radix -> mantissa digit limit.
    print('/// Get the mantissa limit as a const fn.')
    print('#[inline(always)]')
    print(f'pub const fn {type_name}_mantissa_limit(radix: u32) -> i64 {{')
    print('    match radix {')
    for radix in range(2, 37):
        mant_limit = mantissa_limit(radix, mantissa_size)
        print(f'        {radix} {feature(radix)}=> {mant_limit},')
    print('        _ => 0,')
    print('    }')
    print('}')
    print('')
# IEEE-754 layouts: f32 = 23-bit mantissa / 8-bit exponent,
# f64 = 52-bit mantissa / 11-bit exponent.
all_limits(23, 8, 'f32')
all_limits(52, 11, 'f64')
| StarcoderdataPython |
79322 | <reponame>arcosin/ANP_TrackDriver<filename>src/sac/sac.py
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import numpy as np
from .models import FeatureExtractor, ValueNetwork, SoftQNetwork, PolicyNetwork
from .replay_buffers import BasicBuffer
from .checkpointer import Checkpointer
class SACAgent:
    """Soft Actor-Critic agent operating on raw images.

    Owns the CNN feature extractor, the value / twin-Q / policy networks,
    their optimizers, and a replay buffer. ``update()`` performs one SAC
    gradient step and returns the latest extractor and policy weights.
    """

    def __init__(self,
        action_range, action_dim, gamma, tau, v_lr, q_lr, pi_lr, buffer_maxlen=int(1e6),
        image_size=(256,256,3), kernel_size=(3,3), conv_channels=4,
        logFile='logs/losses.txt'):
        #TODO: Known issue when cuda is enabled, robot can't de-pickle stuff from server
        #self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = "cpu"
        self.action_range = action_range
        self.action_dim = action_dim
        self.image_size = tuple(image_size)

        # Hyperparameters
        self.gamma = gamma        # discount factor
        self.tau = tau            # Polyak averaging rate for the target value net
        self.update_step = 0
        self.delay_step = 2       # policy/target updated every `delay_step` critic updates

        # Logging
        self.logFile = open(logFile, 'w')

        # Network initialization
        self.fe = FeatureExtractor(image_size[2], conv_channels, kernel_size).to(self.device)
        self.in_dim = self.fe.get_output_size(self.image_size)
        self.in_dim = np.prod(self.in_dim)

        self.v_net = ValueNetwork(self.in_dim, 1).to(self.device)
        self.target_v_net = ValueNetwork(self.in_dim, 1).to(self.device)
        self.q_net1 = SoftQNetwork(self.in_dim, self.action_dim).to(self.device)
        self.q_net2 = SoftQNetwork(self.in_dim, self.action_dim).to(self.device)
        self.pi_net = PolicyNetwork(self.in_dim, self.action_dim).to(self.device)

        # Target value net starts as an exact copy of the value net.
        for target_param, param in zip(self.target_v_net.parameters(), self.v_net.parameters()):
            target_param.data.copy_(param)

        # Optimizer initialization
        self.v_optimizer = optim.Adam(self.v_net.parameters(), lr=v_lr)
        self.q1_optimizer = optim.Adam(self.q_net1.parameters(), lr=q_lr)
        # BUG FIX: this optimizer previously received q_net1's parameters,
        # so q_net2 was never trained (and q_net1 had two optimizers).
        self.q2_optimizer = optim.Adam(self.q_net2.parameters(), lr=q_lr)
        self.pi_optimizer = optim.Adam(self.pi_net.parameters(), lr=pi_lr)

        self.replay_buffer = BasicBuffer(buffer_maxlen)

    def update(self, batch_size):
        """Run one SAC update on a sampled mini-batch.

        Returns a 4-tuple ``(fe_state_dict, pi_state_dict, updated, losses)``
        where ``updated`` is False when the buffer is still too small and
        ``losses`` is a dict only on delayed policy-update steps.
        """
        if len(self.replay_buffer) <= batch_size:
            print('Replay buffer not large enough to sample, returning models...')
            return self.fe.state_dict(), self.pi_net.state_dict(), False, None

        states, actions, rewards, next_states, dones = self.replay_buffer.sample(batch_size)
        states = np.stack(states)
        next_states = np.stack(next_states)

        # NHWC -> NCHW for the conv feature extractor.
        states = torch.FloatTensor(states).permute(0, 3, 1, 2).to(self.device)
        actions = torch.FloatTensor(actions).to(self.device)
        rewards = torch.FloatTensor(rewards).to(self.device)
        next_states = torch.FloatTensor(next_states).permute(0, 3, 1, 2).to(self.device)
        dones = torch.FloatTensor(dones).to(self.device)
        dones = dones.view(dones.size(0), -1)

        # Process images
        features = self.fe(states)
        next_features = self.fe(next_states)
        features = torch.reshape(features, (batch_size, self.in_dim))
        next_features = torch.reshape(next_features, (batch_size, self.in_dim))

        # Value target: min of the twin Qs minus the policy entropy term.
        next_actions, next_log_pi = self.pi_net.sample(next_features)
        next_q1 = self.q_net1(next_features, next_actions)
        next_q2 = self.q_net2(next_features, next_actions)
        next_v = self.target_v_net(next_features)
        next_v_target = torch.min(next_q1, next_q2) - next_log_pi
        curr_v = self.v_net.forward(features)
        v_loss = F.mse_loss(curr_v, next_v_target.detach())
        self.logFile.write('    v_loss: %s\n' % v_loss.item())

        # Q loss: one-step Bellman backup through the target value net.
        curr_q1 = self.q_net1.forward(features, actions)
        curr_q2 = self.q_net2.forward(features, actions)
        expected_q = rewards + (1 - dones) * self.gamma * next_v
        q1_loss = F.mse_loss(curr_q1, expected_q.detach())
        q2_loss = F.mse_loss(curr_q2, expected_q.detach())
        self.logFile.write('   q1_loss: %s\n' % q1_loss.item())
        self.logFile.write('   q2_loss: %s\n' % q2_loss.item())

        # update v_net and q_nets
        self.v_optimizer.zero_grad()
        v_loss.backward(retain_graph=True)
        self.v_optimizer.step()

        self.q1_optimizer.zero_grad()
        q1_loss.backward(retain_graph=True)
        self.q1_optimizer.step()

        self.q2_optimizer.zero_grad()
        q2_loss.backward(retain_graph=True)
        self.q2_optimizer.step()

        losses = None
        # Delayed policy update and target-network soft update.
        if self.update_step % self.delay_step == 0:
            new_actions, log_pi = self.pi_net.sample(features)
            min_q = torch.min(
                self.q_net1.forward(features, new_actions),
                self.q_net2.forward(features, new_actions)
            )
            pi_loss = (log_pi - min_q).mean()
            self.logFile.write('   pi_loss: %s\n\n' % pi_loss.item())
            losses = { 'v_loss': v_loss.item(),
                       'q_loss': min(q1_loss.item(),q2_loss.item()),
                       'pi_loss': pi_loss.item() }

            self.pi_optimizer.zero_grad()
            pi_loss.backward(retain_graph=True)
            self.pi_optimizer.step()

            # Polyak-average the value net into its target.
            for target_param, param in zip(self.target_v_net.parameters(), self.v_net.parameters()):
                target_param.data.copy_(self.tau * param + (1 - self.tau) * target_param)
        else:
            self.logFile.write('\n\n')

        self.update_step += 1
        return self.fe.state_dict(), self.pi_net.state_dict(), True, losses
| StarcoderdataPython |
177410 | """
从Kafka 中读取股价信息,
Spark Streaming 输出10秒时间窗的滑动均值作为预测。
"""
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.functions import from_json
import pyspark.sql.types as spark_type
import pyspark.sql.functions as F
from pyspark.sql.functions import get_json_object
from pyspark.sql.functions import window, avg, count, to_timestamp, col
# Keep the shuffle small; this is a single-machine streaming demo.
spark = SparkSession.builder.appName("StructuredStreaming_Kafka").config("spark.sql.shuffle.partitions",2).getOrCreate()

# Subscribe to 1 topic
df = (
    spark.readStream.format("kafka")
    .option("kafka.bootstrap.servers", "localhost:9092")
    .option("subscribe", "test")
    .option("fetchOffset.retryIntervalMs", "3000")
    .load()
)

# output with window function
# Parse timestamp/close out of the JSON payload, average `close` over a
# 10-second window sliding by 1 second, keep only full windows
# (exactly 10 records), and publish back to the `output_buffer` topic.
ds = (
    df.select(
        get_json_object(df.value.cast("string"), "$.timestamp")
        .cast("timestamp")
        .alias("timestamp"),
        get_json_object(df.value.cast("string"), "$.close")
        .cast("float")
        .alias("close"),
    )
    .groupby(
        window("timestamp", "10 seconds", "1 second")
        .start.cast("string")
        .alias("start_time"),
        window("timestamp", "10 seconds", "1 second")
        .end.cast("string")
        .alias("end_time"),
    )
    .agg(F.avg("close").alias("value"), F.count("close").alias("cnt_close"))
    .where(F.col("cnt_close") == 10)
    .selectExpr("start_time", "end_time", "CAST(value AS STRING)", "cnt_close")
    .writeStream.outputMode("Update")
    # .format("console")
    # .option("truncate", False)
    .format("kafka")
    .option("kafka.bootstrap.servers", "localhost:9092")
    .option("topic","output_buffer")
    .option("checkpointLocation", "./checkpoint")
    .start()
)
# Block forever; the streaming query runs until interrupted.
ds.awaitTermination()
| StarcoderdataPython |
3206284 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Exercise 3 - Question.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/Exercises/Exercise%203%20-%20Convolutions/Exercise%203%20-%20Question.ipynb
"""
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""## Exercise 3
In the videos you looked at how you would improve Fashion MNIST using Convolutions. For your exercise see if you can improve MNIST to 99.8% accuracy or more using only a single convolutional layer and a single MaxPooling 2D. You should stop training once the accuracy goes above this amount. It should happen in less than 20 epochs, so it's ok to hard code the number of epochs for training, but your training must end once it hits the above metric. If it doesn't, then you'll need to redesign your layers.
I've started the code for you -- you need to finish it!
When 99.8% accuracy has been hit, you should print out the string "Reached 99.8% accuracy so cancelling training!"
"""
import tensorflow as tf
# YOUR CODE STARTS HERE
class myCallback(tf.keras.callbacks.Callback):
    """Stop training once training accuracy exceeds 99.8%."""

    def on_epoch_end(self, epoch, logs=None):
        """Keras epoch-end hook.

        Fixes two issues with the original: a mutable default argument
        (``logs={}``), and a TypeError when 'accuracy' is missing from
        ``logs`` (``None > 0.998`` raises in Python 3).
        """
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.998:
            print("\nAccuracy achieved 99.8% so cancelling training.")
            self.model.stop_training = True
# YOUR CODE ENDS HERE
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# Scale pixel values to [0, 1] and add the trailing channel dimension
# expected by Conv2D: (N, 28, 28) -> (N, 28, 28, 1).
training_images = training_images / 255.0
test_images = test_images / 255.0
training_images = training_images.reshape(60000, 28, 28, 1)
test_images = test_images.reshape(10000, 28, 28, 1)

# YOUR CODE STARTS HERE
callbacks = myCallback()
# YOUR CODE ENDS HERE

# Single conv + single max-pool, per the exercise constraints.
model = tf.keras.models.Sequential([
    # YOUR CODE STARTS HERE
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
    # YOUR CODE ENDS HERE
])

# YOUR CODE STARTS HERE
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# The callback stops training early once accuracy passes 99.8%.
model.fit(training_images, training_labels, epochs=20, callbacks=[callbacks])
model.evaluate(test_images, test_labels)
# YOUR CODE ENDS HERE
| StarcoderdataPython |
35802 | import bs4
import urllib
from base_online_scraper import base_online_scraper as scraper
BASE_URL = 'http://catalog.northeastern.edu'
INITIAL_PATH = '/course-descriptions/'

# Fetch the A-Z index page and collect the per-subject links.
# (Python 2 urllib API: urlopen lives directly on urllib.)
fp = urllib.urlopen(BASE_URL + INITIAL_PATH)
soup = bs4.BeautifulSoup(fp, 'lxml')
nav_menu = soup.find("div", {"id": "atozindex"}).find_all('a', href=True)

# NOTE(review): this rebinding shadows the `scraper` class alias imported
# above; one scraper instance is reused with a different url per link.
scraper = scraper('')
for a in nav_menu:
    scraper.url = BASE_URL + a['href']
    scraper.scrape()
| StarcoderdataPython |
3379270 | <filename>send-email.py
#!/usr/bin/python
import sys
import smtplib
import string
from subprocess import Popen, PIPE
# Capture the machine's network configuration to use as the mail body.
stdout = Popen('ifconfig', shell=True, stdout=PIPE).stdout
output = stdout.read()

HOST = '10.1.0.1'  # SMTP relay address
SUBJECT = "Linux installation complete"
# Recipient: taken from argv[1] when exactly one argument is given,
# otherwise a hard-coded fallback for legacy kickstart files.
if len(sys.argv) != 2:
    # default value for use in old kickstarts
    TO = "<EMAIL>"
else:
    # new kickstarts specify address
    TO = sys.argv[1]
FROM = "<EMAIL>"
# Python 2 idiom: string.join concatenates the header lines with CRLF,
# forming a minimal RFC-822 style message.
BODY = string.join((
    "From: %s" % FROM,
    "To: %s" % TO,
    "Subject: %s" % SUBJECT ,
    "",
    output
    ), "\r\n")
server = smtplib.SMTP(HOST)
server.sendmail(FROM, [TO], BODY)
server.quit()
| StarcoderdataPython |
3269849 | <filename>src/puzzle/steps/image/prepare_image.py
import numpy as np
from data.image import image
from puzzle.constraints.image import prepare_image_constraints
from puzzle.steps.image import _base_image_step
class PrepareImage(_base_image_step.BaseImageStep):
    """Pipeline step that applies normalize / invert / crop / grayscale /
    enhance transforms to an image, as enabled by its constraints."""

    # Constraints object captured at construction time.
    _prepare_image_constraints: prepare_image_constraints.PrepareImageConstraints

    def __init__(
        self,
        source: image.Image,
        constraints: prepare_image_constraints.PrepareImageConstraints) -> None:
        super(PrepareImage, self).__init__(source, constraints=(constraints,))
        self._prepare_image_constraints = constraints

    def _modify_result(self, result: image.Image) -> image.Image:
        """Apply each enabled transform to `result` in a fixed order.

        NOTE(review): the transforms appear to mutate `result` in place
        (image.Image methods); the order — normalize, invert, crop,
        grayscale, enhance — looks significant, so do not reorder.
        """
        constraints = self._prepare_image_constraints
        if constraints.normalize:
            result.normalize()
        if constraints.invert:
            result.invert()
        if constraints.crop is not None:
            # Crop takes the border color to trim, as a numpy array.
            color = np.array(constraints.crop)
            result.crop(color)
        if constraints.grayscale:
            result.grayscale()
        if constraints.enhance:
            result.enhance()
        return result
| StarcoderdataPython |
1604891 | <reponame>iRiziya/swift
class RC4:
    """Minimal RC4 stream cipher (KSA + PRGA) used by the benchmark below.

    Portability fix: the original used ``xrange``, which does not exist on
    Python 3; ``range`` behaves identically here and also works on Python 2.
    """

    def __init__(self):
        # S-box permutation plus the two PRGA indices.
        self.state = [0] * 256
        self.I = 0
        self.J = 0

    def init(self, key):
        """Key-scheduling algorithm: permute the S-box using `key` (a str)."""
        for i in range(256):
            self.state[i] = i
        j = 0
        for i in range(256):
            K = ord(key[i % len(key)])
            S = self.state[i]
            j = (j + S + K) % 256
            self.swapByIndex(i, j)

    def swapByIndex(self, i, j):
        """Exchange the S-box entries at positions i and j."""
        self.state[i], self.state[j] = self.state[j], self.state[i]

    def next(self):
        """Pseudo-random generation algorithm: return the next keystream byte."""
        self.I = (self.I + 1) % 256
        self.J = (self.J + self.state[self.I]) % 256
        self.swapByIndex(self.I, self.J)
        return self.state[(self.state[self.I] + self.state[self.J]) % 256]

    def encrypt(self, data):
        """XOR the list of byte values `data` with the keystream, in place."""
        for i in range(len(data)):
            data[i] = data[i] ^ self.next()
def benchRC4_internal(messageLen, iterations):
    """Encrypt a messageLen-byte buffer `iterations` times with RC4.

    The buffer is filled by repeating a fixed secret string.  Portability
    fix: uses ``range`` instead of the Python-2-only ``xrange``.
    """
    Secret = "This is my secret message"
    Key = "This is my key"
    LongData = [0] * messageLen
    for i in range(messageLen):
        LongData[i] = ord(Secret[i % len(Secret)])
    Enc = RC4()
    Enc.init(Key)
    for i in range(iterations):
        Enc.encrypt(LongData)

# Benchmark entry point: 100k passes over a 5000-byte message.
benchRC4_internal(5000, 100000)
| StarcoderdataPython |
3311507 | from authorizations.cookie.entity import Cookie
from .token_object import TokenObject
class CookieObject(TokenObject):
    """Token-object specialisation that produces ``Cookie`` entities.

    All creation routines are inherited from ``TokenObject``; this subclass
    only swaps the entity class those routines instantiate.
    """
    # Entity type used by the inherited TokenObject factory routines.
    _entity_class = Cookie
| StarcoderdataPython |
68843 | <reponame>ulope/matrix-python-sdk<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Matrix Python SDK documentation build configuration file, created by
# sphinx-quickstart on Tue May 3 14:25:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# Make the SDK package (two directories up) importable so autodoc can find it.
srcdir = os.path.abspath('../../')
sys.path.insert(0, srcdir)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.  napoleon parses Google/NumPy-style docstrings for autodoc.
extensions = [
    'sphinx.ext.viewcode',
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Matrix Python SDK'
copyright = '2016, matrix.org'
author = 'matrix.org'
# NOTE(review): version and release disagree ('0.2.0-dev' vs '0.1.0') —
# confirm which one is intended before a release build.
version = '0.2.0-dev'
release = '0.1.0'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = False
# Use the Read the Docs theme (imported at the top of this file).
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = 'Matrix Python SDK v' + version
# html_static_path = ['_static']
htmlhelp_basename = 'MatrixPythonSDKdoc'
highlight_language = 'python'
# Output targets: LaTeX/PDF, man page, and Texinfo.
latex_documents = [
    (master_doc, 'MatrixPythonSDK.tex', 'Matrix Python SDK Documentation',
     'matrix.org', 'manual'),
]
man_pages = [
    (master_doc, 'matrixpythonsdk', 'Matrix Python SDK Documentation',
     [author], 1)
]
texinfo_documents = [
    (master_doc, 'MatrixPythonSDK', 'Matrix Python SDK Documentation',
     author, 'MatrixPythonSDK', 'SDK for writing Matrix Clients in Python',
     'Miscellaneous'),
]
| StarcoderdataPython |
191408 | <reponame>af765/Flood-Warning-System<gh_stars>0
# Copyright (C) 2018 <NAME>
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
from os import access
from .utils import sorted_by_key # noqa
from haversine import haversine
import plotly.graph_objects as go
from numpy import average
def stations_by_distance(stations, p):
    """Return (station, distance) tuples for every station, where distance is
    the haversine distance from the point p, sorted by increasing distance."""
    pairs = [(station, haversine(station.coord, p)) for station in stations]
    return sorted_by_key(pairs, 1)
def stations_within_radius(stations, centre, r):
    """Return an alphabetically sorted list of the names of all stations lying
    strictly closer than r to the point centre (haversine distance)."""
    names = [
        station.name
        for station in stations
        if haversine(station.coord, centre) < r
    ]
    names.sort()
    return names
def rivers_with_stations(stations):
    """Return the rivers that have at least one monitoring station.

    Rivers appear in first-encountered order, matching the original
    behaviour.  Performance fix: duplicate detection is now O(1) per station
    (dict lookup) instead of the original O(n) list membership test.
    """
    # dict preserves insertion order (Python 3.7+), so fromkeys deduplicates
    # while keeping the first-seen ordering.
    return list(dict.fromkeys(station.river for station in stations))
def stations_by_river(stations):
    """Return a dict mapping each river name (key) to the alphabetically
    sorted list of names of the stations on it.

    Performance fix: built in a single pass over `stations`; the original
    re-scanned the full station list once per river (O(rivers * stations)).
    """
    river_dict = {}
    for station in stations:
        # setdefault gives first-seen key order, same as the original.
        river_dict.setdefault(station.river, []).append(station.name)
    for names in river_dict.values():
        names.sort()
    return river_dict
def rivers_by_station_number(stations, N):
    """Return N tuples of the form (river name, number of stations on it),
    sorted in decreasing order of station count.

    Rivers tied with the Nth river's count are also included, so the result
    may contain more than N entries.
    """
    river_map = stations_by_river(stations)
    riverNumber = [(river, len(names)) for river, names in river_map.items()]
    # Python's sort is stable, so equal counts keep their first-seen river
    # order — identical to the original implementation.
    riverNumber.sort(key=lambda item: item[1], reverse=True)
    # Include any rivers whose count ties the Nth river's count.
    extra = 0
    for i in range(N, len(riverNumber)):
        if riverNumber[i][1] == riverNumber[N - 1][1]:
            extra += 1
        else:
            break  # counts are sorted, so the first mismatch ends the ties
    return riverNumber[:N + extra]
def displayStationLocation(stations, type = "basic"):
    """Display a plotly mapbox showing the location of the given stations.

    Args:
        stations: list of MonitoringStation objects to plot.
        type: map style; see
            https://plotly.com/python/mapbox-layers/#base-maps-in-layoutmapboxstyle
            (parameter name kept for backward compatibility even though it
            shadows the builtin).

    Stations missing either coordinate are skipped.  The map is centred on
    the mean position of the plotted stations.
    """
    accessToken = "<KEY>"
    latitudes = []
    longitudes = []
    labels = []
    for station in stations:
        # Idiom fix: compare against None with `is not None`, not `!= None`.
        if station.coord[0] is not None and station.coord[1] is not None:
            latitudes.append(station.coord[0])
            longitudes.append(station.coord[1])
            labels.append("Station Name: {}\n River: {}".format(station.name, station.river))
    # Initial viewport: centre the map on the mean coordinates.
    initialLongitude = average(longitudes)
    initialLatitude = average(latitudes)
    fig = go.Figure(go.Scattermapbox(
        lat=latitudes, lon=longitudes, mode='markers',
        marker=go.scattermapbox.Marker(size=9), text=labels))
    fig.update_layout(
        mapbox_style=type, autosize=True, hovermode='closest',
        mapbox=dict(accesstoken=accessToken, bearing=0,
                    center=dict(lat=initialLatitude, lon=initialLongitude),
                    pitch=0, zoom=7))
    fig.show()
def stationObjectsByRiver(stations, rivers):
    """Return the MonitoringStation objects that lie on any of the given rivers.

    The result is grouped by river, in the order the rivers are listed, and
    within each river in the order the stations are listed."""
    return [
        station
        for river in rivers
        for station in stations
        if station.river == river
    ]
| StarcoderdataPython |
4835768 | <reponame>protodave/CriticMarkup-toolkit<filename>Sublime Text Package/Critic Markup/accept_critic.py
import sublime, sublime_plugin
import re
class AcceptCriticCommand(sublime_plugin.TextCommand):
    """Sublime Text command that accepts or rejects CriticMarkup edits found
    inside the current selection(s): additions {++..++}, deletions {--..--}
    and substitutions {~~old~>new~~}.

    NOTE(review): begin_edit()/end_edit() is the Sublime Text 2 edit API —
    this plugin presumably predates ST3; confirm before porting.
    """
    def run(self, edit):
        """Show an Accept/Reject quick panel; the chosen index is passed to
        process_critic."""
        self.options = ['Accept', 'Reject']
        # Need to find scope limits then do regex find within current scope
        self.view.window().show_quick_panel(self.options, self.process_critic, sublime.MONOSPACE_FONT)
    def process_critic(self, choice):
        """Apply the chosen action (0 = accept, 1 = reject) to every
        non-empty selection that contains a CriticMarkup pattern."""
        # Choice 0 is accept
        sels = self.view.sel()
        caret = []
        # One pattern per CriticMarkup edit kind; (?s) makes '.' span newlines.
        add_edit = re.compile(r'(?s)\{\+\+(.*?)\+\+[ \t]*(\[(.*?)\])?[ \t]*\}')
        del_edit = re.compile(r'(?s)\{\-\-(.*?)\-\-[ \t]*(\[(.*?)\])?[ \t]*\}')
        # Substitution: captures the 'original' text (before ~>) and the
        # 'new' text (after), tolerating lone '~' characters inside either.
        sub_edit = re.compile(r'''(?s)\{\~\~(?P<original>(?:[^\~\>]|(?:\~(?!\>)))+)\~\>(?P<new>(?:[^\~\~]|(?:\~(?!\~\})))+)\~\~\}''')
        for sel in sels:
            text = self.view.substr(sel)
            # If something is selected...
            if len(text) > 0:
                a = add_edit.search(text)
                d = del_edit.search(text)
                s = sub_edit.search(text)
                # ST2 API: group buffer modifications into one undoable edit.
                edit = self.view.begin_edit()
                if choice == 0:
                    # Accept: keep additions, drop deletions, use new text.
                    if a:
                        self.view.replace(edit, sel, a.group(1))
                    if d:
                        self.view.erase(edit, sel)
                    if s:
                        self.view.replace(edit, sel, s.group('new'))
                    #if m.group(2)
                    # ... turn the selected text into the link text
                    #view.replace(edit, sel, "[{0}][{1}]".format(text, title))
                # Reject
                elif choice == 1:
                    # Reject: drop additions, keep deletions, use old text.
                    if a:
                        self.view.erase(edit, sel)
                    if d:
                        self.view.replace(edit, sel, d.group(1))
                    if s:
                        self.view.replace(edit, sel, s.group('original'))
                self.view.end_edit(edit)
| StarcoderdataPython |
24106 | import sys
from collections import defaultdict
import torch
from varclr.utils.infer import MockArgs
from varclr.data.preprocessor import CodePreprocessor
if __name__ == "__main__":
    # Load precomputed variable-name embeddings: argv[2] is a torch checkpoint
    # holding the variable list and the embedding matrix.
    ret = torch.load(sys.argv[2])
    vars, embs = ret["vars"], ret["embs"]
    # L2-normalise rows so a dot product is cosine similarity.
    embs /= embs.norm(dim=1, keepdim=True)
    embs = embs.cuda()
    var2idx = dict([(var, idx) for idx, var in enumerate(vars)])
    processor = CodePreprocessor(MockArgs())
    # Cut-offs at which Recall@K is reported.
    Ks = [1, 5, 10, 25, 50, 100, 250, 500, 1000]
    topk_succ = defaultdict(int)
    tot = 0

    def canon(var):
        """Canonicalise a variable name to camelCase of its preprocessed words.

        Hoisted out of the loop (the original re-defined it per line).
        """
        return "".join(
            [
                word.capitalize() if idx > 0 else word
                for idx, word in enumerate(processor(var).split())
            ]
        )

    # argv[1]: text file with one whitespace-separated variable pair per line.
    with open(sys.argv[1], "r") as f:
        for line in f:
            try:
                var1, var2 = line.strip().split()
            except ValueError:
                # Bug fix: without `continue` the loop fell through and reused
                # var1/var2 from the previous iteration (or crashed unbound on
                # the first line).  Also fixes the "skpped" message typo.
                print("skipped: ", line)
                continue
            var1, var2 = canon(var1), canon(var2)
            if var1 not in var2idx or var2 not in var2idx:
                print(f"variable {var1} or {var2} not found")
                continue
            tot += 1
            for k in Ks:
                # k+1 neighbours: the closest hit is the query itself, which
                # the [1:] below discards.
                result = torch.topk(embs @ embs[var2idx[var1]], k=k + 1)
                topk_succ[k] += var2 in [vars[idx] for idx in result.indices][1:]
    # NOTE(review): tot == 0 (no usable pairs) would divide by zero below.
    print(f"Total {tot} variable pairs")
    for k in Ks:
        print(f"Recall@{k} = {100 * topk_succ[k] / tot:.1f}")
| StarcoderdataPython |
1639344 | <gh_stars>0
from threading import Lock
# The Lock class supports the `with` statement.
lock = Lock()
with lock:
    print('Lock is held 拿到锁了')
# Equivalent explicit try/finally form of the acquire/release pairing above.
lock.acquire()
try:
    print('Lock is held 拿到锁了')
finally:
    lock.release()
| StarcoderdataPython |
3345931 | #! /usr/bin/env python3
import requests
import csv
import argparse
import getpass
from datetime import datetime
from datetime import timedelta
import urllib3
from os import path
from os import getcwd
import numpy as np
import time
import json
from jsonpath_ng import jsonpath, parse
# csv mapping config — one row per output CSV column:
# - Header title
# - a lambda for transforming data (occured_at, roti_data, roti_value) -> cell,
#   or None to use the selected value as-is
# - a jsonpath expression for selecting a value from the process document,
#   or None when the lambda computes the cell itself
csv_mapping = np.array([
    ["Time s", lambda occured_at, roti_data, roti_value: int(round(occured_at.total_seconds(),0)), None],
    ["PressureAct mbar", None, '$.vacuum.act'],
    ["PressureSet", None, '$.vacuum.set'],
    ["BathAct", None, '$.heating.act'],
    ["BathSet", None, '$.heating.set'],
    ["ChillerAct", None, '$.cooling.act'],
    ["ChillerSet", None, '$.cooling.set'],
    ["Rotation rpm", None, '$.rotation.act'],
    ["Vapor", None, '$.vacuum.vaporTemp'],
    ["AutoDestIn", None, '$.vacuum.autoDestIn'],
    ["AutoDestOut", None, '$.vacuum.autoDestOut'],
    ["AutoDestDiff", lambda occured_at, roti_data, roti_value: round(roti_data['vacuum']['autoDestOut'] - roti_data['vacuum']['autoDestIn'], 2), None],
    ["LiftAct", None, '$.lift.act'],
    ["LiftEnd", None, '$.lift.limit'],
    ["Hold", None, '$.globalStatus.onHold'],
    ["Foam control", None, '$.globalStatus.foamActive'],
    ["PumpAct[0.1%]", None, '$.vacuum.powerPercentAct'],
    ["VacOpen", None, '$.vacuum.vacuumValveOpen']
])
csv_dialect = csv.excel # see https://docs.python.org/3/library/csv.html#dialects-and-formatting-parameters
timestamp_after_csvheader = "%d.%m.%Y %H:%M" # set this to None if you don't want this second header line
missing_value_char = '*' #set this to None for an empty cell
def build_filepath(folder, device_name, started_at):
    """Build the output CSV path ``<folder>/<device>-<timestamp>.csv`` for a
    recording that started at `started_at` (a datetime)."""
    timestamp = started_at.strftime("%Y-%m-%dT%H%M%S-%f")
    return path.join(folder, f"{device_name}-{timestamp}.csv")
def get_value(occured_at, roti_data, transform, jsonp):
    """Extract one CSV cell value from the device's process-data document.

    Args:
        occured_at: timedelta since the recording started.
        roti_data: decoded JSON process document.
        transform: optional callable (occured_at, roti_data, roti_value) -> cell.
        jsonp: optional JSONPath expression selecting roti_value from roti_data.

    Returns the transformed/selected value, or `missing_value_char` when the
    value is absent or any lookup/transform step fails — every row must have
    a cell in every column so the CSV stays aligned.
    """
    try:
        roti_value = None
        if jsonp is not None:
            jsonpath_expression = parse(jsonp)
            roti_value = jsonpath_expression.find(roti_data)[0].value
        if transform is not None:
            return transform(occured_at, roti_data, roti_value)
        return roti_value if roti_value is not None else missing_value_char
    # Idiom fix: catch Exception instead of a bare `except:`, which would
    # also swallow KeyboardInterrupt/SystemExit.  Broad on purpose: any
    # failure simply yields the missing-value marker.
    except Exception:
        return missing_value_char
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Remotely logs rotavapor process data into a CSV file. The format is the same as the I-300pro writes to its SD card.')
    parser.add_argument('host', metavar='host', type=str, help='host or IP of rotavapor')
    # Bug fix: the --user help text was a copy-paste of the password help.
    parser.add_argument('-u', '--user', type=str, help='user name', default='rw')
    parser.add_argument('-p', '--password', type=str, help='device password', required=False)
    parser.add_argument('-f', '--folder', type=str, help='destination folder for the csv files', default=getcwd())
    parser.add_argument('-c', '--cert', type=str, help="root cert file", default="root_cert.crt")
    args = parser.parse_args()

    # ask for password if it wasn't passed in as command line argument.
    # Bug fix: this line had been mangled to "<PASSWORD>()"; prompt without
    # echoing via getpass (imported at the top of the file).
    if args.password is None:
        args.password = getpass.getpass()
    auth = (args.user, args.password)

    base_url = f"https://{args.host}/api/v1"
    info_endpoint = base_url + "/info"
    process_endpoint = base_url + "/process"
    # Bug fix: honour the --cert option; the original hard-coded
    # "root_cert.crt" here, making -c a silent no-op.
    rootcert = args.cert

    # configure http client session
    session = requests.Session()
    session.auth = auth
    if path.isfile(rootcert):
        session.verify = rootcert
    else:
        # Also fixes the "certiicate" typo in the message.
        print("Root certificate missing. Disabling certificate checks...")
        session.verify = False
        urllib3.disable_warnings()

    # verify that this is a Rotavapor
    info_resp = session.get(info_endpoint)
    if info_resp.status_code != 200:
        raise Exception("Unexpected status code when getting device info", info_resp.status_code)
    info_msg = info_resp.json()
    system_name = info_msg["systemName"]
    print(f"Connected to {system_name}")
    if info_msg["systemClass"] != "Rotavapor":
        raise Exception("This is not a Rotavapor")

    # Poll once per second forever: open a CSV file when a run starts,
    # append one row per poll, and close the file when the run stops.
    started_at = None
    poll_at = datetime.now()
    current_file = None
    current_file_writer = None
    is_recording = False
    while True:
        # read process data
        proc_resp = session.get(process_endpoint)
        if proc_resp.status_code != 200:
            raise Exception("Unexpected status code when polling process data", proc_resp.status_code)
        proc_msg = proc_resp.json()
        is_running = proc_msg["globalStatus"]["running"]

        # check whether we need to start or stop the recording
        if is_running and not is_recording:
            # start a new file named after the device and the start time
            started_at = poll_at
            csvpath = build_filepath(args.folder, system_name, started_at)
            current_file = open(csvpath, 'w+', newline='')
            current_file_writer = csv.writer(current_file, dialect=csv_dialect)
            # write header (first column of every csv_mapping row)
            current_file_writer.writerow(csv_mapping[:, 0])
            if timestamp_after_csvheader is not None:
                current_file_writer.writerow([started_at.strftime(timestamp_after_csvheader)])
            is_recording = True
        if not is_running and is_recording:
            # close file
            current_file.close()
            current_file = None
            is_recording = False

        # add current data (if there is an open file)
        if current_file is not None:
            occured_at = poll_at - started_at
            current_file_writer.writerow([get_value(occured_at, proc_msg, m[1], m[2]) for m in csv_mapping])

        # delay execution so that we poll once every second
        poll_at = poll_at + timedelta(seconds=1)
        sleep_for = (poll_at - datetime.now()).total_seconds()
        if sleep_for > 0:
            time.sleep(sleep_for)
3225191 | """
"""
from yoyo import step
__depends__ = {'20210629_03_kqMH9'}
# Single forward migration: add a nullable integer "position" column to the
# news table (used for manual ordering).  No rollback step is defined.
steps = [
    step("ALTER TABLE news ADD COLUMN position INTEGER")
]
| StarcoderdataPython |
1714681 | '''
Information on available virtual machine images.
'''
from ... pyaz_utils import _call_az
from . import terms
def list_offers(location, publisher, edge_zone=None):
    '''
    List the VM image offers available in the Azure Marketplace.

    Required Parameters:
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - publisher -- image publisher

    Optional Parameters:
    - edge_zone -- The name of edge zone.
    '''
    # locals() captures exactly this function's arguments; _call_az converts
    # them into CLI flags — do not introduce extra local variables here.
    return _call_az("az vm image list-offers", locals())
def list_publishers(location, edge_zone=None):
    '''
    List the VM image publishers available in the Azure Marketplace.

    Required Parameters:
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.

    Optional Parameters:
    - edge_zone -- The name of edge zone.
    '''
    # locals() forwards the arguments as CLI flags (see list_offers).
    return _call_az("az vm image list-publishers", locals())
def list_skus(location, offer, publisher, edge_zone=None):
    '''
    List the VM image SKUs available in the Azure Marketplace.

    Required Parameters:
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - offer -- image offer
    - publisher -- image publisher

    Optional Parameters:
    - edge_zone -- The name of edge zone.
    '''
    # locals() forwards the arguments as CLI flags (see list_offers).
    return _call_az("az vm image list-skus", locals())
def list(all=None, edge_zone=None, location=None, offer=None, publisher=None, sku=None):
    '''
    List the VM/VMSS images available in the Azure Marketplace.

    Optional Parameters:
    - all -- None
    - edge_zone -- The name of edge zone.
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - offer -- image offer
    - publisher -- image publisher
    - sku -- image sku
    '''
    # NOTE: the name `list` mirrors the `az vm image list` subcommand and
    # intentionally shadows the builtin within this module.
    return _call_az("az vm image list", locals())
def accept_terms(offer=None, plan=None, publisher=None, urn=None):
    '''
    Accept Azure Marketplace term so that the image can be used to create VMs

    Optional Parameters:
    - offer -- image offer
    - plan -- image billing plan
    - publisher -- image publisher
    - urn -- URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted
    '''
    # locals() forwards the arguments as CLI flags (see list_offers).
    return _call_az("az vm image accept-terms", locals())
def show(edge_zone=None, location=None, offer=None, publisher=None, sku=None, urn=None, version=None):
    '''
    Get the details for a VM image available in the Azure Marketplace.

    Optional Parameters:
    - edge_zone -- The name of edge zone.
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - offer -- image offer
    - publisher -- image publisher
    - sku -- image sku
    - urn -- URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted
    - version -- image sku's version
    '''
    # locals() forwards the arguments as CLI flags (see list_offers).
    return _call_az("az vm image show", locals())
3354818 |
from sax import SAX
from sequitur import Grammar
class Motif(object):
    """Identify repeated motifs in a time series.

    The series is discretised with SAX (sliding windows -> symbolic words)
    and the word sequence is compressed with Sequitur; each grammar rule then
    corresponds to a repeated pattern (motif) in the original series.
    """
    def __init__(self, timeseries, windowSize, wordSize, alphabetSize):
        """Store the raw series and the SAX discretisation parameters."""
        self.timeseries = timeseries
        self.windowSize = windowSize
        self.wordSize = wordSize
        self.alphabetSize = alphabetSize
    def buildMotifs(self):
        """Run SAX + Sequitur; must be called before getRules()/getMotif()."""
        s = SAX(self.wordSize, self.alphabetSize)
        self.saxterms = s.sliding_window(self.timeseries, self.windowSize)
        self.grammar = Grammar()
        self.grammar.train_string(self.saxterms)
        self.myrules = self.grammar.getRules()
    def getRules(self):
        """Return the Sequitur rules produced by buildMotifs()."""
        return self.myrules
    def getMotif(self, ruleID):
        """Return [start, end] spans in the original series, one per
        occurrence of the rule `ruleID`.

        Each occurrence starts where its first SAX term starts and ends where
        the term at position + len(rule.expanded) - 1 ends.
        """
        motif = []
        for k in self.myrules[ruleID].positions:
            # Idiom fix: identity comparison (`k is not None`) instead of the
            # original `not k == None`.  None entries are skipped — presumably
            # placeholders left by the grammar; confirm with the Sequitur
            # implementation.
            if k is not None:
                motif.append([self.saxterms[k].getSpan()[0],
                              self.saxterms[k + len(self.myrules[ruleID].expanded) - 1].getSpan()[1]])
        return motif
# noinspection PyPep8Naming
def solution(X, A):
    """Codility FrogRiverOne: return the earliest second at which leaves
    cover every position 1..X (A[t] is the position a leaf lands on at
    second t), or -1 if they never do.

    Fixes two defects in the original:
    * ``T_fall[x] or t`` treated a fall time of 0 as "no fall yet" (0 is
      falsy), so a leaf landing at second 0 could be overwritten by a later
      duplicate — e.g. solution(1, [1, 1]) returned 1 instead of 0.
    * ``max(t, None)`` raises TypeError on Python 3; the running maximum is
      now seeded with -1 instead of None.
    """
    # first_fall[x]: the first second a leaf lands on position x (index 0 unused).
    first_fall = [None] * (X + 1)
    for t, x in enumerate(A):
        if first_fall[x] is None:
            first_fall[x] = t
    t_max = -1
    for t in first_fall[1:]:
        if t is None:
            # Some position is never covered.
            return -1
        t_max = max(t, t_max)
    return t_max
assert solution(5, [1, 3, 1, 4, 2, 3, 5, 4]) == 6
| StarcoderdataPython |
7384 | import sbol2
import pandas as pd
import os
import logging
from openpyxl import load_workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, PatternFill, Border, Side
from requests_html import HTMLSession
#wasderivedfrom: source
#remove identity, persistenID, displayID, version
#remove attachment (if empty)
#add library sheets
#add postprocessing function to remove unecessaries
class seqFile:
def __init__(self, file_path_in, output_path):
# global varibales for homespace, document, and sheet
self.homeSpace = 'https://sys-bio.org'
self.document = file_path_in
self.file_location_path = os.path.dirname(__file__)
self.sheet = os.path.join(self.file_location_path, 'ontologies.xlsx')
self.output_template = os.path.join(self.file_location_path, 'Template_to_Output_Into_v001.xlsx')
self.output_path = output_path
def roleVariables(self):
# set Excel file into a dataframe
df = pd.read_excel(self.sheet, index_col=0,
sheet_name=1, usecols=[1, 2])
# convert the dataframe into a dictionary
roleConvertDict = df.to_dict()
# set dictionary indices and values (use column 'URI' in excel sheet)
roleName = roleConvertDict['URI']
# switch indices' and values' postions
roleDictionary = {uri: role for role, uri in roleName.items()}
return roleDictionary
def orgVariables(self):
# set Excel file into a dataframe
df = pd.read_excel(self.sheet, index_col=0,
sheet_name=2, usecols=[0, 1])
# convert the dataframe into a dictionary
organismConvertDict = df.to_dict()
# set dictionary indices and values (use column 'txid' in excel sheet)
organismName = organismConvertDict['txid']
# switch indices' and values' postions
organismDictionary = {str(txid): organism for organism, txid in organismName.items()}
return organismDictionary
# def inspectDocInfo(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document information
# print(doc)
# def printDocContents(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document contents
# for obj in doc:
# print(obj)
def readDocChart(self):
# declare homespace
sbol2.setHomespace(self.homeSpace)
doc = sbol2.Document()
doc.read(self.document)
# create a dictionary to hold all the component defintions' information
componentDefinitions = {}
# iterate through the component definitions
roleDict = self.roleVariables()
orgDict = self.orgVariables()
for cd in doc.componentDefinitions:
cdType = cd.type
# create a dictionary that has a key for the
# component definition's identity,
# and a value for all of its features
componentFeatures = {}
persistentIdentity = cd.properties['http://sbols.org/v2#persistentIdentity'][0]
# iterate through the properties of the component defintions
# and set them equal to propValue variable
for prop in cd.properties:
try:
propValue = cd.properties[prop][0]
except (IndexError):
propValue = cd.properties[prop]
# extract attribute property type
if propValue == []:
propValue = ''
prop = self.prop_convert(prop)
propValue = columnMethods(prop, propValue, doc, cdType,
roleDict, orgDict).colV
componentFeatures[prop] = str(propValue)
# append each componentFeatures dictionary as a
# value into the componentDefinitions
# dictionary with the 'persistentIdentity' serving as the key
componentDefinitions[persistentIdentity] = componentFeatures
# return the dictionary of information (temporary, maybe
# return true if read in correctly)
doc_chart = pd.DataFrame.from_dict(componentDefinitions, orient="index")
return doc_chart
def prop_convert(self, prop):
if type(prop) is str:
idx = prop.find('#')
# if parsing conditions meet, append them into the
# componentFeatures dictionary as necessary
if idx >= 1:
prop = prop[idx + 1:]
if prop == 'type':
prop = 'types'
if prop == 'http://purl.org/dc/terms/title':
prop = 'title'
if prop == 'http://purl.org/dc/terms/description':
prop = 'description'
if prop == 'http://purl.obolibrary.org/obo/OBI_0001617':
prop = 'OBI_0001617'
return (prop)
else:
raise ValueError()
def displayDocChart(self):
#display the dataframe
return pd.DataFrame.from_dict(self.readDocChart(), orient = "index")
def TEMP_readDocChart1(self):
#demo of table column names
columnNames = ['Part Name',
'Role',
'Design Notes',
'Altered Sequence',
'Part Description',
'Data Source Prefix',
'Data Source',
'Source Organism',
'Target Organism',
'Circular',
'length (bp)',
'Sequence',
'Data Source',
'Composite']
#import dataframe dictionary
#convert dictionary to dataframe
df = self.displayDocChart()
#type caste dataframe to a set
dfSet = set(df)
#type caste column names to a set
columnNameOrder = set(columnNames)
#check difference between the dataframe set and the column name order
dfSetDifference = dfSet.difference(columnNameOrder)
#check intersection between the datframe set and the column name order
dfSetIntersection = dfSet.intersection(columnNameOrder)
#combine the type casted difference and intersection
finalSetList = list(dfSetIntersection) + list(dfSetDifference)
#set list to dictionary
return finalSetList
# def displayDocChart(self):
# # display the dataframe
# return pd.DataFrame.from_dict(self.readDocChart(), orient="index")
def columnString(self, n):
# loop through column length in order to get string appropriate
# values for excel sheet rows and columns
string = ""
while n > 0:
n, remainder = divmod(n - 1, 26)
string = chr(65 + remainder) + string
return string
def returnExcelChart(self):
start_row = 18
start_cell = f'A{start_row}'
# load a workbook
wb = load_workbook(self.output_template)
ws = wb.active
# load raw dataframe to df
df = self.readDocChart()
# set font features
ft1 = Font(name='Arial', size=12, color='548235')
ft2 = Font(name='Calibri', size=11, bold=True)
hold = dataframe_to_rows(df, index=False, header=True)
# counter = 0
# loop through worksheet
ws[start_cell].value = ''
for r in hold:
# if a specific cell is empty, continue to loop past it
if r == [None]:
continue
ws.append(r)
# counter += 1
# set table features
tab = Table(displayName="Parts_Lib", ref=f"A{start_row +1}:{self.columnString(len(df.columns))}{(len(df) * 2) - 2}")
style = TableStyleInfo(name="TableStyleLight7", showFirstColumn=False,
showLastColumn=False, showRowStripes=True,
showColumnStripes=False)
cellColor = PatternFill(patternType='solid',
fgColor='DDEBF7')
cellBorder = Side(border_style='medium', color="000000")
# cellIndex = len(x)
# gives cells within specified range their table attributes
for col in range(1, len(df.columns) + 1):
alpha = self.columnString(col)
ws[f'{alpha}{start_row+1}'].fill = cellColor
ws[f'{alpha}{start_row+1}'].border = Border(top=cellBorder)
tab.tableStyleInfo = style
ws.add_table(tab)
# counter = 0
# gives cells within specified range their font attributes
for row in range(len(df) - 1, (len(df) * 2 - 1)):
# counter = counter + 1
for cell in ws[row]:
cell.font = ft1
# gives cells within specified range their font attributes
# (these are special features for the title)
num_rows = len(df)
if num_rows % 2 > 0:
num_rows = num_rows - 1
for j in range(19, num_rows):
for x in ws[j]:
x.font = ft2
# output the file
wb.save(self.output_path)
wb.close()
logging.warning(f'Your converted file has been output at {self.output_path}')
class columnMethods:
def __init__(self, colN, colV, doc, cdType, roleDict, orgDict):
# global varibales for dataframe switch statements
self.colN = colN
self.colV = colV
self.doc = doc
self.cdType = cdType
self.roleDict = roleDict
self.orgDict = orgDict
# if the column name matches the function name, call the function
try:
return getattr(self, self.colN)()
# if the column name does not match the function name, call 'no_change'
except AttributeError:
return getattr(self, 'no_change')()
def no_change(self):
pass
# if the specified column role value is within the role column
def role(self):
roleVal = str(self.colV)
if roleVal in self.roleDict:
self.colV = self.roleDict[roleVal]
def types(self):
self.colV = self.colV.split('#')[-1]
def sequence(self):
self.colV = self.doc.getSequence(self.colV).elements
def sourceOrganism(self):
orgVal = str(self.colV)
orgVal = orgVal.split('=')[-1]
txid = self.colV.split('=')[-1]
if orgVal in self.orgDict:
self.colV = self.orgDict[orgVal]
else:
session = HTMLSession()
r = session.get(self.colV)
v = r.html.find('strong', first=True)
self.colV = v.text
self.orgDict[txid] = self.colV
def targetOrganism(self):
orgVal = str(self.colV)
orgVal = orgVal.split('=')[-1]
txid = self.colV.split('=')[-1]
if orgVal in self.orgDict:
self.colV = self.orgDict[orgVal]
else:
session = HTMLSession()
r = session.get(self.colV)
v = r.html.find('strong', first=True)
self.colV = v.text
self.orgDict[txid] = self.colV
| StarcoderdataPython |
22559 | <filename>src/plotComponents2D.py
import matplotlib.pyplot as plt
import numpy as np
def plotComponents2D(X, y, labels, use_markers = False, ax=None, legends = None, tags = None):
if X.shape[1] < 2:
print('ERROR: X MUST HAVE AT LEAST 2 FEATURES/COLUMNS! SKIPPING plotComponents2D().')
return
# Gray shades can be given as a string encoding a float in the 0-1 range
colors = ['0.9', '0.1', 'red', 'blue', 'black','orange','green','cyan','purple','gray']
markers = ['o', 's', '^', 'D', 'H', 'o', 's', '^', 'D', 'H', 'o', 's', '^', 'D', 'H', 'o', 's', '^', 'D', 'H']
if (ax is None):
fig, ax = plt.subplots()
i=0
if (labels is None):
labels = set(y)
for label in labels:
cluster = X[np.where(y == label)]
# print(cluster.shape)
if use_markers:
ax.scatter([cluster[:,0]], [cluster[:,1]],
s=40,
marker=markers[i],
facecolors='none',
edgecolors=colors[i+3],
label= (str(legends[i]) if legends is not None else ("Y = " + str(label))) )
else:
ax.scatter([cluster[:,0]], [cluster[:,1]],
s=70,
facecolors=colors[i],
label= (str(legends[i]) if legends is not None else ("Y = " + str(label))),
edgecolors = 'black',
alpha = .4) # cmap='tab20'
i=i+1
if (tags is not None):
for j,tag in enumerate(tags):
ax.annotate(str(tag), (X[j,0] + 0.1, X[j,1] - 0.1))
ax.legend()
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False) | StarcoderdataPython |
1632751 | <filename>src/codebase/synthetic/nino3.py
"""Generate synthetic streamflow sequences based on a NINO3 sequence
"""
import os
import numpy as np
import pandas as pd
import xarray as xr
from datetime import datetime
from .synthetic import SyntheticFloodSequence
from ..path import data_path
class NINO3Linear(SyntheticFloodSequence):
    """Synthetic streamflow generator whose log-mean depends linearly on a
    NINO3 index (NINO3 data from Ramesh et al., 2017).
    """

    def __init__(self, **kwargs) -> None:
        # Pull the model-specific parameters out of kwargs before handing
        # the remainder to the base class constructor.
        model_param = {
            "mu0": kwargs.pop("mu0"),
            "gamma": kwargs.pop("gamma", 0),
            "beta": kwargs.pop("beta", 0.5),
            "coeff_var": kwargs.pop("coeff_var", 0.1),
            "sigma_min": kwargs.pop("sigma_min", 0.01),
        }
        super().__init__(**kwargs)
        self.param.update(model_param)
        self.model_name = "NINO3"

    def _calculate_one(self) -> np.ndarray:
        """Draw one synthetic streamflow sequence covering M + N years."""
        np.random.seed(datetime.now().microsecond)

        # Pick a random contiguous NINO3 window long enough to span the
        # whole simulation period.
        nino3 = pd.read_csv(os.path.join(data_path, "ramesh2017.csv"), index_col="year")
        window = self.M + self.N
        start_year = np.random.choice(np.arange(nino3.index.max() - window))
        end_year = start_year + window - 1
        nino3_vals = nino3.loc[start_year:end_year]["nino3"].values

        # Log-mean is linear in both time (trend term) and the NINO3 index.
        p = self.param
        mu = (
            p.get("mu0")
            + p.get("gamma") * self._get_time(period="all")
            + p.get("beta") * nino3_vals
        )

        # Spread scales with the mean via a coefficient of variation,
        # clamped from below by sigma_min.
        sigma = p.get("coeff_var") * mu
        sigma[np.where(sigma < p.get("sigma_min"))] = p.get("sigma_min")

        return np.exp(np.random.normal(loc=mu, scale=sigma))
| StarcoderdataPython |
106534 | <filename>config.py
#! /usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import argparse
class AttrDict (dict):
    """Dictionary whose entries are also reachable as attributes."""

    def __getattr__(self, name):
        # Attribute reads fall back to item lookup (raises KeyError if absent,
        # matching dict.__getitem__ semantics).
        return self[name]

    def __setattr__(self, name, value):
        # Attribute writes become item writes.
        self[name] = value
# Hyper-parameters and runtime defaults for DQN training on Atari Pong.
config = AttrDict ({
    'environment' : 'Pong-v0',
    'batch_size' : 32,                  # SARS samples per update
    'gamma' : 0.99,                     # discount factor
    # Frame skipping is implemented in the Pong-v0 env
    'repeats_per_action' : 1,
    'actions_per_update' : 4,
    'epsilon_start' : 1.0,              # initial exploration probability
    'epsilon_rest' : 0.1,               # final (resting) exploration probability
    'epsilon_steps' : 100000,           # steps to anneal start -> rest
    'epsilon_greedy' : 0.00,            # exploration used by the greedy evaluation policy
    'replay_capacity' : 50000,          # total SARS held in replay memory
    'replay_start_size' : 5000,         # minimum replay size before updates begin
    'target_update_frequency' : 128,    # updates between target-network syncs
    'checkpoint_frequency' : 10000,
    'checkpoint_path' : '/tmp/atari/pong',
    'frame_height' : 84,
    'frame_width' : 84,
    'frame_channels' : 4,               # consecutive frames stacked as network input
    'optimizer' : 'Adam',
    'learning_rate' : 1e-4,
    'render' : False,
    'delay' : 0.0,
    # Defined beneath
    'frame_shape' : None,
    'stacked_shape' : None
})
# Derived shapes: (H, W) for a single frame, (H, W, C) for the stacked input.
config.frame_shape = (config.frame_height, config.frame_width)
config.stacked_shape = config.frame_shape + (config.frame_channels,)
def get_FLAGS (config):
    """Build the command-line interface from `config` defaults.

    Every entry in `_ARG_SPECS` becomes a `--flag-name` option whose default
    is read from the corresponding `config` attribute (dashes mapped to
    underscores). Returns `(FLAGS, unparsed)` from `parse_known_args`, with
    the derived `action_shape` / `frame_shape` / `stacked_shape` attributes
    attached and `delay` forced to 0.0 when not rendering.
    """
    # (option name, type, metavar, help) — defaults come from `config`.
    _ARG_SPECS = [
        ('environment', str, 'env', 'OpenAI gym environment.'),
        ('batch-size', int, 'n', 'Number of SARS to use in each update.'),
        ('gamma', float, 'x', 'Discount factor.'),
        ('learning-rate', float, 'x', 'Learning rate.'),
        ('optimizer', str, 'opt', 'Tensorflow.train optimizer to use.'),
        ('checkpoint-path', str, 'dir', 'Where to store model files.'),
        ('frame-height', int, 'n', 'Target height for input frames.'),
        ('frame-width', int, 'n', 'Target width for input frames.'),
        ('frame-channels', int, 'n', 'Number of consecutive frames to stack for network input.'),
        ('target-update-frequency', int, 'n', 'Number of updates between each synchronization between training and target networks.'),
        ('checkpoint-frequency', int, 'n', 'Number of updates between storing checkpoint on disk.'),
        ('epsilon-start', float, 'eps', 'Start value for exploration probability.'),
        ('epsilon-rest', float, 'eps', 'Stop value for exploration probability.'),
        ('epsilon-greedy', float, 'eps', 'Exploration value used in greedy-evaluation policy.'),
        ('epsilon-steps', int, 'n', 'Number of training steps before exploration probability reaches rest value.'),
        ('repeats-per-action', int, 'n', 'How many times an action is repeated before we get a new one from our policy.'),
        ('actions-per-update', int, 'n', 'How many actions we get from policy before each update.'),
        ('replay-capacity', int, 'n', 'Total number of SARS in replay memory.'),
        ('replay-start-size', int, 'n', 'Number of elements in replay memory before we start updating.'),
    ]

    parser = argparse.ArgumentParser()
    for name, arg_type, metavar, help_text in _ARG_SPECS:
        parser.add_argument(
            '--' + name, type=arg_type, metavar=metavar,
            default=getattr(config, name.replace('-', '_')),
            help=help_text,
        )
    # --render toggles the configured default rather than taking a value.
    parser.add_argument(
        '--render',
        action='store_false' if config.render else 'store_true',
        help='Render evaluation runs to screen.'
    )
    parser.add_argument(
        '--delay', type=float, metavar='t',
        default=config.delay,
        help='Seconds to delay between each frame in evaluation renders.'
    )

    FLAGS, unparsed = parser.parse_known_args()
    # Derived attributes used by the rest of the program.
    FLAGS.action_shape = ()
    FLAGS.frame_shape = (FLAGS.frame_height, FLAGS.frame_width)
    FLAGS.stacked_shape = FLAGS.frame_shape + (FLAGS.frame_channels,)
    # A frame delay only makes sense when actually rendering.
    FLAGS.delay = 0.0 if not FLAGS.render else FLAGS.delay
    return FLAGS, unparsed
| StarcoderdataPython |
3331246 | from services.run import handle_cron_request
from fastapi import FastAPI
from dotenv import load_dotenv
from starlette.responses import RedirectResponse
from DB.database import Base, engine, get_db
import uvicorn
from routes import image, user, auth, user_images
import os
from fastapi_utils.tasks import repeat_every
load_dotenv()  # pull environment variables (e.g. PORT) from a local .env file
# Create all ORM tables bound to this metadata if they do not exist yet.
Base.metadata.create_all(bind=engine)
app = FastAPI(title="sat-image-api",
              description="This api provide a service of downloading free Sentinel satelite images for free, and support 4 first platform of sentinel",
              version="1.0.0",)
@app.get("/")
def home_page():
return RedirectResponse(url="/redoc")
# Registering routes: each module exposes an APIRouter that is mounted here.
app.include_router(auth.router)
app.include_router(image.router)
app.include_router(user.router)
app.include_router(user_images.router)
@app.on_event("startup")
@repeat_every(seconds=60*60*24)
def remove_expired_tokens_task() -> None:
try:
handle_cron_request(db=get_db())
except Exception as ex:
print(ex)
if __name__ == '__main__':
    # os.getenv returns a string (or None when PORT is unset), but uvicorn
    # expects an integer port — convert, falling back to 8000.
    uvicorn.run('main:app', host='127.0.0.1', port=int(os.getenv("PORT", "8000")))
| StarcoderdataPython |
3216188 | <reponame>Harvard-Neutrino/phys145
import ROOT
import itertools
import Analysis
import AnalysisHelpers as AH
import Constants
#======================================================================
class WZAnalysis(Analysis.Analysis):
    """Analysis searching for the pair production of WZ with both boson decaying to leptons"""

    def __init__(self, store):
        super(WZAnalysis, self).__init__(store)

    def initialize(self):
        """Book all histograms filled by analyze()."""
        self.invMass = self.addStandardHistogram("invMass")
        self.WtMass = self.addStandardHistogram("WtMass")
        self.hist_leptn = self.addStandardHistogram("lep_n")
        self.hist_leptpt = self.addStandardHistogram("lep_pt")
        self.hist_lepteta = self.addStandardHistogram("lep_eta")
        self.hist_leptE = self.addStandardHistogram("lep_E")
        self.hist_leptphi = self.addStandardHistogram("lep_phi")
        self.hist_leptch = self.addStandardHistogram("lep_charge")
        self.hist_leptID = self.addStandardHistogram("lep_type")
        self.hist_leptptc = self.addStandardHistogram("lep_ptconerel30")
        self.hist_leptetc = self.addStandardHistogram("lep_etconerel20")
        self.hist_lepz0 = self.addStandardHistogram("lep_z0")
        self.hist_lepd0 = self.addStandardHistogram("lep_d0")
        self.hist_etmiss = self.addStandardHistogram("etmiss")
        self.hist_vxp_z = self.addStandardHistogram("vxp_z")
        self.hist_pvxp_n = self.addStandardHistogram("pvxp_n")

    def ZWindow(self, lep1, lep2):
        """Absolute distance of the dilepton invariant mass from the Z mass."""
        return abs((lep1.tlv() + lep2.tlv()).M() - Constants.Z_Mass)

    def TestWZCandidate(self, candidate):
        """Figure of merit for a candidate: smaller = dilepton mass closer to Z."""
        return self.ZWindow(candidate[0], candidate[1])

    def WZCandidate(self, leptons):
        """Return the (Z lepton 1, Z lepton 2, W lepton) permutation whose
        opposite-charge, same-flavour pair is closest to the Z mass, or None."""
        def isValidCandidate(lep1, lep2):
            # Z candidates must be opposite charge and same flavour.
            if lep1.charge() * lep2.charge() > 0: return False
            if abs(lep1.pdgId()) != abs(lep2.pdgId()): return False
            return True
        bestCandidate = None
        for p in itertools.permutations(leptons, 3):
            if not isValidCandidate(p[0], p[1]): continue
            if bestCandidate is None:
                bestCandidate = p
            if self.TestWZCandidate(p) < self.TestWZCandidate(bestCandidate):
                bestCandidate = p
        return bestCandidate

    def analyze(self):
        """Apply the WZ event selection and fill histograms; True if selected."""
        # retrieving objects
        eventinfo = self.Store.getEventInfo()
        weight = eventinfo.scalefactor() * eventinfo.eventWeight() if not self.getIsData() else 1
        # apply standard event based selection
        if not AH.StandardEventCuts(eventinfo): return False
        self.countEvent("EventCuts", weight)
        # Lepton Requirements: exactly three good leptons, sorted by pT
        goodLeptons = AH.selectAndSortContainer(self.Store.getLeptons(), AH.isGoodLepton, lambda p: p.pt())
        if not (len(goodLeptons) == 3): return False
        self.countEvent("3 high pt Leptons", weight)
        # find candidate for WZ system
        candidate = self.WZCandidate(goodLeptons)
        if candidate is None: return False;
        z1Lepton = candidate[0]
        z2Lepton = candidate[1]
        wLepton = candidate[2]
        etmiss = self.Store.getEtMiss()
        # test candidate for WZ system
        # NOTE(review): with the -999 placeholders below the first cut can never
        # pass (ZWindow is >= 0), so analyze() currently rejects every event —
        # these look like intentional fill-in-the-value exercise stubs.
        if not self.ZWindow(z1Lepton, z2Lepton) < -999: return False;# TO DO: Find a good value for this cut
        if not AH.WTransverseMass(wLepton, etmiss) > -999: return False;# TO DO: Find a good value for this cut
        # histograms for missing et
        self.hist_etmiss.Fill(etmiss.et(), weight)
        # vertex histograms
        self.hist_vxp_z.Fill(eventinfo.primaryVertexPosition(), weight)
        self.hist_pvxp_n.Fill(eventinfo.numberOfVertices(), weight)
        # WZ system histograms
        self.invMass.Fill((z1Lepton.tlv() + z2Lepton.tlv()).M(), weight)
        self.WtMass.Fill(AH.WTransverseMass(wLepton, etmiss), weight)
        # lepton histograms
        self.hist_leptn.Fill(len(goodLeptons), weight)
        [self.hist_leptpt.Fill(lep.pt(), weight) for lep in goodLeptons]
        [self.hist_lepteta.Fill(lep.eta(), weight) for lep in goodLeptons]
        [self.hist_leptE.Fill(lep.e(), weight) for lep in goodLeptons]
        [self.hist_leptphi.Fill(lep.phi(), weight) for lep in goodLeptons]
        [self.hist_leptch.Fill(lep.charge(), weight) for lep in goodLeptons]
        [self.hist_leptID.Fill(lep.pdgId(), weight) for lep in goodLeptons]
        [self.hist_leptptc.Fill(lep.isoptconerel30(), weight) for lep in goodLeptons]
        [self.hist_leptetc.Fill(lep.isoetconerel20(), weight) for lep in goodLeptons]
        [self.hist_lepz0.Fill(lep.z0(), weight) for lep in goodLeptons]
        [self.hist_lepd0.Fill(lep.d0(), weight) for lep in goodLeptons]
        return True

    def finalize(self):
        """No post-processing needed."""
        pass
| StarcoderdataPython |
3316553 | # !/usr/bin/env python
# Copyright (c) 2020, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
# Checks whether the requested website is behind a Cloudflare server
def check_cloudflare(url):
    """Return True if `url` reports a Cloudflare `server` header, else False.

    Exits the program if the site cannot be reached at all. Fixes three
    defects of the original: it issued the same GET twice, caught every
    exception with a bare `except:`, and implicitly returned None (falsy,
    but not False) when a non-Cloudflare `server` header was present.
    """
    try:
        response = requests.get(url)
    except requests.RequestException:
        print("[!] Error with the first request")
        exit()
    if response.status_code != 200:
        print("[!] Error with status code:", response.status_code)
    # requests header lookup is case-insensitive on the key; missing header
    # simply means "not Cloudflare" rather than an error.
    if response.headers.get("server") == "cloudflare":
        print("[!] The server is behind a CloudFlare server")
        return True
    return False
# Checks whether the requested website is vulnerable to XSS by using a predefined set of payloads
def check_xss(url):
    """Probe the URL's first query parameter with canned XSS payloads and
    report every payload that is reflected verbatim in the response body."""
    payloads = [
        '"-prompt(8)-"',
        '";a=prompt,a()//',
        '"onclick=prompt(8)>"@x.y"',
        '\'-alert(1)//',
        '</script><svg onload=alert(1)>',
        '<script>alert("inject")</script>',
        '<image/src/onerror=prompt(8)>',
        '<x onclick=alert(1)>click this!',
    ]
    print("[!] Testing XSS started")
    # Keep everything up to (and including) the first '=' and append payloads.
    base_url = url.split("=")[0] + '='
    reflected = [payload for payload in payloads
                 if payload in requests.get(base_url + payload).text]
    if not reflected:
        print("[!] Was not possible to exploit XSS")
        return
    print("[+]", len(reflected), " payloads were found")
    for payload in reflected:
        print("\n[*] Payload found!")
        print("[!] Payload:", payload)
        print("[!] POC:", base_url + payload)
filePath = input("Enter domain file path: ")
# Avoid shadowing the builtin `file`; iterate the file line by line.
with open(filePath) as domain_file:
    for line in domain_file:
        # Strip the trailing newline (and surrounding whitespace): the raw
        # line would otherwise be passed to requests as "http://x.com\n".
        domainUrl = line.strip()
        if not domainUrl:
            continue  # skip blank lines
        res = check_cloudflare(domainUrl)
        if res:
            # Cloudflare detected: offer to bail out before probing further.
            opt = ["Yes", "yes", "Y", "y"]
            ex = input("[!] Exit y/n: ")
            if ex in opt:
                exit()
            else:
                check_xss(domainUrl)
        else:
            check_xss(domainUrl)
| StarcoderdataPython |
191413 | <filename>icekit/plugins/slideshow/content_plugins.py
"""
Definition of the plugin.
"""
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import ContentPlugin, plugin_pool
from . import models
@plugin_pool.register
class SlideShowPlugin(ContentPlugin):
    # fluent-contents plugin wiring for SlideShowItem content items.
    model = models.SlideShowItem
    # Grouping shown in the admin "add plugin" menu.
    category = _('Assets')
    render_template = 'icekit/plugins/slideshow/default.html'
    # Use a raw-ID admin widget for the slide_show foreign key.
    raw_id_fields = ['slide_show', ]
1761142 | <gh_stars>0
import re
import operator
from bytewax import Executor, inp, processes
def tokenize(x):
    """Lower-case the text and split it into word tokens, dropping
    whitespace, digits, and the punctuation characters !,.?":; entirely."""
    lowered = x.lower()
    return re.findall(r'[^\s!,.?":;0-9]+', lowered)
def initial_count(word):
    """Pair a word with an initial count of one for the reduce step."""
    return (word, 1)
# Build a bytewax word-count dataflow over one single-batch epoch
# containing the whole text file.
ec = Executor()
flow = ec.Dataflow(inp.single_batch(open("benches/benchmarks/collected-works.txt")))
flow.flat_map(tokenize)          # lines -> individual lowercase words
flow.map(initial_count)          # word -> (word, 1)
flow.reduce_epoch(operator.add)  # sum the counts per word within the epoch
if __name__ == "__main__":
    processes.start_local(ec, number_of_processes=6)
| StarcoderdataPython |
3292805 | import plotly.express as px
import plotly.graph_objects as go
import dash_core_components as dcc
import dash_html_components as html
from .workout import WORKOUTS
COLORS = {"graph_bg": "#1E1E1E", "text": "#696969"}
def layout_config_panel(current_user):
    """The Dash app layout for the user config panel"""
    # App title and per-user greeting.
    title = html.Div([html.H5(f"HIIT PI")], className="app__header")
    subtitle = html.Div([html.P(f"Welcome, {current_user}!")], className="app__header")
    # Workout picker: a synthetic "Random" entry plus every registered workout.
    dropdown_options = [{"label": "Random", "value": "random"}] + [
        {"label": v.name, "value": k} for k, v in WORKOUTS.items()
    ]
    dropdown_menu = html.Div(
        [
            html.Div(
                [
                    dcc.Dropdown(
                        id="workout-dropdown",
                        options=dropdown_options,
                        placeholder="Select a workout",
                        searchable=True,
                        clearable=False,
                    )
                ],
                className="six columns flex-display",
            ),
            html.Div(
                [
                    html.Button(
                        id="workout-stop-btn",
                        n_clicks=0,
                        children="Done!",
                        className="button",
                    ),
                    html.A(
                        id="user-logout-btn",
                        n_clicks=0,
                        children="Exit?",
                        className="button",
                        href="/user_logout",
                    ),
                ],
                className="six columns flex-display",
            ),
        ],
        className="row app__dropdown flex-display",
    )
    # Dual-axis live chart: inference time (left axis, ms) vs pose score
    # (right axis, 0-1), both drawn as lines and updated via the Interval below.
    lines_graph = html.Div(
        [
            dcc.Graph(
                id="live-update-graph",
                figure={
                    "data": [
                        {
                            "name": "Inference Time",
                            "type": "scatter",
                            "y": [],
                            "mode": "lines",
                            "line": {"color": "#e6af19"},
                        },
                        {
                            "name": "Pose Score",
                            "type": "scatter",
                            "y": [],
                            "yaxis": "y2",
                            "mode": "lines",
                            "line": {"color": "#6145bf"},
                        },
                    ],
                    "layout": {
                        "margin": {"l": 60, "r": 60, "b": 10, "t": 20},
                        "height": 180,
                        "autosize": True,
                        "font": {
                            "family": "Comfortaa",
                            "color": COLORS["text"],
                            "size": 10,
                        },
                        "plot_bgcolor": COLORS["graph_bg"],
                        "paper_bgcolor": COLORS["graph_bg"],
                        "xaxis": {
                            "ticks": "",
                            "showticklabels": False,
                            "showgrid": False,
                        },
                        "yaxis": {
                            # "autorange": True,
                            "range": [10, 30],
                            "title": "Inference Time (ms)",
                        },
                        "yaxis2": {
                            # "autorange": True,
                            "range": [0, 1],
                            "title": "Pose Score",
                            "overlaying": "y",
                            "side": "right",
                        },
                        "legend": {
                            "x": 0.5,
                            "y": -0.2,
                            "xanchor": "center",
                            "yanchor": "middle",
                            "orientation": "h",
                        },
                    },
                },
                config={
                    "displayModeBar": False,
                    "responsive": True,
                    "scrollZoom": True,
                },
            )
        ]
    )
    workout_name = html.Div([html.P(id="workout_name")], className="app__header")
    def indicator(id_value, text):
        """Small "big number + caption" card used for the REPS/PACE readouts."""
        return html.Div(
            [
                html.P(id=id_value, className="indicator_value"),
                html.P(text, className="twelve columns indicator_text"),
            ],
            className="six columns indicator pretty_container",
        )
    indicators = html.Div(
        [
            indicator("indicator-reps", "REPS"),
            indicator("indicator-pace", "PACE (/30s)"),
        ],
        className="row indicators flex-display",
    )
    # Poll every 50 ms to refresh the live chart above.
    live_update_graph = html.Div(
        [
            lines_graph,
            dcc.Interval(id="live-update-interval", interval=50, n_intervals=0),
        ]
    )
    # Leaderboard bar chart, refreshed on demand via the button.
    bars_graph = html.Div(
        [
            html.Button(
                id="update-leaderboard-btn",
                n_clicks=0,
                children="Leaderboard",
                className="button",
            ),
            dcc.Graph(
                id="leaderboard-graph",
                config={
                    "displayModeBar": False,
                    "responsive": True,
                    "scrollZoom": True,
                },
            ),
        ]
    )
    return html.Div(
        [
            title,
            subtitle,
            live_update_graph,
            dropdown_menu,
            workout_name,
            indicators,
            bars_graph,
        ],
        className="four columns app__config_panel",
    )
def layout_videostream():
    """The Dash app layout for the video stream"""
    return html.Div(
        [html.Img(id="videostream")],
        className="eight columns app__video_image",
    )
def layout_homepage(current_user):
    """The Dash app home page layout"""
    panels = [layout_videostream(), layout_config_panel(current_user)]
    return html.Div(panels, className="row app__container")
def layout_login():
    """The Dash app login page layout."""
    # Jumbotron header: app title plus two markdown intro blurbs.
    header = html.Div(
        [
            html.H2("HIIT PI"),
            dcc.Markdown(
                id="welcome-intro-md",
                children="""
            A workout trainer [Dash](https://dash.plot.ly/) app
            that helps track your [HIIT](https://en.wikipedia.org/wiki/High-intensity_interval_training) workouts
            by analyzing real-time video streaming from your sweet [Pi](https://www.raspberrypi.org/).
            """,
            ),
            dcc.Markdown(
                id="welcome-intro-sub-md",
                children="Powered by [TensorFlow Lite](https://www.tensorflow.org/lite) and [Edge TPU](https://cloud.google.com/edge-tpu) with ❤️.",
            ),
        ],
        className="app__header",
    )
    # Plain HTML form: POSTs the player name to /user_login (no Dash callback).
    login_form = html.Div(
        [
            html.Form(
                [
                    dcc.Input(
                        id="user_name_input",
                        name="user_name_form",
                        type="text",
                        # NOTE(review): "<NAME>" looks like a redacted placeholder
                        # (e.g. "Your Name") — confirm the intended text.
                        placeholder="<NAME>",
                        maxLength=20,
                        minLength=1,
                        required=True,
                    ),
                    html.Button(
                        id="user-login-btn",
                        children="ENTER",
                        type="submit",
                        n_clicks=0,
                        className="button",
                    ),
                ],
                action="/user_login",
                method="POST",
                autoComplete="off",
                className="form-inline",
                title="Enter your player name.",
            )
        ],
        className="flex-display",
        style={"margin-top": "4rem"},
    )
    welcome_jumbotron = html.Div([header, login_form], className="header_container")
    return html.Div(
        [welcome_jumbotron],
        className="welcome_login_form page-background-image flex-display",
    )
def layout():
    """Top-level layout: URL location tracker plus the page-content placeholder."""
    children = [dcc.Location(id="url", refresh=True), html.Div(id="page-content")]
    return html.Div(children)
| StarcoderdataPython |
3276030 | import docker
import pytest
@pytest.fixture(scope="session")
def client():
    """Session-wide Docker client built from the local environment."""
    docker_client = docker.from_env()
    return docker_client
@pytest.fixture(scope="session")
def image(client):
    """Session-wide image built from ./src/Dockerfile; build logs are discarded."""
    built_image, _logs = client.images.build(path='./src', dockerfile='Dockerfile')
    return built_image
169756 | import numpy as np
from prml.linear.classifier import Classifier
class Perceptron(Classifier):
    """
    Perceptron model
    """

    def fit(self, X, t, max_epoch=100):
        """
        fit perceptron model on given input pair

        Parameters
        ----------
        X : (N, D) np.ndarray
            training independent variable
        t : (N,) np.ndarray
            training dependent variable, binary -1 or 1
        max_epoch : int, optional
            maximum number of epoch (the default is 100)
        """
        self.w = np.zeros(np.size(X, 1))
        for _ in range(max_epoch):
            # Misclassified samples under the current weights (hoisted so
            # X @ w is computed once per epoch instead of twice).
            scores = X @ self.w
            mistakes = np.sign(scores) != t
            X_error = X[mistakes]
            t_error = t[mistakes]
            if len(X_error) == 0:
                # Defensive: nothing left to correct (np.random.choice would
                # raise on an empty set).
                break
            # Classic perceptron update on one randomly chosen mistake.
            idx = np.random.choice(len(X_error))
            self.w += X_error[idx] * t_error[idx]
            # Converged once every sample sits strictly on the correct side.
            if (X @ self.w * t > 0).all():
                break

    def classify(self, X):
        """
        classify input data

        Parameters
        ----------
        X : (N, D) np.ndarray
            independent variable to be classified

        Returns
        -------
        (N,) np.ndarray
            binary class (-1 or 1) for each input
        """
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int yields the same platform-default integer dtype.
        return np.sign(X @ self.w).astype(int)
| StarcoderdataPython |
132694 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import math
import struct
from titus.genpy import PFAEngine
from titus.errors import *
# libc regexp library has no support for multibyte characters. This causes a difference between
# hadrian and titus regex libs. Unittests for multibye characters (non-ascii) are commented out.
class TestLib1Regex(unittest.TestCase):
    def testMemory(self):
        """Smoke-check that repeated engine.action calls do not leak memory."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.rindex: [input, [ab(c|d)*]]}
""")
        import resource, time
        memusage_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        for i in range(0, 10000):
            engine.action("abcccdc")
        memusage_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        print("\nMemory usage before: {0}, after: {1}".format(memusage_1, memusage_2))
    def testPosix(self):
        """Exercise POSIX classes, quantifiers, alternation and backreferences."""
        engine, = PFAEngine.fromYaml('''
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "[hc]+at"}]}
''')
        self.assertEqual(engine.action("hat"), [0,3])
        self.assertEqual(engine.action("cat"), [0,3])
        self.assertEqual(engine.action("hhat"), [0,4])
        self.assertEqual(engine.action("chat"), [0,4])
        self.assertEqual(engine.action("hcat"), [0,4])
        self.assertEqual(engine.action("cchchat"), [0,7])
        self.assertEqual(engine.action("at"), [])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "[hc]?at"}]}
""")
        self.assertEqual(engine.action("hat"), [0,3])
        self.assertEqual(engine.action("cat"), [0,3])
        self.assertEqual(engine.action("at"), [0,2])
        self.assertEqual(engine.action("dog"), [])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "[hc]*at"}]}
""")
        self.assertEqual(engine.action("hat"), [0,3])
        self.assertEqual(engine.action("cat"), [0,3])
        self.assertEqual(engine.action("hhat"), [0,4])
        self.assertEqual(engine.action("chat"), [0,4])
        self.assertEqual(engine.action("hcat"), [0,4])
        self.assertEqual(engine.action("cchchat"), [0,7])
        self.assertEqual(engine.action("at"), [0,2])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "cat|dog"}]}
""")
        self.assertEqual(engine.action("dog"), [0,3])
        self.assertEqual(engine.action("cat"), [0,3])
        self.assertEqual(engine.action("mouse"), [])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "(abc){2}|(def){2}"}]}
""")
        self.assertEqual(engine.action("abcabc"), [0,6])
        self.assertEqual(engine.action("defdef"), [0,6])
        self.assertEqual(engine.action("XKASGJ8"), [])
        # backreferences
        engine, = PFAEngine.fromYaml(r"""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, [(the )\1]]}
""")
        self.assertEqual(engine.action("Paris in the the spring"), [9,17])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "[[:upper:]ab]"}]}
""")
        self.assertEqual(engine.action("GHab"), [0,1])
        self.assertEqual(engine.action("ab"), [0,1])
        self.assertEqual(engine.action("p"), [])
    def testIndex(self):
        """re.index returns the [start, end) span of the first match, or []."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, {string: "ab(c|d)*"}]}
""")
        self.assertEqual(engine.action("abcccdc"), [0,7])
        self.assertEqual(engine.action("abddddd"), [0,7])
        self.assertEqual(engine.action("XKASGJ8"), [])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.index: [input, [dog]]}
""")
        self.assertEqual(engine.action("999dogggggg"), [3,6])
        self.assertEqual(engine.action("cat"), [])
        # test non ascii strings
        # engine, = PFAEngine.fromYaml('''
        # input: string
        # output: {type: array, items: int}
        # action:
        #   - {re.index: [input, {string: "对讲(机|p)*"}]}
        # ''')
        # self.assertEqual(engine.action("对讲机机机机机机"), [0,8])
        # self.assertEqual(engine.action("对讲pppppppppp"), [0,12])
        # check byte input
        engine, = PFAEngine.fromYaml('''
input: bytes
output: {type: array, items: int}
action:
  - re.index: [input, {bytes.encodeUtf8: {string: "ab(c|d)*"}}]
''')
        self.assertEqual(engine.action("abcccdc"), [0,7])
        self.assertEqual(engine.action("\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xbaabcccdc"), [9,16])
        self.assertEqual(engine.action("\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xbaabcccdc\xe8\xae\xb2\xe6\x9c\xba"), [9,16])
    def testContains(self):
        """re.contains reports whether the pattern matches anywhere."""
        engine, = PFAEngine.fromYaml('''
input: string
output: boolean
action:
  - {re.contains: [input, [ab(c|d)*]]}
''')
        self.assertEqual(engine.action("wio239fj6abcccdc"), True)
        self.assertEqual(engine.action("938736362abddddd"), True)
        self.assertEqual(engine.action("938272XKASGJ8"), False)
        engine, = PFAEngine.fromYaml('''
input: string
output: boolean
action:
  - {re.contains: [input, [dog]]}
''')
        self.assertEqual(engine.action("9999doggggggg"), True)
        self.assertEqual(engine.action("928373cat"), False)
        # check non ascii strings
        engine, = PFAEngine.fromYaml("""
input: string
output: boolean
action:
  - {re.contains: [input, [对讲机(讲|机)*]]}
""")
        self.assertEqual(engine.action("abcccdc"), False)
        self.assertEqual(engine.action("xyzzzz对讲机机abcc"), True)
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: boolean
action:
  - re.contains: [input, {bytes.encodeUtf8: {string: "对讲机(讲|机)*"}}]
""")
        self.assertEqual(engine.action("abcccdc"), False)
        self.assertEqual(engine.action('xyzzzz\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaabcc'), True)
    def testCount(self):
        """re.count returns the number of non-overlapping matches."""
        engine, = PFAEngine.fromYaml('''
input: string
output: int
action:
  - {re.count: [input, [ab(c|d)*]]}
''')
        self.assertEqual(engine.action("938272XKASGJ8"), 0)
        self.assertEqual(engine.action("iabc1abc2abc2abc"), 4)
        self.assertEqual(engine.action("938736362abddddd"), 1)
        engine, = PFAEngine.fromYaml('''
input: string
output: int
action:
  - {re.count: [input, [dog]]}
''')
        self.assertEqual(engine.action("999doggggggg"), 1)
        self.assertEqual(engine.action("9233857cat"), 0)
        self.assertEqual(engine.action("dogdogdogdogdog"), 5)
        self.assertEqual(engine.action("dogDogdogdogdog"), 4)
        self.assertEqual(engine.action("dogdog \n dogdogdog"), 5)
        engine, = PFAEngine.fromYaml('''
input: string
output: int
action:
  - {re.count: [input, [a*]]}
''')
        self.assertEqual(engine.action("aaaaaaaaaaaaaaa"), 1)
        engine, = PFAEngine.fromYaml('''
input: string
output: int
action:
  - {re.count: [input, [ba]]}
''')
        self.assertEqual(engine.action("ababababababababababa"), 10)
        # check non ascii strings
        engine, = PFAEngine.fromYaml("""
input: string
output: int
action:
  - {re.count: [input, [对+]]}
""")
        self.assertEqual(engine.action("abcccdc"), 0)
        self.assertEqual(engine.action("xyzzzz对对对对讲机机abcc对讲机机mmmmm对对对讲机机aa"), 3)
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: int
action:
  - re.count: [input, {bytes.encodeUtf8: {string: "对讲机(讲|机)*"}}]
""")
        self.assertEqual(engine.action("abcccdc"), 0)
        self.assertEqual(engine.action("xyzzzz\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xba\xe6\x9c\xbaabcc\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xba\xe6\x9c\xbammmmm\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xba\xe6\x9c\xbaaa"), 3)
    def testrIndex(self):
        """re.rindex returns the span of the last match, or []."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.rindex: [input, [ab(c|d)*]]}
""")
        self.assertEqual(engine.action("abcccdc"), [0,7])
        self.assertEqual(engine.action("abddddd"), [0,7])
        self.assertEqual(engine.action("XKASGJ8"), [])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: int}
action:
  - {re.rindex: [input, [dog]]}
""")
        self.assertEqual(engine.action("999dogggggg"), [3,6])
        self.assertEqual(engine.action("cat"), [])
        self.assertEqual(engine.action("catdogpppdog"), [9,12])
        # check non-ascii string input
        # engine, = PFAEngine.fromYaml("""
        # input: string
        # output: {type: array, items: int}
        # action:
        #   - {re.rindex: [input, [对讲机(讲|机)*]]}
        # """)
        # self.assertEqual(engine.action("abcccdc"), [])
        # self.assertEqual(engine.action("xyzzzz对讲机机abcc对讲机机mmmmm对讲机机aa"), [23,27])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: int}
action:
  - re.rindex: [input, {bytes.encodeUtf8: {string: "对讲机(讲|机)*"}}]
""")
        self.assertEqual(engine.action("abcccdc"), [])
        self.assertEqual(engine.action("xyzzzz\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaabcc\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbammmmm\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaaa"), [39,51])
    def testGroups(self):
        """re.groups returns spans for the whole match and each capture group."""
        engine, = PFAEngine.fromYaml('''
input: string
output: {type: array, items: {type: array, items: int}}
action:
  - {re.groups: [input, {string: "(a(b)c)d"}]}
''')
        self.assertEqual(engine.action("abcd"), [[0,4], [0,3], [1,2]])
        engine, = PFAEngine.fromYaml('''
input: string
output: {type: array, items: {type: array, items: int}}
action:
  - {re.groups: [input, {string: "(the )+"}]}
''')
        self.assertEqual(engine.action("Paris in the the spring"), [[9,17], [13,17]])
        engine, = PFAEngine.fromYaml(r'''
input: string
output: {type: array, items: {type: array, items: int}}
action:
  - {re.groups: [input, {string: (the )\1}]}
''')
        self.assertEqual(engine.action("Paris in the the spring"), [[9,17], [9,13]])
        engine, = PFAEngine.fromYaml('''
input: string
output: {type: array, items: {type: array, items: int}}
action:
  - {re.groups: [input, {string: "()(a)bc(def)ghijk"}]}
''')
        self.assertEqual(engine.action("abcdefghijk"), [[0,11], [0,0], [0,1], [3,6]])
        # check non-ascii string input
        # engine, = PFAEngine.fromYaml("""
        # input: string
        # output: {type: array, items: {type: array, items: int}}
        # action:
        #   - {re.groups: [input, [对讲机(讲|机)*]]}
        # """)
        # self.assertEqual(engine.action("abcccdc"), [])
        # self.assertEqual(engine.action("xyzzzz对讲机机abcc对讲机机mmmmm对讲机机aa"), [[6,10], [9,10]])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: {type: array, items: int}}
action:
  - re.groups: [input, {bytes.encodeUtf8: {string: "对讲机(讲|机)*"}}]
""")
        self.assertEqual(engine.action("abcccdc"), [])
        self.assertEqual(engine.action("xyzzzz\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaabcc\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbammmmm\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaaa"), [[6,18], [15,18]])
    def testindexAll(self):
        """re.indexall returns spans for every non-overlapping match."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: {type: array, items: int}}
action:
  - {re.indexall: [input, [ab]]}
""")
        self.assertEqual(engine.action("abcabcabc"), [[0,2], [3,5], [6,8]])
        self.assertEqual(engine.action("88cabcc"), [[3,5]])
        # backref (include r in string)
        engine, = PFAEngine.fromYaml(r"""
input: string
output: {type: array, items: {type: array, items: int}}
action:
  - {re.indexall: [input, [(the )\1]]}
""")
        self.assertEqual(engine.action("Paris in the the spring, LA in the the summer"), [[9,17], [31,39]])
        # check non-ascii string input
        # engine, = PFAEngine.fromYaml("""
        # input: string
        # output: {type: array, items: {type: array, items: int}}
        # action:
        #   - {re.indexall: [input, [对讲机(讲|机)*]]}
        # """)
        # self.assertEqual(engine.action("abcccdc"), [])
        # self.assertEqual(engine.action("xyzzzz对讲机机abcc对讲机机mmmmm对讲机机aa"), [[6,10], [14,18], [23,27]])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: {type: array, items: int}}
action:
  - re.indexall: [input, {bytes.encodeUtf8: {string: "对讲机(讲|机)*"}}]
""")
        self.assertEqual(engine.action("abcccdc"), [])
        self.assertEqual(engine.action("xyzzzz\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaabcc\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbammmmm\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe6\x9c\xbaaa"), [[6,18], [22,34], [39,51]])
    def testfindAll(self):
        """re.findall: all matched substrings, for string and bytes input."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.findall: [input, [ab]]}
""")
        self.assertEqual(engine.action("abcabcabc"), ["ab","ab", "ab"])
        self.assertEqual(engine.action("88cabcc"), ["ab"])
        # check non-ascii string input
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.findall: [input, [猫机+猫]]}
""")
        self.assertEqual(engine.action("猫机猫oooo猫机机猫ppp猫机机机猫bbbb猫机aaaa猫机机"), ["猫机猫" ,"猫机机猫","猫机机机猫"])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: bytes}
action:
  - re.findall: [input, {bytes.encodeUtf8: {string: "ab+"}}]
""")
        self.assertEqual(engine.action("xyz"), [])
        self.assertEqual(engine.action("abc\xe6\x9c\xba\xe6\x9c\xbaabcabc"), ["ab", "ab", "ab"] )
    def testfindFirst(self):
        """re.findfirst: first match wrapped in a tagged union, or None when absent."""
        engine, = PFAEngine.fromYaml("""
input: string
output: [string, "null"]
action:
  - {re.findfirst: [input, [ab]]}
""")
        self.assertEqual(engine.action("88ccc555"), None)
        self.assertEqual(engine.action("abcabcabc"), {"string": "ab"})
        # check non-ascii input
        # engine, = PFAEngine.fromYaml("""
        # input: string
        # output: [string, "null"]
        # action:
        #  - {re.findfirst: [input, [机机+]]}
        # """)
        # self.assertEqual(engine.action("abc机机机abca机机bc asdkj 机机机sd"), "机机机")
        # self.assertEqual(engine.action("abdefg"), None)
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: [bytes, "null"]
action:
  - re.findfirst: [input, {bytes.encodeUtf8: {string: "对讲机(讲|机)*"}}]
""")
        self.assertEqual(engine.action("abcde\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe8\xae\xb2fgg\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe8\xae\xb2h"), {"bytes": "\xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba\xe8\xae\xb2"})
        self.assertEqual(engine.action("abcdefghijk"), None)
    def testfindGroupsFirst(self):
        """re.findgroupsfirst: full match plus capture groups of the first match."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.findgroupsfirst: [input, [ab]]}
""")
        self.assertEqual(engine.action("abcabcabc"), ["ab"])
        self.assertEqual(engine.action("88ccc"), [])
        # group 0 is the whole match; empty group () yields "".
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.findgroupsfirst: [input, [()(a)bc(def)ghijk]]}
""")
        self.assertEqual(engine.action("abcdefghijk"), ["abcdefghijk", "", "a", "def"])
        engine, = PFAEngine.fromYaml(r"""
input: string
output: {type: array, items: string}
action:
  - {re.findgroupsfirst: [input, [(the.)\1]]}
""")
        self.assertEqual(engine.action("Paris in the the spring"), ["the the ", "the "])
        # check non-ascii input
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.findgroupsfirst: [input, [机+(机)]]}
""")
        self.assertEqual(engine.action("abc机机机机abca机机bc"), ["机机机机","机"] )
        self.assertEqual(engine.action("abcd"), [])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: bytes}
action:
  - re.findgroupsfirst: [input, {bytes.encodeUtf8: {string: "机(机)"}}]
""")
        self.assertEqual(engine.action("abc\xe6\x9c\xba\xe6\x9c\xbaabca\xe6\x9c\xba\xe6\x9c\xbabc"), ["\xe6\x9c\xba\xe6\x9c\xba","\xe6\x9c\xba"] )
        self.assertEqual(engine.action("abcd"), [])
    def testfindGroupsAll(self):
        """re.findgroupsall: per-match list of [full match, group1, ...] for all matches."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: {type: array, items: string}}
action:
  - {re.findgroupsall: [input, [ab]]}
""")
        self.assertEqual(engine.action("aabb"), [["ab"]])
        self.assertEqual(engine.action("kkabkkabkkab"), [["ab"], ["ab"], ["ab"]])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: {type: array, items: string}}
action:
  - {re.findgroupsall: [input, [()(a)bc(def)ghijk]]}
""")
        self.assertEqual(engine.action("abcdefghijkMMMMMabcdefghijkMMMM"), [["abcdefghijk", "", "a", "def"], ["abcdefghijk","", "a", "def"]])
        # check non-ascii input
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: {type: array, items: string}}
action:
  - {re.findgroupsall: [input, [机+(机)]]}
""")
        self.assertEqual(engine.action("abc机机机机abca机机bc"), [["机机机机", "机"], ["机机", "机"]])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: {type: array, items: bytes}}
action:
  - re.findgroupsall: [input, {bytes.encodeUtf8: {string: "机(机)"}}]
""")
        self.assertEqual(engine.action('abc\xe6\x9c\xba\xe6\x9c\xbaabca\xe6\x9c\xba\xe6\x9c\xbabc'), [['\xe6\x9c\xba\xe6\x9c\xba', '\xe6\x9c\xba'], ['\xe6\x9c\xba\xe6\x9c\xba', '\xe6\x9c\xba']])
        self.assertEqual(engine.action("abcd"), [])
    def testgroupsAll(self):
        """re.groupsall: per-match index ranges for the full match and each group."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: {type: array, items: {type: array, items: int}}}
action:
  - {re.groupsall: [input, [()(a)bc(def)ghijk]]}
""")
        self.assertEqual(engine.action("abcdefghijkMMMMMabcdefghijkMMMM"), [[[0,11], [0,0], [0,1], [3,6]], [[16, 27],[16,16],[16,17], [19,22]]])
        ## check non-ascii input
        # engine, = PFAEngine.fromYaml("""
        # input: string
        # output: {type: array, items: {type: array, items: {type: array, items: int}}}
        # action:
        #  - {re.groupsall: [input, [(机)机]]}
        # """)
        # self.assertEqual(engine.action("abc机机abca机机bc"), [[[3,5], [3,4]], [[9,11], [9,10]]])
        # check byte input (indices count UTF-8 bytes, not characters)
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: {type: array, items: {type: array, items: int}}}
action:
  - re.groupsall: [input, {bytes.encodeUtf8: {string: "(机)机"}}]
""")
        self.assertEqual(engine.action('abc\xe6\x9c\xba\xe6\x9c\xbaabca\xe6\x9c\xba\xe6\x9c\xbabc'), [[[3,9], [3,6]], [[13,19], [13,16]]])
    def testreplaceFirst(self):
        """re.replacefirst: replace only the first (leftmost) match."""
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replacefirst: [input, ["ab(c|d)*"], ["person"]]}
""")
        self.assertEqual(engine.action("abcccdcPPPP"), "personPPPP")
        self.assertEqual(engine.action("PPPPabcccdcPPPP"), "PPPPpersonPPPP")
        self.assertEqual(engine.action("PPPPPPPP"), "PPPPPPPP")
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replacefirst: [input, ["ab(c|d)*"], ["walkie talkie"]]}
""")
        self.assertEqual(engine.action("This abcccdc works better than that abcccdc."), "This walkie talkie works better than that abcccdc.")
        # check non-ascii input
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replacefirst: [input, [对讲机+], ["walkie talkie"]]}
""")
        self.assertEqual(engine.action("This 对讲机 works better than that 对讲机."), "This walkie talkie works better than that 对讲机.")
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: bytes
action:
  - {re.replacefirst: [input, {bytes.encodeUtf8: {string: "对讲机+"}}, {bytes.encodeUtf8: {string: "walkie talkie"}}]}
""")
        self.assertEqual(engine.action('This \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba works better than that \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba.'), 'This walkie talkie works better than that \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba.')
    def testreplaceLast(self):
        """re.replacelast: replace only the last (rightmost) match."""
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replacelast: [input, ["ab(c|d)*"], ["person"]]}
""")
        self.assertEqual(engine.action("abcccdcPPPPabcccdc"), "abcccdcPPPPperson")
        self.assertEqual(engine.action("abcccdcPPPPabcccdcPPPP"), "abcccdcPPPPpersonPPPP")
        # check non-ascii input
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replacelast: [input, [对讲机+], ["walkie talkie"]]}
""")
        self.assertEqual(engine.action("This 对讲机 works better than that 对讲机."), "This 对讲机 works better than that walkie talkie.")
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: bytes
action:
  - {re.replacelast: [input, {bytes.encodeUtf8: {string: "对讲机+"}}, {bytes.encodeUtf8: {string: "walkie talkie"}}]}
""")
        self.assertEqual(engine.action('This \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba works better than that \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba.'), 'This \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba works better than that walkie talkie.')
    def testreplaceAll(self):
        """re.replaceall: replace every non-overlapping match."""
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replaceall: [input, [cow], [doggy]]}
""")
        self.assertEqual(engine.action("pcowppcowpppcow"), "pdoggyppdoggypppdoggy")
        self.assertEqual(engine.action("cowpcowppcowppp"), "doggypdoggyppdoggyppp")
        # replacement shorter than the match
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replaceall: [input, [cow], [Y]]}
""")
        self.assertEqual(engine.action("cowpcowppcowppp"), "YpYppYppp")
        self.assertEqual(engine.action("pcowppcowpppcow"), "pYppYpppY")
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replaceall: [input, [ab(c|d)*], [cow]]}
""")
        self.assertEqual(engine.action("abcccdcPPPP"), "cowPPPP")
        self.assertEqual(engine.action("PPPPabcccdc"), "PPPPcow")
        self.assertEqual(engine.action("PPabcdddcPPabcccdcPPabcccdcPP"), "PPcowPPcowPPcowPP")
        # check non-ascii input
        engine, = PFAEngine.fromYaml("""
input: string
output: string
action:
  - {re.replaceall: [input, [对讲机+], ["walkie talkie"]]}
""")
        self.assertEqual(engine.action("This 对讲机机 works better than that 对讲机机."), "This walkie talkie works better than that walkie talkie.")
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: bytes
action:
  - {re.replaceall: [input, {bytes.encodeUtf8: {string: "对讲机+"}}, {bytes.encodeUtf8: {string: "walkie talkie"}}]}
""")
        self.assertEqual(engine.action('This \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba works better than that \xe5\xaf\xb9\xe8\xae\xb2\xe6\x9c\xba.'), "This walkie talkie works better than that walkie talkie.")
    def testsplit(self):
        """re.split: split on the pattern; empty leading/trailing pieces are dropped."""
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.split: [input, [cow]]}
""")
        self.assertEqual(engine.action("cowpcowppcowppp"), ["p","pp","ppp"])
        self.assertEqual(engine.action("pcowppcowpppcow"), ["p","pp","ppp"])
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.split: [input, [ab(c|d)*]]}
""")
        self.assertEqual(engine.action("abcccdcPPPP"), ["PPPP"])
        self.assertEqual(engine.action("PPPPabcccdc"), ["PPPP"])
        self.assertEqual(engine.action("PPabcccdcPPabcccdcPPabcccdcPP"), ["PP","PP","PP","PP"])
        # check non-ascii string input
        engine, = PFAEngine.fromYaml("""
input: string
output: {type: array, items: string}
action:
  - {re.split: [input, [机+]]}
""")
        self.assertEqual(engine.action("abc机机机abca机机机bc asdkj 机机sd"), ["abc","abca","bc asdkj ", "sd" ])
        # check byte input
        engine, = PFAEngine.fromYaml("""
input: bytes
output: {type: array, items: bytes}
action:
  - re.split: [input, {bytes.encodeUtf8: {string: "机机+"}}]
""")
        self.assertEqual(engine.action("xyz"), ["xyz"])
        self.assertEqual(engine.action("ab\xe6\x9c\xba\xe6\x9c\xbaab\xe6\x9c\xba\xe6\x9c\xbaabc\xe6\x9c\xba\xe6\x9c\xbaabc"), ["ab", "ab", "abc", "abc"])
| StarcoderdataPython |
32016 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Marshmallow loader for record deserialization.
Use marshmallow schema to transform a JSON sent via the REST API from an
external to an internal JSON presentation. The marshmallow schema further
allows for advanced data validation.
"""
from __future__ import absolute_import, print_function
import json
from flask import request
from invenio_rest.errors import RESTValidationError
def _flatten_marshmallow_errors(errors):
"""Flatten marshmallow errors."""
res = []
for field, error in errors.items():
if isinstance(error, list):
res.append(
dict(field=field, message=' '.join([str(x) for x in error])))
elif isinstance(error, dict):
res.extend(_flatten_marshmallow_errors(error))
return res
class MarshmallowErrors(RESTValidationError):
    """Marshmallow validation errors.
    Responsible for formatting a JSON response to a user when a validation
    error happens.
    """
    def __init__(self, errors):
        """Store marshmallow errors.

        :param errors: nested error mapping as produced by marshmallow;
            flattened into a list of ``{'field', 'message'}`` dicts.
        """
        # Iterator state for __iter__/__next__; created lazily in __iter__.
        self._it = None
        self.errors = _flatten_marshmallow_errors(errors)
        super(MarshmallowErrors, self).__init__()
    def __str__(self):
        """Print exception with errors."""
        # NOTE(review): super(RESTValidationError, self) deliberately skips
        # RESTValidationError.__str__ and calls its parent's — presumably to
        # get the plain HTTP description without duplicated error text; confirm.
        return "{base}. Encountered errors: {errors}".format(
            base=super(RESTValidationError, self).__str__(),
            errors=self.errors)
    def __iter__(self):
        """Get iterator."""
        self._it = iter(self.errors)
        return self
    def next(self):
        """Python 2.7 compatibility."""
        return self.__next__()  # pragma: no cover
    def __next__(self):
        """Get next file item."""
        return next(self._it)
    def get_body(self, environ=None):
        """Get the request body."""
        # Serialized as the HTTP error response payload by werkzeug.
        body = dict(
            status=self.code,
            message=self.get_description(environ),
        )
        if self.errors:
            body['errors'] = self.errors
        return json.dumps(body)
def marshmallow_loader(schema_class):
    """Marshmallow loader for JSON requests.

    :param schema_class: marshmallow Schema class used to deserialize and
        validate the request JSON.
    :returns: a zero-argument loader callable suitable for the REST view.
    :raises MarshmallowErrors: (from the returned loader) when validation fails.
    """
    def json_loader():
        request_json = request.get_json()
        context = {}
        # When the URL carries a persistent identifier, expose the resolved
        # PID and record to the schema through its context.
        pid_data = request.view_args.get('pid_value')
        if pid_data:
            pid, record = pid_data.data
            context['pid'] = pid
            context['record'] = record
        # NOTE(review): relies on the marshmallow 2.x API where load() returns
        # a result object with .data/.errors; marshmallow 3 raises instead.
        result = schema_class(context=context).load(request_json)
        if result.errors:
            raise MarshmallowErrors(result.errors)
        return result.data
    return json_loader
def json_patch_loader():
    """Dummy loader for json-patch requests.

    ``force=True`` parses the body as JSON even when the Content-Type is
    ``application/json-patch+json`` rather than ``application/json``.
    """
    return request.get_json(force=True)
| StarcoderdataPython |
3287534 | <filename>setup.py<gh_stars>0
from setuptools import setup
# Read the long description up front with a context manager; the original
# `open('README.rst').read()` inside the call left the file handle open.
with open('README.rst') as readme:
    long_description = readme.read()

# Package metadata for distribution (Schedules Direct JSON API client).
setup(
    name='jsonTV',
    version='0.1.1',
    author='<NAME>',
    author_email='<EMAIL>',
    py_modules=['jsontv'],
    url='http://pypi.python.org/pypi/jsonTV/',
    license='APACHE 2.0',
    description='A client for the Schedules Direct JSON API',
    long_description=long_description,
    install_requires=[
        "requests",
    ],
)
| StarcoderdataPython |
1741928 | <reponame>hexatester/sdgs-dashboard<filename>sdgs/survey_individu/tambah/penyakit_diderita.py<gh_stars>0
import attr
from typing import Dict
@attr.dataclass
class PenyakitDiderita:
    """Survey question P404: which diseases the respondent suffers from.

    One boolean flag per disease; serialized by :meth:`todict` into the
    form payload where key "N" is the N-th disease in declaration order
    and the value is "1" (yes) or "2" (no).
    """

    # P404 — keep the declaration order: it defines the numeric form keys 1..19.
    mutaber_diare: bool = False
    demam_berdarah: bool = False
    campak: bool = False
    malaria: bool = False
    flu_burung_sars: bool = False
    covid19: bool = False
    hepatitis_b: bool = False
    hepatitis_e: bool = False
    difteri: bool = False
    chikungunya: bool = False
    leptospirosis: bool = False
    kolera: bool = False
    gizi_buruk: bool = False
    jantung: bool = False
    tbc_paru_paru: bool = False
    kanker: bool = False
    diabetes: bool = False
    lumpuh: bool = False
    lainnya: bool = False

    def todict(self) -> Dict[str, str]:
        """Serialize to the form payload: ``{"1": "1"|"2", ..., "19": ...}``.

        The previous implementation repeated the same "1"/"2" rule by hand
        for all 19 fields, which was prone to drift when fields change;
        ``attr.astuple`` yields the values in attribute-declaration order,
        which matches the numeric keys.
        """
        return {
            str(index): "1" if flag else "2"
            for index, flag in enumerate(attr.astuple(self), start=1)
        }
| StarcoderdataPython |
95393 | <reponame>anuraag392/How-to-make-a-simple-dice-roll-program<filename>dice roll project 1.py<gh_stars>0
from tkinter import *
import random
# Build the single-window dice-roll UI.
root=Tk()
root.title("Dice roll")
root.geometry("500x400")
# Large label that shows the rolled die face; starts empty.
label=Label(root,font=('helvetica',250,'bold'),text='')
label.pack()
def rolldice():
    # Unicode die faces U+2680..U+2685 (⚀ through ⚅); pick one at random.
    dice=['\u2680','\u2681','\u2682','\u2683','\u2684','\u2685']
    label.configure(text=f'{random.choice(dice)}')
    label.pack()
button=Button(root,font=('helvetica',15,'bold'),text='roll dice',command=rolldice)
button.pack()
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
| StarcoderdataPython |
1695336 | <filename>uart_debugger/scripts/waveform_dup.py<gh_stars>1-10
#!/usr/bin/python
"""
Name: <NAME>
ECN Login: mg296
PUID: 0024209781
Email: <EMAIL>
Description: Copy waveforms from an existing testbench and creating a
similar testbench
"""
import os
import sys
import string
import re
# NOTE(review): this script is Python 2 (print statements). It copies a
# ModelSim .do waveform file, substituting the old testbench name with the
# new one on every matching line.
if len(sys.argv) < 4:
    print "[Error] : Too few arguments. \n[Usage] : ./waveform_dup.py [waveform].do [old testbench] [new testbench]"
else:
    pre_waveform = sys.argv[1] #dot do file that is copied from
    old_testbench = sys.argv[2]
    new_testbench = sys.argv[3]
    testbench_name = "%s.do" %(sys.argv[3])
    #Read from old waveform
    try:
        fp = open(pre_waveform,"r")
    except IOError:
        print "[Error] : Could not open file %s. \n[Usage] : ./waveform_dup.py [waveform].do" %(pre_waveform)
        sys.exit()
    #Edit new waveform
    try:
        fp2 = open(testbench_name,"w")
    except IOError:
        print "[Error] : Could not open file %s. \n[Usage] : ./waveform_dup.py [waveform].do" %(new_testbench)
        sys.exit()
    print "Old testbench : %s" %old_testbench
    print "New testbench : %s" %new_testbench
    for line in fp:
        # NOTE(review): re.search treats old_testbench as a regex, so names
        # containing metacharacters (., +, ...) may over-match; re.escape or a
        # plain substring test would be safer — confirm intended behavior.
        if re.search(old_testbench,line):
            #Replace old testbench's name with new test bench's name
            line = line.replace(old_testbench, new_testbench)
            fp2.write(line)
        else:
            fp2.write(line)
    fp.close()
    fp2.close()
    sys.exit()
| StarcoderdataPython |
3322299 | from putils.patterns import Singleton
from putils.filesystem import Dir
import mimetypes
import scss
from scss import Scss
from jsmin import jsmin
import os
import shutil
class StaticCompiler(object):
    """
    Static files minifier.

    Compiles SCSS sources to CSS and minifies JavaScript; content of any
    other mime type is returned untouched.
    """
    def __init__(self, path):
        self.css_parser = Scss()
        # The scss compiler resolves @import directives relative to this path.
        scss.LOAD_PATHS = path

    def compile_file(self, filepath, need_compilation=True):
        """Return the content of ``filepath``, compiled/minified unless
        ``need_compilation`` is False."""
        result = self.get_content(filepath)
        if need_compilation:
            # Mime type is guessed from the file extension.
            mimetype = mimetypes.guess_type(filepath)[0]
            result = self.compile_text(result, mimetype)
        return result

    def compile_text(self, text, mimetype):
        """Compile/minify ``text`` according to ``mimetype``; pass through
        anything that is neither CSS nor JavaScript."""
        if mimetype == "text/css":
            return self.css_parser.compile(text)
        if mimetype == "application/javascript":
            return jsmin(text)
        return text

    def get_content(self, file):
        """Read the whole file as text.

        Uses a context manager so the handle is closed promptly — the
        previous ``open(file).read()`` leaked the file descriptor.
        """
        with open(file) as f:
            return f.read()
class StaticBuilder(object):
    """
    Uses StaticCompiler to minify and compile js and css.

    Output is written under ``<path>/build`` mirroring the source tree;
    paths listed in ``static_not_compile`` are copied without minification.
    """
    def __init__(self, path, static_not_compile):
        self.path = path
        # Relative paths that must be copied verbatim (no compilation).
        self.static_not_compile = static_not_compile
        self.compiler = StaticCompiler(self.path)

    def build(self):
        """Rebuild the ``build`` directory from scratch (best effort)."""
        # Previous output may legitimately not exist; ignore removal errors
        # instead of the original bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt).
        shutil.rmtree(self.path + "/build", ignore_errors=True)
        try:
            Dir.walk(self.path, self.build_file)
        except Exception:
            # Best-effort build, as in the original; but no longer a bare
            # except, so Ctrl-C and SystemExit propagate.
            pass

    def build_file(self, file):
        """Compile a single source file into the build tree."""
        rel_path = file.replace(self.path, "")
        # Files explicitly excluded from compilation are copied as-is.
        need_compilation = rel_path not in self.static_not_compile
        new_path = self.path + "/build" + rel_path
        result = self.compiler.compile_file(file, need_compilation=need_compilation)
        if result:
            try:
                os.makedirs(os.path.dirname(new_path))
            except OSError:
                # Directory already exists.
                pass
            # Context manager replaces the original unclosed open().write().
            with open(new_path, "w") as f:
                f.write(result)
| StarcoderdataPython |
57296 | <filename>python/turbodbc/__init__.py
from __future__ import absolute_import
from .api_constants import apilevel, threadsafety, paramstyle
from .connect import connect
from .constructors import Date, Time, Timestamp
from .exceptions import Error, InterfaceError, DatabaseError, ParameterError
from .data_types import STRING, BINARY, NUMBER, DATETIME, ROWID
from .options import make_options
from turbodbc_intern import Rows, Megabytes
import pkg_resources
# Fall back gracefully when distribution metadata is unavailable (e.g. an
# uninstalled source checkout).  `except Exception` instead of the original
# bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
try:
    __version__ = pkg_resources.get_distribution(__name__).version
except Exception:
    __version__ = 'unknown'
| StarcoderdataPython |
118173 | import random
from data import participants
from whatsapp_selenium import send_messages
import time
Host_Name = 'Dorcy'
def generate_text(santa, santee, age):
    # Builds the WhatsApp message for one santa/santee pair. The trailing
    # backslashes splice the source lines together so the only newlines in
    # the message are the explicit \n escapes; embeds the module-level
    # Host_Name constant.
    return f"""Dear {santa},\
\nThis year you are {santee}'s Secret Santa!. Ho Ho Ho!\
\nThis message was automagically generated from a computer by {Host_Name}\
\nNothing could possibly go wrong... Ho Ho Ho! ps {santee} is {age}.
"""
def generate_santas(participants_list):
    """Randomly assign a santee to every participant.

    Each participant is paired with a random santee who is not themselves
    and not in their ``dont_pair`` list; every santee is used exactly once.

    :param participants_list: list of participant dicts with at least
        ``id``, ``name``, ``age``, ``phone`` and ``dont_pair`` keys.
    :returns: list of ``[phone, message]`` pairs, one per participant.
    :raises RuntimeError: when no valid santee is found within 60 random
        draws for some participant.
    """
    # Work on a copy: the original aliased the caller's list and consumed it.
    participant_choices = list(participants_list)
    full_response = []
    for participant in participants_list[::-1]:
        output = [participant['phone']]
        for _ in range(60):
            candidate_index = random.randrange(len(participant_choices))
            candidate = participant_choices[candidate_index]
            if candidate["id"] not in participant["dont_pair"] and candidate["id"] != participant["id"]:
                output.append(generate_text(participant['name'], candidate['name'], candidate['age']))
                participant_choices.pop(candidate_index)
                break
        else:
            # The original `raise ("Timeout error")` raised a plain string,
            # which is itself a TypeError in Python 3 (and was followed by an
            # unreachable `break`); raise a real exception instead.
            raise RuntimeError("Timeout: no valid santee found for %s" % participant['name'])
        full_response.append(output)
    return full_response
# Generate all pairings, then send each [phone, message] pair over WhatsApp.
santas_list = generate_santas(participants_list=participants)
# Brief pause before driving the browser automation.
time.sleep(2)
for santa in santas_list:
    send_messages(santa[0], santa[1])
| StarcoderdataPython |
3338282 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from webpages import *
@pytest.fixture
def page(browser, server_url, access_token):
    # Compose the shared browser/server/auth fixtures into a RootPage object.
    return RootPage(browser, server_url, access_token)
class TestRootPage(object):
    """Selenium smoke tests for the application's root page."""
    def test_should_show_a_dialog_when_opened(self, page):
        """The opened root page shows an h1 header containing 'EAvatar ME'."""
        page.open()
        header = page.find_element_by_tag_name('h1')
        # time.sleep(5)
        # NOTE(review): leftover debug output; consider removing.
        print(header.text)
        assert 'EAvatar ME' in header.text
| StarcoderdataPython |
181384 | <reponame>ahmedengu/h2o-3
import h2o
from h2o.tree import H2OTree
from h2o.estimators import H2OIsolationForestEstimator
from tests import pyunit_utils
def check_tree(tree, tree_number, tree_class = None):
    """Sanity-check an extracted H2OTree.

    Verifies the identity fields match the expected values and that every
    structural attribute of the tree (and of its root node) is populated.
    """
    assert tree is not None
    assert len(tree) > 0
    assert tree._tree_number == tree_number
    assert tree._tree_class == tree_class
    # All structural attributes must be present (not None).
    populated_attributes = (
        "root_node", "left_children", "right_children", "thresholds",
        "nas", "descriptions", "node_ids", "model_id", "levels",
    )
    for attribute_name in populated_attributes:
        assert getattr(tree, attribute_name) is not None
    assert tree.root_node.na_direction is not None
    assert tree.root_node.id is not None
def irf_tree_Test():
    """Train an Isolation Forest on an all-categorical frame and validate
    that tree 5 can be extracted with all structural fields populated."""
    cat_frame = h2o.create_frame(cols=10, categorical_fraction=1, seed=42)
    # check all columns are categorical
    assert set(cat_frame.types.values()) == set(['enum'])
    iso_model = H2OIsolationForestEstimator(seed=42)
    iso_model.train(training_frame=cat_frame)
    tree = H2OTree(iso_model, 5)
    # Isolation Forest trees have no tree class, hence tree_class=None.
    check_tree(tree, 5, None)
    print(tree)
# h2o pyunit convention: run under the standalone harness when executed
# directly, otherwise run the test immediately on import.
if __name__ == "__main__":
    pyunit_utils.standalone_test(irf_tree_Test)
else:
    irf_tree_Test()
| StarcoderdataPython |
99838 | <filename>pymt/bmi/bmi.py
class Error(Exception):
    """Base class for BMI exceptions; all BMI-specific errors derive from it."""
    pass
class VarNameError(Error):
    """Raised when an input/output variable name is not recognized."""

    def __init__(self, name):
        # Keep the offending variable name for error reporting.
        self.name = name

    def __str__(self):
        return self.name
class BMI(object):
    """Skeleton of the Basic Model Interface (BMI).

    Every method is a no-op stub here; concrete model wrappers are expected
    to override them. Methods are grouped as: lifecycle, variable metadata,
    time, and grid description.
    """
    # --- lifecycle ---
    def initialize(self, filename):
        pass
    def run(self, time):
        pass
    def finalize(self):
        pass
    # --- variable metadata ---
    def get_input_var_names(self):
        pass
    def get_output_var_names(self):
        pass
    def get_var_grid(self, var_name):
        pass
    def get_var_type(self, var_name):
        pass
    def get_var_units(self, var_name):
        pass
    # --- time ---
    def get_time_step(self):
        pass
    def get_start_time(self):
        pass
    def get_current_time(self):
        pass
    def get_end_time(self):
        pass
    # --- grid description ---
    def get_grid_rank(self, grid_id):
        pass
    def get_grid_spacing(self, grid_id):
        pass
    def get_grid_shape(self, grid_id):
        pass
    def get_grid_x(self, grid_id):
        pass
    def get_grid_y(self, grid_id):
        pass
    def get_grid_z(self, grid_id):
        pass
    def get_grid_connectivity(self, grid_id):
        pass
    def get_grid_offset(self, grid_id):
        pass
| StarcoderdataPython |
1780348 | <filename>aplications/departamento/views.py
from django.shortcuts import render
from django.views.generic.edit import FormView
from django.views.generic import (
TemplateView,
ListView,
CreateView,
DeleteView,
)
from .forms import NewDepartamentoForm
from .models import Departamento
from aplications.persona.models import Colaborador
# Create your views here.
class DepartamentoListView(ListView):
    """Paginated department list, filterable by the ``kword`` query parameter."""
    model = Departamento
    template_name = "departamento/lista.html"
    context_object_name = 'lista_departamento'
    paginate_by = 4
    def get_queryset(self):
        # Case-insensitive substring filter on the department name;
        # an empty keyword matches every row.
        palabra_clave = self.request.GET.get('kword','')
        lista = Departamento.objects.filter(
            name__icontains = palabra_clave
        )
        return lista
class NewDepartamentoView(FormView):
    """Create a Departamento and its first Colaborador from a single form."""
    template_name = 'departamento/new_departamento.html'
    form_class = NewDepartamentoForm
    success_url = '/'

    def form_valid(self, form):
        """Persist the new department, then the collaborator linked to it."""
        depa = Departamento(
            name=form.cleaned_data['departamento'],
            short_name=form.cleaned_data['short_name']
        )
        depa.save()
        nombre = form.cleaned_data['nombre']
        apellido = form.cleaned_data['apellido']
        # BUG FIX: the original assigned the literal list ['job'] instead of
        # reading the submitted value, so every collaborator got a bogus job.
        # Assumes the form declares a 'job' field — TODO confirm against
        # NewDepartamentoForm.
        job = form.cleaned_data['job']
        Colaborador.objects.create(
            first_name=nombre,
            last_name=apellido,
            job=job,
            departamento=depa
        )
        return super(NewDepartamentoView, self).form_valid(form)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.