id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8184549 | <gh_stars>1-10
from torchvision.ops import nms
from config import *
import torch
import torch.nn as nn
class _transitionLayer(nn.Module):
    """DenseNet transition stage: GroupNorm -> ReLU -> 1x1 channel
    compression conv -> 2x2 average pooling.

    ``outChannels`` exposes the compressed channel count so the caller
    can chain the next stage.
    """

    def __init__(self, inChannels):
        super(_transitionLayer, self).__init__()
        compressed = int(inChannels * dnc.compressionRate)
        self.outChannels = compressed
        self.module = nn.Sequential(
            nn.GroupNorm(dnc.numGroups, inChannels),
            nn.ReLU(),
            nn.Conv2d(inChannels, compressed, 1),
            nn.AvgPool2d(2),
        )

    def forward(self, x):
        """Apply the transition to feature map ``x``."""
        return self.module(x)
class _convBlock(nn.Module):
    """DenseNet bottleneck block: 1x1 expansion to 4*growthRate channels,
    then a 3x3 conv emitting ``dnc.growthRate`` new feature channels."""

    def __init__(self, inChannels):
        super(_convBlock, self).__init__()
        bottleneck = 4 * dnc.growthRate
        self.outChannels = dnc.growthRate
        self.module = nn.Sequential(
            nn.GroupNorm(dnc.numGroups, inChannels),
            nn.ReLU(),
            nn.Conv2d(inChannels, bottleneck, 1),
            nn.GroupNorm(dnc.numGroups, bottleneck),
            nn.ReLU(),
            nn.Conv2d(bottleneck, dnc.growthRate, 3, padding=1),
        )

    def forward(self, x):
        """Compute ``growthRate`` new feature maps from input ``x``."""
        return self.module(x)
class _denseBlock(nn.Module):
    """Stack of bottleneck blocks with dense connectivity: each block
    consumes the concatenation of the input and all previous outputs."""

    def __init__(self, inChannels, numBlocks):
        super(_denseBlock, self).__init__()
        self.layers = nn.ModuleList()
        channels = inChannels
        for _ in range(numBlocks):
            self.layers.append(_convBlock(channels))
            channels += dnc.growthRate
        self.outChannels = channels

    def forward(self, x):
        """Return the channel-wise concatenation of ``x`` and every
        block's output."""
        collected = [x]
        for block in self.layers:
            collected.append(block(torch.cat(collected, 1)))
        return torch.cat(collected, 1)
class DenseNet(nn.Module):
    """DenseNet-style backbone: a conv stem followed by alternating dense
    blocks and transition layers, one pair per entry of ``dnc.numBlocks``.

    ``outChannels`` holds the final channel count for downstream heads.
    """

    def __init__(self):
        super(DenseNet, self).__init__()
        stemChannels = 64
        stem = nn.Sequential(
            nn.Conv2d(3, stemChannels, 7, padding=3),
            nn.GroupNorm(dnc.numGroups, stemChannels),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.input = stem
        stages = [stem]
        channels = stemChannels
        for num in dnc.numBlocks:
            dense = _denseBlock(channels, num)
            trans = _transitionLayer(dense.outChannels)
            channels = trans.outChannels
            stages.extend((dense, trans))
        self.outChannels = channels
        self.module = nn.Sequential(*stages)

    def forward(self, x):
        """Run the full backbone over an image batch ``x``."""
        return self.module(x)
class YOLO(nn.Module):
    """Single-scale YOLO detector on a DenseNet backbone.

    Per grid cell and anchor the 1x1 head predicts confidence, x/y cell
    offsets, w/h in log space relative to the anchors (decoded with
    ``expm1``), and per-class logits.  Configuration comes from the
    module-level ``ylc`` / ``tc`` / ``dnc`` objects (``from config import *``).
    """

    def __init__(self):
        super(YOLO, self).__init__()
        # number of warm-up batches already consumed (see computeLosses)
        self.warmUpBatch = 0
        # last batch's training metrics, stored as plain Python floats
        self.metrics = {}
        self.backbone = DenseNet()
        # detection head: (conf, x, y, w, h, classes...) per anchor
        self.yolo = nn.Conv2d(self.backbone.outChannels, ylc.numAnchors * (5 + ylc.numClasses), 1)

    def computeLosses(self, x, y):
        """Return the scalar training loss for predictions ``x``.

        ``y`` is the tuple ``[target, boxes, grid, anchors, gridSize,
        imgSize]`` assembled by :meth:`forward`.  As a side effect,
        updates ``self.metrics`` with the component losses, recall at
        IoU 0.50/0.75 and the mean IoU over true objects.
        """
        y, boxes, grid, anchors, gridSize, imgSize = y
        # MARK: - adjust prediction
        predConf = x[..., :1]
        predXY = x[..., 1:3]
        predWH = x[..., 3:5]
        predClasses = x[..., 5:]
        # MARK: - adjust ground truth
        trueConf = y[..., :1]
        trueXY = y[..., 1:3]
        trueWH = y[..., 3:5]
        trueClasses = y[..., 5:]
        # MARK: - ignore box that overlap some ground truth by 0.5
        trueBoxes = boxes
        trueBoxesXY = trueBoxes[..., :2] / gridSize
        trueBoxesWH = trueBoxes[..., 2:] / imgSize
        # add a broadcast axis so every prediction is compared to every true box
        predBoxesXY = torch.unsqueeze(predXY / gridSize, -2)
        predBoxesWH = torch.unsqueeze(torch.expm1(predWH) * anchors / imgSize, -2)
        # calc IoU
        trueHalf = trueBoxesWH / 2
        trueMin = trueBoxesXY - trueHalf
        trueMax = trueBoxesXY + trueHalf
        predHalf = predBoxesWH / 2
        predMin = predBoxesXY - predHalf
        predMax = predBoxesXY + predHalf
        intersectMin = torch.max(predMin, trueMin)
        intersectMax = torch.min(predMax, trueMax)
        intersectWH = torch.max(intersectMax - intersectMin, torch.zeros_like(intersectMax))
        trueArea = trueBoxesWH[..., 0] * trueBoxesWH[..., 1]
        predArea = predBoxesWH[..., 0] * predBoxesWH[..., 1]
        intersectArea = intersectWH[..., 0] * intersectWH[..., 1]
        unionArea = trueArea + predArea - intersectArea
        iou = intersectArea / unionArea
        # best IoU of each prediction against any ground-truth box
        (bestIou, _) = torch.max(iou, -1)
        objMask = torch.unsqueeze(y[..., 0], -1)
        # predictions overlapping some truth by >= 0.5 are not penalised as background
        noObjMask = torch.unsqueeze(bestIou < .5, -1)
        coordMask = objMask
        # MARK: - compute IoU & recall
        tXY = trueXY / gridSize
        tWH = torch.expm1(trueWH) * anchors / imgSize
        pXY = predXY / gridSize
        pWH = torch.expm1(predWH) * anchors / imgSize
        trueHalf = tWH / 2
        trueMin = tXY - trueHalf
        trueMax = tXY + trueHalf
        predHalf = pWH / 2
        predMin = pXY - predHalf
        predMax = pXY + predHalf
        intersectMin = torch.max(predMin, trueMin)
        intersectMax = torch.min(predMax, trueMax)
        intersectWH = torch.max(intersectMax - intersectMin, torch.zeros_like(intersectMax))
        trueArea = tWH[..., 0] * tWH[..., 1]
        predArea = pWH[..., 0] * pWH[..., 1]
        intersectArea = intersectWH[..., 0] * intersectWH[..., 1]
        unionArea = trueArea + predArea - intersectArea
        iou = intersectArea / unionArea
        # keep only the cells that actually contain an object
        iou = objMask * torch.unsqueeze(iou, -1)
        objCount = torch.sum(objMask) + 1e-7
        detectMask = (predConf * objMask >= .5).type_as(objCount)
        classMask = torch.argmax(predClasses, -1) == torch.argmax(trueClasses, -1)
        classMask = torch.unsqueeze(classMask, -1).type_as(objCount)
        recall50 = torch.sum((iou >= .50) * classMask * detectMask) / objCount
        recall75 = torch.sum((iou >= .75) * classMask * detectMask) / objCount
        avgIou = torch.sum(iou) / objCount
        # increase the loss scale for small box
        coordScale = torch.expm1(trueWH) * anchors / imgSize
        coordScale = torch.unsqueeze(2. - (coordScale[..., 0] * coordScale[..., 1]), -1)
        # MARK: - warm up training
        # during warm-up, push empty cells toward their cell centers too
        if self.warmUpBatch < tc.warmUpBatches:
            trueXY += (torch.ones_like(objMask) - objMask) * (grid + .5)
            coordMask = torch.ones_like(coordMask)
            self.warmUpBatch += 1
        # MARK: - calc total loss
        coordCount = torch.sum(coordMask) + 1e-7
        lossConf = (predConf - trueConf) * objMask * ylc.objScale + (predConf - 0) * noObjMask * ylc.noObjScale
        lossConf = torch.sum(lossConf ** 2) / (objCount + torch.sum(noObjMask))
        lossXY = coordMask * (predXY - trueXY) * coordScale * ylc.coordScale
        lossXY = torch.sum(lossXY ** 2) / coordCount
        lossWH = coordMask * (predWH - trueWH) * coordScale * ylc.coordScale
        lossWH = torch.sum(lossWH ** 2) / coordCount
        lossClass = nn.functional.binary_cross_entropy_with_logits(predClasses * objMask, trueClasses * objMask)
        metrics = {
            'lossConf': lossConf,
            'lossXY': lossXY,
            'lossWH': lossWH,
            'lossClass': lossClass,
            'recall50': recall50,
            'recall75': recall75,
            'avgIou': avgIou
        }
        # detach to plain floats so metrics do not hold the graph alive
        for key in metrics:
            metrics[key] = metrics[key].cpu().item()
        self.metrics.update(metrics)
        return lossConf + lossXY + lossWH + lossClass

    def forward(self, x, target=None, boxes=None):
        """Training mode (``target`` given): return the scalar loss.

        Inference mode: return, per image, a list of detections, each
        ``[score, x1, y1, x2, y2, class logits...]``, after per-class NMS
        (empty list for images with no detection above the threshold).
        """
        (_, _, imgH, imgW) = x.shape
        out = self.backbone(x)
        out = self.yolo(out)
        (N, _, H, W) = out.shape
        out = out.view(N, H, W, ylc.numAnchors, 5 + ylc.numClasses)
        gridX, gridY = torch.arange(W), torch.arange(H)
        gridY, gridX = torch.meshgrid(gridY, gridX)
        grid = torch.stack((gridX, gridY), -1).type_as(x)
        grid = grid.view(1, H, W, 1, 2).repeat(N, 1, 1, ylc.numAnchors, 1)
        anchors = torch.Tensor(ylc.anchors).type_as(x).view(1, 1, 1, ylc.numAnchors, 2)
        gridSize = torch.Tensor([W, H]).type_as(x).view(1, 1, 1, 1, 2)
        imgSize = torch.Tensor([imgW, imgH]).type_as(x).view(1, 1, 1, 1, 2)
        conf = torch.sigmoid(out[..., :1])
        # xy in grid-cell units (offset + cell index); wh kept in log space
        xy = torch.sigmoid(out[..., 1:3]) + grid
        wh = out[..., 3:5]
        classes = out[..., 5:]
        if target is not None:
            out = torch.cat([conf, xy, wh, classes], -1)
            return self.computeLosses(out, [target, boxes, grid, anchors, gridSize, imgSize])
        # MARK:- inference
        ans = []
        xy /= gridSize
        wh = torch.expm1(out[..., 3:5]) * anchors / imgSize
        for confidence, coord, side, cat in zip(conf, xy, wh, classes):
            res = []
            # MARK: - ignore objects that has low confidence
            objMask = torch.squeeze(confidence > ylc.objThreshold, -1)
            if torch.sum(objMask) == 0:
                ans.append([])
                continue
            confidence = confidence[objMask]
            coord = coord[objMask]
            side = side[objMask]
            cat = cat[objMask]
            # convert center/size to corner coordinates for NMS
            x1y1 = coord - side / 2
            x2y2 = coord + side / 2
            boxes = torch.cat((x1y1, x2y2), -1)
            categories = torch.argmax(cat, -1).type_as(boxes)
            for catID in range(ylc.numClasses):
                catIds = torch.squeeze(categories == catID, -1)
                if torch.sum(catIds) == 0:
                    continue
                score = confidence[catIds]
                box = boxes[catIds]
                category = cat[catIds]
                ids = nms(box, score, ylc.nmsThreshold)
                score = score[ids]
                box = box[ids]
                category = category[ids]
                res.append(torch.cat((score, box, category), -1))
            if res:
                ans.append(torch.cat(res, 0).tolist())
            else:
                ans.append([])
        return ans
| StarcoderdataPython |
3288471 | <filename>render_quads.py
import sys
import os

import numpy as np
import cv2


def main(argv):
    """Render a filled ground-truth quad mask for each manifest entry.

    Manifest rows look like ``<relpath>,x1,y1,x2,y2,x3,y3,x4,y4[,...]``;
    for each one a binary PNG mask matching the image's shape is written
    to ``out_dir``.

    Fixes over the original: Python 2 ``print``/``map`` replaced with
    Python 3 equivalents, the manifest file handle is closed, and the
    bare ``except: pass`` around ``makedirs`` is replaced by
    ``exist_ok=True`` (which only ignores the already-exists case).
    """
    if len(argv) < 4:
        print("python %s manifest.txt dataset_dir out_dir" % __file__)
        return 1
    manifest_file = argv[1]
    dataset_dir = argv[2]
    out_dir = argv[3]
    os.makedirs(out_dir, exist_ok=True)
    with open(manifest_file, 'r') as fh:
        file_list = [s.strip() for s in fh.readlines()]
    for line in file_list:
        tokens = line.split(',')
        f = tokens[0]
        coords = [float(t) for t in tokens[1:9]]
        resolved = os.path.join(dataset_dir, f)
        # read grayscale; only the shape is used for the mask canvas
        im = cv2.imread(resolved, 0)
        gt = np.zeros(im.shape, dtype=np.uint8)
        cv2.fillPoly(gt, np.array(coords).reshape((4, 2)).astype(np.int32)[np.newaxis, :, :], 255)
        # flatten subdirectories into the filename; swap extension for .png
        out_fn = os.path.join(out_dir, f.replace('/', '_'))[:-4] + ".png"
        cv2.imwrite(out_fn, gt)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| StarcoderdataPython |
# Path of the FASTA file to analyse (hard-coded in the original script too).
FASTA_PATH = "/media/alessandro/DATA/User/BIOINFORMATICA.BOLOGNA/Programming_for_Bioinformatics/Module2/Exercise/sequences.txt"


def read_fasta_sequences(lines):
    """Parse FASTA-formatted *lines* and return the list of sequences.

    Residue lines are concatenated per '>' header.  Lines that appear
    before the first header are ignored (matching the original, which
    never stored its initial accumulator).

    BUG FIX: the original stripped newlines with ``line[:-1]``, which
    silently drops the last residue of a file without a trailing newline;
    ``rstrip("\\n")`` is used instead.
    """
    sequences = []
    current = None
    for line in lines:
        line = line.rstrip("\n")
        if line.startswith(">"):
            current = []
            sequences.append(current)
        elif current is not None:
            current.append(line)
    return ["".join(parts) for parts in sequences]


def sequence_stats(sequences):
    """Return ``(mean_length, longest_sequence, shortest_sequence)``.

    BUG FIX: the original seeded its min/max from the first sequence but
    only assigned ``seqmin``/``seqmax`` inside the loop, raising a
    NameError whenever the first sequence happened to be the shortest or
    longest one.  It also shadowed the builtins ``min``/``max``/``sum``.
    """
    lengths = [len(s) for s in sequences]
    mean = float(sum(lengths)) / len(sequences)
    return mean, max(sequences, key=len), min(sequences, key=len)


if __name__ == "__main__":
    with open(FASTA_PATH) as handle:
        sequences = read_fasta_sequences(handle)
    # same [sequence, length] inspection dump the original printed
    print([[s, len(s)] for s in sequences])
    mean, longest, shortest = sequence_stats(sequences)
    print("The average length of the FASTA sequences is: ", mean)
    print("The longest sequence with", len(longest), "residues is:", longest)
    print("The shortest sequence with", len(shortest), "residues is:", shortest)
| StarcoderdataPython |
11312808 | <reponame>janik-martin/fhirtordf
import os
import unittest
from typing import Optional, List, Callable
class ValidationTestCase(unittest.TestCase):
    """
    A test case builder. Iterates over all of the files in input_directory with suffix file_suffix, invoking
    validation_function with the input file and optional output directory.
    """
    longMessage = True
    # Configuration supplied by subclasses before build_test_harness() runs:
    input_directory = None          # type: str
    output_directory = None         # type: Optional[str]
    file_filter = None              # type: Optional[Callable[[str, str], bool]]
    file_suffix = None              # type: str
    start_at = ""                   # type: Optional[str]
    skip = []                       # type: List[str]
    validation_function = None      # type: Callable[["ValidationTestCase", str, str, Optional[str]], bool]
    single_file = False             # type: bool
    max_size = 0                    # type: int  (kilobytes; 0 disables the cap)
    no_tests = True                 # type: bool (skips all generated tests when True)

    @classmethod
    def make_test_function(cls, directory: str, fname: str):
        """Build a single unittest method that validates one input file."""
        @unittest.skipIf(cls.no_tests, "Omitted")
        def test(self):
            self.assertTrue(cls.validation_function(self, directory, fname))
        return test

    @classmethod
    def build_test_harness(cls) -> None:
        """Scan input_directory and attach one ``test_*`` method per matching file."""
        # resume support: with start_at set, files sorting before it are skipped
        started = not bool(cls.start_at)
        test_generated = False
        for dirpath, _, filenames in os.walk(cls.input_directory):
            for fname in filenames:
                if fname.endswith(cls.file_suffix):
                    # honour the skip list, resume point, custom filter and size cap
                    if fname not in cls.skip and (started or fname >= cls.start_at) and \
                            (not cls.file_filter or cls.file_filter(dirpath, fname)) and \
                            (not cls.max_size or os.path.getsize(os.path.join(dirpath, fname)) <= (cls.max_size * 1000)):
                        started = True
                        test_func = cls.make_test_function(dirpath, fname)
                        setattr(cls, 'test_{0}'.format(fname.rsplit('.', 1)[0]), test_func)
                        test_generated = True
                        if cls.single_file:
                            break
            if cls.single_file and test_generated:
                break

    def blank_test(self):
        # placeholder so the class always has at least one runnable test
        self.assertTrue(True)
| StarcoderdataPython |
5016118 | <gh_stars>0
#!/usr/bin/env python
import cPickle, glob, os, sys, time
import PRIChecker, PRIRecord, Table
def getPickledPath(directory):
    """Return the path of the cached (pickled) Recording inside *directory*."""
    return os.path.join(directory, 'recording.pkl')
class BadTime:
    """One anomalous VME record: the file it came from plus the anomaly's
    sequence counter and IRIG timestamp.

    Instances hash and compare by sequence counter.
    NOTE(review): ``__cmp__`` is Python 2 only; ignored under Python 3.
    """
    def __init__( self, file, anomaly ):
        self.file = file
        self.anomaly = anomaly
        self.sequenceCounter = anomaly.getVMESequenceCounter()
        self.irigTime = anomaly.getVMEIRIGTime()

    def getTime( self ):
        """Return the anomaly's IRIG time."""
        return self.irigTime

    def __hash__( self ):
        return hash( self.sequenceCounter )

    def __cmp__( self, other ):
        return cmp( self.sequenceCounter, other.sequenceCounter )

    def __repr__( self ):
        return '<BadTime %d %17.6f>' % ( self.sequenceCounter, self.irigTime )
class Recording( object ):
    """Aggregates the PRI files of one recording directory: overall time
    span, anomaly count, and the timestamps of every bad record.

    NOTE: written for Python 2 (print statements, cPickle).
    """

    # layout of the "good data spans" summary table
    kSpanTable = Table.Table(
        0,
        ( '%17.6f', 'Start Time' ),
        ( '%17.6f', 'End Time' ),
        ( '%14.3f', 'Duration' ),
    )

    def __init__( self, directory ):
        self.directory = directory
        self.files = []
        self.anomalousCount = 0
        self.startTime = None
        self.endTime = None
        self.badTimes = []

    def save( self ):
        """Cache this Recording as a pickle inside its directory."""
        cPickle.dump( self, file( getPickledPath( self.directory ), 'wb' ) )

    def addFile( self, priPath, recheck = False, noCache = False ):
        """Load one PRI file, (re)generate its anomaly report if needed,
        and fold its time span and anomalies into this recording."""
        file = PRIRecord.load( priPath, noCache = noCache )
        if file.messageType != 2:
            print '-- invalid message type -', file.messageType
            return
        if recheck is True or not file.hasReport():
            print '-- generating anomaly report'
            found = file.generateReport()
            if found != 0:
                print '** found', found, 'anomalies'
        self.files.append( file )
        if len( file.anomalies ) > 0:
            self.anomalousCount += 1
        # widen the recording's [startTime, endTime] span with this file
        when = file.getStartTime()
        if self.startTime is None or self.startTime > when:
            self.startTime = when
        when = file.getEndTime()
        if self.endTime is None or self.endTime < when:
            self.endTime = when
        for anomaly in file.getAnomalies():
            self.badTimes.append( BadTime( file, anomaly ) )

    def printReport( self, fd, minAcceptableSpan = 5.0 ):
        """Write a human-readable summary to *fd*: header stats, then a
        table of anomaly-free spans at least *minAcceptableSpan* long."""
        fd.write( ' Recording: %s\n' % self.directory )
        fd.write( 'Number Files: %d\n' % len( self.files ) )
        fd.write( ' w/Anomalies: %d\n\n' % self.anomalousCount )
        fd.write( ' Start Time: %17.6f\n' % self.startTime )
        fd.write( ' End Time: %17.6f\n' % self.endTime )
        fd.write( ' Duration: %14.3f\n\n' % ( self.endTime -
                                              self.startTime ) )
        self.kSpanTable.printHeader( fd )
        # each bad time closes the current clean span and starts a new one
        startTime = self.startTime
        for badTime in self.badTimes:
            if badTime.getTime() - startTime >= minAcceptableSpan:
                self.kSpanTable.printValues( startTime, badTime.getTime(),
                                             badTime.getTime() - startTime )
            startTime = badTime.getTime()
        if self.endTime - startTime >= minAcceptableSpan:
            self.kSpanTable.printValues( startTime, self.endTime,
                                         self.endTime - startTime )
def load(directory, noCache=False):
    """Build a Recording from every ``*.pri`` file in *directory*,
    processed in sorted order."""
    recording = Recording(directory)
    pattern = os.path.join(directory, '*.pri')
    for priPath in sorted(glob.glob(pattern)):
        recording.addFile(priPath, noCache=noCache)
    return recording
if __name__ == '__main__':
    # CLI: summarise every recording directory given on the command line,
    # writing summary.txt next to the data and echoing it to stdout.
    for directory in sys.argv[ 1 : ]:
        print '-- loading', directory
        recording = load( directory )
        reportPath = os.path.join( directory, 'summary.txt' )
        recording.printReport( open( reportPath, 'w' ) )
        recording.printReport( sys.stdout )
| StarcoderdataPython |
# Standard library
import logging

# Third-party: Django
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseRedirect, HttpRequest
from django.shortcuts import render
from django.template import loader
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import ListView, CreateView, UpdateView

# Third-party: Django REST framework
from rest_framework import status
from rest_framework.parsers import FormParser  # required by preprocess_request
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView

# Local applications
from qualiCar_API.models import UserProfile, Incident, Vehicle, Part
from qualiCar_API.serializers import incidentSerializer
from frontend.forms import IncidentForm

logger = logging.getLogger(__name__)
def preprocess_request(request):
    """Wrap a plain Django ``HttpRequest`` into a DRF ``Request``.

    Already-wrapped (or non-HttpRequest) objects pass through unchanged.

    BUG FIX: ``FormParser`` was referenced without ever being imported
    (NameError at runtime); it is now imported from
    ``rest_framework.parsers`` and instantiated, as DRF expects parser
    *instances* in the ``parsers`` list.
    """
    if isinstance(request, HttpRequest):
        return Request(request, parsers=[FormParser()])
    return request
# class Index (APIView):
# template = 'index.html'
class Index(View):
    """Landing page: renders the static index template."""
    template = 'index.html'
    # NOTE(review): login_url is only honoured by LoginRequiredMixin,
    # which this view does not mix in -- currently it has no effect.
    login_url = '/login/'

    def get(self, request):
        """Render the index page."""
        return render(request, self.template)
class Incident(APIView):
    """Renders the incident form and persists submitted incidents.

    NOTE(review): this view class shadows the ``Incident`` model imported
    at module level; after this class is defined the model is no longer
    reachable by that name in this module.  Inside the class body the
    name still resolves to the *model* (the class name is not bound until
    the body finishes executing), which is what ``model`` captures below.
    The class is not renamed here because URLconfs may reference it.

    Cleanups over the original: removed import-time ``logger.info`` side
    effect, the shared mutable class-level ``context`` dict, dead
    commented-out code, an unused (and wrong) ``AuthenticationForm``
    instantiation in ``post``, and ``print`` of serializer errors in
    favour of the module logger.
    """
    template = 'forms/incident.html'
    model = Incident  # the *model*, not this view -- see class docstring

    def get(self, request):
        """Render an empty incident form."""
        logger.info("Incident view get method")
        context = {'form': IncidentForm}
        return render(request, self.template, context)

    def post(self, request):
        """Validate and persist an incident submitted through the form."""
        logger.info("Incident view post method")
        serializer = incidentSerializer(
            # passing the request in the context resolves the KeyError 'request'
            context={'request': request},
            data=request.data,
        )
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        logger.warning("invalid incident payload: %s", serializer.errors)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Login(View):
    """Session login view backed by Django's AuthenticationForm."""
    template = 'login.html'

    def get(self, request):
        """Render an empty login form."""
        form = AuthenticationForm()
        return render(request, self.template, {'form': form})

    def post(self, request):
        """Authenticate the posted credentials and open a session.

        BUG FIX: ``AuthenticationForm``'s first positional argument is
        the *request*; the submitted data must be passed as ``data=``.
        The original passed ``request.POST`` as the request, so the form
        re-rendered on failure never carried the submitted data.
        """
        form = AuthenticationForm(request, data=request.POST)
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return HttpResponseRedirect('/')
        return render(request, self.template, {'form': form})
def load_vehicles(request):
    """AJAX helper: render the table of vehicles matching the selected part.

    BUG FIX: the original read ``return render`` followed by a
    parenthesised tuple on the next lines, so the function returned the
    ``render`` *function object* itself and the template arguments were
    dead code.  The call is now actually made.
    """
    part_id = request.GET.get('parts')
    # NOTE(review): filtering on ``part_id`` looks off for a M2M relation
    # named ``parts`` (elsewhere accessed as vehicle.parts) -- confirm
    # against the Vehicle model; kept as-is to avoid a silent behavior change.
    vehicles = Vehicle.objects.filter(part_id=part_id).order_by('brand')
    return render(
        request,
        'forms/dyn/impacted_vehicles_table.html',
        {'vehicles': vehicles},
    )
| StarcoderdataPython |
6464137 | # -*- coding: utf-8 -*-
"""
flask.ext.split.views
~~~~~~~~~~~~~~~~~~~~~
This module provides the views for Flask-Split's web interface.
:copyright: (c) 2012-2015 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import os
from flask import Blueprint, redirect, render_template, request, url_for
from .models import Alternative, Experiment
from .utils import _get_redis_connection
root = os.path.abspath(os.path.dirname(__file__))
split = Blueprint('split', 'flask.ext.split',
template_folder=os.path.join(root, 'templates'),
static_folder=os.path.join(root, 'static'),
url_prefix='/split'
)
@split.route('/')
def index():
    """Render the dashboard listing every active experiment."""
    redis = _get_redis_connection()
    experiments = Experiment.all(redis)
    return render_template('split/index.html', experiments=experiments)
@split.route('/<experiment>', methods=['POST'])
def set_experiment_winner(experiment):
    """Mark the posted alternative as the experiment's winner, then
    redirect back to the dashboard.  Unknown experiments or alternatives
    are ignored."""
    redis = _get_redis_connection()
    found = Experiment.find(redis, experiment)
    if found:
        chosen = Alternative(redis, request.form.get('alternative'), found.name)
        if chosen.name in found.alternative_names:
            found.winner = chosen.name
    return redirect(url_for('.index'))
@split.route('/<experiment>/reset', methods=['POST'])
def reset_experiment(experiment):
    """Wipe the collected data of an experiment (keeping the experiment
    itself), then redirect to the dashboard."""
    redis = _get_redis_connection()
    found = Experiment.find(redis, experiment)
    if found:
        found.reset()
    return redirect(url_for('.index'))
@split.route('/<experiment>/delete', methods=['POST'])
def delete_experiment(experiment):
    """Remove an experiment together with all of its data, then redirect
    to the dashboard."""
    redis = _get_redis_connection()
    found = Experiment.find(redis, experiment)
    if found:
        found.delete()
    return redirect(url_for('.index'))
| StarcoderdataPython |
1608122 | from gtmcore.container.container import SidecarContainerOperations
from gtmcore.mitmproxy.mitmproxy import MITMProxyOperations
import os
from gtmcore.activity.tests.fixtures import mock_redis_client
from gtmcore.fixtures import mock_labbook, mock_config_with_repo
from gtmcore.fixtures.container import build_lb_image_for_rstudio
from gtmcore.activity import ActivityStore
from gtmcore.activity.monitors.monitor_rserver import RServerMonitor, RStudioServerMonitor
def mock_ip(key):
    """Test stub: resolve any lookup key to a fixed container IP."""
    fixed_address = "172.16.17.32"
    return fixed_address
class TestRServerMonitor:
    """Tests for the RServer dev-environment monitor."""

    def test_setup(self, mock_redis_client):
        """Test getting the supported names of the dev env monitor"""
        monitor = RServerMonitor()
        assert len(monitor.get_dev_env_name()) == 1
        assert 'rstudio' in monitor.get_dev_env_name()
class TestRStudioServerMonitor:
    """Tests for RStudioServerMonitor driven by captured mitmproxy dump
    files (``*.rserver.dump``) stored next to this test module."""

    def test_init(self, mock_redis_client, mock_labbook):
        """Test getting the supported names of the dev env monitor"""
        server_monitor = RStudioServerMonitor("test", "test", mock_labbook[2].name,
                                              "52f5a3a9")
        assert len(server_monitor.processors) == 6

    def test_code_and_image(self, mock_redis_client, mock_labbook):
        """Test reading a log and storing a record"""
        # create a server monitor
        server_monitor = RStudioServerMonitor("test", "test", mock_labbook[2].name,
                                              "foo:activity_monitor:52f5a3a9")
        mitmlog = open(f"{os.path.dirname(os.path.realpath(__file__))}/52f5a3a9.rserver.dump", "rb")
        # Read activity and return an aggregated activity record
        server_monitor.process_activity(mitmlog)
        # call processor
        server_monitor.store_record()
        a_store = ActivityStore(mock_labbook[2])
        ars = a_store.get_activity_records()
        # details object [x][3] gets the x^th object
        code_dict = a_store.get_detail_record(ars[0].detail_objects[1].key).data
        # check the code results (substring of the rendered markdown)
        assert(code_dict['text/markdown'][101:109] == 'y("knitr')
        # check part of an image (leading base64 of the PNG payload)
        imgdata = a_store.get_detail_record(ars[1].detail_objects[1].key).data['image/png'][0:20]
        assert(imgdata == '/9j/4AAQSkZJRgABAQAA')

    def test_multiplecells(self, mock_redis_client, mock_labbook):
        """Make sure that RStudio detects and splits cells"""
        server_monitor = RStudioServerMonitor("test", "test", mock_labbook[2].name,
                                              "foo:activity_monitor:73467b78")
        mitmlog = open(f"{os.path.dirname(os.path.realpath(__file__))}/73467b78.rserver.dump", "rb")
        # Read activity and return an aggregated activity record
        server_monitor.process_activity(mitmlog)
        # call processor
        server_monitor.store_record()
        lb = mock_labbook[2]
        a_store = ActivityStore(lb)
        ars = a_store.get_activity_records()
        # details object [x][3] gets the x^th object
        cell_1 = a_store.get_detail_record(ars[0].detail_objects[2].key).data
        cell_2 = a_store.get_detail_record(ars[0].detail_objects[3].key).data
        # if the cells were divided, there will be two records
        assert(cell_1['text/plain'][59:62] == 'pop')
        assert(cell_2['text/plain'][200:204] == 'stan')
class TestMITMproxy:
    """Integration test for starting/stopping the MITM proxy sidecar."""

    def test_start_mitm_proxy(self, build_lb_image_for_rstudio):
        lb_container, ib, username, lb = build_lb_image_for_rstudio
        mitm_container = SidecarContainerOperations(lb_container, MITMProxyOperations.namespace_key)
        # Ensure this container doesn't exist already
        mitm_container.stop_container()
        # The below image should ideally already be on the system, maybe not tagged
        # If not, it'll hopefully be useful later - we won't clean it up
        image_name = 'ubuntu:18.04'
        # We still use Docker directly here. Doesn't make sense to create an abstraction that's only used by a test
        docker_client = lb_container._client
        docker_client.images.pull(image_name)
        # Pre-create a container under the sidecar's name to prove
        # start_mitm_proxy takes it from 'created' to 'running'.
        docker_client.containers.create(image_name, name=mitm_container.sidecar_container_name)
        assert mitm_container.query_container() == 'created'
        external_url = lb.client_config.config['proxy']['external_url']
        MITMProxyOperations.start_mitm_proxy(lb_container, new_rserver_session=True, external_url=external_url)
        assert mitm_container.query_container() == 'running'
        MITMProxyOperations.stop_mitm_proxy(lb_container)
        # stop_container returning truthy here means the sidecar was still alive
        if lb_container.stop_container(mitm_container.sidecar_container_name):
            assert False, "MITM container not cleaned up during `stop_mitm_proxy()`"
| StarcoderdataPython |
3467017 | <gh_stars>1-10
from django.utils.deconstruct import deconstructible
from django.core.files.uploadedfile import SimpleUploadedFile
from io import BytesIO
import PIL
import hashlib
@deconstructible
class UploadNameFromContent:
    """A Django FileField upload_to handler that
    generates the filename from the hash of the file content.
    The original file extension (if any) is included.
    """

    def __init__(self, basepath, filefield, alg='sha1'):
        # normalise the base path so it always ends with a slash (unless empty)
        if basepath and not basepath.endswith('/'):
            basepath = basepath + '/'
        self.basepath = basepath
        self.filefield = filefield
        self.alg = alg

    def file_hash(self, file):
        """Return the hex digest of *file*'s content using ``self.alg``."""
        digest = hashlib.new(self.alg)
        for chunk in file.chunks():
            digest.update(chunk)
        return digest.hexdigest()

    def __call__(self, instance, original_filename):
        """upload_to entry point: <basepath><content hash><original ext>."""
        upload = getattr(instance, self.filefield)
        try:
            extension = original_filename[original_filename.rindex('.'):]
        except ValueError:
            extension = ''
        return self.basepath + self.file_hash(upload) + extension
def downscale_image_if_necessary(imagefile, max_size):
    """Return *imagefile* untouched if it fits within ``max_size`` (w, h);
    otherwise return a freshly-encoded JPEG upload downscaled to fit
    (via :func:`_image_to_upload`).
    """
    img = PIL.Image.open(imagefile)
    w, h = img.size
    if w <= max_size[0] and h <= max_size[1]:
        return imagefile
    # in-place, aspect-preserving downscale
    img.thumbnail(max_size)
    if img.mode != 'RGB':
        # JPEG cannot store non-RGB modes; flatten onto a white canvas.
        # NOTE(review): paste() without a mask ignores any alpha channel --
        # confirm that is acceptable for RGBA/LA sources.
        img2 = PIL.Image.new("RGB", img.size, (255, 255, 255))
        img2.paste(img)
        img = img2
    return _image_to_upload(img)
def _image_to_upload(image):
    """Serialize a PIL image to JPEG and wrap it as an uploaded file
    whose name is the SHA-1 hex digest of the encoded bytes."""
    buf = BytesIO()
    image.save(buf, 'JPEG')
    buf = buf.getvalue()
    h = hashlib.sha1()
    h.update(buf)
    return SimpleUploadedFile(h.hexdigest() + '.jpeg', buf, 'image/jpeg')
348639 | <filename>carpyncho1/carpyncho/lcurves/models.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from picklefield.fields import PickledObjectField
import numpy as np
from PyAstronomy import pyasl
from save_the_change.mixins import SaveTheChange
from carpyncho.utils import storages
from skdjango.models import StatsModel
from skdjango.manager import SciManager
from carpyncho.lcurves.libs import match, lightcurves
from .manager import MasterSourceManager
# =============================================================================
# LOGGER
# =============================================================================
logger = logging.getLogger("lcurves")
# =============================================================================
# CLASIFICATIONS
# =============================================================================
class Clasification(SaveTheChange, models.Model):
    """A named classification label attachable to master sources.

    NOTE(review): the "Clasification" (sic) spelling is kept -- renaming
    would change the DB table and every import site.
    """
    name = models.CharField(
        max_length=255, verbose_name=_("Name"), unique=True
    )

    objects = SciManager()

    def __unicode__(self):
        return self.name
#==============================================================================
# TILE
#==============================================================================
def masters_upload(master, filename):
    """FileField upload_to callback: store tile master files under
    ``lcurves/<tile name>/<filename>``."""
    return u"/".join(("lcurves", master.name, filename))
class Tile(SaveTheChange, models.Model):
    """A survey tile: its master catalogue file plus a unique name."""

    class Meta:
        verbose_name = _("Tile")
        verbose_name_plural = _("Tile")

    # master catalogue file, stored under lcurves/<name>/ (see masters_upload)
    file = models.FileField(
        upload_to=masters_upload, verbose_name=_("File"),
        storage=storages.OverwriteStorage()
    )
    name = models.CharField(
        max_length=255, verbose_name=_("Name"), unique=True
    )

    objects = SciManager()

    def __unicode__(self):
        return self.name
#==============================================================================
# MASTER SOURCE
#==============================================================================
class MasterSource(SaveTheChange, models.Model):
    """One source of a tile's band-merged master catalogue, identified by
    its position (``order``) inside the tile."""

    class Meta:
        verbose_name = _("Master Source")
        verbose_name_plural = _("Masters Sources")
        unique_together = ["tile", "order"]

    tile = models.ForeignKey(
        Tile, related_name="sources", verbose_name=_("Tile")
    )
    # position of this source inside the tile's master catalogue file
    order = models.IntegerField(verbose_name=_("Order"))
    # equatorial coordinates per band (H, J and K)
    ra_h = models.FloatField(verbose_name=_("RA H"))
    dec_h = models.FloatField(verbose_name=_("Dec H"))
    ra_j = models.FloatField(verbose_name=_("RA J"))
    dec_j = models.FloatField(verbose_name=_("Dec J"))
    ra_k = models.FloatField(verbose_name=_("RA K"))
    dec_k = models.FloatField(verbose_name=_("Dec K"))
    # NOTE(review): the verbose_name labels of x and y are swapped
    # ("Y" on x, "X" on y) -- confirm whether that is intentional.
    x = models.FloatField(verbose_name=_("Y"))
    y = models.FloatField(verbose_name=_("X"))
    z = models.FloatField(verbose_name=_("Z"))
    clasifications = models.ManyToManyField(
        Clasification, verbose_name="Clasifications", related_name="sources",
        help_text=_("Identify diferents clasifications of the same source for internal propuses"),
        through="ClasificationXMasterSource"
    )
    type = models.CharField(
        max_length=255, verbose_name=_("Type"), default=None, null=True,
        choices=[(e, e) for e in sorted(settings.SOURCES_TYPES)],
        help_text=_("This identify the source type")
    )

    objects = MasterSourceManager()

    def __unicode__(self):
        return u"{}[{}]".format(self.tile, self.order)
class ClasificationXMasterSource(SaveTheChange, models.Model):
    """Through table linking a MasterSource to a Clasification, carrying
    an arbitrary pickled payload for the link."""

    class Meta:
        unique_together = ["master_src", "clasification"]

    master_src = models.ForeignKey(MasterSource)
    clasification = models.ForeignKey(Clasification)
    # free-form per-link data (pickled Python object)
    extra_data = PickledObjectField()

    objects = SciManager()

    def __unicode__(self):
        return u"{}: {}".format(
            unicode(self.clasification), unicode(self.master_src))
class MagStats(StatsModel):
    """Aggregated statistics over the magnitudes of all pawprint matches
    of a MasterSource (machinery provided by skdjango's StatsModel)."""

    class Meta:
        stats_from = MasterSource

    # NOTE(review): defined without ``self``/``@staticmethod`` --
    # presumably StatsModel's metaclass consumes this as a plain
    # callable; confirm against skdjango before changing.
    def stats_query(master_src):
        return master_src.matches.values_list(
            "pawprint_src__mag", flat=True)
class MagErrStats(StatsModel):
    """Aggregated statistics over the magnitude *errors* of all pawprint
    matches of a MasterSource (see MagStats for the same pattern)."""

    class Meta:
        stats_from = MasterSource

    # NOTE(review): plain callable consumed by StatsModel's machinery --
    # same convention as MagStats.stats_query.
    def stats_query(master_src):
        return master_src.matches.values_list(
            "pawprint_src__mag_err", flat=True)
# =============================================================================
# LCurve
# =============================================================================
class LightCurve(SaveTheChange, models.Model):
    """Derived time-series quantities (observation count and periods)
    for a single MasterSource."""

    source = models.OneToOneField(MasterSource, related_name="+")
    # when True, the cached values below must be recomputed via calculate()
    recalc = models.BooleanField(default=True)
    obs_number = models.IntegerField(verbose_name="Number of Observations")
    pdm_period = models.FloatField(verbose_name=_("PDM Period"), null=True, default=None)
    ls_period = models.FloatField(verbose_name=_("Lomb-Scargle Period"), null=True, default=None)

    objects = SciManager()

    def __unicode__(self):
        return unicode(self.source)

    def dataset(self):
        """Return (and memoize on the instance) the source's matches,
        ordered by observation MJD."""
        if not hasattr(self, "_dataset"):
            self._dataset = self.source.matches.all().order_by(
                "pawprint_src__pawprint__mjd"
            ).values(
                "pawprint_src__pawprint__mjd",
                "pawprint_src__mag", "pawprint_src__mag_err")
        return self._dataset

    def time(self):
        """Observation times (MJD) as a flat ndarray."""
        dataset = self.dataset()
        return dataset.to_ndarray(["pawprint_src__pawprint__mjd"]).ravel()

    def magnitude(self):
        """Observed magnitudes as a flat ndarray."""
        dataset = self.dataset()
        return dataset.to_ndarray(["pawprint_src__mag"]).ravel()

    def magnitude_error(self):
        """Magnitude errors as a flat ndarray."""
        dataset = self.dataset()
        return dataset.to_ndarray(["pawprint_src__mag_err"]).ravel()

    def calculate(self):
        """Recompute obs_number and the PDM / Lomb-Scargle periods.

        Requires ``recalc`` to be set (raises ValueError otherwise) and
        clears the flag; periods are only computed when there is data.
        """
        if not self.recalc:
            raise ValueError("Please set recalc to True")
        self.recalc = False
        dataset = self.dataset()
        self.obs_number = dataset.count()
        if self.obs_number > 0:
            time, magnitude, error = (
                self.time(), self.magnitude(), self.magnitude_error())
            self.pdm_period = lightcurves.pdm_period(time, magnitude)
            self.ls_period = lightcurves.ls_period(time, magnitude, error)
def _lightcurve(self):
    """Get-or-create accessor exposed as ``MasterSource.lightcurve``:
    returns the existing LightCurve or builds, computes and saves one."""
    query = LightCurve.objects.filter(source=self)
    if query.exists():
        return query.get()
    lc = LightCurve(source=self)
    lc.calculate()
    lc.save()
    return lc

# attach as a read-only property on MasterSource, then drop the module name
MasterSource.lightcurve = property(_lightcurve)
del _lightcurve
#==============================================================================
# PAWPRINT
#==============================================================================
def pwprints_upload(pwprint, filename):
    """FileField upload_to callback: bucket pawprint files by the part of
    their name before the first underscore."""
    subfolder = pwprint.name.split("_", 1)[0]
    return u"/".join(("lcurves", "pawprints", subfolder, filename))
class Pawprint(SaveTheChange, models.Model):
    """A single-epoch pawprint observation file, linkable to one or more
    tiles through PawprintXModel."""

    class Meta:
        verbose_name = _("Pawprint")
        verbose_name_plural = _("Pawprints")

    tile = models.ManyToManyField(
        Tile, related_name="pawprints",
        verbose_name=_("Tile"), through="PawprintXModel"
    )
    # raw pawprint file, stored via pwprints_upload
    file = models.FileField(
        upload_to=pwprints_upload, verbose_name=_("File"),
        storage=storages.OverwriteStorage()
    )
    name = models.CharField(
        max_length=255, verbose_name=_("Name"), unique=True
    )
    # observation epoch (Modified Julian Date)
    mjd = models.FloatField(verbose_name=_("MJD"), null=True, default=None)
    has_sources = models.BooleanField(default=False)

    objects = SciManager()

    def sync_resume(self):
        """Return (tile name, sync flag) pairs for every linked tile."""
        return self.pawprintxmodel_set.all().values_list("tile__name", "sync")

    def rescan_sources(self):
        """(Re)load every source of this pawprint's file into the DB.

        NOTE(review): ``has_sources`` is never set to True here, so the
        guard relies on callers updating the flag -- confirm.
        """
        if self.has_sources:
            raise Exception("This pawprint already has sources")
        if self.sources.exists():
            self.sources.all().delete()
        logger.info(u"Loading sources...")
        with open(self.file.path, "rb") as fp:
            for order, src in enumerate(match.read_pawprint(fp)):
                # PawprintSource is declared elsewhere in this module;
                # src is the positional record produced by match.read_pawprint.
                srcmodel = PawprintSource(
                    pawprint=self, order=order,
                    ra_deg=src[0], dec_deg=src[1],
                    ra_h=src[2], ra_m=src[3], ra_s=src[4],
                    dec_d=src[5], dec_m=src[6], dec_s=src[7],
                    pwp_x=src[8], pwp_y=src[9], mag=src[10], mag_err=src[11],
                    chip_nro=src[12], stel_cls=src[13],
                    elip=src[14], pos_ang=src[15],
                    # heliocentric JD for this source's coordinates
                    hjd=pyasl.helio_jd(self.mjd, src[0], src[1])
                )
                str_order = str(order)[1:]
                # log progress only at "round" orders: every digit after the
                # first is zero (0-9, 10, 20, ..., 100, 200, ...)
                if len(str_order) == str_order.count("0"):
                    logger.info("Pawprint Source number: {}".format(order))
                srcmodel.save()

    def __unicode__(self):
        tilenames = ",".join(self.tile.all().values_list("name", flat=True))
        return u"({}).{}".format(tilenames, self.name)
class PawprintXModel(SaveTheChange, models.Model):
    """Through model linking a Pawprint to a Tile, plus the matching logic."""

    class Meta:
        unique_together = ["tile", "pawprint"]

    tile = models.ForeignKey(Tile)
    pawprint = models.ForeignKey(Pawprint)
    # True once Match rows exist for this (tile, pawprint) pair
    matched = models.BooleanField(verbose_name=_("Matched"), default=False)
    objects = SciManager()

    def ms_dataset(self):
        """Return (and cache) the tile's master sources values queryset."""
        # BUG FIX: the original checked ``hasattr(self, "__ms_dataset")``
        # while assigning ``self.__ms_dataset``; name mangling inside the
        # class body made the check always fail, so the cache never hit.
        # A single leading underscore avoids mangling.
        if not hasattr(self, "_ms_dataset"):
            self._ms_dataset = self.tile.sources.values_list(
                "id", "ra_k", "dec_k").order_by("order")
        return self._ms_dataset

    def data_ms(self):
        """Master sources as a numpy record array (pk, ra_k, dec_k)."""
        dataset = self.ms_dataset().to_ndarray().T
        return np.core.records.fromarrays(
            dataset, dtype=[("pk", 'i4'), ("ra_k", 'f4'), ("dec_k", 'f4')])

    def pwp_dataset(self):
        """Return (and cache) the pawprint's sources values queryset."""
        # same name-mangling cache fix as ms_dataset above
        if not hasattr(self, "_pwp_dataset"):
            self._pwp_dataset = self.pawprint.sources.values_list(
                "id", "ra_deg", "dec_deg").order_by("order")
        return self._pwp_dataset

    def data_pwp(self):
        """Pawprint sources as a numpy record array (pk, ra_deg, dec_deg)."""
        dataset = self.pwp_dataset().to_ndarray().T
        return np.core.records.fromarrays(
            dataset, dtype=[("pk", 'i4'), ("ra_deg", 'f4'), ("dec_deg", 'f4')])

    def match(self, data_ms=None, data_pwp=None):
        """Cross-match the tile's sources against the pawprint's sources.

        Any previous Match rows for this pair are deleted first.  Raises if
        ``self.matched`` is already set.
        """
        if self.matched:
            raise Exception("This pawprint is already matched with this tile")
        old_matches = Match.objects.filter(
            master_src__tile=self.tile, pawprint_src__pawprint=self.pawprint
        )
        if old_matches.exists():
            old_matches.delete()
        del old_matches
        data_ms = data_ms if data_ms is not None else self.data_ms()
        data_pwp = data_pwp if data_pwp is not None else self.data_pwp()
        matches = match.create_match(data_ms, data_pwp)
        logger.info(u"'{}' matches".format(len(matches)))
        logger.info(u"Writing matches")
        for order_ms, order_pwp in matches:
            master_src = self.tile.sources.get(pk=data_ms[order_ms]["pk"])
            pawprint_src = self.pawprint.sources.get(
                pk=data_pwp[order_pwp]["pk"])
            Match.objects.create(
                master_src=master_src, pawprint_src=pawprint_src
            )
#==============================================================================
# PAWPRINT SOURCE
#==============================================================================
class PawprintSource(SaveTheChange, models.Model):
    """A single detected source inside a pawprint catalogue."""

    class Meta:
        verbose_name = _("Pawprint Source")
        verbose_name_plural = _("Pawprints Sources")
        unique_together = ["pawprint", "order"]

    pawprint = models.ForeignKey(
        Pawprint, related_name="sources", verbose_name=_("Pawprint")
    )
    # position of the source inside the catalogue file
    order = models.IntegerField(verbose_name=_("Order"))
    # right ascension: decimal degrees plus sexagesimal components
    ra_deg = models.FloatField(verbose_name=_("RA Deg"))
    ra_h = models.IntegerField(verbose_name=_("RA H"))
    ra_m = models.IntegerField(verbose_name=_("RA m"))
    ra_s = models.FloatField(verbose_name=_("RA s"))
    # declination: decimal degrees plus sexagesimal components
    dec_deg = models.FloatField(verbose_name=_("Dec Deg"))
    dec_d = models.IntegerField(verbose_name=_("Dec D"))
    dec_m = models.IntegerField(verbose_name=_("Dec m"))
    dec_s = models.FloatField(verbose_name=_("Dec s"))
    # pixel coordinates on the pawprint image
    pwp_x = models.FloatField(verbose_name=_("Pawprint X"))
    pwp_y = models.FloatField(verbose_name=_("Pawprint Y"))
    mag = models.FloatField(verbose_name=_("Mag"))
    mag_err = models.FloatField(verbose_name=_("Mag Err"))
    chip_nro = models.IntegerField(verbose_name=_("Chip Nro"))
    stel_cls = models.IntegerField(verbose_name=_("Stellar Class"))
    elip = models.FloatField(verbose_name=_("Elip"))
    pos_ang = models.FloatField(verbose_name=_("Pos. Ang."))
    # heliocentric JD, computed at load time in Pawprint.rescan_sources
    hjd = models.FloatField(verbose_name=_("HJD"), null=True, default=None)
    objects = SciManager()

    def __unicode__(self):
        return u"{}[{}]".format(self.pawprint, self.order)
#==============================================================================
# MATCH
#==============================================================================
class Match(SaveTheChange, models.Model):
    """A cross-match between a MasterSource and a PawprintSource.

    ``save`` derives all the position statistics from the two endpoints, so
    callers only need to provide the pair itself.
    """

    class Meta:
        verbose_name = _("Match")
        verbose_name_plural = _("Matchs")
        unique_together = ("master_src", "pawprint_src")

    # BUG FIX: the verbose_names of master_src/pawprint_src were swapped
    # (copy-paste) in the original.
    master_src = models.ForeignKey(
        MasterSource, related_name="matches", verbose_name=_("Master Source")
    )
    pawprint_src = models.ForeignKey(
        PawprintSource, related_name="matches",
        verbose_name=_("Pawprint Source")
    )
    ra_avg = models.FloatField(verbose_name="Ra Degree Average")
    dec_avg = models.FloatField(verbose_name="Dec Degree Average")
    ra_std = models.FloatField(verbose_name="Ra Degree Std")
    dec_std = models.FloatField(verbose_name="Dec Degree Std")
    ra_range = models.FloatField(verbose_name="Ra Range")
    # BUG FIX: was mislabelled "Ra Range" in the original
    dec_range = models.FloatField(verbose_name="Dec Range")
    euc = models.FloatField(verbose_name="Euclidean Distance")
    objects = SciManager()

    def __unicode__(self):
        return u"{} -> {}".format(
            unicode(self.master_src), unicode(self.pawprint_src))

    def save(self, *args, **kwargs):
        """Populate the derived position statistics, then save normally."""
        m_ra, m_dec = self.master_src.ra_k, self.master_src.dec_k
        p_ra, p_dec = self.pawprint_src.ra_deg, self.pawprint_src.dec_deg
        ras, decs = np.array([m_ra, p_ra]), np.array([m_dec, p_dec])
        self.ra_avg = np.average(ras)
        self.dec_avg = np.average(decs)
        self.ra_std = np.std(ras)
        self.dec_std = np.std(decs)
        self.ra_range = np.abs(np.subtract(*ras))
        self.dec_range = np.abs(np.subtract(*decs))
        # straight-line distance between the two sky positions (degrees)
        self.euc = np.linalg.norm(np.array([m_ra, m_dec]) -
                                  np.array([p_ra, p_dec]))
        super(Match, self).save(*args, **kwargs)
#==============================================================================
# CLDO
#==============================================================================
class CldoD001Source(SaveTheChange, models.Model):
    """Bridge between a CLDO ``D001Object`` (stored by pk) and a MasterSource."""

    _orig_pk = models.IntegerField(unique=True, db_column="cldo_d001_object")
    lcurves_master_source = models.OneToOneField(
        MasterSource, related_name="+", null=True, blank=True
    )
    objects = SciManager()

    @property
    def d001_object(self):
        """Lazily fetch (and cache) the linked D001Object."""
        # BUG FIX: the original tested ``hasattr(self, "__d001_object")``
        # while storing ``self.__d001_object``; name mangling made the test
        # always fail, so the cache -- and any value set via the setter --
        # was ignored and the DB was queried every time.
        if not hasattr(self, "_d001_object"):
            from carpyncho.cldo.models import D001Object
            self._d001_object = D001Object.objects.get(pk=self._orig_pk)
        return self._d001_object

    @d001_object.setter
    def d001_object(self, v):
        from carpyncho.cldo.models import D001Object
        if not isinstance(v, D001Object):
            raise TypeError("Must be D001Object instance")
        self._d001_object = v
        self._orig_pk = v.pk

    def __unicode__(self):
        if self.lcurves_master_source:
            return u"{}->{}".format(
                self.d001_object.obj_id, self.lcurves_master_source
            )
        return unicode(self.d001_object.obj_id)
class CldoLCurvesPawprintMatch(SaveTheChange, models.Model):
    """Match between a CLDO D001 light-curve row and one PawprintSource."""

    class Meta:
        unique_together = ["pawprint_source", "_d001_lc_pk"]

    cldo_d001_source = models.ForeignKey(
        CldoD001Source, related_name="matches"
    )
    _d001_lc_pk = models.IntegerField(db_column="d001_lc", unique=True)
    pawprint_source = models.OneToOneField(PawprintSource, related_name="+")
    hjd_avg = models.FloatField()
    hjd_delta_avg = models.FloatField()

    @property
    def d001_lc(self):
        """Lazily fetch (and cache) the linked D001LC row."""
        # BUG FIX: ``hasattr(self, "__d001_lc")`` never matched the mangled
        # attribute written below, so the cache/setter value was ignored.
        if not hasattr(self, "_d001_lc"):
            from carpyncho.cldo.models import D001LC
            self._d001_lc = D001LC.objects.get(pk=self._d001_lc_pk)
        return self._d001_lc

    @d001_lc.setter
    def d001_lc(self, v):
        from carpyncho.cldo.models import D001LC
        if not isinstance(v, D001LC):
            raise TypeError("Must be D001LC instance")
        self._d001_lc = v
        self._d001_lc_pk = v.pk
#==============================================================================
# MAIN
#==============================================================================
if __name__ == "__main__":
    # running the models module directly just prints its documentation
    print(__doc__)
| StarcoderdataPython |
1969708 | <reponame>BSlience/fastweb
# coding:utf8
import logging
logging.basicConfig(level=logging.INFO)
import sys
import glob
sys.path.append('gen-py.tornado')
from HelloService import HelloService
from HelloService.ttypes import *
from thrift import TTornado
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TMultiplexedProtocol
from tornado import gen
from tornado import ioloop
@gen.coroutine
def communicate():
    """Open a Tornado thrift stream transport, call sayHello once, close."""
    # create client
    transport = TTornado.TTornadoStreamTransport('localhost', 9999)
    # open the transport, bail on error
    try:
        yield transport.open()
        print('Transport is opened')
    except TTransport.TTransportException as ex:
        logging.error(ex)
        raise gen.Return()
    protocol = TBinaryProtocol.TBinaryProtocolFactory()
    #pfactory = TMultiplexedProtocol.TMultiplexedProtocol(protocol, 'hello')
    client = HelloService.Client(transport, protocol)
    # ping
    yield client.sayHello()
    print("ping()")
    client._transport.close()
    raise gen.Return()
def main():
    """Micro-benchmark: run the communicate() coroutine 10000 times and
    print the total wall-clock seconds."""
    # create an ioloop, do the above, then stop
    import time
    start = time.time()
    for _ in range(10000):
        ioloop.IOLoop.current().run_sync(communicate)
    end = time.time()
    print((end - start))
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1853267 | <reponame>ulibn/BlueXolo<gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-17 22:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: order collections by name and repoint
    Collection.product at Products.Source."""

    dependencies = [
        ('Testings', '0006_auto_20171006_0421'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='collection',
            options={'ordering': ['name'], 'verbose_name': 'collection', 'verbose_name_plural': 'collections'},
        ),
        migrations.AlterField(
            model_name='collection',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='Products.Source'),
        ),
    ]
| StarcoderdataPython |
1627843 | # -*- coding: utf-8 -*-
"""
reV command line interface (CLI).
"""
import click
import logging
from reV.batch.cli_batch import from_config as run_batch_from_config
from reV.batch.cli_batch import valid_config_keys as batch_keys
from reV.handlers.cli_collect import from_config as run_collect_from_config
from reV.handlers.cli_collect import valid_config_keys as collect_keys
from reV.handlers.cli_multi_year import from_config as run_my_from_config
from reV.handlers.cli_multi_year import valid_config_keys as my_keys
from reV.econ.cli_econ import from_config as run_econ_from_config
from reV.econ.cli_econ import valid_config_keys as econ_keys
from reV.generation.cli_gen import from_config as run_gen_from_config
from reV.generation.cli_gen import valid_config_keys as gen_keys
from reV.offshore.cli_offshore import from_config as run_offshore_from_config
from reV.offshore.cli_offshore import valid_config_keys as offshore_keys
from reV.pipeline.cli_pipeline import from_config as run_pipeline_from_config
from reV.pipeline.cli_pipeline import valid_config_keys as pipeline_keys
from reV.rep_profiles.cli_rep_profiles import from_config as run_rp_from_config
from reV.rep_profiles.cli_rep_profiles import (valid_config_keys
as rep_profiles_keys)
from reV.supply_curve.cli_sc_aggregation import (from_config
as run_sc_agg_from_config)
from reV.supply_curve.cli_sc_aggregation import (valid_config_keys
as sc_agg_keys)
from reV.supply_curve.cli_supply_curve import from_config as run_sc_from_config
from reV.supply_curve.cli_supply_curve import valid_config_keys as sc_keys
from reV.qa_qc.cli_qa_qc import from_config as run_qa_qc_from_config
from reV.qa_qc.cli_qa_qc import valid_config_keys as qa_qc_keys
from reV import __version__
from rex.utilities.cli_dtypes import STR
logger = logging.getLogger(__name__)
@click.group()
@click.version_option(version=__version__)
@click.option('--name', '-n', default='reV', type=STR,
              help='Job name. Default is "reV".')
@click.option('--config_file', '-c',
              required=True, type=click.Path(exists=True),
              help='reV configuration file json for a single module.')
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging. Default is not verbose.')
@click.pass_context
def main(ctx, name, config_file, verbose):
    """reV command line interface."""
    # Stash the shared options on the click context so every sub-group
    # below can retrieve them via ctx.obj.
    ctx.ensure_object(dict)
    ctx.obj['NAME'] = name
    ctx.obj['CONFIG_FILE'] = config_file
    ctx.obj['VERBOSE'] = verbose
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def generation(ctx, verbose):
    """Generation analysis (pv, csp, windpower, etc...)."""
    # Invoked bare (no subcommand): run the module from the config file
    # that ``main`` stashed on the context.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_gen_from_config, config_file=config_file,
                   verbose=verbose)
@generation.command()
@click.pass_context
def valid_generation_keys(ctx):
    """
    Valid Generation config keys
    """
    # delegate to the generation module's key-listing command
    ctx.invoke(gen_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def econ(ctx, verbose):
    """Econ analysis (lcoe, single-owner, etc...)."""
    # Invoked bare: run the econ module from the shared config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_econ_from_config, config_file=config_file,
                   verbose=verbose)
@econ.command()
@click.pass_context
def valid_econ_keys(ctx):
    """
    Valid Econ config keys
    """
    # delegate to the econ module's key-listing command
    ctx.invoke(econ_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def offshore(ctx, verbose):
    """Offshore gen/econ aggregation with NRWAL."""
    # Invoked bare: run the offshore module from the shared config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_offshore_from_config, config_file=config_file,
                   verbose=verbose)
@offshore.command()
@click.pass_context
def valid_offshore_keys(ctx):
    """
    Valid offshore config keys
    """
    # delegate to the offshore module's key-listing command
    ctx.invoke(offshore_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def collect(ctx, verbose):
    """Collect files from a job run on multiple nodes."""
    # Invoked bare: run the collect module from the shared config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_collect_from_config, config_file=config_file,
                   verbose=verbose)
@collect.command()
@click.pass_context
def valid_collect_keys(ctx):
    """
    Valid Collect config keys
    """
    # delegate to the collect module's key-listing command
    ctx.invoke(collect_keys)
@main.group(invoke_without_command=True)
@click.option('--cancel', is_flag=True,
              help='Flag to cancel all jobs associated with a given pipeline.')
@click.option('--monitor', is_flag=True,
              help='Flag to monitor pipeline jobs continuously. '
              'Default is not to monitor (kick off jobs and exit).')
@click.option('--background', is_flag=True,
              help='Flag to monitor pipeline jobs continuously '
              'in the background using the nohup command. Note that the '
              'stdout/stderr will not be captured, but you can set a '
              'pipeline "log_file" to capture logs.')
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def pipeline(ctx, cancel, monitor, background, verbose):
    """Execute multiple steps in a reV analysis pipeline."""
    # Invoked bare: run (or cancel/monitor) the pipeline described by the
    # config file stashed on the context by ``main``.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_pipeline_from_config, config_file=config_file,
                   cancel=cancel, monitor=monitor, background=background,
                   verbose=verbose)
@pipeline.command()
@click.pass_context
def valid_pipeline_keys(ctx):
    """
    Valid Pipeline config keys
    """
    # delegate to the pipeline module's key-listing command
    ctx.invoke(pipeline_keys)
@main.group(invoke_without_command=True)
@click.option('--dry-run', is_flag=True,
              help='Flag to do a dry run (make batch dirs without running).')
@click.option('--cancel', is_flag=True,
              help='Flag to cancel all jobs associated with a given batch.')
@click.option('--delete', is_flag=True,
              help='Flag to delete all batch job sub directories associated '
              'with the batch_jobs.csv in the current batch config directory.')
@click.option('--monitor-background', is_flag=True,
              help='Flag to monitor all batch pipelines continuously '
              'in the background using the nohup command. Note that the '
              'stdout/stderr will not be captured, but you can set a '
              'pipeline "log_file" to capture logs.')
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def batch(ctx, dry_run, cancel, delete, monitor_background, verbose):
    """Execute a reV batch: run/cancel/delete the set of batch job
    pipelines described by the batch config file."""
    # NOTE: the original docstring was copy-pasted from ``pipeline``; the
    # option help texts above describe the actual batch semantics.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_batch_from_config, config_file=config_file,
                   dry_run=dry_run, cancel=cancel, delete=delete,
                   monitor_background=monitor_background,
                   verbose=verbose)
@batch.command()
@click.pass_context
def valid_batch_keys(ctx):
    """
    Valid Batch config keys
    """
    # delegate to the batch module's key-listing command
    ctx.invoke(batch_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def multi_year(ctx, verbose):
    """Run reV multi year using the config file."""
    # Invoked bare: run the multi-year module from the shared config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_my_from_config, config_file=config_file,
                   verbose=verbose)
@multi_year.command()
@click.pass_context
def valid_multi_year_keys(ctx):
    """
    Valid Multi Year config keys
    """
    # delegate to the multi-year module's key-listing command
    ctx.invoke(my_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def supply_curve_aggregation(ctx, verbose):
    """Run reV supply curve aggregation using the config file."""
    # Invoked bare: run the SC aggregation module from the config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_sc_agg_from_config, config_file=config_file,
                   verbose=verbose)
@supply_curve_aggregation.command()
@click.pass_context
def valid_supply_curve_aggregation_keys(ctx):
    """
    Valid Supply Curve Aggregation config keys
    """
    # delegate to the SC aggregation module's key-listing command
    ctx.invoke(sc_agg_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def supply_curve(ctx, verbose):
    """Run reV supply curve using the config file."""
    # Invoked bare: run the supply curve module from the config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_sc_from_config, config_file=config_file,
                   verbose=verbose)
@supply_curve.command()
@click.pass_context
def valid_supply_curve_keys(ctx):
    """
    Valid Supply Curve config keys
    """
    # delegate to the supply curve module's key-listing command
    ctx.invoke(sc_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def rep_profiles(ctx, verbose):
    """Run reV representative profiles using the config file."""
    # Invoked bare: run the representative profiles module from the config.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_rp_from_config, config_file=config_file,
                   verbose=verbose)
@rep_profiles.command()
@click.pass_context
def valid_rep_profiles_keys(ctx):
    """
    Valid Representative Profiles config keys
    """
    # delegate to the rep profiles module's key-listing command
    ctx.invoke(rep_profiles_keys)
@main.group(invoke_without_command=True)
@click.option('-v', '--verbose', is_flag=True,
              help='Flag to turn on debug logging.')
@click.pass_context
def qa_qc(ctx, verbose):
    """Run reV QA/QC using the config file."""
    # Invoked bare: run the QA/QC module from the shared config file.
    if ctx.invoked_subcommand is None:
        config_file = ctx.obj['CONFIG_FILE']
        verbose = any([verbose, ctx.obj['VERBOSE']])
        ctx.invoke(run_qa_qc_from_config, config_file=config_file,
                   verbose=verbose)
@qa_qc.command()
@click.pass_context
def valid_qa_qc_keys(ctx):
    """
    Valid QA/QC config keys
    """
    # delegate to the QA/QC module's key-listing command
    ctx.invoke(qa_qc_keys)
if __name__ == '__main__':
    try:
        main(obj={})
    except Exception:
        # log the full traceback before letting the CLI exit non-zero
        logger.exception('Error running reV CLI')
        raise
| StarcoderdataPython |
11075 | from shop.forms import UserForm
from django.views import generic
from django.urls import reverse_lazy
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import auth
from .models import Product, Contact, Category, Product, Order, OrderItem
from django.contrib import messages
from django.views.decorators.csrf import ensure_csrf_cookie
from math import ceil
import json
from shop.models import User
from django.views.decorators.csrf import csrf_exempt
# from PayTm import checksum
# Create your views here.
from django.http import HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
MERCHANT_KEY = 'Your-Merchant-Key-Here'
def index(request, category_slug=None):
    """Product listing page, optionally filtered by category slug,
    paginated 6 products per page."""
    category = None
    categories = Category.objects.all()
    products = Product.objects.filter(available=True)
    if category_slug:
        category = get_object_or_404(Category, slug=category_slug)
        products = products.filter(category=category)
    page = request.GET.get('page')
    paginator = Paginator(products, 6)
    try:
        products = paginator.page(page)
    except PageNotAnInteger:
        products = paginator.page(1)
    except EmptyPage:
        # out-of-range pages fall back to the first page
        products = paginator.page(1)
    # NOTE(review): both branches below render the same context (the
    # wishlist lookup is commented out), and request.user is set for every
    # request; if "logged in" was intended, this should probably test
    # request.user.is_authenticated -- confirm.
    if request.user:
        print(request.user)
        pass
        # wishlist = Wishlist.objects.filter(user=request.user)
        return render(
            request,
            'shop/index.html',
            {
                'category': category,
                'categories': categories,
                'products': products,
                # 'wishlist': wishlist
            }
        )
    else:
        return render(
            request,
            'shop/index.html',
            {
                'category': category,
                'categories': categories,
                'products': products,
            }
        )
def searchMatch(query, item):
    """Return True iff *query* (expected lowercase) occurs in the item's
    description or name, case-insensitively."""
    # idiom fix: return the boolean expression directly instead of
    # ``if ...: return True / else: return False``
    return query in item.description.lower() or query in item.name.lower()
def search(request):
    """Naive product search: scan every product per category, match the
    query against name/description, and group hits by category."""
    query = request.GET.get('search')
    allProds = []
    catprods = Product.objects.values('category', 'id')
    cats = {item['category'] for item in catprods}
    for cat in cats:
        prodtemp = Product.objects.filter(category=cat)
        prod = [item for item in prodtemp if searchMatch(query, item)]
        n = len(prod)
        # carousel slide count: ceil(n / 4) products per slide
        nSlides = n // 4 + ceil((n / 4) - (n // 4))
        if len(prod) != 0:
            allProds.append([prod, range(1, nSlides), nSlides])
    params = {
        'products': allProds,
        "msg": ""
    }
    # no hits, or query shorter than 4 characters: show a message instead
    if len(allProds) == 0 or len(query) < 4:
        params = {
            'msg': "Please make sure to enter relevant search query"
        }
    return render(request, 'shop/search.html', params)
def about(request):
    """Render the static about page."""
    return render(request, 'shop/about.html')
def contact(request):
    """Contact page: on POST, persist the message and show a thank-you."""
    thank = False
    if request.method == "POST":
        name = request.POST.get('name', '')
        email = request.POST.get('email', '')
        phone = request.POST.get('phone', '')
        desc = request.POST.get('desc', '')
        contact = Contact(name=name, email=email, phone=phone, desc=desc)
        contact.save()
        thank = True
    return render(request, 'shop/contact.html', {'thank': thank})
def tracker(request):
    """Order tracking endpoint.

    POST: look up an order by id + email and return a JSON payload with the
    status updates and order items.  GET: render the tracker page.
    """
    if request.method == "POST":
        orderId = request.POST.get('orderId', '')
        email = request.POST.get('email', '')
        try:
            order = Order.objects.filter(order_id=orderId, email=email)
            if len(order) > 0:
                # NOTE(review): OrderUpdate is not imported in this module,
                # so this line raises NameError, which is caught below and
                # reported as {"status":"error"} -- confirm the import.
                update = OrderUpdate.objects.filter(order_id=orderId)
                updates = []
                for item in update:
                    updates.append(
                        {
                            'text': item.update_desc,
                            'time': item.timestamp
                        }
                    )
                response = json.dumps(
                    {
                        "status": "success",
                        "updates": updates,
                        "itemsJson": order[0].items_json
                    },
                    default=str
                )
                return HttpResponse(response)
            else:
                return HttpResponse('{"status":"noitem"}')
        except Exception as e:
            return HttpResponse('{"status":"error"}')
    return render(request, 'shop/tracker.html')
def productView(request, myid):
    """Render the detail page for a single product.

    Robustness fix: respond with HTTP 404 for an unknown id instead of
    crashing with IndexError (the original did ``filter(id=myid)[0]``).
    ``get_object_or_404`` is already imported at the top of this module.
    """
    product = get_object_or_404(Product, id=myid)
    return render(request, 'shop/prodView.html', {'product': product})
def checkout(request):
    """Checkout page: on POST, persist an Order + OrderItem and render a
    thank-you page; on GET just render the empty checkout form."""
    if request.method == "POST":
        items_json = request.POST.get('itemsJson', '')
        name = request.POST.get('name', '')
        amount = request.POST.get('amount', '')
        email = request.POST.get('email', '')
        address = request.POST.get('address1', '') + \
            " " + request.POST.get('address2', '')
        city = request.POST.get('city', '')
        state = request.POST.get('state', '')
        zip_code = request.POST.get('zip_code', '')
        phone = request.POST.get('phone', '')
        order = Order(
            name=name, email=email,
            address=address,
            state=state,
            # zip_code=zip_code,
            # phone=phone,
            # amount=amount
        )
        order.save()
        # single hard-coded product (product_id=1); items_json is unused
        order_item = OrderItem(
            order=order,
            price=amount,
            product_id=1,
        )
        order_item.save()
        thank = True
        # id = order.order_id
        return render(request, 'shop/checkout.html', {'thank':thank, 'id': order.id})
        # NOTE(review): everything below this return is unreachable --
        # leftover scaffolding for the disabled Paytm integration.
        # Request paytm to transfer the amount to your account after payment by user
        param_dict = {
            'MID': 'Your-Merchant-Id-Here',
            'ORDER_ID': str(order.order_id),
            'TXN_AMOUNT': str(amount),
            'CUST_ID': email,
            'INDUSTRY_TYPE_ID': 'Retail',
            'WEBSITE': 'WEBSTAGING',
            'CHANNEL_ID': 'WEB',
            'CALLBACK_URL': 'http://127.0.0.1:8000/handlerequest/',
        }
        # param_dict['CHECKSUMHASH'] = checksum.generate_checksum(param_dict, MERCHANT_KEY)
        # return render(request, '/paytm.html', {'param_dict': param_dict})
    return render(request, 'shop/checkout.html')
def signup(request):
    """Register a new user.

    Vendors are created as staff and redirected to the admin login; other
    users are plain customers redirected to the normal login.  On validation
    failure the *bound* form is re-rendered so its errors reach the template.
    """
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            # commit=False: don't hit the database until is_staff is set
            user = form.save(commit=False)
            if form.cleaned_data['type'] == 'Vendor':
                user.is_staff = True
                user.save()
                return redirect("/admin/login")
            user.is_staff = False
            user.save()
            return redirect("/login")
        # BUG FIX: the original re-instantiated an empty UserForm here, so
        # the validation errors were discarded and never shown to the user.
        # (Debug ``print`` calls removed as well.)
        return render(
            request,
            'shop/signup.html', {
                'form': form,
                'errors': form.errors
            })
    form = UserForm()
    return render(
        request,
        'shop/signup.html', {
            'form': form
        })
@csrf_exempt
def handlerequest(request):
    """Paytm payment callback: echo the POSTed fields to the status page.

    Checksum verification is currently commented out, so the payment result
    shown is NOT authenticated.
    """
    # paytm will send you post request here
    form = request.POST
    response_dict = {}
    for i in form.keys():
        response_dict[i] = form[i]
        if i == 'CHECKSUMHASH':
            checksum = form[i]
    # verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum)
    # if verify:
    #     if response_dict['RESPCODE'] == '01':
    #         print('order successful')
    #     else:
    #         print('order was not successful because' + response_dict['RESPMSG'])
    return render(request, 'shop/paymentstatus.html', {'response': response_dict})
def vendor(request):
    """Render the vendor (restaurant) profile page for the logged-in user."""
    # removed unused local ``menu = {}`` from the original
    user = User.objects.get(id=request.user.id)
    return render(request, 'shop/restprofile.html', {'user': user})
from django.views.generic.edit import UpdateView
class UserUpdate(UpdateView):
    # Generic edit view for User: renders <template>_update_form.html and
    # saves only the listed fields.
    model = User
    fields = ['name','email','first_name','last_name']
    template_name_suffix = '_update_form'
384996 | #!/usr/bin/env python
"""
Bit-accuracy test between 2 results folders
"""
import os
import filecmp
import fnmatch
import json
from pathlib import Path
import click
from .config import subproject, config, commit_branch
def cmpfiles(dir_1=Path(), dir_2=Path(), patterns=None, ignore=None):
    """Bit-accuracy comparison of two directory trees.

    Recursively compares files under *dir_1* matching any of *patterns*
    (fnmatch globs, default ``['*']``) against the same relative path under
    *dir_2*, skipping basenames that match any *ignore* glob.

    Returns a dict of lists of relative ``Path`` objects:
    ``match`` (identical content), ``mismatch`` (different content),
    ``only_in_1`` (no counterpart in *dir_2*), ``errors`` (I/O errors).
    Almost like https://docs.python.org/3/library/filecmp.html
    """
    if not patterns:
        patterns = ['*']
    if not ignore:
        ignore = []
    mismatch = []   # same relative path, different content
    match = []      # same relative path, same content
    only_in_1 = []  # exists in dir_1 but not in dir_2
    errors = []     # errors accessing the files
    for pattern in patterns:
        for file_1 in dir_1.rglob(pattern):
            if not file_1.is_file():
                continue
            if any(fnmatch.fnmatch(file_1.name, i) for i in ignore):
                continue
            rel_path = file_1.relative_to(dir_1)
            file_2 = dir_2 / rel_path
            if file_2.is_file():
                try:
                    # shallow=False: compare contents, not just os.stat
                    is_same = filecmp.cmp(str(file_1), str(file_2),
                                          shallow=False)
                # BUG FIX: was a bare ``except``, which also swallowed
                # KeyboardInterrupt/SystemExit; OSError covers I/O failures.
                except OSError:
                    errors.append(rel_path)
                else:
                    if is_same:
                        match.append(rel_path)
                    else:
                        mismatch.append(rel_path)
            else:
                only_in_1.append(rel_path)
    return {
        "mismatch": mismatch,
        "match": match,
        "only_in_1": only_in_1,
        "errors": errors,
    }
def cmpmanifests(manifest_path_1, manifest_path_2, patterns=None, ignore=None):
    """Bit-accuracy comparison of two manifest json files.

    Each manifest maps ``{filepath: {"md5": ..., "size_st": ...}}``; files
    are compared by md5.  *patterns* are fnmatch globs matched with a
    leading ``*`` prepended (so a suffix like ``.csv`` works); *ignore*
    globs filter on basenames.

    Returns a dict of lists of ``Path`` objects: ``match``, ``mismatch``,
    ``only_in_1`` (in manifest 1 but not in manifest 2) and ``errors``
    (always empty here; kept for symmetry with :func:`cmpfiles`).
    """
    with manifest_path_1.open() as f:
        manifest_1 = json.load(f)
    with manifest_path_2.open() as f:
        manifest_2 = json.load(f)
    if not patterns:
        patterns = ['*']
    if not ignore:
        ignore = []
    mismatch = set()   # present in both, different md5
    match = set()      # present in both, same md5
    only_in_1 = set()  # exists in manifest 1 but not in manifest 2
    errors = set()     # kept for API symmetry with cmpfiles
    for pattern in patterns:
        pattern = f'*{pattern}'
        for file_1_str, meta_1 in manifest_1.items():
            if not fnmatch.fnmatch(file_1_str, pattern):
                continue
            file_1 = Path(file_1_str)
            if any(fnmatch.fnmatch(file_1.name, i) for i in ignore):
                continue
            if file_1_str in manifest_2:
                if meta_1['md5'] == manifest_2[file_1_str]['md5']:
                    match.add(file_1)
                else:
                    mismatch.add(file_1)
            else:
                only_in_1.add(file_1)
    return {
        "mismatch": list(mismatch),
        "match": list(match),
        "only_in_1": list(only_in_1),
        "errors": list(errors),
    }
def is_bit_accurate(commit_rootproject_dir, reference_rootproject_dir, output_directories, reference_platform=None):
    """Compares the results of the current output directory versus a reference.

    Returns True when every compared output is bit-accurate (or when there
    was nothing to compare, which only emits a warning).
    """
    from .config import config
    patterns = config.get("bit_accuracy", {}).get("patterns", [])
    if not (isinstance(patterns, list) or isinstance(patterns, tuple)):
        patterns = [patterns]
    if not patterns:
        patterns = ['*']
    patterns.append('manifest.inputs.json')
    ignore = config.get("bit_accuracy", {}).get("ignore", [])
    if not (isinstance(ignore, list) or isinstance(ignore, tuple)):
        ignore = [ignore]
    ignore.append('log.txt')      # contains timestamps
    ignore.append('log.lsf.txt')  # contains timestamps
    ignore.append('metrics.json')  # contains measured run time
    ignore.append('.nfs000*')     # NFS temporary files
    if not len(output_directories):
        click.secho("WARNING: nothing was compared", fg='yellow')
        return True
    comparaisons = {'match': [], 'mismatch': [], 'errors': []}
    for output_directory in output_directories:
        dir_1 = reference_rootproject_dir / output_directory
        dir_2 = commit_rootproject_dir / output_directory
        # it's ugly and fragile...
        if reference_platform:
            from .config import platform
            dir_2 = Path(str(dir_2).replace(platform, reference_platform))
        # prefer the cheap manifest-based comparison when both sides have one
        if (dir_1 / 'manifest.outputs.json').exists() and (dir_2 / 'manifest.outputs.json').exists():
            comparaison = cmpmanifests(
                manifest_path_1=dir_1 / 'manifest.outputs.json',
                manifest_path_2=dir_2 / 'manifest.outputs.json',
                patterns=patterns,
                ignore=ignore,
            )
            comparaisons['match'].extend(output_directory / p for p in comparaison['match'])
            comparaisons['mismatch'].extend(output_directory / p for p in comparaison['mismatch'])
        else:
            comparaison = cmpfiles(
                dir_1=dir_1,
                dir_2=dir_2,
                patterns=patterns,
                ignore=ignore,
            )
            comparaisons['match'].extend(output_directory / p for p in comparaison['match'])
            comparaisons['mismatch'].extend(output_directory / p for p in comparaison['mismatch'])
            comparaisons['errors'].extend(output_directory / p for p in comparaison['errors'])
    nothing_was_compared = not (len(comparaisons['match']) + len(comparaisons['mismatch']) + len(comparaisons['errors']))
    if nothing_was_compared:
        for o in output_directories:
            click.echo(click.style(str(o), fg='yellow') + click.style(' (warning: no files were found to compare)', fg='yellow', dim=True), err=True)
    if len(comparaisons['errors']):
        click.secho("ERROR: while trying to read those files:", fg='red', bold=True)
        # BUG FIX: the original read comparaisons['error'] (missing "s"),
        # which raised KeyError whenever a read error was actually recorded.
        for p in comparaisons['errors']:
            click.secho(str(p), fg='red')
        return False
    if len(comparaisons['mismatch']):
        for o in output_directories:
            click.secho(str(o), fg='red', bold=True, err=True)
        click.secho(f"ERROR: mismatch for:", fg='red')
        for p in comparaisons['mismatch']:
            click.secho(f'  {p}', fg='red', dim=True)
        return False
    bit_accurate = not len(comparaisons['mismatch'])
    if bit_accurate and not nothing_was_compared:
        for o in output_directories:
            click.secho(str(o), fg='green', err=True)
    return bit_accurate
| StarcoderdataPython |
1808180 | """
BED comes in a handful of flavors: BED3, BED6, BED12, and BED12+.
The 12 defined columns are:
1. ``chrom``: Sequence.
2. ``start``: 0-based start.
3. ``end``: 0-based exclusive end.
4. ``name``: A name.
5. ``score``: A score. Should be an integer between 0 and 1000.
6. ``strand``: A string. Any of ``[+, -, .]``.
7. ``thickStart``: Translation start site.
8. ``thickEnd``: Translation end site.
9. ``itemRgb``: String encoded tuple for an RGB value to display.
10. ``blockCount``: Number of spliced blocks.
11. ``blockSizes``: Length of each block.
12. ``BlockStarts``: Start of each block relative to ``start``.
BED3 is the first 3 columns, BED6 is the first 6, and BED12 is the full thing.
BED12+ basically is just people tacking on extra self-defined columns.
These representations **cannot be present in the same file**. For this reason, we have separate subclasses
for each type that only return their own type.
"""
from dataclasses import dataclass, astuple
from typing import List
from inscripta.biocantor.location import Strand
@dataclass(frozen=True)
class RGB:
    """An immutable red/green/blue triple for the BED ``itemRgb`` column."""

    r: int = 0
    g: int = 0
    b: int = 0

    def __str__(self) -> str:
        # itemRgb is rendered as a comma-separated triple, e.g. "255,0,0".
        return ",".join(map(str, astuple(self)))
@dataclass
class BED3:
    """BED3 includes basic interval information; the simplest type of interval.

    Columns: ``chrom``, 0-based ``start``, 0-based exclusive ``end``.
    """

    chrom: str
    start: int
    end: int

    def __str__(self) -> str:
        # BUG FIX: BED is a tab-delimited format; this previously joined with
        # "," while the sibling BED6/BED12 classes correctly join with "\t".
        return "\t".join(str(col) for col in astuple(self))
@dataclass
class BED6(BED3):
    """BED6 includes name, score and strand information. Cannot be spliced."""

    name: str
    score: int
    strand: Strand

    def __str__(self) -> str:
        columns = (self.chrom, self.start, self.end, self.name, self.score,
                   self.strand.to_symbol())
        return "\t".join(str(column) for column in columns)
@dataclass
class BED12(BED6):
    """BED12 contains splicing and CDS information. It does not contain frame/phase information."""

    thick_start: int
    thick_end: int
    item_rgb: RGB
    block_count: int
    block_sizes: List[int]
    block_starts: List[int]

    def __str__(self) -> str:
        # The block size/start lists are comma-joined *within* the
        # tab-delimited row, per the BED12 specification.
        columns = [
            self.chrom,
            self.start,
            self.end,
            self.name,
            self.score,
            self.strand.to_symbol(),
            self.thick_start,
            self.thick_end,
            self.item_rgb,
            self.block_count,
            ",".join(str(size) for size in self.block_sizes),
            ",".join(str(start) for start in self.block_starts),
        ]
        return "\t".join(str(column) for column in columns)
| StarcoderdataPython |
1960790 | <gh_stars>1-10
from telethon import TelegramClient, events, functions
from telethon.tl.types import (
TypeInputChannel,
PeerChannel,
PeerUser,
ChannelParticipantCreator,
ChannelParticipantAdmin,
)
from telethon.tl.functions.channels import GetParticipantRequest
from pytgcalls import GroupCallFactory
from asyncio import sleep
from random import randint
import urllib
from requests import get
import asyncio
from telethon.utils import get_peer
from sqlite import VoiceChatDatabase
from dotenv import load_dotenv
from os import getenv
# Pull configuration from a local .env file into the process environment.
load_dotenv()
# Telegram API credentials (from my.telegram.org).
api_id = int(getenv("API_ID"))
api_hash = getenv("API_HASH")
GROUP_ID = int(getenv("GROUP_ID")) # GROUP you want to install the bot on
LOG_CHANNEL_ID = int(getenv(
    "LOG_CHANNEL_ID"
)) # Channel where the logs are going to be sent to
# Token of the helper bot used only to post log messages over HTTP.
LOGGER_BOT_TOKEN = getenv("LOGGER_BOT_TOKEN")
# Persistent per-user voice-chat mute/unmute state.
vc_db = VoiceChatDatabase(getenv("DB_FILENAME", "vc.db"))
def send_msg(chat_id, text):
    """Post *text* to *chat_id* through the logger bot's HTTP sendMessage API.

    Fire-and-forget: the HTTP response is not inspected.
    """
    base_url = f"https://api.telegram.org/bot{LOGGER_BOT_TOKEN}/sendMessage?"
    params = urllib.parse.urlencode({
        "parse_mode": "Markdown",
        "chat_id": chat_id,
        "text": text,
    })
    get(base_url + params)
def getFormattedMessage(user, participants):
    """Build the Markdown log message for a voice-chat participant event.

    The event tag is #LEFT / #JOINED / #GENERIC depending on the flags of
    ``participants[0]``. Markdown special characters in the user's name and
    username are escaped by first tagging them with the placeholder "🍔"
    (which cannot appear inside an f-string escape) and replacing the
    placeholder with a backslash at the end.
    """
    return f"""\
{
    "#LEFT" if participants[0].left else
    (
        "#JOINED" if participants[0].just_joined else
        (
            "#GENERIC"
        )
    )
}:
**ID:** `{user.id}`
**Name:** [{
    ((user.first_name if user.first_name is not None else "") + (user.last_name if user.last_name is not None else "")).replace('*', '🍔*').replace('_', '🍔_').replace('`', '🍔`')
}](tg://user?id={user.id})
**Username:** {
    ("@"+user.username.replace('*', '🍔*').replace('_', '🍔_').replace('`', '🍔`') if user.username is not None else "`None`")
}
""".replace(
        "🍔", "\\"
    )
def getFormattedMessageForMute(user, admin, reason, muted):
    """Build the Markdown log message for a mute/unmute action.

    *muted* selects the #MUTED / #UNMUTED tag. Markdown special characters
    in names/usernames are escaped via the "🍔" placeholder trick (see
    getFormattedMessage).
    """
    return f"""\
{
    "#MUTED" if muted else "#UNMUTED"
}:
**ID:** `{user.id}`
**Name:** [{
    ((user.first_name if user.first_name is not None else "") + (user.last_name if user.last_name is not None else "")).replace('*', '🍔*').replace('_', '🍔_').replace('`', '🍔`')
}](tg://user?id={user.id})
**Username:** {
    ("@"+user.username.replace('*', '🍔*').replace('_', '🍔_').replace('`', '🍔`') if user.username is not None else "`None`")
}
**Voice Admin:** [{
    ((admin.first_name if admin.first_name is not None else "") + (admin.last_name if admin.last_name is not None else "")).replace('*', '🍔*').replace('_', '🍔_').replace('`', '🍔`')
}](tg://user?id={admin.id}) ({
    ("@"+admin.username.replace('*', '🍔*').replace('_', '🍔_').replace('`', '🍔`') if admin.username is not None else "`None`")
})
**Reason:** {reason}
""".replace(
        "🍔", "\\"
    )
async def is_admin(group_id, user_id):
    """Return True if *user_id* is an admin or the creator of *group_id*."""
    participant = await client(
        GetParticipantRequest(channel=group_id, participant=user_id)
    )
    # isinstance() is the idiomatic (and subclass-safe) replacement for the
    # original `type(x) == T` comparisons.
    return isinstance(
        participant.participant,
        (ChannelParticipantAdmin, ChannelParticipantCreator),
    )
with TelegramClient(getenv("SESSION_NAME", "user"), api_id, api_hash) as client:

    @client.on(events.NewMessage(pattern="setup", outgoing=True))
    async def handler(event):
        """Warm Telethon's entity cache so peers in this session resolve."""
        await client.get_dialogs()
        await client.get_dialogs(folder=0)
        await client.get_dialogs(folder=1)

    @client.on(events.NewMessage(pattern=r"^[!/]vmute (\d+) ?(.+)?$"))
    async def handler(event):
        """Mark a user as voice-muted in the database and log the action."""
        if not await is_admin(GROUP_ID, event.message.from_id.user_id):
            return await event.reply("Only admins can execute the commands")
        target_id = int(event.pattern_match.group(1))
        reason = str(event.pattern_match.group(2))
        if vc_db.is_user_muted(target_id):
            return await event.reply("User is already muted")
        try:
            admin = (
                await client(
                    functions.users.GetFullUserRequest(id=event.message.from_id.user_id)
                )
            ).user
        except ValueError:
            await event.reply("something went wrong locating admin.")
            return
        try:
            target_user = (
                await client(functions.users.GetFullUserRequest(id=target_id))
            ).user
        except ValueError:
            await event.reply("target not found.")
            return
        muted_successfully = vc_db.mute_user(
            target_user.id,
            target_user.first_name
            + (target_user.last_name if target_user.last_name is not None else ""),
            admin.id,
            (admin.last_name if admin.last_name is not None else ""),
            reason,
        )
        if muted_successfully:
            msg = getFormattedMessageForMute(target_user, admin, reason, True)
            send_msg(LOG_CHANNEL_ID, msg)
        else:
            await event.reply(
                f"something went wrong trying to mute {target_user.first_name + (target_user.last_name if target_user.last_name is not None else '')}"
            )

    @client.on(events.NewMessage(pattern=r"^[!/]vunmute (\d+) ?(.+)?$"))
    async def handler(event):
        """Clear a user's voice-mute flag in the database and log the action."""
        if not await is_admin(GROUP_ID, event.message.from_id.user_id):
            return await event.reply("Only admins can execute the commands")
        target_id = int(event.pattern_match.group(1))
        reason = str(event.pattern_match.group(2))
        if vc_db.is_user_unmuted(target_id):
            return await event.reply("User is already unmuted")
        try:
            admin = (
                await client(
                    functions.users.GetFullUserRequest(id=event.message.from_id.user_id)
                )
            ).user
        except ValueError:
            await event.reply("something went wrong locating admin.")
            return
        try:
            target_user = (
                await client(functions.users.GetFullUserRequest(id=target_id))
            ).user
        except ValueError:
            await event.reply("target not found.")
            return
        unmuted_successfully = vc_db.unmute_user(
            target_user.id,
            target_user.first_name
            + (target_user.last_name if target_user.last_name is not None else ""),
            admin.id,
            (admin.last_name if admin.last_name is not None else ""),
            reason,
        )
        if unmuted_successfully:
            msg = getFormattedMessageForMute(target_user, admin, reason, False)
            send_msg(LOG_CHANNEL_ID, msg)
        else:
            await event.reply(
                f"something went wrong trying to unmute {target_user.first_name + (target_user.last_name if target_user.last_name is not None else '')}"
            )

    @client.on(events.NewMessage(pattern=r"^\.join-vc$", outgoing=True))
    async def handler(event):
        """Join the group voice chat and start enforcing mutes / logging events."""
        try:
            print("starting monitoring...")
            group_call_factory = GroupCallFactory(
                client, GroupCallFactory.MTPROTO_CLIENT_TYPE.TELETHON
            )
            group_call = group_call_factory.get_file_group_call()
            await event.delete()
            result = await group_call.start(GROUP_ID)
            while not group_call.is_connected:
                await asyncio.sleep(1)
            print(result)

            async def handler(grpcall, participants):
                """React to voice-chat participant-list updates."""
                peer = participants[0].peer
                if type(peer) is PeerChannel:
                    # Channels (anonymous senders) are force-muted.
                    if not participants[0].muted:
                        print("muted channel", peer.channel_id)
                        await group_call.edit_group_call_member(peer, muted=True)
                elif type(peer) is PeerUser:
                    result = await client(
                        functions.users.GetFullUserRequest(id=peer.user_id)
                    )
                    user = result.user
                    is_interesting_event = (
                        participants[0].left or participants[0].just_joined
                    )
                    if is_interesting_event:
                        msg = getFormattedMessage(user, participants)
                        send_msg(LOG_CHANNEL_ID, msg)
                    if participants[0].just_joined:
                        # Re-apply the persisted mute state on join.
                        if vc_db.is_user_muted(user.id):
                            await group_call.edit_group_call_member(peer, muted=True)
                        elif vc_db.is_user_unmuted(user.id):
                            await group_call.edit_group_call_member(peer, muted=False)

            # BUG FIX: this registration previously sat at the `with` block's
            # top level, where `group_call` does not exist (NameError at import
            # time) and `handler` referred to the wrong function. It belongs
            # here, after the call is connected and the callback is defined.
            group_call.on_participant_list_updated(handler)
        except Exception as e:
            # BUG FIX: Exception has no `.message` attribute in Python 3;
            # print the exception object itself instead.
            print(e)
            return await event.reply(str(e))

    client.run_until_disconnected()
| StarcoderdataPython |
4847891 | import sys
from .Unet import Unet
from .Unet3 import Unet3
from .U2net import U2net,U2netS,U2netM,U2netSP
from .UEfficientNet import UEfficientNetB4
from .UMFacenet2 import UMFacenet
from .SqueezeUNet import SqueezeUNet
from .mobilenet_v3 import MobileNetV3Small
from .deeplab_v3 import Deeplabv3
def build_model(size=256, net='U2netS'):
    """Instantiate one of the supported segmentation networks.

    :param size: square input resolution; the model gets shape (size, size, 3)
    :param net: key selecting the architecture to build
    :return: the constructed (uncompiled) model; exits the process on an
             unknown *net* key, exactly like the original if/elif chain
    """
    # Lazily-evaluated factories keep construction cost to the selected model.
    factories = {
        'U2netS': lambda: U2netS(input_shape=(size, size, 3), drop_rate=0.5),
        'U2netM': lambda: U2netM(input_shape=(size, size, 3)),
        'U2netSP': lambda: U2netSP(input_shape=(size, size, 3), drop_rate=0.5),
        'U2net': lambda: U2net(input_shape=(size, size, 3)),
        'Unet3': lambda: Unet3(input_shape=(size, size, 3)),
        'UMFacenet': lambda: UMFacenet(input_shape=(size, size, 3)),
        'UMFacenetS': lambda: UMFacenet(input_shape=(size, size, 3),
                                        filters=[32, 64, 128, 256], use_se=False),
        'Unet': lambda: Unet(input_shape=(size, size, 3)),
        'UEfficientNetB4': lambda: UEfficientNetB4(input_shape=(size, size, 3),
                                                   imagenet_weights='imagenet'),
        'SqueezeUNet': lambda: SqueezeUNet(input_shape=(size, size, 3)),
        'MobileNetV3Small': lambda: MobileNetV3Small(input_shape=(size, size, 3)),
        'Deeplabv3': lambda: Deeplabv3(input_shape=(size, size, 3), classes=1,
                                       backbone='mobilenetv2', OS=16, alpha=1.,
                                       activation='sigmoid'),
    }
    factory = factories.get(net)
    if factory is None:
        print(' not support your net .')
        sys.exit()
    return factory()
| StarcoderdataPython |
11363083 | from CBMMusicManager import CBMMusicManager
import errors
import netifaces as ni
import queue
from netifaces import AF_INET, AF_INET6, AF_LINK
import requests
from subprocess import check_output
class Song():
    """
    Represents a single track; fields are filled from a Google Music
    track payload via :meth:`set_vars`.
    """

    def __init__(self):
        # All fields stay None until set_vars() is called.
        self.id = None
        self.artist = None
        self.album = None
        self.title = None
        self.duration = None
        self.image = None

    def set_vars(self, gmusic_song):
        """Populate this song from a gmusic dict shaped {'track': {...}}."""
        track = gmusic_song['track']
        self.id = track['storeId']
        self.artist = track['artist']
        self.album = track['album']
        self.title = track['title']
        self.duration = track['durationMillis']
        self.image = track['albumArtRef'][0]['url']
class Rooms():
    """A discovered playback room with a FIFO queue of songs."""

    def __init__(self):
        self.mac_addr = None
        self.ip_address = None
        self.hostname = None
        self.queue = queue.Queue()

    def add_song(self, song_info):
        """Wrap *song_info* in a :class:`Song` and enqueue it."""
        track = Song()
        track.set_vars(song_info)
        self.queue.put(track)

    def pop_song(self):
        """Dequeue and return the next song (blocks while empty)."""
        return self.queue.get()
class CBMInterface():
    """Controller for one IBC music endpoint (id, network identity, rooms)."""

    # Per-device config file: line 1 = IBC id, line 2 = wifi interface name.
    CONFIG_FPATH = "/home/pi/Desktop/JukeSite/ibcconfig"

    def __init__(self):
        self.id = None
        self.interface_name = None
        self.name = None
        self.ip = None
        self.master_ip = None
        self.music_manager = None
        self.rooms = []

    def find_rooms(self):
        """Scan the local /24 with nmap and register every 'ibc*' host found."""
        command = "nmap -sn 192.168.1.0/24".split(' ')
        res = check_output(command).decode()
        # nmap report lines look like: "Nmap scan report for ibc1 (192.168.1.5)"
        ibcs = [line.split('for ', 1)[1] for line in res.split('\n') if 'ibc' in line]
        for ibc in ibcs:
            hostname, ip = ibc.split(' (', 1)
            room = Rooms()
            room.hostname = hostname
            room.ip_address = ip.rstrip(')').strip()
            self.rooms.append(room)

    def set_id_and_interface(self):
        """Read this device's id and wifi interface name from CONFIG_FPATH.

        Raises:
            errors.CouldNotOpenIBCConfigFile: if the file cannot be read.
        """
        try:
            # `with` guarantees the handle is closed on every path, and the
            # narrowed OSError replaces the original bare `except:` which also
            # swallowed KeyboardInterrupt/SystemExit.
            with open(CBMInterface.CONFIG_FPATH, 'r') as f:
                lines = f.readlines()
        except OSError:
            raise errors.CouldNotOpenIBCConfigFile(
                'Could not open {}'.format(CBMInterface.CONFIG_FPATH), 2002)
        self.id = lines[0].strip()
        self.interface_name = lines[1].strip()

    def start_music_client(self):
        """Create and start the music manager."""
        self.music_manager = CBMMusicManager()
        self.music_manager.start()

    def set_name(self):
        """
        Set the name appropriately. The Name is given from master.

        :return: bool, True if master was found
        """
        # NOTE(review): master_scan() is not defined in this class as shown;
        # presumably provided elsewhere — confirm.
        res = self.master_scan()
        if res is None:
            return False
        self.name = res['data']['name']
        return True

    def get_current_ip(self):
        """
        Get the current ip from the interface.

        :return: the current ip as a string
        :raises errors.SetInterfaceError: if the interface name is unset
        """
        if self.interface_name is None:
            # BUG FIX: the exception was constructed but never raised, so the
            # guard had no effect and ni.ifaddresses(None) failed instead.
            raise errors.SetInterfaceError(
                'Set the interface name before performing this method.', 2004)
        return ni.ifaddresses(self.interface_name)[AF_INET][0]['addr']

    def set_current_ip(self):
        """Cache the interface's current ip on self.ip."""
        self.ip = self.get_current_ip()
| StarcoderdataPython |
9629967 | import sys
import bokeh
#if sys.platform.startswith('linux'):
# bokeh.test()
# Report and pin the installed bokeh version.
print(f'bokeh.__version__: {bokeh.__version__}')
assert bokeh.__version__ == '0.12.4'
| StarcoderdataPython |
5076273 | import copy
import datetime
from camera import Camera
from display import Display
from illumination import Illumination
from light import Light
from material import Material
from space import Space
from transform import *
from window import Window
def main():
    """Load scene data, run the view/screen/device transform pipeline and show it."""
    print('Reading ...')
    started = datetime.datetime.now()

    # Geometry/profile files share this base name.
    data_source_name = 'better-ball.d'
    # Shading type: 0 none (framework), 1 constant, 2 Gouraud, 3 Phong.
    shading = 3

    world_space = Space()
    world_space.append_by_file(data_source_name + '.txt')  # geometry data
    camera = Camera()
    camera.set_by_file(data_source_name + '.camera.txt')  # camera profile
    light = Light()
    light.set_by_file(data_source_name + '.light.txt')  # light profile
    material = Material()
    material.set_by_file(data_source_name + '.material.txt')  # material profile

    illumination = Illumination()
    illumination.set(camera, light, material, shading)
    display = Display()
    display.set(800)  # window size
    print(f'Finish. (cost = {datetime.datetime.now() - started})\n')

    print('Calculating: transform ...')
    started = datetime.datetime.now()
    # World -> view -> screen -> device; each stage works on its own copy.
    view_space = copy.deepcopy(world_space)
    view_space.transform(world_to_view, camera)
    screen_space = copy.deepcopy(view_space)
    screen_space.transform(view_to_screen, camera)
    device_space = copy.deepcopy(screen_space)
    device_space.transform(screen_to_device, display)
    print(f'Finish. (cost = {datetime.datetime.now() - started})\n')

    window = Window()
    window.set(world_space, device_space, illumination, display)
    window.show()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9751636 | <gh_stars>0
'''
id = db.Column(db.Integer,primary_key=True)
did = db.Column(db.String(30))
queue = db.Column(db.String(30))
group = db.Column(db.String(10))
'''
from csv_cti.models.dids import Dids
from csv_cti.models import db
class Dids_op():
    """CRUD helpers for the :class:`Dids` table."""

    @staticmethod
    def add(dids_list):
        """Insert one row per mapping in *dids_list* (queue lowered, group uppered)."""
        rows = [
            Dids(queue=item['queue'].lower(),
                 did=item['did'],
                 group=item['group'].upper())
            for item in dids_list
        ]
        with Dids.auto_commit(db):
            db.session.add_all(rows)

    @staticmethod
    def remove(dids_list):
        """Delete the first row matching each mapping's 'did', if present."""
        for item in dids_list:
            with Dids.auto_commit(db):
                row = Dids.query.filter(Dids.did == item['did']).first()
                if row:
                    db.session.delete(row)

    @staticmethod
    def query(dids_info):
        """Query rows: by group (paginated), full dump for csv, or paginated.

        Paginated results are a list of row dicts with the total row count
        appended as the final element.
        """
        if dids_info.get('group'):
            with Dids.auto_commit(db):
                page = Dids.query.filter(
                    Dids.group == dids_info['group'].upper()
                ).order_by(Dids.id.desc()).paginate(
                    dids_info['page_index'],
                    per_page=dids_info['page_size'],
                    error_out=False)
                results = [row.to_json() for row in page.items]
                results.append(page.total)
                return results
        if dids_info.get('csv'):
            with Dids.auto_commit(db):
                return [row.to_json() for row in Dids.query.filter().all()]
        with Dids.auto_commit(db):
            page = Dids.query.filter().order_by(Dids.id.desc()).paginate(
                dids_info['page_index'],
                per_page=dids_info['page_size'],
                error_out=False)
            results = [row.to_json() for row in page.items]
            results.append(page.total)
            return results
| StarcoderdataPython |
12819407 | <reponame>stepanandr/taf
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``common.py``
`Unittests common functionality`
"""
import threading
from xmlrpc.server import SimpleXMLRPCServer
from testlib.custom_exceptions import SwitchException
TCP_PORT = 9999  # default localhost port for the fake XML-RPC server
class FakeXMLRPCServer(object):
    """In-process fake of a switch's northbound XML-RPC server.

    Registers the dotted 'nb.*' method names on a SimpleXMLRPCServer bound to
    localhost and serves canned Applications/Platform/Ports/LAG tables, so
    client code can be unit-tested without real hardware.
    """

    def __init__(self, port=TCP_PORT):
        # Map the switch's dotted northbound method names to local handlers.
        self.server = SimpleXMLRPCServer(("localhost", port))
        self.server.register_function(self.applications_gettable,
                                      'nb.Applications.getTable')
        self.server.register_function(self.applications_set_loglevel,
                                      'nb.Applications.set.logLevel')
        self.server.register_function(self.applications_get_size, 'nb.Applications.size')
        self.server.register_function(self.applications_find, 'nb.Applications.find')
        self.server.register_function(self.applications_exists, 'nb.Applications.exists')
        self.server.register_function(self.system_tables_ready, 'system.tablesReady')
        self.server.register_function(self.platform_get_row, 'nb.Platform.getRow')
        # self.server.register_function(self.platform_get_table, 'nb.Platform.getTable')
        self.server.register_function(self.platform_get_size, 'nb.Platform.size')
        self.server.register_function(self.ports_get_name, 'nb.Ports.get.name')
        self.server.register_function(self.ports_get_size, 'nb.Ports.size')
        self.server.register_function(self.ports_get_info, 'nb.Ports.getInfo')
        self.server.register_function(self.ports_get_info_name, 'nb.Ports.getInfo.name')
        self.server.register_function(self.method_help, 'system.methodHelp')
        self.server.register_function(self.ports_add_row, 'nb.Ports.addRow')
        self.server.register_function(self.ports_del_row, 'nb.Ports.delRow')
        self.server.register_function(self.system_multicall, 'system.multicall')
        self.server.register_function(self.ports_lags_get_table,
                                      'nb.Ports2LagAdmin.getTable')
        self.server.register_function(self.ports_lags_get_size, 'nb.Ports2LagAdmin.size')
        self.server.register_function(self.lags_get_table, 'nb.LagsAdmin.getTable')
        self.server.register_function(self.lags_get_size, 'nb.LagsAdmin.size')
        self.server.register_function(self.lags_add_row, 'nb.LagsAdmin.addRow')
        self.server.register_function(self.ports_lag_add_row, 'nb.Ports2LagAdmin.addRow')
        # Canned table data mirroring what a real switch would report.
        self.applications = [
            {'name': 'ONSApplicationServer', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'SimSwitchApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'ONSCoreServer', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'ONSNorthboundServer', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L3DhcpRelayControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2MirrorControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2QosControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2StormControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2StatsControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'ONSOpenVSwitchApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L1SfpControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2VlanControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L1PortControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2QinqControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2FdbControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2AclControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L1SwitchControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2MulticastControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2LagControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L3ControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2LldpControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2StpControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
        ]
        self.platform = [{'ethernetSwitchType': 'SimSwitch Switch',
                          'name': 'ONS CoreSwitch',
                          'cpuArchitecture': 'x86_64',
                          'chipVersion': '2.0',
                          'chipSubType': 'simswitch',
                          'apiVersion': 'SimSwitch 2.0.0',
                          'switchppVersion': '1.2.0.1405-1',
                          'chipName': 'SimSwitch', 'osType':
                          'Linux', 'model': 'ONS', 'osVersion':
                          '3.2.0-61-generic',
                          'cpu': 'x86_64',
                          'serialNumber': ''}]
        self.ports = [
            {'portId': 1, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Up', 'speed': 10000, 'name': 'xe1'},
            {'portId': 2, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Up', 'speed': 10000, 'name': 'xe2'},
            {'portId': 3, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe3'},
            {'portId': 4, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe4'},
            {'portId': 5, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe5'},
            {'portId': 6, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe6'},
            {'portId': 7, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe7'},
            {'portId': 8, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe8'},
            {'portId': 9, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe9'},
            {'portId': 10, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe10'}]
        self.ports_info = {'primary_key': ['portId'],
                           'persistent': 'True',
                           'description':
                           'Ports table includes all type of ports in a single table.',
                           'columns': ['portId', 'adminMode', 'name',
                                       'pvid', 'speed', 'operationalStatus', 'type'],
                           'mode': 'rw'}
        self.ports_name_info = {'restrictions': {'size': '32'}, 'type': 'string',
                                'description': 'This ports name (a 32-byte string).',
                                'mode': 'ro'}
        self.ports_get_row_help = 'Method for getting variable from table Ports'
        # When True, system_multicall drops the last result to simulate failure.
        self.error_multicall = False
        self.lags = []
        self.ports_to_lags = []
        # Background thread running serve_forever(); created by start().
        self.th = None

    def start(self):
        """Serve requests on a background thread."""
        self.th = threading.Thread(target=self.server.serve_forever)
        self.th.start()

    def stop(self):
        """Shut the server down and join the serving thread.

        NOTE(review): assumes start() was called first; self.th is still None
        otherwise — confirm callers always pair start()/stop().
        """
        if self.th.is_alive():
            self.server.shutdown()
            self.server.server_close()
            self.th.join()

    def applications_gettable(self):
        """Return the full Applications table."""
        return self.applications

    def applications_set_loglevel(self, app_id, loglevel):
        """Set logLevel on every row matching *app_id*; 'error' simulates failure."""
        if loglevel == 'error':
            raise SwitchException("Error loglevel")
        for row in self.applications:
            if row['appId'] == app_id:
                row['logLevel'] = loglevel
        return 0

    def applications_find(self, app_id, pid_id, app_name):
        """Return the 1-based row index matching app_id and name, or -1."""
        index = 0
        for row in self.applications:
            index += 1
            if row['appId'] == app_id and row['name'] == app_name:
                return index
        return -1

    def applications_get_size(self):
        """Return the number of Applications rows."""
        return len(self.applications)

    def applications_exists(self, app_id, pid_id, app_name):
        """Alias of applications_find (same 1-based index / -1 semantics)."""
        return self.applications_find(app_id, pid_id, app_name)

    def system_tables_ready(self):
        """Report that all tables are ready (always 0)."""
        return 0

    def platform_get_row(self, row):
        """Return Platform row *row* (1-based)."""
        row = row - 1
        return self.platform[row]

    def platform_get_table(self):
        """Return the Platform table (note: not registered with the server)."""
        return self.platform

    def platform_get_size(self):
        """Return the number of Platform rows."""
        return len(self.platform)

    def ports_gettable(self):
        """Return the Ports table (note: not registered with the server)."""
        return self.ports

    def ports_get_name(self, row_id):
        """Return the 'name' of Ports row *row_id* (1-based)."""
        row_id = row_id - 1
        return self.ports[row_id]['name']

    def ports_get_size(self):
        """Return the number of Ports rows."""
        return len(self.ports)

    def ports_get_info(self):
        """Return the Ports table metadata."""
        return self.ports_info

    def ports_get_info_name(self):
        """Return metadata for the Ports 'name' column."""
        return self.ports_name_info

    def ports_add_row(self, *row):
        """Append a Ports row from positional values.

        Order: portId, adminMode, pvid, type, operationalStatus, speed, name.
        """
        port = {
            'portId': row[0],
            'adminMode': row[1],
            'pvid': row[2],
            'type': row[3],
            'operationalStatus': row[4],
            'speed': row[5],
            'name': row[6],
        }
        self.ports.append(port)
        return 0

    def ports_del_row(self, row_id):
        """Delete Ports row *row_id* (1-based)."""
        self.ports.remove(self.ports[row_id - 1])
        return 0

    def clear_config(self):
        """Pretend to clear the switch configuration (always 0)."""
        return 0

    def method_help(self, method):
        """Return help text for *method*; raise SwitchException if unknown."""
        if method == 'nb.Ports.getRow':
            return self.ports_get_row_help
        raise SwitchException('Method %s does not exist' % (method, ))

    def system_multicall(self, *calls):
        """Return one 0 per call; drop the last result when error_multicall is set."""
        res = []
        for _ in calls[0]:
            res.append(0)
        if self.error_multicall:
            return res[: -1]
        return res

    def ports_lags_get_table(self):
        """Return the Ports2LagAdmin table."""
        return self.ports_to_lags

    def ports_lags_get_size(self):
        """Return the number of Ports2LagAdmin rows."""
        return len(self.ports_to_lags)

    def lags_get_table(self):
        """Return the LagsAdmin table."""
        return self.lags

    def lags_get_size(self):
        """Return the number of LagsAdmin rows."""
        return len(self.lags)

    def lags_add_row(self, *row):
        """Create a LAG row and a matching 'LAG'-type Ports row.

        Order: lagId, name, actorAdminLagKey, lagControlType, hashMode.
        """
        lag = {
            'lagId': row[0],
            'name': row[1],
            'lagControlType': row[3],
            'actorAdminLagKey': row[2],
            'hashMode': row[4],
        }
        port = {
            'portId': row[0],
            'adminMode': 'Up',
            'pvid': 1,
            'type': 'LAG',
            'operationalStatus': 'Down',
            'speed': 10000,
            'name': row[1],
        }
        self.lags.append(lag)
        self.ports.append(port)
        return 0

    def ports_lag_add_row(self, *row):
        """Attach a port to a LAG and flip the port's type to 'LagMember'.

        Order: portId, lagId, actorPortPriority, actorAdminPortKey,
        adminAggregation, adminActive, adminTimeout, adminSynchronization,
        adminCollecting, adminDistributing, adminDefaulted, adminExpired.
        """
        port_lag = {
            'lagId': row[1],
            'portId': row[0],
            'actorPortPriority': row[2],
            'actorAdminPortKey': row[3],
            'adminAggregation': row[4],
            'adminActive': row[5],
            'adminTimeout': row[6],
            'adminSynchronization': row[7],
            'adminCollecting': row[8],
            'adminDistributing': row[9],
            'adminDefaulted': row[10],
            'adminExpired': row[11],
        }
        port = [x for x in self.ports if x['portId'] == row[0]][0]
        port['type'] = 'LagMember'
        self.ports_to_lags.append(port_lag)
        return 0
| StarcoderdataPython |
131627 | '''
Double Tap
==========
Search touch for a double tap
'''
__all__ = ('InputPostprocDoubleTap', )
from time import time
from nuiinput.config import Config
from nuiinput.vector import Vector
class InputPostprocDoubleTap(object):
    '''
    InputPostProcDoubleTap is a post-processor to check if
    a touch is a double tap or not.
    Double tap can be configured in the Kivy config file::
        [postproc]
        double_tap_time = 250
        double_tap_distance = 20
    Distance parameter is in the range 0-1000 and time is in milliseconds.
    '''

    def __init__(self):
        # Config stores distance in a 0-1000 range and time in milliseconds;
        # normalise to a 0-1 scale and seconds respectively.
        dist = Config.getint('postproc', 'double_tap_distance')
        self.double_tap_distance = dist / 1000.0
        tap_time = Config.getint('postproc', 'double_tap_time')
        self.double_tap_time = tap_time / 1000.0
        # uid -> (last event type, touch) for recently seen touches.
        self.touches = {}

    def find_double_tap(self, ref):
        '''Find a double tap touch within self.touches.
        The touch must be not a previous double tap and the distance must be
        within the specified threshold. Additionally, the touch profiles
        must be the same kind of touch. Returns the matching touch or None.
        '''
        ref_button = ref.button if 'button' in ref.profile else None
        # Iterate .items() once instead of .keys() + a lookup per key.
        for touchid, (etype, touch) in self.touches.items():
            if ref.uid == touchid:
                continue
            if etype != 'end':
                continue
            if touch.is_double_tap:
                continue
            distance = Vector.distance(
                Vector(ref.sx, ref.sy),
                Vector(touch.osx, touch.osy))
            if distance > self.double_tap_distance:
                continue
            if touch.is_mouse_scrolling or ref.is_mouse_scrolling:
                continue
            touch_button = touch.button if 'button' in touch.profile else None
            if touch_button != ref_button:
                continue
            touch.double_tap_distance = distance
            return touch

    def process(self, events):
        # Disabled entirely when either threshold is zero.
        if self.double_tap_distance == 0 or self.double_tap_time == 0:
            return events
        # First, check if a touch down has a double tap, and record every
        # touch event by uid.
        for etype, touch in events:
            if not touch.is_touch:
                continue
            if etype == 'begin':
                double_tap = self.find_double_tap(touch)
                if double_tap:
                    touch.is_double_tap = True
                    touch.double_tap_time = touch.time_start - double_tap.time_start
                    touch.double_tap_distance = double_tap.double_tap_distance
            self.touches[touch.uid] = (etype, touch)
        # Second, drop ended touches whose double-tap window has expired.
        # Collect first, delete after: no mutation while iterating.
        time_current = time()
        to_delete = [
            touchid
            for touchid, (etype, touch) in self.touches.items()
            if etype == 'end'
            and time_current - touch.time_start >= self.double_tap_time
        ]
        for touchid in to_delete:
            del self.touches[touchid]
        return events
| StarcoderdataPython |
1768435 | import praw
def get_top_jokes(posts=1):
    """Fetch the top r/Jokes posts of the last hour.

    Returns a single "title\\nbody" string when exactly one joke is fetched,
    otherwise a list of such strings.
    """
    reddit = praw.Reddit('jokegetter by /u/reffit_owner')
    submissions = reddit.get_subreddit('Jokes').get_top(
        params={'t': 'hour'}, limit=posts)
    jokes = [post.title + "\n" + post.selftext for post in submissions]
    if len(jokes) == 1:
        return jokes[0]
    return jokes
| StarcoderdataPython |
9623192 | import OpenGL.GL as gl
import OpenGL.GLU as glu
import OpenGL.GLUT as glut
class Label(object):
    """Renders colored text labels at 3-D points via an OpenGL display list.

    NOTE: this is Python 2 code (``print`` statements) using PyOpenGL.
    """
    def __init__(self,points,labels,colors):
        # Display-list handle; allocated lazily by init().
        self.list_index = None
        self.points = points    # sequence of (x, y, z) positions
        self.labels = labels    # text shown at each point
        self.colors = colors    # (r, g, b) color per label
    def init(self):
        """Compile a sphere plus all labels into a GL display list.

        Must be called with a current GL context and an initialised GLUT.
        """
        self.list_index = gl.glGenLists(1)
        gl.glNewList( self.list_index, gl.GL_COMPILE)
        gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
        #gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
        # Lighting off so raster text/colors are not modulated by lights.
        gl.glDisable(gl.GL_LIGHTING)
        gl.glColor3f(1,1,0)
        glut.glutSolidSphere(50.,50,50)
        for i in range(len(self.points)):
            r,g,b = self.colors[i]
            print r,g,b
            gl.glColor3f(r,g,b)
            x,y,z = self.points[i]
            print x,y,z
            gl.glRasterPos3f(x,y,z)
            label = self.labels[i]
            print label
            # Bitmap characters advance the raster position automatically.
            for c in label:
                print c
                glut.glutBitmapCharacter(glut.GLUT_BITMAP_TIMES_ROMAN_24, ord(c))
        gl.glEnable(gl.GL_LIGHTING)
        # NOTE(review): glNewList is normally closed with glEndList(), not
        # glEnd() (which pairs with glBegin) — confirm intent.
        gl.glEnd()
    def display(self):
        """Draw the previously compiled display list."""
        #gl.glPushMatrix()
        print self.list_index
        #gl.glLoadIdentity()
        gl.glCallList(self.list_index)
        #gl.glPopMatrix()
| StarcoderdataPython |
1951119 | <gh_stars>0
"""Module for summarizer."""
from abc import ABC, abstractmethod
class Summarizer(ABC):
    """Abstract base class for summarizers.

    Concrete subclasses must implement :meth:`summarize`.
    """
    @abstractmethod
    def summarize(self):
        """Summarize information.

        Abstract: subclasses that do not override this cannot be instantiated.
        """
| StarcoderdataPython |
1711273 | <reponame>Catalyst9k-SLA/Cat9k
# Importing the variable file in the dir Variable
import sys
import os
import inspect
# Resolve this file's directory, then add the sibling "Variables" directory of
# the parent to sys.path so SparkVariables/SparkFunctions can be imported.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
dirParent = os.path.dirname(currentdir)
dirVariable = dirParent + "/Variables"
sys.path.insert(0, dirVariable)
from argparse import ArgumentParser
from SparkVariables import *
from SparkFunctions import *
def kittyHelp():
    """Post Kitty's usage instructions to the software-project Spark room."""
    help_text = (
        "# Kitty Help \n"
        "Usage : @" + botName_Kitty + " instruction1 instruction2 ...\n"
        "Here is what I can do : \n"
        "* **help** : print those inscructions\n"
        "* **Salut** : greetings (in French !)\n"
        "* **save config** : backup the configturation on the TFTP server\n"
        "* **last config** : information about the last config changes"
    )
    post_message_markdown(help_text, roomID_SoftwareProject, bearer_Bot)
    return "OK"
6592797 | import logging
from openpnm.core import Base, ModelsMixin, ParamMixin, LabelMixin
from openpnm.utils import Workspace
from openpnm.utils import Docorator, SettingsAttr
from numpy import ones
import openpnm.models as mods
docstr = Docorator()
logger = logging.getLogger(__name__)
ws = Workspace()
@docstr.get_sections(base='PhaseSettings', sections=['Parameters'])
@docstr.dedent
class PhaseSettings:
    r"""
    Parameters
    ----------
    %(BaseSettings.parameters)s
    """
    # Prefix used when auto-generating names for phase objects.
    prefix = 'phase'
@docstr.get_sections(base='GenericPhase', sections=['Parameters'])
@docstr.dedent
class GenericPhase(ParamMixin, Base, ModelsMixin, LabelMixin):
    r"""
    This generic class is meant as a starter for custom Phase objects
    This class produces a blank-slate object with no pore-scale models for
    calculating any thermophysical properties. Users must add models and
    specify parameters for all the properties they require.
    Parameters
    ----------
    %(Base.parameters)s
    """
    def __init__(self, network, settings=None, **kwargs):
        self.settings = SettingsAttr(PhaseSettings, settings)
        super().__init__(network=network, settings=self.settings, **kwargs)
        # If project has a network object, adjust pore and throat array sizes
        self['pore.all'] = ones((network.Np, ), dtype=bool)
        self['throat.all'] = ones((network.Nt, ), dtype=bool)
        # Set standard conditions on the fluid to get started
        # (298.0 K and 101325.0 Pa, i.e. ~25 C at 1 atm)
        self['pore.temperature'] = 298.0
        self['pore.pressure'] = 101325.0
    def __getitem__(self, key):
        """Dictionary lookup with special handling for ids, labels and interpolation."""
        # If the key is a just a numerical value, the kick it directly back
        # This allows one to do either value='pore.blah' or value=1.0
        if isinstance(key, (int, float, bool, complex)):
            return key
        element, prop = key.split('.', 1)
        # Deal with special keys first
        if prop == '_id':
            # '_id' values live on the network, not on the phase itself
            net = self.project.network
            return net[f"{element}._id"]
        if prop == self.name:
            # Asking for this phase's own label returns the 'all' mask
            return self[f"{element}.all"]
        # An attempt at automatic interpolation if key not found:
        # if the complementary element (pore<->throat) has the property,
        # register an interpolation model under the requested key.
        if key not in self.keys():
            not_el = list(set(['pore', 'throat']).difference(set([element])))[0]
            if (not_el + '.' + prop) in self.keys():
                mod = {'pore': mods.misc.from_neighbor_throats,
                       'throat': mods.misc.from_neighbor_pores}
                self.add_model(propname=key,
                               model=mod[element],
                               prop=not_el + '.' + prop,
                               mode='mean')
        vals = super().__getitem__(key)
        return vals
    @property
    def phase(self):
        """A shortcut to get a handle to the associated phase (itself)."""
        return self
    @property
    def physics(self):
        """A shortcut to query the associated physics(es)."""
        return self.project.find_physics(phase=self)
    @property
    def _subdomains(self):
        """Internal alias for the physics objects associated with this phase."""
        return self.project.find_physics(phase=self)
12843623 | import os
def fileTest():
    """Print this module's directory, then echo each line of ../FileTest/data.txt.

    The data file is resolved relative to this source file so the function
    works regardless of the current working directory.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print(dir_path)
    data_path = os.path.join(dir_path, '../FileTest/data.txt')
    print(data_path)
    # Context manager guarantees the handle is closed (the original opened
    # the file and never closed it).
    with open(data_path, 'r') as file:
        for line in file:
            print(line)


if __name__ == '__main__':
    fileTest()
331978 | <filename>bot.py
import os
import discord
import youtube_dl as youtube_dl
from discord import channel
from discord.ext import commands
import random
import json
import logging
import math
from urllib import request
from datetime import datetime
import asyncio
import youtube_dl as ytdl
from discord.utils import find, get
from pathlib import Path
# Users that have authenticated this session (bookkeeping; never written to here).
loggedinusers = []
# The bot instance; every command below is registered against the "p:" prefix.
client = commands.Bot(command_prefix="p:")
# Options handed to youtube-dl: best audio, quiet, bare words become searches.
YTDL_OPTS = {
    "default_search": "ytsearch",
    "format": "bestaudio/best",
    "quiet": True,
    "extract_flat": "in_playlist"
}
def _play_song(self, client, state, song):
    """Start streaming *song* on *client*, chaining to the next queued song when done.

    NOTE(review): despite the ``self`` parameter this is defined at module
    level, not on a class; callers presumably pass a cog-like object that
    has ``_play_song`` and ``bot`` attributes -- verify before relying on it.
    """
    state.now_playing = song
    state.skip_votes = set()  # clear skip votes
    source = discord.PCMVolumeTransformer(
        discord.FFmpegPCMAudio(song.stream_url), volume=state.volume)
    def after_playing(err):
        # Called by discord.py from the player thread once playback finishes.
        if len(state.playlist) > 0:
            next_song = state.playlist.pop(0)
            self._play_song(client, state, next_song)
        else:
            # Queue empty: schedule a disconnect back on the bot's event loop.
            asyncio.run_coroutine_threadsafe(client.disconnect(),
                                             self.bot.loop)
    client.play(source, after=after_playing)
async def status_task():
    """Rotate the bot's presence/status messages forever, one change every 3 s."""
    while True:
        game = discord.Game(f"The Community Submitted Games!")
        await client.change_presence(status=discord.Status.online, activity=game)
        await asyncio.sleep(3)
        game = discord.Game(f"USB Is so cool Right?")
        await client.change_presence(status=discord.Status.online, activity=game)
        await asyncio.sleep(3)
        await client.change_presence(
            activity=discord.Activity(type=discord.ActivityType.watching, name='kai-builder.github.io'))
        # NOTE(review): this second call reuses the previous `game`, immediately
        # overwriting the "watching" activity set just above -- confirm intent.
        await client.change_presence(status=discord.Status.idle, activity=game)
        await asyncio.sleep(3)
        game = discord.Game(f"p:help, p:database, p:os, p:post, p:submit")
        await client.change_presence(status=discord.Status.idle, activity=game)
@client.event
async def on_ready():
    """Kick off the rotating-status background task once the bot is connected.

    BUG FIX: the original awaited the created task; since status_task() loops
    forever, on_ready never returned.  Scheduling without awaiting lets the
    ready handler finish while the status loop runs in the background.
    """
    client.loop.create_task(status_task())
@client.command()
async def register(ctx, name, dob, age, pw):
    """Create a local account folder named after the user and store their details.

    SECURITY(review): the password is stored in plain text on disk; a hash
    (e.g. hashlib/scrypt) would be far safer.
    """
    await ctx.message.delete()
    print("User Called init")
    await ctx.send("Registering you...")
    # BUG FIX: os.mkdir raised FileExistsError when a user re-registered.
    os.makedirs(name, exist_ok=True)
    # Context manager guarantees the handle is closed (original leaked it).
    with open(os.path.join(name, "user.txt"), 'w') as record:
        record.write(name + "\n" + age + "\n" + dob + "\n" + pw)
async def audio_playing(ctx):
    """Checks that audio is currently playing before continuing."""
    voice_client = ctx.guild.voice_client
    # Guard clause: bail out unless a connected client with an active source exists.
    if not (voice_client and voice_client.channel and voice_client.source):
        raise commands.CommandError("Not currently playing any audio.")
    return True
@client.event
async def on_member_ban(guild, user):
    """Log ban events to stdout for debugging."""
    print(f"{guild} {user}")
@client.command()
async def login(ctx, name, passw):
    """Verify a stored account's password and grant the Verified role on success."""
    print("User Called Log In")
    await ctx.message.delete()
    await ctx.send("Logging you In..")
    await asyncio.sleep(random.randint(0, 6))
    account_dir = Path(name)
    if not account_dir.exists():
        await ctx.send("You're Not Logged In On This Account/Server Sorry.")
        return
    await ctx.send("Logging you in!")
    # Context manager closes the record file (the original leaked the handle
    # and reused the variable `a` for both the Path and the file contents).
    with open(account_dir / "user.txt") as fh:
        record = fh.readlines()
    # BUG FIX: verify the password BEFORE granting the Verified role; the
    # original added the role unconditionally as soon as the folder existed.
    if passw == record[3]:
        member = ctx.message.author
        await member.add_roles(discord.utils.get(member.guild.roles, name="Verified"))
        await ctx.send("Found Your Account! Account Details:\nName: " + record[0] + "Dob: " + record[2] + " Age: " + record[1])
    else:
        await ctx.send("Password Login Failed. Try Again.")
@client.command()
async def itch(ctx):
    """Advertise the author's itch.io game page and trailer."""
    print("User Called itch")
    await ctx.send(
        "If you didn't already know, I (U$B) Is Hosting this bot myself, Bascially, The New Game I'm Developing (KMOD) Is Now available In Pre-releases & Source Releases.\nIt is also Optional TO Pay For the App To Get Full Beta Access.\nLink: https://kai-builder.itch.io/kais-sandbox#download \nTrailer: https://www.youtube.com/watch?v=f4WhDsu-lJM&t=4s ")
@client.command()
async def backup(ctx, account):
    """Generate a numeric backup password for *account*, store it, and DM it.

    SECURITY(review): `random` is not cryptographically secure; the `secrets`
    module is the right tool for account-recovery codes.
    """
    print("User Called Backup_Password")
    await ctx.send("Backing Up Your Password...")
    code = random.randint(0, 11023132139137139127382)
    # Context manager closes the file even if the later sends fail.
    with open(account + "/accountbackup.txt", "w") as fh:
        fh.write(str(code))
    await ctx.send("Generated Backup Account Password.")
    await ctx.author.send(
        "Your Backup Is " + str(code) + ". DO NOT SHARE THIS WITH ANYBODY ELSE! THIS IS USED TO IDENTIFY YOUR ACCOUNT!")
@client.command()
async def login_backup(ctx, account, backup_):
    """Begin a backup-code login flow for *account* (currently a stub: it only
    sends progress messages and never checks `backup_`)."""
    print("User Called Login Through Backup")
    await ctx.send("Alright!")
    await asyncio.sleep(2)
    await ctx.send("Getting Account Details...")
@client.event
async def on_guild_join(guild):
    """Greet a newly joined guild with the bot's banner and feature overview."""
    # NOTE(review): the lambda ignores `x`, so `find` returns the first text
    # channel whenever the guild has a system channel -- probably intended to
    # BE the system channel; verify.
    general = find(lambda x: guild.system_channel, guild.text_channels)
    if general and general.permissions_for(guild.me).send_messages:
        await general.send("""
▄▄
▀███▀▀▀██▄ ██ ▄██
 ██ ▀██▄ ██ ██
 ██ ▀██▄█▀██▄ ██████ ▄█▀██▄ ██▄████▄ ▄█▀██▄ ▄██▀███ ▄▄█▀██
 ██ ███ ██ ██ ██ ██ ██ ▀████ ██ ██ ▀▀▄█▀ ██
 ██ ▄██▄█████ ██ ▄█████ ██ ██ ▄█████ ▀█████▄██▀▀▀▀▀▀
 ██ ▄██▀█ ██ ██ ██ ██ ██▄ ▄████ ██ █▄ ████▄ ▄
▄████████▀ ▀████▀██▄ ▀████████▀██▄ █▀█████▀ ▀████▀██▄██████▀ ▀█████▀
**The DataBase Discord Bot**
```c++
printf("Thanks for Using my Bot!");
```
This Bot was Made By U$B And Was Made For Fun. Use this for any purpose (For Commercial Servers DM Me
(U$B#5000)) Without Pay (other than Commercial).
Thank you for Putting My Bot Into your Server and I Hope you have fun with it!
To Begin, Say p:help :)
▄▄▄ .▐▄• ▄ ▄▄▄▄▄▄▄▄▄ ▄▄▄· .▄▄ · ▄▄
▀▄.▀· █▌█▌▪▀•██ ▀▀▄ █·▐█ ▀█ ▐█ ▀. ██▌
▐▀▀▪▄ ·██· ▐█.▪▐▀▀▄ ▄█▀▀█ ▄▀▀▀█▄ ▐█·
▐█▄▄▌▪▐█·█▌ ▐█▌·▐█•█▌▐█▪ ▐▌▐█▄▪▐█ .▀
▀▀▀ •▀▀ ▀▀ ▀▀▀ .▀ ▀ ▀ ▀ ▀▀▀▀ ▀
Database Is a Self-Hosted Bot With Some Pretty Cool Features. In my Mind :)
A Forum Page for Posting Global Messages.
Accounts! If your Server Comes With A Verified Role, Users will Have to Register And Log In To Their Accounts On the Server To Get the Verified Role!
Custom Moderation Features! If you want a Custom moderation Command, The Github Package Site Is Coming soon :)
You can Talk to friends over a server,
Custom Commands Are found Also in the Github Package!
Now Happy Moderating!
""")
@client.command()
async def info(ctx):
    """Explain the bot's purpose and how to register or log in."""
    print("User Called info")
    await ctx.send(
        "This Bot Is Used By Kai's sandbox Development for Moderation Purposes.\nThis Bot Can not Be used by Other users Unless The Command, p:register & p:login Is Called.")
    await ctx.send("Say p:register <name> <dob> <age> <password> To Get Started.")
    await ctx.send("Think Your account is already In Our Database? Say p:login <Name> <pass> To Log in!")
    await ctx.send("(All information Is not used against any entities Using this Discord Bot.)")
@client.command()
@commands.has_role("Logged In")
async def downloads(ctx):
    """Placeholder command, gated behind the "Logged In" role."""
    print("User Called downloads")
    await ctx.send("downloads")
@client.command()
async def deleteaccount(ctx, accountname):
    """Ask the caller (via DM) for their backup password before deleting an account."""
    print("User Called deleteaccount")
    await ctx.author.send("Waiting For Account BACKUP Password...")
    msg = await client.wait_for('message', check=lambda message: message.author == ctx.author)
    # BUG FIX: `msg` is a discord.Message object; concatenating it to a str
    # raised TypeError.  Use its text content instead.
    await ctx.send("Checking If " + msg.content + " Is in the class of your backup Password.")
@client.command()
async def id(ctx, number: int):
    """Echo the given number back to the invoking channel."""
    # BUG FIX: Bot.send_message was removed in discord.py 1.0 and the original
    # call was never awaited; reply via the command context instead.
    await ctx.send(number)
@client.command()
async def post(ctx, *, message):
    """Save *message* as a uniquely named text file in the posts/ directory."""
    print("User Called _POST_")
    await ctx.send("Posting...")
    # BUG FIX: the original wrote to a Windows-only "posts\\..." path while
    # every reader command uses "posts/", so posts written on Linux landed in
    # the wrong place; build the path portably.  (The unused datetime call
    # was dropped.)
    filename = "post_user" + str(ctx.author) + str(random.randint(19, 121031829236715812938178)) + ".txt"
    # Context manager closes the handle (original leaked it).
    with open(os.path.join("posts", filename), "w") as fh:
        fh.write(f"{message}")
@client.command()
async def post_page(ctx):
    """List the raw file names of every saved forum post."""
    print("User Called View")
    await ctx.send("Sending You A List of the forum Page.")
    await asyncio.sleep(2)
    await ctx.send("List Of Posts By Raw DataBase Name")
    await ctx.send("To View Posts Individually, Type p:view <postname> OR Type p:top posts")
    # One message per stored post file.
    for filename in os.listdir('posts'):
        await ctx.send(filename)
@client.command()
async def view(ctx, name):
    """Show the first line of the named post file from the posts/ directory."""
    print("User Called view")
    # Context manager closes the handle (the original opened with "r+" --
    # write access it never used -- and leaked it).
    with open("posts/" + name, "r") as fh:
        lines = fh.readlines()
    await ctx.send("Found Post " + name + ". Post Details:\n\n" + lines[0] + ".")
@client.command()
async def top_posts(ctx):
    """Dump the contents of every saved post into the channel."""
    print("User Called top_posts")
    await ctx.send("Alright!")
    # Renamed the loop variable (original shadowed the builtin `file`) and
    # closed each handle with a context manager (original leaked them all).
    for file_name in os.listdir('posts'):
        await ctx.send("Post Found!")
        with open("posts/" + file_name) as fh:
            for line in fh:
                await ctx.send(f"Post Contents: {line} ")
@client.command()
@commands.has_permissions(kick_members=True)
async def kick(ctx, user: discord.Member, *, reason=None):
    """Kick *user*, reporting the reason (or lack of one) back to the channel."""
    # Member.kick(reason=None) is identical to Member.kick(), so one call
    # covers both branches of the original.
    await user.kick(reason=reason)
    shown_reason = reason if reason else "no reason"
    await ctx.send(f"**{user}** has been kicked for **{shown_reason}**.")
@client.command()
@commands.has_permissions(administrator=True)
async def announce(ctx, channel: discord.TextChannel, *, message):
    """Delete the invoking message, ping everyone, and post *message* as an embed."""
    print("User Called annoucne")
    await ctx.message.delete()
    await channel.send("@everyone")
    announcement = discord.Embed(title="Announcement", description=message)
    await channel.send(embed=announcement)
# Drop the built-in help command so the custom p:help defined below takes over.
client.remove_command("help")
@client.command()
async def music(ctx):
    """Advertise the music feature and the song-submission docs."""
    print("music Called")
    await ctx.send("This Bot Also Contains Music! How Lovely. Most Of the Songs Are powered By Our Users. So If you "
                   "want to submit a song, Learn How @ https://kai-builder.github.io/bots/db/docs")
@client.command(pass_context=True)
async def ban(ctx, user: discord.Member, *, reason):
    """Ban *user* with the given (required) reason and confirm in-channel."""
    print("Banned")
    await user.ban(reason=reason)
    await ctx.send(f"Banned @{user} With Reason Of {reason}")
@client.command()
async def marketplace(ctx):
    """Ensure the MarketPlace directory exists and pretend to load its contents."""
    print("marketplace Called")
    # BUG FIX: os.mkdir raised FileExistsError on every invocation after the
    # first; makedirs with exist_ok tolerates an existing directory.
    os.makedirs("MarketPlace", exist_ok=True)
    await ctx.send("Loading the MarketPlace. . .")
    await asyncio.sleep(random.randint(0, 10))
    for filename in os.listdir("MarketPlace"):
        print("Loaded MarketPlace")
@client.command()
async def community(ctx):
    """Describe the community-submission features (posts, extensions, songs)."""
    print("community Called")
    await ctx.send("You guys Control Me!\n\nYou can Submit Bot Extensions, Accounts, Custom Prefixes, And Much "
                   "More.\nYou can Submit Posts And More Via p:post!\nYou can Submit Links and More!"
                   "\n You can Also Submit SONGS Using the p:songrequest <link> !"
                   "\n To Submit Extensions Or Posts, Use p:post Or p:submit <extname> <Usage>")
@client.command()
async def subforum(ctx):
    """Placeholder subforum command (currently just echoes its name)."""
    print("subforum Called")
    await ctx.send("subforum")
@client.command()
async def help(ctx, command=None):
    """Custom help: a command overview by default, or per-package details."""
    print("heloin")
    if command is None:
        print("nonecomd")
        await ctx.send("Default Commands. Say p:help <utilpack> For More Info On that Specific Package."
                       "\n"
                       "\n p:help, p:post, p:play, p:leave, p:submit, p:submitsong, p:marketplace, p:subforum <name>")
    else:
        # Only the "community" package currently has a detail page.
        if command == "community":
            embed = discord.Embed(title="Community Commands", description="All Of the Basic Community Utilities")
            embed.add_field(name="Networks",
                            value="p:post\np:top_posts\np:view <post>\np:submit <util>\np:submitsong <url>")
            await ctx.send(embed=embed)
@client.command()
async def partycreate(ctx, ispub, code, *, partyname):
    """Create a party folder with a settings file marking it public or code-protected."""
    print("party Called")
    await ctx.message.delete()
    await ctx.send(f"Creating a Party With the Name Of {partyname}. . .")
    await asyncio.sleep(2)
    os.mkdir(f"p/{partyname}")
    # Context manager closes the handle (original leaked it) and the variable
    # no longer shadows the builtin `set`.
    with open('p/' + partyname + "/settings.ini", "w") as settings_file:
        if ispub == "yes":
            settings_file.write("true")
        else:
            settings_file.write(f"false\n{code}")
    await ctx.send("Created Party.")
@client.command()
async def publicparties(ctx):
    """List the names of all parties whose settings mark them as public."""
    print("publicparties Called")
    # Dead code removed: the original set `x = 0; x = x + 5` and never used it.
    for party in os.listdir('p'):
        # Context manager closes each settings file (original leaked them).
        with open("p/" + party + "/settings.ini") as fh:
            flags = fh.readlines()
        if flags[0] == "true":
            await ctx.send(party)
        else:
            print("PrivateLobby")
async def in_voice_channel(ctx):
    """Checks that the command sender is in the same voice channel as the bot."""
    author_voice = ctx.author.voice
    bot_voice = ctx.guild.voice_client
    same_channel = (author_voice and bot_voice
                    and author_voice.channel and bot_voice.channel
                    and author_voice.channel == bot_voice.channel)
    if same_channel:
        return True
    raise commands.CommandError(
        "You need to be in the channel to do that.")
@client.command()
async def joinparty(ctx, code: int, *, name):
    """Join the named party if *code* matches the stored private-party code."""
    print("joinparty Called")
    await ctx.send(f"Joining {name}!")
    await asyncio.sleep(2)
    # Context manager closes the settings file (original leaked the handle).
    with open('p/' + name + "/settings.ini") as fh:
        settings = fh.readlines()
    # BUG FIX: `code` is an int but the stored line is a string, so the
    # original comparison could never succeed; compare as strings and guard
    # against public parties whose file has no code line.
    if len(settings) > 1 and str(code) == settings[1]:
        await ctx.send(f"Joined Party {name}")
# Bookkeeping for mod-mail sessions (currently never written to).
sent_users = []
@client.command()
async def mailtomods(ctx, arg):
    """Open a mod-mail session (DM-only) and announce a shared session code."""
    # Only react when the command was issued inside a DM channel.
    if isinstance(ctx.channel, discord.channel.DMChannel):
        await ctx.send("Alright")
        code = random.randint(0, 12073691093281287983)
        # Hard-coded channel id of the moderators' mail channel.
        mailchannel = client.get_channel(803061426050170880)
        await ctx.send(f"Started Session. Code: {code}.")
        await mailchannel.send(f"Started Session. Code: {code}.")
@client.command(aliases=["stop"])
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def leave(self, ctx):
    """Leaves the voice channel, if currently in one.

    NOTE(review): this is registered as a bot command yet declares ``self``,
    so discord.py will actually bind the Context to ``self`` and expect an
    extra argument for ``ctx`` -- the command is broken as written; it also
    relies on ``self.get_state``, which is not defined at module level.
    """
    clienst = ctx.guild.voice_client
    state = self.get_state(ctx.guild)
    # NOTE(review): this tests the global bot object `client`, which is always
    # truthy -- presumably `clienst` was meant; verify before fixing.
    if client and clienst.channel:
        await clienst.disconnect()
        state.playlist = []
        state.now_playing = None
    else:
        await ctx.send("NOT IN A VOICE!!!")
@client.command()
@commands.check(audio_playing)
@commands.check(in_voice_channel)
async def skip(self, ctx):
    """Skips the currently playing song, or votes to skip it.

    NOTE(review): declared with ``self`` although registered as a module-level
    command, and it uses ``self.get_state`` / ``self.config`` /
    ``self._vote_skip`` which do not exist here -- this looks copied from a
    cog class and is broken in its current form.
    """
    state = self.get_state(ctx.guild)
    client = ctx.guild.voice_client
    if ctx.channel.permissions_for(
            ctx.author).administrator or state.is_requester(ctx.author):
        # immediately skip if requester or admin
        client.stop()
    elif self.config["vote_skip"]:
        # vote to skip song
        channel = client.channel
        self._vote_skip(channel, ctx.author)
        # announce vote
        users_in_channel = len([
            member for member in channel.members if not member.bot
        ])  # don't count bots
        required_votes = math.ceil(
            self.config["vote_skip_ratio"] * users_in_channel)
        await ctx.send(
            f"{ctx.author.mention} voted to skip ({len(state.skip_votes)}/{required_votes} votes)"
        )
    else:
        raise commands.CommandError("Sorry, vote skipping is disabled.")
class Video:
    """Class containing information about a particular video."""
    def __init__(self, url_or_search, requested_by):
        """Fetch metadata for (or search for) a URL and record who asked for it."""
        # NOTE(review): this outer YoutubeDL context is unused -- _get_info
        # opens its own; the `with` here could be removed.
        with ytdl.YoutubeDL(YTDL_OPTS) as ydl:
            video = self._get_info(url_or_search)
            # First listed format is used as the audio stream source.
            video_format = video["formats"][0]
            self.stream_url = video_format["url"]
            self.video_url = video["webpage_url"]
            self.title = video["title"]
            self.uploader = video["uploader"] if "uploader" in video else ""
            self.thumbnail = video[
                "thumbnail"] if "thumbnail" in video else None
        self.requested_by = requested_by
    def _get_info(self, video_url):
        """Resolve *video_url* to a youtube-dl info dict (first entry of a playlist)."""
        with ytdl.YoutubeDL(YTDL_OPTS) as ydl:
            info = ydl.extract_info(video_url, download=False)
            video = None
            if "_type" in info and info["_type"] == "playlist":
                return self._get_info(
                    info["entries"][0]["url"])  # get info for first video
            else:
                video = info
            return video
    def get_embed(self):
        """Makes an embed out of this Video's information."""
        embed = discord.Embed(
            title=self.title, description=self.uploader, url=self.video_url)
        embed.set_footer(
            text=f"Requested by {self.requested_by.name}",
            icon_url=self.requested_by.avatar_url)
        if self.thumbnail:
            embed.set_thumbnail(url=self.thumbnail)
        return embed
@client.command(brief="Plays audio from <url>.")
async def play(ctx, *, url):
    """Plays audio hosted at <url> (or performs a search for <url> and plays the first result).

    NOTE(review): this command is broken as written: `client` is assigned in
    the else-branch, making it a local name, so the first line raises
    UnboundLocalError; `self` is also referenced but never defined here.
    The body appears to be copied from a cog method -- verify before use.
    """
    clients = client.guild.voice_client
    state = clients.get_state(ctx.guild)  # get the guild's state
    if clients and clients.channel:
        try:
            video = Video(url, ctx.author)
        except youtube_dl.DownloadError as e:
            logging.warn(f"Error downloading video: {e}")
            await ctx.send(
                "There was an error downloading your video, sorry.")
            return
        state.playlist.append(video)
        message = await ctx.send(
            "Added to queue.", embed=video.get_embed())
        await self._add_reaction_controls(message)
    else:
        if ctx.author.voice is not None and ctx.author.voice.channel is not None:
            channel = ctx.author.voice.channel
            try:
                video = Video(url, ctx.author)
            except youtube_dl.DownloadError as e:
                await ctx.send(
                    "There was an error downloading your video, sorry.")
                return
            client = await channel.connect()
            self._play_song(client, state, video)
            message = await ctx.send("", embed=video.get_embed())
            await self._add_reaction_controls(message)
            logging.info(f"Now playing '{video.title}'")
        else:
            await ctx.send("Failed To Play Video.")
async def on_reaction_add(self, reaction, user):
    """Respods to reactions added to the bot's messages, allowing reactions to control playback.

    NOTE(review): defined with ``self`` at module level and never registered
    with @client.event -- as written this handler is never invoked; it also
    relies on cog attributes (self.bot, self.get_state, self.config).
    """
    message = reaction.message
    if user != self.bot.user and message.author == self.bot.user:
        await message.remove_reaction(reaction, user)
        if message.guild and message.guild.voice_client:
            user_in_channel = user.voice and user.voice.channel and user.voice.channel == message.guild.voice_client.channel
            permissions = message.channel.permissions_for(user)
            guild = message.guild
            state = self.get_state(guild)
            if permissions.administrator or (
                    user_in_channel and state.is_requester(user)):
                client = message.guild.voice_client
                if reaction.emoji == "⏯":
                    # pause audio
                    self._pause_audio(client)
                elif reaction.emoji == "⏭":
                    # skip audio
                    client.stop()
                elif reaction.emoji == "⏮":
                    state.playlist.insert(
                        0, state.now_playing
                    )  # insert current song at beginning of playlist
                    client.stop()  # skip ahead
            elif reaction.emoji == "⏭" and self.config[
                    "vote_skip"] and user_in_channel and message.guild.voice_client and message.guild.voice_client.channel:
                # ensure that skip was pressed, that vote skipping is
                # enabled, the user is in the channel, and that the bot is
                # in a voice channel
                voice_channel = message.guild.voice_client.channel
                self._vote_skip(voice_channel, user)
                # announce vote
                channel = message.channel
                users_in_channel = len([
                    member for member in voice_channel.members
                    if not member.bot
                ])  # don't count bots
                required_votes = math.ceil(
                    self.config["vote_skip_ratio"] * users_in_channel)
                await channel.send(
                    f"{user.mention} voted to skip ({len(state.skip_votes)}/{required_votes} votes)"
                )
@client.command()
async def form(ctx, header, description, footers, fieldnames, color=None, **fields):
    """Build and send an embed from the supplied header/description/footer/fields.

    NOTE(review): discord.ext.commands does not support **kwargs parameters on
    commands, so `fields` is likely always empty when invoked via chat.
    """
    progress = await ctx.send("Formatting Your Message")
    await asyncio.sleep(2)
    await progress.edit(content="Almost Done...")
    await asyncio.sleep(1)
    await progress.edit(content="Resolving Objects. . .")
    await asyncio.sleep(1)
    await progress.edit(content="Done!")
    embed = discord.Embed(title=header, description=description)
    embed.set_footer(text=footers)
    # BUG FIX: the original indexed `fields[x + 1]`, which raises KeyError on
    # a keyword dict; attach one embed field per supplied keyword value.
    for field_value in fields.values():
        embed.add_field(name=f"{fieldnames}", value=field_value)
    await ctx.send(embed=embed)
@client.command()
async def Find(ctx, user: discord.Member):
    """Display profile details for *user* after a short spinner animation."""
    print(" Called")
    status_msg = await ctx.send("Getting User Info. . |")
    # Replay the spinner frames in the original order (the original spelled
    # out every sleep/edit pair by hand).
    for frame in ("/", "-", "\\", "|", "/", "-", "/", "-", "\\", "|"):
        await asyncio.sleep(0.01)
        await status_msg.edit(content="Getting User Info. . " + frame)
    await status_msg.edit(content="Found User Info!")
    embed = discord.Embed(title=f"User {user}", description="USER!")
    embed.add_field(name="Date Joined?",
                    value=str(user.joined_at))
    embed.add_field(name="On Mobile?",
                    value=str(user.is_on_mobile()))
    embed.add_field(name="Nitro Since?",
                    value=str(user.premium_since))
    embed.add_field(name="Server?", value=str(user.guild))
    embed.add_field(name="Extras",
                    value=f"NName: {user.nick}\nReal: {user.display_name}\nStatus? : {user.status}")
    await ctx.send(embed=embed)
    # BUG FIX: the original ended with ctx.send("") -- discord rejects empty
    # messages with an HTTPException, so that call was removed.
@client.command()
async def code(ctx):
    """Link to the bot's source-code repository."""
    print("code Called")
    await ctx.send("Source Code At https://github.com/Kai-Builder/database-bot/ !")
# Start the bot; "**you**" is a placeholder that must be replaced with a real token.
client.run("**you**")
| StarcoderdataPython |
279589 | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField, ValidationError, \
SelectField, TextAreaField, HiddenField, TimeField, BooleanField
from wtforms.validators import DataRequired
from resticweb.tools.local_session import LocalSession
from resticweb.models.general import SavedJobs, Schedule
import re
class AddJobForm(FlaskForm):
    """Minimal form for creating an unsaved ("ub"-prefixed) job."""
    ub_name = StringField('Job Name', validators=[DataRequired()])
    ub_description = TextAreaField('Description')
    submit = SubmitField('Submit')
class BaseJobEditForm(FlaskForm):
    """Common fields for editing a saved job; subclasses add job-type specifics."""
    saved_job_id = HiddenField('Id')
    name = StringField('Job Name', validators=[DataRequired()])
    description = TextAreaField('Description')
    additional_params = StringField('Additional Parameters')
    submit = SubmitField('Submit')
    def validate_name(self, name):
        # Reject the name only when a DIFFERENT job (other id) already uses it,
        # so saving a job under its current name still validates.
        with LocalSession() as session:
            saved_job = session.query(SavedJobs).filter_by(name=name.data).first()
            if saved_job and saved_job.id != int(self.saved_job_id.data):
                raise ValidationError(f"There already exists a job with name '{name.data}'. Please pick a different name.")
class BaseJobAddForm(FlaskForm):
    """Common fields for adding a new job; subclasses add job-type specifics."""
    saved_job_id = HiddenField('Id')
    name = StringField('Job Name', validators=[DataRequired()])
    description = TextAreaField('Description')
    additional_params = StringField('Additional Parameters')
    submit = SubmitField('Submit')
    def validate_name(self, name):
        # Unlike the edit form, ANY existing job with the same name is a conflict.
        with LocalSession() as session:
            saved_job = session.query(SavedJobs).filter_by(name=name.data).first()
            if saved_job:
                raise ValidationError(f"There already exists a job with name '{name.data}'. Please pick a different name.")
class AddCheckJobForm(BaseJobAddForm):
    # Which repository the new check job should run against.
    repository = SelectField('Repository to check', coerce=int)
class EditCheckJobForm(BaseJobEditForm):
    # Which repository the existing check job runs against.
    repository = SelectField('Repository to check', coerce=int)
class AddVacuumJobForm(BaseJobAddForm):
    # Vacuum jobs take no free-form parameters or description, so suppress
    # the inherited fields by overriding them with None.
    additional_params = None
    description = None
class EditVacuumJobForm(BaseJobEditForm):
    # Vacuum jobs take no free-form parameters or description, so suppress
    # the inherited fields by overriding them with None.
    additional_params = None
    description = None
class AddForgetJobForm(BaseJobAddForm):
    """Form for a new "forget" job: restic-style snapshot retention settings."""
    backup_set = SelectField('Backup Set', coerce=int)
    repository = SelectField('Repository', coerce=int)
    keep_last = IntegerField('Keep Last')
    keep_hourly = IntegerField('Keep Hourly')
    keep_daily = IntegerField('Keep Daily')
    keep_weekly = IntegerField('Keep Weekly')
    keep_monthly = IntegerField('Keep Monthly')
    keep_yearly = IntegerField('Keep Yearly')
    keep_within = StringField('Keep Within')
    prune = BooleanField('Prune')
class EditForgetJobForm(BaseJobEditForm):
    """Form for editing a "forget" job: restic-style snapshot retention settings."""
    backup_set = SelectField('Backup Set', coerce=int)
    repository = SelectField('Repository', coerce=int)
    keep_last = IntegerField('Keep Last')
    keep_hourly = IntegerField('Keep Hourly')
    keep_daily = IntegerField('Keep Daily')
    keep_weekly = IntegerField('Keep Weekly')
    keep_monthly = IntegerField('Keep Monthly')
    keep_yearly = IntegerField('Keep Yearly')
    keep_within = StringField('Keep Within')
    prune = BooleanField('Prune')
class AddPruneJobForm(BaseJobAddForm):
    # Which repository the new prune job should run against.
    repository = SelectField('Repository to prune', coerce=int)
class EditPruneJobForm(BaseJobEditForm):
    # Which repository the existing prune job runs against.
    repository = SelectField('Repository to prune', coerce=int)
class ScheduleBaseForm(FlaskForm):
    """Shared fields and time validation for add/edit schedule forms."""
    job_list = HiddenField('Job List')  # JSON list of job ids
    jobs_changed = HiddenField('Jobs Changed')
    name = StringField('Schedule Name', validators=[DataRequired()])
    description = TextAreaField('Description')
    time_interval = IntegerField('Time Interval')
    time_unit = SelectField('Time Unit')
    time_at = StringField('Time At')
    monday = BooleanField('Monday')
    tuesday = BooleanField('Tuesday')
    wednesday = BooleanField('Wednesday')
    thursday = BooleanField('Thursday')
    friday = BooleanField('Friday')
    saturday = BooleanField('Saturday')
    sunday = BooleanField('Sunday')
    missed_timeout = IntegerField('Schedule Miss Timeout (minutes)')
    paused = BooleanField('Paused')
    submit = SubmitField('Submit')
    def validate_time_at(self, time_at):
        # Daily/weekly schedules (or any specific weekday) need a full
        # [HH:]MM:SS time; try once more after zero-padding a bare "H:MM:SS".
        if self.time_unit.data in ('days', 'day', 'week', 'weeks') or self.monday.data is True or self.tuesday.data is True or self.wednesday.data is True or self.thursday.data is True or self.friday.data is True or self.saturday.data is True or self.sunday.data is True:
            if not re.match(r'^([0-2]\d:)?[0-5]\d:[0-5]\d$', time_at.data):
                self.time_at.data = "0" + self.time_at.data
                if not re.match(r'^([0-2]\d:)?[0-5]\d:[0-5]\d$', time_at.data):
                    raise ValidationError('Invalid time format')
        # Hourly schedules expect "[MM]:SS".
        if self.time_unit.data in ('hours', 'hour'):
            if not re.match(r'^([0-5]\d)?:[0-5]\d$', time_at.data):
                raise ValidationError(('Invalid time format for'
                                       ' an hourly job'))
        # Minutely schedules expect ":SS".
        if self.time_unit.data in ('minutes', 'minute'):
            if not re.match(r'^:[0-5]\d$', time_at.data):
                raise ValidationError(('Invalid time format for'
                                       ' a minutely job'))
class AddScheduleForm(ScheduleBaseForm):
    """Schedule creation form: any existing schedule with the same name conflicts."""
    def validate_name(self, name):
        with LocalSession() as session:
            schedule = session.query(Schedule).filter_by(name=name.data).first()
            if schedule:
                raise ValidationError(f"There already exists a schedule with name '{name.data}'. Please pick a different name.")
class EditScheduleForm(ScheduleBaseForm):
    """Schedule edit form: a name clash with a DIFFERENT schedule id conflicts."""
    schedule_id = HiddenField('Id')
    def validate_name(self, name):
        with LocalSession() as session:
            schedule = session.query(Schedule).filter_by(name=name.data).first()
            if schedule and schedule.id != int(self.schedule_id.data):
                raise ValidationError(f"There already exists a schedule with name '{name.data}'. Please pick a different name.")
1915235 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from app_libs.config_reader import ConfigReader
from app_libs.main_runner import MainRunner
__author__ = 'litleleprikon'
def main():
    """Load config.json, validate it, and hand it to MainRunner.

    Exits the process when the config file is missing or fails validation.
    """
    try:
        with open('config.json', 'r') as f:
            config = ConfigReader(f)
        # Dead `pass` statement from the original removed.
    except FileNotFoundError:
        print('No config File\n')
        exit()
    if config.error:
        print(config.error)
        exit()
    MainRunner(config)


if __name__ == '__main__':
    main()
5067348 | <reponame>sireliah/polish-python
"""Test script dla ftplib module."""
# Modified by <NAME>' to test FTP class, IPv6 oraz TLS
# environment
zaimportuj ftplib
zaimportuj asyncore
zaimportuj asynchat
zaimportuj socket
zaimportuj io
zaimportuj errno
zaimportuj os
zaimportuj time
spróbuj:
zaimportuj ssl
wyjąwszy ImportError:
ssl = Nic
z unittest zaimportuj TestCase, skipUnless
z test zaimportuj support
z test.support zaimportuj HOST, HOSTv6
threading = support.import_module('threading')
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
# NOTE(review): this file is written in the "polish-python" dialect (translated
# keywords: klasa=class, jeżeli=if, nie=not, zwróć=return, jest=is, Nic=None,
# Prawda/Nieprawda=True/False, podnieś=raise); it does NOT parse under
# standard CPython.
klasa DummyDTPHandler(asynchat.async_chat):
    # Whether the fake data channel has already been closed, so the
    # '226 transfer complete' reply is pushed only once.
    dtp_conn_closed = Nieprawda
    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''
    def handle_read(self):
        # Accumulate everything received on the data channel as ASCII text.
        self.baseclass.last_received_data += self.recv(1024).decode('ascii')
    def handle_close(self):
        # XXX: this method can be called many times in a row for a single
        # connection, including in clear-text (non-TLS) mode.
        # (behaviour witnessed with test_data_connection)
        jeżeli nie self.dtp_conn_closed:
            self.baseclass.push('226 transfer complete')
            self.close()
            self.dtp_conn_closed = Prawda
    def push(self, what):
        # Allow the test to override the next payload via baseclass.next_data.
        jeżeli self.baseclass.next_data jest nie Nic:
            what = self.baseclass.next_data
            self.baseclass.next_data = Nic
        jeżeli nie what:
            zwróć self.close_when_done()
        super(DummyDTPHandler, self).push(what.encode('ascii'))
    def handle_error(self):
        podnieś Exception
# Minimal in-memory FTP server-side protocol handler used by the test suite.
# Incoming command lines are dispatched by name: "XYZ arg" -> self.cmd_xyz(arg).
klasa DummyFTPHandler(asynchat.async_chat):
    # data-channel handler class; overridden by the TLS variant below
    dtp_handler = DummyDTPHandler
    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        # tells the socket to handle urgent data inline (ABOR command)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
        self.set_terminator(b"\r\n")
        self.in_buffer = []
        self.dtp = Nic
        self.last_received_cmd = Nic
        self.last_received_data = ''
        # when non-empty, pushed to the client before handling the next command
        self.next_response = ''
        self.next_data = Nic
        # argument of the last REST command, as a string, or None
        self.rest = Nic
        self.next_retr_data = RETR_DATA
        self.push('220 welcome')
    def collect_incoming_data(self, data):
        self.in_buffer.append(data)
    def found_terminator(self):
        # One full command line received: decode, record it, dispatch to cmd_*.
        line = b''.join(self.in_buffer).decode('ascii')
        self.in_buffer = []
        jeżeli self.next_response:
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        jeżeli space != -1:
            arg = line[space + 1:]
        inaczej:
            arg = ""
        jeżeli hasattr(self, 'cmd_' + cmd):
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        inaczej:
            self.push('550 command "%s" nie understood.' %cmd)
    def handle_error(self):
        # fail tests loudly instead of letting asyncore log and continue
        podnieś Exception
    def push(self, data):
        # encode and append the CRLF line terminator expected by FTP clients
        asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
    def cmd_port(self, arg):
        # active mode: arg is "h1,h2,h3,h4,p1,p2"; connect back to the client
        addr = list(map(int, arg.split(',')))
        ip = '%d.%d.%d.%d' %tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        s = socket.create_connection((ip, port), timeout=TIMEOUT)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')
    def cmd_pasv(self, arg):
        # passive mode: listen on an ephemeral port and wait for the client
        przy socket.socket() jako sock:
            sock.bind((self.socket.getsockname()[0], 0))
            sock.listen()
            sock.settimeout(TIMEOUT)
            ip, port = sock.getsockname()[:2]
            ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
            self.push('227 entering dalejive mode (%s,%d,%d)' %(ip, p1, p2))
            conn, addr = sock.accept()
            self.dtp = self.dtp_handler(conn, baseclass=self)
    def cmd_eprt(self, arg):
        # extended active mode (RFC 2428): "|af|ip|port|"
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=TIMEOUT)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')
    def cmd_epsv(self, arg):
        # extended passive mode (RFC 2428), IPv6
        przy socket.socket(socket.AF_INET6) jako sock:
            sock.bind((self.socket.getsockname()[0], 0))
            sock.listen()
            sock.settimeout(TIMEOUT)
            port = sock.getsockname()[1]
            self.push('229 entering extended dalejive mode (|||%d|)' %port)
            conn, addr = sock.accept()
            self.dtp = self.dtp_handler(conn, baseclass=self)
    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)
    def cmd_noop(self, arg):
        self.push('200 noop ok')
    def cmd_user(self, arg):
        self.push('331 username ok')
    def cmd_pass(self, arg):
        self.push('<PASSWORD>')
    def cmd_acct(self, arg):
        self.push('230 acct ok')
    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')
    def cmd_rnto(self, arg):
        self.push('250 rnto ok')
    def cmd_dele(self, arg):
        self.push('250 dele ok')
    def cmd_cwd(self, arg):
        self.push('250 cwd ok')
    def cmd_size(self, arg):
        self.push('250 1000')
    def cmd_mkd(self, arg):
        self.push('257 "%s"' %arg)
    def cmd_rmd(self, arg):
        self.push('250 rmd ok')
    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')
    def cmd_type(self, arg):
        self.push('200 type ok')
    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()
    def cmd_abor(self, arg):
        self.push('226 abor ok')
    def cmd_stor(self, arg):
        self.push('125 stor ok')
    def cmd_rest(self, arg):
        # remember the restart offset for the next RETR
        self.rest = arg
        self.push('350 rest ok')
    def cmd_retr(self, arg):
        self.push('125 retr ok')
        jeżeli self.rest jest nie Nic:
            offset = int(self.rest)
        inaczej:
            offset = 0
        self.dtp.push(self.next_retr_data[offset:])
        self.dtp.close_when_done()
        self.rest = Nic
    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()
    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()
    def cmd_opts(self, arg):
        self.push('200 opts ok')
    def cmd_mlsd(self, arg):
        self.push('125 mlsd ok')
        self.dtp.push(MLSD_DATA)
        self.dtp.close_when_done()
    def cmd_setlongretr(self, arg):
        # For testing. Next RETR will return a long line.
        self.next_retr_data = 'x' * int(arg)
        self.push('125 setlongretr ok')
# Asynchronous FTP server running in its own thread; spawns one
# DummyFTPHandler per accepted client connection.
klasa DummyFTPServer(asyncore.dispatcher, threading.Thread):
    handler = DummyFTPHandler
    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = Nieprawda
        self.active_lock = threading.Lock()
        self.host, self.port = self.socket.getsockname()[:2]
        self.handler_instance = Nic
    def start(self):
        assert nie self.active
        # event used to wait until the server thread has actually started
        self.__flag = threading.Event()
        threading.Thread.start(self)
        self.__flag.wait()
    def run(self):
        self.active = Prawda
        self.__flag.set()
        # run the asyncore loop one step at a time so stop() can interleave
        dopóki self.active oraz asyncore.socket_map:
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=Prawda)
    def stop(self):
        assert self.active
        self.active = Nieprawda
        self.join()
    def handle_accepted(self, conn, addr):
        self.handler_instance = self.handler(conn)
    def handle_connect(self):
        self.close()
    handle_read = handle_connect
    def writable(self):
        zwróć 0
    def handle_error(self):
        # fail tests loudly on unexpected dispatcher errors
        podnieś Exception
# TLS test infrastructure, only defined when the ssl module is available.
jeżeli ssl jest nie Nic:
    CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
    CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
    klasa SSLConnection(asyncore.dispatcher):
        """An asyncore.dispatcher subclass supporting TLS/SSL."""
        _ssl_accepting = Nieprawda
        _ssl_closing = Nieprawda
        def secure_connection(self):
            # wrap the plain socket in a server-side, non-blocking handshake
            socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=Nieprawda,
                                     certfile=CERTFILE, server_side=Prawda,
                                     do_handshake_on_connect=Nieprawda,
                                     ssl_version=ssl.PROTOCOL_SSLv23)
            self.del_channel()
            self.set_socket(socket)
            self._ssl_accepting = Prawda
        def _do_ssl_handshake(self):
            # drive the non-blocking handshake; retried from the read/write events
            spróbuj:
                self.socket.do_handshake()
            wyjąwszy ssl.SSLError jako err:
                jeżeli err.args[0] w (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    zwróć
                albo_inaczej err.args[0] == ssl.SSL_ERROR_EOF:
                    zwróć self.handle_close()
                podnieś
            wyjąwszy OSError jako err:
                jeżeli err.args[0] == errno.ECONNABORTED:
                    zwróć self.handle_close()
            inaczej:
                self._ssl_accepting = Nieprawda
        def _do_ssl_shutdown(self):
            self._ssl_closing = Prawda
            spróbuj:
                self.socket = self.socket.unwrap()
            wyjąwszy ssl.SSLError jako err:
                jeżeli err.args[0] w (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    zwróć
            wyjąwszy OSError jako err:
                # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
                # from OpenSSL's SSL_shutdown(), corresponding to a
                # closed socket condition. See also:
                # http://www.mail-archive.com/<EMAIL>/msg60710.html
                dalej
            self._ssl_closing = Nieprawda
            # after CCC the channel reverts to cleartext, so keep it open
            jeżeli getattr(self, '_ccc', Nieprawda) jest Nieprawda:
                super(SSLConnection, self).close()
            inaczej:
                dalej
        def handle_read_event(self):
            jeżeli self._ssl_accepting:
                self._do_ssl_handshake()
            albo_inaczej self._ssl_closing:
                self._do_ssl_shutdown()
            inaczej:
                super(SSLConnection, self).handle_read_event()
        def handle_write_event(self):
            jeżeli self._ssl_accepting:
                self._do_ssl_handshake()
            albo_inaczej self._ssl_closing:
                self._do_ssl_shutdown()
            inaczej:
                super(SSLConnection, self).handle_write_event()
        def send(self, data):
            spróbuj:
                zwróć super(SSLConnection, self).send(data)
            wyjąwszy ssl.SSLError jako err:
                jeżeli err.args[0] w (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
                                   ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    zwróć 0
                podnieś
        def recv(self, buffer_size):
            spróbuj:
                zwróć super(SSLConnection, self).recv(buffer_size)
            wyjąwszy ssl.SSLError jako err:
                jeżeli err.args[0] w (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    zwróć b''
                jeżeli err.args[0] w (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
                    self.handle_close()
                    zwróć b''
                podnieś
        def handle_error(self):
            podnieś Exception
        def close(self):
            # shut the TLS layer down cleanly before closing the dispatcher
            jeżeli (isinstance(self.socket, ssl.SSLSocket) oraz
                    self.socket._sslobj jest nie Nic):
                self._do_ssl_shutdown()
            inaczej:
                super(SSLConnection, self).close()
    klasa DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
        """A DummyDTPHandler subclass supporting TLS/SSL."""
        def __init__(self, conn, baseclass):
            DummyDTPHandler.__init__(self, conn, baseclass)
            jeżeli self.baseclass.secure_data_channel:
                self.secure_connection()
    klasa DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
        """A DummyFTPHandler subclass supporting TLS/SSL."""
        dtp_handler = DummyTLS_DTPHandler
        def __init__(self, conn):
            DummyFTPHandler.__init__(self, conn)
            self.secure_data_channel = Nieprawda
            self._ccc = Nieprawda
        def cmd_auth(self, line):
            """Set up secure control channel."""
            self.push('234 AUTH TLS successful')
            self.secure_connection()
        def cmd_ccc(self, line):
            self.push('220 Reverting back to clear-text')
            self._ccc = Prawda
            self._do_ssl_shutdown()
        def cmd_pbsz(self, line):
            """Negotiate size of buffer for secure data transfer.
            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            self.push('200 PBSZ=0 successful.')
        def cmd_prot(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            jeżeli arg == 'C':
                self.push('200 Protection set to Clear')
                self.secure_data_channel = Nieprawda
            albo_inaczej arg == 'P':
                self.push('200 Protection set to Private')
                self.secure_data_channel = Prawda
            inaczej:
                self.push("502 Unrecognized PROT type (use C albo P).")
    klasa DummyTLS_FTPServer(DummyFTPServer):
        handler = DummyTLS_FTPHandler
# Functional tests for ftplib.FTP against the in-process dummy server.
klasa TestFTPClass(TestCase):
    def setUp(self):
        self.server = DummyFTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def check_data(self, received, expected):
        self.assertEqual(len(received), len(expected))
        self.assertEqual(received, expected)
    def test_getwelcome(self):
        self.assertEqual(self.client.getwelcome(), '220 welcome')
    def test_sanitize(self):
        self.assertEqual(self.client.sanitize('foo'), repr('foo'))
        self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
        self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
    def test_exceptions(self):
        # the first digit of the reply code selects the exception class
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
        self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
    def test_all_errors(self):
        exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
                      ftplib.error_proto, ftplib.Error, OSError, EOFError)
        dla x w exceptions:
            spróbuj:
                podnieś x('exception nie included w all_errors set')
            wyjąwszy ftplib.all_errors:
                dalej
    def test_set_pasv(self):
        # passive mode is supposed to be enabled by default
        self.assertPrawda(self.client.passiveserver)
        self.client.set_pasv(Prawda)
        self.assertPrawda(self.client.passiveserver)
        self.client.set_pasv(Nieprawda)
        self.assertNieprawda(self.client.passiveserver)
    def test_voidcmd(self):
        self.client.voidcmd('echo 200')
        self.client.voidcmd('echo 299')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
    def test_login(self):
        self.client.login()
    def test_acct(self):
        self.client.acct('passwd')
    def test_rename(self):
        self.client.rename('a', 'b')
        self.server.handler_instance.next_response = '200'
        self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
    def test_delete(self):
        self.client.delete('foo')
        self.server.handler_instance.next_response = '199'
        self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
    def test_size(self):
        self.client.size('foo')
    def test_mkd(self):
        dir = self.client.mkd('/foo')
        self.assertEqual(dir, '/foo')
    def test_rmd(self):
        self.client.rmd('foo')
    def test_cwd(self):
        dir = self.client.cwd('/foo')
        self.assertEqual(dir, '250 cwd ok')
    def test_pwd(self):
        dir = self.client.pwd()
        self.assertEqual(dir, 'pwd ok')
    def test_quit(self):
        self.assertEqual(self.client.quit(), '221 quit ok')
        # Ensure the connection gets closed; sock attribute should be None
        self.assertEqual(self.client.sock, Nic)
    def test_abort(self):
        self.client.abort()
    def test_retrbinary(self):
        def callback(data):
            received.append(data.decode('ascii'))
        received = []
        self.client.retrbinary('retr', callback)
        self.check_data(''.join(received), RETR_DATA)
    def test_retrbinary_rest(self):
        def callback(data):
            received.append(data.decode('ascii'))
        dla rest w (0, 10, 20):
            received = []
            self.client.retrbinary('retr', callback, rest=rest)
            self.check_data(''.join(received), RETR_DATA[rest:])
    def test_retrlines(self):
        received = []
        self.client.retrlines('retr', received.append)
        self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
    def test_storbinary(self):
        f = io.BytesIO(RETR_DATA.encode('ascii'))
        self.client.storbinary('stor', f)
        self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storbinary('stor', f, callback=lambda x: flag.append(Nic))
        self.assertPrawda(flag)
    def test_storbinary_rest(self):
        f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
        dla r w (30, '30'):
            f.seek(0)
            self.client.storbinary('stor', f, rest=r)
            self.assertEqual(self.server.handler_instance.rest, str(r))
    def test_storlines(self):
        f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
        self.client.storlines('stor', f)
        self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f, callback=lambda x: flag.append(Nic))
        self.assertPrawda(flag)
        f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
        # storlines() expects a binary file, not a text file
        przy support.check_warnings(('', BytesWarning), quiet=Prawda):
            self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
    def test_nlst(self):
        self.client.nlst()
        self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
    def test_dir(self):
        l = []
        self.client.dir(lambda x: l.append(x))
        self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
    def test_mlsd(self):
        list(self.client.mlsd())
        list(self.client.mlsd(path='/'))
        list(self.client.mlsd(path='/', facts=['size', 'type']))
        ls = list(self.client.mlsd())
        dla name, facts w ls:
            self.assertIsInstance(name, str)
            self.assertIsInstance(facts, dict)
            self.assertPrawda(name)
            self.assertIn('type', facts)
            self.assertIn('perm', facts)
            self.assertIn('unique', facts)
        def set_data(data):
            self.server.handler_instance.next_data = data
        def test_entry(line, type=Nic, perm=Nic, unique=Nic, name=Nic):
            type = 'type' jeżeli type jest Nic inaczej type
            perm = 'perm' jeżeli perm jest Nic inaczej perm
            unique = 'unique' jeżeli unique jest Nic inaczej unique
            name = 'name' jeżeli name jest Nic inaczej name
            set_data(line)
            _name, facts = next(self.client.mlsd())
            self.assertEqual(_name, name)
            self.assertEqual(facts['type'], type)
            self.assertEqual(facts['perm'], perm)
            self.assertEqual(facts['unique'], unique)
        # plain
        test_entry('type=type;perm=perm;unique=unique; name\r\n')
        # "=" in fact value
        test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
        test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
        test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
        test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
        # spaces in name
        test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
        test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
        test_entry('type=type;perm=perm;unique=unique;  name\r\n', name=" name")
        test_entry('type=type;perm=perm;unique=unique; n am  e\r\n', name="n am  e")
        # ";" in name
        test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
        test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
        test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
        test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
        # case sensitiveness
        set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
        _name, facts = next(self.client.mlsd())
        dla x w facts:
            self.assertPrawda(x.islower())
        # no data (directory empty)
        set_data('')
        self.assertRaises(StopIteration, next, self.client.mlsd())
        set_data('')
        dla x w self.client.mlsd():
            self.fail("unexpected data %s" % x)
    def test_makeport(self):
        przy self.client.makeport():
            # IPv4 is in use, just make sure send_eprt has not been used
            self.assertEqual(self.server.handler_instance.last_received_cmd,
                                'port')
    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), timeout=TIMEOUT)
        conn.close()
        # IPv4 is in use, just make sure send_epsv has not been used
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
    def test_with_statement(self):
        self.client.quit()
        def is_client_connected():
            jeżeli self.client.sock jest Nic:
                zwróć Nieprawda
            spróbuj:
                self.client.sendcmd('noop')
            wyjąwszy (OSError, EOFError):
                zwróć Nieprawda
            zwróć Prawda
        # base test
        przy ftplib.FTP(timeout=TIMEOUT) jako self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.assertPrawda(is_client_connected())
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
        self.assertNieprawda(is_client_connected())
        # QUIT sent inside the with block
        przy ftplib.FTP(timeout=TIMEOUT) jako self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.client.quit()
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
        self.assertNieprawda(is_client_connected())
        # force a wrong response code to be sent on QUIT: error_perm
        # is expected and the connection is supposed to be closed
        spróbuj:
            przy ftplib.FTP(timeout=TIMEOUT) jako self.client:
                self.client.connect(self.server.host, self.server.port)
                self.client.sendcmd('noop')
                self.server.handler_instance.next_response = '550 error on quit'
        wyjąwszy ftplib.error_perm jako err:
            self.assertEqual(str(err), '550 error on quit')
        inaczej:
            self.fail('Exception nie podnieśd')
        # needed to give the threaded server some time to set the attribute
        # which otherwise would still be == 'noop'
        time.sleep(0.1)
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
        self.assertNieprawda(is_client_connected())
    def test_source_address(self):
        self.client.quit()
        port = support.find_unused_port()
        spróbuj:
            self.client.connect(self.server.host, self.server.port,
                                source_address=(HOST, port))
            self.assertEqual(self.client.sock.getsockname()[1], port)
            self.client.quit()
        wyjąwszy OSError jako e:
            jeżeli e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            podnieś
    def test_source_address_passive_connection(self):
        port = support.find_unused_port()
        self.client.source_address = (HOST, port)
        spróbuj:
            przy self.client.transfercmd('list') jako sock:
                self.assertEqual(sock.getsockname()[1], port)
        wyjąwszy OSError jako e:
            jeżeli e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            podnieś
    def test_parse257(self):
        self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
        self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
        self.assertEqual(ftplib.parse257('257 ""'), '')
        self.assertEqual(ftplib.parse257('257 "" created'), '')
        self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
        # The 257 response is supposed to include the directory
        # name and in case it contains embedded double-quotes
        # they must be doubled (see RFC-959, chapter 7, appendix 2).
        self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
        self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
    def test_line_too_long(self):
        self.assertRaises(ftplib.Error, self.client.sendcmd,
                          'x' * self.client.maxline * 2)
    def test_retrlines_too_long(self):
        self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
        received = []
        self.assertRaises(ftplib.Error,
                          self.client.retrlines, 'retr', received.append)
    def test_storlines_too_long(self):
        f = io.BytesIO(b'x' * self.client.maxline * 2)
        self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
# Same client operations over IPv6 (EPRT/EPSV instead of PORT/PASV).
@skipUnless(support.IPV6_ENABLED, "IPv6 nie enabled")
klasa TestIPv6Environment(TestCase):
    def setUp(self):
        self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_af(self):
        self.assertEqual(self.client.af, socket.AF_INET6)
    def test_makeport(self):
        przy self.client.makeport():
            self.assertEqual(self.server.handler_instance.last_received_cmd,
                                'eprt')
    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), timeout=TIMEOUT)
        conn.close()
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
    def test_transfer(self):
        def retr():
            def callback(data):
                received.append(data.decode('ascii'))
            received = []
            self.client.retrbinary('retr', callback)
            self.assertEqual(len(''.join(received)), len(RETR_DATA))
            self.assertEqual(''.join(received), RETR_DATA)
        # exercise both passive and active data connections
        self.client.set_pasv(Prawda)
        retr()
        self.client.set_pasv(Nieprawda)
        retr()
@skipUnless(ssl, "SSL nie available")
klasa TestTLS_FTPClassMixin(TestFTPClass):
    """Repeat TestFTPClass tests starting the TLS layer for both control
    and data connections first.
    """
    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        # enable TLS
        self.client.auth()
        self.client.prot_p()
@skipUnless(ssl, "SSL nie available")
klasa TestTLS_FTPClass(TestCase):
    """Specific TLS_FTP class tests."""
    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_control_connection(self):
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
    def test_data_connection(self):
        # clear text
        przy self.client.transfercmd('list') jako sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
        # secured, after PROT P
        self.client.prot_p()
        przy self.client.transfercmd('list') jako sock:
            self.assertIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
        # PROT C is issued, the connection must be in cleartext again
        self.client.prot_c()
        przy self.client.transfercmd('list') jako sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
    def test_login(self):
        # login() is supposed to implicitly secure the control connection
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # make sure that AUTH TLS doesn't get issued again
        self.client.login()
    def test_auth_issued_twice(self):
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)
    def test_auth_ssl(self):
        spróbuj:
            self.client.ssl_version = ssl.PROTOCOL_SSLv23
            self.client.auth()
            self.assertRaises(ValueError, self.client.auth)
        w_końcu:
            self.client.ssl_version = ssl.PROTOCOL_TLSv1
    def test_context(self):
        # keyfile/certfile are mutually exclusive with a context argument
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
                          context=ctx)
        self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
                          context=ctx)
        self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
                          keyfile=CERTFILE, context=ctx)
        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIs(self.client.sock.context, ctx)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.prot_p()
        przy self.client.transfercmd('list') jako sock:
            self.assertIs(sock.context, ctx)
            self.assertIsInstance(sock, ssl.SSLSocket)
    def test_ccc(self):
        self.assertRaises(ValueError, self.client.ccc)
        self.client.login(secure=Prawda)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.ccc()
        self.assertRaises(ValueError, self.client.sock.unwrap)
    def test_check_hostname(self):
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = Prawda
        ctx.load_verify_locations(CAFILE)
        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
        # 127.0.0.1 doesn't match SAN
        self.client.connect(self.server.host, self.server.port)
        przy self.assertRaises(ssl.CertificateError):
            self.client.auth()
        # exception quits connection
        self.client.connect(self.server.host, self.server.port)
        self.client.prot_p()
        przy self.assertRaises(ssl.CertificateError):
            przy self.client.transfercmd("list") jako sock:
                dalej
        self.client.quit()
        self.client.connect("localhost", self.server.port)
        self.client.auth()
        self.client.quit()
        self.client.connect("localhost", self.server.port)
        self.client.prot_p()
        przy self.client.transfercmd("list") jako sock:
            dalej
# Timeout propagation tests against a one-shot threaded socket server.
klasa TestTimeouts(TestCase):
    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(20)
        self.port = support.bind_port(self.sock)
        self.server_thread = threading.Thread(target=self.server)
        self.server_thread.start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        self.old_port = ftplib.FTP.port
        ftplib.FTP.port = self.port
    def tearDown(self):
        ftplib.FTP.port = self.old_port
        self.server_thread.join()
    def server(self):
        # This method sets the evt 3 times:
        #  1) when the connection is ready to be accepted.
        #  2) when it is safe for the caller to close the connection
        #  3) when we have closed the socket
        self.sock.listen()
        # (1) Signal the caller that we are ready to accept the connection.
        self.evt.set()
        spróbuj:
            conn, addr = self.sock.accept()
        wyjąwszy socket.timeout:
            dalej
        inaczej:
            conn.sendall(b"1 Hola mundo\n")
            conn.shutdown(socket.SHUT_WR)
            # (2) Signal the caller that it is safe to close the socket.
            self.evt.set()
            conn.close()
        w_końcu:
            self.sock.close()
    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assertIsNic(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        spróbuj:
            ftp = ftplib.FTP(HOST)
        w_końcu:
            socket.setdefaulttimeout(Nic)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutNic(self):
        # no timeout -- do not use global socket timeout
        self.assertIsNic(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        spróbuj:
            ftp = ftplib.FTP(HOST, timeout=Nic)
        w_końcu:
            socket.setdefaulttimeout(Nic)
        self.assertIsNic(ftp.sock.gettimeout())
        self.evt.wait()
        ftp.close()
    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutConnect(self):
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDifferentOrder(self):
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDirectAccess(self):
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
def test_main():
    # Run every test class; threading_setup/cleanup ensures no thread leaks.
    tests = [TestFTPClass, TestTimeouts,
             TestIPv6Environment,
             TestTLS_FTPClassMixin, TestTLS_FTPClass]
    thread_info = support.threading_setup()
    spróbuj:
        support.run_unittest(*tests)
    w_końcu:
        support.threading_cleanup(*thread_info)
jeżeli __name__ == '__main__':
    test_main()
| StarcoderdataPython |
4835746 | <filename>followthegreen/followthegreen.py<gh_stars>0
# Follow The Green mission container Class
# Keeps all information handy. Dispatches instructions to do things.
#
# Cannot use Follow the green.
# We are sorry. We cannot provide Follow The Green service at this airport.
# Reasons:
# This airport does not have a routing network of taxiway.
#
# Can use Follow the green, but other issue:
# We are sorry. We cannot provide Follow The Green service now.
# Reasons:
# You are too far from the taxiways.
# We could not find a suitable route to your destination.
#
import logging
import xp
from XPLMUtilities import XPLMSpeakString
from .aircraft import Aircraft
from .airport import Airport
from .flightloop import FlightLoop
from .globals import ARRIVAL, DEPARTURE
from .lightstring import LightString
from .ui import UIUtil
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - Follow The Green - %(levelname)s - %(message)s') # filename=('FTG_log.txt')
class FollowTheGreen:
# Internal status
STATUS = {
"NEW": "NEW",
"INITIALIZED": "INIT",
"READY": "READY",
"ACTIVE": "ACTIVE"
}
    def __init__(self, pi):
        # Mission lifecycle state; one of FollowTheGreen.STATUS values.
        self.__status = FollowTheGreen.STATUS["NEW"]
        self.pi = pi  # owning plugin instance — presumably the XPPython plugin; confirm
        self.airport = None  # Airport instance once located and prepared
        self.aircraft = None  # Aircraft instance, created in getAirport()
        self.lights = None  # LightString currently laid along the route
        self.segment = 0  # counter for green segments currently lit -0-----|-1-----|-2---------|-3---
        self.move = None  # departure or arrival, guessed first, can be changed by pilot.
        self.destination = None  # Handy
        self.ui = UIUtil(self)  # Where windows are built
        self.flightLoop = FlightLoop(self)  # where the magic is done
    def start(self):
        # Toggles visibility of main window.
        # If it was simply closed for hiding, show it again as it was.
        # If it does not exist, creates it from start of process.
        # if self.__status = FollowTheGreen.STATUS["ACTIVE"]:
        logging.info("FollowTheGreen::status: %s, %s.", self.__status, self.ui.mainWindowExists())
        if self.ui.mainWindowExists():
            # Window already built: just flip its visibility.
            logging.debug("FollowTheGreen::start: mainWindow exists, changing visibility %s.", self.ui.isMainWindowVisible())
            self.ui.toggleVisibilityMainWindow()
            return 1
        else:
            # Info 1
            logging.info("FollowTheGreen::start: starting..")
            # getAirport() drives the whole flow and returns the main widget.
            mainWindow = self.getAirport()
            logging.debug("FollowTheGreen::start: mainWindow created")
            if mainWindow and not xp.isWidgetVisible(mainWindow):
                xp.showWidget(mainWindow)
                logging.debug("FollowTheGreen::start: mainWindow shown")
            logging.info("FollowTheGreen::start: ..started.")
            return 1  # window displayed
        return 0
def getAirport(self):
# Search for airport or prompt for one.
# If airport is not equiped, we loop here until we get a suitable airport.
# When one is given and satisfies the condition for FTG
# we go to next step: Find the end point of follow the green.
# @todo: We need to guess those from dataref
# Note: Aircraft should be "created" outside of FollowTheGreen
# and passed to start or getAirport. That way, we can instanciate
# individual FollowTheGreen for numerous aircrafts.
# DH: List of Aircrafts and icao categories available here:
# https://www.faa.gov/airports/engineering/aircraft_char_database/
# converted simply into a csv and using only the filds
# ICAO code and AAC, implemented in aircraft module, simplified __init__ for
# callsign only, rest comes from X-Plane dataref
self.aircraft = Aircraft("PO-123")
pos = self.aircraft.position()
if pos is None:
logging.debug("FollowTheGreen::getAirport: no plane position")
return self.ui.sorry("We could not locate your plane.")
if pos[0] == 0 and pos[1] == 0:
logging.debug("FollowTheGreen::getAirport: no plane position")
return self.ui.sorry("We could not locate your plane.")
# Info 2
logging.info("FollowTheGreen::getAirport: Plane postion %s" % pos)
airport = self.aircraft.airport(pos)
if airport is None:
logging.debug("FollowTheGreen::getAirport: no airport")
return self.ui.promptForAirport() # prompt for airport will continue with getDestination(airport)
if airport.name == "NOT FOUND":
logging.debug("FollowTheGreen::getAirport: no airport (not found)")
return self.ui.promptForAirport() # prompt for airport will continue with getDestination(airport)
# Info 3
logging.info("FollowTheGreen::getAirport: At %s" % airport.name)
return self.getDestination(airport.navAidID)
def getDestination(self, airport):
# Prompt for local destination at airport.
# Either a runway for departure or a parking for arrival.
if not self.airport or (self.airport.icao != airport): # we may have changed airport since last call
self.airport = Airport(airport)
# Info 4 to 8 in airport.prepare()
status = self.airport.prepare_new(self.ui) # [ok, errmsg] ==> loading in flight loop!
else:
return self.getDestination_cont(self.airport)
return self.ui.promptForWindow()
    def getDestination_cont(self, airport):
        # Continuation of getDestination() once the airport is prepared:
        # guess departure/arrival from the plane's position and ask the pilot.
        self.airport = airport
        logging.debug("FollowTheGreen::getDestination: airport ready")
        self.move = self.airport.guessMove(self.aircraft.position())
        # Info 10
        logging.info("FollowTheGreen::getDestination: Guessing %s", self.move)
        return self.ui.promptForDestination()
def getDestination_old(self, airport):
# Prompt for local destination at airport.
# Either a runway for departure or a parking for arrival.
if not self.airport or (self.airport.icao != airport): # we may have changed airport since last call
airport = Airport(airport)
# Info 4 to 8 in airport.prepare()
status = airport.prepare() # [ok, errmsg]
if not status[0]:
logging.warn("FollowTheGreen::getDestination: airport not ready: %s" % (status[1]))
return self.ui.sorry(status[1])
self.airport = airport
else:
logging.debug("FollowTheGreen::getDestination: airport already loaded")
logging.debug("FollowTheGreen::getDestination: airport ready")
self.move = self.airport.guessMove(self.aircraft.position())
# Info 10
logging.info("FollowTheGreen::getDestination: Guessing %s", self.move)
return self.ui.promptForDestination()
def newGreen(self, destination):
# What is we had a green, and now we don't?!
# so we first make sur we find a new green, and if we do, we cancel the previous one.
return self.followTheGreen(destination, True)
def followTheGreen(self, destination, newGreen=False):
    """Find and light a green taxi route from the aircraft to *destination*.

    Destination is either
    the name of a runway for departure, or
    the name of a parking ramp for arrival.
    We know where we are, we know where we want to go.
    If we find a route, we light it.  When *newGreen* is True a previously
    active green is cancelled before the new one is created.
    """
    if destination not in self.airport.getDestinations(self.move):
        logging.debug("FollowTheGreen::followTheGreen: destination not valid %s for %s", destination, self.move)
        return self.ui.promptForDestination("Destination %s not valid for %s." % (destination, self.move))
    # Info 11
    logging.info("FollowTheGreen::followTheGreen: Route to %s.", destination)
    rerr, route = self.airport.mkRoute(self.aircraft, destination, self.move)
    if not rerr:
        logging.info("FollowTheGreen::getDestination: No route %s", route)
        return self.ui.tryAgain(route)
    # Info 12
    pos = self.aircraft.position()
    hdg = self.aircraft.heading()
    if pos is None:
        logging.debug("FollowTheGreen::getAirport: no plane position")
        return self.ui.sorry("We could not locate your plane.")
    # (0, 0) is treated as "no position" as well.
    if pos[0] == 0 and pos[1] == 0:
        logging.debug("FollowTheGreen::getAirport: no plane position")
        return self.ui.sorry("We could not locate your plane.")
    if newGreen:  # We had a green, and we found a new one.
        # turn off previous lights
        self.cancel("new green requested")
    # now create new ones
    logging.info("FollowTheGreen::followTheGreen: Got route: %s.", route)
    self.destination = destination
    onRwy = False
    if self.move == ARRIVAL:
        onRwy, runway = self.airport.onRunway(pos, 300)  # 150m either side of runway, return [True,Runway()] or [False, None]
    self.lights = LightString()
    self.lights.populate(route, onRwy)
    if len(self.lights.lights) == 0:
        logging.debug("FollowTheGreen::getDestination: no lights")
        return self.ui.sorry("We could not light a route to your destination.")
    # Info 13
    logging.info("FollowTheGreen::followTheGreen: Added %d lights, %d segments, %s stopbars.", len(self.lights.lights), self.lights.segments + 1, len(self.lights.stopbars))
    self.segment = 0
    logging.info("FollowTheGreen::followTheGreen: Segment %d/%d.", self.segment + 1, self.lights.segments + 1)
    ret = self.lights.illuminateSegment(self.segment)
    if not ret[0]:
        return self.ui.sorry(ret[1])
    logging.debug("FollowTheGreen::followTheGreen: lights instanciated for segment %d.", self.segment)
    # Initial bearing/distance/heading-difference to the first light.
    initbrgn, initdist, initdiff = self.lights.initial(pos, hdg)
    logging.debug("FollowTheGreen::followTheGreen: init (%d, %d, %d).", initbrgn, initdist, initdiff)
    logging.info("FollowTheGreen::followTheGreen: first light at %d m, heading %d DEG.", initdist, initbrgn)
    self.flightLoop.startFlightLoop()
    self.__status = FollowTheGreen.STATUS["ACTIVE"]
    # Info 14
    logging.info("FollowTheGreen::followTheGreen: Flightloop started.")
    # Hint: distance and heading to first light
    if initdiff > 20 or initdist > 200:
        XPLMSpeakString("Follow the green. Taxiway is at about %d meters heading %d." % (initdist, initbrgn))
    else:
        XPLMSpeakString("Follow the green.")
    # self.segment = 0
    if self.lights.segments == 0:  # just one segment
        logging.debug("FollowTheGreen::followTheGreen: just one segment %s", self.move)
        if self.move == ARRIVAL:
            if len(self.lights.stopbars) == 0:  # not terminated by a stop bar, it is probably an arrival...
                logging.debug("FollowTheGreen::followTheGreen: just one segment on arrival")
                return self.ui.promptForParked()
            if len(self.lights.stopbars) == 1:  # terminated with a stop bar, it is probably a departure...
                logging.debug("FollowTheGreen::followTheGreen: 1 segment with 1 stopbar on arrival?")
                return self.ui.promptForClearance()
        if self.move == DEPARTURE:
            if len(self.lights.stopbars) == 0:  # not terminated by a stop bar, it is probably an arrival...
                logging.debug("FollowTheGreen::followTheGreen: 1 segment with 0 stopbar on departure?")
                return self.ui.promptForDeparture()
    return self.ui.promptForClearance()
    # return self.ui.sorry("Follow the green is not completed yet.") # development
def nextLeg(self):
    """Advance to the next route segment; called when cleared by TOWER."""
    self.segment += 1
    # Info 15
    logging.info("FollowTheGreen::nextLeg: Segment %d/%d.", self.segment + 1, self.lights.segments + 1)
    if self.segment > self.lights.segments:
        # Past the last segment: stop the flight loop, remove the lights.
        self.flightLoop.stopFlightLoop()
        self.lights.destroy()
        # Info 16.a
        logging.info("FollowTheGreen::nextLeg: done.")
        self.segment = 0  # reset
        return self.ui.bye()
    ret = self.lights.illuminateSegment(self.segment)
    if not ret[0]:
        # Could not light the segment: abort the whole mission.
        self.cancel()
        return self.ui.sorry(ret[1])
    logging.debug("FollowTheGreen::followTheGreen: lights instanciated (%d).", self.segment)
    if self.move == DEPARTURE and self.segment == (self.lights.segments - 1):
        # One segment before the runway on departure: ask for take-off clearance.
        return self.ui.promptForDeparture()
    if self.move == DEPARTURE and self.segment == self.lights.segments:
        # Info 16.b
        logging.info("FollowTheGreen::nextLeg: ready for take-off.")
        self.segment = 0  # reset
        return self.ui.bye()
    if self.move == ARRIVAL and self.segment == self.lights.segments:
        return self.ui.promptForParked()
    return self.ui.promptForClearance()
def cancel(self, reason=""):
    """Abandon the FTG mission.

    Instruct subroutines to turn off FTG lights, remove them, and restore
    the environment.  Always returns [True, ""].
    """
    flight_loop = self.flightLoop
    if flight_loop:
        flight_loop.stopFlightLoop()
        logging.info("FollowTheGreen::cancel: Flightloop stopped.")
    lights = self.lights
    if lights:
        lights.destroy()
        self.lights = None
    if self.ui.mainWindowExists():
        self.ui.destroyMainWindow()
    # Info 16
    logging.info("FollowTheGreen::cancel: cancelled: %s.", reason)
    return [True, ""]
def disable(self):
    """Alias for cancel(), used when the plugin is disabled."""
    return self.cancel("disabled")
def stop(self):
    """Alias for cancel(), used when the plugin is stopped."""
    return self.cancel("stopped")
| StarcoderdataPython |
9772482 | import unittest
from aviation_weather.components.pressure import Pressure
from aviation_weather.components.remarks import Remarks
from aviation_weather.components.runwayvisualrange import RunwayVisualRange
from aviation_weather.components.skycondition import SkyCondition
from aviation_weather.components.location import Location
from aviation_weather.components.temperature import Temperature
from aviation_weather.components.time import Time
from aviation_weather.components.visibility import Visibility
from aviation_weather.components.weathergroup import WeatherGroup
from aviation_weather.components.wind import Wind
from aviation_weather.exceptions import ReportDecodeError
from aviation_weather.report import Report
class TestReport(unittest.TestCase):
    """Unit tests for the Report parser"""

    def _test_parse(self, raw, location, time, wind, visibility, runway_visual_range,
                    weather_groups, sky_conditions, temperature, pressure, remarks):
        # Helper: parse *raw* and compare every decoded component
        # against the expected value supplied by the caller.
        report = Report(raw)
        self.assertEqual(raw, report.raw)
        # TODO: assert report.type and report.modifier
        self.assertEqual(location, report.location)
        self.assertEqual(time, report.time)
        self.assertEqual(wind, report.wind)
        self.assertEqual(visibility, report.visibility)
        self.assertEqual(runway_visual_range, report.runway_visual_range)
        self.assertEqual(weather_groups, report.weather_groups)
        self.assertEqual(sky_conditions, report.sky_conditions)
        self.assertEqual(temperature, report.temperature)
        self.assertEqual(pressure, report.pressure)
        self.assertEqual(remarks, report.remarks)

    def test_parse_KJFK(self):
        # US METAR with gusting wind and a remarks section.
        self._test_parse(
            raw=("KJFK 182151Z 28022G34KT 10SM SCT065 M04/M17 A2990 RMK AO2 "
                 "PK WND 30034/2145 SLP123 VIRGA OHD AND E-SE T10391167"),
            location=Location("KJFK"),
            time=Time("182151Z"),
            wind=Wind("28022G34KT"),
            visibility=Visibility("10SM"),
            runway_visual_range=None,
            weather_groups=None,
            sky_conditions=(SkyCondition("SCT065"),),
            temperature=Temperature("M04/M17"),
            pressure=Pressure("A2990"),
            remarks=Remarks("RMK AO2 PK WND 30034/2145 SLP123 VIRGA OHD AND E-SE T10391167")
        )

    def test_parse_MKJP(self):
        # International METAR with metric visibility and Q-pressure, no remarks.
        self._test_parse(
            raw="MKJP 182300Z 14014KT 9999 SCT022 28/22 Q1015",
            location=Location("MKJP"),
            time=Time("182300Z"),
            wind=Wind("14014KT"),
            visibility=Visibility("9999"),
            runway_visual_range=None,
            weather_groups=None,
            sky_conditions=(SkyCondition("SCT022"),),
            temperature=Temperature("28/22"),
            pressure=Pressure("Q1015"),
            remarks=None
        )

    def test_parse_LBBG(self):
        # Variable wind, multiple runway-visual-range groups and a weather group.
        self._test_parse(
            raw=("METAR LBBG 041600Z 12003MPS 290V310 1400 R04/P1500N R22/P1500U "
                 "+SN BKN022 OVC050 M04/M07 Q1020 NOSIG 8849//91="),
            location=Location("LBBG"),
            time=Time("041600Z"),
            wind=Wind("12003MPS 290V310"),
            visibility=Visibility("1400"),
            runway_visual_range=(RunwayVisualRange("R04/P1500N"), RunwayVisualRange("R22/P1500U")),
            weather_groups=(WeatherGroup("+SN"),),
            sky_conditions=(SkyCondition("BKN022"), SkyCondition("OVC050")),
            temperature=Temperature("M04/M07"),
            pressure=Pressure("Q1020"),
            remarks=None
        )

    def test_parse_KTTN(self):
        # Fractional visibility and multiple weather groups.
        self._test_parse(
            raw=("METAR KTTN 051853Z 04011KT 1/2SM VCTS SN FZFG BKN003 OVC010 "
                 "M02/M02 A3006 RMK AO2 TSB40 SLP176 P0002 T10171017="),
            location=Location("KTTN"),
            time=Time("051853Z"),
            wind=Wind("04011KT"),
            visibility=Visibility("1/2SM"),
            runway_visual_range=None,
            weather_groups=(WeatherGroup("VCTS"), WeatherGroup("SN"), WeatherGroup("FZFG")),
            sky_conditions=(SkyCondition("BKN003"), SkyCondition("OVC010")),
            temperature=Temperature("M02/M02"),
            pressure=Pressure("A3006"),
            remarks=Remarks("RMK AO2 TSB40 SLP176 P0002 T10171017=")
        )

    def test_parse_KSLC(self):
        # Clear-sky report with remarks.
        self._test_parse(
            raw="KSLC 192353Z 30004KT 10SM CLR 29/02 A3000 RMK AO2 SLP110 T02940017 10306 20261 56014",
            location=Location("KSLC"),
            time=Time("192353Z"),
            wind=Wind("30004KT"),
            visibility=Visibility("10SM"),
            runway_visual_range=None,
            weather_groups=None,
            sky_conditions=(SkyCondition("CLR"),),
            temperature=Temperature("29/02"),
            pressure=Pressure("A3000"),
            remarks=Remarks("RMK AO2 SLP110 T02940017 10306 20261 56014")
        )

    def test_parse_KAZO(self):
        # Mixed whole+fraction visibility and a variable RVR group.
        self._test_parse(
            raw=("KAZO 270257Z 26013KT 1 3/4SM R35/4500VP6000FT -SN BKN016 "
                 "OVC022 M02/M06 A3009 RMK AO2 P0000 T10221056"),
            location=Location("KAZO"),
            time=Time("270257Z"),
            wind=Wind("26013KT"),
            visibility=Visibility("1 3/4SM"),
            runway_visual_range=(RunwayVisualRange("R35/4500VP6000FT"),),
            weather_groups=(WeatherGroup("-SN"),),
            sky_conditions=(SkyCondition("BKN016"), SkyCondition("OVC022")),
            temperature=Temperature("M02/M06"),
            pressure=Pressure("A3009"),
            remarks=Remarks("RMK AO2 P0000 T10221056")
        )

    def test_parse_TJSJ(self):
        # SPECI report with three sky-condition layers.
        self._test_parse(
            raw=("SPECI TJSJ 270256Z 12003KT 10SM FEW020 SCT036 BKN095 24/22 A3013 "
                 "RMK AO2 RAB06E37 SLP203 SHRA DSNT W P0024 60026 T02440222 50005"),
            location=Location("TJSJ"),
            time=Time("270256Z"),
            wind=Wind("12003KT"),
            visibility=Visibility("10SM"),
            runway_visual_range=None,
            weather_groups=None,
            sky_conditions=(SkyCondition("FEW020"), SkyCondition("SCT036"), SkyCondition("BKN095")),
            temperature=Temperature("24/22"),
            pressure=Pressure("A3013"),
            remarks=Remarks("RMK AO2 RAB06E37 SLP203 SHRA DSNT W P0024 60026 T02440222 50005")
        )

    def test_parse_EIDW(self):
        # Report with no pressure group at all.
        self._test_parse(
            raw="EIDW 092307Z 24035G55KT 210V270 1700 +SHRA BKN007 OVC015CB 08/07",
            location=Location("EIDW"),
            time=Time("092307Z"),
            wind=Wind("24035G55KT 210V270"),
            visibility=Visibility("1700"),
            runway_visual_range=None,
            weather_groups=(WeatherGroup("+SHRA"),),
            sky_conditions=(SkyCondition("BKN007"), SkyCondition("OVC015CB")),
            temperature=Temperature("08/07"),
            pressure=None,
            remarks=None
        )

    def test_parse_MUHA(self):
        # TCU cloud-type suffix in a sky-condition group.
        self._test_parse(
            raw="MUHA 102255Z 04010KT 9000 FEW018TCU BKN025 25/22 Q1017",
            location=Location("MUHA"),
            time=Time("102255Z"),
            wind=Wind("04010KT"),
            visibility=Visibility("9000"),
            runway_visual_range=None,
            weather_groups=None,
            sky_conditions=(SkyCondition("FEW018TCU"), SkyCondition("BKN025")),
            temperature=Temperature("25/22"),
            pressure=Pressure("Q1017"),
            remarks=None
        )

    def _test_retrieve(self, code):
        # Helper: Report.retrieve presumably fetches a live report for
        # *code* over the network — only the return type is asserted.
        self.assertIsInstance(Report.retrieve(code), Report)

    def test_retrieve_KJFK(self):
        self._test_retrieve("KJFK")

    def test_retrieve_EGLL(self):
        self._test_retrieve("EGLL")

    def _test_invalid(self, raw):
        # Helper: parsing *raw* must raise ReportDecodeError.
        with self.assertRaises(ReportDecodeError):
            Report(raw)

    def test_invalid_blank(self):
        self._test_invalid("")

    def test_invalid_bad(self):
        # A bare station identifier is not a complete report.
        self._test_invalid("LFPG")
| StarcoderdataPython |
6628262 | <filename>sympyosis/ext/processes/__init__.py
from sympyosis.ext.processes.supervisor import SupervisorManager
| StarcoderdataPython |
6418471 | <reponame>MayoG/PipeRT2<gh_stars>1-10
from typing import Dict, List
from pipert2.core.base.wire import Wire
from pipert2.core.base.flow import Flow
from pipert2.utils.exceptions import FloatingRoutine, UniqueRoutineName
def validate_flow(flows: Dict[str, Flow], wires: Dict[tuple, Wire]):
    """Run all pipe validations, raising on the first problem found.

    Args:
        flows: The flows to validate.
        wires: The wires to validate.

    Raises:
        UniqueRoutineName: Raised if two or more routines have the same name.
        FloatingRoutine: If a routine is contained in a flow but not linked
            to any other routine.
    """
    # Name uniqueness is checked first, then wiring.
    validate_routines_unique_names(flows)
    validate_flows_routines_are_linked(flows, wires)
def validate_routines_unique_names(flows: Dict[str, Flow]):
    """Validate all routines in the flows have unique names.

    Args:
        flows: The flows to validate.

    Raises:
        UniqueRoutineName: Raised if two or more routines have the same name.
    """
    # Use a set instead of a list: the original list membership test made
    # this check O(n^2) in the total number of routines.
    routine_names = set()
    for flow in flows.values():
        for routine in flow.routines.values():
            if routine.name in routine_names:
                raise UniqueRoutineName(f"The routine name {routine.name} isn't unique, please choose a unique name.")
            routine_names.add(routine.name)
def validate_flows_routines_are_linked(flows: Dict[str, Flow], wires: Dict[tuple, Wire]):
    """Validate that all routines in the flows are linked to other routines.

    Raises:
        FloatingRoutine: If a routine is contained in a flow but not linked
            to any other routine.
    """
    for flow in flows.values():
        for routine in flow.routines.values():
            # A routine is linked when some wire uses it as source or destination.
            linked = any(
                wire.source.name == routine.name or routine in wire.destinations
                for wire in wires.values()
            )
            if not linked:
                raise FloatingRoutine(f"The routine {routine.name} "
                                      f"in flow {flow.name} isn't linked to any other routine.")
| StarcoderdataPython |
9722836 | <filename>tx_parse_xml/acl__prop_to_title.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from pathlib import Path
from bs4 import BeautifulSoup
# Paths to the ACL definition file and its English locale companion
# (locale file name is the ACL file name prefixed with 'mlb').
FILE_NAME_ACL = Path(r'C:\<...>\ads\<...>\src\<...>.xml')
FILE_NAME_ACL_LOCALE = FILE_NAME_ACL.parent.parent / 'locale' / 'en' / ('mlb' + FILE_NAME_ACL.name)

# NOTE(review): these are XML files parsed with the lenient 'html.parser'
# backend (which lower-cases tags/attributes) — presumably intentional.
root_acl = BeautifulSoup(open(FILE_NAME_ACL, 'rb'), 'html.parser')
root_acl_locale = BeautifulSoup(open(FILE_NAME_ACL_LOCALE, 'rb'), 'html.parser')

# NOTE: <Group Id="cpg<...>" Name="<...>" Members="<PROP_IDS">
# Property element ids to resolve to (name, localized title) pairs.
PROP_IDS = "prd<...> prd<...>".split()

items = []
for prop_id in PROP_IDS:
    # Look up the property element by id, then resolve its title text
    # through the locale file via the presentation element's titleid.
    prop_el = root_acl.select_one('#' + prop_id)
    name = prop_el['name']
    title_id = prop_el.presentation['titleid']
    title = root_acl_locale.select_one('#' + title_id).value.text
    items.append((name, title))

# Print name/title pairs sorted by name, tab-separated.
items.sort()
for name, title in items:
    print(name, title, sep='\t')
| StarcoderdataPython |
4925496 | <reponame>tzulberti/entrenamiento-arqueria
"""Actualizar permiso_usuario
Revision ID: 043
Revises: 042
Create Date: 2015-01-21 06:59:13.639539
"""
# revision identifiers, used by Alembic.
revision = '043'       # this migration
down_revision = '042'  # previous migration in the chain
from alembic import op
import sqlalchemy as db
def upgrade():
    """Recreate permiso_usuario with ON DELETE CASCADE foreign keys.

    NOTE(review): the table is dropped first, so any existing rows are
    lost — presumably acceptable for this schema change; confirm.
    """
    op.drop_table('permiso_usuario')
    op.create_table('permiso_usuario',
                    db.Column('id', db.Integer, primary_key=True),
                    db.Column('id_usuario', db.Integer, db.ForeignKey('usuario.id', ondelete='CASCADE'), nullable=False),
                    db.Column('id_permiso', db.Integer, db.ForeignKey('permiso.id', ondelete='CASCADE'), nullable=False),
                    )
def downgrade():
    """No-op: this migration is not reversible."""
    pass
| StarcoderdataPython |
8146112 | import os
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf2
from tensorflow.keras import Input, layers
from tensorflow.keras.layers import Dense, Input
from tensorflow.python import ipu
from functools import partial
# Configure the IPU system: auto-select a single IPU for this process.
cfg = ipu.utils.create_ipu_config()
cfg = ipu.utils.auto_select_ipus(cfg, 1)
ipu.utils.configure_ipu_system(cfg)

# Build a toy binary-classification dataset: two 2-D Gaussian blobs
# (red, label 1, centered at (1, 1); green, label 0, centered at (2, 2)).
sample_n = 10000
meana = np.array([1, 1])
cova = np.array([[0.1, 0],[0, 0.1]])
meanb = np.array([2, 2])
covb = np.array([[0.1, 0],[0, 0.1]])
x_red = np.random.multivariate_normal(mean=meana, cov = cova, size=sample_n)
x_green = np.random.multivariate_normal(mean=meanb, cov = covb, size=sample_n)
y_red = np.array([1] * sample_n)
y_green = np.array([0] * sample_n)
# plt.scatter(x_red[:, 0], x_red[:, 1], c = 'red' , marker='.', s = 30)
# plt.scatter(x_green[:, 0], x_green[:, 1], c = 'green', marker='.', s = 30)
# plt.show()
# Stack the two classes into X (2*sample_n, 2) and y (2*sample_n, 1),
# cast to float32 as expected by the TF model.
X = np.concatenate([x_red, x_green])
# X = np.concatenate([np.ones((sample_n*2, 1)), X], axis = 1)
y = np.concatenate([y_red, y_green])
y = np.expand_dims(y, axis = 1)
X = X.astype(np.float32)
y = y.astype(np.float32)
def lr(input_dim=2, output_dim=1, hidden=32):
    """Build a small Keras classifier: one hidden ReLU layer + sigmoid output.

    Args:
        input_dim: Number of input features.
        output_dim: Number of outputs (1 sigmoid unit for binary classification).
        hidden: Width of the hidden layer.

    Returns:
        An uncompiled tf.keras.Model.
    """
    inputs = Input(name="data", shape=(input_dim,))
    hidden_out = layers.Dense(hidden, activation="relu", name="linear")(inputs)
    outputs = layers.Dense(output_dim, activation='sigmoid', use_bias=True)(hidden_out)
    # Dead commented-out ipu.keras.Model alternative removed; the plain
    # Keras model is what the rest of the script uses.
    return tf2.keras.Model(inputs=inputs, outputs=outputs)
# Input pipeline: batches of 5 (dropping the remainder), lightly shuffled,
# repeated forever so the IPU infeed never runs dry.
ds = tf2.data.Dataset.from_tensor_slices((X, y))
ds = ds.batch(5, drop_remainder=True).shuffle(5)
# ds_x = tf2.data.Dataset.from_tensor_slices(X)
# ds_y = tf2.data.Dataset.from_tensor_slices(y)
# ds = tf2.data.Dataset.zip((ds_x, ds_y))
ds = ds.repeat()
# Peek at one batch to sanity-check shapes/dtypes on the host.
for xt, yt in ds.take(1):
    print(xt)
    print(yt)
# Queues that stream batches to (infeed) and losses from (outfeed) the IPU.
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(ds, feed_name="infeed")
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(feed_name="outfeed")
# NOTE(review): accessing .initializer as a bare expression has no effect
# on its own — presumably it was meant to be run/used; confirm.
infeed_queue.initializer
print(infeed_queue._dequeue())
def training_step(model, opt, count, X, y):
    """One training step: forward pass, mean BCE loss, gradient update.

    The scalar loss is pushed to the host through the global outfeed queue;
    *count* is threaded through unchanged as the loop-carried state for
    ipu.loops.repeat.
    """
    with tf2.GradientTape() as tape:
        logits = model(X, training=True)
        losses = tf2.math.reduce_mean(tf2.keras.losses.binary_crossentropy(y, logits))
    grads = tape.gradient(losses, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    # Stream the loss back to the host via the outfeed queue.
    outfeed_queue.enqueue({"losses": losses})
    return count
@tf2.function(experimental_compile=True)
def my_train_loop():
    """XLA-compiled training loop: build model/optimizer, run 10 steps on IPU."""
    model = lr()
    opt = tf2.keras.optimizers.Adam()
    counter = 0
    # Bind model and optimizer so the loop body signature matches what
    # ipu.loops.repeat expects: (count, X, y).
    training_step_with_model = partial(training_step, model, opt)
    count = ipu.loops.repeat(10, training_step_with_model, [counter], infeed_queue)
    return count
# Initialize the IPU default strategy.
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
    # NOTE(review): bare .initializer expression again — see note above.
    infeed_queue.initializer
    # Run the compiled training loop on the IPU.
    losses = strategy.experimental_run_v2(my_train_loop)
    print("losses", losses)
    # The outfeed dequeue has to happen after the outfeed enqueue op has been executed.
    result = outfeed_queue.dequeue()
    print("outfeed result", result)
# Earlier experiments kept for reference (manual step loop and Keras .fit):
# # Create an IPU distribution strategy
# strategy = ipu.ipu_strategy.IPUStrategy()
# with strategy.scope():
# # An optimizer for updating the trainable variables
# opt = tf2.keras.optimizers.SGD(0.01)
# # Create an instance of the model
# model = lr()
# # Train the model
# for i in range(5):
# loss = strategy.experimental_run_v2(training_step, args=[X, y, model, opt])
# print("Step " + str(i) + " loss = " + str(loss.numpy()))
# strategy = ipu.ipu_strategy.IPUStrategy()
# with strategy.scope():
# model = lr()
# model.summary()
# model.compile('adam', loss=tf2.losses.BinaryCrossentropy())
# model.fit(X, y, epochs=5, batch_size = 10)
# print(model.predict(X[0: 10], batch_size = 5))
| StarcoderdataPython |
3255623 | <reponame>blackapple1202/TensorflowCodeRepo<filename>04.Create_Images_to_Table/create_image_table.py
import PIL
from PIL import Image, ImageOps, ImageDraw
import pandas as pd
import shutil
import os.path
import random
from pathlib import Path
############### CONFIGURE ########################
# Table Configure Variables
# Image Size Configuration
# Source crops are numbered IMAGE_START_NUMBER..IMAGE_END_NUMBER; each output
# table is TABLE_IM_PIXEL square, tiled WIDTH x HEIGHT cells.
IMAGE_START_NUMBER = 1
IMAGE_END_NUMBER = 200
TABLE_IM_PIXEL = 480
TABLE_IM_WIDTH_NUMBER = 4
TABLE_IM_HEIGHT_NUMBER = 4
# Image Background Configuration
BACKGROUND_START_NUMBER = 1
BACKGROUND_END_NUMBER = 16
BACKGROUND_FOLDER = 'backgrounds'
BACKGROUND_IMAGE_FILE_NAME = '{}_background.jpg'
# Set input path and output path
INPUT_FOLDER = 'images'
INPUT_IMAGE_FILE_NAME = '{}_crop.png'
OUTPUT_FOLDER = 'data'
OUTPUT_IMAGE_FILE_NAME = '{}_table{}.jpg'
OUTPUT_CLONE_FOLDER = 'data/clone'
# Set REPETITION number of extraction
EXTRACT_OUTPUT_INDEX_MIN = 181
EXTRACT_OUTPUT_INDEX_MAX = 240
# REPETITION NUMBER = EXTRACT_OUTPUT_INDEX_MAX - EXTRACT_OUTPUT_INDEX_MIN + 1
# Toggle options
TOGGLE_BACKGROUND = True
TOGGLE_SHUFFLE_BACKGROUND = False
TOGGLE_SHUFFLE_IMAGE = True
TOGGLE_CSV_TO_SAVE_INDIVIDUAL = False
TOGGLE_CLONE_IMAGE_TO_SHOW = False
# The "clone" image is the same table with bounding boxes drawn on it.
TOGGLE_CLONE_IMAGE_TO_SAVE = True
OUTPUT_CLONE_IMAGE_FILE_NAME = 'include_boundaries_{}_table{}.jpg'
# Set index of EXTRACT_MODE to OUTPUT_IMAGE_EXTRACT_MODE
# Default is same as 'all'
EXTRACT_MODE = ['default', 'all', 'odd', 'even' , 'random']
RANDOM_START_RANGE_MIN = 0
RANDOM_START_RANGE_MAX = 3
RANDOM_INCREMENT_RANGE_MIN = 2
RANDOM_INCREMENT_RANGE_MAX = 6
OUTPUT_IMAGE_EXTRACT_MODE = EXTRACT_MODE[4]
# Table Boundary Configure
# Pixels shaved off each bounding box edge relative to the cell.
BOUNDARY_PADDING_PIXEL = {'top': 4, 'bottom': 4, 'left': 4, 'right': 4}
# CSV Configure
LABEL = 'face'
OUTPUT_CSV_FILE_NAME = '{}_labels{}.csv'
# Extract Training(True) or Testing(False)?
DATA_USAGE = True
###################################################
# Module-level defaults; extract() re-assigns local variables of the same
# name per column, so these globals are effectively unused after import.
start_step = 0
increment_step = 1
def check_image_with_pil(path):
    """Return True if *path* exists and PIL can open it as an image.

    The previous version called Image.open() without closing it, leaking a
    file handle per probe (Image.open is lazy and keeps the file open);
    the context manager closes it immediately.
    """
    try:
        with Image.open(path):
            pass
    except IOError:
        return False
    return True
def show_table_image(tableImg):
    """Display *tableImg* with PIL's default external image viewer."""
    tableImg.show()
def save_table_image(path, tableImg):
    """Write *tableImg* to disk at *path* (format inferred from extension)."""
    tableImg.save(path)
def save_boundaries_to_csv(path, input_image_list):
    """Write bounding-box rows to *path* as CSV (header row, no index column)."""
    columns = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    frame = pd.DataFrame(input_image_list, columns=columns)
    frame.to_csv(path, index=None)
def append_boundary_to_csv(output_image_list, filename, width, height, label, xmin, ymin, xmax, ymax):
    """Append one bounding-box record (as a tuple) to *output_image_list*."""
    output_image_list.append(
        (filename, width, height, label, xmin, ymin, xmax, ymax)
    )
def extract(repeat_index, background_index, all_image_list):
    """Compose one table image (index *repeat_index*) from source crops.

    Pastes source crops into a WIDTH x HEIGHT grid over an optional
    background, records each crop's bounding box into both a per-table list
    and *all_image_list*, and saves the table (plus an optional "clone"
    copy with the boxes drawn on it).

    NOTE(review): SOURCE_IM_PIXEL is a true (float) division under
    Python 3, and the j*SOURCE_IM_PIXEL / i*SOURCE_IM_PIXEL products are
    passed to PIL paste/thumbnail as coordinates — confirm this runs on the
    intended PIL/Python version, as newer Pillow rejects float box coords.
    """
    if DATA_USAGE:
        usage = 'train'
    else:
        usage = 'test'
    image_list = []
    # Side length of one grid cell.
    SOURCE_IM_PIXEL = (TABLE_IM_PIXEL / TABLE_IM_WIDTH_NUMBER)
    tableImage = Image.new('RGB', (TABLE_IM_PIXEL,TABLE_IM_PIXEL))
    IMAGES_COUNT = IMAGE_START_NUMBER
    # The clone carries the same content plus drawn bounding boxes.
    clone_tableImage = Image.new('RGB', (TABLE_IM_PIXEL,TABLE_IM_PIXEL))
    DrawImg = ImageDraw.Draw(clone_tableImage)
    if TOGGLE_BACKGROUND:
        background = Image.open('{}/{}'.format(BACKGROUND_FOLDER, BACKGROUND_IMAGE_FILE_NAME.format(background_index)))
        background = background.resize((TABLE_IM_PIXEL, TABLE_IM_PIXEL), PIL.Image.ANTIALIAS)
        tableImage.paste(background, (0, 0))
        clone_tableImage.paste(background, (0, 0))
    if not RANDOM_INCREMENT_RANGE_MIN > 0 or not RANDOM_INCREMENT_RANGE_MAX > RANDOM_INCREMENT_RANGE_MIN:
        print('RANDOM_INCREMENT_RANGE should be set properly')
        return
    for directory in [INPUT_FOLDER]:
        for i in range(0, TABLE_IM_WIDTH_NUMBER):
            # Per-row cell selection pattern, driven by the extract mode.
            start_step = 0
            increment_step = 1
            if OUTPUT_IMAGE_EXTRACT_MODE == 'all':
                start_step = 0
                increment_step = 1
            elif OUTPUT_IMAGE_EXTRACT_MODE == 'odd':
                if i % 2 == 0:
                    start_step = 1
                else:
                    start_step = 0
                increment_step = 2
            elif OUTPUT_IMAGE_EXTRACT_MODE == 'even':
                if i % 2 == 0:
                    start_step = 0
                else:
                    start_step = 1
                increment_step = 2
            elif OUTPUT_IMAGE_EXTRACT_MODE == 'random':
                start_step = random.randrange(RANDOM_START_RANGE_MIN, RANDOM_START_RANGE_MAX)
                increment_step = random.randrange(RANDOM_INCREMENT_RANGE_MIN, RANDOM_INCREMENT_RANGE_MAX)
            for j in range(start_step, TABLE_IM_HEIGHT_NUMBER, increment_step):
                # Open image on images directory
                if TOGGLE_SHUFFLE_IMAGE:
                    IMAGES_COUNT = random.randrange(IMAGE_START_NUMBER, IMAGE_END_NUMBER)
                else:
                    IMAGES_COUNT = IMAGES_COUNT + 1
                # If image is not exist on folder
                while not check_image_with_pil('{}/{}'.format(directory, INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT))):
                    # Skip to next index
                    if TOGGLE_SHUFFLE_IMAGE:
                        IMAGES_COUNT = random.randrange(IMAGE_START_NUMBER, IMAGE_END_NUMBER)
                    else:
                        IMAGES_COUNT = IMAGES_COUNT + 1
                    # If image index is overwhelmed the end number
                    if IMAGES_COUNT > IMAGE_END_NUMBER:
                        # Ran out of sources: save what we have and bail out.
                        # Save process35f
                        save_table_image('{}/{}'.format(OUTPUT_FOLDER, OUTPUT_IMAGE_FILE_NAME.format(usage,repeat_index)), tableImage)
                        print('Successfully save images to table')
                        if TOGGLE_CSV_TO_SAVE_INDIVIDUAL:
                            csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage,repeat_index))
                            save_boundaries_to_csv(csv_path, image_list)
                            print('Successfully save boundaries to csv')
                        if TOGGLE_CLONE_IMAGE_TO_SAVE:
                            save_table_image('{}/{}'.format(OUTPUT_CLONE_FOLDER, OUTPUT_CLONE_IMAGE_FILE_NAME.format(usage,repeat_index)), clone_tableImage)
                        # Show process
                        if TOGGLE_CLONE_IMAGE_TO_SHOW:
                            show_table_image(clone_tableImage)
                        print('End of file is {}'.format(INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
                        # End of script
                        return
                # Load the crop, pad it with a thin white border and shrink
                # it to one cell.
                im = Image.open('{}/{}'.format(directory, INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
                im = ImageOps.expand(im, border=(int)(SOURCE_IM_PIXEL*0.01), fill='white')
                im = im.resize((TABLE_IM_PIXEL, TABLE_IM_PIXEL), PIL.Image.ANTIALIAS)
                im.thumbnail((SOURCE_IM_PIXEL, SOURCE_IM_PIXEL))
                # Bounding box of the cell, inset by the configured padding.
                xmin = (j * SOURCE_IM_PIXEL) + BOUNDARY_PADDING_PIXEL['left']
                ymin = (i * SOURCE_IM_PIXEL) + BOUNDARY_PADDING_PIXEL['top']
                xmax = (j * SOURCE_IM_PIXEL) + SOURCE_IM_PIXEL - BOUNDARY_PADDING_PIXEL['right']
                ymax = (i * SOURCE_IM_PIXEL) + SOURCE_IM_PIXEL - BOUNDARY_PADDING_PIXEL['bottom']
                append_boundary_to_csv(image_list,
                                       OUTPUT_IMAGE_FILE_NAME.format(usage, repeat_index),
                                       TABLE_IM_PIXEL,
                                       TABLE_IM_PIXEL,
                                       LABEL,
                                       xmin,
                                       ymin,
                                       xmax,
                                       ymax)
                append_boundary_to_csv(all_image_list,
                                       OUTPUT_IMAGE_FILE_NAME.format(usage, repeat_index),
                                       TABLE_IM_PIXEL,
                                       TABLE_IM_PIXEL,
                                       LABEL,
                                       xmin,
                                       ymin,
                                       xmax,
                                       ymax)
                tableImage.paste(im, ((j * SOURCE_IM_PIXEL),(i * SOURCE_IM_PIXEL)))
                clone_tableImage.paste(im, ((j * SOURCE_IM_PIXEL),(i * SOURCE_IM_PIXEL)))
                DrawImg.rectangle([(xmin, ymin), (xmax, ymax)], fill=None, outline='green')
    # Save process
    save_table_image('{}/{}'.format(OUTPUT_FOLDER, OUTPUT_IMAGE_FILE_NAME.format(usage,repeat_index)), tableImage)
    print('Successfully save images to table')
    if TOGGLE_CSV_TO_SAVE_INDIVIDUAL:
        csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage,repeat_index))
        save_boundaries_to_csv(csv_path, image_list)
        print('Successfully save boundaries to csv')
    if TOGGLE_CLONE_IMAGE_TO_SAVE:
        save_table_image('{}/{}'.format(OUTPUT_CLONE_FOLDER, OUTPUT_CLONE_IMAGE_FILE_NAME.format(usage,repeat_index)), clone_tableImage)
    # Show process
    if TOGGLE_CLONE_IMAGE_TO_SHOW:
        show_table_image(clone_tableImage)
    print('End of file is {}'.format(INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
    # End of Script
def main():
    """Generate one table image per index in EXTRACT_OUTPUT_INDEX range,
    cycling (or shuffling) backgrounds, then write one combined CSV."""
    if not EXTRACT_OUTPUT_INDEX_MIN > 0 or not EXTRACT_OUTPUT_INDEX_MAX >= EXTRACT_OUTPUT_INDEX_MIN:
        print('EXTRACT_OUTPUT_INDEX should be set properly')
        return
    background_index = 0
    image_list = []
    for i in range(EXTRACT_OUTPUT_INDEX_MIN, EXTRACT_OUTPUT_INDEX_MAX + 1):
        if TOGGLE_SHUFFLE_BACKGROUND:
            background_index = random.randrange(BACKGROUND_START_NUMBER, BACKGROUND_END_NUMBER)
        else:
            background_index = background_index + 1;
        # Wrap around to the first background once the range is exhausted.
        if(background_index >= BACKGROUND_END_NUMBER):
            background_index = BACKGROUND_START_NUMBER
        extract(i, background_index, image_list)
    if DATA_USAGE:
        usage = 'train'
    else:
        usage = 'test'
    # One combined CSV covering every generated table image.
    csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage, ''))
    save_boundaries_to_csv(csv_path, image_list)


main()
8180791 | <reponame>ReenigneCA/moonlight_hdr_launcher
import json
import os
import sys
from distutils.errors import DistutilsFileError
from distutils.file_util import copy_file
from hashlib import sha256
from os.path import expandvars
from pathlib import Path
from tkinter import messagebox
from typing import List
from winreg import CreateKey, OpenKey, SetValueEx, CloseKey, KEY_WRITE, HKEY_CURRENT_USER, REG_SZ
import logging
import oschmod
# Module-level logger for the installer.
_logger = logging.getLogger('moonlight_hdr_launcher')

# The launcher executable is named after Mass Effect Andromeda — presumably
# so GeForce Experience treats it as that streamable title (the install step
# below rewrites that title's StreamingSettings.json); confirm.
LAUNCHER_EXE = 'MassEffectAndromeda.exe'
# Extra files installed next to the launcher executable.
ADDITIONAL_PROGRAMFILES_FILES = ['moonlight_hdr_launcher.ini', 'crashpad_handler.exe']
# Box-art assets referenced by the streaming metadata.
ADDITIONAL_STREAMING_FILES = ['mass_effect_andromeda-box-art.png', 'mass_effect_andromeda-box-art.jpg']
# HKCU registry key under which the install location is recorded.
REG_PATH = R'SOFTWARE\lyckantropen\moonlight_hdr_launcher'
DESTINATION_FOLDER = R'C:\Program Files\moonlight_hdr_launcher'
def get_source_folder() -> Path:
    """Return the folder containing the bundled installer resources.

    Under PyInstaller, sys._MEIPASS points at the unpacked bundle;
    otherwise fall back to this script's own directory.  The except clause
    is narrowed from Exception to AttributeError — the only expected
    failure is sys lacking _MEIPASS when not frozen.
    """
    try:
        source_folder = Path(sys._MEIPASS)
    except AttributeError:
        source_folder = Path(__file__).parent.absolute()
    return source_folder
def get_sha256(file: Path) -> str:
    """Return the SHA-256 of *file*'s contents as a lowercase hex string.

    Bug fix: the previous implementation built the string with
    ``hex(b)[2:]`` per digest byte, which drops the leading zero of any
    byte < 0x10 and so produced malformed, variable-length digests.
    ``hexdigest()`` is the canonical zero-padded encoding.
    """
    return sha256(file.read_bytes()).hexdigest()
def show_error(message: str, cmdline: bool = False) -> None:
    """Log *message* as an error and surface it to the user.

    Console output when *cmdline* is True, otherwise a tkinter dialog.
    """
    _logger.error(message)
    if cmdline:
        print(f'ERROR: {message}')
        return
    messagebox.showerror('Error', message)
def show_warning(message: str, cmdline: bool = False) -> None:
    """Log *message* as a warning and surface it to the user.

    Console output when *cmdline* is True, otherwise a tkinter dialog.
    """
    _logger.warning(message)
    if cmdline:
        print(f'WARNING: {message}')
        return
    messagebox.showwarning('Warning', message)
def write_path_to_reg(destination_folder: Path, reg_path: str, reg_key: str) -> None:
    """Record *destination_folder* as a REG_SZ value in the registry.

    Windows-only (winreg).  Creates HKCU\\*reg_path* if missing, then sets
    *reg_key* to the string form of *destination_folder*.
    """
    CreateKey(HKEY_CURRENT_USER, reg_path)
    registry_key = OpenKey(HKEY_CURRENT_USER, reg_path, 0, KEY_WRITE)
    SetValueEx(registry_key, reg_key, 0, REG_SZ, str(destination_folder))
    CloseKey(registry_key)
def create_folder(folder: Path, cmdline: bool) -> None:
    """Create *folder* (and parents), warning the user and re-raising on failure.

    Bug fix: Path.mkdir raises OSError/PermissionError, never
    DistutilsFileError, so the previous except clause could never fire and
    permission failures propagated without the user-facing warning.
    """
    try:
        folder.mkdir(parents=True, exist_ok=True)
    except OSError as e:
        show_warning(f'No permission to create {folder}, re-run as Administrator', cmdline)
        raise e
def copy_files(source_paths: List[Path], destination_folder: Path, cmdline: bool) -> None:
    """Copy each existing source file into *destination_folder* (update-only).

    Missing sources are logged and skipped; permission failures warn the
    user and re-raise.
    """
    for src in source_paths:
        if not src.exists():
            _logger.warning(f'Source file {src} does not exist')
            continue
        try:
            dest_name, copied = copy_file(src, destination_folder, update=True)
        except DistutilsFileError as e:
            show_warning(f'No permission to copy {src} to {destination_folder}, re-run as Administrator', cmdline)
            raise e
        if copied:
            _logger.info(f'Copied {src} to {dest_name}')
        else:
            _logger.info(f'Skipped copying {src} to {dest_name} because destination is newer than source')
def get_masseffectandromeda_location(cmdline: bool) -> Path:
    """Locate GFE's StreamingAssetsData entry for Mass Effect Andromeda.

    Globs %LOCALAPPDATA% for the first
    StreamingAssetsData/mass_effect_andromeda/* folder.  Shows an error and
    re-raises StopIteration when no entry exists (i.e. GFE has not scanned
    for games yet).
    """
    # find StreamingAssetsData subfolder
    try:
        app_data = Path(expandvars(r'%LOCALAPPDATA%'))
        _logger.info(f'Found AppData path: {app_data}')
        mad_path = next(app_data.glob(
            '**/StreamingAssetsData/mass_effect_andromeda/*'))
        _logger.info(f'Found StreamingAssetsData folder for Mass Effect Andromeda: {mad_path}')
        return mad_path
    except StopIteration as e:
        show_error('Unable to find entry for Mass Effect Andromeda. Have you tried scanning for games in GeForce Experience?', cmdline)
        raise e
class MoonlightHdrLauncherInstaller:
    """Installs the HDR launcher and rewires GFE's Mass Effect Andromeda
    streaming entry to launch it instead."""

    def __init__(self, source_folder: Path,
                 destination_folder: Path,
                 launcher_exe: str,
                 additional_programfiles_files: List[str],
                 additional_streaming_files: List[str],
                 reg_path: str):
        # Raw configuration.
        self.source_folder = source_folder
        self.destination_folder = destination_folder
        self.launcher_exe = launcher_exe
        self.additional_programfiles_files = additional_programfiles_files
        self.additional_streaming_files = additional_streaming_files
        self.reg_path = reg_path
        # Derived paths: everything ships under source_folder/dist.
        self.launcher_path = Path(source_folder / 'dist' / launcher_exe)
        self.programfiles_files = [self.launcher_path] + [source_folder/'dist' / file_path for file_path in additional_programfiles_files]
        self.streaming_files = [source_folder/'dist' / file_path for file_path in additional_streaming_files]

    def modify_streamsettings_json(self, mad_path: Path) -> None:
        """Rewrite StreamingSettings.json so the entry launches the HDR launcher."""
        streaming_settings_path = mad_path / 'StreamingSettings.json'
        streaming_settings = json.loads(streaming_settings_path.read_text())
        streaming_settings['GameData'][0]['StreamingDisplayName'] = 'HDR Launcher'
        streaming_settings['GameData'][0]['StreamingCaption'] = 'HDR Launcher'
        streaming_settings['GameData'][0]['StreamingClassName'] = 'HDR Launcher'
        streaming_settings['GameData'][0][
            'StreamingCommandLine'] = f'start {self.launcher_exe}'
        final_json = json.dumps(streaming_settings, indent=4)
        _logger.debug(f'Final StreamingSettings.json: {final_json}')
        _logger.debug(f'Saving to {streaming_settings_path}')
        streaming_settings_path.write_text(final_json)

    def modify_metadata_json(self, mad_path: Path) -> None:
        """Refresh metadata.json file hashes/sizes to match the edited files."""
        streaming_settings_path = mad_path / 'StreamingSettings.json'
        metadata_path = mad_path / 'metadata.json'
        metadata = json.loads(metadata_path.read_text())
        # One entry per tracked file: name, (empty) url, sha256 and size.
        metadata['metadata']['files'] = [{
            'filename': f.name,
            'url': '',
            'sha256': get_sha256(f), 'size': f.stat().st_size} for f in [streaming_settings_path, *self.streaming_files]
        ]
        final_metadata_json = json.dumps(metadata, indent=4)
        _logger.debug(f'Final metadata.json: {final_metadata_json}')
        _logger.debug(f'Saving to {metadata_path}')
        metadata_path.write_text(final_metadata_json)

    def install(self, cmdline: bool) -> None:
        """Run the full install: copy files, record the install path in the
        registry, patch GFE's streaming entry and restart GFE."""
        _logger.debug(f'Installing Moonlight HDR Launcher {(self.source_folder/"dist"/"version").read_text()}')
        _logger.debug(f'Source folder: {self.source_folder}')
        _logger.debug(f'Destination folder for launcher: {self.destination_folder}')
        _logger.debug(f'Launcher path: {self.launcher_path}')
        _logger.debug(f'List of files to put in {self.destination_folder}: {self.programfiles_files}')
        _logger.debug(f'List of files to put in Streaming folder: {self.streaming_files}')
        if not self.launcher_path.exists():
            show_error(f'{self.launcher_path} does not exist', cmdline)
            raise Exception(f'{self.launcher_path} does not exist')
        create_folder(self.destination_folder, cmdline)
        copy_files(self.programfiles_files, self.destination_folder, cmdline)
        # set destination folder read-write
        oschmod.set_mode_recursive(str(self.destination_folder), 0o777)
        # write destination_folder to registry
        try:
            _logger.debug(f'Writing destination_folder="{self.destination_folder}" to registry at "{self.reg_path}"')
            write_path_to_reg(self.destination_folder, self.reg_path, 'destination_folder')
        except WindowsError as e:
            show_error(f'Failed to write destination_folder to registry: {e}')
            raise e
        show_warning('Before clicking OK, open GeForce Experience and re-scan for games.', cmdline)
        mad_path = get_masseffectandromeda_location(cmdline)
        # set StreamingAssetsData folder read-write
        oschmod.set_mode_recursive(str(mad_path), 0o777)
        # copy files to StreamingAssetsData destination
        # NOTE(review): despite the comment above, the target here is
        # self.destination_folder, not mad_path — confirm which is intended.
        copy_files(self.streaming_files, self.destination_folder, cmdline)
        self.modify_streamsettings_json(mad_path)
        self.modify_metadata_json(mad_path)
        # set StreamingAssetsData folder read-only
        oschmod.set_mode_recursive(str(mad_path), 'a+r,a-w')
        show_warning('The installer will now attempt to restart GeForce Experience. If the new entry does not appear, reboot your PC.', cmdline)
        # kill NVIDIA processes
        os.system('taskkill /f /im "nvcontainer.exe"')
        os.system('taskkill /f /im "NVIDIA Share.exe"')
if __name__ == '__main__':
    import argparse

    # Command-line entry point: parse options, set up file logging, then run
    # the installer against the packaged 'dist' folder.
    parser = argparse.ArgumentParser()
    parser.add_argument('--launcher-exe', type=str, required=False, default=LAUNCHER_EXE, help='Name of the launcher')
    parser.add_argument('--destination-folder', type=str, required=False,
                        default=DESTINATION_FOLDER, help='Destination path')
    parser.add_argument('--cmdline', action='store_true', help='Do not show windows with prompts')
    args = parser.parse_args(sys.argv[1:])
    # Log next to this script so the user can attach it to bug reports.
    log_file = Path(__file__).parent.absolute() / 'moonlight_hdr_launcher_install.log'
    print(f'Log file: {log_file}')
    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        filename=log_file.as_posix(),
                        level=logging.DEBUG)
    launcher_exe = args.launcher_exe
    destination_folder = Path(args.destination_folder)
    cmdline = args.cmdline
    installer = MoonlightHdrLauncherInstaller(
        source_folder=get_source_folder(),
        destination_folder=destination_folder,
        launcher_exe=launcher_exe,
        additional_programfiles_files=ADDITIONAL_PROGRAMFILES_FILES,
        additional_streaming_files=ADDITIONAL_STREAMING_FILES,
        reg_path=REG_PATH)
    installer.install(cmdline)
| StarcoderdataPython |
6555944 | <gh_stars>0
from trading.handlers.routes_functions import StockViews
# Single shared view container used by every route handler below.
views = StockViews()


def configure_routes(app):
    """Register the trading app's HTTP routes on the Flask *app*."""

    @app.route('/')
    def first_page():
        # Landing page.
        return views.start()

    @app.route('/list_rates', methods=['GET'])
    def list_rates():
        # presumably views.rates holds the currently cached rates —
        # verify against StockViews
        return views.get_rates(views.rates)

    @app.route('/send_rates', methods=['POST'])
    def rates():
        # Accept a posted rates payload.
        return views.send_rates()

    @app.route('/trade', methods=['GET'])
    def trade():
        return views.get_trade()
| StarcoderdataPython |
9704159 | <filename>helper_methods.py
# -*- coding: utf-8 -*-
from ryu.lib.packet import ethernet, ether_types as ether, packet
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
import hashlib
from ryu.lib.packet import packet, ethernet, arp, vlan
#contain methods that can be used by different classes and apps
def send_msgs(dp, msgs):
    """Send every OpenFlow message in *msgs* to datapath *dp*.

    Args:
        dp: datapath (switch connection) whose ``send_msg`` is invoked.
        msgs: iterable of prepared OpenFlow messages; falsy values
            (``None`` or an empty collection) are silently skipped, matching
            the original ``if bool(msgs)`` guard.
    """
    # Guard clause replaces the old nested body; commented-out debug
    # prints removed.
    if not msgs:
        return
    for msg in msgs:
        dp.send_msg(msg)
def send_l3_msgs(msgs):
    """Send queued OpenFlow messages per datapath.

    *msgs* maps each datapath to its queued message list
    (``{dp: [msg, ...]}``); empty or falsy queues are skipped.
    """
    for dp, queued in msgs.items():
        if not queued:
            continue
        for msg in queued:
            dp.send_msg(msg)
def make_message (datapath, cookie, table_id, priority, match, instructions = None, actions = None, buffer_id=None, command = None, idle_timeout = 0, hard_timeout = 0):
    """Build an OFPFlowMod for *datapath*.

    *actions*, if given, are wrapped in an APPLY_ACTIONS instruction and
    placed before any explicit *instructions*; *command* defaults to
    OFPFC_ADD; *buffer_id* is forwarded only when truthy.
    """
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    inst = []
    if actions is not None:
        inst += [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
    if instructions is not None:
        inst += instructions
    if command is None:
        command = ofproto.OFPFC_ADD
    # NOTE(review): truthiness check means buffer_id == 0 is treated the
    # same as "no buffer" — confirm that is intended (OFP_NO_BUFFER is
    # usually 0xffffffff, so 0 could be a real buffer id).
    if buffer_id:
        msg = parser.OFPFlowMod(datapath=datapath, cookie=cookie, table_id=table_id, priority=priority, buffer_id=buffer_id, match=match, instructions=inst, command = command, idle_timeout = idle_timeout, hard_timeout = hard_timeout)
    else:
        msg = parser.OFPFlowMod(datapath=datapath, cookie=cookie, table_id=table_id,priority=priority, match=match, instructions=inst, command = command, idle_timeout = idle_timeout, hard_timeout = hard_timeout)
    return msg
def del_flow(dp, cookie, table_id = None, match = None, out_port=None, out_group=None, priority=32768, actions = None, instructions = None, idle_timeout = 0, hard_timeout = 0):
    """Build an OFPFlowMod with OFPFC_DELETE for datapath *dp*.

    The full cookie mask (0xFFFF...FF) makes the delete match *cookie*
    exactly; unspecified table/port/group default to the wildcard values
    OFPTT_ALL / OFPP_ANY / OFPG_ANY.
    """
    parser = dp.ofproto_parser
    ofp = dp.ofproto
    if out_port is None:
        out_port = ofp.OFPP_ANY
    if out_group is None:
        out_group = ofp.OFPG_ANY
    if table_id is None:
        table_id = ofp.OFPTT_ALL
    inst = []
    if actions is not None:
        inst += [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
    if instructions is not None:
        inst += instructions
    msg = parser.OFPFlowMod(cookie=cookie, cookie_mask=0xFFFFFFFFFFFFFFFF, datapath=dp, table_id=table_id, command=ofp.OFPFC_DELETE, out_port=out_port, out_group=out_group, match = match, priority=priority, instructions = inst, idle_timeout = idle_timeout, hard_timeout = hard_timeout)
    return msg
def port_up(port, datapath):
    """Build an OFPPortMod that clears every "down"-related config flag on
    *port*, returning it wrapped in a one-element list.

    Returns an empty list when the port has no MAC address.
    """
    if port.mac is None:
        # such an interface does not exist
        return []
    proto= datapath.ofproto
    mask_all = (proto.OFPPC_PORT_DOWN | proto.OFPPC_NO_RECV | proto.OFPPC_NO_FWD | proto.OFPPC_NO_PACKET_IN)
    # NOTE(review): uses the module-level OF1.3 parser rather than
    # datapath.ofproto_parser — only valid if every switch speaks OF1.3.
    return [parser.OFPPortMod(datapath, port_no=port.num, config=0, mask=mask_all, hw_addr = port.mac)] # 0 means "up" state = no flag configured
def port_shut(port, datapath):
    """Build an OFPPortMod that sets the OFPPC_PORT_DOWN flag on *port*,
    returning it wrapped in a one-element list.

    Returns an empty list when the port has no MAC address.
    """
    if port.mac is None:
        return []
    proto= datapath.ofproto
    # NOTE(review): uses the module-level OF1.3 parser rather than
    # datapath.ofproto_parser — only valid if every switch speaks OF1.3.
    return [parser.OFPPortMod(datapath, port_no=port.num, mask=(proto.OFPPC_PORT_DOWN), config=proto.OFPPC_PORT_DOWN, hw_addr = port.mac)]
def goto_table(table_id):
    """Generate an OFPInstructionGotoTable for *table_id* (module-level
    OF1.3 parser)."""
    return parser.OFPInstructionGotoTable(table_id)
def apply_actions(dp, actions):
    """Wrap *actions* in an OFPIT_APPLY_ACTIONS instruction built with
    *dp*'s own parser."""
    ofproto = dp.ofproto
    ofp_parser = dp.ofproto_parser
    return ofp_parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)
def action_output(dp, port, max_len=None):
    """Generate an OFPActionOutput for *port*.

    Args:
        dp: datapath whose parser builds the action.
        port: output port number.
        max_len: bytes to forward to the controller; only passed through
            when explicitly given (``is not None`` — so 0 is honored,
            unlike the previous ``!= None`` spelling which behaved the
            same but is non-idiomatic).
    """
    kwargs = {'port': port}
    if max_len is not None:
        kwargs['max_len'] = max_len
    return dp.ofproto_parser.OFPActionOutput(**kwargs)
def match(dp, in_port=None, eth_dst=None, eth_src=None, eth_type=None, **kwargs):
    """Generate an OFPMatch for datapath *dp*.

    The four named header fields are forwarded only when not None
    (idiomatic ``is not None`` instead of ``!= None``); arbitrary extra
    match fields can be supplied via **kwargs.
    """
    if in_port is not None:
        kwargs['in_port'] = in_port
    if eth_dst is not None:
        kwargs['eth_dst'] = eth_dst
    if eth_src is not None:
        kwargs['eth_src'] = eth_src
    if eth_type is not None:
        kwargs['eth_type'] = eth_type
    return dp.ofproto_parser.OFPMatch(**kwargs)
def barrier_request(dp):
    """Return a one-element list holding an OFPBarrierRequest for *dp*.

    A barrier guarantees that all flowmods sent before it are applied
    before any flowmods sent after it. For example, it ensures that the
    deletion of a host's old flows completes before its replacement flows
    are installed — otherwise, in a multi-threaded datapath, the delete
    could land after the new flows were added.
    """
    request = dp.ofproto_parser.OFPBarrierRequest(datapath=dp)
    return [request]
def props(cls):
    """Return the names of *cls*'s own attributes that are not
    underscore-prefixed (i.e. its public class-level properties)."""
    return [name for name in cls.__dict__ if not name.startswith('_')]
def hash_for(data):
    """Return the hex MD5 digest of ``repr(data)`` — a stable textual
    fingerprint for arbitrary (repr-able) Python data."""
    return hashlib.md5(repr(data).encode('utf-8')).hexdigest()
def get_key(d, value):
    """Return the first key in dict *d* mapped to *value*, or None when
    no entry matches (reverse dictionary lookup)."""
    return next((k for k, v in d.items() if v == value), None)
def arp_request( src_mac, src_ip, dst_ip, vid = None):
    """Build and serialize a broadcast ARP request packet.

    When *vid* is given, the frame is 802.1Q-tagged with that VLAN id
    (outer ethernet with ethertype 0x8100, then the vlan header carrying
    ethertype 0x806); otherwise a plain ARP-over-ethernet frame is built.
    Returns the serialized ryu ``packet.Packet``.
    """
    src_ip = str(src_ip)
    dst_ip = str(dst_ip)
    BROADCAST = 'ff:ff:ff:ff:ff:ff'
    e = ethernet.ethernet(src=src_mac, dst=BROADCAST, ethertype = 0x806)
    a = arp.arp(opcode=arp.ARP_REQUEST, src_mac=src_mac, src_ip=src_ip, dst_mac=BROADCAST, dst_ip=dst_ip)
    p = packet.Packet()
    if vid is not None:
        # 0x8100 - vlan ethertype
        vl_e = ethernet.ethernet(src=src_mac, dst=BROADCAST, ethertype = 0x8100)
        vl = vlan.vlan(vid=vid, ethertype=0x806)
        p.add_protocol(vl_e)
        p.add_protocol(vl)
    else:
        p.add_protocol(e)
    p.add_protocol(a)
    p.serialize()
    return p
def arp_reply(dp, out_ports, src_mac, src_ip, dst_mac, dst_ip, vid = None):
    """Build an ARP reply packet and return the OFPPacketOut message(s)
    that emit it on *out_ports* of datapath *dp*.

    When *vid* is given, the frame is 802.1Q-tagged with that VLAN id;
    otherwise a plain ARP-over-ethernet frame is built. Uses
    ``packet_output`` to wrap the serialized packet.
    """
    src_ip = str(src_ip)
    dst_ip = str(dst_ip)
    p = packet.Packet()
    # (removed a leftover debug print of all arguments — library helpers
    # should not write to stdout)
    e = ethernet.ethernet(src=src_mac, dst=dst_mac, ethertype = 0x806)
    a = arp.arp(opcode=2, src_mac=src_mac, src_ip=src_ip, dst_mac=dst_mac, dst_ip=dst_ip)
    if vid is not None:
        # 0x8100 - vlan ethertype
        vl_e = ethernet.ethernet(src=src_mac, dst=dst_mac, ethertype = 0x8100)
        vl = vlan.vlan(vid=vid, ethertype=0x806)
        p.add_protocol(vl_e)
        p.add_protocol(vl)
    else:
        p.add_protocol(e)
    p.add_protocol(a)
    p.serialize()
    return packet_output(p, out_ports, dp)
def packet_output(packet, out_ports, dp):
    """Wrap serialized *packet* in an OFPPacketOut that emits it on every
    port in *out_ports*, marked as originating from the controller, and
    return it in a one-element list."""
    ofproto = dp.ofproto
    parser = dp.ofproto_parser
    actions = [parser.OFPActionOutput(port) for port in out_ports]
    msg = parser.OFPPacketOut(datapath=dp,
                              buffer_id=ofproto.OFP_NO_BUFFER,
                              in_port=ofproto.OFPP_CONTROLLER,
                              actions=actions,
                              data=packet)
    return [msg]
| StarcoderdataPython |
6453942 | # Generated by Django 3.1.6 on 2021-02-19 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Roads table storing an
    # uploaded road image, its detection results (image, base64, boxes,
    # scores, classes, count) and the capture coordinates.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Roads',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('original_image', models.ImageField(upload_to='')),
                ('detected_image', models.ImageField(upload_to='')),
                ('detected_base64', models.TextField()),
                ('longitude', models.DecimalField(decimal_places=6, max_digits=9)),
                ('latitude', models.DecimalField(decimal_places=6, max_digits=9)),
                ('effect_percentage', models.DecimalField(decimal_places=6, max_digits=9)),
                ('boxes', models.TextField()),
                ('scores', models.TextField()),
                ('classes', models.TextField()),
                ('num', models.TextField()),
            ],
        ),
    ]
| StarcoderdataPython |
9725017 | # -*- coding: utf-8 -*-
import re
from gevent import monkey; monkey.patch_all()
from web import app as web
from app_sina import app as git_http
# Ordered dispatch table: the first pattern matching PATH_INFO wins.
# Requests for *.git paths go to the git smart-HTTP app; the final
# catch-all pattern sends everything else to the web UI.
ROUTE_MAP = [(re.compile(r'/[^/]*\.git.*'), git_http),
             (re.compile(r'/[^/]*/([^/]*)\.git.*'), git_http),
             (re.compile(r'/.*'), web)]
class Application(object):
    """WSGI dispatcher: forwards each request to the first application in
    ROUTE_MAP whose pattern matches PATH_INFO, defaulting to ``web``."""

    def __call__(self, environ, start_response):
        path = environ['PATH_INFO']
        handler = next(
            (func for rule, func in ROUTE_MAP if rule.match(path)),
            web,
        )
        return handler(environ, start_response)


app = Application()
| StarcoderdataPython |
152053 | <reponame>sash-ko/PyRM
import unittest
from revpy import mfrm
from revpy.exceptions import InvalidInputParameters
class MFRMTestHost(unittest.TestCase):
    """Host-level MFRM estimation tests (examples taken from the MFRM paper)."""

    def test_empty_estimate_host_level(self):
        # No observations at all -> zero demand, spill and recapture.
        estimations = mfrm.estimate_host_level({}, {}, {}, 0.9)
        self.assertEqual(estimations, (0, 0, 0))

    def test_estimate_paper_ex1(self):
        """This test is based on example #1 in the paper."""
        utilities = {'fare1': -2.8564, 'fare2': -2.5684}
        probs, nofly_prob = mfrm.selection_probs(utilities, 0.5)
        estimations = mfrm.estimate_host_level({'fare1': 3, 'fare2': 0},
                                               {'fare1': 1, 'fare2': 0.},
                                               probs, nofly_prob)
        self.assertIsInstance(estimations, tuple)
        self.assertEqual(len(estimations), 3)
        # round numbers
        estimations = round_tuple(estimations)
        self.assertTupleEqual(estimations, (5, 2.86, 0.86))

    def test_demand_mass_balance_h(self):
        # Demand mass balance at the host level for example #1.
        estimations = mfrm.demand_mass_balance_c(3, 3, 1, 0.86)
        self.assertTupleEqual(round_tuple(estimations), (2.14, 0., 0.86))

    def test_invalid_parameters(self):
        """Should not raise any exception."""
        mfrm.estimate_host_level({'fare2': 3}, {'fare2': 1.},
                                 {'fare1': 0.1, 'fare2': 0.4}, 0.5)
class MFRMTestClass(unittest.TestCase):
    """Class-level MFRM estimation tests: paper examples plus two
    regression fixtures captured from production data."""

    def test_empty_estimate_class_level(self):
        # No observations at all -> empty per-class estimate dict.
        estimations = mfrm.estimate_class_level({}, {}, {}, 0.9)
        self.assertEqual(estimations, {})

    def test_demand_mass_balance_c_ex1(self):
        estimations = mfrm.demand_mass_balance_c(3, 3, 1, 0.86)
        self.assertIsInstance(estimations, tuple)
        self.assertEqual(len(estimations), 3)
        self.assertTupleEqual(round_tuple(estimations), (2.14, 0., 0.86))

    def test_demand_mass_balance_c_ex2_a11(self):
        estimations = mfrm.demand_mass_balance_c(3, 0., 0.1, 0.61)
        self.assertTupleEqual(round_tuple(estimations), (0, 0, 0))

    def test_demand_mass_balance_c_ex2_a21(self):
        estimations = mfrm.demand_mass_balance_c(3, 2, 1, 0.61)
        self.assertTupleEqual(round_tuple(estimations), (1.59, 0, 0.41))

    def test_estimate_class_level_struct(self):
        # Check the shape of the returned structure: one entry per fare,
        # each with demand/recapture/spill keys.
        utilities = {'fare1': -2.8564, 'fare2': -2.5684}
        probs, nofly_prob = mfrm.selection_probs(utilities, 0.5)
        estimations = mfrm.estimate_class_level({'fare1': 3, 'fare2': 0},
                                                {'fare1': 1, 'fare2': 0.},
                                                probs, nofly_prob)
        self.assertIsInstance(estimations, dict)
        self.assertEqual(len(estimations), 2)
        self.assertEqual(sorted(estimations.keys()), ['fare1', 'fare2'])
        self.assertEqual(sorted(estimations['fare1'].keys()),
                         ['demand', 'recapture', 'spill'])

    def test_estimate_class_level_ex1(self):
        utilities = {'fare1': -2.8564, 'fare2': -2.5684}
        probs, nofly_prob = mfrm.selection_probs(utilities, 0.5)
        estimations = mfrm.estimate_class_level({'fare1': 3, 'fare2': 0},
                                                {'fare1': 1, 'fare2': 0.},
                                                probs, nofly_prob)
        self.assertAlmostEqual(estimations['fare1']['spill'], 0, places=2)
        self.assertAlmostEqual(estimations['fare1']['recapture'], 0.86, 2)
        self.assertAlmostEqual(estimations['fare1']['demand'], 2.14, 2)
        self.assertAlmostEqual(estimations['fare2']['spill'], 2.86, places=2)
        self.assertAlmostEqual(estimations['fare2']['recapture'], 0, 2)
        self.assertAlmostEqual(estimations['fare2']['demand'], 2.86, 2)

    def test_invalid_parameters(self):
        """Should not raise any exception."""
        mfrm.estimate_class_level({'fare2': 3}, {'fare2': 1.},
                                  {'fare1': 0.1, 'fare2': 0.4}, 0.5)

    def test_non_zero_demand_zero_availability(self):
        # Bookings recorded for a fare that was never available must be
        # rejected as inconsistent input.
        with self.assertRaises(InvalidInputParameters):
            mfrm.estimate_class_level({'fare1': 3, 'fare2': 1},
                                      {'fare2': 1.},
                                      {'fare1': 0.1, 'fare2': 0.4}, 0.5)

    def test_estimate_class_level_ex3(self):
        """Example 3 from MFRM paper"""
        probs = {
            'a11': 0.0256,
            'a12': 0.0513,
            'a13': 0.0769,
            'a21': 0.041,
            'a22': 0.0615,
            'a23': 0.0821,
            'a31': 0.0154,
            'a32': 0.0205,
            'a33': 0.0256
        }
        observed = {
            'a11': 2,
            'a12': 5,
            'a13': 0,
            'a21': 4,
            'a22': 0,
            'a23': 0,
            'a31': 0,
            'a32': 3,
            'a33': 6
        }
        availability = {
            'a11': 1,
            'a12': 1,
            'a13': 0.25,
            'a21': 1,
            'a22': 0.5,
            'a23': 0,
            'a31': 1,
            'a32': 1,
            'a33': 0.5
        }
        nofly_prob = 0.6
        host_estimations = mfrm.estimate_host_level(observed, availability,
                                                    probs, nofly_prob)
        estimations = round_tuple(host_estimations)
        self.assertTupleEqual(estimations, (30.16, 13.83, 3.67))
        class_estimations = mfrm.estimate_class_level(observed, availability,
                                                      probs, nofly_prob)
        # ensure that class demand, spill and recapture in total
        # equals host level estimations
        total_class = sum([v['demand'] for v in class_estimations.values()])
        self.assertAlmostEqual(total_class, host_estimations[0], 2)
        total_class = sum([v['spill'] for v in class_estimations.values()])
        self.assertAlmostEqual(total_class, host_estimations[1], 2)
        total_class = sum([v['recapture'] for v in class_estimations.values()])
        self.assertAlmostEqual(total_class, host_estimations[2], 2)
        expected = {
            'a11': {'demand': 1.63, 'spill': 0, 'recapture': 0.37},
            'a12': {'demand': 4.08, 'spill': 0, 'recapture': 0.92},
            # 'a13': {'demand': 4.35, 'spill': 4.35, 'recapture': 0.},
            'a13': {'demand': 3.02, 'spill': 3.02, 'recapture': 0.},
            'a21': {'demand': 3.27, 'spill': 0, 'recapture': 0.73},
            # 'a22': {'demand': 2.32, 'spill': 2.32, 'recapture': 0.},
            'a22': {'demand': 1.61, 'spill': 1.61, 'recapture': 0.},
            # 'a23': {'demand': 6.19, 'spill': 6.19, 'recapture': 0.},
            'a23': {'demand': 4.3, 'spill': 4.3, 'recapture': 0.},
            'a31': {'demand': 0, 'spill': 0, 'recapture': 0.},
            'a32': {'demand': 2.45, 'spill': 0, 'recapture': 0.55},
            # NOTE: this example case is different from the paper. Somehow
            # it doesn't satisfy (14): s = d*k, k33 = 0.5 ...
            # It also affects a13, a22, a23 that have 0 bookings and
            # calibrated according to the unaccounted spill
            # 'a33': {'demand': 5.87, 'spill': 0.97, 'recapture': 1.1},
            'a33': {'demand': 9.798, 'spill': 4.899, 'recapture': 1.1}
        }
        for element, values in expected.items():
            for key, value in values.items():
                self.assertAlmostEqual(
                    class_estimations[element][key], value, 2)

    def test_estimate_class_level_regression_1(self):
        # Regression fixture captured from real data; keys are
        # "<flight>_<fare>" identifiers.
        utilities = {
            '29255588_3950': 1.4659572,
            '27330948_3950': 2.16431,
            '29255588_2490': 1.1630461,
            '29255578_2990': 1.3300509,
            '29255508_3950': 0.43902999,
            '29255578_3590': 0.83872116,
            '29255578_3950': 0.70265454,
            '29255528_3590': 0.52609205,
            '29255518_3590': 0.52609205,
            '30920848_3950': -0.19642138,
            '27331928_3950': 0.096954226,
            '27337358_3590': 0.52609205,
            '27334478_3590': -0.12226862,
            '29255548_3950': 1.4128097,
            '29255558_3590': 0.2330209,
            '29255588_2990': 0.99219722,
            '29255538_3590': 0.76555932,
            '27341178_3590': 0.61577702,
            '29255548_3590': 1.3927615,
            '29255558_2990': 0.72435057
        }
        observed = {
            '29255588_3950': 3,
            '27330948_3950': 1,
            '29255578_2990': 7,
            '27331928_2450': 1,
            '27331928_3200': 4,
            # missing in 'utilities'
            '27331928_2490': 1,
            '29255588_3018': 1,
            '29255588_2490': 6,
            '29255518_3950': 1,
            '29255578_3018': 1,
            '27331928_3950': 2,
            '29255578_3950': 3,
            '29255548_3950': 2,
            '29255588_2518': 1,
            '29255538_3590': 1,
            '29255578_3590': 1,
            '29255588_2241': 1
        }
        availability = {
            '29255588_3950': 0.650909090909091,
            '27330948_3950': 0.634146341463415,
            '29255578_2990': 0.1875,
            '27331928_2450': 0.00436681222707424,
            '30920848_3950': 0.5,
            '29255538_3950': 0.0411764705882353,
            '29255528_3590': 0.796875,
            '29255518_3950': 0.032967032967033,
            '29255588_3590': 0.0254545454545455,
            '29255518_3590': 0.648351648351648,
            '29255558_3950': 0.00436681222707424,
            '27334478_3590': 0.753246753246753,
            '27334478_3950': 0.0779220779220779,
            '29255528_3950': 0.0703125,
            '29255548_3950': 0.521951219512195,
            '29255558_3590': 0.148471615720524,
            '29255578_3018': 0.00436681222707424,
            '27331928_2490': 0.00436681222707424,
            '29255548_3590': 0.268292682926829,
            '29255588_2241': 0.00436681222707424,
            '29255508_3950': 0.714285714285714,
            '27331928_3200': 0.00436681222707424,
            '29255588_2518': 0.00436681222707424,
            '29255588_3018': 0.00436681222707424,
            '29255588_2490': 0.0872727272727273,
            '27331928_3950': 0.370517928286853,
            '27337358_3590': 0.774193548387097,
            '29255578_3950': 0.213235294117647,
            '29255578_3590': 0.132352941176471,
            '29255588_2990': 0.236363636363636,
            '29255538_3590': 0.911764705882353,
            '27341178_3590': 0.971428571428571,
            '29255558_2990': 0.152838427947598
        }
        probs, nofly_prob = mfrm.selection_probs(utilities, 0.7)
        class_level = mfrm.estimate_class_level(observed, availability,
                                                probs, nofly_prob)
        # no utility
        self.assertTrue('27331928_2490' not in class_level)
        # low utility and no observed demand
        self.assertEqual(class_level['27334478_3590'],
                         {'demand': 0, 'spill': 0, 'recapture': 0})
        # one observation and low utility
        self.assertLess(class_level['29255538_3590']['demand'],
                        observed['29255538_3590'])
        self.assertLess(class_level['29255538_3590']['spill'], 0.1)
        # high utility, many bookings, low availability
        self.assertGreater(class_level['29255578_2990']['demand'],
                           observed['29255578_2990'])
        self.assertGreater(class_level['29255578_2990']['spill'],
                           observed['29255578_2990'])
        # moderate utility, many bookings, moderate availability
        self.assertGreater(class_level['29255578_3950']['demand'],
                           observed['29255578_3950'])

    def test_estimate_class_level_regression_2(self):
        # Second regression fixture: checks that total rounded demand and
        # spill match the expected host-level totals.
        utilities = {
            '25169088_1990': -0.32789022,
            '25177238_2490': 0.74647802,
            '30920878_3590': 1.1093128,
            '30921208_3590': -0.95266426,
            '25176068_2990': 0.60245919,
            '27334408_3590': 1.3004869,
            '27336048_2490': -0.72204816,
            '25176068_2490': 1.1348069,
            '27330978_3950': -0.95266426,
            '27331818_2490': 1.1348069,
            '25174968_2990': -0.95266426,
            '27331818_3590': 0.60245919,
            '25172878_2990': 1.3004869,
            '27330978_3590': -0.95266426,
            '25174968_3590': -0.95266426,
            '27334408_3950': 1.3004869,
            '30920428_2490': -0.72204816,
            '27331818_2990': 0.60245919,
            '25168168_1990': -0.86685419,
            '25168168_3590': 0.028352857,
            '25177238_2990': 0.37813818,
            '25170958_1990': 0.43109083,
            '25178118_1500': 1.557166,
            '25172878_3590': 1.3004869,
            '25173528_2990': -0.86880505,
            '25173528_2490': 0.77468485,
            '27332988_2990': 0.59399945,
            '25168168_2490': -0.72204816,
            '25170958_2490': 0.5758968,
            '27332988_2490': -0.18308425,
            '25168168_2990': 0.028352857}
        observed = {
            '25169088_1990': 2,
            '25177238_2490': 5,
            '25176068_2990': 1,
            '25177238_2990': 1,
            '25170958_1990': 7,
            '25176068_2490': 5,
            '25168168_1990': 1,
            '25174968_2990': 2,
            '25173528_2990': 2,
            '25173528_2490': 1,
            '25172878_2990': 3,
            '25168168_2490': 3,
            '25170958_2490': 3,
            '25178118_1500': 5}
        availability = {
            '25177238_2490': 0.954545454545455,
            '25169088_1990': 1.0,
            '30920878_3590': 1.0,
            '30921208_3590': 1.0,
            '25176068_2990': 0.272727272727273,
            '27334408_3590': 0.409090909090909,
            '27334408_3950': 0.590909090909091,
            '25176068_2490': 0.909090909090909,
            '27331818_2990': 0.363636363636364,
            '27331818_2490': 0.590909090909091,
            '25172878_3590': 0.136363636363636,
            '27331818_3590': 0.136363636363636,
            '25172878_2990': 0.909090909090909,
            '27330978_3590': 0.454545454545455,
            '25174968_3590': 0.0454545454545455,
            '27336048_2490': 1.0,
            '30920428_2490': 1.0,
            '27330978_3950': 0.590909090909091,
            '25168168_1990': 0.727272727272727,
            '25168168_3590': 0.0454545454545455,
            '25177238_2990': 0.0909090909090909,
            '25170958_1990': 0.590909090909091,
            '25178118_1500': 1.0,
            '25174968_2990': 1.0,
            '25173528_2990': 0.590909090909091,
            '25173528_2490': 0.5,
            '27332988_2990': 0.545454545454545,
            '25168168_2490': 0.454545454545455,
            '25170958_2490': 0.454545454545455,
            '27332988_2490': 0.681818181818182,
            '25168168_2990': 0.0454545454545455
        }
        probs, nofly_prob = mfrm.selection_probs(utilities, 0.7)
        class_level = mfrm.estimate_class_level(observed, availability,
                                                probs, nofly_prob)
        self.assertEqual(class_level['25168168_2990'],
                         {'demand': 0, 'spill': 0, 'recapture': 0})
        total_demand = sum([round(d['demand']) for d in class_level.values()])
        self.assertEqual(total_demand, 50)
        total_demand = sum([round(d['spill']) for d in class_level.values()])
        self.assertEqual(total_demand, 20)

    def test_calibrate_no_booking_balance(self):
        # Unaccounted host spill must be distributed over zero-booking
        # classes in proportion to their selection probabilities, with
        # less-available classes receiving more.
        estimates = {
            'p1': {'demand': 0, 'spill': 0},
            'p2': {'demand': 0, 'spill': 0},
            'p3': {'demand': 0, 'spill': 0}
        }
        observed = {}
        probs = {
            'p1': 0.3,
            'p2': 0.3,
            'p3': 0.1
        }
        host_spill = 10
        availability = {}
        result = mfrm.calibrate_no_booking(estimates, observed, availability,
                                           probs, host_spill)
        self.assertAlmostEqual(sum([r['demand'] for r in result.values()]),
                               host_spill)
        self.assertAlmostEqual(sum([r['spill'] for r in result.values()]),
                               host_spill)
        self.assertEqual(result['p1']['demand'], result['p2']['demand'])
        self.assertGreater(result['p1']['demand'], result['p3']['demand'])
        availability = {'p1': 0.5, 'p2': 0.1}
        result = mfrm.calibrate_no_booking(estimates, observed,
                                           availability, probs, host_spill)
        self.assertGreater(result['p2']['demand'], result['p1']['demand'])
        availability = {'p1': 0.5}
        result = mfrm.calibrate_no_booking(estimates, observed,
                                           availability, probs, host_spill)
        self.assertGreater(result['p2']['demand'], result['p1']['demand'])
def round_tuple(tlp, level=2):
    """Round every element of *tlp* to *level* decimal places and return
    the result as a tuple."""
    return tuple(round(value, level) for value in tlp)
| StarcoderdataPython |
19222 | <reponame>tansey/smoothfdr
# import itertools
# from functools import partial
# from scipy.stats import norm
# from scipy.sparse import csc_matrix, linalg as sla
# from scipy import sparse
# from scipy.optimize import minimize, minimize_scalar
# from collections import deque, namedtuple
import numpy as np
from networkx import Graph
from pygfl.solver import TrailSolver
from pygfl.trails import decompose_graph, save_chains
from pygfl.utils import chains_to_trails, calc_plateaus, hypercube_edges
from smoothfdr.smoothed_fdr import GaussianKnown
from smoothfdr.normix import *
from smoothfdr.utils import calc_fdr
def smooth_fdr(data, fdr_level, edges=None, initial_values=None, verbose=0, null_dist=None, signal_dist=None, num_sweeps=10, missing_val=None):
    """Run the full FDR-smoothing pipeline on *data*.

    Estimates the null (empirically, unless given), the signal distribution
    (via predictive recursion, unless given), smooths the priors along the
    graph with the solution-path algorithm, and thresholds posteriors at
    *fdr_level*. Returns the results dict with everything reshaped back to
    ``data.shape``.
    """
    flat_data = data.flatten()
    nonmissing_flat_data = flat_data
    if edges is None:
        if verbose:
            print('Using default edge set of a grid of same shape as the data: {0}'.format(data.shape))
        edges = hypercube_edges(data.shape)
        if missing_val is not None:
            if verbose:
                print('Removing all data points whose data value is {0}'.format(missing_val))
            # Drop every grid edge touching a missing point, and exclude the
            # missing points from distribution estimation.
            edges = [(e1,e2) for (e1,e2) in edges if flat_data[e1] != missing_val and flat_data[e2] != missing_val]
            nonmissing_flat_data = flat_data[flat_data != missing_val]
    # Decompose the graph into trails
    g = Graph()
    g.add_edges_from(edges)
    chains = decompose_graph(g, heuristic='greedy')
    ntrails, trails, breakpoints, edges = chains_to_trails(chains)
    if null_dist is None:
        # empirical null estimation
        mu0, sigma0 = empirical_null(nonmissing_flat_data, verbose=max(0,verbose-1))
    elif isinstance(null_dist,GaussianKnown):
        mu0, sigma0 = null_dist.mean, null_dist.stdev
    else:
        # assumed to be a (mean, stdev) pair
        mu0, sigma0 = null_dist
    null_dist = GaussianKnown(mu0, sigma0)
    if verbose:
        print('Empirical null: {0}'.format(null_dist))
    # signal distribution estimation
    if verbose:
        print('Running predictive recursion for {0} sweeps'.format(num_sweeps))
    if signal_dist is None:
        grid_x = np.linspace(max(-20, nonmissing_flat_data.min() - 1), min(nonmissing_flat_data.max() + 1, 20), 220)
        pr_results = predictive_recursion(nonmissing_flat_data, num_sweeps, grid_x, mu0=mu0, sig0=sigma0)
        signal_dist = GridDistribution(pr_results['grid_x'], pr_results['y_signal'])
    if verbose:
        print('Smoothing priors via solution path algorithm')
    solver = TrailSolver()
    solver.set_data(flat_data, edges, ntrails, trails, breakpoints)
    results = solution_path_smooth_fdr(flat_data, solver, null_dist, signal_dist, verbose=max(0, verbose-1))
    results['discoveries'] = calc_fdr(results['posteriors'], fdr_level)
    results['null_dist'] = null_dist
    results['signal_dist'] = signal_dist
    # Reshape everything back to the original data shape
    results['betas'] = results['betas'].reshape(data.shape)
    results['priors'] = results['priors'].reshape(data.shape)
    results['posteriors'] = results['posteriors'].reshape(data.shape)
    results['discoveries'] = results['discoveries'].reshape(data.shape)
    results['beta_iters'] = np.array([x.reshape(data.shape) for x in results['beta_iters']])
    results['prior_iters'] = np.array([x.reshape(data.shape) for x in results['prior_iters']])
    results['posterior_iters'] = np.array([x.reshape(data.shape) for x in results['posterior_iters']])
    return results
def smooth_fdr_known_dists(data, fdr_level, null_dist, signal_dist, edges=None, initial_values=None, verbose=0, missing_val=None):
    '''FDR smoothing where the null and alternative distributions are known
    (and not necessarily Gaussian). Both must define the function pdf.

    Same pipeline as smooth_fdr() but with the distribution-estimation
    steps skipped; results are reshaped back to data.shape.
    '''
    flat_data = data.flatten()
    nonmissing_flat_data = flat_data
    if edges is None:
        if verbose:
            print('Using default edge set of a grid of same shape as the data: {0}'.format(data.shape))
        edges = hypercube_edges(data.shape)
        if missing_val is not None:
            if verbose:
                print('Removing all data points whose data value is {0}'.format(missing_val))
            # Drop every grid edge touching a missing point.
            edges = [(e1,e2) for (e1,e2) in edges if flat_data[e1] != missing_val and flat_data[e2] != missing_val]
            nonmissing_flat_data = flat_data[flat_data != missing_val]
    # Decompose the graph into trails
    g = Graph()
    g.add_edges_from(edges)
    chains = decompose_graph(g, heuristic='greedy')
    ntrails, trails, breakpoints, edges = chains_to_trails(chains)
    if verbose:
        print('Smoothing priors via solution path algorithm')
    solver = TrailSolver()
    solver.set_data(flat_data, edges, ntrails, trails, breakpoints)
    results = solution_path_smooth_fdr(flat_data, solver, null_dist, signal_dist, verbose=max(0, verbose-1))
    results['discoveries'] = calc_fdr(results['posteriors'], fdr_level)
    results['null_dist'] = null_dist
    results['signal_dist'] = signal_dist
    # Reshape everything back to the original data shape
    results['betas'] = results['betas'].reshape(data.shape)
    results['priors'] = results['priors'].reshape(data.shape)
    results['posteriors'] = results['posteriors'].reshape(data.shape)
    results['discoveries'] = results['discoveries'].reshape(data.shape)
    results['beta_iters'] = np.array([x.reshape(data.shape) for x in results['beta_iters']])
    results['prior_iters'] = np.array([x.reshape(data.shape) for x in results['prior_iters']])
    results['posterior_iters'] = np.array([x.reshape(data.shape) for x in results['posterior_iters']])
    return results
def solution_path_smooth_fdr(data, solver, null_dist, signal_dist, min_lambda=0.20, max_lambda=1.5, lambda_bins=30, verbose=0, initial_values=None):
    '''Follows the solution path of the generalized lasso to find the best lambda value.

    Sweeps lambda log-uniformly from max_lambda down to min_lambda,
    warm-starting each fit from the previous one, scores each fit by
    AIC/AICc/BIC, and returns the traces plus the BIC-best solution.
    '''
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    u_trace = []  # NOTE(review): never appended to below — appears unused
    w_trace = []
    c_trace = []
    results_trace = []  # NOTE(review): never appended to below — appears unused
    best_idx = None
    best_plateaus = None
    for i, _lambda in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, _lambda))
        # Fit to the final values
        results = fixed_penalty_smooth_fdr(data, solver, _lambda, null_dist, signal_dist,
                                           verbose=max(0,verbose - 1),
                                           initial_values=initial_values)
        if verbose:
            print('Calculating degrees of freedom')
        # DoF = number of constant plateaus in the fitted beta field.
        plateaus = calc_plateaus(results['beta'], solver.edges)
        dof_trace[i] = len(plateaus)
        if verbose:
            print('Calculating AIC')
        # Get the negative log-likelihood
        log_likelihood_trace[i] = -_data_negative_log_likelihood(data, results['c'], null_dist, signal_dist)
        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (data.shape[0] - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(data)) - np.log(2 * np.pi))
        # Track the best model thus far
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus
        # Save the final run parameters to use for warm-starting the next iteration
        initial_values = results
        # Save the trace of all the resulting parameters
        beta_trace.append(results['beta'])
        w_trace.append(results['w'])
        c_trace.append(results['c'])
        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta_iters': np.array(beta_trace),
            'posterior_iters': np.array(w_trace),
            'prior_iters': np.array(c_trace),
            'lambda_iters': lambda_grid,
            'best': best_idx,
            'betas': beta_trace[best_idx],
            'priors': c_trace[best_idx],
            'posteriors': w_trace[best_idx],
            'lambda': lambda_grid[best_idx],
            'plateaus': best_plateaus}
def fixed_penalty_smooth_fdr(data, solver, _lambda, null_dist, signal_dist, initial_values=None, verbose=0):
    '''Run the smooth-FDR EM algorithm for a single, fixed penalty value.

    Alternates E-steps (posterior signal probabilities) with M-steps (a graph
    fused lasso fit of the prior log-odds ``beta``) until the relative change
    in the data negative log-likelihood falls below the tolerance.

    Args:
        data: array of test statistics, one per node of the solver's graph.
        solver: graph fused lasso solver; its internal state is mutated and
            shared with the warm-start dict (see _m_step).
        _lambda: fixed smoothing penalty passed through to the solver.
        null_dist, signal_dist: objects exposing ``pdf`` for the null and
            signal components of the two-groups model.
        initial_values: optional result dict from a previous run (keys
            'beta', 'c', 'z', 'u') used to warm-start this one.
        verbose: >0 prints per-step progress.

    Returns:
        dict with the final 'beta', posterior 'w', prior 'c', solver state
        ('z', 'u') and per-iteration traces of each quantity.
    '''
    # Outer EM tolerances/caps; m_* values control the inner M-step.
    converge = 1e-6
    max_steps = 30
    m_steps = 1
    m_converge = 1e-6
    # Per-iteration traces, stacked into arrays at the end.
    w_iters = []
    beta_iters = []
    c_iters = []
    delta_iters = []
    # Start above the tolerance so the loop runs at least once.
    delta = converge + 1
    if initial_values is None:
        # Cold start: zero log-odds, i.e. prior probability 0.5 everywhere.
        beta = np.zeros(data.shape)
        prior_prob = np.exp(beta) / (1 + np.exp(beta))
    else:
        beta = initial_values['beta']
        prior_prob = initial_values['c']
    # NOTE: with prev_nll = 0 the first delta is |cur_nll| / converge, which
    # is effectively always above the tolerance -- intentional kick-start.
    prev_nll = 0
    cur_step = 0
    while delta > converge and cur_step < max_steps:
        if verbose:
            print('Step #{0}'.format(cur_step))
        if verbose:
            print('\tE-step...')
        # Get the likelihood weights vector (E-step)
        post_prob = _e_step(data, prior_prob, null_dist, signal_dist)
        if verbose:
            print('\tM-step...')
        # Find beta using an alternating Taylor approximation and convex optimization (M-step)
        beta, initial_values = _m_step(beta, prior_prob, post_prob, _lambda,
                                    solver, m_converge, m_steps,
                                    max(0,verbose - 1), initial_values)
        # Get the signal probabilities
        prior_prob = ilogit(beta)
        cur_nll = _data_negative_log_likelihood(data, prior_prob, null_dist, signal_dist)
        # Track the change in log-likelihood to see if we've converged
        delta = np.abs(cur_nll - prev_nll) / (prev_nll + converge)
        if verbose:
            print('\tDelta: {0}'.format(delta))
        # Track the step
        w_iters.append(post_prob)
        beta_iters.append(beta)
        c_iters.append(prior_prob)
        delta_iters.append(delta)
        # Increment the step counter
        cur_step += 1
        # Update the negative log-likelihood tracker
        prev_nll = cur_nll
        # DEBUGGING
        if verbose:
            print('\tbeta: [{0:.4f}, {1:.4f}]'.format(beta.min(), beta.max()))
            print('\tprior_prob: [{0:.4f}, {1:.4f}]'.format(prior_prob.min(), prior_prob.max()))
            print('\tpost_prob: [{0:.4f}, {1:.4f}]'.format(post_prob.min(), post_prob.max()))
    # Stack the traces so callers get (n_iters, ...) arrays.
    w_iters = np.array(w_iters)
    beta_iters = np.array(beta_iters)
    c_iters = np.array(c_iters)
    delta_iters = np.array(delta_iters)
    # Return the results of the run
    return {'beta': beta, 'w': post_prob, 'c': prior_prob,
            'z': initial_values['z'], 'u': initial_values['u'],
            'w_iters': w_iters, 'beta_iters': beta_iters,
            'c_iters': c_iters, 'delta_iters': delta_iters}
def _data_negative_log_likelihood(data, prior_prob, null_dist, signal_dist):
'''Calculate the negative log-likelihood of the data given the weights.'''
signal_weight = prior_prob * signal_dist.pdf(data)
null_weight = (1-prior_prob) * null_dist.pdf(data)
return -np.log(signal_weight + null_weight).sum()
def _e_step(data, prior_prob, null_dist, signal_dist):
'''Calculate the complete-data sufficient statistics (weights vector).'''
signal_weight = prior_prob * signal_dist.pdf(data)
null_weight = (1-prior_prob) * null_dist.pdf(data)
post_prob = signal_weight / (signal_weight + null_weight)
return post_prob
def _m_step(beta, prior_prob, post_prob, _lambda,
            solver, converge, max_steps,
            verbose, initial_values):
    '''
    Alternating second-order Taylor-series expansion about the current iterate.

    Repeatedly linearises the logistic log-likelihood around the current
    ``beta`` and solves the resulting weighted least squares problem with the
    graph fused lasso solver, until the surrogate likelihood converges or
    ``max_steps`` is reached.

    Returns:
        (beta, initial_values): the updated coefficients and the solver state
        dict ('beta', 'z', 'u') used to warm-start the next call. The arrays
        in the dict are shared with the solver and updated in place.
    '''
    prev_nll = _m_log_likelihood(post_prob, beta)
    delta = converge + 1
    cur_step = 0
    while delta > converge and cur_step < max_steps:
        if verbose:
            print('\t\tM-Step iteration #{0}'.format(cur_step))
            print('\t\tTaylor approximation...')
        # Form the parameters for our weighted least squares.
        # NOTE(review): weights hit 0 when prior_prob is exactly 0 or 1,
        # making y divide by zero -- presumably upstream keeps prior_prob in
        # the open interval; confirm.
        weights = (prior_prob * (1 - prior_prob))
        y = beta - (prior_prob - post_prob) / weights
        solver.set_values_only(y, weights=weights)
        if initial_values is None:
            # First call: adopt the solver's own arrays as the shared state.
            initial_values = {'beta': solver.beta, 'z': solver.z, 'u': solver.u}
        else:
            # Warm start: point the solver at the state from the last call.
            solver.beta = initial_values['beta']
            solver.z = initial_values['z']
            solver.u = initial_values['u']
        solver.solve(_lambda)
        # solver.solve mutates the arrays shared with initial_values in place,
        # so this picks up the freshly solved coefficients.
        beta = initial_values['beta']
        # Get the current log-likelihood
        cur_nll = _m_log_likelihood(post_prob, beta)
        # Track the convergence
        delta = np.abs(prev_nll - cur_nll) / (prev_nll + converge)
        if verbose:
            print('\t\tM-step delta: {0}'.format(delta))
        # Increment the step counter
        cur_step += 1
        # Update the negative log-likelihood tracker
        prev_nll = cur_nll
    return beta, initial_values
def _m_log_likelihood(post_prob, beta):
'''Calculate the log-likelihood of the betas given the weights and data.'''
return (np.log(1 + np.exp(beta)) - post_prob * beta).sum()
def ilogit(x):
    '''Inverse logit (sigmoid): map log-odds x onto a probability in (0, 1).'''
    denominator = 1. + np.exp(-x)
    return 1. / denominator
| StarcoderdataPython |
399955 | import pandas as pd
def read_csv(file_path, show_info=False):
    """Read a CSV file into a pandas DataFrame.

    Args:
        file_path: path (or file-like object) accepted by ``pd.read_csv``.
        show_info: when True, print a short summary of the loaded data
            (shape, dtypes and NaN counts) before returning it.

    Returns:
        The loaded ``pd.DataFrame``.
    """
    # Read the data into a data frame
    data = pd.read_csv(file_path)
    if show_info:
        # Check the number of data points in the data set
        print("# of data points (rows):", len(data))
        # Check the number of features in the data set
        print("# of features (columns):", len(data.columns))
        # Check the data types
        print(data.dtypes.unique())
        # Number of columns containing at least one NaN. (Fixed: the
        # original labels had rows/columns swapped relative to the
        # quantities actually computed.)
        print("Columns with NaN:", data.isnull().any().sum(), ' / ', len(data.columns))
        # Number of rows containing at least one NaN.
        print("Rows with NaN:", data.isnull().any(axis=1).sum(), ' / ', len(data))
    # Return the data frame
    return data
| StarcoderdataPython |
3319529 | #!/usr/bin/env python
import json
import unittest
from binoas.transformers import BasePostTransformer, JSONPathPostTransformer
class TestBasePostTransformer(unittest.TestCase):
    """The abstract base transformer must refuse to transform anything."""

    def setUp(self):
        minimal_config = {
            'binoas': {
                'applications': {
                    'poliflw': {}
                }
            }
        }
        self.post_transformer = BasePostTransformer(minimal_config)

    def test_transform(self):
        # transform() is abstract on the base class.
        self.assertRaises(NotImplementedError,
                          self.post_transformer.transform, {})
class TestJSONPathPostTransformer(unittest.TestCase):
    """End-to-end tests for JSONPathPostTransformer using the JSON fixtures
    under tests/data/.

    The config maps each application to JSONPath extraction rules; entries
    under 'data' may be plain paths or {'name': ..., 'path': ...} mappings
    (the latter renames the extracted key, e.g. 'topics[*].name' -> 'topic').
    """
    def setUp(self):
        config = {
            'binoas': {
                'applications': {
                    'poliflw': {
                        'name': 'PoliFLW',
                        'rules': {
                            'id': "meta.original_object_id",
                            'title': "title",
                            'description': "description",
                            'url': "meta.original_object_urls.html",
                            'created': "date",
                            'modified': "date",
                            'data': [
                                'parties',
                                'politicians',
                                'location',
                                'source',
                                'type',
                                {
                                    'name': 'topic',
                                    'path': 'topics[*].name'
                                }
                            ]
                        }
                    },
                    'politwoops': {
                        'name': 'Politwoops',
                        'rules': {
                            'id': "details.id_str",
                            'title': "details.text",
                            'description': "details.extended_tweet.full_text",
                            'url': "details.id_str",
                            'created': "created_at",
                            'modified': "updated_at",
                            'data': [
                                'user_name',
                                'politician_id',
                                'politician.party_id',
                            ]
                        }
                    }
                }
            }
        }
        self.post_transformer = JSONPathPostTransformer(config)
    def test_transform_no_valid_post(self):
        # An empty post carries no application and must be rejected.
        with self.assertRaises(ValueError):
            self.post_transformer.transform({})
    def test_transform_valid_post_poliflw(self):
        # Full extraction of a PoliFLW article fixture, including the
        # renamed 'topic' entry from 'topics[*].name'.
        expected = {
            'application': 'poliflw',
            'payload': {
                'id': 'https://www.cda.nl/noord-holland/amsterdam/actueel/nieuws/zomerborrel-6-september/',
                'title': 'Zomerborrel: 6 september',
                'description': (
                    'Donderdag 6 september hopen we op zomers weer, want dan vindt'
                    ' voor alle Amsterdamse CDA leden en geïnteresseerden de CDA '
                    'zomerborrel plaats. Voor €15,- ontvang je drie drankjes (bier,'
                    ' wijn, fris), een klein puntzakje friet en twee gefrituurde '
                    'snacks. Wie komt gezellig langs?&nbsp;\nLocatie: Brasserie'
                    ' Nel, Amstelveld 12Aanvang: 19.30uAanmelden: via '
                    '<EMAIL>Betaling: Je kunt de €15,- cash '
                    'meenemen of van te voren via CDA Afdeling Amsterdam op rekening '
                    'NL38INGB0000065005 overmaken o.v.v. CDA zomerborrel incl. je naam.'
                ),
                'url': 'https://www.cda.nl/noord-holland/amsterdam/actueel/nieuws/zomerborrel-6-september/',
                'created': '2018-07-25T12:31:38',
                'modified': '2018-07-25T12:31:38',
                'data': [
                    {'key': 'parties', 'value': 'CDA'},
                    {'key': 'parties', 'value': 'CDA'},
                    {'key': 'location', 'value': 'Amsterdam'},
                    {'key': 'source', 'value': 'Partij nieuws'},
                    {'key': 'type', 'value': 'Partij'},
                    {'key': 'topic', 'value': 'Zorg en gezondheid | Organisatie en beleid'}
                ]
            }
        }
        with open('tests/data/poliflw.json', 'r') as in_file:
            content = in_file.read()
        data = json.loads(content)
        result = self.post_transformer.transform(data)
        self.assertEqual(result, expected)
    def test_transform_valid_post_politwoops(self):
        # Full extraction of a Politwoops tweet fixture.
        expected = {
            'application': 'politwoops',
            'payload': {
                'id': '1025749465611808768',
                'title': (
                    'Fijn de steun voor de initiatiefwet van @D66 @PvdA & '
                    '@groenlinks van het Kabinet @KajsaOllongren & '
                    '@markharbers om… https://t.co/YBlgoGseOQ'),
                'description': (
                    'Fijn de steun voor de initiatiefwet van @D66 @PvdA & '
                    '@groenlinks van het Kabinet @KajsaOllongren & '
                    '@markharbers om artikel 1 vd #Grondwet uit te breiden! '
                    '\uf64f\uf3fb! Nog wel wat werk te doen samen met collega'
                    '’s @kirstenvdhul & @NevinOzutok, maar dit is een '
                    'fijne stimulans!\uf44a\uf3fb\uf308 https://t.co/KwCjmGt1cM'),
                'url': '1025749465611808768',
                'created': '2018-08-04T16:25:04+02:00',
                'modified': '2018-08-04T16:29:59+02:00',
                'data': [
                    {'key': 'user_name', 'value': 'Vera_Bergkamp'},
                    {'key': 'politician_id', 'value': 911},
                    {'key': 'politician.party_id', 'value': 3}
                ]
            }
        }
        with open('tests/data/politwoops.json', 'r') as in_file:
            content = in_file.read()
        data = json.loads(content)
        result = self.post_transformer.transform(data)
        self.assertEqual(result, expected)
    def test_transform_valid_post_politwoops_no_find_no_data(self):
        # When the 'id' rule points at a path that does not exist in the
        # fixture, the transformer should yield None rather than fail.
        expected = {
            'application': 'politwoops',
            'payload': {
                'id': None,
                'title': (
                    'Fijn de steun voor de initiatiefwet van @D66 @PvdA & '
                    '@groenlinks van het Kabinet @KajsaOllongren & '
                    '@markharbers om… https://t.co/YBlgoGseOQ'),
                'description': (
                    'Fijn de steun voor de initiatiefwet van @D66 @PvdA & '
                    '@groenlinks van het Kabinet @KajsaOllongren & '
                    '@markharbers om artikel 1 vd #Grondwet uit te breiden! '
                    '\uf64f\uf3fb! Nog wel wat werk te doen samen met collega'
                    '’s @kirstenvdhul & @NevinOzutok, maar dit is een '
                    'fijne stimulans!\uf44a\uf3fb\uf308 https://t.co/KwCjmGt1cM'),
                'url': '1025749465611808768',
                'created': '2018-08-04T16:25:04+02:00',
                'modified': '2018-08-04T16:29:59+02:00',
                'data': [
                    {'key': 'user_name', 'value': 'Vera_Bergkamp'},
                    {'key': 'politician_id', 'value': 911},
                    {'key': 'politician.party_id', 'value': 3}
                ]
            }
        }
        # Point the id rule at a path absent from the politwoops fixture.
        self.post_transformer.config[
            'binoas']['applications']['politwoops']['rules']['id'] = (
            'meta.original_object_id')
        with open('tests/data/politwoops.json', 'r') as in_file:
            content = in_file.read()
        data = json.loads(content)
        result = self.post_transformer.transform(data)
        self.assertEqual(result, expected)
| StarcoderdataPython |
3472989 | <gh_stars>1-10
# Copyright (c) 2015, <NAME>
#
# See the LICENSE file for legal information regarding use of this file.
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
from tlslite.utils.constanttime import ct_lt_u32, ct_gt_u32, ct_le_u32, \
ct_lsb_prop_u8, ct_isnonzero_u32, ct_neq_u32, ct_eq_u32, \
ct_check_cbc_mac_and_pad, ct_compare_digest
from hypothesis import given, example
import hypothesis.strategies as st
from tlslite.utils.compat import compatHMAC
from tlslite.utils.cryptomath import getRandomBytes
from tlslite.recordlayer import RecordLayer
import tlslite.utils.tlshashlib as hashlib
import hmac
class TestContanttime(unittest.TestCase):
    """Property-based checks for the constant-time u32 comparison helpers.

    Each ct_* helper must return 1 exactly when the equivalent Python
    comparison is true; hypothesis samples the full u32 range and the
    explicit examples pin the boundary cases (0, 1 and values near 2**32).
    """
    @given(i=st.integers(0,2**32 - 1), j=st.integers(0,2**32 - 1))
    @example(i=0, j=0)
    @example(i=0, j=1)
    @example(i=1, j=0)
    @example(i=2**32 - 1, j=2**32 - 1)
    @example(i=2**32 - 2, j=2**32 - 1)
    @example(i=2**32 - 1, j=2**32 - 2)
    def test_ct_lt_u32(self, i, j):
        self.assertEqual((i < j), (ct_lt_u32(i, j) == 1))
    @given(i=st.integers(0,2**32 - 1), j=st.integers(0,2**32 - 1))
    @example(i=0, j=0)
    @example(i=0, j=1)
    @example(i=1, j=0)
    @example(i=2**32 - 1, j=2**32 - 1)
    @example(i=2**32 - 2, j=2**32 - 1)
    @example(i=2**32 - 1, j=2**32 - 2)
    def test_ct_gt_u32(self, i, j):
        self.assertEqual((i > j), (ct_gt_u32(i, j) == 1))
    @given(i=st.integers(0,2**32 - 1), j=st.integers(0,2**32 - 1))
    @example(i=0, j=0)
    @example(i=0, j=1)
    @example(i=1, j=0)
    @example(i=2**32 - 1, j=2**32 - 1)
    @example(i=2**32 - 2, j=2**32 - 1)
    @example(i=2**32 - 1, j=2**32 - 2)
    def test_ct_le_u32(self, i, j):
        self.assertEqual((i <= j), (ct_le_u32(i, j) == 1))
    @given(i=st.integers(0,2**32 - 1), j=st.integers(0,2**32 - 1))
    @example(i=0, j=0)
    @example(i=0, j=1)
    @example(i=1, j=0)
    @example(i=2**32 - 1, j=2**32 - 1)
    @example(i=2**32 - 2, j=2**32 - 1)
    @example(i=2**32 - 1, j=2**32 - 2)
    def test_ct_neq_u32(self, i, j):
        self.assertEqual((i != j), (ct_neq_u32(i, j) == 1))
    @given(i=st.integers(0,2**32 - 1), j=st.integers(0,2**32 - 1))
    @example(i=0, j=0)
    @example(i=0, j=1)
    @example(i=1, j=0)
    @example(i=2**32 - 1, j=2**32 - 1)
    @example(i=2**32 - 2, j=2**32 - 1)
    @example(i=2**32 - 1, j=2**32 - 2)
    def test_ct_eq_u32(self, i, j):
        self.assertEqual((i == j), (ct_eq_u32(i, j) == 1))
    @given(i=st.integers(0,255))
    @example(i=0)
    @example(i=255)
    def test_ct_lsb_prop_u8(self, i):
        # The helper propagates the least significant bit to a full byte:
        # 0xff when the LSB is set, 0x00 otherwise.
        self.assertEqual(((i & 0x1) == 1), (ct_lsb_prop_u8(i) == 0xff))
        self.assertEqual(((i & 0x1) == 0), (ct_lsb_prop_u8(i) == 0x00))
    @given(i=st.integers(0,2**32 - 1))
    @example(i=0)
    def test_ct_isnonzero_u32(self, i):
        self.assertEqual((i != 0), (ct_isnonzero_u32(i) == 1))
class TestContanttimeCBCCheck(unittest.TestCase):
    """Tests for the constant-time CBC MAC-and-pad verification.

    Nearly every case follows the same recipe -- MAC some application data,
    optionally corrupt a byte, append TLS-style padding and run the check --
    so the shared path lives in the _check() helper and each test method
    just states its inputs and the expected verdict. Test method names are
    unchanged from the original suite.
    """

    @staticmethod
    def data_prepare(application_data, seqnum_bytes, content_type, version,
                     mac, key):
        """Return application_data with its HMAC record digest appended."""
        r_layer = RecordLayer(None)
        r_layer.version = version
        h = hmac.new(key, digestmod=mac)
        digest = r_layer.calculateMAC(h, seqnum_bytes, content_type,
                                      application_data)
        return application_data + digest

    def _check(self, application_data, padding, version=(3, 1),
               mac=hashlib.sha1, key=None, content_type=0x14,
               corrupt_offset=None):
        """MAC the data, optionally corrupt it, append padding and verify.

        corrupt_offset, when given, flips every bit of the byte that many
        positions from the end of the MACed record (before padding).
        Returns the verdict of ct_check_cbc_mac_and_pad.
        """
        if key is None:
            key = compatHMAC(bytearray(20))
        seqnum_bytes = bytearray(16)
        data = self.data_prepare(application_data, seqnum_bytes,
                                 content_type, version, mac, key)
        if corrupt_offset is not None:
            data[-corrupt_offset] ^= 0xff
        data += padding
        h = hmac.new(key, digestmod=mac)
        h.block_size = mac().block_size  # python2 workaround
        return ct_check_cbc_mac_and_pad(data, h, seqnum_bytes,
                                        content_type, version)

    def test_with_empty_data_and_minimum_pad(self):
        self.assertTrue(self._check(bytearray(0), bytearray(b'\x00')))

    def test_with_empty_data_and_maximum_pad(self):
        self.assertTrue(self._check(bytearray(0), bytearray(b'\xff'*256)))

    def test_with_little_data_and_minimum_pad(self):
        self.assertTrue(self._check(bytearray(b'\x01'*32),
                                    bytearray(b'\x00')))

    def test_with_little_data_and_maximum_pad(self):
        self.assertTrue(self._check(bytearray(b'\x01'*32),
                                    bytearray(b'\xff'*256)))

    def test_with_lots_of_data_and_minimum_pad(self):
        self.assertTrue(self._check(bytearray(b'\x01'*1024),
                                    bytearray(b'\x00')))

    def test_with_lots_of_data_and_maximum_pad(self):
        self.assertTrue(self._check(bytearray(b'\x01'*1024),
                                    bytearray(b'\xff'*256)))

    def test_with_lots_of_data_and_small_pad(self):
        self.assertTrue(self._check(bytearray(b'\x01'*1024),
                                    bytearray(b'\x0a'*11)))

    def test_with_too_little_data(self):
        # A record that holds only digest_size bytes (no data, no valid
        # pad) must fail; no MAC is computed here on purpose.
        key = compatHMAC(bytearray(20))
        seqnum_bytes = bytearray(16)
        mac = hashlib.sha1
        data = bytearray(mac().digest_size)
        h = hmac.new(key, digestmod=mac)
        h.block_size = mac().block_size  # python2 workaround
        self.assertFalse(ct_check_cbc_mac_and_pad(data, h, seqnum_bytes,
                                                  0x14, (3, 1)))

    def test_with_invalid_hash(self):
        # Corrupt the last byte of the MAC itself.
        self.assertFalse(self._check(bytearray(b'\x01'*1024),
                                     bytearray(b'\xff'*256),
                                     corrupt_offset=1))

    @given(i=st.integers(1, 20))
    def test_with_invalid_random_hash(self, i):
        # Corrupt a random position inside the (random) MACed record.
        self.assertFalse(self._check(getRandomBytes(63), bytearray(b'\x00'),
                                     version=(3, 3), content_type=0x15,
                                     key=compatHMAC(getRandomBytes(20)),
                                     corrupt_offset=i))

    def test_with_invalid_pad(self):
        # Padding bytes must all equal the pad length byte in TLS.
        self.assertFalse(self._check(bytearray(b'\x01'*1024),
                                     bytearray(b'\x00' + b'\xff'*255)))

    def test_with_pad_longer_than_data(self):
        self.assertFalse(self._check(bytearray(b'\x01'), bytearray(b'\xff')))

    def test_with_pad_longer_than_data_in_SSLv3(self):
        application_data = bytearray(b'\x01')
        padding = bytearray([len(application_data)
                             + hashlib.sha1().digest_size + 1])
        self.assertFalse(self._check(application_data, padding,
                                     version=(3, 0)))

    def test_with_null_pad_in_SSLv3(self):
        # SSLv3 does not require pad bytes to repeat the length byte.
        self.assertTrue(self._check(bytearray(b'\x01'*10),
                                    bytearray(b'\x00'*10 + b'\x0a'),
                                    version=(3, 0), mac=hashlib.md5))

    def test_with_MD5(self):
        self.assertTrue(self._check(bytearray(b'\x01'*10),
                                    bytearray(b'\x0a'*11),
                                    mac=hashlib.md5))

    def test_with_SHA256(self):
        self.assertTrue(self._check(bytearray(b'\x01'*10),
                                    bytearray(b'\x0a'*11),
                                    version=(3, 3), mac=hashlib.sha256))

    def test_with_SHA384(self):
        self.assertTrue(self._check(bytearray(b'\x01'*10),
                                    bytearray(b'\x0a'*11),
                                    version=(3, 3), mac=hashlib.sha384))
class TestCompareDigest(unittest.TestCase):
    """Tests for the constant-time digest comparison helper."""
    def test_with_equal_length(self):
        # Equal content and equal length must compare as equal.
        self.assertTrue(ct_compare_digest(bytearray(10), bytearray(10)))
        self.assertTrue(ct_compare_digest(bytearray(b'\x02'*8),
                                          bytearray(b'\x02'*8)))
    def test_different_lengths(self):
        # Length mismatch alone must make the comparison fail.
        self.assertFalse(ct_compare_digest(bytearray(10), bytearray(12)))
        self.assertFalse(ct_compare_digest(bytearray(20), bytearray(12)))
    def test_different(self):
        # Same length but differing bytes (at the end and at the start).
        self.assertFalse(ct_compare_digest(bytearray(b'\x01'),
                                           bytearray(b'\x03')))
        self.assertFalse(ct_compare_digest(bytearray(b'\x01'*10 + b'\x02'),
                                           bytearray(b'\x01'*10 + b'\x03')))
        self.assertFalse(ct_compare_digest(bytearray(b'\x02' + b'\x01'*10),
                                           bytearray(b'\x03' + b'\x01'*10)))
| StarcoderdataPython |
9660775 | import os
import logging
from progress.bar import Bar
from requests.exceptions import RequestException
from page_loader import resource
from page_loader.dom_tree import set_local_resources
from page_loader.storage import create_file, create_dir
from page_loader.urls import url_to_name, url_to_file_name
def download(page_url, output_dir_path):
    """Download a web page plus its local resources.

    Saves the rewritten HTML under output_dir_path and all referenced
    resources in a sibling '<page>_files' directory, showing a progress bar.

    Returns:
        Path of the saved HTML file.
    """
    html = resource.get(page_url, decode=True)
    resources_dir = os.path.join(
        output_dir_path, url_to_name(page_url) + '_files',
    )
    # Rewrite resource references in the page to point at local copies.
    updated_html, resources = set_local_resources(
        html, page_url, resources_dir,
    )
    page_path = os.path.join(output_dir_path, url_to_file_name(page_url))
    create_file(updated_html, page_path)
    if resources:
        create_dir(resources_dir)
        bar = Bar('Downloading page resources', max=len(resources))
        for res in resources:
            _download_page_resource(res['url'], res['download_path'])
            bar.next()
        bar.finish()
    return page_path
def _download_page_resource(url, download_path):
    """Fetch one page resource to download_path; failures are logged, not raised."""
    try:
        create_file(resource.get(url), download_path)
    except (RequestException, OSError) as e:
        logging.warning(f"Page resource {url} wasn't downloaded - {str(e)}")
| StarcoderdataPython |
3175 | <reponame>dunzoit/alerta-contrib
from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
class SentryWebhook(WebhookBase):
    """Convert an incoming Sentry webhook payload into an alerta Alert."""

    def incoming(self, query_string, payload):
        event = payload.get('event')
        # Sentry v9 moved the HTTP context to the 'request' key; older
        # payloads used 'sentry.interfaces.Http'.
        key = 'request' if 'request' in event else 'sentry.interfaces.Http'
        env_name = event[key]['env'].get('ENV', 'prod')
        environment = 'Production' if env_name == 'prod' else 'Development'
        severity = 'critical' if payload['level'] == 'error' else 'ok'
        return Alert(
            resource=payload['culprit'],
            event=event['event_id'],
            environment=environment,
            severity=severity,
            service=[payload['project']],
            group='Application',
            value=payload['level'],
            text='{}\n{}\n{}'.format(payload['message'], event.get('title', ''), payload['url']),
            tags=['{}={}'.format(k, v) for k, v in event['tags']],
            attributes={'modules': ['{}=={}'.format(k, v) for k, v in event['modules'].items()]},
            origin='sentry.io',
            raw_data=str(payload)
        )
| StarcoderdataPython |
11268296 | import os
import subprocess
import sys
from typing import Any
from huggingface_hub import snapshot_download
class Pipeline:
    """Wrapper that downloads a community pipeline from the Hugging Face Hub
    and exposes it as a callable.

    The repository snapshot is appended to ``sys.path`` so that its
    ``pipeline.py`` module (defining ``PreTrainedPipeline``) becomes
    importable; if the snapshot ships a ``requirements.txt`` it is
    pip-installed into the current environment first.
    """
    def __init__(self, model_id: str):
        # Download (or reuse a cached copy of) the full model repository.
        filepath = snapshot_download(model_id)
        # Make the snapshot importable for the `from pipeline import ...` below.
        sys.path.append(filepath)
        if "requirements.txt" in os.listdir(filepath):
            # Install the pipeline's dependencies, reusing the shared pip
            # cache directory from the environment.
            cache_dir = os.environ["PIP_CACHE"]
            subprocess.check_call(
                [
                    sys.executable,
                    "-m",
                    "pip",
                    "install",
                    "--cache-dir",
                    cache_dir,
                    "-r",
                    os.path.join(filepath, "requirements.txt"),
                ]
            )
        # Imported here (not at module top) because the module only exists
        # once the snapshot is on sys.path.
        from pipeline import PreTrainedPipeline
        self.model = PreTrainedPipeline(filepath)
        if hasattr(self.model, "sampling_rate"):
            self.sampling_rate = self.model.sampling_rate
        else:
            # 16000 by default if not specified
            self.sampling_rate = 16000
    def __call__(self, inputs: Any) -> Any:
        """Run the wrapped pipeline on *inputs* and return its output."""
        return self.model(inputs)
class PipelineException(Exception):
    """Module-specific exception type for pipeline errors.

    Not raised in this chunk -- presumably raised/caught by callers.
    """
    pass
| StarcoderdataPython |
1685936 | import logging
import pandas as pd
from eurito_indicators.pipeline.hSBM_Topicmodel.sbmtm import sbmtm
def train_model(corpus, doc_ids):
    """Trains top sbm model on tokenised corpus.

    Args:
        corpus: tokenised documents (one token sequence per document).
        doc_ids: document identifiers, aligned with corpus.

    Returns:
        The fitted ``sbmtm`` hierarchical topic model.
    """
    model = sbmtm()
    model.make_graph(corpus, documents=doc_ids)
    model.fit()
    return model
def post_process_model(model, top_level, top_words=5):
    """Post-process a hierarchical topic model into a document-topic mix.

    Args:
        model: A hsbm topic model
        top_level: The level of resolution at which we want to extract topics
        top_words: top_words to include in the topic name

    Returns:
        A topic mix df with topics and weights by document
    """
    # Name each topic after its top words, joined with underscores.
    logging.info("Creating topic names")
    word_mix = model.topics(l=top_level)
    topic_names = [
        "_".join(word for word, _ in words[:top_words])
        for words in word_mix.values()
    ]
    # Build the document x topic weight matrix.
    logging.info("Extracting topics")
    return pd.DataFrame(
        model.get_groups(l=top_level)["p_tw_d"].T,
        columns=topic_names,
        index=model.documents,
    )
def filter_topics(topic_df, presence_thr, prevalence_thr):
    """Filter uninformative ("stop") topics out of a topic-mix dataframe.

    Args:
        topic_df (pd.DataFrame): document x topic weight matrix
        presence_thr (float): minimum weight for a topic to count as
            present in a document
        prevalence_thr (float): topics present in more than this share of
            documents are considered generic and excluded

    Returns:
        tuple: (filtered dataframe, list of dropped topic names)
    """
    # Share of documents in which each topic exceeds the presence threshold
    topic_prevalence = (
        topic_df.applymap(lambda x: x > presence_thr)
        .mean()
        .sort_values(ascending=False)
    )
    # Highly prevalent topics are generic / uninformative.
    # (Renamed from `filter_topics`, which shadowed this function's name.)
    stop_topics = topic_prevalence.index[topic_prevalence > prevalence_thr].tolist()
    # We also remove short topics (with less than two ngrams)
    stop_topics = stop_topics + [
        x for x in topic_prevalence.index if len(x.split("_")) <= 2
    ]
    topic_df_filt = topic_df.drop(stop_topics, axis=1)
    return topic_df_filt, stop_topics
def post_process_model_clusters(model, top_level, cl_level, top_thres=1, top_words=5):
    """Extract the topic mix and document clusters from a hierarchical topic model.

    Args:
        model: A hsbm topic model
        top_level: The level of resolution at which we want to extract topics
        cl_level: The level of resolution at which we want to extract clusters
        top_thres: The maximum share of documents where a topic appears.
            1 means that all topics are included
        top_words: number of words to use when naming topics

    Returns:
        A topic mix df with topics and weights by document, and the raw
        cluster assignment from ``model.clusters`` (structure depends on
        the sbmtm implementation; every document is forced into a cluster).
    """
    # Reuse the shared extraction/naming logic for the document-topic mix.
    topic_mix_ = post_process_model(model, top_level, top_words)
    # Share of documents in which each topic carries any weight at all.
    topic_prevalence = (
        topic_mix_.applymap(lambda x: x > 0).mean().sort_values(ascending=False)
    )
    # Keep topics below the prevalence ceiling; ubiquitous topics are
    # uninformative. (Renamed from `filter_topics`, which shadowed the
    # module-level filter_topics() function.)
    kept_topics = topic_prevalence.index[topic_prevalence < top_thres]
    topic_mix = topic_mix_[kept_topics]
    # Extract the clusters to which different documents belong (we force all
    # documents to belong to a cluster)
    cluster_assignment = model.clusters(l=cl_level, n=len(model.documents))
    return topic_mix, cluster_assignment
| StarcoderdataPython |
156828 | <filename>salt/hg/files/hg/src/hglookup.py
# hglookup.py
#
# Lookup a revision hash in a bunch of different hgwebdir repos.
# Also includes special treatment for subversion revisions from
# the CPython repo.
#
# Written by <NAME>, 2010.
# Updated by <NAME>, 2017.
from __future__ import print_function
import io
import json
import os
from wsgiref.simple_server import make_server
class hglookup(object):
    """WSGI app that redirects revision identifiers to the right web viewer.

    Path formats:
      /GITHEXHASH or /gitGITHEXHASH  -> github.com/python/cpython commit
      /HGHEXNODE or /hgHGHEXNODE     -> hg.python.org/cpython revision
      /rSVNREVISION                  -> svn.python.org revision view
    Anything else gets a 404 with a usage message.
    """

    def __init__(self, hg_commits, verbose=False):
        """hg_commits: iterable of full hg hex hashes; their 12-character
        prefixes are also registered so short nodes resolve too."""
        self.verbose = verbose
        hg_commits = set(hg_commits)
        hg_commits.update(commit[:12] for commit in list(hg_commits))
        self.hg_commits = frozenset(hg_commits)

    def successful_response(self, response, url):
        """Issue a 303 redirect to *url* with an empty body."""
        # (Fixed: dropped an unused `content_type` local variable.)
        headers = [("Content-Type", 'text/plain'),
                   ("Location", url)]
        response("303 See Other", headers)
        return []

    def failed_response(self, response):
        """Issue a 404 with a short usage message."""
        response("404 Not Found", [('Content-Type', 'text/plain')])
        # WSGI bodies must be byte strings (PEP 3333); b'' literals also
        # work unchanged on Python 2, where the original str body did.
        return [b'Usage: /lookup/GITHEXHASH or gitGITHEXHASH (10, 11, or 40 hex characters)\n',
                b'/lookup/HGHEXNODE or hgHGHEXNODE (12 or 40 hex characters)\n',
                b'/lookup/rSVNREVISION\n']

    def __call__(self, env, response):
        node = env.get('PATH_INFO', '').strip('/')
        if not node:
            return self.failed_response(response)
        elif node.startswith('hg') or node in self.hg_commits:
            # Known hg node (full hash or 12-char prefix) or explicit hg prefix.
            if node.startswith('hg'):
                node = node[len('hg'):]
            url = 'https://hg.python.org/cpython/rev/' + node
            return self.successful_response(response, url)
        elif node.startswith('r'):
            # Legacy subversion revision numbers from the CPython repo.
            url = 'http://svn.python.org/view?view=revision&revision=' + node[1:]
            return self.successful_response(response, url)
        elif not node.startswith('git') and len(node) not in {10, 11, 40}:
            return self.failed_response(response)
        else:
            # Default: treat as a git commit hash (optionally 'git'-prefixed).
            if node.startswith('git'):
                node = node[len('git'):]
            url = 'https://github.com/python/cpython/commit/' + node
            return self.successful_response(response, url)
if __name__ == '__main__':
    HG_COMMITS = 'hg_commits.json'
    print("Loading hg commits from the JSON file ...")
    # Use `hg log --template "\"{node}\",\n` to help generate the JSON file.
    with io.open(HG_COMMITS, 'r', encoding="utf-8") as file:
        hg_commits = json.load(file)
    application = hglookup(hg_commits, verbose=True)
    # Serve the lookup app on all interfaces, port 8123, until interrupted.
    httpd = make_server('', 8123, application)
    sa = httpd.socket.getsockname()
    print("Serving HTTP on", sa[0], "port", sa[1], "...")
    httpd.serve_forever()
| StarcoderdataPython |
85056 | <reponame>WaffleHacks/application-portal
"""add application flagged
Revision ID: 108677b68119
Revises: 0cf086aa6b96
Create Date: 2022-05-30 21:45:48.595341+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "108677b68119"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
    """Add the non-null ``applications.flagged`` column and backfill it."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "applications",
        sa.Column(
            "flagged",
            sa.Boolean(),
            nullable=False,
            server_default=sa.text("false"),
        ),
    )
    # ### end Alembic commands ###

    # Determine if any applications should be flagged based on their
    # graduation year and birthdate. The conditions for being flagged are:
    #   - graduation year is less than the last year (this is to allow new
    #     grads to participate)
    #   - age is less than 13 years
    #
    # BUG FIX: the original birthdate comparison used "<=", which flags
    # applicants born ON OR BEFORE (current_date - 13 years), i.e. everyone
    # aged 13 OR OLDER -- the opposite of the stated rule. Being under 13
    # means being born AFTER that cutoff date, hence ">".
    op.execute(
        "UPDATE applications "
        "SET flagged = true WHERE "
        "graduation_year < date_part('year', current_date) - 1 OR "
        "to_date(date_of_birth, 'DD-MM-YYYY') > current_date - '13 years'::interval"
    )
def downgrade():
    """Drop the ``applications.flagged`` column (reverses upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("applications", "flagged")
    # ### end Alembic commands ###
| StarcoderdataPython |
284908 | <reponame>geoanalytics-ca/xcube-cds
# MIT License
#
# Copyright (c) 2020 Brockmann Consult GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os
import pathlib
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import xarray as xr
from xcube.core.store import DataDescriptor
from xcube.core.store import DatasetDescriptor
from xcube.core.store import VariableDescriptor
from xcube.util.jsonschema import JsonArraySchema
from xcube.util.jsonschema import JsonDateSchema
from xcube.util.jsonschema import JsonNumberSchema
from xcube.util.jsonschema import JsonObjectSchema
from xcube.util.jsonschema import JsonStringSchema
from xcube_cds.store import CDSDatasetHandler
class ERA5DatasetHandler(CDSDatasetHandler):
    """CDS dataset handler for the ERA5 reanalysis dataset family.

    Metadata for each supported dataset (variable table, bounding box,
    resolution, time range/period, CRS) is loaded from the JSON files that
    sit next to this module, one file per dataset. A ``data_id`` is either
    a bare CDS dataset id or ``"<dataset_id>:<product_type>"`` for datasets
    that offer several product types.
    """

    def __init__(self):
        self._read_dataset_info()

    def _read_dataset_info(self):
        """Read dataset information from JSON files"""
        # Information for each supported dataset is contained in a
        # semi-automatically generated JSON file. The largest part of this
        # file is the "variables" table. This table maps request parameter
        # names to NetCDF variable names, and was generated by the following
        # process:
        #
        # 1. Obtain the complete list of valid request parameters via the Web
        #    interface by selecting every box and copying the parameter names
        #    out of the generated API request.
        #
        # 2. For each request parameter, make a separate API request
        #    containing only that request parameter, producing a NetCDF file
        #    containing only the corresponding output parameter.
        #
        # 3. Read the name of the single output variable from the NetCDF file
        #    and collate it with the original request parameter. (Also read
        #    the long_name and units attributes.)
        #
        # In this way we are guaranteed to get the correct NetCDF variable
        # name for each request parameter, without having to trust that the
        # documentation is correct.
        #
        # Unfortunately this procedure doesn't work with all datasets, since
        # some (e.g. satellite-soil-moisture) don't have a one-to-one mapping
        # from request variables to output variables.
        #
        # Table fields are:
        # 1. request parameter name in CDS API
        # 2. NetCDF variable name (NB: not always CF-conformant)
        # 3. units from NetCDF attributes
        # 4. "long name" from NetCDF attributes
        ds_info_path = pathlib.Path(__file__).parent
        all_pathnames = [os.path.join(ds_info_path, leafname)
                         for leafname in os.listdir(ds_info_path)]
        pathnames = filter(lambda p: os.path.isfile(p) and p.endswith('.json'),
                           all_pathnames)
        # Maps dataset id (JSON leafname minus the ".json" suffix) to the
        # parsed metadata dictionary.
        self._dataset_dicts = {}
        for pathname in pathnames:
            with open(pathname, 'r') as fh:
                ds_dict = json.load(fh)
            _, leafname = os.path.split(pathname)
            self._dataset_dicts[leafname[:-5]] = ds_dict
        # The CDS API delivers data from these datasets in an unhelpful format
        # (issue #6) and sometimes with non-increasing time (issue #5), so
        # for now they are blacklisted.
        blacklist = frozenset([
            'reanalysis-era5-land-monthly-means:monthly_averaged_reanalysis_by_hour_of_day',
            'reanalysis-era5-single-levels-monthly-means:monthly_averaged_ensemble_members_by_hour_of_day',
            'reanalysis-era5-single-levels-monthly-means:monthly_averaged_reanalysis_by_hour_of_day'
        ])
        # We use a list rather than a set, since we want to preserve ordering
        # and the number of elements is small.
        self._valid_data_ids = []
        self._data_id_to_human_readable = {}
        for ds_id, ds_dict in self._dataset_dicts.items():
            # product_type is actually a request parameter, but we implement
            # it as a suffix to the data_id to make it possible to specify
            # requests using only the standard, known store parameters.
            product_types = ds_dict['product_types']
            if len(product_types) == 0:
                # No product types defined (i.e. there is just a single,
                # implicit product type), so we just use the dataset ID
                # without a suffix.
                if ds_id not in blacklist:
                    self._valid_data_ids.append(ds_id)
                    self._data_id_to_human_readable[ds_id] = \
                        ds_dict['description']
            else:
                # One data_id per (dataset, product type) pair; pt entries are
                # [id, description] pairs from the JSON file.
                for pt_id, pt_desc in product_types:
                    data_id = ds_id + ':' + pt_id
                    if data_id not in blacklist:
                        self._valid_data_ids.append(data_id)
                        self._data_id_to_human_readable[data_id] = \
                            ds_dict['description'] + ' \N{EN DASH} ' + pt_desc

    def get_supported_data_ids(self) -> List[str]:
        """Return a copy of the ordered list of supported data ids."""
        return list(self._valid_data_ids)

    def get_open_data_params_schema(self, data_id: Optional[str] = None) -> \
            JsonObjectSchema:
        """Build the JSON schema describing the open-parameters for *data_id*.

        NOTE(review): despite the Optional annotation, passing data_id=None
        raises TypeError on the ``':' in data_id`` test below — confirm that
        callers always supply a concrete data_id.
        """
        # If the data_id has a product type suffix, remove it.
        dataset_id = data_id.split(':')[0] if ':' in data_id else data_id
        ds_info = self._dataset_dicts[dataset_id]
        variable_info_table = ds_info['variables']
        bbox = ds_info['bbox']
        params = dict(
            dataset_name=JsonStringSchema(
                min_length=1,
                enum=list(self._valid_data_ids),
                description='identifier of the requested dataset'),
            variable_names=JsonArraySchema(
                items=(JsonStringSchema(
                    min_length=0,
                    enum=[cds_api_name
                          for cds_api_name, _, _, _ in variable_info_table]
                )),
                unique_items=True,
                nullable=True,
                description='identifiers of the requested variables'
            ),
            crs=JsonStringSchema(
                nullable=True,
                default=ds_info['crs'],
                enum=[None, ds_info['crs']],
                description='co-ordinate reference system'),
            # W, S, E, N (will be converted to N, W, S, E)
            bbox=JsonArraySchema(items=(
                JsonNumberSchema(minimum=bbox[0], maximum=bbox[2]),
                JsonNumberSchema(minimum=bbox[1], maximum=bbox[3]),
                JsonNumberSchema(minimum=bbox[0], maximum=bbox[2]),
                JsonNumberSchema(minimum=bbox[1], maximum=bbox[3])),
                description='bounding box (min_x, min_y, max_x, max_y)'),
            spatial_res=JsonNumberSchema(
                minimum=ds_info['spatial_res'],
                maximum=10,
                default=ds_info['spatial_res'],
                description='spatial resolution'),
            time_range=JsonDateSchema.new_range(),
            time_period=JsonStringSchema(
                const=ds_info['time_period'],
                description='time aggregation period'),
        )
        required = [
            'variable_names',
            'bbox',
            'spatial_res',
            'time_range',
        ]
        return JsonObjectSchema(
            properties=params,
            required=required
        )

    def get_human_readable_data_id(self, data_id: str):
        """Return the human-readable description for *data_id*."""
        return self._data_id_to_human_readable[data_id]

    def describe_data(self, data_id: str) -> DataDescriptor:
        """Build a DatasetDescriptor from the JSON metadata for *data_id*."""
        ds_info = self._dataset_dicts[data_id.split(':')[0]]
        return DatasetDescriptor(
            data_id=data_id,
            data_vars=self._create_variable_descriptors(data_id),
            crs=ds_info['crs'],
            bbox=tuple(ds_info['bbox']),
            spatial_res=ds_info['spatial_res'],
            time_range=tuple(ds_info['time_range']),
            time_period=ds_info['time_period'],
            open_params_schema=self.get_open_data_params_schema(data_id)
        )

    def _create_variable_descriptors(self, data_id: str):
        """Return one VariableDescriptor per entry in the variables table."""
        dataset_id = data_id.split(':')[0]
        return [
            VariableDescriptor(
                name=netcdf_name,
                # dtype string format not formally defined as of 2020-06-18.
                # t2m is actually stored as a short with scale and offset in
                # the NetCDF file, but converted to float by xarray on
                # opening: see http://xarray.pydata.org/en/stable/io.html .
                dtype='float32',
                dims=('time', 'latitude', 'longitude'),
                attrs=dict(units=units, long_name=long_name))
            for (api_name, netcdf_name, units, long_name)
            in self._dataset_dicts[dataset_id]['variables']
        ]

    def transform_params(self, plugin_params: Dict, data_id: str) -> \
            Tuple[str, Dict]:
        """Transform supplied parameters to CDS API format.

        :param plugin_params: parameters in form expected by this plugin
        :param data_id: the ID of the requested dataset
        :return: parameters in form expected by the CDS API
        """
        dataset_name, product_type = \
            data_id.split(':') if ':' in data_id else (data_id, None)
        # We need to split out the bounding box co-ordinates to re-order them.
        x1, y1, x2, y2 = plugin_params['bbox']
        # Translate our parameters (excluding time parameters) to the CDS API
        # scheme.
        resolution = plugin_params['spatial_res']
        variable_names_param = plugin_params['variable_names']
        # noinspection PySimplifyBooleanCheck
        if variable_names_param == []:
            # The "empty list of variables" case should be handled by the
            # main store class; if an empty list gets this far, something's
            # wrong.
            raise ValueError('variable_names may not be an empty list.')
        elif variable_names_param is None:
            # None means "all variables": take every request parameter name
            # from the variables table.
            variable_table = self._dataset_dicts[dataset_name]['variables']
            variable_names = [line[0] for line in variable_table]
        else:
            variable_names = variable_names_param
        params_combined = {
            'variable': variable_names,
            # For the ERA5 dataset, we need to crop the area by half a
            # cell-width. ERA5 data are points, but xcube treats them as
            # cell centres. The bounds of a grid of cells are half a
            # cell-width outside the bounds of a grid of points, so we have
            # to crop each edge by half a cell-width to end up with the
            # requested bounds.
            # See https://confluence.ecmwf.int/display/CKB/ERA5%3A+What+is+the+spatial+reference#ERA5:Whatisthespatialreference-Visualisationofregularlat/londata
            'area': [y2 - resolution / 2,
                     x1 + resolution / 2,
                     y1 + resolution / 2,
                     x2 - resolution / 2],
            # Note: the "grid" parameter is not exposed via the web
            # interface, but is described at
            # https://confluence.ecmwf.int/display/CKB/ERA5%3A+Web+API+to+CDS+API
            'grid': [resolution, resolution],
            'format': 'netcdf'
        }
        if product_type is not None:
            params_combined['product_type'] = product_type
        # Convert the time range specification to the nearest equivalent
        # in the CDS "orthogonal time units" scheme.
        time_params_from_range = self.transform_time_params(
            self.convert_time_range(plugin_params['time_range']))
        params_combined.update(time_params_from_range)
        # If any of the "years", "months", "days", and "hours" parameters
        # were passed, they override the time specifications above.
        time_params_explicit = \
            self.transform_time_params(plugin_params)
        params_combined.update(time_params_explicit)
        # Transform singleton list values into their single members, as
        # required by the CDS API.
        desingletonned = self.unwrap_singleton_values(params_combined)
        return dataset_name, desingletonned

    def read_file(self, dataset_name: str, cds_api_params: Dict,
                  file_path: str, temp_dir: str):
        """Open the downloaded NetCDF file as an xarray Dataset."""
        # decode_cf is the default, but it's clearer to make it explicit.
        return xr.open_dataset(file_path, decode_cf=True)
| StarcoderdataPython |
5191257 | <filename>titanic/Titanic.py
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import pprint
def clean_data(df, drop_passenger_id):
    """Feature-engineer the raw Titanic dataframe for model training.

    Transformations (matching the original pipeline):
      * Sex        -> integer column ``Sex_Val`` (sorted unique values -> 0..n)
      * Embarked   -> missing values become 'S' (the dominant port), then
                      one-hot columns ``Embarked_Val_*``
      * Fare       -> missing values filled with the mean fare
      * Age        -> copied to ``AgeFill``; missing values replaced by the
                      median age of the passenger's (Sex_Val, Pclass) group
                      (median because the age distribution is right-skewed)
      * FamilySize -> SibSp + Parch
      * drops Name, Sex, Ticket, Cabin, Embarked, Age, SibSp, Parch and,
        when *drop_passenger_id* is true, PassengerId.

    The (possibly mutated) engineered frame is returned.
    """
    # Map Sex ('female'/'male') to 0/1 by the sorted order of observed values.
    sexes = sorted(df['Sex'].unique())
    genders_mapping = dict(zip(sexes, range(0, len(sexes) + 1)))
    df['Sex_Val'] = df['Sex'].map(genders_mapping).astype(int)

    # Since the vast majority of passengers embarked in 'S', assign missing
    # Embarked values to 'S', then expand to one-hot indicator columns.
    df = df.replace({'Embarked': {np.nan: 'S'}})
    df = pd.concat([df, pd.get_dummies(df['Embarked'], prefix='Embarked_Val')],
                   axis=1)

    # BUG FIX: the original guard was `len(df[df['Fare'].isnull()] > 0)`
    # (misplaced parenthesis, comparing string columns to 0) and then ran
    # `df.replace({None: avg_fare})`, which would overwrite *every* missing
    # value in the frame with the mean fare. Fill only the Fare column.
    if df['Fare'].isnull().any():
        df['Fare'] = df['Fare'].fillna(df['Fare'].mean())

    # Fill missing ages with the median age of passengers of the same sex
    # and class. transform('median') is index-aligned, unlike the fragile
    # groupby(...).apply(fillna) construct in the original.
    df['AgeFill'] = df['Age']
    df['AgeFill'] = df['AgeFill'].fillna(
        df.groupby(['Sex_Val', 'Pclass'])['AgeFill'].transform('median'))

    # Define a new feature FamilySize that is the sum of Parch (parents or
    # children aboard) and SibSp (siblings or spouses aboard).
    df['FamilySize'] = df['SibSp'] + df['Parch']

    # Drop raw columns superseded by the engineered features; PassengerId is
    # kept for the test set so predictions can be joined back to it.
    df = df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
    df = df.drop(['Age', 'SibSp', 'Parch'], axis=1)
    if drop_passenger_id:
        df = df.drop(['PassengerId'], axis=1)
    return df
# --- Training ----------------------------------------------------------------
df_train = pd.read_csv('./data/train.csv')
df_train = clean_data(df_train, drop_passenger_id=True)
train_data = df_train.values
# print(df_train)

clf = RandomForestClassifier(n_estimators=100)

# Training data features, skip the first column 'Survived'
# NOTE(review): assumes 'Survived' is the first column after cleaning —
# confirm against the CSV column order.
train_features = train_data[:, 1:]

# 'Survived' column values
train_target = train_data[:, 0]

# Fit the model to our training data
clf = clf.fit(train_features, train_target)
score = clf.score(train_features, train_target)
# NOTE(review): this expression's value is discarded — presumably it was
# meant to be printed. Training-set accuracy also overstates performance.
"Mean accuracy of Random Forest: {0}".format(score)

# --- Prediction --------------------------------------------------------------
df_test = pd.read_csv('./data/test.csv')

# Data wrangle the test set and convert it to a numpy array
df_test = clean_data(df_test, drop_passenger_id=False)
test_data = df_test.values

# Get the test data features, skipping the first column 'PassengerId'
test_x = test_data[:, 1:]

# Predict the Survival values for the test data
test_y = clf.predict(test_x)

# Write PassengerId/Survived pairs in the Kaggle submission format.
df_test['Survived'] = test_y.astype(int)
df_test[['PassengerId', 'Survived']].to_csv('./data/result.csv', index=False)
| StarcoderdataPython |
6450274 | <gh_stars>0
'''
Owner - <NAME>
Email - <EMAIL>
Github - https://github.com/rawalshree
'''
import math
# Module-level cipher scratch state.
# NOTE(review): mutable module globals like these make separate cipher
# instances interfere with each other and accumulate output across calls —
# consider moving this state onto the class. Also, `global` at module
# scope is a no-op.
global plain
global cipher
global Success

Success = False
plain = ""
cipher = ""
class Railfence:
    """Columnar transposition cipher keyed by a positive integer.

    (Despite the name, encryption reads every key-th character — a columnar
    transposition rather than a zig-zag rail fence.)

    BUG FIXES vs. the original:
      * state (`key`, validity flag, output buffers) was kept in module
        globals, so a second encryption/decryption call returned the
        previous result concatenated with the new one, and setting a key on
        one instance validated every instance;
      * decryption with an invalid key returned `self.plainText`, which
        raises AttributeError unless encryption() was called first — it now
        returns the supplied cipher text unchanged.
    The public interface (setKey / encryption / decryption) is unchanged.
    """

    def __init__(self):
        # No key until setKey() succeeds with a positive integer.
        self.key = None
        self._key_valid = False

    def setKey(self, key):
        """Accept *key* if it converts to a positive int; otherwise ignore it
        silently (matching the original behaviour)."""
        try:
            value = int(key)
        except (TypeError, ValueError):
            return
        if value > 0:
            self.key = value
            self._key_valid = True

    def encryption(self, plainText):
        """Return the column read-out of *plainText*, or the text unchanged
        (after printing a warning) when no valid key is set."""
        self.plainText = plainText
        if not self._key_valid:
            print("Invalid Key")
            return plainText
        # Column x holds characters plainText[x], plainText[x+key], ...
        return ''.join(plainText[start::self.key]
                       for start in range(self.key))

    def decryption(self, cipherText):
        """Invert encryption(); returns *cipherText* unchanged (after a
        warning) when no valid key is set."""
        self.cipherText = cipherText
        if not self._key_valid:
            print("Invalid Key")
            return cipherText
        length = len(cipherText)
        diff = length % self.key          # number of "long" columns
        width = int(math.ceil(length / (self.key * 1.0)))
        out = []
        for column in range(width):
            z = column
            while z < length and len(out) < length:
                # Positions below width*diff belong to long columns (stride
                # `width`); beyond that, columns are one shorter.
                if diff == 0 or z < width * diff:
                    out.append(cipherText[z])
                    z += width
                else:
                    out.append(cipherText[z])
                    z += width - 1
        return ''.join(out)
9769153 | <reponame>PedruuH/Sinais_e_Multimidea<gh_stars>0
"""
@authors:
<NAME> - 11611ECP021
<NAME> - 11721ECP009
<NAME> - 11611ECP017
@def: Trabalho final de Sinais e Multimidia.
"""
import sys, cv2, numpy as np, imutils, matplotlib.pyplot as plt, scipy.ndimage
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QTimer
# Kernels utilizado com base no programa criado pelo professor
# Convolution kernels selectable by combo-box index in the UI, based on the
# program provided by the professor. Rough legend (standard kernels —
# confirm against the UI labels):
#   0      identity
#   1-4    edge detection / Laplacian variants (4: 5x5)
#   5      Scharr-style horizontal gradient
#   6-7    Sobel (horizontal / vertical)
#   8-11   line detection (horizontal, vertical, 45 deg, 135 deg)
#   12     box blur, 13: 3x3 Gaussian blur, 14: 5x5 Gaussian blur
#   15     sharpen, 16: 5x5 unsharp-mask style
kernel={
    0: np.array([[0,0,0],[0,1,0],[0,0,0]], dtype=float),
    1: np.array([[1,0,-1],[0,0,0],[-1,0,1]], dtype=float),
    2: np.array([[0,-1,0],[-1,4,-1],[0,-1,0]], dtype=float),
    3: np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]], dtype=float),
    4: np.array([[0,0,-1,0,0],[0,-1,-2,-1,0],[-1,-2,16,-2,-1],[0,-1,-2,-1,0],[0,0,-1,0,0]], dtype=float),
    5: np.array([[-3, 0, 3],[-10,0,10],[-3, 0, 3]], dtype=float),
    6: np.array([[-1,-2,-1],[0,0,0],[1,2,1]], dtype=float),
    7: np.array([[-1,0,1],[-2,0,2],[-1,0,1]], dtype=float),
    8: np.array([[-1,-1,-1],[2,2,2],[-1,-1,-1]], dtype=float),
    9: np.array([[-1,2,-1],[-1,2,-1],[-1,2,-1]], dtype=float),
    10: np.array([[-1,-1,2],[-1,2,-1],[2,-1,-1]], dtype=float),
    11: np.array([[2,-1,-1],[-1,2,-1],[-1,-1,2]], dtype=float),
    12: (1/9)*np.ones((3,3), dtype=float),
    13: (1/16)*np.array([[1,2,1],[2,4,2],[1,2,1]], dtype=float),
    14: (1/256)*np.array([[1,4,6,4,1],[4,16,24,16,4],[6,24,36,24,6],[4,16,24,16,4],[1,4,6,4,1]], dtype=float),
    15: np.array([[0,-1,0],[-1,5,-1],[0,-1,0]], dtype=float),
    16: (-1/256)*np.array([[1,4,6,4,1],[4,16,24,16,4],[6,24,-476,24,6],[4,16,24,16,4],[1,4,6,4,1]], dtype=float),
}
"""
@def: Classe principal
"""
class MainWindow(QWidget):
    """Main application window.

    Captures webcam frames on a timer, converts them to grayscale, applies
    the convolution kernel selected in the combo box, optionally runs the
    FFT blur detector, and displays the result.
    """

    def __init__(self):
        # Build the UI from the Qt Designer layout "home.ui".
        super().__init__()
        self.dlg = uic.loadUi("home.ui")
        self.dlg.show()
        # Timer drives the frame-grab/processing loop.
        self.timer = QTimer()
        self.timer.timeout.connect(self.initialize_camera)
        # Clicking the start button opens the camera and starts the timer.
        self.dlg.qbutton_start.clicked.connect(self.timer_control)

    def initialize_camera(self):
        """Grab one frame, convolve it with the chosen kernel, update the UI."""
        # Scale the frame to the display label's width.
        width_camera = self.dlg.qlabel_camera.width()
        # NOTE(review): `ret` is not checked; a failed grab would crash in
        # resize() — confirm the camera is always available once started.
        ret, image = self.cap.read()
        image = imutils.resize(image, width=width_camera)
        # Convert to grayscale and apply the selected kernel.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        conv = np.uint8(np.round(convolution(gray, kernel[self.dlg.qcombobox_conv.currentIndex()])))
        # Build a single-channel (indexed) QImage from the convolved frame.
        height, width = conv.shape
        channel = 1
        step = channel * width
        qImg = QImage(conv.data.tobytes(), width, height, step, QImage.Format_Indexed8)
        # If the blur radio button is checked, run the blur detector on the
        # (pre-convolution) grayscale frame with the user-supplied threshold.
        if self.dlg.qradiobutton_blur.isChecked():
            (mean, blurry) = blur_detect(gray, size=60, thresh=int(self.dlg.qlineedit_th.text()), showing=False)
            # BUG FIX: the original expression
            #   "Resultado: " + "Está borrado" if blurry else "Não está borrado"
            # parsed as ("Resultado: " + "Está borrado") if blurry else ...,
            # dropping the "Resultado: " prefix in the not-blurry case.
            text = "Resultado: " + ("Está borrado" if blurry else "Não está borrado")
            self.dlg.qlabel_resultado.setText(str(text) + " - " + str(mean))
        else:
            self.dlg.qlabel_resultado.setText('Resultado: Blur não selecionado')
        # Show the processed frame.
        self.dlg.qlabel_camera.setPixmap(QPixmap.fromImage(qImg))
        self.dlg.qlabel_camera.setScaledContents(True)

    def timer_control(self):
        """Open the default camera and start the 20 ms frame timer (idempotent)."""
        if not self.timer.isActive():
            self.cap = cv2.VideoCapture(0)
            self.timer.start(20)
# @def: Convolução
def my_convolve2d(a, conv_filter):
    """'valid'-mode 2-D cross-correlation of *a* with a 3x3 *conv_filter*.

    For an (H, W) input the result is (H-2, W-2): each output cell is the
    element-wise product of the filter with the 3x3 window at that position,
    summed. The kernel is NOT flipped (correlation, not true convolution).
    """
    # windows[i, j] is `a` shifted by (i, j); stacking gives shape
    # (3, 3, H-2, W-2).
    windows = np.array([
        [a[0:-2, 0:-2], a[0:-2, 1:-1], a[0:-2, 2:]],
        [a[1:-1, 0:-2], a[1:-1, 1:-1], a[1:-1, 2:]],
        [a[2:, 0:-2], a[2:, 1:-1], a[2:, 2:]],
    ])
    # Weight each shifted copy by its filter coefficient, then collapse the
    # two filter axes.
    weighted = np.einsum('ij,ijkl->ijkl', conv_filter, windows)
    return weighted.sum(axis=(0, 1))
def convolution(im, omega):
    """Convolve image *im* with kernel *omega* via scipy.ndimage.convolve
    ('reflect' boundary handling; output has the same shape as *im*).

    NOTE: ndimage.convolve flips the kernel (true convolution); for the
    symmetric kernels in this module that equals correlation, but gradient
    kernels (Sobel/Scharr) are effectively mirrored.
    """
    return scipy.ndimage.convolve(im, omega, output=None, mode='reflect', cval=0.0, origin=0)
# @def: Deteção de Blur
def blur_detect(image, size=60, thresh=10, showing=False):
# recupero as dimensões da imagem e derivo as coordenadas do centro (x, y) dividindo por dois
(h, w) = image.shape
(deltaX, deltaY) = (int(w / 2.0), int(h / 2.0))
# Calculo a FFT -> encontro a transformação de frequência -> mudo componente de frequência zero para o centro
fft = np.fft.fft2(image)
fftShift = np.fft.fftshift(fft)
# verifique se estamos vendo nossa saida
if showing:
# calcular a magnitude da transformada
magnitude = 20 * np.log(np.abs(fftShift))
# Entrada original
(fig, ax) = plt.subplots(1, 2, )
ax[0].imshow(image, cmap="gray")
ax[0].set_title("Input")
ax[0].set_xticks([])
ax[0].set_yticks([])
# Imagem de magnitude
ax[1].imshow(magnitude, cmap="gray")
ax[1].set_title("Magnitude Spectrum")
ax[1].set_xticks([])
ax[1].set_yticks([])
# Exibir com o show da matplotlib
plt.show()
# remover frequências baixas -> aplicar o deslocamento inverso -> aplicar o FFT inverso
fftShift[deltaY - size:deltaY + size, deltaX - size:deltaX + size] = 0
fftShift = np.fft.ifftshift(fftShift)
recon = np.fft.ifft2(fftShift)
# Calculo o espectro de magnitude da imagem reconstruída -> Calcule a média dos valores de magnitude
magnitude = 20 * np.log(np.abs(recon))
mean = np.mean(magnitude)
#se o valor médio das magnitudes for menor que o valor limite <-> imagem borrada
return (mean, mean <= thresh)
# Inicializando a aplicação
# Application entry point.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Create the main window (it shows its UI from __init__).
    mainWindow = MainWindow()
    # Enter the Qt event loop; exit the process with its return code.
    sys.exit(app.exec_())
9698585 | from connection.scooter_controller import ScooterController
if __name__ == '__main__':
    # Entry point: construct the controller and hand over control.
    # handle() presumably runs the service loop — defined in
    # connection.scooter_controller (project code).
    controller = ScooterController()
    controller.handle()
| StarcoderdataPython |
5042538 | import sys
def exchange(numbers, i):
    """Rotate *numbers* so that everything after index *i* comes first,
    followed by the prefix up to and including index *i*."""
    return numbers[i + 1:] + numbers[:i + 1]
def max_even_index(numbers, even):
    """Return the index of the LAST occurrence of the largest value whose
    parity matches *even* (0 -> even, 1 -> odd), or 'No matches' when no
    value has that parity."""
    best_value = None
    best_index = -1
    for index, value in enumerate(numbers):
        if value % 2 != even:
            continue
        # `>=` keeps the last occurrence among ties.
        if best_value is None or value >= best_value:
            best_value = value
            best_index = index
    return 'No matches' if best_index == -1 else best_index
def min_even_index(numbers, even):
    """Return the index of the LAST occurrence of the smallest value whose
    parity matches *even* (0 -> even, 1 -> odd), or 'No matches' when no
    value has that parity."""
    best_value = None
    best_index = -1
    for index, value in enumerate(numbers):
        if value % 2 != even:
            continue
        # `<=` keeps the last occurrence among ties.
        if best_value is None or value <= best_value:
            best_value = value
            best_index = index
    return 'No matches' if best_index == -1 else best_index
def first(numbers, event, counter):
    """Return up to *counter* of the first values in *numbers* whose parity
    matches *event* (0 -> even, 1 -> odd).

    A negative *counter* (callers pass -1) never equals the running count,
    so every matching value is returned.
    """
    matched = []
    for value in numbers:
        if len(matched) == counter:
            break
        if value % 2 == event:
            matched.append(value)
    return matched
def last(numbers, even, counter):
    """Return up to *counter* of the last values in *numbers* whose parity
    matches *even* (0 -> even, 1 -> odd), preserving their original order.

    A negative *counter* (callers pass -1) never equals the running count,
    so every matching value is returned.
    """
    collected = []
    for value in reversed(numbers):
        if len(collected) == counter:
            break
        if value % 2 == even:
            collected.append(value)
    # Collected back-to-front; flip to restore original order.
    return collected[::-1]
# Read the working list of integers, then process commands until 'end'.
numbers_list = input().split(' ')
numbers_list = [int(x) for x in numbers_list]

command_input = input().split(' ')
while command_input[0] != 'end':
    command = command_input[0]
    if command == 'exchange':
        # exchange <index>: rotate so elements after <index> come first.
        index = int(command_input[1])
        if len(numbers_list) > index >= 0:
            numbers_list = exchange(numbers_list, index)
        else:
            print('Invalid index')
    elif command == 'max':
        # max <even|odd>: index of the largest matching value.
        criteria = command_input[1]
        res = max_even_index(numbers_list, 0 if criteria == "even" else 1)
        print(res)
    elif command == 'min':
        # min <even|odd>: index of the smallest matching value.
        criteria = command_input[1]
        res = min_even_index(numbers_list, 0 if criteria == "even" else 1)
        print(res)
    elif command == 'first':
        # first <count> <even|odd>: the first <count> matching values.
        criteria = command_input[2]
        index = int(command_input[1])
        # NOTE(review): the guard accepts count == -1 (which makes first()
        # return every match) — confirm that is intended.
        if len(numbers_list) >= index >= -1:
            print(first(numbers_list, 0 if criteria == "even" else 1, index))
        else:
            print('Invalid count')
    elif command == 'last':
        # last <count> <even|odd>: the last <count> matching values, in order.
        criteria = command_input[2]
        index = int(command_input[1])
        if len(numbers_list) >= index >= -1:
            print(last(numbers_list, 0 if criteria == "even" else 1, index))
        else:
            print('Invalid count')
    command_input = input().split(' ')

# Final state of the list after all commands.
print(numbers_list)
| StarcoderdataPython |
3277711 | import numpy as np
import os
from utils import data_paths, data_splitting
def test_write_numpy_array_to_file_returns_none():
    """write_numpy_array_to_file is a write-only API: it should return None."""
    input_array = np.ones((4, 3))
    array_file_name = 'array_test_file.npy'
    array_file_path = os.path.join(data_paths.DATA_DIR_PATH, array_file_name)
    try:
        return_value = data_splitting.write_numpy_array_to_file(input_array,
                                                                array_file_path)
        assert return_value is None
    finally:
        # Always remove the file the call may have created; it may not exist
        # if the call failed before writing.
        try:
            os.remove(array_file_path)
        except FileNotFoundError:
            pass
def test_write_numpy_array_to_file_creates_file():
    """write_numpy_array_to_file should create the target file on disk."""
    input_array = np.ones((4, 3))
    array_file_name = 'array_test_file.npy'
    array_file_path = os.path.join(data_paths.DATA_DIR_PATH, array_file_name)
    # Guard against clobbering a real file that happens to share the name.
    assert not os.path.isfile(array_file_path), ('{} is for test use only'
                                                 .format(array_file_path))
    try:
        data_splitting.write_numpy_array_to_file(input_array, array_file_path)
        assertion_message = 'File not created by write_numpy_array_to_file'
        assert os.path.isfile(array_file_path), assertion_message
    finally:
        # Clean up regardless of the assertion outcome.
        try:
            os.remove(array_file_path)
        except FileNotFoundError:
            pass
def test_write_numpy_array_to_file_creates_expected_file():
    """The file written must round-trip to an array equal to the input."""
    input_array = np.ones((4, 3))
    # Copy up front so a mutation by the function under test would be caught.
    expected_array = np.copy(input_array)
    array_file_name = 'array_test_file.npy'
    array_file_path = os.path.join(data_paths.DATA_DIR_PATH, array_file_name)
    # Guard against clobbering a real file that happens to share the name.
    assert not os.path.isfile(array_file_path), ('{} is for test use only'
                                                 .format(array_file_path))
    try:
        data_splitting.write_numpy_array_to_file(input_array, array_file_path)
        actual_array = np.load(array_file_path)
        np.testing.assert_array_equal(actual_array, expected_array)
    finally:
        # Clean up regardless of the assertion outcome.
        try:
            os.remove(array_file_path)
        except FileNotFoundError:
            pass
def test_create_numpy_array_from_generator_returns_numpy_array():
    """create_numpy_array_from_generator should yield an int32 ndarray even
    when the generator produces fewer items than the overestimated shape."""
    overestimated_shape = (10, 1)

    def input_generator():
        yield 0

    return_value = data_splitting.create_numpy_array_from_generator(
        generator=input_generator,
        overestimated_shape=overestimated_shape
    )
    assert isinstance(return_value, np.ndarray)
    assert return_value.dtype == np.int32
def test_create_numpy_array_from_generator_returns_expected_array():
    """The produced array must equal the generated rows, trimmed to the
    actual row count (5) rather than the overestimated 10."""
    expected_array = np.random.randint(0, 999, (5, 4)).astype(np.int32)
    overestimated_shape = (10, 4)

    def input_generator():
        # Yield each row of the expected array in order.
        for thing in expected_array:
            yield thing

    actual_array = data_splitting.create_numpy_array_from_generator(
        generator=input_generator,
        overestimated_shape=overestimated_shape
    )
    np.testing.assert_array_equal(actual_array, expected_array)
| StarcoderdataPython |
6593170 | <filename>ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/util/psi/data/incorrect/keyword/in_3.py
# Test fixture: DELIBERATELY invalid Python for AST-transformation tests
# (the resource path places it under incorrect/keyword/). `break` outside a
# loop is a SyntaxError — do not "fix" this file.
# Misusing the keyword <break>
names = ['pam', 'jim', 'michael']
if 'jim' in names:
    print('jim found')
    break
138522 | from __future__ import division, print_function
try:
from phenix.program_template import ProgramTemplate
except ImportError:
from libtbx.program_template import ProgramTemplate
import os
import libtbx.phil
from libtbx.utils import Sorry
from libtbx import easy_pickle
import mmtbx.ringer.emringer
# =============================================================================
program_citations = libtbx.phil.parse('''
citation {
article_id = emringer1
authors = <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
title = Side-chain-directed model and map validation for 3D Electron Cryomicroscopy.
journal = Nature Methods
volume = 10
pages = 943-46
year = 2015
doi_id = "10.1038/nmeth.3541"
pmid = 26280328
external = True
}
citation {
article_id = emringer2
authors = <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
title = Automated electron-density sampling reveals widespread conformational polymorphism in proteins.
journal = Protein Sci.
volume = 7
pages = 1420-31
year = 2010
doi_id = ""
pmid = 20499387
external = True
}
''')
# =============================================================================
master_phil_str = '''
include scope libtbx.phil.interface.tracking_params
include scope mmtbx.ringer.emringer.master_params
map_label = 2FOFCWT,PH2FOFCWT
.type = str
.input_size = 200
.short_caption = 2Fo-FC map labels
.help = Labels for 2Fo-Fc map coefficients
show_gui = False
.type = bool
output_base = None
.type = str
output_dir = None
.type = path
.short_caption = Output directory
quiet = False
.type = bool
.short_caption = no graphs
.help = Don't output files or graphs
'''
# =============================================================================
class Program(ProgramTemplate):
description = '''
Program for calculating the EMRinger score.\n
Minimum required inputs:
Model file
Map file (or file with map coefficients)
How to run:
phenix.emringer model.pdb map.ccp4
'''
datatypes = ['model', 'real_map', 'phil', 'map_coefficients']
citations = program_citations
master_phil_str = master_phil_str
# ---------------------------------------------------------------------------
def validate(self):
print('Validating inputs', file=self.logger)
self.data_manager.has_models(raise_sorry=True)
if not (self.data_manager.has_real_maps() or
self.data_manager.has_map_coefficients()):
raise Sorry("Supply a map file or a file with map coefficients.")
elif (self.data_manager.has_real_maps() and
self.data_manager.has_map_coefficients()):
raise Sorry("Supply either a map file or a file with map coefficients.")
# ---------------------------------------------------------------------------
def run(self):
map_inp = None
miller_array = None
print('Using model: %s' % self.data_manager.get_default_model_name(),
file=self.logger)
model = self.data_manager.get_model()
if self.data_manager.has_map_coefficients():
miller_arrays = self.data_manager.get_miller_arrays()
miller_array = self.find_label(miller_arrays = miller_arrays)
print('Using miller array: %s' % miller_array.info().label_string(),
file=self.logger)
elif self.data_manager.has_real_maps():
print('Using map: %s' % self.data_manager.get_default_real_map_name(),
file=self.logger)
map_inp = self.data_manager.get_real_map()
print("CCP4 map statistics:", file=self.logger)
map_inp.show_summary(out=self.logger, prefix=" ")
if (self.params.output_base is None) :
pdb_base = os.path.basename(self.data_manager.get_default_model_name())
self.params.output_base = os.path.splitext(pdb_base)[0] + "_emringer"
if not self.params.quiet:
plots_dir = self.params.output_base + "_plots"
if (not os.path.isdir(plots_dir)) :
os.makedirs(plots_dir)
task_obj = mmtbx.ringer.emringer.emringer(
model = model,
miller_array = miller_array,
map_inp = map_inp,
params = self.params,
out = self.logger)
task_obj.validate()
task_obj.run()
self.results = task_obj.get_results()
ringer_result = self.results.ringer_result
if not self.params.quiet:
# save as pickle
easy_pickle.dump("%s.pkl" % self.params.output_base, ringer_result)
print ('Wrote %s.pkl' % self.params.output_base, file=self.logger)
# save as CSV
csv = "\n".join([ r.format_csv() for r in ringer_result])
open("%s.csv" % self.params.output_base, "w").write(csv)
print ('Wrote %s.csv' % self.params.output_base, file=self.logger)
scoring_result = self.results.scoring_result
scoring_result.show_summary(out = self.logger)
#rolling_result = self.results.rolling_result
# It would be good to have central code for this
# ---------------------------------------------------------------------------
def find_label(self, miller_arrays):
best_guess = None
best_labels = []
all_labels = []
miller_array = None
for array in miller_arrays:
label = array.info().label_string().replace(" ", "")
if (self.params.map_label is not None):
if (label == self.params.map_label.replace(" ", "")):
miller_array = array
return miller_array
elif (self.params.map_label is None):
if (array.is_complex_array()):
all_labels.append(label)
if (label.startswith("2FOFCWT") or label.startswith("2mFoDFc") or
label.startswith("FWT")) :
best_guess = array
best_labels.append(label)
if (miller_array is None):
if (len(all_labels) == 0) :
raise Sorry("No valid (pre-weighted) map coefficients found in file.")
elif (len(best_labels) == 0) :
raise Sorry("Couldn't automatically determine appropriate map labels. "+
"Choices:\n %s" % " \n".join(all_labels))
elif (len(best_labels) > 1) :
raise Sorry("Multiple appropriate map coefficients found in file. "+
"Choices:\n %s" % "\n ".join(best_labels))
elif (len(best_labels) == 1):
miller_array = best_guess
print(" Guessing %s for input map coefficients"% best_labels[0], file=self.logger)
return miller_array
# ---------------------------------------------------------------------------
def get_results(self):
    """Return the results object produced by a prior call to run()."""
    return self.results
| StarcoderdataPython |
3493242 | from django import forms
from .models import Request, Restriction
class DateInput(forms.DateInput):
    """Django DateInput widget rendered as an HTML5 ``<input type="date">``."""
    input_type = 'date'
class TextInput(forms.TextInput):
    """Plain text input widget (explicit ``type="text"``).

    NOTE(review): forms.TextInput already uses input_type 'text'; this
    subclass appears redundant — confirm whether it is kept for symmetry
    with DateInput above.
    """
    input_type = 'text'
class RequestForm(forms.ModelForm):
    """ModelForm for creating/editing a leave Request."""
    class Meta:
        model = Request
        fields = ['leave_type', 'start', 'end', 'reason', 'attachment']
        # NOTE(review): the date fields use plain TextInput even though a
        # DateInput widget is defined above — confirm whether an HTML5 date
        # picker was intended here.
        widgets = {
            'start': TextInput(),
            'end': TextInput()
        }
class RestricionForm(forms.ModelForm):
    """ModelForm for creating/editing a Restriction (blocked date range).

    NOTE(review): the class name is misspelled ("Restricion"); it is left
    unchanged because renaming would break existing imports/callers.
    """
    class Meta:
        model = Restriction
        fields = ['from_date', 'to_date', 'reason']
        widgets = {
            'from_date': TextInput(),
            'to_date': TextInput()
        }
class EmailForm(forms.Form):
    """Form for composing an e-mail about a request.

    Fields: recipient address, subject, whether to include the request's
    attachment, and a short message body.
    """
    email = forms.EmailField(max_length=50)
    subject = forms.CharField(widget=forms.TextInput, max_length=50)
    include_attachment = forms.BooleanField(widget=forms.CheckboxInput, initial=True, required=False)
    message = forms.CharField(widget=forms.Textarea, max_length=200)
| StarcoderdataPython |
8140098 | #!/usr/bin/python3
# -*- encoding="UTF-8" -*-
import sys
sys.path.append("..")
import parameters
def addNode(stringNode=''):
    """Register *stringNode* in parameters.NodesDict with the next free index.

    Empty names and the ground node '0' are ignored, as are names that have
    already been registered.
    """
    if stringNode == '' or stringNode == '0':
        # Nothing to add: empty name, or '0' which is always GND.
        return
    if stringNode not in parameters.NodesDict:
        # Index new nodes sequentially by current dictionary size.
        parameters.NodesDict[stringNode] = len(parameters.NodesDict)
| StarcoderdataPython |
3286995 | <filename>enigma/rotor/encoder.py
"""The Encoder class."""
from .wiring import Wiring
class Encoder:
    """Base class for encoders."""

    # Human-readable encoder name; expected to be set by subclasses.
    name = None

    def __init__(self, wiring: str = "YRUHQSLDPXNGOKMIEBFZCWVJAT"):
        """Set wiring and position encodings.

        :param wiring: 26-letter substitution alphabet (the default is
            presumably the Enigma reflector-B mapping — TODO confirm).
        """
        self.wiring = Wiring(wiring)
| StarcoderdataPython |
4921113 | <reponame>ngilles/adventofcode-2020
import operator as op
from functools import reduce
from utils import puzzle_input
# Advent of Code 2020, day 6: groups of answer lines separated by blank lines.
example = "\n".join(
    [
        "abc", "",
        "a", "b", "c", "",
        "ab", "ac", "",
        "a", "a", "a", "a", "",
        "b",
    ]
)

with puzzle_input(6, example, False) as f:
    raw_groups = f.read().split("\n\n")

# One set of answered questions per person, grouped.
answer_groups = [[set(person) for person in block.split("\n")] for block in raw_groups]

# Part 1: union within each group ("anyone answered yes").
print(sum(len(reduce(op.or_, group)) for group in answer_groups))

# Part 2: intersection within each group ("everyone answered yes").
print(sum(len(reduce(op.and_, group)) for group in answer_groups))
| StarcoderdataPython |
1928874 | import os
import time
import subprocess
import threading
import socket
import sys, uuid
import platform
import mlflow
import ray
import inspect
from textwrap import dedent
from azureml.core import Workspace, Experiment, Environment, Datastore, Dataset, ScriptRunConfig, Run
from azureml.core.runconfig import PyTorchConfiguration
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
import logging
class Ray_On_AML():
    """Create and manage a Ray cluster on Azure Machine Learning compute.

    Two usage modes:

    * Interactive (e.g. from a Compute Instance): pass an AML ``Workspace``
      as ``ws``; :meth:`getRay` provisions (or reuses) an AML compute
      cluster, submits a worker job, and returns the AML ``Run`` plus a
      connected ``ray`` module.
    * Job mode (inside an AML distributed PyTorch job): the node's role is
      derived from the ``RANK`` environment variable; rank 0 becomes the
      Ray head node, other ranks become workers.

    Reference dependency pins this class was developed against:
    pyarrow >= 6.0.1, dask >= 2021.11.2, adlfs >= 2021.10.0,
    fsspec == 2021.10.1, ray[default] == 1.9.0.
    """

    def __init__(self, ws=None, base_conda_dep =['adlfs','pip'], base_pip_dep = ['ray[tune]==1.9.0', 'xgboost_ray', 'dask','pyarrow >= 4.0.1','fsspec==2021.10.1'], vnet_rg = None, compute_cluster = 'cpu-cluster', vm_size='STANDARD_DS3_V2',vnet='rayvnet', subnet='default', exp ='ray_on_aml', maxnode =5, additional_conda_packages=[],additional_pip_packages=[], job_timeout=60000):
        """Store cluster/environment settings; no remote calls happen here.

        :param ws: AML Workspace (required only for interactive use)
        :param vnet_rg: resource group of the vnet; defaults to the
            workspace's resource group when None
        :param maxnode: maximum number of nodes for the AML compute cluster
        """
        self.ws = ws
        self.base_conda_dep = base_conda_dep
        self.base_pip_dep = base_pip_dep
        self.vnet_rg = vnet_rg
        self.compute_cluster = compute_cluster
        self.vm_size = vm_size
        self.vnet = vnet
        self.subnet = subnet
        self.exp = exp
        self.maxnode = maxnode
        self.additional_conda_packages = additional_conda_packages
        self.additional_pip_packages = additional_pip_packages
        # NOTE(review): job_timeout is stored but not referenced anywhere in
        # this class — confirm whether it should bound the worker job.
        self.job_timeout = job_timeout

    def flush(self, proc, proc_log):
        """Mirror *proc*'s stdout to our stdout and *proc_log* until it exits."""
        while True:
            proc_out = proc.stdout.readline()
            if proc_out == "" and proc.poll() is not None:
                proc_log.close()
                break
            elif proc_out:
                sys.stdout.write(proc_out)
                proc_log.write(proc_out)
                proc_log.flush()

    def get_ip(self):
        """Return this machine's primary outbound IP (no traffic is sent)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # The address doesn't even have to be reachable; connecting a UDP
            # socket only selects the local interface/IP.
            s.connect(('10.255.255.255', 1))
            IP = s.getsockname()[0]
        except Exception:
            IP = '127.0.0.1'
        finally:
            s.close()
        return IP

    def startRayMaster(self):
        """Start a Ray head node on this machine and return its IP."""
        conda_env_name = sys.executable.split('/')[-3]
        print(conda_env_name)
        # Try to run inside the active conda environment first.
        cmd = f'. /anaconda/etc/profile.d/conda.sh && conda activate {conda_env_name} && ray stop && ray start --head --port=6379 --object-manager-port=8076'
        try:
            subprocess.check_output(cmd, shell=True)
        except:
            # User runs this in the default environment; go ahead without
            # activating a conda env.
            cmd = 'ray stop && ray start --head --port=6379 --object-manager-port=8076'
            subprocess.check_output(cmd, shell=True)
        ip = self.get_ip()
        return ip

    def checkNodeType(self):
        """Classify this node: 'interactive' (no RANK), 'head' (RANK 0) or 'worker'."""
        rank = os.environ.get("RANK")
        if rank is None:
            return "interactive"  # Not inside a distributed AML job.
        elif rank == '0':
            return "head"
        else:
            return "worker"

    def startRay(self, master_ip=None):
        """Join this node to the Ray cluster at *master_ip* (blocks while the
        ray process runs, mirroring its output to logs/worker_<rank>_log.txt)."""
        ip = self.get_ip()
        print("- env: MASTER_ADDR: ", os.environ.get("MASTER_ADDR"))
        print("- env: MASTER_PORT: ", os.environ.get("MASTER_PORT"))
        print("- env: RANK: ", os.environ.get("RANK"))
        print("- env: LOCAL_RANK: ", os.environ.get("LOCAL_RANK"))
        print("- env: NODE_RANK: ", os.environ.get("NODE_RANK"))
        rank = os.environ.get("RANK")
        if master_ip is None:
            master_ip = os.environ.get("MASTER_ADDR")
        print("- my rank is ", rank)
        print("- my ip is ", ip)
        print("- master is ", master_ip)
        if not os.path.exists("logs"):
            os.makedirs("logs")
        print("free disk space on /tmp")
        os.system(f"df -P /tmp")
        cmd = f"ray start --address={master_ip}:6379 --object-manager-port=8076"
        print(cmd)
        worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
        worker_proc = subprocess.Popen(
            cmd.split(),
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        self.flush(worker_proc, worker_log)

    def getRay(self, init_ray_in_worker=False, logging_level=logging.ERROR):
        """Entry point: start/join/return a Ray cluster appropriate to this node.

        Interactive: returns ``(aml_run, ray)``. Head node: returns the
        initialized ``ray`` module. Worker node: joins the cluster and
        returns ``ray`` only when *init_ray_in_worker* is True.
        """
        if self.checkNodeType() == "interactive" and self.ws is None:
            # Interactive scenario requires a workspace object.
            raise Exception("For interactive use, please pass AML workspace to the init")
        if self.checkNodeType() == "interactive":
            return self.getRayInteractive(logging_level)
        elif self.checkNodeType() == 'head':
            print("head node detected")
            self.startRayMaster()
            time.sleep(10)  # wait for the worker nodes to start first
            ray.init(address="auto", dashboard_port=5000, ignore_reinit_error=True)
            return ray
        else:
            print("workder node detected")
            self.startRay()
            if init_ray_in_worker:
                ray.init(address="auto", dashboard_port=5000, ignore_reinit_error=True)
                return ray

    def getRayInteractive(self, logging_level=logging.ERROR):
        """Provision an AML cluster, submit the Ray worker job and connect.

        Bug fix: this method previously took no ``logging_level`` parameter
        even though :meth:`getRay` calls it with one (TypeError) and the
        body used the undefined name ``logging_level`` (NameError).
        """
        # Start a head node locally; workers will connect back to it.
        master_ip = self.startRayMaster()
        ws_detail = self.ws.get_details()
        ws_rg = ws_detail['id'].split("/")[4]
        # Reuse the compute cluster if it already exists.
        try:
            ray_cluster = ComputeTarget(workspace=self.ws, name=self.compute_cluster)
            print('Found existing cluster, use it.')
        except ComputeTargetException:
            if self.vnet_rg is None:
                vnet_rg = ws_rg
            else:
                vnet_rg = self.vnet_rg
            compute_config = AmlCompute.provisioning_configuration(vm_size=self.vm_size,
                                                                   min_nodes=0, max_nodes=self.maxnode,
                                                                   vnet_resourcegroup_name=vnet_rg,
                                                                   vnet_name=self.vnet,
                                                                   subnet_name=self.subnet)
            ray_cluster = ComputeTarget.create(self.ws, self.compute_cluster, compute_config)
            ray_cluster.wait_for_completion(show_output=True)

        # Build the job environment: current Python version + configured deps.
        python_version = ["python=" + platform.python_version()]
        conda_packages = python_version + self.additional_conda_packages + self.base_conda_dep
        pip_packages = self.base_pip_dep + self.additional_pip_packages
        rayEnv = Environment(name="rayEnv")
        dockerfile = r"""
FROM mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04
ARG HTTP_PROXY
ARG HTTPS_PROXY
# set http_proxy & https_proxy
ENV http_proxy=${HTTP_PROXY}
ENV https_proxy=${HTTPS_PROXY}
RUN http_proxy=${HTTP_PROXY} https_proxy=${HTTPS_PROXY} apt-get update -y \
    && mkdir -p /usr/share/man/man1 \
    && http_proxy=${HTTP_PROXY} https_proxy=${HTTPS_PROXY} apt-get install -y openjdk-8-jdk \
    && mkdir /raydp \
    && pip --no-cache-dir install raydp
WORKDIR /raydp
# unset http_proxy & https_proxy
ENV http_proxy=
ENV https_proxy=
"""
        # The image is fully defined by the Dockerfile above.
        rayEnv.docker.base_image = None
        rayEnv.docker.base_dockerfile = dockerfile
        conda_dep = CondaDependencies()
        for conda_package in conda_packages:
            conda_dep.add_conda_package(conda_package)
        for pip_package in pip_packages:
            conda_dep.add_pip_package(pip_package)
        rayEnv.python.conda_dependencies = conda_dep

        # Write the worker-side bootstrap script submitted as the AML job.
        # Bug fix: the generated script used argparse without importing it.
        os.makedirs(".tmp", exist_ok=True)
        source_file_content = """
import os
import time
import subprocess
import threading
import socket
import sys, uuid
import platform
import argparse
import ray

def flush(proc, proc_log):
    while True:
        proc_out = proc.stdout.readline()
        if proc_out == "" and proc.poll() is not None:
            proc_log.close()
            break
        elif proc_out:
            sys.stdout.write(proc_out)
            proc_log.write(proc_out)
            proc_log.flush()

def startRay(master_ip=None):
    ip = socket.gethostbyname(socket.gethostname())
    print("- env: MASTER_ADDR: ", os.environ.get("MASTER_ADDR"))
    print("- env: MASTER_PORT: ", os.environ.get("MASTER_PORT"))
    print("- env: RANK: ", os.environ.get("RANK"))
    print("- env: LOCAL_RANK: ", os.environ.get("LOCAL_RANK"))
    print("- env: NODE_RANK: ", os.environ.get("NODE_RANK"))
    rank = os.environ.get("RANK")
    master = os.environ.get("MASTER_ADDR")
    print("- my rank is ", rank)
    print("- my ip is ", ip)
    print("- master is ", master)
    if not os.path.exists("logs"):
        os.makedirs("logs")
    print("free disk space on /tmp")
    os.system(f"df -P /tmp")
    cmd = f"ray start --address={master_ip}:6379 --object-manager-port=8076"
    worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
    worker_proc = subprocess.Popen(
        cmd.split(),
        universal_newlines=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    flush(worker_proc, worker_log)
    time.sleep(60000)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--master_ip")
    args, unparsed = parser.parse_known_args()
    master_ip = args.master_ip
    startRay(master_ip)
"""
        source_file = open(".tmp/source_file.py", "w")
        n = source_file.write(dedent(source_file_content))
        source_file.close()

        src = ScriptRunConfig(source_directory='.tmp',
                              script='source_file.py',
                              environment=rayEnv,
                              compute_target=ray_cluster,
                              distributed_job_config=PyTorchConfiguration(node_count=self.maxnode),
                              arguments=["--master_ip", master_ip]
                              )
        run = Experiment(self.ws, self.exp).submit(src)
        time.sleep(10)
        # Reconnect the local ray client to the freshly started head node.
        ray.shutdown()
        ray.init(address="auto", dashboard_port=5000, ignore_reinit_error=True, logging_level=logging_level)
        self.run = run
        self.ray = ray
        # Block until the worker job is actually running, then hand back.
        while True:
            active_run = Run.get(self.ws, run.id)
            if active_run.status != 'Running':
                print("Waiting: Cluster status is in ", active_run.status)
                time.sleep(10)
            else:
                return active_run, ray

    def shutdown(self, end_all_runs=False):
        """Cancel the backing AML run(s) and shut down the local Ray client.

        :param end_all_runs: when True, cancel *all* running runs in the
            experiment, not only the one started by this object.
        """
        def end_all_run():
            exp = Experiment(self.ws, self.exp)
            runs = exp.get_runs()
            for run in runs:
                if run.status == 'Running':
                    print("Get active run ", run.id)
                    run.cancel()

        # Bug fix: previously tested `if end_all_run:` — the inner *function*
        # object, which is always truthy — so every call cancelled all runs
        # regardless of the flag.
        if end_all_runs:
            end_all_run()
        try:
            self.run.cancel()
        except Exception:
            print("Run does not exisit, finding active runs to cancel")
            end_all_run()
        try:
            self.ray.shutdown()
        except Exception:
            print("Cannot shutdown ray")
| StarcoderdataPython |
9775410 | import base64
from dnslib import DNSRecord, RR, TXT, QTYPE
def server_encrypt(dns_packet: DNSRecord, data, question):
    """Pack *data* (base64-encoded) into a TXT answer for *question*.

    Returns the raw bytes of the reply packet built from *dns_packet*.
    """
    encoded = base64.b64encode(data)
    reply = dns_packet.reply()
    reply.add_answer(RR(question, rtype=QTYPE.TXT, rdata=TXT(encoded)))
    return reply.pack()
def server_decrypt(dns_packet: bytes):
    """Decode a DNS-tunnel query packet.

    Returns ``(payload, question, query)`` where *payload* is the
    base64-decoded data carried in the name's labels, *question* is the
    full queried name as a str, and *query* is the parsed DNSRecord.

    Fixes: removed a dead ``i = 0`` assignment and replaced the manual
    concatenation loops with equivalent ``join`` calls.
    """
    query = DNSRecord.parse(dns_packet)
    labels = query.questions[0].qname.label
    # Full queried name, e.g. b"chunkN...chunk1.sub.example.com".
    question = bytes.decode(b'.'.join(labels))
    # Payload chunks are stored in reverse label order; the last three
    # labels (presumably the tunnel's domain suffix — TODO confirm) are
    # skipped before reassembly.
    payload = b''.join(labels[::-1][3:])
    return base64.b64decode(payload), question, query
| StarcoderdataPython |
def f_to_c(f):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    c = (f - 32) * 5 / 9
    # Bug fix: previously returned the input `f` unchanged instead of `c`.
    return c
f = 58.0
c = f_to_c(f)
# Bug fix: the message lacked spaces ("of58.0is...") and wrongly labeled the
# converted value "in fahrenheit"; it is the Celsius result.
print(str(f) + " fahrenheit is " + str(c) + " in celsius")
| StarcoderdataPython |
6692577 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Implementation of the ``wgs_cnv_export`` step.
The ``wgs_cnv_export`` step takes as the input the results of the ``wgs_cnv_annotation`` step and
uses ``varfish-annotator-cli annotate`` commmand to create files fit for import into VarFish
Server.
==========
Stability
==========
TODO
==========
Step Input
==========
The WGS SV export step uses Snakemake sub workflows for using the result of the
``wgs_cnv_export`` step.
===========
Step Output
===========
TODO
====================
Global Configuration
====================
TODO
=====================
Default Configuration
=====================
.. include:: DEFAULT_CONFIG_wgs_cnv.rst
==================
Parallel Execution
==================
Parallel execution is not performed currently.
"""
import os
import sys
from biomedsheets.shortcuts import GermlineCaseSheet, is_not_background
from snakemake.io import expand
from snappy_pipeline.utils import dictify, listify
from snappy_pipeline.workflows.abstract import (
BaseStep,
BaseStepPart,
LinkOutStepPart,
WritePedigreeStepPart,
)
from snappy_pipeline.workflows.ngs_mapping import NgsMappingWorkflow
from snappy_pipeline.workflows.wgs_cnv_annotation import WgsCnvAnnotationWorkflow
from snappy_pipeline.workflows.wgs_cnv_calling import WgsCnvCallingWorkflow
__author__ = "<NAME> <<EMAIL>>"
#: Extension of files
EXTS = (".tsv.gz", ".tsv.gz.md5")
#: Infixes to use for file name generation
INFIXES = ("gts", "feature-effects", "db-infos")
#: Default configuration for the wgs_cnv_export step
DEFAULT_CONFIG = r"""
# Default configuration wgs_cnv_export.
step_config:
wgs_cnv_export:
path_wgs_cnv_annotation: ../wgs_cnv_annotation
tools_ngs_mapping: null
tools_wgs_cnv_calling: null
path_refseq_ser: REQUIRED # REQUIRED: path to RefSeq .ser file
path_ensembl_ser: REQUIRED # REQUIRED: path to ENSEMBL .ser file
path_db: REQUIRED # REQUIRED: spath to annotator DB file to use
"""
class VarfishAnnotatorAnnotateStepPart(BaseStepPart):
    """Annotate VCF file using "varfish-annotator annotate"."""

    #: Step-part name used in Snakemake rule/wildcard lookup.
    name = "varfish_annotator"

    def __init__(self, parent):
        super().__init__(parent)
        #: Sentinel output path template marking completion.
        self.base_path_out = (
            "work/{mapper}.{var_caller}.varfish_annotated.{index_ngs_library}/out/.done"
        )
        # Build shortcut from index library name to pedigree
        self.index_ngs_library_to_pedigree = {}
        for sheet in self.parent.shortcut_sheets:
            self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)

    @dictify
    def get_input_files(self, action):
        """Return path to pedigree input file"""
        # @dictify collects the yielded (key, path) pairs into a dict.
        assert action == "annotate"
        yield "ped", "work/write_pedigree.{index_ngs_library}/out/{index_ngs_library}.ped"
        tpl = (
            "output/{mapper}.{var_caller}.annotated.{index_ngs_library}/out/"
            "{mapper}.{var_caller}.annotated.{index_ngs_library}"
        )
        KEY_EXT = {"vcf": ".vcf.gz", "tbi": ".vcf.gz.tbi"}
        # Resolve annotated VCF + index via the upstream sub-workflow.
        wgs_cnv_annotation = self.parent.sub_workflows["wgs_cnv_annotation"]
        for key, ext in KEY_EXT.items():
            yield key, wgs_cnv_annotation(tpl + ext)

    @dictify
    def get_output_files(self, action):
        """Return output files for the filtration"""
        # Yields one TSV (plus .md5) per infix, e.g. "gts" -> key "gts".
        assert action == "annotate"
        prefix = (
            "work/{mapper}.{var_caller}.varfish_annotated.{index_ngs_library}/out/"
            "{mapper}.{var_caller}.varfish_annotated.{index_ngs_library}"
        )
        for infix in INFIXES:
            key = infix.replace("-", "_")
            yield key, prefix + ".%s.tsv.gz" % infix
            yield key + "_md5", prefix + ".%s.tsv.gz.md5" % infix

    @dictify
    def _get_log_file(self, action):
        """Return mapping of log-file keys to path templates."""
        assert action == "annotate"
        prefix = (
            "work/{mapper}.{var_caller}.varfish_annotated.{index_ngs_library}/log/"
            "{mapper}.{var_caller}.varfish_annotated.{index_ngs_library}"
        )
        key_ext = (
            ("wrapper", ".wrapper.py"),
            ("log", ".log"),
            ("conda_info", ".conda_info.txt"),
            ("conda_list", ".conda_list.txt"),
        )
        for key, ext in key_ext:
            yield key, prefix + ext

    @classmethod
    def update_cluster_config(cls, cluster_config):
        """Update cluster configuration with resource requirements"""
        cluster_config["wgs_cnv_export_varfish_annotator_annotate_svs"] = {
            "mem": 7 * 1024 * 2,
            "time": "100:00",
            "ntasks": 2,
        }

    def get_params(self, action):
        """Return a function that computes wrapper parameters from wildcards."""
        assert action == "annotate"

        def get_params_func(wildcards):
            # NOTE(review): is_wgs starts as True and the loop can only set
            # it to True again (returning at the first WGS library), so the
            # result is always {"is_wgs": True, ...}. Confirm whether the
            # initial value was meant to be False.
            result = {"is_wgs": True, "step_name": "wgs_cnv_export"}
            pedigree = self.index_ngs_library_to_pedigree[wildcards.index_ngs_library]
            for donor in pedigree.donors:
                if (
                    donor.dna_ngs_library
                    and donor.dna_ngs_library.extra_infos.get("libraryType") == "WGS"
                ):
                    result["is_wgs"] = True
                    return result
            return result

        return get_params_func
class WgsCnvExportWorkflow(BaseStep):
    """Perform germline WGS SV export"""

    #: Workflow (step) name.
    name = "wgs_cnv_export"
    #: Shortcut-sheet class used to interpret the biomedsheets.
    sheet_shortcut_class = GermlineCaseSheet

    @classmethod
    def default_config_yaml(cls):
        """Return default config YAML, to be overwritten by project-specific one"""
        return DEFAULT_CONFIG

    def __init__(
        self, workflow, config, cluster_config, config_lookup_paths, config_paths, workdir
    ):
        super().__init__(
            workflow,
            config,
            cluster_config,
            config_lookup_paths,
            config_paths,
            workdir,
            (WgsCnvAnnotationWorkflow, WgsCnvCallingWorkflow, NgsMappingWorkflow),
        )
        # Register sub step classes so the sub steps are available
        self.register_sub_step_classes(
            (WritePedigreeStepPart, VarfishAnnotatorAnnotateStepPart, LinkOutStepPart)
        )
        # Register sub workflows
        self.register_sub_workflow("wgs_cnv_annotation", self.config["path_wgs_cnv_annotation"])
        # Copy over "tools" setting from wgs_cnv_calling/ngs_mapping if not set here
        if not self.config["tools_ngs_mapping"]:
            self.config["tools_ngs_mapping"] = self.w_config["step_config"]["ngs_mapping"]["tools"][
                "dna"
            ]
        if not self.config["tools_wgs_cnv_calling"]:
            # Remove plain ERDS as it does not do multi-sample genotyping
            tools = self.w_config["step_config"]["wgs_cnv_calling"]["tools"]
            self.config["tools_wgs_cnv_calling"] = [t for t in tools if t != "erds"]

    @listify
    def get_result_files(self):
        """Return list of result files for the NGS mapping workflow

        We will process all primary DNA libraries and perform joint calling within pedigrees
        """
        # NOTE(review): "name_patternn" (double n) is a long-standing local
        # variable typo; harmless, used consistently below.
        name_patternn = "{mapper}.{caller}.varfish_annotated.{index_library.name}"
        yield from self._yield_result_files(
            os.path.join("output", name_patternn, "out", name_patternn + ".{infix}{ext}"),
            mapper=self.config["tools_ngs_mapping"],
            caller=self.config["tools_wgs_cnv_calling"],
            infix=INFIXES,
            ext=EXTS,
        )
        yield from self._yield_result_files(
            os.path.join("output", name_patternn, "log", name_patternn + "{ext}"),
            mapper=self.config["tools_ngs_mapping"],
            caller=self.config["tools_wgs_cnv_calling"],
            ext=(
                ".log",
                ".log.md5",
                ".conda_info.txt",
                ".conda_info.txt.md5",
                ".conda_list.txt",
                ".conda_list.txt.md5",
            ),
        )

    def _yield_result_files(self, tpl, **kwargs):
        """Build output paths from path template and extension list"""
        # Skip background sheets; skip pedigrees lacking an index or an index
        # DNA NGS library (warning printed to stderr in either case).
        for sheet in filter(is_not_background, self.shortcut_sheets):
            for pedigree in sheet.cohort.pedigrees:
                if not pedigree.index:  # pragma: no cover
                    msg = "INFO: pedigree without index (names: {})"
                    print(
                        msg.format(list(sorted(d.name for d in pedigree.donors))), file=sys.stderr
                    )
                    continue
                elif not pedigree.index.dna_ngs_library:  # pragma: no cover
                    msg = "INFO: pedigree index without DNA NGS library (names: {})"
                    print(
                        msg.format(  # pragma: no cover
                            list(sorted(d.name for d in pedigree.donors))
                        ),
                        file=sys.stderr,
                    )
                    continue  # pragma: no cover
                yield from expand(tpl, index_library=[pedigree.index.dna_ngs_library], **kwargs)

    def check_config(self):
        """Check that the path to the NGS mapping is present"""
        self.ensure_w_config(
            ("step_config", "wgs_cnv_export", "path_wgs_cnv_annotation"),
            ("Path to WGS SV annotation not configured but required for WGS SV export"),
        )
| StarcoderdataPython |
9658395 | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.datasources.nagios.driver import NagiosDriver
from vitrage.tests.mocks import mock_driver
class MockNagiosDriver(NagiosDriver):
    """A nagios driver for tests.

    Instead of calling the Nagios URL to fetch data, it replays whatever
    service data was handed to it via :meth:`set_service_datas`.
    """

    def __init__(self):
        super(MockNagiosDriver, self).__init__()
        self.service_datas = None

    @staticmethod
    def get_event_types():
        return []

    def enrich_event(self, event, event_type):
        pass

    def set_service_datas(self, service_datas):
        """Store the canned per-service snapshot values to replay."""
        self.service_datas = service_datas

    def _get_alarms(self):
        # Generate exactly one mock alarm per stored service snapshot.
        collected = []
        for snap_vals in self.service_datas:
            makers = mock_driver.simple_nagios_alarm_generators(
                host_num=1,
                events_num=1,
                snap_vals=snap_vals)
            collected.append(
                mock_driver.generate_sequential_events_list(makers)[0])
        return collected
| StarcoderdataPython |
12846813 | #!/bin/env python
# -*- coding: utf-8 -*-
"""
productporter.product.views
~~~~~~~~~~~~~~~~~~~~~~~~~
product blueprint
:copyright: (c) 2014 by the ProductPorter Team.
:license: BSD, see LICENSE for more details.
"""
import datetime
import json
from flask import Blueprint, request, current_app, flash, redirect, \
url_for, jsonify, make_response
from flask.ext.login import current_user
from qiniu import Auth
from productporter.product.phapi import ProductHuntAPI
from productporter.product.models import Product, Tag
from productporter.utils.helper import render_template, pull_and_save_posts, render_markup, \
query_products, can_translate, can_review, is_online
from productporter.utils.decorators import moderator_required
from productporter.user.models import User
product = Blueprint('product', __name__)
def _tag_names(post):
"""return tag names of this post"""
tagnames = []
for tag in post.tags:
if len(tagnames) == 0:
tagnames.append(tag.name)
else:
tagnames.append('; ' + tag.name)
return ''.join(tagnames)
def _render_tags(post):
    """render tags. MUST BE THE SAME of macro 'render_tags' in macro.jinja.html"""
    tag_template = '<a class="label label-default" href="%s">%s</a>'
    rendered = [
        tag_template % (url_for('product.tags', tag=t.name), t.name)
        for t in post.tags
    ]
    rendered.append('<br/><br/>')
    return '\n'.join(rendered)
def _render_contributors(contributers, postid, locked_by, field):
    """render contributors, MUST BE THE SAME of macro 'contributors' in macro.jinja.html"""
    div_template = "<div class='translaters-list' data-postid='%s' field='%s'>edit by %s</div>"
    user_template = "<a href='%s'>@%s</a>"
    parts = []
    for user in contributers.all():
        display = user.nickname if user.nickname else user.username
        parts.append(user_template %
                     (url_for('user.profile', username=user.username), display))
    if locked_by:
        display = locked_by.nickname if locked_by.nickname else locked_by.username
        parts.append((' - locked by ' + user_template) %
                     (url_for('user.profile', username=locked_by.username), display))
    return div_template % (postid, field, '\n'.join(parts))
def _post_aquire_translate(request):
    """Acquire an edit lock on one translatable field of a post (GET handler).

    Reads ``postid`` and ``field`` ('ctagline' or 'cintro') from the query
    string and, when allowed, records the current user as the field's editor.

    JSON responses: 401 (not allowed to translate), 403 (field locked by a
    moderator), 400 (another online user is editing), 200 (lock acquired,
    current field value and tag names returned).
    """
    postid = request.args.get('postid')
    field = request.args.get('field', 'ctagline')
    current_app.logger.info('aquire translate %s for post %s' % (field, str(postid)))
    if not can_translate(current_user):
        ret = {
            'status': 'error',
            'postid': postid,
            'error': 'Please sign in first'
        }
        return make_response(jsonify(**ret), 401)
    post = Product.query.filter(Product.postid==postid).first_or_404()
    if getattr(post, field + '_locked'):
        # Bug fix: the error string contained a %s placeholder but was never
        # %-formatted, so clients literally received "%s is locked...".
        ret = {
            'status': 'error',
            'postid': postid,
            'error': '%s is locked. Please contact administrator.' % field
        }
        return make_response(jsonify(**ret), 403)
    editing_user = getattr(post, 'editing_' + field + '_user')
    # Refuse when somebody else is editing this field and is still online.
    if (editing_user) and \
        (editing_user.username != current_user.username) and \
        (is_online(editing_user)):
        ret = {
            'status': 'error',
            'postid': post.postid,
            'error': '%s is editing by %s' % \
                (field, editing_user.username)
        }
        return make_response(jsonify(**ret), 400)
    # Mark the current user as the active editor of this field.
    setattr(post, 'editing_' + field + '_user_id', current_user.id)
    post.save()
    ret = {
        'status': 'success',
        'postid': post.postid,
        'field': field,
        'value': getattr(post, field),
        'tags': _tag_names(post)
    }
    return jsonify(**ret)
# translate detail
@product.route('/translate', methods=["GET", "PUT", "POST"])
def translate():
    """
    Use GET to acquire a translation lock; PUT/POST to commit a translation.

    :param postid: The postid of product
    :param field: The field of operation, could be 'ctagline' or 'cintro'
    :param value: The value of translate field

    The PUT/POST JSON body may also carry ``canceled`` (release the lock
    without saving) and, for 'ctagline', ``tags``.
    """
    if request.method == 'GET':
        # GET = lock the field for the current user and return its value.
        return _post_aquire_translate(request)
    jsondata = None
    try:
        jsondata = json.loads(request.data)
    except ValueError:
        ret = {
            'status': 'error',
            'message': "invalid json data"
        }
        return make_response(jsonify(**ret), 405)
    postid = jsondata['postid']
    field = jsondata['field']
    if not can_translate(current_user):
        ret = {
            'status': 'error',
            'postid': postid,
            'field': field,
            'error': 'Please sign in first'
        }
        return make_response(jsonify(**ret), 401)
    post = Product.query.filter(Product.postid==postid).first_or_404()
    try:
        canceled = jsondata['canceled']
        if canceled:
            # Editing was canceled: release the edit lock without saving.
            setattr(post, 'editing_' + field + '_user_id', None)
            post.save()
            ret = {
                'status': 'success',
                'postid': post.postid,
                'field': field
            }
            return jsonify(**ret)
    except KeyError:
        # No 'canceled' key in the payload: treat as a normal commit.
        pass
    current_app.logger.info('commit %s for post %s' % (field, str(postid)))
    # deal with tags
    if field == 'ctagline':
        post.set_tags(jsondata['tags'])
    # deal with other filed data
    setattr(post, field, jsondata['value'])
    # Release the edit lock and record the user's contribution.
    setattr(post, 'editing_' + field + '_user_id', None)
    post.save()
    getattr(current_user, 'add_' + field + '_product')(post)
    ret = {
        'status': 'success',
        'postid': post.postid,
        'field': field,
        'value': render_markup(getattr(post, field)),
        'contributors': _render_contributors( \
            getattr(post, field + '_editors'), post.postid, \
            getattr(post, field + '_locked_user'), field),
        'tags': _render_tags(post)
    }
    return jsonify(**ret)
# posts list
@product.route('/', methods=["GET"])
def index():
    """Redirect the product root URL to the posts dashboard."""
    return redirect(url_for('product.posts'))
# posts list
@product.route('/posts/', methods=["GET"])
def posts():
    """Product posts home dashboard (optionally filtered by ?day=...)."""
    requested_day = request.args.get('day', '')
    day, product_list = query_products(requested_day)
    all_tags = Tag.names()
    return render_template('product/posts.jinja.html',
        post_count=len(product_list), posts=product_list, day=day, tags=all_tags)
# posts list
@product.route('/posts/<postid>', methods=["GET"])
def post_intro(postid):
    """Product detail information page (404 when the post is unknown)."""
    the_post = Product.query.filter(Product.postid == postid).first_or_404()
    return render_template('product/post_intro.jinja.html',
                           post=the_post, tags=Tag.names())
#pull products
@product.route('/pull')
def pull():
    """Pull posts from producthunt.com for the requested day (?day=...)."""
    requested_day = request.args.get('day', '')
    pulled = pull_and_save_posts(requested_day)
    return "pulled %d posts " % (pulled)
@product.route('/lock', methods=['GET'])
@moderator_required
def lock():
    """
    Lock or unlock a translatable field of a product (moderators only).

    :param postid: The postid of product
    :param op: Operation, could be 'lock' or 'unlock'
    :param field: Field, could be 'ctagline' or 'cintro'
    """
    postid = request.args.get('postid', '')
    op = request.args.get('op', 'lock')
    field = request.args.get('field', 'ctagline')
    post = Product.query.filter(Product.postid==postid).first_or_404()
    if op.lower() == 'lock':
        # Lock the field to the current moderator; `op` becomes the label of
        # the *next* available operation.
        setattr(post, field + '_locked', True)
        setattr(post, field + '_locked_user_id', current_user.id)
        op = 'Unlock'
    else:
        setattr(post, field + '_locked', False)
        setattr(post, field + '_locked_user_id', None)
        op = 'Lock'
    post.save()
    # NOTE(review): `op` is recomputed above but never returned — confirm
    # whether it was meant to be part of the JSON response for the UI.
    ret = {
        'status': 'success',
        'postid': post.postid,
        'contributors': _render_contributors( \
            getattr(post, field + '_editors'), post.postid, \
            getattr(post, field + '_locked_user'), field)
    }
    return jsonify(**ret)
@product.route('/tags/', methods=["GET"])
def tags():
    """show all products"""
    # TODO: implement the tag overview page.
    return "under construction"
@product.route('/tags/<tagname>', methods=["GET"])
def tags_name(tagname):
    """show all products by selected tag"""
    # TODO: implement the per-tag product listing.
    return "under construction"
@product.route('/dailybriefing/<day>', methods=['GET'])
@moderator_required
def dailybriefing(day):
    """Generate the daily briefing page for *day* (moderators only)."""
    qday, posts = query_products(day)
    post_count = len(posts)
    # Thanks to contributors: collect editors of every post whose Chinese
    # tagline is finished (translated and locked).
    editors = []
    for post in posts:
        if post.ctagline and post.ctagline_locked:
            editors += post.ctagline_editors
    # Thank once is enough (deduplicate via dict keys).
    editors = {}.fromkeys(editors).keys()
    return render_template('product/dailybriefing.jinja.html',
        post_count=post_count, posts=posts, day=qday, editors=editors)
@product.route('/qiniutoken', methods=['GET'])
def get_qiniu_token():
    """Issue a Qiniu upload token for the configured bucket."""
    auth = Auth(current_app.config["QINIU_ACCESS_KEY"],
                current_app.config["QINIU_SECRET_KEY"])
    uptoken = auth.upload_token(current_app.config["QINIU_BUCKET"])
    return jsonify(uptoken=uptoken)
| StarcoderdataPython |
1779629 | from crypto import __version__
from crypto.cipher import Cipher
import random
import string
import sys
class cupid (Cipher):
    """Cupid Cipher — a column-shifting cipher.

    encrypt() hides each message character on the diagonal of a square grid
    of random letters whose columns are shuffled; the shuffled column order
    (printed after the grid) is the key needed to decrypt.

    Cleanups: removed the unused ``avail_nums`` local, a dead
    ``c_order = []`` initialization, and commented-out dead code in run().
    """

    # Column order used for decryption (set from args.order in run()).
    order = ""
    # Plaintext / ciphertext being processed.
    message = ""

    def print_short_description(self):
        print("cupid:\n\tCupid Cipher\n\tA column shifting cipher.\n")

    def print_long_description(self):
        print("Cupid Cipher:\n\t Long description coming soon")

    def run(self, args):
        """Dispatch on args.Action ('info'/'encrypt'/'decrypt').

        Reads the message from stdin when no message argument is given.
        """
        if args.Action == 'info':
            return self.print_long_description()
        if not args.message:
            self.message = sys.stdin.read().strip()
        else:
            self.message = args.message
        if args.Action == 'encrypt':
            print(self.encrypt())
        elif args.Action == 'decrypt':
            self.order = args.order
            print(self.decrypt())
        else:
            print("unknown action: "+args.Action)

    def encrypt(self):
        """Return the cipher grid followed by the shuffled column order."""
        num_columns = len(self.message)
        # Random permutation of column positions — this is the key.
        c_order = list(range(num_columns))
        random.shuffle(c_order)
        grid = {}
        cipher = '\n'
        # One column per message character: the real (uppercased) character
        # sits on the diagonal, everything else is random noise.
        for col in range(num_columns):
            col_list = []
            for row in range(num_columns):
                if col == row:
                    col_list.append(self.message[col].upper())
                else:
                    col_list.append(random.choice(string.ascii_uppercase))
            grid[c_order[col]] = col_list
        # Emit columns in shuffled order, then the key on its own line.
        for col in range(num_columns):
            cipher += ' '.join(grid[int(col)]) + '\n'
        c_order = [str(x) for x in c_order]
        cipher += '\n' + ' '.join(c_order) + '\n'
        return cipher

    def decrypt(self):
        """Decryption is not implemented yet; returns an empty string."""
        # TODO: use self.order to undo the column shuffle and read the diagonal.
        cipher = ''
        return cipher
| StarcoderdataPython |
8060829 | import argparse
import json
import torch
def parse_opt():
    """Build and parse every command-line option for training / eval / test.

    Returns the parsed ``argparse.Namespace`` augmented with:
      - ``settings``: dict of network hyper-parameters,
      - ``use_gpu`` / ``device``: CUDA availability and the torch device.
    """
    parser = argparse.ArgumentParser()

    # ---- train settings --------------------------------------------------
    # train concept detector
    parser.add_argument('--concept_lr', type=float, default=4e-4)
    parser.add_argument('--concept_bs', type=int, default=80)
    parser.add_argument('--concept_resume', type=str, default='')
    parser.add_argument('--concept_epochs', type=int, default=40)
    parser.add_argument('--concept_num_works', type=int, default=2)
    # train sentiment detector
    parser.add_argument('--senti_lr', type=float, default=4e-4)
    parser.add_argument('--senti_bs', type=int, default=80)
    parser.add_argument('--senti_resume', type=str, default='')
    parser.add_argument('--senti_epochs', type=int, default=30)
    parser.add_argument('--senti_num_works', type=int, default=2)
    parser.add_argument('--img_senti_labels', type=str, default='./data/captions/img_senti_labels.json')
    parser.add_argument('--sentiment_categories', type=list, default=['positive', 'negative', 'neutral'])
    # train full model: cross-entropy stage
    parser.add_argument('--xe_lr', type=float, default=4e-4)
    parser.add_argument('--xe_bs', type=int, default=20)
    parser.add_argument('--xe_resume', type=str, default='')
    parser.add_argument('--xe_epochs', type=int, default=40)
    parser.add_argument('--xe_num_works', type=int, default=2)
    parser.add_argument('--scheduled_sampling_start', type=int, default=0)
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=4)
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05)
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25)
    # train full model: RL stage (string default is run through json.loads by argparse)
    parser.add_argument('--rl_lrs', type=json.loads,
                        default='{"cap_lr": 4e-5}')  # , "senti_lr": 4e-5, "sent_lr": 1e-3}')
    parser.add_argument('--rl_bs', type=int, default=40)
    parser.add_argument('--rl_num_works', type=int, default=2)
    parser.add_argument('--rl_resume', type=str, default='')
    parser.add_argument('--rl_senti_resume', type=str, default='checkpoint/sentiment/model-10.pth')
    parser.add_argument('--rl_epochs', type=int, default=40)
    parser.add_argument('--rl_fact_times', type=int, default=1)
    parser.add_argument('--rl_senti_times', type=int, default=0)
    # common paths / limits
    parser.add_argument('--dataset_name', type=str, default='coco', choices=['coco', 'flickr30k'])
    parser.add_argument('--corpus_type', type=str, default='part', choices=['part', 'full'])
    parser.add_argument('--captions_dir', type=str, default='./data/captions')
    parser.add_argument('--feats_dir', type=str, default='./data/features')
    parser.add_argument('--corpus_dir', type=str, default='./data/corpus')
    parser.add_argument('--checkpoint', type=str, default='./checkpoint/')
    parser.add_argument('--result_dir', type=str, default='./result/')
    # parser.add_argument('--sentence_sentiment_classifier_rnn', type=str, default='')
    parser.add_argument('--max_seq_len', type=int, default=16)
    parser.add_argument('--num_concepts', type=int, default=5)
    parser.add_argument('--num_sentiments', type=int, default=10)
    parser.add_argument('--grad_clip', type=float, default=0.1)
    # eval settings
    parser.add_argument('-e', '--eval_model', type=str, default='')
    parser.add_argument('-r', '--result_file', type=str, default='')
    parser.add_argument('--beam_size', type=int, default=3)
    # test settings
    parser.add_argument('-t', '--test_model', type=str, default='')
    parser.add_argument('-i', '--image_file', type=str, default='')
    # encoder settings
    parser.add_argument('--resnet101_file', type=str, default='./data/pre_models/resnet101.pth',
                        help='Pre-trained resnet101 network for extracting image features')
    args = parser.parse_args()

    # ---- network settings ------------------------------------------------
    args.settings = {
        'word_emb_dim': 512,
        'fc_feat_dim': 2048,
        'att_feat_dim': 2048,
        'feat_emb_dim': 512,
        'dropout_p': 0.5,
        'rnn_hid_dim': 512,
        'att_hid_dim': 512,
        'concept_mid_him': 1024,  # (sic) key name kept for downstream consumers
        'sentiment_convs_num': 2,
        # 'num_kernels_per_sentiment': 4,
        'sentiment_feat_dim': 14 * 14,
        'sentiment_fcs_num': 2,
        'text_cnn_filters': (3, 4, 5),
        'text_cnn_out_dim': 256,
    }
    args.use_gpu = torch.cuda.is_available()
    args.device = torch.device('cuda:0' if args.use_gpu else 'cpu')
    return args
| StarcoderdataPython |
365343 | '''
Experiment: PWM
Version: v1.0
Date: 2019.7
Author: 01Studio
Description: drive a passive buzzer through several pitches by changing the
frequency of a PWM output, then release the peripheral.
'''
from machine import Pin, PWM
import time

# Buzzer on pin 15; duty 512 (~50% square wave). The tone frequency is set
# per step below, so the PWM is created and configured in one statement.
Beep = PWM(Pin(15), freq=0, duty=512)

# Sound the buzzer at rising pitches, one second each
# (the original repeated the freq()/sleep_ms() pair five times).
for tone_hz in (200, 400, 600, 800, 1000):
    Beep.freq(tone_hz)
    time.sleep_ms(1000)

# Stop: release the PWM peripheral (silences the buzzer).
Beep.deinit()
| StarcoderdataPython |
4830346 | from Carver import CarverJob
| StarcoderdataPython |
1656222 | # -*- coding: utf-8 -*-
"""hategru.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fg_cClsHnbiRLknbfd1tVF2hTqP0UL4m
"""
from google.colab import drive
# Mount Google Drive: the dataset, GloVe vectors and saved models all live under /content/drive.
drive.mount('/content/drive')
import os
import pickle
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import concatenate
from keras.layers.embeddings import Embedding
from keras.layers import GRU
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from collections import defaultdict
import pprint
#from keras.models import load_model
# Load the labelled corpus: a text column ("Comment") and a hate label ("Hate").
data = pd.read_csv("/content/drive/My Drive/Hate Speech Detection/Training/Datasets/hsfinal.csv")
text_data = data["Comment"].tolist()
text_labels = data["Hate"].tolist()
# Split dataset in training and test
X_train, X_test, y_train, y_test = train_test_split(text_data, text_labels, test_size=0.1, random_state=0)
# GloVe vector size and padded sequence length used throughout.
EMBEDDING_DIMENSION = 100
MAX_SENTENCE_LENGTH = 70
# One-hot-encoding the labels
encoder = LabelEncoder()
encoder.fit(y_train)
encoded_labels_train = encoder.transform(y_train)
labels_train = to_categorical(encoded_labels_train)
# Tokenize the texts (vocabulary is fitted on the training split only)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
sequences = tokenizer.texts_to_sequences(X_train)
word_index_data = tokenizer.word_index
# Padding to make all the texts of same length
final_data = pad_sequences(sequences, maxlen=MAX_SENTENCE_LENGTH)
# Get each word of Glove embeddings in a dictionary (word -> 100-d float32 vector)
embeddings_index = {}
with open(os.path.join('/content/drive/My Drive/Hate Speech Detection/Training/glove.6B.100d.txt')) as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
# Generate Embedding Matrix from the word vectors above (row i = vector of word with index i)
embedding_matrix = np.zeros((len(word_index_data) + 1, EMBEDDING_DIMENSION))
for word, i in word_index_data.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
# Both the tasks should have same dimensions of inputs, so drop one row if the count is odd
if(len(labels_train) % 2 != 0):
    labels_train = labels_train[:-1]
    final_data = final_data[:-1]
# NOTE(review): "task 1" and "task 2" below are simply the two halves of the
# same training set fed into a two-input model — presumably a multi-task
# training trick; confirm the intent with the model's author.
# Task-1 training data and labels
t1_y_train = labels_train[0:int(len(labels_train)/2)]
t1_x_train = final_data[0:int(len(labels_train)/2)]
# Task-2 training data and labels
t2_y_train = labels_train[int(len(labels_train)/2):]
t2_x_train = final_data[int(len(labels_train)/2):]
# Two input branches share one embedding layer (GloVe-initialised, trainable)
# and one GRU encoder, then split into per-task dense heads.
t1_input_layer = Input(shape=(MAX_SENTENCE_LENGTH,))
t2_input_layer = Input(shape=(MAX_SENTENCE_LENGTH,))
shared_emb_layer = Embedding(len(word_index_data) + 1, EMBEDDING_DIMENSION, weights=[embedding_matrix] , input_length=MAX_SENTENCE_LENGTH, trainable=True)
t1_emb_layer = shared_emb_layer(t1_input_layer)
t2_emb_layer = shared_emb_layer(t2_input_layer)
shared_grnn_layer = GRU(MAX_SENTENCE_LENGTH, activation='relu')
t1_grnn_layer = shared_grnn_layer(t1_emb_layer)
t2_grnn_layer = shared_grnn_layer(t2_emb_layer)
# Merging layers: both GRU outputs are concatenated and feed BOTH heads
merge_layer = concatenate([t1_grnn_layer, t2_grnn_layer], axis=-1)
# Task-1 Specified Layers
t1_dense_1 = Dense(30, activation='relu')(merge_layer)
t1_dropout_layer = Dropout(0.3)(t1_dense_1)
t1_dense_2 = Dense(30, activation='relu')(t1_dropout_layer)
t1_dense_3 = Dense(30, activation='relu')(t1_dense_2)
t1_prediction = Dense(labels_train.shape[1], activation='softmax')(t1_dense_3)
# Task-2 Specified Layers
t2_dense_1 = Dense(20, activation='relu')(merge_layer)
t2_dropout_layer = Dropout(0.3)(t2_dense_1)
t2_dense_2 = Dense(20, activation='relu')(t2_dropout_layer)
t2_dense_3 = Dense(20, activation='relu')(t2_dense_2)
t2_prediction = Dense(labels_train.shape[1], activation='softmax')(t2_dense_3)
# Build the model: two inputs, one softmax output per task
hatespeech_model = Model(inputs=[t1_input_layer, t2_input_layer], outputs=[t1_prediction, t2_prediction])
# Compile the model
hatespeech_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001, clipvalue=1.0), metrics=['accuracy'])
# Fitting the model
hatespeech_model.fit([t1_x_train, t2_x_train], [t1_y_train, t2_y_train], epochs=3, batch_size=128)
# saving the model
hatespeech_model.save("/content/drive/My Drive/Hate Speech Detection/Production/Models/hate_model.h5")
# saving the tokenizer (needed at inference time to reproduce the word indices)
with open('/content/drive/My Drive/Hate Speech Detection/Production/Models/tokenhater.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
print('Model and Tokenizer saved....')
# Testing: tokenize/pad the held-out split with the SAME fitted tokenizer
# Tokenize
sequences_test = tokenizer.texts_to_sequences(X_test)
# Padding to make all the texts of same length
test_data = pad_sequences(sequences_test, maxlen=MAX_SENTENCE_LENGTH)
# Both the tasks should have same dimensions of inputs
if(len(y_test) % 2 != 0):
    y_test = y_test[:-1]
    test_data = test_data[:-1]
# Task-1 test data and labels (comment said "training" — these are the test halves)
t1_y_test = y_test[0:int(len(y_test)/2)]
t1_x_test = test_data[0:int(len(y_test)/2)]
# Task-2 test data and labels
t2_y_test = y_test[int(len(y_test)/2):]
t2_x_test = test_data[int(len(y_test)/2):]
y_pred_combined = hatespeech_model.predict([t1_x_test, t2_x_test])
t1_y_pred = np.argmax(y_pred_combined[0], axis=-1)
t2_y_pred = np.argmax(y_pred_combined[1], axis=-1)
# Map class indices back to the original label values for scoring
t1_y_pred = encoder.inverse_transform(t1_y_pred)
t2_y_pred = encoder.inverse_transform(t2_y_pred)
t1_acc = metrics.accuracy_score(t1_y_test, t1_y_pred)
print(f"Task 1 Accuracy: {t1_acc}")
t2_acc = metrics.accuracy_score(t2_y_test, t2_y_pred)
print(f"Task 2 Accuracy: {t2_acc}")
# NOTE(review): bare expression below is a leftover notebook cell (no effect as a script)
t1_y_test
t1_cf = metrics.confusion_matrix(t1_y_test, t1_y_pred)
print(f'The confusion matrix for Task 1 is: \n {t1_cf}')
t2_cf = metrics.confusion_matrix(t2_y_test, t2_y_pred)
print(f'The confusion matrix for Task 2 is: \n {t2_cf}')
1889807 | <filename>analysis/summarizer.py<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
"""
@author: william
@contact: <EMAIL>
@site: http://www.xiaolewei.com
@file: summarizer.py
@time: 01/03/2018 23:08
"""
from sumy.parsers.html import HtmlParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from entities import Article
LANGUAGE = 'chinese'
SENTENCES_COUNT = 6


def summarize(article: Article):
    """Return an LSA summary (SENTENCES_COUNT sentences) of the article's HTML."""
    doc = HtmlParser(article.html, Tokenizer(LANGUAGE)).document
    lsa_summarizer = Summarizer(Stemmer(LANGUAGE))
    lsa_summarizer.stop_words = get_stop_words(LANGUAGE)
    return lsa_summarizer(doc, SENTENCES_COUNT)
| StarcoderdataPython |
11271018 | <filename>Python3/Exercises/BankAccount/BankAccount.py<gh_stars>0
class BankAccount:
    """Minimal bank account: an owner name and a float balance.

    Robustness fix: deposits/withdrawals now reject negative amounts and
    withdrawals can no longer silently overdraw the account (the original
    accepted any amount without validation).
    """

    def __init__(self, owner):
        self.owner = owner
        self.balance = 0.0

    def getBalance(self):
        """Return the current balance."""
        return self.balance

    def deposit(self, amount):
        """Add `amount` to the balance and return the new balance.

        Raises ValueError if `amount` is negative.
        """
        if amount < 0:
            raise ValueError("deposit amount must be non-negative")
        self.balance += amount
        return self.balance

    def withdraw(self, amount):
        """Remove `amount` from the balance and return the new balance.

        Raises ValueError if `amount` is negative or exceeds the balance.
        """
        if amount < 0:
            raise ValueError("withdrawal amount must be non-negative")
        if amount > self.balance:
            raise ValueError("insufficient funds")
        self.balance -= amount
        return self.balance
# Quick demo; expected values are shown in the trailing comments.
acct = BankAccount("Darcy")
acct.owner #Darcy
acct.balance #0.0
acct.deposit(10) #10.0
acct.withdraw(3) #7.0
acct.balance #7.0
1792867 | <filename>sorts/heap_sort.py
# pass 3214,1234,123,-1234,123411,-128512,0
def make_heap(unsorted):
    """Drain `unsorted` (leaving it empty) into a new min-heap list.

    Elements are popped from the end of `unsorted`; each new element is
    sifted up so the result satisfies heap[i] <= heap[2*i+1], heap[2*i+2].
    """
    heap = []
    while unsorted:
        heap.append(unsorted.pop())
        i = len(heap) - 1
        # Sift the new element up until its parent is no larger.
        while i:
            if heap[(i - 1) // 2] > heap[i]:
                heap[i], heap[(i - 1) // 2] = heap[(i - 1) // 2], heap[i]
                i = (i - 1) // 2
            else:
                break
    return heap


def _sift_down(heap, i):
    """Restore the min-heap property for the subtree rooted at index `i`."""
    n = len(heap)
    while True:
        smallest = i
        for child in (2 * i + 1, 2 * i + 2):
            if child < n and heap[child] < heap[smallest]:
                smallest = child
        if smallest == i:
            return
        heap[i], heap[smallest] = heap[smallest], heap[i]
        i = smallest


def heap_sort(unsorted):
    """Sort `unsorted` ascending via heap sort, refilling and returning it.

    Bug fix: the previous version extracted with `heap.pop(0)` and never
    re-heapified the remainder, so the heap property was lost after the
    first removal and output could be mis-ordered (e.g. [2, 1, 3] came
    back as [1, 3, 2]).  We now swap the root with the last element, pop
    it, and sift the new root down.
    """
    heap = make_heap(unsorted)  # empties `unsorted`
    while heap:
        heap[0], heap[-1] = heap[-1], heap[0]
        unsorted.append(heap.pop())
        _sift_down(heap, 0)
    return unsorted
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print them sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(heap_sort(unsorted))
| StarcoderdataPython |
187753 | <reponame>bozhu/eprint-updates
#!/usr/bin/env python
import psycopg2
import urlparse
import pickle
from config import DATABASE_URL, DATABASE_KEY_STRING
class Storage():
    """Key/value store of pickled Python objects backed by a PostgreSQL
    table ``eprint(key VARCHAR, value BYTEA)``.

    The table is created on first use.  ``retrieve``/``save`` default to
    the application-wide DATABASE_KEY_STRING but accept any key.
    """

    def __init__(self):
        _database_url = urlparse.urlparse(DATABASE_URL)
        self._con = psycopg2.connect(
            database=_database_url.path[1:],
            user=_database_url.username,
            password=_database_url.password,
            host=_database_url.hostname,
            port=_database_url.port
        )
        self._cur = self._con.cursor()
        # check if the table exists
        # http://stackoverflow.com/a/1874268/1766096
        self._cur.execute("""SELECT *
                             FROM information_schema.tables
                             WHERE table_name='eprint'""")
        # print self._cur.rowcount
        if not bool(self._cur.rowcount):
            self._cur.execute("""CREATE TABLE eprint (
                                     key VARCHAR,
                                     value BYTEA
                                 )""")
            self._con.commit()
            # from config import sentry_client
            # sentry_client.captureMessage('Created a new table')

    def retrieve(self, key=DATABASE_KEY_STRING):
        """Return the unpickled value stored under `key`, or None if absent."""
        self._cur.execute("""SELECT value
                             FROM eprint
                             WHERE key = %s""",
                          (key,))
        rows = self._cur.fetchall()
        if len(rows) == 0:
            return None
        assert len(rows) == 1
        return pickle.loads(rows[0][0])

    def save(self, data, key=DATABASE_KEY_STRING):
        """Insert or update the pickled `data` under `key`.

        Bug fix: the INSERT and UPDATE previously hard-coded
        DATABASE_KEY_STRING, silently ignoring the `key` argument that the
        existence check just used; both statements now bind `key`.
        """
        # should use upsert?  (INSERT ... ON CONFLICT would close the
        # select-then-write race, but `key` has no unique constraint yet.)
        self._cur.execute("""SELECT value
                             FROM eprint
                             WHERE key = %s""",
                          (key,))
        rows = self._cur.fetchall()
        if len(rows) == 0:
            self._cur.execute("""INSERT INTO eprint
                                 (key, value)
                                 VALUES (%s, %s)""",
                              (key,
                               psycopg2.Binary(pickle.dumps(data))))
        else:
            self._cur.execute("""UPDATE eprint
                                 SET value = %s
                                 WHERE key = %s""",
                              (psycopg2.Binary(pickle.dumps(data)),
                               key))
        self._con.commit()
if __name__ == '__main__':
    # Manual smoke test; requires a reachable PostgreSQL at DATABASE_URL.
    # NOTE(review): the `print` statements below are Python 2 syntax.
    my_storage = Storage()
    my_storage.save(1)
    res = my_storage.retrieve()
    print type(res), res
    print my_storage.retrieve(key='not exist')
| StarcoderdataPython |
290460 | <filename>packages/lrn/webpages/tests/test_tree.py
# -*- coding: utf-8 -*-
# includedview_bagstore.py
# Created by <NAME> on 2011-03-23.
# Copyright (c) 2020 Softwell. All rights reserved.
from gnr.core.gnrbag import Bag,BagResolver
from gnr.core.gnrdecorator import public_method
class MeteoResolver(BagResolver):
    """Bag resolver that fetches the current weather for ``self.city``
    from the OpenWeatherMap API and renders it as a short string."""
    # NOTE(review): API key committed in source (placeholder here) —
    # should be moved to configuration.
    apikey='<KEY>'
    url="http://api.openweathermap.org/data/2.5/weather?appid=%(apikey)s&q=%(city)s&mode=xml&units=metric"
    def load(self):
        # Fetch and parse the XML response into a Bag; keep its 'current' node.
        meteo = Bag(self.url%dict(apikey=self.apikey,city=self.city))['current']
        return f"{meteo['weather?value']}, temperature:{meteo['temperature?value']}"
class ResolverGeo(BagResolver):
    """Lazy resolver that expands the Italian geo hierarchy one level at a
    time: with no kwargs it lists regions grouped by zone; with ``regione``
    it lists that region's provinces; with ``provincia`` its towns, each
    carrying a lazy weather node."""
    classKwargs = {'cacheTime': 300,
                   'regione':None,
                   'provincia':None,
                   '_page': None}
    def load(self):
        # Dispatch on whichever kwarg was supplied when the resolver was built.
        if self.regione:
            return self.elencoProvince()
        elif self.provincia:
            return self.elencoComuni()
        else:
            return self.elencoRegioni()
    def elencoComuni(self):
        """Build a Bag of the towns of ``self.provincia``, each with a lazy Meteo node."""
        result = Bag()
        f = self._page.db.table('glbl.comune').query(where='$sigla_provincia=:p',
                                                    p=self.provincia).fetch()
        for r in f:
            content = Bag(dict(r))
            # Weather is resolved on demand, not at tree-build time.
            content.addItem('meteo',MeteoResolver(city=r['denominazione']),name='Meteo')
            result.addItem(r['id'],content,
                            nome=r['denominazione'],comune_id=r['id'])
        return result
    def elencoProvince(self):
        """Build a Bag of the provinces of ``self.regione``; children resolve lazily."""
        result = Bag()
        f = self._page.db.table('glbl.provincia').query(where='$regione=:r',r=self.regione).fetch()
        for r in f:
            content = ResolverGeo(_page=self._page,provincia=r['sigla'])
            result.addItem(r['sigla'],content,
                            nome=r['nome'],sigla=r['sigla'])
        return result
    def elencoRegioni(self):
        """Build a Bag of regions grouped by zone; each region resolves lazily."""
        result = Bag()
        regioni_grouped = self._page.db.table('glbl.regione').query().fetchGrouped('zona')
        for zona,regioni in regioni_grouped.items():
            zonabag = Bag()
            result.addItem(zona,zonabag,nome=zona)
            for r in regioni:
                content = ResolverGeo(_page=self._page,regione=r['sigla'])
                zonabag.addItem(r['sigla'],content,nome=r['nome'],sigla=r['sigla'])
        return result
    def resolverSerialize(self):
        # The page object is runtime-only state; drop it before serializing.
        self._initKwargs.pop('_page')
        return BagResolver.resolverSerialize(self)
class GnrCustomWebPage(object):
    """Test page: a tree widget backed by the lazy ResolverGeo hierarchy."""
    py_requires="""gnrcomponents/testhandler:TestHandlerFull,
                gnrcomponents/framegrid:FrameGrid"""
    def test_01_tree_res(self,pane):
        """
        tree resolver
        """
        # Tree bound to the geo data store; node labels come from the 'nome' attribute.
        pane.tree(storepath='.geodata',labelAttribute='nome')#hideValues=True
        geodata = Bag()
        # Root node: Italy.  Children (regions/provinces/towns) resolve on demand.
        geodata.addItem('italia',ResolverGeo(_page=self),nome='Italia')
        pane.data('.geodata',geodata)
| StarcoderdataPython |
1950205 | <gh_stars>10-100
import abc
from typing import Iterable, MutableMapping, Optional
import turing.generated.models
from turing._base_types import DataObject
from turing.generated.model_utils import OpenApiModel
class EnsemblingJobSource:
    """
    Configuration of source of the ensembling job
    """

    def __init__(self, dataset: 'Dataset', join_on: Iterable[str]):
        # Read-only state, exposed via the properties below.
        self._src_dataset = dataset
        self._join_columns = join_on

    @property
    def dataset(self) -> 'Dataset':
        """Dataset this source reads from."""
        return self._src_dataset

    @property
    def join_on(self) -> Iterable[str]:
        """Columns used to join with the per-model prediction datasets."""
        return self._join_columns

    def to_open_api(self) -> OpenApiModel:
        """Convert this configuration into the generated OpenAPI model."""
        return turing.generated.models.EnsemblingJobSource(
            dataset=self._src_dataset.to_open_api(),
            join_on=self._join_columns,
        )

    def select(self, columns: Iterable[str]) -> 'EnsemblingJobPredictionSource':
        """
        Creates an instance of prediction source configuration

        :param columns: list of columns from this source, that contain prediction data
        :return: instance of `EnsemblingJobPredictionSource`
        """
        return EnsemblingJobPredictionSource(self._src_dataset, self._join_columns, columns)
class EnsemblingJobPredictionSource(EnsemblingJobSource):
    """
    Configuration of the prediction data for the ensembling job
    """

    def __init__(self, dataset, join_on, columns):
        super().__init__(dataset, join_on)
        self._columns = columns

    @property
    def columns(self) -> Iterable[str]:
        """Columns of the dataset that hold the model's prediction values."""
        return self._columns

    def to_open_api(self) -> OpenApiModel:
        """Convert this configuration into the generated OpenAPI model."""
        payload = dict(
            dataset=self.dataset.to_open_api(),
            join_on=self.join_on,
            columns=self.columns,
        )
        return turing.generated.models.EnsemblingJobPredictionSource(**payload)
class Dataset(abc.ABC, DataObject):
    """
    Abstract dataset

    Concrete subclasses (e.g. `BigQueryDataset`) must implement `join_on`
    and `to_open_api`.  These were previously plain `pass` stubs, so an
    incomplete subclass would silently return None; they are now declared
    abstract so such a subclass fails at instantiation time instead.
    """

    @abc.abstractmethod
    def join_on(self, columns: Iterable[str]) -> 'EnsemblingJobSource':
        """
        Create ensembling job source configuration from this dataset,
        by specifying how this dataset could be joined with the
        datasets containing predictions of individual models

        :param columns: list of columns, that would be used to join this
            dataset with predictions data
        :return: instance of ensembling job source configuration
        """

    @abc.abstractmethod
    def to_open_api(self) -> OpenApiModel:
        """Convert this dataset into its generated OpenAPI model."""
class BigQueryDataset(Dataset):
    """
    BigQuery dataset configuration
    """
    # Discriminator string identifying this dataset type to the backend.
    TYPE = "BQ"
    def __init__(self,
                 table: Optional[str] = None,
                 features: Optional[Iterable[str]] = None,
                 query: Optional[str] = None,
                 options: Optional[MutableMapping[str, str]] = None):
        """
        Create new instance of BigQuery dataset

        :param table: fully-qualified BQ table id e.g. `gcp-project.dataset.table_name`
        :param features: list of columns from the `table` to be selected for this dataset
        :param query: (optional) Alternatively, dataset can be defined by BQ standard SQL query.
            This allows to define dataset from the data, stored in multiple tables
        :param options: (optional) Additional BQ options to configure the dataset
        """
        super(BigQueryDataset, self).__init__()
        # NOTE(review): to_open_api() below relies on DataObject.to_dict()
        # (defined elsewhere) mapping these private attributes onto
        # BigQueryDatasetConfig kwargs — keep their names in sync with that
        # model's fields; verify before renaming.
        self._table = table
        self._query = query
        self._features = features
        self._options = options
    @property
    def table(self) -> Optional[str]:
        return self._table
    @property
    def query(self) -> Optional[str]:
        return self._query
    @property
    def features(self) -> Optional[Iterable[str]]:
        return self._features
    @property
    def options(self) -> Optional[MutableMapping[str, str]]:
        return self._options
    def to_open_api(self) -> OpenApiModel:
        """Convert this dataset into the generated OpenAPI model."""
        return turing.generated.models.BigQueryDataset(
            bq_config=turing.generated.models.BigQueryDatasetConfig(**self.to_dict())
        )
    def join_on(self, columns: Iterable[str]) -> 'EnsemblingJobSource':
        """Create an ensembling job source joining this dataset on `columns`."""
        return EnsemblingJobSource(
            dataset=self,
            join_on=columns
        )
| StarcoderdataPython |
5148683 | <filename>libs/applibs/compendium/c05homeactivity.py
import os
import sys
import libs.applibs.compendium.abstractcompendium as abstractcompendium
class HomeActivity(abstractcompendium.Compendium):
def __init__(self):
super().__init__()
self.metValue = {5010 : 3.3
,5011 : 2.3
,5012 : 3.8
,5020 : 3.5
,5021 : 3.5
,5022 : 3.2
,5023 : 2.5
,5024 : 4.5
,5025 : 2.8
,5026 : 3.5
,5027 : 4.3
,5030 : 3.3
,5032 : 2.3
,5035 : 3.3
,5040 : 2.5
,5041 : 1.8
,5042 : 2.5
,5043 : 3.3
,5044 : 3.0
,5045 : 6.0
,5046 : 2.3
,5048 : 4.0
,5049 : 3.5
,5050 : 2.0
,5051 : 2.5
,5052 : 2.5
,5053 : 2.5
,5055 : 2.5
,5056 : 7.5
,5057 : 3.0
,5060 : 2.3
,5065 : 2.3
,5070 : 1.8
,5080 : 1.3
,5082 : 2.8
,5090 : 2.0
,5092 : 4.0
,5095 : 2.3
,5100 : 3.3
,5110 : 5.0
,5120 : 5.8
,5121 : 5.0
,5125 : 4.8
,5130 : 3.5
,5131 : 2.0
,5132 : 6.5
,5140 : 4.0
,5146 : 3.5
,5147 : 3.0
,5148 : 2.5
,5149 : 2.5
,5150 : 9.0
,5160 : 2.0
,5165 : 3.5
,5170 : 2.2
,5171 : 2.8
,5175 : 3.5
,5180 : 5.8
,5181 : 3.0
,5182 : 2.3
,5183 : 2.0
,5184 : 2.5
,5185 : 2.0
,5186 : 3.0
,5188 : 1.5
,5189 : 2.0
,5190 : 2.5
,5191 : 2.8
,5192 : 3.0
,5193 : 4.0
,5194 : 5.0
,5195 : 3.5
,5197 : 2.3
,5200 : 4.0
,5205 : 2.3}
# Unpacking with * works with any object that is iterable and, since dictionaries return their keys when iterated through, you can easily create a list by using it within a list literal.
self.ckeys = [*self.metValue] # another option : list(self.metValue.keys())
self.metDescription = {5010 : "cleaning, sweeping carpet or floors, general"
,5011 : "cleaning, sweeping, slow, light effort"
,5012 : "cleaning, sweeping, slow, moderate effort"
,5020 : "cleaning, heavy or major (e.g. wash car, wash windows, clean garage), moderate effort"
,5021 : "cleaning, mopping, standing, moderate effort"
,5022 : "cleaning windows, washing windows, general"
,5023 : "mopping, standing, light effort"
,5024 : "polishing floors, standing, walking slowly, using electric polishing machine"
,5025 : "multiple household tasks all at once, light effort"
,5026 : "multiple household tasks all at once, moderate effort"
,5027 : "multiple household tasks all at once, vigorous effort"
,5030 : "cleaning, house or cabin, general, moderate effort"
,5032 : "dusting or polishing furniture, general"
,5035 : "kitchen activity, general, (e.g., cooking, washing dishes, cleaning up), moderate effort"
,5040 : "cleaning, general (straightening up, changing linen, carrying out trash, light effort"
,5041 : "wash dishes, standing or in general (not broken into stand/walk components)"
,5042 : "wash dishes, clearing dishes from table, walking, light effort"
,5043 : "vacuuming, general, moderate effort"
,5044 : "butchering animals, small"
,5045 : "butchering animal, large, vigorous effort"
,5046 : "cutting and smoking fish, drying fish or meat"
,5048 : "tanning hides, general"
,5049 : "cooking or food preparation, moderate effort"
,5050 : "cooking or food preparation - standing or sitting or in general (not broken into stand/walk components), manual appliances, light effort"
,5051 : "serving food, setting table, implied walking or standing"
,5052 : "cooking or food preparation, walking"
,5053 : "feeding household animals"
,5055 : "putting away groceries (e.g. carrying groceries, shopping without a grocery cart), carrying packages"
,5056 : "carrying groceries upstairs"
,5057 : "cooking Indian bread on an outside stove"
,5060 : "food shopping with or without a grocery cart, standing or walking"
,5065 : "non-food shopping, with or without a cart, standing or walking"
,5070 : "ironing"
,5080 : "knitting, sewing, light effort, wrapping presents, sitting"
,5082 : "sewing with a machine"
,5090 : "laundry, fold or hang clothes, put clothes in washer or dryer, packing suitcase, washing clothes by hand, implied standing, light effort"
,5092 : "laundry, hanging wash, washing clothes by hand, moderate effort"
,5095 : "laundry, putting away clothes, gathering clothes to pack, putting away laundry, implied walking"
,5100 : "making bed, changing linens"
,5110 : "maple syruping/sugar bushing (including carrying buckets, carrying wood)"
,5120 : "moving furniture, household items, carrying boxes"
,5121 : "moving, lifting light loads"
,5125 : "organizing room"
,5130 : "scrubbing floors, on hands and knees, scrubbing bathroom, bathtub, moderate effort"
,5131 : "scrubbing floors, on hands and knees, scrubbing bathroom, bathtub, light effort"
,5132 : "scrubbing floors, on hands and knees, scrubbing bathroom, bathtub, vigorous effort"
,5140 : "sweeping garage, sidewalk or outside of house"
,5146 : "standing, packing/unpacking boxes, occasional lifting of lightweight household items, loading or unloading items in car, moderate effort"
,5147 : "implied walking, putting away household items, moderate effort"
,5148 : "watering plants"
,5149 : "building a fire inside"
,5150 : "moving household items upstairs, carrying boxes or furniture"
,5160 : "standing, light effort tasks (pump gas, change light bulb, etc.)"
,5165 : "walking, moderate effort tasks, non-cleaning (readying to leave, shut/lock doors, close windows, etc.)"
,5170 : "sitting, playing with child(ren), light effort, only active periods"
,5171 : "standing, playing with child(ren) light effort, only active periods"
,5175 : "walking/running, playing with child(ren), moderate effort, only active periods"
,5180 : "walking/running, playing with child(ren), vigorous effort, only active periods"
,5181 : "walking and carrying small child, child weighing 15 lbs or more"
,5182 : "walking and carrying small child, child weighing less than 15 lbs"
,5183 : "standing, holding child"
,5184 : "child care, infant, general"
,5185 : "child care, sitting/kneeling (e.g., dressing, bathing, grooming, feeding, occasional lifting of child), light effort, general"
,5186 : "child care, standing (e.g., dressing, bathing, grooming, feeding, occasional lifting of child), moderate effort"
,5188 : "reclining with baby"
,5189 : "breastfeeding, sitting or reclining"
,5190 : "sit, playing with animals, light effort, only active periods"
,5191 : "stand, playing with animals, light effort, only active periods"
,5192 : "walk/run, playing with animals, general, light effort, only active periods"
,5193 : "walk/run, playing with animals, moderate effort, only active periods"
,5194 : "walk/run, playing with animals, vigorous effort, only active periods"
,5195 : "standing, bathing dog"
,5197 : "animal care, household animals, general"
,5200 : "elder care, disabled adult, bathing, dressing, moving into and out of bed, only active periods"
,5205 : "elder care, disabled adult, feeding, combing hair, light effort, only active periods"}
self.metDescription_fr = {5010 : "nettoyage, balayer la moquette ou les sols, général"
,5011 : "nettoyage, balayer, lentement, effort léger"
,5012 : "nettoyage, balayer, lentement, effort modéré"
,5020 : "nettoyage, important ou majeur (par ex. nettoyage de la voiture, des fenêtres, du garage), effort modéré"
,5021 : "nettoyage, lavage, debout, effort modéré"
,5022 : "nettoyage des fenêtres, lavage des fenêtres, général"
,5023 : "lavage, debout, effort léger"
,5024 : "cirage des sols, debout, marche lente, avec une cireuse électrique"
,5025 : "multiples tâches ménagères en même temps, effort léger"
,5026 : "multiples tâches ménagères en même temps, effort modéré"
,5027 : "multiples tâches ménagères en même temps, effort vigoureux"
,5030 : "nettoyage, maison ou cabane, général, effort modéré"
,5032 : "époussetage ou cirage des meubles, général"
,5035 : "activités de cuisine, en général (par ex. cuisiner, laver la vaisselle, nettoyer), effort modéré"
,5040 : "nettoyage, général (ranger, changer les draps, sortir les poubelles), effort léger"
,5041 : "faire la vaisselle, debout ou en général (non classé en composants debout/marche)"
,5042 : "faire la vaisselle, débarrasser les plats de la table, marcher, effort léger"
,5043 : "passer l'aspirateur, général, effort modéré"
,5044 : "dépecer des animaux, petits"
,5045 : "dépecer des animaux, grands, effort vigoureux"
,5046 : "découper et fumer du poisson, sécher du poisson ou de la viande"
,5048 : "tanner des peaux, général"
,5049 : "cuisiner ou préparer des repas, effort modéré"
,5050 : "cuisiner ou préparer des repas – position debout ou assise ou en général (non classé en composants debout/marche), appareils manuels, effort léger"
,5051 : "servir les aliments, mettre la table, impliquant de marcher ou de se tenir debout"
,5052 : "cuisiner ou préparer des repas, marcher"
,5053 : "nourrir des animaux domestiques"
,5055 : "ranger des provisions (par ex. porter des provisions, faire des courses sans chariot), porter des paquets"
,5056 : "porter des provisions en montant des escaliers"
,5057 : "cuisiner du pain indien dans un four d'extérieur"
,5060 : "faire des courses alimentaires avec ou sans chariot, debout ou en marchant"
,5065 : "faire des courses non-alimentaires avec ou sans chariot, debout ou en marchant"
,5070 : "repasser"
,5080 : "tricoter, coudre, effort léger, emballer des cadeaux, en position assise"
,5082 : "coudre à la machine"
,5090 : "faire la lessive, plier ou étendre des vêtements, mettre des vêtements au lave-linge ou au sèche-linge, faire une valise, laver des vêtements à la main, impliquant d'être debout, effort léger"
,5092 : "faire la lessive, pendre du linge, laver des vêtements à la main, effort modéré"
,5095 : "faire la lessive, ranger des vêtements, rassembler des vêtements pour les ranger, ranger du linge propre, impliquant de marcher"
,5100 : "faire le lit, changer les draps"
,5110 : "récolter et fabriquer du sirop d'érable (porter des seaux, du bois)"
,5120 : "déplacer des meubles, des objets, porter des cartons"
,5121 : "déplacer, soulever des charges légères"
,5125 : "ranger une pièce"
,5130 : "nettoyer à la brosse des sols, à genoux, nettoyer à la brosse une salle de bain, une baignoire, effort modéré"
,5131 : "nettoyer à la brosse des sols, à genoux, nettoyer à la brosse une salle de bain, une baignoire, effort léger"
,5132 : "nettoyer à la brosse des sols, à genoux, nettoyer à la brosse une salle de bain, une baignoire, effort vigoureux"
,5140 : "balayer le garage, le trottoir ou l'extérieur de la maison"
,5146 : "être debout, emballer/déballer des cartons, soulever occasionnellement des objets légers, charger ou décharger des articles dans la voiture, effort modéré"
,5147 : "impliquant de marcher, ranger des objets, effort modéré"
,5148 : "arroser les plantes"
,5149 : "faire un feu"
,5150 : "porter des objets dans des escaliers, porter des cartons ou des meubles"
,5160 : "être debout, tâches à effort léger (faire le plein d'essence, changer une ampoule, etc.)"
,5165 : "marcher, tâches à effort modéré, hors nettoyage (se préparer à sortir, fermer/verrouiller les portes, fermer les fenêtres, etc.)"
,5170 : "être assis, jouer avec un/des enfant(s), effort léger, périodes actives uniquement"
,5171 : "être debout, jouer avec un/des enfant(s), effort léger, périodes actives uniquement"
,5175 : "marcher/courir, jouer avec un/des enfant(s), effort modéré, périodes actives uniquement"
,5180 : "marcher/courir, jouer avec un/des enfant(s), effort vigoureux, périodes actives uniquement"
,5181 : "marcher et porter un petit enfant de 7 kg ou plus"
,5182 : "marcher et porter un petit enfant de moins de 7 kg"
,5183 : "être debout, porter un enfant"
,5184 : "s’occuper d’enfants, nourrisson, général"
,5185 : "s’occuper d’enfants, assis/à genoux (par ex. habiller, baigner, préparer, nourrir, soulever occasionnellement l'enfant) effort léger, général"
,5186 : "s’occuper d’enfants, debout (par ex. habiller, baigner, préparer, nourrir, soulever occasionnellement l'enfant) effort modéré"
,5188 : "être allongé avec un bébé"
,5189 : "allaiter, assise ou allongée"
,5190 : "être assis, jouer avec des animaux, effort léger, périodes actives uniquement"
,5191 : "être debout, jouer avec des animaux, effort léger, périodes actives uniquement"
,5192 : "marcher/courir, jouer avec des animaux, général, effort léger, périodes actives uniquement"
,5193 : "marcher/courir, jouer avec des animaux, effort modéré, périodes actives uniquement"
,5194 : "marcher/courir, jouer avec des animaux, effort vigoureux, périodes actives uniquement"
,5195 : "être debout, baigner le chien"
,5197 : "s’occuper des animaux, animaux domestiques, général"
,5200 : "s'occuper de personnes âgées, d'adultes handicapés, baigner, habiller, coucher et lever, périodes actives uniquement"
,5205 : "s'occuper de personnes âgées, d'adultes handicapés, nourrir, peigner, effort léger, périodes actives uniquement"}
    def printValues(self):
        """Dump every MET code/description pair held by this activity table."""
        print("Beginning dump for 'HomeActivity' ")
        super().printValues()
    def getMetValue(self, code):
        """Return the MET value for *code*; delegates to the parent lookup."""
        return super().getMetValue(code)
if __name__ == "__main__":
    # Smoke test: dump the whole table, look up a single MET code,
    # then iterate the activity object (the base class presumably
    # implements __iter__ — confirm against the parent definition).
    b = HomeActivity()
    b.printValues()
    print(b.getMetValue(5205))
    for l in b:
        print(l)
3372144 | <filename>tensorflow_learning/tf2/structured_data.py
# encoding: utf-8
'''
@author: jeffzhengye
@contact: <EMAIL>
@file: structured_data.py
@time: 2020/12/23 11:27
origin: https://www.tensorflow.org/tutorials/structured_data/feature_columns?hl=zh-cn
@desc: 样例: 如何使用tf.feature_column 来处理结构化数据,
'''
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from tensorflow.python.framework import dtypes
# The following lines in the original were bare attribute references with no
# assignment or call — no-op statements (most likely left over from IDE/API
# exploration). They had no runtime effect, so they are removed; the APIs
# they pointed at, all used or demonstrated further below, were:
#   tf.feature_column.numeric_column
#   keras.layers.DenseFeatures
#   tf.feature_column.embedding_column
#   tf.feature_column.categorical_column_with_hash_bucket
#   tf.feature_column.indicator_column
#   tf.feature_column.bucketized_column
# Load the heart-disease dataset. The original tutorial downloads it from
# Google Cloud Storage; here a local copy is read instead.
# URL = 'https://storage.googleapis.com/applied-dl/heart.csv'
# dataframe = pd.read_csv(URL)
data_file = 'heart.csv'
dataframe = pd.read_csv(data_file)
# Map the numeric 'thal' codes to string labels, then force the column to str
# so it can be fed to categorical feature columns below.
dataframe = dataframe.replace({'thal': {0: 'normal', 1: "fixed", 2: "normal"}})
dataframe = dataframe.astype({'thal': str})
print(dataframe.head())
# 80/20 train/test split, then a further 80/20 train/validation split.
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
print(train.head())
def df_to_dataset(dataframe, shuffle=True, batch_size=2):
    """Utility: build a batched ``tf.data.Dataset`` from a pandas DataFrame.

    The 'target' column is split off as the label; every remaining column
    becomes an entry in a dict of feature tensors. When *shuffle* is true,
    the dataset is shuffled with a buffer covering every row.
    """
    features = dataframe.copy()
    target = features.pop('target')
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), target))
    if shuffle:
        dataset = dataset.shuffle(buffer_size=len(features))
    return dataset.batch(batch_size)
batch_size = 3  # deliberately tiny batch size, for demonstration only
train_ds = df_to_dataset(train, shuffle=False, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
# Peek at one batch to show the structure tf.data yields: a dict of
# per-column tensors plus a label tensor.
for feature_batch, label_batch in train_ds.take(1):
    print('Every feature:', list(feature_batch.keys()))
    print('A batch of ages:', feature_batch['age'])
    print('A batch of targets:', label_batch)
# We will reuse this batch to demonstrate several kinds of feature columns.
example_batch = next(iter(train_ds))[0]
print('example_batch', example_batch)
# Hand-built sparse input kept for experimentation (string variant left
# commented out by the original author):
# sparse_input = {'indices':[[0, 0], [0, 1], [1, 2]], 'values': ['fixed', 'reversible', 'normal'], 'dense_shape': [2, 4]}
sparse_input = {'indices':[[0, 0], [0, 1], [1, 2]], 'values': [1, 1, 1], 'dense_shape': [2, 4]}
input_sparse = tf.sparse.SparseTensor(**sparse_input)
# input_sparse = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1], [1, 2]], values=['fixed', 'reversible', 'normal'], dense_shape=[2, 4])
# example_batch = {
#     'thal': input_sparse
# }
def demo(feature_column):
    """Apply *feature_column* to the module-level ``example_batch`` and print
    both the raw input column and the transformed dense output.
    """
    dense_layer = layers.DenseFeatures(feature_column)
    raw_key = feature_column.name.split('_')[0]
    print('input:', example_batch[raw_key])
    print(dense_layer(example_batch).numpy())
# Numeric column: passes the raw float value straight through.
age = feature_column.numeric_column("age")
demo(age)
#
# Bucketized column: one-hot encodes which age range the value falls into.
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets)
#
# thal = feature_column.categorical_column_with_vocabulary_list(
#     'thal', ['fixed', 'normal', 'reversible'])
# NOTE(review): dtype=int32 here, but 'thal' was cast to str above — the
# hash-bucket column would reject string input when demoed; confirm intent.
thal = feature_column.categorical_column_with_hash_bucket('thal', 20, dtype=dtypes.int32)
#
# thal_one_hot = feature_column.indicator_column(thal)
# demo(thal_one_hot)
# demo(thal)
# Note: the embedding column's input is the categorical column created above.
thal_embedding = feature_column.embedding_column(thal, dimension=8, combiner='sum')
# demo(thal_embedding)
| StarcoderdataPython |
6488214 | import numpy as np
from bpdb import set_trace
from numpy.random import multivariate_normal, normal
from sympy import Matrix, exp, symbols
from sympy.utilities.lambdify import lambdify
class Clock:
    """Simulated, imperfect hardware clock attached to an *agent* in *env*.

    The clock state is a 2-vector ``q = [reading, rate]`` propagated with the
    discrete transition/noise matrices ``F(T)``/``Q(T)`` built symbolically in
    :meth:`derivation`. Bias (``Delta``) and drift (``Delta_dot``) are exposed
    both in seconds and scaled by the speed of light (``b``, ``b_dot``).
    """

    def __init__(self, env, agent):
        self.env = env
        self.agent = agent
        # Simulation time at the last propagation; None until first update.
        self.time_previous = None
        self.define_parameters()
        self.define_noise()
        self.derivation()

    def define_parameters(self):
        """Draw the initial clock rate (alpha) and offset (beta)."""
        if self.agent.config.getboolean("perfect_clock"):
            self.alpha = 1.0
        else:
            # Config stores the sigma scaled by the speed of light, hence /c.
            sigma_Delta_dot = self.env.P0_config.getfloat("sigma_b_dot") / self.env.c
            self.alpha = 1.0 + np.random.normal(0, sigma_Delta_dot)
        if self.agent.config.getboolean("perfect_clock"):
            self.beta = 0.0
        else:
            sigma_Delta = self.env.P0_config.getfloat("sigma_b") / self.env.c
            self.beta = 0.0 + np.random.normal(0, sigma_Delta)
        self._Delta = self.beta  # clock bias [s]
        self._Delta_dot = self.alpha - 1  # clock drift [s/s]
        # State vector: q[0] = clock reading, q[1] = clock rate.
        self.q = np.array([self.beta, self.alpha])
        # self.q = np.array([self.beta, self.alpha, 0])

    def define_noise(self):
        """Set process/reading noise sigmas (zero process noise if perfect)."""
        if self.agent.config.getboolean("perfect_clock"):
            self.sigma_clock_process = 0
        else:
            self.sigma_clock_process = self.agent.config.getfloat("sigma_clock_process")
        self.sigma_clock_reading = self.agent.config.getfloat("sigma_clock_reading")
        # self.sigma_clock_reading = 0
        # self.sigma_clock_process = 0

    def update_time(self):
        """Propagate the clock state from the last check to env.now."""
        # T is time delta since last clock check
        # NOTE(review): the truthiness test also treats time_previous == 0.0
        # as "unset"; harmless if env.now starts at 0, but `is None` would be
        # the stricter check — confirm env semantics.
        if not self.time_previous:
            self.time_previous = self.env.now
        T = self.env.now - self.time_previous
        if T == 0:
            return
        self.time_previous = self.env.now
        # Discrete process noise sample with covariance Q(T).
        w = np.random.multivariate_normal(0 * self.q, self.Q(T))
        q_new = self.F(T) @ self.q + w
        self.q = q_new
        self._Delta = self.q[0].copy() - self.env.now
        self._Delta_dot = self.q[1].copy() - 1

    def time(self):
        """Return the (noisy) clock reading after propagating to now."""
        self.update_time()
        return self.q[0] + np.random.normal(0, self.sigma_clock_reading)

    @property
    def Delta(self):
        # Clock bias in seconds.
        self.update_time()
        return self._Delta

    @property
    def b(self):
        # Clock bias expressed as a range (bias * c).
        self.update_time()
        return self._Delta * self.env.c

    @property
    def Delta_dot(self):
        # Clock drift in seconds per second.
        self.update_time()
        return self._Delta_dot

    @property
    def b_dot(self):
        # Clock drift expressed as a range rate (drift * c).
        self.update_time()
        return self._Delta_dot * self.env.c

    def magic_time(self):
        """Return the true (noise-free) simulation time."""
        return self.env.now

    def get_transmit_wait(self):
        """Return (wait_time, cycle_number) until this agent's next TDMA slot,
        correcting the local clock by the estimated bias/drift if available.
        """
        if hasattr(self.agent, "estimator"):
            Delta, Delta_dot = self.agent.estimator.get_clock_estimate()
        else:
            Delta, Delta_dot = [0, 0]
        t = self.time()
        index = self.agent.index
        window = self.env.TRANSMISSION_WINDOW
        num_agents = self.env.NUM_AGENTS
        cycle_time = window * num_agents
        cycle_number = np.floor((t + cycle_time - index * window) / cycle_time)
        cycle_number = int(cycle_number)
        if hasattr(self.agent, "radio"):
            cycle_number_previous = self.agent.radio.cycle_number_previous
        else:
            cycle_number_previous = 0
        # Never transmit twice in the same cycle.
        if cycle_number == cycle_number_previous:
            cycle_number += 1
        t_transmit = cycle_number * cycle_time + index * window
        # Force Delta_dot to be greater than -0.5 (should be greater than -1
        # but don't want /0 error)
        Delta_dot = np.max([-0.5, Delta_dot])
        # Estimated true wait time to plug into simpy timeout
        wait_time = (t_transmit - (t - Delta)) / (1 + Delta_dot)
        if wait_time < 0:
            wait_time = window
        return wait_time, cycle_number

    def derivation(self):
        """Build F(T) and Q(T) symbolically and lambdify them.

        Uses the matrix-exponential (Van Loan style) construction: the block
        matrix exp([[-A, Q], [0, A^T]] * T) yields the discrete transition
        matrix F and the discrete process-noise covariance Q_d.
        """
        A = np.array(
            [
                [0, 1],
                [0, 0],
            ]
        )
        Q = np.array(
            [
                [0, 0],
                [0, self.sigma_clock_process ** 2],
            ]
        )
        T = symbols("T", real=True)
        Z = Matrix(np.block([[-A, Q], [np.zeros([2, 2]), A.T]])) * T
        eZ = exp(Z)
        F = eZ[2:, 2:].T
        Q_d = F @ eZ[:2, 2:]
        self.F = lambdify(T, F)
        self.Q = lambdify(T, Q_d)
| StarcoderdataPython |
5090237 | <reponame>idfumg/MonitorWeb
import tornado
from models import *
from handler_base import *
class HandlerServersEvents(BaseHandler, tornado.web.RequestHandler, DBHandler):
    """GET handler returning the server events belonging to the cookie user."""
    # NOTE(review): "<EMAIL>" below is a dataset redaction artifact — the
    # original line was presumably a decorator (e.g. @tornado.web.authenticated)
    # guarding get(). Restore the real decorator before running this file.
    <EMAIL>
    def get(self):
        """Respond with all events of the user identified by the user_id cookie."""
        user_id = self.get_cookie('user_id')
        if not user_id:
            self.write_error({
                'reason': 'Missing user_id cookie'
            })
            return
        events = self.select_servers_events(user_id)
        self.write_success({
            'events': events
        });

    def select_servers_events(self, user_id):
        """Return a list of {'id', 'server_id', 'text'} dicts for *user_id*,
        read from the servers_events table via a parameterized query."""
        servers_events = tables['servers_events']
        query = select([servers_events]).where(servers_events.c.user_id==user_id)
        cursor = self.execute(query)
        result = []
        # Row layout: (id, user_id, server_id, text); user_id is dropped.
        for id, user_id, server_id, text in cursor:
            result.append({
                'id': id,
                'server_id': server_id,
                'text': text
            })
        return result
| StarcoderdataPython |
3203061 | import os
import sys
import json
from text_summarization.lex_rank import LexRank
from datetime import datetime
import pytz
def populate_database(data_files):
    """Load scraped article JSON files, summarize each article with LexRank,
    and upsert the results into the NewsArticle table.

    Args:
        data_files: iterable of paths to JSON files, each holding a list of
            article dicts. BBC articles store "text" as a list of paragraph
            strings; other sources store it as one plain string.
    """
    url_set = set()  # article URLs already processed (deduplication)
    all_articles = []
    for data_file in data_files:
        with open(data_file, "r") as f:
            current_articles = json.load(f)
        all_articles += current_articles
    # Build the LexRank model over the full corpus, normalizing BBC's
    # paragraph lists into single strings.
    lex_rank = LexRank(list(map(lambda x: " ".join(x["text"]) if x["organization"] == "BBC" else x["text"],
                                all_articles)))
    for i, article in enumerate(all_articles):
        try:
            if "article_url" not in article:
                continue
            plain_url = article["article_url"].replace("?", "")
            if plain_url in url_set:
                continue  # already stored this article
            url_set.add(plain_url)
            # Parse the publication date and normalize the body text per source.
            if article["organization"] == "BBC":
                datetime_obj = datetime.strptime(article["date"], '%d %B %Y')
                article_text = " ".join(article["text"])
            else:
                date_components = article["date"].replace(",", "").split()
                datetime_obj = datetime.strptime(" ".join(date_components[:3]), '%b %d %Y')
                article_text = article["text"]
            datetime_obj = datetime_obj.replace(tzinfo=pytz.UTC)
            lex_rank.compute_sentence_page_rank_ordering(article_text)
            NewsArticle.objects.get_or_create(
                date=datetime_obj,
                title=article["title"],
                organization=article["organization"],
                author=article["author"],
                original_article_link=article["article_url"],
                # BUG FIX: store the normalized article_text. The previous
                # " ".join(article["text"]) interleaved a space between every
                # character for sources whose "text" field is a plain string.
                text=article_text,
                short_summary=(lex_rank.get_summary_sentences(1, block=False)),
                medium_top_summary=(lex_rank.get_summary_sentences(3, block=False)),
                long_top_summary=(lex_rank.get_summary_sentences(5, block=False)),
                medium_block_summary=(lex_rank.get_summary_sentences(3, block=True)),
                long_block_summary=(lex_rank.get_summary_sentences(5, block=True))
            )
            print("Finished Article {} out of {}".format(i + 1, len(all_articles)))
        except Exception as e:
            # BUG FIX: report the article index (the old current_index counter
            # was never incremented, so it always printed "1") and include the
            # exception instead of swallowing it silently.
            print("\tEncountered exception for article {}: {}".format(i + 1, e))
            continue
    print("There are {} articles in the DB".format(NewsArticle.objects.count()))
# Start execution here!
if __name__ == '__main__':
    # Python 2 idiom: force UTF-8 as the default string encoding before
    # touching article text (reload(sys)/setdefaultencoding do not exist
    # on Python 3).
    reload(sys)
    sys.setdefaultencoding("utf-8")
    # Boot Django before importing the ORM models.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.local_settings')
    import django
    django.setup()
    from article_summarizer_app.models import NewsArticle
    populate_database(["data/bbc_new_1001.json", "data/abc_new_1001.json"])
| StarcoderdataPython |
6431852 | __author__ = '<NAME> <<EMAIL>>'
import json
import copy
from collections import OrderedDict
from typing import Callable, List, MutableMapping, Optional, Union
import ttree.utils
from ttree.common import ASCIIMode, TraversalMode
from ttree.exceptions import (
NodeNotFound, MultipleRoots, DuplicatedNode, LinkPastRootNode, LoopError
)
from .node import Node
class Tree(OrderedDict):
    """
    The Tree object defines the tree-like structure based on
    :class:`Node` objects. A new tree can be created from scratch without any
    parameter or a shallow/deep copy of another tree. When ``deepcopy=True``,
    a deepcopy operation is performed on feeding ``tree`` parameter and
    *more memory is required to create the tree*.

    Storage model: the tree IS an OrderedDict mapping node id -> Node; the
    parent/child topology lives on the Node objects themselves, and
    ``self.root`` holds the id of the single root node (or None).
    """

    def __init__(self, tree: 'Tree' = None, deepcopy: bool = False):
        """Initiate a new tree or copy another tree with a shallow or
        deep copy.
        """
        super(Tree, self).__init__()
        #: id of the root node
        self.root = None
        if tree is not None:
            if not isinstance(tree, Tree):
                raise TypeError('Tree instance is required.')
            self.root = tree.root
            self.__merge_tree(tree, deepcopy)

    def __str__(self) -> str:
        """Render the tree as an ASCII graph."""
        return ttree.utils.print_tree(self, ascii_mode='simple')

    def __getitem__(self, item):
        """Return the Node for *item*, raising NodeNotFound (not KeyError)."""
        try:
            return super(Tree, self).__getitem__(item)
        except KeyError:
            raise NodeNotFound(f"Node '{item}' is not in the tree")

    def __setitem__(self, key, value, **kwargs):
        super(Tree, self).__setitem__(key, value, **kwargs)
        # Keep the node's back-reference to its owning tree in sync.
        if isinstance(value, Node):
            value._tree = self

    def __delitem__(self, key, **kwargs):
        # Detach the node from this tree before removing the mapping entry.
        if key in self:
            self[key]._tree = None
        super(Tree, self).__delitem__(key, **kwargs)

    def __merge_tree(self, other: 'Tree', deepcopy: bool = False):
        # Copy all of *other*'s nodes into this tree (deep copies on request).
        if deepcopy:
            for node_id in other:
                self[node_id] = copy.deepcopy(other[node_id])
        else:
            self.update(other)

    @property
    def paths_to_leaves(self):
        """
        Use this property to get the identifiers allowing to go from the root
        nodes to each leaf.
        Return a list of list of identifiers, root being not omitted.
        For example:

        .. code-block:: text

            Harry
            |___ Bill
            |___ Jane
            |    |___ Diane
            |         |___ George
            |              |___ Jill
            |         |___ Mary
            |    |___ Mark

        Result:

        .. code-block:: python3

            [['harry', 'jane', 'diane', 'mary'],
             ['harry', 'jane', 'mark'],
             ['harry', 'jane', 'diane', 'george', 'jill'],
             ['harry', 'bill']]
        """
        # rsearch walks leaf -> root, so each path is reversed into root -> leaf.
        return [[n for n in self.rsearch(l.id)][::-1] for l in self.leaves()]

    def add_node(self, node: Node, parent: Node = None):
        """
        Add a new node to tree.
        Add a new node object to the tree and make the parent as the root
        by default.
        """
        if not isinstance(node, Node):
            raise TypeError('First parameter must be instance of Node.')
        if node.id in self:
            raise DuplicatedNode(f"Node with ID '{node.id}' "
                                 f"is already exists in tree.")
        # *parent* may be a Node or a bare identifier.
        pid = parent.id if isinstance(parent, Node) else parent
        if pid is None:
            if self.root is not None:
                raise MultipleRoots('A tree takes one root merely.')
            self.root = node.id
        elif pid not in self:
            raise NodeNotFound(f"Parent node '{pid}' is not in the tree")
        self[node.id] = node
        if pid in self:
            self[pid].add_child(node.id)
        self[node.id].parent = pid

    def children(self, node_id) -> List[Node]:
        """
        Return the children (Node) list of ``node_id``.
        Empty list is returned if ``node_id`` does not exist.
        """
        return [self[i] for i in self.is_branch(node_id)]

    def create_node(self, *args, parent=None, node_cls=Node, **kwargs):
        """
        Create a new node and add it to this tree.
        If ``id`` is absent, a UUID will be generated automatically.
        """
        if not issubclass(node_cls, Node):
            raise ValueError('node_cls must be a subclass of Node.')
        node = node_cls(*args, **kwargs)
        self.add_node(node, parent)
        return node

    def depth(self, node=None) -> int:
        """
        Get the maximum level of this tree or the level of the given node

        .. note ::
            The parameter is node instance rather than node_identifier.

        :param ~ttree.Node node:
        :return: Depth level
        """
        result = 0
        if node is None:
            # Get maximum level of this tree
            for leaf in self.leaves():
                level = self.level(leaf.id)
                result = level if level >= result else result
        else:
            # Get level of the given node
            node_id = node.id if isinstance(node, Node) else node
            if node_id not in self:
                raise NodeNotFound(f"Node '{node_id}' is not in the tree")
            result = self.level(node_id)
        return result

    def expand_tree(self, node_id=None,
                    mode: Union[TraversalMode, str] = TraversalMode.DEPTH,
                    filtering: Callable[[Node], bool] = None,
                    key=None, reverse: bool = False):
        """
        Traverse the tree nodes with different modes.

        ``node_id`` refers to the expanding point to start; ``mode`` refers
        to the search mode (Tree.DEPTH, Tree.WIDTH). ``filter`` refers to the
        function of one variable to act on the :class:`Node` object.
        In this manner, the traversing will not continue to following children
        of node whose condition does not pass the filter. ``key``, ``reverse``
        are present to sort :class:`Node` objects at the same level.

        Python generator. Loosely based on an algorithm from
        'Essential LISP' by <NAME>, <NAME>, and
        <NAME>, page 239-241

        UPDATE: the @filtering function is performed on Node object during
        traversing. In this manner, the traversing will not continue to
        following children of node whose condition does not pass the filter.

        UPDATE: the @key and @reverse are present to sort nodes at each
        level.
        """
        node_id = self.root if node_id is None else node_id
        if node_id not in self:
            raise NodeNotFound(f"Node '{node_id}' is not in the tree")
        if filtering is not None and not callable(filtering):
            raise TypeError('Filtering must be callable.')
        mode = mode if isinstance(mode, TraversalMode) else TraversalMode(mode)
        if filtering is not None and not filtering(self[node_id]):
            return
        yield node_id
        queue = (self[i] for i in self[node_id].children)
        if filtering is not None:
            queue = filter(filtering, queue)
        if mode in (TraversalMode.DEPTH, TraversalMode.WIDTH):
            queue = sorted(queue, key=key, reverse=reverse)
            while queue:
                yield queue[0].id
                expansion = sorted(
                    filter(filtering, (self[i] for i in queue[0].children)),
                    key=key, reverse=reverse
                )
                if mode is TraversalMode.DEPTH:
                    queue = expansion + queue[1:]  # depth-first
                elif mode is TraversalMode.WIDTH:
                    queue = queue[1:] + expansion  # width-first
        elif mode is TraversalMode.ZIGZAG:
            # Suggested by <NAME> (<EMAIL>).
            # Two stacks alternate direction level by level.
            stack_fw = []
            queue = list(queue)
            queue.reverse()
            stack = stack_bw = queue
            direction = False
            while stack:
                expansion = filter(filtering,
                                   (self[i] for i in stack[0].children))
                yield stack.pop(0).id
                expansion = list(expansion)
                if direction:
                    expansion.reverse()
                    stack_bw = expansion + stack_bw
                else:
                    stack_fw = expansion + stack_fw
                if not stack:
                    # Current level exhausted: flip direction, swap stacks.
                    direction = not direction
                    stack = stack_fw if direction else stack_bw

    def is_branch(self, node_id):
        """
        Get the children (only sons) list of the node with ID == node_id.
        Empty list is returned if node_id does not exist
        """
        if node_id is None:
            raise ValueError("First parameter can't be None")
        return self[node_id].children

    def leaves(self, node_id=None):
        """Get leaves from given node (whole tree when node_id is None)."""
        if node_id is None:
            return [n for n in self.values() if n.is_leaf]
        return [self[n] for n in self.expand_tree(node_id) if self[n].is_leaf]

    def level(self, node_id, filtering: Callable[[Node], bool] = None):
        """
        Get the node level in this tree.
        The level is an integer starting with '0' at the root.
        In other words, the root lives at level '0';

        Update: @filtering params is added to calculate level passing
        exclusive nodes.
        """
        return len([n for n in self.rsearch(node_id, filtering)]) - 1

    def link_past_node(self, node_id):
        """
        Remove a node and link its children to its parent.
        Root is not allowed.
        For example, if we have a -> b -> c and delete node b, we are left
        with a -> c
        """
        if self.root == node_id:
            raise LinkPastRootNode('Cannot link past the root node, '
                                   'delete it with remove_node()')
        # Get the parent of the node we are linking past
        parent = self[self[node_id].parent]
        # Set the children of the node to the parent
        for child in self[node_id].children:
            self[child].parent = parent.id
        # Link the children to the parent
        parent.children += self[node_id].children
        # Delete the node
        parent.remove_child(node_id)
        del self[node_id]

    def move_node(self, source, destination):
        """
        Move node (source) from its parent to another parent (destination).
        """
        if source not in self or destination not in self:
            raise NodeNotFound
        # Refuse to move a node under one of its own descendants.
        if self.is_ancestor(source, destination):
            raise LoopError
        parent = self[source].parent
        self[parent].remove_child(source)
        self[destination].add_child(source)
        self[source].parent = destination

    def is_ancestor(self, ancestor, grandchild) -> bool:
        """Return True if *ancestor* lies on the path from *grandchild* to root."""
        parent = self[grandchild].parent
        child = grandchild
        while parent is not None:
            if parent == ancestor:
                return True
            child = self[child].parent
            parent = self[child].parent
        return False

    def parent(self, node_id) -> Optional[Node]:
        """
        Obtain specific node's parent (Node instance).
        Return None if the parent is None or does not exist in the tree.
        """
        pid = self[node_id].parent
        if pid is None or pid not in self:
            return None
        return self[pid]

    def paste(self, node_id, new_tree: 'Tree', deepcopy: bool = False):
        """
        Paste a new tree to an existing tree, with ``node_id`` becoming the
        parent of the root of this new tree.

        Update: add @deepcopy of pasted tree.
        """
        if not isinstance(new_tree, Tree):
            raise TypeError('Instance of Tree is required as '
                            '"new_tree" parameter.')
        if node_id is None:
            raise ValueError('First parameter can not be None')
        if node_id not in self:
            raise NodeNotFound(f"Node '{node_id}' is not in the tree")
        set_joint = set(new_tree) & set(self)  # joint keys
        if set_joint:
            # TODO: a deprecated routine is needed to avoid exception
            raise ValueError(f'Duplicated nodes {list(set_joint)} exists.')
        self.__merge_tree(new_tree, deepcopy)
        self[node_id].add_child(new_tree.root)
        self[new_tree.root].parent = node_id

    def remove_node(self, node_id) -> int:
        """
        Remove a node indicated by 'id'; all the successors are
        removed as well.
        Return the number of removed nodes.
        """
        if node_id is None:
            return 0
        parent = self[node_id].parent
        removed = [n for n in self.expand_tree(node_id)]
        for id_ in removed:
            del self[id_]
        # Update its parent info
        if parent:
            self[parent].remove_child(node_id)
        return len(removed)

    def remove_subtree(self, node_id) -> 'Tree':
        """
        Return a subtree deleted from this tree. If node_id is None, an
        empty tree is returned.
        For the original tree, this method is similar to
        `remove_node(self,node_id)`, because given node and its children
        are removed from the original tree in both methods.
        For the returned value and performance, these two methods are
        different:

            `remove_node` returns the number of deleted nodes;
            `remove_subtree` returns a subtree of deleted nodes;

        You are always suggested to use `remove_node` if your only to
        delete nodes from a tree, as the other one need memory
        allocation to store the new tree.
        """
        subtree = Tree()
        if node_id is None:
            return subtree
        subtree.root = node_id
        parent = self[node_id].parent
        self[node_id].parent = None  # reset root parent for the new tree
        removed = [n for n in self.expand_tree(node_id)]
        for id_ in removed:
            subtree[id_] = self.pop(id_)
        # Update its parent info
        self[parent].remove_child(node_id)
        return subtree

    def rsearch(self, node_id, filtering: Callable[[Node], bool] = None):
        """
        Search the tree from ``node_id`` to the root along links reservedly.

        Parameter ``filter`` refers to the function of one variable to act
        on the :class:`Node` object.
        """
        if node_id is None:
            return
        if node_id not in self:
            raise NodeNotFound(f"Node '{node_id}' is not in the tree")
        if filtering is not None and not callable(filtering):
            raise TypeError('Filtering must be a callable.')
        current = node_id
        while current is not None:
            if filtering is None or filtering(self[current]):
                yield current
            # subtree() hasn't update the parent
            current = self[current].parent if self.root != current else None

    def save2file(self, filename, node_id=None, id_hidden=True,
                  filtering=None, key=None, reverse=False,
                  ascii_mode=ASCIIMode.ex, data_property=None):
        """
        Save the tree into file for offline analysis.

        :param filename: Export file name (opened in append-binary mode)
        :param node_id: Traversal root node ID
        :param id_hidden: Is ID hidden?
        :param filtering: Filtering callable
        :param key: Sorting key callable
        :param reverse: Reverse mode?
        :param ascii_mode: ASCII mode
        :param data_property: Data property name
        """
        with open(filename, 'ab') as fp:
            ttree.utils.print_tree(
                self, node_id, id_hidden, filtering, key, reverse,
                ascii_mode, data_property, func=lambda n: fp.write(n + b'\n')
            )

    def print(self, node_id=None, id_hidden=True, filtering=None,
              key=None, reverse=False, ascii_mode=ASCIIMode.ex,
              data_property=None):
        """
        Print the tree structure in hierarchy style.

        You have three ways to output your tree data, i.e., stdout with
        ``print()``, plain text file with ``save2file()``, and json string
        with ``to_json()``. The former two use the same backend to generate
        a string of tree structure in a text graph.

        :param node_id: refers to the expanding point to start
        :param id_hidden: refers to hiding the node ID when printing
        :param filtering: refers to the function of one variable to act on the
            :class:`Node` object. In this manner, the traversing will not
            continue to following children of node whose condition does
            not pass the filter.
        :param key: are present to sort :class:`Node` object in the same level
        :param reverse: reverse mode
        :param ascii_mode: ASCII mode
        :param data_property: refers to the property on the node data object
            to be printed.
        """
        try:
            ttree.utils.print_tree(
                self, node_id, id_hidden, filtering,
                key, reverse, ascii_mode, data_property,
                func=print  # builtin print — the method name does not shadow it here
            )
        except NodeNotFound:
            print('Tree is empty')

    def siblings(self, node_id) -> List[Node]:
        """
        Return the siblings of given ``node_id``.
        If ``node_id`` is root or there are no siblings, an empty list
        is returned.
        """
        siblings = []
        if node_id != self.root:
            pid = self[node_id].parent
            siblings = [self[c] for c in self[pid].children if c != node_id]
        return siblings

    def size(self, level: int = None) -> int:
        """
        Get the number of nodes of the whole tree if @level is not
        given. Otherwise, the total number of nodes at specific level
        is returned.

        @param level The level number in the tree. It must be between
        [0, tree.depth].
        Otherwise, InvalidLevelNumber exception will be raised.
        """
        if level is None:
            return len(self)
        if not isinstance(level, int):
            raise TypeError(f"Level should be an integer instead "
                            f"of '{type(level)}'")
        return len(
            [node for node in self.values()
             if self.level(node.id) == level]
        )

    def subtree(self, node_id) -> 'Tree':
        """
        Return a shallow COPY of subtree with node_id being the new root.
        If node_id is None, return an empty tree.
        If you are looking for a deepcopy, please create a new tree
        with this shallow copy,
        e.g.

            new_tree = Tree(t.subtree(t.root), deep=True)

        This line creates a deep copy of the entire tree.
        """
        result = self.__class__()
        if node_id is None:
            return result
        if node_id not in self:
            raise NodeNotFound(f"Node '{node_id}' is not in the tree")
        result.root = node_id
        for subtree_node in self.expand_tree(node_id):
            result[self[subtree_node].id] = self[subtree_node]
        return result

    def to_dict(self, node_id=None, key=None, sort=True, reverse=False,
                with_data=False) -> MutableMapping:
        """Transform self into a dict (recursively, tag-keyed)."""
        node_id = self.root if node_id is None else node_id
        node_tag = self[node_id].tag
        result = {node_tag: {'children': []}}
        if with_data:
            result[node_tag]['data'] = self[node_id].data
        if self[node_id].expanded:
            queue = (self[i] for i in self[node_id].children)
            if sort:
                sort_options = {'reverse': reverse}
                if key is not None:
                    sort_options['key'] = key
                queue = sorted(queue, **sort_options)
            result[node_tag]['children'] = [
                self.to_dict(n.id, with_data=with_data,
                             sort=sort, reverse=reverse)
                for n in queue
            ]
            # A node without children collapses to its bare tag (or tag+data).
            if not result[node_tag]['children']:
                result = (
                    self[node_id].tag
                    if not with_data
                    else {node_tag: {'data': self[node_id].data}}
                )
        return result

    def to_json(self, with_data=False, sort=True, reverse=False) -> str:
        """Return the json string corresponding to self"""
        return json.dumps(
            self.to_dict(with_data=with_data, sort=sort, reverse=reverse)
        )
| StarcoderdataPython |
5160520 | <reponame>HughQS/Gesture_Recognition
# -*- coding: utf-8 -*-
"""
Created on 2018 3.26
@author: hugh
"""
import numpy as np
import cv2
from skimage import exposure
class data_aug(object):
    """Random image augmentation: holds one image and applies a random pair
    of transforms (mirror / exposure / rotation / crop) via get_aug_img()."""

    def __init__(self, img):
        # img: the image array to augment (indexed as H x W x C below).
        self.image = img

    # Horizontal (left-right) mirror.
    def _random_fliplr(self, random_fliplr = True):
        if random_fliplr and np.random.choice([True, False]):
            self.image = np.fliplr(self.image)  # left-right

    # Vertical (up-down) mirror.
    def _random_flipud(self, random_flipud = True):
        if random_flipud and np.random.choice([True, False]):
            self.image = np.flipud(self.image)  # up-down

    # Change illumination via gamma adjustment.
    def _random_exposure(self, random_exposure = True):
        if random_exposure and np.random.choice([True, False]):
            e_rate = np.random.uniform(0.5,1.5)
            self.image = exposure.adjust_gamma(self.image, e_rate)

    # Rotation about the image center.
    def _random_rotation(self, random_rotation = True):
        if random_rotation and np.random.choice([True, False]):
            w,h = self.image.shape[1], self.image.shape[0]
            # Randomly pick a rotation angle in [0, 10) degrees.
            # (The original comment claimed 0-180, but randint(0, 10)
            # only yields 0-9.)
            angle = np.random.randint(0,10)
            RotateMatrix = cv2.getRotationMatrix2D(center=(w/2, h/2), angle=angle, scale=0.7)
            # image = cv2.warpAffine(image, RotateMatrix, (w,h), borderValue=(129,137,130))
            self.image = cv2.warpAffine(self.image, RotateMatrix, (w,h), borderMode=cv2.BORDER_REPLICATE)

    # Random crop of crop_size x crop_size (only when the image is wide enough).
    def _random_crop(self, crop_size = 299, random_crop = True):
        if random_crop and np.random.choice([True, False]):
            if self.image.shape[1] > crop_size:
                sz1 = self.image.shape[1] // 2
                sz2 = crop_size // 2
                diff = sz1 - sz2
                (h, v) = (np.random.randint(0, diff + 1), np.random.randint(0, diff + 1))
                self.image = self.image[v:(v + crop_size), h:(h + crop_size), :]

    def get_aug_img(self):
        """Apply two randomly chosen augmentations (each still skips itself
        with probability 0.5) and return the resulting image."""
        data_aug_list = [self._random_fliplr, self._random_flipud, self._random_rotation, self._random_exposure, self._random_crop]
        # NOTE(review): np.random.choice uses replace=True by default, so the
        # same augmentation can be picked twice — confirm this is intended.
        data_aug_func = np.random.choice(data_aug_list, 2)
        for func in data_aug_func:
            func()
        return self.image
| StarcoderdataPython |
11380472 | import re
import json
import urllib
import requests
import threading
from threading import Thread
from synonyms_finder_utils import fetch_url
class Synonyms_finder(Thread):
    def __init__(self,request,group=None, target=None, name=None,
                 threadLimiter=None, args=(), kwargs=(), verboise=None):
        """Create a finder thread for one search term.

        :param request: the term whose synonyms will be searched on Wikidata
        :param threadLimiter: optional semaphore bounding concurrent threads
        (remaining parameters are forwarded to threading.Thread)
        """
        super(Synonyms_finder,self).__init__(group=group,target=target,
                                             name=name)
        self.args = args
        self.kwargs = kwargs
        self.req = request
        self.threadLimiter = threadLimiter
        self.__load_credentials()
        self.__install_openner()
        # url used to fetch results
        self.url = "https://www.wikidata.org/w/api.php"
        # list of sites that we will be looking for
        self.sites = ["enwiki","frwiki","eswiki","ruwiki","chwiki"]
        # instances of classes we want to filter; these are all types of name instances:
        #   * family name (Q101352)
        #   * given name (Q202444)
        #   * unisex given name (Q3409032)
        #   * female given name (Q11879590)
        #   * male given name (Q12308941)
        #   * double name (Q1243157)
        #   * matronymic (Q1076664)
        #   * patronymic (Q110874)
        #   * disambiguation page (Q4167410)
        self.instances = ["Q101352","Q202444","Q3409032","Q11879590","Q12308941","Q1243157","Q1076664","Q110874","Q4167410"]
        self.qs = []  # list of Q-identifiers associated with the value
        self.alias = {}
        self.synonyms = []
        self.synonyms_dict = {}
    def fit(self):
        '''Find all synonyms of the term stored in self.req and populate
        self.qs, self.alias, self.synonyms and self.synonyms_dict.'''
        self.qs = self.__get_qs_norm(self.req)
        for q in self.qs:
            self.alias.update(self.__get_alias(q))
        # now, let's fetch labels for linked elements. Only elements with
        # property "said to be the same as" will be kept
        labels_dict = {}
        # NOTE(review): self.alias is updated while being iterated; this only
        # avoids a RuntimeError as long as __get_alias/__get_labels never add
        # brand-new keys during the loop — confirm that invariant.
        for key,value in self.alias.items():
            if value:
                self.alias.update(self.__get_labels(key))
                for alias in value:
                    labels_dict.update(self.__get_labels(alias))
        combined_list = []
        for _key, value in labels_dict.items():
            combined_list = [*combined_list,*value]
        # Deduplicate all gathered labels.
        self.synonyms = list(set(combined_list))
        self.synonyms_dict = {self.req:self.synonyms}
    def run(self):
        """override parent method, that is called when the thread is started;
        runs fit() while holding the optional thread-limiting semaphore
        """
        if self.threadLimiter is not None:
            self.threadLimiter.acquire()
        try:
            self.fit()
        finally:
            # Release the slot (if any) and report completion even on error.
            if self.threadLimiter is not None:
                self.threadLimiter.release()
            print(f"Done with: {self.req}")
def __str__(self):
'''override standard method for printing
'''
s = f"Request:\t{self.req}\n"+\
f"Qs:\t\t{self.qs}\n"+\
f"Alias:\t\t{self.alias}\n"+\
f"Synonyms:\t{self.synonyms}\n"
return s
    def __load_credentials(self):
        '''Load proxy credentials.
        Intentionally a no-op: the implementation below is kept commented out
        and should be re-enabled only when running behind a proxy.
        '''
        # credentials = open(".\conf\local\credentials.txt","r")
        # self.key = credentials.readline()[:-1]
        # self.user = credentials.readline()[:-1]
        # self.pwd = credentials.readline()[:-1]
        # credentials.close()
    def __install_openner(self):
        '''Create a proxy gate for urllib requests.
        Intentionally a no-op: the implementation below is kept commented out
        and should be re-enabled only when running behind a proxy.
        '''
        # proxy_url = ""
        # proxy = {"https": "https://" + self.user + ":" + self.pwd + proxy_url}
        # proxy_support = urllib.request.ProxyHandler(proxy)
        # opener = urllib.request.build_opener(proxy_support)
        # urllib.request.install_opener(opener)
    def __get_qs_norm(self,text):
        '''Fetch all Q-identifiers associated with *text* on the configured
        wiki sites and accumulate them into self.qs.

        Two cases per match:
        1. the Q is already of a desired type (checked via P31) — append it;
        2. the Q is a disambiguation page — follow it and collect the name
           instances it points to.

        Args:
            text: string to be searched for
        Returns:
            self.qs — deduplicated list of Q identifiers
        '''
        for site in self.sites:
            params = urllib.parse.urlencode({
                "action" : "wbgetentities",
                "sites" : site,
                "titles": text,
                "props" : "claims",
                "normalize" : 1,
                "format": "json"
            },)
            url = f"{self.url}?{params}"
            response = fetch_url(url)
            result = json.load(response)
            # check if we have a match
            if "entities" in result:
                for el in result["entities"]:
                    if "Q" in el:
                        # now, let's check that our element has a proper class (property P31):
                        if self.__check_p31(result,el):
                            self.qs.append(el)
                        # now, let's check if our element has a disambiguation page
                        # NOTE(review): passing `self` here binds it to the
                        # helper's `result` parameter, which the helper then
                        # overwrites — looks like a leftover; the intended call
                        # is probably __fetch_qs_from_disambiguation_page(result, el).
                        if self.__check_disambiguation_page(result,el):
                            self.__fetch_qs_from_disambiguation_page(self,el)
        # once we are done, let's get rid of all duplicates in our list
        self.qs = list(set(self.qs))
        return self.qs
def __check_p31(self,result,q):
'''Function checks if a given q has a property "instance of" and this property
belongs to list of classes we want
Args:
result: json list containing output from wikidata
q: q that we are looking for
Returns:
a boolean indicating whether the given q has a propery from one of the self.instances
'''
whish_list = self.instances
entity = result["entities"][q]
match = False
if "P31" in entity["claims"]:
for instance in entity["claims"]["P31"]:
if instance["mainsnak"]["datavalue"]["value"]["id"] in whish_list:
match = True
return match
def __check_disambiguation_page(self,result,q):
'''function checks if a provided q is a disambiguation page
Args:
result: json list containing output from wikidata
q: q that we are looking for
Returns:
a boolean indicatin whetehr the given q has is a disambiguation page
'''
match = False
entity = result["entities"][q]
if "P31" in entity["claims"]:
for instance in entity["claims"]["P31"]:
if instance["mainsnak"]["datavalue"]["value"]["id"] == "Q4167410":
match = True
return match
    def __fetch_qs_from_disambiguation_page(self,result,q):
        '''Follow disambiguation page *q* and collect the wanted Qs it links.

        Every entity referenced through P1889 ("different from") is fetched
        and, when its class (P31) is in ``self.instances``, appended to
        ``self.qs``.

        NOTE(review): the ``result`` argument is never used -- the page is
        re-fetched by id below; confirm whether the extra request is intended.

        Args:
            result: parsed JSON response (currently ignored)
            q: Q of the disambiguation page
        '''
        params = urllib.parse.urlencode({
            "action" : "wbgetentities",
            "sites" : "enwiki",
            "ids": q,
            "props" : "claims",
            "normalize" : 1,
            "format": "json"
        },)
        url = f"{self.url}?{params}"
        response = fetch_url(url)
        result = json.load(response)
        # if the result is not empty
        if "entities" in result:
            # check that P1889 ("different from") is among the claims
            entity = result["entities"][q]
            for claim_id, claim_value in entity["claims"].items():
                if claim_id == "P1889":
                    for el in claim_value:
                        # for every claim we need to check its status,
                        # i.e. whether it is in our wishlist
                        _id = el["mainsnak"]["datavalue"]["value"]["id"]
                        # to save requests, skip ids that were already added
                        if _id in self.qs:
                            pass
                        else:
                            _params = urllib.parse.urlencode({
                                "action" : "wbgetentities",
                                "sites" : "enwiki",
                                "ids": _id,
                                "props" : "claims",
                                "normalize" : 1,
                                "format": "json"
                            })
                            _result = json.load(fetch_url(f"{self.url}?{_params}"))
                            # keep the id only when its type is wanted
                            if self.__check_p31(_result,_id):
                                self.qs.append(_id)
    def __get_alias(self,q):
        '''Fetch alias entity ids for a single Q.

        Aliases are taken from the P460 claims; when that property is absent,
        P1889 ("different from") is used as a fallback.

        Args:
            q: entity id to be searched

        Returns:
            dict mapping ``q`` to a list of alias entity ids, or to None when
            neither property is present
        '''
        params = urllib.parse.urlencode({
            "action":"wbgetentities",
            "ids":q,
            "props":"info|claims",
            "format":"json"
        })
        url = f"{self.url}?{params}"
        response = fetch_url(url)
        alias_res = json.load(response)
        alias_list = []
        # "also known as" style property (P460)
        if "P460" in alias_res["entities"][q]["claims"]:
            claims = alias_res["entities"][q]["claims"]["P460"]
            for link in claims:
                alias_list.append(link["mainsnak"]["datavalue"]["value"]["id"])
        # fallback: "different from" property (P1889)
        elif "P1889" in alias_res["entities"][q]["claims"]:
            claims = alias_res["entities"][q]["claims"]["P1889"]
            for link in claims:
                alias_list.append(link["mainsnak"]["datavalue"]["value"]["id"])
        else:
            return {q:None}
        return {q:alias_list}
def __normalize_label(self,label):
'''Function normalizes label:
* removes everything between "(...)"
* splits a string into a list of strings if it contains a separator "/"
* strips a string
'''
label_mod = re.sub(r"\(.+?\)","",label)
label_mod = label_mod.strip()
if "/" in label_mod:
label_mod = [x.strip() for x in label_mod.split("/")]
return label_mod
def __get_labels(self,q):
'''Function gets all labels of a given q
Args:
q: Q.... to be searched for
url: url to be used to make a request
Returns:
dictionary with q as key and its unique list of labels as value
'''
params = urllib.parse.urlencode({
"action":"wbgetentities",
"ids":q,
"props":"labels",
"utf8":1,
"format":"json"
})
url = f"{self.url}?{params}"
response = fetch_url(url)
labels_res = json.load(response)
labels = []
for label,label_value in labels_res["entities"][q]["labels"].items():
value = label_value["value"]
label = self.__normalize_label(value)
if isinstance(label,list):
for sub_list in label:
labels.append(sub_list)
else:
labels.append(label)
return {q: sorted(set(labels))}
| StarcoderdataPython |
209745 | <reponame>fahdrazavi/urduhack
# coding: utf8
"""
Preprocess utilities
"""
import sys
import unicodedata
import regex as re
# currency symbol -> 3-letter abbreviation, used by replace_currency_symbols()
CURRENCIES = {'$': 'USD', 'zł': 'PLN', '£': 'GBP', '¥': 'JPY', '฿': 'THB',
              '₡': 'CRC', '₦': 'NGN', '₩': 'KRW', '₪': 'ILS', '₫': 'VND',
              '€': 'EUR', '₱': 'PHP', '₲': 'PYG', '₴': 'UAH', '₹': 'INR'}
# full e-mail addresses (local-part@domain.tld)
EMAIL_REGEX = re.compile(
    r"(?:^|(?<=[^\w@.)]))([\w+-](\.(?!\.))?)*?[\w+-]@(?:\w-?)*?\w+(\.([a-z]{2,})){1,3}(?:$|(?=\b))",
    flags=re.IGNORECASE | re.UNICODE)
# North-American style phone numbers, with optional extension
PHONE_REGEX = re.compile(r'(?:^|(?<=[^\w)]))(\+?1[ .-]?)?(\(?\d{3}\)?[ .-]?)?(\d{3}[ .-]?\d{4})(\s?(?:ext\.?'
                         r'|[#x-])\s?\d{2,6})?(?:$|(?=\W))')
# integers/decimals with ',' '.' or ' ' as thousands/decimal separators
NUMBERS_REGEX = re.compile(r'(?:^|(?<=[^\w,.]))[+–-]?(([1-9]\d{0,2}(,\d{3})+(\.\d*)?)|([1-9]\d{0,2}([ .]\d{3})+(,\d*)?)'
                           r'|(\d*?[.,]\d+)|\d+)(?:$|(?=\b))')
# one or more of the currency symbols declared above
CURRENCY_REGEX = re.compile('({})+'.format('|'.join(re.escape(c) for c in CURRENCIES)))
# runs of linebreaks / runs of non-newline whitespace (see normalize_whitespace)
LINEBREAK_REGEX = re.compile(r'((\r\n)|[\n\v])+')
NONBREAKING_SPACE_REGEX = re.compile(r'(?!\n)\s+')
URL_REGEX = re.compile(r"(?:^|(?<![\w/.]))"
                       # protocol identifier
                       # r"(?:(?:https?|ftp)://)" <-- alt?
                       r"(?:(?:https?://|ftp://|www\d{0,3}\.))"
                       # user:pass authentication
                       r"(?:\S+(?::\S*)?@)?"
                       r"(?:"
                       # IP address exclusion
                       # private & local networks
                       r"(?!(?:10|127)(?:\.\d{1,3}){3})"
                       r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
                       r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
                       # IP address dotted notation octets
                       # excludes loopback network 0.0.0.0
                       # excludes reserved space >= 192.168.3.11
                       # excludes network & broadcast addresses
                       # (first & last IP address of each class)
                       r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"
                       r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}"
                       r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))"
                       r"|"
                       # host name
                       r"(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
                       # domain name
                       r"(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
                       # TLD identifier
                       r"(?:\.(?:[a-z\u00a1-\uffff]{2,}))"
                       r")"
                       # port number
                       r"(?::\d{2,5})?"
                       # resource path
                       r"(?:/\S*)?"
                       r"(?:$|(?![\w?!+&/]))",
                       flags=re.UNICODE | re.IGNORECASE)  # source: https://gist.github.com/dperini/729294
# shortened URLs such as bit.ly/abc123 (optional scheme, short path "hash")
SHORT_URL_REGEX = re.compile(r"(?:^|(?<![\w/.]))"
                             # optional scheme
                             r"(?:(?:https?://)?)"
                             # domain
                             r"(?:\w-?)*?\w+(?:\.[a-z]{2,12}){1,3}"
                             r"/"
                             # hash
                             r"[^\s.,?!'\"|+]{2,12}"
                             r"(?:$|(?![\w?!+&/]))",
                             flags=re.IGNORECASE)
# maps every Unicode punctuation codepoint (category P*) to a space;
# used by remove_punctuation() via str.translate
PUNCTUATION_TRANSLATE_UNICODE = dict.fromkeys((i for i in range(sys.maxunicode)
                                               if unicodedata.category(chr(i)).startswith('P')), u' ')
def normalize_whitespace(text: str):
    """
    Collapse runs of linebreaks into a single newline and runs of other
    whitespace into a single space, then strip leading/trailing whitespace.

    Args:
        text (str): raw ``urdu`` text

    Returns:
        str: returns a ``str`` object containing normalized text.
    """
    collapsed_newlines = LINEBREAK_REGEX.sub(r'\n', text)
    return NONBREAKING_SPACE_REGEX.sub(' ', collapsed_newlines).strip()
def replace_urls(text: str, replace_with='*URL*'):
    """
    Substitute every URL (both shortened and full forms) in *text*.

    Args:
        text (str): raw ``urdu`` text
        replace_with (str): replacement token

    Returns:
        str: returns a ``str`` object with URLs replaced by ``replace_with``.
    """
    # shortened URLs are handled first, then full URLs
    without_short = SHORT_URL_REGEX.sub(replace_with, text)
    return URL_REGEX.sub(replace_with, without_short)
def replace_emails(text: str, replace_with='*EMAIL*'):
    """
    Substitute every e-mail address in *text* with ``replace_with``.

    Args:
        text (str): raw ``urdu`` text
        replace_with (str): replacement token

    Returns:
        str: returns a ``str`` object with e-mails replaced.
    """
    replaced = EMAIL_REGEX.sub(replace_with, text)
    return replaced
def replace_phone_numbers(text: str, replace_with='*PHONE*'):
    """
    Substitute every phone number in *text* with ``replace_with``.

    Args:
        text (str): raw ``urdu`` text
        replace_with (str): replacement token

    Returns:
        str: returns a ``str`` object with phone numbers replaced.
    """
    replaced = PHONE_REGEX.sub(replace_with, text)
    return replaced
def replace_numbers(text: str, replace_with='*NUMBER*'):
    """
    Substitute every number in *text* with ``replace_with``.

    Args:
        text (str): raw ``urdu`` text
        replace_with (str): replacement token

    Returns:
        str: returns a ``str`` object with numbers replaced.
    """
    replaced = NUMBERS_REGEX.sub(replace_with, text)
    return replaced
def replace_currency_symbols(text: str, replace_with=None):
    """
    Replace all currency symbols found in *text*.

    Args:
        text (str): raw text
        replace_with (str): if None (default), each symbol is swapped for its
            standard 3-letter abbreviation (e.g. '$' with 'USD', '£' with 'GBP');
            otherwise every symbol is replaced by this string (e.g. "*CURRENCY*")

    Returns:
        str: returns a ``str`` object containing normalized text.
    """
    if replace_with is not None:
        return CURRENCY_REGEX.sub(replace_with, text)
    # default: expand each symbol to its abbreviation
    for symbol, abbreviation in CURRENCIES.items():
        text = text.replace(symbol, abbreviation)
    return text
def remove_punctuation(text: str, marks=None):
    """
    Strip punctuation from *text*.

    Args:
        text (str): raw text
        marks (str): when given, only these characters are replaced with a
            space (e.g. ``marks=',;:'`` removes commas, semi-colons, colons);
            when falsy, every Unicode punctuation character is removed.

    Returns:
        str

    Note:
        The translation-table path (``marks`` falsy) uses ``str.translate``
        and is roughly 5-10x faster than the regex path.
    """
    if not marks:
        return text.translate(PUNCTUATION_TRANSLATE_UNICODE)
    pattern = '[{}]+'.format(re.escape(marks))
    return re.sub(pattern, ' ', text, flags=re.UNICODE)
def remove_accents(text: str):
    """
    Drop every Unicode combining (accent) character from *text*.

    Args:
        text (str): raw urdu text

    Returns:
        str
    """
    kept = [ch for ch in text if not unicodedata.combining(ch)]
    return ''.join(kept)
def remove_english_alphabets(text: str):
    """
    Delete English letters and ASCII digits from a ``text``.

    Args:
        text (str): raw urdu text

    Returns:
        str: ``str`` object with [A-Za-z0-9] characters removed
    """
    characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890"
    # three-argument maketrans maps every listed character to None (deletion)
    return text.translate(str.maketrans('', '', characters))
| StarcoderdataPython |
5080556 | <reponame>h2r/RobotLearningBaselines
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib import pyplot as plt
class CoordConv2d(nn.Module):
    """Conv2d with optional coordinate channels and spatial attention.

    When ``use_coords`` is True, two coordinate planes (values in [-1, 1])
    are concatenated to the input before the convolution, following the
    CoordConv idea (Liu et al., 2018).

    Args:
        *args: positional args for ``nn.Conv2d`` (in_channels first; the
            in-channel count is widened by 2 when ``use_coords`` is set).
        use_coords (bool): append coordinate channels before convolving.
        attend: when truthy, a ``SpatialAttention2d`` with this many hidden
            units is applied after the activation.
        batch_norm (bool): apply ``nn.BatchNorm2d`` over the output channels.
        dropout (float): probability for ``nn.Dropout2d`` on the output.
        conditioning (int): size of the conditioning vector given to attention.
        **kwargs: keyword args forwarded to ``nn.Conv2d``.
    """

    def __init__(self, *args, use_coords=False, attend=False, batch_norm=False, dropout=0, conditioning=0, **kwargs):
        super(CoordConv2d, self).__init__()
        self.use_coords = use_coords
        self.attend = attend
        self.conditioning = conditioning
        self.width = 5
        self.height = 5
        # BUG FIX: the ``dropout`` argument used to be ignored
        # (``nn.Dropout2d(0)`` was hard-coded); honor it. The default
        # (dropout=0) keeps previous behavior.
        self.dropout = nn.Dropout2d(dropout)
        self.batch_norm = nn.BatchNorm2d(args[1]) if batch_norm else lambda x: x
        coords = torch.zeros(2, self.width, self.height)
        coords[0] = torch.stack([torch.arange(self.height).float()]*self.width, dim=0) * 2 / self.height - 1
        coords[1] = torch.stack([torch.arange(self.width).float()]*self.height, dim=1) * 2 / self.width - 1
        self.register_buffer('coords', coords)
        if self.use_coords:
            # widen in_channels to account for the two coordinate planes
            args = list(args)
            args[0] += 2
            args = tuple(args)
        self.conv = nn.Conv2d(*args, **kwargs)
        if self.attend:
            self.attend = SpatialAttention2d(args[1], self.attend, self.conditioning)

    def reset(self):
        """Reset the cached spatial size to the default and rebuild coords."""
        self.height = 5
        self.width = 5
        self.setup()

    def setup(self):
        """Rebuild the coordinate buffer for the current height/width."""
        pos_x, pos_y = np.meshgrid(
            np.linspace(-1., 1., self.height),
            np.linspace(-1., 1., self.width))
        self.coords = torch.zeros(2, self.width, self.height).to(self.coords)
        self.coords[0] = torch.from_numpy(pos_x).float().to(self.coords)
        self.coords[1] = torch.from_numpy(pos_y).float().to(self.coords)

    def forward(self, data, cond=None):
        if self.use_coords:
            # lazily rebuild the coordinate grid when the spatial size changes
            flag = False
            if not (self.width == data.shape[2]):
                self.width = data.shape[2]
                flag = True
            if not (self.height == data.shape[3]):
                self.height = data.shape[3]
                flag = True
            if flag:
                self.setup()
            # coordinate planes are scaled by the batch mean so their
            # magnitude tracks the input statistics
            data = torch.cat([data, torch.stack([self.coords]*data.size(0), dim=0) * data.mean() * 2], dim=1)
        x = self.conv(data)
        x = F.leaky_relu(x)
        x2 = None
        if self.attend:
            # NOTE(review): the attended map becomes the activation and the
            # ratio attended/raw is returned as the secondary output --
            # confirm this swap is intentional.
            x2 = self.attend(x, cond)
            x3 = x2 / x
            x = x2
            x2 = x3
        x = self.batch_norm(x)
        if x2 is not None:
            return self.dropout(x), x2
        return self.dropout(x)
class SpatialAttention2d(nn.Module):
    """Soft spatial attention over a feature map.

    A scalar score is computed per spatial position from its channel vector
    (optionally concatenated with a conditioning vector), softmax-normalised
    over all positions, and multiplied back onto the input.
    """

    def __init__(self, channels, k, conditioning=0):
        super(SpatialAttention2d, self).__init__()
        self.conditioning = conditioning
        self.lin1 = nn.Linear(channels, k)
        self.lin2 = nn.Linear(k + conditioning, 1)

    def forward(self, data, cond=None, b_print=False, print_path=''):
        batch, channels, height, width = data.shape
        if cond is None or self.conditioning == 0:
            cond = torch.zeros(batch, self.conditioning).to(data)
        # (B, C, H, W) -> (B, H*W, C): one channel vector per spatial position
        flat = data.permute(0, 2, 3, 1).view(batch, height * width, channels)
        scores = self.lin1(flat)
        broadcast_cond = cond.unsqueeze(1).expand(scores.size(0), scores.size(1), self.conditioning)
        scores = torch.tanh(torch.cat([scores, broadcast_cond], dim=2))
        scores = self.lin2(scores)
        # softmax over spatial positions -> weights sum to 1 per sample
        weights = torch.softmax(scores, dim=1)
        mask = weights.permute(0, 2, 1).view(batch, 1, height, width)
        if b_print:
            plt.figure(1)
            plt.imshow(mask[0, 0].detach().cpu().numpy())
            plt.savefig(print_path + 'mask.png')
        return data * mask
class ChannelAttention2d(nn.Module):
    """
    Per-channel attention over a flattened feature map.

    NOTE(review): the attention mask is computed but NOT applied -- ``forward``
    returns ``data`` unchanged because the ``*mask`` factor is commented out,
    so this module is currently an identity with extra (unused) computation.
    """
    def __init__(self, img_size):
        super(ChannelAttention2d, self).__init__()
        # one scalar score per channel, computed from the flattened H*W map
        self.lin = nn.Linear(img_size, 1)
    def forward(self, data):
        # (B, C, H, W) -> (B, C, 1, H*W) -> scalar score per channel
        atn = self.lin(data.view(data.shape[0], data.shape[1], 1, -1))
        soft = torch.softmax(atn, dim=1)
        # rescaled so the mask averages to 1 across channels
        mask = soft*atn.shape[1]
        return data#*mask
class SpatialSoftmax(nn.Module):
    """
    Spatial softmax layer: converts each channel of a feature map into the
    2-D expected position of its activation, producing ``channel * 2``
    keypoint coordinates in [-1, 1].
    """
    def __init__(self):
        super(SpatialSoftmax, self).__init__()
        # cached spatial/channel sizes; rebuilt lazily in forward() when the
        # incoming feature shape changes
        self.height = 5
        self.width = 5
        self.channel = 5
        pos_x, pos_y = np.meshgrid(
            np.linspace(-1., 1., self.height),
            np.linspace(-1., 1., self.width))
        pos_x = torch.from_numpy(pos_x.reshape(self.height*self.width)).float()
        pos_y = torch.from_numpy(pos_y.reshape(self.height*self.width)).float()
        # buffers so the coordinate grids follow the module's device
        self.register_buffer('pos_x', pos_x)
        self.register_buffer('pos_y', pos_y)
    def reset(self):
        # restore the default 5x5 grid with 5 channels
        self.height = 5
        self.width = 5
        self.channel = 5
        self.setup()
    def setup(self):
        # rebuild the coordinate grids for the current height/width
        pos_x, pos_y = np.meshgrid(
            np.linspace(-1., 1., self.height),
            np.linspace(-1., 1., self.width))
        # NOTE(review): grids are cast to double here while __init__ builds
        # float grids -- with float features the product in forward() gets
        # promoted to double; confirm the cast is intentional.
        self.pos_x = torch.from_numpy(pos_x.reshape(self.height*self.width)).to(self.pos_x).double()
        self.pos_y = torch.from_numpy(pos_y.reshape(self.height*self.width)).to(self.pos_y).double()
    def forward(self, feature):
        # detect shape changes and rebuild the coordinate grids when needed
        flag = False
        if not (self.channel == feature.shape[1]):
            self.channel = feature.shape[1]
            flag = True
        if not (self.width == feature.shape[2]):
            self.width = feature.shape[2]
            flag = True
        if not (self.height == feature.shape[3]):
            self.height = feature.shape[3]
            flag = True
        if flag:
            self.setup()
        # log of relu'd activations (epsilon keeps the log finite), then a
        # softmax over all spatial positions of each channel
        feature = torch.log(F.relu(feature.view(-1, self.height*self.width)) + 1e-6)
        softmax_attention = F.softmax(feature, dim=-1)
        # expected (x, y) position per channel under the attention weights
        expected_x = torch.sum(self.pos_x*softmax_attention, dim=1, keepdim=True)
        expected_y = torch.sum(self.pos_y*softmax_attention, dim=1, keepdim=True)
        expected_xy = torch.cat([expected_x, expected_y], 1)
        feature_keypoints = expected_xy.view(-1, self.channel*2)
        return feature_keypoints
| StarcoderdataPython |
5185439 | import logging
import sys
import warnings
import os
import six
from tqdm import tqdm
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def loglevel_from_string(level):
    """Coerce a level name or numeric level to the numeric logging level.

    FIX: the doctest below previously referenced a non-existent
    ``_loglevel_from_string`` name and would fail under doctest collection.

    >>> loglevel_from_string('debug')
    10
    >>> loglevel_from_string(logging.INFO)
    20
    """
    if isinstance(level, str):
        level = getattr(logging, level.upper())
    # six.integer_types is exactly (int,) on Python 3 (which this file
    # already requires), so plain int is equivalent and drops the six usage.
    assert isinstance(level, int)
    return level
def get_loglevel_from_env(default_level):
    """Return the log level, preferring the FLEXP_LOGLEVEL env variable.

    >>> os.environ['FLEXP_LOGLEVEL'] = 'info'
    >>> get_loglevel_from_env(logging.DEBUG)
    20
    >>> del os.environ['FLEXP_LOGLEVEL']
    >>> get_loglevel_from_env(logging.DEBUG)
    10
    """
    # env value wins when the variable is set; otherwise fall back to the
    # caller-supplied default
    chosen = os.environ.get('FLEXP_LOGLEVEL', default_level)
    return loglevel_from_string(chosen)
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that writes records through ``tqdm.write``.

    Routing log output through ``tqdm.write`` keeps log lines from breaking
    an active tqdm progress bar.

    credit: https://stackoverflow.com/questions/38543506/change-logging-print-function-to-tqdm-write-so-logging-doesnt-interfere-wit
    """
    def __init__(self, level=logging.NOTSET):
        super().__init__(level)
    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.write(msg, file=sys.stderr)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed BaseExceptions
            # such as GeneratorExit; delegate to the standard Handler error
            # path for everything else.
            self.handleError(record)
def _setup_logging(level=logging.DEBUG, filename='log.txt', disable_stderr=False):
    """Configure the root logger with an optional file and tqdm stderr handler.

    Existing root handlers are closed and removed first so repeated calls do
    not stack handlers.
    """
    _close_file_handlers()
    numeric_level = loglevel_from_string(level)
    root = logging.getLogger()
    root.setLevel(numeric_level)
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(log_formatter)
        root.addHandler(file_handler)
    if not disable_stderr:
        stderr_handler = TqdmLoggingHandler(numeric_level)
        stderr_handler.setFormatter(log_formatter)
        root.addHandler(stderr_handler)
    # show each unique warning only once
    warnings.simplefilter("once")
def _close_file_handlers():
    """Close and detach every handler attached to the root logger.

    Despite the name, this closes *all* root handlers, not only file handlers.
    """
    root = logging.getLogger()
    for handler in root.handlers:
        handler.close()
    root.handlers = []
| StarcoderdataPython |
3304412 | <reponame>BaryonPasters/diffprof
"""Module for loading data storing the best-fit diffprof parameters."""
import os
import numpy as np
from astropy.table import Table
import h5py
def impute_bad_ellipticity_fits(
    e_t0, e_early, e_late, e_t0_min=0.1, e_early_min=0.1, e_late_min=0.1
):
    """Clamp unphysical ellipticity fit parameters to their floor values.

    Args:
        e_t0, e_early, e_late: arrays of best-fit ellipticity parameters
        e_t0_min, e_early_min, e_late_min: lower bounds used for clamping

    Returns:
        tuple (e_lgtc, e_early, e_late): log10 of the clamped e_t0, plus the
        clamped early/late parameters
    """
    clamped_t0 = np.maximum(e_t0, e_t0_min)
    clamped_early = np.maximum(e_early, e_early_min)
    clamped_late = np.maximum(e_late, e_late_min)
    return np.log10(clamped_t0), clamped_early, clamped_late
def impute_bad_concentration_fits(c_lgtc, c_lgtc_min=0.1):
    """Clamp the concentration lgtc parameter to a minimum value.

    Args:
        c_lgtc: array of best-fit concentration lgtc values
        c_lgtc_min: floor applied to every element

    Returns:
        array with values below ``c_lgtc_min`` replaced by ``c_lgtc_min``
    """
    return np.maximum(c_lgtc, c_lgtc_min)
def load_mdpl2_fits(drn, bn="MDPL2_all_c_e_mah_params.csv"):
    """Load the MDPL2 best-fit table (concentration, ellipticity, mass).

    Parameters
    ----------
    drn : string
        Directory containing the csv file
    bn : string
        Basename of the csv file

    Returns
    -------
    data : astropy table
        Table with imputed ellipticity/concentration parameters and
        normalized column names
    """
    table = Table.read(os.path.join(drn, bn), format="ascii.commented_header")
    table.remove_column("t0")

    # impute bad ellipticity fits and swap e_t0 for its (clamped) log10
    e_lgtc, e_early, e_late = impute_bad_ellipticity_fits(
        table["e_t0"], table["e_early"], table["e_late"]
    )
    table["e_lgtc"] = e_lgtc
    table["e_early"] = e_early
    table["e_late"] = e_late
    table.remove_column("e_t0")

    table["conc_lgtc"] = impute_bad_concentration_fits(table["conc_lgtc"])

    # normalize column names to the conventions used elsewhere in the package
    for old_name, new_name in (
        ("conc_beta_early", "conc_early"),
        ("conc_beta_late", "conc_late"),
        ("mah_logtc", "mah_lgtc"),
    ):
        table.rename_column(old_name, new_name)
    return table
def _read_h5_datasets(fn):
    """Read every top-level dataset of an hdf5 file into a plain dict."""
    out = dict()
    with h5py.File(fn, "r") as hdf:
        for key in hdf.keys():
            out[key] = hdf[key][...]
    return out


def load_bpl_fits(drn):
    """Load the collection of fits to BPL for concentration, ellipticity, and mass.

    Parameters
    ----------
    drn : string
        Directory where the hdf5 files are located

    Returns
    -------
    data : astropy table
    """
    # IMPROVEMENT: the three identical hdf5-reading loops are factored into
    # the _read_h5_datasets helper above.
    m_fits = _read_h5_datasets(os.path.join(drn, "bpl_cens_trunks_diffmah_fits.h5"))
    c_fits = _read_h5_datasets(os.path.join(drn, "bpl_cens_trunks_conc_fits.h5"))
    e_fits = _read_h5_datasets(os.path.join(drn, "bpl_cens_trunks_ellipticity_fits.h5"))

    # impute bad ellipticity fits and replace e_t0 by its (clamped) log10
    e_lgtc, e_early, e_late = impute_bad_ellipticity_fits(
        e_fits["e_t0"], e_fits["e_early"], e_fits["e_late"]
    )
    e_fits["e_lgtc"] = e_lgtc
    e_fits["e_early"] = e_early
    e_fits["e_late"] = e_late
    e_fits.pop("e_t0")

    c_fits["conc_lgtc"] = impute_bad_concentration_fits(c_fits["conc_lgtc"])

    # assemble the output table with normalized column names
    data = Table()
    data["halo_id"] = m_fits["halo_id"]
    data["logmp"] = m_fits["logmp_fit"]
    data["mah_lgtc"] = m_fits["mah_logtc"]
    data["mah_k"] = m_fits["mah_k"]
    data["mah_early"] = m_fits["early_index"]
    data["mah_late"] = m_fits["late_index"]
    data["e_lgtc"] = e_fits["e_lgtc"]
    data["e_k"] = e_fits["e_k"]
    data["e_early"] = e_fits["e_early"]
    data["e_late"] = e_fits["e_late"]
    data["conc_lgtc"] = c_fits["conc_lgtc"]
    data["conc_k"] = c_fits["conc_k"]
    data["conc_early"] = c_fits["conc_beta_early"]
    data["conc_late"] = c_fits["conc_beta_late"]
    return data
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.