index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,200 | 2790bd80949bafe4e98ab9aca9cf80a6a0f31490 | import wx
from six import print_
import os
FONTSIZE = 10
class TextDocPrintout(wx.Printout):
    """
    A printout class that is able to print simple text documents.
    Does not handle page numbers or titles, and it assumes that no
    lines are longer than what will fit within the page width. Those
    features are left as an exercise for the reader. ;-)
    """
    def __init__(self, text, title, margins):
        wx.Printout.__init__(self, title)
        self.lines = text.split('\n')
        # (topLeft, bottomRight) margin points, in millimeters
        self.margins = margins

    def HasPage(self, page):
        """Return True while *page* is within the computed page count."""
        return page <= self.numPages

    def GetPageInfo(self):
        """Return (minPage, maxPage, pageFrom, pageTo) for the framework."""
        return (1, self.numPages, 1, self.numPages)

    def CalculateScale(self, dc):
        """Scale *dc* so the printout is roughly the same as the screen scaling."""
        ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
        ppiScreenX, ppiScreenY = self.GetPPIScreen()
        logScale = float(ppiPrinterX)/float(ppiScreenX)
        # Now adjust if the real page size is reduced (such as when
        # drawing on a scaled wx.MemoryDC in the Print Preview.)  If
        # page width == DC width then nothing changes, otherwise we
        # scale down for the DC.
        pw, ph = self.GetPageSizePixels()
        dw, dh = dc.GetSize()
        scale = logScale * float(dw)/float(pw)
        # Set the DC's scale.
        dc.SetUserScale(scale, scale)
        # Find the logical units per millimeter (for calculating the
        # margins)
        self.logUnitsMM = float(ppiPrinterX)/(logScale*25.4)

    def CalculateLayout(self, dc):
        """Determine the margin box position and the page/line height."""
        topLeft, bottomRight = self.margins
        dw, dh = dc.GetSize()
        self.x1 = topLeft.x * self.logUnitsMM
        self.y1 = topLeft.y * self.logUnitsMM
        self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM
        self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM
        # use a 1mm buffer around the inside of the box, and a few
        # pixels between each line
        self.pageHeight = self.y2 - self.y1 - 2*self.logUnitsMM
        font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,
                       wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
        dc.SetFont(font)
        self.lineHeight = dc.GetCharHeight()
        # Guard against a degenerate layout (margins taller than the page),
        # which would otherwise cause a ZeroDivisionError when paginating.
        self.linesPerPage = max(1, int(self.pageHeight/self.lineHeight))

    def OnPreparePrinting(self):
        """Calculate the number of pages before printing starts."""
        dc = self.GetDC()
        self.CalculateScale(dc)
        self.CalculateLayout(dc)
        # BUG FIX: '/' is float division on Python 3, which made numPages a
        # float; use floor division plus a remainder check (ceiling).
        self.numPages = len(self.lines) // self.linesPerPage
        if len(self.lines) % self.linesPerPage != 0:
            self.numPages += 1

    def OnPrintPage(self, page):
        """Render one page: margin outline plus this page's slice of lines."""
        dc = self.GetDC()
        self.CalculateScale(dc)
        self.CalculateLayout(dc)

        # draw a page outline at the margin points
        dc.SetPen(wx.Pen("black", 0))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))
        dc.DrawRectangle(r)
        dc.SetClippingRegion(r)

        # Draw the text lines for this page
        line = (page-1) * self.linesPerPage
        x = self.x1 + self.logUnitsMM
        y = self.y1 + self.logUnitsMM
        while line < (page * self.linesPerPage):
            dc.DrawText(self.lines[line], x, y)
            y += self.lineHeight
            line += 1
            if line >= len(self.lines):
                break
        return True
class PrintFrameworkSample(wx.Frame):
    """Main window: an editable text view plus File-menu entries that
    exercise the wx printing framework (page setup, preview, print)."""

    def __init__(self):
        wx.Frame.__init__(self, None, size=(640, 480),
                          title="Print Framework Sample")
        self.CreateStatusBar()

        # A text widget to display the doc and let it be edited
        self.tc = wx.TextCtrl(self, -1, "",
                              style=wx.TE_MULTILINE|wx.TE_DONTWRAP)
        self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,
                                wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))

        # Load the sample document shipped next to this script.
        filename = os.path.join(os.path.dirname(__file__), "sample-text.txt")
        with open(filename) as fid:
            self.tc.SetValue(fid.read())
        self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
        wx.CallAfter(self.tc.SetInsertionPoint, 0)

        # Create the menu and menubar
        menu = wx.Menu()
        item = menu.Append(-1, "Page Setup...\tF5",
                           "Set up page margins and etc.")
        self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
        item = menu.Append(-1, "Print Preview...\tF6",
                           "View the printout on-screen")
        self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
        item = menu.Append(-1, "Print...\tF7", "Print the document")
        self.Bind(wx.EVT_MENU, self.OnPrint, item)
        menu.AppendSeparator()
##         item = menu.Append(-1, "Test other stuff...\tF9", "")
##         self.Bind(wx.EVT_MENU, self.OnPrintTest, item)
##         menu.AppendSeparator()
        item = menu.Append(wx.ID_ABOUT, "About", "About this application")
        self.Bind(wx.EVT_MENU, self.OnAbout, item)
        item = menu.Append(wx.ID_EXIT, "E&xit\tCtrl-Q", "Close this application")
        self.Bind(wx.EVT_MENU, self.OnExit, item)

        menubar = wx.MenuBar()
        menubar.Append(menu, "&File")
        self.SetMenuBar(menubar)

        # initialize the print data and set some default values
        self.pdata = wx.PrintData()
        self.pdata.SetPaperId(wx.PAPER_LETTER)
        self.pdata.SetOrientation(wx.PORTRAIT)
        self.margins = (wx.Point(15,15), wx.Point(15,15))

    def OnExit(self, evt):
        self.Close()

    def OnAbout(self, evt):
        wx.MessageBox('Print framework sample application\n'
                      '\n'
                      'Using wxPython %s' % wx.version(),
                      'About')

    def OnClearSelection(self, evt):
        # Defer resetting the insertion point so it runs after the default
        # focus handling (which selects the text) has completed.
        evt.Skip()
        wx.CallAfter(self.tc.SetInsertionPoint,
                     self.tc.GetInsertionPoint())

    def OnPageSetup(self, evt):
        """Show the page-setup dialog and store the chosen paper/margins."""
        data = wx.PageSetupDialogData()
        data.SetPrintData(self.pdata)

        data.SetDefaultMinMargins(True)
        data.SetMarginTopLeft(self.margins[0])
        data.SetMarginBottomRight(self.margins[1])

        dlg = wx.PageSetupDialog(self, data)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetPageSetupData()
            self.pdata = wx.PrintData(data.GetPrintData()) # force a copy
            self.pdata.SetPaperId(data.GetPaperId())
            #print_("paperID %r, paperSize %r" % (self.pdata.GetPaperId(), self.pdata.GetPaperSize()))
            self.margins = (data.GetMarginTopLeft(),
                            data.GetMarginBottomRight())
        dlg.Destroy()

    def OnPrintPreview(self, evt):
        """Open a print-preview window (the framework needs two printouts)."""
        data = wx.PrintDialogData(self.pdata)
        text = self.tc.GetValue()
        printout1 = TextDocPrintout(text, "title", self.margins)
        printout2 = TextDocPrintout(text, "title", self.margins)
        preview = wx.PrintPreview(printout1, printout2, data)
        if not preview:
            wx.MessageBox("Unable to create PrintPreview!", "Error")
        else:
            # create the preview frame such that it overlays the app frame
            frame = wx.PreviewFrame(preview, self, "Print Preview",
                                    pos=self.GetPosition(),
                                    size=self.GetSize())
            frame.Initialize()
            frame.Show()

    def OnPrint(self, evt):
        """Run the actual print job via wx.Printer."""
        data = wx.PrintDialogData(self.pdata)
        printer = wx.Printer(data)
        text = self.tc.GetValue()
        printout = TextDocPrintout(text, "title", self.margins)
        useSetupDialog = True
        if not printer.Print(self, printout, useSetupDialog) \
           and printer.GetLastError() == wx.PRINTER_ERROR:
            wx.MessageBox(
                "There was a problem printing.\n"
                "Perhaps your current printer is not set correctly?",
                "Printing Error", wx.OK)
        else:
            # Remember any settings the user changed in the print dialog.
            data = printer.GetPrintDialogData()
            self.pdata = wx.PrintData(data.GetPrintData()) # force a copy
        printout.Destroy()

    def OnPrintTest(self, evt):
        """Debug helper: dump the values returned by a raw wx.PrintDialog."""
        data = wx.PrintDialogData(self.pdata)
        dlg = wx.PrintDialog(self, data)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetPrintDialogData()
            print_()
            print_("GetFromPage:", data.GetFromPage())
            print_("GetToPage:", data.GetToPage())
            print_("GetMinPage:", data.GetMinPage())
            print_("GetMaxPage:", data.GetMaxPage())
            print_("GetNoCopies:", data.GetNoCopies())
            print_("GetAllPages:", data.GetAllPages())
            print_("GetSelection:", data.GetSelection())
            print_("GetCollate:", data.GetCollate())
            print_("GetPrintToFile:", data.GetPrintToFile())

            self.pdata = wx.PrintData(data.GetPrintData())
            print_()
            print_("GetPrinterName:", self.pdata.GetPrinterName())
        dlg.Destroy()
# Script entry point: build the app and main frame, then run the event loop.
app = wx.App()
frm = PrintFrameworkSample()
frm.Show()
app.MainLoop()
|
6,201 | 1476d4f488e6c55234a34dc5b6182e3b8ad4f702 | from django.core import serializers
from django.db import models
from uuid import uuid4
from django.utils import timezone
from django.contrib.auth.models import User
class Message(models.Model):
    """A single chat message with a client-supplied UUID and timestamp."""
    uuid = models.CharField(max_length=50)
    user = models.CharField(max_length=20)
    message = models.CharField(max_length=200)
    timestamp = models.DateTimeField()

    def json_decode(self, jsondata):
        """Populate this instance's fields from a decoded JSON mapping.

        Expects the keys 'id', 'message', 'user' and 'timestamp'.
        Does not save the instance.
        """
        self.uuid = jsondata['id']
        self.message = jsondata['message']
        self.user = jsondata['user']
        self.timestamp = jsondata['timestamp']

    def json_encode(self):
        """Return a plain dict representation suitable for JSON encoding."""
        # Renamed the local (was `dict`) so it no longer shadows the builtin.
        return {
            'id': self.uuid,
            'user': self.user,
            'message': self.message,
            'timestamp': self.timestamp,
        }

    def __unicode__(self):
        # Python 2-style display method (pre-2.0 Django):
        # "<timestamp> <user>: <message>"
        return str(self.timestamp)+" "+self.user+": "+self.message
|
6,202 | 8adf25fbffc14d6927d665931e54a7d699a3b439 | # -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: channel
# Author: Maria Camila Herrera Ramos
# Generated: Thu Aug 2 18:09:17 2018
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import channels
from gnuradio import gr
from gnuradio.filter import firdes
import Multiplexer
class channel(gr.hier_block2):
    """Hierarchical GNU Radio block modelling a transmission channel.

    The float input is impaired in three parallel ways -- an additive
    Gaussian noise path and two `channels.fading_model` paths (one with
    the boolean LOS flag True and `k`, one with False and 4.0 -- per the
    gnuradio API this toggles a line-of-sight component; confirm against
    the installed version).  A multiplexer selects which impaired copy
    reaches the output, driven by `tchannel`.
    """
    def __init__(self, k=4.0, tchannel=1, voltage=0):
        gr.hier_block2.__init__(
            self, "channel",
            gr.io_signature(1, 1, gr.sizeof_float*1),
            gr.io_signature(1, 1, gr.sizeof_float*1),
        )

        ##################################################
        # Parameters
        ##################################################
        self.k = k                # fading-model K parameter
        self.tchannel = tchannel  # multiplexer select input
        self.voltage = voltage    # Gaussian noise amplitude

        ##################################################
        # Blocks
        ##################################################
        # NOTE(review): this file was generated for Python 2 (GRC, 2018),
        # where 5/32000 is integer division and evaluates to 0; under
        # Python 3 it is 0.00015625.  Confirm which value was intended.
        self.channels_fading_model_0_0 = channels.fading_model( 8, 5/32000, False, 4.0, 0 )
        self.channels_fading_model_0 = channels.fading_model( 8, 5/32000, True, k, 0 )
        self.blocks_float_to_complex_0_0 = blocks.float_to_complex(1)
        self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
        self.blocks_complex_to_float_0_0_0 = blocks.complex_to_float(1)
        self.blocks_complex_to_float_0 = blocks.complex_to_float(1)
        self.blocks_add_xx_0 = blocks.add_vff(1)
        self.analog_noise_source_x_0 = analog.noise_source_f(analog.GR_GAUSSIAN, voltage, 0)
        self.Multiplexer_mux_0 = Multiplexer.mux(tchannel)

        ##################################################
        # Connections
        ##################################################
        # Mux input 0 = input + noise; input 1 = fading_model_0 path;
        # input 2 = fading_model_0_0 path; mux output -> block output.
        self.connect((self.Multiplexer_mux_0, 0), (self, 0))
        self.connect((self.analog_noise_source_x_0, 0), (self.blocks_add_xx_0, 0))
        self.connect((self.blocks_add_xx_0, 0), (self.Multiplexer_mux_0, 0))
        self.connect((self.blocks_complex_to_float_0, 0), (self.Multiplexer_mux_0, 2))
        self.connect((self.blocks_complex_to_float_0_0_0, 0), (self.Multiplexer_mux_0, 1))
        self.connect((self.blocks_float_to_complex_0, 0), (self.channels_fading_model_0_0, 0))
        self.connect((self.blocks_float_to_complex_0_0, 0), (self.channels_fading_model_0, 0))
        self.connect((self.channels_fading_model_0, 0), (self.blocks_complex_to_float_0_0_0, 0))
        self.connect((self.channels_fading_model_0_0, 0), (self.blocks_complex_to_float_0, 0))
        self.connect((self, 0), (self.blocks_add_xx_0, 1))
        self.connect((self, 0), (self.blocks_float_to_complex_0, 0))
        self.connect((self, 0), (self.blocks_float_to_complex_0_0, 0))

    # GRC-style accessors: each setter pushes the new value into the
    # corresponding live block.
    def get_k(self):
        return self.k

    def set_k(self, k):
        self.k = k
        self.channels_fading_model_0.set_K(self.k)

    def get_tchannel(self):
        return self.tchannel

    def set_tchannel(self, tchannel):
        self.tchannel = tchannel
        self.Multiplexer_mux_0.set_sel(self.tchannel)

    def get_voltage(self):
        return self.voltage

    def set_voltage(self, voltage):
        self.voltage = voltage
        self.analog_noise_source_x_0.set_amplitude(self.voltage)
|
6,203 | ac978accc821600ad8def04b9c7423fbe6759e43 | import re
import datetime as dt
from datetime import datetime
import time
import random
import json
import sys
import requests
import os
import pickle
import cv2
import numpy as np
import cPickle
import multiprocessing as mp
import math
root = "/datasets/sagarj/instaSample6000/"
# post_dir = root + "/"
videos_dir = root + "videos/"
#frame_dir = root + "AestheticSamples/"
sample_dir = root + "finesamples/"
sampledLog = "../Logs/instaLongSampling.txt"
def sampleVideo(videoPath , facesPath , postID , rate):
    # Decode the video at *videoPath* and save roughly one frame per
    # 1/rate seconds as "<postID>+<i>.jpg" under *facesPath*, appending a
    # "<postID>,<imageName>" record to `sampledLog` for each saved frame.
    # Returns the number of frames written.  (Python 2 / OpenCV 2.x code.)
    cap = cv2.VideoCapture(videoPath)
    #print videoPath
    totFrames = 0
    i = 0
    framesRead = 0
    framesSaved = 0
    # OpenCV 2.x property; returns NaN when the container reports no FPS.
    frameRate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
    if math.isnan(frameRate):
        frameRate = int(24 * rate)
    # From here on, frameRate is the save interval in decoded frames.
    frameRate = int(frameRate*rate)
    if frameRate == 0:
        frameRate = int(24 * rate)
    while True:
        ret, frame = cap.read()
        if ret:
            framesRead += 1
            procs = []  # NOTE(review): unused leftover, presumably from a multiprocessing attempt
            totFrames += 1
            cv2.waitKey(20)  # NOTE(review): only meaningful with a GUI window; slows the loop
            if totFrames%frameRate == 0:
                i = int(totFrames/frameRate)
                framesSaved +=1
                imageName = facesPath + "/" + str(postID) + "+" + str(i) + ".jpg"
                cv2.imwrite( imageName , frame)
                logline = str(postID) + "," + imageName
                #print logline
                # NOTE(review): cPickle.dump writes pickle framing into a
                # .txt log; a plain write() would produce a readable log.
                # The file is reopened per frame so partial runs still log.
                logfile = open(sampledLog, 'a+')
                cPickle.dump(logline , logfile);
                logfile.close()
        else:
            # read failed -> end of stream
            print "Done processing Post: %s with %d frames Read and %d saved at %d FPS"%(postID,framesRead,framesSaved,frameRate)
            return framesSaved
# def readJson(path):
# f = open(path)
# data = json.loads(f.read())
# return data
# def getPosts(postsDir):
# crawledPosts = os.listdir(postsDir)
# posts = []
# for post in crawledPosts:
# record = readJson(postsDir + post)
# p = record['data']
# if isinstance(p,dict):
# posts.append(p['records'][0])
# return posts
# def getMappingDict(postList):
# mapping = dict()
# for p in postList:
# postId = p['postId']
# vidName = p['videoUrl'].split('/')[5].split('?')[0]
# mapping[postId] = vidName
# return mapping
if __name__ == '__main__':
    # Walk every downloaded video and sample one frame per second
    # (rate=1) into the output directory, keyed by the post ID taken
    # from the file name (everything before the first '.').
    #postList = getPosts(post_dir)
    #mappingDict = getMappingDict(postList)
    vidList = os.listdir(videos_dir)
    for k in vidList:
        postID = k.split('.')[0]
        #sampledNumbers = sampleVideo(videos_dir+mappingDict[k] ,frame_dir , postID , 1)
        sampledNumbers = sampleVideo(videos_dir+k ,sample_dir , postID , 1)
6,204 | d975b74370acc72101f808e70bef64cee39a5ab8 | from typing import Dict, List
from .power_bi_querier import PowerBiQuerier
class DeathsByEthnicity(PowerBiQuerier):
    """Power BI query for deaths broken down by race/ethnicity."""

    def __init__(self) -> None:
        # Configure the query before the base class fires it off.
        self.source = 'd'
        self.name = 'deaths by race'
        self.property = 'race'
        super().__init__()

    def _parse_data(self, response_json: Dict[str, List]) -> Dict[str, int]:
        """Map each ethnicity label (whitespace-trimmed) to its count."""
        parsed = super()._parse_data(response_json)
        counts: Dict[str, int] = {}
        for label, count in parsed:
            counts[label.strip()] = count
        return counts
|
6,205 | d37187f067ddff94015e639a1759dddced817945 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import lightgbm as lgb
from typing import List, Text, Tuple, Union
from ...model.base import ModelFT
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from ...model.interpret.base import LightGBMFInt
from ...data.dataset.weight import Reweighter
from qlib.workflow import R
class LGBModel(ModelFT, LightGBMFInt):
    """LightGBM Model"""

    def __init__(self, loss="mse", early_stopping_rounds=50, num_boost_round=1000, **kwargs):
        # Only "mse" (regression) and "binary" objectives are supported.
        if loss not in {"mse", "binary"}:
            raise NotImplementedError
        self.params = {"objective": loss, "verbosity": -1}
        # Remaining kwargs are forwarded verbatim as LightGBM parameters.
        self.params.update(kwargs)
        self.early_stopping_rounds = early_stopping_rounds
        self.num_boost_round = num_boost_round
        self.model = None

    def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:
        """
        The motivation of current version is to make validation optional
        - train segment is necessary;

        Returns a list of (lgb.Dataset, segment_name) pairs: one for
        "train" and, when the dataset defines it, one for "valid".
        """
        ds_l = []
        assert "train" in dataset.segments
        for key in ["train", "valid"]:
            if key in dataset.segments:
                df = dataset.prepare(key, col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
                if df.empty:
                    raise ValueError("Empty data from dataset, please check your dataset config.")
                x, y = df["feature"], df["label"]

                # Lightgbm need 1D array as its label
                if y.values.ndim == 2 and y.values.shape[1] == 1:
                    y = np.squeeze(y.values)
                else:
                    raise ValueError("LightGBM doesn't support multi-label training")

                # Optional per-sample weights supplied by a Reweighter.
                if reweighter is None:
                    w = None
                elif isinstance(reweighter, Reweighter):
                    w = reweighter.reweight(df)
                else:
                    raise ValueError("Unsupported reweighter type.")
                ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))
        return ds_l

    def fit(
        self,
        dataset: DatasetH,
        num_boost_round=None,
        early_stopping_rounds=None,
        verbose_eval=20,
        evals_result=None,
        reweighter=None,
        **kwargs,
    ):
        """Train the booster; per-round eval metrics are replayed into qlib's recorder R."""
        if evals_result is None:
            evals_result = {}  # in case of unsafety of Python default values
        ds_l = self._prepare_data(dataset, reweighter)
        ds, names = list(zip(*ds_l))
        early_stopping_callback = lgb.early_stopping(
            self.early_stopping_rounds if early_stopping_rounds is None else early_stopping_rounds
        )
        # NOTE: if you encounter error here. Please upgrade your lightgbm
        verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
        evals_result_callback = lgb.record_evaluation(evals_result)
        self.model = lgb.train(
            self.params,
            ds[0],  # training dataset
            num_boost_round=self.num_boost_round if num_boost_round is None else num_boost_round,
            valid_sets=ds,
            valid_names=names,
            callbacks=[early_stopping_callback, verbose_eval_callback, evals_result_callback],
            **kwargs,
        )
        # Log every recorded metric, one step per boosting round;
        # "@" is replaced because it is not a valid metric-name character.
        for k in names:
            for key, val in evals_result[k].items():
                name = f"{key}.{k}"
                for epoch, m in enumerate(val):
                    R.log_metrics(**{name.replace("@", "_"): m}, step=epoch)

    def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
        """Return a pd.Series of predictions for *segment*, indexed like the features."""
        if self.model is None:
            raise ValueError("model is not fitted yet!")
        x_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I)
        return pd.Series(self.model.predict(x_test.values), index=x_test.index)

    def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):
        """
        finetune model

        Parameters
        ----------
        dataset : DatasetH
            dataset for finetuning
        num_boost_round : int
            number of round to finetune model
        verbose_eval : int
            verbose level
        """
        # Based on existing model and finetune by train more rounds
        # NOTE(review): _prepare_data returns a list of (lgb.Dataset, name)
        # tuples, so this unpack assumes exactly two segments and leaves
        # `dtrain` as a tuple -- the `.empty` check below looks unreachable
        # as written (lgb.Dataset has no `.empty`).  Confirm intent.
        dtrain, _ = self._prepare_data(dataset, reweighter)  # pylint: disable=W0632
        if dtrain.empty:
            raise ValueError("Empty data from dataset, please check your dataset config.")
        verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
        self.model = lgb.train(
            self.params,
            dtrain,
            num_boost_round=num_boost_round,
            init_model=self.model,
            valid_sets=[dtrain],
            valid_names=["train"],
            callbacks=[verbose_eval_callback],
        )
|
6,206 | 09d13fe6b090850782feb601412cf135d497136f | from catalyst_rl.contrib.registry import (
Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,
Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,
TRANSFORMS
)
from catalyst_rl.core.registry import Callback, CALLBACKS
from catalyst_rl.utils.tools.registry import Registry
def _callbacks_loader(r: Registry):
    # Imported lazily inside the loader so the callbacks module is only
    # pulled in when the registry is first used (avoids import cycles and
    # keeps the package import cheap).
    from catalyst_rl.dl import callbacks as m
    r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
__all__ = [
"Callback",
"Criterion",
"Optimizer",
"Scheduler",
"Module",
"Model",
"Sampler",
"Transform",
"CALLBACKS",
"CRITERIONS",
"GRAD_CLIPPERS",
"MODELS",
"MODULES",
"OPTIMIZERS",
"SAMPLERS",
"SCHEDULERS",
"TRANSFORMS",
]
|
6,207 | cb29ee8687b469923896ceb7d5a6cd7f54b2c34e | #!flask/bin/python
import os, json
import requests
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', default=None)
FROM_EMAIL = os.environ.get('FROM_EMAIL', default=None)
TO_EMAIL = os.environ.get('TO_EMAIL', default=None)
if not SENDGRID_API_KEY:
raise ValueError("Need to set Sendgrid API Key (SENDGRID_API_KEY)")
if not FROM_EMAIL or not TO_EMAIL:
raise ValueError("Need to set email info (FROM_EMAIL and TO_EMAIL")
sendgrid_url = 'https://api.sendgrid.com/v3/mail/send'
def build_request_body(email):
    """Build the SendGrid v3 /mail/send JSON payload for *email*.

    *email* is a mapping with the keys 'email', 'name', 'subject' and
    'body'.  Missing or falsy values fall back to defaults (FROM_EMAIL,
    "Anonymous", a generic subject); the recipient is always TO_EMAIL.

    Returns the payload serialized as a JSON string.
    """
    # ROBUSTNESS FIX: .get() tolerates absent keys as well as empty
    # values (the original raised KeyError on a missing key even though
    # it already handled falsy values).
    from_email = email.get('email') or FROM_EMAIL
    name = email.get('name') or "Anonymous"
    subject = email.get('subject') or "Portfolio contact form message"
    body = email.get('body')

    req_body = json.dumps({
        "personalizations": [
            {
                "to": [
                    {
                        "email": TO_EMAIL
                    }
                ],
                "subject": subject
            }
        ],
        "from": {
            "email": from_email,
            "name": name
        },
        "content": [
            {
                "type": "text/plain",
                "value": body
            }
        ]
    })
    return req_body
def send_mail(email):
    # Submit the message described by *email* (the mapping consumed by
    # build_request_body) to the SendGrid v3 Mail Send endpoint.
    # Returns the requests.Response so the caller can inspect the status.
    headers = {
        "Authorization": f"Bearer {SENDGRID_API_KEY}",
        "Content-Type": "application/json"
    }
    email_body = build_request_body(email)
    response = requests.post(sendgrid_url, headers=headers, data=email_body)
    # Debug aid: SendGrid returns an empty body on success (HTTP 202).
    print(response.text)
    return response
|
6,208 | cf07344808f2d91d8949cfc4beb9f923926e6851 | import numpy as np
import pickle as p
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from numpy.random import randn
from neural_network import network
# Train a small fully-connected network (1-8-8-1) to fit a scaled sine,
# then plot the training points against the network's prediction.
net = network([1,8,8,1], filename='./data/x', bias=True)
# net.load_random()
net.load()

# Training set: n points of (1 + sin(10x)) / 2 sampled on [0, 1].
n = 32
x = np.array([[x] for x in np.linspace(0,1,n)])
y = (1+np.sin(10*x))/2
X = [xx[0] for xx in x]
Y = [yy[0] for yy in y]
plt.plot(x,y)

c = 1
ii = 0
# Fixed-iteration training loop; the cost returned by the trainer is
# printed each step and the weights are checkpointed every iteration.
for ii in range(1001):
    # c = net.gradient_training(x,y,dw=0.1)
    c = net.retarded_training(x,y)
    print(ii,c)
    net.save()
    # if ii%10==0 and ii!=0:
    #     net.shake(x,y,n=10)
    #     net.save()
    # # ii+=1

# Evaluate on a denser grid: red dots = training data, line = prediction.
N = 128
plt.plot(X,Y, 'ro')
X = np.linspace(0,1,N)
Y = []
for x in X:
    Y += [net.forward([x])[0]]
plt.plot(X,np.array(Y))
plt.show()

# (scratch backprop notes kept from the original)
# for i in range(len(self.z)):
#     if i==0:
#         yHat = self.forward(x)
#         delta = np.multiply(yHat - y, sigmoidPrime(self.z[-1]))
#         dJdW = np.dot(self.a[-2].T, delta)
#     else:
#         delta = np.dot(delta, self.W[-i].T)*sigmoidPrime(self.z[-1-i])
#         dJdW = np.dot(self.a[-2-i].T, delta)
#     dJ += [dJdW]
# dJ = dJ[::-1]
6,209 | 8fcbaf2663c22015a0c47f00c2d4fb8db6a5c308 | # from models import dist_model
# model = dist_model.DistModel()
from os.path import join
import models
import util.util as util
import matplotlib.pylab as plt
use_gpu = True
fig_outdir = r"C:\Users\ponce\OneDrive - Washington University in St. Louis\ImageDiffMetric"
#%%
# Two LPIPS-style perceptual metrics sharing one backbone: SpatialDist
# returns a per-location distance map, PerceptLoss a single scalar.
net_name = 'squeeze'
SpatialDist = models.PerceptualLoss(model='net-lin', net=net_name, colorspace='rgb', spatial=True, use_gpu=True, gpu_ids=[0])
PerceptLoss = models.PerceptualLoss(model='net-lin', net=net_name, colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0])
#%%
# Two images from the same generation of an evolution experiment.
imgdir = r"\\storage1.ris.wustl.edu\crponce\Active\Stimuli\2019-06-Evolutions\beto-191212a\backup_12_12_2019_10_47_39"
file0 = "block048_thread000_gen_gen047_001896.jpg"
file1 = "block048_thread000_gen_gen047_001900.jpg"
img0_ = util.load_image(join(imgdir,file0))
img1_ = util.load_image(join(imgdir,file1))
img0 = util.im2tensor(img0_) # RGB image from [-1,1]
if(use_gpu):
    img0 = img0.cuda()
img1 = util.im2tensor(img1_)
if(use_gpu):
    img1 = img1.cuda()
#%
# Compute distance
dist01 = SpatialDist.forward(img0,img1)#.item()
dist_sum = PerceptLoss.forward(img0,img1).item()
# dists.append(dist01)
# print('(%s, %s): %.3f'%(file0,file1,dist01))
# f.writelines('(%s, %s): %.3f'%(file0,file1,dist01))
# %
# Figure: the two images side by side plus the spatial distance map,
# titled with the scalar perceptual distance; saved then shown.
plt.figure(figsize=[9,3.5])
plt.subplot(131)
plt.imshow(img0_)
plt.subplot(132)
plt.imshow(img1_)
plt.subplot(133)
plt.pcolor(dist01.cpu().detach().squeeze())
plt.axis('image')
plt.gca().invert_yaxis()
plt.title("Dist %.2f"%dist_sum)
plt.savefig(join(fig_outdir,"Diff1212_1896_1900_%s.png" % net_name))
plt.show()
6,210 | 4a2437d3d6ba549910bc30a67bf391b9bbafd25f | from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import PostForm
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from .models import Post
from django.contrib import messages
# Create your views here.
@login_required
def post_create(request):
    """
    Create a new Post owned by the logged-in user.

    Renders the (possibly bound) form on GET and on validation errors;
    on success, saves the post and redirects to its absolute URL.
    """
    # Bind the form only to submitted data; `or None` keeps the form
    # unbound (and therefore error-free) on a plain GET.
    form = PostForm(request.POST or None, request.FILES or None)
    # BUG FIX: check the request method instead of the truthiness of
    # request.POST -- an empty POST body previously skipped validation
    # (and its error message) entirely.
    if request.method == 'POST':
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.save()
            messages.success(request, "Post created!")
            return HttpResponseRedirect(instance.get_absolute_url())
        else:
            messages.error(request, "Sorry! Something went wrong.", extra_tags="")
    context = {
        'title': "Create Post",
        'form' : form,
    }
    return render(request, 'post/create.html', context)
def post_view(request, slug):
    """Render a single Post looked up by its slug (404 when absent)."""
    post = get_object_or_404(Post, slug=slug)
    return render(request, 'post/view.html', {'instance': post})
|
6,211 | dd59f3b1d8b17defe4e7f30fec594d01475319d2 | # -*- coding: utf-8 -*-
"""
Created on Sat May 2 21:31:37 2020
@author: Emmanuel Torres Molina
"""
"""
Ejercicio 10 del TP2 de Teoría de los Circuitos II:
Un tono de 45 KHz y 200 mV de amplitud es distorsionada por un tono de 12 KHz
y 2V de amplitud. Diseñar un filtro pasa altos que atenúe la señal
interferente, de tal forma que el remanente no sea mayor que el 2% de los 200 mV.
La ganancia en alta frecuencia deberá ser de 0 db y la máxima atenuación
en la banda de paso menor a 1 dB. Emplear la aproximación que necesite menor
número de etapas.
En este caso el Filtro está Sintetizado por un Estructura RLC Pasiva + RL Pasivo.
"""
import numpy as np
from scipy.signal import TransferFunction as transf_f
import scipy.signal as sig
from splane import bodePlot, pzmap
from matplotlib import pyplot as plt
plt.close ('all')
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Vector Tiempo:
t0 = 0.0 # Tiempo Inicial
tf = 0.005
dt = 0.00005 # Incremento
t = np.arange (t0, tf, dt)
# ---------------------------------------------------------------------------
# Tono de Interés:
f_t = 45 * 10**3 # Frecuecia del Tono de mi Interés [Hz]
w_t = 2 * np.pi * f_t # [rad/seg]
A_t = 0.2 # Amplitud de mi Tono [V]
s_t = A_t * np.sin ( w_t * t )
# ---------------------------------------------------------------------------
# Ruido Interferente:
f_r = 12 * 10**3 # Frecuencia del Ruido Interferente [Hz]
w_r = 2 * np.pi * f_r # [rad/seg]
A_r= 2 # Amplitud del Ruido [V]
r_t = A_r * np.sin ( w_r * t )
sgnal = s_t + r_t
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Valores de los Elementos del Circuito:
# Etapa 1: RLC Pasivo
R1 = 290
C1 = 3.5e-9
L1 = 3.5e-3
k1 = 1
# Etapa 2: RL Pasivo
R2 = 700
C2 = 3.5e-9
L2 = 1.03e-3
k2 = 1
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Diseño del Filtro: Vamos a Realizar un Filtro High-Pass:
# Requisitos de Plantilla
alfa_max = 0.9 # Piden que sea menor a 1dB
alfa_min = 54 # el remanente no sea mayor que el 2% de los 200 mV
wp_hp = w_t
ws_hp = w_r
# Normalizo las Pulsaciones Angulares usando como norma: wp_hp
wp_hp_norm = wp_hp / wp_hp
ws_hp_norm = ws_hp / wp_hp
w0 = np.sqrt ( 1 / (L1*C1) )
# ---------------------------------------------------------------------------
# Filtro Prototipo Low-Pass: Transformación en Frecuencia: w_HP = -1 / w_LP
wp_lp_norm = abs(-1 / wp_hp_norm)
ws_lp_norm = abs(-1 / ws_hp_norm)
# Voy a Utilizar Aproximación de Chebyshev para Diseñal el Filtro:
eps = np.sqrt ( (10 **(alfa_max/10) ) - 1 )
# Orden del Filtro
N = np.arccosh ( np.sqrt ( (10**(alfa_min/10) - 1) / eps**2 ) ) / np.arccosh (ws_lp_norm)
N = np.ceil ( N ) # Redondeo para arriba
den1_lp = [1, 0.29, 1]
den2_lp = [1, 0.7, 0.29]
p1_lp = np.roots ( den1_lp )
p2_lp = np.roots ( den2_lp )
my_z_lp = np.array ([])
my_p_lp = np.concatenate ( (p1_lp, p2_lp), axis = None )
my_k_lp = 1 * 0.29
NUM, DEN = sig.zpk2tf ( my_z_lp, my_p_lp, my_k_lp )
NUM_lp, DEN_lp = sig.lp2lp ( NUM, DEN, w0 )
my_tf_lp = transf_f (NUM_lp,DEN_lp)
# ---------------------------------------------------------------------------
# Filtro Destino - Filtro High-Pass:
# Calculo W0:
NUM_hp, DEN_hp = sig.lp2hp ( NUM, DEN, w0 )
my_tf_hp = transf_f ( NUM_hp, DEN_hp )
my_z_hp, my_p_hp, my_k_hp = sig.tf2zpk (NUM_hp, DEN_hp )
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Filtrado de la Señal:
t, s_filtrada, x = sig.lsim2 ((my_tf_hp), sgnal, t )
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Ploteo de las Señales, Respuesta en Frecuencia, etc.
fig1, axs = plt.subplots(4,1)
axs[0].plot ( t, s_t )
axs[0].grid ('True')
axs[0].set_title ('Señal Original')
axs[0].set_ylim(-0.2,0.2)
axs[0].set_ylabel('[V]')
axs[1].plot ( t, r_t )
axs[1].grid ('True')
axs[1].set_title ('Ruido Interferente')
axs[1].set_ylabel('[V]')
axs[1].set_xlim(0)
axs[2].plot (t, s_t + r_t )
axs[2].grid ('True')
axs[2].set_title ('Señal a Filtrar')
axs[2].set_ylabel('[V]')
axs[2].set_xlim(0)
axs[3].plot (t, s_filtrada )
axs[3].grid ('True')
axs[3].set_title ( 'Señal Filtrada' )
axs[3].set_xlabel ('t[seg]')
axs[3].set_ylabel('[V]')
axs[3].set_ylim(-0.2,0.2)
axs[3].set_xlim(0)
# Respuesta en Frecuencia:
bodePlot (my_tf_lp, 'Filtro Prototipo - Low Pass')
pzmap (my_tf_lp)
bodePlot (my_tf_hp, 'Filtro Destino - High Pass')
pzmap (my_tf_hp)
|
6,212 | 869bbc8da8cdb5de0bcaf5664b5482814daae53a | import requests
import codecs
import urllib.request
import time
from bs4 import BeautifulSoup
from html.parser import HTMLParser
import re
import os
#input
Result_File="report.txt"

# Start fresh: delete any previous report.
#deleting result file if exists
if os.path.exists(Result_File):
    os.remove(Result_File)

# Read the saved option-chain HTML page and parse it.
#reading html file and parsing logic
f=codecs.open("test.html", 'r', 'utf-8')
xhtml = f.read()
data = []
# instantiate the parser and feed data to it
soup = BeautifulSoup(xhtml,"html.parser")
#print(soup)
main_table = soup.find('table', { 'id': 'octable' })
#print(main_table)
with open(Result_File, 'w') as r:
    # Pipe-separated header row (no trailing newline in the original).
    r.write("OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE")
    # NOTE(review): indentation was lost in this copy of the file; the loop
    # below is assumed to run inside the `with` block (so `r` is open) with
    # one newline written per table row.  Confirm against the original.
    for rows in main_table.find_all('tr'):
        for cell in rows.find_all('td'):
            #print(data)
            if(len(cell.text) != 0):
                cell_text = cell.text.strip()
                a = re.sub(r"\n", "", cell_text, 0)
                r.write(a)
                r.write("|")
        r.write("\n")
|
6,213 | 774e607c693fa2d5199582302e466674f65b6449 | # 다이얼
# Letters on each rotary-dial digit, starting at digit 2 (index 0 -> "ABC").
DIAL = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']


def dial_time(word):
    """Return the total seconds needed to dial *word* (uppercase letters).

    Dialing the digit carrying a letter group takes its index in DIAL
    plus 3 seconds (digit 2 -> 3 s, ..., digit 9 -> 10 s), exactly as
    the original inline loop computed.
    """
    total = 0
    for ch in word:
        for group in DIAL:
            if ch in group:
                total += DIAL.index(group) + 3
    return total


if __name__ == '__main__':
    # Preserve the original script behavior: read a word, print the time.
    print(dial_time(input()))
def power_sum(n, x):
    """Return n**x + 6**x (the original inline computation, made testable)."""
    return pow(n, x) + pow(6, x)


if __name__ == "__main__":
    # Preserve the original script behavior: prompt for n and x, print the
    # result ("natija" means "result").
    n = int(input("n="))
    x = int(input("x="))
    natija = power_sum(n, x)
    print(natija)
6,215 | a1e54a0f593149c1d97e64342c99f0ab8aa28fa9 | """ Guess the number! """
import random, generic
def check_answer(player_guess, guess_value):
    """
    Compares a player's guess and the number to guess
    Returns True if the player guessed correctly
    Returns False by default
    """
    if player_guess == guess_value:
        print('correct!')
        return True
    # Wrong guess: hint which direction to adjust.
    if player_guess > guess_value:
        print('guess too high!')
    else:
        print('guess too low!')
    return False
def check_input(min_guess_range, max_guess_range):
    """ Asks user to enter guess and returns a guess within defined min and max guess range """
    while True:
        raw = input('enter your guess: ')
        try:
            guess = int(raw)
        except ValueError:
            print('numbers only!')
            continue
        if not (min_guess_range <= guess <= max_guess_range):
            print('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))
            continue
        return guess
def guess_number(min_guess_range, max_guess_range):
    """ Returns a guess that is within defined min and max guess range """
    print('guess the number between {} and {}!'.format(min_guess_range, max_guess_range))
    return check_input(min_guess_range, max_guess_range)
def generate_guess_value(min_guess_range=1, max_guess_range=10):
    """
    Return (value, min_guess_range, max_guess_range) where *value* is a
    random number to guess, inclusive of BOTH bounds.
    Min and Max range can be custom. Default values are 1 - 10
    """
    # BUG FIX: random.randrange excludes the upper bound, so the maximum
    # allowed guess could never be the answer; randint is inclusive.
    return random.randint(min_guess_range, max_guess_range), min_guess_range, max_guess_range
def main():
    """Run guessing rounds until the player declines to continue."""
    run_game = True
    while run_game == True:
        # Fresh target and a budget of 3 guesses for every round.
        guess_value, min_guess_range, max_guess_range = generate_guess_value(1,4)
        guess_count, guess_limit = 1, 3
        end_game = False
        while end_game == False:
            print(f'You have {guess_limit - guess_count + 1} remaining. ')
            player_guess = guess_number(min_guess_range, max_guess_range)
            player_won = check_answer(player_guess, guess_value)
            guess_count = guess_count + 1
            if player_won == True:
                print(f'You win! congrats! ')
                end_game = True
            elif guess_count > guess_limit:
                print(f'You ran out of guesses! you lose!')
                end_game = True
        # generic.run_again() presumably asks whether to play another round
        # (defined in the local `generic` module -- not visible here).
        run_game = generic.run_again()
if __name__ == '__main__':
main()
# [*] number to guess is generated within min - max range
# [*] guess limit is set
# [*] guess is made -> check guess within range
# [*] guess made compared to number to guess
# [*] loop again until guess limit runs out or until guess made matchess number to guess
|
6,216 | 63e96b41906f49f557529a0815da7314d74f6c33 | width,height = int(input("Width? ")), int(input("Height? "))
on_row = 0
# Draw a hollow rectangle of '*' characters.
# NOTE(review): the loop condition is `<= height`, so height+1 rows are printed
# (rows 0 and `height` are the solid borders) -- confirm that is the intended
# reading of "Height?".
while on_row <= height:
    if on_row == 0 or on_row == height:
        print("*"*width)
    else:
        # Interior rows: star, (width-2) spaces, star.
        stars = "*" + " "*(width-2) + "*"
        print(stars)
    on_row += 1
# height = 0
# width = 0
# while True:
# try:
# height = int(input("Height? \n"))
# width = int(input("width? \n"))
# break
# except ValueError:
# print("choose an integer")
# print("* " * width)
# while height > 0:
# print(f"* " + " " * {width} + " *")
# height -+ 1
# print("* " * width) |
6,217 | aef45cb8ea9fcaeffcca147da7637536bcc4b226 | from rest_framework import viewsets
from recruitment.serializers.LocationSerializer import LocationSerializer
from recruitment.models.Location import Location
import django_filters
class LocationViewSet(viewsets.ModelViewSet):
    """CRUD API over Location records, hiding soft-deleted rows (deleted=0)."""
    queryset = Location.objects.all().filter(deleted=0)
    serializer_class = LocationSerializer
    # NOTE(review): relies on `django_filters.rest_framework` being reachable
    # from the bare `import django_filters` above -- verify, since newer
    # django-filter versions need the submodule imported explicitly.
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
|
6,218 | 34ecf2bd9bc72a98aba4584880a198dd24899dbe | import os, re
# Minimal Django settings used to run the maintenancemode test suite.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # In-memory SQLite: throwaway database rebuilt on every test run.
        'NAME': ':memory:'
    }
}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.sessions',
    'django.contrib.contenttypes',
    'django.contrib.sites',
    'maintenancemode',
)
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'maintenancemode.middleware.MaintenanceModeMiddleware',
)
ROOT_URLCONF = 'maintenancemode.tests'
SITE_ID = 1
MAINTENANCE_MODE = True  # or ``False`` and use ``maintenance`` command
MAINTENANCE_IGNORE_URLS = (
    # Requests matching these patterns are served normally even in maintenance mode.
    re.compile(r'^/ignored.*'),
)
|
6,219 | 3ac69068db94f45bc44a8295a10603126d004b34 | t = int(input())
m = 0
# For each of t test cases, count the subarrays of `arr` whose sum equals s,
# using prefix sums and a hash map (O(n) per case).
while(m < t):
    n = int(input())
    arr = list(map(int, input().strip().split(" ")))
    s = int(input())
    hash_map = {}  # prefix_sum -> number of times that prefix sum has occurred
    curr_sum = 0
    count = 0
    for i in range(len(arr)):
        curr_sum += arr[i]
        # Subarray starting at index 0.
        if curr_sum == s:
            count += 1
        # Every earlier prefix equal to curr_sum - s closes a subarray of sum s.
        if curr_sum - s in hash_map:
            count += hash_map[curr_sum - s]
        if curr_sum not in hash_map:
            hash_map[curr_sum] = 0
        hash_map[curr_sum] += 1
    print(count)
    m += 1
|
6,220 | 3da4896f368f067a339db5cc89201c93ba8166ce | from __future__ import annotations
import asyncio
import signal
from functools import wraps
from typing import TYPE_CHECKING, Awaitable, Callable
import click
from .utils import import_obj
if TYPE_CHECKING:
from donald.manager import Donald
from .types import TV
def import_manager(path: str) -> Donald:
    """Resolve a dotted python path to the Donald manager instance it names."""
    loaded: Donald = import_obj(path)
    return loaded
def process_await(fn: Callable[..., Awaitable[TV]]) -> Callable[..., TV]:
    """Decorator: run an async click command synchronously to completion.

    The wrapper receives the click context and drives *fn* with
    loop.run_until_complete, using the loop stashed in ctx.obj by main().
    """
    @wraps(fn)
    @click.pass_context
    def wrapper(ctx, *args, **kwargs):
        loop = ctx.obj["loop"]
        return loop.run_until_complete(fn(ctx, *args, **kwargs))
    return wrapper
@click.group()
@click.option(
    "-M",
    "--manager",
    "manager",
    required=True,
    help="Python path to the manager",
)
@click.pass_context
def cli(ctx: click.Context, manager: str):
    """Root command group: import the manager and stash it for subcommands."""
    ctx.obj["manager"] = import_manager(manager)
@cli.command(help="Launch a worker")
@click.option("-S", "--scheduler", "scheduler", is_flag=True, help="Start a scheduler")
@process_await
async def worker(ctx: click.Context, *, scheduler: bool = False, **params):
    """Launch a worker."""
    loop = ctx.obj["loop"]
    async def stop():
        # Detach the handlers first so a repeated signal is not re-handled.
        loop.remove_signal_handler(signal.SIGTERM)
        loop.remove_signal_handler(signal.SIGINT)
        # `worker` is rebound below to the created worker instance before any
        # signal can fire, so this awaits the instance's stop(), not this command.
        await worker.stop()
        if scheduler:
            await manager.scheduler.stop()
        await manager.stop()
    # Graceful shutdown on Ctrl-C / SIGTERM.
    loop.add_signal_handler(signal.SIGINT, lambda: loop.create_task(stop()))
    loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))
    manager: Donald = ctx.obj["manager"]
    await manager.start()
    if scheduler:
        manager.scheduler.start()
    worker = manager.create_worker(show_banner=True, **params)
    worker.start()
    await worker.wait()
@cli.command(help="Launch a scheduler")
@process_await
async def scheduler(ctx: click.Context):
    """Run only the periodic-task scheduler (no worker)."""
    loop = ctx.obj["loop"]
    async def stop():
        # Detach the handlers first so a repeated signal is not re-handled.
        loop.remove_signal_handler(signal.SIGTERM)
        loop.remove_signal_handler(signal.SIGINT)
        await manager.scheduler.stop()
        await manager.stop()
    loop.add_signal_handler(signal.SIGINT, lambda: loop.create_task(stop()))
    loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))
    manager: Donald = ctx.obj["manager"]
    await manager.start()
    manager.scheduler.start()
    await manager.scheduler.wait()
def main():
    """CLI entry point: create an event loop and hand it to the command group."""
    # asyncio.get_event_loop() is deprecated (since 3.10) when no loop is
    # running; create one explicitly and register it as the current loop so
    # add_signal_handler/run_until_complete in the commands keep working.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    cli(obj={"loop": loop})
if __name__ == "__main__":
main()
|
6,221 | 4c0c88f46c2d4607d9ac00755bf122e847ea2f6a | """Datasets, Dataloaders, and utils for dataloading"""
from enum import Enum
import torch
from torch.utils.data import Dataset
class Partition(Enum):
    """Names of dataset partitions (train / validation / test splits)."""
    TRAIN = 'train'
    VAL = 'val'
    TEST = 'test'
class RandomClassData(Dataset):
    """Uniformly distributed features and uniformly sampled discrete targets.

    Fix: the previous docstring claimed "standard normal" features, but
    torch.rand draws from Uniform[0, 1) (standard normal would be torch.randn).
    Behavior is unchanged; the documentation now matches the implementation.
    """

    def __init__(self, n_samples: int, n_dim: int, n_classes: int = 2):
        """Create n_samples feature vectors of length n_dim with labels in [0, n_classes)."""
        super().__init__()
        # features: (n_samples, n_dim) floats drawn from Uniform[0, 1)
        self.features = torch.rand((n_samples, n_dim))
        # targets: (n_samples,) integer class labels drawn uniformly
        self.targets = torch.randint(0, n_classes, size=(n_samples,))

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, i):
        return self.features[i], self.targets[i]
|
6,222 | fd7fe2e4ffaa4de913931e83fd1de40f79b08d98 | from django.shortcuts import render
from django.http import response, HttpResponse, Http404
from django.views.generic import TemplateView
from django.db.models import Q
# Create your views here.
class Countries(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
return Countries.objects.all()
|
6,223 | 4cd1e385d18086b1045b1149d5f4573eaf9270c3 | '''
leetcode 338. 比特位计数
给定一个非负整数 num。对于 0 ≤ i ≤ num 范围中的每个数字 i ,计算其二进制数中的 1 的数目并将它们作为数组返回。
'''
class Solution(object):
    """LeetCode 338 -- Counting Bits."""

    def countBits(self, n):
        """Return a list where entry i is the count of 1-bits in i, for 0 <= i <= n.

        DP over halving: popcount(i) = popcount(i >> 1) + (i & 1).

        :type n: int
        :rtype: List[int]
        """
        bits = [0] * (n + 1)
        for value in range(1, n + 1):
            bits[value] = bits[value >> 1] + (value & 1)
        return bits
6,224 | 28eb1d7a698480028fb64827746b3deec0f66a9a | def erato(n):
m = int(n ** 0.5)
sieve = [True for _ in range(n+1)]
sieve[1] = False
for i in range(2, m+1):
if sieve[i]:
for j in range(i+i, n+1, i):
sieve[j] = False
return sieve
input()  # first input line (the count of numbers) is read and discarded
# Count how many of the given numbers are prime, using one sieve sized to the max.
l = list(map(int, input().split()))
max_n = max(l)
prime_l = erato(max_n)
ans = 0
for i in l:
    if prime_l[i]:
        ans += 1
print(ans)
|
6,225 | 82801ce564f4f29e084e6f842d7868eb60f582cb | from pymt_heat import Heatmodel
# Instantiate the heat model and print its reported component name.
heat = Heatmodel()
n = heat.get_component_name()
print(n)
|
6,226 | ff6b7e2097d78b013f8f5989adee47156579cb9e | from flask import Flask
from flask import request
from flask import session
from flask import jsonify
from flask import make_response
import mariadb
import datetime
import json
import scad_utils
# When True the app runs against a frozen clock for reproducible testing.
testing: bool = True
if testing:
    fake_datetime = datetime.datetime(2020, 8, 7, 15, 10)
app = Flask(__name__)
# NOTE(review): hard-coded secret key and DB credentials should come from
# configuration/environment in production.
app.config["SECRET_KEY"] = "clave ultra secreta"
app.permanent_session_lifetime = datetime.timedelta(minutes=20)
# Grace period a teacher has after a course's start time to mark attendance.
teacher_time_tolerance = datetime.timedelta(minutes=20)
db = mariadb.ConnectionPool(
    user="brocolio",
    password="brocolio",
    host="localhost",
    pool_name="pul",
    pool_size=20,
    database="scad",
)
# tmp_cursor: mysql.cursor.MySQLCursor = db.cursor()
# tmp_cursor.execute("SET lc_time_names = 'es_PE';")
# tmp_cursor.close()
# English-to-Spanish day names (queries run with lc_time_names = 'es_PE').
spanish_days: dict = {
    "Monday": "lunes",
    "Tuesday": "martes",
    "Wednesday": "miércoles",
    "Thursday": "jueves",
    "Friday": "viernes",
    "Saturday": "sábado",
    "Sunday": "domingo",
}
# Make json serialization emit datetimes/dates as ISO-8601 and str() for the rest.
json.JSONEncoder.default = lambda self, obj: (
    obj.isoformat()
    if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)
    else str(obj)
)
@app.route("/login", methods=["POST"])
def login() -> dict:
db_connection = db.get_connection()
db_cursor = db_connection.cursor(named_tuple=True)
data: dict = request.get_json()
# consulta a la base de datos si el usuario y contrasena son validos
# consulta en la tabla docente
query: str = (
"select DocenteDNI, Nombre, Apellido, Usuario "
"from Docente "
"where Usuario=? and Contrasena=?"
)
db_cursor.execute(query, (data["Usuario"], data["Contrasena"]))
rows = db_cursor.fetchall()
if len(rows) == 1:
session.permanent = True
session["account_type"] = "Docente"
session["DocenteDNI"] = rows[0].DocenteDNI
session["Nombre"] = rows[0].Nombre
session["Apellido"] = rows[0].Apellido
session["Usuario"] = rows[0].Usuario
db_cursor.close()
db_connection.close()
return make_response({"account_type": session["account_type"]}, 200)
else:
# consulta en la tabla administrador
query: str = (
"select Usuario,Contrasena "
"from Administrador "
"where Usuario=? and Contrasena=?"
)
db_cursor.execute(query, (data["Usuario"], data["Contrasena"]))
rows = db_cursor.fetchall()
if len(rows) == 1:
session.permanent = True
session["account_type"] = "Administrador"
session["Usuario"] = rows[0].Usuario
db_cursor.close()
db_connection.close()
return make_response({"account_type": session["account_type"]}, 200)
# no se encontro nada
else:
db_cursor.close()
db_connection.close()
return make_response("pos a lo mejor se equivoco?", 401)
@app.route("/teacher_fullname", methods=["GET"])
def teacherFullname() -> dict:
if "account_type" not in session:
return make_response("pa que quieres saber eso jaja salu2", 401)
elif session["account_type"] == "Docente":
return {"Nombre": session["Nombre"], "Apellido": session["Apellido"]}
elif session["account_type"] == "Administrador":
return make_response("wey no!!!", 400)
@app.route("/time", methods=["GET"])
def time() -> dict:
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
return {
"date": current_datetime.strftime("%d/%m/%Y"),
"time": current_datetime.strftime("%H,%M,%S"),
}
@app.route("/teacher_course_list", methods=["GET"])
def teacherCourseList() -> list:
# verificar la sesion
if "account_type" not in session:
# no inicio sesion
return make_response("nope", 401)
elif session["account_type"] == "Docente":
# consultar la lista de cursos y si se han marcado o no
# un curso marcado se diferencia porque el valor de Hora de la tabla Marcacion
# es diferente de NULL
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
db_cursor.execute("SET lc_time_names = 'es_PE'")
query: str = (
"select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"where Dia=dayname(?) and DocenteDNI=? "
)
db_cursor.execute(
query, (current_datetime.strftime("%Y/%m/%d"), session["DocenteDNI"])
)
today_assigned_courses: list = db_cursor.fetchall()
# se formatea la lista de cursos
today_assigned_courses = scad_utils.rowToDict(
(
"AsignacionCursoID",
"CursoNombre",
"HoraInicio",
"HoraFin",
"Pabellon",
"Numero",
),
today_assigned_courses,
)
if len(today_assigned_courses) > 0:
existence_check_query: str = (
"select * from Marcacion " "where Fecha=? and AsignacionCursoID=?"
)
for course in today_assigned_courses:
db_cursor.execute(
existence_check_query,
(
current_datetime.strftime("%Y/%m/%d"),
course["AsignacionCursoID"],
),
)
if len(db_cursor.fetchall()) > 0:
course["state"] = "marked"
else:
if current_datetime >= scad_utils.timeToDatetime(
course["HoraInicio"], current_datetime
):
if (
current_datetime
- scad_utils.timeToDatetime(
course["HoraInicio"], current_datetime
)
<= teacher_time_tolerance
):
course["state"] = "mark_now"
else:
course["state"] = "not_marked"
else:
course["state"] = "waiting"
db_cursor.close()
db_connection.close()
return jsonify(today_assigned_courses)
elif session["account_type"] == "Administrador":
# el administrador no deberia usar este servicio
return make_response("ya nos jakiaron", 400)
@app.route("/teacher_mark", methods=["POST"])
def teacherMark() -> dict:
# validar si es posible marcar el registro del curso
if "account_type" not in session:
# no inicio sesion
return make_response("stap", 401)
elif session["account_type"] == "Docente":
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
# consultar si hay algun curso para marcar
course_to_mark: dict
db_connection = db.get_connection()
db_cursor = db_connection.cursor(named_tuple=True)
db_cursor.execute("SET lc_time_names = 'es_PE'")
query: str = (
"select AsignacionCursoID,SalonID "
"from AsignacionCurso "
"where DocenteDNI=? "
"and Dia=dayname(?) "
"and HoraInicio <=? "
"and timediff(?,HoraInicio)<=?;"
)
db_cursor.execute(
query,
(
session["DocenteDNI"],
current_datetime.strftime("%Y/%m/%d"),
current_datetime.strftime("%H:%M:%S"),
current_datetime.strftime("%H:%M:%S"),
str(teacher_time_tolerance),
),
)
course_to_mark = db_cursor.fetchall()
if len(course_to_mark) == 1:
insertion_query: str = ("insert into Marcacion() " "values(?,?,?,?);")
db_cursor.execute(
insertion_query,
(
int(course_to_mark[0].AsignacionCursoID),
current_datetime.strftime("%Y/%m/%d"),
current_datetime.strftime("%H:%M:%S"),
int(course_to_mark[0].SalonID),
),
)
db_cursor.close()
db_connection.close()
return make_response("se marco la asistencia", 200)
else:
db_cursor.close()
db_connection.close()
return make_response("ya es tarde", 406)
elif session["account_type"] == "Administrador":
return make_response(
"papu, si ya nos jakiaste por lo menos usa los servicios correctos no?", 400
)
@app.route("/admin_get_report", methods=["GET"])
def adminGetReport() -> list:
if "account_type" not in session:
# no inicio sesion
return make_response("nope", 401)
elif session["account_type"] == "Administrador":
time_range = request.get_json()["time_range"]
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
db_connection = db.get_connection()
db_cursor = db_connection.cursor(named_tuple=True)
db_cursor.execute("SET lc_time_names = 'es_PE'")
report: list
if time_range == "today":
query: str = (
"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, "
"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"inner join Docente d using(DocenteDNI) "
"where Dia=dayname(?) and a.HoraInicio<? "
)
db_cursor.execute(
query,
(
current_datetime.strftime("%Y-%m-%d"),
current_datetime.strftime("%H:%M:%S"),
),
)
report = db_cursor.fetchall()
# se formatea la lista de cursos
report = scad_utils.rowToDict(
(
"AsignacionCursoID",
"DocenteDNI",
"Nombre",
"Apellido",
"CursoNombre",
"HoraInicio",
"HoraFin",
"Pabellon",
"Numero",
),
report,
)
if len(report) > 0:
existence_check_query: str = (
"select * from Marcacion " "where Fecha=? and AsignacionCursoID=?"
)
for assignment in report:
db_cursor.execute(
existence_check_query,
(
current_datetime.strftime("%Y-%m-%d"),
assignment["AsignacionCursoID"],
),
)
if len(db_cursor.fetchall()) > 0:
assignment["state"] = "marked"
else:
assignment["state"] = "not_marked"
db_cursor.close()
db_connection.close()
return make_response(jsonify(report), 200)
elif time_range == "yesterday":
query: str = (
"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, "
"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"inner join Docente d using(DocenteDNI) "
"where Dia=dayname(?)"
)
current_datetime -= datetime.timedelta(days=1)
db_cursor.execute(
query, (current_datetime.strftime("%Y-%m-%d"),),
)
report = db_cursor.fetchall()
# se formatea la lista de cursos
report = scad_utils.rowToDict(
(
"AsignacionCursoID",
"DocenteDNI",
"Nombre",
"Apellido",
"CursoNombre",
"HoraInicio",
"HoraFin",
"Pabellon",
"Numero",
),
report,
)
if len(report) > 0:
existence_check_query: str = (
"select * from Marcacion " "where Fecha=? and AsignacionCursoID=?"
)
for assignment in report:
db_cursor.execute(
existence_check_query,
(
current_datetime.strftime("%Y-%m-%d"),
assignment["AsignacionCursoID"],
),
)
if len(db_cursor.fetchall()) > 0:
assignment["state"] = "marked"
else:
assignment["state"] = "not_marked"
db_cursor.close()
db_connection.close()
return make_response(jsonify(report), 200)
elif time_range == "this_week":
pass
elif time_range == "this_month":
pass
elif time_range == "all":
pass
else:
return make_response("peticion invalida", 406)
elif session["account_type"] == "Docente":
# el administrador no deberia usar este servicio
return make_response("ya nos jakiaron", 400)
@app.route("/admin_add_teacher", methods=["POST"])
def adminAddTeacher() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
data = request.get_json()
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("insert into Docente() values(?,?,?,?,?)")
db_cursor.execute(
query,
(
data["DocenteDNI"],
data["Nombre"],
data["Apellido"],
data["Usuario"],
data["Contrasena"],
),
)
db_cursor.close()
db_connection.close()
return make_response("se agrego la entrada", 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_teacher_table", methods=["GET"])
def adminGetTeacherTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("select * from Docente")
db_cursor.execute(query)
teacher_table = scad_utils.rowToDict(
("DocenteDNI", "Nombre", "Apellido", "Usuario", "Contrasena"),
db_cursor.fetchall(),
)
db_cursor.close()
db_connection.close()
return make_response(jsonify(teacher_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_course_table", methods=["GET"])
def adminGetCourseTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("select * from Curso")
db_cursor.execute(query)
course_table = scad_utils.rowToDict(
("CursoNombre", "FechaInicio", "FechaFin"), db_cursor.fetchall(),
)
for course in course_table:
course["FechaInicio"] = course["FechaInicio"].isoformat()
course["FechaFin"] = course["FechaFin"].isoformat()
db_cursor.close()
db_connection.close()
return make_response(jsonify(course_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_classroom_table", methods=["GET"])
def adminGetClassroomTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("select Pabellon,Numero from Salon")
db_cursor.execute(query)
classroom_table = scad_utils.rowToDict(
("Pabellon", "Numero"), db_cursor.fetchall(),
)
db_cursor.close()
db_connection.close()
return make_response(jsonify(classroom_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_course_assignment_table", methods=["GET"])
def adminGetCourseAssignmentTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = (
"select d.DocenteDNI, d.Nombre, d.Apellido,"
"a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"inner join Docente d using(DocenteDNI)"
)
db_cursor.execute(query)
course_assignment_table = scad_utils.rowToDict(
(
"DocenteDNI",
"Nombre",
"Apellido",
"CursoNombre",
"Pabellon",
"Numero",
"HoraInicio",
"HoraFin",
"Dia",
),
db_cursor.fetchall(),
)
db_cursor.close()
db_connection.close()
return make_response(jsonify(course_assignment_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/logout", methods=["DELETE"])
def logout() -> dict:
if "account_type" not in session:
return make_response("primero inicia session broz", 301)
else:
if session["account_type"] == "Docente":
session.pop("Usuario")
session.pop("Nombre")
session.pop("Apellido")
return make_response("hasta luego prosor", 200)
elif session["account_type"] == "Administrador":
session.pop("Usuario")
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
|
6,227 | 77b43d7d9cd6b912bcee471c564b47d7a7cdd552 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length
from flask_ckeditor import CKEditorField
class BoldifyEncryptForm(FlaskForm):
    """WTForms form: a required free-text message to be bolded, plus submit."""
    boldMessage = StringField('Bolded Message: ',
                              validators=[DataRequired()])
    submit = SubmitField('Submit')
6,228 | cad00f80afa142b69ced880de000b6b5b230640c | import pandas as pd
import numpy as np
import random
import csv
import pprint
import datamake
import dafunc_H
def simulation(cnt, a, b):
    """Run `cnt` seeded DA-algorithm simulations and return the stacked results.

    Each iteration reseeds (48 + j) for reproducibility, builds a population
    via datamake.make_stu(n, m, k, a, b), then applies 200 rounds of
    dafunc_H.da_H.  The first five student columns of every run are stacked
    and also written to a text file named from (cnt, a, b).
    """
    df, df_collist = datamake.make_df(
        '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'
    )
    n, m, k = datamake.stu_num()
    df_stu = np.zeros((1, n + 1))
    for j in range(cnt):
        random.seed(48 + j)
        student = datamake.make_stu(n, m, k, a, b)
        #print(df_collist)
        univ = datamake.univ_make(df, df_collist)
        for i in range(200):
            dafunc_H.da_H(student, univ, df_collist)
        # First run initializes the output matrix; later runs are stacked below it.
        if j == 0:
            df_stu = student[:, 0:5].T.copy()
        else:
            df_stuadd = student[:, 0:5].T.copy()
            df_stu = np.vstack((df_stu, df_stuadd))
    url = '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/' + str(
        cnt) + "-" + str(a) + "-" + str(b) + 'DA-Q.txt'
    np.savetxt(url, df_stu, delimiter=',', fmt='%d')
    return df_stu
#def stu_summary(df_stu):
res0 = simulation(4, 0.7, 0.8)
#def do_simulation():
print(res0)
|
6,229 | 58c7b405096a5fdc5eeacb5e5f314f2d1bb85af6 | #!/usr/bin/python
import argparse
import os
import subprocess
import batch
# Resolve the ROOT macro and executable relative to the VMC working directory.
vmc_dir = os.environ['VMCWORKDIR']
macro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')
exec_bin = os.path.join(vmc_dir,'build/bin/koa_execute')
# arguments definitions
parser = argparse.ArgumentParser()
parser.add_argument("infile",help="the file list to be processed")
parser.add_argument("-d","--directory",
                    default="./",
                    help="directory where files are located")
parser.add_argument("-s","--suffix",
                    default="_EmsRawEvent.root",
                    help="suffix of the input file")
parser.add_argument("--elist_suffix",
                    help="suffix of the file containing the event list, empty if not used")
parser.add_argument("--elist_dir",
                    default="/",
                    help="directory containing the event list")
parser.add_argument("--elist_name",
                    default="ems_rate_elist",
                    help="name of the event list")
args = parser.parse_args()
in_dir = os.path.expanduser(args.directory)
# add rec each file in the list
list_input = batch.get_list(args.infile, args.suffix, in_dir)
# With an event-list suffix, pair each input file with its event-list file;
# otherwise run the macro on each input alone.  Files are processed serially.
if args.elist_suffix:
    list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)
    for fin, felist in zip(list_input, list_elist):
        command = [exec_bin, macro, fin, felist, args.elist_dir, args.elist_name]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
else:
    for fin in list_input:
        command = [exec_bin, macro, fin]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
|
6,230 | 86928f4358e4999a5cec8bfad1fe055c9a2778d1 | """
Create all figures and Excel files that combine data from all embryos in a given genetic background
Copyright (C) 2017 Ahmet Ay, Dong Mai, Soo Bin Kwon, Ha Vu
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, shared, os
from subprocess import call
DEFAULT_NUM_BIN = 5
def main():
    """Parse CLI args, then run each plotting/export subscript over all embryos.

    Required: -ne (embryo count), -d (output directory), -i (one slices.xls
    per embryo).  Optional: -nb (bin count, default DEFAULT_NUM_BIN).  Every
    subscript runs via subprocess; a return code of 1 aborts the pipeline.
    """
    args = sys.argv[1:]
    num_args = len(args)
    req_args = [False]*3
    num_bins = DEFAULT_NUM_BIN
    if num_args >= 6:
        i = 0
        # Manual option scan; unknown options fall through to usage().
        while i<num_args-1:
            option = args[i]
            value = args[i+1]
            if (option == '-ne' or option == '--number-of-embryos') and shared.isInt(value):
                num_embryos = int(value)
                req_args[0] = True
                i+=2
            elif (option == '-nb' or option == '--number-of-bins') and shared.isInt(value):
                num_bins = int(value)
                i+=2
            elif option == '-d' or option == '--output-directory':
                directory = value
                req_args[1] = True
                i+=2
            elif req_args[0] and (option == '-i' or option == '--input-files') and ((num_args-7)==num_embryos):
                slice_files = args[i+1:i+1+num_embryos]
                for f in slice_files:
                    if not os.path.isfile(f):
                        print("combine_embryos.py: File "+f+" does not exist.")
                        exit(1)
                req_args[2] = True
                i+=num_embryos
            else:
                usage()
        # All three required argument groups must have been seen.
        for arg in req_args:
            if not arg:
                usage()
    else:
        usage()
    shared.ensureDir(directory)
    ### Spatial amplitude ###
    print("Plotting spatial amplitude...")
    command = ["python","plot_spatial_amplitude.py",str(num_embryos)] + slice_files + [directory]
    if 1==call(command):
        exit(1)
    # (compare_spatial_amplitude.py can run after plot_spatial_amplitude.py is run for all genetic backgrounds)
    '''### Burst size and frequency ###
    # 1. create_burst_data.py
    print("Creating data for estimate_burst_parameters.m...")
    command = ["python","create_burst_data.py",str(num_embryos)] + slice_files + [directory]
    if 1==call(command):
        exit(1)
    # 2. estimate_burst_parameters.m
    print("Running estimate_burst_parameters.m on MATLAB...")
    command = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(\''+directory+'/burst_data.xls\',\''+directory+'\')']
    if 1==call(command): # this will automatically open and run MATLAB
        exit(1)
    # 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m
    print("Plotting estimated burst size and frequencies...")
    command = ["python","plot_estimated_burst_parameters.py",directory+"/burst_result.xls",directory]
    if 1==call(command):
        exit(1)'''
    # (compare_burst_parameters.py can run after plot_estimated_burst_parameters.py is run for all genetic backgrounds)
    # Fano factor (to demonstrate burstiness)
    command = ["python","plot_fano_factor.py",str(num_embryos)] + slice_files + [directory]
    print("Plotting fano factor...")
    if 1==call(command):
        exit(1)
    # (compare_fano_factor.py can run after plot_fano_factor.py is run for all genetic backgrounds)
    ### Noise ###
    # Intrinsic and extrinsic noise
    print("Plotting intrinsic and extrinsic noise...")
    command = ["python","plot_noise.py",str(num_embryos), str(num_bins)] + slice_files + [directory]
    if 1==call(command):
        exit(1)
    # (compare_noise.py can run after plot_noise.py is run for all genetic backgrounds)
    ### Scatter plot of her1 and her7 for all bins ####
    print("Plotting scatter plots of her1 vs her7 mRNAs in all bins ...")
    command = ["python", "plot_scatter_her1_her7.py", directory + "/combined_slices.xls", str(num_bins), directory]
    if 1 == call(command):
        exit(1)
    # Spatial noise (coefficient of variation squared across space)
    print("Plotting spatial noise (coefficient of variation squared across space)...")
    command = ["python","plot_CVsquared.py",str(num_embryos)] + slice_files + [directory]
    if 1==call(command):
        exit(1)
    # (compare_grouped_CVsquared.py and compare_CV_squared.py can run after plot_CVsquared.py is run for all genetic backgrounds)
    ### Raw data Excel files ###
    command = ["python","create_raw_expression_excel.py",str(num_embryos)] + slice_files + [directory]
    print("Creating Excel files for RNA expression levels...")
    if 1==call(command):
        exit(1)
    command = ["python","create_raw_spacial_noise_excel.py",str(num_embryos)] + slice_files + [directory]
    print("Creating Excel files for spacial noise...")
    if 1==call(command):
        exit(1)
    command = ["python","create_raw_noise_excel.py",str(num_embryos)] + slice_files + [directory]
    print("Creating Excel files for noise...")
    if 1==call(command):
        exit(1)
def usage():
    """Explain the expected command line, then abort with exit status 1."""
    for line in (
        "combine_embryos.py: Invalid command-line arguments.",
        "Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>",
        "Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \
../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls",
    ):
        print(line)
    exit(1)
main()
|
6,231 | 378032a8d02bc49e5ed8ebccbeddfbb281c2cbd7 | v0 = 5
# Vertical position after time t for initial velocity v0 under gravity g:
# y = v0*t - (1/2)*g*t**2   (v0 is assigned just above this block).
g = 9.81
t = 0.6
y=v0*t - 0.5*g*t**2
print (y)
6,232 | aebf1d64923c5f325c9d429be092deaa06f20963 | #!/usr/bin/env python
import sys
def add_them(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
def main():
    """Print 10 + 21; returns None so sys.exit(main()) reports success."""
    # Parenthesized single-argument print behaves identically under
    # Python 2 and Python 3 (the bare `print x` statement is Py2-only).
    print(add_them(10, 21))
if __name__ == '__main__':
sys.exit(main())
|
6,233 | 81a53d08ab36e85dd49cf1f3d9c22c1f18605149 | #!/usr/bin/python
#encoding=utf8
import sys
import tushare as ts
def local_main():
    """Download historical quotes for the stock id in argv[1] via tushare
    and write them to '<stock_id>_his.xlsx' (Python 2 script)."""
    if len(sys.argv) != 2:
        print sys.argv[0], " [stock id]"
        return
    stock_id = sys.argv[1]
    df = ts.get_hist_data(stock_id)
    # One worksheet, named after the stock id.
    df.to_excel(stock_id + '_his.xlsx', sheet_name = stock_id)
if __name__ == '__main__':
local_main() |
6,234 | e3de072d6bce2ecc105306c06b9a9aa0362130ff | """
Auxiliary functions for calculating the utility of achieving a certain data rate (for a UE).
Attention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)!
"""
import numpy as np
from deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY
def linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):
    """
    Utility that directly equals the data rate, increasing linearly up to a given maximum.

    :param curr_dr: Current data rate (must be non-negative)
    :param max_dr: Maximum data rate at which the utility does not increase further
    :return: Utility, clipped to [MIN_UTILITY, MAX_UTILITY]
    """
    assert curr_dr >= 0 and max_dr >= 0
    # This utility only makes sense when the utility range equals the data-rate range.
    assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, \
        "The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!"
    return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)
def step_utility(curr_dr, req_dr):
    """
    Binary utility: MAX_UTILITY once the required data rate is met, MIN_UTILITY otherwise.

    :param curr_dr: Current data rate
    :param req_dr: Required data rate
    :return: MIN_UTILITY or MAX_UTILITY depending on whether the requirement is met
    """
    return MAX_UTILITY if curr_dr >= req_dr else MIN_UTILITY
def log_utility(curr_dr):
    """
    Logarithmic utility: steep gains at low data rates, flattening for higher ones.

    :param curr_dr: Current data rate
    :return: Utility, clipped to [MIN_UTILITY, MAX_UTILITY]
    """
    # Chosen form 10*log10(x) clipped to [-20, 20]: exactly -20 for dr <= 0.01,
    # +20 for dr >= 100, 0 at dr = 1 (see the tuning notes in the repo history).
    assert MIN_UTILITY == -20 and MAX_UTILITY == 20, "The chosen log utility requires min/max utility to be -20/+20"
    if curr_dr == 0:
        # log10(0) is undefined; map a zero rate straight to the floor
        return MIN_UTILITY
    scaled = 10 * np.log10(curr_dr)
    return np.clip(scaled, MIN_UTILITY, MAX_UTILITY)
|
6,235 | 164665c7d037f1e4128d8227d5fc148940d5c2b8 | #!/bin/python
# Python 2 script (raw_input and the print statement). For a space-separated
# line of integers on stdin, prints the two sums obtained by excluding exactly
# one element: first the total without the (first) maximum, then the total
# without the (first) minimum.
import sys
# parse one line of integers (Py2: map returns a list, so len()/indexing work)
arr = map(int, raw_input().strip().split(' '))
smallest = 1000000001  # sentinel above the expected value range
largest = 0
smi = -1  # index of the first smallest element seen
lri = -1  # index of the first largest element seen
for i, num in enumerate(arr):
    if num < smallest:
        smallest = num
        smi = i
    if num > largest:
        largest = num
        lri = i
smsum = 0  # sum excluding the largest element
lrsum = 0  # sum excluding the smallest element
for i in range(len(arr)):
    if i != smi:
        lrsum += arr[i]
    if i != lri:
        smsum += arr[i]
print smsum, lrsum
6,236 | aebe749a20482636d7ed508f9cbd9cde56656b73 | #!/usr/bin/env python
#
# This will take a snapshot and convert it into a volume. To create a volume
# without any links to the old snapshot you need to convert it to a temporary
# volume first, convert that into an image and convert the image back into
# your final volume. Once this is all done, the temporary volume and image
# will be removed.
#
import argparse
import openstack
import time
import sys
from sdk import Snapshot
def main(args):
    """Restore a snapshot into a volume via a temporary volume and an image.

    The snapshot is converted to a scratch volume, that volume to an image,
    and the image back into the final volume; both intermediates are deleted
    once the final volume exists.
    """
    # Connection settings are read from clouds.yaml by the openstack SDK
    openstack.enable_logging(debug=False)
    api = openstack.connect(cloud=args.cloud)
    snapshot_id = args.snapshot
    server = args.volume
    # Wrap the snapshot in the helper object; bail out if the id is unknown
    try:
        snapshot = Snapshot(
            api=api,
            snapshot=api.volume.get_snapshot(snapshot_id),
        )
    except openstack.exceptions.ResourceNotFound:
        print('Snapshot id {} not found.'.format(snapshot_id))
        sys.exit(1)
    today = time.strftime("%d-%m-%Y")
    # intermediate objects share one date-stamped name
    restore_name = '{}-restore-{}'.format(server, today)
    print('')
    print('Converting snapshot to volume..')
    volume = snapshot.to_volume(restore_name)
    print('Converting volume to image..')
    image = volume.to_image(restore_name)
    print('Converting image to volume..')
    image.to_volume(server, size=volume.volume.size)
    # clean up the intermediate image and scratch volume
    image.delete()
    volume.delete()
    print('')
if __name__ == '__main__':
    # CLI: --snapshot and --volume are mandatory; --cloud selects the
    # clouds.yaml entry to connect with (defaults to 'fuga')
    parser = argparse.ArgumentParser(description='Restore snapshots')
    parser.add_argument(
        '--snapshot',
        required=True,
        help='',
        metavar=('<snapshot_id>'),
    )
    parser.add_argument(
        '--volume',
        required=True,
        help='',
        metavar=('<volume name>'),
    )
    parser.add_argument(
        '--cloud',
        help='',
        metavar=('<cloud in clouds.yaml>'),
        default='fuga',
    )
    args = parser.parse_args()
    main(args)
|
6,237 | 6f253da5dc1caa504a3a8aadae7bce6537b5c8c6 | # Exercise 3: Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message.
# If the score is between 0.0 and 1.0, print a grade using the following table:
# Score Grade
# >= 0.9 A
# >= 0.8 B
# >= 0.7 C
# >= 0.6 D
# < 0.6 F
# Vinayak Nayak
# 27th December 2018
# 12:30 pm
try:
    # prompt for a score in [0.0, 1.0] and map it onto a letter grade
    score = float(input("Enter the score : "))
    if score > 1 or score < 0:
        print("Entered score isn't valid.")
    elif score < 0.6:
        print("Grade: F")
    elif score < 0.7:
        print("Grade: D")
    elif score < 0.8:
        print("Grade: C")
    elif score < 0.9:
        print("Grade: B")
    elif score <= 1.0:
        print("Grade: A")
except Exception as e:
    # non-numeric input: float() raises ValueError; echo its message
    print(str(e))
|
6,238 | dfee0407eaed7b1ab96467874bbfe6463865bcb4 | from __future__ import absolute_import, print_function, unicode_literals
import six
from six.moves import zip, filter, map, reduce, input, range
import pathlib
import unittest
import networkx as nx
import multiworm
TEST_ROOT = pathlib.Path(__file__).parent.resolve()
DATA_DIR = TEST_ROOT / 'data'
SYNTH1 = DATA_DIR / 'synth1'
SYNTH1_N_BLOBS = 12
class TestExperimentOpen(unittest.TestCase):
    """Constructor contract of multiworm.Experiment: accepted argument
    combinations, failure modes, and the progress-callback hook."""

    def test_pathlib(self):
        # a pathlib.Path is accepted directly
        ex = multiworm.Experiment(SYNTH1)

    def test_strpath(self):
        # ...and so is a plain string path
        ex = multiworm.Experiment(str(SYNTH1))

    def test_root_and_id(self):
        ex = multiworm.Experiment(
            data_root=DATA_DIR,
            experiment_id='synth1',
        )

    def test_strroot_and_id(self):
        ex = multiworm.Experiment(
            data_root=str(DATA_DIR),
            experiment_id='synth1',
        )

    def test_empty_fail(self):
        # no arguments at all must raise a ValueError mentioning experiment_id
        try:
            multiworm.Experiment()
        except Exception as e:
            if not isinstance(e, ValueError):
                self.fail('raised some unexpected error')
            if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):
                self.fail('error message unexpected')
        else:
            self.fail('experiment constructor worked with no arguments')

    def test_dataroot_only_fail(self):
        # data_root alone does not identify an experiment
        try:
            multiworm.Experiment(data_root=DATA_DIR)
        except Exception as e:
            if not isinstance(e, ValueError):
                self.fail('raised some unexpected error')
            if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):
                self.fail('error message unexpected')
        else:
            self.fail('experiment constructor allowed data-root only without erroring')

    def test_custom_id(self):
        my_id = 'peterspeppers'
        ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(ex.id, my_id)

    def test_callback(self):
        class StateThing(object):
            """Callable that records and checks progress is monotonic."""
            def __init__(self):
                self.progress = -1
            def __call__(self, progress):
                assert progress >= self.progress
                self.progress = progress
        ex = multiworm.Experiment(SYNTH1, callback=StateThing())
class TestMalformedExperiments(unittest.TestCase):
    """Error behavior when the experiment directory itself is unusable."""

    def test_nonexistent_folder(self):
        # a missing folder should surface as a generic IOError that mentions
        # existence -- not as the MWT-specific data error
        try:
            ex = multiworm.Experiment(DATA_DIR / 'guaranteedtohopefullynotbethere')
        except multiworm.core.MWTDataError:
            self.fail('Overly specific error raised')
        except IOError as e:
            self.assertIn('exist', str(e))
        else:
            self.fail("Didn't even mention the folder isn't there")

    def test_check_is_dir(self):
        # passing a file (not a directory) should likewise be a plain IOError
        try:
            ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
        except multiworm.core.MWTDataError:
            self.fail('Overly specific error raised')
        except IOError as e:
            self.assertIn('directory', str(e))
        else:
            self.fail("Didn't even mention the folder isn't there")

    def test_missing_summary(self):
        # a directory with no summary file is MWT-specific bad data
        try:
            ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
        except multiworm.core.MWTDataError as e:
            pass
        else:
            self.fail("Didn't raise error despite no summary file")

    def test_dupe_summary(self):
        # two summary files are ambiguous -> the dedicated summary error
        try:
            ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
        except multiworm.core.MWTSummaryError as e:
            pass
        else:
            self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
    """Error behavior for structurally invalid blob data."""

    def test_zero_frame(self):
        # a data set containing a frame 0 is treated as malformed
        # (MWTDataError expected from the constructor)
        try:
            ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
        except multiworm.core.MWTDataError:
            pass
        else:
            self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
    """Iterating an opened experiment yields every blob exactly once."""

    def setUp(self):
        self.ex = multiworm.Experiment(SYNTH1)

    def test_length_is_num_blobs(self):
        self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))

    def test_iter(self):
        # direct iteration over the experiment
        n_seen = sum(1 for _ in self.ex)
        self.assertEqual(SYNTH1_N_BLOBS, n_seen)

    def test_iter_blobs(self):
        # explicit .blobs() iterator
        n_seen = sum(1 for _ in self.ex.blobs())
        self.assertEqual(SYNTH1_N_BLOBS, n_seen)
class TestExperimentProperties(unittest.TestCase):
    """Frame queries and the (frozen) experiment graph."""

    def setUp(self):
        self.ex = multiworm.Experiment(SYNTH1)

    def test_blobs_in_frame(self):
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
        self.assertEqual(list(self.ex.blobs_in_frame(200)), list(range(5, 12)))

    def test_locked_graph(self):
        # the exposed graph must be frozen; mutating it has to raise
        try:
            self.ex.graph.add_node(123)
        except nx.NetworkXError as e:
            self.assertIn('frozen', str(e).lower())
        else:
            self.fail('experiment graph should be frozen/locked')

    def test_graph_copy_unlocked(self):
        # a copy of the frozen graph is an ordinary, mutable graph
        G = self.ex.graph.copy()
        G.add_node(123)
        G.add_edge(55, 66)
|
6,239 | 4d4dd451d83d8d602c6264e77f52e5e143aef307 | import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sess = tf.Session()
# 1.one-shot iterator
dataset = tf.data.Dataset.range(100)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
for i in range(100):
value = sess.run(next_element)
# print(value)
assert i == value
# 2.initializable iterator
max_value = tf.placeholder(tf.int64, shape=[])
dataset = tf.data.Dataset.range(max_value)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
# Initialize an iterator over a dataset with 10 elements.
sess.run(iterator.initializer, feed_dict={max_value: 10})
for i in range(10):
value = sess.run(next_element)
# print(value)
assert i == value
# Initialize the same iterator over a dataset with 100 elements.
sess.run(iterator.initializer, feed_dict={max_value: 100})
for i in range(100):
value = sess.run(next_element)
# print(value)
assert i == value
# 3.reinitializable iterator
# Define training and validation datasets with the same structure.
training_dataset = tf.data.Dataset.range(100).map(
lambda x: x + tf.random_uniform([], -10, 10, tf.int64))
validation_dataset = tf.data.Dataset.range(50)
# A reinitializable iterator is defined by its structure. We could use the
# `output_types` and `output_shapes` properties of either `training_dataset`
# or `validation_dataset` here, because they are compatible.
iterator = tf.data.Iterator.from_structure(training_dataset.output_types,
training_dataset.output_shapes)
next_element = iterator.get_next()
training_init_op = iterator.make_initializer(training_dataset)
validation_init_op = iterator.make_initializer(validation_dataset)
# Run 20 epochs in which the training dataset is traversed, followed by the validation dataset.
for _ in range(20):
# Initialize an iterator over the training dataset.
sess.run(training_init_op)
for _ in range(10):
value1 = sess.run(next_element)
# print('value1:', value1)
# Initialize an iterator over the validation dataset.
sess.run(validation_init_op)
for _ in range(50):
value2 = sess.run(next_element)
# print('value2:', value2)
# 4.feedable iterator
# Define training and validation datasets with the same structure.
training_dataset = tf.data.Dataset.range(100).map(
lambda x: x + tf.random_uniform([], -10, 10, tf.int64)).repeat()
validation_dataset = tf.data.Dataset.range(50)
# A feedable iterator is defined by a handle placeholder and its structure. We
# could use the `output_types` and `output_shapes` properties of either
# `training_dataset` or `validation_dataset` here, because they have
# identical structure.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle, training_dataset.output_types, training_dataset.output_shapes)
next_element = iterator.get_next()
# You can use feedable iterators with a variety of different kinds of iterator
# (such as one-shot and initializable iterators).
training_iterator = training_dataset.make_one_shot_iterator()
validation_iterator = validation_dataset.make_initializable_iterator()
# The `Iterator.string_handle()` method returns a tensor that can be evaluated
# and used to feed the `handle` placeholder.
training_handle = sess.run(training_iterator.string_handle())
validation_handle = sess.run(validation_iterator.string_handle())
# Loop forever, alternating between training and validation.
while True:
# Run 200 steps using the training dataset. Note that the training dataset is
# infinite, and we resume from where we left off in the previous `while` loop
# iteration.
for _ in range(200):
sess.run(next_element, feed_dict={handle: training_handle})
# Run one pass over the validation dataset.
sess.run(validation_iterator.initializer)
for _ in range(50):
sess.run(next_element, feed_dict={handle: validation_handle})
# 5.Consuming values from an iterator
dataset = tf.data.Dataset.range(5)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
# Typically `result` will be the output of a model, or an optimizer's
# training operation.
result = tf.add(next_element, next_element)
sess.run(iterator.initializer)
print(sess.run(result)) # ==> "0"
print(sess.run(result)) # ==> "2"
print(sess.run(result)) # ==> "4"
print(sess.run(result)) # ==> "6"
print(sess.run(result)) # ==> "8"
try:
sess.run(result)
except tf.errors.OutOfRangeError:
print("End of dataset") # ==> "End of dataset"
#
dataset1 = tf.data.Dataset.from_tensor_slices(tf.random_uniform([4, 10]))
dataset2 = tf.data.Dataset.from_tensor_slices((tf.random_uniform([4]), tf.random_uniform([4, 100])))
dataset3 = tf.data.Dataset.zip((dataset1, dataset2))
iterator = dataset3.make_initializable_iterator()
sess.run(iterator.initializer)
next1, (next2, next3) = iterator.get_next()
# 6.Saving iterator state
# Create saveable object from iterator.
saveable = tf.contrib.data.make_saveable_from_iterator(iterator)
# Save the iterator state by adding it to the saveable objects collection.
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = tf.train.Saver()
# NOTE(review): `should_checkpoint` and `path_to_checkpoint` are never defined
# anywhere in this file, so this section raises NameError if executed. The
# save call also appears to pass the checkpoint path where tf.train.Saver.save
# expects the session as its first argument -- confirm against the TF1 API.
with tf.Session() as sess:
    if should_checkpoint:
        saver.save(path_to_checkpoint)
# Restore the iterator state.
with tf.Session() as sess:
    saver.restore(sess, path_to_checkpoint)
6,240 | ac35672661e1dd0b97567ae4335f537dc69f98f7 |
###########################################################
# 2019-02-07: removed the marginalized prior
#
###########################################################
import sys,os
import numpy as np
import matplotlib.pylab as plt
from scipy.linalg import eig
from scipy.stats import norm, kstest, normaltest
# use default colors defined by MatPlotlib
colors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b']
###########################################################
fig = plt.figure(figsize=(11,4))
###########################################################
# 1) histograms of the normalized dmu(zi). The purpose is
# to show that the mock sample is not too peculiar
###########################################################
def read_jla_mock( mock_filename ):
    """Read a mock JLA supernova table from a whitespace-separated text file.

    Each line holds a supernova name in column 0 followed by numeric fields;
    columns 1-4 of every line are parsed as floats.

    :param mock_filename: path of the mock catalogue file
    :return: numpy array of shape (n_sn, 4)
    """
    # 'with' guarantees the handle is closed even if parsing below fails
    # (the original opened/closed manually and leaked on error)
    with open(mock_filename, 'r') as fp:
        lines = fp.readlines()
    jla = []
    for line in lines:
        sn = line.split()
        # explicit indices keep the original IndexError on short/blank lines
        jla.append([float(sn[k]) for k in (1, 2, 3, 4)])
    return np.array(jla)
# jla = read_jla_mock('MOCK_JLA_51.txt')
# eos_SP = np.loadtxt('eos_51.txt')
# jla = read_jla_mock('MOCK_JLA_16.txt')
# eos_SP = np.loadtxt('eos_16.txt')
# jla = read_jla_mock('MOCK_JLA_10.txt')
# eos_SP = np.loadtxt('eos_10.txt')
# jla = read_jla_mock('MOCK_JLA_9.txt')
# eos_SP = np.loadtxt('eos_9.txt')
# jla = read_jla_mock('MOCK_JLA_30.txt')
# eos_SP = np.loadtxt('eos_30.txt')
# jla = read_jla_mock('MOCK_JLA_3.txt')
# eos_SP = np.loadtxt('eos_3.txt')
jla = read_jla_mock('MOCK_JLA_40.txt')
eos_SP = np.loadtxt('eos_40.txt')
eos_no_prior = np.loadtxt('eos_no_prior.txt')
eos_no_prior2 = np.loadtxt('eos_no_prior2.txt')
z = jla[:,0]
dmu = (jla[:,1]-jla[:,3])/jla[:,2] # normalize the errors
nbin_all = 15
nbin_1 = 15
nbin_2 = 15
z1 = 0.2
z2 = 0.6
ID1 = (z < z1 )
ID2 = (z >= z2 )
p = round(kstest(dmu,cdf='norm')[1],2)
p1 = round(kstest(dmu[ID1],'norm')[1],2)
p2 = round(kstest(dmu[ID2],'norm')[1],2)
plt.subplot(1,2,1)
ax = plt.gca()
rwidth=0.6
ax.hist(dmu, bins=nbin_all, label=r'ALL ' + r' p = '+str(p), alpha=0.5, rwidth=rwidth, color=colors[0])
ax.hist(dmu[ID1], bins=nbin_1, label=r'$z<' + str(z1) + '$' + r' p = '+str(p1)+'0', alpha=0.7, rwidth=rwidth, color=colors[1])
ax.hist(dmu[ID2], bins=nbin_2, label=r'$z>' + str(z2) + '$' + r' p = '+str(p2), alpha=0.8, rwidth=rwidth, color=colors[2])
ax.set_xlim(-3.5,3.5)
ax.set_xticks([-3,-2,-1,0,1,2,3])
ax.set_xticklabels([-3,-2,-1,0,1,2,3],fontsize=14)
ax.set_xlabel(r'$\widetilde{\Delta\mu}$',fontsize=14)
yticks = [0,50,100,150]
ax.set_ylim(0,170)
ax.set_yticks(yticks)
ax.set_yticklabels(yticks,fontsize=14)
ax.set_ylabel(r'Counts',fontsize=14)
ax.tick_params(axis='both',direction='in')
lgd=ax.legend(loc='upper left',fontsize=13,frameon=False)
texts = lgd.get_texts()
for i in range(len(texts)):
plt.setp(texts[i],color=colors[i])
###########################################################
# 3) reconstructed EoS
###########################################################
plt.subplot(1,2,2)
ax = plt.gca()
a = np.linspace(1,.4,20)
z = 1/a-1
colors=['blue','red','gray']
ax.hlines(-1,xmin=0,xmax=1.5,linestyle='dashed',lw=2,alpha=1,color=colors[0],label=r'Fiducal model')
# EoS result with prior enforced
ax.errorbar(z,eos_SP[:,0],yerr=[eos_SP[:,0]-eos_SP[:,2],eos_SP[:,3]-eos_SP[:,0]],
marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Prior enforced')
# ax.errorbar(z,eos_SP[:,0],yerr=eos_SP[:,1],
# marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Reconstruction')
# EoS result without prior
# ax.errorbar(z,eos_no_prior[:,0],yerr=[eos_no_prior[:,0]-eos_no_prior[:,2],eos_no_prior[:,3]-eos_no_prior[:,0]],
# marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')
# ax.errorbar(z,eos_no_prior[:,0],yerr=eos_no_prior[:,1],
# marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')
ax.plot(z,eos_no_prior[:,0],'--',lw=2.5,color=colors[2])
# ax.fill_between(z,y1=eos_no_prior[:,0]-eos_no_prior[:,1],y2=eos_no_prior[:,0]+eos_no_prior[:,1],
# color=colors[2],alpha=0.5,label=r'Without prior')
ax.fill_between(z,y1=eos_no_prior[:,2],y2=eos_no_prior[:,3],
color=colors[2],label=r'Reconstruction without prior')
# ax.fill_between(z,y1=eos_no_prior2[:,0]-eos_no_prior2[:,1],y2=eos_no_prior2[:,0]+eos_no_prior2[:,1],
# color='g',alpha=0.5,label=r'Reconstruction without prior')
# ax.fill_between(z,y1=eos_no_prior2[:,2],y2=eos_no_prior2[:,3],
# color='g',alpha=0.5,label=r'Reconstruction without prior')
ax.set_xlim(-0.025,1.525)
ax.set_xticks([0,0.25,0.5,0.75,1.0,1.25,1.5])
ax.set_xticklabels([0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=14)
ax.set_xlabel(r'$z$',fontsize=14)
yticks=[-3,-2,-1,-0]
ax.set_yticks(yticks)
ax.set_yticklabels(yticks,fontsize=14)
ax.set_ylabel(r'$w(z)$',fontsize=14)
# lgd=ax.legend(loc='lower left',frameon=False,fontsize=14)
ax.tick_params(axis='both',direction='in')
# texts = lgd.get_texts()
# for i in range(len(texts)):
# plt.setp(texts[i],color=colors[i])
handles,labels = ax.get_legend_handles_labels()
handles = [handles[0], handles[2], handles[1]]
labels = [labels[0], labels[2], labels[1]]
lgd=ax.legend(handles,labels,loc='lower left',frameon=False,fontsize=14)
# lgd=legend(loc='upper left',frameon=False,fontsize=12)
texts = lgd.get_texts()
cid = [0,2,1]
for i in range(len(texts)):
plt.setp(texts[i],fontsize=14,color=colors[i])
# add reduced chisq
dof = 719
chisq_red = 876.39/dof
ax.text(0.05,-2,r'$\chi^2_{\rm reduced} = '+str(round(chisq_red,2))+'$',fontsize=14,color='r')
###########################################################
# final adjustments ...
plt.subplots_adjust(wspace=0.15,
hspace=0.25,
left=0.065,
right=0.985,
top=0.975,
bottom=0.175)
plt.savefig('example_eos_result.pdf')
plt.show()
|
def solution(n, money):
    """Count the ways to make change for amount *n*, modulo 1000000007.

    Classic coin-change DP counting order-insensitive combinations.

    :param n: target amount (non-negative int)
    :param money: iterable of coin denominations
    :return: number of combinations modulo 1000000007
    """
    MOD = 1000000007
    ways = [0] * (n + 1)
    ways[0] = 1  # exactly one way to make 0: use no coins
    for coin in money:
        for amount in range(1, n + 1):
            if amount >= coin:
                # Fix: reduce the running total modulo MOD. The original only
                # reduced the addend, so the accumulated count was unbounded
                # and the returned value could exceed the modulus.
                ways[amount] = (ways[amount] + ways[amount - coin]) % MOD
    return ways[n]
6,242 | c24be05700e5ee043d09d6f2e78cb3de1e7088f1 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Sat Dec 17 14:41:56 2011 +0100
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""Run tests on the libsvm machine infrastructure.
"""
import os
import numpy
import tempfile
import pkg_resources
import nose.tools
import bob.io.base
from . import File, Machine
def F(f):
    """Return the absolute path of *f* inside this package's "data" folder."""
    relative = os.path.join('data', f)
    return pkg_resources.resource_filename(__name__, relative)
def tempname(suffix, prefix='bobtest_machine_'):
    """Reserve a unique temporary file name and return it (file removed).

    :param suffix: file-name suffix, e.g. '.model'
    :param prefix: file-name prefix
    :return: path string of a file that does not currently exist
    """
    handle, path = tempfile.mkstemp(suffix, prefix)
    os.close(handle)   # only the name is wanted, not the open descriptor
    os.unlink(path)    # drop the placeholder; the caller recreates the file
    return path
TEST_MACHINE_NO_PROBS = F('heart_no_probs.svmmodel')
HEART_DATA = F('heart.svmdata') #13 inputs
HEART_MACHINE = F('heart.svmmodel') #supports probabilities
HEART_EXPECTED = F('heart.out') #expected probabilities
IRIS_DATA = F('iris.svmdata')
IRIS_MACHINE = F('iris.svmmodel')
IRIS_EXPECTED = F('iris.out') #expected probabilities
def load_expected(filename):
    """Loads libsvm's svm-predict output file with probabilities.

    The first line is a header: the word "labels" followed by the class
    labels; every following row is "predicted_label prob_1 prob_2 ...".

    :param filename: path of the svm-predict output file
    :return: (sorted class labels, int64 array of predicted labels,
        probability matrix)
    """
    # read the header via a context manager -- the original leaked the handle
    with open(filename) as f:
        header = f.readline()
    all_labels = sorted([int(k) for k in header.split()[1:]])
    data = numpy.loadtxt(filename, dtype='float64', skiprows=1)
    return all_labels, data[:,0].astype('int64'), data[:,1:]
#extracted by running svm-predict.c on the heart_scale example data
expected_heart_predictions = (1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1,
-1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1,
1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1,
-1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1,
-1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1,
1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1,
1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
-1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1,
-1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1,
1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1,
-1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1,
1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1,
-1, -1, -1, -1, -1, -1, 1)
expected_iris_predictions = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3)
def test_can_load():
machine = Machine(HEART_MACHINE)
nose.tools.eq_(machine.shape, (13,1))
nose.tools.eq_(machine.n_support_vectors, [64,68])
nose.tools.eq_(machine.kernel_type, 'RBF')
nose.tools.eq_(machine.machine_type, 'C_SVC')
nose.tools.eq_(len(machine.labels), 2)
assert -1 in machine.labels
assert +1 in machine.labels
assert abs(machine.gamma - 0.0769231) < 1e-6
assert type(machine.__repr__()) is str
def test_can_save():
machine = Machine(HEART_MACHINE)
tmp = tempname('.model')
machine.save(tmp)
del machine
# make sure that the save machine is the same as before
machine = Machine(tmp)
nose.tools.eq_(machine.shape, (13,1))
nose.tools.eq_(machine.n_support_vectors, [64,68])
nose.tools.eq_(machine.kernel_type, 'RBF')
nose.tools.eq_(machine.machine_type, 'C_SVC')
nose.tools.eq_(len(machine.labels), 2)
assert -1 in machine.labels
assert +1 in machine.labels
assert abs(machine.gamma - 0.0769231) < 1e-6
os.unlink(tmp)
def run_for_extension(ext):
machine = Machine(HEART_MACHINE)
tmp = tempname(ext)
machine.save(bob.io.base.HDF5File(tmp, 'w'))
del machine
# make sure that the save machine is the same as before
machine = Machine(bob.io.base.HDF5File(tmp))
nose.tools.eq_(machine.shape, (13,1))
nose.tools.eq_(machine.n_support_vectors, [64,68])
nose.tools.eq_(machine.kernel_type, 'RBF')
nose.tools.eq_(machine.machine_type, 'C_SVC')
nose.tools.eq_(len(machine.labels), 2)
assert -1 in machine.labels
assert +1 in machine.labels
assert abs(machine.gamma - 0.0769231) < 1e-6
assert numpy.all(abs(machine.input_subtract - 0) < 1e-10)
assert numpy.all(abs(machine.input_divide - 1) < 1e-10)
os.unlink(tmp)
def test_can_save_arbitrary():
run_for_extension('.arbitrary')
def test_can_save_h5():
run_for_extension('.h5')
def test_can_save_hdf5():
run_for_extension('.hdf5')
def test_data_loading():
#tests if I can load data in libsvm format using SVMFile
data = File(HEART_DATA)
nose.tools.eq_(data.shape, (13,))
nose.tools.eq_(data.good(), True)
nose.tools.eq_(data.fail(), False)
nose.tools.eq_(data.eof(), False)
#tries loading the data, one by one
all_data = []
all_labels = []
while data.good():
entry = data.read()
if entry is not None:
all_labels.append(entry[0])
all_data.append(entry[1])
nose.tools.eq_(len(all_data), len(all_labels))
nose.tools.eq_(len(all_data), 270)
#tries loading the data with numpy arrays allocated internally
counter = 0
data.reset()
entry = data.read()
while entry:
nose.tools.eq_( entry[0], all_labels[counter] )
assert numpy.array_equal(entry[1], all_data[counter])
counter += 1
entry = data.read()
#tries loading the file all in a single shot
data.reset()
labels, data = data.read_all()
assert numpy.array_equal(labels, all_labels)
for k, l in zip(data, all_data):
assert numpy.array_equal(k, l)
#makes sure the first 3 examples are correctly read
ex = []
ex.append(numpy.array([0.708333 , 1, 1, -0.320755 , -0.105023 , -1, 1,
-0.419847 ,-1, -0.225806 ,0. ,1, -1], 'float64'))
ex.append(numpy.array([0.583333, -1, 0.333333, -0.603774, 1, -1, 1,
0.358779, -1, -0.483871, 0., -1, 1], 'float64'))
ex.append(numpy.array([0.166667, 1, -0.333333, -0.433962, -0.383562, -1,
-1, 0.0687023, -1, -0.903226, -1, -1, 1], 'float64'))
ls = [+1, -1, +1]
for k, (l, e) in enumerate(zip(ls, ex)):
nose.tools.eq_( l, labels[k] )
assert numpy.array_equal(e, data[k])
@nose.tools.raises(RuntimeError)
def test_raises():
#tests that the normal machine raises because probabilities are not
#supported on that model
machine = Machine(TEST_MACHINE_NO_PROBS)
labels, data = File(HEART_DATA).read_all()
machine.predict_class_and_probabilities(data)
def test_correctness_heart():
#tests the correctness of the libSVM bindings
machine = Machine(HEART_MACHINE)
labels, data = File(HEART_DATA).read_all()
pred_label = machine.predict_class(data)
assert numpy.array_equal(pred_label, expected_heart_predictions)
#finally, we test if the values also work fine.
pred_lab_values = [machine.predict_class_and_scores(k) for k in data]
#tries the variant with multiple inputs
pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)
assert numpy.array_equal(expected_heart_predictions, pred_labels2)
assert numpy.array_equal(tuple([k[1] for k in pred_lab_values]), pred_scores2)
#tries to get the probabilities - note: for some reason, when getting
#probabilities, the labels change, but notice the note bellow:
# Note from the libSVM FAQ:
# Q: Why using the -b option does not give me better accuracy?
# There is absolutely no reason the probability outputs guarantee you
# better accuracy. The main purpose of this option is to provide you the
# probability estimates, but not to boost prediction accuracy. From our
# experience, after proper parameter selections, in general with and
# without -b have similar accuracy. Occasionally there are some
# differences. It is not recommended to compare the two under just a fixed
# parameter set as more differences will be observed.
all_labels, real_labels, real_probs = load_expected(HEART_EXPECTED)
pred_labels, pred_probs = machine.predict_class_and_probabilities(data)
assert numpy.array_equal(pred_labels, real_labels)
assert numpy.all(abs(pred_probs - real_probs) < 1e-2), abs(pred_probs - real_probs)
def test_correctness_iris():
#same test as above, but with a 3-class problem.
machine = Machine(IRIS_MACHINE)
labels, data = File(IRIS_DATA).read_all()
pred_label = machine.predict_class(data)
assert numpy.array_equal(pred_label, expected_iris_predictions)
#finally, we test if the values also work fine.
pred_lab_values = [machine.predict_class_and_scores(k) for k in data]
#tries the variant with multiple inputs
pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)
assert numpy.array_equal(expected_iris_predictions, pred_labels2)
assert numpy.all(abs(numpy.vstack([k[1] for k in
pred_lab_values]) - numpy.vstack(pred_scores2)) < 1e-20 )
#tries to get the probabilities - note: for some reason, when getting
#probabilities, the labels change, but notice the note bellow:
all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)
pred_labels, pred_probs = machine.predict_class_and_probabilities(data)
assert numpy.array_equal(pred_labels, real_labels)
assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs)) < 1e-6)
@nose.tools.raises(RuntimeError)
def test_correctness_inputsize_exceeds():
#same test as above, but test for excess input
machine = Machine(IRIS_MACHINE)
labels, data = File(IRIS_DATA).read_all()
# add extra columns to the input data
data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])
pred_label = machine.predict_class(data)
|
6,243 | 2ab6488276c74da8c3d9097d298fc53d1caf74b1 | import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
# zero padded RectBivariateSpline, if on
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1, zero_pad=False):
    """Build a RectBivariateSpline, optionally zero-padding the grid first.

    The padding branch below was dead code behind an unconditional early
    return ("if on" in the original comment); it is now reachable through the
    backward-compatible *zero_pad* flag. The default (False) keeps the old
    behavior: a plain spline over the given grid.

    :param y1: 1-d array of grid coordinates along the first axis
    :param x1: 1-d array of grid coordinates along the second axis
    :param map1: 2-d array of values on the (y1, x1) grid
    :param kx: spline degree along x
    :param ky: spline degree along y
    :param zero_pad: if True, extend each axis by one linearly-extrapolated
        sample per side and surround the map with a zero border before fitting
    :return: scipy.interpolate.RectBivariateSpline instance
    """
    if not zero_pad:
        return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # extend each coordinate axis by one linearly-extrapolated point per side
    y2 = numpy.zeros(numpy.size(y1)+2)
    y2[1:-1] = y1
    y2[0] = 2*y2[1]-y2[2]
    y2[-1] = 2*y2[-2]-y2[-3]
    x2 = numpy.zeros(numpy.size(x1)+2)
    x2[1:-1] = x1
    x2[0] = 2*x2[1]-x2[2]
    x2[-1] = 2*x2[-2]-x2[-3]
    # embed the map in a zero border matching the padded grid
    map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
    map2[1:-1,1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
pass
# spectral energy distribution class
class SpectralEnergyDistribution():
    """Source spectrum; only the 'BB' (blackbody) type is implemented."""

    def __init__(self, type, info):
        # keep a private deep copy so later mutation of the caller's list
        # cannot change this SED
        self.type = type
        self.info = copy.deepcopy(info)

    def Nlambda(self, lambda_):
        """Photon flux density N_lambda (photons/m^2/s/um) at lambda_ (um)."""
        if self.type != 'BB':
            print('ERROR: Invalid SED type')
            exit()
        # blackbody: info = [T (K), solid angle]
        temperature = self.info[0]
        solid_angle = self.info[1]
        x = 14387.769 / lambda_ / temperature  # hc/(k T lambda)
        bose = numpy.exp(-x) / (1. - numpy.exp(-x))
        # the 1e12 converts um^2 -> m^2
        return 2 / lambda_**4 * 2.99792458e14 * 1e12 * bose * solid_angle
# filter class
class Filter():
    """Bandpass transmission curve.

    Types: 'STH' (smoothed tophat between info[0] and info[1]) or
    'interp' (tabulated curve interpolated from info[:,0]/info[:,1]).
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    def Tlambda(self, lambda_):
        """Transmission at wavelength lambda_ (um)."""
        if self.type == 'STH':
            # smoothed tophat with 2% edge widths at both cut-on and cut-off
            lmin = self.info[0]; dlmin = lmin*.02
            lmax = self.info[1]; dlmax = lmax*.02
            rise = numpy.tanh((lambda_ - lmin) / dlmin)
            fall = numpy.tanh((lambda_ - lmax) / dlmax)
            return (rise - fall) / 2.
        if self.type == 'interp':
            # tabulated: info[:,0] = wavelength, info[:,1] = throughput
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        print('ERROR: Invalid filter type')
        exit()
# load mask files
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
# normalize
maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
N_in = maskfiles.N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# lower resolution masks
maskfiles.n_lores = 7
for ku in range(1,maskfiles.n_lores):
N2 = N_in//2**ku
x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# SCA locations
sca = EmptyClass()
sca.size = 40.88 # mm
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
sca.scale = 133.08
# reference Zernikes
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
    """Map of a weighted sum of Zernike polynomials (Noll ordering).

    amp[0:Namp] are coefficients of Z1..ZNamp; the grid covers
    x, y = -(1-1/Ngrid) .. +(1-1/Ngrid), multiplied by scale.
    Returns an (Ngrid, Ngrid) array.
    """
    # stdlib factorial: the numpy.math alias used originally was removed in NumPy >= 1.25
    from math import factorial

    xx = numpy.tile(numpy.linspace(-1 + 1/Ngrid, 1 - 1/Ngrid, Ngrid), (Ngrid, 1))
    yy = numpy.copy(xx.T)
    rho = numpy.sqrt(xx**2 + yy**2) * scale
    phi = numpy.arctan2(yy, xx)
    output = numpy.zeros((Ngrid, Ngrid))
    # smallest nmax such that all namp coefficients fit in radial orders 0..nmax
    nmax = 0
    namp = numpy.size(amp)
    while namp > (nmax + 1) * (nmax + 2) // 2:
        nmax += 1
    # precompute radial powers and azimuthal cos/sin tables (trigphi[-i] = sin(i*phi))
    rpows = numpy.ones((nmax + 1, Ngrid, Ngrid))
    trigphi = numpy.ones((2*nmax + 1, Ngrid, Ngrid))
    for i in range(1, nmax + 1):
        rpows[i, :, :] = rho**i
    for i in range(0, nmax + 1):
        trigphi[i, :, :] = numpy.cos(i * phi)
    for i in range(1, nmax + 1):
        trigphi[-i, :, :] = numpy.sin(i * phi)
    # loop over Zernikes
    for n in range(nmax + 1):
        for m in range(-n, n + 1, 2):
            # radial polynomial R_n^{|m|}
            Z = numpy.zeros((Ngrid, Ngrid))
            for k in range((n - abs(m)) // 2 + 1):
                coef = (-1)**k * factorial(n - k) / factorial(k) \
                    / factorial((n - m) // 2 - k) / factorial((n + m) // 2 - k)
                Z += coef * rpows[n - 2*k, :, :]
            # azimuthal factor: cos(m*phi) for m>=0, sin(|m|*phi) for m<0
            Z *= trigphi[m, :, :]
            # Noll index j for this (n, m)
            j = n * (n + 1) // 2 + abs(m)
            if (-1)**j * (m + .5) < 0 or m == 0:
                j += 1
            # normalization: unit RMS over the unit disk
            factor = numpy.sqrt(n + 1)
            if m != 0:
                factor *= numpy.sqrt(2)
            if j <= namp:
                output += factor * amp[j - 1] * Z
    return output
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
    """Binary annular pupil mask: 1.0 where obs <= rho < 1, else 0.0."""
    axis = numpy.linspace(-1 + 1/Nstep, 1 - 1/Nstep, Nstep)
    gx = numpy.tile(axis, (Nstep, 1))
    radius = scale * numpy.hypot(gx, gx.T)
    inside = (radius >= obs) & (radius < 1)
    return inside.astype(float)
def test_zernike():
    """Print the first 36 Zernike maps on a small grid for visual inspection."""
    N = 5
    for idx in range(36):
        coeffs = numpy.zeros(36)
        coeffs[idx] = 1
        M = zernike_map_noll(coeffs, N, N/(N-1))
        print(' *** Zernike {:2d} ***'.format(idx + 1))
        for row in range(N):
            print(''.join(' {:10.5f}'.format(M[row, col]) for col in range(N)))
        print('')
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
    """Monochromatic PSF for a given wavefront.

    psi   : vector of Zernikes, in wavelengths
    mask  : pupil mask info; if it has attribute N, a tabulated mask is used
            (mask.spline if present, else an interpolator built from mask.array);
            otherwise a 32%-obstructed annulus
    scale : sampling (points per lambda/D)
    Nstep : number of grid points per axis
    Returns an (Nstep, Nstep) PSF normalized to sum to 1.
    """
    if hasattr(mask, 'N'):
        if hasattr(mask, 'spline'):
            spline = mask.spline
        else:
            # build a bilinear interpolator over the tabulated 2048^2 mask
            n_tab = 2048
            grid = numpy.linspace(-1 + 1/n_tab, 1 - 1/n_tab, n_tab)
            spline = RectBivariateSplineZero(numpy.copy(grid), grid, mask.array, kx=1, ky=1)
        samples = numpy.linspace(-1 + 1/Nstep, 1 - 1/Nstep, Nstep) * scale
        amplitude = spline(numpy.copy(samples), samples).astype(numpy.complex128)
        amplitude = amplitude * make_mask_annulus(0, Nstep, scale)
    else:
        # fall back to an annular pupil with a 32% central obstruction
        amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
    amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
    power = numpy.abs(numpy.fft.ifft2(amplitude))**2
    # move the FFT origin to the array center (quadrant swap)
    half = Nstep // 2
    centered = numpy.zeros_like(power)
    centered[half:, half:] = power[:half, :half]
    centered[half:, :half] = power[:half, half:]
    centered[:half, half:] = power[half:, :half]
    centered[:half, :half] = power[half:, half:]
    return centered / numpy.sum(centered)
# helper function
def onescut(n):
    """Trapezoidal weights on n+1 samples: half weight at both ends, scaled by 1/n."""
    weights = numpy.ones(n + 1)
    weights[0] = weights[-1] = .5
    return weights / n
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
    """Gaussian-quadrature nodes and weights across a filter bandpass.

    sed     : spectral energy distribution (provides .Nlambda)
    filter  : filter information incl. bandpass (provides .Tlambda)
    nOrder  : order of polynomial (number of nodes)
    wlrange : [lmin, lmax, npts] in um
    Returns (wavelengths, weights).
    """
    # unpack info
    lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
    # build integrals I_k = int x^k S(x) F(x) dx  (discretized on npts samples)
    x = numpy.linspace(lmin,lmax,npts)
    c = numpy.zeros((npts))
    for i in range(npts):
        c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
    o = numpy.ones((npts))
    I = numpy.zeros((2*nOrder))
    # center wavelengths on the mean to keep the moment matrix well conditioned
    lctr = numpy.mean(x)
    for k in range(2*nOrder):
        I[k] = numpy.sum(o*(x-lctr)**k*c)
    # orthogonal polynomial p_n
    # require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
    # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
    coef = numpy.zeros((nOrder+1))
    coef[0] = 1.
    A = numpy.zeros((nOrder,nOrder))
    for k in range(nOrder):
        for j in range(nOrder):
            A[k,j] = I[j+k]
    coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    # quadrature nodes are the roots of the orthogonal polynomial
    xroot = numpy.sort(numpy.real(p.r))
    wroot = numpy.zeros_like(xroot)
    pprime = numpy.polyder(p)
    for i in range(nOrder):
        # weight from the Lagrange polynomial through the remaining roots
        px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
        wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
    # undo the centering before returning
    xroot = xroot + lctr
    return xroot,wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
    """Polychromatic PSF: wavelength-weighted sum of monochromatic PSFs.

    psi       : vector of Zernikes, in microns
    mask      : pupil mask information
    sed       : spectral energy distribution
    scale_1um : sampling (points per lambda/D at 1 um)
    Nstep     : number of grid points per axis
    filter    : filter information (incl. bandpass)
    addInfo   : additional-information class; FastMode switches to
                Gaussian-quadrature wavelength integration
    Output normalized to sum to 1.
    """
    # integration steps: fixed hard wavelength window [um] and bin count
    hard_lmin = 0.4
    hard_lmax = 2.5
    hard_Nl = 420
    # locate the first/last bins with non-negligible filter transmission
    ilmin = hard_Nl-1; ilmax = 0
    for il in range(1,hard_Nl):
        wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
        if filter.Tlambda(wl)>1e-4:
            if il<ilmin:
                ilmin=il
                wlmin=wl
            if il>ilmax:
                ilmax=il
                wlmax=wl
    # trapezoidal sampling: coarse in the wings (every ~6 bins), fine in-band
    na = ilmin//6 + 1
    nb = (hard_Nl-ilmax)//6 + 1
    wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
    dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
    #print(wl,dwl,numpy.size(wl),numpy.size(dwl))
    # reduced coverage
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            # 10-node Gaussian quadrature; weights already absorb SED x filter
            wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
    # make output PSF
    sumc = 0.
    output = numpy.zeros((Nstep,Nstep))
    for i in range(numpy.size(wl)):
        # weight = photon rate x transmission x bin width (plain GQ weight in FastMode)
        c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
        if hasattr(addInfo,'FastMode'):
            if addInfo.FastMode: c = dwl[i]
        this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
        sumc += c
        output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
        #print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
    output /= sumc
    return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
# .par -> offset parameters
# addInfo = additional information class:
# .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Oversampled PSF at a given SCA and field position.

    sed     : source SED
    filt    : filter letter (RZYJHFK, plus W)
    ovsamp  : oversampling factor relative to native pixels
    Nstep   : number of samples per axis
    scanum  : SCA number (1..18)
    pos     : (x, y) position on SCA in mm, (0,0) = center
    offsets : adjustment parameters (.par -> offset parameter vector)
    addInfo : additional information class (.ctr -> centroid (dx, dy))
    """
    # get information
    parOn = False
    if hasattr(offsets, 'par'): parOn = True
    # get Zernikes in microns: bilinear interpolation of the 4 per-SCA
    # corner reference vectors across the SCA footprint
    ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
    wt_L = .5 - pos[0]/sca.size
    wt_R = .5 + pos[0]/sca.size
    wt_B = .5 - pos[1]/sca.size
    wt_T = .5 + pos[1]/sca.size
    psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
    # focal-plane coordinates of this position [mm]
    xf = sca.x[scanum-1] + pos[0]
    yf = sca.y[scanum-1] + pos[1]
    # Zernike offsets
    if parOn:
        psi[3] += offsets.par[offset_index.foc ]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2]
        psi[7] += offsets.par[offset_index.coma1]
        # focus gradient across the focal plane
        psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
    # sampling per lambda/D at 1 um; .11 arcsec native pixel, pi/648000 rad per arcsec
    scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
    #print(scale_1um)
    # filter curves
    if filt=='K':
        filter = Filter('STH', [1.95,2.30])
    elif filt=='F':
        filter = Filter('interp', FilterData[:,(0,7)])
    elif filt=='H':
        filter = Filter('interp', FilterData[:,(0,6)])
    elif filt=='W':
        filter = Filter('interp', FilterData[:,(0,5)])
    elif filt=='J':
        filter = Filter('interp', FilterData[:,(0,4)])
    elif filt=='Y':
        filter = Filter('interp', FilterData[:,(0,3)])
    elif filt=='Z':
        filter = Filter('interp', FilterData[:,(0,2)])
    elif filt=='R':
        filter = Filter('interp', FilterData[:,(0,1)])
    else:
        print('Error: unknown filter')
        exit()
    # working scale uses the transmission-weighted effective wavelength
    la = numpy.linspace(.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101): fla[i] = filter.Tlambda(la[i])
    scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
    # get the mask: pick the lowest-resolution tabulated pupil that still
    # oversamples the Nstep/scale grid
    mask = EmptyClass(); mask.N=1
    imk = 0
    while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
    #print(' *** ', Nstep, scale, scale/scale_1um, imk)
    # F and K use the full (obstruction) mask; other bands the rim mask
    if filt=='F' or filt=='K':
        mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
    else:
        mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
    # x & y offsets: convert the requested centroid shift into tilt Zernikes
    if hasattr(addInfo, 'ctr'):
        d = .5*(1-1/ovsamp)
        psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # smooth: Gaussian jitter kernel, adjustable through the offset parameters
    Cxx = Cyy = .09; Cxy = 0.
    if parOn:
        Cxx = .09 + offsets.par[offset_index.jxx ]
        Cxy = offsets.par[offset_index.jxy ]
        Cyy = .09 + offsets.par[offset_index.jyy ]
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep,Nstep))
    ky = numpy.zeros((Nstep,Nstep))
    for i in range(-Nstep//2, Nstep//2):
        kx[:,i] = abs(i)
        ky[i,:] = abs(i)
    kx *= 2.*numpy.pi*ovsamp/Nstep
    ky *= 2.*numpy.pi*ovsamp/Nstep
    # apply the jitter as a Gaussian MTF in Fourier space, then invert
    output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return(output)
# parameters for next couple of functions
N_STD = 1024 # must be a multiple of 4
OV_STD = 8
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
# .F -> total counts (in e)
# .ctr -> centroid (dx,dy)
# .many -> @ 5x5 grid of offsets
#
# .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
# .bfe_overwrite => special mode to compute BFE with time dependent PSF
# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Pixel-sampled postage stamp of the PSF at a given SCA position.

    sed     : source SED
    filt    : filter letter (RZYJHFK)
    scanum  : SCA number (1..18)
    pos     : (x, y) position on SCA in mm, (0,0) = center
    offsets : adjustment parameters (placeholder)
    addInfo : additional information class:
              .F     -> total counts (in e)
              .ctr   -> centroid (dx, dy)
              .many  -> 5x5 grid of integer-subpixel offsets (uses .force_ov)
              .bfe   -> add brighter-fatter effect (.bfe_a, .bfe_aplus)
              .bfe_overwrite -> compute BFE from .stamp_in acting on this PSF
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # oversampled PSF scaled to the requested total flux
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
    # re-bin ov x ov blocks of the oversampled stamp into native pixels
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N//2+(i-psSize//2)*ov
        for j in range(psSize):
            y = N//2+(j-psSize//2)*ov
            out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
            if hasattr(addInfo, 'vtpe'):
                # vertical trailing-pixel effect: add a fraction of the pixel above
                out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
    if hasattr(addInfo,'many'):
        # 25 stamps on a 5x5 grid of one-subpixel shifts (dx, dy in -2..+2)
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                for k in range(25):
                    dy = k%5 - 2; dx = k//5 - 2
                    out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
    # BFE?
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo,'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal BFE: boundary shift proportional to the charge difference
        ah = 0
        if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
        for i in range(psSize-1):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
                # flux at the shared boundary: mean of the two sub-columns straddling it
                mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
                dout[j,i] += shift*mflux
                dout[j,i+1] -= shift*mflux
        # vertical BFE (aplus enters with opposite sign)
        av = 0
        if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize-1):
                y = N//2+(j-psSize//2)*ov
                shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
                mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
                dout[j,i] += shift*mflux
                dout[j+1,i] -= shift*mflux
        out+=dout
        # in bfe_overwrite mode return only the BFE perturbation itself
        if hasattr(addInfo, 'bfe_overwrite'): out=dout
    return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the PSF.

    Same input format as postage_stamp; returns
    [A, xc, yc, T, e1, e2] with positions/size in native pixels.
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # unit-flux, zero-centroid copy of the request so moments are intrinsic
    addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # convolve with a native-pixel tophat so moments refer to pixelized light
    bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
    Np = N+ov-1
    # moment format: A,x,y,Cxx,Cxy,Cyy
    mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
    newmom = numpy.zeros_like(mom)
    con = .5 # convergence factor
    xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
    yy1 = numpy.copy(xx1.T)
    # fixed-count adaptive-moment iteration: weight by the current Gaussian fit
    for iter in range(256):
        det = mom[3]*mom[5]-mom[4]**2
        xx = xx1-mom[1]
        yy = yy1-mom[2]
        G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G*xx)
        newmom[2] = numpy.sum(G*yy)
        newmom[3] = numpy.sum(G*xx**2)
        newmom[4] = numpy.sum(G*xx*yy)
        newmom[5] = numpy.sum(G*yy**2)
        mom[0] = 2*newmom[0]
        # damped update of centroid and covariance toward the measurement
        err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
        mom[1:] += err*con
    # convert to native pixels; T = Cxx+Cyy, e1 = (Cxx-Cyy)/T, e2 = 2Cxy/T
    return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
    """Poisson-deviance chi^2 between an observed and model stamp.

    var = read noise variance, added to both sides; the observed side is
    floored at 1e-24 so the logarithm stays finite.
    """
    model = theory + var
    data = numpy.maximum(obs + var, 1e-24)
    return numpy.sum(model - data - data * numpy.log(model / data)) * 2
|
6,244 | 050f060bb9d3d46f8b87c9802356bd0da8f926f8 | with open('rosalind_ba3d.txt','r') as f:
kmer_length = int(f.readline().strip())
seq = f.readline().strip()
dict = {}
for offset in range(len(seq)-kmer_length+1):
prefix = seq[offset:offset+kmer_length-1]
suffix = seq[offset+1:offset+kmer_length]
if prefix in dict:
dict[prefix].append(suffix)
else:
dict[prefix] = [suffix]
for key in sorted(dict):
print(key + " -> " + ','.join(sorted(dict[key]))) |
6,245 | e50c1ef7368aabf53bc0cfd45e19101fa1519a1f | import os
from typing import List
from pypinyin import pinyin, lazy_pinyin
# map vowel-number combination to unicode
# map vowel-number combination to unicode
# tone-key letter -> the accented vowels (a e i o u ü) carrying that tone:
#   d = 1st tone (macron), f = 2nd (acute), j = 3rd (caron), k = 4th (grave)
# NOTE(review): the "d" row ends with 'ǜ' (4th-tone ü) instead of 'ǖ' (1st-tone);
# looks like a data typo, but this is runtime data -- confirm before changing.
toneMap = {
    "d": ['ā', 'ē', 'ī', 'ō', 'ū', 'ǜ'],
    "f": ['á', 'é', 'í', 'ó', 'ú', 'ǘ'],
    "j": ['ǎ', 'ě', 'ǐ', 'ǒ', 'ǔ', 'ǚ'],
    "k": ['à', 'è', 'ì', 'ò', 'ù', 'ǜ'],
}
# highest observed weight per single character; populated by getWeightMap()
weightMap = {}
def getWeightMap():
    """Populate the module-level weightMap with the highest weight per single character.

    Reads the tab-separated base dictionary (``word\\tcode\\tweight`` rows);
    only well-formed single-character rows are kept, and for repeated
    characters the larger weight wins (tone codes should rank high).
    """
    path = os.path.join(os.path.dirname(__file__), "../cells/cubor-base.dict.yaml")
    with open(path, "r", encoding='utf8') as base:
        for raw in base:
            units = raw.split('\t')
            # skip blank lines and multi-character words; guarantees a weight field
            if len(units) == 3 and len(units[0]) == 1:
                word = units[0]
                # strip the EOL explicitly: the original `[:-1]` would eat a digit
                # on a final line that lacks a trailing newline
                weight = units[2].rstrip('\r\n')
                prev = weightMap.get(word)
                # keep the maximum weight seen for this character
                if prev is None or int(weight) > int(prev):
                    weightMap[word] = weight
def flatten(a):
    """Yield the leaves of an arbitrarily nested list structure, left to right."""
    for item in a:
        if isinstance(item, list):
            yield from flatten(item)
        else:
            yield item
def getPinyins(c: List[List[str]]) -> list:
    # Flatten pinyin()'s nested per-character reading lists into one flat list.
    return list(flatten(pinyin(c, heteronym=True)))
def getToneKeys(tone: str) -> str:
    """Return the tone-key letter (d/f/j/k) whose accented vowel appears in *tone*.

    Falls back to 'l' (neutral tone) when no accented vowel is found.
    """
    for toneKey, toneList in toneMap.items():
        # membership test replaces the original dunder call tone.__contains__(...)
        if any(toneChar in tone for toneChar in toneList):
            return toneKey
    return 'l'
def readSingleCharYaml():
    """Append tone-coded single-character entries to the base dictionary.

    Reads ../cubor/cubor-single.dict.yaml line by line; for each well-formed
    ``word\\tcode\\tweight`` row, derives the tone key(s) of the character and
    appends ``word\\tcode+toneKey\\tweight`` lines to the base dictionary.
    """
    retlines = []
    with open(os.path.join(os.path.dirname(__file__),
              "../cubor/cubor-single.dict.yaml"), "r", encoding='utf8') as yaml:
        line = yaml.readline()
        # append tone information until EOF
        # (fixed: original used `while len(line) is not 0` -- an identity test
        # on an int, which is a SyntaxWarning and a latent bug)
        while len(line) != 0:
            chars = line.split('\t')
            if len(chars) == 3:  # fixed: was `len(chars) is 3`
                # word text and its original (toneless) reading
                word, srcPy = chars[0], chars[1]
                tones = getPinyins(pinyin(word, heteronym=True))
                tempToneKeys = []
                for tone in tones:
                    toneKey = getToneKeys(tone)
                    if toneKey not in tempToneKeys:  # dedupe: e.g. 啊 has several first-tone readings
                        tempToneKeys.append(toneKey)
                # base-dict derives from pinyin_simp; characters missing from
                # weightMap are low-weight traditional forms
                weight = weightMap.get(word) if weightMap.get(word) is not None else '0'
                # NOTE(review): only the *last* toneKey from the loop is used here
                # even though tempToneKeys collects several -- confirm whether one
                # output line per tone key was intended.
                line = f'{word}\t{srcPy}{toneKey}\t{weight}\r\n'  # deployment format
                # line = f'{word}\t{toneKey}\t{weight}\r\n'  # for rebuilding the single-char tone table
                retlines.append(line)
                # fullPinyin = lazy_pinyin(word)[0] if len(lazy_pinyin(word)) > 0 else ''
                # if len(fullPinyin) > 0 and fullPinyin != srcPy:
                #     retlines.append(f'{word}\t{fullPinyin}{toneKey}\t{weight}\r\n')  # deployment format
            line = yaml.readline()
    with open(os.path.join(os.path.dirname(__file__),
              "../cells/cubor-base.dict.yaml"), "a", encoding='utf8') as writer:  # deployment target
        # "../cubor/cubor-tones.dict.yaml"), "w", encoding='utf8') as writer:  # for rebuilding the single-char table
        writer.writelines(retlines)
if __name__ == '__main__':
    print('[INFO] - 构建库珀输入法 声调表...')
    getWeightMap()  # build the per-character weight table first
    readSingleCharYaml()  # then append the tone-coded entries
    print('[INFO] - 构建完成!')
|
6,246 | 71e7a209f928672dbf59054b120eed6a77522dde | from springframework.web.servlet import ModelAndView
from springframework.web.servlet.HandlerAdapter import HandlerAdapter
from springframework.web.servlet.mvc.Controller import Controller
from springframework.web.servlet.mvc.LastModified import LastModified
from springframework.utils.mock.inst import (
HttpServletResponse,
HttpServletRequest,
)
class SimpleControllerHandlerAdapter(HandlerAdapter):
    """HandlerAdapter that drives Controller-style handlers.

    Port of Spring MVC's SimpleControllerHandlerAdapter: supports any handler
    implementing the Controller interface and delegates request handling to it.
    """

    def supports(self, handler: object) -> bool:
        """Return True when this adapter can handle *handler* (a Controller)."""
        return isinstance(handler, Controller)

    def handle(
        self,
        request: HttpServletRequest,
        response: HttpServletResponse,
        handler: object,
    ) -> ModelAndView:
        """Delegate the request to the Controller and return its ModelAndView."""
        handler: Controller = handler  # re-annotate for readability; no runtime effect
        return handler.handle_request(request, response)

    def get_last_modified(
        self, request: HttpServletRequest, handler: object
    ) -> int:
        """Return the handler's last-modified timestamp, or -1 if unsupported."""
        if isinstance(handler, LastModified):
            handler: Controller = handler
            return handler.get_last_modified(request)
        # handlers without LastModified support report "unknown"
        return -1
|
6,247 | 94a3a74260fac58b4cad7422608f91ae3a1a0272 | from inotifier import Notifier
from IPython.display import display, Audio, HTML
import pkg_resources
import time
class AudioPopupNotifier(Notifier):
    """Play Sound and show Popup upon cell completion"""

    def __init__(self, message="Cell Completed", audio_file="pad_confirm.wav"):
        """message: popup text; audio_file: bundled sound name, or a path/URL fallback."""
        super(AudioPopupNotifier, self).__init__()
        self.message = message
        self.audio_file = audio_file
        try:
            # prefer the sound bundled inside the inotifications package ...
            self.audio = pkg_resources.resource_string('inotifications', 'sounds/{}'.format(audio_file))
        except IOError:
            # ... falling back to treating audio_file as a direct path/URL
            self.audio = audio_file
        # JS snippet that raises a blocking alert dialog in the notebook frontend
        self.template = '<script type="text/javascript">alert("{}");</script>'

    def notify(self):
        """Play the audio, give it a head start, then display the popup."""
        display(Audio(self.audio, autoplay=True))
        time.sleep(3)  # let the sound play before the blocking alert appears
        display(HTML(self.template.format(self.message)))
|
6,248 | 3b71ef6c3681b8c5e6aadf2d125c35cbf3a12661 | import loops
class Card():
#to make a card you must type Card("Name of Card")
def check_cat(self,string):
if "Cat" in string:
return True
return False
def __init__(self,string):
self.type = string
self.cat = self.check_cat(self.type)
# self.image_back = image_back
# self.image_front = image_front
def __str__(self):
return self.type
#negates any action, except a defuse
def nope(self,arr_players,cards,turn_order):
count = 0
for i,k in enumerate(arr_players):
if i != turn_order:
for i,k in enumerate(k.hand):
if k == cards[11]:
count += 1
if count > 0:
print("A nope card can be played")
decision = input("Would a player like to play a nope card? (y/n)")
while decision != "y" and decision != "n":
decision = input("Would a player like to play a nope card? (y/n) ")
if decision == "n":
return False
elif decision == 'y':
for i,k in enumerate(arr_players):
print(str(i)+"-"+k.name)
player = int(input("Which player would like to play the nope card?"))
while (player < 0 or player > len(arr_players)) and players == turn_order:
player = int*input("Which player would like to play the nope card?")
arr_players[player].hand.remove(cards[11])
return True
return False
#makes another player choose a card to give away to current player
def favor(self,hand,player,arr_players,played_card):
recipient = loops.phase_of_taking(arr_players,player)
card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient))
print(card_taken,"was given")
recipient.hand.remove(card_taken)
player.hand.append(card_taken)
return True,False
#allows a player to steal a card from another player
def steal(self,hand,player,arr_players,played_card):
recipient = loops.phase_of_taking(arr_players,player)
card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient))
print("You stole",card_stolen.type)
hand.remove(played_card)
player.hand.append(card_stolen)
return True,False
#makes the player skip a turn
def skip(self,attack,pick):
print("Your turn has been skipped")
pick = False
return pick,attack
#the player makes the next person take his turn as well, forcing them to take 2 turns
def attack(self,attack,pick):
attack = True
pick = False
return pick,attack
#see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions
def see_future(self,decker):
if decker.cards_left() < 3:
for i in range(decker.cards_left()):
card = decker.draw_top(i)
print(card.type)
decker.add_card(card,i)
else:
for i in range(3):
card = decker.draw_top(i)
print(card.type)
decker.add_card(card,i) |
6,249 | 5762271de166994b2f56e8e09c3f7ca5245b7ce0 | import binascii
import collections
import enum
Balance = collections.namedtuple("Balance", ["total", "available", "reward"])
Balance.__doc__ = "Represents a balance of asset, including total, principal and reward"
Balance.total.__doc__ = "The total balance"
Balance.available.__doc__ = "The principal, i.e. the total minus staking rewards"
Balance.reward.__doc__ = "The staking rewards (interest)"
BlockPosition = collections.namedtuple(
"BlockPosition", ["epoch", "slot", "absolute_slot", "height"]
)
BlockPosition.__doc__ = "Represents block's position within the blockchain"
BlockPosition.epoch.__doc__ = "Epoch number"
BlockPosition.slot.__doc__ = "Slot number"
BlockPosition.absolute_slot.__doc__ = "Absolute slot number"
BlockPosition.height.__doc__ = "Block number (height of the chain) [optional]"
Epoch = collections.namedtuple("Epoch", ["number", "starts"])
class AssetID(object):
    """
    Represents the ID of a native Cardano asset. It consists of asset name and policy ID.
    It renders as string representation of ``asset_name:policy_id``.

    The ``asset_name`` is always kept encoded as hexadecimal string and must be passed
    to the constructor as such.
    The ``.name_bytes`` property is a :class:`bytes` decoded representation of the hex.
    Because Cardano allows full ASCII set to be used in asset names, some of them are not
    safe to be displayed directly.
    """

    # class-level defaults, overwritten in __init__
    asset_name = ""       # hex-encoded asset name
    policy_id = None      # minting policy identifier
    name_bytes = None     # raw bytes decoded from the hex name

    def __init__(self, asset_name, policy_id):
        asset_name = asset_name if asset_name is not None else self.asset_name
        policy_id = policy_id or self.policy_id
        # binascii.hexlify() returns bytes() for some unknown reason. We may expect them to be
        # passed here:
        if isinstance(asset_name, bytes):
            self.name_bytes = binascii.unhexlify(asset_name)
            self.asset_name = asset_name.decode()
        elif isinstance(asset_name, str):
            self.name_bytes = binascii.unhexlify(asset_name.encode())
            self.asset_name = asset_name
        else:
            # BUG FIX: report the type of the *argument*; the original used
            # type(self.asset_name), which is always the str class default here.
            raise ValueError(
                "The asset_name is neither str or bytes but {}".format(
                    type(asset_name)
                )
            )
        self.policy_id = policy_id

    def __repr__(self):
        return "{:s}:{:s}".format(self.asset_name, self.policy_id)

    def __eq__(self, other):
        """Compare equal to AssetIDs, strings or bytes of the same canonical form."""
        if isinstance(other, AssetID):
            return str(self) == str(other)
        elif isinstance(other, str):
            return str(self) == other
        elif isinstance(other, bytes):
            return str(self).encode() == other
        return super(AssetID, self).__eq__(other)

    def __hash__(self):
        # hash on the canonical "name:policy" form, consistent with __eq__
        return hash(str(self))
StakePoolStatus = enum.Enum("StakePoolStatus", "ACTIVE RETIRING DELISTED")
StakePoolStatus.__doc__ = "Represents stake pool status"
StakeRewardMetrics = collections.namedtuple(
"StakeRewardMetrics",
[
"expected",
"stake",
],
)
StakeRewardMetrics.__doc__ = "Represents stake pool reward metrics"
StakeRewardMetrics.expected.__doc__ = "Expected rewards at the end of an epoch, in ADA"
StakeRewardMetrics.stake.__doc__ = (
"Staked amount against which rewards were calculated, in ADA"
)
StakePoolInfo = collections.namedtuple(
"StakePoolInfo",
[
"id",
"status",
"ticker",
"name",
"description",
"homepage",
"rewards",
"cost",
"margin",
"pledge",
"relative_stake",
"saturation",
"produced_blocks",
"retirement",
],
)
StakePoolInfo.__doc__ = "Stores stake pool data"
StakePoolInfo.id.__doc__ = "Unique ID"
StakePoolInfo.status.__doc__ = "Status, one of :class:`StakePoolStatus` enum"
StakePoolInfo.ticker.__doc__ = "3-5 chars long ticker"
StakePoolInfo.name.__doc__ = "Name"
StakePoolInfo.description.__doc__ = "Description"
StakePoolInfo.homepage.__doc__ = "Homepage URL"
StakePoolInfo.cost.__doc__ = "Fixed pool running cost in ADA"
StakePoolInfo.margin.__doc__ = "Operator's margin on the total reward before splitting it among stakeholders (as :class:`Decimal` fraction)"
StakePoolInfo.pledge.__doc__ = "Minimal stake amount that the pool is willing to honor"
StakePoolInfo.relative_stake.__doc__ = "The live pool stake relative to the total stake"
StakePoolInfo.saturation.__doc__ = (
"Saturation-level of the pool based on the desired number "
"of pools aimed by the network. A value above 1 indicates that the pool is saturated."
)
StakePoolInfo.produced_blocks.__doc__ = (
"Number of blocks produced by a given stake pool in its lifetime."
)
StakePoolInfo.retirement.__doc__ = "The :class:`Epoch` in which the pool retires"
StakingStatus = collections.namedtuple(
"StakingStatus",
[
"delegating",
"target_id",
"changes_at",
],
)
StakingStatus.__doc__ = "Wallet's staking status"
StakingStatus.delegating.__doc__ = "Whether the wallet is actively delegating"
StakingStatus.target_id.__doc__ = "The ID of the pool the wallet is delegating to"
StakingStatus.changes_at.__doc__ = ":class:`Epoch` since which the change comes live"
|
6,250 | f0a03f9a6dc78d01455913f7db3ab1948b19ea63 | vocales = "aeiou"
# Count how many times each vowel appears in a user-supplied phrase.
resultado = []
frase = input("Por favor ingrese la frase que desea verificar").lower()
print(frase)
for vocal in vocales:
    conteo_vocales = frase.count(vocal)
    # fixed format string: the original was missing the space before the vowel
    mensaje = f"En la frase hay {conteo_vocales} veces, la vocal {vocal}"
    resultado.append(mensaje)
for elemento in resultado:
    print(elemento)
6,251 | 73337246bd54df53842360510148f3a6f4763ace | from .net import * |
6,252 | 6dc7c7de972388f3984a1238a2d62e53c60c622e | from django.test import TestCase
from student.forms import StudentForm
class ModelTest(TestCase):
    """Validation tests for StudentForm."""

    def test_expense_form_valid_data(self):
        """A fully and correctly filled form validates."""
        form = StudentForm(data={
            'student_id': 500,
            'firstName': "Emre",
            'lastName': "Tan",
            'department': "Panama",
            'mathScore': 100,
            'physicsScore': 70,
            'chemistryScore': 40,
            'biologyScore': 10
        })
        self.assertTrue(form.is_valid())

    def test_expense_form_no_data(self):
        """An empty form fails with one error per required field (8 total)."""
        form = StudentForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 8)

    def test_expense_form_invalid_required(self):
        """Blank text fields each produce a 'required' error."""
        form = StudentForm(data={
            'student_id': 500,
            'firstName': "",
            'lastName': "",
            'department': "",
            'mathScore': 100,
            'physicsScore': 70,
            'chemistryScore': 40,
            'biologyScore': 10
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors, {
            'firstName': ['This field is required.'],
            'lastName': ['This field is required.'],
            'department': ['This field is required.']
        })

    def test_expense_form_invalid_equal_to_max(self):
        """Values above the field maxima (9999 for the id, 100 per score) are rejected."""
        form = StudentForm(data={
            'student_id': 120000,
            'firstName': "Berkay",
            'lastName': "Tan",
            'department': "Bilisim",
            'mathScore': 200,
            'physicsScore': 150,
            'chemistryScore': 150,
            'biologyScore': 101
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 5)
        self.assertEqual(form.errors, {
            'student_id': ['Ensure this value is less than or equal to 9999.'],
            'mathScore': ['Ensure this value is less than or equal to 100.'],
            'physicsScore': ['Ensure this value is less than or equal to 100.'],
            'chemistryScore': ['Ensure this value is less than or equal to 100.'],
            'biologyScore': ['Ensure this value is less than or equal to 100.'],
        })
|
6,253 | 18b10a68b2707b7bfeccbd31c5d15686453b3406 | # Copyright (c) 2020 Hai Nguyen
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import tensorflow.keras.backend as K
def dice_coef(y_true, y_pred):
    """Sorensen-Dice coefficient between two masks, smoothed to avoid 0/0."""
    smooth = 1.
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    total = K.sum(truth) + K.sum(pred)
    return (2. * overlap + smooth) / (total + smooth)
def true_pos(y_true, y_pred):
    """Smoothed true-positive rate: predicted positives among actual positives."""
    smooth = 1
    pred_pos = K.round(K.clip(y_pred, 0, 1))
    actual_pos = K.round(K.clip(y_true, 0, 1))
    return (K.sum(actual_pos * pred_pos) + smooth) / (K.sum(actual_pos) + smooth)
def true_neg(y_true, y_pred):
    """Smoothed true-negative rate: predicted negatives among actual negatives."""
    smooth = 1
    pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
    actual_neg = 1 - K.round(K.clip(y_true, 0, 1))
    tn = K.sum(actual_neg * pred_neg)
    return (tn + smooth) / (K.sum(actual_neg) + smooth)
def false_pos(y_true, y_pred):
    """Smoothed false-positive rate: predicted positives among actual negatives."""
    smooth = 1
    pred_pos = K.round(K.clip(y_pred, 0, 1))
    actual_neg = 1 - K.round(K.clip(y_true, 0, 1))
    fp = K.sum(actual_neg * pred_pos)
    return (fp + smooth) / (K.sum(actual_neg) + smooth)
|
6,254 | 08abb94424598cb54a6b16db68759b216682d866 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: legolas
#
# Created: 05.03.2015
# Copyright: (c) legolas 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Python 2 demo of reading user input.
print "This is checking the feature of the function 'input'."
# NOTE(review): in Python 2, input() eval()s whatever the user types --
# raw_input() is the safe choice for reading a name; confirm before changing.
familyName = input("What's your name?")
family = 'Hey'  # assigned but never used
print familyName
6,255 | b46a14c821777873eb60df609d9f112c737a3635 | __author__ = 'Erwin'
class usuario:
    """Application user: display name, login credentials, permission level, photo."""
    def __init__(self, nombre, user, pasw, permiso, foto):
        self.nombre, self.login, self.pasw = nombre, user, pasw
        self.permiso, self.foto = permiso, foto
        self.database = True  # marks the object as database-backed
class medicamento:
    """A medication with a name, description and photo."""
    def __init__(self, nombre, descripcion, foto):
        self.nombre, self.descripcion, self.foto = nombre, descripcion, foto
        self.database = True  # marks the object as database-backed

    def tipo(self):
        """Category tag used by the persistence layer."""
        return 'medicamentos'
class animal:
    """An animal species with a name, description and photo."""
    def __init__(self, nombre, descripcion, foto):
        self.nombre, self.descripcion, self.foto = nombre, descripcion, foto
        self.database = True  # marks the object as database-backed

    def tipo(self):
        """Category tag used by the persistence layer."""
        return 'animal'
class enfermedad:
    """A disease with a name, description and photo."""
    def __init__(self, nombre, descripcion, foto):
        self.nombre, self.descripcion, self.foto = nombre, descripcion, foto
        self.database = True  # marks the object as database-backed

    def tipo(self):
        """Category tag used by the persistence layer."""
        return 'enfermedad'
class Dosis:
    """A dosage rule: medication dose for an animal/disease within a weight range."""
    def __init__(self, id, animal, medicamento, enfermedad, max, min, dosis):
        self.id = id
        self.animal, self.enfermedad, self.medicamento = animal, enfermedad, medicamento
        self.pesoMax, self.pesoMin = max, min  # weight range bounds
        self.dosis = dosis
        self.database = True  # marks the object as database-backed
class prescripcion:
    """A prescription issued by a user for an animal, disease and weight range."""
    def __init__(self, id, animal, usuario, enfermedad, rangoPeso, dosis):
        self.id = id
        self.animal, self.usuario, self.enfermedad = animal, usuario, enfermedad
        self.peso = rangoPeso
        self.dosis = dosis
        self.database = True  # marks the object as database-backed
|
6,256 | 05052e9ccbd076e71e9ec6148887ce7b82ed316d | from flask import Flask, render_template, request
from distance import get_distance
app = Flask(__name__)
@app.route('/hello')
@app.route('/hello/<name>')
def hello(name=None):
    """Greet *name*, defaulting to "World" when absent or empty."""
    return "Hello %s" % (name or "World")
@app.route('/')
def index():
    """Render the home page."""
    return render_template('index.html', title='home')
@app.route('/distance', methods=['POST', 'GET'])
def distance():
    """Distance form page; on POST, compute the distance between the two locations."""
    result = None
    if request.method == 'POST':
        result = get_distance(request.form['location_a'], request.form['location_b'])
    return render_template('distance.html', title='Afstand', result=result)
if __name__ == '__main__':
    # development server only; run behind a WSGI server in production
    app.run(debug=True)
|
6,257 | 47c6f9767b97469fe7e97ab3b69650265a8021d8 | import numpy as np
np.set_printoptions(precision = 1)
pi = np.pi
def convertRadian(theta):
    """Convert an angle from degrees to radians."""
    return theta * np.pi / 180
def mkMatrix(radian, alpha, dis, a=None):
    """Denavit-Hartenberg homogeneous transform (4x4).

    radian : joint angle theta [rad]
    alpha  : link twist [rad]
    dis    : link offset d
    a      : link length; defaults to the module-level a1, matching the
             original hard-coded behaviour.
             NOTE(review): the callers building A2..A5 probably intended
             their own link lengths a2..a5 -- confirm and pass them in.
    """
    if a is None:
        a = a1  # backward compatible: the original always used global a1
    ct, st = np.cos(radian), np.sin(radian)
    ca, sa = np.cos(alpha), np.sin(alpha)
    matrix = np.matrix([[ct, (-1)*st*ca, st*sa, a*ct],
                        [st, ct*ca, (-1)*ct*sa, a*st],
                        [0, sa, ca, dis],
                        [0, 0, 0, 1]])
    return matrix
# nao robot DH parameters (a = link length, alpha = twist, d = offset)
# link1
a1 = 0
alpha1 = convertRadian(90)
d1 = 0
# link2
a2 = 105/1000 # convert to meters
alpha2 = convertRadian(90)
d2 = 15/1000
# link3
a3 = 0
alpha3 = convertRadian(-90)
d3 = 0
# link4
a4 = 55.95/1000
alpha4 = convertRadian(90)
d4 = 0
# link5
a5 = 0
alpha5 = 0
d5 = 55.75/1000
# input theta angles and convert them directly to radians
# NOTE(review): Python 2 script (print statements below); input() eval()s here
radian1 = convertRadian(input("Enter theta 1: "))
radian2 = convertRadian(input("Enter theta 2: "))
radian3 = convertRadian(input("Enter theta 3: "))
radian4 = convertRadian(input("Enter theta 4: "))
# compute them place them in their respective places
# NOTE(review): mkMatrix hard-codes link length a1 for every link, so a2..a5
# defined above are never used -- confirm whether that is intentional.
# link1 homogeneous transformation 4x4
A1 = mkMatrix(radian1, alpha1, d1)
# link2 homogeneous transformation 4x4
A2 = mkMatrix(radian2, alpha2, d2)
# link3 homogeneous transformation 4x4
A3 = mkMatrix(radian3, alpha3, d3)
# link4 homogeneous transformation 4x4
# NOTE(review): uses alpha3, not alpha4 (alpha4 is defined but unused) -- confirm
A4 = mkMatrix(radian4, alpha3, d4)
# link5 homogeneous transformation 4x4
A5 = mkMatrix(0, alpha5, d5)
print "A1: \n", A1
print "\nA2: \n", A2
print "\nA3: \n", A3
print "\nA4: \n", A4
print "\nA5: \n", A5
# save the matrix returned
# print the matrix
# NOTE(review): A5 is computed and printed but excluded from the product -- confirm
finalMatrix = A1*A2*A3*A4
print "\nFinal matrix\n", finalMatrix
6,258 | fa833e9cd1e624d9ecfb2fcc6d9e22955c9e4b1e | # -*- coding: utf-8 -*-
""""
Created on Saturday, January 18, 2020
@author: lieur
This test case sets silver and gold to 0, which in most cases prevents the computer from
buying provinces. It tests that the game ends when one more supply pile hits 0 (since
silver and gold are already at 0 and the game ends when 3 supply piles hit 0).
"""""
import Dominion
import testUtility
import random
from collections import defaultdict
# Get player names
player_names = ["Annie", "*Ben", "*Carla"]
# Set number of curses and victory cards
nV, nC = testUtility.set_vc_number(player_names)
# Define box and supply_order
box, supply_order = testUtility.define_box(nV)
# Choose and set supply cards
supply = testUtility.set_supply(box, player_names, nV, nC)
# Initialize the trash
trash = []
# Test silver and gold = 0
supply["Copper"]=[Dominion.Copper()]*(60-len(player_names)*7)
supply["Silver"]=[Dominion.Silver()]*0
supply["Gold"]=[Dominion.Gold()]*0
# Construct the Player objects
players = testUtility.set_players(player_names)
# Play the game
testUtility.play_game(supply, supply_order, players, trash)
# Final score
testUtility.display_game_results(players) |
6,259 | c3de9e6129bcafd863cd330ac281345fb563cc8c | """The prediction classes. Instances of the class are returned by
the recommender.
"""
class RelationshipPrediction(object):
    """Prediction that the predicted_relationship holds between a
    subject-object pair.

    @type subject: the domain-specific subject
    @ivar subject: the subject of the pair
    @type object_: the domain-specific object
    @ivar object_: the object of the pair
    @type expectancy: float
    @ivar expectancy: the estimated probability of the predicted_relationship
        occurring between the subject and the object
    @type is_uncertain: bool
    @ivar is_uncertain: True when the prediction was made without any
        information available
    @type explanation: str
    @ivar explanation: the explanation for the prediction
    """

    def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):
        """Store the prediction attributes (see class docstring)."""
        self.subject = subject
        self.object_ = object_
        self.expectancy = expectancy
        self.is_uncertain = is_uncertain
        self.explanation = explanation

    def __unicode__(self):
        fields = (self.subject, self.object_, self.expectancy, self.explanation)
        return u"%s <- %s: %f, %s" % fields

    def __repr__(self):
        return "< %s >" % str(self.__unicode__())
6,260 | a520a93ed2dcd26b9470ed56e96b65a1b3550176 | # -*- coding: utf-8 -*-
'''
Created on 2014-03-25
@author: ZhaoJianning
Modified by WangHairui on 2014-09-12
'''
import unittest
import Stability
import time
import os,sys
import runtests
import re
import android
import datetime
class TestCamera(unittest.TestCase):
    """Device stability tests for the camera app (Python 2 test rig)."""

    def setUp(self):
        # Fresh device connection per test; back(3) returns the UI to a
        # known screen before each case starts.
        self.error = ''
        self.setup = Stability.SetupDeviceConnections()
        self.a = self.setup.initializeTestDevice()
        self.id = self.setup.device_id
        self.stabdl = Stability.StabDL(self.a)
        self.path = "/mnt/sdcard/LepiPhoto"
        self.a.input.back(3)
    def tearDown(self):
        # Leave the UI in a known state for the next test.
        self.a.input.back(3)
    def launchCamera(self):
        # Launch the camera activity via `am start`; returns False when the
        # shell output contains an Exception/Error marker or the call raises.
        try:
            act = "android.intent.action.MAIN"
            cat = "android.intent.category.LAUNCHER"
            flg = "0x10200000"
            cmp = "com.letv.camera/.CameraActivity"
            cmd = "am start -a %s -c %s -f %s -n %s" %(act, cat, flg, cmp)
            #self.a.device.sh("su")
            result = self.a.device.sh(cmd)
            print result
            if "Exception" in str(result) or "Error" in str(result):
                return False
            return True
        except:
            self.error += "launch camera meets exception"
            return False
    def checkPhoto(self):
        # A capture succeeded when a file stamped with today's date
        # (YYYYMMDD) appears in the photo directory listing.
        photoList = self.a.device.sh("ls %s" %self.path)
        print photoList
        today = datetime.date.today().strftime('%Y%m%d')
        if today in str(photoList):
            return True
        return False
    def pullPhoto(self):
        # Copy captured photos into the report/log directory, then delete
        # the device-side jpg files.
        workd = os.path.join(android.log.report_directory(), android.log.logs_directory())
        os.system('adb -s %s pull %s %s' %(self.id,self.path,workd))
        self.a.device.sh("rm -rf %s/*.jpg" %self.path)
    '''
    def testCamera(self):
        """测试摄像头驱动工作正常|操作步骤:1. 命令行启动摄像头 2. 拍下照片 Fail项:1. 启动摄像头失败 2. 照片未拍下"""
        try:
            print "test camera"
            self.a.device.sh("rm %s/*" %self.path)
            if not self.launchCamera():
                self.error += "launch camera failed"
                raise Exception
            time.sleep(10)
            self.a.input.center()
            time.sleep(10)
            if not self.checkPhoto():
                self.error = "failed to capture the photo"
                raise Exception
            self.pullPhoto()
        except Exception, e:
            self.a.log.debug("", "\n test camera")
            self.fail("Error happened: %s %s" %(self.error, e))
    '''
    def testCamera(self):
        """Verify the camera driver works | Steps: 1. launch the camera 2. take a photo.  Fail conditions: 1. camera fails to launch 2. photo not captured."""
        self.jar = "UiAutomator.jar"
        #self.jar = "UiAutomator.jar"
        self.case = "com.letv.camera.Camera#testCapture"
        try:
            # Clean the photo directory so checkPhoto only sees new captures.
            self.a.device.sh("rm -rf %s/*" %self.path)
            ua = Stability.UiAutomator(self.id, self.jar, self.case)
            result, info = ua.runtest()
            if result != 'PASS':
                self.error = str(info)
                raise Exception
            if not self.checkPhoto():
                self.error = "failed to capture the photo"
                raise Exception
            self.pullPhoto()
        except Exception, e :
            self.a.log.debug("", "\n testCamera")
            self.fail("Error happened: %s %s" % (self.error, e))
    def testOpenExit(self):
        """Open and close the camera | 1. open the camera app 2. exit the camera app."""
        self.jar = "UiAutomator.jar"
        #self.jar = "UiAutomator.jar"
        self.case = "com.letv.camera.Camera#testOpenExit"
        try:
            #self.a.device.sh("rm -rf %s/*" %self.path)
            ua = Stability.UiAutomator(self.id, self.jar, self.case)
            result, info = ua.runtest()
            if result != 'PASS':
                self.error = str(info)
                raise Exception
        except Exception, e :
            self.a.log.debug("", "\n testCamera")
            self.fail("Error happened: %s %s" % (self.error, e))
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testBrowser']
    # Run every Test* method in this module via unittest's CLI runner.
    unittest.main()
|
6,261 | 2a95a68d8570a314b2b6e5731d7a695e5d7e7b30 | #This program is a nice example of a core algorithm
#Remove Individual Digits
# To remove individual digits you use two operations
# 1 MOD:
# mod return the remainder after division. 5%2 = 1.
# If we mod by 10 we get the units digit. 723%10 = 3
# 2 Integer Division:
# Integer division is when we divide and remove decimals;
# we DO NOT round, simply cut them off. To integer divide
# in Python we use //. For example 723//10 = 72. This is
# a quick way to remove decimals.
def findSum(n):
    """Return the sum of the decimal digits of non-negative integer *n*.

    Uses the classic mod/integer-divide digit-chopping loop described in
    the header comment.  findSum(0) == 0.
    """
    s = 0
    while n > 0:
        s = s + n % 10   # n % 10 is the units digit
        n = n // 10      # integer division drops the units digit
    return s

def isHarshad(n):
    """True when *n* is divisible by its digit sum.

    Note: undefined (ZeroDivisionError) for n == 0, since findSum(0) == 0.
    """
    return n % findSum(n) == 0

def findHarshad(low, high):
    """Print the longest run of consecutive Harshad numbers in [low, high].

    *low*/*high* may be ints or numeric strings (as read from the data
    file).  BUGFIX: the previous version overwrote both parameters with the
    hard-coded test range 500..525, so the file data was never used.
    """
    low = int(low)
    high = int(high)
    streak = 0
    maxStreak = 0
    for i in range(low, high + 1):
        if isHarshad(i):
            streak = streak + 1
        else:
            maxStreak = max(streak, maxStreak)
            streak = 0
    # A streak reaching the end of the range is only closed here.
    maxStreak = max(streak, maxStreak)
    print(maxStreak)
f = open("DwiteHarshadNumbersData.txt", "r")
#Python short cut which loops as long as there is a new line in the file
for line in f:
l = f.readline()
h = f.readline()
findHarshad(l,h)
f.close()
|
6,262 | 6cb29ebd9c0f2660d0eb868bec87ffd97cf4d198 | """ A set of constants to describe the package.
Don't put any code in here, because it must be safe to execute in setup.py. """
__title__ = 'space_tracer'  # => name in setup.py
# Bump this for releases; read by setup.py (see module docstring).
__version__ = '4.10.2'
__author__ = "Don Kirkby"
__author_email__ = "donkirkby@gmail.com"
# Short package summary shown on the package index.
__description__ = "Trade time for space when debugging your code."
__url__ = "https://donkirkby.github.io/live-py-plugin/"
6,263 | 51cd74bff5a0883a7bee2b61b152aecb2c5ccc66 | import sys
sys.path.append("/home/mccann/bin/python/obsolete")
from minuit import *
execfile("/home/mccann/antithesis/utilities.py")
nobeam = getsb("cos")
ebeam = getsb("bge")
pbeam = getsb("bgp")
import gbwkf
import gbwkftau
runstart = pickle.load(open("/home/mccann/antithesis/old_dotps/runstart.p"))
runend = pickle.load(open("/home/mccann/antithesis/old_dotps/runend.p"))
import time
bsbha = pickle.load(open("/home/mccann/synthesis/run/bsbha.p"))
nbish2nb = 23.0481
bhabha_interference = 1. # this is a multiplier: 0. to turn off
class FitRecord: pass
ggfits = pickle.load(open("/home/mccann/antithesis/fit_results/octoberfits_fixen_0_1.0.p"))
# I learned this from Matt and the beam energy program logs
runsummary[123828].energy = 4.72992
runsummary[123832].energy = 4.72990
def run_date(r):
    """Best-estimate timestamp for run *r*: the midpoint of its start and
    end times when both are known, otherwise whichever endpoint exists.
    Raises when the run appears in neither table."""
    has_start = r in runstart
    has_end = r in runend
    if has_start and has_end:
        return (runstart[r] + runend[r]) / 2.
    if has_start:
        return runstart[r]
    if has_end:
        return runend[r]
    raise Exception
# The 48-hour limit is built into setup_runs
def setup_runs(res, low, high):
    """Return the scan ('s') and peak ('p') runs of resonance *res* in the
    range [low, high], excluding bad runs, keeping only runs that begin
    within 48 hours of run *low* (the 48-hour limit noted above)."""
    window_end = run_date(low) + 48. * 60. * 60
    selected = []
    for r in initialrunlist:
        if r in mybadruns or not (low <= r <= high):
            continue
        if runsummary[r].res != res:
            continue
        if runsummary[r].kind in ('s', 'p') and run_date(r) < window_end:
            selected.append(r)
    return selected
def mygbwkf(mass, fullgam, rmsbeam, yint, phi, w):
    """yint = 0.018, 0.018, 0.018; phi=0"""
    # Near and below mass+200 use the full beam-convolved lineshape;
    # far above the resonance fall back to a simple 1/(w - mass) tail.
    if w <= mass + 200.:
        return gbwkf.gbwkf(mass, fullgam, rmsbeam, yint, phi, w - mass)
    return 0.076 / (w - mass)
def mygbwkftau(mass, fullgam, rmsbeam, yint, phi, w):
    """yint = 0.20, 0.37, 0.27; phi = 0"""
    # Same switch-over as mygbwkf, but using the tau-channel lineshape.
    if w <= mass + 200.:
        return gbwkftau.gbwkf(mass, fullgam, rmsbeam, yint, phi, w - mass)
    return 0.076 / (w - mass)
def background(w):
    """Continuum background cross-section at c.m. energy *w* (MeV):
    a 1/s piece plus a log(s) piece, both normalised at 9000 MeV
    (the same functional form as the continuum terms in u1func-u3func)."""
    scale = 9000.**2 / w**2
    one_over_s = 9.35538858434 * (1. - 0.0792) * scale
    log_piece = 9.35538858434 * 0.0792 * log(w**2 / 9000.**2)
    return one_over_s + log_piece
def u1func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, w):
    """Cross-section model at energy *w* for the 9460.30 MeV resonance:
    hadronic + tau-channel resonance terms plus a continuum background
    split into 1/s and log(s) (two-photon fraction) parts."""
    resonance = area * 0.9793 * mygbwkf(9460.30, fullgam, rmsbeam, yint, phi, w)
    resonance += area * 0.578 * btautau * mygbwkftau(9460.30, fullgam, rmsbeam, tauyint, tauphi, w)
    continuum = back * (1. - twophofrac) * 9000.**2 / w**2
    continuum += back * twophofrac * log(w**2 / 9000.**2)
    return resonance + continuum
def u2func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, u1area, w):
    """Cross-section model at energy *w* for the 10023.26 MeV resonance:
    resonance + tau channel + continuum, plus the contribution of the
    9460.30 MeV resonance (zero-width/zero-spread lineshape, area u1area)."""
    total = area * 0.9618 * mygbwkf(10023.26, fullgam, rmsbeam, yint, phi, w)
    total += area * 0.578 * btautau * mygbwkftau(10023.26, fullgam, rmsbeam, tauyint, tauphi, w)
    total += back * (1. - twophofrac) * 9000.**2 / w**2
    total += back * twophofrac * log(w**2 / 9000.**2)
    total += u1area * mygbwkf(9460.30, 0., 0., 0., 0., w)
    return total
def u3func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, u1area, u2area, w):
    """Cross-section model at energy *w* for the 10355.2 MeV resonance:
    resonance + tau channel + continuum, plus the contributions of the two
    lower resonances (zero-width/zero-spread lineshapes, u1area/u2area)."""
    total = area * 0.9641 * mygbwkf(10355.2, fullgam, rmsbeam, yint, phi, w)
    total += area * 0.578 * btautau * mygbwkftau(10355.2, fullgam, rmsbeam, tauyint, tauphi, w)
    total += back * (1. - twophofrac) * 9000.**2 / w**2
    total += back * twophofrac * log(w**2 / 9000.**2)
    total += u1area * mygbwkf(9460.30, 0., 0., 0., 0., w)
    total += u2area * mygbwkf(10023.26, 0., 0., 0., 0., w)
    return total
def whichamiin(r):
    """Return (resonance, week-label) for run *r*, where the label names
    the weekly dataset (in u1runs/u2runs/u3runs) that contains the run,
    or (resonance, None) when the run is in no weekly scan dataset."""
    res = runsummary[r].res
    weeks_by_res = {
        1: (u1runs, ["jan16", "jan30", "feb06", "feb13", "feb20", "feb27",
                     "mar06", "mar13", "apr03", "apr08", "apr09", "apr10"]),
        2: (u2runs, ["may29", "jun11", "jun12", "jul10", "jul24", "aug07"]),
        3: (u3runs, ["nov28", "dec05", "dec12", "dec19", "dec26", "jan02",
                     "jan09"]),
    }
    if res in weeks_by_res:
        rundict, labels = weeks_by_res[res]
        for label in labels:
            if r in rundict[label]:
                return res, label
    return res, None
def get_run(r):
    """Compute per-run quantities for run *r*.

    Returns a tuple of floats:
    (energy_MeV, energy_shift, gamgam_lumi, gamgam_lumi_err,
     bhabha_lumi, bhabha_lumi_err, num_hadrons, num_hadrons_err).

    The lumisource loop runs twice: 0 = gamma-gamma luminosity,
    3 = Bhabha (bsbha) luminosity.  The lumisource 1/2 branches
    (cos-theta sub-ranges of the Bhabha sample) are kept below but are
    never taken here since only (0, 3) is iterated.
    """
    gamgam_lumi = None
    gamgam_lumi_err = None
    bhabha_lumi = None
    bhabha_lumi_err = None
    num_hadrons = None
    num_hadrons_err = None
    the_energy = None
    the_shift = None
    therun = getdb(r)
    for lumisource in (0, 3):
        # Raw counters: g = luminosity counter, h = hadrons, e/p = beam-gas,
        # c = cosmics.
        g = 0.
        h = 0.
        e = 0.
        p = 0.
        c = 0.
        ngg = therun.gamgam
        if r in mycarefulscan: ngg = therun.gamgam_vstime.sum(0.,0.99)
        # Unpack the November fit to get this run's weekly energy shift.
        fitrec = pickle.load(open("/home/mccann/antithesis/fit_results/novemberfits_lastever_3_1.0.p"))
        if runsummary[r].res == 1:
            myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = fitrec[1].values
        elif runsummary[r].res == 2:
            myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = fitrec[2].values
        elif runsummary[r].res == 3:
            myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = fitrec[3].values
        whichres, whichweek = whichamiin(r)
        thisshift = 0.
        if whichweek != None:
            thisshift = eval("my"+whichweek)
        the_energy = runsummary[r].energy*2000.
        the_shift = thisshift
        # Re-unpack from the October gamma-gamma fits; these values (beam
        # spread per week, etc.) drive the e+e- interference subtraction.
        if runsummary[r].res == 1:
            myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = ggfits[1].values
        elif runsummary[r].res == 2:
            myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = ggfits[2].values
        elif runsummary[r].res == 3:
            myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = ggfits[3].values
        whichres, whichweek = whichamiin(r)
        # Pick the beam energy spread for this run's week.
        thisrmsbeam = myrmsbeam
        if whichres == 1:
            if whichweek != None:
                if whichweek in ["jan16", "jan30", "feb06", "feb13", "feb20"]: thisrmsbeam = myrjan
                if whichweek in ["feb27", "mar06", "mar13"]: thisrmsbeam = myrfeb
                if whichweek in ["apr03", "apr08", "apr09"]: thisrmsbeam = myrapr1
                if whichweek in ["apr10"]: thisrmsbeam = myrapr2
        if whichres == 3:
            if whichweek != None:
                thisrmsbeam = eval("myr"+whichweek)
        # NOTE: the sign convention flips here -- this shift is the negative
        # of the one stored in the_shift above.
        thisshift = 0.
        if whichweek != None:
            thisshift = 0. - eval("my"+whichweek)
        if r in mycarefulscan:
            # "Careful scan" runs: use only the first 99% of the run in time.
            h += therun.hadroncool_vstime.sum(0.,0.99)
            e += therun.beamgase_vstime.sum(0.,0.99)
            p += therun.beamgasp_vstime.sum(0.,0.99)
            c += therun.cosmic_vstime.sum(0.,0.99)
            if lumisource == 0:
                g += therun.gamgam_vstime.sum(0.,0.99)
            elif lumisource == 1:
                g += therun.bhabha_cosp.sum(0., 0.6) * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha
                if runsummary[r].kind != 'c':
                    # eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * inner range
                    if runsummary[r].res == 1:
                        eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.417*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.672/2.66667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 2:
                        eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.613*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.672/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 3:
                        eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.486*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.672/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
            elif lumisource == 2:
                g += therun.bhabha_cosp.sum(0.6, 0.8) * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha
                if runsummary[r].kind != 'c':
                    # eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * outer range
                    if runsummary[r].res == 1:
                        eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.588*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.298667/2.66667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 2:
                        eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.864*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.298667/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 3:
                        eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.686*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.298667/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
            elif lumisource == 3:
                g += 1.*bsbha[r] * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha
                if runsummary[r].kind != 'c':
                    # eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * whole range
                    if runsummary[r].res == 1:
                        eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.597*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 1.73933/2.66667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 2:
                        eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.873*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 1.73933/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 3:
                        eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.691*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 1.73933/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
        else:
            # Ordinary runs: use the full-run counters.
            h += therun.hadroncool
            e += therun.beamgase
            p += therun.beamgasp
            c += therun.cosmic
            if lumisource == 0:
                g += therun.gamgam
            elif lumisource == 1:
                g += therun.bhabha_cosp.sum(0., 0.6)
                if runsummary[r].kind != 'c':
                    # e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * inner range
                    if runsummary[r].res == 1:
                        eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.417*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.672/2.66667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 2:
                        eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.613*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.672/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 3:
                        eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.486*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.672/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
            elif lumisource == 2:
                g += therun.bhabha_cosp.sum(0.6, 0.8)
                if runsummary[r].kind != 'c':
                    # e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * outer range
                    if runsummary[r].res == 1:
                        eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.588*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.298667/2.66667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 2:
                        eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.864*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.298667/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 3:
                        eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.686*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.298667/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
            elif lumisource == 3:
                g += 1.*bsbha[r]
                if runsummary[r].kind != 'c':
                    # e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * whole range
                    if runsummary[r].res == 1:
                        eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.597*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 1.73933/2.66667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 2:
                        eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.873*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 1.73933/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
                    if runsummary[r].res == 3:
                        eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.691*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 1.73933/2.6667
                        g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
        average_energy = runsummary[r].energy
        # Subtract beam-gas (scaled from the single-beam samples, cosmics
        # removed) and cosmic backgrounds from the hadron count.
        ebkgnd = 1. * (ebeam.hadroncool - 1.*nobeam.hadroncool*ebeam.cosmic/nobeam.cosmic) * e / ebeam.beamgase
        pbkgnd = 1. * (pbeam.hadroncool - 1.*nobeam.hadroncool*pbeam.cosmic/nobeam.cosmic) * p / pbeam.beamgasp
        cbkgnd = 1. * nobeam.hadroncool * c / nobeam.cosmic
        hadrons = h - ebkgnd/2. - pbkgnd/2. - cbkgnd
        hadrons_err = sqrt(h + c * (1.*nobeam.hadroncool/nobeam.cosmic)**2 + ebkgnd/2. + pbkgnd/2.)
        num_hadrons = hadrons
        num_hadrons_err = hadrons_err
        if lumisource == 3:
            # Bhabha normalisation constants differ per resonance.
            if whichres == 1:
                cs = hadrons / g / average_energy**2 * 199.5 # these differences are due to different efficiencies, as predicted by the MC
                bhabha_lumi = g * average_energy**2 / 199.5
                bhabha_lumi_err = sqrt(g) * average_energy**2 / 199.5
            elif whichres == 2:
                cs = hadrons / g / average_energy**2 * 197.4 # and verified by my lumi counts
                bhabha_lumi = g * average_energy**2 / 197.4
                bhabha_lumi_err = sqrt(g) * average_energy**2 / 197.4
            elif whichres == 3:
                cs = hadrons / g / average_energy**2 * 196.0 # (I totally believe this.)
                bhabha_lumi = g * average_energy**2 / 196.0
                bhabha_lumi_err = sqrt(g) * average_energy**2 / 196.0
            cs_err = cs * sqrt((1.*hadrons_err / hadrons)**2 + 1./g)
        else:
            cs = hadrons / g / average_energy**2 * nbish2nb
            cs_err = cs * sqrt((1.*hadrons_err / hadrons)**2 + 1./g)
            gamgam_lumi = g * average_energy**2 / nbish2nb
            gamgam_lumi_err = sqrt(g) * average_energy**2 / nbish2nb
        # Dead branches here (lumisource is only ever 0 or 3 in this loop).
        if lumisource == 1:
            cs /= 0.23684
            cs_err /= 0.23684
        if lumisource == 2:
            cs /= 0.118999
            cs_err /= 0.118999
    return float(the_energy), float(the_shift), float(gamgam_lumi), float(gamgam_lumi_err), float(bhabha_lumi), float(bhabha_lumi_err), float(num_hadrons), float(num_hadrons_err)
class ARun:
    """Per-run record: the measured quantities from get_run plus the fitted
    cross-section value (func) and its energy derivative (deriv)."""
    def __init__(self, r):
        self.run = r
        self.en, self.shift, self.gg, self.gg_err, self.bb, self.bb_err, self.had, self.had_err = get_run(r)
    def getfval(self):
        # Evaluate the fitted lineshape at this run's shifted energy and a
        # backward finite-difference derivative over a 0.1 MeV step.
        fitrec = pickle.load(open("/home/mccann/antithesis/fit_results/novemberfits_lastever_3_1.0.p"))
        whichres, whichweek = whichamiin(self.run)
        if whichres == 1:
            myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = fitrec[1].values
            # Per-week beam spread; runs outside any week must be continuum
            # ('c') or high-energy ('h') runs, otherwise something is wrong.
            thisrmsbeam = myrmsbeam
            if whichweek != None:
                if whichweek in ["jan16", "jan30", "feb06", "feb13", "feb20"]: thisrmsbeam = myrjan
                if whichweek in ["feb27", "mar06", "mar13"]: thisrmsbeam = myrfeb
                if whichweek in ["apr03", "apr08", "apr09"]: thisrmsbeam = myrapr1
                if whichweek in ["apr10"]: thisrmsbeam = myrapr2
            else:
                if runsummary[self.run].kind != "c" and runsummary[self.run].kind != "h":
                    raise Exception
            self.func = u1func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, self.en+self.shift)
            self.deriv = (self.func - u1func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, self.en+self.shift-0.1))/0.1
        elif whichres == 2:
            myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = fitrec[2].values
            self.func = u2func(myarea, myrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, self.en+self.shift)
            self.deriv = (self.func - u2func(myarea, myrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, self.en+self.shift-0.1))/0.1
        elif whichres == 3:
            myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = fitrec[3].values
            thisrmsbeam = myrmsbeam
            if whichres == 3:
                if whichweek != None:
                    thisrmsbeam = eval("myr"+whichweek)
                else:
                    if runsummary[self.run].kind != "c" and runsummary[self.run].kind != "h":
                        raise Exception
            self.func = u3func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, self.en+self.shift)
            self.deriv = (self.func - u3func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, self.en+self.shift-0.1))/0.1
        else:
            if runsummary[self.run].kind != "c" and runsummary[self.run].kind != "h":
                raise Exception
# Datasets per resonance: weekly scan groups from setup_runs, plus "cont"
# (continuum, kind 'c') and "high" (kind 'h') runs collected below.
u1runs = {}
u2runs = {}
u3runs = {}
u1runs["cont"] = []
u2runs["cont"] = []
u3runs["cont"] = []
u1runs["high"] = []
u2runs["high"] = []
u3runs["high"] = []
u1runs["jan16"] = setup_runs(1, 123164, 123178)
u1runs["jan30"] = setup_runs(1, 123596, 123718)
u1runs["feb06"] = setup_runs(1, 123781, 123893)
u1runs["feb13"] = setup_runs(1, 124080, 124092)
u1runs["feb20"] = setup_runs(1, 124102, 124214)
u1runs["feb27"] = setup_runs(1, 124279, 124394)
u1runs["mar06"] = setup_runs(1, 124436, 124519)
u1runs["mar13"] = setup_runs(1, 124625, 124736)
u1runs["apr03"] = setup_runs(1, 125119, 125127)
u1runs["apr08"] = setup_runs(1, 125254, 125262)
u1runs["apr09"] = setup_runs(1, 125285, 125295)
u1runs["apr10"] = setup_runs(1, 125303, 125416)
u2runs["may29"] = setup_runs(2, 126449, 126568)
u2runs["jun11"] = setup_runs(2, 126776, 126783)
u2runs["jun12"] = setup_runs(2, 126814, 126915)
u2runs["jul10"] = setup_runs(2, 127588, 127615)
u2runs["jul24"] = setup_runs(2, 127924, 127933)
u2runs["aug07"] = setup_runs(2, 128303, 128316)
u3runs["nov28"] = setup_runs(3, 121884, 122007)
u3runs["dec05"] = setup_runs(3, 122069, 122178)
u3runs["dec12"] = setup_runs(3, 122245, 122326)
u3runs["dec19"] = setup_runs(3, 122409, 122527)
u3runs["dec26"] = setup_runs(3, 122535, 122757)
u3runs["jan02"] = setup_runs(3, 122766, 122881)
u3runs["jan09"] = setup_runs(3, 122993, 123101)
# Continuum runs (kind 'c') per resonance.
for r in initialrunlist:
    if r not in mybadruns:
        if runsummary[r].res == 1 and runsummary[r].kind == 'c':
            u1runs["cont"].append(r)
        if runsummary[r].res == 2 and runsummary[r].kind == 'c':
            u2runs["cont"].append(r)
        if runsummary[r].res == 3 and runsummary[r].kind == 'c':
            u3runs["cont"].append(r)
# High-energy tail runs (kind 'h') per resonance.
for r in initialrunlist:
    if r not in mybadruns:
        if runsummary[r].res == 1 and runsummary[r].kind == 'h':
            u1runs["high"].append(r)
        if runsummary[r].res == 2 and runsummary[r].kind == 'h':
            u2runs["high"].append(r)
        if runsummary[r].res == 3 and runsummary[r].kind == 'h':
            u3runs["high"].append(r)
# Build the per-run records, then evaluate the fitted lineshape for each
# run, resonance by resonance (the prints are progress output).
data = {}
for un in (u1runs, u2runs, u3runs):
    for s in un:
        for r in un[s]:
            print "arun", r
            data[r] = ARun(r)
for r in data:
    if runsummary[r].res == 1:
        print "u1", r
        data[r].getfval()
for r in data:
    if runsummary[r].res == 2:
        print "u2", r
        data[r].getfval()
for r in data:
    if runsummary[r].res == 3:
        print "u3", r
        data[r].getfval()
# x = []
# y = []
# dy = []
# for r in data:
# if runsummary[r].res == 1:
# x.append(data[r].en + data[r].shift)
# y.append(data[r].had/data[r].bb - data[r].func)
# dy.append(data[r].had/data[r].bb*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2))
# p = biggles.FramedPlot()
# p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
# p.add(biggles.SymmetricErrorBarsY(x, y, dy))
# p.show()
# Pull distributions (data - fit)/error, split by the sign/size of the
# lineshape derivative and by run kind (p = peak, c = continuum, h = high).
histneg = hist.h1(20, -4, 4)
histpos = hist.h1(20, -4, 4)
histpeak = hist.h1(20, -4, 4)
histcont = hist.h1(20, -4, 4)
histtail = hist.h1(20, -4, 4)
profile = hist.prof(20, -4, 4)
x = []
y = []
for r in data:
    crosssec = data[r].had/data[r].bb
    crosssec_err = crosssec*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2)
    pull = (crosssec - data[r].func)/crosssec_err
    x.append(data[r].deriv)
    y.append(pull**2)
    profile.fill(x[-1], y[-1])
    if x[-1] < -1:
        histneg.fill(pull)
    if x[-1] > 1:
        histpos.fill(pull)
    if -0.1 < x[-1] < 0.1 and runsummary[r].kind == "p":
        histpeak.fill(pull)
    if -0.1 < x[-1] < 0.1 and runsummary[r].kind == "c":
        histcont.fill(pull)
    if -0.1 < x[-1] < 0.1 and runsummary[r].kind == "h":
        histtail.fill(pull)
# Plot 1: chi^2 contribution vs lineshape derivative (scatter).
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.y1.range = 0, 10
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Function derivative (nb/MeV)"
p.show()
p.write_eps("residualntuple_1.eps")
# Plot 2: the same quantity profiled in bins of the derivative.
profile.update()
x = profile.frame + (profile.high - profile.frame[-1])/2.
y = profile.vals
dy = profile.errs
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.y1.range = 0, 5
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Function derivative (nb/MeV)"
p.show()
p.write_eps("residualntuple_2.eps")
# Plot 3: normalised pull distributions overlaid by dataset type.
histneg.rootn()
histpos.rootn()
histpeak.rootn()
histcont.rootn()
histtail.rootn()
p = (histneg / histneg.sum()).plot()
p.add((histpos / histpos.sum()).steps(linecolor="red"))
p.add((histpeak / histpeak.sum()).steps(linecolor="blue"))
p.add((histcont / histcont.sum()).steps(linecolor="green"))
p.add((histtail / histtail.sum()).steps(linecolor="purple"))
p.add((histneg / histneg.sum()).errorbars())
p.add((histpos / histpos.sum()).errorbars(linecolor="red"))
p.add((histpeak / histpeak.sum()).errorbars(linecolor="blue"))
p.add((histcont / histcont.sum()).errorbars(linecolor="green"))
p.add((histtail / histtail.sum()).errorbars(linecolor="purple"))
p.x1.range = 5, 30
p.y1.range = 0, 0.4
p.x1.label = r"Pull distributions of different types of datasets"
p.show()
p.write_eps("residualntuple_3.eps")
# Plots 4-5: chi^2 contribution vs absolute cross-section (scatter/profile).
x = []
y = []
profile = hist.prof(20, 5, 30)
for r in data:
    crosssec = data[r].had/data[r].bb
    crosssec_err = crosssec*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2)
    x.append(crosssec)
    y.append((crosssec - data[r].func)**2/crosssec_err**2)
    profile.fill(x[-1], y[-1])
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.x1.range = 5, 30
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_4.eps")
profile.update()
x = profile.frame + (profile.high - profile.frame[-1])/2.
y = profile.vals
dy = profile.errs
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.x1.range = 5, 30
p.y1.range = 0, 5
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_5.eps")
# Plots 6-8: Bhabha/gamgam luminosity ratio checks vs cross-section.
x = []
y = []
dy = []
for r in data:
    ratio = data[r].bb/data[r].gg
    ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)
    x.append(data[r].had/data[r].bb)
    y.append(ratio)
    dy.append(ratio_err)
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.add(biggles.LineY(1.))
p.x1.range = 5, 30
p.y1.label = r"Bhabha luminosity / gamgam luminosity"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_6.eps")
x = []
y = []
profile = hist.prof(20, 5, 30)
for r in data:
    ratio = data[r].bb/data[r].gg
    ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)
    x.append(data[r].had/data[r].bb)
    y.append((ratio-1)/ratio_err)
    profile.fill(x[-1], y[-1])
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.LineY(0.))
p.x1.range = 5, 30
p.y1.label = r"BB/GG sigmas"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_7.eps")
profile.update()
x = profile.frame + (profile.high - profile.frame[-1])/2.
y = profile.vals
dy = profile.errs
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.add(biggles.LineY(0.))
p.x1.range = 5, 30
p.y1.range = -3, 3
p.y1.label = r"BB/GG sigmas"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_8.eps")
# Weighted means of the lumi ratio, split by run kind and resonance.
offres = []
on1 = []
on2 = []
on3 = []
off1 = []
off2 = []
off3 = []
for r in data:
    ratio = data[r].bb/data[r].gg
    ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)
    if runsummary[r].kind == "c":
        offres.append((ratio, ratio_err))
        if runsummary[r].res == 1:
            off1.append((ratio, ratio_err))
        elif runsummary[r].res == 2:
            off2.append((ratio, ratio_err))
        elif runsummary[r].res == 3:
            off3.append((ratio, ratio_err))
    elif runsummary[r].kind == "s" and runsummary[r].res == 1:
        on1.append((ratio, ratio_err))
    elif runsummary[r].kind == "s" and runsummary[r].res == 2:
        on2.append((ratio, ratio_err))
    elif runsummary[r].kind == "s" and runsummary[r].res == 3:
        on3.append((ratio, ratio_err))
print jt.wmean(offres)
print jt.wmean(on1)
print jt.wmean(on2)
print jt.wmean(on3)
print jt.wmean(off1)
print jt.wmean(off2)
print jt.wmean(off3)
|
6,264 | 97ff8dae060475b0efbc8d39e9fc251be8ac091b | from __future__ import annotations
import ibis
from ibis import _
def test_format_sql_query_result(con, snapshot):
    """Repr of a `.sql()` expression plus mutation must match the snapshot."""
    t = con.table("airlines")

    query = """
    SELECT carrier, mean(arrdelay) AS avg_arrdelay
    FROM airlines
    GROUP BY 1
    ORDER BY 2 DESC
    """
    schema = ibis.schema({"carrier": "string", "avg_arrdelay": "double"})

    with con.set_query_schema(query, schema):
        expr = t.sql(query)

    # name is autoincremented so we need to set it manually to make the
    # snapshot stable
    expr = expr.op().copy(name="foo").to_expr()

    expr = expr.mutate(
        island=_.carrier.lower(),
        avg_arrdelay=_.avg_arrdelay.round(1),
    )
    snapshot.assert_match(repr(expr), "repr.txt")
def test_memoize_database_table(con, snapshot):
    """Each database table should be rendered only once, however often it
    is referenced inside a filter/join/aggregate pipeline."""
    t1 = con.table("test1")
    t2 = con.table("test2")

    filtered = t1[t1["f"] > 0]
    joined = t2.inner_join(filtered, [filtered["g"] == t2["key"]])

    mean_diff = (filtered["f"] - t2["value"]).mean().name("foo")
    agg = joined.aggregate(
        [mean_diff, filtered["f"].sum().name("bar")],
        by=[filtered["g"], t2["key"]],
    )

    rendered = repr(agg)
    # Memoization: each backing table appears exactly once in the repr.
    assert rendered.count("test1") == 1
    assert rendered.count("test2") == 1
    snapshot.assert_match(rendered, "repr.txt")
def test_memoize_insert_sort_key(con, snapshot):
    """A table reused in group_by/mutate/order_by should print only once."""
    table = con.table("airlines")

    t = table["arrdelay", "dest"]
    # Per-destination mean delay and each row's deviation from it.
    expr = t.group_by("dest").mutate(
        dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()
    )

    worst = expr[expr.dev.notnull()].order_by(ibis.desc("dev")).limit(10)

    result = repr(worst)
    # The backing table must appear exactly once in the rendering.
    assert result.count("airlines") == 1
    snapshot.assert_match(result, "repr.txt")
|
6,265 | 6f259210cbe8969046cba1031ab42d77e913abea | # -*- coding: utf-8 -*-
from celery import shared_task
from djcelery.models import PeriodicTask, CrontabSchedule
import datetime
from django.db.models import Max, Count
from services import *
# Test task: multiplies its two arguments (used to verify workers run).
@shared_task()
def excute_sql(x, y):
    # Python 2 print statement; the (misspelt) task name is kept so any
    # existing periodic-task registrations still resolve it.
    print "%d * %d = %d" % (x, y, x * y)
    return x * y
# Monitoring task: run a SQL probe against a datasource and record whether
# the result breaches the configured alarm condition.
@shared_task()
def monitor_sql(*args, **kwargs):
    """Execute the configured SQL and persist a TaskResult sample.

    Expected kwargs: name (PeriodicTask name), datasource, sql,
    operator (">=", "<" or "=="), threshold.
    monitor_result meaning: -1 no data, 0 normal, 1 abnormal.
    """
    print kwargs["name"]
    print kwargs["sql"]
    task_name = kwargs["name"]
    datasource = kwargs["datasource"]
    sql = kwargs["sql"]
    operator = kwargs["operator"]
    threshold = kwargs["threshold"]
    tasks = PeriodicTask.objects.filter(name=task_name)
    if tasks.exists():
        task = tasks[0]
        data = get_sql_data(datasource, sql)
        # -1: query returned no rows, so abnormality cannot be judged.
        sql_result = -1
        monitor_result = -1
        if len(data) > 0:
            sql_result = data[0][0]
            monitor_result = 0
            # Reached the configured threshold.
            if operator == ">=":
                if sql_result >= threshold:
                    monitor_result = 1  # abnormal
            # Fell below the configured threshold.
            elif operator == "<":
                if sql_result < threshold:
                    monitor_result = 1  # abnormal
            # Value unchanged since the previous run.
            elif operator == "==":
                task_results = TaskResult.objects.filter(task_id=task.id)
                if task_results.exists():
                    task_result_before = task_results.latest('last_run_time')
                    sql_data_before = task_result_before.sql_data
                    if sql_result == sql_data_before:
                        monitor_result = 1  # abnormal
        # Save the collected sample.
        task_result = TaskResult(task_id=task.id, task_name=task.name, last_run_time=datetime.datetime.now(),
                                 operator=operator, threshold=threshold, sql_data=sql_result,
                                 monitor_result=monitor_result)
        task_result.save()
        return sql_result
|
6,266 | e52b01cc7363943f5f99b1fa74720c6447b1cfae | from setuptools import setup, find_packages
# Package version, reused by setup() below.
__version__ = '2.0'

setup(
    name='sgcharts-pointer-generator',
    version=__version__,
    python_requires='>=3.5.0',
    # Pinned runtime dependencies (TensorFlow 1.x graph-mode codebase).
    install_requires=[
        'tensorflow==1.10.0',
        'pyrouge==0.1.3',
        'spacy==2.0.12',
        'en_core_web_sm==2.0.0',
        'sgcharts-stringx==1.1.1'
    ],
    # Ship every package except the test trees.
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    include_package_data=True,
    description='News Summarizer'
)
|
6,267 | 88109909d0c80f25373f917426c3c3634bfc8114 | import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
"""
Encapsulates unit tests for the pdarrayclass module that provide
summarized values via reduction methods
"""
class SummarizationTest(ArkoudaTest):
    """Asserts that arkouda pdarray reductions (std, min, max, mean, var,
    any, all) agree exactly with NumPy on the same data."""

    def setUp(self):
        ArkoudaTest.setUp(self)
        # The same ten evenly spaced values, client-side and server-side.
        self.na = np.linspace(1, 10, 10)
        self.pda = ak.array(self.na)

    def testStd(self):
        # Population standard deviation (ddof=0) on both implementations.
        self.assertEqual(self.na.std(), self.pda.std())

    def testMin(self):
        self.assertEqual(self.na.min(), self.pda.min())

    def testMax(self):
        self.assertEqual(self.na.max(), self.pda.max())

    def testMean(self):
        self.assertEqual(self.na.mean(), self.pda.mean())

    def testVar(self):
        # Population variance (ddof=0).
        self.assertEqual(self.na.var(), self.pda.var())

    def testAny(self):
        self.assertEqual(self.na.any(), self.pda.any())

    def testAll(self):
        self.assertEqual(self.na.all(), self.pda.all())
|
6,268 | bd310ab0bc193410b8f93ad5516b0731d2eba54f | '''
Various tools for cleaning out nulls and imputing
'''
|
6,269 | 33365d5ce5d2a7d28b76a7897de25e1f35d28855 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created for COMP5121 Lab on 2017 JUN 24
@author: King
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score
# Toy dataset: single-feature samples 0..9, labelled by the rule "value > 4".
data = [[0],[1],[2],[3],[4], [5],[6],[7],[8],[9]] # input dataframe samples
labels = [0,0,0,0,0, 1,1,1,1,1] # the function we're training is " >4 "

# Fixed random_state keeps the 50/50 split reproducible across runs.
data_train, data_test, label_train, label_test = train_test_split(data, labels, test_size=0.5, random_state=7)

model = SVC(kernel='linear')
model.fit(data_train, label_train)
predictions = model.predict(data_test)

# Accuracy reported three ways: model.score, fraction correct, raw count.
print(model.score(data_test, label_test))
print(accuracy_score(label_test, predictions))
print(accuracy_score(label_test, predictions, normalize=False))
# NOTE(review): confusion_matrix is called as (pred, true) here, which
# transposes the usual (y_true, y_pred) convention — confirm intent.
print(metrics.confusion_matrix(predictions, label_test))
print(metrics.classification_report(label_test, predictions))
6,270 | 49d76458b8adcf6eea9db2ef127609ff96e03ad1 | from django.contrib import admin
from django.urls import path, include
from serverside.router import router
from rest_framework.authtoken import views as auth_views
from . import views
from .views import CustomObtainAuthToken
# REST endpoints for account and lookup resources.
urlpatterns = [
    # User creation / listing.
    path('users/', views.UserCreateAPIView.as_view(), name='user-list'),
    # Token-based login.
    path('users/login/', CustomObtainAuthToken.as_view()),
    # Single-user read and profile read/update.
    path('users/<int:pk>/', views.ReadUserAPIView.as_view()),
    path('users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),
    # Read-only lookup lists.
    path('charities/', views.ListCharitiesAPIView.as_view()),
    path('categories/', views.ListCategoriesAPIView.as_view())
]
|
6,271 | db140bf66f3e3a84a60a6617ea4c03cc6a1bc56d | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 13:42:47 2018
@author: zhan
"""
from scipy.spatial.distance import pdist, squareform, cdist
import numpy as np
import scipy.io as sci
import os,sys
import datetime
###################################################################
# I_tr:features of training set for image data
# I_te:features of testing set for image data
# T_te:features of training set for text data
# T_te:features of testing set for text data
# L_tr:category label of training set
# L_te:category label of testing set
###############################################################
def unifyKnnKernel(Z, tr_n_I, te_n_I, tr_n_T, te_n_T, k):
    """Build a (test-image x test-text) similarity matrix via a k-NN graph.

    Z is the square cosine-similarity matrix over all samples laid out as
    [train-image, test-image, train-text, test-text].  For every test sample
    only its k most similar *training* samples are kept and row-normalised;
    the two modalities are then linked through the train/train similarity
    block.  Assumes tr_n_I == tr_n_T (paired training items) — the identity
    mask below is only square in that case.

    Returns a (te_n_I, te_n_T) array: W_IT = WI . Y1 . WT^T
    """
    n_train = tr_n_I + tr_n_T
    # Row indices of all test samples (images then texts).
    x1 = np.concatenate([range(tr_n_I, tr_n_I + te_n_I),
                         range(tr_n_I + te_n_I + tr_n_T,
                               tr_n_I + te_n_I + tr_n_T + te_n_T)])
    # Row/column indices of all training samples.
    x2 = np.concatenate([range(0, tr_n_I),
                         range(tr_n_I + te_n_I, tr_n_I + te_n_I + tr_n_T)])
    y1 = np.concatenate([range(0, tr_n_I),
                         range(tr_n_I + te_n_I, tr_n_I + te_n_I + tr_n_T)])
    W = Z[x1, :][:, y1]          # test -> train similarities
    Y = Z[x2, :][:, y1]          # train -> train similarities
    KN = -np.sort(-W)            # each row sorted descending
    I = np.argsort(-W)
    for i in range(0, te_n_I + te_n_T):
        # Keep only the k largest similarities of row i; zero out the rest.
        topk = np.reshape(KN[i, 0:k], [1, k])
        row = np.concatenate([topk, np.zeros([1, n_train - k])], 1)
        W[i, I[i, :]] = row
    WI = W[0:te_n_I, :]
    WT = W[te_n_I:te_n_I + te_n_T, :]
    # Row-normalise each modality's k-NN weights to sum to 1.
    WI_s = np.reshape(np.sum(WI, 1), [len(WI), 1])
    # BUG FIX: the original reshaped WT_s with len(WI), which breaks (or
    # silently mis-shapes) whenever te_n_I != te_n_T.
    WT_s = np.reshape(np.sum(WT, 1), [len(WT), 1])
    WI = WI / np.tile(WI_s, [1, n_train])
    WT = WT / np.tile(WT_s, [1, n_train])
    # Mask marking where a row and column refer to the same training item
    # (image i and text i share index i); diagonal entries forced to 1.
    m = np.reshape(range(tr_n_I), [tr_n_I, 1])
    m1 = np.tile(np.concatenate([m, m]), [1, n_train])
    Y0 = (m1 == m1.T)
    Y1 = np.multiply(Y, (1. - Y0)) + Y0
    return np.matmul(np.matmul(WI, Y1), WT.T)
def computer_av(distance, label):
    """Mean average precision over the top-50 (or fewer) ranked items.

    distance : (m, n) similarity matrix, larger = more similar; row i ranks
               all n database items for query i.
    label    : per-item label vectors; a retrieved item is a true positive
               when its label vector equals the query's exactly.
               (Assumes queries and database items share this index space —
               matches how the caller passes L_te for both sides.)

    Returns the mean over queries of the average precision at depth 50.
    """
    m, n = np.shape(distance)
    av_precision = np.zeros([m, 1])
    order = np.argsort(-distance)        # descending similarity per row
    # BUG FIX: the original always scanned 50 items and raised IndexError
    # whenever fewer than 50 items were retrievable.
    depth = min(50, n)
    for i in range(m):
        cumulate = 0.0
        tp_counter = 0.0
        for j in range(depth):
            if np.sum(np.abs(label[order[i, j]] - label[i])) == 0:
                tp_counter += 1.0
                # Precision at rank j+1, accumulated at each true positive.
                cumulate = cumulate + (float(tp_counter) / float(j + 1))
        if tp_counter != 0:
            av_precision[i] = cumulate / float(tp_counter)
    return np.mean(av_precision)
if __name__ == '__main__':
    data1 = sci.loadmat('best_data.mat')
    begin = datetime.datetime.now()
    # Cosine similarity over all (train+test, image+text) feature rows,
    # rescaled from cosine distance into [0, 1].
    D1 = pdist(np.concatenate([data1['I_tr'], data1['I_te'],
                               data1['T_tr'], data1['T_te']]), 'cosine');
    Z1 = 1.0-squareform(D1)/2.0;
    h = []
    p = []
    # Sweep the neighbourhood size k and log retrieval mAP both directions.
    for k in range(10, 1000, 10):
        distance = unifyKnnKernel(Z1,
                                  len(data1['I_tr']), len(data1['I_te']),
                                  len(data1['T_tr']), len(data1['T_te']),
                                  k)
        end = datetime.datetime.now()
        # Image->text and text->image mean average precision.
        re1 = computer_av(distance, data1['L_te'].T)
        re2 = computer_av(distance.T, data1['L_te'].T)
        avg = (re1 + re2)/2.0
        # Python 2 print statement.
        print k
        print('The KNN test result:ItoT:{: .4}; TtoI: {: .4}; avg: {: .4}'.format(re1, re2, avg))
        # Append this k's scores to the running results file.
        f1 = open('knn_test.txt', "a")
        f1.write('k: ')
        f1.write(str(k))
        f1.write('\t')
        f1.write('T2I: ')
        f1.write(str(re1))
        f1.write('\t')
        f1.write('I2T: ')
        f1.write(str(re2))
        f1.write('\t')
        f1.write('AVG: ')
        f1.write(str(avg))
        f1.write('\n')
        f1.close()
|
6,272 | b00c07ee3cdba55800c9701b7b8b0e3c9079e9f8 | from util import *
def K_step(x):
    """Evaluation rule for the continuant K over a symbolic vector x.

    K of a scalar (empty shape) is 1; K(x[:1]) = 1, K(x[:2]) = x[1], and
    otherwise K(x[:n]) = K(x[:n-1])*x[n-1] + K(x[:n-2]) (continued-fraction
    recurrence), expressed as a Piecewise over the symbolic length n.
    """
    if not x.shape:
        # Scalar argument: the empty continuant is 1.
        return S.One
    assert len(x.shape) == 1
    n = x.shape[0]
    if n == 2:
        return x[1]
    return Piecewise((1, Equal(n, 1)),
                     (x[1], Equal(n, 2)),
                     (K(x[:n - 1]) * x[n - 1] + K(x[:n - 2]), True))


# Scalar-valued integer function whose deferred evaluation is K_step.
K = Function.K(integer=True, eval=K_step, shape=())
@apply
def apply(self):
    """Unroll K's recurrence one step from the top:
    K(x[:m+2]) == K(x[:m]) + K(x[:m+1]) * x[m+1]."""
    assert self.is_K
    x = self.arg
    n = x.shape[0]
    n -= 2
    # Need at least three elements for the two-step expansion.
    assert n > 0
    return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])
@prove
def prove(Eq):
    """Prove the unrolled recurrence by expanding K's definition once."""
    x = Symbol(integer=True, shape=(oo,))
    n = Symbol(integer=True, positive=True)

    Eq << apply(K(x[:n + 2]))

    # Expanding K's definition on the left-hand side closes the proof.
    Eq << Eq[-1].this.lhs.defun()


if __name__ == '__main__':
    run()
# created on 2021-08-18
|
6,273 | ad054febac3a04c625653a2f3864506eeb672d9e | ...
...
...
model = Sequential()
# Fix: the Conv2D call was missing its closing parenthesis, making the
# snippet a syntax error.
model.add(Conv2D(32, kernel_size=3, input_shape=(256, 256, 3)))
...
...
6,274 | 939011fca968d5f9250beb29a0bb700200e637df | # coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.basic_channel_info import BasicChannelInfo
from .models.basic_follower_info import BasicFollowerInfo
from .models.basic_following_info import BasicFollowingInfo
from .models.categories import Categories
from .models.category import Category
from .models.channel_details import ChannelDetails
from .models.channel_search_results import ChannelSearchResults
from .models.channel_video import ChannelVideo
from .models.channel_videos import ChannelVideos
from .models.description_panel import DescriptionPanel
from .models.event import Event
from .models.events import Events
from .models.language import Language
from .models.languages import Languages
from .models.mobile_notify_settings import MobileNotifySettings
from .models.multi_participant import MultiParticipant
from .models.notification import Notification
from .models.notification_1 import Notification1
from .models.notifications import Notifications
from .models.online_channels import OnlineChannels
from .models.online_details import OnlineDetails
from .models.online_notify_settings import OnlineNotifySettings
from .models.thumbnail import Thumbnail
from .models.user_data import UserData
from .models.user_email_settings import UserEmailSettings
from .models.video_search_result import VideoSearchResult
from .models.video_search_results import VideoSearchResults
from .models.webhook import Webhook
# import apis into sdk package
from .apis.bot_api import BotApi
from .apis.channel_api import ChannelApi
from .apis.multistream_api import MultistreamApi
from .apis.public_api import PublicApi
from .apis.sensitive_api import SensitiveApi
from .apis.user_api import UserApi
from .apis.webhook_api import WebhookApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
|
6,275 | 025c740813f7eea37abadaa14ffe0d8c1bedc79d | """
Design and implement a TwoSum class. It should support the following operations: add and find.
add - Add the number to an internal data structure.
find - Find if there exists any pair of numbers which sum is equal to the value.
Example 1:
add(1); add(3); add(5);
find(4) -> true
find(7) -> false
Example 2:
add(3); add(1); add(2);
find(3) -> true
find(6) -> false
"""
class TwoSum:
    """Multiset of integers supporting pair-sum queries.

    add() is O(1); find() is O(d) in the number of distinct stored values.
    """

    def __init__(self):
        """Create an empty structure."""
        # value -> number of times it has been added.
        self.cnt = {}

    def add(self, number: int) -> None:
        """Record one more occurrence of *number*."""
        self.cnt[number] = self.cnt.get(number, 0) + 1

    def find(self, value: int) -> bool:
        """Return True iff two stored occurrences sum to *value*."""
        for num, count in self.cnt.items():
            complement = value - num
            if complement not in self.cnt:
                continue
            # Equal halves need at least two occurrences of the same value.
            if complement != num or count > 1:
                return True
        return False
|
6,276 | 760a5a168575a0ea12b93cb58c1e81e313704e35 | # Generated by Django 2.2 on 2020-10-26 15:16
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the admin display name of the Viajes model to 'Movilización'."""

    dependencies = [
        ('viajes', '0001_initial'),
    ]

    operations = [
        # Metadata-only change: no database schema is altered.
        migrations.AlterModelOptions(
            name='viajes',
            options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},
        ),
    ]
|
6,277 | ee7efea569b685ad8d6922e403421227e9ea6922 | from sklearn.linear_model import LinearRegression, LogisticRegression
import numpy as np
import pickle
import os
def Run(datasetFile):
    """Train a model from a comma-separated numeric dataset and pickle it.

    Each non-empty line of *datasetFile* is one instance; the last column is
    the target, all other columns are features.  If every target value
    truncates (via int()) to 0 or 1, a LogisticRegression classifier is
    fitted, otherwise a LinearRegression model.  The fitted model is saved
    next to the input as "<name>MODEL.sav".

    Fixes over the original: both file handles are closed even on error
    (`with`), and the flat index-arithmetic parse is replaced by per-line
    splitting with the same numeric result.
    """
    # Parse the file; blank lines (e.g. a trailing newline) are skipped.
    with open(datasetFile, "r") as userFile:
        rows = [line.strip().split(',') for line in userFile if line.strip()]

    features = np.asarray([row[:-1] for row in rows], dtype=float)
    target = np.asarray([row[-1] for row in rows], dtype=float)

    # Classification iff every target truncates to 0 or 1 — same rule as the
    # original element loop, including its int() truncation quirk.
    isClassification = len(target) > 0 and all(int(t) in (0, 1) for t in target)

    if isClassification:
        mlModel = LogisticRegression().fit(features, target)
    else:
        mlModel = LinearRegression().fit(features, target)

    # Persist the fitted model beside the dataset.
    tmpFileName, _ = os.path.splitext(datasetFile)
    newFilePath = tmpFileName + "MODEL" + ".sav"
    with open(newFilePath, 'wb') as modelFile:
        pickle.dump(mlModel, modelFile)
6,278 | 659f45d2c6c7138f26b4a8d15d1710ae60450b08 | from OTXv2 import OTXv2
from pandas.io.json import json_normalize
from datetime import datetime, timedelta
import getopt
import sys
from sendemail import sendemail
from main import otx
import csv
import pandas as pd
from pandas import read_csv
import os.path
def tools():
    """Prompt for a search term, append the matching OTX pulses to a CSV,
    then optionally e-mail the report.

    Fixes over the original:
      * `file_exists` was referenced but never defined (its computation was
        commented out), so every call raised NameError.
      * The CSV was reopened in "w" mode inside the pulse loop, truncating
        it on every pulse; it is now opened once, in append mode.
      * Rows were only written on the `else` branch (i.e. never together
        with the header); header and rows are now both written correctly.
      * os.remove() targeted 'pulseIdsList.csv' instead of the file that
        was actually written.
      * SystemExit() was constructed but never raised.
    """
    search = input('Please enter search: ').strip()
    # Top 40 pulses matching the search term.
    pulsesJSON = otx.search_pulses(search, 40)

    filename = 'shenzi_pulses.csv'
    fieldnames = ['Pulse ID', 'Name', 'Description', 'Modified']
    file_exists = os.path.isfile(filename)

    with open(filename, "a") as csv_file:
        writer = csv.DictWriter(csv_file, delimiter=',',
                                lineterminator='\n', fieldnames=fieldnames)
        # Only write the header when creating the file.
        if not file_exists:
            writer.writeheader()
        for aPulse in pulsesJSON["results"]:
            writer.writerow({'Pulse ID': aPulse.get('id'),
                             'Name': aPulse.get('name'),
                             'Description': aPulse.get('description'),
                             'Modified': aPulse.get('modified')})

    # Simple option to email or quit.
    option = int(input('1: To Email 2: To quit : '))
    if option == 1:
        sendemail()
        # Remove the report once it has been e-mailed.
        os.remove(filename)
    elif option == 2:
        raise SystemExit
|
6,279 | d5a5c6f9d483b2998cd0d9e47b37ab4499fa1c2a | import discord
from discord.ext import commands
class TestCommands(commands.Cog, description="Unstable test commands", command_attrs=dict(hidden=True, description="Can only be used by an Owner")):
    """Owner-only cog holding experimental commands; see setup() below for
    the debug-mode gate."""

    def __init__(self, bot):
        self.bot = bot
        # Hide this cog from the public help listing.
        self.hidden = True
        print("Loaded", __name__)

    async def cog_check(self, ctx):
        # Gate every command in this cog behind bot ownership.
        return await self.bot.is_owner(ctx.author)
def setup(bot):
    """Extension entry point: register the cog only in debug mode."""
    if not getattr(bot, "debug", False):
        return
    bot.add_cog(TestCommands(bot))
|
6,280 | 818e6842d4a1f8978ec14bca06981ec933c00376 | import os
# Gunicorn settings for the Maestro service; both values are overridable
# through environment variables.
bind = "0.0.0.0:" + str(os.environ.get("MAESTRO_PORT", 5005))
# Fix: os.environ.get returns a *string* when MAESTRO_GWORKERS is set —
# coerce so the worker count is always an int.
workers = int(os.environ.get("MAESTRO_GWORKERS", 2))
|
6,281 | ee03263d92372899ec1feaf3a8ea48677b053676 | """API - Files endpoints."""
import os
import click
import cloudsmith_api
import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from .. import ratelimits
from ..rest import create_requests_session
from ..utils import calculate_file_md5
from .exceptions import ApiException, catch_raise_api_exception
from .init import get_api_client
def get_files_api():
    """Get the files API client."""
    # Shared helper so every call site gets an identically configured client.
    return get_api_client(cloudsmith_api.FilesApi)
def validate_request_file_upload(owner, repo, filepath, md5_checksum=None):
    """Ask the API to validate a prospective file upload.

    The md5 is computed from *filepath* when the caller does not supply one.
    Returns the checksum that was submitted.
    """
    client = get_files_api()

    if not md5_checksum:
        md5_checksum = calculate_file_md5(filepath)

    payload = {"filename": os.path.basename(filepath), "md5_checksum": md5_checksum}
    with catch_raise_api_exception():
        _, _, headers = client.files_validate_with_http_info(
            owner=owner, repo=repo, data=payload
        )

    ratelimits.maybe_rate_limit(client, headers)
    return md5_checksum
def request_file_upload(owner, repo, filepath, md5_checksum=None):
    """Request a new package file upload (for creating packages).

    Returns (identifier, upload_url, upload_fields) for the subsequent
    pre-signed POST; the md5 is computed from *filepath* when not given.
    """
    client = get_files_api()

    md5_checksum = md5_checksum or calculate_file_md5(filepath)

    with catch_raise_api_exception():
        data, _, headers = client.files_create_with_http_info(
            owner=owner,
            repo=repo,
            data={"filename": os.path.basename(filepath), "md5_checksum": md5_checksum},
        )

    # pylint: disable=no-member
    # Pylint detects the returned value as a tuple
    ratelimits.maybe_rate_limit(client, headers)
    return data.identifier, data.upload_url, data.upload_fields
def upload_file(upload_url, upload_fields, filepath, callback=None):
    """Upload a pre-signed file to Cloudsmith.

    *callback*, if given, receives the MultipartEncoderMonitor on every read
    and can be used for progress reporting.  Raises ApiException when the
    storage backend rejects the POST.
    """
    upload_fields = list(upload_fields.items())
    # The file part is appended last so the pre-signed policy fields
    # precede it in the multipart body.
    upload_fields.append(
        ("file", (os.path.basename(filepath), click.open_file(filepath, "rb")))
    )
    encoder = MultipartEncoder(upload_fields)
    monitor = MultipartEncoderMonitor(encoder, callback=callback)

    # Honour any proxy configured on the API client.
    config = cloudsmith_api.Configuration()
    if config.proxy:
        proxies = {"http": config.proxy, "https": config.proxy}
    else:
        proxies = None

    headers = {"content-type": monitor.content_type}

    client = get_files_api()
    headers["user-agent"] = client.api_client.user_agent

    session = create_requests_session()
    resp = session.post(upload_url, data=monitor, headers=headers, proxies=proxies)

    try:
        resp.raise_for_status()
    except requests.RequestException as exc:
        # Re-wrap transport errors in the CLI's ApiException type.
        raise ApiException(
            resp.status_code, headers=exc.response.headers, body=exc.response.content
        )
|
6,282 | e0e00688a75021c2f8b608d4c942f5e68f6a6a48 | # -*- coding:utf-8 -*-
import re
# An ordinary string pattern matches itself literally.
re_str = r'abc'
result = re.fullmatch(re_str, 'abc')
print(result)

# '.' matches any single character (one '.' per character).
re_str = r'a.c'
result = re.fullmatch(re_str, 'abc')
print(result)

# \w matches a letter, digit or underscore.
# Match a length-5 string whose first two characters are word characters and
# whose last three are arbitrary; \w also matches CJK characters.
re_str = r'\w\w...'
result = re.fullmatch(re_str, '_a123')
print(result)

# \s matches a whitespace character (space, tab, newline: \t, \r, \n).
re_str = r'\w\w\s\w'
result = re.fullmatch(re_str, 'hj\t8')
print(result)

# \d matches a digit character.
re_str = r'\d\d\d..'
result = re.fullmatch(re_str, '082ww')
print(result)

# \b asserts a word boundary.
re_str = r'hello\bworld'
result = re.fullmatch(re_str, 'hello world')
print(result)
re_str = r'\bhello,\bworld'
result = re.fullmatch(re_str, 'hello,world')
print(result)

# ^ anchors the match at the start of the string.
re_str = r'^The..'
result = re.fullmatch(re_str, 'The2;')
print(result)

# $ anchors the match at the end of the string.
re_str = r'The$'
result = re.fullmatch(re_str, 'The')
print(result)

# An uppercase class negates its lowercase counterpart:
# \W non-word character, \D non-digit, \S non-whitespace, \B non-boundary.
re_str = r'\d\D\s\s\Ba'
print(re.fullmatch(re_str, '2a a'))

# Character set: match any one character listed inside the brackets.
re_str = r'\d[bcd]'
result = re.fullmatch(re_str, '2d')
print(result)

# [a-z]            any lowercase letter
# [A_Z]            (sic) intended: any uppercase letter
# [a-zA-Z]         any letter
# [1-7]            digits 1 through 7
# [\u4e00-\u9fa5]  any CJK character
# [char1char2-]    a trailing '-' means a literal minus sign
re_str = r'[1-7][abc-][a-z]'
result = re.fullmatch(re_str, '3-b')
print(result)

# [^abc]  any character other than a, b or c
# [^\d]   any non-digit character
# [^a-z]  any character other than a lowercase letter
# [abc^]  any of the characters a, b, c or ^
re_str = r'[^a-z]'
result = re.fullmatch(re_str, '是')
print(result)

# Repetition qualifiers.
# * (0 or more): a* — zero or more a; \d* — zero or more digits;
# [abc]* — a/b/c 0+ times; [A-F]* — A..F 0+ times.
print(re.fullmatch(r'a*b', 'b'))
# + (1 or more)
print(re.fullmatch(r'a+b', 'aaaab'))
# ? (0 or 1)
print(re.fullmatch(r'[+-]?[1-9]\d*', '+145345'))
# {N}   exactly N times, e.g. a{3} is three a's
# {M,N} between M and N times
# {,N}  at most N times
# {M,}  at least M times
re_str = r'[a-zA-Z][a-zA-Z\d]{5,11}'
# str1 = input('请输入密码:')
str1 = 'ab123456'
result = re.fullmatch(re_str, str1)
if result:
    print('密码正确')
else:
    print('密码错误')

# Alternation, capturing and greediness.
# cond1|cond2 matches cond1 or cond2, e.g. \d{2}|[a-z] matches two digits or
# one lowercase letter.  Alternation short-circuits: once cond1 matches,
# cond2 is not tried.
re_str = r'[-+]?[1-9]\d*[.]?\d*|[-+]?0[.][0-9]*[1-9]|0'
result = re.fullmatch(re_str, '0.0000009')
print(result)

# Capturing: parenthesised groups make findall return only the group text.
# re.findall(pattern, string) returns all matching substrings as a list.
str1 = 'ahs123+34asdf24'
print(re.findall(r'\d+', str1))
str2 = 'a153s123+34asfa24'
print(re.findall(r'a\d+', str2))
print(re.findall(r'a(\d+)', str2))
str3 = 'http://www.qq.com'
print(re.findall(r'^(http://)?www.(\w+).com', str3))

# Backreferences: \N repeats the text captured by group N.
re_str = r'(\d{3})([a-z]{2})a\1{2}-\2'
print(re.findall(re_str, '123efa123123-ef'))

# Greediness: appending ? to a quantifier (*?, +?, ??, {M,N}?, {M,}?)
# makes it match as little as possible.
re_str = 'a.+b'
re_str1 = 'a.+?b'
str1 = 'xxahdjbnnkhasssbkkkkk'
print(re.findall(re_str, str1))
print(re.findall(re_str1, str1))

# Escaping metacharacters with backslash.
re_str = r'a\+\(\d{2}\)'
print(re.fullmatch(re_str, 'a+(23)'))

# The re module.
# compile: pre-build a pattern object for reuse.
re_str = r'\d{3}'
re_obj = re.compile(re_str)
print(re_obj.fullmatch('234'))

# match anchors only at the start of the string; fullmatch must consume the
# whole string.  Both return a match object on success, None on failure.
re_str = r'\d([A-Z]{2})'
result = re.fullmatch(re_str, '2HKdfsd')
print(result)
result = re.match(re_str, '8KLsifdfd==')
print(result)

# Match-object API.
# start()/end() give the matched span; start(n)/end(n) give group n's span.
print(result.start(), result.end())
# print(result.start(1), result.end(2))
# group() returns the whole match; group(n) returns group n's text.
print(result.group())
print(result.group(1))
# .string is the original subject string.
print(result.string)

# search(pattern, string): first substring anywhere that matches, or None.
str1 = 'abc123hks362shjjk990kll'
result = re.search(r'\d{3}[a-z]{2}', str1)
print(result)

# split(pattern, string): split the string on every match of the pattern.
str1 = 'ab+c7hdjd8jss-sk9s9kk*k'
result = re.split(r'\d+|[+*-]+', str1)
print(result)

# findall(pattern, string): every match as a list of strings.
# NOTE(review): this shadows the builtin name `str`.
str = 'abcd1235asdf'
result = re.findall(r'a[a-zA-Z]+', str)
print(result)

# finditer(pattern, string): matches as an iterator (hand-rolled sketch).
# def yt_finditer(pattern, string):
#     re1 = re.search(pattern, string)
#     while re1:
#         yield re1
#         string = string[re1.end():]
#         re1 = re.search(pattern, string)
#
# str1='haja37jjkd89sdhs909nnna238==='
# result = yt_finditer(r'[a-zA-Z]{2,}(\d+)(a-z)+?', str1)
# print(next(result))
|
6,283 | 4a8663531f303da29371078e34dc7224fc4580e3 | # Author: Kenneth Lui <hkkenneth@gmail.com>
# Last Updated on: 01-11-2012
## Usage: python ~/code/python/001_Fastq_Trimming.py <FIRST BASE> <LAST BASE> <FASTQ FILES....>
## Bases are inclusive and 1-based
#from Bio.SeqIO.QualityIO import FastqGeneralIterator
#handle = open(sys.argv[2], 'w')
#for title, seq, qual in FastqGeneralIterator(open(sys.argv[1])):
# handle.write("@%s\n%s\n+\n%\n" % (title, seq[...:...], qual[...:...]))
#handle.close()
from Bio import SeqIO
import sys
# Flush buffered records every RECORD_BUFFER_SIZE reads to bound memory use.
RECORD_BUFFER_SIZE = 100000

# CLI arguments are 1-based and inclusive; convert to a Python slice window.
start = int(sys.argv[1]) - 1
end = int(sys.argv[2])

for s in sys.argv[3:]:
    # Output name encodes the trim window, e.g. reads.fastq.5-50.trimmed
    file = open(s + "." + sys.argv[1] + "-" + sys.argv[2] + ".trimmed", 'w')
    r_list = []
    size = 0
    for r in SeqIO.parse(s, "fastq"):
        # Biopython record slicing trims sequence and qualities together.
        r_list.append(r[start:end])
        size += 1
        if size == RECORD_BUFFER_SIZE:
            SeqIO.write(r_list, file, "fastq")
            r_list = []
            size = 0
    # Write any remaining buffered records.
    if size > 0:
        SeqIO.write(r_list, file, "fastq")
    file.close()
|
6,284 | a2a94e87bb9af1ccaf516581d6662d776caf0b0d | """
Project: tomsim simulator
Module: FunctionalUnit
Course: CS2410
Author: Cyrus Ramavarapu
Date: 19 November 2016
"""
# DEFINES: the two possible occupancy states of a unit.
BUSY = 1
FREE = 0


class FunctionalUnit:
    """One hardware functional unit (integer, divide, multiply, load or
    store) as modelled by the tomsim Tomasulo simulator.

    Tracks occupancy (BUSY/FREE), the cycle the in-flight operation ends,
    the renamed destination it will write, and a running instruction count.
    """

    def __init__(self, func_id, lat):
        # Total instructions issued to this unit (for statistics).
        self.instruction_count = 0
        # Cycles this unit needs to finish one operation.
        self.latency = lat
        self.status = FREE
        self.func_id = func_id
        # Cycle at which the in-flight operation completes (None when idle).
        self.end_cycle = None
        # Renamed register/location the result will be written to.
        self.destination = None

    def __str__(self):
        return """
        Id: {}
        Instruction Count: {}
        Latency: {}
        Status: {}
        End Cycle: {}
        Destination {}
        """.format(self.func_id,
                   self.instruction_count,
                   self.latency,
                   self.status,
                   self.end_cycle,
                   self.destination)

    def get_latency(self):
        """Return the unit's per-operation latency in cycles."""
        return self.latency

    def set_status(self, status):
        """Mark the unit BUSY or FREE."""
        self.status = status

    def get_status(self):
        """Return the current occupancy: FREE (0) or BUSY (1)."""
        return self.status

    def get_statistics(self):
        """Return (func_id, instruction_count) for reporting."""
        return (self.func_id, self.instruction_count)

    def get_end(self):
        """Return the cycle the current operation completes."""
        return self.end_cycle

    def get_destination(self):
        """Return the renamed destination this unit will write."""
        return self.destination

    def increment_instr(self):
        """Count one more instruction executed by this unit."""
        self.instruction_count += 1
|
6,285 | 549d7368d49cf2f4d2c6e83e300f31db981b62bd | #!
import os
import sys
#get current working directory
# get current working directory
cwd = os.getcwd()
ovjtools = os.getenv('OVJ_TOOLS')
javaBinDir = os.path.join(ovjtools, 'java', 'bin')

# get the envirionment
# (JAVA_HOME and PATH point at the bundled JDK so the sub-makes below
# compile with the expected toolchain)
env = Environment(ENV = {'JAVA_HOME' : javaBinDir,
                         'PATH' : javaBinDir + ':' + os.environ['PATH']})

# Build the test-harness jars via their makefiles.
env.Execute('cd vjunit && make -f makevjunit')
env.Execute('cd VJQA/src && make -f makexmlcheck')

# Re-create the ovj_qa staging tree from scratch.
Execute('rm -rf ovj_qa')
path = os.path.join(cwd, 'ovj_qa')
if not os.path.exists(path):
    os.makedirs(path)
# Copy the QA suite in, dropping sources and stale build products.
Execute('cp -r VJQA ovj_qa/OVJQA')
Execute('rm -rf ovj_qa/OVJQA/src')
Execute('rm -f ovj_qa/OVJQA/bin/lib/vjunit.jar')
Execute('cp vjunit/vjunit.jar ovj_qa/OVJQA/bin/lib/')
Execute('rm -f ovj_qa/OVJQA/java/*')
Execute('cp VJQA/src/XmlTest.jar VJQA/src/*.class ovj_qa/OVJQA/java/')
Execute('cp -r vjtest ovj_qa/ovjtest')
6,286 | b739a5d359b4d1c0323c7cd8234e4fe5eb9f3fcb | # -*- coding: utf-8 -*-
import logging
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.utils import IntegrityError
from django.shortcuts import redirect, render
from django.utils.translation import gettext_lazy as _
from keymanager.settings import PAGE_SIZE
from .forms import LoginForm
from .forms import UserCreateForm, UserEditForm
from utils.filters import require_superuser
LOG = logging.getLogger(__name__)
def require_superuser_or_self(func):
    """View decorator: allow superusers, or a user acting on their own id.

    The wrapped view must accept (request, user_id); everyone else gets
    the 403 page.
    """
    def check(request, user_id):
        # user_id arrives from the URL as text; it is compared against the
        # stringified session user id.
        # NOTE(review): bytes == str is always False on Python 3 — this
        # comparison presumes a Python 2 runtime; confirm.
        if request.user.is_superuser or \
                user_id.encode("utf-8") == str(request.user.id):
            return func(request, user_id)
        return render(request, "403.html")
    return check
@require_superuser
def index(request):
    """List every user except the requesting superuser, paginated."""
    template_name = "users/index.html"
    msg = ""
    try:
        users = User.objects.exclude(id=request.user.id)
    except:
        # Best effort: fall back to an empty listing plus an error message.
        msg = _("Unable to list users.")
        LOG.error(msg)
        users = []

    paginator = Paginator(users, PAGE_SIZE)
    page = request.GET.get('page')
    try:
        users = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric ?page= -> first page.
        users = paginator.page(1)
    except EmptyPage:
        # Out-of-range ?page= -> last page.
        users = paginator.page(paginator.num_pages)

    return render(request, template_name, {"users": users, "message": msg})
@require_superuser
def create(request):
    """Render (GET) or process (POST) the new-user form."""
    template_name = "users/create_user.html"
    msg = ""
    user_form = UserCreateForm()
    if request.method == "POST":
        user_form = UserCreateForm(request.POST)
        if user_form.is_valid():
            try:
                # create_user hashes the password before saving.
                new_user = User.objects.create_user(
                    request.POST['username'],
                    request.POST['email'],
                    request.POST['password'])
                new_user.save()
                msg = _('Success create user "%s"') % \
                    user_form.cleaned_data['username'].encode("utf-8")
                LOG.info(msg)
            except IntegrityError:
                # Username collision: surface a friendly message.
                msg = _("User already exist, please try another username.")
                LOG.error(msg)
            except:
                msg = _('Unable to create user "%s"') % \
                    user_form.cleaned_data['username'].encode("utf-8")
                LOG.error(msg)
    return render(request, template_name, {"user_form": user_form,
                                           "message": msg})
@require_superuser
def delete(request, user_id):
    """Delete the user with the given id.

    If the superuser deleted their own account, their session is ended too.
    """
    try:
        User.objects.get(id=user_id).delete()
    except Exception:
        msg = _("Unable to delete user(%s)") % user_id
        LOG.error(msg)
    # Fix: user_id arrives from the URL as a string while request.user.id
    # is an int, so the original `user_id == request.user.id` was always
    # False and a self-delete never logged the user out.
    if str(user_id) == str(request.user.id):
        logout(request)
    return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
    """Disable the given account; end the session on self-deactivation."""
    try:
        user = User.objects.get(id=user_id)
        user.is_active = False
        user.save()
    except:
        msg = _("Unable to deactivate user(%s)") % user_id
        LOG.error(msg)
    # Fix: str-vs-int comparison — `user_id == request.user.id` was always
    # False, so deactivating yourself never ended your session.
    if str(user_id) == str(request.user.id):
        logout(request)
    return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
    """Re-enable the given account; end the session on self-activation."""
    try:
        user = User.objects.get(id=user_id)
        user.is_active = True
        user.save()
    except:
        msg = _("Unable to activate user(%s)") % user_id
        LOG.error(msg)
    # Fix: str-vs-int comparison — the original condition could never be
    # True, so the self-action logout branch was dead code.
    if str(user_id) == str(request.user.id):
        logout(request)
    return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
    """Let a superuser — or the user themselves — update account fields.

    Blank form fields leave the current value unchanged; a non-empty
    password is re-hashed via set_password.
    """
    template_name = "users/update_user.html"
    msg = ""
    user = User.objects.get(id=user_id)
    user_form = UserEditForm(initial={"username": user.username,
                                      "email": user.email})
    if request.method == "POST":
        user_form = UserEditForm(request.POST)
        if user_form.is_valid():
            username = request.POST['username']
            email = request.POST['email']
            password = request.POST['password']
            if username:
                user.username = username
            if email:
                user.email = email
            if password:
                # Hash the new password; never store it raw.
                user.set_password(password)
            user.save()
            msg = _('Success updated user "%s"') % username.encode("utf-8")
            LOG.info(msg)
    return render(request, template_name, {"user_id": user_id,
                                           "user_form": user_form,
                                           "message": msg})
def login(request):
    """Authenticate a user and start a session.

    GET renders an empty login form. POST validates credentials; on
    success the user is logged in and redirected to the keys index, on
    any failure an error message is shown and the *bound* form (with its
    validation errors) is re-rendered.
    """
    template_name = 'auth/login.html'
    msg = ""
    if request.user.is_authenticated():
        return redirect(reverse("keys:index"))
    # BUG FIX: the original passed the LoginForm *class* to the template
    # and discarded the bound form on a failed POST, losing field errors.
    form = LoginForm()
    if request.method == "POST":
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data["password"]
            user = authenticate(username=username, password=password)
            if user:
                if user.is_active:
                    django_login(request, user)
                    msg = _("%s logged in successfully.") % \
                        username.encode('utf-8')
                    LOG.info(msg)
                    return redirect(reverse('keys:index'))
        # Reached on invalid form, unknown credentials, or inactive user.
        msg = _("Invalid username or password.")
        LOG.error(msg)
    return render(request, template_name, {"user_form": form,
                                           "message": msg})
def logout(request):
    """End the current session and return to the site index."""
    django_logout(request)
    return redirect(reverse("index"))
6,287 | 2a83bc9157e2210da46e58c56fc0b7199856f4c0 | msg = "eduardo foi a feira"
if 'feira' in msg:
print('Sim, foi a feira')
else:
print('não ele não foi a feira')
|
6,288 | adf8b52f6e71546b591ceb34a9425c28f74883fa | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Project Euler: 0010
https://projecteuler.net/problem=10
Summation of primes
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
import math
import sys
PROBLEM = 10
SOLVED = True
SPEED = 29.16
TAGS = ['primes']
class Primes(object):
    """Iteratable class that handles prime number generation and testing"""

    # Cache of currently known primes -- a *class* attribute, so it is
    # shared (and grows) across all Primes instances.
    known_primes = [2, 3]

    def __init__(self, maximum=float('inf'), count=float('inf')):
        # maximum: stop once a prime would exceed this value
        # count: stop after yielding this many primes
        self.maximum = maximum
        self.count = count
        self.__iter = 0  # index of the next prime to yield

    def __iter__(self):
        return self

    def __getitem__(self, key):
        """Return the key-th prime (0-based), generating more as needed."""
        if not isinstance(key, int):
            raise TypeError('Cannot use "%s" as a list index' % type(key))
        while len(self.known_primes) <= key:
            self.next()
        return self.known_primes[key]

    def next(self):
        """Fetch the next prime number"""
        # NOTE(review): Python 2 iterator protocol (the file uses py2
        # print statements); Python 3 would also need __next__.
        if self.__iter >= self.count:
            # print 'Reached maximum count %d (%d)' % (self.count, self.__iter)
            raise StopIteration()
        if self.__iter < len(self.known_primes):
            # Serve from the shared cache first.
            if self.known_primes[self.__iter] > self.maximum:
                raise StopIteration()
            key = self.__iter
            self.__iter += 1
            return self.known_primes[key]
        # Cache exhausted: trial-divide odd candidates above the largest
        # known prime until the next prime is found.
        candidate = self.known_primes[-1] + 2
        while True:
            # print 'Checking to see if candidate %d is prime' % candidate
            if candidate > self.maximum:
                raise StopIteration()
            if not self.first_factor(candidate):
                self.known_primes.append(candidate)
                self.__iter += 1
                return candidate
            candidate += 2

    @classmethod
    def first_factor(cls, number):
        """Returns the lowest factor of the number.

        If the number is prime, None is returned instead.
        """
        # Only primes up to sqrt(number) need to be tested.
        for prime in cls(maximum=math.sqrt(number)):
            if not number % prime:
                return prime
        return None

    @classmethod
    def factor(cls, number):
        """Returns a list of prime factors that this number is composed of"""
        factors = []
        for prime in cls():
            if prime > number:
                break
            # print 'Checking to see if %d is a factor of %d' % (prime, number)
            # reduce the total iterations
            if prime > math.sqrt(number):
                factors.append(number)
                break
            while not number % prime:
                # NOTE(review): py2 integer division; under py3 this
                # becomes float division and would need //=.
                number /= prime
                factors.append(prime)
        return factors
def main(args=sys.argv[1:]):
    """Solve problem 10: print the sum of all primes below the maximum.

    :param args: optional CLI arguments; args[0] overrides the default
        maximum of 2,000,000.
    """
    # BUG FIX: the py2-only `print x` statements made the module a syntax
    # error under Python 3; the parenthesized form below is valid (and
    # behaves identically) under both Python 2 and Python 3.
    print('Project Euler: %04d' % PROBLEM)
    maximum = 2000000
    if len(args) > 0:
        maximum = int(args[0])
    print(sum(Primes(maximum=maximum)))


if __name__ == '__main__':
    main()
|
6,289 | 4a892c3532a3e3ddcd54705336dce820ff49b91b | """
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Graph Search Policy Network.
"""
from typing import List, NamedTuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import src.utils.ops as ops
from src.knowledge_graph import KnowledgeGraph, ActionSpace, Observation, Action
from src.utils.ops import var_cuda, zeros_var_cuda
class BucketActions(NamedTuple):
    """Per-bucket action spaces/distributions plus bookkeeping.

    inv_offset holds the indices that restore the original batch order
    after bucketed processing; it is None when the contents are already
    in batch order (merged outcome).
    """
    action_spaces: List[ActionSpace]
    action_dists: List[torch.Tensor]
    inv_offset: Union[List[int], None]
    entropy: torch.Tensor
def pad_and_cat_action_space(
    action_spaces: List[ActionSpace], inv_offset, kg: KnowledgeGraph
):
    """Pad per-bucket action spaces to a common width, concatenate them,
    and restore the original batch order via *inv_offset*."""
    forks = [fork for acsp in action_spaces for fork in acsp.forks]
    r_space = ops.pad_and_cat(
        [acsp.r_space for acsp in action_spaces], padding_value=kg.dummy_r
    )[inv_offset]
    e_space = ops.pad_and_cat(
        [acsp.e_space for acsp in action_spaces], padding_value=kg.dummy_e
    )[inv_offset]
    action_mask = ops.pad_and_cat(
        [acsp.action_mask for acsp in action_spaces], padding_value=0
    )[inv_offset]
    return ActionSpace(forks, r_space, e_space, action_mask)
class GraphWalkAgent(nn.Module):
    """Graph-search policy network.

    Encodes the agent's action history with an LSTM and scores the
    outgoing edges (relation / target-entity pairs) of the current node
    with a two-layer feed-forward network.
    """

    def __init__(self, args):
        super(GraphWalkAgent, self).__init__()
        self.model = args.model
        # If set, actions are represented by the relation embedding only.
        self.relation_only = args.relation_only

        self.history_dim = args.history_dim
        self.history_num_layers = args.history_num_layers
        self.entity_dim = args.entity_dim
        self.relation_dim = args.relation_dim
        if self.relation_only:
            self.action_dim = args.relation_dim
        else:
            self.action_dim = args.entity_dim + args.relation_dim
        self.ff_dropout_rate = args.ff_dropout_rate
        self.rnn_dropout_rate = args.rnn_dropout_rate
        self.action_dropout_rate = args.action_dropout_rate
        self.xavier_initialization = args.xavier_initialization
        self.relation_only_in_path = args.relation_only_in_path
        # List of LSTM (h, c) states, one entry per step taken.
        self.path = None

        # Set policy network modules
        self.define_modules()
        self.initialize_modules()

        # Fact network modules
        self.fn = None
        self.fn_kg = None

    def transit(
        self,
        current_entity,
        obs: Observation,
        kg: KnowledgeGraph,
        use_action_space_bucketing=True,
        merge_aspace_batching_outcome=False,
    ) -> BucketActions:
        """
        Compute the next action distribution based on
            (a) the current node (entity) in KG and the query relation
            (b) action history representation
        :param current_entity: agent location (node) at step t.
        :param obs: agent observation at step t.
            e_s: source node
            query_relation: query relation
            last_step: If set, the agent is carrying out the last step.
            last_r: label of edge traversed in the previous step
            seen_nodes: notes seen on the paths
        :param kg: Knowledge graph environment.
        :param use_action_space_bucketing: If set, group the action space of different nodes
            into buckets by their sizes.
        :param merge_aspace_batch_outcome: If set, merge the transition probability distribution
            generated of different action space bucket into a single batch.
        :return
            With aspace batching and without merging the outcomes:
                db_outcomes: (Dynamic Batch) (action_space, action_dist)
                    action_space: (Batch) padded possible action indices
                    action_dist: (Batch) distribution over actions.
                inv_offset: Indices to set the dynamic batching output back to the original order.
                entropy: (Batch) entropy of action distribution.
            Else:
                action_dist: (Batch) distribution over actions.
                entropy: (Batch) entropy of action distribution.
        """
        # Representation of the current state (current node and other observations)
        X = self.encode_history(
            current_entity, obs.source_entity, kg, obs.query_relation
        )

        # MLP: two linear layers with ReLU and dropout in between.
        X = self.W1(X)
        X = F.relu(X)
        X = self.W1Dropout(X)
        X = self.W2(X)
        X2 = self.W2Dropout(X)

        def policy_nn_fun(X2, acs: ActionSpace):
            # Score every candidate action against the state vector; the
            # mask pushes invalid actions to -inf before the softmax.
            A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)
            action_dist = F.softmax(
                torch.squeeze(A @ torch.unsqueeze(X2, 2), 2)
                - (1 - acs.action_mask) * ops.HUGE_INT,
                dim=-1,
            )
            # action_dist = ops.weighted_softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2), 2), action_mask)
            return action_dist, ops.entropy(action_dist)

        if use_action_space_bucketing:
            action = self.do_it_with_bucketing(
                X2,
                current_entity,
                kg,
                merge_aspace_batching_outcome,
                obs,
                policy_nn_fun,
            )
        else:
            # The non-bucketed code path is currently disabled (the helper
            # below is commented out).
            assert False
            action = self.do_it_without_bucketing(
                X2, current_entity, kg, obs, policy_nn_fun
            )
        return action

    def encode_history(self, current_entity, e_s, kg, query_relation):
        """Build the policy input vector from the history encoding and the
        entity/relation embeddings; the layout depends on the
        relation_only* flags."""
        embedded_q_rel = kg.get_relation_embeddings(query_relation)
        # Last layer's hidden state of the most recent LSTM step.
        encoded_history = self.path[-1][0][-1, :, :]
        if self.relation_only:
            X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
        elif self.relation_only_in_path:
            E_s = kg.get_entity_embeddings(e_s)
            E = kg.get_entity_embeddings(current_entity)
            X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
        else:
            E = kg.get_entity_embeddings(current_entity)
            X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
        return X

    # def do_it_without_bucketing(self, X2, current_entity, kg, obs, policy_nn_fun):
    #     def get_action_space(e, obs, kg):
    #         r_space = kg.action_space["relation-space"][e]
    #         e_space = kg.action_space["entity-space"][e]
    #         action_mask = kg.action_space["action-mask"][e]
    #         return self.apply_action_masks(acsp, e, obs, kg)
    #
    #     action_space = get_action_space(current_entity, obs, kg)
    #     action_dist, entropy = policy_nn_fun(X2, action_space)
    #     db_outcomes = [(action_space, action_dist)]
    #     inv_offset = None
    #     return db_outcomes, entropy, inv_offset

    def do_it_with_bucketing(
        self,
        X2,
        current_entity,
        kg,
        merge_aspace_batching_outcome,
        obs: Observation,
        policy_nn_fun,
    ):
        """Run the policy network once per action-space bucket and stitch
        the per-bucket results back together; optionally merge them into a
        single padded batch."""
        entropy_list = []
        references = []
        buckect_action_spaces, inthis_bucket_indizes = self.get_action_space_in_buckets(
            current_entity, obs, kg
        )
        action_spaces = []
        action_dists = []
        for as_b, inthis_bucket in zip(buckect_action_spaces, inthis_bucket_indizes):
            X2_b = X2[inthis_bucket, :]
            action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)
            references.extend(inthis_bucket)
            action_spaces.append(as_b)
            action_dists.append(action_dist_b)
            entropy_list.append(entropy_b)
        # Argsort of the concatenated batch indices restores batch order.
        inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]
        entropy = torch.cat(entropy_list, dim=0)[inv_offset]
        action = BucketActions(action_spaces, action_dists, inv_offset, entropy)

        if merge_aspace_batching_outcome:
            action_space = pad_and_cat_action_space(
                buckect_action_spaces, inv_offset, kg
            )
            action_dist = ops.pad_and_cat(action.action_dists, padding_value=0)[
                inv_offset
            ]
            action = BucketActions([action_space], [action_dist], None, entropy)
        return action

    def initialize_path(self, action: Action, kg: KnowledgeGraph):
        """Start a fresh path history from the given initial action."""
        # [batch_size, action_dim]
        if self.relation_only_in_path:
            init_action_embedding = kg.get_relation_embeddings(action.rel)
        else:
            init_action_embedding = self.get_action_embedding(action, kg)
        init_action_embedding.unsqueeze_(1)
        # [num_layers, batch_size, dim]
        init_h = zeros_var_cuda(
            [self.history_num_layers, len(init_action_embedding), self.history_dim]
        )
        init_c = zeros_var_cuda(
            [self.history_num_layers, len(init_action_embedding), self.history_dim]
        )
        self.path = [self.path_encoder(init_action_embedding, (init_h, init_c))[1]]

    def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):
        """
        Once an action was selected, update the action history.
        :param action (r, e):
            (Variable:batch) indices of the most recent action
                - r is the most recently traversed edge;
                - e is the destination entity.
        :param offset: (Variable:batch) if None, adjust path history with the given offset, used for search
        :param KG: Knowledge graph environment.
        """

        def offset_path_history(p, offset):
            # Re-index every stored hidden state along the batch dimension.
            for i, x in enumerate(p):
                if type(x) is tuple:
                    new_tuple = tuple([_x[:, offset, :] for _x in x])
                    p[i] = new_tuple
                else:
                    p[i] = x[offset, :]

        # update action history
        if self.relation_only_in_path:
            action_embedding = kg.get_relation_embeddings(action.rel)
        else:
            action_embedding = self.get_action_embedding(action, kg)
        if offset is not None:
            offset_path_history(self.path, offset)

        self.path.append(
            self.path_encoder(action_embedding.unsqueeze(1), self.path[-1])[1]
        )

    def get_action_space_in_buckets(
        self,
        current_entity: torch.Tensor,
        obs: Observation,
        kg: KnowledgeGraph,
        collapse_entities=False,
    ):
        """
        To compute the search operation in batch, we group the action spaces of different states
        (i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to
        save the memory consumption of paddings.

        For example, in large knowledge graphs, certain nodes may have thousands of outgoing
        edges while a long tail of nodes only have a small amount of outgoing edges. If a batch
        contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of
        5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes
        lots of memory.

        With the bucketing approach, each bucket is padded separately. In this case the node
        with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer
        little from padding the action space to 5.

        Once we grouped the action spaces in buckets, the policy network computation is carried
        out for every bucket iteratively. Once all the computation is done, we concatenate the
        results of all buckets and restore their original order in the batch. The computation
        outside the policy network module is thus unaffected.

        :return db_action_spaces:
            [((r_space_b0, r_space_b0), action_mask_b0),
             ((r_space_b1, r_space_b1), action_mask_b1),
             ...
             ((r_space_bn, r_space_bn), action_mask_bn)]

            A list of action space tensor representations grouped in n buckets, s.t.
            r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)

        :return db_references:
            [l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]
            l_batch_refsi stores the indices of the examples in bucket i in the current batch,
            which is used later to restore the output results to the original order.
        """
        db_action_spaces, db_references = [], []
        assert not collapse_entities  # NotImplementedError
        bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(current_entity)
        for b_key in set(bucket_ids.tolist()):
            inthisbucket_indices = (
                torch.nonzero(bucket_ids.eq(b_key)).squeeze().tolist()
            )
            # squeeze() collapses a single-element result to a scalar, so
            # re-wrap it to keep list semantics downstream.
            if not isinstance(inthisbucket_indices, list):  # TODO(tilo) wtf!
                inthisbucket_indices = [inthisbucket_indices]
            inbucket_ids_of_entities_inthisbucket = inbucket_ids[
                inthisbucket_indices
            ].tolist()
            bucket_action_space = kg.bucketid2ActionSpace[b_key]
            e_b = current_entity[inthisbucket_indices]
            obs_b = obs.get_slice(inthisbucket_indices)
            as_bucket = bucket_action_space.get_slice(
                inbucket_ids_of_entities_inthisbucket
            )
            action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)
            action_space_b = ActionSpace(
                as_bucket.forks, as_bucket.r_space, as_bucket.e_space, action_mask
            )
            db_action_spaces.append(action_space_b)
            db_references.append(inthisbucket_indices)

        return db_action_spaces, db_references

    def apply_action_masks(
        self, acsp: ActionSpace, e, obs: Observation, kg: KnowledgeGraph
    ):
        """Combine the masks that remove forbidden actions from the action
        space (ground-truth edge; false negatives on the last step)."""
        r_space, e_space, action_mask = acsp.r_space, acsp.e_space, acsp.action_mask
        e_s, q, e_t, last_step, last_r, seen_nodes = obs

        # Prevent the agent from selecting the ground truth edge
        ground_truth_edge_mask = self.get_ground_truth_edge_mask(
            e, r_space, e_space, obs, kg
        )
        action_mask -= ground_truth_edge_mask
        self.validate_action_mask(action_mask)

        # Mask out false negatives in the final step
        if last_step:
            false_negative_mask = self.get_false_negative_mask(e_space, e_s, q, e_t, kg)
            action_mask *= 1 - false_negative_mask
            self.validate_action_mask(action_mask)

        # Prevent the agent from stopping in the middle of a path
        # stop_mask = (last_r == NO_OP_RELATION_ID).unsqueeze(1).float()
        # action_mask = (1 - stop_mask) * action_mask + stop_mask * (r_space == NO_OP_RELATION_ID).float()

        # Prevent loops
        # Note: avoid duplicate removal of self-loops
        # seen_nodes_b = seen_nodes[l_batch_refs]
        # loop_mask_b = (((seen_nodes_b.unsqueeze(1) == e_space.unsqueeze(2)).sum(2) > 0) *
        #      (r_space != NO_OP_RELATION_ID)).float()
        # action_mask *= (1 - loop_mask_b)
        return action_mask

    def get_ground_truth_edge_mask(
        self, current_nodes, r_space, e_space, obs: Observation, kg: KnowledgeGraph
    ):
        """Mask (1 = masked) for the query triple itself and its inverse,
        so the agent cannot trivially walk the edge it must predict."""
        s_e = obs.source_entity
        t_e = obs.target_entity
        q = obs.query_relation

        def build_mask(source_nodes, target_nodes, relation):
            return (
                (current_nodes == source_nodes).unsqueeze(1)
                * (r_space == relation.unsqueeze(1))
                * (e_space == target_nodes.unsqueeze(1))
            )

        mask = build_mask(s_e, t_e, q)
        inv_q = kg.get_inv_relation_id(q)
        inv_mask = build_mask(t_e, s_e, inv_q)
        # Dummy source entities contribute no mask.
        return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()

    def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
        """Per-example mask of entities in e_space that are known answers
        of (e_s, q) according to the configured answer vectors."""
        if kg.args.mask_test_false_negatives:
            answer_vectors = kg.all_object_vectors
        else:
            answer_vectors = kg.train_object_vectors
        answer_masks = []
        for i in range(len(e_space)):
            _e_s, _q = int(e_s[i]), int(q[i])
            if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:
                # Unknown pair: use an out-of-range id so nothing matches.
                answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
            else:
                answer_vector = answer_vectors[_e_s][_q]
            answer_mask = torch.sum(
                e_space[i].unsqueeze(0) == answer_vector, dim=0
            ).long()
            answer_masks.append(answer_mask)
        answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
        return answer_mask

    def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph):
        """Mask of known alternative answers, excluding the target e_t."""
        answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
        # This is a trick applied during training where we convert a multi-answer predction problem into several
        # single-answer prediction problems. By masking out the other answers in the training set, we are forcing
        # the agent to walk towards a particular answer.
        # This trick does not affect inference on the test set: at inference time the ground truth answer will not
        # appear in the answer mask. This can be checked by uncommenting the following assertion statement.
        # Note that the assertion statement can trigger in the last batch if you're using a batch_size > 1 since
        # we append dummy examples to the last batch to make it the required batch size.
        # The assertion statement will also trigger in the dev set inference of NELL-995 since we randomly
        # sampled the dev set from the training data.
        # assert(float((answer_mask * (e_space == e_t.unsqueeze(1)).long()).sum()) == 0)
        false_negative_mask = (
            answer_mask * (e_space != e_t.unsqueeze(1)).long()
        ).float()
        return false_negative_mask

    def validate_action_mask(self, action_mask):
        """Sanity check: every mask entry must stay binary (0/1)."""
        action_mask_min = action_mask.min()
        action_mask_max = action_mask.max()
        assert action_mask_min == 0 or action_mask_min == 1
        assert action_mask_max == 0 or action_mask_max == 1

    def get_action_embedding(self, action: Action, kg: KnowledgeGraph):
        """
        Return (batch) action embedding which is the concatenation of the embeddings of
        the traversed edge and the target node.

        :param action (r, e):
            (Variable:batch) indices of the most recent action
                - r is the most recently traversed edge
                - e is the destination entity.
        :param kg: Knowledge graph enviroment.
        """
        relation_embedding = kg.get_relation_embeddings(action.rel)
        if self.relation_only:
            action_embedding = relation_embedding
        else:
            entity_embedding = kg.get_entity_embeddings(action.ent)
            action_embedding = torch.cat([relation_embedding, entity_embedding], dim=-1)
        return action_embedding

    def define_modules(self):
        """Instantiate the MLP, dropout layers and path-encoder LSTM; the
        input width depends on the relation_only* flags."""
        if self.relation_only:
            input_dim = self.history_dim + self.relation_dim
        elif self.relation_only_in_path:
            input_dim = self.history_dim + self.entity_dim * 2 + self.relation_dim
        else:
            input_dim = self.history_dim + self.entity_dim + self.relation_dim
        self.W1 = nn.Linear(input_dim, self.action_dim)
        self.W2 = nn.Linear(self.action_dim, self.action_dim)
        self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
        self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
        if self.relation_only_in_path:
            self.path_encoder = nn.LSTM(
                input_size=self.relation_dim,
                hidden_size=self.history_dim,
                num_layers=self.history_num_layers,
                batch_first=True,
            )
        else:
            self.path_encoder = nn.LSTM(
                input_size=self.action_dim,
                hidden_size=self.history_dim,
                num_layers=self.history_num_layers,
                batch_first=True,
            )

    def initialize_modules(self):
        """Xavier-initialize the linear layers and the LSTM weights
        (biases to zero) when xavier_initialization is enabled."""
        if self.xavier_initialization:
            nn.init.xavier_uniform_(self.W1.weight)
            nn.init.xavier_uniform_(self.W2.weight)
            for name, param in self.path_encoder.named_parameters():
                if "bias" in name:
                    nn.init.constant_(param, 0.0)
                elif "weight" in name:
                    nn.init.xavier_normal_(param)
|
6,290 | 31b420adebbe0d3ee6da2ed8236ece1526bdb063 | for _ in range(int(input())):
n = int(input())
s = input()
cur = 0
for i in s[::-1]:
if i==')':
cur+=1
else:
break
print("Yes") if cur>n//2 else print("No")
|
6,291 | c70b4ff26abe3d85e41bfc7a32cf6e1ce4c48d07 | import pytest
import torch
from homura.utils.containers import Map, TensorTuple
def test_map():
    """Map entries are reachable both by key and by attribute, and the
    dict-interface method names are write-protected."""
    # FIX: renamed the local from `map`, which shadowed the builtin.
    m = Map(a=1, b=2)
    m["c"] = 3
    for k, v in m.items():
        assert m[k] == getattr(m, k)
    # Overwriting dict-API attributes must raise.
    for k in ["update", "keys", "items", "values", "clear", "copy", "get", "pop"]:
        with pytest.raises(KeyError):
            setattr(m, k, 1)
def test_tensortuple():
    """TensorTuple keeps the element dtype and `.to` casts every element."""
    pair = (torch.randn(3, 3), torch.randn(3, 3))
    tt = TensorTuple(pair)
    assert tt[0].dtype == torch.float32
    casted = tt.to(torch.int32)
    assert casted[0].dtype == torch.int32
|
6,292 | 9efd83524ebb598f30c8fb6c0f9f0c65333578e6 | #implement variable!
import numpy as np
class Variable:
    """Container for a value flowing through the computation graph."""

    def __init__(self, data):
        self.data = data


class Function:
    """Base class; specific functions are implemented in subclasses.

    Calling an instance unwraps the input Variable, applies forward()
    and wraps the result in a new Variable so functions compose.
    """

    def __call__(self, input):
        x = input.data  # data extract
        # FIX: method renamed from the misspelled `foward` everywhere.
        y = self.forward(x)
        output = Variable(y)  # here! is key point
        return output

    def forward(self, x):
        raise NotImplementedError()


class Square(Function):
    """y = x ** 2"""

    def forward(self, x):
        return x ** 2


class Exp(Function):
    """y = e ** x"""

    def forward(self, x):
        return np.exp(x)


# input/output of a Function.__call__ is unified as a variable instance.
square = Square()
exp = Exp()

# like a composite function
# x -> [Square] -> a -> [Exp] -> b -> [Square] -> y
x = Variable(np.array(0.5))
a = square(x)
b = exp(a)
y = square(b)
print(y.data)
6,293 | e8eac1e4433eee769d317de9ba81d5181168fdca | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""""""""""""""""""""""""""""""""""""""""""""""
" Filename: time.py
"
" Author: xss - callmexss@126.com
" Description: Show local time
" Create: 2018-07-02 20:20:17
"""""""""""""""""""""""""""""""""""""""""""""""
from datetime import datetime
# Emit a minimal CGI-style HTML page stamped with the current local time.
page = f'''\
<html>
<body>
<p>Generated {datetime.now()}</p>
</body>
</html>'''
print(page)
|
6,294 | 9f86ff37d3a72364b5bd83e425d8151136c07dd3 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
import os
import torch
import numpy as np
import pickle
from sklearn.linear_model import Ridge, Lasso
from biplnn.log import getLogger
from biplnn.utils import load_data_new, load_model, PathManager, load_data_new
from biplnn.mdl.lmt import LinearModelTree
from biplnn import config
logger = getLogger(__name__)
IMG_SIZE = 28
MIN_NODE_SIZE = 15
MIN_SPLIT_IMPROVEMENT = 10
def fit_linear_model(x, y):
    """Fit an L1-regularised linear model on (x, y) and wrap it so its
    predictions come back as torch tensors."""
    logger.info("Using Lasso")
    # Lasso.fit returns the fitted estimator itself.
    return SharedScalerModel(Lasso(alpha=0.01).fit(x, y))
class SharedScalerModel:
    """Adapter around a fitted sklearn linear model that exposes its
    coefficients and predictions as float64 torch tensors."""

    def __init__(self, lm):
        self.lm = lm
        # Row-vector view of the coefficients, float64 to match predict().
        self.coef_ = torch.unsqueeze(torch.tensor(lm.coef_, dtype=torch.float64), dim=0)
        self.intercept_ = lm.intercept_

    def predict(self, X):
        # Delegate to the wrapped model, converting to a tensor.
        return torch.tensor(self.lm.predict(X), dtype=torch.float64)
def train(c_1, c_2, id2pn, dataset):
    """Train a linear-model tree separating classes *c_1* and *c_2*.

    :param c_1: name of the first class
    :param c_2: name of the second class
    :param id2pn: mapping from original label id to a binary 0/1 label
    :param dataset: dataset identifier understood by load_data_new
    """
    if not os.path.exists("../model"):
        os.mkdir("../model")
    mdl_name = "{}_{}".format(c_1, c_2)
    logger.info("Train the model: {} {}".format(mdl_name, id2pn))
    # Load both splits, flatten the images and relabel to binary targets.
    train_data, train_labels = load_data_new(c_1, c_2, train=True, dataset=dataset)
    train_data = train_data.view(-1, IMG_SIZE * IMG_SIZE).cpu()
    train_labels = torch.tensor([id2pn[i.item()] for i in train_labels], dtype=torch.float64)
    test_data, test_labels = load_data_new(c_1, c_2, train=False, dataset=dataset)
    test_data = test_data.view(-1, IMG_SIZE * IMG_SIZE)
    test_labels = np.array([id2pn[i.item()] for i in test_labels])
    # Count positives/negatives for the data-information report below.
    counter = defaultdict(int)
    for i in train_labels:
        counter[i.item()] += 1
    train_pos, train_neg = counter[1], counter[0]
    counter = defaultdict(int)
    for i in test_labels:
        counter[i.item()] += 1
    test_pos, test_neg = counter[1], counter[0]
    logger.info("Train_labels {} ".format(train_labels))
    logger.info("Test_labels {} ".format(test_labels))
    # NOTE(review): the LaTeX-style backslashes below (\#, \h) are invalid
    # string escapes; kept byte-identical -- consider a raw string.
    logger.info("""
======================================================================
 Data Information
 & \# Positive & \# Negative & \# Positive & \# Negative \\
\hline
{} {} & {} & {} & {} & {} \\
\hline
======================================================================
    """.format(c_1, c_2, train_pos, train_neg, test_pos, test_neg))
    logger.info("train_labels {}".format(train_labels))
    lmt = LinearModelTree(MIN_NODE_SIZE, fit_linear_model, min_split_improvement=MIN_SPLIT_IMPROVEMENT)
    lmt.build_tree(train_data, train_labels)
    logger.info("Finish building trees")
    lmt.merge_lrs(lmt.root)
    logger.info("Finish merging trees")
    # Persist the tree, then reload it to verify the round trip before
    # evaluating on both splits.
    path_manager = PathManager(mdl_name="LMT", c_1=c_1, c_2=c_2, dataset=dataset, if_train_set=None)
    model_path = path_manager.mdl_path()
    with open(model_path, "wb") as f:
        pickle.dump(lmt, f)
    lmt = load_model(c_1, c_2, dataset=dataset, model_name="LMT")
    train_data = train_data.to(config.DEVICE)
    _test("LMT", lmt, train_data, train_labels, "Trainset")
    _test("LMT", lmt, test_data, test_labels, "Testset")
def _test(mdl_name, lmt, test_data, test_labels, if_train):
    """Evaluate *lmt* on the given split and log its accuracy.

    Predictions above 0.5 count as the positive class.
    """
    y_pred = lmt.predict_positive(test_data)
    correct = 0
    for idx, expected in enumerate(test_labels):
        predicted = 1 if y_pred[idx] > 0.5 else 0
        logger.debug("p_label: {} Prob: {} train_label: {}".format(predicted, y_pred[idx], expected))
        correct += int(predicted == expected)
    precision = correct * 1.0 / len(test_labels)
    logger.info("[{} dataset] Model: {} Accuracy: {}/{}={}".format(if_train, mdl_name, correct, len(test_labels), precision))
def load(model_path):
    """Unpickle and return a model from *model_path*.

    :param model_path: path to a pickle file written with pickle.dump.
    :return: the deserialized object.
    """
    # BUG FIX: the original `pickle.load(open(path, "rb"))` leaked the
    # file handle; the context manager closes it deterministically.
    with open(model_path, "rb") as f:
        return pickle.load(f)
def test_1():
    """Smoke-test a persisted LMT model on the Pullover/Coat split."""
    mdl = load_model("Pullover", "Coat", "FMNIST", "LMT")
    images, _labels = load_data_new("Pullover", "Coat", train=False, dataset="FMNIST")
    flat = images.view(-1, 784)
    out = mdl.forward(flat)
    logger.info("forward.size() => {}".format(out.size()))
    probs = mdl.predict_positive(flat)
    logger.info("prob.size() => {}".format(probs.size()))
if __name__ == '__main__':
    # Older entry points kept for reference:
    # main()
    # train_main("Pullover", "Coat")
    # test("Pullover", "Coat", FMNIST.id2pn_label(FMNIST.str2id("Pullover"), FMNIST.str2id("Coat")))
    test_1()
|
6,295 | 5a3b88f899cfb71ffbfac3a78d38b748bffb2e43 | # coding=utf-8
import tensorflow as tf
import numpy as np
# NOTE(review): a ~180-row hard-coded float matrix previously assigned to
# `state` here was removed — it was dead code, unconditionally overwritten
# by the assignments below before ever being read.

# Synthetic 20x4 state: random ints in [1, 5), with the first five rows
# forced to all-ones so batch_normalization sees constant columns.
state = np.random.randint(1, 5, size=(20, 4))
state[0:5, :] = 1

split_size, window_size, history_length, num_channels = (2, 10, 20, 4)
batch_size = 1

# Placeholder for a batch of (history_length x num_channels) states.
s_t = tf.placeholder(dtype=tf.float32, shape=[None, history_length, num_channels])
# Split the history axis into (split_size, window_size) so conv2d can slide
# a [1, kernel_size] kernel along each window.
reshape = tf.reshape(s_t, shape=[batch_size, split_size, window_size, num_channels])

kernel_size = 2
filter_size = 3  # number of filters: 1 filter yields 1 output column, n yield n

# Row-wise convolution over each window.
conv2d = tf.layers.conv2d(inputs=reshape, filters=filter_size,
                          kernel_size=[1, kernel_size], strides=(1, 1),
                          padding='valid', activation=None)

# Normalizes each column independently; a column whose values are all
# identical normalizes to 0.
normal = tf.layers.batch_normalization(inputs=reshape, training=True, scale=True)

with tf.Session() as sess:
    # BUG FIX: tf.initialize_all_variables() was deprecated in 2015 and
    # removed from TF; global_variables_initializer() is the supported
    # equivalent.
    sess.run(tf.global_variables_initializer())
    result = sess.run(fetches=s_t, feed_dict={s_t: [state]})
    print(result)
    result = sess.run(fetches=conv2d, feed_dict={s_t: [state]})
    print(result)
|
6,296 | 4d524bb4b88b571c9567c651be1b1f1f19fd3c0b | #Recursively parse a string for a pattern that can be either 1 or 2 characters long |
6,297 | af217d0cc111f425282ee21bd47d9007a69a6239 | import math
# Banner and user input for the two legs of the right triangle.
print("programa que calcula hipotenusa tomando el valor de los catetos en tipo double---")
print("------------------------------------------------------------------------")
print(" ")
# BUG FIX: typos in the user-facing prompts ("igrese" -> "ingrese",
# "catebo" -> "cateto").
catA = float(input("ingrese el valor del cateto A"))
catB = float(input("ingrese el valor del cateto B"))
def calcularHipotenusa(catA, catB):
    """Print and return the hypotenuse of a right triangle with legs catA, catB.

    Now also returns the length as a float (previously returned None),
    which is backward-compatible for existing callers.
    """
    # math.hypot is the stdlib one-liner for sqrt(a**2 + b**2) and is more
    # numerically robust than squaring by hand.
    hipotenusa = math.hypot(catA, catB)
    print("la hipotenusa es: ", hipotenusa)
    return hipotenusa
# Compute and display the hypotenuse for the user-supplied legs.
calcularHipotenusa(catA,catB)
|
6,298 | fde62dd3f5ee3cc0a1568b037ada14835c327046 | import tkinter as tk
import tkinter.messagebox as tkmb
import psutil
import os
import re
import subprocess
from subprocess import Popen, PIPE, STDOUT, DEVNULL
import filecmp
import re
import time
import threading
import datetime
import re
debian = '/etc/debian_version'
redhat = '/etc/redhat-release'
def PrintaLog(texto):
    """Append a timestamped line for `texto` to /var/log/patriot.

    Requires write permission on /var/log (the tool runs as root).
    """
    logtime = time.ctime(time.time())
    entry = "%s %s\n" % (logtime, texto)
    # Context manager guarantees flush + close even if the write raises
    # (the original flushed/closed by hand and leaked on error).
    with open("/var/log/patriot", "a") as f:
        f.write(entry)
def PrintaMSG(texto):
    """Show an alert popup via the alertiqt.py helper, without blocking.

    The process is fire-and-forget; its handle is intentionally dropped.
    """
    # SECURITY FIX: the original built a shell string with `texto`
    # interpolated inside quotes — texto contains process paths that an
    # attacker can influence, allowing shell injection. List-form argv
    # with shell=False passes the text as a single literal argument.
    subprocess.Popen(["python3", "alertiqt.py", texto],
                     stdout=subprocess.PIPE, stderr=DEVNULL)
def TestIntegrity(File):
    """Return 1 when `File` belongs to no package or its package fails
    verification; return 0 when the owning package verifies clean.

    Uses `rpm -Vf` on RedHat-family systems, `dpkg -S` + `dpkg --verify`
    on Debian-family systems (detected via the module-level `redhat` path).
    """
    if os.path.exists(redhat):
        # rpm -Vf prints nothing when the file verifies clean.
        # SECURITY FIX: list argv instead of a shell string built from File.
        proc = subprocess.Popen(["rpm", "-Vf", File], stdout=subprocess.PIPE)
        output = proc.communicate()[0]
        return 1 if output else 0

    # Debian family: find the package owning File.
    proc = subprocess.Popen(["dpkg", "-S", File],
                            stdout=subprocess.PIPE, stderr=DEVNULL)
    output = proc.communicate()[0]
    if proc.returncode == 1:
        # dpkg -S is unreliable with /usr-merged paths; retry with the
        # /usr prefix stripped.
        retry = subprocess.Popen(["dpkg", "-S", re.sub('/usr', '', File)],
                                 stdout=subprocess.PIPE, stderr=DEVNULL)
        output = retry.communicate()[0]
        if retry.returncode == 1:
            # Not owned by any package at all.
            return 1

    # BUG FIX: communicate() returns bytes; the original split on a str
    # separator ("..."), which raises TypeError in Python 3. Decode before
    # parsing the "package: path" output.
    packagename = output.decode(errors="replace").split(":")[0]
    verify = subprocess.Popen(["dpkg", "--verify", packagename],
                              stdout=subprocess.PIPE)
    outputdebsum = verify.communicate()[0]
    print(outputdebsum)
    # dpkg --verify prints nothing when every file in the package is intact.
    return 1 if outputdebsum else 0
def ScanUnsigned():
    """Poll every 2s for newly started processes and alert when one's
    executable does not belong to an intact distro package.

    Runs forever; intended to live in a daemon thread.
    """
    known_pids = psutil.pids()
    while True:
        snapshot = psutil.pids()
        seen = set(known_pids)
        for pid in (p for p in snapshot if p not in seen):
            try:
                proc = psutil.Process(pid=pid)
                with proc.oneshot():
                    exe = proc.exe()
                    broken = TestIntegrity(exe)
                    # BUG FIX: the original called
                    # re.match(exeproceso, "/usr/sbin/ausearch"), which
                    # treats the executable path as a regex *pattern* and
                    # prefix-matches it against the ausearch path. The
                    # intent — skip our own ausearch invocations (see
                    # AuSearch) — is a plain equality check.
                    if broken == 1 and exe != "/usr/sbin/ausearch":
                        msg = "New process that not belongs to any package or package was modified: %i %s" % (proc.pid, exe)
                        alert = threading.Thread(target=PrintaMSG, args=(msg,), daemon=True)
                        alert.start()
                        PrintaLog(msg)
            except Exception as e:
                # Process may have exited before we could inspect it.
                print(e)
        known_pids = snapshot
        time.sleep(2)
def ScanConnections():
    """Alert whenever a program not seen before opens a TCP/IP connection.

    Polls every 2s; runs forever, intended for a daemon thread.
    """
    def _owning_exes(conns):
        # Map each connection to its owner's executable path.
        exes = []
        for conn in conns:
            # BUG FIX: the original had no error handling here — the owner
            # can exit between the snapshot and the lookup (NoSuchProcess),
            # and conn.pid can be None for kernel-owned sockets, either of
            # which killed the whole monitoring thread.
            try:
                proc = psutil.Process(pid=conn.pid)
                with proc.oneshot():
                    exes.append(proc.exe())
            except Exception as e:
                print(e)
        return exes

    known = set(_owning_exes(psutil.net_connections()))
    while True:
        for exe in _owning_exes(psutil.net_connections()):
            if exe not in known:
                msg = "New Process initiating TCP/IP connection: %s" % exe
                alert = threading.Thread(target=PrintaMSG, args=(msg,), daemon=True)
                alert.start()
                PrintaLog(msg)
                # Adding immediately also fixes duplicate alerts when the
                # same program holds several connections in one snapshot.
                known.add(exe)
        time.sleep(2)
def AuSearch():
    """Poll the audit log roughly every two minutes for watched audit keys
    and raise an alert for each key that produced new records."""
    watched = {
        "modules": "New module loaded in Kernel",
        "code_injection": "DLL Inject",
        "register_injection": "DLL Inject",
    }
    while True:
        # Look back two minutes, truncated to the start of the minute.
        since = datetime.datetime.now() - datetime.timedelta(minutes=2)
        start = str(since.time().replace(second=0, microsecond=0))
        for key, description in watched.items():
            cmd = 'ausearch -k "' + key + '" --start "' + start + '"'
            proc = subprocess.Popen([cmd], stdout=subprocess.PIPE,
                                    shell=True, stderr=DEVNULL)
            # ausearch prints matching records; empty output means no hits.
            if proc.communicate()[0]:
                message = "Audit Alert: %s" % description
                alert = threading.Thread(target=PrintaMSG, args=(message,),
                                         daemon=True)
                alert.start()
                PrintaLog(message)
        time.sleep(115)
def KeyBoardSearch():
    """Alert whenever the set of X input devices changes — a newly plugged
    keyboard may be a hardware keylogger. Polls once a minute, forever."""
    command = "xinput --list"

    def snapshot():
        proc = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)
        return proc.communicate()[0]

    baseline = snapshot()
    while True:
        current = snapshot()
        if current != baseline:
            message = "New keyboard detected"
            alert = threading.Thread(target=PrintaMSG, args=(message,))
            alert.setDaemon(True)
            alert.start()
            PrintaLog(message)
            # New baseline so the same device set does not re-alert.
            baseline = current
        time.sleep(60)
# Launch each monitor in its own daemon thread, then idle in the main
# thread so the process stays alive (daemon threads die with the main one).
# Thread.setDaemon() is deprecated since Python 3.10; pass daemon=True to
# the constructor instead.
for _target in (KeyBoardSearch, ScanUnsigned, ScanConnections, AuSearch):
    _worker = threading.Thread(target=_target, daemon=True)
    _worker.start()

while True:
    time.sleep(100)
|
6,299 | 596814032218c3db746f67e54e4f1863753aea06 | # -*- coding: utf-8 -*-
# @Time : 2019/3/21 20:12
# @Author : for
# @File : test01.py
# @Software: PyCharm
import socket
# Simple interactive TCP client: connect to this host on port 3456, send
# one command read from the user, print the server's reply.
host = socket.gethostname()
port = 3456
# BUG FIX: socket objects are context managers in Python 3 — this
# guarantees close() even if connect/send/recv raises (the original only
# closed on the success path).
with socket.socket() as s:
    s.connect((host, port))
    cmd = input(">>>")
    s.sendall(cmd.encode())
    data = s.recv(1024)
    print(data.decode())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.