text stringlengths 38 1.54M |
|---|
import os
import math
import torch
import torch.nn as nn
import numpy as np
def cnn_block(im_height, im_width, input_channels, channels, kernels, strides, paddings, activation, last_act, batchnorm, **kwargs):
    """Build a convolutional stack as an nn.Sequential.

    Each stage is:
        Conv2d
        BatchNorm2d (optional, when ``batchnorm`` is True)
        Activation
        Dropout2d   (optional, when ``dropout_prob`` is passed)
        MaxPool2d   (optional, when ``maxpool_kernels``/``maxpool_strides`` are passed)

    im_height/im_width -- accepted for interface compatibility; not used here
                          (output sizes are computed by cnn_output_shape).
    channels/kernels/strides/paddings -- per-layer lists of equal length.
    last_act -- if False, the final Conv2d is left bare so the caller can
                customize the head.
    activation -- name of an nn activation class, or 'Swish'; 'LeakyReLU'
                  additionally reads kwargs['leak_slope'].
    """
    if activation == 'Swish':
        act = Swish()
    elif activation == 'LeakyReLU':
        act = nn.LeakyReLU(negative_slope=kwargs['leak_slope'], inplace=True)
    else:
        act = getattr(nn, activation)()
    assert len(channels) == len(kernels), "length of channels: %s, length of kernels: %s" % (len(channels), len(kernels))
    assert len(channels) == len(strides), "length of channels: %s, length of strides: %s" % (len(channels), len(strides))
    # BUG FIX: this message previously said "length of kernels" while checking paddings.
    assert len(channels) == len(paddings), "length of channels: %s, length of paddings: %s" % (len(channels), len(paddings))
    layers = []
    in_c = input_channels
    for i, out_c in enumerate(channels):
        layers.append(nn.Conv2d(in_c, out_c, kernel_size=kernels[i], stride=strides[i], padding=paddings[i]))
        if (i < (len(channels) - 1)) or last_act:  # last layer will be customized
            if batchnorm:
                layers.append(nn.BatchNorm2d(out_c))
            # NOTE: the same activation module instance is shared across
            # stages; fine for the stateless activations used here.
            layers.append(act)
            if 'dropout_prob' in kwargs:
                layers.append(nn.Dropout2d(kwargs['dropout_prob']))
            if 'maxpool_kernels' in kwargs and 'maxpool_strides' in kwargs:
                layers.append(nn.MaxPool2d(kernel_size=kwargs['maxpool_kernels'][i], stride=kwargs['maxpool_strides'][i]))
        in_c = out_c
    return nn.Sequential(*layers)
def deconv_block(im_height, im_width, input_channels, channels, kernels, strides, paddings, activation, last_act, batchnorm, **kwargs):
    """Build a transposed-convolution (upsampling) stack as an nn.Sequential.

    Mirrors cnn_block but with ConvTranspose2d and no dropout/pooling options.
    im_height/im_width are accepted for interface compatibility and unused.
    last_act=False leaves the final layer bare for a custom head.
    """
    if activation == 'Swish':
        act = Swish()
    elif activation == 'LeakyReLU':
        act = nn.LeakyReLU(negative_slope=kwargs['leak_slope'], inplace=True)
    else:
        act = getattr(nn, activation)()
    assert len(channels) == len(kernels), "length of channels: %s, length of kernels: %s" % (len(channels), len(kernels))
    assert len(channels) == len(strides), "length of channels: %s, length of strides: %s" % (len(channels), len(strides))
    # BUG FIX: this message previously said "length of kernels" while checking paddings.
    assert len(channels) == len(paddings), "length of channels: %s, length of paddings: %s" % (len(channels), len(paddings))
    layers = []
    in_c = input_channels
    for i, out_c in enumerate(channels):
        layers.append(nn.ConvTranspose2d(in_c, out_c, kernel_size=kernels[i], stride=strides[i], padding=paddings[i]))
        if (i < (len(channels) - 1)) or last_act:
            if batchnorm:
                layers.append(nn.BatchNorm2d(out_c))
            layers.append(act)
        in_c = out_c
    return nn.Sequential(*layers)
def mlp_block(input_dim, hidden_dims, activation, **kwargs):
    """Build a fully-connected stack: one Linear + activation per hidden width.

    activation -- name of an nn activation class, or 'Swish'; 'LeakyReLU'
                  additionally reads kwargs['leak_slope'].
    """
    if activation == 'Swish':
        act = Swish()
    elif activation == 'LeakyReLU':
        act = nn.LeakyReLU(negative_slope=kwargs['leak_slope'], inplace=True)
    else:
        act = getattr(nn, activation)()
    # Pair consecutive widths to describe each Linear layer.
    widths = [input_dim] + list(hidden_dims)
    layers = []
    for fan_in, fan_out in zip(widths[:-1], widths[1:]):
        layers.extend((nn.Linear(fan_in, fan_out), act))
    return nn.Sequential(*layers)
class Identity(nn.Module):
    """No-op module whose forward returns its input unchanged.

    Accepts (and ignores) arbitrary constructor arguments so it can stand
    in for any configurable layer that has been disabled.
    """

    def __init__(self, *args, **kwargs):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class Reshape(nn.Module):
    """Reshape incoming tensors to (batch, *shape), inferring the batch size."""

    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(-1, *self.shape)
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.sigmoid(x) * x
def conv_output_shape(h_w, kernel_size=1, stride=1, padding=0, dilation=1):
    """Compute the (h, w) output size of a single Conv2d layer.

    h_w -- input size: an int (square) or an (h, w) tuple.
    kernel_size/stride/padding -- int or per-axis tuple.
    dilation -- int only (matches the original interface).

    Uses the standard formula:
        out = (in + 2*pad - dilation*(kernel - 1) - 1) // stride + 1
    """
    # Normalize scalars to (h, w) pairs; isinstance is the idiomatic check
    # (replaces the original `type(x) is not tuple`).
    if not isinstance(h_w, tuple):
        h_w = (h_w, h_w)
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, kernel_size)
    if not isinstance(stride, tuple):
        stride = (stride, stride)
    if not isinstance(padding, tuple):
        padding = (padding, padding)
    h = (h_w[0] + (2 * padding[0]) - (dilation * (kernel_size[0] - 1)) - 1) // stride[0] + 1
    w = (h_w[1] + (2 * padding[1]) - (dilation * (kernel_size[1] - 1)) - 1) // stride[1] + 1
    return h, w
def deconv_output_shape(h_w, kernel_size=1, stride=1, padding=0, dilation=1):
    """Compute the (h, w) output size of a single ConvTranspose2d layer.

    h_w -- input size: an int (square) or an (h, w) tuple.
    kernel_size/stride/padding -- int or per-axis tuple.
    dilation -- int only (matches the original interface).

    Uses the standard formula (output_padding assumed 0):
        out = (in - 1)*stride - 2*pad + dilation*(kernel - 1) + 1
    """
    # Normalize scalars to (h, w) pairs; isinstance is the idiomatic check
    # (replaces the original `type(x) is not tuple`).
    if not isinstance(h_w, tuple):
        h_w = (h_w, h_w)
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, kernel_size)
    if not isinstance(stride, tuple):
        stride = (stride, stride)
    if not isinstance(padding, tuple):
        padding = (padding, padding)
    h = (h_w[0] - 1) * stride[0] - 2 * padding[0] + (dilation * (kernel_size[0] - 1)) + 1
    w = (h_w[1] - 1) * stride[1] - 2 * padding[1] + (dilation * (kernel_size[1] - 1)) + 1
    return h, w
def cnn_output_shape(h, w, kernels, strides, paddings):
    """Fold conv_output_shape over a whole stack of conv layers."""
    size = (h, w)
    for kernel, stride, pad in zip(kernels, strides, paddings):
        size = conv_output_shape(size, kernel, stride, pad)
    return size
def dcnn_output_shape(h, w, kernels, strides, paddings):
    """Fold deconv_output_shape over a whole stack of transposed-conv layers."""
    size = (h, w)
    for kernel, stride, pad in zip(kernels, strides, paddings):
        size = deconv_output_shape(size, kernel, stride, pad)
    return size
########################################################
####################Residual Network####################
########################################################
def wres_block_params(stride, swap_cnn):
    """Describe one Wres_Block's two 3x3 convs as (kernels, strides, paddings).

    swap_cnn=True (JEM variant) puts the stride on the second conv;
    otherwise it goes on the first.
    """
    first, second = (1, stride) if swap_cnn else (stride, 1)
    return [3, 3], [first, second], [1, 1]
class Wres_Block(nn.Module):
    """Wide-ResNet pre-activation residual block.

    stride   -- stride of the 1st conv (2nd conv if swap_cnn=True) in the
                1st block of a group
    swap_cnn -- False: original wresnet ordering; True: the JEM variant
    bn_flag  -- whether to apply batch normalization
    """

    def __init__(self, in_c, out_c, stride, activation, dropout_rate=0.2, leak=0.2, swap_cnn=False, bn_flag=False):
        super().__init__()
        self.activation = activation
        # Pre-activation norms; replaced by no-ops when BN is disabled.
        self.bn1 = nn.BatchNorm2d(in_c, momentum=0.9) if bn_flag else Identity()
        self.bn2 = nn.BatchNorm2d(out_c, momentum=0.9) if bn_flag else Identity()
        self.dropout = nn.Dropout(p=dropout_rate) if dropout_rate != 0.0 else Identity()
        # The stride lands on conv2 (swap_cnn) or conv1 (classic wresnet).
        s1, s2 = (1, stride) if swap_cnn else (stride, 1)
        self.conv1 = nn.Conv2d(in_c, out_c, kernel_size=3, stride=s1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(out_c, out_c, kernel_size=3, stride=s2, padding=1, bias=True)
        # Project the skip path only when the channel count changes.
        if in_c == out_c:
            self.shortcut = Identity()
        elif bn_flag:
            self.shortcut = nn.Sequential(
                nn.BatchNorm2d(in_c, momentum=0.9),
                self.activation,
                nn.Conv2d(in_c, out_c, kernel_size=1, stride=stride, bias=True))
        else:
            self.shortcut = nn.Conv2d(in_c, out_c, kernel_size=1, stride=stride, bias=True)

    def forward(self, x):
        out = self.dropout(self.conv1(self.activation(self.bn1(x))))
        out = self.conv2(self.activation(self.bn2(out)))
        return out + self.shortcut(x)
class Wide_Residual_Net(nn.Module):
    """
    Implementation of Wide Residual Network https://arxiv.org/pdf/1605.07146.pdf

    depth -- total conv depth; must satisfy depth = 6n + 4
    width -- widening factor applied to the base widths (16, 32, 64)
    conv_params bookkeeping: every conv's (kernel, stride, padding) is
    accumulated so the flattened feature size can be computed analytically
    via cnn_output_shape.
    """
    def __init__(self, depth, width, im_height=32, im_width=32, input_channels=3, num_classes=10,
                 activation='LeakyReLU', latent_dim=128, dropout_rate=0.2, leak=0.2, swap_cnn=True, bn_flag=False, start_act=True, sum_pool=False):
        super(Wide_Residual_Net, self).__init__()
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        # n residual blocks per group (3 groups of wres blocks).
        n = (depth - 4) // 6
        widths = [16] + [int(v * width) for v in (16, 32, 64)]
        print('WRESNET-%d-%d' %(depth, width))
        if activation == 'LeakyReLU':
            self.activation = nn.LeakyReLU(leak)
        elif activation == 'Swish':
            self.activation = Swish()
        else:
            self.activation = getattr(nn, activation)()
        self.dropout_rate = dropout_rate
        self.leak = leak
        self.swap_cnn = swap_cnn
        self.bn_flag = bn_flag
        self.start_act = start_act
        # NOTE(review): sum_pool is stored but never read in forward() -- confirm.
        self.sum_pool = sum_pool
        # Running record of every conv's geometry, used to size the flat output.
        conv_params = {
            'kernels' : [],
            'strides' : [],
            'paddings' : []
        }
        self.group1, conv_params = self._init_group(input_channels, widths[0], conv_params)
        self.group2, conv_params = self._wres_group(n, widths[0], widths[1], 1, conv_params)
        self.group3, conv_params = self._wres_group(n, widths[1], widths[2], 2, conv_params)
        self.group4, conv_params = self._wres_group(n, widths[2], widths[3], 2, conv_params)
        self.flatten = nn.Flatten()
        out_h, out_w = cnn_output_shape(im_height,
                                        im_width,
                                        kernels=conv_params['kernels'],
                                        strides=conv_params['strides'],
                                        paddings=conv_params['paddings'])
        # NOTE(review): flatten / flatten_output_dim are computed but not used
        # in forward(); presumably consumed by a subclass or caller -- confirm.
        self.flatten_output_dim = out_h * out_w * widths[3]
        # Fixed 4x4 average pool; assumes a 32x32 input yielding 8x8 features
        # at this point -- TODO confirm for other input sizes.
        self.pooling = nn.AvgPool2d(4)

    def _wres_group(self, num_blocks, in_c, out_c, stride, conv_params):
        """Build one group of num_blocks residual blocks; only the first block
        changes channels / applies the stride. Also extends conv_params."""
        blocks = []
        for b in range(num_blocks):
            blocks.append(Wres_Block(in_c=(in_c if b == 0 else out_c),
                                     out_c=out_c,
                                     stride=(stride if b == 0 else 1),
                                     activation=self.activation,
                                     dropout_rate=self.dropout_rate,
                                     leak=self.leak,
                                     swap_cnn=self.swap_cnn,
                                     bn_flag=self.bn_flag))
            # Mirror the block's conv geometry into the running record.
            k, s, p = wres_block_params(stride=(stride if b == 0 else 1), swap_cnn=self.swap_cnn)
            conv_params['kernels'] = conv_params['kernels'] + k
            conv_params['strides'] = conv_params['strides'] + s
            conv_params['paddings'] = conv_params['paddings'] + p
        return nn.Sequential(*blocks), conv_params

    def _init_group(self, in_c, out_c, conv_params):
        """Stem: optional activation followed by a 3x3 stride-1 conv."""
        if self.start_act:
            init_group = nn.Sequential(
                self.activation,
                nn.Conv2d(in_c, out_c, kernel_size=3, stride=1, padding=1, bias=True))
        else:
            init_group = nn.Sequential(
                nn.Conv2d(in_c, out_c, kernel_size=3, stride=1, padding=1, bias=True))
        conv_params['kernels'].append(3)
        conv_params['strides'].append(1)
        conv_params['paddings'].append(1)
        return init_group, conv_params

    def forward(self, x):
        # Stem + three residual groups, then spatial average pooling.
        # Returns pooled feature maps (no flatten / classifier head here).
        h = self.group1(x)
        h = self.group2(h)
        h = self.group3(h)
        h = self.group4(h)
        h = self.pooling(h)
        return h
|
from __future__ import print_function
from __future__ import absolute_import
import json, urllib2, os, sys
from bs4 import BeautifulSoup
## MainPageGenerator class is used for generating main page that contains domain trees (Analysis, Calibration and Alignment, Core, DAQ etc.)
class MainPageGenerator:
    """Generates the CMSSW reference-manual main page plus one iframe "tree"
    page per domain (Analysis, Core, DAQ, ...), combining doxygen output,
    TagCollector JSON feeds and hard-coded TWiki links.

    NOTE(review): Python 2 code (urllib2, BeautifulSoup 3-style calls);
    runs inside the doxygen output directory given by `path`.
    """
    ## Constructor method.
    # @param dataPath parameter gives path of data directory that contains .js, .css and image files needed for generating tree pages
    # @param path is the reference manual directory path and it is used as destination and source.
    # @param cmsVer is version of CMSSW.
    def __init__(self, dataPath, path, cmsVer = ""):
        self.path = path
        self.dataPath = dataPath
        self.CMSVER = cmsVer
        # TagCollector proxy endpoints serving JSON (managers, users, packages).
        self.managersURL = 'http://cmsdoxy.web.cern.ch/cmsdoxy/tcproxy.php?type=managers'
        self.usersURL = 'http://cmsdoxy.web.cern.ch/cmsdoxy/tcproxy.php?type=users'
        self.CMSSWURL = 'http://cmsdoxy.web.cern.ch/cmsdoxy/tcproxy.php?type=packages&release=CMSSW_4_4_2'
        # Per-domain TWiki documentation links shown at the top of each tree page.
        self.tWikiLinks = {'Analysis':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideCrab',
                           'Calibration and Alignment':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideCalAli',
                           'Core':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrameWork',
                           'DAQ':'https://twiki.cern.ch/twiki/bin/view/CMS/TriDASWikiHome',
                           'DQM':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideDQM',
                           'Database':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideCondDB',
                           'Documentation':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuide',
                           'Fast Simulation':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFastSimulation',
                           'Full Simulation':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideSimulation',
                           'Generators':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideEventGeneration',
                           'Geometry':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideDetectorDescription',
                           'HLT':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideHighLevelTrigger',
                           'L1':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideL1Trigger',
                           'Reconstruction':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideReco',
                           'Visualization':'https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideVisualization'}
        # Nested domain -> subsystem -> package tree; filled lazily by PrepareData().
        self.data = None
        self.GitLink = "https://github.com/cms-sw/cmssw/tree/" + self.CMSVER + "/%s/%s"
        self.title = "<center>\n<h1>CMSSW Documentation</h1>\n<h2>" + self.CMSVER + "</h2>\n</center>\n"
        self.links = """
<p style="margin-left:10px;">
Learn <a href="ReferenceManual.html">how to build Reference Manual</a><br>
Learn more about <a target="_blank" href="http://www.stack.nl/~dimitri/doxygen/commands.html">special doxygen commands</a>
</p>\n\n"""
        # Script + CSS injected into <head>: drives the expandable domain rows
        # and lazy-loads each domain's iframe on first click.
        self.head = """
<!-- Content Script & Style -->
<script type="text/javascript">
var itemList = [];

function toggleHoba(item, path)
{
for(var i = 0; i < itemList.length; i++)
{
if(itemList[i] == item)
{
var iframe = $("#"+itemList[i]+"_div").children("iframe:first");
if(!iframe.attr("src"))
{
iframe.attr("src", path)
}
$("#"+item+"_div").slideToggle();
}
else
$("#"+itemList[i]+"_div").slideUp();
}
}

$(document).ready(function() {
searchBox.OnSelectItem(0);
$(".doctable").find("td").each(function(){ if (this.id.indexOf("hoba_") != -1)itemList.push(this.id);});
});
</script>
<style>
.DCRow
{
background: #eeeeff;
border-spacing: 0px;
padding: 0px;
border-bottom: 1px solid #c1c1dc;
}

.DCRow:hover
{
background: #cde4ec;
}
</style>
<!-- Content Script & Style -->
"""
        # Placeholder marking where generated content is spliced into the template.
        self.contentStamp = '$CONTENT$'
        # Use doxygen's index.html as the page template; keep a backup copy.
        self.mainPageTemplate = self.ReadFile("index.html")
        self.WriteFile("index_backup.html", self.mainPageTemplate) #backup file
        soup = BeautifulSoup(self.mainPageTemplate)
        soup.head.insert(len(soup.head), self.head)
        contents = soup.find("div", { "class" : "contents" })
        # Strip the template's original content, leaving only the stamp.
        for child in contents.findChildren():
            child.extract()
        contents.insert(0, self.contentStamp)
        self.mainPageTemplate = str(soup)
        self.mainPageTemplate = self.mainPageTemplate.replace("CSCDQM Framework Guide", "")
        # NOTE(review): these replace() calls have identical old/new arguments;
        # they presumably once un-escaped HTML entities ('&lt;' -> '<') and the
        # source appears to have been entity-decoded somewhere -- confirm.
        self.mainPageTemplate = self.mainPageTemplate.replace('<','<').replace('>', '>')
        print("Main page template created...")
        self.CreateBuildRefMan()
        print("RefMan created...")
        # Load the per-domain tree page template and the doxygen index pages
        # that PrepareData() mines for links.
        self.treePageTamplate = self.ReadFile(self.dataPath + "tree_template.html", pathFlag = False)
        self.classesSource = self.ReadFile("classes.html")
        self.filesSource = self.ReadFile("files.html")
        self.packageSource = self.ReadFile("pages.html")

    def ReadFile(self, fileName, pathFlag = True):
        """This method reads file directly or from path."""
        # pathFlag=True prefixes self.path; False reads fileName as given.
        if pathFlag:
            print("Read:", self.path + fileName)
            f = open(self.path + fileName)
        else:
            f = open(fileName)
            print("Read:", fileName)
        data = f.read()
        f.close()
        return data

    def WriteFile(self, fileName, data):
        """This method writes data"""
        # Always writes relative to self.path.
        print("Write:", self.path + fileName)
        f = open(self.path + fileName, "w")
        f.write(data)
        f.close()

    def GetFileName(self, fileName):
        """This method returns file name without extension"""
        if '.' in fileName:
            return fileName[0:fileName.find('.')]
        else:
            return fileName

    def ParseJsonFromURL(self, URL):
        """This method returns data which is read from URL"""
        u = urllib2.urlopen(URL)
        return json.loads(u.read())

    def __ParseItem(self, str_):
        # "Subsystem/Package" -> "Subsystem".
        return str_[0:str_.find('/')]

    def __ParseSubItem(self, str_):
        # "Subsystem/Package" -> "Package", or None if there is no slash.
        if '/' in str_:
            return str_[str_.find('/')+1:]
        else:
            return None

    def __GetHTMLItemDepth(self, item):
        # Depth in the doxygen file tree, derived from the row id's underscores.
        return item["id"].count("_") - 1 # 1 for doxygen 1.8.5, 2 for old ver.

    def __HTMLFileName(self, fileName):
        # Domain name -> iframe html file name ("Fast Simulation" -> "fast_simulation").
        return fileName.lower().replace(' ', '_')

    def PrepareData(self):
        """Build self.data: domain -> subsystem -> package -> header-file links,
        by merging the TagCollector feeds with the doxygen index pages."""
        self.managers = self.ParseJsonFromURL(self.managersURL)
        print("Managers loaded and parsed...")
        self.users = self.ParseJsonFromURL(self.usersURL)
        print("Users loaded and parsed...")
        # Seed each domain with its contact list (manager ids -> user records).
        self.data = {}
        for i in self.managers.keys():
            self.data[i] = {"__DATA__":{"Contact":[]}}
            for j in self.managers[i]:
                self.data[i]["__DATA__"]["Contact"].append(self.users[j])
        self.domains = self.ParseJsonFromURL(self.CMSSWURL)
        print("Domains loaded and parsed...")
        # Insert every "Subsystem/Package" under its domain, with a GitHub link.
        for i in self.domains.keys():
            for j in self.domains[i]:
                if self.__ParseItem(j) not in self.data[i]:
                    self.data[i][self.__ParseItem(j)] = {}
                if self.__ParseSubItem(j) not in self.data[i][self.__ParseItem(j)]:
                    self.data[i][self.__ParseItem(j)][self.__ParseSubItem(j)] = {}
                self.data[i][self.__ParseItem(j)][self.__ParseSubItem(j)]["__DATA__"] = {
                    'git': self.GitLink % (self.__ParseItem(j), self.__ParseSubItem(j))
                }
        # for getting package links
        soup = BeautifulSoup(self.packageSource)
        contents = soup.find("div", { "class" : "contents" })
        li = contents.findAll("tr", {})
        self.packages = {}
        for i in li:
            if i.a["href"]:
                self.packages[i.a.text] = i.a["href"]
        print("Packages parsed(%d)..." % len(self.packages))
        # for getting items from file.html
        soup = BeautifulSoup(self.filesSource)
        contents = soup.find("div", { "class" : "contents" })
        tr = contents.findAll("tr", {})
        self.classes= {}
        # If the tree is rooted at "src", shift all depths by one.
        origin = 0
        if tr[0].text == 'src': origin = -1
        # depth of interface items can be only 3
        # Walk the flat row list, tracking the current subsystem (level1) and
        # package (level2); collect only files under an "interface" directory.
        flag = False
        for i in tr:
            if self.__GetHTMLItemDepth(i) + origin == 1:
                self.classes[i.text] = {}
                level1 = i.text
                flag = False
            if self.__GetHTMLItemDepth(i) + origin == 2:
                self.classes[level1][i.text] = {}
                level2 = i.text
                flag = False
            if self.__GetHTMLItemDepth(i) + origin == 3 and i.text == u'interface':
                flag = True
            if self.__GetHTMLItemDepth(i) + origin == 3 and i.text != u'interface':
                flag = False
            # print i.text, self.__GetHTMLItemDepth(i)
            # raw_input()
            if flag and i.text != u'interface':
                self.classes[level1][level2][i.text] = i.a["href"]
                #self.ZEG = i
        print("Class hierarchy loaded(%d)..." % len(self.classes))
        # self.WriteFile("dbg.json", json.dumps(self.classes, indent = 1))
        # for parsing classes links from classes.html
        soup = BeautifulSoup(self.classesSource)
        contents = soup.find("div", { "class" : "contents" })
        td = contents.findAll("td", {})
        self.classesURLs = {}
        # add items to self.classesURLs
        for i in td:
            if i.a and 'href' in i.a:
                self.classesURLs[i.a.text] = i.a['href']
        print("Class URLs was loaded... (%s)" % len(self.classesURLs))
        # Attach package-doc and class-doc URLs to each package; prefer the
        # class page when the header's base name matches a documented class.
        for i in self.data.keys():
            for j in self.data[i].keys():
                if j not in self.classes: continue
                for k in self.data[i][j].keys():
                    if "Package " + j + "/" + k in self.packages:
                        self.data[i][j][k]["__DATA__"]["packageDoc"] = '../' + self.packages["Package " + j + "/" + k]
                    if k not in self.classes[j]: continue
                    for h in self.classes[j][k]:
                        if self.GetFileName(h) in self.classesURLs:
                            self.data[i][j][k][self.GetFileName(h)] = {"__DATA__": '../' + self.classesURLs[self.GetFileName(h)]}
                        else:
                            self.data[i][j][k][self.GetFileName(h) + ".h"] = {"__DATA__": '../' + self.classes[j][k][h]}

    def ExportJSON(self, fileName):
        """Dump the prepared domain tree as pretty-printed JSON."""
        if self.data == None:
            self.PrepareData()
        self.WriteFile(fileName, json.dumps(self.data, indent = 1))

    def CreateBuildRefMan(self):
        """Write the static ReferenceManual.html how-to page from the template."""
        content = """<h1>The Reference Manual </h1>
This is the CMSSW Reference Manual, the reference documentation of all classes and packages in CMSSW.<p>
This page explains how to write the documentation for your code.
</p><h2>Class Documentation</h2>
Classes and methods are documented with properly formatted <a target="_blank" class="el" href="d3/d88/namespacecomments.html">comments</a> in the code.<p>
Here is a template of a documented <a target="_blank" href="http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/Documentation/CodingRules/Template.h?rev=HEAD&cvsroot=CMSSW&content-type=text/vnd.viewcvs-markup">.h file</a>, and of a <a target="_blank" href="http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/Documentation/CodingRules/Template.cc?rev=HEAD&cvsroot=CMSSW&content-type=text/vnd.viewcvs-markup">.cc file</a>. The resulting doxygen page is <a target="_blank" class="el" href="d6/d3e/classTemplate.html">here</a>.
</p><h2>Package Documentation</h2>
Each package should contain a very brief description of its content and purpose. Remember that this is a reference, and not a user's guide: tutorials, howtos, etc. are best documented in the <a target="_blank" href="https://twiki.cern.ch/twiki/bin/view/CMS/SWGuide">CMS Offline Guide</a> and in the <a target="_blank" href="https://twiki.cern.ch/twiki/bin/view/CMS/WorkBook">WorkBook</a>. Cross links between the CMS Offline Guide and the WorkBook and this manual are a good way to avoid duplication of content.<p>
This documentation should be written in a file [Package]/doc/[Package].doc. The simplest way of doing this is to go to the doc/ directory in your package and then run the script
<a target="_blank" href="http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/*checkout*/CMSSW/Documentation/ReferenceManualScripts/scripts/makePackageDoc?rev=HEAD&cvsroot=CMSSW">makePackageDoc</a>,
which is available in your PATH.
</p><h2> How to generate your documentation locally </h2>
One you have updated your documentation, you can look at how it displays in the following way:
<ul>
<li>check out the following packages:
<pre> > cmsrel CMSSW_7_X_X
 > cd CMSSW_7_X_X/
 > cmsenv
 > git cms-addpkg Documentation
 > generate_reference_manual
 wait...
 > firefox doc/html/index.html </pre>
</li>
</ul>"""
        self.WriteFile('ReferenceManual.html', self.mainPageTemplate.replace(self.contentStamp, content))

    def CreateNewMainPage(self, outputFileName):
        """Write the main index page: one expandable row per domain with its
        contact list, plus a hidden iframe row that hosts the domain tree."""
        if self.data == None:
            self.PrepareData()
        contents = """
<table class="doctable" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr class="top" valign="top">
<th class="domain">Domain</th><th class="contact">Contact</th>
</tr>
"""
        keysI = sorted(self.data.keys())
        for i in keysI:
            #########################
            if i == 'Other': continue
            # Generate the domain's tree page before linking to it.
            self.__NewTreePage(i)
            contents = contents + '\n<tr class="DCRow">\n' ######### TAG: TR1
            #########################
            # 'Operations' has no tree page, so its cell is not clickable.
            if i == 'Operations':
                contents = contents + """<td width="50%%" style="padding:8px">%s</td>\n""" % i
            else:
                contents = contents + """<td width="50%%" style="padding:8px;cursor:pointer" onclick="toggleHoba('hoba_%s', 'iframes/%s.html')" id="hoba_%s"><a>%s</a></td>\n""" % (i.replace(' ', '_'), i.lower().replace(' ', '_'), i.replace(' ', '_'), i)
            #########################
            # Comma-separated mailto links; record layout is [name, email].
            contents = contents + '<td width="50%" class="contact">'
            for j in range(len(self.data[i]["__DATA__"]["Contact"])):
                if j == len(self.data[i]["__DATA__"]["Contact"]) - 1:
                    contents = contents + '<a href="mailto:%s">%s</a> ' % (self.data[i]["__DATA__"]["Contact"][j][1], self.data[i]["__DATA__"]["Contact"][j][0])
                else:
                    contents = contents + '<a href="mailto:%s">%s</a>, ' % (self.data[i]["__DATA__"]["Contact"][j][1], self.data[i]["__DATA__"]["Contact"][j][0])
            contents = contents + '</td>\n'
            contents = contents + '</tr>\n\n' ######### TAG: TR1
            #########################
            if i == 'Operations': continue
            #########################
            # Hidden row holding the lazily-loaded domain tree iframe.
            contents = contents + """
<tr><td colspan="2" style="background:#d7dbe3">
<div style="display:none;" id="hoba_%s_div"><iframe width="100%%" frameborder="0"></iframe></div>
</td></tr>
""" % (i.replace(' ', '_'))
        contents = contents + "</table>"
        self.WriteFile(outputFileName, self.mainPageTemplate.replace(self.contentStamp, self.title + contents + self.links))

    def __NewTreePage(self, domain):
        """Write iframes/<domain>.html: the expandable subsystem/package tree."""
        if domain not in self.data: return
        content = ''
        keysI = sorted(self.data[domain].keys())
        for i in keysI:
            if i == '__DATA__': continue
            content += self.HTMLTreeBegin(i)
            keysJ = sorted(self.data[domain][i].keys())
            for j in keysJ:
                # if len(self.data[domain][i][j].keys()) == 1:
                #     if self.data[domain][i][j].has_key("__DATA__"):
                #         content += self.HTMLTreeAddItem(j, self.data[domain][i][j]["__DATA__"])
                #     else:
                #         content += self.HTMLTreeAddItem(j)
                #     continue
                keysK = sorted(self.data[domain][i][j].keys())
                length = len(keysK)
                # content += "<!-- Begin -->"
                # Packages with children render as an open folder; leaf
                # packages render as a single folder item.
                if length > 1:
                    if "__DATA__" in self.data[domain][i][j]:
                        content += self.HTMLTreeBegin(j, self.data[domain][i][j]["__DATA__"])
                    else:
                        content += self.HTMLTreeBegin(j)
                else:
                    if "__DATA__" in self.data[domain][i][j]:
                        content += self.HTMLTreeAddItem(j, self.data[domain][i][j]["__DATA__"], folder = True)
                    else:
                        content += self.HTMLTreeAddItem(j, folder = True)
                for k in keysK:
                    if k == '__DATA__': continue
                    if self.data[domain][i][j][k]["__DATA__"]: content += self.HTMLTreeAddItem(k, self.data[domain][i][j][k]["__DATA__"])
                    else: content += self.HTMLTreeAddItem(k)
                if length > 1:
                    content += self.HTMLTreeEnd()
                # content += "<!-- End -->"
            content += self.HTMLTreeEnd()
        if domain in self.tWikiLinks:
            self.WriteFile("iframes/%s.html" % domain.lower().replace(' ', '_'), self.treePageTamplate % (domain, self.tWikiLinks[domain], content))
        else:
            print('Warning: The twiki link of "%s" domain not found...' % domain)
            # Fall back to a dead '#' link when no TWiki page is registered.
            self.WriteFile("iframes/%s.html" % domain.lower().replace(' ', '_'), self.treePageTamplate % (domain, '#', content))

    def HTMLTreeBegin(self, title, links = {}):
        """Open an expandable folder node; `links` maps label -> URL badges.

        NOTE(review): mutable default argument -- harmless here because
        `links` is only read, never mutated.
        """
        html = '\n<li>\n<div class="hitarea expandable-hitarea"></div>\n'
        html = html + '<span class="folder">%s\n' % title
        for i in links.keys():
            html = html + '<a target="_blank" href="%s">[%s]</a> \n' % (links[i], i)
        html = html + '</span>\n'
        html = html + '<ul style="display: block;">\n'
        return html

    def HTMLTreeEnd(self):
        # NOTE(review): closes in '</li></ul>' order, the reverse of the usual
        # nesting; the generated markup apparently relies on browser
        # tolerance -- confirm before changing.
        return '</li></ul>\n\n'

    def HTMLTreeAddItem(self, title, links = None, endNode = False, folder = False):
        """Render one leaf/folder row. `links` may be a single URL string,
        a dict of label -> URL badges, or None for a plain label."""
        if endNode: html = '\t<li class="last">'
        else: html = '\t<li>'
        if isinstance(links, str) or isinstance(links, type(u'')):
            if folder:
                html = html + '\t<a href="%s" target="_blank" class=""><span class="emptyFolder">%s</span></a>\n' % (links, title)
            else:
                html = html + '\t<a href="%s" target="_blank" class=""><span class="file">%s</span></a>\n' % (links, title)
        elif isinstance(links, dict):
            if folder:
                html = html + '<span class="emptyFolder">%s ' % title
            else:
                html = html + '<span class="file">%s ' % title
            for i in links.keys():
                html = html + '<a target="_blank" href="%s">[%s]</a> \n' % (links[i], i)
            html = html + '</span>'
        else:
            html = html + '\t<span class="file">%s</span>\n' % title
        return html + '\t</li>\n'
# CLI entry: MainPageGenerator.py DATA_PATH/ CMSSW/doc/html/ CMS_VER OUTPUT_FILE_NAME
if len(sys.argv) == 5:
    DATA_PATH, PATH, VER, OUTF = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
    #os.system("cp -rf %s../data/iframes/ %s" % (os.path.split(__file__)[0], PATH))
    generator = MainPageGenerator(DATA_PATH, PATH, cmsVer = VER)
    generator.CreateNewMainPage(OUTF)
else:
    print("parameter error. It must be like this: python MainPageGenerator.py DATA_PATH/ CMSSW/doc/html/ CMS_VER OUTPUT_FILE_NAME")
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 27 13:49:12 2016
@author: jingyan
"""
def Priyanka_toy(array, N):
    """Count labels needed to ship N toys when one label covers every toy
    whose weight is within 4 units of the lightest toy in its group.

    array -- iterable of toy weights (consumed and sorted).
    N -- number of toys to consider (the first N after sorting).
    Returns the number of groups (labels).
    """
    array = sorted(array)
    # ROBUSTNESS: the original indexed array[0] unconditionally and crashed
    # on empty input; zero toys need zero labels.
    if N <= 0 or not array:
        return 0
    count = 1
    i = 1
    # Current group's upper weight bound: lightest member + 4.
    weight = array[0] + 4
    while i < N:
        if array[i] <= weight:
            i += 1
        else:
            # Start a new group anchored at this (heavier) toy; the loop
            # re-checks the same index, which now falls inside the new bound.
            count += 1
            weight = array[i] + 4
    return count
# Python 2 entry point: first input line is the toy count N, second line the
# space-separated toy weights (py2 `map` returns a list here).
N = int(raw_input().strip())
array = map(int, raw_input().strip().split(' '))
print Priyanka_toy(array, N)
|
# Author: HP
# Date: 2020-03-20 20:48
# File: test_http_request
import unittest
from tools.http_request import HttpRequest
from tools.get_data import GetData
from ddt import ddt,data #
from tools.do_excel import DoExcel
from tools.project_path import *
from tools.my_log import MyLog
# Module-level fixtures shared by all generated test cases.
my_logger=MyLog()
test_data=DoExcel.get_data(test_case_path) # load every test case row from the workbook
@ddt # ddt separates the data from the test logic: one method, many cases
class TestHttpRequest(unittest.TestCase):
    """Data-driven API tests: one generated test per Excel row, with the
    result written back into the workbook."""

    def setUp(self):
        pass

    @data(*test_data)
    def test_api(self,item):
        # Send the request described by the Excel row; the shared Cookie is
        # read off GetData via reflection so login state carries across cases.
        # NOTE(review): eval() on spreadsheet data -- trusted input assumed.
        res=HttpRequest.http_request(item['url'],eval(item['data']),item['http_method'],getattr(GetData,'Cookie'))
        if res.cookies: # use reflection to stash cookies for later cases
            setattr(GetData,'Cookie',res.cookies)
        try:
            self.assertEqual(item['expected'],res.json()['stat'])
            TestResult='PASS'
        except Exception as e:
            TestResult = 'Failed'
            my_logger.info("执行用例出错:{0}".format(e))
            raise e
        finally:
            # Write the raw response and PASS/Failed verdict back to the sheet
            # (case_id + 1 converts case number to the worksheet row).
            DoExcel.write_back(test_case_path,item['sheet_name'],item['case_id']+1,str(res.json()),TestResult)
            my_logger.error("获取到结果是:{0}".format(res.json()))

    def tearDown(self):
        pass
|
from django.http import HttpResponseRedirect
from django.shortcuts import render
from projet.forms.inscription import Inscription
from hashlib import md5
def index(request):
    """Display the blank registration form."""
    blank_form = Inscription({})
    return render(request, 'projet/formulaire.html', {'form' : blank_form})
def valider(request):
    """Validate the registration form.

    On a valid POST: store the credentials in the session, persist the
    registration, and redirect to the project list. On an invalid POST:
    re-render the form with its errors. Otherwise redirect to the
    registration page.
    """
    if request.method=='POST':
        formulaire=Inscription(request.POST, request.FILES)
        if formulaire.is_valid():
            # BUG FIX: Django forms expose validated values through
            # cleaned_data, not a .get() method on the form itself -- the
            # original raised AttributeError here (assuming Inscription does
            # not define a custom get(); confirm against the form class).
            request.session['pseudo']=formulaire.cleaned_data['pseudo']
            request.session['mot_de_passe']=formulaire.cleaned_data['mot_de_passe']
            formulaire.enregistrer()
            return HttpResponseRedirect('/projet/mes-projets/')
        return render(request, 'projet/formulaire.html', {'form' : formulaire})
    return HttpResponseRedirect('/projet/Inscription/')
|
from django.urls import path, include, re_path
from . import views
urlpatterns = [
    # Every route resolves to the same entry view; presumably a single-page
    # app whose client-side router interprets the ids -- confirm in views.index.
    path('', views.index),
    path('projects', views.index),
    path('<int:project_id>/datasets', views.index),
    path('<int:project_id>/<int:dataset_id>/annotator', views.index),
]
|
from .client_latency_test import ClientLatencyTest
from .server_latency_test import ServerLatencyTest
|
from .component import *
from .afterbody import *
from .centreFuselage import *
from .engine import *
from .fuselage import *
from .horizontalStabilizer import *
from .nose import *
from .primitives import *
from .verticalStabilizer import *
from .wing import *
import numpy as np
class Aircraft(Component):
    """Whole-aircraft assembly: fuselage, engine, wing and stabilizers sized
    from a single definitions object.

    BUG FIX: the original class declared __init__ twice; Python keeps only
    the last definition, so the (fuselage, wing, engines, tail) constructor
    was silently dead code. Its intent is preserved, backward-compatibly,
    as the `from_components` classmethod.
    """

    def __init__(self, defs):
        """Create the aircraft from a definitions object `defs`.

        NOTE(review): `defs` attribute names (L, D, λ, Λ, α, ...) follow the
        project's definitions class; only the usages visible here are assumed.
        """
        # Instance fuselage: the centre section takes whatever length remains
        # of the total L after the nose and the afterbody cone.
        afterbodyLength = defs.D / np.tan(defs.afterbodyAngle)
        self.Fuselage = Fuselage(
            CentreFuselage(
                defs.L - afterbodyLength - defs.noseLength,
                defs.D
            ),
            Afterbody(
                afterbodyLength,
                defs.D
            ),
            Nose(
                defs.noseLength,
                defs.D
            ),
            defs.L
        )
        # Instance engine
        self.Engine = Engine(
            defs.N_E,
            defs.T,
            defs.BPR,
            defs.D_E,
            defs.W_E,
            defs.cj
        )
        # Instance wing
        self.Wing = Wing(
            defs.S,
            defs.λ,
            defs.AR,
            defs.Λ,
            defs.α,
            defs.α_0,
            defs.dihedral,
            defs.N_z
        )
        # Instance horizontal stabilizer
        self.HorizontalStabilizer = HorizontalStabilizer(
            defs.λ_H,
            defs.AR_H,
            defs.Λ_H,
            defs.α_H,
            defs.α_0H,
            defs.C_HT
        )
        # Instance vertical stabilizer
        self.VerticalStabilizer = VerticalStabilizer(
            defs.λ_V,
            defs.AR_V,
            defs.Λ_V,
            defs.α_V,
            defs.α_0V,
            defs.C_HV
        )

    @classmethod
    def from_components(cls, fuselage, wing, engines, tail):
        """Alternate constructor from pre-built components (the behavior the
        shadowed first __init__ intended). TW and MTOW start unset."""
        obj = cls.__new__(cls)
        obj.fuselage = fuselage
        obj.wing = wing
        obj.engines = engines
        obj.tail = tail
        obj.TW = None
        obj.MTOW = None
        return obj
|
from django.db import models
from django.utils import timezone
class ModelBase(models.Model):
    """Shared name/detail fields for the catalogue models.

    NOTE(review): Meta.abstract is not set, so this creates its own table and
    subclasses use multi-table inheritance -- confirm that is intended.
    """
    name = models.CharField(max_length=40)
    detail = models.CharField(max_length=280)

    def __str__(self):
        return self.name
class APD(ModelBase):
    """Personal protective equipment type (Indonesian: Alat Pelindung Diri)."""
    class Meta:
        verbose_name = "Alat Pelindung Diri"
        verbose_name_plural = "Alat Pelindung Diri"
    pass
class Hospital(ModelBase):
    """A hospital that records APD stock transactions (Rumah Sakit)."""
    class Meta:
        verbose_name = "Rumah Sakit"
        verbose_name_plural = "Rumah Sakit"
    address = models.CharField(max_length=140)
    # Flipped to True once the hospital has been vetted (e.g. by an admin).
    verified = models.BooleanField(default=False)
class Transaction(models.Model):
    """One stock movement for a hospital: incoming supply or an outgoing request."""
    class Type(models.TextChoices):
        # '1' = incoming stock, '2' = request for stock (Indonesian labels).
        INPUT = '1', 'Pemasukan'
        REQUEST = '2', 'Permintaan'
    class Meta:
        verbose_name = "Transaksi"
        verbose_name_plural = "Transaksi"
    hospital = models.ForeignKey(Hospital, on_delete=models.CASCADE)
    type = models.CharField(max_length=2, choices=Type.choices,
                            default=Type.INPUT)
    date_created = models.DateTimeField('created', default=timezone.now)

    def __str__(self):
        return f"{self.hospital} - {self.type} - {self.date_created}"
class TransactionDetail(models.Model):
    """Line item of a Transaction: how many units of one APD type were moved."""
    apd = models.ForeignKey(APD, on_delete=models.CASCADE)
    transaction = models.ForeignKey(Transaction, on_delete=models.CASCADE)
    count = models.IntegerField()

    def __str__(self):
        return f"{self.apd}@{self.count} for {self.transaction}"
|
import numpy as np
from utils import *
class STATISTICS:
    """Streaming accumulator for mean / variance / min / max of a series.

    Mean and population variance are updated incrementally as values arrive
    through Add(); no history is stored.
    """

    def __init__(self, val, count):
        self.Value = val        # last explicitly set value
        self.Count = count      # observations folded in so far
        self.Mean = val
        self.Variance = 0.
        # BUG FIX: Min/Max previously started at 0.0, so e.g. an all-positive
        # series reported Min == 0 even though 0 was never observed.
        # Seed the extrema with the initial value instead.
        self.Min = val
        self.Max = val

    def SetValue(self, val):
        self.Value = val

    def SetCount(self, count):
        self.Count = count

    def Add(self, val):
        """Fold one observation into count / mean / variance / min / max."""
        meanOld = float(self.Mean)
        countOld = float(self.Count)
        self.Count += 1.0
        assert(self.Count > 0)
        # Incremental mean, then population variance from the old moments:
        # Var = E[x^2] - Mean^2.
        self.Mean += float(float((val - self.Mean))/float(self.Count))
        self.Variance = float(float((countOld*(self.Variance + meanOld**2) + val**2))/float(self.Count) - self.Mean**2)
        if val > self.Max:
            self.Max = val
        if val < self.Min:
            self.Min = val

    def Clear(self):
        """Reset all moments; extrema become +/-inf so any next value wins."""
        self.Count = 0
        self.Mean = 0.0
        self.Variance = 0.0
        # Stdlib infinities (the original relied on `Infinity` from utils).
        self.Min = float('inf')
        self.Max = -float('inf')

    def Initialise(self, val, count):
        self.Mean = val
        self.Count = count

    def GetValue(self):
        return self.Value

    def GetTotal(self):
        return self.Mean * self.Count

    def GetStdDev(self):
        return np.sqrt(self.Variance)

    def GetStdError(self):
        return np.sqrt(self.Variance/float(self.Count))

    def GetMean(self):
        return self.Mean

    def GetCount(self):
        return self.Count

    def __str__(self):
        return "[ " + str(self.Mean) + " , " + str(self.Variance) + " ]"
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import yaml
from . import Tools
class Config(Tools):
    """analyze config file with yaml
    """
    def __init__(self, config_path):
        """init Config object
        Args: configPath: string, path of the config file
        Return: None
        """
        self.path = config_path

    def run(self):
        """analyze config file
        Args: None
        Return: dict -- {"status": 0, "ret": <parsed data>} on success,
                {"status": 1} when the file cannot be read.
        """
        try:
            # ROBUSTNESS: `with` guarantees the handle is closed even if
            # parsing raises (the original leaked it on a parse error).
            with open(self.path) as config_file:
                # SECURITY FIX: yaml.load without an explicit Loader can
                # construct arbitrary Python objects from tagged input (and is
                # deprecated); safe_load builds plain data only.
                data = yaml.safe_load(config_file)
        except IOError:
            return {"status": 1}
        return {"status": 0, "ret": data}
|
# -*- coding:utf8 -*-
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.urlpatterns import format_suffix_patterns
from users import views
# Route table for the users app; format_suffix_patterns lets the same views
# also answer suffixed URLs (e.g. ".json").
urlpatterns = (
    # NOTE(review): the patterns lack a leading '^' anchor, so they match any
    # URL *ending* in these segments -- confirm that is intended.
    url(r'user_detail/$', views.UserDetail.as_view()),
    url(r'user_list/$', views.UserList.as_view()),
    url(r'logout/$', views.AuthLogout.as_view()),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
# https://leetcode.com/problems/two-sum-iii-data-structure-design
class TwoSum:
    """Stream of numbers supporting pair-sum queries.

    add() is O(1); find() is O(k) in the number of distinct values added.
    """

    def __init__(self):
        """Initialize your data structure here."""
        # value -> number of times it was added
        self.dict = {}

    def add(self, number: int) -> None:
        """Record one occurrence of `number`.

        Bug fix: the first occurrence used to be stored with count 0, so a
        pair of equal numbers (add(3); add(3); find(6)) was never found
        because find() requires a count > 1.
        """
        self.dict[number] = self.dict.get(number, 0) + 1

    def find(self, value: int) -> bool:
        """Return True if any pair of added numbers sums to `value`."""
        for num in self.dict:
            if num * 2 == value:
                # The pair uses the same number twice -> need two copies.
                if self.dict[num] > 1:
                    return True
            elif (value - num) in self.dict:
                return True
        return False
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value) |
# Advent of Code 2019 day 11: Intcode program driving a hull-painting robot.
# The program is a single comma-separated line of integers.
with open('day11.txt') as f:
    c = list(map(int, f.readline().split(',')))

# Opcodes
ADD = 1
MULTIPLY = 2
STORE = 3     # read one input value
DUMP = 4      # emit one output value
JT = 5        # jump-if-true
JF = 6        # jump-if-false
LT = 7        # less-than
EQ = 8        # equals
RB = 9        # adjust relative base
HALT = 99
class IntCode:
    """Intcode VM with relative-base addressing; run() pauses on each output."""

    def __init__(self):
        self.code = c[:]   # private copy of the loaded program (memory)
        self.ip = 0        # instruction pointer
        self.rb = 0        # relative base
        self.out = 0       # last value emitted by a DUMP instruction
        self.halt = False  # set once opcode 99 executes

    def p(self, arg):
        """Resolve a (param, mode) pair to its operand value.

        Mode 0 = position, 1 = immediate, 2 = relative.  Out-of-range reads
        grow memory with zeros and retry (hence the while/except loop).
        """
        while True:
            try:
                if arg[1] == 0:
                    return self.code[arg[0]]
                elif arg[1] == 1:
                    return arg[0]
                elif arg[1] == 2:
                    return self.code[arg[0]+self.rb]
            except IndexError:
                self.code.append(0)

    def run(self, inpt=None):
        """Execute until the next output or HALT; state persists across calls.

        Re-entrant: the caller alternates run(input) / run() to converse
        with the program one value at a time.
        """
        code=self.code
        while self.ip < len(code):
            try:
                # Process instruction
                # Decode: last two digits are the opcode, remaining digits
                # (right-to-left) are parameter modes, zero-padded to 3.
                i = str(code[self.ip])
                opcode = int(''.join(i[-2:]))
                pmods = list(map(int, i[-3::-1])) # get parameter modes
                pmods.extend([0] * (3-len(pmods))) # pad w/ 0s
                args = list(zip(code[self.ip+1:self.ip+4], pmods)) # bind parameter modes to params
                #print(self.ip, ':', opcode, args, ',', self.rb)
                # Run the op
                if opcode == ADD:
                    # Write target honours relative mode (2) vs position mode.
                    if args[2][1] == 2:
                        code[args[2][0]+self.rb] = self.p(args[0]) + self.p(args[1])
                    else:
                        code[args[2][0]] = self.p(args[0]) + self.p(args[1])
                    self.ip += 4
                elif opcode == MULTIPLY:
                    if args[2][1] == 2:
                        code[args[2][0]+self.rb] = self.p(args[0]) * self.p(args[1])
                    else:
                        code[args[2][0]] = self.p(args[0]) * self.p(args[1])
                    self.ip += 4
                elif opcode == STORE:
                    if inpt != None:
                        # NOTE(review): this write always adds self.rb, even in
                        # position mode (0) -- suspect for mode-0 stores once
                        # rb is nonzero; confirm against the Intcode spec.
                        code[args[0][0]+self.rb] = inpt
                    self.ip += 2
                elif opcode == DUMP:
                    # Emit a value and pause so the caller can read self.out.
                    self.out = self.p(args[0])
                    self.ip += 2
                    return
                elif opcode == JT:
                    if self.p(args[0]):
                        self.ip = self.p(args[1])
                    else:
                        self.ip += 3
                elif opcode == JF:
                    if not self.p(args[0]):
                        self.ip = self.p(args[1])
                    else:
                        self.ip += 3
                elif opcode == LT:
                    if self.p(args[0]) < self.p(args[1]):
                        if args[2][1] == 2:
                            code[args[2][0]+self.rb] = 1
                        else:
                            code[args[2][0]] = 1
                    else:
                        if args[2][1] == 2:
                            code[args[2][0]+self.rb] = 0
                        else:
                            code[args[2][0]] = 0
                    self.ip += 4
                elif opcode == EQ:
                    if self.p(args[0]) == self.p(args[1]):
                        if args[2][1] == 2:
                            code[args[2][0]+self.rb] = 1
                        else:
                            code[args[2][0]] = 1
                    else:
                        if args[2][1] == 2:
                            code[args[2][0]+self.rb] = 0
                        else:
                            code[args[2][0]] = 0
                    self.ip += 4
                elif opcode == RB:
                    #print(self.ip, ':', opcode, args, ',', self.rb)
                    self.rb += self.p(args[0])
                    #print('rb:',self.rb)
                    self.ip += 2
                elif opcode == HALT:
                    self.halt=True
                    return
                else:
                    #print(code)
                    print("Something's royally hecked up. (Opcode {})".format(opcode))
                    return
            except IndexError:
                # Instruction or write target past end of memory: grow, retry.
                code.append(0)
# Drive the painting robot: maps (x, y) -> panel colour (0 black, 1 white).
painted_points = {}
intc = IntCode()
# TURTLE GRAPHICSSSSS
loc = [0,0]
# Part-2 initial condition: the robot starts on a white panel.
painted_points[tuple(loc)] = 1
# Headings in clockwise turn order: up, right, down, left.
headings = [(0,1), (1,0), (0,-1), (-1,0)]
heading = 0
while not intc.halt:
    # Panels never visited are black (0).
    if tuple(loc) not in painted_points.keys():
        painted_points[tuple(loc)] = 0
    # First output: colour to paint the current panel.
    intc.run(painted_points[tuple(loc)])
    color = intc.out
    painted_points[tuple(loc)] = color
    # Second output: turn direction (1 = clockwise, 0 = counter-clockwise).
    intc.run()
    rot = intc.out
    # Rotate bot
    if rot == 1:
        heading = (heading + 1) % len(headings)
    elif rot == 0:
        heading = (heading - 1) % len(headings)
    loc[0] += headings[heading][0]
    loc[1] += headings[heading][1]
# Ironically displaying the answer might end up being the hardest part...
# Render the painted hull as ASCII art, shifted so coordinates start at 0.
min_x = min([s[0] for s in painted_points.keys()])
min_y = min([s[1] for s in painted_points.keys()])
max_x = max([s[0] for s in painted_points.keys()])
max_y = max([s[1] for s in painted_points.keys()])
hull = []
for _ in range(max_y - min_y +1):
    hull.append([0] * (max_x - min_x+1))
for pt, v in painted_points.items():
    try:
        # offset cords by minimum
        # NOTE(review): offsetting by abs(min) assumes min <= 0 -- confirm.
        y = pt[1] + abs(min_y)
        x = pt[0] + abs(min_x)
        hull[y][x] = v
    except IndexError as e:
        print(pt)
        print("Shit's fucked - ({},{})".format(pt[1],pt[0]))
# Print rows top-to-bottom (grid row 0 is the lowest y, hence the reversal).
for h in hull[::-1]:
    for c in range(len(h)):
        if h[c] == 0:
            h[c] = ' '
        else:
            h[c] = 'X'
    print(''.join(map(str,h)))
# Just gonna ignore the fact that this seems to print an extra point.
# Imports
import numpy as np
import matplotlib.pyplot as plt
# Within group iteration procedure (weighted diamond difference)
def InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, group, converge):
    """Sweep one energy group (weighted diamond difference) to convergence.

    a: diamond-difference weight; mu: 6 ordinates (3 positive, 3 negative);
    sig_t/sig_s/q_e: total/scattering cross sections and external source;
    group: 1-based group index; converge: l2-norm tolerance.
    Uses the module-level mesh spacing `d`.
    Returns (psi_new, phi): converged angular flux (N x 6) and scalar flux.
    """
    # Initialize new center flux arrays for the current group.
    # int(N) guards against a float N (newer numpy rejects float sizes).
    psi_new = np.zeros((int(N), 6))
    phi_new = np.zeros(int(N))
    # Starting scalar flux: sum 6 directions with equal weight.
    if (group == 1): phi = (2./np.size(mu)) * psi_group1.sum(axis=1)
    elif (group == 2): phi = (2./np.size(mu)) * psi_group2.sum(axis=1)
    elif (group == 3): phi = (2./np.size(mu)) * psi_group3.sum(axis=1)
    # Start iterating
    innerconverge = False
    inneritr = 0
    while (innerconverge == False):
        # Boundary condition
        # NOTE(review): group is 1-based at every call site, so the group==0
        # branch never fires and a vacuum boundary is always applied -- confirm.
        if (group == 0): psi_in = np.array([0.5,0.5,0.5])
        else : psi_in = np.array([0.0,0.0,0.0])
        # Sweep in mu > 0 (right)
        for i in range(int(N)):
            # Calculate source.
            # Weight is normalized to two; 18 terms (6 directions in 3 groups).
            s = q_e[group-1] + (2./(np.size(mu)*np.size(q_e))) * ((sig_s[group-1,0]*psi_group1[i,:].sum()) + (sig_s[group-1,1]*psi_group2[i,:].sum()) + (sig_s[group-1,2]*psi_group3[i,:].sum()))
            # Calculate center flux
            psi_new[i,0:3] = (s + (2. * np.fabs(mu[0:3]) / (d * (1.+a))) * psi_in) / (sig_t[group-1] + (2. * np.fabs(mu[0:3]) / (d*(1.+a))))
            # Outgoing flux becomes incoming flux for the next cell.
            psi_in = ((2. / (1.+a)) * psi_new[i,0:3]) - ((1.-a)/(1.+a))*psi_in
        # Sweep in mu < 0 (left)
        for i in range(int(N)):
            j = int(N) - i - 1  # right-to-left cell index (int-safe)
            s = q_e[group-1] + (2./(np.size(mu)*np.size(q_e))) * ((sig_s[group-1,0]*psi_group1[j,:].sum()) + (sig_s[group-1,1]*psi_group2[j,:].sum()) + (sig_s[group-1,2]*psi_group3[j,:].sum()))
            psi_new[j,3:6] = (s + (2. * np.fabs(mu[3:6]) / (d * (1.+a))) * psi_in) / (sig_t[group-1] + (2. * np.fabs(mu[3:6]) / (d*(1.+a))))
            psi_in = ((2. / (1.+a)) * psi_new[j,3:6]) - ((1.-a)/(1.+a))*psi_in
        # Scalar flux from angular flux: (1/2)*SUM(w*psi), equal weights.
        phi_new = (2./np.size(mu)) * psi_new.sum(axis=1)
        # Convergence criterion: l2 norm of the scalar-flux change.
        innercrit = np.sqrt(np.sum((phi_new - phi)**2))
        if (innercrit < converge): innerconverge = True
        # Update fluxes for the next pass.
        phi = np.copy(phi_new)
        if (group == 1): psi_group1 = np.copy(psi_new)
        elif (group == 2): psi_group2 = np.copy(psi_new)
        elif (group == 3): psi_group3 = np.copy(psi_new)
        inneritr += 1
    # Bug fix: Python-2-only "print expr" statement replaced with the
    # parenthesized call form, identical output on both Python 2 and 3.
    print('(Group = %i) Number of iterations = %i' % (group, inneritr))
    # Return angular and scalar flux for the group.
    return psi_new, phi
# -----------------------------------------------
# Define variables
a = 0.5                                       # diamond-difference weight
mu = np.array([0.2,0.5,0.7,-0.2,-0.5,-0.7])   # angular ordinates
sig_t = np.array([0.5,0.8,1.0])               # total cross section per group
sig_s = np.array([[0.1,0.0,0.0],[0.3,0.1,0.1],[0.1,0.3,0.3]])  # group-to-group scattering
q_e = np.array([1.5,0.0,0.2])                 # external source per group
d = 0.1                                       # mesh spacing
x0 = 0.0
x1 = 2.0
# Bug fix: N was a float ((x1-x0)/d), which newer numpy rejects as an
# array size; make it an integer cell count.
N = int(round((x1 - x0) / d))
x = np.linspace((d/2.), 2.-(d/2.), N)  # center points for plotting

# Group fluxes (initial guesses)
# Bug fix: the chained assignment "g1 = g2 = g3 = np.zeros(...)" bound all
# three names to the SAME array object; allocate one array per group.
# (Numerically identical here since the guesses are all zeros and the arrays
# are rebound, not mutated -- but the aliasing was a landmine.)
psi_group1 = np.zeros((N, 6))
psi_group2 = np.zeros((N, 6))
psi_group3 = np.zeros((N, 6))
phi_group1 = np.zeros(N)
phi_group2 = np.zeros(N)
phi_group3 = np.zeros(N)

# Choose solver method
#method = 'GaussSeidel'
method = 'Jacobi'
# Convergence value
converge = 1.0e-4

# Perform outer iteration over energy groups
outerconverge = False
outeritr = 0
while not outerconverge:
    # Current group-source values (used only for the outer convergence test).
    # NOTE(review): the (2/(6*3)) weight multiplies only the first scattering
    # term here (operator precedence), unlike the source in InnerIteration --
    # confirm whether that is intended.
    groupsource = np.zeros(N)
    for group in range(3):
        groupsource += q_e[group] + (2./(np.size(mu)*np.size(q_e))) * (sig_s[group,0]*psi_group1.sum(axis=1)) + (sig_s[group,1]*psi_group2.sum(axis=1)) + (sig_s[group,2]*psi_group3.sum(axis=1))
    if (method == 'Jacobi'):
        # Sweep all groups from the same iterate, then update together
        # (intermediate names keep updates from leaking between groups).
        psi_group1_, phi_group1_ = InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, 1, converge)
        psi_group2_, phi_group2_ = InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, 2, converge)
        psi_group3_, phi_group3_ = InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, 3, converge)
        psi_group1 = np.copy(psi_group1_); phi_group1 = np.copy(phi_group1_)
        psi_group2 = np.copy(psi_group2_); phi_group2 = np.copy(phi_group2_)
        psi_group3 = np.copy(psi_group3_); phi_group3 = np.copy(phi_group3_)
    elif (method == 'GaussSeidel'):
        # Update each group immediately so later groups see the new flux.
        psi_group1, phi_group1 = InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, 1, converge)
        psi_group2, phi_group2 = InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, 2, converge)
        psi_group3, phi_group3 = InnerIteration(a, mu, sig_t, sig_s, q_e, N, psi_group1, psi_group2, psi_group3, 3, converge)
    # Recompute group sources and test outer convergence (l2 norm).
    groupsource_new = np.zeros(N)
    for group in range(3):
        groupsource_new += q_e[group] + (2./(np.size(mu)*np.size(q_e))) * (sig_s[group,0]*psi_group1.sum(axis=1)) + (sig_s[group,1]*psi_group2.sum(axis=1)) + (sig_s[group,2]*psi_group3.sum(axis=1))
    outercrit = np.sqrt(np.sum((groupsource_new - groupsource)**2))
    if (outercrit < converge): outerconverge = True
    outeritr += 1
    # Bug fix: py2 print statement -> py2/py3-compatible call form.
    print('Outer group iteration = %i' % outeritr)

# Plot scalar fluxes
plt.figure()
plt.plot(x,phi_group1, marker='s', color='c',linestyle='none', label='Group 1')
plt.plot(x,phi_group2, marker='s', color='b',linestyle='none', label='Group 2')
plt.plot(x,phi_group3, marker='s', color='r',linestyle='none', label='Group 3')
plt.xlim(-0.1,2.1)
plt.ylim(0,8)
plt.xlabel('x'), plt.ylabel('Center $\phi$')
plt.title('Cell Center Scalar Flux Profile, $\\alpha$=%.2f' % a)
plt.legend(numpoints=1,fontsize=10)
# Render plots
plt.show()
|
def vowel():
    """Print each vowel of a fixed demo sentence, then the total count.

    Returns:
        int: the number of vowels found (also printed).
    """
    # Renamed locals: the original "str" shadowed the builtin.
    text = "We are going to calculate the no of vowels."
    vowels = "aeiouAEIOU"
    count = 0
    for character in text:
        if character in vowels:
            count += 1
            print(character)
    print("No. of vowels in string: ", count)
    # Returning the count makes the function testable; callers that ignored
    # the previous None return are unaffected.
    return count


vowel()
|
import sys
import argparse
import inspect
from pprint import pprint
# Optional dependency: fall back to a stub exposing just the attributes CLI
# consumes (short_description, long_description, params) when the real
# docstring_parser package is not installed.
try:
    import docstring_parser
except ModuleNotFoundError:
    class docstring_parser:
        def __init__(self):
            self.short_description = ""
            self.long_description = ""
            self.params = []

        @classmethod
        def parse(cls, doc):
            # Mirrors docstring_parser.parse(): returns an empty parse result.
            return docstring_parser()
from object_publisher.base import *
class CLI(PublisherBase):
    """Publish an object's methods as argparse sub-commands.

    Required method parameters become positional CLI arguments, parameters
    with defaults become --options.  Internally, argparse dest names are
    prefixed with '*' (positional) or '**' (keyword) so run_on_parsed() can
    rebuild *args/**kwargs; '#method' holds the chosen sub-command name
    ('#'/'*' cannot collide with real Python identifiers).
    """

    def __init__(self, *, object=None, klass=None, allocator=None, deallocator=None, parser=None):
        super(CLI, self).__init__(object=object, klass=klass, allocator=allocator, deallocator=deallocator)
        klass = self.klass
        if not parser:
            parser = argparse.ArgumentParser(prog=klass.__name__)
        subparsers = parser.add_subparsers(help='sub-command help', dest="#method")
        for k,v in self._enumerate_published(klass):
            # Per-parameter help text comes from the method's docstring.
            doc = docstring_parser.parse(v.func.__doc__)
            doc_params = doc.params
            param_dict = dict([(p.arg_name, p.description) for p in doc_params])
            subparser = subparsers.add_parser(k, help=doc.short_description)
            # [1:] skips the first parameter (self).
            params = list(inspect.signature(v.func).parameters.items())[1:]
            for param_name, param in params:
                description = param_dict[param_name] if param_name in param_dict else ""
                if param.default == inspect.Parameter.empty:
                    # Required -> positional; '*' marks it for args rebuilding.
                    subparser.add_argument("*%s"%param_name, metavar="<%s>"%param_name, help=description)
                else:
                    # Optional -> --flag; '**' dest marks it for kwargs rebuilding.
                    subparser.add_argument("--%s"%param_name, metavar="<%s>"%param_name, dest="**%s"%param_name, default=param.default, help=description)
        self.parser = parser

    def run_on_parsed(self, parsed):
        """Dispatch a parsed Namespace to the published method; pprint and return its result.

        NOTE(review): positional ordering relies on argparse.Namespace
        preserving attribute insertion order -- confirm for target versions.
        """
        method = None
        args = []
        kwargs = {}
        object = self.allocator()
        try:
            for k, v in vars(parsed).items():
                if k == "#method":
                    method = v
                elif k.startswith("**"):
                    kwargs[k.replace("**","")] = v
                elif k.startswith("*"):
                    args.append(v)
                else:
                    raise Exception("Unknown argument: %s"%k)
            if not method:
                raise Exception("Invalid argument format.")
            if hasattr(object, method):
                result = getattr(object, method)(*args, **kwargs)
                pprint(result)
                return result
        finally:
            # Always release the allocated object, even on error/return.
            self.deallocator(object)
        # Reached only when the parsed method name is not on the object.
        raise Exception("method not found.")

    def run(self, sys_argv):
        """Parse argv and execute the selected sub-command."""
        parsed = self.parser.parse_args(sys_argv)
        self.run_on_parsed(parsed)
"""
定义一个XuZhu类,继承于童姥。虚竹宅心仁厚不想打架。所以虚竹只有一个read(念经)的方法。每次调用都会打印“罪过罪过”
加入模块化改造
希望各位同学在此基础上可以添加自己的“freestyle”哦
"""
# 定义XuZhu类,继承于童姥
from practice.python_oo.tonglao import TongLao
class XuZhu(TongLao):
# 定义read方法
def read(self):
print("罪过罪过")
# 类的实例化
XZ = XuZhu(1000, 1000)
XZ.read()
|
from .models import Vertical
from .serializers import VerticalSerializer
from rest_framework import viewsets
class VerticalViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint (list/retrieve/create/update/delete) for Vertical objects."""
    queryset = Vertical.objects.all()
    serializer_class = VerticalSerializer
|
# Generated by Django 3.1.5 on 2021-02-02 09:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replaces PostModel with separate
    PostBlogModel and PostWorkModel tables (avoid hand-editing)."""

    dependencies = [
        ('blog', '0005_auto_20210202_1731'),
    ]

    operations = [
        # Blog posts carry an optional category; PROTECT blocks deleting a
        # category that still has posts.
        migrations.CreateModel(
            name='PostBlogModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('published_at', models.DateTimeField(blank=True, null=True)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='blog.categorymodel')),
            ],
        ),
        # Work posts are identical minus the category link.
        migrations.CreateModel(
            name='PostWorkModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('published_at', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        # Drop the old combined table (data in PostModel is lost here).
        migrations.DeleteModel(
            name='PostModel',
        ),
    ]
|
import re
import time
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver
def scrape_text(soup):
    """Collect the review texts on the current page.

    :param soup: bs4.BeautifulSoup, parsed page
    :return: list of review strings with the 'Verified Review' prefix removed
    """
    return [re.sub(" Verified Review \| ", "", block.text)
            for block in soup.find_all("div", class_="text_content")]
def scrape_datetime(soup):
    """Collect the post timestamps on the current page.

    :param soup: bs4.BeautifulSoup, parsed page
    :return: list of timestamp strings taken from <time> elements
    """
    return [tag.text for tag in soup.find_all('time')]
def scrape_score(soup):
    """Collect the overall review score for each review on the page.

    Reviews whose rating widget text contains 'na' yield the string 'NA';
    the others consume the next ratingValue span in document order.
    (Both tag lists drop their first entry: the page-level summary.)

    :param soup: bs4.BeautifulSoup, parsed page
    :return: list of score strings / 'NA'
    """
    value_tags = soup.find_all('span', itemprop='ratingValue')[1:]
    widget_tags = soup.find_all('div', class_='rating-10')[1:]
    score = []
    idx = 0
    for widget in widget_tags:
        if re.search(r'na', widget.text):
            score.append('NA')
        else:
            score.append(value_tags[idx].text)
            idx += 1
    return score
def scrape_table(soup):
    """Extract the per-review ratings table from the current page.

    Star-rated categories become an int count of filled star icons; text
    categories keep the cell text; categories absent from a review get 'NA'.

    :param soup: bs4.BeautifulSoup, parsed page
    :return: pd.DataFrame with one row per review on the page
    """
    star_rated = {'Queuing Times',
                  'Terminal Cleanliness',
                  'Terminal Seating',
                  'Terminal Signs',
                  'Food Beverages',
                  'Airport Shopping',
                  'Wifi Connectivity',
                  'Airport Staff'}
    # Column order matches the site's table layout.
    columns = ['Experience At Airport', 'Date Visit', 'Type Of Traveller',
               'Queuing Times', 'Terminal Cleanliness', 'Terminal Seating',
               'Terminal Signs', 'Food Beverages', 'Airport Shopping',
               'Wifi Connectivity', 'Airport Staff', 'Recommended']
    table_dict = {name: [] for name in columns}
    for review in soup.find_all('div', class_='review-stats'):
        seen = []
        for row in review.find_all('tr'):
            cells = row.find_all('td')
            label = cells[0].text
            if label in star_rated:
                # Rating = number of filled star icons in the value cell.
                table_dict[label].append(len(cells[1].find_all('span', class_='star fill')))
            else:
                table_dict[label].append(cells[1].text)
            seen.append(label)
        # Pad categories this particular review did not include.
        for name in columns:
            if name not in seen:
                table_dict[name].append('NA')
    return pd.DataFrame(table_dict)
def get_other_page(soup):
    """Return absolute URLs for the pagination links on the current page.

    :param soup: bs4.BeautifulSoup, parsed page
    :return: list of URL strings
    """
    pagination = soup.find_all("article", class_="comp comp_reviews-pagination querylist-pagination position-")[0]
    return ['http://www.airlinequality.com' + anchor['href']
            for anchor in pagination.find_all("a")]
def get_html(driver, url):
    """Load `url` in the webdriver and return the parsed page.

    :param driver: selenium webdriver instance
    :param url: str, page to fetch
    :return: bs4.BeautifulSoup built from the rendered page source
    """
    driver.get(url)
    page_source = driver.page_source
    return BeautifulSoup(page_source, 'lxml')
def get_all_reviews(top_url):
    """Scrape every review page for one airport starting from its top page.

    :param top_url: str, the airport's review top page URL
    :return: pd.DataFrame of the ratings table plus text/datetime/score columns
    """
    driver = webdriver.Chrome("/Users/kosuke/Downloads/Chromedriver")
    try:
        soup = get_html(driver, top_url)
        text = scrape_text(soup)
        datetime = scrape_datetime(soup)
        score = scrape_score(soup)
        frames = [scrape_table(soup)]
        try:
            # The last pagination link is the next page; when it equals the
            # page we are on, we have reached the final page.
            next_url = get_other_page(soup)[-1]
            present_url = top_url
            while next_url != present_url:
                present_url = next_url
                soup = get_html(driver, present_url)
                text.extend(scrape_text(soup))
                datetime.extend(scrape_datetime(soup))
                score.extend(scrape_score(soup))
                frames.append(scrape_table(soup))
                next_url = get_other_page(soup)[-1]
                time.sleep(3)  # be polite to the server
        except IndexError:
            # Single-page airports have no pagination block at all.
            print('IndexError')
    finally:
        # Fix: quit the browser even if scraping raises, so Chrome processes
        # are not leaked (quit() previously ran only on the success path).
        driver.quit()
    # Fix: DataFrame.append was deprecated and removed in pandas 2.0;
    # concatenate the per-page frames instead (index becomes 0..n-1, which
    # the list-valued column assignments below do not depend on).
    all_reviews = pd.concat(frames, ignore_index=True)
    all_reviews['text'] = text
    all_reviews['datetime'] = datetime
    all_reviews['score'] = score
    return all_reviews
def main():
    """Scrape reviews for the 2016 busiest-airports ranking.

    Ranking source:
    https://en.wikipedia.org/wiki/List_of_busiest_airports_by_passenger_traffic#2016_statistics
    Chengdu, Kunming, Shenzhen and Shanghai Hongqiao are excluded for having
    too few reviews; Sydney is outside the target region (kept in the URL map).
    Resumes from airport_review_complete.csv, skipping airports already saved.
    """
    output = pd.read_csv("airport_review_complete.csv")
    # Airports present in the checkpoint file before this run started.
    airports = output['airport']
    urls = {
        'Atlanta': 'http://www.airlinequality.com/airport-reviews/atlanta-hartsfield-airport/',
        'Beijing Capital': 'http://www.airlinequality.com/airport-reviews/beijing-capital-airport/',
        'Dubai': 'http://www.airlinequality.com/airport-reviews/dubai-airport/',
        'Los Angeles': 'http://www.airlinequality.com/airport-reviews/los-angeles-lax-airport/',
        'Tokyo Handeda': 'http://www.airlinequality.com/airport-reviews/tokyo-haneda-airport/',
        'Chicago Ohare': 'http://www.airlinequality.com/airport-reviews/chicago-ohare-airport/',
        'London Heathrow': 'http://www.airlinequality.com/airport-reviews/london-heathrow-airport/',
        'hong Kong': 'http://www.airlinequality.com/airport-reviews/hong-kong-airport/',
        'Shanghai Pudong': 'http://www.airlinequality.com/airport-reviews/shanghai-pudong-airport/',
        'Paris CDG': 'http://www.airlinequality.com/airport-reviews/paris-cdg-airport/',
        'Dallas Fort-Worth': 'http://www.airlinequality.com/airport-reviews/dallas-fort-worth-airport/',
        'Amsterdam': 'http://www.airlinequality.com/airport-reviews/amsterdam-schiphol-airport/',
        'Frankfurt Main': 'http://www.airlinequality.com/airport-reviews/frankfurt-main-airport/',
        'Istanbul Ataturk': 'http://www.airlinequality.com/airport-reviews/istanbul-ataturk-airport/',
        'Guangzhou': 'http://www.airlinequality.com/airport-reviews/guangzhou-airport/',
        'New York': 'http://www.airlinequality.com/airport-reviews/new-york-jfk-airport/',
        'Singapore Changi': 'http://www.airlinequality.com/airport-reviews/singapore-changi-airport/',
        'Denver': 'http://www.airlinequality.com/airport-reviews/denver-airport/',
        'Incheon': 'http://www.airlinequality.com/airport-reviews/incheon-airport/',
        'Delhi': 'http://www.airlinequality.com/airport-reviews/delhi-airport/',
        'Bangkok Survarnabhumi': 'http://www.airlinequality.com/airport-reviews/bangkok-suvarnabhumi-airport/',
        'Jakarta': 'http://www.airlinequality.com/airport-reviews/jakarta-airport/',
        'San Francisco': 'http://www.airlinequality.com/airport-reviews/san-francisco-airport/',
        'Kuala Lumpur': 'http://www.airlinequality.com/airport-reviews/klia-kuala-lumpur-airport/',
        'Madrid Barajas': 'http://www.airlinequality.com/airport-reviews/madrid-barajas-airport/',
        'Las Vegas': 'http://www.airlinequality.com/airport-reviews/las-vegas-airport/',
        'Seattle': 'http://www.airlinequality.com/airport-reviews/seattle-airport/',
        'Mumbai': 'http://www.airlinequality.com/airport-reviews/mumbai-airport/',
        'Miami': 'http://www.airlinequality.com/airport-reviews/miami-airport/',
        'Charlotte': 'http://www.airlinequality.com/airport-reviews/charlotte-airport/',
        'Toronto': 'http://www.airlinequality.com/airport-reviews/toronto-pearson-airport/',
        'Barcelona': 'http://www.airlinequality.com/airport-reviews/barcelona-airport/',
        'Phoenix': 'http://www.airlinequality.com/airport-reviews/phoenix-airport/',
        'London Gatwick': 'http://www.airlinequality.com/airport-reviews/london-gatwick-airport/',
        'Taipei': 'http://www.airlinequality.com/airport-reviews/taipei-taoyuan-airport/',
        'Munich': 'http://www.airlinequality.com/airport-reviews/munich-airport/',
        'Sydney': 'http://www.airlinequality.com/airport-reviews/sydney-airport/',
        'Orlando': 'http://www.airlinequality.com/airport-reviews/orlando-airport/',
        'Rome Fiumicino': 'http://www.airlinequality.com/airport-reviews/rome-fiumicino-airport/',
        'Houston IAH': 'http://www.airlinequality.com/airport-reviews/houston-george-bush-intercontinental-airport/',
        'Mexico City': 'http://www.airlinequality.com/airport-reviews/mexico-city-airport/',
        'Newark': 'http://www.airlinequality.com/airport-reviews/newark-airport/',
        'Manila': 'http://www.airlinequality.com/airport-reviews/manila-ninoy-aquino-airport/',
        'Tokyo Narita': 'http://www.airlinequality.com/airport-reviews/tokyo-narita-airport/',
        'Minneapolis': 'http://www.airlinequality.com/airport-reviews/minneapolis-st-paul-airport/',
        'Hamad Doha': 'http://www.airlinequality.com/airport-reviews/hamad-doha-airport/'
    }
    for airport, url in urls.items():
        if airport in list(airports):
            # Already scraped in a previous run -- skip.
            continue
        print(airport)
        data = get_all_reviews(url)
        data['airport'] = airport
        # Fix: DataFrame.append was removed in pandas 2.0; use pd.concat.
        output = pd.concat([output, data], ignore_index=True)
        # Checkpoint after every airport so progress survives crashes.
        output.to_csv("airport_review_complete.csv", index=False)
|
import pytest
from txkoji import Connection
from txkoji.channel import Channel
from txkoji.tests.util import FakeProxy
import pytest_twisted
class TestGetChannel(object):
    """Exercise Connection.getChannel() against a recorded fixture response."""

    @pytest.fixture
    def channel(self, monkeypatch):
        # To create this fixture file:
        # cbs call getChannel 2 \
        # --json-output > txkoji/tests/fixtures/calls/getChannel.json
        monkeypatch.setattr('txkoji.connection.Proxy', FakeProxy)
        koji = Connection('mykoji')
        d = koji.getChannel(2)
        # Resolve the Deferred so the fixture yields a plain Channel object.
        return pytest_twisted.blockon(d)

    def test_type(self, channel):
        assert isinstance(channel, Channel)

    def test_id(self, channel):
        assert channel.id == 2

    def test_name(self, channel):
        assert channel.name == 'createrepo'

    def test_connection(self, channel):
        # The channel keeps a back-reference to the connection that made it.
        assert isinstance(channel.connection, Connection)

    @pytest_twisted.inlineCallbacks
    def test_hosts(self, channel):
        # Expected data mirrors the listHosts fixture for this channel.
        hosts = yield channel.hosts(enabled=True)
        expected = [
            {'arches': 'x86_64 i386',
             'capacity': 16.0,
             'comment': None,
             'description': None,
             'enabled': True,
             'id': 1,
             'name': 'x86_64-0.centos.org',
             'ready': True,
             'task_load': 0.0,
             'user_id': 7},
            {'arches': 'x86_64 i386',
             'capacity': 30.0,
             'comment': None,
             'description': None,
             'enabled': True,
             'id': 3,
             'name': 'x86_64-2.cbs.centos.org',
             'ready': True,
             'task_load': 0.0,
             'user_id': 49}
        ]
        assert hosts == expected

    @pytest_twisted.inlineCallbacks
    def test_total_capacity(self, channel):
        # 16.0 + 30.0 from the two fixture hosts above.
        total_capacity = yield channel.total_capacity()
        expected = 46.0
        assert total_capacity == expected
class TestListChannels(object):
    """Exercise Connection.listChannels() against a recorded fixture response."""

    @pytest.fixture
    def channels(self, monkeypatch):
        # To create this fixture file:
        # cbs call listChannels \
        # --json-output > txkoji/tests/fixtures/calls/listChannels.json
        monkeypatch.setattr('txkoji.connection.Proxy', FakeProxy)
        koji = Connection('mykoji')
        d = koji.listChannels()
        # Resolve the Deferred so the fixture yields a plain list.
        return pytest_twisted.blockon(d)

    def test_type(self, channels):
        assert isinstance(channels, list)

    def test_expected(self, channels):
        # Spot-check the first channel from the fixture data.
        channel = channels[0]
        assert channel.id == 1
        assert channel.name == 'default'
        assert isinstance(channel.connection, Connection)
|
import random
import os, sys
import string
import subprocess
import time
def generate_tests(qty):
    """Build `qty` random hash-table commands: insert, delete or lookup.

    First line is the start size; uses the module-level globals
    start_size, str_length, max_plank and in_arr (tracked inserted keys).
    """
    tests = [str(start_size)]
    for _ in range(qty):
        roll = random.random()
        size = random.randrange(1, str_length, 1)
        letters = string.ascii_lowercase
        if roll >= 0.7:
            # Insert a fresh random string under a random key.
            val = "".join(random.choice(letters) for _ in range(size))
            key = random.randrange(0, max_plank, 1)
            tests.append("+ " + str(val) + " " + str(key))
            in_arr.append(val)
        elif 0.30 <= roll < 0.7:
            if len(in_arr) == 0:
                # Nothing to delete yet -- insert instead.
                val = "".join(random.choice(letters) for _ in range(size))
                key = random.randrange(0, max_plank, 1)
                tests.append("+ " + str(val) + " " + str(key))
                in_arr.append(val)
            else:
                # Delete a previously inserted value picked at random.
                pick = random.randint(0, len(in_arr) - 1)
                tests.append("- " + str(in_arr[pick]))
                in_arr.remove(in_arr[pick])
        elif 0.1 <= roll < 0.3:
            # Plain lookup of a (probably absent) random string.
            val = "".join(random.choice(letters) for _ in range(size))
            tests.append(str(val))
        # rolls below 0.1 intentionally emit nothing (save/load disabled).
    return tests
def makeFile(size, file):
    """Write `size` generated test commands to `file`, one per line."""
    # Fix: 'with' closes the handle even if generate_tests or a write raises
    # (the old open/close pair leaked the handle on error).
    with open(file, "w") as f:
        for line in generate_tests(size):
            f.write(line + '\n')
# main loop
str_length = 256    # max generated string length
max_plank = 10000   # max key value
in_arr = []         # values currently inserted (shared with generate_tests)
# Bug fix: on Python 3 input() returns str, so "range_ != 0" was always
# true and "start_size *= 2" duplicated the string instead of doubling a
# number.  Convert both answers to int up front (harmless on Python 2).
range_ = int(input("How many loop?\t"))
start_size = int(input("Start size?\t"))
os.system("make clean")
os.system("make")
os.system("g++ main.cpp")
os.system("rm text.txt res.txt res2.txt 1")
while range_ != 0:
    makeFile(start_size, "text.txt")
    # Run the project binary and the reference binary on the same input.
    os.system("./main < text.txt >> res.txt")
    os.system("./a.out < text.txt >> res2.txt")
    start_size *= 2
    range_ -= 1
os.system("make clean")
os.system("rm text.txt 1")
|
# Generated by Django 3.2 on 2021-04-28 10:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes Polen.AnalysisDate a plain DateField."""

    dependencies = [
        ('AnalysisApp', '0006_auto_20210428_1039'),
    ]

    operations = [
        migrations.AlterField(
            model_name='polen',
            name='AnalysisDate',
            field=models.DateField(),
        ),
    ]
|
# -*- coding:utf-8 -*-
from enum import Enum
class TinkerErrorCode(Enum):
    """Error codes for the Tinker bot; English text lives in TRANSLATIONS below."""
    MISSING_PARAMETER = 0
    UNKNOWN_COMMAND = 1
    RESERVED_COMMAND = 2
    DUPLICATED_COMMAND = 3
    SEND_MSG_ERROR = 4
    LOGIN_ERROR = 5
    GET_UUID_ERROR = 6
    SYNC_ERROR = 7
    SYNC_CHECK_ERROR = 8
    SYNC_HOST_CHECK_ERROR = 9
    BOT_INIT_ERROR = 10
# Default English message for each TinkerErrorCode; consumed by
# TinkerException.__str__ when no explicit err_msg was supplied.
TRANSLATIONS = {
    TinkerErrorCode.MISSING_PARAMETER: u'you may omit some parameter',
    TinkerErrorCode.UNKNOWN_COMMAND: u'sorry, i can not understand.',
    TinkerErrorCode.RESERVED_COMMAND: u'sorry, this command is a reserved keyword.',
    TinkerErrorCode.DUPLICATED_COMMAND: u'duplicate command.',
    TinkerErrorCode.SEND_MSG_ERROR: u'failed to send msg',
    TinkerErrorCode.LOGIN_ERROR: u'failed to login',
    TinkerErrorCode.GET_UUID_ERROR: u'failed to get uuid',
    TinkerErrorCode.SYNC_CHECK_ERROR: u'failed to sync check',
    TinkerErrorCode.SYNC_HOST_CHECK_ERROR: u'failed to sync host check',
    TinkerErrorCode.BOT_INIT_ERROR: u'failed to init bot',
    TinkerErrorCode.SYNC_ERROR: u'failed to sync'
}
class TinkerException(Exception):
    """Base Tinker error carrying a code and an optional custom message."""

    def __init__(self, err_code, err_msg=None):
        self.err_code = err_code
        self.err_msg = err_msg

    def __str__(self):
        # Prefer the explicit message; otherwise fall back to the canonical
        # translation for the error code.
        message = self.err_msg or TRANSLATIONS[self.err_code]
        return repr(message)
# NOTE(review): "Exceptioin" is a typo, but renaming the class would break
# any existing callers that catch it by name, so it is kept as-is.
class TinkerUserExceptioin(TinkerException):
    pass


class TinkerSystemException(TinkerException):
    pass


class TinkerServerException(TinkerException):
    pass
|
import os
import csv

# Folder containing the transcript text files (CSJ speech data).
path = r"C:\Users\a7825\Desktop\工作空间\语音数据\CSJ\WAV\正解文\zhengjie_1"
for name in os.listdir(path):
    path_1 = os.path.join(path, name)
    # Read every line, drop the first two whitespace-separated fields, and
    # re-join the remaining pieces into one string per line.
    # Fix: 'with' guarantees the handle is closed even if parsing raises.
    with open(path_1, 'r', encoding='utf-8') as src:
        cleaned = []
        for row in csv.reader(src):
            fields = row[0].split()
            # discard the leading two segments
            fields.pop(0)
            fields.pop(0)
            cleaned.append(''.join(fields))
    # Overwrite the original file, one cleaned sentence per line.
    with open(path_1, 'w', encoding='utf-8') as dst:
        for sentence in cleaned:
            dst.write(sentence + '\n')
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
import tinctest
import unittest2 as unittest
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.walrepl.gpactivatestandby import GpactivateStandby
from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby
from mpp.gpdb.tests.storage.walrepl.crash import WalReplKillProcessTestCase
from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility
from mpp.gpdb.tests.storage.walrepl import lib as walrepl
pgutil = GpUtility()
class WalReplKillProcessScenarioTestCase(ScenarioTestCase):
origin_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
def __init__(self, methodName):
self.standby_dir = os.environ.get('MASTER_DATA_DIRECTORY')
self.pgdatabase = self.pgdatabase = os.environ.get('PGDATABASE')
super(WalReplKillProcessScenarioTestCase,self).__init__(methodName)
    def setUp(self):
        """Ensure GPDB is running and forcibly recreate a local standby.

        The standby may have been promoted by a previous test; installing it
        locally is required because remote SQL cannot be run here.
        """
        pgutil.check_and_start_gpdb()
        pgutil.remove_standby()
        pgutil.install_standby(new_stdby_host=socket.gethostname())
        gpact_stdby = GpactivateStandby()
        gpinit_stdb = GpinitStandby()
        # Share the standby coordinates with the kill-process test cases.
        WalReplKillProcessTestCase.stdby_port = gpact_stdby.get_standby_port()
        WalReplKillProcessTestCase.stdby_host = gpinit_stdb.get_standbyhost()
        self.standby_dir = gpact_stdby.get_standby_dd()
    def tearDown(self):
        # Drop filespaces created during the test to leave a clean cluster.
        walrepl.cleanupFilespaces(dbname=os.environ.get('PGDATABASE'))
    @classmethod
    def setUpClass(cls):
        """One-time setup: start GPDB and run the kill-process initial setup."""
        pgutil.check_and_start_gpdb()
        gp_walrepl = WalReplKillProcessTestCase('initial_setup')
        gp_walrepl.initial_setup()
    @classmethod
    def tearDownClass(cls):
        # Remove the standby created in setUp once the whole class is done.
        pgutil.remove_standby()
    def test_failover_run__workload(self):
        '''Activate the standby, run a workload, then check master and
        standby integrity.  Currently supports a local standby only; the
        workload can not be run remotely.
        NOTE(review): the double underscore in the method name looks like a
        typo, but renaming would change the test id, so it is kept.
        '''
        activatestdby = GpactivateStandby()
        activatestdby.activate()
        # Point the environment at the promoted standby for the scenario steps.
        with walrepl.NewEnv(MASTER_DATA_DIRECTORY=self.standby_dir,
                            PGPORT=WalReplKillProcessTestCase.stdby_port,
                            PGDATABASE=self.pgdatabase) as env:
            # Step 1: run DML and DDL workloads.
            test_case_list1 = []
            test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase")
            test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase")
            self.test_case_scenario.append(test_case_list1)
            # Step 2: verify mirror segments are consistent.
            test_case_list2 = []
            test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg")
            self.test_case_scenario.append(test_case_list2)
            # Step 3: verify the data written by the workload.
            test_case_list3 = []
            test_case_list3.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase")
            self.test_case_scenario.append(test_case_list3)
        # Fail back so subsequent tests start from the original master.
        pgutil.failback_to_original_master(self.origin_mdd, WalReplKillProcessTestCase.stdby_host, self.standby_dir,WalReplKillProcessTestCase.stdby_port)
def test_initstandby_run_workload(self):
#run workload while initstandby, check master mirror integrity
pgutil.remove_standby()
test_case_list0 = []
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.gpinitstandby_helper")
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase")
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase")
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg")
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase")
self.test_case_scenario.append(test_case_list2)
def test_initstandby_after_run_workload(self):
#run workload before initstandby, check master mirror integrity
pgutil.remove_standby()
test_case_list0 = []
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase")
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase")
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.gpinitstandby_helper")
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg")
self.test_case_scenario.append(test_case_list2)
test_case_list3 = []
test_case_list3.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase")
self.test_case_scenario.append(test_case_list3)
def test_run_workload_with_standby(self):
#run workload while initstandby already installed, check master mirror integrity
test_case_list0 = []
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase")
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase")
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg")
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase")
self.test_case_scenario.append(test_case_list2)
def test_run_workload_remove_standby(self):
#run workload while removing initstandby, check master mirror integrity
test_case_list0 = []
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.removestandby_helper")
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase")
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase")
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg")
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase")
self.test_case_scenario.append(test_case_list2)
    def test_run_workload_before_activate_standby(self):
        # Run the workload first, then activate the standby, check master
        # mirror integrity, and fail back to the original master.
        # (The original comment here was copy-pasted from the remove-standby
        # test and did not describe this scenario.)
        activatestdby = GpactivateStandby()
        test_case_list0 = []
        test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase")
        test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase")
        self.test_case_scenario.append(test_case_list0)
        # Promote the standby once the workload steps are queued.
        activatestdby.activate()
        test_case_list1 = []
        test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg")
        self.test_case_scenario.append(test_case_list1)
        test_case_list2 = []
        test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase")
        self.test_case_scenario.append(test_case_list2)
        # Restore the original master after the scenario.
        pgutil.failback_to_original_master(self.origin_mdd,WalReplKillProcessTestCase.stdby_host, self.standby_dir,WalReplKillProcessTestCase.stdby_port)
|
from myapp.models import Bike
from django.views.generic.detail import DetailView
class BikeDetailView(DetailView):
    """Render the detail page for a single Bike."""
    model = Bike
    template_name = 'bike_detail.html'
from django.views.generic import ListView
class BikeListView(ListView):
    """Paginated list of all Bikes, three per page."""
    model = Bike
    paginate_by = 3
    #queryset=Bike.objects.filter(type='mountain')
    template_name = 'bike_list.html'
from django.views.generic.edit import CreateView
class BikeCreateView(CreateView):
    """Form view for creating a Bike (type and price fields only)."""
    model = Bike
    template_name = 'bike_create.html'
    fields = ['type','price']
from django.views.generic.edit import UpdateView
class BikeUpdateView(UpdateView):
    """Form view for editing a Bike (type and price fields only)."""
    model = Bike
    template_name = 'bike_update.html'
    fields = ['type','price']
# coding: utf-8
from random import randint
def game():
    """One round: keep prompting until the secret 1..1000 number is guessed."""
    secret = randint(1, 1000)
    guess = -1
    while guess != secret:
        guess = int(input('請猜一個1~1000間的數字:'))
        if guess > secret:
            print('太大了,找小一點喔~')
        elif guess == secret:
            print('恭喜你~~答對了!')
        else:
            print('太小啦,再找大一點吧')
# Keep starting new rounds until the player answers 'no'.
while True:
    game()
    print('--------------')
    if input('再玩一次?') == 'no':
        break
|
import random

# Number-guessing game: the player starts with 10 points and loses one
# point for every wrong guess of a random number between 1 and 10.
score = 10
randomNumber = random.randint(1,10)
while True:
    userNumberInput = int(input('Guess : '))
    if userNumberInput == randomNumber:
        print("Congratulation you guessed it right! your score is" + str(score))
        # Bug fix: the original never left the loop, so the game kept
        # prompting forever even after a correct guess.
        break
    else:
        print('better luck next time')
        score -= 1
import json, gzip, re, requests

# Extract the 'イギリス' (UK) article from the dump, parse its infobox
# into a dict, and print the URL of the flag image via the MediaWiki API.
with gzip.open('jawiki-country.json.gz', 'rt') as f:
    # Perf fix: the original called json.loads twice on every line
    # (once for 'text', once for 'title'); parse each line exactly once.
    text = '\n'.join(d['text'] for d in map(json.loads, f) if d['title'] == 'イギリス')
# Pull out the {{基礎情報 ... }} infobox block.
text = re.compile(r'^\{\{基礎情報.*?$.*?^\}\}$', re.MULTILINE | re.DOTALL).findall(text)[0]
# Strip '' emphasis markup and internal [[link|...]] markup.
pattern = r'\'{2,5}(.*?)(\1)|\[\[(?:[^:\]]+?\|)?|\]\]'
d = dict(re.compile(r'^\|(.*?)\s*=\s*(.*?)$', re.MULTILINE).findall(re.compile(pattern).sub(r'\1', text)))
print(requests.get('https://www.mediawiki.org/w/api.php', params = {
    'action': 'query',
    'titles': 'File:' + d['国旗画像'],
    'format': 'json',
    'prop': 'imageinfo',
    'iiprop': 'url'
}).json()['query']['pages'].popitem()[1]['imageinfo'][0]['url'])
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\_Framework\ModeSelectorComponent.py
# Compiled at: 2018-11-30 15:48:11
from __future__ import absolute_import, print_function, unicode_literals
from .ButtonElement import ButtonElement
from .ControlSurfaceComponent import ControlSurfaceComponent
from .MomentaryModeObserver import MomentaryModeObserver
class ModeSelectorComponent(ControlSurfaceComponent):
    """ Class for switching between modes, handle several functions with few controls """

    def __init__(self, *a, **k):
        super(ModeSelectorComponent, self).__init__(*a, **k)
        self._modes_buttons = []       # one ButtonElement per mode
        self._mode_toggle = None       # optional single button cycling modes
        self._mode_listeners = []      # callbacks fired on every mode change
        self.__mode_index = -1         # -1 = no mode selected yet
        self._modes_observers = {}
        self._modes_heap = []          # stack of (mode, button, observer) requests

    def _get_protected_mode_index(self):
        return self.__mode_index

    def _set_protected_mode_index(self, mode):
        # Subclass-level setter: stores the index and notifies listeners.
        assert isinstance(mode, int)
        self.__mode_index = mode
        for listener in self._mode_listeners:
            listener()

    _mode_index = property(_get_protected_mode_index, _set_protected_mode_index)

    def _get_public_mode_index(self):
        return self.__mode_index

    def _set_public_mode_index(self, mode):
        # The public mode_index property is read-only by design.
        assert False

    mode_index = property(_get_public_mode_index, _set_public_mode_index)

    def disconnect(self):
        """Release observers, buttons and listeners on teardown."""
        self._clean_heap()
        if self._mode_toggle is not None:
            self._mode_toggle.remove_value_listener(self._toggle_value)
            self._mode_toggle = None
        self._modes_buttons = None
        self._mode_listeners = None
        super(ModeSelectorComponent, self).disconnect()

    def on_enabled_changed(self):
        self.update()

    def set_mode_toggle(self, button):
        """Assign (or clear) a single button that cycles through the modes."""
        assert button is None or isinstance(button, ButtonElement)
        if self._mode_toggle is not None:
            self._mode_toggle.remove_value_listener(self._toggle_value)
        self._mode_toggle = button
        if self._mode_toggle is not None:
            self._mode_toggle.add_value_listener(self._toggle_value)
        self.set_mode(0)

    def set_mode_buttons(self, buttons):
        """Assign one button per mode (a tuple of 1..16 ButtonElements)."""
        assert buttons is not None
        assert isinstance(buttons, tuple)
        assert len(buttons) - 1 in range(16)
        for button in buttons:
            assert isinstance(button, ButtonElement)
            identify_sender = True
            button.add_value_listener(self._mode_value, identify_sender)
            self._modes_buttons.append(button)
        self.set_mode(0)

    def set_mode(self, mode):
        """Make `mode` the only (latched) entry on the mode heap."""
        self._clean_heap()
        self._modes_heap = [(mode, None, None)]
        if self._mode_index != mode:
            self._update_mode()

    def _update_mode(self):
        # The top of the heap is the currently winning mode request.
        mode = self._modes_heap[-1][0]
        assert mode in range(self.number_of_modes())
        if self._mode_index != mode:
            self._mode_index = mode
            self.update()

    def _clean_heap(self):
        for _, _, observer in self._modes_heap:
            if observer is not None:
                observer.disconnect()
        self._modes_heap = []

    def number_of_modes(self):
        # Subclasses must report how many modes they provide.
        raise NotImplementedError

    def mode_index_has_listener(self, listener):
        return listener in self._mode_listeners

    def add_mode_index_listener(self, listener):
        assert listener not in self._mode_listeners
        self._mode_listeners.append(listener)

    def remove_mode_index_listener(self, listener):
        assert listener in self._mode_listeners
        self._mode_listeners.remove(listener)

    def _mode_value(self, value, sender):
        """Handle a value change from one of the per-mode buttons."""
        assert len(self._modes_buttons) > 0
        assert isinstance(value, int)
        assert sender in self._modes_buttons
        new_mode = self._modes_buttons.index(sender)
        if sender.is_momentary():
            if value > 0:
                # Press: push the request with an observer that tracks
                # whether the button is being held momentarily.
                mode_observer = MomentaryModeObserver()
                mode_observer.set_mode_details(new_mode, self._controls_for_mode(new_mode), self._get_public_mode_index)
                self._modes_heap.append((new_mode, sender, mode_observer))
                self._update_mode()
            elif self._modes_heap[-1][1] == sender and not self._modes_heap[-1][2].is_mode_momentary():
                # Release after a non-momentary use: latch this mode.
                self.set_mode(new_mode)
            else:
                # Release after a momentary hold: pop this button's request.
                for mode, button, observer in self._modes_heap:
                    if button == sender:
                        self._modes_heap.remove((mode, button, observer))
                        break

                self._update_mode()
        else:
            self.set_mode(new_mode)

    def _toggle_value(self, value):
        """Advance to the next mode when the toggle button fires."""
        assert self._mode_toggle is not None
        assert isinstance(value, int)
        # Bug fix: the original used `value is not 0` -- an identity test
        # against an int literal that only works because of CPython's
        # small-int caching (and warns on modern Python). Use `!=`.
        if value != 0 or not self._mode_toggle.is_momentary():
            self.set_mode((self._mode_index + 1) % self.number_of_modes())

    def _controls_for_mode(self, mode):
        # Subclasses may return the controls owned by `mode` so momentary
        # observers can watch them; default: none.
        return

    def _on_timer(self):
        for _, _, mode_observer in self._modes_heap:
            if mode_observer is not None:
                mode_observer.on_timer()
from dataentry.models import BorderStation
def border_stations_processor(request):
    """Django context processor exposing all BorderStations sorted by name."""
    stations = BorderStation.objects.order_by('station_name')
    return {'border_stations': stations}
|
# Build [3, 6, 9, 12] with an explicit loop.
a = [1,2,3,4]
result = []
for i in a :
    result.append(i*3)
print(result)
##################################
# Same result with a list comprehension.
a = [1,2,3,4]
result = [num * 3 for num in a]
print(result)
##################################
# Comprehension with a filter: only even elements -> [6, 12].
a = [1,2,3,4]
result = [num * 3 for num in a if num % 2 ==0]
print(result)
'''
[리스트 내포 문법]
표현식 for 항목 in 반복_가능_객체 if 조건
'''
# Read N integers from the user and print their sum.
NumList = []
Number = int(input("Please enter the Total Number of List Elements : "))
for i in range(1, Number + 1):
    value = int(input("Please enter the Value of %d Element : " %i))
    NumList.append(value)
# Idiom fix: use the built-in sum() instead of a manual accumulation loop.
total = sum(NumList)
print("\n The Sum of All Element in this List is : ", total)
# -*- coding: utf-8 -*-
import pytest
import ffmpeg
from ffmpeg.formatctx import *
from ffmpeg.lib import *
from fractions import Fraction
#from ffmpeg.error import FFMPEGException, AVERROR_STREAM_NOT_FOUND
from pprint import pprint as pp
id_h264 = avcodec.AV_CODEC_ID_H264
@pytest.fixture(scope='module')
def setup():
    # Module-scoped fixture: register all ffmpeg formats/codecs once.
    register_all()
def test_open_input(setup):
    # LOCAL TEST ONLY
    # Decode a local fixture, grab the first video frame at or after t=15s
    # and display it. NOTE(review): Python 2 syntax (print statements) --
    # this file cannot run under Python 3 without porting.
    path = 'tests/data/film佐伯.mp4'
    fmt_ctx = FormatCtx.open(path)
    assert InputFormat == type(fmt_ctx)
    print fmt_ctx
    fmt_ctx.open_decoder()
    #print fmt_ctx.video_codec_ctx, fmt_ctx.video_codec_ctx.coder
    #print fmt_ctx.audio_codec_ctx, fmt_ctx.audio_codec_ctx.coder
    pp( fmt_ctx.to_primitive(True) )
    img = None
    which_frame = None
    # Walk the frames until we pass the 15-second mark, remembering the
    # most recent video frame seen.
    for frame in fmt_ctx.next_frame():
        t = float(frame.pkt_pts_f)
        if frame.type == 'video':
            which_frame = frame
        if t >= 15.0:
            break
    if which_frame: img = which_frame.process()
    fmt_ctx.close_decoder()
    if img: img.show()
@pytest.mark.skipif(True, reason="not using check_ret since more complicated exception handling")
def test_open_input_when_error(setup, mocker):
    # Currently skipped: verifies that a failing avformat_open_input is
    # surfaced as an FFMPEGException with a readable message.
    path = 'tests/data/film佐伯.mp4'
    mocker.patch('ffmpeg.formatctx.avformat_open_input',
                 return_value=AVERROR_STREAM_NOT_FOUND)
    with pytest.raises(FFMPEGException) as excinfo:
        fmt_ctx = FormatCtx.open(path)
    assert "Stream not found" in excinfo.value.message
def test_create_output(setup):
    # Mux ~1 second of generated video and audio into a webm container,
    # interleaving packets by always writing the stream whose next
    # presentation timestamp is smaller.
    path_out = 'tests/logs/output.webm'
    fmt_ctx = FormatCtx.create(path_out)
    fmt_ctx.create_video_stream()
    fmt_ctx.create_audio_stream()
    fmt_ctx.open_encoder()
    fmt_ctx.write_header()
    while fmt_ctx.v_next_pts_f <= 1.0:
        if fmt_ctx.v_next_pts_f <= fmt_ctx.a_next_pts_f:
            fmt_ctx.write_video_frame()
        else:
            fmt_ctx.write_audio_frame()
    # Flush both encoders, draining the stream that is behind first.
    if fmt_ctx.v_next_pts_f <= fmt_ctx.a_next_pts_f:
        fmt_ctx.flush_video_frame()
        fmt_ctx.flush_audio_frame()
    else:
        fmt_ctx.flush_audio_frame()
        fmt_ctx.flush_video_frame()
    fmt_ctx.write_trailer()
    fmt_ctx.close_encoder()
|
#!/usr/bin/env python
# coding: utf-8
# ## Black Light, White Light
# Zumi can recognize simple colors. In this demo, we will control Zumi using black and white cards.
#
# ### Step 1: Setup
# <div>
# <br/><p>In Jupyter Notebook, code is written inside boxes called "cells". You can run a cell by clicking on it, then clicking the "Run" button <img src="../Data/images/run.png" style="display: inline"> in the second toolbar.<p/>
# </div>
#
# Try running the cell below to set up Zumi to recognize black and white.
# In[ ]:
print("Cell activated!\n")
print("Now setting up Zumi...\n")
import sys
# Make the demo package (outside this notebook's folder) importable.
sys.path.insert(0,'../Data/demo-run')
import demo_BW_light as demo
print("\n\nDone!")
# <div>
# If you run into any errors, you can click the "Restart" button <img src="../Data/images/reset.png" style="display: inline"> in the second toolbar to try again. Clicking this button will force you to restart from Step 1.
# </div>
#
# Also, did you notice the "In \[ \]:" text next to the cell? While the code in a cell is still running, there will be an asterisk inside the box, like this: "In \[\*\]:". You can tell a cell is finished because there will be a number in there instead, like this: "In \[1\]". The number is the number of cells you've ran since you started the notebook, so the first cell you run will have a "1" inside the box, the second cell will have a "2", and so on.
#
# ### Step 2: Play with Zumi
#
# Now that Zumi has been set up, run the cell below to start controlling Zumi.
#
# * Place Zumi somewhere so she can move around.
# * Once the demo starts, place a white or black card in front of Zumi, and press enter.
# * A white card will make Zumi go forward, while a black card will make her stop.
# * <div>You can end the demo by pressing the "Stop" button <img src="../Data/images/stop.png" style="display: inline"> in the second toolbar, or by typing "q" before pressing enter.</div>
# In[ ]:
# Start the interactive black/white card demo (runs until the user quits).
demo.run()
|
import sys
# Competitive-programming style I/O: read from input.txt, write to
# output.txt (input.txt must exist in the working directory).
sys.stdin = open('input.txt', 'r')
sys.stdout = open('output.txt', 'w')
# Text and pattern for the demo search at the bottom of the file.
txt="onionionspl"
pat="onions"
# Knuth-Morris-Pratt string search
def computeLPSarray(LPS, pat, m):
    """Fill LPS in place with the longest-proper-prefix-suffix table of pat.

    LPS[i] is the length of the longest proper prefix of pat[:i+1] that is
    also a suffix of it.

    Bug fix: the original incremented i *before* storing the length, which
    wrote the value to the wrong slot and indexed LPS[m] out of range
    (e.g. pat='aa' raised IndexError).
    """
    if m == 0:
        return
    LPS[0] = 0
    length = 0  # length of the currently matched prefix
    i = 1
    while i < m:
        if pat[i] == pat[length]:
            length += 1
            LPS[i] = length
            i += 1
        elif length != 0:
            # Fall back to the next shorter candidate prefix; do not advance i.
            length = LPS[length - 1]
        else:
            LPS[i] = 0
            i += 1


def KMPsearch(txt, pat):
    """Print and return the start index of every occurrence of pat in txt.

    Bug fixes vs the original: (1) on a mismatch with j > 0 it no longer
    also advances i, which skipped valid matches ('onions' in
    'onionionspl' was never found); (2) after a full match j is reset via
    the LPS table instead of indexing pat[m] (IndexError); (3) the
    leftover debug print of the LPS table is removed.
    """
    n = len(txt)
    m = len(pat)
    if m == 0:
        return []
    LPS = [0] * m
    computeLPSarray(LPS, pat, m)
    matches = []
    i = j = 0
    while i < n:
        if txt[i] == pat[j]:
            i += 1
            j += 1
            if j == m:
                print(i - j)
                matches.append(i - j)
                j = LPS[j - 1]
        elif j > 0:
            j = LPS[j - 1]
        else:
            i += 1
    return matches
KMPsearch(txt,pat) |
from room import Room
from player import Player
# Declare all the rooms
# Map of room-key -> Room(title, description) for the adventure world.
room = {
    'outside':  Room("Outside Cave Entrance",
                     "North of you, the cave mount beckons"),

    'foyer':    Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),

    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""),

    'narrow':   Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air."""),

    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""),
}
# Link rooms together using dot notation
# (n_to/s_to/e_to/w_to attributes are set dynamically on each Room).
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
#
# Main
#
### Notes - to make the game run ###
# My game starts with a welcome message, and explanation of the scenario, and asks what their player name is.
# Then, It will run the functions I've written.
# After the game, it will ask if you want to play again. If yes, run the functions again. If no, it will exit the game.
### Defining my functions ###
# Make a new player object that is currently in the 'outside' room.
# def start_game():
# player_name = input("Choose your username: \n")
# greet_user(player_name)
# player = Player(player_name, room["outside"])
def greet_user(name):
    """Greet the player, re-prompting until a non-empty name is given.

    Bug fix: the original `while True` never terminated -- after printing
    the greeting for a valid name it looped and printed it forever.
    """
    while True:
        if len(name) > 0:
            greeting = f"\n Hello {name}! Let's begin the adventure. Navigate to different rooms to find the hidden treasure. Good luck!\n"
            print(greeting)
            return
        print("Please provide a username.")
        name = input("Type your name: \n")
# Write a loop that:
#
# * Prints the current room name
# * Prints the current description (the textwrap module might be useful here).
# * Waits for user input and decides what to do.
#
# If the user enters a cardinal direction, attempt to move to the room there.
# Print an error message if the movement isn't allowed.
#
# If the user enters "q", quit the game.
|
import urllib
import gzip
def get_data(contract_name, date, prefix='CTP', saved_filename=''):
    """Download a gzipped tick file from S3 and return its decompressed content.

    :param contract_name: contract symbol, e.g. 'AUAZ7'
    :param date: date folder on the server, e.g. '20171114'
    :param prefix: data-source folder (default 'CTP')
    :param saved_filename: optional base name for the local .gz file;
        defaults to the contract name
    NOTE(review): Python 2 only -- urllib.urlretrieve moved to
    urllib.request.urlretrieve in Python 3.
    """
    url = 'https://s3.amazonaws.com/wanlitech-data/polled/1s/' + date + '/' + prefix + '/' + contract_name + '.top.gz'
    print(url)
    zfile_name = ''
    if saved_filename == '':
        zfile_name = zfile_name + contract_name+'.gz'
    else:
        zfile_name = zfile_name + saved_filename +'.gz'
    print(zfile_name)
    urllib.urlretrieve(url, zfile_name)
    content = gzip.GzipFile(zfile_name).read()
    return content
def AddIndex(content, index):
    """Split space-separated file content into per-column lists keyed by index.

    The first line defines the expected column count; lines with a
    different field count are reported and skipped.  The final line of
    the split (usually the empty string after a trailing newline) is
    always ignored.
    """
    rows = content.split('\n')
    result = {key: [] for key in index}
    expected = len(rows[0].split(' '))
    if len(index) != expected:
        print('index\' size is incorrect!')
        return result
    for row_no in range(len(rows) - 1):
        fields = rows[row_no].split(' ')
        if len(fields) != expected:
            print("error line in line", row_no, ", this line's size is", len(fields), ", target size is ", expected)
            continue
        for col, key in enumerate(index):
            result[key].append(fields[col])
    return result
# test_dict = content_dict(zip(b['time'], content_dict['last_price'])) # contract a dict indexing time
# sorted(test_dict.iteritems(), key=lambda d:d[0], reverse=True)[0] # sort in the dict using d[0]->time
def GenGridData(content):
    # TODO: unfinished -- splits the content into lines but builds nothing
    # and implicitly returns None.
    line_content = content.split('\n')
#c = get_data('AUAZ7', '20171114')
|
# Generated by Django 2.0.2 on 2018-07-07 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter bmlog.time to a 5-char CharField (HH:MM style) with '00:00'
    used as the one-off default for existing rows (preserve_default=False)."""

    dependencies = [
        ('gutlife_app', '0011_auto_20180707_0944'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bmlog',
            name='time',
            field=models.CharField(default='00:00', max_length=5),
            preserve_default=False,
        ),
    ]
|
#test for expression retargeting
#testA is the expression embedding folder, testB is the face model folder
import os
from v2eNet import v2e
from utils.gridModel import gridModel
import numpy as np
import cv2
from utils.dataPreprocess import deDataNormalizeDelta
import torch
from utils.faceData import faceData
from torch.utils.data import DataLoader
def saveResult(resultFolder,images,imgPath,faceGrid,delta):
    """Map a generated expression image back onto the face model and save it.

    images: dict with 'neu' (neutral) and 'out' (network output) tensors of
    shape (1, C, H, W); when `delta` is true the output is a normalized
    delta that is de-normalized and added to the neutral face first.
    """
    neutral = np.transpose(images['neu'].cpu().numpy()[0], (1, 2, 0))
    generated = np.transpose(images['out'].cpu().detach().numpy()[0], (1, 2, 0))
    if delta:
        generated = deDataNormalizeDelta(generated) + neutral
    vertices = faceGrid.map2Model(generated)
    faceGrid.saveFaceModel(vertices, os.path.join(resultFolder, imgPath))
def saveData(resultFolder,data,saveName,faceGrid):
    """Map a (1, C, H, W) image tensor onto the face model and write it to disk."""
    image = np.transpose(data.numpy()[0], (1, 2, 0))
    vertices = faceGrid.map2Model(image)
    faceGrid.saveFaceModel(vertices, os.path.join(resultFolder, saveName))
if __name__ == '__main__':
    # Expression-retargeting driver: load the v2e model, save the input
    # expressions as meshes, then transfer A's expression embedding onto
    # B's neutral face and save the results.
    expression='expParam'
    delta=True                 # network output is a delta over the neutral face
    embedding_nc=14            # size of the expression embedding
    loadModel='checkpoints/ckpt.pth'
    dataRoot='dataset/'
    saveFolder='results'
    dataFolder='testA'
    dataA=faceData(dataRoot+dataFolder, expression,delta,embedding_nc)
    datasetA = DataLoader(dataA, 1, shuffle=False,num_workers=0)
    # NOTE(review): the header comment says testB holds the face models,
    # but dataB is loaded from 'testA' again -- confirm this is intended
    # (self-retargeting) and not a typo for 'testB'.
    dataFolder='testA'
    dataB=faceData(dataRoot+dataFolder, expression,delta,embedding_nc)
    datasetB = DataLoader(dataB, 1, shuffle=False,num_workers=0)
    model =v2e(input_nc=3, output_nc=3,embeddingNum=embedding_nc,inputSize=256)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using device: '+str(device))
    model.to(device=device)
    model.load_state_dict(torch.load(loadModel, map_location=device))
    print('Load model from: '+loadModel)
    model.eval()
    modelTemplate='dataset/meryShow.obj'
    faceArea=np.array([152,871,192,868])/1024 #face area in uv space for mery
    faceGrid=gridModel(modelTemplate=modelTemplate,faceArea=faceArea)
    #save models
    # Export every ground-truth A expression as a mesh for reference.
    for i, dataA in enumerate(datasetA):
        if delta:
            deltaB=deDataNormalizeDelta(dataA['exp'])
            saveData(dataRoot+saveFolder,dataA['neu']+deltaB,'A'+str(i),faceGrid)
        else:
            saveData(dataRoot+saveFolder,dataA['exp'],'A'+str(i),faceGrid)
    #for j, dataB in enumerate(datasetB):
    #    saveData(dataRoot+saveFolder,dataB['neu'],'B'+str(j),faceGrid)
    # Transfer A[i]'s embedding onto B[i]'s neutral face (pairs with i == j only).
    for i, dataA in enumerate(datasetA):
        for j, dataB in enumerate(datasetB):
            if not i==j:
                continue
            neu=dataB['neu']
            exp=dataB['exp']
            embedding=dataA['Expression']
            neu = neu.to(device=device, dtype=torch.float32)
            exp = exp.to(device=device, dtype=torch.float32)
            embedding=embedding.to(device=device, dtype=torch.float32)
            output = model(neu,embedding)
            images={'neu':neu,'exp':exp,'out':output}
            saveResult(dataRoot+saveFolder,images,str(i)+'-'+str(j),faceGrid,delta)
            print('Finish{0}-{1}({2}-{3})'.format(i+1,j+1,len(datasetA),len(datasetB)))
|
class Restaurant():
    """A class representing a restaurant."""

    def __init__(self, name, cuisineType):
        """Initialize the restaurant with its name and cuisine type."""
        self.name = name
        self.cuisineType = cuisineType

    def describeRestaurant(self):
        """Display a summary of the restaurant."""
        print(f"{self.name} has the best {self.cuisineType}")

    def __repr__(self):
        # Added for debuggability; existing behavior is unchanged.
        return f"Restaurant(name={self.name!r}, cuisineType={self.cuisineType!r})"
# Quick smoke test: build a Restaurant and exercise its attributes/method.
myRestaurant = Restaurant ("Pepe's Pasta", "Spaghetti")
print (myRestaurant.name)
print (myRestaurant.cuisineType)
myRestaurant.describeRestaurant ()
|
from kappa.dao.DAO import DAO
from kappa.dao import ConnectionManager
from kappa.models.ImageModel import ImageModel
from kappa.controllers.FaceVectorController import FaceVectorController
from kappa.controllers.ObjectVectorController import ObjectVectorController
from kappa.dao.ObjectVectorDAO import ObjectVectorDAO
from kappa.dao.FaceVectorDAO import FaceVectorDAO
class ImageDAO(DAO):
    """Data-access object for rows of the ``Image`` table.

    NOTE(review): every query below builds SQL by string concatenation,
    which is vulnerable to SQL injection if any value originates from
    user input.  Switch to parameterized queries if ConnectionManager
    supports them -- TODO confirm its API.
    """

    def __init__(self):
        super().__init__()

    def _rows_to_models(self, res):
        # Shared helper (deduplicates getAll/getAllOrderByDate): convert raw
        # rows to ImageModel objects with their face/object vectors attached.
        oVectDao = ObjectVectorDAO()
        fVectDao = FaceVectorDAO()
        imageList = []
        for elem in res:
            imageList.append(ImageModel(elem[0], elem[1], elem[2], elem[3],
                                        elem[4], elem[5], elem[6],
                                        fVectDao.getByImageId(elem[0]),
                                        oVectDao.getByImageId(elem[0])))
        return imageList

    def getAllOrderByDate(self):
        """Return all images as ImageModels, ordered by creation date."""
        cm = ConnectionManager.ConnectionManager('KappaBase.db')
        res = cm.executeSQL("SELECT * FROM Image order by creation_date")
        return self._rows_to_models(res)

    def getAll(self):
        """Return all images as ImageModels in table order."""
        cm = ConnectionManager.ConnectionManager('KappaBase.db')
        res = cm.executeSQL("SELECT * FROM Image")
        return self._rows_to_models(res)

    def getById(self, id):
        """Return the raw row(s) for the given image id."""
        cm = ConnectionManager.ConnectionManager('KappaBase.db')
        # NOTE(review): string-built SQL -- injection risk if id is untrusted.
        res = cm.executeSQL("SELECT * FROM Image where id_image =" + id + ";")
        return res

    def getNextId(self):
        """Return MAX(id_image) + 1, or 0 for an empty table."""
        # Bug fix: the original connected to 'KappaBase' (no '.db' suffix),
        # unlike every other method in this class.
        cm = ConnectionManager.ConnectionManager('KappaBase.db')
        res = cm.executeSQL("SELECT MAX(id_image) FROM IMAGE")
        res2 = res
        for elem in res:
            if elem[0] is None:
                res2 = 0
                break
            res2 = elem[0] + 1
        return res2

    def linkToVector(self, imgModel, vector):
        """Insert an (image, vector) association row into Include."""
        cm = ConnectionManager.ConnectionManager('KappaBase.db')
        res = cm.executeAndCommitSQL("INSERT INTO Include (id_image, id_vector) VALUES (" + str(imgModel.id) + "," + str(vector.id) + ")")

    def update(self, imageMod):
        """Not implemented yet."""
        print("update")

    def create(self, imgModel):
        """Insert a new Image row.

        Bug fix: the original INSERT dropped the comma and quotes between
        the comment and creation_date values and supplied no value for the
        width column, producing malformed SQL for all seven columns.
        """
        print("create")
        cm = ConnectionManager.ConnectionManager('KappaBase.db')
        # NOTE(review): assumes imgModel has a width attribute (the table
        # and the 9-argument ImageModel constructor suggest it) -- confirm.
        res = cm.executeAndCommitSQL(
            "INSERT INTO Image (id_image, comment,creation_date,length,width,size,path) VALUES ("
            + str(imgModel.id) + ", \"" + imgModel.comment + "\", \"" + str(imgModel.creation_date)
            + "\", " + str(imgModel.length) + "," + str(imgModel.width) + "," + str(imgModel.size)
            + ",\"" + str(imgModel.path) + "\")")
|
import speech_recognition as sr
import os
import sys
import re
import webbrowser
import smtplib
import requests
import subprocess
from pyowm import OWM
import youtube_dl
import urllib.request
import json
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen
import wikipedia
import random
from time import strftime
def sofiaResponse(audio):
    """Print the response and speak it aloud line by line (macOS `say`).

    Bug fix: the original passed the whole `audio` string to `say` once
    per line, so a multi-line message was spoken in full len(lines) times.
    Using an argument list (no shell) also avoids quoting/injection issues
    that os.system("say " + text) had.
    """
    print(audio)
    for line in audio.splitlines():
        subprocess.call(["say", line])
def myCommand():
    """Listen on the microphone and return the recognized command, lower-cased.

    Recurses (re-listens) whenever Google speech recognition cannot
    understand the audio, so it only returns once a command is parsed.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Say something...')
        r.pause_threshold = 1
        # Calibrate for background noise before capturing the phrase.
        r.adjust_for_ambient_noise(source, duration=1)
        audio = r.listen(source)
    try:
        command = r.recognize_google(audio).lower()
        print('You said: ' + command + '\n')
    #loop back to continue to listen for commands if unrecognizable speech is received
    except sr.UnknownValueError:
        print('....')
        command = myCommand();
    return command
def assistant(command):
    """Dispatch a recognized (lower-cased) voice command to its action."""
    #open subreddit Reddit
    if 'open reddit' in command:
        reg_ex = re.search('open reddit (.*)', command)
        url = 'https://www.reddit.com/'
        if reg_ex:
            subreddit = reg_ex.group(1)
            url = url + 'r/' + subreddit
        webbrowser.open(url)
        sofiaResponse('The Reddit content has been opened for you .')

    elif 'hello' in command:
        # Greet according to the time of day.
        day_time = int(strftime('%H'))
        if day_time < 12:
            sofiaResponse('Hello . Good morning')
        elif 12 <= day_time < 18:
            sofiaResponse('Hello . Good afternoon')
        else:
            sofiaResponse('Hello . Good evening')

    elif 'thank you' in command:
        # Bug fix: `command` is lower-cased by myCommand(), so the original
        # check for 'Thank you' could never match -- the exit branch was dead.
        sofiaResponse('Bye bye . Have a nice day')
        sys.exit()

    elif 'open' in command:
        reg_ex = re.search('open (.+)', command)
        if reg_ex:
            domain = reg_ex.group(1)
            print(domain)
            # NOTE(review): the hard-coded '/watch?v=...' suffix only makes
            # sense for youtube; other domains get a nonsense path -- confirm.
            url = 'https://www.' + domain + '.com/watch?v=l9v1ewQXv5M'
            webbrowser.open(url)
            sofiaResponse('The website you have requested has been opened for you .')

    else:
        pass
# Main loop: listen for a voice command and dispatch it, forever.
while True:
    assistant(myCommand())
|
def fibonacci_sum(n: int) -> int:
    """Return the sum of the first n Fibonacci numbers (0, 1, 1, 2, 3, ...).

    Bug fix: the original recursive helper never terminated for n < 1
    (it decremented n past the base cases, ending in RecursionError) and
    hit the recursion limit for large n.  The iterative form raises a
    clear ValueError for invalid n and runs in O(n) with O(1) space.
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    total = 0
    prev, curr = 0, 1
    for _ in range(n):
        total += prev
        prev, curr = curr, prev + curr
    return total
# Expected output: 0, 1, 2, 4, 7 (running sums of 0, 1, 1, 2, 3, ...).
print(fibonacci_sum(1))
print(fibonacci_sum(2))
print(fibonacci_sum(3))
print(fibonacci_sum(4))
print(fibonacci_sum(5))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
:module:
========
Prints a report of incidents, happened through connection to service, by\
ip and username.
:arguments: module get optional argument - minimal number of incidents per\
one user. If not, all icdinets will be printed.
:task:
======
Изучите примеры к лекции, запустите программу с примерами
Напишите программу, анализа инцидентов несанкционированного доступа,\
отраженных в log файле, находящемся в zip архиве data11dz.zip
Для анализа строк log файла и извлечения данных используйте регулярные выражения
Каждый инцидент соответствует одной строке log файла. В строке инцидента\
в обязательном порядке присутствует ключевое слово from за которым следует\
IP адрес атакующего компьютера. В строке инцидента также присутствует\
имя пользователя, используемое для атаки
Напечатайте отчет инцидентов по IP адресам в виде:
- Заголовок отчета: # Report by IP
- Строки отчета в виде: <IP адрес> <пробел> <количество инцидентов>
- IP адрес должен быть помещен в поле шириной 15 символов\
и быть выравнен по левому краю
- Количество инцидентов должно быть помещено в поле шириной 6 символов\
и быть выравнено по правому краю
- Строки отчета должны быть отсортированы\
по количеству инцидентов в порядке убывания
Напечатайте отчет инцидентов по именам пользователя в виде:
- Заголовок отчета: # Report by user
- Строки отчета в виде: <имя пользователя> <пробел> <количество инцидентов>
- Имя пользователя должно быть помещено в поле шириной 15 символов\
и быть выравнено по левому краю
- Количество инцидентов должно быть помещено в поле шириной 6 символов\
и быть выравнено по правому краю
- Строки отчета должны быть отсортированы по количеству инцидентов\
в порядке убывания
Программа должна воспринимать один параметр - число инцидентов.\
В отчеты включаются только те строки, для которых число инцидентов\
больше или равно числу, заданному параметром. Если программа\
была запущена без параметров, в отчет включаются все строки.
"""
import zipfile
import re
import os
import sys
from collections import Counter, OrderedDict
def generate_report(file):
    """
    This function generates lists of incidents by ip and by user

    :args file: - <file> - iterable of log lines, where the computation is done
    :returns: <list> of ip incidents, list of user incidents
    :debugging: counters of incidents to compare with (printed under -O only)
    """
    # Fix: regex patterns were plain strings with invalid escape sequences
    # ("user\s..." warns on modern Python) and were recompiled per line;
    # use raw strings and compile once before the loop.
    keyword_re = re.compile(r'from')
    root_re = re.compile(r"root")
    user1_re = re.compile(r"user\s[a-zA-Z0-9]+")
    user2_re = re.compile(r"for\s[a-zA-Z0-9]+")
    ip_re = re.compile(r'(([2][5][0-5]\.)|([2][0-4][0-9]\.)|([0-1]?[0-9]?[0-9]\.)){3}(([2][5][0-5])|([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')
    list_of_ip = []
    list_of_users = []
    # debugging
    counter_of_incidents = 0
    counter_of_ip_inc = 0
    real_incidents = 0
    count_root_inc = 0
    count_user_inc1 = 0
    count_user_inc2 = 0
    for line in file:
        # str() tolerates bytes lines (e.g. from a zip member).
        text = str(line)
        if keyword_re.search(text):
            counter_of_incidents += 1
            root_incidents = root_re.search(text)
            user1 = user1_re.search(text)
            user2 = user2_re.search(text)
            if root_incidents:
                count_root_inc += 1
                list_of_users.append(root_incidents.group())
            if user1 and not root_incidents:
                count_user_inc1 += 1
                # strip the leading "user " prefix
                list_of_users.append(user1.group()[5:])
            if user2 and not root_incidents and not user1:
                count_user_inc2 += 1
                # strip the leading "for " prefix
                list_of_users.append(user2.group()[4:])
            if root_incidents or user1 or user2:
                real_incidents += 1
            ip = ip_re.search(text)
            if ip:
                counter_of_ip_inc += 1
                list_of_ip.append(ip.group())
    if not __debug__:
        print("counter of all incidents", counter_of_incidents)
        print("counter of ip incidents", counter_of_ip_inc)
        print("real incidents", real_incidents)
        print("count_root_inc", count_root_inc)
        print("count_user_inc1", count_user_inc1)
        print("count_user_inc1", count_user_inc2)
        print("sum of inc", count_root_inc + count_user_inc1 + count_user_inc2)
    return list_of_ip, list_of_users
def count_elems(list_):
    """Build a frequency map of the elements of *list_*.

    :params list_: <list>
    :returns: <dict> mapping each element to its number of occurrences
    """
    counted = dict(Counter(list_))
    if not __debug__: print("dictionary of counted events", counted)
    return counted
def dict_sorted_by_values(dict_, order):
    """Return an OrderedDict of *dict_* sorted by its values.

    :param dict_: <dict> - dictionary to sort
    :param order: <str> - 'reversed' sorts descending, anything else ascending
    :returns: sorted <OrderedDict> by values
    """
    descending = order == 'reversed'
    pairs = sorted(dict_.items(), key=lambda item: item[1], reverse=descending)
    return OrderedDict(pairs)
def print_report(dict_, ip__user, order, n_incidents):
    """
    Print a report of incident counts sorted by value.
    :param dict_: <dict> - dictionary to print report
    :param ip__user: <str> - if "ip", print an IP header; if "user", a user header
    :param order: <str> - if "reversed", sort in descending order,\
        else - in ascending order
    :param n_incidents: <int|None> - minimal number of incidents to show
        (None means show everything)
    :returns: None
    """
    # `is None` (identity check) instead of `== None`.
    n_incidents = 0 if n_incidents is None else int(n_incidents)
    if ip__user == 'ip': print('# Report by IP')
    elif ip__user == 'user': print('# Report by user')
    d_sorted = dict_sorted_by_values(dict_, order)
    # .items() instead of zip(keys(), values()).
    for key, value in d_sorted.items():
        if value >= n_incidents:
            print("{ip:<15} {N_incidents:>6}".format(ip=key, N_incidents=value))
def main():
    """
    Main function, obtains # of incidents, opens zip, and processes\
    file "log*.txt"; finally, prints a report
    """
    # CLI: argv[1] = minimal incident count, argv[2] = directory to scan.
    n_incidents = sys.argv[1] if len(sys.argv) > 1 else None
    dirname = sys.argv[2] if len(sys.argv) > 2 else None
    for filename in sorted(os.listdir(dirname or '.')):
        if filename.startswith('data') and filename.endswith('.zip'):
            with zipfile.ZipFile(filename, 'r') as z1:
                if not __debug__:
                    z1.printdir()
                    print(z1.namelist())
                for log_file in z1.namelist():
                    if log_file.startswith('log') and log_file.endswith('.txt'):
                        # NOTE(review): this opens `log_file` from the working
                        # directory, not from inside the archive (z1.open) —
                        # it only works if the logs are also extracted; confirm.
                        with open(log_file) as f:
                            list_of_ip, list_of_users = generate_report(f)
                        if not __debug__: print(list_of_ip, list_of_users)
                        ip_counted = count_elems(list_of_ip)
                        user_counted = count_elems(list_of_users)
                        if not __debug__:
                            # Debug-only dump of the sorted tables.
                            print(ip_counted, user_counted)
                            sorted_ip_counted = dict_sorted_by_values(
                                ip_counted, order = 'reversed')
                            sorted_user_counted = dict_sorted_by_values(
                                user_counted, order = 'reversed')
                            print(sorted_ip_counted, sorted_user_counted)
                            print("# of elements:", sum(sorted_ip_counted.values()))
                            print("# of elements:", sum(sorted_user_counted.values()))
                        print_report(ip_counted, 'ip', 'reversed', n_incidents)
                        print_report(user_counted, 'user', 'reversed', n_incidents)
if __name__ == '__main__':
    # Profile the whole run; cProfile prints its statistics to stdout.
    import cProfile
    cProfile.run('main()')
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class NotifyConfig(AppConfig):
    """Django app configuration for the on-site message ("notify") app."""
    name = "notify"
    label = "notify"
    verbose_name = "站内消息"  # user-facing name ("site messages")
|
from path import open
from path import set
def main():
    # NOTE: Python 2 source (print statements). `open` and `set` here are
    # the modules imported from `path`, shadowing the builtins.
    matrixfile = open.import_file()
    matrix = open.get_matrix(matrixfile)
    set.printmatrix(matrix)
    print"\n"
    print open.get_startpoint(matrixfile)
    print open.get_endpoint(matrixfile)
if __name__ == '__main__':
    main()
from TravelingSalesmanProblem import TSP
from deap import tools, base, algorithms, creator
import array, random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from eaSimple_withElitism import eaSimpleWithElitism
TSP_NAME = "bayg29"
tsp = TSP(TSP_NAME)
# Genetic Constants
MAX_GENERATION = 250
POPULATION = 500
P_CROSSOVER = 0.9
P_MUTATION = 0.1
HALL_OF_FAME_NUMBER = 50
# Define the fitness strategy
creator.create("FitnessMin", base.Fitness, weights=(-1.,))
# Creating the chromosome
creator.create("Individual", array.array, typecode="i", fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("randomOrder", random.sample, range(len(tsp)), len(tsp))
toolbox.register("IndividualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
toolbox.register("populationCreator", tools.initRepeat, list, toolbox.IndividualCreator)
def tspDistance(individual):
    """Fitness function: total route length, as a 1-tuple (DEAP convention)."""
    distance = tsp.getTotalDistance(individual)
    return (distance,)
toolbox.register("evaluate", tspDistance)
# register three genetic operators : Select, mate, mutate
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mate", tools.cxOrdered)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1. / len(tsp))
def main():
    """Run the elitist GA, plot the convergence curves and the best route."""
    population = toolbox.populationCreator(n=POPULATION)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("MIN", np.min)
    stats.register("MEAN", np.mean)
    hof = tools.HallOfFame(HALL_OF_FAME_NUMBER)
    population, logbook = eaSimpleWithElitism(population=population,
                                              toolbox=toolbox,
                                              cxpb=P_CROSSOVER,
                                              mutpb=P_MUTATION,
                                              ngen=MAX_GENERATION,
                                              halloffame=hof,
                                              stats=stats,
                                              verbose=True)
    # Per-generation statistics (the first series was misleadingly named
    # `maxFitnessValues` although it holds the MIN statistic).
    minFitnessValues, meanFitnessValues = logbook.select("MIN", "MEAN")
    sns.set_style("whitegrid")
    plt.plot(minFitnessValues, color='red')
    plt.plot(meanFitnessValues, color='green')
    plt.title("Traveling Salesman Problem")
    plt.ylabel("Min/Mean Values")  # fixed: the red curve is the minimum, not the maximum
    plt.xlabel("Generations")
    plt.legend(["MIN", "MEAN"])
    plt.show()
    print(f"--Best ever individual: {hof.items[0]}")
    print(f"--Best ever fitness: {hof.items[0].fitness.values[0]}")
    plot = tsp.plotData(hof.items[0])
    plot.show()
# Script entry point.
if __name__ == '__main__':
    main()
|
import torch
import torch.nn.functional as F
from model.base_builder import BaseBuilder
from model.backbone.efficientnet import EfficientNetB0
import model.backbone.resnet_atrous as res_atr
from model.neck.co_neck import UCR, Adjust
from model.head.co_head import ManHead, CoDWRPN
from model.loss.rpn_loss import select_cross_entropy_loss, weight_l1_loss
class CoModelBuilder(BaseBuilder):
    """Tracker model: one shared backbone feeding a matching-activation
    branch (MAN) and a co-attentional RPN branch (CoRPN).

    The MAN branch yields a matching activation map (MAM); search-feature
    pixels whose activation reaches ERASETH are erased before the second
    RPN classification pass.
    """
    def __init__(self, backbone='resnet50c3'):
        """
        :param backbone: 'resnet50c3' or 'efficientnetb0'
        :raises ValueError: for any other backbone name
        """
        super(CoModelBuilder, self).__init__()
        self.backbone_type = backbone
        # Bug fix: compare strings with `==`, not `is` — identity on string
        # literals only works by accident of CPython interning and can fail
        # for strings built at runtime.
        if self.backbone_type == 'resnet50c3':
            self.backbone = res_atr.resnet50(used_layers=[2,],
                frozen_layers=['conv1', 'bn1', 'relu', 'maxpool', 'layer1', ])
            feat_channels = 512
        elif self.backbone_type == 'efficientnetb0':
            self.backbone = EfficientNetB0(pretrained=True, frozen_blocks=5)
            feat_channels = 320
        else:
            raise ValueError('backbone should be one of: resnet50c3 and efficientnetb0 !')
        self.man_neck = UCR(in_channels=feat_channels, out_channels=1024, center_size=7)
        self.man_head = ManHead()
        self.corpn_neck = Adjust(in_channels=feat_channels, out_channels=256, center_size=7)
        self.corpn_head = CoDWRPN(anchor_num=5, in_channels=256, out_channels=256)
        # MAM values >= this threshold get erased from the search feature.
        self.ERASETH = 0.9
    def forward(self, data):
        """
        only used in training
        :param data: dict with 'template'/'search' images and the
            cls/loc/fc labels plus weights (all moved to GPU here)
        :returns: dict of losses; 'total_loss' is the training objective
        """
        template = data['template'].cuda()
        search = data['search'].cuda()
        label_cls = data['label_cls'].cuda()
        label_loc = data['label_loc'].cuda()
        label_loc_weight = data['label_loc_weight'].cuda()
        label_fc = data['label_fc'].cuda()
        label_fc_weight = data['label_fc_weight'].cuda()
        z = self.backbone(template)
        x = self.backbone(search)
        zf_man = self.man_neck(z)
        xf_man = self.man_neck(x)
        dw_cls_man, cls_man = self.man_head(zf_man, xf_man)
        # Build the erase mask from the matching activation map.
        MAM = self.get_match_active_map(dw_cls_man.detach(), cls_man, xf_man.detach())
        pos = torch.ge(MAM, self.ERASETH)
        mask = torch.ones(x.size(0), x.size(2), x.size(3)).cuda()
        mask[pos.data] = 0.0
        mask = torch.unsqueeze(mask, dim=1)
        zf = self.corpn_neck(z)
        xf = self.corpn_neck(x)
        co_xf = xf * mask  # search feature with high-activation pixels erased
        cls_rpn1, cls_rpn2, loc = self.corpn_head(zf, xf, co_xf)
        # loss
        loss_cls_man = F.binary_cross_entropy_with_logits(cls_man, label_fc, weight=label_fc_weight)
        loss_cls_rpn1 = select_cross_entropy_loss(cls_rpn1, label_cls)
        # renamed from `lss_cls_rpn2` for consistency with the other losses
        loss_cls_rpn2 = select_cross_entropy_loss(cls_rpn2, label_cls)
        loss_loc = weight_l1_loss(loc, label_loc, label_loc_weight)
        # get total loss
        outputs = {}
        outputs['total_loss'] = loss_cls_man + (loss_cls_rpn1 + loss_cls_rpn2) / 2.0 + 1.2 * loss_loc
        outputs['man_loss'] = loss_cls_man
        outputs['ori_cls_loss'] = loss_cls_rpn2
        outputs['co_cls_loss'] = loss_cls_rpn1
        outputs['loc_loss'] = loss_loc
        return outputs
    def get_match_active_map(self, responses, cls, feature_map):
        """
        Compute the matching activation map, refer as MAM
        :param responses: per-channel response maps, shape (b, c, h, w)
        :param cls: classification map whose argmax gives the global peak
        :param feature_map: search feature; fixes the output spatial size
        :returns: (b, H, W) map, min-max normalised per sample
            (stays zero for samples without any active channel)
        """
        b, c, h, w = responses.shape
        cls = cls.view(b, -1)
        _, maxi = torch.max(cls, dim=1)
        # Bug fix: flat index -> (x, y) needs floor division for the row
        # coordinate; `/` is true division on modern PyTorch (old versions
        # floor-divided integer tensors, which `//` restores).
        peaks = torch.stack([maxi % h, maxi // h], dim=1).float().cuda()
        MAM = torch.zeros([b, feature_map.size(-2), feature_map.size(-1)]).cuda()
        for bi in range(b):
            # global peak location for this sample
            peak = peaks[bi, :]
            # compute response peak loc per channel
            response = responses[bi].view(c, -1)
            _, maxi = torch.max(response, dim=1)
            loc = torch.stack([maxi % h, maxi // h]).float().cuda()
            # channels whose peak lies within L1-distance 2 of the global peak
            active = torch.sum(torch.abs((loc - peak.unsqueeze(1))), dim=0) <= 2
            num_chs = torch.sum(active)
            if num_chs > 0:
                # min-max normalise each active channel, then sum them
                atten_maps = feature_map[bi][active]
                ch_mins, _ = torch.min(atten_maps.view(num_chs, -1), dim=-1, keepdim=True)
                ch_maxs, _ = torch.max(atten_maps.view(num_chs, -1), dim=-1, keepdim=True)
                atten_normed = torch.div(atten_maps.view(num_chs, -1) - ch_mins,
                                         ch_maxs - ch_mins)
                atten_normed = atten_normed.view(atten_maps.shape)
                cmap = torch.sum(atten_normed, dim=0)
                # normalise the summed map to [0, 1]
                val_min = torch.min(cmap)
                val_max = torch.max(cmap)
                MAM[bi, :, :] = (cmap - val_min) / (val_max - val_min)
        return MAM
    def template(self, template):
        """Cache the template features for subsequent track() calls."""
        z = self.backbone(template)
        self.fa_z = self.man_neck(z)
        self.zf = self.corpn_neck(z)
    def track(self, search):
        """Inference step: returns RPN cls/loc outputs and the MAN response."""
        x = self.backbone(search)
        fa_x = self.man_neck(x)
        xf = self.corpn_neck(x)
        # get man response map
        dw_cls_man, cls_man = self.man_head(self.fa_z, fa_x)
        response = cls_man.squeeze()
        MAM = self.get_match_active_map(dw_cls_man, cls_man, fa_x)
        pos = torch.ge(MAM, self.ERASETH)
        mask = torch.ones(x.size(0), x.size(2), x.size(3)).cuda()
        mask[pos.data] = 0.0
        mask = torch.unsqueeze(mask, dim=1)
        co_xf = xf * mask
        # get CoRPN outputs
        cls_rpn1, cls_rpn2, loc = self.corpn_head(self.zf, xf, co_xf)
        return {
            'cls1': cls_rpn1,
            'cls2': cls_rpn2,
            'loc': loc,
            'response': response,
        }
    def flops_forward(self, search):
        """
        only used in flops counting (forward() with a dummy template)
        """
        template = torch.randn(1, 3, 127, 127).cuda()
        z = self.backbone(template)
        x = self.backbone(search)
        zf_man = self.man_neck(z)
        xf_man = self.man_neck(x)
        dw_cls_man, cls_man = self.man_head(zf_man, xf_man)
        MAM = self.get_match_active_map(dw_cls_man.detach(), cls_man, xf_man.detach())
        pos = torch.ge(MAM, self.ERASETH)
        mask = torch.ones(x.size(0), x.size(2), x.size(3)).cuda()
        mask[pos.data] = 0.0
        mask = torch.unsqueeze(mask, dim=1)
        zf = self.corpn_neck(z)
        xf = self.corpn_neck(x)
        co_xf = xf * mask
        cls_rpn1, cls_rpn2, loc = self.corpn_head(zf, xf, co_xf)
        return cls_rpn1, cls_rpn2, loc
|
from re import *
import urllib.request
import sys
def find_emails(text):
    """Return every email-address-looking substring of *text*."""
    pattern = r"[a-zA-Z0-9\&\~._%\*\?\{\}+-]+@[a-zA-Z\&\~\_.-]+\.[a-zA-Z]{2,4}"
    return findall(pattern, text)
def url2html(url):
    """Download *url* and return its whole body decoded as UTF-8 text."""
    with urllib.request.urlopen(url) as response:
        return response.read().decode('utf-8')
def find_hyperlinks(text):
    """
    Extract absolute hyperlink targets from *text*.

    Two regex variants are kept on purpose: a simple, fast one used when
    this module runs as a script, and a stricter (slower) one used when
    the module is imported (e.g. by get_all_emails).
    """
    if __name__=="__main__":
        # Fast variant: higher failure rate, no file-extension handling.
        proto = "https?\:\/\/"
        host = "[\w\.\,\~]+"
        tail = "[\w\.\,\~\/]+"
        return findall(r"href=\"({0}{1}{2})\">".format(proto, host, tail), text)
    # Strict variant, used when imported.
    proto = "(?:http(?:s)?:\/\/)?"
    host = "(?:(?:www\.)?[a-zA-Z0-9-\.+-]+\.\w{2,3}(?:\/| |$|\n))"
    tail = "(?:[a-zA-Z0-9+~-]+\/?)*"
    ext = "(?:[a-z0-9A-Z]+\.[a-zA-Z]+)?"
    return findall(r"\<a href=\"({0}{1}{2}{3})\"".format(proto, host, tail, ext), text)
def find_relative_hyperlinks(text, absurl):
    """Resolve relative <a href="..."> targets in *text* against *absurl*."""
    proto = "(?:http(?:s)?:\/\/)?"
    host = "(?:(?:www\.)?[a-zA-Z0-9-\.+-]+\.\w{2,3}(?:\/| |$|\n))"
    base = findall(r"{0}{1}".format(proto, host), absurl)[0]
    rel_targets = findall(r'<a href="\/?((?:[a-zA-Z0-9+~-]+\/?)+(?:\.[a-zA-Z0-9]*)?)', text)
    return [base + target for target in rel_targets if "http" not in target]
def get_all_emails(url, emails=None, visited_adresses=None, maxsites=100, maxlevel=3, thislevel=0):
    """
    Recursively crawl starting at *url*, collecting email addresses.

    :param url: start/current URL
    :param emails: set accumulating the addresses found (created on the
        first call, then shared down the recursion)
    :param visited_adresses: list of URLs already visited, shared across
        the recursion and used as a blacklist / stop condition
    :param maxsites: stop once this many sites have been visited
    :param maxlevel: maximum recursion depth
    :param thislevel: current recursion depth
    :returns: (visited_adresses, emails), or None when a stop condition hit
    """
    # Bug fix: the defaults used to be `set()` / `[]` — mutable default
    # arguments shared between *independent* top-level calls.
    if emails is None:
        emails = set()
    if visited_adresses is None:
        visited_adresses = []
    if url in visited_adresses or len(visited_adresses) > maxsites or thislevel > maxlevel:
        return None
    if url not in visited_adresses:
        visited_adresses.append(url)
        try:
            html = url2html(url)
            url_emails = find_emails(html)
            print("visiting:", url, "\t\t- emails:", len(url_emails))
            for adress in url_emails:
                emails.add(adress)
            for link in find_hyperlinks(html) + find_relative_hyperlinks(html, url):
                get_all_emails(link, emails, visited_adresses, maxsites, maxlevel, thislevel + 1)
        except (KeyboardInterrupt, SystemExit):
            sys.exit()
        except Exception:
            # Best-effort crawl: skip pages that fail to download/parse.
            pass
    return visited_adresses, emails
if __name__=='__main__':
    # Crawl, dump every address to the file "test", report the totals.
    urls, emails = get_all_emails("https://lucidtech.io/", maxsites=100, maxlevel=3)
    with open("test", mode='w') as f:
        for email in emails:
            f.write(email + "\n")
    print("Visited:", len(urls), "\nEmails:", len(emails))
# Reason phrases for the HTTP status codes this module knows how to emit.
http_messages = {
    200 : 'OK'
}
def __build_info(status):
    """Return the HTTP/1.1 status line for *status*, CRLF-terminated."""
    return f"HTTP/1.1 {str(status)} {http_messages[status]}\r\n"
def build_info(status,content):
    """
    Return the status line plus a Content-Length header for *content*.

    :param status: numeric status code (must be a key of http_messages)
    :param content: response body, anything supporting len()
    :returns: status line and Content-Length header, each CRLF-terminated
    """
    # Reuse the status-line builder instead of duplicating its format string.
    response = __build_info(status)
    response += f"Content-Length: {str(len(content))}\r\n"
    return response
def build_headers(headers):
    """Serialize a mapping of header names to values into CRLF-ended lines."""
    lines = [f"{str(key)}: {str(value)}\r\n" for key, value in headers.items()]
    return "".join(lines)
def build_cookies(cookies):
    """
    Serialize cookie dicts (see create_cookie) into Set-Cookie header lines.

    :param cookies: iterable of dicts with 'name', 'value' and optional
        'expiration', 'secure', 'httponly' entries
    :returns: one CRLF-terminated "Set-Cookie: ..." line per cookie
    """
    response = ""
    for cookie in cookies:
        response += f"Set-Cookie: {cookie['name']}={cookie['value']}"
        if 'expiration' in cookie:
            response += f"; Expires={cookie['expiration']}"
        # .get() so hand-built dicts missing the boolean flags no longer
        # raise KeyError (previously cookie['secure'] / cookie['httponly']).
        if cookie.get('secure'):
            response += f"; Secure"
        if cookie.get('httponly'):
            response += f"; HttpOnly"
        response += '\r\n'
    return response
def create_cookie(name,value,expiration = None,secure = False,httponly = False):
    """Build the cookie dict consumed by build_cookies."""
    cookie = {'name': name, 'value': str(value), 'secure': secure, 'httponly': httponly}
    # 'expiration' is only present when explicitly requested.
    if expiration is not None:
        cookie['expiration'] = str(expiration)
    return cookie
def build_body(content):
    """Wrap the stringified *content* in a leading and trailing CRLF."""
    body = str(content)
    return "\r\n" + body + "\r\n"
def getRank(num):
    """Map a hit count (0..6) to its lotto rank (6 = lowest, 1 = jackpot)."""
    ranks = (6, 6, 5, 4, 3, 2, 1)
    return ranks[num]
def solution(lottos, win_nums):
    """Return [best, worst] possible ranks; zeros act as wildcard numbers."""
    winning = set(win_nums)
    matched = sum(1 for number in lottos if number in winning)
    wildcards = lottos.count(0)
    return [getRank(matched + wildcards), getRank(matched)]
def main():
    """Smoke-test solution() on a sample draw."""
    lottos = [44, 1, 0, 0, 31, 25]
    win_nums = [31, 10, 45, 1, 6, 19]
    print(solution(lottos, win_nums))


if __name__ == '__main__':
    main()
import numpy as np
import sys
sys.path.insert(0, '../../../network')
from neuralNetwork import network
import matplotlib
matplotlib.use("TKAGG")
import matplotlib.pyplot as plt
class creature(object):
    """A 2-D agent whose steering comes from a small neural network."""
    def __init__(self, brain = None):
        super(creature, self).__init__()
        self.brain = brain
        self.r = np.zeros(2)  # position
        self.v = np.ones(2)   # velocity
        # NOTE(review): this divides by the sum of squares (== 2 here), not
        # by the Euclidean norm; if unit speed was intended it should be
        # np.sqrt(np.sum(self.v*self.v)). Left as-is to preserve behavior.
        self.v/= np.sum(self.v*self.v)
        # Bug fix: identity check `is None` instead of `== None` (the
        # latter may call an arbitrary __eq__ on a supplied brain).
        if brain is None:
            self.brain = network([4,10,2])
    def step(self):
        """Advance one time step; relies on the module-level `dt`."""
        # Feed position + velocity (4 numbers) to the brain; its last-layer
        # output, centered around 0.5, acts as an acceleration.
        info = self.r.tolist()+self.v.tolist()
        self.v += (self.brain.feedForward(info)[-1]-np.array([.5,.5]))*dt
        self.r += self.v*dt
# GA hyper-parameters and the initial random population.
numGenerations = 100
populationSize = 500
population = [creature() for i in range(populationSize)]
dt = .1  # integration time step used by creature.step()
def collide(a,b):
    """Return True when points *a* and *b* are closer than 10 units."""
    distance = np.sqrt((a[0]-b[0])**2+(a[1]-b[1])**2)
    return bool(distance < 10)
def breed(n1,n2):
    """ n1 and n2 are brains"""
    # Uniform crossover: `mask` randomly assigns each weight position to
    # one parent, `inverse_mask` to the other; two complementary children
    # are built from the same masks.
    c1 = []
    c2 = []
    for layer1,layer2 in zip(n1.layers,n2.layers):
        mask = np.rint(np.random.random(layer1.shape))
        inverse_mask = np.abs(mask-1)
        c = layer2*mask +layer1*inverse_mask
        # Mutation: overwrite 20 random (possibly repeated) positions with
        # the boolean negation of the current value.
        x = np.random.randint(0,c.shape[0],20)
        y = np.random.randint(0,c.shape[1],20)
        c[x,y] = np.logical_not(c[x,y])
        c1.append(c)
        # Mirror-image child from the complementary mask assignment.
        c = layer1*mask +layer2*inverse_mask
        x = np.random.randint(0,c.shape[0],20)
        y = np.random.randint(0,c.shape[1],20)
        c[x,y] = np.logical_not(c[x,y])
        c2.append(c)
    c1 = network('',layers = c1)
    c2 = network('',layers = c2)
    return([creature(brain = c1),creature(brain = c2)])
def getCost(n):
    """
    Cost function to create a creature that goes toward some food point:
    run 100 steps from a fixed start state and return the squared
    distance of the final position from the origin.
    """
    n.r = np.zeros(2)
    n.v = np.ones(2)
    for i in range(100):
        n.step()
    # Bug fix: was r[0]**2 + r[0]**2 — x counted twice, y ignored.
    cost = n.r[0]**2 + n.r[1]**2
    return(cost)
# Evolution loop (Python 2 print statements): rank the population by cost,
# report progress every 10 generations, stop early once the best cost is
# (nearly) zero, and replace the two worst individuals with children bred
# from the two best.
for year in range(numGenerations):
    population.sort(key=lambda x: getCost(x))
    a = getCost(population[0])
    if year%10==0:
        print year
        print ''
    if a<.0001:
        print 'yahoo!'
        break
    children = breed(population[0].brain,population[1].brain)
    population[-1] = children[0]
    population[-2] = children[1]
# Final ranking after the loop ends.
population.sort(key=lambda x: getCost(x))
# import numpy as np
# import sys
# sys.path.insert(0, '../../../network')
# from neuralNetwork import network
# import matplotlib
# matplotlib.use("TKAGG")
# import matplotlib.pyplot as plt
# numGenerations = 100
# populationSize = 20
# population = [creature() for i in range(populationSize)]
# dt = 10
#!/usr/bin/env python
# import numpy as np
# import time
# import matplotlib
# matplotlib.use('TKAgg')
# from matplotlib import pyplot as plt
def iter2(individual):
    """ A simple random walk with memory """
    # Print the cost (Python 2 print statement), then re-run the individual
    # from the standard start state, tracing its trajectory for 100 steps.
    print getCost(individual)
    posX = []
    posY = []
    individual.r = np.zeros(2)
    individual.v = np.ones(2)
    for i in range(100):
        individual.step()
        posX.append(individual.r[0])
        posY.append(individual.r[1])
    plt.plot(posX,posY)
    # plt.show()
# plt.show()
# Draw every individual's trajectory on one figure, then display it.
for individual in population:
    iter2(individual)
plt.show()
# def getPos(population):
# pos = []
# for individual in population:
# posX.append(individual.r[0])
# posY.append(individual.r[1])
# return(posX,posY)
# def iter():
# """ A simple random walk with memory """
# while True:
# posX,posY = np.random.random((2,populationSize))
# posX = posX.tolist()
# posY = posY.tolist()
# for individual in population:
# individual.step()
# posX.append(individual.r[0])
# posY.append(individual.r[1])
# yield [posX,posY]
# def run(niter=1000, doblit=True):
# """
# Display the simulation using matplotlib, optionally using blit for speed
# """
# fig, ax = plt.subplots(1, 1)
# ax.set_aspect('equal')
# ax.set_xlim(-3, 3)
# ax.set_ylim(-3, 3)
# ax.hold(True)
# rw = iter()
# x, y = rw.next()
# plt.show(False)
# plt.draw()
# if doblit:
# # cache the background
# background = fig.canvas.copy_from_bbox(ax.bbox)
# points = ax.plot(x, y, 'o')[0]
# tic = time.time()
# for ii in xrange(niter):
# # update the xy data
# x, y = rw.next()
# points.set_data(x, y)
# if doblit:
# # restore background
# fig.canvas.restore_region(background)
# # redraw just the points
# ax.draw_artist(points)
# # fill in the axes rectangle
# fig.canvas.blit(ax.bbox)
# else:
# # redraw everything
# fig.canvas.draw()
# plt.close(fig)
# print "Blit = %s, average FPS: %.2f" % (
# str(doblit), niter / (time.time() - tic))
# if __name__ == '__main__':
# run(doblit=False)
# run(doblit=True) |
import boto3
import paho.mqtt.client as mqtt
# Module-level MQTT client; configured and started at the bottom of the file.
mqttc = mqtt.Client()
def SendMsg():
    # Send an SMS alert to the two phone numbers via AWS SNS
    # (sns publish with PhoneNumber delivers a direct text message).
    client = boto3.client("sns", "us-east-1")
    phone = "+919702007220"
    phone1 = "+XX"  # placeholder number — TODO: fill in a real one
    EndMsg = "Hi Mummy Send Using AWS SNS"
    Msg = client.publish(PhoneNumber=phone, Message=EndMsg)
    Msg1 = client.publish(PhoneNumber=phone1, Message=EndMsg)
    # NOTE(review): only the first response is printed; Msg1 is unused.
    print(Msg)
def on_message(mosq, obj, msg):
    """MQTT callback: log the message; send an SMS when the reading exceeds 30."""
    print(" ".join([msg.topic, str(msg.qos), str(msg.payload)]))
    if int(msg.payload) > 30:
        SendMsg()
# Wire the callback and block forever, feeding every message published on
# the "test" topic (QoS 0) from the local broker into on_message().
mqttc.on_message = on_message
mqttc.connect("192.168.0.101", 1883, 60)
mqttc.subscribe("test", 0)
mqttc.loop_forever()
|
#!/usr/bin/env fontforge -script
import pickle
import fontforge
import psMat
IPA_FONT = "ipagp.ttf"
PATCH_FONT = "textar-patch.otf"
MSPG_GLYPH_DATA = "mspg_glyph_data.pickle"
OUTPUT_FILE = "textar.sfd"
def scale(glyph, scalex, scaley):
    """Scale *glyph* in place by the given x/y factors."""
    matrix = psMat.scale(scalex, scaley)
    glyph.transform(matrix)
def move(glyph, x, y):
    """Translate *glyph* in place by (x, y)."""
    matrix = psMat.translate(x, y)
    glyph.transform(matrix)
def copy_from_patch_font(font, patch_font, encodings):
    """Copy the glyphs at *encodings* from *patch_font* into *font*."""
    font.selection.none()
    patch_font.selection.none()
    # Accumulate the full set of code points in both selections, then do a
    # single copy/paste, and clear the selections again.
    for code in encodings:
        font.selection.select(("more", None), code)
        patch_font.selection.select(("more", None), code)
    patch_font.copy()
    font.paste()
    font.selection.none()
    patch_font.selection.none()
def fix_bbox(width, bbox, ascent, descent):
    """
    Adjust a glyph bounding box taken from the MSPG glyph data.

    Boxes exactly 100 units wide/tall get widened by 40 units on the side
    with more room, and the vertical extents are rescaled from a 1400/200
    ascent/descent split to 1408/192.

    NOTE(review): the ascent/descent parameters are unused; kept for
    interface compatibility.
    """
    xmin, ymin, xmax, ymax = bbox
    ascent_scale = 1408.0 / 1400
    descent_scale = 192.0 / 200
    if xmax - xmin == 100:
        if width - xmax > xmin:
            xmax += 40
        else:
            xmin -= 40
    if ymax - ymin == 100:
        if 1400 - ymax > ymin + 200:
            ymax += 40
        else:
            ymin -= 40
    # Positive extents scale with the ascent, non-positive with the descent.
    ymin = ymin * (ascent_scale if ymin > 0 else descent_scale)
    ymax = ymax * (ascent_scale if ymax > 0 else descent_scale)
    return (xmin, ymin, xmax, ymax)
# Glyphs exempt from horizontal scaling; the value gives the alignment rule
# applied to the left side bearing instead (used in the main loop below).
glyphs_ignore_scalex = {
    0x49: "center",    # I
    0x4a: "right",     # J
    0x66: "left",      # f
    0x72: "left",      # r
    0x399: "center",   # greek capital letter iota
    0x2160: "center",  # roman numeral one
    0x2161: "center",  # roman numeral two
    0xff29: "center",  # fullwidth latin capital letter I
}
f = open(MSPG_GLYPH_DATA, "rb")
mspg_glyph_data = pickle.load(f)
f.close()
# Open the IPA base font, set the em size, and select all glyphs
# (code point 32 is added to the selection explicitly).
font = fontforge.open(IPA_FONT)
font.em = 1600
font.selection.all()
font.selection.select(("more", None), 32)
# Rescale every selected glyph so its bounding box matches the MSPG box,
# then copy over the MSPG side bearing and advance width.
for glyph in font.selection.byGlyphs:
    d = mspg_glyph_data.get(glyph.encoding)
    if d and glyph.isWorthOutputting():
        mspg_width = d["width"]
        mspg_bbox = fix_bbox(mspg_width, d["bbox"], font.ascent, font.descent)
        mspg_lbearing = mspg_bbox[0]
        width = glyph.width
        bbox = glyph.boundingBox()
        mspg_boxw = mspg_bbox[2] - mspg_bbox[0]
        mspg_boxh = mspg_bbox[3] - mspg_bbox[1]
        boxw = bbox[2] - bbox[0]
        boxh = bbox[3] - bbox[1]
        # Guard against zero-sized boxes (space-like glyphs).
        if boxw:
            scalex = mspg_boxw / boxw
        else:
            scalex = 1
        if boxh:
            scaley = mspg_boxh / boxh
        else:
            scaley = 1
        bearing_type = glyphs_ignore_scalex.get(glyph.encoding)
        if bearing_type:
            # Exempt glyphs: scale vertically only, then re-align the
            # bearing according to the configured rule.
            scale(glyph, 1, scaley)
            if bearing_type == "right":
                mspg_lbearing += mspg_boxw - boxw
            elif bearing_type == "center":
                mspg_lbearing += (mspg_boxw - boxw) / 2
        else:
            scale(glyph, scalex, scaley)
        # Align the glyph's bottom edge with the target box's bottom edge.
        move(glyph, 0, mspg_bbox[1] - glyph.boundingBox()[1])
        glyph.left_side_bearing = mspg_lbearing
        glyph.width = mspg_width
# Pull replacement glyphs for these code points from the patch font, then
# write out the result (Python 2 FontForge script — print statement below).
patch_font = fontforge.open(PATCH_FONT)
encodings = [
    int(0x22), int(0x27), int(0x2002), int(0x2003), int(0x2009),
    int(0x2016), int(0x2211), int(0x2212), int(0x30FC), int(0xff70), int(65536),
]
copy_from_patch_font(font, patch_font, encodings)
font.save(OUTPUT_FILE)
print "Saved a SFD file. Run the 'gen2.pe' script."
|
from flask import Flask
from flask import request
app = Flask(__name__)  # instance: the new Flask application object
@app.route('/')
def index():
    """Landing page ("Bienvenido")."""
    return 'Bienvenido'
@app.route('/PlayApps_201810')
def PlayApps_201810():
    """Static page identifying the PlayApps_201810 route."""
    return 'PlayApps_201810'
@app.route('/PlayApps_201811')
def PlayApps_201811():
    """Static page identifying the PlayApps_201811 route."""
    return 'PlayApps_201811'
@app.route('/PlayApps_201812')
def PlayApps_201812():
    """Static page identifying the PlayApps_201812 route."""
    return 'PlayApps_201812'
# Example: http://localhost:5000/params?params1=Erick_Barcenas
@app.route('/params')
def params():
    """Echo the `params1` and `params2` query-string parameters."""
    first = request.args.get('params1', 'no contiene el parámetro')
    second = request.args.get('params2', 'no contiene el parámetro 2')
    return 'El parametro uno es: {}, y el dos es: {}'.format(first, second)
if __name__ == '__main__':
    # Development server only (debug=True); listens on port 5000.
    app.run(debug=True, port=5000)
import enum
import attr
import bitstruct
import crcmod
import structattr
from structattr.types import UInt, Enum, Bool, Zero, One, FixedPointSInt
from typing import Union
# 16-bit frame CRC: polynomial 0x1021 (MSB-first), init 0, no final XOR —
# these parameters match the CRC-16/XMODEM variant.
crc_func = crcmod.mkCrcFun(0x11021, initCrc=0, xorOut=0, rev=False)
class ElcobusFrame:
    """Base class for bus frames: a decoded ElcobusMessage or, when the
    payload cannot be decoded, an UnknownFrame holding raw bytes."""
    __slots__ = ()
    @classmethod
    def from_bytes(cls, frame: Union[bytes, bytearray]) -> Union['ElcobusMessage', 'UnknownFrame']:
        """
        Attempts to parse the given `frame`
        if `frame` supports item deletion (`del frame[0:5]`), the parsed bytes
        are removed from `frame`.
        :param frame: byte sequence to parse
        :raises BufferError when the message is incomplete
        :raises ValueError when the message is invalid
        :return: The decoded ElcobusMessage, or UnknownFrame
        """
        if len(frame) < 4:
            raise BufferError("Not enough data to decode message")
        # Byte 3 is the total frame length, header and CRC included.
        dlen = frame[3]
        if dlen < 4+2:
            raise ValueError("Invalid message: length can't be < 6")
        if dlen > 32: # largest seen is 27
            raise ValueError("Invalid message: length > 32")
        if len(frame) < dlen:
            raise BufferError("Not enough data to decode message")
        # The last two bytes carry a big-endian CRC over everything before them.
        crc_actual, = bitstruct.unpack(">u16", frame[(dlen-2):dlen])
        crc_should = crc_func(frame[0:(dlen-2)])
        if crc_actual != crc_should:
            raise ValueError(f"CRC mismatch, got 0x{crc_actual:04x}, expected 0x{crc_should:04x}")
        this_frame = frame[0:(dlen-2)]
        try:
            # Consume the parsed bytes when `frame` is mutable.
            del frame[0:dlen]
        except TypeError:
            # message is bytes, not bytearray. ignore
            pass
        try:
            return ElcobusMessage.from_bytes(this_frame)
        except ValueError as e:
            # CRC-valid but undecodable: keep the raw header + data.
            return UnknownFrame(header=this_frame[0:3], data=this_frame[4:])
@attr.s(slots=True)
class ElcobusMessage(ElcobusFrame):
    """A decoded bus message: 3 header bytes, a length byte, a 5-byte body
    (type, logical source/destination, 16-bit field id), optional data,
    and a trailing CRC."""
    class StartOfFrame(Enum(6)):
        StartOfFrame = 0xdc >> 2
    # --- header (serialized by attributes[0:7]) ---
    _start_of_frame = attr.ib(type=StartOfFrame, default=StartOfFrame.StartOfFrame)
    _unkn1 = attr.ib(type=Bool, default=False)
    _const1 = attr.ib(type=Zero, default=Zero.Zero)
    _const2 = attr.ib(type=One, default=One.One)
    source_address = attr.ib(type=UInt(7), default=0)
    _const3 = attr.ib(type=Zero, default=Zero.Zero)
    destination_address = attr.ib(type=UInt(7), default=0)
    # length, implicit
    class MessageType(Enum(8)):
        Info = 2
        Set = 3
        Ack = 4
        Get = 6
        Ret = 7
    # --- body (serialized by attributes[7:11]) ---
    message_type = attr.ib(type=MessageType, default=MessageType.Info)
    logical_source = attr.ib(type=UInt(8), default=0)
    logical_destination = attr.ib(type=UInt(8), default=0)
    field = attr.ib(type=UInt(16), default=0)
    data = attr.ib(default=None)
    # crc, implicit
    @classmethod
    def from_bytes(cls, frame: bytes) -> 'ElcobusMessage':
        """Decode a CRC-stripped frame into an ElcobusMessage.
        :raises ValueError: when a dataless message type carries data
        """
        attributes = attr.fields(cls)
        # Header: first 7 attributes packed into frame[0:3].
        bitstruct_info = structattr.BitStructInfo()
        for attribute in attributes[0:7]:
            bitstruct_info.add_attr(attribute)
        header_fields = structattr.deserialize(
            frame[0:3],
            bitstruct_info
        )
        # skip length
        # Body: attributes 7..10 packed into frame[4:9].
        bitstruct_info = structattr.BitStructInfo()
        for attribute in attributes[7:11]:
            bitstruct_info.add_attr(attribute)
        body_fields = structattr.deserialize(
            frame[4:9],
            bitstruct_info,
        )
        structattr.strip_leading_underscore(header_fields)
        structattr.strip_leading_underscore(body_fields)
        msg = cls(**header_fields, **body_fields)
        try:
            # Promote the raw field id to a known Field enum member if possible.
            msg.field = Field(msg.field)
        except ValueError:
            pass
        if msg.message_type in (
            ElcobusMessage.MessageType.Info,
            ElcobusMessage.MessageType.Set,
            ElcobusMessage.MessageType.Ret
        ):
            msg.data = frame[9:]
            try:
                # Known fields carry a payload type; decode into it.
                candidate = msg.field.data_type
                msg.data = candidate.from_bytes(msg.data)
            except (AttributeError, ValueError):
                pass
        else:
            if len(frame) > 9:
                raise ValueError("Data found on Ack or Get packet")
        return msg
    def to_bytes(self) -> bytes:
        """Serialize to header + length + body + data + CRC bytes."""
        attributes = attr.fields(self.__class__)
        bitstruct_info = structattr.BitStructInfo()
        header_fields = []
        for attribute in attributes[0:7]:
            bitstruct_info.add_attr(attribute)
            header_fields.append(getattr(self, attribute.name))
        header_fields = structattr.serialize(
            header_fields,
            bitstruct_info
        )
        bitstruct_info = structattr.BitStructInfo()
        body_fields = []
        for attribute in attributes[7:11]:
            bitstruct_info.add_attr(attribute)
            body_fields.append(getattr(self, attribute.name))
        body_fields = structattr.serialize(
            body_fields,
            bitstruct_info
        )
        data = bytearray()
        data += header_fields
        # Total length: header + length byte itself + body + 2 CRC bytes
        # (+ payload length when data is present).
        dlen = len(header_fields) + 1 + len(body_fields) + 2
        if self.data is not None:
            dlen += len(self.data)
        data += bytes([dlen])
        data += body_fields
        if self.data is None:
            pass
        elif isinstance(self.data, bytes) or isinstance(self.data, bytearray):
            data += self.data
        else:
            # Typed payload objects know how to serialize themselves.
            data += self.data.to_bytes()
        crc = crc_func(data)
        data += bitstruct.pack(">u16", crc)
        return data
@attr.s(slots=True, auto_attribs=True)
class UnknownFrame(ElcobusFrame):
    """A CRC-valid frame that could not be decoded; kept as raw bytes."""
    header: bytes
    data: bytes
    def to_bytes(self) -> bytes:
        """Re-serialize as header + length byte + data + 16-bit CRC."""
        result = bytearray()
        result += self.header
        # Length: header + the length byte itself + data + 2 CRC bytes.
        result += bytes([len(self.header) + 1 + len(self.data) + 2])
        result += self.data
        crc = crc_func(result)
        result += bitstruct.pack(">u16", crc)
        return result
@structattr.add_methods
@attr.s(slots=True, auto_attribs=True)
class Temperature:
    """Payload: one always-zero flag byte followed by a 16-bit signed
    fixed-point temperature with 6 fractional bits (1/64 steps)."""
    class Zero(Enum(8)):
        Zero = 0
    flag: Zero = Zero.Zero
    temperature: FixedPointSInt(total_bits=16, fractional_bits=6) = 0
@structattr.add_methods
@attr.s(slots=True, auto_attribs=True)
class RoomStatus:
    """Payload: a fixed-point temperature (6 fractional bits) followed by
    a byte expected to be zero (purpose unknown)."""
    temperature: FixedPointSInt(total_bits=16, fractional_bits=6) = 0
    class Zero(Enum(8)):
        Zero = 0
    _unkn1: Zero = Zero.Zero
@structattr.add_methods
@attr.s(slots=True, auto_attribs=True)
class Pressure:
    """Payload: one always-zero flag byte followed by a 16-bit signed
    pressure scaled by 0.1."""
    class Zero(Enum(8)):
        Zero = 0
    flag: Zero = Zero.Zero
    pressure: FixedPointSInt(total_bits=16, scale_factor=0.1) = 0
@structattr.add_methods
@attr.s(slots=True, auto_attribs=True)
class Percent:
    """Payload: one always-zero flag byte followed by an 8-bit percentage."""
    class Zero(Enum(8)):
        Zero = 0
    flag: Zero = Zero.Zero
    percent: UInt(8) = 0
@structattr.add_methods
@attr.s(slots=True, auto_attribs=True)
class Status:
    """Payload: one always-zero flag byte followed by an 8-bit status code."""
    class Zero(Enum(8)):
        Zero = 0
    flag: Zero = Zero.Zero
    status: UInt(8) = 0
class Field(int, enum.Enum):
    """Known 16-bit field identifiers, each paired with the payload class
    used by ElcobusMessage to decode/encode its data."""
    def __new__(cls, value: int, data_type: type):
        # Store the numeric id as the enum value and attach the payload
        # class as `data_type` (consumed by ElcobusMessage.from_bytes).
        o = int.__new__(cls, value)
        o._value_ = value
        o.data_type = data_type
        return o
    RoomStatus = (0x0215, RoomStatus)
    OutdoorTemperature = (0x0521, Temperature)
    BoilerTemperature = (0x0519, Temperature)
    BoilerSetTemperature = (0x0923, Temperature)
    BoilerReturnTemperature = (0x051a, Temperature)
    TapWaterTemperature = (0x052f, Temperature)
    TapWaterSetTemperature = (0x074b, Temperature)
    HeatingCircuitTemperature = (0x0518, Temperature) # note: logical address denote circuits: 0x20 + circuit number
    HeatingCircuitSetTemperature = (0x0667, Temperature) # note: logical address denote circuits: 0x20 + circuit number
    Pressure = (0x3063, Pressure)
    BurnerModulation = (0x305f, Percent)
    PumpModulation = (0x04a2, Percent)
    Status = (0x3034, Status)
|
from ui.tests.TestBase import TestBase
from ui.models import Turn
class TestMoves(TestBase):
    def testSingleMove(self):
        """Germany -> Poland: the move resolves 'ok' and the unit ends in Poland."""
        turn = Turn.objects.get(pk=1)
        # verify units
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
        self.assertNoUnit(turn, 'Poland')
        # set commands
        self.setAssertCommand(turn, 'Germany', 'move', 'Poland')
        # calculate turn
        turn = self.assertNextTurn(turn, '2000', 'Moves: Single move')
        # verify units
        self.assertResult(turn.previous, 'Germany', 'ok')
        self.assertNoUnit(turn, 'Germany')
        self.assertUnit(turn, 'Poland', 'Army', 'Spain')
    def testSingleSecondMove(self):
        """The same move given as the path ['Germany', 'Poland'] also resolves 'ok'."""
        turn = Turn.objects.get(pk=1)
        # verify units
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
        self.assertNoUnit(turn, 'Poland')
        # set commands
        self.setAssertCommand(turn, 'Germany', 'move', ['Germany', 'Poland'])
        # calculate turn
        turn = self.assertNextTurn(turn, '2000', 'Moves: Single Second move')
        # verify units
        self.assertResult(turn.previous, 'Germany', 'ok')
        self.assertNoUnit(turn, 'Germany')
        self.assertUnit(turn, 'Poland', 'Army', 'Spain')
    def testFailedSecondMove(self):
        """Germany wins Poland; Croatia's path move there fails with target-not-moving."""
        turn = Turn.objects.get(pk=1)
        # verify units
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
        self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
        self.assertNoUnit(turn, 'Poland')
        # set commands
        self.setAssertCommand(turn, 'Germany', 'move', 'Poland')
        self.setAssertCommand(turn, 'Croatia', 'move', ['Croatia', 'Poland'])
        # calculate turn
        turn = self.assertNextTurn(turn, '2000', 'Moves: Failed Second move')
        # verify units
        self.assertResult(turn.previous, 'Germany', 'ok')
        self.assertResult(turn.previous, 'Croatia', 'fail.target-not-moving:par_1')
        self.assertNoUnit(turn, 'Germany')
        self.assertUnit(turn, 'Poland', 'Army', 'Spain')
        self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    def testFailedMove(self):
        """Moving onto an occupied, non-moving square fails with target-not-moving."""
        turn = Turn.objects.get(pk=1)
        # verify units
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
        self.assertUnit(turn, 'France', 'Army', 'Spain')
        # set commands
        self.setAssertCommand(turn, 'Germany', 'move', 'France')
        # calculate turn
        turn = self.assertNextTurn(turn, '2000', 'Moves: Failed move')
        # verify units
        self.assertResult(turn.previous, 'Germany', 'fail.target-not-moving:par_0')
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    def testTwoMoves(self):
        """Two units ordered into the same square both fail with more-moves-to-target."""
        turn = Turn.objects.get(pk=1)
        # verify units
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
        self.assertUnit(turn, 'Baltic Sea', 'Ship', 'Russia')
        self.assertNoUnit(turn, 'Poland')
        # set commands
        self.setAssertCommand(turn, 'Germany', 'move', 'Poland')
        self.setAssertCommand(turn, 'Baltic Sea', 'move', 'Poland')
        # calculate turn
        turn = self.assertNextTurn(turn, '2000', 'Moves: Two Moves')
        # verify units
        self.assertResult(turn.previous, 'Germany', 'fail.more-moves-to-target:par_0')
        self.assertResult(turn.previous, 'Baltic Sea', 'fail.more-moves-to-target:par_0')
        self.assertNoUnit(turn, 'Poland')
        self.assertUnit(turn, 'Germany', 'Army', 'Spain')
        self.assertUnit(turn, 'Baltic Sea', 'Ship', 'Russia')
    def testSwitch(self):
        """Two units swapping squares both resolve 'ok'."""
        turn = Turn.objects.get(pk=1)
        # verify units
        self.assertUnit(turn, 'Austria', 'Army', 'Spain')
        self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
        # set commands
        self.setAssertCommand(turn, 'Austria', 'move', 'Croatia')
        self.setAssertCommand(turn, 'Croatia', 'move', 'Austria')
        # calculate turn
        turn = self.assertNextTurn(turn, '2000', 'Moves: Switch')
        # verify units
        self.assertResult(turn.previous, 'Austria', 'ok')
        self.assertResult(turn.previous, 'Croatia', 'ok')
        self.assertUnit(turn, 'Austria', 'Army', 'Russia')
        self.assertUnit(turn, 'Croatia', 'Army', 'Spain')
def testSecondSwitch(self):
    """A swap expressed as two-step move lists also succeeds."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands: list form gives the per-step targets of the move
    self.setAssertCommand(turn, 'Austria', 'move', ['Austria', 'Croatia'])
    self.setAssertCommand(turn, 'Croatia', 'move', ['Croatia', 'Austria'])
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Second Switch')
    # verify units: positions exchanged
    self.assertResult(turn.previous, 'Austria', 'ok')
    self.assertResult(turn.previous, 'Croatia', 'ok')
    self.assertUnit(turn, 'Austria', 'Army', 'Russia')
    self.assertUnit(turn, 'Croatia', 'Army', 'Spain')
def testBlockedSwitch(self):
    """A third unit also targeting one end of a swap blocks the whole swap."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    # set commands: Austria<->Croatia swap plus France also moving to Austria
    self.setAssertCommand(turn, 'Austria', 'move', 'Croatia')
    self.setAssertCommand(turn, 'Croatia', 'move', 'Austria')
    self.setAssertCommand(turn, 'France', 'move', 'Austria')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Blocked Switch')
    # verify units: nobody moves
    self.assertResult(turn.previous, 'Austria', 'fail.target-not-empty:par_0')
    self.assertResult(turn.previous, 'Croatia', 'fail.more-moves-to-target:par_0')
    self.assertResult(turn.previous, 'France', 'fail.more-moves-to-target:par_0')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
def testFailedTrain(self):
    """A chain (train) of moves fails entirely when its head move is contested."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    self.assertUnit(turn, 'North Sea', 'Ship', 'Spain')
    # set commands: France->Austria->Germany->Denmark, North Sea also to Denmark
    self.setAssertCommand(turn, 'France', 'move', 'Austria')
    self.setAssertCommand(turn, 'Austria', 'move', 'Germany')
    self.setAssertCommand(turn, 'Germany', 'move', 'Denmark')
    self.setAssertCommand(turn, 'North Sea', 'move', 'Denmark')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Failed Train')
    # verify units: contested head blocks the whole chain
    self.assertResult(turn.previous, 'France', 'fail.target-not-empty:par_0')
    self.assertResult(turn.previous, 'Austria', 'fail.target-not-empty:par_0')
    self.assertResult(turn.previous, 'Germany', 'fail.more-moves-to-target:par_0')
    self.assertResult(turn.previous, 'North Sea', 'fail.more-moves-to-target:par_0')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'North Sea', 'Ship', 'Spain')
def testSuccessfulTrain(self):
    """A chain of moves into an empty field succeeds end-to-end."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    self.assertNoUnit(turn, 'Poland')
    # set commands: France->Austria->Germany->Poland (Poland empty)
    self.setAssertCommand(turn, 'France', 'move', 'Austria')
    self.setAssertCommand(turn, 'Austria', 'move', 'Germany')
    self.setAssertCommand(turn, 'Germany', 'move', 'Poland')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Successful Train')
    # verify units: every link of the chain advanced one step
    self.assertResult(turn.previous, 'France', 'ok')
    self.assertResult(turn.previous, 'Austria', 'ok')
    self.assertResult(turn.previous, 'Germany', 'ok')
    self.assertNoUnit(turn, 'France')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Poland', 'Army', 'Spain')
def testBlockedTrain(self):
    """A train is fully blocked when another unit contests the head's target."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    self.assertUnit(turn, 'Ukraine', 'Army', 'Russia')
    self.assertNoUnit(turn, 'Poland')
    # set commands: chain to Poland, Ukraine also moving to Poland
    self.setAssertCommand(turn, 'France', 'move', 'Austria')
    self.setAssertCommand(turn, 'Austria', 'move', 'Germany')
    self.setAssertCommand(turn, 'Germany', 'move', 'Poland')
    self.setAssertCommand(turn, 'Ukraine', 'move', 'Poland')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Blocked Train')
    # verify units: nothing moves
    self.assertResult(turn.previous, 'France', 'fail.target-not-empty:par_0')
    self.assertResult(turn.previous, 'Austria', 'fail.target-not-empty:par_0')
    self.assertResult(turn.previous, 'Germany', 'fail.more-moves-to-target:par_0')
    self.assertResult(turn.previous, 'Ukraine', 'fail.more-moves-to-target:par_0')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Ukraine', 'Army', 'Russia')
def testSuccessfulSecondTrain(self):
    """A train works when some members use the two-step move-list form."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'France', 'Army', 'Spain')
    self.assertNoUnit(turn, 'Poland')
    # set commands: same train as testSuccessfulTrain, list form for two units
    self.setAssertCommand(turn, 'France', 'move', ['France', 'Austria'])
    self.setAssertCommand(turn, 'Austria', 'move', ['Austria', 'Germany'])
    self.setAssertCommand(turn, 'Germany', 'move', 'Poland')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Successful Second Train')
    # verify units
    self.assertResult(turn.previous, 'France', 'ok')
    self.assertResult(turn.previous, 'Austria', 'ok')
    self.assertResult(turn.previous, 'Germany', 'ok')
    self.assertNoUnit(turn, 'France')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Poland', 'Army', 'Spain')
def testDoubleSwitch(self):
    """Two units swap and swap back within one turn, ending where they began."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands: each goes to the other's field and then back home
    self.setAssertCommand(turn, 'Austria', 'move', ['Croatia', 'Austria'])
    self.setAssertCommand(turn, 'Croatia', 'move', ['Austria', 'Croatia'])
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Double Switch')
    # verify units: both moves succeed and positions are unchanged
    self.assertResult(turn.previous, 'Austria', 'ok')
    self.assertResult(turn.previous, 'Croatia', 'ok')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
def testSuccessfulTrainSwitch(self):
    """A train of two-step moves combined with a switch resolves successfully."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    self.assertNoUnit(turn, 'Poland')
    # set commands: the three commanded units are Croatia, Austria and Germany
    self.setAssertCommand(turn, 'Croatia', 'move', ['Austria', 'Germany'])
    self.setAssertCommand(turn, 'Austria', 'move', ['Croatia', 'Austria'])
    self.setAssertCommand(turn, 'Germany', 'move', ['Germany', 'Poland'])
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Successful Train Switch')
    # verify units
    # BUG FIX: the first result was asserted for 'France', which issues no
    # command and owns no relevant unit in this scenario; the commanded unit
    # is 'Croatia' (matching the assertNoUnit check below).
    self.assertResult(turn.previous, 'Croatia', 'ok')
    self.assertResult(turn.previous, 'Austria', 'ok')
    self.assertResult(turn.previous, 'Germany', 'ok')
    self.assertNoUnit(turn, 'Croatia')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Germany', 'Army', 'Russia')
    self.assertUnit(turn, 'Poland', 'Army', 'Spain')
def testFailedSecondTrainSwitch(self):
    """The second step of a train/switch combo fails when its target is taken."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    self.assertNoUnit(turn, 'Poland')
    # set commands: Germany's second step returns into Austria, colliding
    self.setAssertCommand(turn, 'Croatia', 'move', ['Austria', 'Germany'])
    self.setAssertCommand(turn, 'Austria', 'move', ['Croatia', 'Austria'])
    self.setAssertCommand(turn, 'Germany', 'move', ['Germany', 'Austria'])
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Failed Second Train Switch')
    # verify units: first steps happened, second steps (par_1) failed
    self.assertResult(turn.previous, 'Croatia', 'fail.target-not-empty:par_1')
    self.assertResult(turn.previous, 'Austria', 'fail.more-moves-to-target:par_1')
    self.assertResult(turn.previous, 'Germany', 'fail.more-moves-to-target:par_1')
    self.assertUnit(turn, 'Austria', 'Army', 'Russia')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Spain')
def testFirstBlockedNoSecondMove(self):
    """When the first step is blocked, the second step is never attempted."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands: both first steps target Austria, so both collide
    self.setAssertCommand(turn, 'Austria', 'move', ['Austria', 'Croatia'])
    self.setAssertCommand(turn, 'Croatia', 'move', ['Austria', 'Poland'])
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: First Blocked No Second Move')
    # verify units: failure reported for the first step (par_0), nothing moved
    self.assertResult(turn.previous, 'Austria', 'fail.more-moves-to-target:par_0')
    self.assertResult(turn.previous, 'Croatia', 'fail.more-moves-to-target:par_0')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
def testMoveAttacked(self):
    """An attack on a moving unit cancels its move; the repelled attack fails."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands
    self.setAssertCommand(turn, 'Austria', 'move', 'Poland')
    self.setAssertCommand(turn, 'Croatia', 'attack', 'Austria')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Move attacked')
    # verify units: move canceled, defence holds, positions unchanged
    self.assertResult(turn.previous, 'Austria', 'fail.canceled-by-attack')
    self.assertResult(turn.previous, 'Croatia', 'fail.defence-stronger')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
def testSecondMoveAttacked(self):
    """An attack also cancels a move given in the two-step list form."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands
    self.setAssertCommand(turn, 'Austria', 'move', ['Austria', 'Poland'])
    self.setAssertCommand(turn, 'Croatia', 'attack', 'Austria')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Second Move attacked')
    # verify units: same outcome as testMoveAttacked
    self.assertResult(turn.previous, 'Austria', 'fail.canceled-by-attack')
    self.assertResult(turn.previous, 'Croatia', 'fail.defence-stronger')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
def testMoveWeaker(self):
    """An attack on the move's target field beats the move; attacker takes it."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands: both aim at Poland, attack outranks move
    self.setAssertCommand(turn, 'Austria', 'move', 'Poland')
    self.setAssertCommand(turn, 'Croatia', 'attack', 'Poland')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Move Weaker')
    # verify units: Croatia's army conquers Poland, Austria stays
    self.assertResult(turn.previous, 'Austria', 'fail.target-attacked:par_0')
    self.assertResult(turn.previous, 'Croatia', 'ok')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Poland', 'Army', 'Russia')
    self.assertNoUnit(turn, 'Croatia')
def testSecondMoveWeaker(self):
    """The second step of a list-form move loses to an attack on its target."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    # set commands
    self.setAssertCommand(turn, 'Austria', 'move', ['Austria', 'Poland'])
    self.setAssertCommand(turn, 'Croatia', 'attack', 'Poland')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Second Move Weaker')
    # verify units: failure reported for the second step (par_1)
    self.assertResult(turn.previous, 'Austria', 'fail.target-attacked:par_1')
    self.assertResult(turn.previous, 'Croatia', 'ok')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Poland', 'Army', 'Russia')
    self.assertNoUnit(turn, 'Croatia')
def testMoveToAttacked(self):
    """Two equal attacks on the move's target tie; nobody ends up on the field."""
    turn = Turn.objects.get(pk=1)
    # verify units
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    # set commands: one move and two attacks, all on Poland
    self.setAssertCommand(turn, 'Austria', 'move', 'Poland')
    self.setAssertCommand(turn, 'Croatia', 'attack', 'Poland')
    self.setAssertCommand(turn, 'Germany', 'attack', 'Poland')
    # calculate turn
    turn = self.assertNextTurn(turn, '2000', 'Moves: Move to attacked field')
    # verify units: the attackers tie (not-strongest) and the move fails
    self.assertResult(turn.previous, 'Austria', 'fail.target-attacked:par_0')
    self.assertResult(turn.previous, 'Croatia', 'fail.not-strongest')
    self.assertResult(turn.previous, 'Germany', 'fail.not-strongest')
    self.assertUnit(turn, 'Austria', 'Army', 'Spain')
    self.assertUnit(turn, 'Croatia', 'Army', 'Russia')
    self.assertUnit(turn, 'Germany', 'Army', 'Spain')
    self.assertNoUnit(turn, 'Poland')
|
from types import SimpleNamespace
from hwtypes.adt import Enum, Product, TaggedUnion, Sum
from hwtypes.adt_util import rebind_type
from peak import family_closure
from . import family
@family_closure(family)
def ISA_fc(family):
    """Build the RISC-V-style instruction-set ADTs for the given *family*.

    Returns a SimpleNamespace containing every locally defined type
    (instruction forms, tag enums, tagged layouts and the top-level
    ``Inst`` sum type).
    """
    Bit = family.Bit
    BitVector = family.BitVector
    Idx = family.Idx
    Word = family.Word

    # Define the layout of instruction minus tag fields (opcode/func3/func7)
    class R(Product):
        rd = Idx
        rs1 = Idx
        rs2 = Idx

    class I(Product):
        rd = Idx
        rs1 = Idx
        imm = BitVector[12]

    # For shifts.
    # Arguably this is closer to an R type (where rs2 is treated as an
    # immediate) than an I type, but in any event it is its own form,
    # distinct from both I and R, even though RISC-V calls it an I type.
    class Is(Product):
        rd = Idx
        rs1 = Idx
        imm = BitVector[5]

    class S(Product):
        rs1 = Idx
        rs2 = Idx
        imm = BitVector[12]

    class U(Product):
        rd = Idx
        imm = BitVector[20]

    class B(Product):
        rs1 = Idx
        rs2 = Idx
        imm = BitVector[12]

    class J(Product):
        rd = Idx
        imm = BitVector[20]

    # define tags for func7/func3
    class ArithInst(Enum):
        ADD = Enum.Auto()
        SUB = Enum.Auto()
        SLT = Enum.Auto()
        SLTU = Enum.Auto()
        AND = Enum.Auto()
        OR = Enum.Auto()
        XOR = Enum.Auto()

    class ShiftInst(Enum):
        SLL = Enum.Auto()
        SRL = Enum.Auto()
        SRA = Enum.Auto()

    # Does not affect the encoding, but there is not
    # currently a way to union enums
    class AluInst(TaggedUnion):
        arith = ArithInst
        shift = ShiftInst

    class StoreInst(Enum):
        SB = Enum.Auto()
        SH = Enum.Auto()
        SW = Enum.Auto()
        SD = Enum.Auto()

    class LoadInst(Enum):
        LB = Enum.Auto()
        LBU = Enum.Auto()
        LH = Enum.Auto()
        LHU = Enum.Auto()
        LW = Enum.Auto()
        LWU = Enum.Auto()
        LD = Enum.Auto()

    class BranchInst(Enum):
        BEQ = Enum.Auto()
        BNE = Enum.Auto()
        BLT = Enum.Auto()
        BLTU = Enum.Auto()
        BGE = Enum.Auto()
        BGEU = Enum.Auto()

    # Define tagged layouts.
    # The types here should define the opcode field
    # and when combined with their tag define opcode/func3/func7.
    class OP(Product):
        data = R
        tag = AluInst

    class OP_IMM_A(Product):
        data = I
        tag = ArithInst

    class OP_IMM_S(Product):
        data = Is
        tag = ShiftInst

    # an OP_IMM is either:
    #   OP_IMM_A (I data, ArithInst tag)
    #   OP_IMM_S (Is data, ShiftInst tag)
    class OP_IMM(TaggedUnion):
        arith = OP_IMM_A
        shift = OP_IMM_S

    # LUI / AUIPC each define their own opcode, so they are not merged
    # into the tag / data style.
    # HACK: don't just inherit U because it breaks rebind
    class LUI(Product):
        data = U

    class AUIPC(Product):
        data = U

    # Similar to above: JAL/JALR are distinguished by opcode, so no
    # tagged union with JAL=J; JALR=I is created.
    class JAL(Product):
        data = J

    class JALR(Product):
        data = I

    class Branch(Product):
        data = B
        tag = BranchInst

    class Load(Product):
        data = I
        tag = LoadInst

    class Store(Product):
        data = S
        # BUG FIX: was tagged with LoadInst; stores are tagged by the
        # (otherwise unused) StoreInst enum (SB/SH/SW/SD).
        tag = StoreInst

    # This sum type defines the opcode field
    Inst = Sum[OP, OP_IMM, LUI, AUIPC, JAL, JALR, Branch, Load, Store]
    return SimpleNamespace(**locals())
|
import discord
import asyncio
import os
from CryptoniteFunctions import MessageHandler
print("Creating bot client...")
# Module-level Discord client shared by the event handlers below.
client = discord.Client()
print("Creation completed.")
@client.event
async def on_ready():
    """Log the bot's identity once the connection to Discord is established."""
    # NOTE(review): string concatenation requires client.user.id to be a str,
    # which holds for the discord.py 0.x API this file targets — confirm.
    print("Bot name: " + client.user.name + "\n Bot ID: " + client.user.id)
    print("Ready to receive commands!")
@client.event
async def on_message(message):
    """Dispatch messages starting with the '$$' prefix to MessageHandler.

    Ignores the bot's own messages. If the handler returns the sentinel
    "exit", announce shutdown and terminate; otherwise send the returned
    embed back to the originating channel.
    """
    if message.author == client.user:
        return
    # prefix determined here ($$)
    if message.content.startswith("$$"):
        returnValue = MessageHandler(message.content, message.author)
        if returnValue == "exit":
            # BUG FIX: exit() was called *before* the send, so the
            # "Bot shutting down." message was unreachable. Announce first,
            # then terminate.
            await client.send_message(message.channel, "Bot shutting down.")
            exit()
        else:
            await client.send_message(message.channel, embed=returnValue)
# NOTE(review): ".." looks like a placeholder bot token — load the real token
# from an environment variable or config file and never commit it to source.
client.run("..")
|
#!/usr/bin/python3
from pyrob.api import *
@task
def task_8_2():
    """Walk right along a wall, filling cells that have a wall on exactly
    one side (above XOR beneath); stop at a wall on the right.

    Does nothing when the start cell has no wall above or beneath.
    """
    # BUG FIX: the original only assigned its loop flag inside the initial
    # `if`, so `while usl == 1` raised NameError when no wall was adjacent.
    if wall_is_above() or wall_is_beneath():
        while True:
            # fill only when exactly one of the two walls is present
            if wall_is_above() != wall_is_beneath():
                fill_cell()
            if wall_is_on_the_right():
                break
            move_right()
if __name__ == '__main__':
    # pyrob entry point: runs all @task-decorated functions in this module
    run_tasks()
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from timer_app.forms.topic import TopicForm
from timer_app.models import Topic
@login_required
def create_and_list_topic(request):
    """List the current user's topics and handle creation of new ones.

    GET renders the list plus an empty form. POST creates a topic for
    request.user and redirects back (Post/Redirect/Get); an invalid POST
    re-renders with the bound form so validation errors are shown.
    """
    if request.method == 'POST':
        form = TopicForm(request.POST)
        if form.is_valid():
            topic = Topic(name=form.cleaned_data['name'])
            topic.user = request.user
            topic.save()
            # BUG FIX: redirect after a successful POST instead of
            # re-rendering, so a browser refresh cannot create duplicates.
            return redirect('create_and_list_topic')
        # BUG FIX: fall through with the *bound* form — the original
        # rendered a fresh TopicForm(), silently discarding errors.
    else:
        form = TopicForm()
    context = {
        'form': form,
        'topics': Topic.objects.filter(user=request.user),
    }
    return render(request, 'timer/topic-create-and-list.html', context)
@login_required
def edit_topic(request, topic_id: int):
    """Render (GET) and process (POST) the edit form for one topic.

    NOTE(review): topics are looked up by pk only — confirm whether access
    should be scoped to request.user as in create_and_list_topic.
    """
    topic = Topic.objects.get(pk=topic_id)
    if request.method == 'GET':
        return render(request, 'timer/topic-edit.html',
                      {'form': TopicForm(instance=topic)})
    elif request.method == 'POST':
        bound_form = TopicForm(request.POST, instance=topic)
        if bound_form.is_valid():
            bound_form.save()
            return redirect('create_and_list_topic')
        # invalid submission: re-render with the bound form and its errors
        return render(request, 'timer/topic-edit.html', {'form': bound_form})
@login_required
def delete_topic(request, topic_id: int):
    """Show a confirmation page (GET) or delete the topic (POST).

    NOTE(review): no ownership check on topic_id — verify intended access.
    """
    topic = Topic.objects.get(pk=topic_id)
    if request.method == 'POST':
        topic.delete()
        return redirect('create_and_list_topic')
    if request.method == 'GET':
        return render(request, 'timer/topic-delete.html',
                      {'topic_name': topic.name})
from authentication import views
from django.urls import include, re_path
from django.views.generic.base import RedirectView
urlpatterns = [
    # include external frameworks urls
    re_path(r'^auth/', include('rest_framework_social_oauth2.urls')),
    # NOTE(review): the empty-prefix include matches *every* path; patterns
    # below it are only reachable when the included registration urlconf does
    # not match the remainder of the URL — confirm this ordering is intended.
    re_path(r'^', include('registration.backends.hmac.urls')),
    # custom registration page
    re_path(r'^register/$', views.Reg.as_view(), name='reg'),
    # profile page (pattern is unanchored at the end, so it also matches
    # e.g. /profileanything — presumably intentional; verify)
    re_path(r'^profile', RedirectView.as_view(pattern_name='profile')),
    # pipeline view for social login (see authentication.pipeline.add_group)
    re_path(r'^askforgroup/(?P<backend>.+)', views.AskForGroup.as_view(), name='askforgroup'),
]
|
import webapp2
import string
from xml.dom.minidom import parseString
from google.appengine.ext.webapp import template
from google.appengine.api import users
from site.utils import admin_required
from blog.models import Article
class BlogHandler(webapp2.RequestHandler):
    """webapp2 handler for the blog: list, view, create and edit articles."""

    def main(self):
        """Render the main page with all articles, newest first."""
        articles = Article.all().order('-created')
        self.response.out.write(template.render(
            'blog/templates/index.html', {'articles': articles}))

    def get_article(self, slug):
        """Render the article identified by *slug*, or redirect home."""
        article = Article.get_by_key_name(slug)
        if article:
            self.response.out.write(template.render(
                'blog/templates/article.html', {'article': article}))
        else:
            self.redirect('/')

    @admin_required
    def update_article(self, slug):
        """Show the edit form (GET) or save changes (POST) for an article."""
        article = Article.get_by_key_name(slug)
        if not article:
            self.redirect('/')
            # BUG FIX: without this return the handler kept running with
            # article=None and crashed below after issuing the redirect.
            return
        if not self.request.POST:
            self.response.out.write(template.render('blog/templates/edit.html',
                {'article': article,
                 'title': 'Edit article',
                 'action': webapp2.uri_for('update_article', slug=slug)}))
        else:
            slug = self.request.get('slug')
            # NOTE(review): GAE datastore key names are immutable — assigning
            # key_name here likely does not rename the entity; verify.
            article.key_name = slug
            article.title = self.request.get('title')
            article.description = self.request.get('description')
            # string.strip works on both str and unicode (Python 2)
            article.keywords = map(string.strip,
                                   self.request.get('keywords').split(','))
            article.content = self.request.get('content')
            article.put()
            self.redirect('/' + slug)

    @admin_required
    def create_article(self):
        """Show the creation form (GET) or create a new article (POST)."""
        if not self.request.POST:
            self.response.out.write(template.render('blog/templates/edit.html',
                {'article': None,
                 'title': 'New article',
                 'action': webapp2.uri_for('create_article')}))
        else:
            slug = self.request.get('slug')
            article = Article.get_by_key_name(slug)
            if article:
                # avoid overwriting an existing article with the same slug
                slug = slug + '_to_change'
            keywords = map(string.strip,
                           self.request.get('keywords').split(','))
            article = Article(key_name=slug,
                              title=self.request.get('title'),
                              description=self.request.get('description'),
                              keywords=keywords,
                              content=self.request.get('content'))
            article.put()
            self.redirect('/' + slug)
|
from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.utils.data_utils import get_file
def conv_block(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1), bias=False):
    """Apply a Convolution2D layer followed by a ReLU activation to ``x``.

    Uses the Keras 1.x Convolution2D API (nb_row/nb_col, border_mode,
    subsample, bias). Returns the activated tensor.
    """
    # Dead code removed: the original computed channel_axis from
    # K.image_dim_ordering() but never used it (no BatchNormalization here).
    x = Convolution2D(nb_filter, nb_row, nb_col, subsample=subsample,
                      border_mode=border_mode, bias=bias)(x)
    x = Activation('relu')(x)
    return x
def create_vgg16(nb_classes=1001, load_weights=True):
    """Build a VGG16 model (Keras 1.x API) for ``nb_classes`` outputs.

    NOTE(review): ``load_weights`` is accepted but never used — no weights
    are downloaded or loaded anywhere in this function; confirm intent.
    """
    # channels-first vs channels-last input shape
    if K.image_dim_ordering() == 'th':
        init = Input((3, 224, 224))
    else:
        init = Input((224, 224, 3))
    # Input Shape is 224 x 224 x 3 (tf) or 3 x 224 x 224 (th)
    # Block 1: two 64-filter convs + pool
    x1 = conv_block(init, 64, 3, 3)
    x1 = conv_block(x1, 64, 3, 3)
    x2 = MaxPooling2D((2, 2), strides=(2, 2), border_mode='valid')(x1)
    # Block 2: two 128-filter convs + pool
    x2 = conv_block(x2, 128, 3, 3)
    x2 = conv_block(x2, 128, 3, 3)
    x3 = MaxPooling2D((2, 2), strides=(2, 2), border_mode='valid')(x2)
    # Block 3: three 256-filter convs + pool
    x3 = conv_block(x3, 256, 3, 3)
    x3 = conv_block(x3, 256, 3, 3)
    x3 = conv_block(x3, 256, 3, 3)
    x4 = MaxPooling2D((2, 2), strides=(2, 2), border_mode='valid')(x3)
    # Block 4: three 512-filter convs + pool
    x4 = conv_block(x4, 512, 3, 3)
    x4 = conv_block(x4, 512, 3, 3)
    x4 = conv_block(x4, 512, 3, 3)
    x5 = MaxPooling2D((2, 2), strides=(2, 2), border_mode='valid')(x4)
    # Block 5: three 512-filter convs + pool
    x5 = conv_block(x5, 512, 3, 3)
    x5 = conv_block(x5, 512, 3, 3)
    x5 = conv_block(x5, 512, 3, 3)
    x6 = MaxPooling2D((2, 2), strides=(2, 2), border_mode='valid')(x5)
    # Fully connected layers
    x6 = Flatten()(x6)
    x7 = Dense(output_dim=4096, activation='relu')(x6)
    x7 = Dropout(0.5)(x7)
    x8 = Dense(output_dim=4096, activation='relu')(x7)
    x8 = Dropout(0.5)(x8)
    # Output
    out = Dense(output_dim=nb_classes, activation='softmax')(x8)
    model = Model(init, out, name='VGG16')
    return model
if __name__ == "__main__":
    # Smoke test: build the model (visualization helpers left disabled).
    # from keras.utils.visualize_util import plot
    vgg16 = create_vgg16()
    # vgg16.summary()
    # plot(vgg16, to_file="VGG16.png", show_shapes=True)
|
# -*- coding: utf-8 -*-
# Copyright (c) St. Anne's University Hospital in Brno. International Clinical
# Research Center, Biomedical Engineering. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Std imports
# Third pary imports
import numpy as np
from scipy.stats import entropy
# Local imports
from ..utils.method import Method
def compute_relative_entropy(sig):
    """
    Calculation of Kullback-Leibler divergence:
    relative entropy of sig[0] with respect to sig[1]
    and relative entropy of sig[1] with respect to sig[0]

    Parameters
    ----------
    sig: np.ndarray
        2D numpy array of shape (signals, samples), time series (int, float)

    Returns
    -------
    ren: float
        max value of relative entropy between sig[0] and sig[1];
        np.nan when the divergence is infinite (disjoint histogram support)

    Example
    -------
    ren = compute_relative_entropy(sig)
    """
    # isinstance instead of exact type comparison (also accepts subclasses);
    # the message had a stray f-string prefix with no placeholders.
    if not isinstance(sig, np.ndarray):
        raise TypeError("Signals have to be in numpy arrays!")

    # OPTIMIZE - check if we can do this in one array
    # NOTE(review): each histogram is binned over its *own* data range, so the
    # two count vectors may describe different bin edges — confirm that
    # comparing them bin-by-bin is the intended behaviour.
    h1 = np.histogram(sig[0], 10)
    h2 = np.histogram(sig[1], 10)

    # KL divergence is asymmetric; keep the larger of the two directions.
    ren = entropy(h1[0], h2[0])
    ren21 = entropy(h2[0], h1[0])
    if ren21 > ren:
        ren = ren21

    # An infinite divergence (zero bins in one histogram where the other has
    # mass) is reported as NaN rather than inf.
    if np.isinf(ren):
        ren = np.nan
    return ren
class RelativeEntropy(Method):
    """Method wrapper exposing compute_relative_entropy to the framework."""

    # algorithm metadata consumed by the Method base class
    algorithm = 'RELATIVE_ENTROPY'
    algorithm_type = 'bivariate'
    version = '1.0.0'
    # output record layout: single float32 field 'ren'
    dtype = [('ren', 'float32')]

    def __init__(self, **kwargs):
        """
        Calculation of Kullback-Leibler divergence:
        relative entropy of sig1 with respect to sig2
        and relative entropy of sig2 with respect to sig1
        """
        super().__init__(compute_relative_entropy, **kwargs)
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.db import transaction
from .models import Teacher, User, Student, Thesis
class StudentSignUpForm(UserCreationForm):
    """Sign-up form that creates a User flagged as a student plus its
    Student profile row."""

    class Meta(UserCreationForm.Meta):
        model = User
        fields = ('first_name', 'last_name', 'username', 'email')

    @transaction.atomic
    def save(self, commit=True):
        """Save the user and, when committing, the linked Student profile.

        BUG FIX: the original ignored ``commit`` and always saved, violating
        the ModelForm.save contract (commit=False must not hit the database).
        """
        user = super().save(commit=False)
        user.is_student = True
        if commit:
            user.save()
            Student.objects.create(user=user)
        return user
class TeacherSignUpForm(UserCreationForm):
    """Sign-up form that creates a User flagged as a teacher plus its
    Teacher profile row."""

    class Meta(UserCreationForm.Meta):
        model = User
        fields = ('first_name', 'last_name', 'username', 'email')

    @transaction.atomic
    def save(self, commit=True):
        """Save the user and, when committing, the linked Teacher profile.

        CONSISTENCY FIX: accepts ``commit`` like StudentSignUpForm and the
        ModelForm base API (backward compatible — default preserves the old
        no-argument call).
        """
        user = super().save(commit=False)
        user.is_teacher = True
        if commit:
            user.save()
            Teacher.objects.create(user=user)
        return user
class ThesisSubmitForm(forms.ModelForm):
    """ModelForm for submitting a Thesis (subject, supervising teacher, file).

    TODO(review): the commented-out clean_file validator below (PDF content
    type check) is dead code — either restore it or delete it.
    """

    class Meta():
        model = Thesis
        fields = ('subject', 'teacher', 'file')

    # def clean_file(self):
    #     file= self.cleaned_data['file']
    #     try:
    #         #validate content type
    #         main, sub = file.content_type.split('/')
    #         if not (sub in ['pdf']):
    #             raise forms.ValidationError(u'Please use PDF file.')
    #         #validate file size
    #     except AttributeError:
    #         """
    #         Handles case when we are updating the user profile
    #         and do not supply a new avatar
    #         """
    #         pass
    #     return file
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# %%06e04634dc02a21685ae0bf1dca0dadbeff3eb0758082e61fcd2304ae19ba69e7afda013125c5f1d3138487a150f61a0118734169f4c28e12ecbf82c7b0f3873%%
SCRIPT_DEBUG_MODE = False
#
# Standard imports
#
import sys
import os
import platform
import argparse
import json
import datetime
now = datetime.date.today
#
# Custom modules imports
#
from scripts_commons import get_conf, DEFAULT_LOCATOR_SOURCE, DEFAULT_ENC, check_os, main, writable
import sqlite_utils
import htmlgen
def add(datafile, datainfo):
    """Insert one interactively-collected row into the SQLite db *datafile*.

    *datainfo* is a path to a JSON file describing the model (table name,
    column order/types); values are prompted from the user by insert_data.
    """
    conn = sqlite_utils.SQLiteConn(datafile)
    with open(datainfo) as f:
        data_elems = json.load(f)
    cols = insert_data(data_elems)
    # NOTE(review): the list holds a single {"table", "columns"} dict which is
    # passed verbatim to conn.insert — confirm this matches the sqlite_utils
    # insert() signature (project module, not visible here).
    submodel = {
        "model": data_elems['model'],
        "insert": [
            {
                "table": data_elems['model']['table'],
                "columns": cols
            }
        ]
    }
    for values in submodel['insert']:
        conn.insert(submodel['model'], values)
    conn.close()
def insert_data(model):
    """Collect column values for one row, prompting the user via input().

    Despite its name, this does not insert anything — it returns the dict of
    column name -> value for the caller to insert.
    """
    cols = {}
    for x in model['model']['order']:
        if x == "ID" and "INTEGER PRIMARY KEY" in model['model']['columns'][x]:
            # None lets SQLite auto-assign the rowid-backed primary key
            cols[x] = None
        elif x == "DATE":
            # module-level `now` is datetime.date.today
            cols[x] = str(now())
        else:
            cols[x] = input("[{}]: ".format(x))
    # ensure an ID entry exists even when ID is not listed in 'order'
    if "ID" in model['model']['columns'] and "ID" not in cols:
        cols["ID"] = None
    return cols
DATA_CONF = '.data_conf'
#
# Script execution
#
def execute(script_name, script_dir, cur_dir, paths):
    """Entry point invoked by scripts_commons.main: resolve db/model and add.

    Defaults for datafile/model come from the optional `.data_conf` file next
    to the script; positional CLI arguments override them.

    NOTE(review): which positionals are registered depends on len(sys.argv)
    *before* parsing, so argument meaning shifts with argument count —
    fragile; confirm the intended CLI shapes.
    """
    elements = {}
    datafile = None
    defaultmodel = None
    conf = os.path.join(script_dir, DATA_CONF)
    if os.path.isfile(conf):
        elements = get_conf(conf)
        datafile = elements.get('dbname', None)
        defaultmodel = elements.get('defaultmodel', None)
    parser = argparse.ArgumentParser()
    if len(sys.argv) > 2 or not datafile:
        parser.add_argument("datafile",
                            help="Db name to operate in",
                            type=str)
    if not defaultmodel or len(sys.argv) > 1:
        parser.add_argument("datainfo",
                            help="General data info for command(model and values)",
                            type=str)
    args = parser.parse_args()
    if not defaultmodel or len(sys.argv) > 1:
        defaultmodel = args.datainfo
    if len(sys.argv) > 2 or not datafile:
        datafile = args.datafile
    add(datafile, defaultmodel)
if __name__ == '__main__':
    # scripts_commons.main wires up script_name/script_dir/cur_dir/paths
    main(execute, SCRIPT_DEBUG_MODE)
|
"""
Tests for SimEngine.Mote.sf
"""
import types
import pytest
import test_utils as u
import SimEngine.Mote.MoteDefines as d
from SimEngine import SimLog
from SimEngine import SimEngine
# =========================== helpers =========================================
def set_app_traffic_rate(sim_engine, app_pkPeriod):
    """Override the configured application packet period at runtime."""
    settings = sim_engine.settings
    settings.app_pkPeriod = app_pkPeriod
def start_app_traffic(mote):
    """Kick off the application-layer traffic generator on *mote*."""
    app = mote.app
    app.startSendingData()
def stop_app_traffic(sim_engine):
    """Stop application traffic by setting the packet period to zero."""
    set_app_traffic_rate(sim_engine, 0)
def run_until_cell_allocation(sim_engine, mote, _cell_options):
    """Run the simulation until *mote* allocates a cell with *_cell_options*
    to its preferred parent, then pause one ASN later.

    Works by monkeypatching mote.tsch.addCell; the original bound method is
    stashed on the instance and restored once the condition fires.
    """
    mote.tsch.original_addCell = mote.tsch.addCell

    def new_addCell(
            self,
            slotOffset,
            channelOffset,
            neighbor,
            cellOptions
        ):
        # delegate to the real implementation first (bound method, no self)
        mote.tsch.original_addCell(
            slotOffset,
            channelOffset,
            neighbor,
            cellOptions
        )
        if (
                (self.mote.id == mote.id)
                and
                (neighbor is not None)
                and
                (neighbor == mote.rpl.getPreferredParent())
                and
                (cellOptions == _cell_options)
            ):
            # pause the simulator
            sim_engine.pauseAtAsn(sim_engine.getAsn() + 1)
            # revert addCell
            mote.tsch.addCell = mote.tsch.original_addCell

    mote.tsch.addCell = types.MethodType(new_addCell, mote.tsch)
    u.run_until_end(sim_engine)
def run_until_dedicated_tx_cell_is_allocated(sim_engine, mote):
    """Run until *mote* gets a dedicated TX-only cell to its parent."""
    wanted_cell_options = [d.CELLOPTION_TX]
    run_until_cell_allocation(sim_engine, mote, wanted_cell_options)
def run_until_mote_is_ready_for_app(sim_engine, mote):
    """Run until *mote* has a TX/RX/SHARED cell, i.e. is ready for traffic."""
    wanted_cell_options = [d.CELLOPTION_TX, d.CELLOPTION_RX, d.CELLOPTION_SHARED]
    run_until_cell_allocation(sim_engine, mote, wanted_cell_options)
def run_until_sixp_cmd_is_seen(sim_engine, mote, cmd):
    """Run the simulation until *mote* enqueues a 6P request with code *cmd*,
    then pause one ASN later.

    Monkeypatches mote.sixp._tsch_enqueue; the original bound method is
    restored once the request is observed.
    """
    mote.sixp.original_tsch_enqueue = mote.sixp._tsch_enqueue

    def new_tsch_enqueue(self, packet):
        # enqueue for real first, then inspect the packet
        mote.sixp.original_tsch_enqueue(packet)
        if (
                (packet['app']['msgType'] == d.SIXP_MSG_TYPE_REQUEST)
                and
                (packet['app']['code'] == cmd)
            ):
            sim_engine.pauseAtAsn(sim_engine.getAsn() + 1)
            # revert _tsch_enqueue
            mote.sixp._tsch_enqueue = mote.sixp.original_tsch_enqueue

    mote.sixp._tsch_enqueue = types.MethodType(new_tsch_enqueue, mote.sixp)
    u.run_until_end(sim_engine)
# =========================== fixtures =========================================
@pytest.fixture(params=['add', 'delete', 'relocate'])
def test_case(request):
    # Parametrized fixture yielding each 6P operation name.
    # NOTE(review): not referenced by the tests visible in this file.
    return request.param
# =========================== tests ===========================================
class TestMSF(object):
def test_msf(self, sim_engine):
    """ Test Scheduling Function Traffic Adaptation
    - objective   : test if msf adjust the number of allocated cells in
                    accordance with traffic
    - precondition: form a 2-mote linear network
    - precondition: the network is formed
    - action      : change traffic
    - expectation : MSF should trigger ADD/DELETE/RELOCATE accordingly
    """
    sim_engine = sim_engine(
        diff_config = {
            'app_pkPeriod'            : 0,
            'app_pkPeriodVar'         : 0.05,
            'exec_numMotes'           : 3,
            'exec_numSlotframesPerRun': 4000,
            'secjoin_enabled'         : False,
            'sf_class'                : 'MSF',
            'conn_class'              : 'Linear',
        }
    )

    # XXX: lowers the TX threshold so adaptation triggers within the run
    d.MSF_MIN_NUM_TX = 10

    # for quick access
    root  = sim_engine.motes[0]
    hop_1 = sim_engine.motes[1]
    hop_2 = sim_engine.motes[2]

    asn_at_end_of_simulation = (
        sim_engine.settings.tsch_slotframeLength *
        sim_engine.settings.exec_numSlotframesPerRun
    )

    # make hop_1 not receive anything on dedicated RX cells other than the
    # first allocated one than one dedicated RX cell so that MSF would
    # perform cell relocation
    hop_1.tsch.original_addCell = hop_1.tsch.addCell
    hop_1.tsch.original_tsch_action_RX = hop_1.tsch._tsch_action_RX

    def new_addCell(self, slotOffset, channelOffset, neighbor, cellOptions):
        if (
                (cellOptions == [d.CELLOPTION_RX])
                and
                (len(self.getRxCells(neighbor)) == 0)
            ):
            # remember the slotoffset of first allocated dedicated cell. While
            # this cell might be deleted later, ignore such an edge case for
            # this test.
            self.first_dedicated_slot_offset = slotOffset
        self.original_addCell(
            slotOffset,
            channelOffset,
            neighbor,
            cellOptions
        )

    def new_action_RX(self):
        slot_offset = self.engine.getAsn() % self.settings.tsch_slotframeLength
        cell = self.schedule[slot_offset]
        if (
                (cell['neighbor'] is not None)
                and
                hasattr(self, 'first_dedicated_slot_offset')
                and
                ((self.first_dedicated_slot_offset) != slot_offset)
            ):
            # do nothing on this dedicated cell
            pass
        else:
            self.original_tsch_action_RX()

    hop_1.tsch.addCell = types.MethodType(new_addCell, hop_1.tsch)
    hop_1.tsch._tsch_action_RX = types.MethodType(new_action_RX, hop_1.tsch)

    # wait for the network formed
    u.run_until_everyone_joined(sim_engine)

    # wait for hop_2 to get ready to start application
    run_until_mote_is_ready_for_app(sim_engine, hop_2)
    assert sim_engine.getAsn() < asn_at_end_of_simulation

    # generate application traffic which is supposed to trigger an ADD
    # transaction between hop_2 and hop_1
    asn_starting_app_traffic = sim_engine.getAsn()
    set_app_traffic_rate(sim_engine, 1.4)
    start_app_traffic(hop_2)
    run_until_dedicated_tx_cell_is_allocated(sim_engine, hop_2)
    assert sim_engine.getAsn() < asn_at_end_of_simulation

    # increase the traffic (smaller period => more packets)
    asn_increasing_app_traffic = sim_engine.getAsn()
    set_app_traffic_rate(sim_engine, 1.1)
    run_until_dedicated_tx_cell_is_allocated(sim_engine, hop_2)
    assert sim_engine.getAsn() < asn_at_end_of_simulation

    # decrease the traffic; run until a RELOCATE command is issued
    set_app_traffic_rate(sim_engine, 1.4)
    run_until_sixp_cmd_is_seen(sim_engine, hop_2, d.SIXP_CMD_RELOCATE)
    assert sim_engine.getAsn() < asn_at_end_of_simulation

    # stop the traffic; run until a DELETE command is issued
    stop_app_traffic(sim_engine)
    run_until_sixp_cmd_is_seen(sim_engine, hop_2, d.SIXP_CMD_DELETE)
    assert sim_engine.getAsn() < asn_at_end_of_simulation
    def test_parent_switch(self, sim_engine):
        """A parent switch should trigger a 6P CLEAR to the old parent and an ADD to the new one."""
        sim_engine = sim_engine(
            diff_config = {
                'exec_numSlotframesPerRun': 4000,
                'exec_numMotes'           : 3,
                'app_pkPeriod'            : 0,
                'sf_class'                : 'MSF',
                'conn_class'              : 'Linear'
            }
        )
        # for quick access
        root = sim_engine.motes[0]
        mote_1 = sim_engine.motes[1]
        mote_2 = sim_engine.motes[2]
        asn_at_end_of_simulation = (
            sim_engine.settings.tsch_slotframeLength *
            sim_engine.settings.exec_numSlotframesPerRun
        )
        # wait for hop_2 to get ready. this is when the network is ready to
        # operate.
        run_until_mote_is_ready_for_app(sim_engine, mote_2)
        assert sim_engine.getAsn() < asn_at_end_of_simulation
        # stop DIO (and EB) transmission so only the hand-crafted DIOs below
        # influence mote_1's parent selection
        sim_engine.settings.tsch_probBcast_ebProb = 0
        # force mote_1 to switch its preferred parent
        old_parent = root
        new_parent = mote_2
        # invalidate old_parent: 65535 is an effectively-infinite rank
        dio = old_parent.rpl._create_DIO()
        dio['mac'] = {'srcMac': old_parent.id}
        dio['app']['rank'] = 65535
        mote_1.rpl.action_receiveDIO(dio)
        # give a DIO from new_parent with a good rank
        dio = new_parent.rpl._create_DIO()
        dio['mac'] = {'srcMac': new_parent.id}
        dio['app']['rank'] = 255
        mote_1.rpl.action_receiveDIO(dio)
        # mote_1 should issue CLEAR to the old preferred parent and ADD to the
        # new one
        asn_start_testing = sim_engine.getAsn()
        u.run_until_end(sim_engine)
        logs = u.read_log_file(
            filter = [SimLog.LOG_SIXP_TX['type']],
            after_asn = asn_start_testing
        )
        def it_is_add_request(packet):
            # return if the packet is a ADD request sent from mote_1 to
            # new_parent
            return (
                (packet['mac']['srcMac'] == mote_1.id)
                and
                (packet['mac']['dstMac'] == new_parent.id)
                and
                (packet['type'] == d.PKT_TYPE_SIXP)
                and
                (packet['app']['msgType'] == d.SIXP_MSG_TYPE_REQUEST)
                and
                (packet['app']['code'] == d.SIXP_CMD_ADD)
            )
        def it_is_clear_request(packet):
            # return if the packet is a CLEAR request sent from mote_1 to
            # old_parent (the original comment wrongly said new_parent)
            return (
                (packet['mac']['srcMac'] == mote_1.id)
                and
                (packet['mac']['dstMac'] == old_parent.id)
                and
                (packet['type'] == d.PKT_TYPE_SIXP)
                and
                (packet['app']['msgType'] == d.SIXP_MSG_TYPE_REQUEST)
                and
                (packet['app']['code'] == d.SIXP_CMD_CLEAR)
            )
        assert len([l for l in logs if it_is_add_request(l['packet'])]) > 0
        assert len([l for l in logs if it_is_clear_request(l['packet'])]) > 0
    @pytest.fixture(params=['adapt_to_traffic', 'relocate'])
    def function_under_test(self, request):
        """Parametrized fixture selecting which MSF internal routine to exercise."""
        return request.param
def test_no_available_cell(self, sim_engine, function_under_test):
sim_engine = sim_engine(
diff_config = {
'exec_numSlotframesPerRun': 1000,
'exec_numMotes' : 2,
'app_pkPeriod' : 0,
'sf_class' : 'MSF',
'conn_class' : 'Linear'
}
)
# for quick access
root = sim_engine.motes[0]
hop_1 = sim_engine.motes[1]
asn_at_end_of_simulation = (
sim_engine.settings.tsch_slotframeLength *
sim_engine.settings.exec_numSlotframesPerRun
)
# wait for hop_1 to get ready.
run_until_mote_is_ready_for_app(sim_engine, hop_1)
assert sim_engine.getAsn() < asn_at_end_of_simulation
# fill up the hop_1's schedule
channel_offset = 0
cell_options = [d.CELLOPTION_TX]
used_slots = hop_1.tsch.getSchedule().keys()
for _slot in range(sim_engine.settings.tsch_slotframeLength):
if _slot in used_slots:
continue
else:
hop_1.tsch.addCell(_slot, channel_offset, root.id, cell_options)
assert len(hop_1.tsch.getSchedule()) == sim_engine.settings.tsch_slotframeLength
# put dummy stats so that scheduling adaptation can be triggered
hop_1.sf.num_cells_passed = 100
hop_1.sf.num_cells_used = hop_1.sf.num_cells_passed
# trigger scheduling adaptation
if function_under_test == 'adapt_to_traffic':
hop_1.sf._adapt_to_traffic(root.id)
elif function_under_test == 'relocate':
_slot = hop_1.tsch.getTxRxSharedCells(root.id).keys()[0]
relocating_cell = hop_1.tsch.getTxRxSharedCells(root.id)[_slot]
hop_1.sf._request_relocating_cells(
neighbor_id = root.id,
cell_options = [
d.CELLOPTION_TX, d.CELLOPTION_RX, d.CELLOPTION_SHARED
],
num_relocating_cells = 1,
cell_list = [relocating_cell]
)
else:
# not implemented
assert False
# make sure the log is written into the file
SimEngine.SimLog.SimLog().flush()
# MSF should output a "schedule-full" error in the log file
logs = u.read_log_file(
filter = [SimLog.LOG_MSF_ERROR_SCHEDULE_FULL['type']],
after_asn = sim_engine.getAsn() - 1
)
assert len(logs) == 1
assert logs[0]['_mote_id'] == hop_1.id
|
# -*- coding:utf-8 -*-
# Scrape jokes from qiushibaike.com (ported from Python 2 to Python 3:
# urllib2 -> urllib.request/urllib.error, print statements -> print()).
import re
import urllib.request
import urllib.error


def scanQiushi(page):
    """Fetch page *page* of the "hot" listing and print each author/joke pair.

    Network or HTTP failures are reported (status code and/or reason)
    instead of raising, matching the original best-effort behavior.
    """
    url = 'http://www.qiushibaike.com/hot/page/' + str(page)
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = {'User-Agent': user_agent}
    try:
        request = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(request)
        content = response.read().decode('utf-8')
        # Capture author (<h2>) and joke body (<span> inside the content div).
        pattern = re.compile(r'<h2>(.*?)</h2>.*?<div class="content">\s+<span>(.*?)</span>\s+</div>', re.S)
        items = re.findall(pattern, content)
        print('第', page, '页内容:\n')
        for item in items:
            # Replace <br>/<br /> tags with newlines before printing the body.
            print('作者:', item[0], '\n', '内容:', re.sub(r'<br[ ]?/?>', '\n', item[1]), '\n')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)


# Scan the first five pages when run as a script.
for i in range(5):
    scanQiushi(i + 1)
from Combat import Enemy, Combat
from Player import Player, Weapon, Armour
from bestiary import bastard_sword, quarterstaff

# Build the demo character and hand over the starting gear, in order.
player = Player("Hrothgar")
for starting_item in (
    bastard_sword,
    quarterstaff,
    Armour("Leather Leggings", "", 1, "leggings"),
):
    player.add_item(starting_item)
player.equip("sword")

# Start a random encounter with the equipped player.
rand_encounter = Combat(player)
rand_encounter.combat_loop()
# -*- coding: utf-8 -*-
"""SESLR data API application setup.

@author: Chris Lucas
"""
import os

from flask import Flask, render_template
from flask_restplus import Api

from apis import find_spots, periods, photos, search
from core import db

app = Flask(__name__)
app.config['SWAGGER_UI_DOC_EXPANSION'] = 'list'
app.config['RESTPLUS_VALIDATE'] = True
app.config['RESTPLUS_MASK_SWAGGER'] = False

# Read the deployment mode from the environment exactly once (the original
# read it twice, inconsistently); a missing variable still fails fast.
app_mode = os.environ['SESLR_APP_MODE']
# Non-production deployments serve the docs page under a '/<mode>' prefix.
mode = '/' + app_mode if app_mode != 'prod' else ''


@app.route('/')
def documentation():
    """Serve the human-readable API documentation page."""
    return render_template('documentation.html', mode=mode)


api = Api(
    app,
    title='SESLR Data API',
    version='1.0',
    description='The data API for the SESLR project.'
)
api.add_namespace(find_spots, path='/find_spots')
api.add_namespace(periods, path='/periods')
# The demo deployment must not expose the photos and search endpoints.
if app_mode != 'demo':
    api.add_namespace(photos, path='/photos')
    api.add_namespace(search, path='/search')
# Drop the default namespace that flask_restplus registers automatically.
api.namespaces.pop(0)

db.init_app(app)
"""
Module containing tests for the Params class
"""
import pathlib
import pytest
from micone.config import ParamsSet
from micone.config.params import Params
@pytest.mark.usefixtures("pipeline_settings", "example_pipelines")
class TestParamsSet:
    """Tests for the ParamsSet container and the Params entries it holds."""

    def test_init(self, pipeline_settings):
        """Construction succeeds on valid data and rejects malformed fields."""
        assert ParamsSet(pipeline_settings["otu_processing"])
        wrong_format = {
            "env": "micone",
            "root_dir": "filter/partition",
            "output_location": "split_otu_table",
            "input": [{"datatype": "sequence_16s", "format": ["fasta"]}],
            "output": [
                {"datatype": "otu_table", "format": ["biom"], "location": "*.biom"}
            ],
            "parameters": [{"process": "something", "data": 123}],
        }
        key = "otu_processing.filter.partition"
        # The template itself is well-formed.
        assert Params((key, wrong_format))
        # Non-list values for these fields must raise TypeError.
        for field in ("input", "output", "parameters"):
            with pytest.raises(TypeError):
                Params((key, {**wrong_format, field: "string"}))
        # Entries missing required sub-keys must raise ValueError.
        for field, bad_value in (
            ("input", [{"datatype": "sequence_16s"}]),
            ("output", [{"datatype": "sequence_16s"}]),
            ("parameters", [{"data": "temp"}]),
        ):
            with pytest.raises(ValueError):
                Params((key, {**wrong_format, field: bad_value}))

    def test_iter_len(self, pipeline_settings):
        """len() matches the number of leaf processes; iteration yields Params."""
        external_raw = pipeline_settings["otu_assignment"]
        external = ParamsSet(external_raw)
        leaf_count = sum(
            len(external_raw[l1][l2]) for l1 in external_raw for l2 in external_raw[l1]
        )
        assert leaf_count == len(external)
        assert all(isinstance(process, Params) for process in external)

    def test_contains_getitem(self, pipeline_settings):
        """Every leaf process is addressable by its dotted three-part key."""
        external_raw = pipeline_settings["otu_assignment"]
        external = ParamsSet(external_raw)
        for l1, level2 in external_raw.items():
            for l2, level3 in level2.items():
                for l3 in level3:
                    assert f"{l1}.{l2}.{l3}" in external

    def test_param_get(self, pipeline_settings):
        """get() retrieves entries from each of the three categories."""
        internal = ParamsSet(pipeline_settings["otu_processing"])
        curr_param = internal["otu_processing.filter.group"]
        for name, category in (
            ("otu_table", "input"),
            ("group", "parameters"),
            ("children_map", "output"),
        ):
            assert curr_param.get(name, category=category)

    def test_param_update_location(self, pipeline_settings):
        """update_location() rewrites the stored location of an input entry."""
        external = ParamsSet(pipeline_settings["otu_assignment"])
        key = "otu_assignment.sequence_processing.demultiplex_illumina"
        external[key].update_location(
            "sequence_16s", location="file_path", category="input"
        )
        updated = external[key].get("sequence_16s", "input")
        assert updated.location == pathlib.Path("file_path")

    def test_param_merge(self, pipeline_settings, example_pipelines):
        """merge() applies user-supplied locations onto the defaults."""
        external = ParamsSet(pipeline_settings["otu_assignment"])
        curr_param = external["otu_assignment.sequence_processing.demultiplex_454"]
        user_settings = example_pipelines[
            "otu_assignment_sequence_processing_demultiplex_454"
        ]
        curr_param.merge(
            user_settings["otu_assignment"]["sequence_processing"]["demultiplex_454"]
        )
        expected_locations = {
            "sequence_16s": "/path/to/sequence_16s",
            "quality": "/path/to/quality",
            "sample_barcode_mapping": "/path/to/mapping",
        }
        for name, location in expected_locations.items():
            assert curr_param.get(name, "input").location == pathlib.Path(location)
|
__author__ = 'yury'
# A palindrome is a string that is written the same forward as it is in reverse.
# Write a method to return the longest palindrome in a given string.
# Example: "yzzy" == solution("xyzzy")
def is_palindrome(x):
    """Return True when *x* reads the same forwards and backwards."""
    return all(front == back for front, back in zip(x, reversed(x)))
def each_cons(x, size):
    """Return every contiguous window of *size* elements of *x*, left to right."""
    windows = []
    for start in range(len(x) - size + 1):
        windows.append(x[start:start + size])
    return windows
def solution(t):
    """Return the longest palindromic substring of *t*.

    Scans window sizes from len(t) down to 1 and returns the first
    palindrome found, so the leftmost longest palindrome wins.

    Fixes: the original returned None for an empty string (its loop never
    ran); we return "" instead. The helpers are inlined and the redundant
    "".join() on an already-string window is dropped.
    """
    for size in range(len(t), 0, -1):
        for start in range(len(t) - size + 1):
            word = t[start:start + size]
            if word == word[::-1]:
                return word
    return ""
|
# Generated by Django 3.0.5 on 2020-07-03 16:26
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the create/modify/send timestamp
    columns from the TableUser model."""

    dependencies = [
        ('login', '0005_auto_20200703_1623'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='tableuser',
            name='table_user_col_create_time',
        ),
        migrations.RemoveField(
            model_name='tableuser',
            name='table_user_col_modify_time',
        ),
        migrations.RemoveField(
            model_name='tableuser',
            name='table_user_col_send_time',
        ),
    ]
|
import cv2

# NOTE(review): the path is machine-specific; parameterize before reuse.
image = cv2.imread("C:/Users/User/Desktop/python/photo.jpg", 3)
# Bug fix: the original passed the undefined name `resize` to imshow,
# which raised NameError; display the loaded image instead.
cv2.imshow("Title", image)
# Block until a key is pressed, then close the window.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import random
"""
Number Guesser Program
This is an example of how to use variables to
keep track of information in a program that
also makes use of loops
"""
def main():
    """Play the guessing game until the user answers "correct".

    The computer guesses a number in [1, 100]; the user narrows the range
    by typing "lower" or "higher" after each guess.
    """
    count = 1
    lower, upper = 1, 100
    num = random.randint(lower, upper)
    while True:
        indicator = input(f"Is your number {num}? ")
        if indicator == "correct":
            print(f"I win! It took me {count} guesses")
            break
        # Narrow the search window and pick the next guess.
        num, lower, upper = guess_number(num, indicator, lower, upper)
        count += 1
def guess_number(num, indicator, lower, upper):
    """Narrow [lower, upper] according to the player's hint and pick a new guess.

    Returns (new_guess, new_lower, new_upper). An unrecognized hint leaves
    everything unchanged and prints a usage reminder.
    """
    if indicator == "lower":
        upper = num - 1
        num = random.randint(lower, upper)
    elif indicator == "higher":
        lower = num + 1
        num = random.randint(lower, upper)
    else:
        print("Just type: lower or higher")
    return num, lower, upper
# Run the game only when executed as a script, not on import.
if __name__ == "__main__":
    main()
#
# this is a contrived example designed to teach variable scoping behavior.
#
# usage:
#
# opsmop apply content/var_scoping.py
#
# ==============================================================================
from opsmop.core.easy import *
# ==============================================================================
class One(Role):
    """Role demonstrating variable precedence: kwargs, set_variables, and Set()."""

    def set_variables(self):
        # Role defaults; 'level' is overridden at runtime by Set() below.
        return dict(glorp='fizz', level='one')

    def set_resources(self):
        return Resources(
            Echo("Role parameterization can work by kwargs. foosball={{ foosball }}"),
            Echo("Role parameterization can work by set_variables. glorp={{ glorp }}"),
            Echo("Policies can be parameterized, blarg={{ blarg }}"),
            Echo("Policy scope is available, other={{ other }} and should be True"),
            # Global(global_var='blippy')
            Echo("Inside the Role 'One', level={{ level }} and should be 'one'"),
            Set(level='runtime'),
            Resources(
                # A nested Resources() opens a child scope; its Set() does not
                # leak back out.
                Set(level='nested'),
                Echo("Inside a nested scope, level={{ level }} and should be 'nested'")
            ),
            Echo("Back outside that scope, level={{ level }} and should be 'runtime'")
        )
# ==============================================================================
class Two(Role):
    """Role showing that each role gets its own variable namespace."""

    def set_variables(self):
        # 'level' here is independent of the value defined in Role 'One'.
        return dict(level='two')

    def set_resources(self):
        return Resources(
            Echo("Policies can be parameterized, blarg={{ blarg }}"),
            Echo("Roles can be parameterized. foosball={{ foosball }} and should be 2 or 3"),
            # future feature (soon):
            # SetGlobal(blippy='foo'),
            # Echo("Global variables can be set. global_var={{ blippy }}"),
            Echo("This role defines level differently than the Role 'One'. level={{ level }} and should be two")
        )
# ==============================================================================
class ScopeTest(Policy):
    """Policy wiring the demo roles together; its variables are visible to all roles."""

    def set_variables(self):
        return dict(level='Scope', other=True)

    def set_roles(self):
        # 'Two' is instantiated twice with different kwargs to show
        # per-instance parameterization.
        return Roles(
            One(foosball=1),
            Two(foosball=2),
            Two(foosball=3)
        )
# Policies exported to the opsmop runner; 'blarg' parameterizes the policy scope.
EXPORTED = [
    ScopeTest(blarg=5150)
]
|
# Generated by Django 3.2.3 on 2021-05-23 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add Location to EventDetails and a
    registeredevent image field to user_details.

    NOTE(review): 'registeredevent' as an ImageField with default 'image'
    looks odd for a registration field — verify against the model.
    """

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='eventdetails',
            name='Location',
            field=models.CharField(default='college', max_length=20),
        ),
        migrations.AddField(
            model_name='user_details',
            name='registeredevent',
            field=models.ImageField(default='image', upload_to='pics'),
            # preserve_default=False: the default only back-fills existing rows.
            preserve_default=False,
        ),
    ]
|
from sys import stdin
def main():
    """Count the stdin lines that satisfy the position password policy and print the total."""
    total = 0
    for line in stdin:
        total += is_valid(line)
    print(total)
def is_valid(line):
    """Return 1 when exactly one of the two 1-based positions holds the policy char, else 0."""
    parts = line.split()
    # "a-b" gives two 1-based positions; shift them to 0-based indices.
    first, second = (int(pos) - 1 for pos in parts[0].split("-"))
    char = parts[1][0]
    password = parts[2]
    # XOR: valid only if the char appears at exactly one of the positions.
    return int((password[first] == char) != (password[second] == char))
def int_decrement(n):
    """Convert *n* to int and shift from 1-based to 0-based indexing."""
    value = int(n)
    return value - 1
# Run the solver only when executed as a script (reads puzzle input on stdin).
if __name__ == "__main__":
    main()
|
# my code
# Small demo: define three values and print their pairwise sums.
a = 1
b = 2
c = 3

# sums
d = a + b
print(f"{a} {b} {d}")
print(f"Summe aus a und b: {d}")
print(f"Summe aus c und b: {c + b}")
print(f"Summe aus a und c: {a + c}")

# code is from Christine Greif
# date 18.6.21
# nicht auf git
print('Das ist noch nicht auf git')
import serial
from time import sleep
class test:
    """Minimal driver that sends axis/speed state to a device over a serial port.

    State layout: current[0] = active-axis code, current[1] = left speed,
    current[2] = right speed.
    """
    def __init__(self):
        # NOTE(review): the bare except leaves self.ser unset on failure, so
        # any later write() raises AttributeError — consider re-raising.
        try:
            self.ser = serial.Serial("/dev/ttyACM2", 9600)
        except:
            print("ERROR: Cannot connect to serial port")
        self.current = [0,1500,1500]
    def setSpeed(self, left, right):
        """Store both speeds and push the whole state string to the device."""
        self.current[1] = left
        self.current[2] = right
        # Concatenate all three values into one string, e.g. "115001500".
        self.ser.write(''.join(str(e) for e in self.current).encode())
    def setActive(self, s):
        """Map axis index s (0..2) to the device code (1..3) and store it.

        NOTE(review): on an invalid index this prints an error but still
        stores the raw value — likely unintended; confirm with callers.
        """
        if s == 0:
            self.current[0] = 1
        elif s == 1:
            self.current[0] = 2
        elif s == 2:
            self.current[0] = 3
        else:
            print("Cannot set the active axis to: " , s)
            self.current[0] = s
        print(self.current[0])
    def writeActive(self):
        """Send only the active-axis code to the device."""
        self.ser.write(str(self.current[0]).encode())
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from forms import registration_form
from django.contrib.auth import logout
from django.contrib.auth import authenticate
# try to login the user
def login(request):
    """Authenticate the user from POST data and render the dashboard on success.

    Bug fixes: the password was read from the 'last_name' POST field, and the
    render context `variable` was never defined (NameError on the success path).
    """
    # NOTE(review): the username was also read from 'last_name'; assuming the
    # login form posts it as 'email' — confirm against the template.
    email_addr = request.POST.get('email', '')
    password = request.POST.get('password', '')
    user = authenticate(username=email_addr, password=password)
    if user is not None:
        if user.is_active:
            variable = {'user': user}
            return render_to_response("users/dashboard.html", variable, context_instance=RequestContext(request))
        else:
            print("The password is valid, but the account has been disabled!")
    else:
        print("The username and password were incorrect.")
# try to logout the user
def logout(request):
    """Log the current user out of the session.

    Bug fix: the original called `logout(request)`, but this view shadows
    the imported django.contrib.auth.logout, so it recursed until
    RecursionError. Import the auth function under an alias inside the view
    to break the name shadowing.
    """
    from django.contrib.auth import logout as auth_logout
    auth_logout(request)
    # Redirect to a success page.
# try to register the user
def register(request):
    """Display and process the registration form.

    Renders the form; on a valid POST, saves the new user and flags success.
    (The unused local `user` from the original was removed.)
    """
    success = ''
    form = registration_form(request.POST or None)
    if form.is_valid():
        form.save()
        success = True
    variable = {'form': form, 'success': success}
    return render_to_response("login/registration.html", variable, context_instance=RequestContext(request))
|
# Import the pandas library
import pandas as pd

# Read the provided customer data CSV file
customer_df = pd.read_csv("C:/Users/anipb/Desktop/CustomerList.csv")

# Display the customer data
print(customer_df)

# Named boolean masks for the country/state filters demonstrated below.
is_usa = customer_df['Country'] == 'USA'
is_georgia = customer_df['State'] == 'Georgia'
is_ontario = customer_df['State'] == 'Ontario'

# Find customers from Georgia and from the USA
print("Example for AND operator")
print(customer_df.loc[is_usa & is_georgia])

# Find customers from the USA or from Ontario
print("Example for OR operator")
print(customer_df.loc[is_usa | is_ontario])

# Find customers who are not from the USA
print("Example for NOT operator")
print(customer_df.loc[~is_usa])
from z3 import *

# Three Boolean "indicator" literals used to tag constraints so that
# unsat-core extraction can report which tagged constraints conflict.
# (Ported from Python 2: print statements -> print(); dead commented-out
# exploration code removed.)
IC = BoolVector('IC', 3)
x, y = Ints('x y')

s = Solver()
# Each constraint is guarded by an indicator; assuming the indicator forces
# the constraint on.
s.add(Implies(IC[0], x > 10),
      Implies(IC[0], y > x),
      Implies(IC[1], y < 5),
      Implies(IC[2], y > 0))

# Check satisfiability under the assumption that all indicators hold; on
# unsat, the solver exposes a core of conflicting indicator literals.
print(s.check(IC))
print(s.unsat_core())
print(IC)

# Collect the positions of the indicators that appear in the unsat core.
idx = []
for i in range(0, len(IC)):
    if is_true(simplify(If(IC[i] in s.unsat_core(), True, False))):
        idx.append(i)
print(idx)
"""Definitions for the components for models participating in the framework.
A "component" is a functional unit that performs a specific step in
the coupled model processing pipeline. Generally, a component will
correspond to one of the models being coupled, but components can also
implement utility processes, such as formatting and packaging output,
and particularly complex models could be split over several components.
Classes:
CapabilityNotFound - Exception class raised when a component requests
a capability that is not provided by any
component in the system.
ComponentBase - Base class for all components. Provides the
interface, as well as services like managing
threads, locks, and condition variables.
GlobalParamsComponent - Store parameters common to all components.
GcamComponent - Run the GCAM core model.
TethysComponent - Run the Tethys spatiotemporal global water use
downscaling model.
XanthosComponent - Run the Xanthos global hydrology model.
FldgenComponent - Run the fldgen climate scenario generator.
HectorStubComponent - Serve Hector output for RCP scenarios.
DummyComponent - A simple component class for tests.
"""
# TODO: many of these classes have gotten a bit long. It would be
# better to refactor them so that the main functionality is
# implemented in a separate python module for each component, with
# the class derived from ComponentBase providing a thin wrapper that grabs inputs
# needed from other components and passes them to a main function in the
# relevant python component.
import os
import re
import subprocess
import threading
import logging
import pkg_resources
import pandas as pd
from cassandra import util
# This class is here to make it easy for a class to ignore failures to
# find a particular capability in fetch() while still failing on any
# other sort of error.
class CapabilityNotFound(RuntimeError):
    """Raised when a requested capability is not provided by any component.

    Allows callers to ignore a missing capability in fetch() while still
    failing on any other sort of error.
    """
    pass
class ComponentBase(object):
    """Common base class for all components (i.e., functional units) in the system.

    We can put any utility functions that are common to all components
    here, but its main purpose is to provide all the multithreading
    functionality so that the individual components can focus
    exclusively on doing their particular tasks.

    Methods that shouldn't be overridden:

    run(): start the component running in its own thread. Each subclass
           provides a run_component() method that does the actual work and
           returns 0 on success. Returns the thread object so the driver
           can join() all component threads.

    run_component_wrapper(): used internally by run(). Don't monkey around
           with this function.

    fetch(): retrieve the component's results for a single capability,
           waiting for completion if necessary. This mechanism implicitly
           enforces correct ordering between components. Note that we don't
           make any checks for deadlock caused by circular dependencies.

    addparam(): add a key and value to the params array. Generally this
           should only be done in the config file parser.

    addcapability(): add a capability to the capability table.

    addresults(): update the results for a single capability. Use this
           rather than updating self.results directly, as this method
           ensures that the capability exists.

    Methods that can be extended (but not overridden; you must be sure
    to call the base method):

    __init__(): stores a reference to the capability table for future
           lookup; may optionally call addcapability() for capabilities
           that are independent of any parameters.

    finalize_parsing(): post-process parsed key-value pairs (e.g. type
           conversion). The base version handles parameters applicable to
           all components, so it must always be called when overridden.

    Methods that can be overridden freely:

    run_component(): function that does the component's work; called only
           via run() / run_component_wrapper().

    Attributes:

    params: dictionary of parameters parsed from the config file.
           Alter only via the addparam method.
    """

    def __init__(self, cap_tbl):
        """Initialize the component base.

        Every subclass __init__ method should call this method as its
        first action. The cap_tbl argument is a dictionary linking
        capabilities (i.e., tags for identifying functional units)
        with the components that provide those capabilities. Subclasses
        should extend this method by adding their self reference to
        the table under an appropriate tag. E.g.:
             cap_tbl['gcam_core'] = self

        The capability table will be available as self.cap_tbl for use
        in a subclass's methods. Since cassandra passes the same
        capacity table to each subclass instance initialization, by
        the time a component starts running the table will contain an
        index of all the active components in the calculation.
        """
        self.status = 0         # status indicator: 0- not yet run, 1- complete, 2- error
        self.results = {}
        self.params = {}
        self.cap_tbl = cap_tbl  # store a reference to the capability lookup table
        self.condition = threading.Condition()

    def run(self):
        """Execute the component's run_component() method in a separate thread."""
        # A bound method is a valid thread target; no lambda wrapper needed.
        thread = threading.Thread(target=self.run_component_wrapper)
        thread.start()
        # returns immediately
        return thread

    def run_component_wrapper(self):
        """Lock the condition variable, execute run_component(), and unlock when it returns.

        At the conclusion of the run_component() method, self.status will be
        set to 1 if the run was successful, to 2 otherwise. This
        variable will be used by the fetch() method to notify clients
        if a run failed. Either way, threads waiting on the condition
        variable will be released when the run completes.

        At the end of this function the following will be true:

        1. Either self.status==1 or self.status==2

        2. If self.status==1, the self.results has a full set of results in it.

        This function should be called *only* by the run() method above.
        """
        # This block holds the lock on the condition variable for the
        # entire time the run_component() method is running. That's ok for
        # now, but it's not ideal, and it will cause problems when we
        # eventually try to implement co-simulations.
        with self.condition:
            try:
                logging.debug(f'starting {self.__class__}')
                rv = self.run_component()
                if rv != 0:
                    # possibly add some other error handling here.
                    msg = f"{self.__class__}: run_component returned error code {str(rv)}"
                    logging.error(msg)
                    raise RuntimeError(msg)
                else:
                    logging.debug(f"{self.__class__}: finished successfully.\n")
                    self.status = 1  # set success condition
            except BaseException:
                # Deliberately catch *everything* (including KeyboardInterrupt)
                # so waiting threads always observe a terminal status; the
                # exception is logged and re-raised.
                self.status = 2      # set error condition
                logging.exception(f'Exception in component {str(self.__class__)}.')
                raise
            finally:
                self.condition.notify_all()  # release any waiting threads
                logging.debug(f'completed {self.__class__}')
        # end of with block: lock on condition var released.

    def fetch(self, capability):
        """Return the data associated with the named capability.

        Components don't return results from run() because it will run
        asynchronously. Instead, if you want the results associated
        with a particular capability, you call this method with the
        name of the capability. If the capability does not exist in
        the system, a CapabilityNotFound exception will be thrown.

        Internally, this method first looks up the component that has
        the requested data and forwards the request to that
        component's fetch method. That call waits if necessary, then
        checks whether the run was successful (indicated by
        self.status), and if so returns the requested data. If the
        run_component() method failed, the variable will so indicate,
        and an exception will be raised.

        Components should store their results by calling
        self.addresults(capability-name, data); that is where this method
        will look for them. The system levies no particular requirements
        on the format of the data returned.

        WARNING: if a component tries to fetch a capability that it,
        itself, provides, this will lead to instant deadlock. So,
        don't do that.
        """
        try:
            provider = self.cap_tbl[capability]
        except KeyError:
            raise CapabilityNotFound(capability)

        if self is not provider:
            # This is a request (presumably originating in our own run
            # method) for a capability in another component. Forward
            # it to that component.
            return provider.fetch(capability)

        # If we get to here, then this is a request from another
        # component for some data we are holding.

        # If the component is currently running, then the condition
        # variable will be locked, and we will block when the 'with'
        # statement tries to obtain the lock.
        with self.condition:
            # Bug fix: Condition.wait() can return spuriously, so the
            # predicate must be re-checked in a loop (see the threading
            # module documentation), not a plain 'if'.
            while self.status == 0:  # component hasn't run yet. Wait on it
                logging.debug(f"\twaiting on {self.__class__}\n")
                self.condition.wait()
        # end of with block: lock is released

        # By this point, the component should have run. If status is not
        # success, then there has been an error.
        if self.status != 1:
            raise RuntimeError(f"{self.__class__}: wait() returned with non-success status!")

        return self.results[capability]

    def finalize_parsing(self):
        """Process parameters that are common to all components (e.g. clobber).

        The components will be responsible for processing their own
        special parameters. If a component needs to override this
        method, it should be sure to call the base version too.
        """
        self.clobber = True     # default to overwriting outputs
        if "clobber" in self.params:
            self.clobber = util.parseTFstring(self.params["clobber"])

        # processing for additional common parameters go here
        return

    def addparam(self, key, value):
        """Add a parameter key and value parsed from the config file.

        In the current design, this should be called only by the
        config file parser.
        """
        self.params[key] = value

    def addcapability(self, capability):
        """Add a capability to the capability table.

        Raises RuntimeError on a duplicate definition.
        """
        if capability in self.cap_tbl:
            raise RuntimeError(f'Duplicate definition of capability {capability}.')
        self.cap_tbl[capability] = self

    def addresults(self, capability, res):
        """Add data to the specified capability of this component.

        Raises CapabilityNotFound if the capability is unknown, and
        RuntimeError if it belongs to a different component.
        """
        if capability not in self.cap_tbl:
            raise CapabilityNotFound(capability)
        if self.cap_tbl[capability] is not self:
            raise RuntimeError(f'Component {self.__class__} does not own capability {capability}.')
        self.results[capability] = res

    def run_component(self):
        """Subclasses of ComponentBase are required to override this method.

        Components' implementations of this method should add the
        results of their calculations to the self.results dictionary
        by calling self.addresults(<capability-name>, data), where
        capability-name is the name of the capability being provided.
        A component can provide multiple capabilities, with each one
        getting its own entry in the results dictionary.
        """
        raise NotImplementedError("ComponentBase is not a runnable class.")
# class to hold the general parameters.
class GlobalParamsComponent(ComponentBase):
    """Class to hold the general parameters for the calculation.

    Technically this isn't a component as such; it doesn't run anything,
    but treating it as a component lets the same parsing code handle it,
    and having it in the capability table (as 'general') lets any
    component look the global parameters up.

    Parameters:

      ModelInterface - Location of the ModelInterface jar used to query
                       GCAM outputs.
      DBXMLlib       - Location of the DBXML libraries used by older
                       versions of the ModelInterface code.
      inputdir       - Directory containing general input files (OPTIONAL,
                       default './input-data'); relative paths are taken
                       from the working directory.
      rgnconfig      - Directory containing region configuration files
                       (OPTIONAL, default 'rgn14'); relative paths are
                       taken from inputdir unless they start with './'.
    """

    def __init__(self, cap_tbl):
        """Register the 'general' capability and publish params as its results.

        Also sets util.global_params so gcamutil can reach these
        parameters without a capability lookup.
        """
        super().__init__(cap_tbl)

        self.addcapability('general')
        # Reference copy: entries added to params later also appear in results.
        self.addresults('general', self.params)

        logging.debug('General parameters as input:')
        logging.debug(self.results['general'])

        # We need to allow gcamutil access to these parameters, since it
        # doesn't otherwise know how to find the global params component.
        # <- gross. we need a better way to do this.
        util.global_params = self

    def run_component(self):
        """Set the default value for the optional parameters, and convert filenames to absolute paths."""
        genrslt = self.results['general']

        # Required paths: make absolute (order preserved: ModelInterface first).
        for pathkey in ('ModelInterface', 'DBXMLlib'):
            genrslt[pathkey] = util.abspath(genrslt[pathkey])

        # Optional input directory, resolved against the working directory.
        inputdir = genrslt.get('inputdir', './input-data')
        genrslt['inputdir'] = util.abspath(inputdir, os.getcwd())

        # Optional region configuration directory, resolved against inputdir.
        if 'rgnconfig' in genrslt:
            rgnconfig = genrslt['rgnconfig']
        else:
            logging.warning('[GlobalParamsComponent]: Using default region mapping (14 region)')
            rgnconfig = 'rgn14'
        genrslt['rgnconfig'] = util.abspath(rgnconfig, genrslt['inputdir'])

        return 0                # nothing to do here.
class GcamComponent(ComponentBase):
    """Provide the 'gcam-core' capability.

    This component runs the GCAM core model, making the location of the
    output database available under the 'gcam-core' capability.

    Parameters:
      exe       = full path to gcam.exe
      config    = full path to gcam configuration file
      logconfig = full path to gcam log configuration file
      clobber   = flag: True = clobber old outputs, False = preserve old outputs

    Results:
      dbxml - gcam dbxml output file.  We get this from the gcam config.xml file.

    Component dependencies: none
    """
    def __init__(self, cap_tbl):
        """Add self to the capability table."""
        super(GcamComponent, self).__init__(cap_tbl)
        self.addcapability('gcam-core')

    def run_component(self):
        """Run the GCAM core model.

        We start by checking to see that all the input files needed for
        GCAM to run are actually available.  If any of them are missing,
        we raise an IOError exception.  Next we parse the config.xml
        file to find out what outputs we expect, and we check to see if
        they are already present.  If they are, and if 'clobber' is not
        set to True, then we skip the run and return the location of the
        existing dbxml.  Otherwise, we do the GCAM run and then return
        the dbxml location.

        :return: 0 on skip; otherwise the GCAM process's return code.
        :raises IOError: if exe, config, or logconfig does not exist.
        :raises RuntimeError: if the config has dbxml output disabled.
        """
        # Process the parameters
        exe = self.params["exe"]
        cfg = self.params["config"]
        logcfg = self.params["logconfig"]
        try:
            logfile = self.params['logfile']  # file for redirecting gcam's copious stdout
        except KeyError:
            # logfile is optional
            logfile = None
        # usually the exe, cfg, and logcfg files will be in the same
        # directory, but in case of difference, take the location of
        # the config file as controlling.
        # NOTE(review): the comment above says the config file controls, but
        # the code uses the exe's directory -- confirm which is intended.
        self.workdir = os.path.dirname(exe)
        msgpfx = "GcamComponent: "  # prefix for messages coming out of this component
        # Do some basic checks: do these files exist, etc.
        if not os.path.exists(exe):
            raise IOError(msgpfx + "File " + exe + " does not exist!")
        if not os.path.exists(cfg):
            raise IOError(msgpfx + "File " + cfg + " does not exist!")
        if not os.path.exists(logcfg):
            raise IOError(msgpfx + "File " + logcfg + " does not exist!")
        # we also need to get the location of the dbxml output file.
        # It's in the gcam.config file (we don't repeat it in the
        # config for this component because then we would have no way to
        # ensure consistency).
        dbxmlfpat = re.compile(r'<Value name="xmldb-location">(.*)</Value>')
        dbenabledpat = re.compile(r'<Value name="write-xml-db">(.*)</Value>')
        # get a reference to the results that we will be exporting
        gcamrslt = {}
        with open(cfg, "r") as cfgfile:
            # we don't need to parse the whole config file; all we want
            # is to locate the name of the output file and make sure
            # the dbxml output is turned on.
            dbxmlfile = None
            for line in cfgfile:
                # the dbxml file name will come early in the file
                match = dbxmlfpat.match(line.lstrip())
                if match:
                    dbxmlfile = match.group(1)
                    break
            logging.info(f"{self.__class__}: dbxmlfile = {dbxmlfile}")
            # The file spec is a relative path, starting from the
            # directory that contains the config file.
            # NOTE(review): if the xmldb-location pattern never matched,
            # dbxmlfile is still None here and os.path.join raises TypeError.
            # Presumably valid GCAM configs always contain it -- verify.
            dbxmlfile = os.path.join(self.workdir, dbxmlfile)
            gcamrslt["dbxml"] = dbxmlfile  # This is our eventual output
            if os.path.exists(dbxmlfile):
                # NOTE(review): self.clobber is not assigned in this class;
                # presumably set by the framework's parameter parsing -- confirm.
                if not self.clobber:
                    # This is not an error; it just means we can leave
                    # the existing output in place and return it.
                    logging.info("GcamComponent: results exist and no clobber. Skipping.")
                    gcamrslt["changed"] = 0  # mark the cached results as clean
                    return 0
                else:
                    # have to remove the dbxml, or we will merely append to it
                    os.unlink(dbxmlfile)
            # now make sure that the dbxml output is turned on
            for line in cfgfile:
                match = dbenabledpat.match(line.lstrip())
                if match:
                    if match.group(1) != "1":
                        raise RuntimeError(
                            msgpfx + "Config file has dbxml input turned off. Running GCAM would be futile.")
                    else:
                        break
        # Add our output structure to the results dictionary.
        self.addresults('gcam-core', gcamrslt)
        # now we're ready to actually do the run.  We don't check the return code; we let the run() method do that.
        logging.info(f"Running: {exe} -C{cfg} -L{logcfg}")
        if logfile is None:
            return subprocess.call([exe, '-C'+cfg, '-L'+logcfg], cwd=self.workdir)
        else:
            with open(logfile, "w") as lf:
                return subprocess.call([exe, '-C'+cfg, '-L'+logcfg], stdout=lf, cwd=self.workdir)
class TethysComponent(ComponentBase):
    """Class for the global water withdrawal downscaling model Tethys.

    This component makes use of the Tethys package, an open-source
    spatiotemporal water demand downscaling model.

    The results are global annual gridded water withdrawal by sector,
    providing a capability for each Tethys output sector.  Units are
    specified by the Tethys configuration file.  If Tethys is set up to
    run with temporal downscaling, additional capabilities for monthly
    results will be available.

    For more information: https://github.com/JGCRI/tethys

    params:
      config_file - path to Tethys config file
    """
    def __init__(self, cap_tbl):
        super(TethysComponent, self).__init__(cap_tbl)
        # Map the capability name to the corresponding Tethys result attribute.
        self.capability_map = {
            "gridded_water_demand_dom": "wddom",       # Domestic
            "gridded_water_demand_elec": "wdelec",     # Electricity Generation
            "gridded_water_demand_irr": "wdirr",       # Irrigation
            "gridded_water_demand_liv": "wdliv",       # Livestock
            "gridded_water_demand_mfg": "wdmfg",       # Manufacturing
            "gridded_water_demand_min": "wdmin",       # Mining
            "gridded_water_demand_nonag": "wdnonag",   # Non-Agricultural
            "gridded_water_demand_total": "wdtotal"    # Total
        }
        # Monthly (temporally downscaled) outputs; merged into capability_map
        # in finalize_parsing only when temporal downscaling is enabled.
        self.temporal_sectors = {
            "gridded_monthly_water_demand_dom": "twddom",     # Domestic
            "gridded_monthly_water_demand_elec": "twdelec",   # Electricity Generation
            "gridded_monthly_water_demand_irr": "twdirr",     # Irrigation
            "gridded_monthly_water_demand_liv": "twdliv",     # Livestock
            "gridded_monthly_water_demand_mfg": "twdmfg",     # Manufacturing
            "gridded_monthly_water_demand_min": "twdmin",     # Mining
        }
        for cap in self.capability_map.keys():
            self.addcapability(cap)

    def finalize_parsing(self):
        """Register the monthly capabilities when temporal downscaling is on."""
        super(TethysComponent, self).finalize_parsing()
        # Check if Tethys is running with temporal downscaling (an optional output)
        from configobj import ConfigObj
        tethys_config = ConfigObj(self.params['config_file'])
        temporal_downscaling = tethys_config['Project']['PerformTemporal']
        # NOTE(review): ConfigObj values are strings, so any non-empty value
        # (including '0' or 'False') is truthy here.  Confirm the config uses
        # an empty value to disable temporal downscaling.
        # If it is, add the temporal downscaling capabilities
        if temporal_downscaling:
            for cap in self.temporal_sectors.keys():
                self.addcapability(cap)
            self.capability_map.update(self.temporal_sectors)

    def run_component(self):
        """Run Tethys and publish each sector's gridded result."""
        from tethys.model import Tethys
        config_file = self.params["config_file"]
        # run the Tethys model
        tethys_results = Tethys(config=config_file)
        # Publish one result per capability, pulled off the gridded_data object.
        for capability_name, tethys_attr in self.capability_map.items():
            self.addresults(capability_name, getattr(tethys_results.gridded_data, tethys_attr))
        return 0
class XanthosComponent(ComponentBase):
    """Class for the global hydrologic model Xanthos.

    This component makes use of the Xanthos package, an open-source hydrologic
    model (https://github.com/JGCRI/xanthos).

    The two main inputs to Xanthos are gridded monthly precipitation and
    temperature.  If these capabilities are provided by another component
    Xanthos will use them as arguments, otherwise it will expect them to be
    specified in the Xanthos configuration file.  It is assumed the order of
    the grids in the precipitation and temperature lists match one another.

    params:
      config_file   - Path to Xanthos config file
      OutputNameStr - Name for the directory to create for Xanthos outputs

    Capability dependencies (all optional):
      gridded_pr        - List of gridded monthly precipitation by grid cell
      gridded_tas       - List of gridded monthly temperature by grid cell
      gridded_pr_coord  - Matrix of lat/lon coordinates for the precip grid cells
      gridded_tas_coord - Matrix of lat/lon coordinates for the tas grid cells

    results:
      gridded_runoff - Capability 'gridded_runoff', a list of runoff matrices,
                       (gridcells x timestep) with the units and aggregation
                       level specified in the Xanthos config file
    """
    def __init__(self, cap_tbl):
        super(XanthosComponent, self).__init__(cap_tbl)
        self.addcapability("gridded_runoff")

    def finalize_parsing(self):
        """Load the reference file mapping Xanthos cell index to lat/lon."""
        super(XanthosComponent, self).finalize_parsing()
        from configobj import ConfigObj
        import pandas as pd
        xanthos_config = ConfigObj(self.params['config_file'])
        root_dir = xanthos_config['Project']['RootDir']
        in_dir = xanthos_config['Project']['InputFolder']
        ref_dir = xanthos_config['Project']['RefDir']
        cell_map_path = os.path.join(root_dir, in_dir, ref_dir, 'coordinates.csv')
        xcolnames = ['cell_id', 'lon', 'lat', 'lon_idx', 'lat_idx']
        # cell_map rows define the canonical Xanthos cell ordering (ids start at 1).
        self.cell_map = pd.read_csv(cell_map_path, names=xcolnames)

    def run_component(self):
        """Run Xanthos, optionally feeding it upstream climate grids.

        :return: 0 on success; results published under 'gridded_runoff'.
        """
        import xanthos
        config_file = self.params["config_file"]
        xth = xanthos.Xanthos(config_file)
        gridded_runoff = []
        # Other components should produce gridded climate data as a list of 2d numpy arrays
        cap_names = ['gridded_pr', 'gridded_tas', 'gridded_pr_coord', 'gridded_tas_coord']
        if all(cap in self.cap_tbl for cap in cap_names):
            pr_grids = self.fetch('gridded_pr')
            tas_grids = self.fetch('gridded_tas')
            pr_coord = self.fetch('gridded_pr_coord')
            tas_coord = self.fetch('gridded_tas_coord')
            # Run Xanthos for each pair of precipitation and temperature grids
            args = {}
            if self.params.get('OutputNameStr') is not None:
                args['OutputNameStr'] = self.params['OutputNameStr']
            for pr, tas in zip(pr_grids, tas_grids):
                args['PrecipitationFile'] = self.prep_for_xanthos(pr, pr_coord)
                args['trn_tas'] = self.prep_for_xanthos(tas, tas_coord) - 273.15  # K to C
                xth_results = xth.execute(args)
                gridded_runoff.append(xth_results.Q)
        else:
            # No upstream climate data: let Xanthos read inputs from its config.
            xth_results = xth.execute()
            gridded_runoff.append(xth_results.Q)
        self.addresults("gridded_runoff", gridded_runoff)
        return 0

    def prep_for_xanthos(self, monthly_data, coords):
        """Convert climate data to Xanthos' expected input format.

        Retrieve Xanthos grid cells from alternately indexed vectors.

        params:
          monthly_data - Input data for Xanthos as numpy array (cells x months)
          coords       - Lat/lon array corresponding to monthly_data

        returns:
          2d array of Xanthos cells by month
        """
        # BUGFIX: pandas was previously only imported inside finalize_parsing,
        # so the bare `pd` reference below raised NameError when this method
        # ran.  Import it in this scope (harmless if also imported elsewhere).
        import pandas as pd
        coords = pd.DataFrame(coords, columns=['lat', 'lon'])
        # The input data must have the same number of grid cells as Xanthos
        assert len(coords.index) == len(self.cell_map.index)
        # Map the Xanthos coordinate indices to the input coordinates.  The
        # merge preserves the row order of `coords` (the left frame).
        cell_id_map = coords.merge(self.cell_map, on=['lat', 'lon'])
        # The 'cell_id' column now says the id of the Xanthos cell each row of
        # the input data corresponds to.  The ids start at 1, so to re-order the
        # input data to the Xanthos order, we can just index by one less than
        # the value of the 'cell_id' column.
        ordered_data = monthly_data[cell_id_map['cell_id'] - 1, :]
        return ordered_data
class FldgenComponent(ComponentBase):
    """Run the fldgen climate field generator.

    This component makes use of the fldgen and an2month R packages.  They must
    either be installed in the user's R library, or they must be available to
    load separately.

    The current version of this component requires that the emulator have been
    pretrained on the ESM data and saved as an RDS file.  Eventually we will
    support training the emulator as part of the coupled calculation, but the
    pretraining case seemed likely to be the more common one, so we started
    with that.

    params:
      loadpkgs - Flag indicating whether the fldgen and an2month packages need
                 to be explicitly loaded.  If false, those packages must be
                 preinstalled in the user's R library.
      pkgdir   - Directory containing R package repositories for fldgen and
                 an2month (ignored if loadpkgs is False).
      emulator - RDS file containing the trained emulator to use for the
                 calculation.
      ngrids   - Number of climate fields to generate.
      startyr  - Starting year for the climate fields
      nyear    - Number of years in the climate fields.  This MUST match the
                 number of years the emulator was trained on.
                 TODO: get this from the emulator when we read it in so that
                 we don't have to set it manually.
      scenario - Hector scenario to use for the mean field calculation.
      RNGseed  - Optional seed for the R random number generator.  If omitted,
                 then the R instance will seed its RNG with whatever default
                 it normally uses.
      a2mfrac  - monthly fraction dataset to use for monthly downscaling.  If
                 omitted, the data is assumed to have been generated at
                 monthly resolution.
      debugdir - Location to write debug file output.  If omitted, no debug
                 output is produced.

    Capability dependencies:
      Tgav - Global mean temperature.  Tgav is normally provided by scenario.
             This component ignores the scenario designation.  If multiple
             scenarios are present, it takes the first one.

    results: precipitation (pr) and temperature (tas) grids and coordinate
    matrix.  The results are organized thus:

    capability 'gridded_pr': list of matrices.  Each matrix is one of the
    generated precipitation fields, with grid cells in rows and months in
    columns.  TODO: document units of precip (kg/m^2/s ?)

    capability 'gridded_tas': list of matrices.  Each matrix is one of the
    generated temperature fields, with grid cells in rows and months in
    columns.  TODO: document units of temperature (K ?)

    capability 'gridded_tas_coord': Matrix of lat/lon coordinates for the
    temperature grid cells.  The rows are in the same order as the rows in the
    gridded data; the two columns are lat, lon, respectively.

    capability 'gridded_pr_coord': Matrix of lat/lon coordinates for the
    precip grid cells.  The rows are in the same order as the rows in the
    gridded data; the two columns are lat, lon, respectively.  This is exactly
    the same matrix as the 'gridded_tas_coord' matrix; the additional
    capability is provided as a convenience in case there are components that
    do not assume that temperature and precipitation are on the same grid.

    Note that although the temperature and precipitation are provided
    separately so that components that need only one or the other can fetch
    just what they need, the grids for the two variables are paired.  That is,
    tas[0] goes with pr[0], tas[1] with pr[1], and so on.  Mixing the tas and
    pr grids from two different realizations (e.g., tas[0] with pr[2]) is not
    valid and should be avoided.
    """
    def __init__(self, cap_tbl):
        super(FldgenComponent, self).__init__(cap_tbl)
        self.addcapability("gridded_pr")
        self.addcapability("gridded_tas")
        self.addcapability('gridded_pr_coord')
        self.addcapability('gridded_tas_coord')

    def finalize_parsing(self):
        """Coerce the string-valued config parameters to their real types."""
        super(FldgenComponent, self).finalize_parsing()
        self.params['loadpkgs'] = util.parseTFstring(self.params['loadpkgs'])
        self.params['ngrids'] = int(self.params['ngrids'])
        self.params['startyr'] = int(self.params['startyr'])
        self.params['nyear'] = int(self.params['nyear'])

    def run_component(self):
        """Run the fldgen and an2month R scripts."""
        from rpy2.robjects.packages import importr
        import rpy2.robjects as robjects
        import numpy as np
        from rpy2.robjects import numpy2ri
        # NOTE: this changes rpy2's global conversion state for the process.
        numpy2ri.activate()     # enable automatic conversion of numpy objects to R equivalents.
        if self.params['loadpkgs']:
            # Load fldgen/an2month from source trees via devtools instead of
            # the installed R library.
            import os
            pkgdir = self.params["pkgdir"]
            an2month = os.path.join(pkgdir, "an2month")
            fldgen = os.path.join(pkgdir, "fldgen")
            devtools = importr("devtools")
            devtools.load_all(an2month)
            devtools.load_all(fldgen)
        # Import fldgen and run the generator
        fldgen = importr('fldgen')
        emu = fldgen.loadmodel(self.params['emulator'])
        if self.params.get('RNGseed') is not None:
            setseed = robjects.r['set.seed']
            setseed(self.params['RNGseed'])
        fullgrids_annual = self.run_fldgen(emu, fldgen)
        coords = self.extract_coords(emu, fldgen)
        if self.params.get('a2mfrac') is None:
            # Data is already at monthly resolution; however, we do still
            # need to transpose it so that months are in columns.
            fullgrids_monthly = {}
            fullgrids_monthly['pr'] = [np.transpose(np.asarray(x, dtype=np.float32)) for x in fullgrids_annual['pr']]
            fullgrids_monthly['tas'] = [np.transpose(np.asarray(x, dtype=np.float32)) for x in fullgrids_annual['tas']]
        else:
            fullgrids_monthly = self.run_monthlyds(fullgrids_annual, coords)
        self.addresults('gridded_pr', fullgrids_monthly['pr'])
        self.addresults('gridded_tas', fullgrids_monthly['tas'])
        self.addresults('gridded_pr_coord', coords['pr'])
        self.addresults('gridded_tas_coord', coords['tas'])
        # Produce debug output, if requested
        ddir = self.params.get('debugdir')
        if ddir is not None:
            import os.path
            import numpy as np
            for var in ['tas', 'pr']:
                filestem = os.path.join(ddir, f'debug-{var}')
                for i, m in enumerate(fullgrids_monthly[var]):
                    # Write debug output with months in rows, as it will be easier to visually scan that way.
                    # Only a 10-cell x 24-month corner of each grid is dumped.
                    tasdata = np.transpose(m[0:10, 0:24])
                    filename = f'{filestem}-{i}.csv'
                    np.savetxt(filename, tasdata)
        return 0

    def run_fldgen(self, emu, fldgen):
        """Run the fldgen calculation and return the results.

        :param emu: Fldgen emulator structure
        :param fldgen: Fldgen package handle from rpy2
        :return: Dictionary with entries 'tas' and 'pr'.  Each entry is a list
                 of numpy arrays.
        :raises RuntimeError: if the configured scenario is absent from Tgav.
        """
        import numpy as np
        # Calculate residuals
        resids = fldgen.generate_TP_resids(emu, self.params['ngrids'])
        # Get global mean temperatures.  This is returned as a dataframe
        # containing multiple scenarios, so we need to filter it down to the
        # one we want.
        tgavdf = self.fetch('Tgav')
        scen = self.params['scenario']
        if scen not in tgavdf['scenario'].values:
            raise RuntimeError(f'Requested scenario {scen} not in Tgav results.')
        tgavdf = tgavdf[tgavdf['scenario'] == scen].loc[:, ]
        startyr = self.params['startyr']
        endyr = startyr + self.params['nyear']
        # We need to filter this down to just the years we are going to use
        tgavdf = tgavdf[np.logical_and(tgavdf['year'] >= startyr, tgavdf['year'] < endyr)].loc[:, ]
        year = tgavdf['year'].values
        # Sort the temperature series into ascending-year order before
        # handing it to fldgen.
        perm = np.argsort(year)
        tgav = tgavdf['value'].values[perm]
        fullgrids = fldgen.generate_TP_fullgrids(emu, resids, tgav)
        # fullgrids is a list of paired temperature and precipitation grids.  In R notation they
        # are stored in fullgrids$fullgrids[[i]]$tas and fullgrids$fullgrids[[i]]$pr.  We don't care about
        # anything else in the fullgrids structure above.  (Remember x[[1]] in R is
        # x[0] in python.)
        gridstructs = fullgrids.rx2('fullgrids')
        tas = [np.asarray(gs.rx2('tas')) for gs in gridstructs]
        pr = [np.asarray(gs.rx2('pr')) for gs in gridstructs]
        return {'tas': tas, 'pr': pr}

    def extract_coords(self, emu, fldgen):
        """Extract the coordinate structure from the emulator.

        :param emu: Fldgen emulator structure.
        :param fldgen: Fldgen package structure from rpy2.
        :return: Dictionary with entries 'tas' and 'pr'.  Each is a matrix of
                 coordinates for each grid cell, with cells in rows and
                 latitude, longitude in the two columns.
        """
        import numpy as np
        # Positionally, emu[0] is the temperature griddata and emu[1] the
        # precipitation griddata.
        griddataT = emu[0]
        griddataP = emu[1]
        coords = {}
        for name, griddata in zip(['tas', 'pr'], [griddataT, griddataP]):
            gd = dict(griddata.items())
            try:
                coord = np.asarray(gd['coord'])
            except KeyError:
                # If the grid is regular, then fldgen doesn't store a coordinate
                # array.  Use the coord_array function to create one.
                coord = np.asarray(fldgen.coord_array(gd['lat'], gd['lon']))
            coords[name] = coord
        return coords

    def run_monthlyds(self, annual_flds, coords):
        """Run the monthly downscaling calculation.

        :param annual_flds: Structure returned from run_fldgen
        :param coords: Coordinate matrix returned from fldgen
        :return: Dictionary with 'pr' and 'tas' entries.  Each entry is a list
                 of matrices of field data at monthly resolution (grid cells
                 in rows, months in columns)
        """
        from rpy2.robjects.packages import importr
        import numpy as np
        an2month = importr('an2month')
        rslt = {}
        for var in annual_flds:
            ntime = np.asarray(annual_flds[var][0]).shape[0]  # there is probably an easier way to do this.
            # NOTE(review): the year vector starts at startyr - 1 -- presumably
            # an an2month indexing convention; confirm against the R package.
            time = np.arange(ntime) + self.params['startyr'] - 1
            monthly = an2month.downscaling_component_api(self.params['a2mfrac'], annual_flds[var],
                                                         coords[var], time, var)
            if var == 'pr':
                # If this is precipitation, convert units.
                monthly = [an2month.pr_conversion(x) for x in monthly]
            # Transpose so grid cells are in rows and months in columns.
            rslt[var] = [np.transpose(np.asarray(x, dtype=np.float32)) for x in monthly]
            logging.debug(f'Result for {var}: len = {len(rslt[var])}. Shape = {rslt[var][0].shape}')
        return rslt
class HectorStubComponent(ComponentBase):
    """Component to serve Hector output data for RCP scenarios.

    In cases where Hector output will be used only for the standard RCP
    scenarios, it is not necessary to run the model, as the outputs never
    change.  This module provides a way to read some of the most commonly
    used outputs for those scenarios.

    The component provides three capabilities:
      * Tgav    : global mean temperature
      * atm-co2 : atmospheric CO2 concentration
      * Ftot    : total radiative forcing

    Each capability returns a data frame with data from all of the scenarios
    specified in the configuration.  Spinup time steps are not included.

    The parameters accepted by this component are:
      scenarios : comma separated list of scenarios to include
                  e.g.: rcp26,rcp45
                  If omitted, all four rcp scenarios are included.
      T0 : Preindustrial temperature.  This must be added to the temperature
           anomalies produced by Hector to get real temperatures.
    """
    def __init__(self, cap_tbl):
        super(HectorStubComponent, self).__init__(cap_tbl)
        self.addcapability('Tgav')
        self.addcapability('atm-co2')
        self.addcapability('Ftot')

    def run_component(self):
        """Run the HectorStub component.

        Load the requested scenarios and make each variable available to the
        rest of the system.  Returns 0 on success.
        """
        import pandas as pd
        # scenarios is either parsed as a list or a string, depending on
        # whether multiple scenarios were specified.  The class docstring
        # promises all four RCPs when the parameter is omitted; the previous
        # code raised KeyError instead, so supply the documented default here.
        scenarios = self.params.get('scenarios', ['rcp26', 'rcp45', 'rcp60', 'rcp85'])
        if not isinstance(scenarios, list):
            scenarios = [scenarios]
        scendata = pd.concat([self._read_scen_data(scen) for scen in scenarios])
        scendata['scenario'] = scendata['run_name']
        retcols = ['year', 'scenario', 'variable', 'value', 'units']
        tgav = scendata[scendata['variable'] == 'Tgav'].loc[:, retcols]
        tgav['value'] += float(self.params['T0'])  # convert anomaly to temperature
        self.addresults('Tgav', tgav)
        self.addresults('atm-co2', scendata[scendata['variable'] == 'Ca'].loc[:, retcols])
        self.addresults('Ftot', scendata[scendata['variable'] == 'Ftot'].loc[:, retcols])
        return 0

    def _read_scen_data(self, scen):
        """Read stored scenario data.

        :param scen: Scenario name to load.  One of rcp26, rcp45, rcp60, or
                     rcp85.
        :return: Data frame with the spinup rows filtered out.
        """
        from os.path import join
        from pickle import load
        data = pkg_resources.resource_filename('cassandra', 'data')
        # Context manager guarantees the file is closed even if unpickling
        # fails (the previous code leaked the handle on error).
        with open(join(data, f'hector-outputstream-{scen}.dat'), 'rb') as infile:
            df = load(infile)
        # The spinup data is filtered from the data that is read.
        return df[df['spinup'] == 0]
class DummyComponent(ComponentBase):
    """Dummy component for tests.

    A dummy component with parameters for delaying requests and outputs in
    order to test interactions between multiple components.

    The idea is that if the name of the capabilities being declared are
    derived from each component instance's parameters, it is possible to
    configure a setup cleverly to create multiple copies that interact in any
    manner of one's choosing.

    params:
      capability_out  - name of the output capability
      capability_reqs - list of the capabilities this component requests
      request_delays  - list of time delays (ms) before each request is made
      finish_delay    - delay (ms) before the component finalizes and exports
      except          - Throw an exception with the parameter value just
                        before the component would have exited (this is used
                        for testing error handling).
    """
    def __init__(self, cap_tbl):
        super(DummyComponent, self).__init__(cap_tbl)
        # most components add a capability here, but we can't do that
        # yet because we need our parameters before we can decide what
        # capability we are offering.

    def finalize_parsing(self):
        """Derive this instance's capability and delays from its parameters.

        :raises RuntimeError: if capability_reqs and request_delays differ in
                              length.
        """
        super(DummyComponent, self).finalize_parsing()
        # get this component's name and add it as a capability
        self.name = self.params['name']
        self.addcapability(self.name)
        # get this component's capability requirements (optional; empty
        # strings produced by the config parser are dropped)
        if 'capability_reqs' in self.params:
            cr = self.params['capability_reqs']
            if not isinstance(cr, list):
                cr = [cr]
            self.capability_reqs = [s for s in cr if s != '']
        else:
            self.capability_reqs = []
        # get the request delays
        if 'request_delays' in self.params:
            rd = self.params['request_delays']
            if not isinstance(rd, list):
                rd = [rd]
            self.request_delays = [int(s) for s in rd if s != '']
        else:
            self.request_delays = []
        if len(self.capability_reqs) != len(self.request_delays):
            raise RuntimeError('Lengths of capability_reqs and request_delays must be the same.')
        # get the finish delay
        # NOTE(review): finish_delay is required -- KeyError if omitted from
        # the configuration; presumably intentional for the test harness.
        self.finish_delay = int(self.params['finish_delay'])

    def run_component(self):
        """Run, request, delay, output."""
        from time import time, sleep
        from logging import info
        from os import uname
        st = time()
        info(f'{st}: Start component on host {uname().nodename}')
        st_msg = (0, f'Start {self.name}')
        data = [st_msg]  # list of tuples:  (time, message)
        capability_reqs = self.capability_reqs
        request_delays = self.request_delays
        finish_delay = self.finish_delay
        for i, req in enumerate(capability_reqs):
            delay = request_delays[i]
            sleep(delay / 1000.0)  # ms to s
            data.append((time() - st, f'Requesting data from {req}'))
            self.fetch(req)
            # NOTE(review): 'Recieved' is misspelled, but the recorded
            # messages are what report_test_results exposes to the unit
            # tests, so the text is deliberately left untouched.
            data.append((time() - st, f'Recieved data from {req}'))
        sleep(finish_delay / 1000.0)
        # Add our list of messages as the result for this capability
        self.addresults(self.name, data)
        # The result stores a reference to `data`, so this final entry is
        # still visible to consumers even though it is appended afterwards.
        data.append((time() - st, f'Done {self.name}'))
        # If configuration calls for us to fail, do so.
        if 'except' in self.params:
            from logging import critical
            msg = self.params['except']
            critical(msg)
            raise RuntimeError(msg)
        return 0

    def report_test_results(self):
        """Report the component's results to the unit testing code."""
        return self.results[self.name]
|
#!/usr/bin/env python
# NOTE: Python 2 ROOT/rootpy plotting script (print statements, ROOT.EColor).
# Overlays observed data, ZZ, non-prompt background, and a 5x-scaled signal
# histogram for the emt channel, then saves the figure as zh_result.pdf.
import ROOT
from rootpy.utils import asrootpy
from rootpy.plotting import Canvas, Legend, HistStack
from FinalStateAnalysis.Utilities.AnalysisPlotter import styling,samplestyles
import rootpy.io as io
import FinalStateAnalysis.StatTools.poisson as poisson
# This stuff is just so we can get matching styles as analysis.py
import FinalStateAnalysis.PatTools.data as data_tool

int_lumi = 5000
skips = ['DoubleEl', 'EM']
samples, plotter = data_tool.build_data(
    'VH', '2012-04-14-v1-WHAnalyze', 'scratch_results',
    int_lumi, skips, count='emt/skimCounter')

# Get stupid templates to build the styles automatically
signal = asrootpy(plotter.get_histogram(
    'VH120',
    'emt/skimCounter',
).th1)
wz = asrootpy(plotter.get_histogram(
    'WZ',
    'emt/skimCounter',
).th1)
zz = asrootpy(plotter.get_histogram(
    'ZZ',
    'emt/skimCounter',
).th1)
fakes_myhist = asrootpy(plotter.get_histogram(
    'Zjets',
    'emt/skimCounter',
))
styling.apply_style(fakes_myhist, **samplestyles.SAMPLE_STYLES['ztt'])
fakes = asrootpy(fakes_myhist.th1)
data = asrootpy(plotter.get_histogram(
    'data_DoubleMu',
    'emt/skimCounter',
).th1)

canvas = Canvas(800, 800)
print "canvas", canvas.GetRightMargin()
canvas.SetRightMargin(0.05)

# Build the legend and style the template histograms.
legend = ROOT.TLegend(0.55, 0.60, 0.9, 0.90, "", "brNDC")
legend.SetFillStyle(0)
legend.SetBorderSize(0)
data.SetMarkerSize(2)
data.SetTitle("data")
data.SetLineWidth(2)
legend.AddEntry(data, "Observed", "lp")
signal.SetLineStyle(1)
signal.SetLineWidth(3)
signal.SetTitle("(5#times) m_{H}=120 GeV")
signal.SetLineColor(ROOT.EColor.kRed)
signal.SetFillStyle(0)
wz.SetTitle('WZ')
zz.SetTitle('ZZ')
fakes.SetTitle("Non-prompt")
legend.AddEntry(signal, signal.GetTitle(), "l")
#legend.AddEntry(wz, 'WZ', 'lf')
legend.AddEntry(zz, 'ZZ', 'lf')
legend.AddEntry(fakes, 'Non-prompt', "lf")

# The histograms actually drawn come from pre-made ROOT files.
zz_file = io.open('file_ZZ.root')
signal_file = io.open('file_Signal.root')
data_file = io.open('file_Data.root')
fake_file = io.open('file_Fake.root')
hZZ = zz_file.Get('zz')
hZJets = fake_file.Get('fake')
hData = data_file.Get('data')
hSignal = signal_file.Get('signal')
hHWW = hSignal

canvas.cd()
hHWW = hHWW*5.0  # scale signal by 5x for visibility (matches legend title)
#print hZZ, hZJets
# make ZZ not a stack: copy its bin contents into a fresh histogram cloned
# from hZJets (so the binning matches the other file histograms).
hZZ_plain = hZJets.Clone()
hZZ_plain.Reset()
for bin in range(hZZ.GetNbinsX()+1):
    hZZ_plain.SetBinContent(
        bin,
        hZZ.GetBinContent(bin)
    )
#hZZ.Draw()
#print hZZ.GetNbinsX()
#canvas.SaveAs('wtf.pdf')
#hData.Draw()
#print hData.GetNbinsX()
#canvas.SaveAs('data.pdf')
#hZJets.Draw()
#print hZJets.GetNbinsX()
#canvas.SaveAs('zj.pdf')
hZZ = hZZ_plain
print hData

# Copy the template styles onto the file histograms.
hZZ.decorate(zz)
#hWZ.decorate(wz)
hZJets.decorate(fakes)
hData.decorate(data)
hHWW.decorate(signal)
hHWW.SetLineWidth(2)
for hist in [hZZ, hZJets]:
    hist.format = 'hist'
for hist in [hZZ, hZJets, hData, hHWW]:
    pass
    #hist.Rebin(2)

# Convert the observed histogram to a graph with Poisson errors.
hData_poisson = poisson.convert(hData, x_err=False, set_zero_bins=-100)
hData_poisson.SetMarkerSize(2)

# Stack the backgrounds and draw everything.
stack = HistStack()
stack.Add(hZJets)
stack.Add(hZZ)
stack.Draw()
print stack
stack.GetXaxis().SetTitle("Visible Mass (GeV)")
bin_width = stack.GetXaxis().GetBinWidth(1)
stack.GetYaxis().SetTitle("Events/%0.0f GeV" % bin_width)
stack.GetYaxis().SetTitleOffset(0.9)
#stack.GetYaxis().SetTitleSize(0.05)
#stack.GetXaxis().SetTitleSize(0.05)
stack.SetMinimum(1e-1)
stack.SetMaximum(13)
hHWW.Draw('same,hist')
hData_poisson.Draw('p0')
cms_label = styling.cms_preliminary(5000, is_preliminary=False,
                                    lumi_on_top=True)
#canvas.SetLogy(True)
legend.Draw()

# Channel label in the upper-left corner.
blurb = ROOT.TPaveText(0.18, 0.85, 0.4, 0.89, "brNDC")
blurb.SetFillStyle(0)
blurb.SetBorderSize(0)
blurb.SetTextSize(0.05)
blurb.SetTextAlign(11)
blurb.AddText("4L channels")
blurb.Draw()

canvas.Update()
canvas.SaveAs('zh_result.pdf')
|
# coding=utf-8
import json
from pymongo import MongoClient
# Connect to the MongoDB database
import files
from files import save
# Open the `panda` database and its `chengshi` (hashtag) collection.
client = MongoClient('localhost', 27017)
db = client.panda
table_chengshi = db.chengshi

# Update the id of the most recent weibo post; not recommended for use.
def update_last_blog_id():
    """Persist the idstr of the newest post in the collection to sinceid.txt."""
    # Newest document first: sort on _id descending and take a single row.
    data = table_chengshi.find().limit(1).sort([('_id', -1)])
    for d in data:
        last_blog_id = d['mblog']['idstr']
        save('sinceid.txt', last_blog_id)
        print(last_blog_id)
# Collect the user record of every 2018 post in the collection that carries
# a 'title' field.
poster = []
for data in table_chengshi.find():
    mblog = data['mblog']
    if mblog['created_at'].startswith("2018") and 'title' in mblog:
        user = data['mblog']['user']
        poster.append(user)

user_ids = []  # NOTE(review): never populated or read below -- appears dead.
# Count posts per screen name.
r = {}
for user in poster:
    user_id = user['id']
    user_name = user['screen_name']
    if user_name not in r:
        r[user_name] = 1
    else:
        r[user_name] = r[user_name] + 1

# Rank users by post count, descending.
l = []
for i in r:
    l.append({'username': i, 'post_time': r[i]})
l.sort(key=lambda x: x['post_time'], reverse=True)
# print(json.dumps(l, sort_keys=True, indent=2, ensure_ascii=False))

# Build the JSON dump and the human-readable leaderboard text.
# ("发帖量" in the output string means "post count" -- runtime output,
# kept verbatim.)
val = ""
sort = ""
total = 0
for index, i in enumerate(l):
    print(i)
    sort += "No.%d @%s; 发帖量%s\n" % (index + 1, i['username'], i['post_time'])
    val += "%s\n" % json.dumps(i, ensure_ascii=False)
    total += i['post_time']
print(total)
files.save("property/user.json", val)
files.save("property/sort.txt", sort)

# Count every document in the collection.
size = 0
for data in table_chengshi.find():
    size += 1
# Commented-out ratio report: "the hashtag has %d posts in total, the group
# members posted %d of them, i.e. %f%%".
# total = 0
# for i in user_in_group:
#     total += r[i]
#
# print("超话共%d条数据,群里的亲妈共发%d条,占比%f%s" % (size, total, (total / size) * 100, "%"))
|
import socket

# Target device address and commands.
UDP_IP = "10.160.108.101"
UDP_PORT = 5005
MESSAGE = "?"        # query command
MESSAGE2 = "cinema"  # mode command

# Open the UDP socket in a context manager so the descriptor is always
# closed (the original script leaked it).
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
    # Ask the device for its status and echo the decoded reply.
    sock.sendto(MESSAGE.encode(), (UDP_IP, UDP_PORT))
    data, addr = sock.recvfrom(1024)
    print("Message recu: ", data.decode())
    # Send the mode command and print the raw reply.
    sock.sendto(MESSAGE2.encode(), (UDP_IP, UDP_PORT))
    data, addr = sock.recvfrom(1024)
    # NOTE(review): this prints the bytes object while the first reply is
    # decoded -- kept as-is to preserve the original output format.
    print("Code :", data)
|
from django.contrib import admin

from teacherAvaluation.models import *

# Expose every evaluation model through the default Django admin site.
for _model in (Degree, Subject, Teacher, Evaluation):
    admin.site.register(_model)
|
from enum import Enum
class EnvironmentsRetrieveExpand(str, Enum):
    """Allowed values of the ``expand`` query parameter for environment retrieval."""

    ENVIRONMENTAPPLICATION = "environment.application"
    ENVIRONMENTAWS_ECS_SERVICES = "environment.aws_ecs_services"
    ENVIRONMENTDEPLOYMENTS = "environment.deployments"

    def __str__(self) -> str:
        # Members are str subclasses; render the raw wire value rather than
        # the default 'ClassName.MEMBER' enum repr.
        return self.value
|
def calculatemin(stack):
    """Return the minimum number of flips needed to make every pancake in
    *stack* (a string of '+'/'-' characters, top of stack first) happy-side
    up ('+').

    Greedy bottom-up scan: the deepest remaining '-' can only be fixed by
    flipping the prefix that ends at it, and doing so never disturbs the
    pancakes below it, so each such flip is forced.

    Fixes vs. original: Python-2-only ``xrange`` replaced with ``range``
    (``xrange`` is a NameError on Python 3), debug prints removed, and the
    implicit ``None`` fall-through replaced with an explicit return.

    :param stack: str of '+' (happy) and '-' (blank) faces.
    :return: int -- number of flips performed.
    """
    counter = 0
    if checktrue(stack):
        return counter
    # Walk from the bottom pancake toward the top; each '-' found at
    # position i forces one flip of the prefix stack[:i+1].
    for i in range(len(stack) - 1, -1, -1):
        if stack[i] == '-':
            stack = flippartialstack(stack, i)
            counter += 1
            if checktrue(stack):
                return counter
    # Defensive: the loop always terminates on an all-'+' stack above, but
    # return the count explicitly rather than falling through to None.
    return counter


def checktrue(s):
    """Return True when *s* consists solely of '+' characters
    (vacuously True for the empty string)."""
    return (s == len(s) * '+')


def flippartialstack(stack, index):
    """Flip the faces of the pancakes in ``stack[:index+1]``.

    Only the faces are inverted ('+' <-> '-'); the order is not reversed,
    which is irrelevant for counting flips.

    :param stack: str of '+'/'-' characters.
    :param index: index of the last pancake included in the flip.
    :return: the new stack string.
    """
    if len(stack) > 1:
        unchanged = stack[index+1:]
    else:
        unchanged = ""
    changestack = stack[:index+1]
    # Three-step swap via a '1' placeholder so the two replacements
    # ('+'->'-' and '-'->'+') don't collide.
    changestack = changestack.replace('+', '1')
    changestack = changestack.replace('-', '+')
    changestack = changestack.replace('1', '-')
    return changestack + unchanged
def outputfile(answers):
    """Append *answers* to ``output.out``, one ``Case #i: answer`` line each.

    :param answers: iterable of per-case results (any str()-able values).
    """
    # The context manager closes the handle even if a write raises; the
    # original could leak the file object on error.
    with open('output.out', 'a') as f:
        for case_no, answer in enumerate(answers, start=1):
            f.write('Case #' + str(case_no) + ': ' + str(answer) + '\n')
def vapenaesh(fname='B-large.in'):
    """Solve every pancake stack listed in *fname* and write the answers
    via :func:`outputfile`.

    The first line of the input file is the case count; each following
    line is one stack.

    :param fname: input file path. The default preserves the previously
        hard-coded filename, so existing callers are unaffected.
    """
    answers = []
    with open(fname) as o:
        content = [line.rstrip() for line in o]
    # Skip the leading case-count line; the remainder are the stacks.
    for stack in content[1:]:
        answers.append(calculatemin(stack))
    outputfile(answers)
vapenaesh()
|
from DataBase import *
from navigator import *
import matplotlib.pyplot as plt
# Load the course database and compute an optimal path through the
# course-dependency graph.
# NOTE(review): f is intentionally left open -- TDataBase may hold the
# handle beyond pull(); confirm before adding a close()/with.
f = open("DataBase.txt", "r+")
DB = TDataBase(f)
DB.pull()

CG = CoursesGraph(DB)
# Bug fix: the original used the Python-2-only `print` statement, which is
# a SyntaxError on Python 3; the call form below works on both versions.
print(CG.get_optimal_path([1], [3, 4]))
#pos=nx.circular_layout(CG.Graph) # positions for all nodes
#
## nodes
#nx.draw_networkx_nodes(CG.Graph,pos,node_size=700)
#
## edges
#nx.draw_networkx_edges(CG.Graph,pos,
#                       width=6,alpha=0.5,edge_color='b',style='dashed')
#
## labels
#nx.draw_networkx_labels(CG.Graph,pos,font_size=20,font_family='sans-serif')
#
#plt.axis('off')
#plt.show()
|
from abc import ABC, abstractmethod
class BroadcastContract(ABC):
    """Interface that every broadcast backend must implement."""

    @abstractmethod
    def ssl(self):
        """Return the SSL/TLS configuration for the broadcast connection."""

    @abstractmethod
    def channel(self):
        """Return the channel the broadcast is published on."""
|
"""
Posterior Predictive check with Maximum Mean Discrepancy (MMD).
Introduction:
-------------
TATTER (Two-sAmple TesT EstimatoR) is equipped with a posterior predictive check that allows
the users to study the goodness of a data-generative model against observed data. This module
employs the MMD, KL, and KS tests to compute the distance between the observed data and a set
of simulated datasets.
Quickstart:
-----------
To start using TATTER, simply use "from tatter import posterior_predictive_check"
to access the posterior predictive check function. The exact requirements
for the inputs are listed in the docstring of the posterior_predictive_check()
function further below. An example of this function looks like this:
-----------------------------------------------------------------
| from tatter import posterior_predictive_check |
| |
| test_value, test_null, p_value = |
| posterior_predictive_check(X, sims, |
| model='MMD', |
| kernel_function='rbf', |
| gamma=gamma, |
| n_jobs=4, |
| verbose=True, |
| random_state=0) |
| |
-----------------------------------------------------------------
Author:
--------
Arya Farahi, aryaf@umich.edu
Data Science Fellow
University of Michigan -- Ann Arbor
Libraries:
----------
The two-sample test estimator used in this implementation utilizes
'numpy', 'matplotlib', 'sklearn', 'joblib', 'tqdm', and 'pathlib' libraries.
References:
-----------
[1]. A. Gelman, X. L. Meng, and H. Stern,
"Posterior predictive assessment of model fitness via realized discrepancies."
Statistica sinica (1996): 733-760.
"""
from __future__ import division
import numpy as np
from tqdm import tqdm
from scipy import stats
from sklearn.metrics import pairwise_kernels
from .KL_estimator import KL_divergence_estimator
from .TST_estimators import MMD2u_estimator, MMD_null_estimator,\
KS_null_estimator, KL_divergence_estimator, KL_null_estimator
from joblib import Parallel, delayed
def posterior_predictive_check(X, sims, model='MMD', kernel_function='rbf', verbose=False,
                               random_state=None, n_jobs=1, **kwargs):
    """
    This function performs a posterior predictive check. The posterior predictive check is a
    Bayesian counterpart of the classical tests for goodness-of-the-fit. It often can be used in
    judging the fit of a single Bayesian model to the observed data. The posterior predictive
    check requires a test statistic. In our implementation, the test here can be one of
    the following tests: Kolmogorov-Smirnov, Kullback-Leibler divergence, or MMD.
    We note that the Kolmogorov-Smirnov test is defined for only one-dimensional data.
    The module performs a bootstrap algorithm to estimate the null distribution, and the
    corresponding p-value.

    :param X: numpy-array
        Observed data, of size M x D [M is the number of data points, D is the features dimension]

    :param sims: list of numpy-array
        A list of a set of simulated data, of size N x D
        [Nsim is the number of simulations, N is the number of data points, D is the features dimension]

    :param model: string
        defines the basis model to perform two sample test ['KS', 'KL', 'MMD']

    :param kernel_function: string
        defines the kernel function, only used for the MMD.
        For the list of implemented kernels please consult
        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.kernel_metrics.html

    :param verbose: bool
        controls the verbosity of the model's output.

    :param random_state: type(np.random.RandomState()) or None
        defines the initial random state.

    :param n_jobs: int [not implemented yet]
        number of jobs to run in parallel.

    :param kwargs:
        extra parameters, these are passed to `pairwise_kernels()` as kernel parameters or
        `KL_divergence_estimator()` as the number of k.
        E.g., `kernel_two_sample_test(..., kernel_function='rbf', gamma=0.1)`

    :return: tuple of size 3
        float: the test value,
        numpy-array: a null distribution via bootstraps,
        float: estimated p-value

    :raises ValueError: for an unknown `model`, for multi-dimensional data with
        the KS test, or for an invalid `n_jobs`.
    """
    if model not in ['KS', 'KL', 'MMD']:
        raise ValueError("The Model '%s' is not implemented, try 'KS', 'KL', or 'MMD'."%model)

    if model == 'KS' and X.shape[1] > 1:
        # BUG FIX: the original message interpolated an undefined name `Y`,
        # so reaching this guard raised NameError instead of ValueError.
        raise ValueError("The KS test can handle only one dimensional data,"
                         ": X.shape[1] == %i." % X.shape[1])

    if not (isinstance(n_jobs, int) and n_jobs > 0):
        raise ValueError('n_jobs is incorrect type or <1. n_jobs:%s'%n_jobs)

    # Reuse a caller-supplied RandomState; otherwise seed a fresh one.
    # (isinstance replaces the fragile `type(...) == type(...)` comparison.)
    if isinstance(random_state, np.random.RandomState):
        rng = random_state
    else:
        rng = np.random.RandomState(random_state)

    m = len(X)

    # p-value's resolution: one bootstrap draw per simulated dataset.
    resolution = 1.0/len(sims)

    test_value = []
    null_value = []

    # Compute the test statistic against every simulated dataset and draw
    # one bootstrap sample of the null distribution per dataset.
    if model == 'MMD':
        for Y in sims:
            n = len(Y)
            XY = np.vstack([X, Y])
            K = pairwise_kernels(XY, metric=kernel_function, **kwargs)
            test_value += [MMD2u_estimator(K, m, n)]
            null_value += [MMD_null_estimator(K, m, n, rng)]
    elif model == 'KS':
        for Y in sims:
            n = len(Y)
            # Pool the (one-dimensional) samples for the null bootstrap.
            K = np.concatenate((X.T[0], Y.T[0]))
            test_value += [stats.ks_2samp(X.T[0], Y.T[0])[0]]
            null_value += [KS_null_estimator(K, m, n, rng)]
    elif model == 'KL':
        for Y in sims:
            n = len(Y)
            K = np.vstack([X, Y])
            test_value += [KL_divergence_estimator(X, Y, **kwargs)]
            null_value += [KL_null_estimator(K, m, n, rng)]

    test_value = np.array(test_value)
    null_value = np.array(null_value)

    if verbose:
        print("test value = %s"%test_value)
        print("Computing the null distribution.")

    # Compute the p-value; if it is below the bootstrap resolution, clamp
    # it to the resolution.
    p_value = max(resolution, resolution*(null_value > test_value).sum())

    if verbose:
        if p_value == resolution:
            print("p-value < %s \t (resolution : %s)" % (p_value, resolution))
        else:
            print("p-value ~= %s \t (resolution : %s)" % (p_value, resolution))

    return test_value, null_value, p_value
def test_statistics(X, Y, model='MMD', kernel_function='rbf', **kwargs):
"""
This function performs a test statistics and return a test value. This implementation can perform
the Kolmogorov-Smirnov test (for one-dimensional data only), Kullback-Leibler divergence and MMD.
:param X: numpy-array
Data, of size MxD [M is the number of data points, D is the features dimension]
:param Y: numpy-array
Data, of size NxD [N is the number of data points, D is the features dimension]
:param model: string
defines the basis model to perform two sample test ['KS', 'KL', 'MMD']
:param kernel_function: string
defines the kernel function, only used for the MMD.
For the list of implemented kernel please consult with https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.kernel_metrics.html#sklearn.metrics.pairwise.kernel_metrics
:param kwargs:
extra parameters, these are passed to `pairwise_kernels()` as kernel parameters or `KL_divergence_estimator()`
as the number of k. E.g., if `kernel_two_sample_test(..., kernel_function='rbf', gamma=0.1)`
:return: float
the test value
"""
if model not in ['KS', 'KL', 'MMD']:
raise ValueError("The Model '%s' is not implemented, try 'KS', 'KL', or 'MMD'." % model)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices. X and Y should have the same feature dimension,"
": X.shape[1] == %i while Y.shape[1] == %i." % (X.shape[1], Y.shape[1]))
if model == 'KS' and X.shape[1] > 1:
raise ValueError("The KS test can handle only one dimensional data,"
": X.shape[1] == %i and Y.shape[1] == %i." % (X.shape[1], Y.shape[1]))
m = len(X)
n = len(Y)
# compute the test statistics according to the input model
if model == 'MMD':
XY = np.vstack([X, Y])
K = pairwise_kernels(XY, metric=kernel_function, **kwargs)
test_value = MMD2u_estimator(K, m, n)
elif model == 'KS':
test_value, _ = stats.ks_2samp(X.T[0], Y.T[0])
elif model == 'KL':
test_value = KL_divergence_estimator(X, Y, **kwargs)
return test_value
|
import re
import urllib.request

# Scrape candidate mobile numbers from the findandtrace directory page.
# The context manager closes the HTTP response even if read() fails;
# the original never closed it.
with urllib.request.urlopen("https://www.findandtrace.com/mobile-phone-number-database/Andhra-Pradesh/IDEA") as u:
    print(type(u))
    r = u.read()

# Ten-digit numbers beginning with 9 (the `{1}` in the original pattern
# `[9]{1}[0-9]{9}` was redundant).
r = re.findall(r"9[0-9]{9}", str(r))
for i in r:
    print(i)
|
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
def encode_cat_feature_using_targets(feature_train, targets, feature_test, use_loo=False, alpha=1.0):
    """Target-encode a categorical feature.

    Each category seen in ``feature_train`` is replaced by the smoothed mean
    of ``targets`` within that category; categories unseen in training map
    to 0.5 (unchanged from the original behaviour).

    :param feature_train: pd.Series of categories, aligned with ``targets``.
    :param targets: pd.Series of (binary or real) targets.
    :param feature_test: pd.Series of categories to encode.
    :param use_loo: apply the leave-one-out correction; only meaningful when
        ``feature_test`` is the training feature itself, aligned with ``targets``.
    :param alpha: additive-smoothing strength -- the per-category mean is
        pulled toward the global target mean by ``alpha`` pseudo-counts.
        BUG FIX: the original accepted ``alpha`` and claimed a "smoothed
        mean" but never used it; ``alpha=0`` reproduces the old behaviour.
    :return: pd.Series of encoded values, indexed like ``feature_test``.
    """
    grouped = targets.groupby(feature_train)
    global_mean = targets.mean()
    # Smoothed category mean: (sum + alpha * global_mean) / (count + alpha).
    target_means = (grouped.sum() + alpha * global_mean) / (grouped.count() + alpha)
    # Map smoothed means onto feature_test; unseen categories fall back to 0.5.
    feature_test = feature_test.map(lambda x: target_means.get(x, default=0.5))
    if use_loo:
        # Leave-one-out correction: discount each row's own target from its
        # category mean (formula kept verbatim from the original).
        category_lens = targets.groupby(feature_train).transform(len)
        feature_test = (category_lens * feature_test - targets + 1) / (category_lens + 1)
    return feature_test
def main():
    """Train a logistic-regression baseline on the Red Hat activity data
    and write the probability submission file."""
    cat_people_cols = ['people_id', 'group_1', 'char_2']
    num_people_cols = ['char_38']
    people_cols = cat_people_cols + num_people_cols
    cat_activity_cols = ['people_id']
    activity_cols = cat_activity_cols
    categorical = set(cat_people_cols + activity_cols)
    numeric = num_people_cols
    target_column = 'outcome'

    activities_train = pd.read_csv('../data/act_train.csv.zip', index_col='activity_id')
    activities_test = pd.read_csv('../data/act_test.csv.zip', index_col='activity_id')
    people = pd.read_csv('../data/people.csv.zip')

    def join_people(activities):
        # Attach person-level columns to each activity, keyed on people_id,
        # keeping the activity index on the result.
        return pd.merge(people[people_cols],
                        activities[activity_cols],
                        on='people_id',
                        left_index=True).drop('people_id', axis=1)

    train = join_people(activities_train)
    test = join_people(activities_test)
    targets = activities_train[target_column]

    for col in train.columns:
        if col in categorical:
            # Encode test before train so the in-place change to train[col]
            # cannot leak into the test encoding.
            test[col] = encode_cat_feature_using_targets(train[col], targets, test[col], use_loo=False)
            train[col] = encode_cat_feature_using_targets(train[col], targets, train[col], use_loo=True)

    scaler = MinMaxScaler()
    train[numeric] = scaler.fit_transform(train[numeric])
    test[numeric] = scaler.transform(test[numeric])

    log_regression = LogisticRegression(C=1, n_jobs=-1, random_state=45)
    log_regression.fit(train, targets)

    predictions = pd.Series(log_regression.predict_proba(test)[:, 1], index=test.index, name='outcome')
    predictions.to_csv('../submissions/first_log_regression.csv', index=True, header=True)


if __name__ == '__main__':
    main()
import cv2
import time
import numpy as np
import sys
import imutils
from sklearn.cluster import KMeans
# Segment each video frame into colour regions: build a 6-D feature per
# pixel (hue mapped onto a circle, value, saturation, scaled row/column),
# cluster with k-means, repaint every pixel with its cluster's colour,
# then display and record the result.
# cap = cv2.VideoCapture('inside2.m4v')
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('inside4.m4v')
# NOTE(review): the writer's frame size (300, 168) must match the resized
# frames; imutils.resize below fixes width=300 but height follows the
# source aspect ratio -- confirm for other inputs.
fil = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (300,168))
# Frame counter -- only referenced by commented-out code, and shadowed by
# the cluster loop variable below.
i =0
while(True):
    frame = cap.read()[1]
    if frame is None:
        # End of stream (or failed capture): stop processing.
        break
    # Weights trading off spatial proximity vs. colour similarity in the
    # k-means feature space (larger => that component dominates clustering).
    size_fudge = 120
    value_fudge = 0.5
    hue_fudge = 1000
    # resize the frame, blur it, and convert it to the HSV colour space
    frame = imutils.resize(frame, width = 300)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv_org = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    h, w, _ = hsv_org.shape
    print(h,w)
    hsv = hsv_org.astype(float)
    # OpenCV hue is 0..179; map it onto a circle so hues near the 0/179
    # wrap-around are close in feature space, and weight by saturation
    # and value so dull pixels contribute little hue signal.
    angle = 2*np.pi*(hsv[:,:,0] % 180)/180
    cx = hue_fudge * hsv[:,:,1] * hsv[:,:,2] * np.sin(angle)
    cy = hue_fudge * hsv[:,:,1] * hsv[:,:,2] * np.cos(angle)
    cz = hsv[:,:,2]*value_fudge
    cs = hsv[:,:,1]*value_fudge
    # Pixel coordinates scaled to roughly size_fudge units.
    # NOTE(review): y comes from arange(w) but is divided by h (and x from
    # arange(h) divided by w) -- looks swapped; confirm intent.
    y,x = np.meshgrid(np.arange(w), np.arange(h))
    y = y*size_fudge/h
    x = x*size_fudge/w
    # print(cx.shape)
    # print(cy.shape)
    # print(cz.shape)
    # print(y.shape)
    # print(x.shape)
    # One row per pixel, six features per row.
    out = np.stack((cx,cy,cz,cs,y,x),axis =2).reshape(h*w, 6)
    # print(out.shape)
    clusters = 10
    t = time.time()
    # NOTE(review): precompute_distances was deprecated/removed in newer
    # scikit-learn releases -- this pins the script to older versions.
    k = KMeans(n_clusters=clusters,precompute_distances=True).fit(out)
    print('done', time.time() -t)
    # Cluster label per pixel, reshaped back to image dimensions.
    img = k.labels_.reshape(h,w).astype(np.uint8)
    print(img.shape)
    # Repaint each cluster with the HSV colour recovered from its centroid
    # (inverting the hue-circle / fudge-factor encoding above).
    for i in range(clusters):
        ccx, ccy, ccz, ccs = k.cluster_centers_[i][:4]
        hue = (180*np.arctan2(ccx,ccy)/(2*np.pi)) % 180
        value = ccz/value_fudge
        saturation = ccs/value_fudge
        # saturation = np.sqrt( ccy**2 + ccx*2)/value
        # saturation = 255
        print(i, np.array([hue,saturation,value]))
        hsv_org[img == i] = np.array([hue,saturation,value]).astype(np.uint8)
    # edges = cv2.Canny(img*40,20,20)
    # hsv = np.ones([400,400,3]) * np.array([i%180, 255,255])
    # hsv = hsv.astype(np.uint8)
    # i +=1
    # print(hsv[0,0,:])
    show = cv2.cvtColor(hsv_org, cv2.COLOR_HSV2BGR)
    cv2.imshow('video', show)
    cv2.imshow('org', frame)
    fil.write(show)
    # cv2.imshow('video', img * 20)
    # cv2.imshow('edgled', edges)
    # cv2.imshow('video', hsv)
    # cv2.setMouseCallback('video',draw_circle)
    # Keyboard handling; note `k` here reuses the KMeans variable name.
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        # ESC quits.
        break
    elif k == ord('t'):
        # NOTE(review): `mode` is set but never read in the visible code.
        mode = 'tennis'
    elif k == ord('b'):
        mode = 'bg'
cap.release()
cv2.destroyAllWindows()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.