index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
10,300 | c632f2f8b3fe7ab5f366a3f94b8dfa66c0ebf8cf | # Generated by Django 2.2.3 on 2019-08-10 16:13
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
    """Add a 520x292 'listing' crop-ratio field to the EventImage model."""

    dependencies = [
        # Must run after the BannerImage cropping migration in `core`.
        ('core', '0007_bannerimage_cropping'),
    ]

    operations = [
        migrations.AddField(
            model_name='eventimage',
            name='listing',
            # ImageRatioField stores crop coordinates for the 'image' field
            # at a 520x292 aspect ratio; all widget options are defaults.
            field=image_cropping.fields.ImageRatioField('image', '520x292', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='listing'),
        ),
    ]
|
10,301 | 4a0b0979038366a07d5b69344c96506fdfc58b55 | import numpy as np
def check_dim(X, dim):
    """Validate that array-like `X` has exactly `dim` dimensions.

    :param X: anything accepted by ``np.ndim`` (array, list, scalar, ...)
    :param dim: the required number of dimensions
    :raises ValueError: when the actual dimensionality differs from `dim`
    """
    actual = np.ndim(X)
    if actual != dim:
        raise ValueError("{0}d array is expected, but {1}d is given".format(dim, actual))
|
10,302 | 50e2d3baaf509d3b26bf4334b15e63266d497d4c | ###########################################
# Imports
###########################################
import os, sys
import math
from collections import namedtuple, defaultdict
from itertools import product, groupby, permutations, combinations
rootRelativePath = '..'
rootAbsolutePath = os.path.abspath(rootRelativePath)
sys.path.append(rootAbsolutePath)
from CH4.ch04_ex2 import calc_correlation
###########################################
# Enumerating the Cartesian product
###########################################
# Mathematically, the product of two 3-item sets has 9 pairs as follows:
set1 = {1, 2, 3}
set2 = {'D', 'H', 'S'}
cartesianProd = {
(1, 'D'), (1, 'S'), (1, 'H'),
(2, 'D'), (2, 'S'), (2, 'H'),
(3, 'D'), (3, 'S'), (3, 'H')
}
# We can produce the preceding results by executing the following commands:
cartesianProd = list(product(range(1, 4), 'DHS'))
###########################################
# Reducing a product
###########################################
# We can use the join() custom function to join two tables,
# as shown in the following command:
def join(table1, table2, where):
    """Join two tables: lazily filter their Cartesian product with `where`.

    Returns an iterator of (row1, row2) pairs for which `where` is truthy.
    """
    candidate_pairs = product(table1, table2)
    return filter(where, candidate_pairs)
# Assume that we have a table of Color objects as follows:
# Bug fix: the original declared fields ("red", "green", "blue", "name"),
# but every construction site here (and euclidean/manhattan below, which
# read `color.rgb`) uses a single `rgb` tuple plus `name` — constructing
# Color(rgb=..., name=...) raised TypeError. Declare exactly those fields.
Color = namedtuple("Color", ("rgb", "name"))

# A small reference palette mapping RGB triples to color names.
rgbColorNames = [
    Color(rgb=(239, 222, 205), name='Almond'),
    Color(rgb=(255, 255, 153), name='Canary'),
    Color(rgb=(28, 172, 120), name='Green'),
    Color(rgb=(255, 174, 66), name='Yellow Orange')
]
# Given a PIL.Image object, we can iterate over the collection of pixels
# with something like the following:
def calc_pixels_from_coords(image):
    """Return a lazy stream of ((x, y), pixel_value) pairs covering `image`.

    The image size is read eagerly (at call time); pixel values are fetched
    lazily as the returned generator is consumed.
    """
    width, height = image.size
    all_coords = product(range(width), range(height))
    return ((coords, image.getpixel(coords)) for coords in all_coords)
'''
* We've determined the range of each coordinate based on the image size.
* The calculation of the product(range(w), range(h)) method creates
all the possible combinations of coordinates.
'''
#----------------
# Computing distances
#----------------
'''
* When doing color matching, we won't have a simple equality test.
* We're often forced to define a minimal distance function
to determine whether two colors are close enough,
without being the same three values of R, G, and B.
'''
# Here are the Euclidean and Manhattan distance functions:
def euclidean(pixel, color):
    """Euclidean (L2) distance between a pixel's RGB triple and `color.rgb`."""
    squared_diffs = ((p - c) ** 2 for p, c in zip(pixel, color.rgb))
    return math.sqrt(sum(squared_diffs))
def manhattan(pixel, color):
    """Manhattan (L1) distance between a pixel's RGB triple and `color.rgb`."""
    return sum(abs(p - c) for p, c in zip(pixel, color.rgb))
'''
For each individual pixel, we can compute the distance from that pixel's color
to the available colors in a limited color set.
'''
# The results of this calculation for a single pixel might look like this:
pixelDistances = (
((0, 0), (92, 139, 195), Color(rgb=(239, 222, 205), name='Almond'),
169.10943202553784),
((0, 0), (92, 139, 195), Color(rgb=(255, 255, 153), name='Canary'),
204.42357985320578),
((0, 0), (92, 139, 195), Color(rgb=(28, 172, 120), name='Green'),
103.97114984456024),
((0, 0), (92, 139, 195), Color(rgb=(48, 186, 143), name='Mountain Meadow'),
82.75868534480233),
)
'''
Each of the four tuples contains the following contents:
• The pixel's coordinates, for example, (0,0)
• The pixel's original color, for example, (92, 139, 195)
• A Color object from our set of seven colors, for example,
Color(rgb=(239,222, 205),name='Almond')
• The Euclidean distance between the original color
and the given Color object
'''
# The smallest Euclidean distance is the closest match color.
# This kind of reduction is done with the min() function:
pixelMinDistance = min(pixelDistances, key=lambda x: x[3])
#----------------
# Getting all pixels and all colors
#----------------
# One way to map pixels to colors is to enumerate all pixels
# and all colors using the product() function:
# Accessor helpers for items shaped (((x, y), pixel), color), i.e. one
# element of product(calc_pixels_from_coords(image), colors).
# Fix: PEP 8 (E731) — use `def` instead of assigning lambdas to names, so
# tracebacks and repr() show a useful function name.
def xy_coords(xyp_c):
    """The (x, y) coordinates of a ((coords, pixel), color) item."""
    return xyp_c[0][0]

def pixel(xyp_c):
    """The pixel value of a ((coords, pixel), color) item."""
    return xyp_c[0][1]

def color(xyp_c):
    """The candidate Color of a ((coords, pixel), color) item."""
    return xyp_c[1]
def get_pixelcolor_pairs(image, colors):
    """Pair every pixel of `image` with every candidate color.

    Returns a lazy stream of (coords, pixel, color, euclidean_distance)
    tuples, one per (pixel, color) combination.
    """
    combos = product(calc_pixels_from_coords(image), colors)
    return (
        (xy_coords(it), pixel(it), color(it), euclidean(pixel(it), color(it)))
        for it in combos
    )
distances = get_pixelcolor_pairs('someImage', rgbColorNames)
for _, choices in groupby(distances, key=lambda x: x[0]):
print(min(choices, key=lambda x: x[3]))
#----------------
# Performance analysis
#----------------
# Here is a basic algorithm to collect some data from a .JPG image:
def group_pixel_by_color(image):
    """Group pixel coordinates by color and print simple summary counts."""
    palette = defaultdict(list)
    for coords, pixel_value in calc_pixels_from_coords(image):
        palette[pixel_value].append(coords)
    w, h = image.size
    print("Total pixels ", w*h)
    print("Total colors ", len(palette))
# We can apply mask values to the RGB bytes with the following:
maskedColors = tuple(map(lambda x: x&0b11100000, rgbColorNames))
#----------------
# Combining two transformations
#----------------
# Here is a way to build a color map that combines both distances
# to a given set of colors and truncation of the source colors:
img = 'someImage'  # placeholder: must be a PIL.Image for the code below to run
bit3 = range(0, 256, 0b100000)  # the 8 values representable by the top 3 bits
# Bug fix: the original computed min(euclidean(...), rgb, color) — a min()
# over three unrelated values — and left `best` as a generator of
# generators, so `b[1]` below failed. The intent is: for each truncated
# rgb triple, keep the (distance, rgb, color) tuple with the smallest
# distance over the candidate palette.
best = (
    min((euclidean(rgb, color), rgb, color) for color in rgbColorNames)
    for rgb in product(bit3, bit3, bit3)
)
color_map = dict((b[1], b[2].rgb) for b in best)
# The following are the commands for the image replacement:
clone = img.copy()
for xy, p in calc_pixels_from_coords(img):
    r, g, b = p
    # Truncate each channel to its top 3 bits, then look up the best match.
    repl = color_map[(0b11100000 & r, 0b11100000 & g, 0b11100000 & b)]
    clone.putpixel(xy, repl)
clone.show()
###########################################
# Permuting a collection of values
###########################################
'''
* One popular example of combinatorial optimization problems is the
assignment problem.
* We have n agents and n tasks, but the cost of each agent
performing a given task is not equal.
* Some agents have trouble with some details,
while other agents excel at these details.
* If we can properly assign tasks to agents, we can minimize the costs.
'''
# Assuming that we have a cost matrix with 36 values that show
# the costs of six agents and six tasks,
# we can formulate the problem as follows:
cost = []  # 6x6 matrix: cost[agent][task] — must be populated with data
perms = permutations(range(6))
# Bug fix: `alt` was a single-pass generator, yet the original consumed it
# once with min() and then iterated it again (yielding nothing), and it
# printed the generator object itself rather than its contents.
# Materialize `alt` as a list so two passes are possible.
alt = [
    (sum(cost[x][y] for y, x in enumerate(perm)), perm)
    for perm in perms
]
minMatrix = min(alt)[0]
# All permutations achieving the minimal total cost:
print([ans for s, ans in alt if s == minMatrix])
###########################################
# Generating all combinations
###########################################
# There are 2,598,960 5-card poker hands.
# We can actually enumerate all 2 million hands as follows:
hands = list(combinations(tuple(product(range(13), '♠♥♦♣')), 5))
# Let's get some sample data from http://www.tylervigen.com
# We'll pick three datasets with the same time range:
# numbers 7, 43, and 3890.
# We'll laminate the data into a grid, repeating the year column.
# This is how the first and the remaining rows of the yearly data will look:
# Three spurious-correlation series laminated into one 9-column grid
# (year, cheese, bedsheet deaths, year, mozzarella, doctorates, year,
# oil imports, corn syrup). Rows 0-2 are the per-series headers.
# Bug fix: a comma was missing after the third header tuple, which made
# Python try to *call* that tuple with the (2000, ...) row — a TypeError.
# The wrapped header strings now use implicit concatenation instead of
# backslash-continued literals (which embedded stray whitespace).
dataset = [
    ('year',
     'Per capita consumption of cheese (US) - Pounds (USDA)',
     'Number of people who died by becoming tangled in their '
     'bedsheets - Deaths (US) (CDC)'),
    ('year',
     'Per capita consumption of mozzarella cheese (US) - Pounds (USDA)',
     'Civil engineering doctorates awarded (US) - '
     'Degrees awarded (National Science Foundation)'),
    ('year',
     'US crude oil imports from Venezuela - Millions of barrels '
     '(Dept. of Energy)',
     'Per capita consumption of high fructose corn syrup (US) - Pounds (USDA)'),
    (2000, 29.8, 327, 2000, 9.3, 480, 2000, 446, 62.6),
    (2001, 30.1, 456, 2001, 9.7, 501, 2001, 471, 62.5),
    (2002, 30.5, 509, 2002, 9.7, 540, 2002, 438, 62.8),
    (2003, 30.6, 497, 2003, 9.7, 552, 2003, 436, 60.9),
    (2004, 31.3, 596, 2004, 9.9, 547, 2004, 473, 59.8),
    (2005, 31.7, 573, 2005, 10.2, 622, 2005, 449, 59.1),
    (2006, 32.6, 661, 2006, 10.5, 655, 2006, 416, 58.2),
    (2007, 33.1, 741, 2007, 11, 701, 2007, 420, 56.1),
    (2008, 32.7, 809, 2008, 10.6, 712, 2008, 381, 53),
    (2009, 32.8, 717, 2009, 10.6, 708, 2009, 352, 50.1)
]
# Wwe can use the combinations() function to emit all the combinations
# of the nine variables in this dataset, taken two at a time:
combinations(range(9), 2)
# Here is a function that picks a column of data out of our dataset:
def column(source, x):
for row in source:
yield row[x]
# This is how we can compute all combinations of correlations:
for p, q in combinations(range(9), 2):
    # Bug fix: the original referenced an undefined name `source`; the
    # table to correlate is the `dataset` grid defined above.
    header_p, *data_p = list(column(dataset, p))
    header_q, *data_q = list(column(dataset, q))
    # Skip pairs of identical series (e.g. the repeated 'year' columns).
    if header_p == header_q:
        continue
    r_pq = calc_correlation(data_p, data_q)
    print("{2: 4.2f}: {0} vs {1}".format(
        header_p,
        header_q,
        r_pq)
    )
|
10,303 | 1c5bb1f97aebd71a12a5a0b7ff6eece6bcb49f2c | # Assignment 1 to print Hello World
print("Hello world")
|
10,304 | 070ad2a9aee634fc404f4767984d1a54a055a1c4 | from django.contrib import admin
from .models import Listing,Listing_Image,Review
# Register the marketplace models with the default admin site.
for model in (Listing, Listing_Image, Review):
    admin.site.register(model)
# class Listing_ImageInline(admin.TabularInline):
# model = Listing_Image
# extra = 3
#
# class ListingAdmin(admin.ModelAdmin):
# inlines = [ Listing_ImageInline, ]
|
10,305 | 614e57c5c3456fb627b032c00bbb7c2959225b8d | """Custom node groups"""
import bpy
from .node_arranger import tidy_tree
# docs-special-members: __init__
# no-inherited-members
class NodeGroup:
    """Generic wrapper around a Blender node group.

    Creates a fresh node group data-block, inserts a group node referring to
    it into `node_tree`, and pre-creates the group's input/output nodes.
    Subclasses select the editor by overriding ``TYPE``.
    """
    # Prefix used for both the NodeTree type and the group node type
    # (e.g. 'CompositorNodeTree' / 'CompositorNodeGroup').
    TYPE = 'Compositor'

    def __init__(self, name: str, node_tree: bpy.types.NodeTree):
        """
        A generic NodeGroup class

        :param name: Name of node group
        :param node_tree: NodeTree to add group to
        """
        self.name = name
        self.node_tree = node_tree
        # The group's own internal tree...
        self.group = bpy.data.node_groups.new(type=f'{self.TYPE}NodeTree', name=name)
        # ...and the node placed in the host tree that instantiates it.
        self.gn = group_node = node_tree.nodes.new(f"{self.TYPE}NodeGroup")
        group_node.node_tree = self.group
        # Interface endpoints inside the group for wiring sockets through.
        self.input_node = self.group.nodes.new("NodeGroupInput")
        self.output_node = self.group.nodes.new("NodeGroupOutput")

    def tidy(self):
        """Auto-arrange the nodes inside the group for readability."""
        tidy_tree(self.group)

    @property
    def inputs(self) -> dict:
        """Input sockets"""
        # NOTE(review): this is bpy's socket collection, not a plain dict;
        # it supports name-based indexing, which is all we rely on here.
        return self.gn.inputs

    @property
    def outputs(self) -> dict:
        """Output sockets"""
        return self.gn.outputs

    def input(self, name: str) -> bpy.types.NodeSocket:
        """Get input socket by name"""
        return self.inputs[name]

    def output(self, name: str) -> bpy.types.NodeSocket:
        """Get output socket by name"""
        return self.outputs[name]

    def add_node(self, key: str) -> bpy.types.Node:
        """Create a new node in the group by name"""
        return self.group.nodes.new(key)

    def link(self, from_socket: bpy.types.NodeSocket, to_socket: bpy.types.NodeSocket) -> bpy.types.NodeLink:
        """
        Link two sockets in the group

        :param from_socket: Socket to link from
        :param to_socket: Socket to link to
        """
        return self.group.links.new(from_socket, to_socket)

    def __str__(self):
        return f"{self.TYPE}NodeGroup({self.name})"

    def update(self, camera=None, scene=None):
        """Per-frame hook; the base implementation intentionally does nothing."""
        pass
class CompositorNodeGroup(NodeGroup):
    """Node Group for use in the compositor"""
    # NOTE(review): 'Compositor' is already the base-class default; the
    # override is kept for explicitness.
    TYPE = 'Compositor'

class ShaderNodeGroup(NodeGroup):
    """Node Group for use in the shader editor"""
    TYPE = 'Shader'
|
10,306 | 850840b0e53a1f5a0d2b3b587db3ccca2f549a31 | # range ile belirli araliktaki degerleri istedigimiz sekilde kullanabiliriz
# In Python, range() bounds are half-open: the first value is inclusive
# and the second is exclusive, i.e. the start is included and the stop
# value is not. (Comments translated from Turkish.)
for i in range(20):
    print("{}) {}".format(i, '*' * i))
|
10,307 | 6523d2a3245119bd9cfec5d87fd3f71eb058736d | # coding=utf-8
'''
给定一个二叉树,判断其是否是一个有效的二叉搜索树。
假设一个二叉搜索树具有如下特征:
节点的左子树只包含小于当前节点的数。
节点的右子树只包含大于当前节点的数。
所有左子树和右子树自身必须也是二叉搜索树。
示例 1:
输入:
2
/ \
1 3
输出: true
示例 2:
输入:
5
/ \
1 4
/ \
3 6
输出: false
解释: 输入为: [5,1,4,null,null,3,6]。
根节点的值为 5 ,但是其右子节点值为 4 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/validate-binary-search-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        # Children start empty; callers wire them up explicitly.
        self.left = self.right = None
class Solution:
    def isValidBST(self, root: "TreeNode") -> bool:
        """Return True iff the tree rooted at `root` is a valid BST.

        Strategy: an in-order traversal of a valid BST visits keys in
        strictly increasing order, so collect the traversal and check each
        key against its predecessor.

        Bug fixes vs. the original:
          * the loop ran ``range(1, len - 1)``, so a violation involving
            the *last* visited key was never detected;
          * the comparison used ``<``, silently accepting duplicate keys,
            but BST children must be strictly smaller/greater.
        """
        visited = []

        def inorder(node):
            if node:
                inorder(node.left)
                visited.append(node.val)
                inorder(node.right)

        inorder(root)
        for i in range(1, len(visited)):
            if visited[i] <= visited[i - 1]:
                return False
        return True
|
10,308 | b970b43ec2ed15f3352334da25960774ab99c60a | import numpy, h5py, matplotlib
import matplotlib.pyplot as plt
import os
import scipy.signal as sp
import numpy as np
import bead_util as bu
import os, re, time, glob
startfile = 0
endfile = 200
path = r"C:\data\20170925\bead4_15um_QWP_NS\steps\DC"
file_list = glob.glob(path+"\*.h5")
def list_file_time_order(filelist):
    """Order `filelist` by file modification time, oldest first.

    Sorting happens in place; the (same) list object is returned so the
    call can be chained.
    """
    filelist.sort(key=os.path.getmtime)
    return filelist
file_list = list_file_time_order(file_list)
file_list = file_list[startfile:endfile]
def get_specific_DCp(file_list):
    """Keep only the files whose '<value>mVDC' filename tag equals +18800 mV.

    Improvements over the original: the parameter no longer shadows the
    built-in ``list``, the regex is a raw string, and the result is built
    with a comprehension instead of an index loop.
    """
    return [fname for fname in file_list
            if float(re.findall(r"-?\d+mVDC", fname)[0][:-4]) == 18800]
def get_specific_DCm(file_list):
    """Keep only the files whose '<value>mVDC' filename tag equals -18800 mV.

    Mirror of ``get_specific_DCp`` with the opposite polarity; same
    clean-ups (no ``list`` shadowing, raw-string regex, comprehension).
    """
    return [fname for fname in file_list
            if float(re.findall(r"-?\d+mVDC", fname)[0][:-4]) == -18800]
file_listp = get_specific_DCp(file_list)
file_listm = get_specific_DCm(file_list)
l1 = len(file_listp)
l2 = len(file_listm)
lmin = np.min([l1,l2])
file_listp = file_listp[:lmin]
file_listm = file_listm[:lmin]
Fs = 10e3 ## this is ignored with HDF5 files
NFFT = 2**19
def get_x(fname):
    """Load one data file and return its mean-subtracted x channel.

    HDF5 files (.h5) are read from 'beads/data/pos_data' and rescaled by
    10/(2**15 - 1) — presumably raw 16-bit counts to a +/-10 V range,
    TODO confirm. Anything else is read as a text table.
    NOTE(review): this is Python 2 code (print statement); `Fs` is
    assigned but unused here, and the column index comes from bead_util.
    """
    print "Opening file: ", fname
    ## guess at file type from extension
    _, fext = os.path.splitext( fname )
    if( fext == ".h5"):
        f = h5py.File(fname,'r')
        dset = f['beads/data/pos_data']
        dat = numpy.transpose(dset)
        #max_volt = dset.attrs['max_volt']
        #nbit = dset.attrs['nbit']
        Fs = dset.attrs['Fsamp']
        #dat = 1.0*dat*max_volt/nbit
        dat = dat * 10./(2**15 - 1)
    else:
        dat = numpy.loadtxt(fname, skiprows = 5, usecols = [2, 3, 4, 5, 6] )
    # Subtract the mean so the returned stream is zero-centered.
    x = dat[:, bu.xi]-numpy.mean(dat[:, bu.xi])
    return x
def sum_time_stream(file_list):
    """Element-wise sum of the x time streams of every file in `file_list`."""
    total = 0
    for fname in file_list:
        total = total + get_x(fname)
    return total
xsp = sum_time_stream(file_listp)
xsm = sum_time_stream(file_listm)
xs1 = xsp + xsm
xs2 = xsp + xsp
xpsd1, f1 = matplotlib.mlab.psd(xs1, Fs = Fs, NFFT = NFFT)
xpsd2, f2 = matplotlib.mlab.psd(xs2, Fs = Fs, NFFT = NFFT)
plt.figure
plt.loglog(f1,xpsd1, label = "oposite DC = 18800")
plt.loglog(f2,xpsd2, label = "same DC = 18800")
plt.legend()
plt.show()
|
10,309 | 3064aeed019a24409a9bf734bc8cc9b4dcab118b | cinsiyet=input("Cinsiyetiniz: (E/K)")
# Map the single-letter choice to its label; anything else is an error.
# ("Erkek" = male, "Kadın" = female, "Hatalı seçim." = invalid choice.)
if cinsiyet == "E":
    print("Erkek")
elif cinsiyet == "K":
    print("Kadın")
else:
    print("Hatalı seçim.")
10,310 | 41086f5b9e74eeeadfe6d3ef42c65ff02a04f92c | """
Created on Oct 20, 2013
@author: Ofra
"""
from action import Action
from actionLayer import ActionLayer
from util import Pair
from proposition import Proposition
from propositionLayer import PropositionLayer
class PlanGraphLevel(object):
    """
    A class for representing a level in the plan graph.
    For each level i, the PlanGraphLevel consists of the actionLayer and
    propositionLayer at this level in this order!
    """
    independentActions = []  # updated to the independentActions of the problem, GraphPlan.py line 31
    actions = []  # updated to the actions of the problem, GraphPlan.py line 32 and planningProblem.py line 25
    props = []  # updated to the propositions of the problem, GraphPlan.py line 33 and planningProblem.py line 26

    @staticmethod
    def setIndependentActions(independentActions):
        """Install the problem-wide list of independent action Pairs."""
        PlanGraphLevel.independentActions = independentActions

    @staticmethod
    def setActions(actions):
        """Install the problem-wide list of actions (including noOps)."""
        PlanGraphLevel.actions = actions

    @staticmethod
    def setProps(props):
        """Install the problem-wide list of propositions."""
        PlanGraphLevel.props = props

    def __init__(self):
        """
        Constructor
        """
        self.actionLayer = ActionLayer()  # see actionLayer.py
        self.propositionLayer = PropositionLayer()  # see propositionLayer.py

    def getPropositionLayer(self):
        return self.propositionLayer

    def setPropositionLayer(self, propLayer):
        self.propositionLayer = propLayer

    def getActionLayer(self):
        return self.actionLayer

    def setActionLayer(self, actionLayer):
        self.actionLayer = actionLayer

    def updateActionLayer(self, previousPropositionLayer):
        """
        Updates the action layer given the previous proposition layer (see propositionLayer.py)
        allAction is the list of all the action (include noOp in the domain)
        """
        allActions = PlanGraphLevel.actions
        for action in allActions:
            if previousPropositionLayer.allPrecondsInLayer(action):
                # Tentatively add the action, then drop it again if any pair
                # of its preconditions is mutex in the previous layer.
                # NOTE(review): removeActions may be invoked once per mutex
                # pair found — presumably idempotent; confirm in actionLayer.py.
                self.actionLayer.addAction(action)
                for p1 in action.getPre():
                    for p2 in action.getPre():
                        if previousPropositionLayer.isMutex(p1, p2):
                            self.actionLayer.removeActions(action)

    def updateMutexActions(self, previousLayerMutexProposition):
        """
        Updates the mutex list in self.actionLayer,
        given the mutex proposition from the previous layer.
        currentLayerActions are the actions in the current action layer
        """
        currentLayerActions = self.actionLayer.getActions()
        # Every ordered pair (a1, a2), a1 != a2, is tested; mutexActions()
        # below encapsulates the independence + competing-needs checks.
        for a1 in currentLayerActions:
            for a2 in currentLayerActions:
                if a1 == a2:
                    continue
                if mutexActions(a1, a2, previousLayerMutexProposition):
                    self.actionLayer.addMutexActions(a1, a2)

    def updatePropositionLayer(self):
        """
        Updates the propositions in the current proposition layer,
        given the current action layer.
        don't forget to update the producers list!
        """
        currentLayerActions = self.actionLayer.getActions()
        # Deduplicate propositions by name while accumulating, for each one,
        # the set of actions that can produce it (its add-effects).
        propsToAdd = dict()
        for action in currentLayerActions:
            for prop in action.getAdd():
                if prop.getName() not in propsToAdd:
                    propsToAdd[prop.getName()] = Proposition(prop.getName())
                temp = propsToAdd[prop.getName()]
                if action not in temp.getProducers():
                    temp.addProducer(action)
        for prop in propsToAdd.values():
            self.propositionLayer.addProposition(prop)

    def updateMutexProposition(self):
        """
        updates the mutex propositions in the current proposition layer
        """
        currentLayerPropositions = self.propositionLayer.getPropositions()
        currentLayerMutexActions = self.actionLayer.getMutexActions()
        for prop1 in currentLayerPropositions:
            for prop2 in currentLayerPropositions:
                if prop1 == prop2:
                    continue
                if mutexPropositions(prop1, prop2, currentLayerMutexActions):
                    self.propositionLayer.addMutexProp(prop1, prop2)

    def expand(self, previousLayer):
        """
        Your algorithm should work as follows:
        First, given the propositions and the list of mutex propositions from the previous layer,
        set the actions in the action layer.
        Then, set the mutex action in the action layer.
        Finally, given all the actions in the current layer, set the propositions and their mutex relations in the proposition layer.
        """
        previousPropositionLayer = previousLayer.getPropositionLayer()
        previousLayerMutexProposition = previousPropositionLayer.getMutexProps()
        self.updateActionLayer(previousPropositionLayer)
        self.updateMutexActions(previousLayerMutexProposition)
        self.updatePropositionLayer()
        self.updateMutexProposition()

    def expandWithoutMutex(self, previousLayer):
        """
        Questions 11 and 12
        You don't have to use this function
        """
        previousLayerProposition = previousLayer.getPropositionLayer()
        "*** YOUR CODE HERE ***"
def mutexActions(a1, a2, mutexProps):
    """
    Decide whether actions a1 and a2 are mutex, given the mutex
    propositions from the previous level (a list of Pair objects).
    They are mutex when they are not independent, or when any pair of
    their preconditions is mutex ("competing needs").
    """
    if Pair(a1, a2) not in PlanGraphLevel.independentActions:
        return True
    return any(Pair(p1, p2) in mutexProps
               for p1 in a1.getPre()
               for p2 in a2.getPre())
def mutexPropositions(prop1, prop2, mutexActions):
    """
    Decide whether two propositions are mutex given the mutex actions of
    the current level (a list of Pair objects): they are mutex iff every
    pair of their producing actions is mutex ("inconsistent support").
    """
    return all(Pair(a1, a2) in mutexActions
               for a1 in prop1.getProducers()
               for a2 in prop2.getProducers())
|
10,311 | c511f17d734c3104c8e4cbc02ddb5757ddd58818 | import numpy as np
import matplotlib.pyplot as plt
# Make a scatter plot by drawing 100 items from a mixture distribution
# 0.3N((1,0)^T, (1 & 0.2 \\ 0.2 & 1)) + 0.7N((-1,0)^T,(1 & -0.2 \\ -0.2 & 1)).
# mean vectors and covariance matrices of the two mixture components
mu1 = np.array([1, 0])
Sigma1 = np.array([[1, 0.2], [0.2, 1]])
mu2 = np.array([-1, 0])
Sigma2 = np.array([[1, -0.2], [-0.2, 1]])
# generate 100 sample points
x = np.empty(100)
y = np.empty(100)
for i in range(100):
    # Bug fix: a mixture draw picks ONE component (component 1 with
    # probability 0.3, else component 2) and samples from it. The original
    # computed 0.3*draw1 + 0.7*draw2, which is a weighted sum of two
    # normals — a different (unimodal) distribution, not the mixture.
    if np.random.rand() < 0.3:
        val = np.random.multivariate_normal(mu1, Sigma1)
    else:
        val = np.random.multivariate_normal(mu2, Sigma2)
    x[i] = val[0]
    y[i] = val[1]
# plot
plt.scatter(x, y)
plt.show()  # bug fix: show() must be called, not merely referenced
10,312 | f99afc8bcf0d26241644ca8510091779c82c1c5a | # coding:utf-8
import requests
import re
import urllib3
from bs4 import BeautifulSoup
urllib3.disable_warnings()
# Log in to lagou.com (comments below translated from Chinese).
# WARNING(review): this script embeds a real-looking username and an
# already-hashed password — credentials should not live in source control.
s = requests.session()
url1 = "https://passport.lagou.com/login/login.html"
# Fetch the login page first to obtain session cookies and the inline
# anti-forgery values. verify=False disables TLS verification (see the
# urllib3.disable_warnings() call above).
r1 = s.get(url1,verify=False)
print(r1.status_code)
# print(r1.content.decode("utf-8"))
res = r1.content.decode("utf-8")
soup= BeautifulSoup(r1.content,"html.parser")
s1 = soup.find_all("script")
# for i in s1:
# # print(i)
print(s1[1].string)
a = s1[1].string
# Pull the anti-forgery token and code out of the inline <script> block
# (see the sample markup in the trailing comment of this file).
X_Anti_Forge_Token = re.findall("_Token = \'(.+?)\'", a)
print(X_Anti_Forge_Token[0])
X_Anti_Forge_Cod = re.findall("e_Code = \'(.+?)\'", a)
print(X_Anti_Forge_Cod[0])
# result = re.findall("<script>(.+?)\</script>", res)
# print(result)
url = "https://passport.lagou.com/login/login.json"
h = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36",
    # NOTE(review): these header values are left empty; presumably the
    # token/code extracted above should be filled in here — confirm.
    "X_Anti_Forge_Token":"",
    "X_Anti_Forge_Cod":""
}
body = {
    "isValidate": "true",
    "username": "13414140950",
    # Appears to be an MD5 digest of the password — TODO confirm.
    "password": "a658cefe791f6c870413ea5fb6420187",
    "request_form_verifyCode": "",
    "submit": "",
    "challenge": "91c87059bb6c17f6718bbd936d9655e2"
}
r = s.post(url, data=body,headers=h, verify=False)
print(r.status_code)
print(r.content.decode("utf-8"))
# <!-- 页面样式 --> <!-- 动态token,防御伪造请求,重复提交 -->
# <script>
# window.X_Anti_Forge_Token = '8ddb320f-82d5-4147-a128-f1bd5ef671fa';
# window.X_Anti_Forge_Code = '48340395';
# </script>
#
# <!-- H5 -->
|
10,313 | 99da42061e36a4e7d8d8bfe10663986181f5d5e1 |
class HiddenAnswer(object):
    """A hangman-style masked answer.

    Tracks the full `correct_answer` alongside `hidden_answer`, a string of
    the same length where unrevealed letters appear as underscores.
    """

    def __init__(self, correct_answer):
        self.correct_answer = correct_answer
        self.hidden_answer = '_' * len(correct_answer)

    def reveal(self, guessed_letter):
        """Uncover every occurrence of `guessed_letter` in the answer.

        Positions already revealed (or not matching) keep their current
        character, so repeated guesses are harmless.
        """
        # zip pairs each answer letter with its current mask character,
        # replacing the manual index bookkeeping of the original.
        self.hidden_answer = ''.join(
            letter if letter == guessed_letter else masked
            for letter, masked in zip(self.correct_answer, self.hidden_answer)
        )

    def __str__(self):
        # ' '.join over a string iterates its characters directly; the
        # original's intermediate list comprehension was redundant.
        return ' '.join(self.hidden_answer)

    def __repr__(self):
        # Same space-separated rendering as __str__ (kept for
        # backward-compatible output in interactive sessions).
        return str(self)
|
10,314 | a366a87bc3ab931a4326c4e61c1af7d3ad1e2072 | #!/opt/app/cacheDB/python/bin/python3
"""
A script for getting data objects from Vertica
References: Vertica Python - https://github.com/uber/vertica-python
"""
import vertica_python
import logging
# Set the logging level to DEBUG
logging.basicConfig(level=logging.INFO)
# Connection settings for the staging Vertica cluster (reporting account).
# WARNING(review): plaintext credentials are committed to source control;
# move these to environment variables or a secrets store.
conn_info = {'host': 'stg-wavert01.bodc.att.com',
             'port': 5433,
             #'user': 'ng2157',
             'user': 'STG_WEBR_OPS',
             #'password': 'password',
             'password': 'WEBR_OPS_STG',
             'database': 'STG_EDM',
             # 10 minutes timeout on queries
             'read_timeout': 600,
             # default throw error on invalid UTF-8 results
             'unicode_error': 'strict',
             # SSL is disabled by default
             'ssl': False,
             'connection_timeout': 500}

# Same cluster/database, but the task-tracking application account.
# NOTE(review): only get_task_details_by_id's schema is reachable with it,
# yet every method below connects with `conn_info` — confirm intent.
conn_info_task = {'host': 'stg-wavert01.bodc.att.com',
                  'port': 5433,
                  #'user': 'ng2157',
                  'user': 'apptasks',
                  #'password': 'password',
                  'password': 'tasksapp',
                  'database': 'STG_EDM',
                  # 10 minutes timeout on queries
                  'read_timeout': 600,
                  # default throw error on invalid UTF-8 results
                  'unicode_error': 'strict',
                  # SSL is disabled by default
                  'ssl': False,
                  'connection_timeout': 500}
class VerticaGetter():
""" A class to get data from Vertica"""
TEST_QUERY="select Attuid from S08_DB.Alltasks"
def __init__(self):
pass
def __str__(self):
return " A vertica data getter for db {} on host {}".format(conn_info['database'],conn_info['host'])
def test_connection(self):
""" A test method just to check connection to Vetica"""
with vertica_python.connect(**conn_info) as connection:
print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(self.TEST_QUERY)
for row in cur.iterate():
print("The row is {}".format(row))
def get_log_parts_param(self,params=['2017-6-9','eVar13']):
""" A method which returns values for a parameter name for a given date"""
result =[]
query_params = {'parameter_name':params[1],'et_log_date':params[0]}
logging.debug("Query params : parameter_name {} and date {}".format(params[1],params[0]))
query = """select a.key,b.parameter_name,b.parameter_value,a.uuid,b.source
from wt_logs a,wt_log_parts b
where a.key = b.key
and a.et_log_date = b.et_log_date
and a.et_log_date = :et_log_date
and b.parameter_name = :parameter_name
order by b.pt_hour_id desc limit 500""".replace('\n',' ')
logging.debug("The log parts part is {}".format(query))
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_log_parts(self,params=['1716057041010008656']):
""" A method which returns all records for a log part with a single recordID """
result =[]
#query_params = {'key':params[1],'et_log_date':params[0]}
query_params = {'key':params[0]}
logging.debug("Query params are: key {}".format(params[0]))
query = """select key,parameter_name,parameter_value,'foo',source
from stg_perf_test.wt_log_parts
where key = :key""".replace('\n',' ')
logging.debug("The log parts part is {}".format(query))
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_log_parts_pattern(self,params=['2017-6-9','eVar85','pid=1']):
""" A method which returns all records for a log part matching a value for parameter value"""
result =[]
query_params = {'parameter_name':params[1],'et_log_date':params[0],'parameter_value':''}
logging.debug("Query params are: key {} and date {}".format(params[1],params[0]))
query = """select b.key,b.parameter_name,b.parameter_value,a.uuid,b.source
from wt_logs a,wt_log_parts b
where a.key = b.key
and a.et_log_date = b.et_log_date
and a.et_log_date = :et_log_date
and b.parameter_name = :parameter_name
and b.parameter_value = :parameter_value""".replace('\n',' ')
logging.debug("The log parts part is {}".format(query))
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_log_parts_pattern_limit(self,params=['2017-6-9','eVar85','pid=1']):
""" A method which returns all records for a log part matching a value for parameter value and limts the result ordered by the date"""
result =[]
query_params = {'parameter_name':params[1],'et_log_date':params[0],'parameter_value':'%' + params[2] + '%'}
logging.debug("Query params are: key {} and date {}".format(params[1],params[0]))
query = """select b.key,b.parameter_name,b.parameter_value,a.uuid,b.source
from wt_logs a,wt_log_parts b
where a.key = b.key
and a.et_log_date = b.et_log_date
and a.et_log_date = :et_log_date
and b.parameter_name = :parameter_name
and b.parameter_value LIKE :parameter_value
order by a.et_log_date limit 500""".replace('\n',' ')
logging.debug("The log parts part is {}".format(query))
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_log_parts_pattern_aggr(self,params=['2017-6-9','eVar85','pid=1']):
""" A method which returns aggregated records for a log part matching a value for parameter name"""
result =[]
query_params = {'parameter_name':params[1],'et_log_date':params[0],'parameter_value':'%' + params[2] + '%'}
logging.debug("Query params are: key {} and date {}".format(params[1],params[0]))
query = """select b.et_log_date,count(key),b.parameter_name,b.parameter_value,b.source
from wt_log_parts b
where b.et_log_date = :et_log_date
and b.parameter_name = :parameter_name
and b.parameter_value LIKE :parameter_value
group by 1,3,4,5 order by 1,2""".replace('\n',' ')
logging.debug("The log parts part is {}".format(query))
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_logs(self,fields=['key','app_visitor_cookie','page_url','et_log_date'],date='2017-06-10'):
""" A method to query wt_logs table for a given date """
result =[]
query_params = {'uuid':'0000000000000000001', 'param_value':'Default'}
logs_query = "select uuid,param_name,param_value from log_parts_backup where uuid = :uuid and param_value =:param_value"
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(logs_query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_logs_and_parts(self):
""" A method which joins logs and log parts table and returns a combined result"""
result =[]
query_params = {'key':'1715230983110018712', 'parameter_name':'imprId','et_log_date':'2017-06-01'}
query = """select a.key,a.uuid,a.page_url,a.domain_name,a.app_visitor_cookie,a.referral_domain
from wt_logs a, wt_log_parts b
where a.key = b.key
and a.et_log_date = :et_log_date
and a.key = :key
and b.parameter_name = :parameter_name""".replace('\n',' ')
with vertica_python.connect(**conn_info) as connection:
#print("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
cur = connection.cursor()
cur.execute(query,query_params)
for row in cur.iterate():
result.append(row)
return(result)
def get_visitor_analysis(self,params=['2017-7-13','2017-7-15','www.att.com']):
    """Return per-day visitor metric counts for a domain and date range.

    params: [start_date, end_date, domain_name].
    NOTE(review): mutable default argument (never mutated here, so harmless,
    but an anti-pattern). `result` below is assigned and never used.
    """
    result =[]
    res_from_query1 = []
    query_params = {'domain_name':params[2],'start_date':params[0],'end_date':params[1]}
    logging.debug("Query params are: end date {} and start date {} for domain {}".format(params[1],params[0],params[2]))
    # NOTE(review): two result columns are both aliased `mcVisIdHigh`
    # (the ROUND(...) count and the CASE count) — confirm one of them was
    # meant to carry a different alias.
    query1 = """select
A.pt_log_date,
TO_CHAR(count(*),'fm999999999.00') as total,
ROUND(count(A.adobe_visid_high_low),2) as mcVisIdHigh,
count(case when D.parameter_value = '0' then null else 1 end) as mcVisIdHigh,
count(B.parameter_value) as uuid,
count(case when instr(B.parameter_value,'-') > 0 then null else B.parameter_value end) as auth_uuid
from wt_logs A left outer join wt_log_parts B on A.key = B.key and A.distribution_key = B.distribution_key and B.parameter_name = 'prop48' and B.pt_log_date between :start_date and :end_date
left outer join wt_log_parts D on A.key = D.key and A.distribution_key = D.distribution_key and D.parameter_name = 'mcVisIdHigh' and D.pt_log_date between :start_date and :end_date
where A.pt_log_date between :start_date and :end_date
and A.domain_name = :domain_name
group by A.pt_log_date""".replace('\n',' ')
    logging.info("The first query is {}".format(query1))
    with vertica_python.connect(**conn_info) as connection:
        cur = connection.cursor()
        cur.execute(query1,query_params)
        for row in cur.iterate():
            res_from_query1.append(row)
    return(res_from_query1)
def get_task_details_by_id(self,params=['ng2157']):
    """Return all task rows for the given att id.

    params: [attid]. NOTE(review): mutable default argument (unmutated).
    """
    result =[]
    query_params = {'attid':params[0]}
    query = """select a.Attuid,a.Status,a.Severity,a.TaskDetails,a.Remarks,a.StartDate,a.EndDate,a.TaskFinishDate,a.InsertDate,a.InsertedBy,a.UpdateDate,a.UpdatedBy
from s08_DB.Alltasks a
where a.Attuid =:attid""".replace('\n',' ')
    with vertica_python.connect(**conn_info) as connection:
        logging.debug("Connected to {} on host{} ".format(conn_info['database'],conn_info['host']))
        logging.info("The read SQL -> {} ".format(query))
        cur = connection.cursor()
        cur.execute(query,query_params)
        for row in cur.iterate():
            result.append(row)
    return(result)
if __name__ == '__main__':
    # Ad-hoc smoke test: fetch tasks for the default attid and print them.
    vertica = VerticaGetter()
    #vertica.test_connection()
    res = vertica.get_task_details_by_id()
    #res = vertica.get_log_parts_pattern_aggr()
    for row in res:
        print(row)
|
10,315 | 39e7eae39e10a72fafa6afab6e4ceeb8dce223a2 | import re
import hashlib
import os
import base64
import random
def login():
    """Prompt for credentials and return them as a (username, password) tuple.

    Re-prompts until the username is non-empty, at most 20 characters and
    contains only letters, digits, dashes, underscores, apostrophes and
    periods, and until the password is non-empty and at most 30 characters.
    """
    username = None
    while username is None:
        candidate = input("Username: ")
        if not candidate:
            print("Username can not be empty")
        elif len(candidate) > 20:
            print("Username is too long")
        elif re.search("[^a-zA-Z0-9\\_\\.\\-\\']", str(candidate)):
            print("Error usernames may only contain numbers, letters, dashes, underscores, apostrophes and periods")
        else:
            username = candidate
    password = None
    while password is None:
        candidate = input("Password: ")
        if not candidate:
            print("Password can not be empty")
        elif len(candidate) > 30:
            print('Password is too long')
        else:
            password = candidate
    return (username, password)
def userPanel():
    """Display the regular-user menu and return the chosen option string."""
    for line in ('\n[ac] Add an Client',
                 '[ec] Edit an Client',
                 '[l] Logout',
                 '[q] Exit'):
        print(line)
    return input("\nChoose an option: ")
def advisorPanel():
    """Display the advisor menu and return the chosen option string."""
    for line in ('\n[ac] Add an Client',
                 '[ec] Edit an Client',
                 '[l] Logout',
                 '[q] Exit'):
        print(line)
    return input("\nChoose an option: ")
def superAdminPanel():
    """Display the super-admin menu and return the chosen option string."""
    for line in ('\n[ac] Add an Client',
                 '[au] Add an user ',
                 '[ec] Edit an Client',
                 '[eu] Edit user role',
                 '[gu] Get all users',
                 '[gc] Get all Clients',
                 '[sl] Show logs',
                 '[l] Logout',
                 '[q] Exit'):
        print(line)
    return input("\nChoose an option: ")
def systemAdminPanel():
    """Display the system-admin menu and return the chosen option string."""
    for line in ('\n[ac] Add an Client',
                 '[ec] Edit an Client',
                 '[au] Add an user',
                 '[eu] Edit user role',
                 '[sl] Show logs',
                 '[l] Logout',
                 '[q] Exit'):
        print(line)
    return input("\nChoose an option: ")
def choose_panel():
    """Show the entry menu and return the user's choice."""
    for line in ("\n[1] Login", "[q] Quit\n"):
        print(line)
    return input("\nLogin or quit?: ")
def captcha():
    """Make the user solve a random "<a> <op> <b>" arithmetic question.

    Shown after too many failed login attempts; loops until the correct
    answer is entered.

    Fixes:
    - An empty answer used to pass the "[^0-9]" check (re.search on '' finds
      nothing) and then crash on int('') — it is now rejected as invalid.
    - The expected value is computed via an operator lookup table instead of
      eval() on a built string: same result, no dynamic code execution.
    """
    print("\nCaptcha, You have tried too many bad login attempts please answer the following question: ")
    operations = {'*': lambda a, b: a * b,
                  '+': lambda a, b: a + b,
                  '-': lambda a, b: a - b}
    symbol = random.choice(('*', '+', '-'))
    left = random.randint(1, 99)
    right = random.randint(1, 20)
    expected = operations[symbol](left, right)
    print("What is the answer to: " + str(left) + ' ' + symbol + ' ' + str(right))
    while True:
        answer = input("the answer is: ")
        # Reject empty input as well as anything containing a non-digit.
        if not answer or re.search('[^0-9]', answer):
            print("Error you can only enter a number")
        elif int(answer) == expected:
            break
        else:
            print("Im sorry this answer is incorrect")
|
10,316 | beea8a00565174cc993dd9f134627aec1edc2bb4 | import sys
sys.setrecursionlimit(10000)
# Count connected components of an undirected graph read from stdin:
# first line "n m", then m edge lines "u v" (vertices are 1-indexed).
n, m = map(int, sys.stdin.readline().split())
a = [[] for _ in range(n+1)]          # adjacency lists, index 0 unused
check = [False]*(n+1)                 # visited flags
for _ in range(m):
    u, v = map(int, sys.stdin.readline().split())
    a[u].append(v)
    a[v].append(u)

def dfs(now):
    # Mark every vertex reachable from `now`.
    # NOTE(review): recursive — even with the limit raised to 10000, a
    # longer chain of vertices would still overflow; confirm input bounds.
    check[now] = True
    for i in a[now]:
        if check[i] is False:
            dfs(i)

cnt = 0
for i in range(1, n+1):
    if check[i] is False:
        dfs(i)
        cnt += 1
print(cnt)
|
10,317 | 5ca187ae37972da2da99714cedcdcfb3671857fc | import fasttext
import numpy as np
class Classifier:
    """Thin wrapper around a fastText model stored on disk."""

    def __init__(self):
        # Path of the serialized fastText model file.
        self.model_path = './model_cooking.bin'

    def train(self):
        # NOTE(review): this loads the existing model and immediately saves
        # it back — no actual training happens here; confirm intent.
        model = fasttext.load_model(self.model_path)
        self._save(model)

    def predict(self, title, body):
        """Return predicted labels (without the '__label__' prefix) for a
        title/body pair; newlines in the body are flattened to spaces."""
        model = fasttext.load_model(self.model_path)
        text = '{} {}'.format(title, body.replace('\n', ' '))
        labels_acc = model.predict(text) ## form = ((l, l, l), (a, a, a))
        labels = [label.replace('__label__', '') for label in labels_acc[0]]
        return labels

    def _save(self, model):
        # Persist the model back to self.model_path.
        model.save_model(self.model_path)
|
10,318 | 5f321d436bc4861bcf0f6df7e3a3e0edc839fb76 | import pytest
from voyage.exceptions import QueryException
from voyage.models import Comment, Membership, Voyage
from voyage.schema.queries import VoyageQuery
def test_getting_all_voyages(db_voyage):
    # The voyages resolver returns every voyage in the database.
    voyages = VoyageQuery.resolve_voyages('root', 'info').all()
    assert voyages == [db_voyage]
def test_getting_single_voyage(db_voyage):
    # Resolving by id yields the matching voyage.
    voyage = VoyageQuery.resolve_voyage('root', 'info', db_voyage.id)
    assert voyage == db_voyage
def test_getting_comments_for_chapter_user_has_access_to(db_session, db_voyage, db_user_member, client):
    # A member sees only comments of the queried voyage, not comments made
    # on other voyages.
    assert db_user_member in db_voyage.members
    comment = Comment(
        voyage=db_voyage,
        user=db_user_member,
        text='Test comment',
        chapter=db_voyage.chapters[0],
    )
    db_session.add(comment)
    # Second voyage with its own comment acts as the negative case.
    other_voyage = Voyage(name='Other voyage', media=db_voyage.media, owner=db_user_member)
    db_session.add(other_voyage)
    other_comment = Comment(
        voyage=other_voyage,
        user=db_user_member,
        text='Different comment',
        chapter=other_voyage.chapters[0],
    )
    db_session.add(other_comment)
    db_session.commit()
    with client.use(db_user_member):
        comments = VoyageQuery.resolve_comments_for_voyage('root', 'info', db_voyage.id).all()
        assert len(comments) == 1
        assert comments[0] == comment
def test_getting_comments_raises_if_user_not_member_of_voyage(db_voyage, db_user, client):
    # Non-members must not be able to query a voyage's comments; the
    # resolver hides the voyage's existence with "Voyage not found".
    assert db_user not in db_voyage.members
    with client.use(db_user):
        with pytest.raises(QueryException) as exc:
            VoyageQuery.resolve_comments_for_voyage('root', 'info', db_voyage.id)
        assert 'Voyage not found' in exc.exconly()
def test_getting_comments_doesnt_show_user_comments_in_future_chapters(
        db_session, db_voyage, db_user_owner, db_user_member, client):
    # Comments on chapters the viewer has not reached yet are hidden until
    # the viewer's membership advances to that chapter.
    comment = Comment(
        voyage=db_voyage,
        user=db_user_member,
        text='Test comment, on future chapter',
        chapter=db_voyage.chapters[1],
    )
    db_session.add(comment)
    db_session.commit()
    membership = (
        Membership.query
        .filter(
            Membership.voyage == db_voyage,
            Membership.user == db_user_owner,
        )
    ).first()
    # The owner starts on chapter 0, one chapter behind the comment.
    assert membership.current_chapter == db_voyage.chapters[0]
    with client.use(db_user_owner):
        comments = VoyageQuery.resolve_comments_for_voyage('root', 'info', db_voyage.id).all()
        assert len(comments) == 0
    membership.current_chapter = db_voyage.chapters[1] # The chapter the comment was on
    db_session.commit()
    with client.use(db_user_owner):
        comments = VoyageQuery.resolve_comments_for_voyage('root', 'info', db_voyage.id).all()
        assert len(comments) == 1
|
10,319 | ddcaef981ec2d22e877718d03562abbdee86ada6 |
from xai.brain.wordbase.nouns._retrofit import _RETROFIT
# class header
class _RETROFITTING(_RETROFIT, ):
    """Noun word entry "retrofitting", derived from the base word "retrofit"."""
    def __init__(self,):
        _RETROFIT.__init__(self)
        self.name = "RETROFITTING"   # surface form of this entry
        self.specie = 'nouns'        # part-of-speech bucket
        self.basic = "retrofit"      # lemma / base word
        self.jsondata = {}           # extra metadata, empty by default
|
10,320 | bfefdf164b159135d6698981278e90e418c94e08 | #!/bin/env python
import math
# Interactive generator: asks for building properties and resource costs,
# then fills template.json into definitions/building-<category>-<typename>.json.
name = input("Name: ")
description = input("Description: ")
typename = input("Typename: ")
category = input("Category: ")
erosion_type = input("Erosion type: ")
res_ = {}                # total resource cost per resource name
res_per_progress = {}    # per-progress-tick cost (stringified, rounded up)
# Collect resource/amount pairs until either prompt is left blank.
while True:
    res = input("Resource: ")
    amount = input("Amount: ")
    if not res or not amount:
        break
    res_[res] = int(amount)
progress_duration = int(input("Build duration: "))
# Progress is tracked in whole steps of 100, so the duration must divide 100.
while 100 % progress_duration != 0:
    print("Invalid! 100 must be dividable by this number!")
    progress_duration = int(input("Build duration: "))
progress = 100 / progress_duration
for k in res_:
    # Round up so the summed per-tick cost never falls short of the total.
    res_per_progress[k] = str(int(math.ceil(float(res_[k]) / float(progress_duration))))
    print(k + " per progress: " + str(res_per_progress[k]))
f = open("template.json", "r")
w = open("definitions/building-" + category + "-" + typename + ".json", "w")
progress = str(int(progress))
for line in f:
    # Substitute every template placeholder on each line.
    line = line.replace("%NAME", name).replace("%DESCR", description).replace("%TYPE", typename).replace("%CATEGORY", category).replace("%PROGRESS_AMOUNT", progress).replace("%RESOURCES_PER_PROGRESS", ", ".join( [ "\"" + k + "\" : " + res_per_progress[k] for k in res_per_progress ] )).replace("%EROSION_T", erosion_type)
    w.write(line)
w.close()
f.close()
|
class Turtle:
    """Holds a count of turtles."""

    def __init__(self, x):
        self.num = x


class Fish:
    """Holds a count of fish."""

    def __init__(self, x):
        self.num = x


class Pool:
    """A pool populated with a number of turtles and fish."""

    def __init__(self, x, y):
        # Delegate the counts to the animal classes, then copy them out.
        turtles = Turtle(x)
        fishes = Fish(y)
        self.turtle = turtles.num
        self.fish = fishes.num

    def print_num(self):
        """Print the population summary."""
        print('水池中乌龟%d只,小鱼%d条' % (self.turtle, self.fish))
# Demo: a pool with 10 turtles and 100 fish.
pool = Pool(10, 100)
pool.print_num()
10,322 | 79acaf993c08a2002ffcb060825225c45e2548a3 | # Generated by Django 3.2.3 on 2021-05-21 08:17
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated: adds the Course model (title, HTML description,
    category FK with SET_NULL, and a one-to-one responsible Mentor)."""

    dependencies = [
        ('education', '0010_student'),
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='', max_length=150, verbose_name='Название')),
                ('description', tinymce.models.HTMLField(blank=True, default='', verbose_name='Описание курса')),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='courses', to='education.category', verbose_name='Категория')),
                ('responsible', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='education.mentor', verbose_name='Отвественный')),
            ],
            options={
                'verbose_name': 'Курс',
                'verbose_name_plural': 'Курс',
            },
        ),
    ]
|
10,323 | 7183346b0ba501b080f4596e260d4dda082d1d3f | """
CAD model for camera mounting posts.
"""
from py2scad import *
from part import Part
class Camera_Post(Part):
    """Camera mounting post: a DXF profile extruded to the given length."""

    def make(self):
        # Expected params: 'dxf_profile' (DXF file), 'length', 'width'.
        dxf_profile = self.params['dxf_profile']
        length = self.params['length']
        width = self.params['width']  # NOTE(review): read but never used — confirm
        part = Linear_DXF_Extrude(dxf_profile,height=length)
        # Scale X/Y from inches to millimetres; Z (the extruded length) is kept.
        part = Scale(part,v=(INCH2MM, INCH2MM, 1.0))
        self.part = part
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Build the part from the shared parameter set and emit an OpenSCAD file.
    import params
    part = Camera_Post(**params.camera_post)
    prog = SCAD_Prog()
    prog.fn = 50      # curve-rendering resolution
    prog.add(part)
    prog.write('camera_post.scad')
|
10,324 | 800bb4114af7a2c3161505c27f8d32928e7019bf | from reportlab.lib import utils
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import landscape, A4
from django.contrib.staticfiles.storage import staticfiles_storage
PAGE_SIZE = landscape(A4)  # (width, height) in points


def render_pdf(session, fileobj):
    """Render the one-page "Your sugar counts" report for *session* into
    *fileobj* (a file-like object receiving the PDF bytes).

    All coordinates below are centimetres from the top-left corner; the
    drawImage/drawString helpers convert to reportlab's bottom-left origin.
    """
    c = canvas.Canvas(fileobj, pagesize=PAGE_SIZE)
    # Header
    drawImage(c, 'image002.png', 1, 1.3, 2) # Litchdon
    drawImage(c, 'image020.png', 10, 1.3, 8) # Your sugar counts
    drawImage(c, 'image037.png', 22, 1.5, 6) # NHS Trust
    drawImage(c, 'image013.png', 2, 4.5, 11) # "One in 100"
    drawImage(c, 'image010.png', 2, 7.3, 11) # "One in 10"
    drawImage(c, 'image009.png', 5.4, 8.5, 11) # "above this level"
    drawImage(c, 'image005.png', 14, 4.5, 11) # background
    drawImage(c, 'image016.png', 25, 4.5, 1.8) # 100
    drawImage(c, 'image017.png', 25, 7.3, 1.5) # 58
    drawImage(c, 'image018.png', 25, 8.5, 1.5) # 48
    for idx, result in enumerate(session.results.all()[:4]):
        # NOTE(review): idx is never used, so all (up to 4) results are
        # drawn at the same coordinates and overprint each other — confirm
        # whether an idx-based vertical offset was intended.
        drawString(c, 24, 5, "%s" % result.value)
        drawImage(c, 'image041.png', 24, 5.5, 2) # arrow
    drawImage(c, 'image036.png', 20, 10, 5.5) # latest result
    drawImage(c, 'image014.png', 2, 10, 10) # what is
    drawImage(c, 'image011.png', 1, 12, 27.5) # Reducing..
    drawImage(c, 'image012.png', 1, 14, 10) # You can reduce
    drawImage(c, 'image025.png', 12, 14, 16.5) # Discuss
    c.showPage()
    c.save()
def drawString(canvas, x, y, *args, **kwargs):
    """Draw text at (x, y) given in centimetres from the top-left corner."""
    # Convert cm to points and flip y (reportlab's origin is bottom-left).
    x_pt = x * cm
    y_pt = PAGE_SIZE[1] - (y * cm)
    canvas.drawString(x_pt, y_pt, *args, **kwargs)
def drawImage(canvas, filename, x, y, width, height=None):
    """Draw a static image at (x, y) centimetres from the top-left corner.

    *width* is in centimetres; if *height* is omitted it is derived from
    the image file's aspect ratio.
    """
    path = staticfiles_storage.path('images/f_reports_pdf/%s' % filename)
    if height is None:
        iw, ih = utils.ImageReader(path).getSize()
        aspect = ih / float(iw)
        height = width * aspect
    canvas.drawImage(
        path,
        x * cm,
        # Flip y: reportlab's origin is the bottom-left corner.
        PAGE_SIZE[1] - (y * cm) - (height * cm),
        width=width * cm,
        height=height * cm,
        mask='auto'
    )
|
10,325 | 9bdd5fdbc78aaa1433fe29fb515fcfe3582e6bee | import random
def checking(i):
    """Return True if *i* parses as a whole number, else False.

    The game loop feeds the validated string straight to int(), so this
    must validate with int() rather than float(): previously "9.5" passed
    the check and then made int(userGuess) raise ValueError.
    """
    try:
        int(i)
        return True
    except ValueError:
        return False
# Number-guessing game (Python 2: uses raw_input / quit()).
# Each outer iteration starts a new game with a random target in 1..9;
# typing "exit" at any prompt ends the program.
play = True        # NOTE(review): assigned but never used
proceed = False    # re-prompt flag for invalid input
while True:
    rand = random.randint(1,9)
    guessCount = 0
    userGuess = raw_input("Guess a number: ")
    if userGuess == "exit":
        quit()
    else:
        #print (type(userGuess))
        # Re-prompt until the entry is numeric.
        if checking(userGuess) == False:
            while proceed == False:
                print('Enter a valid input')
                userGuess = raw_input("Guess a number: ")
                if userGuess == "exit":
                    quit()
                if checking(userGuess) == True:
                    proceed = True
        # Inner loop: keep guessing until the target is hit.
        while True:
            userGuess = int(userGuess)
            if userGuess == rand:
                guessCount = guessCount + 1
                print('You guessed right! Your guess count: %d' % (guessCount))
                print('Starting new game')
                break
            else:
                print('You guessed wrong!')
                if userGuess < rand:
                    print('Your guess was less than the random')
                    guessCount = guessCount + 1
                    #print(rand-userGuess)
                else:
                    print('Your guess was higher than the random')
                    guessCount = guessCount + 1
                    #print(userGuess-rand)
                print('Try again')
                # NOTE(review): the proceed flag is shared across both
                # validation loops and reset here — fragile; a helper that
                # loops internally until valid would be clearer.
                proceed = False
                userGuess = raw_input("Guess a number: ")
                if userGuess == "exit":
                    quit()
                else:
                    if checking(userGuess) == False:
                        while proceed == False:
                            print('Enter a valid input')
                            userGuess = raw_input("Guess a number: ")
                            if userGuess == "exit":
                                quit()
                            if checking(userGuess) == True:
                                proceed = True
|
10,326 | 67196941bf8c17b30bb418b5614317d29aab67d1 | # Generated by Django 3.0.4 on 2020-03-08 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: tightens Question fields (answer_correct help text,
    category choices, difficulty levels)."""

    dependencies = [
        ("api", "0001_create_model_question"),
    ]

    operations = [
        migrations.AlterField(
            model_name="question",
            name="answer_correct",
            field=models.CharField(help_text="a, b, c ou d", max_length=50),
        ),
        migrations.AlterField(
            model_name="question",
            name="category",
            field=models.CharField(
                choices=[
                    ("action", "Leviers d'action"),
                    ("biodiversité", "Biodiversité"),
                    ("climat", "Climat"),
                    ("consommation", "Consommation"),
                    ("énergie", "Energie"),
                    ("histoire", "Histoire, Anthropologie"),
                    ("pollution", "Pollution"),
                    ("ressources", "Ressources (hors énergie)"),
                    ("science", "Science"),
                    ("autre", "Autre"),
                ],
                max_length=50,
            ),
        ),
        migrations.AlterField(
            model_name="question",
            name="difficulty",
            field=models.IntegerField(
                choices=[(1, "Facile"), (2, "Moyen"), (3, "Difficile"), (4, "Expert")],
                help_text="Le niveau de difficulté de la question",
            ),
        ),
    ]
|
10,327 | bedbce828e15d9d9cce130af40efb510f266dfd5 | from django.urls import re_path, path
from . import views
# Catch-all routing: the empty path and every trailing-slash path render
# views.index, letting a client-side router handle the rest.
# https://stackoverflow.com/a/59604748
urlpatterns = [
    path('', views.index),
    re_path(r'^.*/$', views.index)
]
|
10,328 | 65d46feb6ac23ec715552fa718484606f925b84b | import pycountry
from pycountry_convert.convert_country_alpha2_to_continent_code import country_alpha2_to_continent_code
# Collect ISO 3166-1 alpha-2 codes of European countries and print them as
# a one-column "cc2" table.
europe = []
for c in pycountry.countries:
    try:
        continent = country_alpha2_to_continent_code(c.alpha_2)
    except KeyError:
        # Some territories have no continent mapping; skip them.
        continue
    if continent != "EU":
        continue
    europe.append(c.alpha_2)
# Not sure why Vatican City is left out
europe.append("VA")
# Let's put in Cyprus too
europe.append("CY")
print("cc2")
for cc2 in sorted(europe):
    print(cc2)
|
10,329 | c35569cff725d433a4e35229fd9fd2ea3aadb512 | import unittest
import HW6
class TestHW6(unittest.TestCase):
    """Checks HW6.solve against the expected result for sample inputs."""

    def test_111(self):
        self.assertEqual(HW6.solve([1,1,1,1,1,1]), 1)

    def test_123(self):
        self.assertEqual(HW6.solve([1,2,3]), 3)

    def test_2(self):
        self.assertEqual(HW6.solve([3,4,5,6]), 6)

    def test_3(self):
        self.assertEqual(HW6.solve([1,4,3,9,1,2,4,10]), 10)

if __name__ == '__main__':
    unittest.main()
|
10,330 | a51dfa8ab8c344a7f1552a1759d3c1bc57b0dbe0 | #External imports
from flask import Flask
from flask_restful import Resource, Api, reqparse
import json
#Import classes
from task_service.task import Task, TaskList
# Create an instance of Flask
app = Flask(__name__)
api = Api(app)
# REST routes: a single task addressed by integer id, and the collection.
api.add_resource(Task,'/tasks/<int:identifier>')
api.add_resource(TaskList, '/tasks')
class Employee:
    """Employee with a base salary plus a bonus.

    ``totalSalary`` is computed (salary + salaryBonas); assigning to it
    keeps the salary fixed and adjusts the bonus so the total matches.
    """

    company = "Bharat Gas"
    salary = 5600
    salaryBonas = 500

    @property
    def totalSalary(self):
        """Base salary plus bonus."""
        total = self.salary + self.salaryBonas
        return total

    @totalSalary.setter
    def totalSalary(self, val):
        # Absorb the difference into the bonus.
        self.salaryBonas = val - self.salary
# Demo: read, reassign and re-read the computed total salary.
e = Employee()
print(e.totalSalary)
e.totalSalary = 5800
print(e.totalSalary)
print(e.salary)
print(e.salaryBonas)
10,332 | 5e4608934d258a6c00770b88b9224e5a8ab8fedc | from pymysql import connect, cursors
from pymysql.err import OperationalError
import os
import configparser
# Read MySQL connection settings from db_config.ini located one directory
# above this file.
_base_dir = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
_db_config_file = 'db_config.ini'
_cf = configparser.ConfigParser()
_cf.read(os.path.join(_base_dir, _db_config_file))
host = _cf.get("mysqlconf", "host")
port = _cf.get("mysqlconf", "port")  # NOTE(review): read but never used below
db = _cf.get("mysqlconf", "db_name")
user = _cf.get("mysqlconf", "user")
password = _cf.get("mysqlconf", "password")
# Encapsulating MySQL operation
class DB(object):
    """Small helper around a PyMySQL connection for test-data setup."""

    def __init__(self, *args, **kwargs):
        try:
            self.conn = connect(host=host,
                                user=user,
                                password=password,
                                db=db,
                                charset='utf8mb4',
                                cursorclass=cursors.DictCursor)
        except OperationalError as e:
            # NOTE(review): the error is only printed, so self.conn stays
            # unset and later calls raise AttributeError — kept as-is to
            # preserve the existing control flow.
            print("Mysql Error %d: %s" % (e.args[0], e.args[1]))

    def clear(self, table_name):
        """Delete every row from *table_name* (FK checks disabled first)."""
        real_sql = "delete from " + table_name + ';'
        with self.conn.cursor() as cursor:
            cursor.execute("SET FOREIGN_KEY_CHECKS=0;")
            print('- ' + real_sql)
            cursor.execute(real_sql)
        self.conn.commit()

    def insert(self, table_name, table_data):
        """Insert one row; *table_data* maps column name -> value.

        Values are passed as query parameters (%s placeholders) instead of
        being quoted into the SQL text, which fixes broken statements for
        values containing quotes, avoids SQL injection, and no longer
        mutates the caller's dict. Identifiers cannot be parameterized, so
        *table_name* and the column names must come from trusted code.
        """
        columns = ','.join(table_data)
        placeholders = ','.join(['%s'] * len(table_data))
        real_sql = ("INSERT INTO " + table_name +
                    "(" + columns + ") VALUES (" + placeholders + ")")
        with self.conn.cursor() as cursor:
            print('- ' + real_sql)
            cursor.execute(real_sql, list(table_data.values()))
        self.conn.commit()

    def close(self):
        """Close the underlying connection."""
        self.conn.close()
if __name__ == '__main__':
    # Smoke test: wipe the sign_event table and insert one sample row.
    print('Using INI file: ' + os.path.join(_base_dir, _db_config_file))
    # NOTE(review): rebinding `db` here shadows the module-level database
    # name string read from the config.
    db = DB()
    table_name = "sign_event"
    data = {'id':12,
            'name': '大可乐',
            'attendees_limit': 200,
            'status': 1,
            'address': '古城大理南陵西路12号悦来客栈',
            'start_time': '2012-09-12 14:30:00',
            'create_time': '2018-06-11 09:30:00'
            }
    db.clear(table_name)
    db.insert(table_name, data)
    db.close()
10,333 | 4745470ef771415383d1bbe6b9ab04e1f750d57d | # Generated by Django 3.1.2 on 2020-12-01 13:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds an author FK to List, clears Meta options and
    the index_together constraint."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('galleryview', '0004_remove_list_available'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='list',
            options={},
        ),
        migrations.AddField(
            model_name='list',
            name='author',
            field=models.ForeignKey(default='Ricky', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterIndexTogether(
            name='list',
            index_together=set(),
        ),
    ]
|
10,334 | 68618de734696fd9a2c335031e96cf4171016186 | from .abstractgameunit import AbstractGameUnit
class OrcRider(AbstractGameUnit):
    """Enemy game unit: an orc riding a wolf."""

    def __init__(self, name=''):
        super().__init__(name=name)
        self.unit_type = 'enemy'
        self.hut_number = 0
        self.max_hp = 30
        self.health_meter = self.max_hp  # starts at full health

    def info(self):
        """Print a short self-description."""
        print("I'm a Orc Wolf Rider")
def greet(name, age):
    """Return a greeting naming *name* and *age*.

    Uses an f-string instead of ``+`` concatenation, so *age* may be an
    int as well as a string (the old version raised TypeError for ints).
    """
    return f"Your name is {name} and you are {age} years old."
# Greet the user with the name and age they type in (both kept as strings).
name = input("Enter your name: ")
age = input("Enter your age: ")
print(greet(name, age))
def add(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
def subtract(a, b):
    """Return *a* minus *b*."""
    difference = a - b
    return difference
# Read two integers and show their sum and difference.
num_one = int(input("Enter a number: "))
num_two = int(input("Enter another number: "))
message = f"The result of {num_one} + {num_two} is {add(num_one, num_two)}"
print(message)
message = f"The result of {num_one} - {num_two} is {subtract(num_one, num_two)}"
print(message)
def get_result(answer):
    """Return True when the user picked option "a", else False."""
    # The if/else returning boolean literals collapses to the comparison.
    return answer == "a"
# Simple yes/no poll driven by get_result().
print("Do you like programing?")
print("a. Yes")
print("b. No")
result = input("Enter a or b: ")
if get_result(result):
    print("Awesome! Programming is really fun!")
else:
    print("Hang in there! It's an acquired taste!")
|
10,336 | 97c6365f0109ba99c9526258c5a595e2c5cf524e | import pyodbc
import pyzure
def to_azure(result, all_batch_id, azure_instance):
    """Replace the given batches in the Azure table, then send *result*.

    Rows whose batch_id is in *all_batch_id* are deleted first so the send
    below does not duplicate them. Always returns 0.
    """
    # Quote each id so it can be spliced into the IN (...) list below.
    # NOTE(review): ids are string-concatenated into SQL — acceptable only
    # if they never come from untrusted input; confirm.
    all_batch_id = ["'" + e + "'" for e in all_batch_id]
    azure_table = result["table_name"]
    print(azure_table)
    if all_batch_id:
        try:
            query = 'DELETE FROM ' + azure_table + ' WHERE batch_id IN ' + "(" + ",".join(all_batch_id) + ");"
            pyzure.execute.execute_query(azure_instance, query)
        except pyodbc.ProgrammingError:
            # Best-effort delete (e.g. the table may not exist yet).
            pass
    # ':' is not accepted in column names on the Azure side.
    result["columns_name"] = [r.replace(":", "_") for r in result["columns_name"]]
    pyzure.send_to_azure(azure_instance, result, replace=False)
    return 0
|
10,337 | fea43a3b50f59f4209fb8dbf1a1afd53050fd986 | #Write a Python program to sum all the items in a list.
def sum_list(inp_list):
    """Return the sum of all items in *inp_list* (0 for an empty list).

    Uses the built-in sum(); the old manual loop also shadowed the ``sum``
    builtin with a local variable.
    """
    return sum(inp_list)
def main():
    # Demo: sum the numbers 1..10 and print the result.
    inp_list = [1,2,3,4,5,6,7,8,9,10]
    print('The sum of all the elements of the list is:',sum_list(inp_list))

main()
|
10,338 | 1c22b822a30f860aeb818634e0dbffb995b4e3cc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 6 12:55:42 2018
@author: Yacong
"""
import glob,os
import numpy as np
import matplotlib.pyplot as plt
import math
# Input/configuration ----------------------------------------------------
type_lookup_file = 'id-type.tab'            # atom id -> type table
dump_file_head = 'dump.fc_0.'               # prefix of the per-frame dumps
path_to_splitted_dump = './25GPa_threshold_0_ref/'
bin_width = 0.1                             # histogram bin width (area units)
# plot_axis_lim[Element] = [x_lo_hist, x_hi_hist, y_lo_hist, y_hi_hist,x_lo_cn, x_hi_cn, y_lo_cn, y_hi_cn]
plot_axis_lim = {'Mg':[-0.5, 12, 0, 3200, 0, 12, 0, 10],
                 'Ca':[-0.5, 12, 0, 5760, 0, 12, 0, 10],
                 'Al':[-0.5, 10, 0, 3200, 0, 8, 0, 6],
                 'Si':[-0.5, 10, 0, 10000, 0, 5, 0, 8],}
# Atom count per numeric type; used to normalise coordination numbers.
atom_count = {1:0,\
              2:64,\
              3:100,\
              4:72,\
              5:200,\
              6:672}
# create type_table (id-type): maps atom id -> numeric type from the lookup file.
type_table = {0:0}
with open(type_lookup_file,'r') as type_lookup:
    for lc,lines in enumerate(type_lookup):
        if lc > 0:  # skip the first (header) row
            type_table[int(lines.split()[0])] = int(lines.split()[1])
os.chdir(path_to_splitted_dump)
# do statistics and make plots:
# For each element (Mg/Ca/Al/Si) collect the Voronoi face areas shared with
# type-6 atoms, then plot an area histogram and a coordination-number-vs-
# area-threshold curve. Single-file dumps are shown interactively; multiple
# frames are accumulated and saved to PNGs.
dump_file_list = glob.glob(dump_file_head+'*')
if len(dump_file_list) == 1:
    print('Found only 1 dump file!')
    dump_file = dump_file_list[0]
    with open(dump_file,'r') as neighbor_raw:
        # read face area array into memory
        face_stat_mg = np.asarray([])
        face_stat_ca = np.asarray([])
        face_stat_al = np.asarray([])
        face_stat_si = np.asarray([])
        for lc,lines in enumerate(neighbor_raw):
            if lc > 9:  # skip the first 10 lines (presumably the dump header)
                center_atom = type_table[int(lines.split()[1])]
                coord_atom = type_table[int(lines.split()[2])]
                face_area = float(lines.split()[3])
                if coord_atom == 6:  # keep only faces shared with type-6 atoms
                    if center_atom == 2:
                        face_stat_mg = np.append(face_stat_mg,[face_area])
                    elif center_atom == 3:
                        face_stat_ca = np.append(face_stat_ca,[face_area])
                    elif center_atom == 4:
                        face_stat_al = np.append(face_stat_al,[face_area])
                    elif center_atom == 5:
                        face_stat_si = np.append(face_stat_si,[face_area])
    # sort large->small and compute cn
    face_stat_mg[::-1].sort()
    face_stat_ca[::-1].sort()
    face_stat_al[::-1].sort()
    face_stat_si[::-1].sort()
    # cn[i] = (i+1)/atoms: coordination number if the threshold is set just
    # below the (i+1)-th largest face area.
    cn_mg = (np.expand_dims(np.arange(1,len(face_stat_mg)+1),axis=1)) / atom_count[2]
    cn_ca = (np.expand_dims(np.arange(1,len(face_stat_ca)+1),axis=1)) / atom_count[3]
    cn_al = (np.expand_dims(np.arange(1,len(face_stat_al)+1),axis=1)) / atom_count[4]
    cn_si = (np.expand_dims(np.arange(1,len(face_stat_si)+1),axis=1)) / atom_count[5]
    # bin plot of Mg
    hist_fig = plt.figure('Mg-O')
    plt.xlabel('face area / Angstrom^2')
    plt.ylabel('Count')
    plt.title('Histogram of voronoi face area')
    ax = hist_fig.gca()
    ax.set_xlim(plot_axis_lim['Mg'][0:2])
    ax.set_ylim(plot_axis_lim['Mg'][2:4])
    ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Mg'][1]),10))
    # NOTE(review): BUG — the intended tick step (1000 here) sits outside
    # the np.arange(...) call, so it is passed to set_yticks as a second
    # positional argument (the `labels` parameter). It should presumably
    # read np.arange(lo, hi, 1000). The same slip repeats in every
    # set_yticks call below with a trailing ",10)" or ",1000)" outside the
    # arange parentheses (including the commented-out blocks).
    ax.set_yticks(np.arange(plot_axis_lim['Mg'][2],plot_axis_lim['Mg'][3]),1000)
    # NOTE(review): face_stat_mg[0] raises IndexError if no Mg faces were
    # found — same for the other elements below.
    n,bins,patches=plt.hist(face_stat_mg,int(face_stat_mg[0]//bin_width))
    plt.show()
    # cn plot of Mg
    cn_fig = plt.figure('CN(Mg-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Mg-O) - threshold of face area')
    plt.plot(face_stat_mg,cn_mg,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Mg'][4:6])
    ax.set_ylim(plot_axis_lim['Mg'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Mg'][4],plot_axis_lim['Mg'][5],10))
    ax.set_yticks(np.arange(plot_axis_lim['Mg'][6],plot_axis_lim['Mg'][7]))
    plt.grid()
    plt.show()
    # bin plot of Ca
    hist_fig = plt.figure('Ca-O')
    plt.xlabel('face area / Angstrom^2')
    plt.ylabel('Count')
    plt.title('Histogram of voronoi face area')
    ax = hist_fig.gca()
    ax.set_xlim(plot_axis_lim['Ca'][0:2])
    ax.set_ylim(plot_axis_lim['Ca'][2:4])
    ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Ca'][1])))
    ax.set_yticks(np.arange(plot_axis_lim['Ca'][2],plot_axis_lim['Ca'][3]),10)
    n,bins,patches=plt.hist(face_stat_ca,int(face_stat_ca[0]//bin_width))
    plt.show()
    # cn plot of Ca
    cn_fig = plt.figure('CN(Ca-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Ca-O) - threshold of face area')
    plt.plot(face_stat_ca,cn_ca,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Ca'][4:6])
    ax.set_ylim(plot_axis_lim['Ca'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Ca'][4],plot_axis_lim['Ca'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Ca'][6],plot_axis_lim['Ca'][7]))
    plt.grid()
    plt.show()
    # bin plot of Al
    hist_fig = plt.figure('Al-O')
    plt.xlabel('face area / Angstrom^2')
    plt.ylabel('Count')
    plt.title('Histogram of voronoi face area')
    ax = hist_fig.gca()
    ax.set_xlim(plot_axis_lim['Al'][0:2])
    ax.set_ylim(plot_axis_lim['Al'][2:4])
    ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Al'][1])))
    ax.set_yticks(np.arange(plot_axis_lim['Al'][2],plot_axis_lim['Al'][3]),10)
    n,bins,patches=plt.hist(face_stat_al,int(face_stat_al[0]//bin_width))
    plt.show()
    # cn plot of Al
    cn_fig = plt.figure('CN(Al-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Al-O) - threshold of face area')
    plt.plot(face_stat_al,cn_al,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Al'][4:6])
    ax.set_ylim(plot_axis_lim['Al'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Al'][4],plot_axis_lim['Al'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Al'][6],plot_axis_lim['Al'][7]))
    plt.grid()
    plt.show()
    # bin plot of Si
    hist_fig = plt.figure('Si-O')
    plt.xlabel('face area / Angstrom^2')
    plt.ylabel('Count')
    plt.title('Histogram of voronoi face area')
    ax = hist_fig.gca()
    ax.set_xlim(plot_axis_lim['Si'][0:2])
    ax.set_ylim(plot_axis_lim['Si'][2:4])
    ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Si'][1])))
    ax.set_yticks(np.arange(plot_axis_lim['Si'][2],plot_axis_lim['Si'][3]),10)
    n,bins,patches=plt.hist(face_stat_si,int(face_stat_si[0]//bin_width))
    plt.show()
    # cn plot of Si
    cn_fig = plt.figure('CN(Si-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Si-O) - threshold of face area')
    plt.plot(face_stat_si,cn_si,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Si'][4:6])
    ax.set_ylim(plot_axis_lim['Si'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Si'][4],plot_axis_lim['Si'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Si'][6],plot_axis_lim['Si'][7]))
    plt.grid()
    plt.show()
else:
    # Multi-frame trajectory: accumulate face areas over all frames and
    # normalise the coordination numbers by the frame count.
    dump_file_list.remove('dump.fc_0.voro')
    n_frame = len(dump_file_list)
    # print(len(dump_file_list))
    traj_face_stat_mg = np.asarray([])
    traj_face_stat_ca = np.asarray([])
    traj_face_stat_al = np.asarray([])
    traj_face_stat_si = np.asarray([])
    for dump_file in dump_file_list:
        timestep = dump_file.split('.')[-1]  # NOTE(review): assigned, never used
        with open(dump_file,'r') as neighbor_raw:
            # read face area array into memory
            face_stat_mg = np.asarray([])
            face_stat_ca = np.asarray([])
            face_stat_al = np.asarray([])
            face_stat_si = np.asarray([])
            for lc,lines in enumerate(neighbor_raw):
                if lc > 9:  # skip the first 10 lines (presumably the dump header)
                    center_atom = type_table[int(lines.split()[1])]
                    coord_atom = type_table[int(lines.split()[2])]
                    face_area = float(lines.split()[3])
                    if coord_atom == 6:
                        if center_atom == 2:
                            face_stat_mg = np.append(face_stat_mg,[face_area])
                        elif center_atom == 3:
                            face_stat_ca = np.append(face_stat_ca,[face_area])
                        elif center_atom == 4:
                            face_stat_al = np.append(face_stat_al,[face_area])
                        elif center_atom == 5:
                            face_stat_si = np.append(face_stat_si,[face_area])
        traj_face_stat_mg = np.concatenate((traj_face_stat_mg,face_stat_mg))
        traj_face_stat_ca = np.concatenate((traj_face_stat_ca,face_stat_ca))
        traj_face_stat_al = np.concatenate((traj_face_stat_al,face_stat_al))
        traj_face_stat_si = np.concatenate((traj_face_stat_si,face_stat_si))
    # sort large->small and compute cn
    traj_face_stat_mg[::-1].sort()
    traj_face_stat_ca[::-1].sort()
    traj_face_stat_al[::-1].sort()
    traj_face_stat_si[::-1].sort()
    cn_mg = (np.expand_dims(np.arange(1,len(traj_face_stat_mg)+1),axis=1)) / atom_count[2] / n_frame
    cn_ca = (np.expand_dims(np.arange(1,len(traj_face_stat_ca)+1),axis=1)) / atom_count[3] / n_frame
    cn_al = (np.expand_dims(np.arange(1,len(traj_face_stat_al)+1),axis=1)) / atom_count[4] / n_frame
    cn_si = (np.expand_dims(np.arange(1,len(traj_face_stat_si)+1),axis=1)) / atom_count[5] / n_frame
    # # bin plot of Mg
    # hist_fig = plt.figure('Mg-O')
    # plt.xlabel('face area / Angstrom^2')
    # plt.ylabel('Count')
    # plt.title('Histogram of voronoi face area')
    # ax = hist_fig.gca()
    # ax.set_xlim(plot_axis_lim['Mg'][0:2])
    # ax.set_ylim(plot_axis_lim['Mg'][2:4])
    # ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Mg'][1])))
    # ax.set_yticks(np.arange(plot_axis_lim['Mg'][2],plot_axis_lim['Mg'][3]),10)
    # n,bins,patches=plt.hist(traj_face_stat_mg,int(traj_face_stat_mg[0]//bin_width))
    # # plt.show()
    # plt.savefig('Mg-O_hist.png')
    # cn plot of Mg
    cn_fig = plt.figure('CN(Mg-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Mg-O) - threshold of face area')
    plt.plot(traj_face_stat_mg,cn_mg,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Mg'][4:6])
    ax.set_ylim(plot_axis_lim['Mg'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Mg'][4],plot_axis_lim['Mg'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Mg'][6],plot_axis_lim['Mg'][7]))
    plt.grid()
    # plt.show()
    plt.savefig('Mg-O_cn.png')
    # # bin plot of Ca
    # hist_fig = plt.figure('Ca-O')
    # plt.xlabel('face area / Angstrom^2')
    # plt.ylabel('Count')
    # plt.title('Histogram of voronoi face area')
    # ax = hist_fig.gca()
    # ax.set_xlim(plot_axis_lim['Ca'][0:2])
    # ax.set_ylim(plot_axis_lim['Ca'][2:4])
    # ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Ca'][1])))
    # ax.set_yticks(np.arange(plot_axis_lim['Ca'][2],plot_axis_lim['Ca'][3]),10)
    # n,bins,patches=plt.hist(traj_face_stat_ca,int(traj_face_stat_ca[0]//bin_width))
    # # plt.show()
    # plt.savefig('Ca-O_hist.png')
    # cn plot of Ca
    cn_fig = plt.figure('CN(Ca-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Ca-O) - threshold of face area')
    plt.plot(traj_face_stat_ca,cn_ca,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Ca'][4:6])
    ax.set_ylim(plot_axis_lim['Ca'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Ca'][4],plot_axis_lim['Ca'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Ca'][6],plot_axis_lim['Ca'][7]))
    plt.grid()
    # plt.show()
    plt.savefig('Ca-O_cn.png')
    # # bin plot of Al
    # hist_fig = plt.figure('Al-O')
    # plt.xlabel('face area / Angstrom^2')
    # plt.ylabel('Count')
    # plt.title('Histogram of voronoi face area')
    # ax = hist_fig.gca()
    # ax.set_xlim(plot_axis_lim['Al'][0:2])
    # ax.set_ylim(plot_axis_lim['Al'][2:4])
    # ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Al'][1])))
    # ax.set_yticks(np.arange(plot_axis_lim['Al'][2],plot_axis_lim['Al'][3]),10)
    # n,bins,patches=plt.hist(traj_face_stat_al,int(traj_face_stat_al[0]//bin_width))
    # # plt.show()
    # plt.savefig('Al-O_hist.png')
    # cn plot of Al
    cn_fig = plt.figure('CN(Al-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Al-O) - threshold of face area')
    plt.plot(traj_face_stat_al,cn_al,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Al'][4:6])
    ax.set_ylim(plot_axis_lim['Al'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Al'][4],plot_axis_lim['Al'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Al'][6],plot_axis_lim['Al'][7]))
    plt.grid()
    # plt.show()
    plt.savefig('Al-O_cn.png')
    # # bin plot of Si
    # hist_fig = plt.figure('Si-O')
    # plt.xlabel('face area / Angstrom^2')
    # plt.ylabel('Count')
    # plt.title('Histogram of voronoi face area')
    # ax = hist_fig.gca()
    # ax.set_xlim(plot_axis_lim['Si'][0:2])
    # ax.set_ylim(plot_axis_lim['Si'][2:4])
    # ax.set_xticks(np.arange(0,math.ceil(plot_axis_lim['Si'][1])))
    # ax.set_yticks(np.arange(plot_axis_lim['Si'][2],plot_axis_lim['Si'][3]),10)
    # n,bins,patches=plt.hist(traj_face_stat_si,int(traj_face_stat_si[0]//bin_width))
    # # plt.show()
    # plt.savefig('Si-O_hist.png')
    # cn plot of Si
    cn_fig = plt.figure('CN(Si-O)')
    plt.xlabel('threshold / Angstrom^2')
    plt.ylabel('CN')
    plt.title('CN(Si-O) - threshold of face area')
    plt.plot(traj_face_stat_si,cn_si,'b-')
    ax = cn_fig.gca()
    ax.set_xlim(plot_axis_lim['Si'][4:6])
    ax.set_ylim(plot_axis_lim['Si'][6:])
    ax.set_xticks(np.arange(plot_axis_lim['Si'][4],plot_axis_lim['Si'][5]))
    ax.set_yticks(np.arange(plot_axis_lim['Si'][6],plot_axis_lim['Si'][7]))
    plt.grid()
    # plt.show()
    plt.savefig('Si-O_cn.png')
|
10,339 | 6d26e21caf8b21124a243eadfa585571b7476620 | import os
from flask import Flask
from flask_migrate import Migrate
from app.config import DevelopmentConfig, app_config
from app.models import db
from app.controllers import stadium_groups_controller
from app.controllers import stadiums_controller
from app.controllers import stadium_controller
from app.models.stadium_group import StadiumGroup
from app.models.stadium import Stadium
from app.models.address import Address
from app.models.user import User
from app.models.stadium_image import StadiumImage
def create_app(config_name):
    """Application factory: build and wire up the Flask app.

    config_name must be a key of ``app_config`` (e.g. "development").
    Returns the fully configured Flask application instance.
    """
    flask_app = Flask(__name__, instance_relative_config=True)
    flask_app.config.from_object(app_config[config_name])

    # Mount every controller blueprint (order matches URL registration).
    for controller in (stadium_groups_controller,
                       stadium_controller,
                       stadiums_controller):
        flask_app.register_blueprint(controller.bp)

    # Attach SQLAlchemy and Flask-Migrate to this app instance.
    db.init_app(flask_app)
    Migrate().init_app(flask_app, db)
    return flask_app
|
10,340 | 8b7c860597a9345dd7f274a3f9ce4a26db5ea125 | '''
Created on March 15, 2013
@author: nils
'''
from django.contrib import admin
from annotation_server.models import *
admin.site.register(Taxon)
admin.site.register(GenomeBuild)
|
10,341 | f1b18c9a0a75a074f0f318525d13fdabd54431b4 | from sqlalchemy import (
Column,
DateTime,
ForeignKey,
Integer,
String,
)
from sqlalchemy.orm import relationship
from sqlalchemy.ext.associationproxy import association_proxy
from clusterflunk.models.base import Base
class Group(Base):
    """A user-founded group inside a network, holding posts and members.

    NOTE(review): the primary-key ``id`` used by __repr__ is not declared
    here -- presumably inherited from ``Base``; confirm.
    """
    __tablename__ = 'groups'
    name = Column(String(100))
    description = Column(String(500))
    # Creation / last-edit timestamps.
    created = Column(DateTime)
    edited = Column(DateTime)
    # Owning network and founding user.
    network_id = Column(Integer, ForeignKey('networks.id'))
    founder_id = Column(Integer, ForeignKey('users.id'))
    founder = relationship('User', backref='founded_groups')
    posts = relationship('Post', backref='group')
    # Proxies that expose related users/questions directly; the backing
    # 'moderator', 'subscription' and 'broadcasts' relationships are
    # presumably defined on other models -- verify.
    moderators = association_proxy('moderator', 'user')
    subscribers = association_proxy('subscription', 'user')
    questions = association_proxy('broadcasts', 'question')
    def __repr__(self):
        return "<Group('%s')>" % (self.id)
|
10,342 | 724319362c76645e150ee1c37ed8e6dccb1732ef | # -*- coding: utf-8 -*-
import math
def _raise_dim_error(dim1, dim2):
raise ValueError("Vector Operands have %d != %d Dims!" % (dim1, dim2))
def _raise_type_error(desc, wanted, got):
raise TypeError("%s requires a %s, got a %s: %s"
% (desc, wanted, type(got).__name__, str(got)))
def unpack_data(data):
    """Coerce *data* (a 2-tuple or Vector2) into an (x, y) pair.

    Raises ValueError for a tuple of the wrong length and TypeError for
    any other type.
    """
    if isinstance(data, tuple):
        # A tuple operand must have exactly two components.
        if len(data) == 2:
            return data
        _raise_dim_error(2, len(data))
    elif isinstance(data, Vector2):
        return data.x, data.y
    else:
        _raise_type_error("Vector2 Operation", "Vector2 or 2-Tuple", data)
class Vector2(object):
    """A mutable 2D vector of floats.

    Binary operators accept another Vector2 or a plain 2-tuple (see
    unpack_data).  The augmented operators (+=, -=, *=, /=) and the
    set_* methods mutate self and return it so calls can be chained;
    the plain operators return new instances.
    """

    def __init__(self, x=0.0, y=0.0):
        self.x = float(x)
        self.y = float(y)

    def __neg__(self):
        return Vector2(-self.x, -self.y)

    def __iadd__(self, another):
        dx, dy = unpack_data(another)
        self.x += dx
        self.y += dy
        return self

    def __isub__(self, another):
        dx, dy = unpack_data(another)
        self.x -= dx
        self.y -= dy
        return self

    def __imul__(self, scalar):
        self.x *= scalar
        self.y *= scalar
        return self

    def __mul__(self, scalar):
        return Vector2(self.x * scalar, self.y * scalar)

    def __itruediv__(self, scalar):
        self.x /= scalar
        self.y /= scalar
        return self

    def __truediv__(self, scalar):
        return Vector2(self.x / scalar, self.y / scalar)

    def __add__(self, another):
        dx, dy = unpack_data(another)
        return Vector2(self.x + dx, self.y + dy)

    def __sub__(self, another):
        dx, dy = unpack_data(another)
        return Vector2(self.x - dx, self.y - dy)

    def __radd__(self, another):
        # tuple + Vector2: addition is commutative.
        return self + another

    def __rsub__(self, another):
        # tuple - Vector2 == -(Vector2 - tuple).
        return -(self - another)

    def __repr__(self):
        return "Vector2({0:.3f}, {1:.3f})".format(self.x, self.y)

    def __str__(self):
        return "({0:.3f}, {1:.3f})".format(self.x, self.y)

    def set_zero(self):
        """Reset to (0, 0); returns self."""
        self.x = 0.0
        self.y = 0.0
        return self

    def set_one(self):
        """Reset to (1, 1); returns self."""
        self.x = 1.0
        self.y = 1.0
        return self

    def set_unitx(self):
        """Reset to the x unit vector (1, 0); returns self."""
        self.x = 1.0
        self.y = 0.0
        return self

    def set_unity(self):
        """Reset to the y unit vector (0, 1); returns self."""
        self.x = 0.0
        self.y = 1.0
        return self

    def set_x(self, x):
        self.x = float(x)
        return self

    def set_y(self, y):
        self.y = float(y)
        return self

    def set_xy(self, x, y):
        self.x = float(x)
        self.y = float(y)
        return self

    def rotate(self, degrees):
        """Rotate self counter-clockwise by *degrees* in place; returns self."""
        r = math.radians(degrees)
        c = math.cos(r)
        s = math.sin(r)
        x, y = self.x, self.y
        self.x = x * c - y * s
        self.y = x * s + y * c
        return self

    def rotated_by(self, degrees):
        """Return a rotated copy; self is left untouched."""
        return Vector2(self.x, self.y).rotate(degrees)

    def dot(self, another):
        """Return the dot product with *another* (Vector2 or 2-tuple)."""
        ex, ey = unpack_data(another)
        return self.x * ex + self.y * ey

    def angle(self):
        """Return the direction of self in degrees within [0, 360).

        The zero vector maps to 0.0.
        """
        m = self.length
        if m == 0:
            return 0.0
        # BUGFIX: clamp to acos's domain -- floating-point rounding can
        # push x/m marginally outside [-1, 1] and raise ValueError.
        c = min(1.0, max(-1.0, self.x / m))
        a = math.degrees(math.acos(c))
        return a if self.y >= 0 else 360.0 - a

    def angle_to(self, another):
        """Return the unsigned angle in degrees between self and *another*.

        Raises ZeroDivisionError if either vector has zero length
        (unchanged from the original behaviour).
        """
        ex, ey = unpack_data(another)
        dot_r = self.x * ex + self.y * ey
        len_s = self.length
        len_a = math.sqrt(ex ** 2 + ey ** 2)
        # BUGFIX: for (anti)parallel vectors the quotient can round to
        # e.g. 1.0000000000000002, outside acos's domain; clamp it.
        cosa = min(1.0, max(-1.0, dot_r / len_s / len_a))
        return math.degrees(math.acos(cosa))

    @property
    def length(self):
        """Euclidean norm of the vector."""
        return math.sqrt(self.x ** 2 + self.y ** 2)

    def squared_distance(self, another):
        """Squared Euclidean distance to *another* (avoids the sqrt)."""
        ex, ey = unpack_data(another)
        return (self.x - ex) ** 2 + (self.y - ey) ** 2

    def distance(self, another):
        """Euclidean distance to *another*."""
        return math.sqrt(self.squared_distance(another))
|
10,343 | cf3cea841cd34533d939b0264fb071b70df3070f | #!/usr/bin/python
def get_clustering():
    """Read 'clusterings.txt' and build the topic/cluster -> docs mapping.

    Each non-empty line has the form ``<topicid> <clusterid> <doc> [<doc> ...]``.

    Returns a tuple (cl, cli) where
      cl  maps "topicid:clusterid" -> list of document ids, and
      cli maps each document id -> its "topicid:clusterid" key.

    A document that appears under more than one cluster keeps its first
    key; subsequent occurrences print "error" (preserving the original
    best-effort behaviour).
    """
    cl = {}
    cli = {}
    # BUGFIX: 'with' guarantees the handle is closed (it was leaked before).
    with open('clusterings.txt') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank/trailing lines instead of crashing on split.
                continue
            topicid, clusterid, docs = line.split(' ', 2)
            docs = docs.split()
            key = "%s:%s" % (topicid, clusterid)
            cl[key] = docs
            for doc in docs:
                # BUGFIX: dict.has_key() was removed in Python 3; 'in'
                # works in both Python 2 and 3.
                if doc not in cli:
                    cli[doc] = key
                else:
                    print("error")
    return (cl, cli)
10,344 | 9b9d012e10333cce663aad0f1c5a5795d8529bcc | #!/usr/bin/env python3.5
'''
openlut: A package for managing and applying 1D and 3D LUTs.
Color Management: openlut deals with the raw RGB values, does its work, then puts out images with correct raw RGB values - a no-op.
Dependencies:
-numpy: Like, everything.
-wand: Saving/loading images.
-PyOpenGL - For image viewer and other future graphics processing.
-pygame - For the physical display in the viewer.
-scipy - OPTIONAL: For spline interpolation.
Easily get all deps: sudo pip3 install numpy wand scipy PyOpenGL pygame
*Make sure you get the Python 3.X version of these packages!!!
LICENCE:
The MIT License (MIT)
Copyright (c) 2016 Sofus Rose
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sys
#~ from lib.files import Log #For Development
# CLI entry point: the only supported flag is "-t", which runs the
# openlut image test suite; with no arguments a usage hint is printed.
if __name__ == "__main__" :
	if not sys.argv[1:]: print('Use -t to test!'); exit()
	
	if sys.argv[1] == '-t' :
		import tests.suite
		tests.suite.runTest('img_test', 'testpath')
|
10,345 | f53a3c05ad8d04f2706c844bb63028d97bbe7b37 | # Here we will read xml file using python.
# Importing libraries/modules
import os
import codecs
import csv
import bz2
import time
import json
import logging
import argparse
class Requirements():
    """Runtime configuration bundle for the WikiData dump parser.

    Built once from the argparse namespace; resolves defaults for the
    dump location, output CSV paths, text encoding and optional logging,
    and carries a few small static helpers used by main().
    """

    def __init__(self, args):
        # Folder holding the WikiData JSON dump; defaults to ./Raw.
        dump_path = args.dump_path
        if dump_path is None:
            dump_path = os.path.join(r".", "Raw")
        # Dump file name; defaults to the canonical latest-all dump.
        latest_all_json = args.file_name
        if latest_all_json is None:
            latest_all_json = "latest-all.json.bz2"
        self.filename = os.path.join(dump_path, latest_all_json)
        # Output folder for the generated CSV files; defaults to ./CSV.
        save_path = args.save_path
        if save_path is None:
            save_path = os.path.join(r".", "CSV")
        self.encoding = args.encode
        if self.encoding is None:
            self.encoding = "utf-8"
        # Optional append-mode log file for progress and exceptions.
        self.save_log = args.save_log
        if self.save_log:
            logging.basicConfig(filename="1_WikiData_Main_Dump_Parser.log"
                                , level="DEBUG", filemode="a"
                                , format="%(asctime)s - %(levelname)s: %(message)s"
                                , datefmt="%m/%d/%Y %I:%M:%S %p")
        self.display_message = args.display_message
        # One output CSV per extracted datavalue kind.
        self.file_identification = os.path.join(save_path, "WD_identification_item.csv")
        self.file_wikibase_entityid = os.path.join(save_path, "WD_wikibase_entityid.csv")
        self.file_quantity = os.path.join(save_path, "WD_quantity.csv")
        self.file_globecoordinate = os.path.join(save_path, "WD_globecoordinate.csv")
        self.file_time = os.path.join(save_path, "WD_time.csv")

    @staticmethod
    def hms_string(sec_elapsed):
        # Format an elapsed-seconds value as "H:MM:SS.ss".
        h = int(sec_elapsed / (60 * 60))
        m = int((sec_elapsed % (60 * 60)) / 60)
        s = sec_elapsed % 60
        return "{}:{:>02}:{:>05.2f}".format(h, m, s)

    @staticmethod
    def ent_values(ent):
        # Extract the identification row [type, item id, English label,
        # English description, enwiki title] from one entity dict,
        # substituting "" wherever a language/site entry is absent.
        wd_type = ent["type"]
        wd_item = ent["id"]
        if ent["labels"].get("en", "not found") == "not found":
            wd_label = ""
        else:
            wd_label = ent["labels"]["en"]["value"]
        if ent["descriptions"].get("en", "not found") == "not found":
            wd_desc = ""
        else:
            wd_desc = ent["descriptions"]["en"]["value"]
        if ent["sitelinks"].get("enwiki", "not found") == "not found":
            wd_title = ""
        else:
            wd_title = ent["sitelinks"]["enwiki"]["title"]
        return([wd_type, wd_item, wd_label, wd_desc, wd_title])

    @staticmethod
    def concat_claims(claims):
        # Flatten the {property_id: [claim, ...]} mapping into a single
        # generator over all claims.
        for rel_id, rel_claims in claims.items():
            for claim in rel_claims:
                yield claim

    def __repr__(self):
        return "all requirements saved in this object"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d","--dump_path"
, help = "Provide a path containing WikiData JSON data dump. Default Option: a 'Raw' folder within the existing directory."
, type=str)
parser.add_argument("-f","--file_name"
, help = "Provide filename for WikiData JSON data dump. Default Option: 'latest-all.json.bz2'."
, type=str)
parser.add_argument("-s","--save_path"
, help = "Provide a path to save output csv files. Default Option: a 'CSV' folder within the existing directory."
, type=str)
parser.add_argument("-c","--encode"
, help = "Provide a encoding code. Default Option: 'utf-8'."
, type=str)
parser.add_argument("-l", "--save_log"
, help="Save log flag."
, action="store_true")
parser.add_argument("-m", "--display_message"
, help="Display messsage to the consol flag."
, action="store_true")
args = parser.parse_args()
req = Requirements(args)
i = 0
start_time = time.time()
with codecs.open(req.file_identification, "w", req.encoding) as op_identification \
,codecs.open(req.file_wikibase_entityid, "w", req.encoding) as op_wikibase_entityid \
,codecs.open(req.file_quantity, "w", req.encoding) as op_quantity \
,codecs.open(req.file_globecoordinate, "w", req.encoding) as op_globecoordinate \
,codecs.open(req.file_time, "w", req.encoding) as op_time:
opw_identification = csv.writer(op_identification, quoting=csv.QUOTE_MINIMAL)
opw_identification.writerow(["WD_Type", "WD_WikiData_Item", "WD_Label", "WD_Description", "WD_Title"])
opw_wikibase_entityid = csv.writer(op_wikibase_entityid, quoting=csv.QUOTE_MINIMAL)
opw_wikibase_entityid.writerow(["WD_Subject","WD_Predicate","WD_Object"])
opw_quantity = csv.writer(op_quantity, quoting=csv.QUOTE_MINIMAL)
opw_quantity.writerow(["WD_Subject","WD_Predicate","WD_Object","WD_Units"])
opw_globecoordinate = csv.writer(op_globecoordinate, quoting=csv.QUOTE_MINIMAL)
opw_globecoordinate.writerow(["WD_Subject","WD_Predicate","WD_Object","WD_Precision"])
opw_time = csv.writer(op_time, quoting=csv.QUOTE_MINIMAL)
opw_time.writerow(["WD_Subject","WD_Predicate","WD_Object","WD_Precision"])
with bz2.BZ2File(req.filename, "rb") as f:
for line in f:
try:
line = line.decode(req.encoding, errors="ignore")
if line in ("[\n", "]\n"):
pass
else:
ent = json.loads(line.rstrip('\n,'))
if ent["type"] != "item":
continue
opw_identification.writerow(req.ent_values(ent))
claims = req.concat_claims(ent["claims"])
e1 = ent["id"]
for claim in claims:
mainsnak = claim["mainsnak"]
rel = mainsnak["property"]
snak_datatype = mainsnak["datatype"]
if mainsnak['snaktype'] == "value":
snak_value = mainsnak["datavalue"]["value"]
if snak_datatype in ("wikibase-item", "wikibase-property"):
opw_wikibase_entityid.writerow([e1, rel, snak_value["id"]])
elif snak_datatype == "quantity":
e2 = (snak_value["amount"],snak_value["unit"].strip(r"http://www.wikidata.org/entity/"))
opw_quantity.writerow([e1, rel, e2[0],e2[1]])
elif snak_datatype == "globe-coordinate":
e2 = ((snak_value["latitude"],snak_value["longitude"]),snak_value["precision"])
opw_globecoordinate.writerow([e1, rel, e2[0], e2[1]])
elif snak_datatype == "time":
e2 = (snak_value["time"],snak_value["precision"])
opw_time.writerow([e1, rel, e2[0],e2[1]])
else:
pass
i = i + 1
if i%1000000 == 0 & req.display_message:
print("{} number of item processed".format(i))
except:
if req.save_log:
logging.exception("Exception occurred", exc_info=True)
else:
pass
elapsed_time = time.time() - start_time
msg = msg = "Total item processed: {:,} \n Elapsed time: {}".format(i-1, req.hms_string(elapsed_time))
if req.display_message:
print(msg)
if req.save_log:
logging.info(msg)
if __name__ == "__main__":
main()
|
10,346 | 13cfed24aa13e33bd0562ea0d5022d72aca0e5c6 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from map_file/Lane.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Lane(genpy.Message):
_md5sum = "14eee265f5c4b4e93a294e03e3451866"
_type = "map_file/Lane"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 lnid
int32 did
int32 blid
int32 flid
int32 bnid
int32 fnid
int32 jct
int32 blid2
int32 blid3
int32 blid4
int32 flid2
int32 flid3
int32 flid4
int32 clossid
float64 span
int32 lcnt
int32 lno
"""
__slots__ = ['lnid','did','blid','flid','bnid','fnid','jct','blid2','blid3','blid4','flid2','flid3','flid4','clossid','span','lcnt','lno']
_slot_types = ['int32','int32','int32','int32','int32','int32','int32','int32','int32','int32','int32','int32','int32','int32','float64','int32','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
lnid,did,blid,flid,bnid,fnid,jct,blid2,blid3,blid4,flid2,flid3,flid4,clossid,span,lcnt,lno
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Lane, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.lnid is None:
self.lnid = 0
if self.did is None:
self.did = 0
if self.blid is None:
self.blid = 0
if self.flid is None:
self.flid = 0
if self.bnid is None:
self.bnid = 0
if self.fnid is None:
self.fnid = 0
if self.jct is None:
self.jct = 0
if self.blid2 is None:
self.blid2 = 0
if self.blid3 is None:
self.blid3 = 0
if self.blid4 is None:
self.blid4 = 0
if self.flid2 is None:
self.flid2 = 0
if self.flid3 is None:
self.flid3 = 0
if self.flid4 is None:
self.flid4 = 0
if self.clossid is None:
self.clossid = 0
if self.span is None:
self.span = 0.
if self.lcnt is None:
self.lcnt = 0
if self.lno is None:
self.lno = 0
else:
self.lnid = 0
self.did = 0
self.blid = 0
self.flid = 0
self.bnid = 0
self.fnid = 0
self.jct = 0
self.blid2 = 0
self.blid3 = 0
self.blid4 = 0
self.flid2 = 0
self.flid3 = 0
self.flid4 = 0
self.clossid = 0
self.span = 0.
self.lcnt = 0
self.lno = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_14id2i.pack(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 72
(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno,) = _struct_14id2i.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_14id2i.pack(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 72
(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno,) = _struct_14id2i.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_14id2i = struct.Struct("<14id2i")
|
10,347 | dcae57870138f70581d9d555558173263a8d4a59 | __author__ = 'mithrawnuruodo'
from Stepper import SoncebosStepper
from DataModels import RawData, Data, PrintingTaskData |
10,348 | 95309fa1a5a5288d32d870a9c6d1a034906f5c6d | # Generated by Django 2.0 on 2021-05-10 06:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0008_auto_20210510_1131'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='otp',
),
migrations.RemoveField(
model_name='user',
name='password',
),
migrations.AddField(
model_name='customer',
name='otp',
field=models.IntegerField(default=459),
),
migrations.AddField(
model_name='customer',
name='password',
field=models.CharField(default='password', max_length=20),
),
]
|
10,349 | 84d7c272e009fdf25f69ffdc8f15c42853d32e3e | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import logging
import re
class WikiPipeline(object):
    """Scrapy pipeline that logs the minimum-age-like record in an item.

    Items are expected to carry a 'record' list of text lines and a
    'year' value.  Lines matching " DD, ..." are parsed into whitespace
    fields and the minimum entry (compared by its first field) is logged.
    """

    def process_item(self, item, spider):
        if None in [item[k] for k in item]:
            # Imported at the point of use so the module stays importable
            # outside a Scrapy environment; the original referenced
            # DropItem without ever importing it (NameError at drop time).
            from scrapy.exceptions import DropItem
            raise DropItem("Missing value in %s" % item)

        matches = []
        for line in item['record']:
            res = re.search(r'( \d\d, .*)', line)
            if res:
                matches.append(res.group(1).replace(',', ' ').strip().split())

        if matches:
            logging.info(item['year'])
            # NOTE(review): min() compares the first field as a string,
            # so "9" sorts after "10" -- confirm lexicographic order is
            # actually intended here.
            logging.info(' '.join(min(matches, key=(lambda x: x[0]))))

        # BUGFIX: a pipeline must return the item so later pipelines and
        # feed exports receive it; the original implicitly returned None,
        # silently dropping every item.
        return item
10,350 | bfba4caa5f13f30ba0d310c0e55d8ebd7bba728d | # -*- coding: utf-8 -*-
"""
fabrik.ext.npm
-------------------------
"""
from fabric.decorators import task
from fabric.state import env
def install():
    """Run ``npm install`` through the fabric env's configured runner."""
    env.run("npm install")
|
10,351 | 64f62b598b53c57fdc870e753bb2fb1594b0c3c9 | from django.shortcuts import render, reverse, HttpResponseRedirect
from django_mptt_hierarchy.models import File
from django_mptt_hierarchy.forms import FileAddForm
from django.views import View
def homepage_view(request):
    """Render the homepage listing every File in the MPTT tree."""
    return render(request, "homepage.html", {"files": File.objects.all()})
def file_add_view(request):
    """Create a new File from a submitted FileAddForm.

    GET renders an empty form; a valid POST creates the File and
    redirects to the homepage; an invalid POST re-renders the bound
    form so its errors are shown.
    """
    # Removed a dead leading `pass` statement and the redundant
    # `form = None` initialiser from the original.
    if request.method == "POST":
        form = FileAddForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            File.objects.create(
                name=data["name"],
                parent=data["parent"]
            )
            return HttpResponseRedirect(reverse('homepage'))
    else:
        form = FileAddForm()
    return render(request, "add_file.html", {"form": form})
10,352 | 0d7c3f33c1d1a53905911ef255de12197de62ccd | # cython: language_level=3
from __future__ import absolute_import
from .PyrexTypes import CType, CTypedefType, CStructOrUnionType
import cython
try:
import pythran
pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9)
pythran_is_pre_0_9_6 = tuple(map(int, pythran.__version__.split('.')[0:3])) < (0, 9, 6)
except ImportError:
pythran = None
pythran_is_pre_0_9 = True
pythran_is_pre_0_9_6 = True
if pythran_is_pre_0_9_6:
pythran_builtins = '__builtin__'
else:
pythran_builtins = 'builtins'
# Pythran/Numpy specific operations
def has_np_pythran(env):
    """Report whether the np_pythran directive is enabled on *env*.

    Returns False for a missing env.  Otherwise mirrors the original
    lazy 'and': a falsy directives attribute (None, {}) is returned
    as-is, else the directive's value (default False).
    """
    if env is None:
        return False
    directives = getattr(env, 'directives', None)
    if not directives:
        return directives
    return directives.get('np_pythran', False)
@cython.ccall
def is_pythran_supported_dtype(type_):
if isinstance(type_, CTypedefType):
return is_pythran_supported_type(type_.typedef_base_type)
return type_.is_numeric
def pythran_type(Ty, ptype="ndarray"):
if Ty.is_buffer:
ndim,dtype = Ty.ndim, Ty.dtype
if isinstance(dtype, CStructOrUnionType):
ctype = dtype.cname
elif isinstance(dtype, CType):
ctype = dtype.sign_and_name()
elif isinstance(dtype, CTypedefType):
ctype = dtype.typedef_cname
else:
raise ValueError("unsupported type %s!" % dtype)
if pythran_is_pre_0_9:
return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim)
else:
return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim))
if Ty.is_pythran_expr:
return Ty.pythran_type
#if Ty.is_none:
# return "decltype(pythonic::builtins::None)"
if Ty.is_numeric:
return Ty.sign_and_name()
raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty)))
@cython.cfunc
def type_remove_ref(ty):
return "typename std::remove_reference<%s>::type" % ty
def pythran_binop_type(op, tA, tB):
if op == '**':
return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % (
pythran_type(tA), pythran_type(tB))
else:
return "decltype(std::declval<%s>() %s std::declval<%s>())" % (
pythran_type(tA), op, pythran_type(tB))
def pythran_unaryop_type(op, type_):
return "decltype(%sstd::declval<%s>())" % (
op, pythran_type(type_))
@cython.cfunc
def _index_access(index_code, indices):
indexing = ",".join([index_code(idx) for idx in indices])
return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
def _index_type_code(index_with_type):
idx, index_type = index_with_type
if idx.is_slice:
n = 2 + int(not idx.step.is_none)
return "pythonic::%s::functor::slice{}(%s)" % (
pythran_builtins,
",".join(["0"]*n))
elif index_type.is_int:
return "std::declval<%s>()" % index_type.sign_and_name()
elif index_type.is_pythran_expr:
return "std::declval<%s>()" % index_type.pythran_type
raise ValueError("unsupported indexing type %s!" % index_type)
def _index_code(idx):
if idx.is_slice:
values = idx.start, idx.stop, idx.step
if idx.step.is_none:
func = "contiguous_slice"
values = values[:2]
else:
func = "slice"
return "pythonic::types::%s(%s)" % (
func, ",".join((v.pythran_result() for v in values)))
elif idx.type.is_int:
return to_pythran(idx)
elif idx.type.is_pythran_expr:
return idx.pythran_result()
raise ValueError("unsupported indexing type %s" % idx.type)
def pythran_indexing_type(type_, indices):
return type_remove_ref("decltype(std::declval<%s>()%s)" % (
pythran_type(type_),
_index_access(_index_type_code, indices),
))
def pythran_indexing_code(indices):
return _index_access(_index_code, indices)
def np_func_to_list(func):
    """Flatten a chain of numpy attribute nodes into an outer-to-inner
    name path.

    A node that is not a numpy attribute yields []; e.g. a node tree for
    ``np.linalg.norm`` yields ['linalg', 'norm'].
    """
    parts = []
    node = func
    while node.is_numpy_attribute:
        parts.append(node.attribute)
        node = node.obj
    parts.reverse()
    return parts
if pythran is None:
def pythran_is_numpy_func_supported(name):
return False
else:
def pythran_is_numpy_func_supported(func):
CurF = pythran.tables.MODULES['numpy']
FL = np_func_to_list(func)
for F in FL:
CurF = CurF.get(F, None)
if CurF is None:
return False
return True
def pythran_functor(func):
func = np_func_to_list(func)
submodules = "::".join(func[:-1] + ["functor"])
return "pythonic::numpy::%s::%s" % (submodules, func[-1])
def pythran_func_type(func, args):
args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
return "decltype(%s{}(%s))" % (pythran_functor(func), args)
@cython.ccall
def to_pythran(op, ptype=None):
op_type = op.type
if op_type.is_int:
# Make sure that integer literals always have exactly the type that the templates expect.
return op_type.cast_code(op.result())
if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]):
return op.result()
if op.is_none:
return "pythonic::%s::None" % pythran_builtins
if ptype is None:
ptype = pythran_type(op_type)
assert op.type.is_pyobject
return "from_python<%s>(%s)" % (ptype, op.py_result())
@cython.cfunc
def is_type(type_, types):
for attr in types:
if getattr(type_, attr, False):
return True
return False
def is_pythran_supported_node_or_none(node):
return node.is_none or is_pythran_supported_type(node.type)
@cython.ccall
def is_pythran_supported_type(type_):
pythran_supported = (
"is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex")
return is_type(type_, pythran_supported) or is_pythran_expr(type_)
def is_pythran_supported_operation_type(type_):
pythran_supported = (
"is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
return is_type(type_,pythran_supported) or is_pythran_expr(type_)
@cython.ccall
def is_pythran_expr(type_):
return type_.is_pythran_expr
def is_pythran_buffer(type_):
return (type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and
type_.mode in ("c", "strided") and not type_.cast)
def pythran_get_func_include_file(func):
func = np_func_to_list(func)
return "pythonic/numpy/%s.hpp" % "/".join(func)
def include_pythran_generic(env):
# Generic files
env.add_include_file("pythonic/core.hpp")
env.add_include_file("pythonic/python/core.hpp")
env.add_include_file("pythonic/types/bool.hpp")
env.add_include_file("pythonic/types/ndarray.hpp")
env.add_include_file("pythonic/numpy/power.hpp")
env.add_include_file("pythonic/%s/slice.hpp" % pythran_builtins)
env.add_include_file("<new>") # for placement new
for i in (8, 16, 32, 64):
env.add_include_file("pythonic/types/uint%d.hpp" % i)
env.add_include_file("pythonic/types/int%d.hpp" % i)
for t in ("float", "float32", "float64", "set", "slice", "tuple", "int",
"complex", "complex64", "complex128"):
env.add_include_file("pythonic/types/%s.hpp" % t)
|
10,353 | 9250e0b366c00826c2ffd9b36c3d6e0c97b57798 | import pexpect
import re
import _thread
import threading
import json
import math
import time
from queue import Queue
from threading import Timer
from time import sleep
from videoplayer import VideoPlayer
from gpiocontroller import GPIOController
from datastore import DataStore
from playlist import Playlist
from link import Link
import hashlib
import json
_videoLink = Link()
_gpioLink = Link()
def StartVideoPlayerThread():
    # Thread target: run the video player loop on the shared _videoLink.
    player = VideoPlayer(_videoLink)
    player.Run()
def StartGPIOThread(self=None):
    """Thread target: run the GPIO controller on the shared _gpioLink.

    BUGFIX: the original declared a required 'self' parameter on this
    module-level function, so threading.Thread(target=StartGPIOThread)
    (called with no args, as in test()) raised TypeError at thread
    start.  The parameter is now optional and ignored, staying
    backward-compatible with any caller that passed a value.
    """
    gpio = GPIOController(_gpioLink)
    gpio.Run()
def test():
# Start threads
videothread = threading.Thread(name='videoplayerthread', target=StartVideoPlayerThread)
videothread.start()
gpiothread = threading.Thread(name='gpiothread', target=StartGPIOThread)
gpiothread.start()
# Start db
db = DataStore()
f = './big_buck_bunny_720p_30mb.mp4'
vidplaymsg = '{"action":"play", "file":"./big_buck_bunny_720p_30mb.mp4"}'
vidstopmsg = '{"action":"stop"}'
gpiomsg1 = '{"action":"reset"}'
gpiomsg2 = '{"action":"setup_input", "inputs":"pb01,pb02", "debounce":"250", "throttle":"1"}'
gpiomsg3 = '{"action":"led_on", "outputs":"led01,led02"}'
gpiomsg4 = '{"action":"led_off", "outputs":"led01,led02"}'
gpiomsg5 = '{"action":"led_blink", "outputs":"led01,led02", "interval":"1.5"}'
GpioCmdQ.put_nowait('{"action":"exit"}')
VideoPlayerQ.put_nowait('{"action":"exit"}')
gpiothread.join()
videothread.join()
return
######################################################
gg = 0
while True:
sleep(1.0)
print("sleeping")
gg += 1
## ## GPIO module tests
## ## -----------------
## if gg == 5:
## GpioCmdQ.put_nowait(gpiomsg5)
##
## ## Video module tests
## ## ------------------
## if gg == 10:
## print(vidplaymsg)
## VideoPlayerQ.put_nowait(vidplaymsg)
## if gg == 15:
## VideoPlayerQ.put_nowait(vidstopmsg)
## if gg == 20:
## VideoPlayerQ.put_nowait(vidplaymsg)
def main():
    """Entry point: announce startup and run the Baker controller.

    NOTE(review): ``Baker`` is neither imported nor defined in this
    module, so calling main() raises NameError as written -- confirm
    the missing import.
    """
    print("Starting pie")
    # Start baker
    baker = Baker()
    baker.Start()
if __name__ == "__main__":
main()
|
10,354 | 9d9108b8e34005b0a218c63e0bff09bad8b0ac20 | import re
def hey(said):
    """Reply like Bob the teenager.

    Shouting (all caps) gets 'Whoa, chill out!', a question gets
    'Sure.', input without any letters or digits gets 'Fine. Be that
    way!', and anything else gets 'Whatever.'.
    """
    if said.isupper():
        return 'Whoa, chill out!'
    if said.endswith('?'):
        return 'Sure.'
    if not re.search('[a-zA-Z0-9]', said):
        return 'Fine. Be that way!'
    if said:
        return 'Whatever.'
|
10,355 | f9773711fb486582a61c605812563f5d907e02e3 | from utils import *
import matplotlib.pyplot as plt
# *************************************
# Question 2
# *************************************
# Utilisation de la fonciton rand_gauss
n=200
m=[1, 2]
sigma=[0.1, 0.2]
data = rand_gauss(n, m, sigma)
plt.hist(data[:,0])
plt.hist(data[:,1])
plot_2d(data)
# Utilisation de la fonciton rand_bi_gauss
n1=200
m1=[1, 2]
sigma1=[0.1, 0.2]
n2=300
m2=[2, 4]
sigma2=[0.2, 0.3]
data = rand_bi_gauss(n1, n2, m1, m2, sigma1, sigma2)
plot_2d(data[:,0:-1], data[:,-1])
# Utilisation de la fonciton rand_tri_gauss
n1=200
m1=[1, 2]
sigma1=[0.1, 0.2]
n2=300
m2=[2, 4]
sigma2=[0.2, 0.3]
n3=300
m3=[3, 5]
sigma3=[0.3, 0.4]
data = rand_tri_gauss(n1, n2, n3, m1, m2, m3, sigma1, sigma2, sigma3)
plot_2d(data[:,0:-1], data[:,-1])
# Utilisation de la fonciton rand_clown
n1=200
n2=300
s1=2
s2=4
data = rand_clown(n1, n2, s1, s2)
plot_2d(data[:,0:-1], data[:,-1])
# Utilisation de la fonciton rand_checkers
n1=200
n2=300
n3=250
n4=350
s=0.01
data = rand_checkers(n1, n2, n3, n4, s)
plot_2d(data[:,0:-1], data[:,-1])
# *************************************
# Question 3
# *************************************
from sklearn import tree
trainingSet = rand_checkers(114, 114, 114, 114, 0.2)
validationSet = rand_checkers(114, 114, 114, 114, 0.2)
plot_2d(trainingSet[:,0:-1], trainingSet[:,-1])
plot_2d(validationSet[:,0:-1], validationSet[:,-1])
clf_gini = tree.DecisionTreeClassifier(criterion='gini', max_depth=40)
clf_gini.fit(trainingSet[:,0:-1], trainingSet[:,-1])
score_gini_training = clf_gini.score(trainingSet[:,0:-1], trainingSet[:,-1])
score_gini_validation = clf_gini.score(validationSet[:,0:-1], validationSet[:,-1])
scores = np.zeros((40,4))
for i in range(40):
clf_gini = tree.DecisionTreeClassifier(criterion='gini', max_depth=i+1)
clf_entropy = tree.DecisionTreeClassifier(criterion='entropy', max_depth=i+1)
clf_gini.fit(trainingSet[:,0:-1], trainingSet[:,-1])
clf_entropy.fit(trainingSet[:,0:-1], trainingSet[:,-1])
score_gini_training = clf_gini.score(trainingSet[:,0:-1], trainingSet[:,-1])
score_gini_validation = clf_gini.score(validationSet[:,0:-1], validationSet[:,-1])
score_entropy_training = clf_entropy.score(trainingSet[:,0:-1], trainingSet[:,-1])
score_entropy_validation = clf_entropy.score(validationSet[:,0:-1], validationSet[:,-1])
scores[i,0] = score_gini_training
scores[i,1] = score_gini_validation
scores[i,2] = score_entropy_training
scores[i,3] = score_entropy_validation
plt.plot(scores[:,0:2])
plt.plot(scores[:,2:4])
# *************************************
# Question 4
# *************************************
# Retrain a tree at the best entropy depth found above and plot its
# decision frontier on the validation set.
score_entroy_max = max(scores[:,3])
# NOTE(review): if several depths tie for the best score, this dot
# product sums their indices and yields a meaningless depth -- confirm
# whether argmax semantics were intended.
best_entropy_dept = np.dot(range(0,40), (scores[:,3]==score_entroy_max)) + 1
# BUGFIX: max_depth was passed positionally after a keyword argument,
# which is a SyntaxError; it must be passed by keyword.
clf_entropy = tree.DecisionTreeClassifier(criterion='entropy', max_depth=best_entropy_dept)
# BUGFIX: the classifier was never fitted, so handing its predict to
# frontiere() below would fail; fit on the training set first.
clf_entropy.fit(trainingSet[:,0:-1], trainingSet[:,-1])
plot_2d(validationSet[:,0:-1], validationSet[:,-1])
decision_f = clf_entropy.predict
frontiere(decision_f, validationSet[:,0:-1])
# *************************************
# Question 5
# *************************************
import os
f = tree.export_graphviz(clf_entropy, out_file="my_tree.dot") # clf: tree classifier
os.system("dot -Tpdf my_tree.dot -o my_tree.pdf")
# os.system("evince my_tree.pdf") # Does not work on windows
# *************************************
# Question 6
# *************************************
newValidationSet = rand_checkers(50, 50, 50, 50, 0.2)
score = clf_entropy.score(newValidationSet[:,0:-1], newValidationSet[:,-1])
# *************************************
# Question 7
# *************************************
from sklearn import datasets
digits = datasets.load_digits()
X, y = digits.data, digits.target
X_tranining = X[0:1000,:]
y_training = y[0:1000]
X_validation = X[1001:,:]
y_validation = y[1001:]
scores = np.zeros((40,4))
for i in range(40):
clf_gini = tree.DecisionTreeClassifier(criterion='gini', max_depth=i+1)
clf_entropy = tree.DecisionTreeClassifier(criterion='entropy', max_depth=i+1)
clf_gini.fit(X_tranining, y_training)
clf_entropy.fit(X_tranining, y_training)
score_gini_training = clf_gini.score(X_tranining, y_training)
score_gini_validation = clf_gini.score(X_validation, y_validation)
score_entropy_training = clf_entropy.score(X_tranining, y_training)
score_entropy_validation = clf_entropy.score(X_validation, y_validation)
scores[i,0] = score_gini_training
scores[i,1] = score_gini_validation
scores[i,2] = score_entropy_training
scores[i,3] = score_entropy_validation
plt.plot(scores[:,0:2])
plt.plot(scores[:,2:4])
# TO DO ...
# *************************************
# Question 8
# *************************************
from scipy.stats import binom
import numpy as np
import matplotlib.pyplot as plt
# Ensemble of m = 20 models, each answering correctly with probability 0.7.
m, p = 20, 0.7           # Binomial parameters
x = np.arange(0, m + 1)  # Possible numbers of correct answers
pmf = binom.pmf(x, m, p)  # Probability mass function
plt.figure()
plt.plot(x, pmf, 'bo', ms=8)
plt.vlines(x, 0, pmf, colors='b', lw=5, alpha=0.5)
# Majority vote of the m models: an exact tie at m/2 counts half, any strict
# majority counts fully.  Integer (floor) division is required for indexing:
# under Python 3, m / 2 is a float and raises an IndexError.
coeffs = np.zeros(m + 1)
coeffs[m // 2] = 0.5
coeffs[(m // 2) + 1:m + 1] = 1
proba_agrege = np.dot(coeffs, pmf)
# print() calls work under both Python 2 and 3 (the originals were
# Python-2-only print statements).
print('Probabilite individuelle = %s' % (p))
print('Probabilite aggregee = %s' % (proba_agrege))
# *************************************
# Question 9
# *************************************
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
import numpy as np
n = 80
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(n, 1), axis=0)
y = np.sin(X).ravel()
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y[::5] += 1 * (0.5 - rng.rand(16))
trees = []
predicts = []
nb_trees = 10
max_depth = 5
for i in range(0, nb_trees):
ind_boot = np.random.randint(0, n, n) # Bagging
X_boot = X[ind_boot, :]
y_boot = y[ind_boot]
trees.append(tree.DecisionTreeRegressor(max_depth=max_depth))
trees[-1].fit(X_boot, y_boot)
predicts.append(trees[-1].predict(X_test))
predicts_mean = np.array(predicts).mean(axis=0)
# Plot the results
import pylab as plt
plt.close('all')
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, predicts_mean, c="g", label="Tree (depth: %d)" % max_depth)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
# *************************************
# Question 10
# *************************************
# Quand m augmente la probabilite agregee augmente aussi (quesiton 8)
# Quand max_depth augmente la precision augmente mais il y a overfitting
# *************************************
# Question 11
# *************************************
# *************************************
# Question 12
# *************************************
# *************************************
# Question 13
# *************************************
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
import numpy as np
n = 200  # Sample size
s = 30   # Size of each sub-sample
nb_trees = 10
max_depth = 5
# Create a random dataset: a noisy sine curve.
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(n, 1), axis=0)
y = np.sin(X).ravel()
X_test = np.arange(0.0, 5, 0.01)[:, np.newaxis]
# Add noise to every 5th target.  n // 5 (integer division) is required:
# rng.rand() rejects the float produced by n / 5 under Python 3.
y[::5] += 1 * (0.5 - rng.rand(n // 5))
trees = []
predicts = []
for i in range(0, nb_trees):
    # Each tree sees s points drawn without replacement ("pasting").
    ind_boot = np.random.permutation(n)[:s]
    X_boot = X[ind_boot, :]
    y_boot = y[ind_boot]
    trees.append(tree.DecisionTreeRegressor(max_depth=max_depth))
    trees[-1].fit(X_boot, y_boot)
    predicts.append(trees[-1].predict(X_test))
# Aggregate the ensemble by averaging the per-tree predictions.
predicts_mean = np.array(predicts).mean(axis=0)
# Plot the results
import pylab as plt
plt.close('all')
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, predicts_mean, c="g", label="Tree (depth: %d)" % max_depth)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
|
10,356 | 8bd097acf85b51e4e7c9cd5228c40a9ccd084f3d | import os
import csv
import torch
from itertools import groupby
from typing import List, Dict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from collections import Counter
from sklearn.utils import shuffle
def flatten(l):
    """Concatenate the sub-sequences of *l* into one flat list."""
    result = []
    for sub in l:
        result.extend(sub)
    return result
def save_predictions(filepath, samples, truth, preds, scores):
assert len(samples) == len(truth) == len(preds) == len(scores)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as fp:
for i in range(len(samples)):
assert len(samples[i]) == len(truth[i]) == len(preds[i]) == len(scores[i])
for j in range(len(samples[i])):
fp.write("{}\t{}\t{}\t{:.5f}\n".format(
samples[i][j], truth[i][j], preds[i][j], scores[i][j]))
fp.write('\n')
def save_path_scores(filepath, scores):
    """Write one score per line, 5-decimal precision, creating parent dirs."""
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    with open(filepath, 'w') as fp:
        for value in scores:
            fp.write('{:.5f}\n'.format(value))
def process_logits(logits, tags):
    """Return, per instance and token, the softmax probability of its tag.

    ``logits`` is indexed as (instance, token, tag); ``tags`` is a nested
    list of tag ids.  The result mirrors the shape of ``tags``.
    """
    softmaxed = torch.softmax(logits.data, dim=-1)
    return [
        [softmaxed[i, j, tag_id].item() for j, tag_id in enumerate(instance)]
        for i, instance in enumerate(tags)
    ]
def count_params(model):
    """Number of trainable (requires_grad) scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.nelement()
    return total
def get_dataloader(dataset, batch_size, shuffle=False):
sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)
dloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size, collate_fn=dataset.collate_fn)
return dloader
def create_dataloaders(datasets, args):
oarg = args.training.optim
train_batch_size = args.training.batch_size * max(1, oarg.n_gpu)
eval_batch_size = args.evaluation.batch_size * (max(1, oarg.n_gpu))
dataloaders = {
'train': get_dataloader(datasets['train'], batch_size=train_batch_size, shuffle=True),
'dev': get_dataloader(datasets['dev'], batch_size=eval_batch_size, shuffle=False),
'test': get_dataloader(datasets['test'], batch_size=eval_batch_size, shuffle=False)
}
return dataloaders
def get_label_to_index(datasets):
all_labels = flatten([flatten(datasets[dataset].labels) for dataset in datasets])
all_labels = {l: i for i, l in enumerate(sorted(set(all_labels)))}
return all_labels
def read_conll(filename, columns: Dict[str, int], delimiter='\t'):
    """Read a CoNLL-style file into ``{column_name: list_of_sentences}``.

    ``columns`` maps a column name to its 0-based field index in the file
    (the original ``List[str]`` annotation was wrong — the value is
    subscripted as ``columns[colname]`` below).  Sentences are blocks of
    rows separated by blank lines; each returned value is a list of
    sentences, each sentence being a list of field strings.
    """
    def is_empty_line(line_pack):
        # Separator row: every field blank (vacuously true for no fields).
        return all(field.strip() == '' for field in line_pack)
    data = []
    with open(filename) as fp:
        reader = csv.reader(fp, delimiter=delimiter, quoting=csv.QUOTE_NONE)
        # groupby yields alternating runs of separator rows and content rows.
        groups = groupby(reader, is_empty_line)
        for is_empty, pack in groups:
            if is_empty is False:
                # Transpose the sentence: one list per file column.
                data.append([list(field) for field in zip(*pack)])
    # Transpose again: data[i] is now file column i across all sentences.
    data = list(zip(*data))
    dataset = {colname: list(data[columns[colname]]) for colname in columns}
    return dataset
def write_conll(filename, data, colnames: List[str] = None, delimiter='\t'):
    """Write ``data`` (column name -> sentences -> tokens) as a CoNLL file.

    One token per line with its fields joined by *delimiter*; sentences are
    separated by a blank line.  Parent directories are created as needed.
    """
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    if colnames is None:
        colnames = list(data.keys())
    reference = data[colnames[0]]  # defines sample/token counts
    with open(filename, 'w') as fp:
        for sample_i, sentence in enumerate(reference):
            for token_i in range(len(sentence)):
                fields = (str(data[col][sample_i][token_i]) for col in colnames)
                fp.write(delimiter.join(fields) + '\n')
            fp.write('\n')
def print_frequent_hashtags(datasets):
hashtags = []
for split in datasets:
dtokens = datasets[split].tokens
for tokens in dtokens:
for i, token in enumerate(tokens):
if token == '#' and i + 1 < len(tokens):
hashtags.append('#' + tokens[i+1])
if token.startswith('#') and token != '#':
hashtags.append(token)
hashtags = Counter(hashtags).most_common(10)
# hashtags = [item for item, num in hashtags]
print(hashtags)
def shuffle_datasets(datasets, num_samples=2200, ratio=[0.7, 0.15, 0.15]):
    """Pool train+dev samples, shuffle, and re-split into train/dev/test.

    The previous implementation hard-coded the 1540/330/330 boundaries and
    silently ignored ``num_samples`` and ``ratio``; the boundaries are now
    derived from those parameters (defaults reproduce the old split).
    ``ratio`` is read-only, so its mutable default is safe.

    NOTE(review): the 'test' split is rebuilt from the shuffled train+dev
    pool while the original test samples are dropped — confirm intended.
    """
    tokens, labels = [], []
    for split in datasets:
        if split == 'test':
            continue
        tokens.extend(datasets[split].tokens)
        labels.extend(datasets[split].labels)
    tokens, labels = shuffle(tokens, labels)
    # Split boundaries derived from the requested ratio.
    train_end = int(num_samples * ratio[0])
    dev_end = train_end + int(num_samples * ratio[1])
    datasets['train'].tokens = tokens[0:train_end]
    datasets['train'].labels = labels[0:train_end]
    datasets['dev'].tokens = tokens[train_end:dev_end]
    datasets['dev'].labels = labels[train_end:dev_end]
    datasets['test'].tokens = tokens[dev_end:num_samples]
    datasets['test'].labels = labels[dev_end:num_samples]
    return datasets
def reform_test_by_hashtags(dataset, hashtags):
    """Keep only the samples of *dataset* containing at least one hashtag.

    Filters ``dataset.tokens`` / ``dataset.labels`` in place and returns
    the dataset.
    """
    wanted = set(hashtags)
    kept_tokens, kept_labels = [], []
    for sent_tokens, sent_labels in zip(dataset.tokens, dataset.labels):
        if wanted & set(sent_tokens):
            kept_tokens.append(sent_tokens)
            kept_labels.append(sent_labels)
    dataset.tokens, dataset.labels = kept_tokens, kept_labels
    return dataset
|
10,357 | ffe9e00143e1a9a0ef6ccb8e4e7bc8baaebd1c69 | #!/usr/bin/env python3
# -*- coding: utf-8 -*- #
import os
import argparse
import subprocess
import circling_r
from circling_py.OBc import *
levels = ["A","B","C","D","E*","E**","F"]
def getOpt():
parser = argparse.ArgumentParser(description="Audit species barcodes from OBc pipeline", add_help=True)
parser.add_argument('-i','--input',
metavar='str',
default=None,
help='[Required] input file',
required=False)
parser.add_argument('--for',
nargs='+',
metavar="str",
default=None,
help='[Optional] Specific group for plotting radars. If there is any value, all groups for '
' `Group` column is taken [Default = None]')
parser.add_argument('--at',
nargs='+',
metavar="str",
default=None,
help='[Optional] Coupled with `--for` option. Split polygons inside each specific '
' group correspondingly to a specific taxonomical rank (e.g. if `Family` is choosen,'
' each family has its own polygon inside a radar). If there is any value, overall '
' data is plotted without distinction of taxomical ranks. If there is only one value, this'
' is used for all radar plots. Otherwise, an error is raised, including mismatches between '
' values introduced here and available taxonomical rank from input data [Default = None]')
parser.add_argument('--n',
nargs='+',
metavar="str",
default=None,
help='[Optional] Coupled with `--at` option. Maximum number of polygons inside each'
' specific group correspondingly. If there is any value, whole data is taken. If '
' there is only one value, this is used for all '
' radar plots. Otherwise, an error is raised'
' [Default = None]')
parser.add_argument('-l', '--legend',
action="store_true",
default=False,
help='''[Optional] if selected, draw legend''')
parser.add_argument('-g', '--grades',
nargs='+',
metavar="str",
default=levels,
help='''[Optional] Specific grades to plot. Levels can be collapsed with a forward slash
(e.g. A/B C D E*/E** F) [Default = A B C D E* E** F] ]
''')
parser.add_argument('-p', '--pal',
metavar='str',
type=str,
default='NA',
help='[Optional] Palette of colors [Default = NA]')
parser.add_argument('-b', '--labelsize',
metavar='float',
type=float,
default=12,
help='[Optional] Size of labels [Default = 14]')
parser.add_argument('-L', '--linesize',
metavar='float',
type=float,
default=1.8,
help='[Optional] Size of labels [Default = 1.8]')
parser.add_argument('-t', '--transform',
metavar='str',
type=str,
default="percentage",
help="[Optional] transform species counts. There are three options: 'percentage', 'exponential' and 'log' [Default = percentage]")
parser.add_argument('-T', '--tnumber',
metavar='float',
type=float,
default=0.5,
help='''Transforming number and is coupled with `--transform` optio. This number is used as base when `log`
is used or exponential number when using 'exponential' [Default = 0.5]
''')
parser.add_argument('-c', '--ctitle',
action="store_true",
default=False,
help='''if selected, title is changed according to above options''')
parser.add_argument('-H',
metavar='float',
type=float,
default=5,
help='[Optional] Height of plot in inches [Default = 7]')
parser.add_argument('-W',
metavar='float',
type=float,
default=11.5,
help='[Optional] Height of plot in inches [Default = 14]')
parser.add_argument('-r',
metavar='float',
type=float,
default=200,
help='[Optional] Resolution of plot [Default = 200]')
parser.add_argument('-o', '--output',
metavar='str',
type=str,
default='input_based',
help='[Optional] Output name [Default = <input_based>.jpeg]')
args = parser.parse_args()
return args
def runShell(args):
    """Run *args* as a subprocess (argument list, no shell) and wait for it."""
    proc = subprocess.Popen(args)
    proc.communicate()
def cname(s):
    """Derive the default radar-plot output name from an input path.

    Drops the last extension of *s* when one exists, strips the directory,
    and appends ``_RadarPlot.jpeg``.
    """
    tail = "_RadarPlot.jpeg"
    try:
        stem = s.split(".")[-2]
    except IndexError:
        # No extension at all: keep the whole path's basename.
        stem = s
    return stem.split("/")[-1] + tail
def main():
option = vars(getOpt())
sameLevels = len(set(levels) - set(option['grades'])) == 0
if not sameLevels:
rinput = str( OBc().changeGrades( option['input'], option['grades'], write=True) )
else:
rinput = option['input']
plusHeader = "labelsize,linesize,tnumber,transform,pal,legend,ctitle"
plusOpt = ",".join([ str(option['labelsize']),
str(option['linesize']),
str(option['tnumber']),
option['transform'],
option['pal'],
'TRUE' if option['legend'] else 'FALSE',
'TRUE' if option['ctitle'] else 'FALSE'
])
df = OBc().RadarPlotOpt( option['input'], option['for'], option['at'], option['n'] )
out = ["%s,%s" % (df[0], plusHeader)]
for i in df[1:]:
out.append("%s,%s" % (i, plusOpt))
rindications = str(OBc().writeOut(out))
fo = option['output'] if option['output'] != "input_based" else cname(option['input'])
radar_r = os.path.join(circling_r.__path__[0], "plot_radar.R")
Ropt = [ "Rscript",
radar_r,
'-a', rinput,
'-i', rindications,
'-g', ",".join(sorted(option['grades'])),
'-H', str(option['H']),
'-W', str(option['W']),
'-r', str(option['r']),
'-o', fo
]
runShell(Ropt)
if not sameLevels:
runShell(['rm', rinput])
runShell(['rm', rindications])
if __name__ == "__main__":
main() |
10,358 | 38064f01b5d80fb3f95a8e35f35eb23201e45e49 | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
    """Landing view: return a fixed greeting."""
    message = "WELCOME RUCHI"
    return HttpResponse(message)
def index1(request):
    """Secondary test view: return a fixed greeting."""
    greeting = "helloooo"
    return HttpResponse(greeting)
|
10,359 | 4e535457c809608ee0856f95584f95e54884559a | PlotGrid(2, 2, p1, p2 ,p3, p4)
# PlotGrid object containing:
# Plot[0]:Plot object containing:
# [0]: cartesian line: x for x over (-5.0, 5.0)
# [1]: cartesian line: x**2 for x over (-5.0, 5.0)
# [2]: cartesian line: x**3 for x over (-5.0, 5.0)
# Plot[1]:Plot object containing:
# [0]: cartesian line: x**2 for x over (-6.0, 6.0)
# [1]: cartesian line: x for x over (-5.0, 5.0)
# Plot[2]:Plot object containing:
# [0]: cartesian line: x**3 for x over (-5.0, 5.0)
# Plot[3]:Plot object containing:
# [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
|
10,360 | 2b1f350da926bce0755f3823f2d4a2a099962c0a | """Pilot Reports (PIREP)
This module attempts to process and store atomic data from PIREPs. These are
encoded products that look like so:
UBUS01 KMSC 221700
EAU UA /OV EAU360030/TM 1715/FL350/TP B737/TB CONT LGT-MOD CHOP =
EHY UA /OV MBW253036 /TM 1729 /FL105 /TP C206 /SK FEW250 /TA M06
/TB NEG /RM SMTH=
Unfortunately, there is not much documentation of this format and the feed of
this data contains a bunch of formatting errors.
"""
from enum import Enum
import datetime
import re
import math
from pydantic import BaseModel
import pyiem.nws.product as product
from pyiem.datatypes import distance
from pyiem.util import html_escape, LOG
OV_LATLON = re.compile(
(
r"\s?(?P<lat>[0-9]{3,4})(?P<latsign>[NS])"
r"\s?(?P<lon>[0-9]{3,5})(?P<lonsign>[EW])"
)
)
OV_LOCDIR = re.compile(
r".*?(?P<loc>[A-Z0-9]{3,4})\s?(?P<dir>[0-9]{3})(?P<dist>[0-9]{3})"
)
OV_TWOLOC = re.compile(
r"(?P<loc1>[A-Z0-9]{3,4})\s?-\s?(?P<loc2>[A-Z0-9]{3,4})"
)
OV_OFFSET = re.compile(
(
r"(?P<dist>[0-9]{1,3})\s?"
"(?P<dir>NORTH|EAST|SOUTH|WEST|N|NNE|NE|ENE|E|ESE|"
r"SE|SSE|S|SSW|SW|WSW|W|WNW|NW|NNW)\s+(OF )?(?P<loc>[A-Z0-9]{3,4})"
)
)
# 16-point compass rose (plus spelled-out cardinals) mapped to azimuth
# degrees.  Points step by 22.5 degrees; NW and NNW were previously
# mis-typed as 305 and 327.5 — the correct azimuths are 315 and 337.5.
DRCT2DIR = {
    "N": 0,
    "NNE": 22.5,
    "NE": 45,
    "ENE": 67.5,
    "E": 90,
    "ESE": 112.5,
    "SE": 135,
    "SSE": 157.5,
    "S": 180,
    "SSW": 202.5,
    "SW": 225,
    "WSW": 247.5,
    "W": 270,
    "WNW": 292.5,
    "NW": 315,
    "NNW": 337.5,
    "NORTH": 0,
    "EAST": 90,
    "SOUTH": 180,
    "WEST": 270,
}
class Priority(str, Enum):
    """PIREP priority: routine (UA) or urgent (UUA)."""

    UA = "UA"
    UUA = "UUA"

    def __str__(self):
        """Return the raw value so formatting yields ``UA``/``UUA``."""
        return str(self.value)
class PilotReport(BaseModel):
    """ A Pilot Report. """
    # All fields default to None and are filled in while parsing the text.
    base_loc: str = None  # fallback station id parsed from the report header
    text: str = None  # whitespace-normalised raw report text
    priority: Priority = None  # UA (routine) or UUA (urgent)
    latitude: float = None  # decimal degrees, negative south
    longitude: float = None  # decimal degrees, negative west
    valid: datetime.datetime = None  # report time derived from the /TM group
    cwsu: str = None  # Center Weather Service Unit id, assigned after parsing
    aircraft_type: str = None  # /TP group
    is_duplicate: bool = False  # set externally when the report was seen before
class Pirep(product.TextProduct):
""" Class for parsing and representing Space Wx Products. """
def __init__(
self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
):
""" constructor """
product.TextProduct.__init__(
self,
text,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
self.reports = []
self.parse_reports()
def parse_reports(self):
"""Actually do the parsing of the product that generates the reports
stored within the self.reports list"""
txt = (
self.unixtext
if self.unixtext[:2] != "\001\n"
else self.unixtext[2:]
)
lines = txt.split("\n")
# There may be an AWIPSID in line 3 or silly aviation control char
pos = 3 if len(lines[2]) < 10 or lines[2].startswith("\x1e") else 2
meat = "".join(lines[pos:])
for report in meat.split("="):
if report.strip() == "":
continue
res = self.process_pirep(" ".join(report.strip().split()))
if res is not None:
self.reports.append(res)
def process_pirep(self, report):
""" Convert this report text into an actual PIREP object """
_pr = PilotReport()
_pr.text = report
for i, token in enumerate(report.split("/")):
token = token.strip()
# First token is always priority
if i == 0:
if len(token) > 10:
LOG.info("Aborting as not-PIREP? |%s|", report)
return
if token.find(" UUA") > 0:
_pr.priority = Priority.UUA
else:
_pr.priority = Priority.UA
parts = token.split()
if len(parts) == 2:
_pr.base_loc = parts[0]
if len(_pr.base_loc) == 4 and _pr.base_loc[0] == "K":
_pr.base_loc = _pr.base_loc[1:]
continue
# Aircraft Type
if token.startswith("TP "):
_pr.aircraft_type = token[3:]
# Location
if token.startswith("OV "):
dist = 0
bearing = 0
therest = token[3:]
if len(therest) == 3:
loc = therest
elif therest.startswith("FINAL RWY"):
loc = report[:8].split()[0]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
elif len(therest) == 4:
if therest[0] == "K":
loc = therest[1:]
else:
loc = therest
elif re.match(OV_OFFSET, therest):
d = re.match(OV_OFFSET, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
dist = int(d["dist"])
bearing = DRCT2DIR[d["dir"]]
elif re.match(OV_LOCDIR, therest):
# KFAR330008
d = re.match(OV_LOCDIR, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
bearing = int(d["dir"])
dist = int(d["dist"])
elif re.match(OV_LATLON, therest):
# 2500N07000W
# FMH-12 says this is in degrees and minutes!
d = re.match(OV_LATLON, therest).groupdict()
_pr.latitude = float(
"%s.%i"
% (
d["lat"][:-2],
int(float(d["lat"][-2:]) / 60.0 * 10000.0),
)
)
if d["latsign"] == "S":
_pr.latitude = 0 - _pr.latitude
_pr.longitude = float(
"%s.%i"
% (
d["lon"][:-2],
int(float(d["lon"][-2:]) / 60.0 * 10000.0),
)
)
if d["lonsign"] == "W":
_pr.longitude = 0 - _pr.longitude
continue
elif therest == "O":
# Use the first part of the report in this case
loc = report[:3]
elif therest.find("-") > 0 and re.match(OV_TWOLOC, therest):
d = re.match(OV_TWOLOC, therest).groupdict()
numbers = re.findall("[0-9]{6}", therest)
if numbers:
bearing = int(numbers[0][:3])
dist = int(numbers[0][3:])
loc = d["loc2"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
else:
# Split the distance between the two points
lats = []
lons = []
for loc in [d["loc1"], d["loc2"]]:
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
if loc not in self.nwsli_provider:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
lats.append(self.nwsli_provider[loc]["lat"])
lons.append(self.nwsli_provider[loc]["lon"])
_pr.latitude = sum(lats) / 2.0
_pr.longitude = sum(lons) / 2.0
continue
else:
loc = therest[:3]
if loc not in self.nwsli_provider:
if _pr.base_loc is None:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
loc = _pr.base_loc
if loc not in self.nwsli_provider:
self.warnings.append(
f"Double-unknown location: {report}"
)
return None
# So we discard the offset when we go back to the base
dist = 0
bearing = 0
_pr.longitude, _pr.latitude = self.compute_loc(
loc, dist, bearing
)
continue
# Time
if token.startswith("TM "):
numbers = re.findall("[0-9]{4}", token)
if len(numbers) != 1:
self.warnings.append("TM parse failed %s" % (report,))
return None
hour = int(numbers[0][:2])
minute = int(numbers[0][2:])
_pr.valid = self.compute_pirep_valid(hour, minute)
continue
return _pr if _pr.latitude is not None else None
def compute_loc(self, loc, dist, bearing):
""" Figure out the lon/lat for this location """
lat = self.nwsli_provider[loc]["lat"]
lon = self.nwsli_provider[loc]["lon"]
# shortcut
if dist == 0:
return lon, lat
meters = distance(float(dist), "MI").value("M")
northing = meters * math.cos(math.radians(bearing)) / 111111.0
easting = (
meters
* math.sin(math.radians(bearing))
/ math.cos(math.radians(lat))
/ 111111.0
)
return lon + easting, lat + northing
def compute_pirep_valid(self, hour, minute):
""" Based on what utcnow is set to, compute when this is valid """
res = self.utcnow.replace(
hour=hour, minute=minute, second=0, microsecond=0
)
if hour > self.utcnow.hour:
res -= datetime.timedelta(hours=24)
return res
def sql(self, txn):
""" Save the reports to the database via the transaction """
for report in self.reports:
if report.is_duplicate:
continue
txn.execute(
"INSERT into pireps(valid, geom, is_urgent, "
"aircraft_type, report) VALUES (%s, "
"ST_GeographyFromText('SRID=4326;POINT(%s %s)'),%s,%s,%s)",
(
report.valid,
report.longitude,
report.latitude,
report.priority == Priority.UUA,
report.aircraft_type,
report.text,
),
)
def assign_cwsu(self, txn):
""" Use this transaction object to assign CWSUs for the pireps """
for report in self.reports:
txn.execute(
"select distinct id from cwsu WHERE "
"st_contains(geom, geomFromEWKT('SRID=4326;POINT(%s %s)'))",
(report.longitude, report.latitude),
)
if txn.rowcount == 0:
# self.warnings.append("Find CWSU failed %.3f %.3f %s" % (
# report.longitude, report.latitude, report.text))
continue
row = txn.fetchone()
report.cwsu = row["id"]
def get_jabbers(self, _uri, _uri2=None):
""" get jabber messages """
res = []
for report in self.reports:
if report.is_duplicate or report.valid is None:
continue
jmsg = {
"priority": "Urgent"
if report.priority == Priority.UUA
else "Routine",
"ts": report.valid.strftime("%H%M"),
"report": html_escape(report.text),
"color": (
"#ff0000" if report.priority == Priority.UUA else "#00ff00"
),
}
plain = "%(priority)s pilot report at %(ts)sZ: %(report)s" % jmsg
html = (
"<span style='color:%(color)s;'>%(priority)s pilot "
"report</span> at %(ts)sZ: %(report)s"
) % jmsg
xtra = {
"channels": (
f"{report.priority}.{report.cwsu},{report.priority}.PIREP"
),
"geometry": "POINT(%s %s)"
% (report.longitude, report.latitude),
"ptype": report.priority,
"category": "PIREP",
"twitter": plain[:140],
"valid": report.valid.strftime("%Y%m%dT%H:%M:00"),
}
res.append([plain, html, xtra])
return res
def parser(buf, utcnow=None, ugc_provider=None, nwsli_provider=None):
    """Module-level entry point: parse *buf* and return a ``Pirep`` product."""
    return Pirep(
        buf, utcnow=utcnow, ugc_provider=ugc_provider, nwsli_provider=nwsli_provider
    )
|
10,361 | a2e9b3f87dee7f32c0a2c79b203831942c3aa195 | def solution(arr,sum):
arr.sort()
div = arr[len(arr)-1]-arr[0]
if div>sum:
return 1
return 0
nums = int(input())
for x in range(nums):
arr = list(map(int,input().split()))
num = int(input())
count=0
for i in range(0,len(arr)):
for j in range(i+1,len(arr)):
temp = solution([arr[x] for x in range(i,j+1)],num)
if(temp==1):
count+=1
print(count)
|
10,362 | a69b76d2906842d2264a6b7801e31aa5b8c28d4a | #!/usr/bin/env python3
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
import os
import argparse
import sys
from collections import defaultdict
try: # since gzip will only be needed if there are gzipped files,
import gzip # accept failure to import it.
except:
pass
# If the encoding of the default sys.stdout is not utf-8,
# force it to be utf-8. See PR #95.
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding.lower() != "utf-8":
import codecs
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
sys.stderr = codecs.getwriter("utf-8")(sys.stderr.detach())
sys.stdin = codecs.getreader("utf-8")(sys.stdin.detach())
parser = argparse.ArgumentParser(
description="Extracts word counts from a data directory "
"and creates a count directory with similar structure. "
"Input directory has *.txt, counts directory has *.counts. "
"Format of counts files is 'count word', e.g. '124 hello' ",
epilog="See egs/swbd/run.sh for example.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("text_dir",
help="Directory in which to look for input text data\n")
parser.add_argument("count_dir",
help="Directory, to be written to, for counts files\n")
args = parser.parse_args()
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])))
if os.system("validate_text_dir.py " + args.text_dir) != 0:
sys.exit(1)
if not os.path.exists(args.count_dir):
os.mkdir(args.count_dir)
def ProcessFile(text_file, counts_file):
    """Count word occurrences in *text_file*; write 'count word' lines.

    Transparently reads gzip-compressed input (``.gz`` suffix).  Exits the
    process with an error message if either file cannot be opened.
    """
    try:
        if text_file.endswith(".gz"):
            f = gzip.open(text_file, 'rt', encoding="utf-8")
        else:
            f = open(text_file, 'r', encoding="utf-8")
    except Exception as e:
        sys.exit("Failed to open {0} for reading: {1}".format(
            text_file, str(e)))
    word_to_count = defaultdict(int)
    for line in f:
        for word in line.split():
            word_to_count[word] += 1
    f.close()
    try:
        cf = open(counts_file, "w", encoding="utf-8")
    except Exception as e:
        # Bug fix: the failure message previously reported the *input* file
        # (text_file) instead of the counts file being written, and the bare
        # except hid the underlying error.
        sys.exit("Failed to open {0} for writing: {1}".format(
            counts_file, str(e)))
    for word, count in word_to_count.items():
        print("{0} {1}".format(count, word), file=cf)
    cf.close()
num_files_processed = 0
for f in os.listdir(args.text_dir):
num_files_processed += 1
text_path = args.text_dir + os.sep + f
if os.path.isdir(text_path):
continue
if f.endswith(".txt"):
counts_path = args.count_dir + os.sep + f[:-4] + ".counts"
ProcessFile(text_path, counts_path)
elif f.endswith(".txt.gz"):
counts_path = args.count_dir + os.sep + f[:-7] + ".counts"
ProcessFile(text_path, counts_path)
elif f != "unigram_weights":
sys.exit("get_word_counts.py: did not expect to find file {0}/{1} in "
"text directory".format(args.text_dir, f))
num_files_in_dest = 0
for f in os.listdir(args.count_dir):
if f.endswith(".counts"):
num_files_in_dest += 1
if num_files_in_dest > num_files_processed:
sys.exit("get_word_counts.py: It looks like your destination directory " +
args.count_dir + " contains some extra counts files. "
"Please clean up.")
print("Created {0} .counts files in {1}".format(num_files_processed,
args.count_dir),
file=sys.stderr)
|
10,363 | fa5e337111e53cb5a5c6b0fde0214c8e67d167d4 | # list of tuples with book names and links
BOOKS = [("hp1_sorcerers_stone", "http://www.glozman.com/TextPages/Harry%20Potter%201%20-%20Sorcerer's%20Stone.txt", "txt"),
("hp2_chamber_of_secrets", "http://www.glozman.com/TextPages/Harry%20Potter%202%20-%20Chamber%20of%20Secrets.txt", "txt"),
("hp3_prisioner_of_azkaban", "http://www.glozman.com/TextPages/Harry%20Potter%203%20-%20The%20Prisoner%20Of%20Azkaban.txt", "txt"),
("hp4_globet_of_fire", "http://www.glozman.com/TextPages/Harry%20Potter%204%20-%20The%20Goblet%20Of%20Fire.txt", "txt"),
("hp5_order_of_the_phoenix", "http://www.glozman.com/TextPages/Harry%20Potter%205%20-%20Order%20of%20the%20Phoenix.txt", "txt"),
("hp6_half_blood_prince", "http://www.glozman.com/TextPages/Harry%20Potter%206%20-%20The%20Half%20Blood%20Prince.txt", "txt"),
("hp7_deathly_hallows", "http://www.glozman.com/TextPages/Harry%20Potter%207%20-%20Deathly%20Hollows.txt", "txt")]
# list of tuples with adjacent files
SPELLS = [("hp_spells_list", "https://www.pojo.com/harry-potter-spell-list/", "json")]
EXTRAS = [("hp_places_list", "http://m.uploadedit.com/bbtc/1544391705882.txt", "csv"),
("hp_characters_list", "http://m.uploadedit.com/bbtc/1544392366399.txt", "csv"),
("hp_classes_list","http://m.uploadedit.com/bbtc/154439335942.txt", "csv")] |
10,364 | 0cfe04596a2eb4f44f4425dbd9ebc5be78b4adcd | """Reduce 操作"""
# TO BE UPDATED
from functools import partial
from typing import Any, Callable, Generator, Iterable, Iterator
from more_itertools import chunked, first, take
from multiprocess import Process, Queue, cpu_count
from pb import ProgressBar
from .map import map
def reduce(
    func: Callable[[Any, Any], Any],
    data: Iterable,
    size: int = -1,
    chunk_size: int = 16,
    batch_size: int = 8192,
    jobs: int = cpu_count(),
    silent: bool = False,
    label: str = 'reduce',
) -> Any:
    """Reduce a list or iterator in parallel, returning the single folded value.

    (The previous ``Generator[list, None, None]`` annotation was wrong: the
    function returns one reduced value, see the final ``return result``.)

    Arguments:
        func: Callable[[Any, Any], Any]
            Reduce function; first argument is the accumulated result,
            second is the next element.
        data: Iterable
            The data to process.
        size: int = -1
            Length of the data; -1 converts an iterator to a list to obtain
            its length.  Pass size=None to disable length tracking.
        chunk_size: int = 16
            Number of items each worker processes per task.
        batch_size: int = 8192
            Maximum number of elements consumed per round; never less than
            chunk_size * jobs.
        jobs: int = cpu_count()
            Number of workers (default evaluated once, at import time).
        silent: bool = False
            Suppress progress output.
        label: str = 'reduce'
            Label shown on the progress bar.
    """
    batch_size = max(batch_size, chunk_size * jobs)
    completed = 0
    progress_bar = ProgressBar(label)
    progress_bar.reset()
    def _reduce_chunk(chunk: Iterable) -> Any:
        """Fold one chunk down to a single value with ``func``."""
        chunk = iter(chunk)
        result = first(chunk)
        for item in chunk:
            result = func(result, item)
        return result
    # The data length may need to be computed up front.
    if size is not None and size == -1:
        try:
            size = len(data)
        except TypeError:
            data = list(data)
            size = len(data)
    iterator = iter(data)
    # Layered reduction: when a layer reaches its limit, its partial results
    # are folded and pushed into the next layer.
    output = [take(batch_size, iterator)]
    def reduce_layer(index: int):
        # Reduce layer ``index`` in parallel (via the project's parallel
        # ``map``) and append the partial results to the layer above.
        nonlocal completed
        if index + 1 >= len(output):
            output.append([])
        result = map(
            _reduce_chunk,
            chunked(output[index], chunk_size),
            chunk_size=1,
            jobs=jobs,
            silent=silent,
            # NOTE(review): the "* 15" progress multiplier looks like a tuned
            # per-chunk work estimate, not a derived quantity — confirm.
            customize_callback=lambda current, _=None: progress_bar.update(
                current * 15 + completed, size),
        )
        completed += len(output[index]) - len(result)
        output[index + 1] += result
        output[index] = []
    while output[0]:
        reduce_layer(0)
        for i in range(1, len(output)):
            if len(output[i]) >= batch_size:
                reduce_layer(i)
        output[0] = take(batch_size, iterator)
    # Final pass: fold the remaining partial results layer by layer.
    for i in range(len(output)):
        if i + 1 >= len(output):
            # Topmost layer: shrink it until one sequential fold is cheap.
            chunk = output[i]
            while len(chunk) > max(chunk_size, 2 * jobs):
                new_chunk_size = min(chunk_size, len(chunk) // jobs + 1)
                new_chunk = map(
                    _reduce_chunk,
                    chunked(chunk, new_chunk_size),
                    chunk_size=1,
                    jobs=jobs,
                    silent=silent,
                    customize_callback=lambda current, _=None: progress_bar.update(
                        current * (new_chunk_size - 1) + completed, size),
                )
                completed += len(chunk) - len(new_chunk)
                chunk = new_chunk
            result = _reduce_chunk(chunk)
            completed += len(chunk)
            progress_bar.update(completed, size)
            return result
        if len(output[i]) > batch_size:
            reduce_layer(i)
        else:
            output[i + 1] += output[i]
|
10,365 | 98568df731d9b9df37d7c0a8a60289abb8c8f309 | class TeamsAsyncOperationStatus:
def __init__(self):
"""Describes the current status of a teamsAsyncOperation."""
pass
invalid = 0
""" Invalid value."""
notStarted = 1
"""The operation has not started."""
inProgress = 2
""" The operation is running."""
succeeded = 3
"""The operation succeeded."""
failed = 4
"""The operation failed."""
|
10,366 | 70bbbbaa44beb68125628ed22dd6e2c5e710b163 | """add event log event type idx.
Revision ID: f4b6a4885876
Revises: 29a8e9d74220
Create Date: 2021-09-08 10:28:28.730620
"""
from dagster._core.storage.migration.utils import create_event_log_event_idx
# revision identifiers, used by Alembic.
revision = "f4b6a4885876"
down_revision = "29a8e9d74220"
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the event-type index on the event log."""
    create_event_log_event_idx()
def downgrade():
    """Reverse migration is a no-op; the index created in upgrade() is not dropped."""
    pass
|
10,367 | 1cfb8463c2b7e0b006bbad654851a73c5204abb7 | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'yíngxiāng'
CN=u'迎香'
NAME=u'yingxiang21'
CHANNEL='largeintestine'
CHANNEL_FULLNAME='LargeIntestineChannelofHand-Yangming'
SEQ='LI20'
if __name__ == '__main__':
pass
|
10,368 | 1f432314f5ee55956fd28d6d5468eb95e22c4179 | """DICOM cardiac MRI image training pipeline.
Usage:
dicompipeline [--log <level>] (--data-dir <data_dir>)
dicompipeline (-h | --help)
dicompipeline --version
Options:
--log <level> Specify the log level to use, one of "info",
"warning", or "debug".
intermediate files are generated.
--data-dir <data_dir> Use the given data directory for the source data set.
-h --help Show this screen.
--version Show version.
Exit Codes:
0 if no errors occurred.
1 on user error.
2 on an unexpected error, e.g. lack of memory, disk, bug, etc.
"""
import asyncio
import logging
import os
import sys
from docopt import docopt
from dicompipeline.dataset import Dataset
from dicompipeline.pipeline import Pipeline
from dicompipeline.version import get_version
from traceback import format_exc
def main(argv=None):
    """Entry point for the dicompipeline CLI.

    Parses arguments per the module docstring, configures logging, loads
    the dataset from ``--data-dir`` and runs the training pipeline.

    Args:
        argv: Argument list (program name first). ``None`` means use the
            process's own ``sys.argv``.

    Exits with status 0 on success, 1 on user error, 2 on unexpected error.
    """
    if argv is None:
        # When not invoked by tests or from code, get argv from how we were
        # invoked on the command-line.
        from sys import argv

    arguments = docopt(__doc__,
                       version=get_version(),
                       options_first=True,
                       help=True,
                       argv=argv[1:])

    log_level = arguments["--log"]
    if log_level is None:
        log_level = "info"
    numeric_level = getattr(logging, log_level.upper(), None)
    if not isinstance(numeric_level, int):
        # BUG FIX: this previously referenced the undefined name `logLevel`,
        # which raised a NameError instead of reporting the bad level.
        logging.error("Invalid log level {}".format(log_level))
        sys.exit(1)
    logging.basicConfig(level=numeric_level)

    data_dir = arguments["--data-dir"]
    if not os.path.isdir(data_dir):
        # Note: docopt ensures that if we are here then "data_dir" is not None
        # because "--data-dir" is mandatory per the docstring.
        logging.error("The specified data directory '{}' does not exist.".format(data_dir))
        sys.exit(1)

    try:
        # Expected data layout inside --data-dir.
        dicom_dir = os.path.join(data_dir, "dicoms")
        i_contour_dir = os.path.join(data_dir, "contourfiles")
        links_filename = os.path.join(data_dir, "link.csv")
        dataset = Dataset.load_dataset(
            dicom_dir,
            i_contour_dir,
            links_filename)
        if dataset.size() == 0:
            logging.error("No input images and contour masks were found in the data directory.")
            logging.error("This could happen if no contour files match any of the DICOM files even if there are images and contour files in the data directory.")
            sys.exit(1)
        loop = asyncio.get_event_loop()
        pipeline = Pipeline(dataset, loop=loop)
        pipeline.train()
        sys.exit(0)
    except Exception as e:
        # SystemExit from the sys.exit calls above is not an Exception,
        # so deliberate exits pass through untouched.
        logging.error("An unexpected error occurred.")
        logging.error(str(e))
        logging.error(format_exc())
        sys.exit(2)
|
10,369 | 6a3c970560647dfeec6c4d4b3affc8294b4d015c | #Utilizando um arquivo de dados com varias colunas (por exemplo, o arquivo dados_alunos.txt),
#faca um histograma com os dados de cada uma das colunas.
#Dica: utilize o matplotlib para fazer os histogramas.
import matplotlib.pyplot as plt
fout = open('dados_alunos.txt', 'r')
linhas = fout.readlines()
lista_idade=[]
lista_altura=[]
lista_peso=[]
# Parse one whitespace-separated row per line: idade, altura, peso.
# (Previously each line was split three times and an unused tab-split
# `column` variable was computed on top of that.)
for line in linhas:
    valores = line.split()
    lista_idade.append(float(valores[0]))
    lista_altura.append(float(valores[1]))
    lista_peso.append(float(valores[2]))
fout.close()
#print(lista_idade)
#print(lista_altura)
#print(lista_peso)
def histo(lista):
    """Display a histogram of the values in *lista*."""
    plt.hist(lista)
    plt.show()
histo(lista_idade)
histo(lista_altura)
histo(lista_peso)
|
10,370 | e518ae7bd7ff3b7defdf5bacabfddb0b3b87d031 | from generation import MarkovChains
import re
file = open("sonnets.txt")
text = re.sub(r'\n.+\n', '', file.read())
markov = MarkovChains(";:.!?")
markov.add_text(text)
print(markov.generate_text(4))
|
10,371 | ae37b54b6472a7989a5fccc1024b332277864ccf | import matplotlib.pyplot as plt
import numpy as np
import itertools
"""
Some helper function to plot data
"""
def plot_data(x, y, epochs):
    """
    Plot the model loss over the training iterations.

    x: iteration indices; y: loss values; epochs: epoch count for the title.
    X ticks are placed every 88 iterations and relabelled as epoch numbers
    (assumes 88 iterations per epoch -- TODO confirm against the trainer).
    """
    fig = plt.figure()
    ax = fig.gca()
    # Round the upper y-limit up to the nearest integer.
    ax.set_ylim(0, int(np.max(y)+0.5))
    ax.set_xlim(0, np.max(x))
    ax.yaxis.grid(True)
    ax.grid(which='minor', axis='x', alpha=0.2)
    ax.grid(which='major', axis='x', alpha=0.5)
    # Major ticks every 88 iterations, minor ticks every 16.
    major_ticks = np.arange(0, np.max(x), 88)
    minor_ticks = np.arange(0, np.max(x), 16)
    ax.set_xticks(major_ticks)
    ax.set_xticks(minor_ticks, minor=True)
    # Force a draw so the tick label strings are populated before reading them.
    fig.canvas.draw()
    # Relabel each iteration tick as its epoch number (iteration // 88).
    labels = ["{:2d}".format(int(int(item.get_text())/88)) for item in ax.get_xticklabels()]
    ax.set_xticklabels(labels)
    plt.title("Model Loss over {} Epochs".format(epochs))
    plt.scatter(x, y, s=50, alpha=0.5, label='cross_entropy')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend(loc='upper right')
    plt.show()
def plot_confusion_matrix(cm, classes,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print *cm* (an integer confusion matrix) to stdout and render it as a
    colour-mapped image with every cell annotated by its count.
    """
    plt.figure()
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=90)
    plt.yticks(ticks, classes)
    # Annotate each cell; switch to white text on dark backgrounds.
    fmt = 'd'
    threshold = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            colour = "white" if cm[row, col] > threshold else "black"
            plt.text(col, row, format(cm[row, col], fmt),
                     horizontalalignment="center",
                     color=colour)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
|
10,372 | 74bb1164b3633467a25e20ed683ec724c0c9f097 | import numpy as np
import matplotlib.pyplot as plt
class Plotter:
    """Renders plots for the tweets-volume vs. stocks-volume correlation study."""

    def __init__(self):
        pass

    def plotBarGraph(self, percent_correlation_r):
        """Draw a stacked bar chart of correlation strength per lag.

        Args:
            percent_correlation_r: dict mapping a correlation bucket
                (0.0, 0.1, ..., 0.9) to a 7-element sequence of stock
                percentages, one per lag (-3 .. 3).
        """
        ind = np.arange(7)  # one bar group per lag (-3 .. 3)
        width = 0.7
        # Stack the ten correlation buckets bottom-up. This replaces ten
        # copy-pasted plt.bar calls with hand-written cumulative bottoms
        # (which also carried a duplicated `width` assignment and an
        # unused `test` variable).
        buckets = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        bottom = np.zeros(7)
        bars = []
        for bucket in buckets:
            values = np.asarray(percent_correlation_r[bucket], dtype=float)
            bars.append(plt.bar(ind, values, width, bottom=bottom))
            bottom = bottom + values
        plt.ylabel('Stocks %')
        plt.title('Correlation of stocks volume to tweets volume with lags')
        plt.xticks(ind, ('-3', '-2', '-1', '0', '1', '2', '3'))
        plt.yticks(np.arange(0, 101, 10))
        # Legend lists buckets strongest-first, matching the original layout.
        plt.legend([bar[0] for bar in reversed(bars)],
                   ['{:.1f}'.format(bucket) for bucket in reversed(buckets)],
                   loc='center left',
                   bbox_to_anchor=(1, 0.5))
        plt.show()
|
10,373 | e820f647810a6d60e6f47e5be74bdf99e99bda55 | from django.contrib import admin
# Register your models here.
from .models import Student
class SignUpAdmin(admin.ModelAdmin):
    """Admin configuration for Student sign-up records.

    NOTE(review): unlike ModelForm, ModelAdmin does not read an inner
    ``Meta`` class — this inner class appears inert; confirm intent.
    """
    class Meta:
        model = Student


# Expose Student in the Django admin using the configuration above.
admin.site.register(Student, SignUpAdmin)
|
10,374 | 2a62f0b81a01278f14024366ae15b5ea50a13514 | import sys
from uploadFile import get_pred_files_names
get_pred_files_names(sys.argv[1])
|
10,375 | c93d1795a0afc792efb79df697776174e0b22d01 | from Products.ProjectDatabase.reports.ProjectsAtRiskReportFactory \
import ProjectsAtRiskReportFactory
from basereport import BaseReport
from Products.CMFCore.utils import getToolByName
class ProjectsAtRiskReport(BaseReport):
    """Report listing the projects currently flagged as at risk."""

    def getReport(self):
        """Build and return the 'Projects at Risk' report for this context."""
        return ProjectsAtRiskReportFactory(
            self.context, projects=self._projects
        ).getReport('Projects at Risk')
|
10,376 | c6cf924eeaab7d87240564e1f386acd6f4b2fbac | '''
Simple Balanced Parentheses using stack
'''
class Stack:
    """A minimal LIFO stack backed by a Python list (top of stack at the end)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
# function to check only "(" paranthesis
def paranCheck(paranthesis):
    """Return True when *paranthesis* is a balanced string of '(' / ')'.

    Any character other than '(' is treated as a closing paren.
    """
    s = Stack()
    balanced = True
    index = 0
    while index < len(paranthesis) and balanced:
        if paranthesis[index] == '(':
            s.push(paranthesis[index])
        elif s.isEmpty():
            # A closer with nothing open can never be matched.
            balanced = False
        else:
            s.pop()
        index = index + 1
    # Balanced only if we never underflowed and nothing is left open.
    return balanced and s.isEmpty()
# function to check all paranthesis
def genparanCheck(paranthesis):
    """Return True when all of (), [] and {} in *paranthesis* are balanced.

    Any character not in "([{" is treated as a closing symbol and must
    match the most recently opened bracket.
    """
    s = Stack()
    balanced = True
    index = 0
    while index < len(paranthesis) and balanced:
        symbol = paranthesis[index]
        if symbol in "([{":
            s.push(symbol)
        elif s.isEmpty():
            balanced = False
        elif not matches(s.pop(), symbol):
            # Closing symbol does not pair with the most recent opener.
            balanced = False
        index = index + 1
    return balanced and s.isEmpty()
def matches(open, close):
    """Return True when *close* is the closing symbol paired with *open*.

    Note: the parameter name ``open`` shadows the builtin; it is kept for
    call-site compatibility.
    """
    opens = "([{"
    closers = ")]}"
    # BUG FIX: removed leftover Python-2 debug `print` statements that
    # spammed stdout on every comparison (and broke Python 3 parsing).
    return opens.index(open) == closers.index(close)
print genparanCheck('({})') |
10,377 | 8aa370e39e796356a423f2a91cbb9e58617e854d | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tempest_lib.exceptions import Conflict
from tempest_lib.exceptions import Forbidden
from functionaltests.common import datagen
from functionaltests.api.v2.base import DesignateV2Test
from functionaltests.api.v2.clients.zone_client import ZoneClient
class ZoneTest(DesignateV2Test):
    """CRUD tests for the Designate v2 zones API, run as the default user."""

    def setUp(self):
        super(ZoneTest, self).setUp()
        self.increase_quotas(user='default')

    def _create_zone(self, zone_model, user='default'):
        """POST *zone_model* as *user*, wait for the zone to become active,
        and return the (response, model) pair."""
        resp, model = ZoneClient.as_user(user).post_zone(zone_model)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user(user).wait_for_zone(model.id)
        return resp, model

    def test_list_zones(self):
        """Listing zones after creating one returns a non-empty collection."""
        self._create_zone(datagen.random_zone_data())
        resp, model = ZoneClient.as_user('default').list_zones()
        self.assertEqual(resp.status, 200)
        self.assertGreater(len(model.zones), 0)

    def test_create_zone(self):
        """A random zone can be created by the default user."""
        self._create_zone(datagen.random_zone_data(), user='default')

    def test_update_zone(self):
        """PATCHing a zone updates ttl/email but keeps id and name."""
        post_model = datagen.random_zone_data()
        resp, old_model = self._create_zone(post_model)
        patch_model = datagen.random_zone_data()
        del patch_model.name  # don't try to override the zone name
        resp, new_model = ZoneClient.as_user('default').patch_zone(
            old_model.id, patch_model)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user('default').wait_for_zone(new_model.id)
        resp, model = ZoneClient.as_user('default').get_zone(new_model.id)
        self.assertEqual(resp.status, 200)
        self.assertEqual(new_model.id, old_model.id)
        self.assertEqual(new_model.name, old_model.name)
        self.assertEqual(new_model.ttl, patch_model.ttl)
        self.assertEqual(new_model.email, patch_model.email)

    def test_delete_zone(self):
        """Deleting a zone returns 202 and the zone eventually 404s."""
        resp, model = self._create_zone(datagen.random_zone_data())
        resp, model = ZoneClient.as_user('default').delete_zone(model.id)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user('default').wait_for_zone_404(model.id)
class ZoneOwnershipTest(DesignateV2Test):
    """Tests that zone ownership is enforced across tenants."""

    def setUp(self):
        # BUG FIX: this hook was named `setup` (never invoked by unittest,
        # so quotas were never raised) and called super() with the wrong
        # class (ZoneTest), which would raise TypeError if ever executed.
        super(ZoneOwnershipTest, self).setUp()
        self.increase_quotas(user='default')
        self.increase_quotas(user='alt')

    def _create_zone(self, zone_model, user):
        """POST *zone_model* as *user*, wait for it to become active,
        and return the (response, model) pair."""
        resp, model = ZoneClient.as_user(user).post_zone(zone_model)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user(user).wait_for_zone(model.id)
        return resp, model

    def test_no_create_duplicate_domain(self):
        """A zone name may exist only once, regardless of tenant."""
        zone = datagen.random_zone_data()
        self._create_zone(zone, user='default')
        self.assertRaises(Conflict,
                          lambda: self._create_zone(zone, user='default'))
        self.assertRaises(Conflict,
                          lambda: self._create_zone(zone, user='alt'))

    def test_no_create_subdomain_by_alt_user(self):
        """Another tenant may not create subzones under an owned zone."""
        zone = datagen.random_zone_data()
        subzone = datagen.random_zone_data(name='sub.' + zone.name)
        subsubzone = datagen.random_zone_data(name='sub.sub.' + zone.name)
        self._create_zone(zone, user='default')
        self.assertRaises(Forbidden,
                          lambda: self._create_zone(subzone, user='alt'))
        self.assertRaises(Forbidden,
                          lambda: self._create_zone(subsubzone, user='alt'))

    def test_no_create_superdomain_by_alt_user(self):
        """Another tenant may not create a parent of an owned zone."""
        superzone = datagen.random_zone_data()
        zone = datagen.random_zone_data(name="a.b." + superzone.name)
        self._create_zone(zone, user='default')
        self.assertRaises(Forbidden,
                          lambda: self._create_zone(superzone, user='alt'))
|
10,378 | 507c50b79710a1ad10754925c7e31d1924334001 | import datetime
from sqlalchemy import Column, Integer, String, Text, DateTime
from sqlalchemy.types import DECIMAL
from app.db.base_class import Base
class Product(Base):
    """ORM model for a purchasable product."""
    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, index=True)
    description = Column(Text, nullable=True)  # optional long-form description
    sku = Column(Integer, nullable=False)  # stock-keeping unit identifier
    price = Column(DECIMAL(10, 2), nullable=False)  # fixed-point amount, 2 decimals
    # Set on insert; utcnow yields a naive timestamp — NOTE(review): consider
    # a timezone-aware default.
    uploadAt = Column(DateTime, default=datetime.datetime.utcnow)
10,379 | 8e5aeecd09ac2781faa17d7c079b928fba0594eb | import json
import pytest
from typing import (
TYPE_CHECKING,
cast,
)
from eth_typing import (
ChecksumAddress,
)
from eth_utils import (
is_checksum_address,
is_list_like,
is_same_address,
is_string,
)
from hexbytes import (
HexBytes,
)
from web3 import (
constants,
)
from web3.datastructures import (
AttributeDict,
)
from web3.types import (
TxParams,
Wei,
)
if TYPE_CHECKING:
from web3 import ( # noqa: F401
AsyncWeb3,
Web3,
)
PRIVATE_KEY_HEX = "0x56ebb41875ceedd42e395f730e03b5c44989393c9f0484ee6bc05f933673458f"
SECOND_PRIVATE_KEY_HEX = (
"0x56ebb41875ceedd42e395f730e03b5c44989393c9f0484ee6bc05f9336712345"
)
THIRD_PRIVATE_KEY_HEX = (
"0x56ebb41875ceedd42e395f730e03b5c44989393c9f0484ee6bc05f9336754321"
)
PASSWORD = "web3-testing"
ADDRESS = "0x844B417c0C58B02c2224306047B9fb0D3264fE8c"
SECOND_ADDRESS = "0xB96b6B21053e67BA59907E252D990C71742c41B8"
PRIVATE_KEY_FOR_UNLOCK = (
"0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01"
)
ACCOUNT_FOR_UNLOCK = "0x12efDc31B1a8FA1A1e756DFD8A1601055C971E13"
class GoEthereumPersonalModuleTest:
    """Integration tests for the synchronous ``w3.geth.personal`` namespace.

    Requires a live Geth node; fixtures supply ``w3`` plus unlockable
    accounts and their passphrase.
    """

    def test_personal_import_raw_key(self, w3: "Web3") -> None:
        """Importing a known private key yields its known address."""
        actual = w3.geth.personal.import_raw_key(PRIVATE_KEY_HEX, PASSWORD)
        assert actual == ADDRESS

    def test_personal_list_accounts(self, w3: "Web3") -> None:
        """list_accounts returns a non-empty list of checksum addresses."""
        accounts = w3.geth.personal.list_accounts()
        assert is_list_like(accounts)
        assert len(accounts) > 0
        assert all((is_checksum_address(item) for item in accounts))

    def test_personal_list_wallets(self, w3: "Web3") -> None:
        """list_wallets returns wallet dicts with address/url/status fields."""
        wallets = w3.geth.personal.list_wallets()
        assert is_list_like(wallets)
        assert len(wallets) > 0
        assert is_checksum_address(wallets[0]["accounts"][0]["address"])
        assert is_string(wallets[0]["accounts"][0]["url"])
        assert is_string(wallets[0]["status"])
        assert is_string(wallets[0]["url"])

    def test_personal_lock_account(
        self, w3: "Web3", unlockable_account_dual_type: ChecksumAddress
    ) -> None:
        """Locking an unlockable account must not raise."""
        # TODO: how do we test this better?
        w3.geth.personal.lock_account(unlockable_account_dual_type)

    def test_personal_unlock_account_success(
        self,
        w3: "Web3",
        unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """Unlocking with the correct passphrase returns True."""
        result = w3.geth.personal.unlock_account(
            unlockable_account_dual_type, unlockable_account_pw
        )
        assert result is True

    def test_personal_unlock_account_failure(
        self, w3: "Web3", unlockable_account_dual_type: ChecksumAddress
    ) -> None:
        """Unlocking with a wrong passphrase raises ValueError."""
        with pytest.raises(ValueError):
            w3.geth.personal.unlock_account(
                unlockable_account_dual_type, "bad-password"
            )

    def test_personal_new_account(self, w3: "Web3") -> None:
        """new_account creates an account and returns its checksum address."""
        new_account = w3.geth.personal.new_account(PASSWORD)
        assert is_checksum_address(new_account)

    def test_personal_send_transaction(
        self,
        w3: "Web3",
        unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """A 1-wei self-send goes through and echoes back all tx fields."""
        # Sanity check: the account must be funded before we spend from it.
        assert (
            w3.eth.get_balance(unlockable_account_dual_type) > constants.WEI_PER_ETHER
        )
        txn_params: TxParams = {
            "from": unlockable_account_dual_type,
            "to": unlockable_account_dual_type,
            "gas": 21000,
            "value": Wei(1),
            "gasPrice": w3.to_wei(1, "gwei"),
        }
        txn_hash = w3.geth.personal.send_transaction(txn_params, unlockable_account_pw)
        assert txn_hash
        transaction = w3.eth.get_transaction(txn_hash)
        assert is_same_address(
            transaction["from"], cast(ChecksumAddress, txn_params["from"])
        )
        assert is_same_address(
            transaction["to"], cast(ChecksumAddress, txn_params["to"])
        )
        assert transaction["gas"] == txn_params["gas"]
        assert transaction["value"] == txn_params["value"]
        assert transaction["gasPrice"] == txn_params["gasPrice"]

    def test_personal_sign_and_ecrecover(
        self,
        w3: "Web3",
        unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """A signed message must ec_recover back to the signing address."""
        message = "test-web3-geth-personal-sign"
        signature = w3.geth.personal.sign(
            message, unlockable_account_dual_type, unlockable_account_pw
        )
        signer = w3.geth.personal.ec_recover(message, signature)
        assert is_same_address(signer, unlockable_account_dual_type)

    @pytest.mark.xfail(
        reason="personal_sign_typed_data JSON RPC call has not been released in geth"
    )
    def test_personal_sign_typed_data(
        self,
        w3: "Web3",
        unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """Signing an EIP-712 typed message yields the expected 65-byte signature."""
        typed_message = """
            {
                "types": {
                    "EIP712Domain": [
                        {"name": "name", "type": "string"},
                        {"name": "version", "type": "string"},
                        {"name": "chainId", "type": "uint256"},
                        {"name": "verifyingContract", "type": "address"}
                    ],
                    "Person": [
                        {"name": "name", "type": "string"},
                        {"name": "wallet", "type": "address"}
                    ],
                    "Mail": [
                        {"name": "from", "type": "Person"},
                        {"name": "to", "type": "Person"},
                        {"name": "contents", "type": "string"}
                    ]
                },
                "primaryType": "Mail",
                "domain": {
                    "name": "Ether Mail",
                    "version": "1",
                    "chainId": "0x01",
                    "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
                },
                "message": {
                    "from": {
                        "name": "Cow",
                        "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
                    },
                    "to": {
                        "name": "Bob",
                        "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
                    },
                    "contents": "Hello, Bob!"
                }
            }
        """
        signature = HexBytes(
            w3.geth.personal.sign_typed_data(
                json.loads(typed_message),
                unlockable_account_dual_type,
                unlockable_account_pw,
            )
        )
        expected_signature = HexBytes(
            "0xc8b56aaeefd10ab4005c2455daf28d9082af661ac347cd"
            "b612d5b5e11f339f2055be831bf57a6e6cb5f6d93448fa35"
            "c1bd56fe1d745ffa101e74697108668c401c"
        )
        assert signature == expected_signature
        # 65-byte signature.
        assert len(signature) == 32 + 32 + 1
class GoEthereumAsyncPersonalModuleTest:
    """Async variants of the geth personal-namespace tests (``AsyncWeb3``)."""

    @pytest.mark.asyncio
    async def test_async_sign_and_ec_recover(
        self,
        async_w3: "AsyncWeb3",
        async_unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """A signed message must ec_recover back to the signing address."""
        message = "This is a test"
        signature = await async_w3.geth.personal.sign(
            message, async_unlockable_account_dual_type, unlockable_account_pw
        )
        address = await async_w3.geth.personal.ec_recover(message, signature)
        assert is_same_address(async_unlockable_account_dual_type, address)

    @pytest.mark.asyncio
    async def test_async_import_key(self, async_w3: "AsyncWeb3") -> None:
        """import_raw_key returns an address for a fresh private key."""
        address = await async_w3.geth.personal.import_raw_key(
            THIRD_PRIVATE_KEY_HEX, "Testing"
        )
        assert address is not None

    @pytest.mark.asyncio
    async def test_async_list_accounts(self, async_w3: "AsyncWeb3") -> None:
        """list_accounts returns at least one account."""
        accounts = await async_w3.geth.personal.list_accounts()
        assert len(accounts) > 0

    @pytest.mark.asyncio
    async def test_async_list_wallets(self, async_w3: "AsyncWeb3") -> None:
        """list_wallets returns AttributeDict wallet entries."""
        wallets = await async_w3.geth.personal.list_wallets()
        assert isinstance(wallets[0], AttributeDict)

    @pytest.mark.asyncio
    async def test_async_new_account(self, async_w3: "AsyncWeb3") -> None:
        """new_account creates an account and returns its checksum address."""
        passphrase = "Create New Account"
        account = await async_w3.geth.personal.new_account(passphrase)
        assert is_checksum_address(account)

    @pytest.mark.asyncio
    async def test_async_unlock_lock_account(
        self,
        async_w3: "AsyncWeb3",
        async_unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """An account can be unlocked and then locked again."""
        unlocked = await async_w3.geth.personal.unlock_account(
            async_unlockable_account_dual_type, unlockable_account_pw
        )
        assert unlocked is True
        locked = await async_w3.geth.personal.lock_account(
            async_unlockable_account_dual_type
        )
        assert locked is True

    @pytest.mark.asyncio
    async def test_async_send_transaction(
        self,
        async_w3: "AsyncWeb3",
        async_unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """A small self-send submitted via personal.send_transaction succeeds."""
        tx_params = TxParams()
        tx_params["to"] = async_unlockable_account_dual_type
        tx_params["from"] = async_unlockable_account_dual_type
        tx_params["value"] = Wei(123)
        response = await async_w3.geth.personal.send_transaction(
            tx_params, unlockable_account_pw
        )
        assert response is not None

    @pytest.mark.xfail(
        reason="personal_signTypedData JSON RPC call has not been released in geth"
    )
    @pytest.mark.asyncio
    async def test_async_sign_typed_data(
        self,
        async_w3: "AsyncWeb3",
        async_unlockable_account_dual_type: ChecksumAddress,
        unlockable_account_pw: str,
    ) -> None:
        """A typed-data signature must recover to the signing address."""
        message = {"message": "This is a test"}
        signature = await async_w3.geth.personal.sign_typed_data(
            message, async_unlockable_account_dual_type, unlockable_account_pw
        )
        address = await async_w3.geth.personal.ec_recover(
            json.dumps(message), signature
        )
        assert is_same_address(async_unlockable_account_dual_type, address)
|
10,380 | 7d24955d06eabb452218b9187089f5bf9b0b9860 | try:
import os
import sys
import difflib
import hashlib
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QColor
except Exception as e:
print('Error:', e)
os.system("pause")
sys.exit()
IGNORE_FILES_EXTS = 'jpg', 'jpeg', 'png', 'ttf', 'mo', 'so', 'bin', 'cgi', 'ico'
DELIMITER = '-' * 75
RED = 250, 20, 20
GREEN = 20, 120, 20
BLUE1 = 20, 20, 120
BLUE2 = 20, 20, 250
CYAN = 20, 160, 160
GRAY = 120, 120, 120
class Ui_Form(object):
    """Hand-written Qt-Designer-style UI: two path fields, a coloured output
    browser, and Start/Clear buttons."""

    def setupUi(self, Form):
        """Build the widget tree and layouts onto *Form*."""
        Form.setObjectName("Form")
        Form.resize(700, 700)
        self.verticalLayout = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout.setObjectName("verticalLayout")
        self.pathToFolder1 = QtWidgets.QLineEdit(Form)
        self.pathToFolder1.setObjectName("pathToFolder_1")
        self.pathToFolder1.setPlaceholderText('Path to folder 1')
        self.verticalLayout.addWidget(self.pathToFolder1)
        self.pathToFolder2 = QtWidgets.QLineEdit(Form)
        self.pathToFolder2.setObjectName("pathToFolder_2")
        self.pathToFolder2.setPlaceholderText('Path to folder 2')
        self.verticalLayout.addWidget(self.pathToFolder2)
        self.textBrowser = QtWidgets.QTextBrowser(Form)
        self.textBrowser.setObjectName("textBrowser")
        self.verticalLayout.addWidget(self.textBrowser)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pushButtonStart = QtWidgets.QPushButton(Form)
        self.pushButtonStart.setObjectName("pushButtonStart")
        self.horizontalLayout.addWidget(self.pushButtonStart)
        self.pushButtonClear = QtWidgets.QPushButton(Form)
        self.pushButtonClear.setObjectName("pushButtonClear")
        self.horizontalLayout.addWidget(self.pushButtonClear)
        # NOTE(review): the same QSpacerItem instance is added twice; Qt
        # layouts normally take ownership of items — confirm this is intended.
        self.horizontalLayout.addItem(spacerItem)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Install the translatable window title and button captions."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "diffFiles"))
        self.pushButtonStart.setText(_translate("Form", "Start"))
        self.pushButtonClear.setText(_translate("Form", "Clear"))
class BrowserHandler(QtCore.QObject):
    """Worker object that diffs two directory trees and streams coloured
    results to the GUI.

    Lives in a background QThread; all output is delivered through the
    newTextAndColor signal (text, QColor), keeping widget access on the
    GUI thread.
    """
    newTextAndColor = QtCore.pyqtSignal(str, object)

    def compare_files(self, path_to_file1, path_to_file2, mode="r", encoder=None):
        """Diff two files and emit each differing line in green.

        mode "r" compares text line-by-line with difflib; mode "rb"
        compares MD5 digests of the raw bytes instead.
        """
        if mode not in ("r", "rb"):
            # BUG FIX: an unsupported mode previously emitted this error but
            # then fell through to iterate the unbound name `diff` (NameError)
            # and leaked the two file handles it had already opened.
            self.newTextAndColor.emit('Wrong mode selected!', QColor(*RED))
            return
        if encoder:
            file1 = open(path_to_file1, mode, encoding=encoder)
            file2 = open(path_to_file2, mode, encoding=encoder)
        else:
            file1 = open(path_to_file1, mode)
            file2 = open(path_to_file2, mode)
        try:
            if mode == "r":
                diff = difflib.unified_diff(
                    file1.readlines(),
                    file2.readlines(),
                    fromfile=path_to_file1,
                    tofile=path_to_file2)
            else:  # mode == "rb": compare digests, not raw contents
                hash1 = hashlib.md5()
                hash2 = hashlib.md5()
                hash1.update(file1.read())
                hash2.update(file2.read())
                diff = difflib.unified_diff(
                    ['md5: {}'.format(hash1.hexdigest())],
                    ['md5: {}'.format(hash2.hexdigest())],
                    fromfile=path_to_file1,
                    tofile=path_to_file2)
            delimiter_flag = False
            for line in diff:
                delimiter_flag = True
                self.newTextAndColor.emit(line, QColor(*GREEN))
            if delimiter_flag:
                self.newTextAndColor.emit(DELIMITER, QColor(*GRAY))
        finally:
            # BUG FIX: handles are now closed even if reading/diffing raises.
            file1.close()
            file2.close()

    def bin_walk(self, path1, path2):
        """Walk *path1*, mirror each directory into *path2*, report missing
        folders/files, and diff every file present on both sides (skipping
        IGNORE_FILES_EXTS)."""
        # Strip trailing separators/spaces (rstrip with a char set replaces
        # the previous character-at-a-time while loops).
        path1 = path1.rstrip('\\/ ')
        path2 = path2.rstrip('\\/ ')
        for path in (path1, path2):
            if not os.path.exists(path) or not os.path.isdir(path):
                self.newTextAndColor.emit('Path doesn\'t exist: {}'.format(path), QColor(*RED))
                return
        for (dirpath_1, dirnames_1, filenames_1) in os.walk(path1):
            filenames_1 = set(filenames_1)
            dirnames_1 = set(dirnames_1)
            filenames_2 = set()
            dirnames_2 = set()
            # Corresponding directory on the second tree.
            dirpath_2 = os.path.join(path2, dirpath_1[len(path1)+1:])
            dirpath_2 = dirpath_2.rstrip('\\/ ')
            if os.path.exists(dirpath_2):
                for entry in os.listdir(dirpath_2):
                    if os.path.isfile(os.path.join(dirpath_2, entry)):
                        filenames_2.add(entry)
                    elif os.path.isdir(os.path.join(dirpath_2, entry)):
                        dirnames_2.add(entry)
            # Symmetric differences: entries present on one side only.
            diff_in_files = filenames_1 ^ filenames_2
            diff_in_folders = dirnames_1 ^ dirnames_2
            filenames = filenames_1 & filenames_2
            if len(diff_in_folders) != 0:
                for i, path in enumerate((dirpath_1, dirpath_2)):
                    for folder in diff_in_folders:
                        if not os.path.isdir((os.path.join(path, folder))):
                            self.newTextAndColor.emit('Folder doesn\'t exist: {}'.format(os.path.join(path, folder)), QColor(*BLUE1))
                            # Also list every file under the counterpart folder
                            # so the report shows exactly what is missing.
                            for (missing_paths, _, missing_files) in os.walk(os.path.join(dirpath_2 if i == 0 else dirpath_1, folder)):
                                for mis_file in missing_files:
                                    missing_path = os.path.join(dirpath_1 if i == 0 else dirpath_2, missing_paths[len(dirpath_2 if i == 0 else dirpath_1)+1:], mis_file)
                                    self.newTextAndColor.emit('File doesn\'t exist: {}'.format(missing_path), QColor(*BLUE2))
                self.newTextAndColor.emit(DELIMITER, QColor(*GRAY))
            if len(diff_in_files) != 0:
                for path in (dirpath_1, dirpath_2):
                    for file in diff_in_files:
                        if not os.path.isfile((os.path.join(path, file))):
                            self.newTextAndColor.emit('File doesn\'t exist: {}'.format(os.path.join(path, file)), QColor(*BLUE2))
                self.newTextAndColor.emit(DELIMITER, QColor(*GRAY))
            for file in filenames:
                if not file.lower().endswith(IGNORE_FILES_EXTS):
                    filename1 = os.path.join(dirpath_1, file)
                    filename2 = os.path.join(dirpath_2, file)
                    # Try utf-8, then utf16, then fall back to a binary
                    # digest comparison.
                    try:
                        self.compare_files(filename1, filename2, encoder="utf-8")
                    except UnicodeDecodeError:
                        try:
                            self.compare_files(filename1, filename2, encoder="utf16")
                        except UnicodeError:
                            try:
                                self.compare_files(filename1, filename2, mode="rb")
                            except Exception:
                                # BUG FIX: was a bare `except:`, which would
                                # also swallow KeyboardInterrupt/SystemExit.
                                self.newTextAndColor.emit('Can\'t open file: {}'.format(filename2), QColor(*GRAY))
                                self.newTextAndColor.emit(DELIMITER, QColor(*GRAY))

    def run(self):
        """Slot: read both path fields from the main window and run the diff."""
        self.newTextAndColor.emit('---Start---', QColor(*CYAN))
        path1 = window.ui.pathToFolder1.displayText()
        path2 = window.ui.pathToFolder2.displayText()
        self.bin_walk(path1, path2)
        self.newTextAndColor.emit('----End----', QColor(*CYAN))
class MyWindow(QtWidgets.QWidget):
    """Main window: hosts the Ui_Form widgets and runs BrowserHandler in a
    worker thread so the GUI stays responsive during long diffs."""

    def __init__(self, parent=None):
        # NOTE(review): `parent` is accepted but not forwarded to
        # QWidget.__init__ — confirm whether parenting is intended.
        super().__init__()
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        # Move the diff worker off the GUI thread.
        self.thread = QtCore.QThread()
        self.browserHandler = BrowserHandler()
        self.browserHandler.moveToThread(self.thread)
        # Results are marshalled back to the GUI thread via this signal.
        self.browserHandler.newTextAndColor.connect(self.addNewTextAndColor)
        self.ui.pushButtonStart.clicked.connect(self.browserHandler.run)
        self.ui.pushButtonClear.clicked.connect(self.clearBrowser)
        self.thread.start()

    @QtCore.pyqtSlot(str, object)
    def addNewTextAndColor(self, string, color):
        """Append *string* to the text browser in the given colour."""
        self.ui.textBrowser.setTextColor(color)
        self.ui.textBrowser.append(string)

    @QtCore.pyqtSlot()
    def clearBrowser(self):
        """Erase all output from the text browser."""
        self.ui.textBrowser.clear()
# Launch the Qt application; `window` stays module-global because
# BrowserHandler.run reads its path fields.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = MyWindow()
    window.show()
    sys.exit(app.exec())
|
10,381 | f74ee2b88d89b83d93a0d45b5d30826f093e5c5a | import itertools
import os
import random
import pytest
from polyswarmd.utils.bloom import BloomFilter
@pytest.fixture
def log_entries():
    """Produce a random list of (address, topic-list) pairs for bloom tests.

    NOTE(review): both ``range(1, randint(0, n))`` expressions can yield an
    empty sequence (randint may return 0 or 1), so the fixture sometimes
    produces no entries / no topics — confirm this is intended.
    """
    def _random_address():
        return os.urandom(20)

    def _random_topic():
        return os.urandom(32)

    entries = []
    for _ in range(1, random.randint(0, 30)):
        address = _random_address()
        topics = [_random_topic() for _ in range(1, random.randint(0, 4))]
        entries.append((address, topics))
    return entries
def check_bloom(bloom, log_entries):
    """Assert every address and topic from *log_entries* tests positive in *bloom*."""
    for address, topics in log_entries:
        for member in [address] + list(topics):
            assert member in bloom
def test_bloom_filter_add_method(log_entries):
    """Entries inserted one by one via add() must all be reported present."""
    flt = BloomFilter()
    for address, topics in log_entries:
        flt.add(address)
        for topic in topics:
            flt.add(topic)
    check_bloom(flt, log_entries)
def test_bloom_filter_extend_method(log_entries):
    """Entries inserted via extend() must all be reported present."""
    flt = BloomFilter()
    for address, topics in log_entries:
        flt.extend([address])
        flt.extend(topics)
    check_bloom(flt, log_entries)
def test_bloom_filter_from_iterable_method(log_entries):
    """A filter built with from_iterable() must contain every entry."""
    flattened = itertools.chain.from_iterable(
        itertools.chain([address], topics)
        for address, topics in log_entries
    )
    check_bloom(BloomFilter.from_iterable(flattened), log_entries)
def test_casting_to_integer():
    """int() of an empty filter is 0; after two adds it equals a known
    reference value (large literal split across string chunks)."""
    bloom = BloomFilter()

    assert int(bloom) == 0

    bloom.add(b'value 1')
    bloom.add(b'value 2')

    assert int(bloom) == int(
        '63119152483043774890037882090529841075600744123634985501563996'
        '49538536948165624479433922134690234594539820621615046612478986'
        '72305890903532059401028759565544372404512800814146245947429340'
        '89705729059810916441565944632818634262808769353435407547341248'
        '57159120012171916234314838712163868338766358254974260070831608'
        '96074485863379577454706818623806701090478504217358337630954958'
        '46332941618897428599499176135798020580888127915804442383594765'
        '16518489513817430952759084240442967521334544396984240160630545'
        '50638819052173088777264795248455896326763883458932483359201374'
        '72931724136975431250270748464358029482656627802817691648'
    )
def test_casting_to_binary():
    # An empty filter renders as binary zero.
    bloom = BloomFilter()
    assert bin(bloom) == '0b0'
    bloom.add(b'value 1')
    bloom.add(b'value 2')
    # Known-good bit pattern for exactly these two members.
    assert bin(bloom) == (
        '0b1000000000000000000000000000000000000000001000000100000000000000'
        '000000000000000000000000000000000000000000000010000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000001000000'
        '000000000000000000000000000000000000000000000000000000000000000010'
        '000000000000000000000000000000000000000100000000000000000000001000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000010000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000010000000000001000000000000001000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000001000000000000000000000000000000000000000000000000000100000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000000000000000000000'
        '000000000000000000000000000000000000000000000000100000000000000000'
        '00000000000000000000000000000000000001000000000000000000000000'
    )
def test_combining_filters():
    """Union via ``|``, ``+``, ``|=`` and ``+=`` preserves membership of
    both operands."""
    first = (b'a', b'b', b'c')
    second = (b'd', b'e', b'f')
    b1 = BloomFilter()
    b2 = BloomFilter()
    for member in first:
        b1.add(member)
    for member in second:
        b2.add(member)
    # One member shared by both filters.
    b1.add(b'common')
    b2.add(b'common')
    # Each filter contains its own members and none of the other's.
    for member in first:
        assert member in b1
    for member in first:
        assert member not in b2
    for member in second:
        assert member in b2
    for member in second:
        assert member not in b1
    assert b'common' in b1
    assert b'common' in b2
    everything = first + second + (b'common',)
    # Non-mutating unions.
    b3 = b1 | b2
    for member in everything:
        assert member in b3
    b4 = b1 + b2
    for member in everything:
        assert member in b4
    # In-place unions, starting from a copy of b1's bit state.
    b5 = BloomFilter(int(b1))
    b5 |= b2
    for member in everything:
        assert member in b5
    b6 = BloomFilter(int(b1))
    b6 += b2
    for member in everything:
        assert member in b6
|
10,382 | ec6bfb386f8c36a03d08e4c5117468bf318328e6 | # Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        """Wire each node's ``next`` pointer to the node to its right on the
        same level (level-order traversal of an arbitrary binary tree).

        The original used ``list.pop(0)``, which is O(n) per call and made
        the whole traversal O(n^2); walking each level by index is O(n).
        """
        if root is None:
            return
        level = [root]
        while level:
            next_level = []
            last = len(level) - 1
            for i, node in enumerate(level):
                # The rightmost node on a level keeps next = None
                # (the constructor's default).
                if i < last:
                    node.next = level[i + 1]
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
|
10,383 | 9ef41c2ea05ebcfa5f20bb062e0248ed05f973d5 | # File to scrape recipes from allrecipes.com
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
# Attempts to get the content at the specified url
def simple_get(url):
    """Download *url* and return the raw response body, or None on failure.

    A response only counts as a success when it is HTTP 200 with an HTML
    content type (see is_good_response).
    """
    try:
        with closing(get(url, stream = True)) as resp:
            if is_good_response(resp):
                return resp.content
            else:
                return None
    except RequestException as e:
        # Bug fix: the second placeholder was {0}, which printed the URL
        # twice and dropped the error text entirely.
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
# Checks if the response seems to be HTML (returns true if so)
def is_good_response(resp):
    """Return True when *resp* is an HTTP 200 response carrying HTML.

    Uses ``headers.get`` so a missing Content-Type header yields False
    instead of raising KeyError; the original indexed the header (and
    called .lower()) before its None-check, making that check unreachable.
    """
    content_type = resp.headers.get('Content-Type', '')
    return resp.status_code == 200 and 'html' in content_type.lower()
# Extract product heading/title
def extract_name(url):
    """Return the product title scraped from *url*, or None if unavailable."""
    # Get response from url
    response = simple_get(url)
    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        # The title lives in the <h1> of the first element with class "hf-Bot".
        for title in html.select("[class=hf-Bot]"):
            return title.h1.string
# Extract product cost
def extract_cost(url):
    """Return the product price as a float, or None when it cannot be found."""
    # Get response from url
    response = simple_get(url)
    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        # The numeric price is stored in the element's `content` attribute.
        for span in html.select("[class=price-characteristic]"):
            return float(span['content'])
# Extract price per unit
def extract_price_per_unit(url):
    """Return [price, units] parsed from a "$x.xx / unit" string, or None.

    NOTE(review): price is returned as a string (only '$' stripped), unlike
    extract_cost which returns a float — confirm callers expect that.
    """
    # Get response from url
    response = simple_get(url)
    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        for ppu in html.select("[class*=prod-ProductOffer-ppu]"):
            words = ppu.string.split(' / ')
            price = words[0].replace('$', '')
            units = words[1]
            return [price, units]
# Prints errors
def log_error(e):
    """Report a scraping error; currently just echoes it to stdout."""
    print(e)
# Scrapes all the food products on walmart
def extract_food_data():
    """Walk every results page of the food category, printing name/cost/ppu."""
    base_url = "https://walmart.com/browse/food/976759/"
    # Check that the page exists
    i = 1
    while does_page_exist(base_url, i):
        for url in extract_food_urls(base_url + "?page=" + str(i)):
            # NOTE(review): each helper re-downloads the same product page, so
            # every product costs three HTTP requests — consider fetching once
            # and sharing the parsed document.
            name = extract_name(url)
            cost = extract_cost(url)
            ppu = extract_price_per_unit(url)
            if (name is not None) and (cost is not None) and (ppu is not None):
                print(name + " " + str(cost) + " " + str(ppu))
        i += 1
# Checks if the desired page number exists
def does_page_exist(base_url, num):
    """Return True when browse page *num* fetches and renders a <body>."""
    # Get response from built url
    url = base_url + "?page=" + str(num)
    response = simple_get(url)
    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        # Return false if fbody is empty
        # NOTE(review): any well-formed HTML page has a <body>, so this is
        # True for every successfully fetched page — the loop in
        # extract_food_data may only stop when simple_get() itself fails.
        # Verify the intended "page not found" sentinel.
        for error in html.select('body'):
            if error is not None:
                return True
        return False
    return False
# Extract food urls from page
def extract_food_urls(url):
    """Return the de-duplicated product URLs found on one results page.

    Returns an empty list when the page could not be fetched; the original
    passed None straight into BeautifulSoup and raised TypeError.
    """
    # Get response from built url
    response = simple_get(url)
    if response is None:
        return []
    html = BeautifulSoup(response, 'html.parser')
    foods = set()
    # Each product card links to its product page via this image anchor.
    for food in html.find_all(attrs={'class': 'search-result-productimage'}):
        foods.add("https://walmart.com" + food.div.a['href'])
    return list(foods)
# Module entry point: kicks off the full scrape immediately on import/run.
extract_food_data()
|
10,384 | 2e825df4c686ca657196cbf4d6e97081b61e3c39 | import requests
# RSA private-key recovery demo: with both primes p and q known, the private
# exponent d follows directly from e and phi(n).
p = 12027524255478748885956220793734512128733387803682075433653899983955179850988797899869146900809131611153346817050832096022160146366346391812470987105415233
q = 12131072439211271897323671531612440428472427633701410925634549312301964373042085619324197365322416866541017057361365214171711713797974299334871062829803541
e = 65537
# Euler's totient of n = p*q (both factors known, so this is exact).
phi = (p - 1) * (q - 1)
print("Breaking RSA key ...")
# Modular inverse of e mod phi; pow(e, -1, phi) requires Python >= 3.8.
d = pow(e, -1, phi)
f = open("prerequisites/announcement_encrypted.md", "r")
lines = f.readlines()
sentence = ""
print("Decoding announcemment_encrypted.md ...")
# Each line holds one ciphertext integer encrypting a single character.
for line in lines:
    c = int(line)
    m = pow(c, d, p * q)
    sentence += chr(m)
print("File contents :")
print(sentence)
# The decrypted text ends with "URL: <path>"; request that path locally.
url = sentence.split("URL: ")
r = requests.get("http://127.0.0.1:3000" + url[1])
print("\nStatus: " + str(r.status_code))
|
10,385 | 8f91c57ad1047ad76a08f620f4f04014fb2671ef | import sys
import os
# Kaldi-style data prep: one directory + wav.scp per utterance listed in the
# manifest. NOTE(review): Python 2 only — uses the removed `file()` builtin;
# under Python 3 these calls would be open(...).
uttlst = file('lst/thu_ev.lst').readlines()
for i in uttlst:
    # Each line: "<utt-id> <rest...>"; drop the trailing newline and split.
    each = i[:-1].split(' ')
    os.makedirs('data/'+each[0])
    fout = file('data/'+each[0]+'/wav.scp','w')
    fout.write(i)
|
10,386 | efdc92912cabf3f0f253fdf35e201fe0587100ff | #importing library
import pandas as pd
from keras import models
from keras import layers
from keras.datasets import boston_housing
from keras.models import Model
from sklearn.model_selection import cross_val_score
from keras.layers import Input, SimpleRNN, Dense,LSTM,GRU
from keras import optimizers
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from keras import losses
from keras import metrics
from more_itertools import unique_everseen
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import mean_squared_error
from math import sqrt
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import time as time
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import VotingRegressor
from tpot import TPOTRegressor
## Text feature extraction for single 'id' by taking hidden state of LSTM
"""NOTE-> Trained on kaggle-gpu total time taken to train->1 hours approx"""
tic =time.time()
k=0
arr=[]
# Collects the csv names that failed to load/process.
l=[]
tmp_0_forcast=pd.DataFrame()
for i in range(300):
    try:
        add ='tmp_'+ str(i)+'.csv'
        print(add)
        tmp=pd.read_csv('/kaggle/input/'+add)
        df_tm=tmp.set_index('id')
        count=list(tmp.groupby(['id']).size())
        # unique_everseen keeps first-seen order, unlike set().
        id_list=list(unique_everseen(list(df_tm.index)))# to maintain sequence
        new=dict(zip(id_list,count))
        a=0
        tmp_train=[]
        # Slice the frame into one chunk of consecutive rows per id.
        # (Loop variable renamed from `i`, which shadowed the outer loop.)
        for uid in id_list:
            n=new[uid]
            b=a+n
            tmp_train.append(tmp[a:b])
            a=b
        predg=[]
        for j in range(len(tmp_train)):
            # define model: a 1-unit LSTM whose hidden sequence is the feature
            a1=tmp_train[j].drop(['id'],axis=1)
            a = np.array(a1.values.reshape(1,a1.shape[1],a1.shape[0]))
            inputs1 = Input(shape=(a1.shape[1],a1.shape[0]))
            lstm1, state_h,state_c = LSTM(1, return_sequences=True, return_state=True)(inputs1)
            model = Model(inputs=inputs1, outputs=[lstm1, state_h,state_c])
            #print(model.predict(a))
            pred,q,e= model.predict(a,verbose=1)
            predg.append(pred)
        # Stack the per-id LSTM outputs into a (num_ids, 300) feature frame.
        predgg=np.array(predg).reshape(len(tmp_train),300)
        predgg = pd.DataFrame(predgg)
        tmp_forcast=predgg.set_index([id_list])
        frames = [tmp_forcast,tmp_0_forcast]
        tmp_0_forcast=pd.concat(frames)
        print(tmp_0_forcast.shape)
    # Bug fix: was a bare `except:` which also swallowed KeyboardInterrupt.
    except Exception:
        l.append(add)
        continue
toc =time.time()
print("took time in loading 241 text features by extracting hidden state of LSTM "+str((toc-tic))+" sec")
tmp_0_forcast.index.name = 'id'
tmp_0_forcast.to_csv('final_lstm.csv')
# Bug fix: `final_lstm` was never defined (NameError); report the frame built.
print("final_lstm shape"+str(tmp_0_forcast.shape))
## Combining text features and training data
final=pd.read_csv('final_lstm.csv')
train=pd.read_csv('train.csv')
test=pd.read_csv('test.csv')
final=final.set_index('id')
train=train.set_index('id')
test=test.set_index('id')
s1=set(final.index)
s2=set(train.index)
s3=set(test.index)
### Checking if all the 'id' in train are present in final_lstm
not_in_index=list()
for i in s2:
    if i not in s1:
        not_in_index.append(i)
## Removing 'id' which are not present in train
train=train.drop(not_in_index,axis=0)
## Concating both the datasets
l_train=list(train.index)
new_final=pd.DataFrame(columns=list(final.columns))
for i in l_train:
    new_final=new_final.append(final[final.index==i])
traineco=pd.concat([train,new_final],axis=1)
#only conataing id that are present in training dataset, in same order as id's in training dataset
# Bug fix: traineco was built but never written, yet 'traineco_lstm.csv' is
# read back below; persist it like the test split.
traineco.to_csv('traineco_lstm.csv')
## Same for the test
not_in_index=list()
for i in s3:
    if i not in s1:
        not_in_index.append(i)
print("id not in final_lstm: "+str(not_in_index))
l_train=list(test.index)
new_final=pd.DataFrame(columns=list(final.columns))
for i in l_train:
    new_final=new_final.append(final[final.index==i])
testeco=pd.concat([test,new_final],axis=1)
testeco.to_csv('testeco_lstm.csv')
print("test data after combining :"+str(testeco.shape))
#Now train the model
test= pd.read_csv("testeco_lstm.csv")
train = pd.read_csv("traineco_lstm.csv")
gg=train.fillna(train.median())
y=gg['target']
X=gg.drop(['id','target'],axis=1)
print("X_shape:"+str(X.shape)," , y_shape :"+str(y.shape))
X_train, X_cv, y_train, y_cv = train_test_split(X, y, test_size=0.2, random_state=42)
from sklearn.ensemble import ExtraTreesRegressor
extra_tree = ExtraTreesRegressor(n_estimators=500,random_state=1234)
extra_tree.fit(X_train, y_train)
ypredictions = extra_tree.predict(X_cv)
print(" Root Mean Absolute Error : ",sqrt(mean_squared_error(ypredictions, y_cv)))
# Refit on the full training set before predicting the test split.
extra_tree.fit(X, y)
test2=test.drop(['id'],axis=1)
test2=test2.fillna(test2.median())
predictions = extra_tree.predict(test2)
pred=pd.DataFrame(predictions)
pred=pred.set_index([test['id']])
pred.to_csv("extra_tree_500.csv")
#Our best submission is extra_tree_500 giving accuracy-> 0.98098 on leaderboard,By Default ExtraTreesRegressor (n_estimators=500,random_state=1234)
10,387 | 72bff87f8b35451e1b25dd5085dfff409389892c | # coding: UTF-8
from list import LinkedList
'''
class SimpleStack: Stack with simple implementation(built-in arraylist).
class ListStack: Pushdown Stack(linked-list implmentation).
'''
class SimpleStack(object):
    '''
    LIFO stack backed by Python's built-in list.
    -------------
    Stack(): init queue
    push(item): push an item
    pop(): remove the most recently added item
    top(): get the most recently added item
    empty = isEmpty(): tell stack is empty
    stackSize = size(): get size of stack
    clear(): reset the stack
    '''
    def __init__(self):
        self.array = []
        self.num = 0
    def push(self, item):
        '''Append *item* on top of the stack.'''
        self.array.append(item)
        self.num += 1
    def pop(self):
        '''Discard the most recently pushed item; no-op when empty.'''
        if self.isEmpty():
            return
        self.array.pop()
        self.num -= 1
    def top(self):
        '''Return the most recently pushed item, or None when empty.'''
        if self.isEmpty():
            return None
        return self.array[-1]
    def isEmpty(self):
        '''True when the stack holds no items.'''
        return not self.num
    def size(self):
        '''Number of items currently stored.'''
        return self.num
    def clear(self):
        '''Drop all items and reset the counter.'''
        self.num = 0
        self.array = []
class ListStack(object):
    '''
    Pushdown Stack(linked-list implmentation).
    -------------
    Stack(): init queue
    push(item): push an item
    pop(): remove the most recently added item
    top(): get the most recently added item
    empty = isEmpty(): tell stack is empty
    stackSize = size(): get size of stack
    clear(): reset the stack
    '''
    def __init__(self):
        self.list = LinkedList()
    def push(self, item):
        # New items go at the head, so push/pop/top are all O(1).
        self.list.addHead(item)
    def isEmpty(self):
        # Delegates to the underlying list's emptiness check.
        return self.list.isEmpty()
    def pop(self):
        # Silently ignore pops on an empty stack (same contract as SimpleStack).
        if self.isEmpty():
            return
        self.list.remove(0)
    def top(self):
        # The head of the list is the most recently pushed item.
        if self.isEmpty():
            return None
        return self.list.get(0).item
    def size(self):
        return self.list.size()
    def clear(self):
        # Drop the whole list; the old nodes become garbage.
        self.list = LinkedList()
|
10,388 | 86c3ef73384556e9f63992b6bf2a1755149968bf | n=int(input())
# Read an n x n integer matrix (n was read from stdin above) and print its
# trace, i.e. the sum of the main-diagonal elements.
b=[]
s=0
for i in range(n):
    # One whitespace-separated row per input line.
    l=list(map(int,input().split()))
    b.append(l)
for i in range(len(b)):
    s+=b[i][i]
print(s)
|
10,389 | 71fc177d2880b159495e2759315df3bd0d9d7d6a | import jinja2
import markdown
from schema import (
INDEX_FILES, INDEX_TITLE, INDEX_LINK,
RESEARCH_FILES, RESEARCH_TITLE, RESEARCH_LINK,
TEACHING_FILES, TEACHING_TITLE, TEACHING_LINK,
PROGRAMMING_FILES, PROGRAMMING_TITLE, PROGRAMMING_LINK,
)
def convert_file(fname):
    """
    Convert markdown file `fname` to html. Returns html string.
    """
    md = markdown.Markdown(extensions=['extra'], tab_length=2)
    with open(fname, "r") as f:
        # read() replaces the needless ''.join(f.readlines()) round-trip.
        content = f.read()
    return md.convert(content)
def make_page(source_md_files=(), pagename=None, pagetitle=""):
    """Render the markdown files (under markdown/) into HTML page *pagename*.

    Raises ValueError when pagename is missing. The source_md_files default
    is an immutable tuple rather than the original mutable list literal
    (shared-mutable-default pitfall); passing a list still works.
    """
    if pagename is None:
        raise ValueError("pagename cannot be none")
    env = jinja2.Environment(loader=jinja2.loaders.FileSystemLoader('templates/'))
    template = env.get_template("page.html.jinja")
    # Convert every source file and join the fragments in order.
    content = [convert_file("markdown/" + sourcefile) for sourcefile in source_md_files]
    content_string = '\n'.join(content)
    with open(pagename, "w") as indexfile:
        indexfile.write(template.render(
            title=pagetitle,
            stuff=content_string,
            link=pagename,
        ))
def make_main_pages():
    """Render each of the site's main pages from its markdown sources."""
    pages = (
        (INDEX_FILES, "index.html", INDEX_TITLE),
        (RESEARCH_FILES, "research.html", RESEARCH_TITLE),
        (TEACHING_FILES, "teaching.html", TEACHING_TITLE),
        (PROGRAMMING_FILES, "programming.html", PROGRAMMING_TITLE),
    )
    for sources, name, title in pages:
        make_page(
            source_md_files=sources,
            pagename=name,
            pagetitle=title,
        )
def main():
    """Build every page of the static site."""
    make_main_pages()
if __name__ == "__main__":
    main()
|
10,390 | da55f20712cc1578a9535bcc2fe2e1334fd9f6b8 | import json
from datetime import datetime
from pprint import pprint
from typing import List, Dict
import numpy as np
import pandas as pd
from spotify_api import SpotifyClient
def _get_track(playlist_item):
if "track" in playlist_item:
return playlist_item["track"]["name"]
else:
return playlist_item["name"]
def _get_artist(playlist_item):
if "track" in playlist_item:
return playlist_item["track"]["artists"][0]["name"]
else:
return playlist_item["artists"][0]["name"]
def _get_id(playlist_item):
if "track" in playlist_item:
return playlist_item["track"]["id"]
else:
return playlist_item["id"]
def create_playlist_of_top_tracks(time_range="short_term", limit=20):
    """Create a playlist named "<limit>_<time_range>_<Mon-yy>" and fill it
    with the user's top *limit* tracks for *time_range*; returns the
    add-tracks API response."""
    response = SpotifyClient().create_playlist(
        f"{limit}_{time_range}_{datetime.now().strftime('%b-%y')}",
        f"{limit} ripper tracks from the {time_range} based on number of plays.",
    )
    my_playlist = Playlist(response["id"])
    top_tracks_items = SpotifyClient().get_top("tracks", time_range, limit)
    track_ids = [track_data["id"] for track_data in top_tracks_items]
    response = my_playlist.add_tracks_to_playlist(track_ids)
    return response
class Playlist:
    """Wraps one Spotify playlist: fetch its items, append tracks, and build
    a pandas DataFrame of track names plus audio features."""
    def __init__(self, playlist_id):
        self.spotify_client = SpotifyClient()
        self.playlist_id = playlist_id
        # Populated by create_playlist_df().
        self.playlist_df = pd.DataFrame(columns=["track", "artist"])
    def create_playlist_df(self, spotify_items: List[Dict]):
        """Build and store a DataFrame pairing each item's (track, artist)
        with its audio-feature vector; returns the frame."""
        af = self.get_audio_features_of_tracks(spotify_items)
        tracks_artists = [
            [_get_track(item), _get_artist(item)] for item in spotify_items
        ]
        df_af_array = np.concatenate((tracks_artists, af), axis=1)
        # NOTE(review): assumes get_audio_features() yields exactly these four
        # values, in this order — confirm against SpotifyClient.
        af_columns = ["acousticness", "danceability", "energy", "instrumentalness"]
        self.playlist_df = pd.DataFrame(
            df_af_array, columns=["track", "artist"] + af_columns
        ) # Get these fields from desired_fields?
        # np.concatenate stringifies the numbers; cast the feature columns back.
        for f in af_columns:
            self.playlist_df[f] = pd.to_numeric(self.playlist_df[f], downcast="float")
        return self.playlist_df
    def get_playlists_items(self) -> List[Dict]:
        # Raw "items" array from the playlist-tracks endpoint.
        endpoint = f"playlists/{self.playlist_id}/tracks"
        spotify_data = self.spotify_client._get_api_data(endpoint)
        return spotify_data["items"]
    def add_tracks_to_playlist(self, track_ids):
        """Adds tracks defined by track_ids (list) to playlist defined by playlist_id."""
        endpoint = f"playlists/{self.playlist_id}/tracks"
        self.spotify_client._headers["Content-Type"] = "application/json"
        self.spotify_client._data = json.dumps(
            [f"spotify:track:{track_id}" for track_id in track_ids]
        )
        response = self.spotify_client._post_api_data(endpoint)
        return response
    def get_audio_features_of_tracks(self, playlist_items: List[Dict]):
        """Requires OAuth token with scope user-read-top"""
        # One feature vector per item, preserving playlist order.
        audio_features_vectors = []
        for track_object in playlist_items:
            track_id = _get_id(track_object)
            track_features = self.spotify_client.get_audio_features(track_id)
            audio_features_vectors.append(list(track_features.values()))
        return np.array([vec for vec in audio_features_vectors])
    def get_mean_audio_features(self):
        # Column means of the four tracked features of the stored frame.
        return {
            "acousticness": self.playlist_df["acousticness"].mean(),
            "danceability": self.playlist_df["danceability"].mean(),
            "energy": self.playlist_df["energy"].mean(),
            "instrumentalness": self.playlist_df["instrumentalness"].mean(),
        }
    # 'speechiness': self.playlist_df['speechiness'].mean()}
def main():
    # Ad-hoc driver: build the feature frame for one hard-coded playlist
    # and print a preview; the commented lines are kept experiments.
    my_pid = "1uPPJSAPbKGxszadexGQJL"
    simply = Playlist(my_pid)
    simply_data = simply.get_playlists_items()
    simply.create_playlist_df(simply_data)
    # simply.add_tracks_to_playlist(['1c6usMjMA3cMG1tNM67g2C'])
    pprint(simply.playlist_df.head())
    print(simply.playlist_df["energy"].dtype)
    # mySpotify = SpotifyClient()
    # mySpotify.get_current_playback()
    # mySpotify.get_recently_played()
    # top_playlist = Playlist('')
    # top_data = top_playlist.spotify_client.get_top('tracks', 'short_term', limit=10)
    # top_df = top_playlist.create_playlist_df(top_data)
    # print(top_df.head())
    # create_playlist_of_top_tracks('short_term', 10)
    # mySpotify.get_audio_features_of_currently_playing_track()
    # mySpotify.create_playlist("autogen2 playlist", "a new playlist")
    # audio_array = mySpotify.get_audio_features_of_top_tracks()
    # compute_similarity_matrix(audio_array)
    # mySpotify.create_top_tracks_df()
if __name__ == "__main__":
    main()
# idea: use cosine similarity on artist genres to find similar artists
# Make a playlist based on two or more people's common genre interests
# Make playlist of a genre from music in library
# use cosine similarity on audio features of tracks
# Create symmetric matrix of similarity values
# analyse tracks in a playlist, or album ("vibe" of album?) eg. e-1
# Make playlist of tracks with tempo=120
# TODO: Start making tests
# TODO: Try recommendations endpoint
# TODO: create track subclass
# TODO: cron job for creating a monthly playlist
# Use liveness metric to make playlist of live music
# Reorder playlist e- in ascending energy order?
# For n tracks, the number of similarity computations will be
# 1+2+...+(n-1) = n*(n-1)/2 = O(n^2)...
|
10,391 | 7be5056bd3b6f0838b032a0757b7ccd02285043c | # coding: utf-8
import glob
from time import time
from preprocess_text import corpus
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
import re
NYT_CORPUS_PATH = '/opt/nlp_shared/corpora/NytCorpora/NYTCorpus/'
# Compute topics for Bill Clinton's terms
# (range end is exclusive, so this covers 1993-2001 inclusive).
YEARS = range(1993, 2002)
N_FEATURES = 1000
N_TOPICS = 15
N_TOP_WORDS = 100
corpora_all = []
def print_top_words(model, feature_names, N_TOP_WORDS):
    """Print the N_TOP_WORDS highest-weighted terms of each topic, keeping
    only words that do not start with a non-letter character."""
    for topic_idx, topic in enumerate(model.components_):
        print("Topic #%d:" % topic_idx)
        # Indices of the N_TOP_WORDS largest weights, descending.
        top_indices = topic.argsort()[::-1][:N_TOP_WORDS]
        kept = [
            feature_names[idx]
            for idx in top_indices
            if re.match('[^a-zA-Z]+', feature_names[idx]) is None
        ]
        print(" ".join(kept))
        print()
for year in YEARS:
    # Gather the per-day "industrial" corpus files for this year.
    corpora_files_for_year = glob.glob("{}{}*industrial*".format(NYT_CORPUS_PATH, year))
    corpora_for_year = []
    for corpus_path in corpora_files_for_year:
        # Filenames start with a 10-char date (YYYY-MM-DD) used as the load key.
        corpus_date = corpus_path.split('/')[-1][:10]
        corpus_for_date = corpus.Corpus.load(NYT_CORPUS_PATH, corpus_date)
        corpora_for_year.append(corpus_for_date)
    print("Found articles for {} days in the year {}".format(len(corpora_for_year), year))
    corpora_all += corpora_for_year
# One document per article: its sentences joined with newlines.
dataset = []
for corpus_for_day in corpora_all:
    for article in corpus_for_day:
        dataset.append('\n'.join(article.sents))
N_SAMPLES=len(dataset)
# NOTE(review): counting '\n' gives sentences-per-article minus one.
average_num_sentences = 0.0
for article in dataset:
    average_num_sentences += article.count('\n')
average_num_sentences /= len(dataset)
print("average number of sentences in the {} articles is {}".format(len(dataset), average_num_sentences))
data_samples = []
data_samples = dataset[:N_SAMPLES]
class LemmaTokenizer(object):
    """Callable tokenizer for CountVectorizer: word-tokenize, then WordNet-
    lemmatize each token (default noun POS)."""
    def __init__(self):
        self.wnl = WordNetLemmatizer()
    def __call__(self, doc):
        return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.90, min_df=2,
                                max_features=N_FEATURES,
                                stop_words='english',
                                tokenizer=LemmaTokenizer())
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print("Fitting LDA models with tf features, "
      "N_SAMPLES=%d and N_FEATURES=%d..."
      % (N_SAMPLES, N_FEATURES))
# NOTE(review): `n_topics` was renamed `n_components` in scikit-learn 0.19
# and removed in 0.21; `get_feature_names` became `get_feature_names_out`
# in >= 1.0 — confirm the pinned sklearn version.
lda = LatentDirichletAllocation(n_topics=N_TOPICS, max_iter=20,
                                learning_method='batch',
                                learning_offset=50.,
                                random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, N_TOP_WORDS)
10,392 | af9db97c3b3f2a8d21e4b76025497f20bba11a6f | #author: Riley Doyle
#date: 7/16/20
#file: calc_CO2_loss_alk
#status:working
import numpy as np
import matplotlib.pyplot as plt
from calc_Ks import *
from calc_alphas import *
def calc_CO2_loss_alk (pK1, pK2, Kh, pH, d, PCO2, alkin, alkend, delalk, kLain, kLaend, delkLa):
    """Plot CO2 loss versus alkalinity for a sweep of kLa values (one plotted
    line per kLa step). Units follow the 24*44 factor below (per-day, grams
    of CO2); depth/volume scaling comes from d."""
    # Line styles cycled per kLa step (supports at most 5 steps).
    L = np.array(['-', '--', '-.', ':', '--'])
    alk = np.arange(alkin, alkend, delalk)
    kLasteps = np.arange(kLain, kLaend, delkLa)
    nkLasteps = len(kLasteps)
    y = np.zeros((nkLasteps, len(alk)))
    i = 0
    for c in kLasteps:
        # Carbonate-system speciation fractions at this pH.
        alpha0 = calc_alpha0(pH, pK1, pK2)
        alpha1 = calc_alpha1(pH, pK1, pK2)
        alpha2 = calc_alpha2(pH, pK1, pK2)
        CO2sat = PCO2*Kh*1000
        H = 10**(-pH)
        OH = 10**(-(14-pH))
        # Total carbonate CT from the alkalinity balance.
        bt = (1/(alpha1 + (2*alpha2)))
        tp = (alk - OH + H)
        CT = tp * bt
        H2CO3 = alpha0*CT
        # Mass-transfer flux: kLa * (CO2 - CO2sat), per day, 44 g/mol CO2.
        y[i,:] = c*(H2CO3 - CO2sat)*24*44
        # NOTE(review): this rescales the WHOLE y matrix on every pass, so
        # rows filled in earlier iterations get multiplied by d repeatedly;
        # the row plotted just below is scaled exactly once, but the stored
        # y ends up inconsistently scaled. Presumably `y[i,:] *= d` (or a
        # single scaling after the loop) was intended — confirm.
        y = y*d
        plt.plot(alk, y[i,:].T, linestyle=L[i])
        i += 1
10,393 | ebd97a9827cc878d1bc33144a955df5a3608c774 | import numpy as np
from matplotlib import pyplot as plt
import cv2
def erode(a, b):
    """Binary erosion of image *a* (0/1) by structuring element *b*."""
    # The structuring element would need reflecting before true convolution;
    # correlation (what filter2D actually computes) gives erosion directly.
    dst = cv2.filter2D(a, -1, b, borderType=cv2.BORDER_CONSTANT)
    sum_b = np.sum(b)
    # Keep only positions where every 1 in b overlapped a 1 in a.
    dst = np.where(dst == sum_b, 1, 0)
    return dst.astype(np.uint8)
def dilate(a, b):
    """Binary dilation of image *a* (0/1) by structuring element *b*."""
    # Dilation is convolution with b, so rotate it 180 degrees before
    # filter2D (which computes correlation, not convolution).
    b_reflect = np.rot90(b, 2)
    dst = cv2.filter2D(a, -1, b_reflect, borderType=cv2.BORDER_CONSTANT)
    dst = np.where(dst > 0, 1, 0)
    return dst.astype(np.uint8)
def hit_miss(a, b):
    """Hit-or-miss transform: b's 1s must land on foreground and b's 0s on
    background; -1 entries in b are "don't care"."""
    b1 = np.where(b == 1, 1, 0)
    b2 = np.where(b == 0, 1, 0)
    # Pad by one pixel so the image border behaves like background.
    padding = cv2.copyMakeBorder(a, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
    eroded = erode(padding, b1)
    a_not = 1 - padding
    eroded2 = erode(a_not, b2)
    # Intersect the two erosions, then strip the padding again.
    dst = cv2.bitwise_and(eroded, eroded2)[1:-1, 1:-1]
    return dst.astype(np.uint8)
def thin(f, b):
    """One thinning step: remove pixels matched by the hit-or-miss transform
    with *b*, but only those that have at least one 8-neighbour."""
    hit_miss_res = hit_miss(f, b)
    # Flag whether each pixel has any 8-connected neighbour.
    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    neighbor_num = cv2.filter2D(f, -1, kernel, borderType=cv2.BORDER_CONSTANT)
    connected = np.where(neighbor_num == 0, 0, 1)
    # Only connected pixels matched by hit-or-miss get deleted; isolated
    # pixels are preserved.
    deleted = cv2.bitwise_and(hit_miss_res, connected.astype(np.uint8))
    return cv2.subtract(f, deleted)
def morphological_skeleton_extract(binary):
    """Thin *binary* to a skeleton with the 8 rotated structuring elements;
    returns (skeleton, number_of_thinning_iterations)."""
    b = []
    b.append(np.array([[0, 0, 0], [-1, 1, -1], [1, 1, 1]]))
    b.append(np.array([[-1, 0, 0], [1, 1, 0], [1, 1, -1]]))
    b.append(np.array([[1, -1, 0], [1, 1, 0], [1, -1, 0]]))
    b.append(np.array([[1, 1, -1], [1, 1, 0], [-1, 0, 0]]))
    b.append(np.array([[1, 1, 1], [-1, 1, -1], [0, 0, 0]]))
    b.append(np.array([[-1, 1, 1], [0, 1, 1], [0, 0, -1]]))
    b.append(np.array([[0, -1, 1], [0, 1, 1], [0, -1, 1]]))
    b.append(np.array([[0, 0, -1], [0, 1, 1], [-1, 1, 1]]))
    dst = binary.copy()
    # Number of thinning iterations performed.
    thin_num = 0
    # Keep thinning with the kernels in b until thinning produces no change.
    # NOTE(review): this stops as soon as a SINGLE kernel makes no change,
    # not after a full no-change pass over all 8 kernels — confirm intended.
    while True:
        isConverged = False
        for bi in b:
            thinned = thin(dst, bi)
            if (thinned == dst).all():
                isConverged = True
                break
            else:
                dst = thinned
                thin_num += 1
        if isConverged:
            break
    return dst.astype(np.uint8), thin_num
# Extract the (inner) boundary via morphology: A minus the erosion of A.
def edge_extract(a):
    b = np.ones((3, 3), np.uint8)
    return cv2.subtract(a, erode(a, b))
# Two-pass (3-4 chamfer) distance transform, ported from a matlab file.
def distance_transform(img):
    height, width = img.shape
    # Background -> inf, foreground -> 1 (seed cost).
    A = np.where(img == 0, np.Inf, 1)
    padding = cv2.copyMakeBorder(A, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=np.inf)
    # Forward pass (top-left to bottom-right): relax against W, NW, N, NE
    # neighbours with costs 3 (orthogonal) / 4 (diagonal).
    # NOTE(review): the loop bounds index the PADDED array with the unpadded
    # height/width, so the last row/column are never relaxed — verify against
    # the original matlab indexing.
    for i in range(1, height):
        for j in range(1, width - 1):
            temp1 = min(padding[i][j-1] + 3, padding[i][j])
            temp2 = min(padding[i-1][j-1] + 4, temp1)
            temp3 = min(padding[i-1][j] + 3, temp2)
            padding[i][j] = min(padding[i-1][j+1]+4, temp3)
    # Backward pass (bottom-right to top-left): relax against E, SE, S, SW.
    for i in range(height - 2, -1, -1):
        for j in range(width - 2, 0, -1):
            temp1 = min(padding[i][j+1] + 3, padding[i][j])
            temp2 = min(padding[i+1][j+1] + 4, temp1)
            temp3 = min(padding[i+1][j] + 3, temp2)
            padding[i][j] = min(padding[i+1][j+1]+4, temp3)
    # Normalise by the unit step cost (3) and round to whole steps.
    D = np.round(padding[:, 1:width-1]/3)
    return D
def get_local_max_img(img):
    """Return a 0/1 image marking pixels that equal the maximum of their
    7x7 neighbourhood."""
    dst = np.zeros_like(img)
    height, width = img.shape
    padding = img.copy()
    padding = cv2.copyMakeBorder(
        padding, 3, 3, 3, 3, borderType=cv2.BORDER_CONSTANT, value=np.inf)
    # 7x7 neighbourhood maximum per pixel.
    # NOTE(review): padding with +inf means neighbourhoods near the border
    # contain inf, so border pixels can never be marked — confirm intended.
    for i in range(height):
        for j in range(width):
            neighbor = padding[i:i+7, j:j+7]
            if img[i][j] == np.max(neighbor):
                dst[i][j] = 1
    return dst.astype(np.uint8)
def distance_skeleton_extract(binary):
    """Skeleton via the distance-transform method: local maxima of the
    distance transform of the edge image."""
    edge_img = edge_extract(binary)
    dis_img = distance_transform(edge_img)
    distance_skeleton = get_local_max_img(dis_img)
    return distance_skeleton
def cut(a):
    """Prune short spurs from skeleton *a* (Gonzalez & Woods-style pruning):
    thin away spur pixels, find the surviving endpoints, grow them back
    conditioned on the original, and union with the thinned image."""
    # The 8 endpoint detectors: the skeleton kernels rotated 90 degrees.
    b = []
    b.append(np.rot90(np.array([[0, 0, 0], [-1, 1, -1], [1, 1, 1]]), 1))
    b.append(np.rot90(np.array([[-1, 0, 0], [1, 1, 0], [1, 1, -1]]), 1))
    b.append(np.rot90(np.array([[1, -1, 0], [1, 1, 0], [1, -1, 0]]), 1))
    b.append(np.rot90(np.array([[1, 1, -1], [1, 1, 0], [-1, 0, 0]]), 1))
    b.append(np.rot90(np.array([[1, 1, 1], [-1, 1, -1], [0, 0, 0]]), 1))
    b.append(np.rot90(np.array([[-1, 1, 1], [0, 1, 1], [0, 0, -1]]), 1))
    b.append(np.rot90(np.array([[0, -1, 1], [0, 1, 1], [0, -1, 1]]), 1))
    b.append(np.rot90(np.array([[0, 0, -1], [0, 1, 1], [-1, 1, 1]]), 1))
    # Step 1: thin with each detector to strip spur pixels.
    x1 = a.copy()
    for bi in b:
        x1 = thin(x1, bi)
    # Step 2: union of hit-or-miss responses = the surviving endpoints.
    x2 = np.zeros_like(x1)
    for bi in b:
        x2_component = hit_miss(x1, bi)
        x2 = np.bitwise_or(x2, x2_component)
    H = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    # Number of erosion passes applied during pruning.
    # NOTE(review): the classic pruning algorithm DILATES the endpoints here,
    # conditioned on `a`; erode() with a 3x3 ones kernel wipes out isolated
    # endpoint pixels immediately, so x3 is likely always empty. Confirm
    # whether dilate(eroded, H) was intended.
    ERODE_NUM = 3
    eroded = x2.copy()
    for i in range(ERODE_NUM):
        eroded = erode(eroded, H)
    x3 = np.bitwise_and(eroded, a)
    return np.bitwise_or(x1, x3)
if __name__ == "__main__":
    # Demo: binarise a fingerprint, extract both skeletons, prune each,
    # and show everything in one figure.
    img = cv2.imread('smallfingerprint.jpg', cv2.IMREAD_GRAYSCALE)
    # Inverted threshold: ridges (dark) become foreground value 1.
    _, binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY_INV)
    binary = binary.astype(np.uint8)
    morphological_skeleton, thin_num = morphological_skeleton_extract(binary)
    morphological_skeleton_cut = cut(morphological_skeleton)
    distance_skeleton = distance_skeleton_extract(binary)
    distance_skeleton_cut = cut(distance_skeleton)
    fig = plt.figure(figsize=(8, 6))
    plt.subplot2grid((2, 4), (0, 0), rowspan=2), plt.imshow(img, cmap='gray'), plt.title(
        'Original', fontsize=6), plt.axis('off')
    plt.subplot2grid((2, 4), (0, 1), rowspan=2), plt.imshow(binary, cmap='gray'), plt.title(
        'Binary', fontsize=6), plt.axis('off')
    plt.subplot2grid((2, 4), (0, 2)), plt.imshow(morphological_skeleton, cmap='gray'), plt.title(
        'Morphological Skeleton, iteration=' + str(thin_num), fontsize=6), plt.axis('off')
    plt.subplot2grid((2, 4), (0, 3)), plt.imshow(morphological_skeleton_cut, cmap='gray'), plt.title(
        'Cut', fontsize=6), plt.axis('off')
    plt.subplot2grid((2, 4), (1, 2)), plt.imshow(distance_skeleton, cmap='gray'), plt.title(
        'Distance Skeleton', fontsize=6), plt.axis('off')
    plt.subplot2grid((2, 4), (1, 3)), plt.imshow(distance_skeleton_cut, cmap='gray'), plt.title(
        'cut', fontsize=6), plt.axis('off')
    plt.show()
|
10,394 | 6876e5d4c6f97dd89fa62af36f68c04dc324a006 | from django import forms
from django_grapesjs import settings
from django_grapesjs.utils import get_render_html_value
from django_grapesjs.utils.get_source import get_grapejs_assets
# Public API of this module.
__all__ = (
    'GrapesJsWidget',
)
class GrapesJsWidget(forms.Textarea):
    """
    Textarea form widget with support grapesjs.
    This is widget base config grapesjs.
    """
    template_name = settings.GRAPESJS_TEMPLATE
    class Media:
        css = {
            'screen': get_grapejs_assets('css'),
        }
        # NOTE(review): the trailing comma makes `js` a 1-tuple wrapping the
        # asset collection — confirm Django flattens this as intended.
        js = get_grapejs_assets('js'),
    def get_formatted_id_value(self, value_id):
        # HTML ids use dashes; the JS init code needs underscore identifiers.
        return value_id.replace('-', '_')
    def get_context(self, name, value, attrs):
        """Extend the textarea context with grapesjs initialisation data."""
        context = super().get_context(name, value, attrs)
        context['widget']['attrs']['id'] = self.get_formatted_id_value(
            context['widget']['attrs']['id']
        )
        # NOTE(review): default_html, apply_django_tag, html_name_init_conf
        # and template_choices are not defined on this class; presumably the
        # form field or a subclass sets them — verify before reuse.
        context['widget'].update({
            'get_render_html_value': get_render_html_value(
                self.default_html, apply_django_tag=self.apply_django_tag
            ),
            'html_name_init_conf': self.html_name_init_conf,
            'template_choices': self.template_choices,
            'apply_django_tag': int(self.apply_django_tag),
        })
        return context
|
10,395 | 1aa8c01e29a76fb784363e668a42228f67f326ff | from sys import argv
# NOTE(review): Python 2 script ("Learn Python the Hard Way" ex15) — print
# statements and raw_input; neither file handle is ever closed.
script, filename = argv
txt = open(filename)
print "Here's your file %r:" % filename
print txt.read()
#We call a function on txt named read.
#What you get back from open is a file,
#and it also has commands you can give it.
#You give a file a command by using the . (dot or period),
#the name of the command, and parameters.
print "Type the filename again:"
file_again = raw_input("> ")
txt_again = open(file_again)
print txt_again.read()
# close -- Closes the file. Like File->Save.. in your editor.
# read -- Reads the contents of the file. You can assign the result to a variable.
# readline -- Reads just one line of a text file.
# truncate -- Empties the file. Watch out if you care about the file.
# write('stuff') -- Writes "stuff" to the file.
10,396 | 05ff6f4af7c7503d0c4aab453a157d667ddf62bd | #Simone and David
import numpy as np
import random
import matplotlib.pyplot as plt
from Config import Config
import json
DEBUG = False
if DEBUG:
    # Stand-in fitness function for offline testing: the fitness of an
    # individual is simply the sum of all entries of all its weight matrices.
    # BUGFIX: the stub previously took only `population`, but evaluation()
    # calls simulate_episode with three arguments, so running with DEBUG=True
    # raised a TypeError.  The extra parameters now mirror (with defaults)
    # the real signature and are ignored by the stub.
    def simulate_episode(population, show_graphically=False, generation_counter=0):
        fit = []
        for m in range(len(population)):
            tmp = 0
            for n in range(len(population[m])):
                tmp += np.sum(population[m][n])
            fit.append(tmp)
        return fit
else:
    from Environment import *
class Evolution():
    """Genetic algorithm that evolves the weight matrices of a feed-forward NN.

    An individual ("genotype") is a list of numpy matrices, one per layer
    transition described by ``nn_layer_list``.  Fitness values come from
    ``simulate_episode`` (the Environment module, or the DEBUG stub above).

    Per-generation statistics are accumulated in ``h_fmax`` (best fitness),
    ``h_favg`` (mean fitness) and ``h_div`` (population diversity).
    """

    def __init__(self, nn_layer_list=None, num_pop=None, num_gen=None, mutation_rate=None, n_best=3):
        """Read defaults from Config for every argument that is left None."""
        self.config = Config()
        self.weights_bounds = self.config.INITIAL_WEIGHT_RANGE  # initial weight bounds
        # BUGFIX: was `self.Config.NN_LAYER_NODES` -- instances only have
        # `self.config`, so leaving nn_layer_list=None raised AttributeError.
        self.nn_layer_list = nn_layer_list if nn_layer_list is not None else self.config.NN_LAYER_NODES
        self.num_pop = num_pop if num_pop is not None else self.config.POPULATION_SIZE
        self.num_gen = num_gen if num_gen is not None else self.config.GENERATION_COUNT
        self.mutation_rate = mutation_rate if mutation_rate is not None else self.config.MUTATION_RATE
        # NOTE(review): the `n_best` parameter is ignored -- the configured
        # value always wins.  Preserved as-is for backward compatibility.
        self.n_best = Config.N_BEST
        # Per-generation history, used for plotting after the run.
        self.h_fmax = []
        self.h_favg = []
        self.h_div = []

    def _create_individual(self, mode='std_sampled'):
        """Return a random genotype: one weight matrix per layer transition.

        mode 'std_sampled': normal samples with mean/std from weights_bounds;
        mode 'uni_sampled': uniform samples scaled into
        [weights_bounds[0], weights_bounds[1]].
        """
        genotype = []
        for l in range(len(self.nn_layer_list) - 1):
            shape = (self.nn_layer_list[l], self.nn_layer_list[l + 1])
            if mode == 'std_sampled':
                # BUGFIX: was `self.weight_bounds` (missing 's'), which raised
                # AttributeError whenever this mode was selected.
                genotype.append(np.random.normal(self.weights_bounds[0], self.weights_bounds[1], size=shape))
            elif mode == 'uni_sampled':
                genotype.append(np.multiply(np.random.uniform(size=shape), self.weights_bounds[1] - self.weights_bounds[0]) + self.weights_bounds[0])
        return genotype

    def initialization(self, mode='uni_sampled'):
        """Create the initial random population of ``num_pop`` genotypes."""
        self.population = []
        for _ in range(self.num_pop):
            self.population.append(self._create_individual(mode))

    def evaluation(self, generation_counter):
        """Evaluate every individual and sort population/fit by descending fitness."""
        # Render the episode graphically every SHOW_INCREMENT_DISTANCE generations.
        show_graphically = (generation_counter % self.config.SHOW_INCREMENT_DISTANCE) == 0
        self.fit = simulate_episode(self.population, show_graphically, generation_counter)
        # Keep self.population and self.fit aligned, best first.
        ranked = sorted(zip(self.population, self.fit), reverse=True, key=lambda pair: pair[1])
        self.population = [individual for individual, _ in ranked]
        self.fit = [fitness for _, fitness in ranked]

    def selection_reproduction(self, mode='trbs', n_best=3):
        """Build the next parent pool (size ``num_pop``) from the current,
        fitness-sorted population.

        Modes:
          'trbs'              -- truncated rank-based selection: clone the top
                                 ``n_best`` individuals round-robin.
          'elitism'           -- keep the top ``n_best``; each weaker slot has
                                 a 25% chance of being replaced by an elite clone.
          'rank_proportional' -- clone rank r Config.RANK_PROPRTIONS[r] times.

        NOTE(review): list.copy() on a genotype is shallow -- clones share the
        underlying numpy matrices; confirm this is intended given that
        Xover(mode=3) modifies matrices in place.
        """
        new_population = []
        if mode == 'trbs':  # Truncated Rank-Based Selection
            while len(new_population) < self.num_pop:
                for i in range(n_best):
                    new_population.append(self.population[i].copy())
                if len(new_population) >= self.num_pop: break
            return new_population
        elif mode == 'elitism':
            for i in range(n_best):
                new_population.append(self.population[i].copy())
            k = 0
            for i in range(n_best, len(self.population)):
                if k >= n_best:
                    k = 0
                if random.random() > 0.75:
                    # 25% chance: fill this slot with another elite clone.
                    new_population.append(self.population[k].copy())
                    k += 1
                else:
                    new_population.append(self.population[i].copy())
            return new_population
        elif mode == 'rank_proportional':
            rank_proportions = self.config.RANK_PROPRTIONS  # (sic) Config attribute name
            for rank in range(len(rank_proportions)):
                for _ in range(rank_proportions[rank]):
                    new_population.append(self.population[rank].copy())
            return new_population

    def Xover(self, p1, p2, mode=0):
        """Crossover of two genotypes.

        mode 0 ('arithmetic'): per layer, child matrix is p1+p2 or p1-p2
                               (coin flip); returns ONE child.
        mode 1 ('uniform'):    child assembled from random-length runs of the
                               parents' flattened weights; returns ONE child.
        mode 2 ('average'):    element-wise mean of the parents; returns ONE child.
        mode 3 ('row swap'):   swaps whole weight-matrix rows between p1 and p2
                               IN PLACE with probability 0.5 each, and returns
                               BOTH (modified) parents as a tuple.
        """
        child = []
        if mode == 3:
            for layer_number in range(len(p1)):
                for gene_number in range(p1[layer_number].shape[0]):
                    if random.random() < 0.5:
                        # BUGFIX: `temp_gene` was a numpy VIEW into p1's row,
                        # so overwriting p1 also changed temp_gene and p2 got
                        # its own values written back -- no swap ever happened.
                        # .copy() materialises the row before the swap.
                        temp_gene = p1[layer_number][gene_number].copy()
                        p1[layer_number][gene_number] = p2[layer_number][gene_number]
                        p2[layer_number][gene_number] = temp_gene
            return p1, p2
        else:
            for m in range(len(p1)):
                if mode == 0:  # arithmetic
                    if random.random() < 0.5:
                        x = p1[m] + p2[m]
                    else:
                        x = p1[m] - p2[m]
                    child.append(x)
                elif mode == 1:  # uniform
                    n_genes = p1[m].shape[0] * p1[m].shape[1]
                    a = p1[m].reshape(n_genes)
                    b = p2[m].reshape(n_genes)
                    x = np.array([])
                    # Random run length; each run comes from a or b (coin flip).
                    step = random.randrange(1, n_genes)
                    for i in range(0, n_genes, step):
                        if random.random() < 0.5:
                            x = np.concatenate((x, a[i:min(i + step, n_genes)]), axis=0)
                        else:
                            x = np.concatenate((x, b[i:min(i + step, n_genes)]), axis=0)
                    x = x.reshape((p1[m].shape[0], p1[m].shape[1]))
                    child.append(x)
                elif mode == 2:  # average
                    child.append((p1[m] + p2[m]) / 2)
            return child

    def mutation(self, p):
        """Return a new genotype: ``p`` plus element-wise N(0, 1) noise."""
        child = []
        for m in range(len(p)):
            noise = np.random.normal(size=(p[m].shape[0], p[m].shape[1]))
            child.append(p[m] + noise)
        return child

    def evolution(self, verbose=True, mantain_best=True, exp=0, fp=None):
        """Run the full evolutionary loop for ``num_gen`` generations.

        fp: open file handle used to checkpoint the best individual every
        Config.SAVE_INCREMENT_DISTANCE generations.
        NOTE(review): generation 0 always triggers a checkpoint
        (0 % SAVE_INCREMENT_DISTANCE == 0), so fp=None crashes on the first
        generation -- confirm callers always pass a file handle.
        NOTE(review): `mantain_best` is currently unused; kept for interface
        compatibility.
        """
        self.initialization()
        for generation_counter in range(self.num_gen):
            self.evaluation(generation_counter)
            self.population = self.selection_reproduction(mode='rank_proportional', n_best=self.n_best)
            # Logging / bookkeeping.
            if verbose: print('Generation ',generation_counter,' Best: ',self.population[0],' with value: ', self.fit[0])
            self.h_fmax.append(self.fit[0])
            self.h_favg.append(sum(self.fit)/len(self.fit))
            self.h_div.append(self.diversity())
            if generation_counter % Config.SAVE_INCREMENT_DISTANCE == 0 or generation_counter >= self.num_gen - 1:
                fp.write("Exp. %d Gen. %d -> Best with value: %f \n" % (exp, generation_counter,self.fit[0]))
                fp.write(json.dumps(self.population[0], cls=NumpyEncoder)+"\n")
                print(json.dumps(self.population[0], cls=NumpyEncoder))
            # Pair up random parents without replacement.
            # NOTE(review): assumes an even population size -- an odd size
            # makes the second pop fail on an empty list.
            new_generation = []
            while len(self.population) > 0:
                p1 = self.population.pop(random.randint(0, len(self.population) - 1))
                p2 = self.population.pop(random.randint(0, len(self.population) - 1))
                child_1, child_2 = self.Xover(p1, p2, mode=3)
                new_generation.append(child_1)
                new_generation.append(child_2)
            for child_number in range(len(new_generation)):
                if random.random() < self.mutation_rate:
                    new_generation[child_number] = self.mutation(new_generation[child_number])
            # Break up clones: if two children are identical, mutate one copy.
            for child_number_1 in range(len(new_generation) - 1):
                for child_number_2 in range(child_number_1 + 1, len(new_generation)):
                    child_1 = new_generation[child_number_1]
                    child_2 = new_generation[child_number_2]
                    are_identic = True
                    for layer_number in range(len(child_1)):
                        if np.any(child_1[layer_number] != child_2[layer_number]):
                            # BUGFIX: was misspelled `are_indentic`, so the
                            # flag never went False and EVERY pair counted as
                            # identical, mutating child_1 unconditionally.
                            are_identic = False
                            break
                    if are_identic:
                        new_generation[child_number_1] = self.mutation(child_1)
            self.population = new_generation

    def diversity(self):
        """Sum over all (unordered) pairs of the mean squared element-wise
        distance between genotypes; the i == j terms contribute zero."""
        tmp = 0
        for i in range(self.num_pop):
            for j in range(i, self.num_pop):
                for m in range(len(self.population[i])):
                    tmp += np.average(np.power(self.population[i][m] - self.population[j][m], 2))
        return tmp
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serialises numpy arrays as plain nested lists."""

    def default(self, obj):
        # Anything that is not an ndarray falls through to the stock encoder,
        # which raises TypeError for unsupported types.
        if not isinstance(obj, np.ndarray):
            return json.JSONEncoder.default(self, obj)
        return obj.tolist()
if __name__=='__main__':
    #Run 1 experiment and show the results
    # NOTE(review): evolution() defaults to fp=None, but its checkpoint branch
    # fires on generation 0 (0 % SAVE_INCREMENT_DISTANCE == 0) and calls
    # fp.write -- confirm this entry point still runs without a file handle.
    ea = Evolution(Config.NN_LAYER_NODES)
    ea.evolution(verbose=False)
    # Plot the per-generation statistics collected during the run.
    plt.figure()
    plt.title('Max fitness')
    plt.plot(ea.h_fmax)
    plt.show()
    plt.figure('Avg fitness')
    plt.plot(ea.h_favg)
    plt.show()
    plt.figure('Diversity')
    plt.plot(ea.h_div)
    plt.show()
    print(ea.h_fmax)
    print(ea.h_favg)
    print(ea.h_div)
10,397 | d91a64b5c101a2208b0a073d044f7056ee55e7cc | #!/usr/bin/python3
#-*-coding:utf-8-*-
import os
import time
import string
import re
# Static front-matter defaults; 'layout', 'title', 'subtitle', 'tags' and
# 'date' are filled in interactively in __main__.
cf = {
    'author':'Remilia Scarlet',
    'header-img': "img/post-bg-2015.jpg"
}
def menuSelect(s,l):
    """Print the prompt ``s`` and the numbered options in ``l``; return the
    0-based index of the option the user picks.

    An empty answer selects the 'post' layout when it exists, falling back
    to the first option otherwise.
    Raises ValueError if the user types something that is not an integer.
    """
    print(s)
    for i in range(len(l)):
        print('\t%d) %s' % (i + 1, l[i]))
    choice = input("layout:")
    if choice == '':
        # Robustness fix: l.index('post') previously raised ValueError when
        # no 'post' layout was present in the _layouts directory.
        return l.index('post') if 'post' in l else 0
    # The menu is displayed 1-based; convert back to a 0-based index.
    return int(choice) - 1
def inputTags(s):
    """Prompt with ``s`` for a comma-separated tag list and return the tags
    formatted as YAML list items (' - tag' per line, no trailing newline).
    Empty entries are dropped."""
    entries = (part.strip() for part in input(s).split(','))
    return '\n'.join(' - ' + tag for tag in entries if tag != '')
def checkTitle(s):
    """Return True when every character of ``s`` is an ASCII letter, digit,
    underscore or space -- i.e. the title is safe to use in a filename.
    The empty string is accepted."""
    allowed = set(string.ascii_letters) | set(string.digits) | {'_', ' '}
    return all(ch in allowed for ch in s)
def getYaml(cf):
    """Build the Jekyll YAML front-matter block from the config dict ``cf``.

    The 'layout' value is emitted bare; the other fields are double-quoted,
    and any field whose value is the empty string is omitted entirely.
    ``cf['tags']`` is expected to be pre-formatted YAML list items.
    """
    def plain(key):
        # Key (with colon) left-padded to 15 columns, unquoted value.
        return '%-15s%s\n' % (key + ':', cf[key])

    def quoted(key):
        # Same layout but the value is wrapped in double quotes;
        # an empty value drops the whole line.
        if cf[key] == '':
            return cf[key]
        return '%-15s"%s"\n' % (key + ':', cf[key])

    header = ''.join([
        plain('layout'),
        quoted('title'),
        quoted('subtitle'),
        quoted('date'),
        quoted('author'),
        quoted('header-img'),
    ])
    return '---\n' + header + 'tags:\n' + cf['tags'] + '\n---\n'
if __name__ == '__main__':
    # Let the user pick a layout from the Jekyll _layouts directory.
    layouts = [os.path.splitext(i)[0] for i in os.listdir('_layouts')]
    cf['layout'] = layouts[menuSelect('What layout do you want to use?(default is post)',layouts)]
    cf['title'] = input("input title:")
    #while cf['title'] == '' or not checkTitle(cf['title']):# Chinese titles also work; there is merely a bug on Windows
    #    print('Title should be in English!')
    #    cf['title'] = input("input title again:")
    cf['subtitle'] = input('input subtitle:')
    cf['tags'] = inputTags('input tags(split by comma):')
    cf['date'] = time.strftime("%Y-%m-%d %H:%M:%S +0800", time.localtime())
    # Jekyll post filename: YYYY-MM-DD-<title>, with characters that are
    # illegal in filenames replaced by '-' and runs of '-' collapsed.
    filename = time.strftime("%Y-%m-%d", time.localtime())+'-%s' % re.sub(r'\-+' , '-', re.sub(r'[?*\/<>:"|]','-', cf['title'])) #cf['title'].replace(' ','-')
    #yaml ='''---\nlayout: %s\ntitle: "%s"\nsubtitle: "%s"\ndate: %s\nauthor: "%s"\nheader-img: "img/post-bg-2015.jpg"\ntags:\n%s\n---\n''' \
    #    % (layout,title,subtitle,date,author,tags)
    yaml = getYaml(cf)
    path = '_posts'
    # Only write the post when run from the site root (where _posts exists);
    # otherwise the script exits silently.
    if os.path.exists(path):
        with open('%s/%s.markdown'% (path,filename),'w',encoding='utf-8') as f:
            f.write(yaml)
10,398 | e358d21d574632acfb3f3f27bf3553387aeb9920 | from django.contrib.auth import authenticate
from rest_framework import serializers, status
from rest_framework.response import Response
from rest_framework_jwt.serializers import (
JSONWebTokenSerializer,
jwt_payload_handler,
jwt_encode_handler,
)
class JWTLoginSerializer(JSONWebTokenSerializer):
    """JWT login serializer: authenticates the posted credentials and wraps
    the issued token in a DRF ``Response`` payload.

    NOTE(review): returning a ``Response`` from ``validate`` is unusual --
    DRF normally expects validated data (a dict) here; confirm the calling
    view really consumes the return value this way.
    """

    def validate(self, kwargs):
        """Authenticate ``username``/``password`` and return a token payload.

        Raises serializers.ValidationError when either credential is missing.
        """
        credentials = {
            'username': kwargs.get('username'),
            'password': kwargs.get('password')
        }
        if not all(credentials.values()):
            # BUGFIX: message typo corrected ("containt" -> "contain").
            raise serializers.ValidationError(
                'Credential must contain Username and Password')
        user = authenticate(request=self.context['request'], **credentials)
        if user:
            payload = jwt_payload_handler(user)
            token = jwt_encode_handler(payload)
            return Response({
                'message': 'User Found!',
                'data': {
                    'token': token
                }
            }, status=status.HTTP_200_OK)
        # BUGFIX: the "user not found" Response was built but never returned,
        # so this branch silently produced None.
        return Response({
            'message': 'User Not Found!',
            'data': None
        }, status=status.HTTP_200_OK)
|
10,399 | c6bd402b9d09b13a73d21aa1b207012efc557f21 | import numpy as np
# Split the list of repositories with more than 10 pull requests into three
# worker lists (repo_PR_1/2/3.txt), stripping the GitHub API URL prefix.
fileName = '/Users/shuruiz/Work/researchProjects/INTRUDE/data/PR_count.csv'
list_repo = []
list_repo_1 = []
list_repo_2 = []
list_repo_3 = []
with open(fileName) as f:
    lineList = f.readlines()
    for line in lineList:
        # Each line: "<repo_url> <pr_count>", whitespace-separated despite
        # the .csv extension -- TODO confirm the file format.
        repo, pr_num = line.split()
        if(repo == "repo"): continue  # skip the header row
        # NOTE(review): `break` (rather than `continue`) assumes the file is
        # sorted by PR count in descending order -- confirm.
        if(int(pr_num) < 11): break
        # print(repo + "," + pr_num)
        list_repo.append(line)
l = np.array(list_repo)
n = 3
# Rows of 3 transposed -> 3 round-robin slices of the repo list.
# NOTE(review): reshape raises unless len(l) is divisible by 3.
res = l.reshape((len(l) // n), n).T
count = 1
for list_tmp in res:
    if(count == 1):
        list_repo_1 = list_tmp
        with open('/Users/shuruiz/Work/researchProjects/INTRUDE/data/repo_PR_1.txt', 'w') as f:
            for item in list_repo_1:
                f.write("%s" % item.replace("https://api.github.com/repos/",""))
    if(count == 2):
        list_repo_2 =list_tmp
        with open('/Users/shuruiz/Work/researchProjects/INTRUDE/data/repo_PR_2.txt', 'w') as f:
            for item in list_repo_2:
                f.write("%s" % item.replace("https://api.github.com/repos/",""))
    if(count == 3):
        list_repo_3=list_tmp
        with open('/Users/shuruiz/Work/researchProjects/INTRUDE/data/repo_PR_3.txt', 'w') as f:
            for item in list_repo_3:
                f.write("%s" % item.replace("https://api.github.com/repos/",""))
    count +=1
print(res)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.