blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ac01bdd5589359c258361d561d1a4135be0b5e19 | b74c78396a77bc7e43272b790cb4f12a2f61d498 | /plotlib_to_IE.py | f0b17ea6eb599bda46d12ba0f6a4368abafbed1e | [
"MIT"
] | permissive | louisopen/SimpleHTTPserver | 39c4de1579ddfb0b44dad5b221db2c2a5e41a072 | 59259901c67cee61f031f152f1ee022db9dcf0ab | refs/heads/master | 2020-05-03T02:17:36.404679 | 2019-11-24T11:28:26 | 2019-11-24T11:28:26 | 178,365,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/python
#coding=utf-8
# Render the figure created with matplotlib by opening it directly in the local IE (default) browser
import pandas as pd
import matplotlib.pyplot as plt
from io import BytesIO
from lxml import etree
import base64
import urllib
# Fetch the dataset: download the iris dataset with urllib as an example.
url = "http://aima.cs.berkeley.edu/data/iris.csv"
setl = urllib.request.Request(url)
iris_p = urllib.request.urlopen(setl)
iris = pd.read_csv(iris_p, sep=',',decimal='.',header=None, names=['Sepal_Length','Sepal_Width','Petal_Length','Petal_Width','Species'])
# Convert the pandas DataFrame summary directly into an HTML code string.
iris_des = """<h1>Iris Describe Stastic</h1>"""+ iris.describe().T.to_html()
# Draw an arbitrary figure with matplotlib: one histogram per feature column.
fig,axes = plt.subplots(1,4,sharey = True)
for n in range(4):
    axes[n].hist( iris.iloc[:,n],bins = 15,color = 'b',alpha = 0.5,rwidth= 0.8 )
    axes[n].set_xlabel(iris.columns[n])
plt.subplots_adjust(wspace = 0)
# Save the figure as binary PNG data into an in-memory buffer.
buffer = BytesIO()
plt.savefig(buffer)
plot_data = buffer.getvalue()
# Convert the image data into HTML form (base64 data URI embedded in <img>).
imb = base64.b64encode(plot_data)
#imb = plot_data.encode('base64') # usable on Python 2.7
ims = imb.decode()
imd = "data:image/png;base64,"+ims
iris_im = """<h1>Iris Figure</h1> """ + """<img src="%s">""" % imd
action = """<form action="/" method="POST"> Uart On/Off : <input type="submit" name="submit" value="TXD"> <input type="submit" name="submit" value="RXD"> </form>"""
root = "<title>Iris Dataset</title>"
html = etree.HTML(root + iris_des + action + iris_im) # stitch everything together and parse as HTML
tree = etree.ElementTree(html)
tree.write('index.html')
# Finally, open the generated html file with the default browser.
import webbrowser
webbrowser.open('index.html',new = 1)
| [
"louisopen@gmail.com"
] | louisopen@gmail.com |
7ac9434155a922f056ee417d20b59c11bdad7dc6 | 044350ba74cd40db53fc51bcf8769d0ba6e53916 | /tools/converter/onnx/models/shufflenet_opset_9/TestOnnx.py | 8fc2c2a5ecdbbf56cfd90d77b917899c9957beff | [
"Apache-2.0"
] | permissive | ishine/MAI | cc94c3961d5694e8a9464c9575a2a5eccee7035e | 64753cd2f59af2949896937c2e5dbfc4d8bab1e0 | refs/heads/master | 2021-06-22T14:51:34.735501 | 2021-04-23T07:06:55 | 2021-04-23T07:06:55 | 210,828,572 | 0 | 0 | Apache-2.0 | 2021-04-23T07:06:56 | 2019-09-25T11:30:45 | C++ | UTF-8 | Python | false | false | 2,704 | py | import onnx
import caffe2.python.onnx.backend as backend
import numpy as np
import torch
def addModelOutput(model, name, dims, elem_type=1):
    """Append a named tensor to ``model.graph.output``.

    :param model: ONNX ``ModelProto`` (or any object exposing the same
        protobuf attribute structure) to mutate in place.
    :param name: name of the graph tensor to expose as an output.
    :param dims: iterable of dimension sizes for the output's shape.
    :param elem_type: ONNX element type code (1 == FLOAT by default).
    """
    value_info = model.graph.output.add()
    value_info.name = name
    value_info.type.tensor_type.elem_type = elem_type
    # One repeated `dim` entry per dimension size.
    for size in dims:
        value_info.type.tensor_type.shape.dim.add().dim_value = size
model = onnx.load("shufflenet_opset_9.onnx")
# Expose selected intermediate tensors as extra graph outputs so the backend
# returns them alongside the final softmax (for layer-by-layer comparison).
addModelOutput(model, "gpu_0/conv3_0_1", [1,24,112,112])
#addModelOutput(model, "gpu_0/conv3_0_bn_1", [1,24,112,112])
addModelOutput(model, "gpu_0/conv3_0_bn_2", [1,24,112,112])
addModelOutput(model, "gpu_0/pool_0_1", [1,24,56,56])
addModelOutput(model, "gpu_0/gconv1_0_1", [1,112,56,56])
addModelOutput(model, "gpu_0/gconv3_0_1", [1,112,28,28])
addModelOutput(model, "gpu_0/block0_1", [1,136,28,28])
addModelOutput(model, "gpu_0/gconv3_0_bn_1", [1,112,28,28])
#addModelOutput(model, "gpu_0/gconv1_1_bn_1", [1,112,28,28])
addModelOutput(model, "gpu_0/gconv1_0_bn_2", [1,112,56,56])
addModelOutput(model, "gpu_0/gconv1_7_bn_1", [1,136,28,28])
addModelOutput(model, "gpu_0/gconv1_3_bn_1", [1,136,28,28])
print(len(model.graph.output))
#data = model.SerializeToString();
#file=open("mobilenet_v1_1.0_224_all_outputs.onnx", "wb")
#file.write(data)
#onnx.checker.check_model(model)
#onnx.helper.printable_graph(model.graph)
# Run the augmented model through the caffe2 ONNX backend on CPU.
rep = backend.prepare(model, device="CPU")
print(type(rep))
# Input tensor is read from a newline-separated text dump, NCHW 1x3x224x224.
input=np.fromfile("input.txt", sep='\n').reshape([1,3,224,224])
#input.tofile(file="output/input.data", sep="\n")
outputs = rep.run(input.astype(np.float32))
# Dump every requested output to a text file; `index` walks the outputs in
# the same order the extra outputs were registered above.
index=-1
#index+=1;outputs[index].tofile(file="output/gpu_0_softmax_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_softmax_1.txt", sep="\n", format="%f")
index+=1;outputs[index].tofile(file="output/gpu_0_conv3_0_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_conv3_0_bn_2.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_pool_0_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_gconv1_0_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_gconv3_0_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_block0_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_gconv3_0_bn_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_gconv1_0_bn_2.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_gconv1_7_bn_1.txt", sep="\n", format="%10.8e")
index+=1;outputs[index].tofile(file="output/gpu_0_gconv1_3_bn_1.txt", sep="\n", format="%10.8e")
| [
"15601910741@163.com"
] | 15601910741@163.com |
d69baecf07111352bcca971246e095e8997453a0 | 6271e999dd0b4c8820a33fa12e5cf86a74091f21 | /Project/GradientX.py | 5777fe521530af025487e1b5f3afc116ee7ea9c9 | [] | no_license | 25dishant/Digital-Image-Processing-Laboratory | 5b402a8052e1f738404004ac1c709c7b0ad2ac98 | dd30ad57752d01f96429f3dfef0b74ec47ba3892 | refs/heads/main | 2023-01-21T21:38:56.256407 | 2020-11-19T18:03:19 | 2020-11-19T18:03:19 | 308,820,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | import numpy as np
import cv2 as cv
import math
def GradientX(ImageName, kernel_size, Sobel_kernelX):
    """Convolve the grayscale version of the image file with a horizontal
    Sobel-style kernel and save the result as ``<Name>_GradientX.<ext>``.

    :param ImageName: path to the input image (must contain exactly one '.').
    :param kernel_size: (width, height) of the kernel.
    :param Sobel_kernelX: the kernel matrix multiplied against each window.
    :return: the file name of the written gradient image.
    """
    # Conversion of Image into a matrix
    image = cv.imread(ImageName)
    image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)  # Open Image Grayscale Mode
    ImageMatrix = []  # Initialise a list to keep the Image in matrix form
    for r in range(0, image.shape[0]):
        row = []
        for c in range(0, image.shape[1]):
            row.append(image.item(r, c))
        ImageMatrix.append(row)
    # We have image in the form of matrix at this point.
    ImageMatrix = np.array(ImageMatrix)
    width = len(ImageMatrix[0])  # Width of the Image Matrix
    height = len(ImageMatrix)  # Height of the Image Matrix
    # Condition to check the squared kernel
    if kernel_size[0] == kernel_size[1] and kernel_size[0] > 2:
        # Pad the image to avoid any loss of information after convolution.
        # NOTE(review): `width`/`height` were captured before padding, so the
        # sliding-window loops below do not cover the padded borders — confirm
        # whether the pad is intentional.
        ImageMatrix = np.pad(ImageMatrix, kernel_size[0]-2, mode='constant')
    else:
        pass
    # Collect every kernel-sized window of the image (im2col-style).
    GiantMatrix = []
    for i in range(0, height-kernel_size[1]+1):
        for j in range(0, width-kernel_size[0]+1):
            GiantMatrix.append([
                [ImageMatrix[col][row]
                 for row in range(j, j + kernel_size[0])]
                for col in range(i, i + kernel_size[1])
            ])
    Matrix_Sampling = np.array(GiantMatrix)
    Transformed_Matrix = []
    Matrix_Sampling = np.array(Matrix_Sampling)
    # Element-wise multiply each window with the kernel and sum (convolution).
    for each_mat in Matrix_Sampling:
        Transformed_Matrix.append(
            np.sum(np.multiply(each_mat, Sobel_kernelX)))
    # NOTE(review): reshaping to a square assumes the number of windows is a
    # perfect square, i.e. effectively a square input image — confirm.
    reshape_val = int(math.sqrt(Matrix_Sampling.shape[0]))
    Transformed_Matrix = np.array(
        Transformed_Matrix).reshape(reshape_val, reshape_val)
    # Convert the Tranformed Matrix into an image and save it with a proper name.
    Name, Extension = ImageName.split('.')
    OutputImageName = str(Name+"_GradientX."+Extension)
    cv.imwrite(OutputImageName, Transformed_Matrix)
    return OutputImageName
| [
"25dishant@gmail.com"
] | 25dishant@gmail.com |
47f13f60bbde6624f6f14638ac656ca92fc489e9 | c4aec154c432886e81da8c9008979e2a2fa4200e | /manage.py | f45ab14f92119dfe3dedd7725d054fa9d5ca7d7f | [] | no_license | ytl6547/CloudShellRemoteConfiguration | dcc9a1e19209e10ca5347b0b6dbff608c69e7d4a | d6e31f504925a6a2f8b12e029375b27482d72e79 | refs/heads/master | 2020-05-26T05:08:23.126631 | 2019-08-02T08:26:01 | 2019-08-02T08:26:01 | 188,116,995 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's administrative command-line interface."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'terminalControl.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"tianliyu@usc.edu"
] | tianliyu@usc.edu |
f4117d65ff6df020a1ea1b68117ef5b4d67cbe02 | 09cc4dd926bdaf0233e1f2a182c3904e6809a45f | /leetcode/median_of_two_sorted_arrays.py | e6ebe3e14b2d9ca6cc25d6d4b0f47a745c22466e | [] | no_license | Amaranese/SudokuenPython | 2f873a5153b11cbaeddde7f196ff470988455ce7 | 29c34075f178d0fc12b2a9edd850b1f43e688da5 | refs/heads/main | 2023-04-21T09:53:09.914212 | 2021-05-06T22:48:12 | 2021-05-06T22:48:12 | 365,054,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | from typing import List
class Solution:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        """Return the median of two sorted arrays in O(log(min(n, m))) time.

        Binary-searches the partition of the shorter array such that every
        element left of the combined partition is <= every element right of it.
        """
        # Always binary-search over the shorter array.
        if len(nums1) <= len(nums2):
            short, long_ = nums1, nums2
        else:
            short, long_ = nums2, nums1
        n_short, n_long = len(short), len(long_)  # n_long >= 1
        total = n_short + n_long
        # Number of elements on the "left" side, including the lower median.
        left_count = (total + 1) // 2
        lo, hi = 0, min(n_short, left_count)
        # Find the smallest count taken from `short` that makes a valid split.
        while lo < hi:
            mid = (lo + hi) // 2
            from_long = left_count - mid
            if from_long > 0 and long_[from_long - 1] > short[mid]:
                lo = mid + 1
            else:
                hi = mid
        take_s = lo
        take_l = left_count - take_s
        # Largest element of the left side: max(short[:take_s] + long_[:take_l]).
        if take_l == 0 or (take_s > 0 and short[take_s - 1] >= long_[take_l - 1]):
            left_max = short[take_s - 1]
        else:
            left_max = long_[take_l - 1]
        if total % 2:
            return left_max
        # Smallest element of the right side: min(short[take_s:] + long_[take_l:]).
        if take_l == n_long or (take_s < n_short and short[take_s] <= long_[take_l]):
            right_min = short[take_s]
        else:
            right_min = long_[take_l]
        return (left_max + right_min) / 2
"alejandro.maranes@asendia.com"
] | alejandro.maranes@asendia.com |
9bf0c0a81ed9549e94527bfb3c66a2d8ae8d461b | ce9b7c0e38a77c750b4b53ab7bf8ca5948b60281 | /plaid_ledger/tests/__init__.py | dc588f988efb64345c795d444c353e7c727341e8 | [] | no_license | jessedhillon/plaid-ledger | 86e313916d15dc7b7897f928bc4d7e1dd0d24782 | 972615a5aea1de0c5f4c4e6416933c2f792d5ada | refs/heads/master | 2020-06-02T23:03:43.602701 | 2017-06-22T14:05:45 | 2017-06-22T14:05:45 | 94,226,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import json
import os
from unittest import TestCase
class TestCase(TestCase):
    """Base test case with a helper for loading files from tests/fixtures."""

    def load_fixture(self, path, json=False, lines=False):
        """Load a fixture file relative to the ``fixtures`` directory.

        :param path: file name inside the ``fixtures`` directory.
        :param json: when True, parse the contents as JSON and return the object.
        :param lines: when True, return the file as a list of lines.
        :return: list of lines, parsed JSON object, or the raw string contents.
        """
        # Bug fix: the `json` parameter shadows the module-level `json` import,
        # so the original `json.loads(contents)` called `.loads` on a boolean
        # and raised AttributeError. Re-import the module under a private alias.
        import json as _json

        base_path = os.path.realpath(os.path.dirname(__file__))
        path = os.path.join(base_path, 'fixtures', path)
        with open(path, 'r') as f:
            if lines:
                return f.readlines()
            contents = f.read()
        if json:
            return _json.loads(contents)
        return contents
| [
"jesse@dhillon.com"
] | jesse@dhillon.com |
24f9fbfbcfc5d2a640a7fbfa5c53b49237e549e6 | b7cc55ff82b3387aeef7e5600c439b26851583fd | /tests/integration/issues/github_1546/good3/helper.py | ac8a3361986577c3b53507d4f103030c90a345e1 | [
"Apache-2.0"
] | permissive | slettner/jina | 582e403819142053d4e85347dcf4463ea81d19f9 | 4140961c62359e3acd540a6d88931665c6313824 | refs/heads/master | 2023-04-18T01:15:21.525987 | 2021-05-02T23:05:27 | 2021-05-02T23:05:27 | 336,388,176 | 0 | 0 | Apache-2.0 | 2021-02-05T21:01:10 | 2021-02-05T21:01:09 | null | UTF-8 | Python | false | false | 4,636 | py | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Union, Optional
import numpy as np
def _move_channel_axis(
img: 'np.ndarray', channel_axis_to_move: int, target_channel_axis: int = -1
) -> 'np.ndarray':
"""
Ensure the color channel axis is the default axis.
"""
if channel_axis_to_move == target_channel_axis:
return img
return np.moveaxis(img, channel_axis_to_move, target_channel_axis)
def _load_image(blob: 'np.ndarray', channel_axis: int):
    """Convert an image array into a ``PIL.Image``.

    The colour-channel axis is first moved to the last position, then the
    array is cast to ``uint8`` for PIL.
    """
    from PIL import Image

    channels_last = _move_channel_axis(blob, channel_axis)
    return Image.fromarray(channels_last.astype('uint8'))
def _crop_image(
    img,
    target_size: Union[Tuple[int, int], int],
    top: Optional[int] = None,
    left: Optional[int] = None,
    how: str = 'precise',
):
    """
    Crop the input :py:mod:`PIL` image.

    :param img: :py:mod:`PIL.Image`, the image to be resized
    :param target_size: desired output size. If size is a sequence like
        (h, w), the output size will be matched to this. If size is an int,
        the output will have the same height and width as the `target_size`.
    :param top: the vertical coordinate of the top left corner of the crop box.
    :param left: the horizontal coordinate of the top left corner of the crop box.
    :param how: the way of cropping. Valid values include `center`, `random`, and, `precise`. Default is `precise`.
        - `center`: crop the center part of the image
        - `random`: crop a random part of the image
        - `precise`: crop the part of the image specified by the crop box with the given ``top`` and ``left``.

        .. warning:: When `precise` is used, ``top`` and ``left`` must be fed valid value.
    :return: tuple ``(cropped image, top offset used, left offset used)``.
    """
    import PIL.Image as Image

    assert isinstance(img, Image.Image), 'img must be a PIL.Image'
    img_w, img_h = img.size
    # Normalise target_size into (target_h, target_w).
    if isinstance(target_size, int):
        target_h = target_w = target_size
    elif isinstance(target_size, Tuple) and len(target_size) == 2:
        target_h, target_w = target_size
    else:
        raise ValueError(
            f'target_size should be an integer or a tuple of two integers: {target_size}'
        )
    w_beg = left
    h_beg = top
    if how == 'center':
        w_beg = int((img_w - target_w) / 2)
        h_beg = int((img_h - target_h) / 2)
    elif how == 'random':
        w_beg = np.random.randint(0, img_w - target_w + 1)
        h_beg = np.random.randint(0, img_h - target_h + 1)
    elif how == 'precise':
        # Caller must supply a crop box that lies fully inside the image.
        assert w_beg is not None and h_beg is not None
        assert (
            0 <= w_beg <= (img_w - target_w)
        ), f'left must be within [0, {img_w - target_w}]: {w_beg}'
        assert (
            0 <= h_beg <= (img_h - target_h)
        ), f'top must be within [0, {img_h - target_h}]: {h_beg}'
    else:
        raise ValueError(f'unknown input how: {how}')

    if not isinstance(w_beg, int):
        raise ValueError(f'left must be int number between 0 and {img_w}: {left}')
    if not isinstance(h_beg, int):
        raise ValueError(f'top must be int number between 0 and {img_h}: {top}')
    w_end = w_beg + target_w
    h_end = h_beg + target_h
    img = img.crop((w_beg, h_beg, w_end, h_end))
    return img, h_beg, w_beg
def _resize_short(img, target_size: Union[Tuple[int, int], int], how: str = 'LANCZOS'):
    """Resize a :py:mod:`PIL` image.

    When ``target_size`` is an int, the shorter edge is scaled to that size
    and the aspect ratio is preserved; when it is an ``(h, w)`` pair, the
    output is resized to exactly that shape. ``how`` names a ``PIL.Image``
    interpolation constant (``NEAREST``, ``BILINEAR``, ``BICUBIC``, ``LANCZOS``).
    """
    import PIL.Image as Image

    assert isinstance(img, Image.Image), 'img must be a PIL.Image'
    if isinstance(target_size, int):
        # Scale so that the shorter side becomes target_size.
        scale = float(target_size) / min(img.size[0], img.size[1])
        new_w = int(round(img.size[0] * scale))
        new_h = int(round(img.size[1] * scale))
    elif isinstance(target_size, Tuple) and len(target_size) == 2:
        new_h, new_w = target_size
    else:
        raise ValueError(
            f'target_size should be an integer or a tuple of two integers: {target_size}'
        )
    return img.resize((new_w, new_h), getattr(Image, how))
| [
"noreply@github.com"
] | slettner.noreply@github.com |
b1a23bf5a60bf87b508e8a90f6f00522caa1b10e | 7ce98d3c136726def98327ce5609ee0547698cb5 | /nlp/S2SNoiseModel/EditDistanceUtil.py | 77a146a8820e72f8afe6d66925303ef79c0ad500 | [] | no_license | tyhu/PyAI | c877776b3700d958dcfbcb483482367c20dc6876 | b3f0062339b3d2193031c3f2f7a00f808474c510 | refs/heads/master | 2020-04-15T14:35:30.165027 | 2018-01-15T16:11:21 | 2018-01-15T16:11:21 | 55,170,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | ### Ting-Yao Hu, 2016.06
### adaptive edit distance
import sys
import subprocess
def g2p(s):
    """Run the flite text-to-phone tool on *s* and return its stripped stdout."""
    g2p_exe = '/home2/tingyaoh/flite/flite-2.0.0-release/bin/t2p'
    proc = subprocess.Popen([g2p_exe, s], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _stderr = proc.communicate()
    return stdout.strip()
class EditDistanceUtil(object):
    """Helpers for computing edit distance between token sequences."""

    def editDist(self, lst1, lst2):
        """Return the Levenshtein distance between two sequences.

        Uses a rolling single-row dynamic program with unit costs for
        insertion, deletion, and substitution.
        """
        m = len(lst1) + 1
        n = len(lst2) + 1
        # Bug fix: `range(m)` is immutable under Python 3 (item assignment
        # below raised TypeError); `list(range(m))` is identical under
        # Python 2 and works under Python 3.
        d = list(range(m))
        for idx in range(n - 1):
            prev = d[:]
            d[0] = prev[0] + 1
            for jdx in range(1, m):
                # r: substitute/match, l: insert, u: delete.
                if lst1[jdx - 1] == lst2[idx]:
                    r = prev[jdx - 1]
                else:
                    r = prev[jdx - 1] + 1
                l = d[jdx - 1] + 1
                u = prev[jdx] + 1
                d[jdx] = min(l, u, r)
        return d[-1]

    # For a flite output phone string.
    def phonelstSplit(self, pstr):
        """Split a flite phone string into (vowel-bearing, consonant) lists.

        Pause tokens ('pau') are dropped; a token counts as vowel-bearing if
        it contains any of 'aeiou'.
        """
        plst = pstr.split()
        vlst, clst = [], []
        vows = 'aeiou'
        for p in plst:
            if p == 'pau':
                continue
            if any(v in p for v in vows):
                vlst.append(p)
            else:
                clst.append(p)
        return vlst, clst
if __name__=='__main__':
    # NOTE(review): Python 2 syntax below (print statement, builtin file());
    # this script block cannot run under Python 3 as written.
    eUtil = EditDistanceUtil()
    #print eUtil.phonelstSplit('pau ae1 p')
    # Convert each line of student.txt to phones, dropping the first and
    # last tokens of the flite output.
    for line in file('student.txt'):
        lst = g2p(line).split()[1:-1]
        print ' '.join(lst)
| [
"benken.tyhu@gmail.com"
] | benken.tyhu@gmail.com |
85dfa9396d57949a566aed2b69cea6c01dbb1fc5 | 1aaf49b439ac67707f25cce5e055649f0bcbfee8 | /visitors/migrations/0013_alter_review_authur.py | b1b76c1226d8028630f680740cd19fac14ba3d41 | [] | no_license | Garyschwartz617/Hotel_project | dd7efa2d12281a287f962158655ef83d3ebcecfd | c7fee73147e1681e502ba08d167354d7e452fb4b | refs/heads/main | 2023-07-15T05:24:40.064920 | 2021-08-26T20:41:29 | 2021-08-26T20:41:29 | 399,744,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 3.2.6 on 2021-08-26 18:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('visitors', '0012_alter_review_authur'),
]
operations = [
migrations.AlterField(
model_name='review',
name='authur',
field=models.CharField(max_length=50),
),
]
| [
"gary.s.schwartz617@gmail.com"
] | gary.s.schwartz617@gmail.com |
7f7a3c06102ebce28ee73420eeb79e45582bcaf3 | a399a9024d502e3e23b78196b9d40afc25b30100 | /review_api/views.py | 4903bbd3820ceea7e8c9d040401436a3b47172d2 | [] | no_license | nivaskambhampati1998/Guide-Me | ef47d4fdb7614721b05311bda916e7eb702ba58a | d029b680f1a08a45ccddfc02c23f6e4b154814ca | refs/heads/master | 2023-04-20T06:14:11.328235 | 2021-05-13T06:56:38 | 2021-05-13T06:56:38 | 343,045,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,016 | py | from django.shortcuts import render
# Create your views here.
from rest_framework import generics, views, status
from rest_framework.response import Response
from blog.models import Review
from .serializers import ReviewSerializer, ReviewGuideSerializer
from accounts.models import Guide, User, Tourist
# class ReviewList(generics.ListAPIView):
# queryset = Review.objects.all()
# serializer_class = ReviewSerializer
class ReviewList(views.APIView):
    """List all reviews, or create a review for a guide."""

    def get(self, request):
        """Return every Review, serialized."""
        # NOTE(review): `ObjectDoesNotExist` is never imported in this module,
        # so this handler would raise NameError if the exception path were hit;
        # `Review.objects.all()` does not raise it anyway — confirm intent.
        try:
            data = Review.objects.all()
            serializer = ReviewSerializer(data, many=True)
            return Response(data=serializer.data)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

    def post(self, request, *args, **kwargs):
        """Create a review, translating guide/author names into primary keys."""
        data=request.data
        guideobj = Guide.objects.get(guidename=data['guide'])
        guide = Guide.objects.get(guidename=data['guide']).pk
        data['guide'] = guide
        author = Tourist.objects.get(touristname = data['author']).pk
        data['author'] = author
        # NOTE(review): this recomputes the guide's rating as the average over
        # *all* reviews in the table (not just this guide's), and before the
        # new review is saved — presumably it should filter by guide; confirm.
        b = Review.objects.all()
        a = 0
        for i in b:
            a += i.rating
        guideobj.rating = a/len(b)
        guideobj.save()
        serializer = ReviewSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            print('error', serializer.errors)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ReviewDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single Review by primary key."""
    queryset = Review.objects.all()
    serializer_class = ReviewSerializer
class CurrentUserReview(generics.ListAPIView):
    """List the reviews for the guide named in the URL kwarg ``guidename``."""
    serializer_class = ReviewGuideSerializer

    def get_queryset(self):
        # try:
        guidename = self.kwargs['guidename']
        return Review.objects.filter(guide__guidename = guidename)
        # except:
# return Respose(status = status.40) | [
"nivaskambhampati1998@gmail.com"
] | nivaskambhampati1998@gmail.com |
0e9abfd687f8ca40fa33418a1d257868f1d02881 | 90c6dc7a37effc9fccab1175a2b66a522095a350 | /django_api/users/migrations/0003_user_profile_image.py | 261d683537a7538bf36c8fa6009c896ee4120eb0 | [
"MIT"
] | permissive | ong-ar/nomadgram | 2f81093103c8472127b5e790e994dc0de05f68b7 | 2ddde57209bb3be8887c75a418614d2095e43719 | refs/heads/master | 2021-04-09T13:34:14.810443 | 2018-06-16T07:16:43 | 2018-06-16T07:16:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # Generated by Django 2.0.3 on 2018-03-25 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20180324_1543'),
]
operations = [
migrations.AddField(
model_name='user',
name='profile_image',
field=models.ImageField(null=True, upload_to=''),
),
]
| [
"reclusis@gmail.com"
] | reclusis@gmail.com |
fe8ce333bf60fc7b45efa529638969a76a590a28 | 680443ad8b3912a3beb8d69a62ab4da8c7875af2 | /venv/bin/pip | 57237397fb40df60f813c0c03bafb12e383b9708 | [] | no_license | yamathcy/Django_practice | b35eeae22b485e07652e5e83fe255c059865170a | 1009bf9a92dbf034343d60de5acc88ec967877cc | refs/heads/master | 2020-09-24T06:12:34.656503 | 2019-12-03T18:05:40 | 2019-12-03T18:05:40 | 225,684,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | #!/Users/yamamotoyuuya/PycharmProjects/DjangoTutorial/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script wrapper for pip 19.0.3.
    # Strip any "-script.py"/".exe" suffix so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"s1713120@u.tsukuba.ac.jp"
] | s1713120@u.tsukuba.ac.jp | |
04c0e64afd91b64ea36a0d41189d431b0ca086f5 | ef57b367c4f738f042036ba189201ea18e5c4420 | /main.py | 540ea6b32a2061a7cb860b40cf58f7c2a4a69437 | [] | no_license | burck1/tnt-battlesnake | f7bfc88387063e10b1cee2dc72af9a836daf7e35 | f44a183d92d17e2c9e51e2e801b351fce9ef7122 | refs/heads/master | 2020-05-05T08:35:38.188949 | 2019-04-06T18:21:33 | 2019-04-06T18:21:33 | 179,869,859 | 0 | 0 | null | 2019-04-06T18:14:34 | 2019-04-06T18:14:34 | null | UTF-8 | Python | false | false | 2,768 | py | import argparse
import json
import os
import random
import bottle
from battlesnake.api import ping_response, start_response, move_response, end_response
from battlesnake.agent import Agent
# Per-board-size agents; populated in the __main__ block before serving.
agent_small = None
agent_medium = None
agent_large = None
@bottle.route("/")
def index():
    """Landing page: points visitors at the Battlesnake documentation."""
    return """
    Battlesnake documentation can be found at
    <a href="https://docs.battlesnake.io">https://docs.battlesnake.io</a>.
    """
@bottle.route("/static/<path:path>")
def static(path):
    """
    Given a path, return the static file located relative
    to the static folder.

    This can be used to return the snake head URL in an API response.
    """
    return bottle.static_file(path, root="static/")
@bottle.post("/ping")
def ping():
    """
    A keep-alive endpoint used to prevent cloud application platforms,
    such as Heroku, from sleeping the application instance.
    """
    return ping_response()
@bottle.post("/start")
def start():
    """Handle the new-game notification: reset the agent for the board size.

    Removed the stale ``global agent`` declaration — no global named ``agent``
    exists and nothing was assigned, so it was a misleading no-op.
    """
    data = bottle.request.json
    # Dispatch on board width: 9 -> small, 11 -> medium, anything else -> large.
    if data["board"]["width"] == 9:
        agent_small.on_reset()
    elif data["board"]["width"] == 11:
        agent_medium.on_reset()
    else:
        agent_large.on_reset()
    color = "#00529F"
    return start_response(color)
@bottle.post("/move")
def move():
    """Return the next move chosen by the agent matching the board size.

    Removed the stale ``global agent`` declaration — no global named ``agent``
    exists and nothing was assigned, so it was a misleading no-op.
    """
    data = bottle.request.json
    # Dispatch on board width: 9 -> small, 11 -> medium, anything else -> large.
    if data["board"]["width"] == 9:
        direction = agent_small.get_direction(data)
    elif data["board"]["width"] == 11:
        direction = agent_medium.get_direction(data)
    else:
        direction = agent_large.get_direction(data)
    return move_response(direction)
@bottle.post("/end")
def end():
    """Handle the end-of-game notification from the engine."""
    data = bottle.request.json
    # print(json.dumps(data))
    return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()

if __name__ == "__main__":
    # Paths to the three per-board-size model checkpoints, plus server port.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "path_small", help="Path to the small checkpoint.", type=str, default=None
    )
    parser.add_argument(
        "path_medium", help="Path to the medium checkpoint.", type=str, default=None
    )
    parser.add_argument(
        "path_large", help="Path to the large checkpoint.", type=str, default=None
    )
    parser.add_argument(
        "--port", help="Port of the web server.", type=str, default="8080"
    )
    args, _ = parser.parse_known_args()
    # NOTE(review): the request handlers dispatch on board widths 9/11/other,
    # while the agents are sized 9/13/21 — presumably intentional padding in
    # the agent observation; confirm.
    agent_small = Agent(width=9, height=9, stacked_frames=2, path=args.path_small)
    agent_medium = Agent(width=13, height=13, stacked_frames=2, path=args.path_medium)
    agent_large = Agent(width=21, height=21, stacked_frames=2, path=args.path_large)
    bottle.run(application, host="0.0.0.0", port=args.port, debug=False, quiet=True)
| [
"frederik.schubert@inside-m2m.de"
] | frederik.schubert@inside-m2m.de |
16e826ecdbdd5e5336b355e4e5bb71d3e46b0848 | 0b1da793a53af8274de271d59f6f07a7fd6e1451 | /scripts/main.py | 1a4fd582b7e339a2e1a377417875d5fab0183f00 | [
"MIT"
] | permissive | paulmetzger/Peso | 3ce70c1553f95f97c5e0ee8a2c07e9aec48a4ce0 | eaac931ff523760eac017c35cad10dcc76a08272 | refs/heads/master | 2020-12-18T22:42:18.849609 | 2020-02-26T13:53:26 | 2020-02-26T13:53:26 | 235,541,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,088 | py | #!/usr/bin/env python3
import compilation
import config
import execution
import getopt
import models
import processing
import sqlalchemy
import sys
import templates
from utils import status, status_message
from termcolor import cprint
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from orm_classes import ThroughputSample, \
BatchSizeModelAccuracySample, \
ThroughputHeatmapSample, \
DOPModelAccuracySample, \
ThroughputWithHandImplementations, \
Base
import os
# Shorthand for clearing the terminal (POSIX `clear`; no-op value is the exit code).
clear = lambda: os.system('clear')
def update_progress(progress):
    """Render a 10-segment progress bar for *progress* (0-100) on one line.

    Bug fix: the original printed a trailing newline, which defeated the
    leading carriage return and stacked a new bar on every call; printing
    with ``end=''`` (and flushing) keeps the bar updating in place.
    """
    bar = '#' * int(progress / 10)
    print('\r[{0}] {1}%'.format(bar, progress), end='', flush=True)
def main(argv):
    """Entry point: dispatch to an experiment suite chosen via -t/--type.

    :param argv: command-line arguments (without the program name), parsed
        with getopt; supports ``-h`` and ``-t/--type <experiment_type>``.
    """
    # Setup code for SQLAlchemy.
    engine = create_engine('sqlite:///rt.db')
    # Bug fix: the attribute is `bind`, not `bin` — `.bin` silently created a
    # meaningless attribute and left the metadata unbound.
    Base.metadata.bind = engine
    db_session_factory = sessionmaker(bind=engine)
    session = db_session_factory()

    opts, args = getopt.getopt(argv, 'ht:', ['type='])
    # Bug fix: guard against being invoked with no options, which used to
    # raise IndexError on `opts[0]`.
    if not opts:
        print('main.py -t <experiment_type>')
        return
    if opts[0][0] == '-h':
        print('main.py -t <experiment_type>')
        print('Experiment types: throughput, batch_size_model_accuracy, worker_model_accuracy, heatmap, hand_implementation')
    elif opts[0][0] in ('-t', '--type'):
        experiment = opts[0][1]
        if experiment == 'throughput':
            throughput_experiments(session)
        elif experiment == 'batch_size_model_accuracy':
            batch_size_model_accuracy_experiments(session)
        elif experiment == 'worker_model_accuracy':
            worker_model_accuracy_experiments(session)
        elif experiment == 'heatmap':
            print('Starting heatmap experiments...')
            heatmap_experiments(session)
        elif experiment == 'hand_implementation':
            print('Starting experiments with hand implementations...')
            throughput_with_hand_implementations(session)
        else:
            print('Could not recognise the experiment type.')
    print('Done.')
def heatmap_experiments(session):
    """Sweep DOP x batch-size combinations and record measured minimum periods.

    For every configured data type and input size, the degree of parallelism
    (DOP) is swept from 1 to max_workers; for each DOP the batch size grows
    until a combination fails (does not compile, crashes, or misses its
    deadline). Each combination is sampled `samples` times. Existing samples
    in the database are counted first so interrupted runs can resume.

    NOTE(review): the source reached review with indentation stripped; the
    block structure below was reconstructed from the control flow and should
    be confirmed against the upstream file.
    """
    exp_config = config.read_config()['max_throughput_heatmap']
    app_name = exp_config['application_name']
    deadline = exp_config['relative_deadline']
    max_workers = exp_config['max_workers']
    samples = exp_config['samples']
    data_types = exp_config['data_types']
    for data_type in data_types:
        for input_i in range(len(exp_config['input_array_size'])):
            # Input sizes and worker WCETs are parallel lists.
            input_array_size = exp_config['input_array_size'][input_i]
            worker_wcet = exp_config['worker_wcet'][input_i]
            # Iterate over the degrees of parallelism (worker counts).
            for dop in range(1, max_workers + 1, 1):
                # Resume support: skip this DOP if enough failed samples for it
                # are already recorded.
                non_viable_parameters = session.query(ThroughputHeatmapSample) \
                    .filter(
                        sqlalchemy.or_(
                            ThroughputHeatmapSample.missed_deadline == 1,
                            ThroughputHeatmapSample.compiled == 0,
                            ThroughputHeatmapSample.run_time_error == 1
                        )) \
                    .filter(ThroughputHeatmapSample.sample_application_name == app_name) \
                    .filter(ThroughputHeatmapSample.input_size == input_array_size) \
                    .filter(ThroughputHeatmapSample.relative_deadline == deadline) \
                    .filter(ThroughputHeatmapSample.worker_wcet == worker_wcet) \
                    .filter(ThroughputHeatmapSample.dop == dop) \
                    .filter(ThroughputHeatmapSample.data_type == data_type) \
                    .count()
                found_non_viable_batch_size = non_viable_parameters >= samples
                batch_size = 0
                # Grow the batch size until a non-viable combination is found.
                while not found_non_viable_batch_size:
                    batch_size += 1
                    # Check if the current data point already exists
                    '''query_result = session.query(ThroughputHeatmapSample.sample).get(
                        (app_name,
                        input_array_size,
                        deadline,
                        worker_wcet,
                        batch_size,
                        dop))'''
                    sample_count = session.query(ThroughputHeatmapSample) \
                        .filter(ThroughputHeatmapSample.sample_application_name == app_name) \
                        .filter(ThroughputHeatmapSample.input_size == input_array_size) \
                        .filter(ThroughputHeatmapSample.relative_deadline == deadline) \
                        .filter(ThroughputHeatmapSample.worker_wcet == worker_wcet) \
                        .filter(ThroughputHeatmapSample.dop == dop) \
                        .filter(ThroughputHeatmapSample.batch_size == batch_size) \
                        .filter(ThroughputHeatmapSample.data_type == data_type) \
                        .count()
                    print('Sample count: ' + str(sample_count))
                    print('Max. samples: ' + str(samples))
                    print('Collect more samples: ' + str(sample_count < samples))
                    print('Dop: ' + str(dop))
                    print('Batch size: ' + str(batch_size))
                    print('Data type: ' + str(data_type))
                    # input('Press...')
                    while sample_count < samples:
                        succeeded = True
                        compiled = True
                        run_time_error = False
                        missed_deadline = False
                        measured_min_period = -1
                        # Phase 1: measure the max. throughput at a fixed period.
                        succeeded &= status('Creating source file from template...',
                                            templates.create_app_for_throughput_experiments(
                                                app_name,
                                                300,  # Period
                                                input_array_size,
                                                deadline,
                                                worker_wcet,
                                                dop,
                                                (False if batch_size == 1 else True),  # True == batching is on
                                                batch_size,
                                                dop == 1,
                                                data_type
                                            ))
                        succeeded &= status('Compiling...', compilation.compile_farm())
                        if not succeeded:
                            cprint('Measure max. throughput | Could not compile the application. DOP: {} Batch size {}'\
                                .format(dop, batch_size), 'red')
                            compiled = False
                        else:
                            execution_status, out = execution.execute_farm()
                            succeeded &= status('Executing...', execution_status)
                            if not succeeded:
                                cprint('Measure max. throughput | Could not run the application. DOP: {} Batch size {}' \
                                    .format(dop, batch_size), 'red')
                                run_time_error = True
                            else:
                                internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
                                measured_min_period = processing.compute_interarrival_time(output, batch_size, dop)
                                # Phase 2: check if batch size and measured period are viable.
                                succeeded &= status('Creating source file from template...',
                                                    templates.create_app_for_throughput_heatmap_experiments(
                                                        app_name,
                                                        measured_min_period,  # period
                                                        input_array_size,
                                                        deadline,
                                                        dop,
                                                        worker_wcet,
                                                        batch_size,
                                                        dop == 1,
                                                        data_type
                                                    ))
                                # Check if the current batch size is viable
                                succeeded &= status('Compiling...', compilation.compile_farm())
                                if not succeeded:
                                    cprint('Check if the current batch size is viable | Could not compile the application. DOP: {} Batch size {}'\
                                        .format(dop, batch_size), 'red')
                                    compiled = False
                                else:
                                    execution_status, out = execution.execute_farm()
                                    succeeded &= status('Executing...', execution_status)
                                    if not succeeded:
                                        cprint('Check if the current batch size is viable | Could not run the application. DOP: {} Batch size {}' \
                                            .format(dop, batch_size), 'red')
                                        run_time_error = True
                                    else:
                                        internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
                                        missed_deadline = processing.check_if_deadline_has_been_missed(output, deadline)
                                        if missed_deadline:
                                            cprint('Check if the current batch size is viable | Jobs miss their deadline. DOP: {} Batch size {}' \
                                                .format(dop, batch_size), 'red')
                                            succeeded = False
                        # Save the result of this sample (success or failure).
                        sample = ThroughputHeatmapSample(
                            sample_application_name=app_name,
                            input_size=input_array_size,
                            relative_deadline=deadline,
                            worker_wcet=worker_wcet,
                            batch_size=batch_size,
                            dop=dop,
                            min_period=measured_min_period,
                            sample=sample_count + 1,
                            data_type=data_type,
                            compiled=compiled,
                            missed_deadline=missed_deadline,
                            run_time_error=run_time_error
                        )
                        session.add(sample)
                        session.commit()
                        sample_count += 1
                        # NOTE(review): placement reconstructed — any failed
                        # sample marks this batch size as non-viable.
                        found_non_viable_batch_size |= not succeeded
def run_worker_model_accuracy_experiment(sample_application,
                                         period,
                                         input_array_size,
                                         relative_deadline,
                                         worker_wcet,
                                         subtract_from_dop):
    """Run a single worker-count (DOP) model accuracy experiment.

    Generates a sample application from the template using the batch size and
    DOP predicted by the analytical model (minus ``subtract_from_dop``
    workers), compiles and executes it, and checks whether the measured
    minimum inter-arrival time matches the requested ``period``.

    Returns:
        tuple: ``(succeeded, matched_throughput, batch_size, dop)`` where
        ``succeeded`` is False if any stage (template generation, compilation,
        execution) failed, and ``matched_throughput`` reports whether the
        measured minimum period was at most ``period`` (+10ns tolerance).
    """
    succeeded = True
    # Compute batch size and worker count predicted by the analytical model
    computed_batch_size, computed_dop = models.compute_optimal_dop_and_batch_size(worker_wcet, period, relative_deadline)
    status_message('DEBUG | batch_size: {}, dop: {}'.format(computed_batch_size, computed_dop))
    # Bug fix: seed internal_param with the model's predictions so the return
    # statement below cannot raise a NameError when template generation,
    # compilation, or execution fails (previously internal_param was only
    # assigned inside the success branch).
    internal_param = {'batch_size': computed_batch_size, 'dop': computed_dop}
    # Generate source code from template
    succeeded &= status('Creating source files from templates...',
                        templates.create_app_for_worker_model_accuracy_experiments(
                            sample_application,
                            period,
                            input_array_size,
                            relative_deadline,
                            worker_wcet,
                            computed_batch_size,
                            computed_dop,
                            subtract_from_dop
                        ))
    # Compile
    if succeeded:
        succeeded &= status('Compiling...', compilation.compile_farm())
    # Run the experiment
    if succeeded:
        execution_status, out = execution.execute_farm()
        succeeded &= status('Executing...', execution_status)
    # Process the output
    matched_throughput = False
    if succeeded:
        internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
        # Compute the measured minimum period once (previously computed twice:
        # once for the comparison and once for the print below).
        measured_min_period = processing.compute_interarrival_time(
            output,
            internal_param['batch_size'],
            internal_param['dop'])
        # Add 10ns to the period to account for the accuracy of the board's timers
        matched_throughput = measured_min_period <= period + 10
        print('Measured min. period: {}'.format(measured_min_period))
    return succeeded, matched_throughput, internal_param['batch_size'], internal_param['dop']
def worker_model_accuracy_experiments(session):
    """Sweep the configured parameter space and measure how accurately the
    analytical model predicts the minimum worker count (DOP).

    For every benchmark/parameter combination two kinds of samples are
    collected: the model's own prediction (is_oracle == False) and an
    empirical search for the true minimum DOP (is_oracle == True).  Results
    are stored as DOPModelAccuracySample rows via the given session.
    """
    benchmarks = config.read_config()['dop_model_accuracy']
    for app in benchmarks.keys():
        bench_config = benchmarks[app]
        # Per-benchmark parameter lists; entries at the same index belong together.
        relative_deadline_list = bench_config['relative_deadline']
        input_array_size_list = bench_config['input_array_size']
        worker_wcet_list = bench_config['worker_wcet']
        period_start_list = bench_config['period_start']
        period_end_list = bench_config['period_end']
        period_steps_list = bench_config['period_steps']
        samples = bench_config['samples']
        for i in range(len(input_array_size_list)):
            relative_deadline = relative_deadline_list[i]
            input_array_size = input_array_size_list[i]
            worker_wcet = worker_wcet_list[i]
            period_start = period_start_list[i]
            period_end = period_end_list[i]
            period_steps = period_steps_list[i]
            # Iterate over all periods
            for period in range(period_start, period_end + period_steps, period_steps):
                # Find the optimum and test predictions
                for is_oracle in [False, True]:
                    # Count samples already in the DB so interrupted runs can resume.
                    sample_count = session.query(DOPModelAccuracySample) \
                        .filter(DOPModelAccuracySample.sample_application_name == app) \
                        .filter(DOPModelAccuracySample.input_size == input_array_size) \
                        .filter(DOPModelAccuracySample.relative_deadline == relative_deadline) \
                        .filter(DOPModelAccuracySample.worker_wcet == worker_wcet) \
                        .filter(DOPModelAccuracySample.period == period) \
                        .filter(DOPModelAccuracySample.is_oracle == is_oracle) \
                        .count()
                    print('Is oracle: {}'.format(is_oracle))
                    print('Sample count: {}'.format(sample_count))
                    while sample_count < samples:
                        if is_oracle:
                            # Empirically search for the smallest DOP that still
                            # matches the requested throughput by removing workers
                            # one at a time from the model's prediction.
                            print('Finding the minimum DOP...')
                            matched_throughput = True
                            subtract_from_dop = 0
                            while matched_throughput:
                                print('Subtract from DOP: ' + str(subtract_from_dop))
                                succeeded, matched_throughput, batch_size, dop = run_worker_model_accuracy_experiment(
                                    app,
                                    period,
                                    input_array_size,
                                    relative_deadline,
                                    worker_wcet,
                                    subtract_from_dop)
                                print('Matched throughput: ' + str(matched_throughput))
                                if not succeeded:
                                    status_message('Oracle experiments failed!')
                                    exit(0)
                                if matched_throughput and not dop == 1:
                                    # Still fast enough: try one worker less.
                                    subtract_from_dop += 1
                                elif matched_throughput and dop == 1:
                                    # Cannot go below a single worker.
                                    break
                                elif not matched_throughput:
                                    if subtract_from_dop == 0:
                                        status_message('ERROR | The DOP predicted by our model is too low')
                                        exit(0)
                                    # Last reduction went too far; the minimum is one worker more.
                                    dop += 1
                                    # NOTE(review): resetting matched_throughput to True re-enters the
                                    # loop and re-runs the experiment with an unchanged subtract_from_dop;
                                    # if the re-run keeps failing to match, this never terminates -- a
                                    # `break` looks intended here. TODO confirm.
                                    matched_throughput = True
                        else:
                            succeeded, matched_throughput, batch_size, dop = run_worker_model_accuracy_experiment(
                                app,
                                period,
                                input_array_size,
                                relative_deadline,
                                worker_wcet,
                                0  # Subtract from DOP
                            )
                        if succeeded:
                            # Persist one sample row for this parameter combination.
                            sample = DOPModelAccuracySample(
                                sample_application_name=app,
                                input_size             =input_array_size,
                                relative_deadline      =relative_deadline,
                                worker_wcet            =worker_wcet,
                                period                 =period,
                                is_oracle              =is_oracle,
                                sample                 =sample_count + 1,
                                batch_size             =batch_size,
                                dop                    =dop,
                                success                =succeeded,
                                matched_throughput     =matched_throughput
                            )
                            session.add(sample)
                            session.commit()
                            sample_count += 1
                        else:
                            status_message('Compilation or execution did not succeed. Exiting...')
                            exit(0)
def run_batch_size_accuracy_experiment(sample_application,
                                       period,
                                       input_array_size,
                                       relative_deadline,
                                       worker_wcet,
                                       add_to_batch_size=0):
    """Run one batch-size model accuracy experiment.

    Generates a sample application using the analytically predicted batch
    size (plus ``add_to_batch_size``) and DOP, compiles and runs it, and
    checks the output for missed deadlines.  Any stage failure terminates
    the whole process via exit(0) after printing a status message.

    Returns:
        tuple: ``(succeeded, missed_deadline, batch_size, dop)`` with the
        batch size and DOP actually used by the generated application.
    """
    # Ask the analytical model for its batch size / worker count prediction.
    predicted_batch_size, predicted_dop = models.compute_optimal_dop_and_batch_size(worker_wcet, period,
                                                                                    relative_deadline)
    status_message('DEBUG | batch_size: {}, dop: {}'.format(predicted_batch_size, predicted_dop))
    # Generate source code from the template; bail out if that fails.
    created = status('Creating source files from templates...',
                     templates.create_app_for_batch_size_accuracy_experiments(
                         sample_application,
                         period,
                         input_array_size,
                         relative_deadline,
                         worker_wcet,
                         predicted_batch_size,
                         predicted_dop,
                         add_to_batch_size=add_to_batch_size
                     ))
    if not created:
        status_message("Could not create the sample application.")
        exit(0)
    # Log the full parameter set before compiling.
    status_message(('DEBUG | period: {}, input_array_size: {}, relative_deadline: {},' +
                    ' worker_wcet: {}, add_to_batch_size: {}')
                   .format(period,
                           input_array_size,
                           relative_deadline,
                           worker_wcet,
                           add_to_batch_size))
    if not status('Compiling...', compilation.compile_farm()):
        status_message("Could not compile the sample application.")
        exit(0)
    # Execute on the target and bail out on failure.
    execution_status, out = execution.execute_farm()
    if not status('Executing...', execution_status):
        status_message("Could not run the sample application.")
        exit(0)
    # Parse the board output and look for deadline misses.
    internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
    missed_deadline = processing.check_if_deadline_has_been_missed(output, relative_deadline)
    return True, missed_deadline, internal_param['batch_size'], internal_param['dop']
def batch_size_model_accuracy_experiments(session):
    """Sweep the configured parameter space and measure how accurately the
    analytical model predicts the maximum viable batch size.

    The model's prediction is tested directly (is_oracle == False) and
    compared against an empirical search for the largest batch size that
    still meets all deadlines (is_oracle == True).  Results are stored as
    BatchSizeModelAccuracySample rows via the given session.
    """
    benchmarks = config.read_config()['batch_size_model_accuracy']
    for sample_application in benchmarks.keys():
        bench_config = benchmarks[sample_application]
        # Per-benchmark parameter lists; entries at the same index belong together.
        relative_deadline_list = bench_config['relative_deadline']
        input_array_size_list = bench_config['input_array_size']
        worker_wcet_list = bench_config['worker_wcet']
        period_start_list = bench_config['period_start']
        period_end_list = bench_config['period_end']
        period_steps_list = bench_config['period_steps']
        samples = bench_config['samples']
        for i in range(len(input_array_size_list)):
            relative_deadline = relative_deadline_list[i]
            input_array_size = input_array_size_list[i]
            worker_wcet = worker_wcet_list[i]
            period_start = period_start_list[i]
            period_end = period_end_list[i]
            period_steps = period_steps_list[i]
            # Iterate over all periods
            for period in range(period_start, period_end + period_steps, period_steps):
                # Find the optimum and test predictions
                for is_oracle in [False, True]:
                    # Check if database entry for the current problem instance exists already
                    sample_count = session.query(BatchSizeModelAccuracySample) \
                        .filter(BatchSizeModelAccuracySample.sample_application_name == sample_application) \
                        .filter(BatchSizeModelAccuracySample.input_size == input_array_size) \
                        .filter(BatchSizeModelAccuracySample.relative_deadline == relative_deadline) \
                        .filter(BatchSizeModelAccuracySample.worker_wcet == worker_wcet) \
                        .filter(BatchSizeModelAccuracySample.period == period) \
                        .filter(BatchSizeModelAccuracySample.is_oracle == is_oracle) \
                        .count()
                    while sample_count < samples:
                        add_to_batch_size = 0
                        if is_oracle:
                            # Find the optimum
                            missed_deadline = False
                            # TODO: Refactor duplication
                            add_to_batch_size = 0
                            # Empirically grow the batch size until deadlines are missed.
                            while not missed_deadline:
                                succeeded, missed_deadline, batch_size, _ = run_batch_size_accuracy_experiment(
                                    sample_application,
                                    period,
                                    input_array_size,
                                    relative_deadline,
                                    worker_wcet,
                                    add_to_batch_size=add_to_batch_size)
                                if not succeeded:
                                    status_message('ERROR | Oracle experiments failed!')
                                    exit(0)
                                if not missed_deadline:
                                    # Deadlines still met: try one batch element more.
                                    add_to_batch_size += 1
                                else:
                                    if add_to_batch_size == 0:
                                        status_message('ERROR | The batch size chosen by our model is too large.')
                                        exit(0)
                                    # This value will be stored in the DB
                                    # Subtract by 1 since the application fails to meet deadlines with the current
                                    # batch size
                                    status_message('DEBUG | Missed deadlines')
                                    batch_size -= 1
                                    # NOTE(review): resetting missed_deadline to False re-enters the loop
                                    # and re-runs the experiment with an unchanged add_to_batch_size, which
                                    # can loop forever once a deadline miss occurs -- a `break` looks
                                    # intended here. TODO confirm.
                                    missed_deadline = False
                        else:
                            succeeded, missed_deadline, batch_size, _ = run_batch_size_accuracy_experiment(
                                sample_application,
                                period,
                                input_array_size,
                                relative_deadline,
                                worker_wcet,
                                add_to_batch_size)
                            if missed_deadline:
                                status_message('ERROR | The batch size chosen by our model is too large.')
                                exit(0)
                        # Save results
                        if succeeded:
                            sample = BatchSizeModelAccuracySample(
                                sample_application_name=sample_application,
                                input_size             =input_array_size,
                                relative_deadline      =relative_deadline,
                                worker_wcet            =worker_wcet,
                                period                 =period,
                                is_oracle              =is_oracle,
                                sample                 =sample_count + 1,
                                batch_size             =batch_size,
                                success                =succeeded,
                                deadline_missed        =missed_deadline
                            )
                            session.add(sample)
                            session.commit()
                            sample_count += 1
                        else:
                            # NOTE(review): unlike worker_model_accuracy_experiments this branch does
                            # not call exit(0); sample_count is never incremented, so a persistent
                            # failure spins in the enclosing while loop. TODO confirm intended.
                            status_message('Compilation or execution did not succeed. Exiting...')
def throughput_experiments(session):
    """Measure the maximum sustainable throughput (minimum inter-arrival
    time) of each configured benchmark over the full parameter sweep.

    The sweep covers batching on/off, WCET/input-size pairs, candidate batch
    sizes, worker counts (DOP) and relative deadlines.  Each measurement is
    stored as a ThroughputSample row via the given session; existing rows are
    counted first so interrupted runs resume where they left off.
    """
    benchmarks = config.read_config()['throughput']
    for benchmark in benchmarks.keys():
        # Read config file
        bench_config = benchmarks[benchmark]
        wcets = bench_config['wcet']
        input_sizes = bench_config['input_array_size']
        rel_dead_start = bench_config['relative_deadline_start']
        rel_dead_steps = bench_config['relative_deadline_steps']
        rel_dead_stop = bench_config['relative_deadline_stop']
        workers_start = bench_config['workers_start']
        workers_steps = bench_config['workers_steps']
        workers_stop = bench_config['workers_stop']
        samples = bench_config['samples']
        # Experiment total used only for the progress indicator
        # (factor 2 = with/without batching).
        total_number_of_experiments = 2 * len(wcets) \
                                      * len(range(workers_start, workers_stop + 1, workers_steps)) \
                                      * len(range(rel_dead_start, rel_dead_stop + rel_dead_steps, rel_dead_steps))
        experiment_count = 0
        # The baseline does not use batching
        for with_batching in [True, False]:
            # Sweep over the parameter space
            # Parameter: input sizes + corresp. WCETs
            for wcet_index in range(len(wcets)):
                wcet = wcets[wcet_index]
                input_size = input_sizes[wcet_index]
                batch_sizes = bench_config['batch_sizes'][wcet_index]
                for batch_size in batch_sizes:
                    if not with_batching:
                        # NOTE(review): the baseline forces batch_size to 1 but still
                        # iterates once per entry in batch_sizes; the DB sample count
                        # below keeps it from re-measuring identical runs. TODO confirm.
                        batch_size = 1
                    # Parameter: worker count
                    for dop in range(workers_start, workers_stop + 1, workers_steps):
                        # Parameter: relative deadline
                        for rel_dead in range(rel_dead_start, rel_dead_stop + rel_dead_steps, rel_dead_steps):
                            clear()
                            update_progress(experiment_count / total_number_of_experiments)
                            print('Experiment: {}, with batching: {}, WCET: {}, DOP: {}, D: {}, Batch size: {}'.format(
                                benchmark,
                                with_batching,
                                wcet,
                                dop,
                                rel_dead,
                                batch_size))
                            # Check if data for this current parameter set exists
                            # and execute experiments if they do not exist
                            sample_count = session.query(ThroughputSample) \
                                .filter(ThroughputSample.experiment_name == benchmark) \
                                .filter(ThroughputSample.input_size == input_size) \
                                .filter(ThroughputSample.relative_deadline == rel_dead) \
                                .filter(ThroughputSample.worker_wcet == wcet) \
                                .filter(ThroughputSample.with_batching == int(with_batching)) \
                                .filter(ThroughputSample.batch_size == batch_size) \
                                .count()
                            while sample_count < samples:
                                # Prepare experiments
                                status_code = True
                                status_code &= status('Creating source files from templates... ', templates.create_app_for_throughput_experiments(
                                    benchmark,
                                    300, # period. This does not set the period with which new input data arrives in this case.
                                         # This is just a dummy values that is need to compute the size of the task
                                         # farm internal buffer.
                                    input_size,
                                    rel_dead,
                                    wcet,
                                    dop,
                                    with_batching,
                                    batch_size))
                                compilation_succeeded = False
                                if status_code:
                                    status_code &= status('Compiling... ', compilation.compile_farm())
                                    compilation_succeeded = status_code
                                # Run experiment
                                if status_code:
                                    execution_status, out = execution.execute_farm()
                                    status_code &= status('Executing... ', execution_status)
                                    print(out)
                                # Prepare results
                                if status_code:
                                    internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
                                    # Minimum inter-arrival time achieved by this configuration.
                                    period = processing.compute_interarrival_time(output, batch_size, dop)
                                    # Check if the application could successfully compiled and run
                                    status_message('Compilation and execution was successful')
                                else:
                                    status_message('Compilation or execution did not succeed. Exiting...')
                                    break
                                # Print the result to the console
                                status_message('Found min. interarrival time: ' + str(period))
                                # Store the result in the database
                                sample = ThroughputSample(
                                    experiment_name  =benchmark,
                                    input_size       =input_size,
                                    relative_deadline=rel_dead,
                                    worker_wcet      =wcet,
                                    dop              =dop,
                                    with_batching    =int(with_batching),
                                    sample           =sample_count + 1,
                                    success          =int(compilation_succeeded))
                                sample.batch_size = batch_size
                                sample.min_interarrival_time = period
                                # Save result
                                session.add(sample)
                                session.commit()
                                sample_count += 1
                            experiment_count += 1
def throughput_with_hand_implementations(session):
    """Compare the throughput of generated task-farm applications with
    hand-implemented versions of the same benchmarks.

    For each benchmark the maximum viable batch size is first determined for
    the generated variant (is_hand_implementation == False); the hand
    implementation then reuses that batch size, read back from the database.
    The resulting minimum period is stored as a
    ThroughputWithHandImplementations row via the given session.
    """
    benchmarks = config.read_config()['throughput_with_hand_implementations']
    for benchmark_in_config in benchmarks.keys():
        # Read config file
        bench_config = benchmarks[benchmark_in_config]
        wcets = bench_config['wcets']
        input_sizes = bench_config['input_array_sizes']
        rel_deadlines = bench_config['relative_deadlines']
        samples = bench_config['samples']
        # Total used only for the progress indicator (2 = generated/hand-written).
        total_number_of_experiments = 2 * \
                                      len(input_sizes) * \
                                      len(rel_deadlines)
        experiment_count = 1
        for is_hand_implementation in [False, True]:
            if is_hand_implementation:
                benchmark = 'hand_implemented_' + benchmark_in_config
            else:
                benchmark = benchmark_in_config
            for input_size_index in range(len(input_sizes)):
                # Worker count is fixed for this comparison.
                dop = 6
                wcet = wcets[input_size_index]
                input_size = input_sizes[input_size_index]
                rel_dead = rel_deadlines[input_size_index]
                clear()
                update_progress((experiment_count / total_number_of_experiments) * 100)
                print('Experiment: {}, WCET: {}, DOP: {}, D: {}'.format(
                    benchmark,
                    wcet,
                    dop,
                    rel_dead
                ))
                # Count samples already in the DB so interrupted runs can resume.
                sample_count = session.query(ThroughputWithHandImplementations) \
                    .filter(ThroughputWithHandImplementations.sample_application_name == benchmark_in_config) \
                    .filter(ThroughputWithHandImplementations.input_size == input_size) \
                    .filter(ThroughputWithHandImplementations.relative_deadline == rel_dead) \
                    .filter(ThroughputWithHandImplementations.worker_wcet == wcet) \
                    .filter(ThroughputWithHandImplementations.dop == dop) \
                    .filter(ThroughputWithHandImplementations.is_hand_implementation == is_hand_implementation) \
                    .count()
                while sample_count < samples:
                    # Find max. batch size
                    batch_size = 1
                    if not is_hand_implementation:
                        # Grow the batch size until compilation, execution, or the
                        # deadlines fail; the last working value is kept below.
                        while True:
                            succeeded = True
                            if is_hand_implementation:
                                # NOTE(review): dead branch -- the enclosing
                                # `if not is_hand_implementation:` guarantees this
                                # condition is False here. TODO confirm and remove.
                                succeeded &= status('Creating source files from template...',
                                                    templates.create_app_for_comparison_with_hand_implementations(
                                                        benchmark,
                                                        250, # period
                                                        input_size,
                                                        batch_size,
                                                        dop,
                                                        'batch_size_accuracy'
                                                    ))
                            else:
                                succeeded &= status('Creating source files from template...',
                                                    templates.create_app_for_batch_size_accuracy_experiments(
                                                        benchmark,
                                                        250, # period
                                                        input_size,
                                                        rel_dead,
                                                        wcet,
                                                        batch_size,
                                                        dop,
                                                        0 # subtract_from_dop
                                                    ))
                            if not succeeded:
                                cprint('ERROR: Could not generate source file', 'red')
                                exit(0)
                            succeeded &= status('Compiling...', compilation.compile_farm())
                            if not succeeded:
                                cprint('Check if the current batch size is viable | Could not compile the application', 'blue')
                                break
                            execution_status, out = execution.execute_farm()
                            succeeded &= status('Executing...', execution_status)
                            if not succeeded:
                                cprint('Check if the current batch size is viable | Could not run the application', 'blue')
                                break
                            internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
                            missed_deadline = processing.check_if_deadline_has_been_missed(output, rel_dead)
                            if missed_deadline:
                                cprint('Check if the current batch size is viable | ' +
                                       'Jobs miss their deadline. DOP: {} Batch size {}'.format(dop, batch_size), 'blue')
                                break
                            batch_size += 1
                            clear()
                            update_progress((experiment_count / total_number_of_experiments) * 100)
                        # The loop above overshoots by one: back off to the last viable size.
                        batch_size -= 1
                    else:
                        # Hand implementation: reuse the batch size found for the
                        # generated variant, read back from the database.
                        instance = session.query(ThroughputWithHandImplementations) \
                            .filter(ThroughputWithHandImplementations.sample_application_name == benchmark_in_config) \
                            .filter(ThroughputWithHandImplementations.input_size == input_size) \
                            .filter(ThroughputWithHandImplementations.relative_deadline == rel_dead) \
                            .filter(ThroughputWithHandImplementations.worker_wcet == wcet) \
                            .filter(ThroughputWithHandImplementations.dop == dop) \
                            .filter(ThroughputWithHandImplementations.is_hand_implementation == 0) \
                            .first()
                        batch_size = instance.batch_size
                        print('Batch size in DB: {}'.format(batch_size))
                    if batch_size == 0:
                        cprint('ERROR: Could not compile or run an application with batch size 1', 'red')
                        exit(0)
                    clear()
                    update_progress((experiment_count / total_number_of_experiments) * 100)
                    cprint('Finding maximum throughput with the found maximum batch size...', 'blue')
                    # Measure max. throughput with the found batch size
                    # Prepare experiments
                    status_code = True
                    if is_hand_implementation:
                        status_code &= status('Creating source files from templates... ',
                                              templates.create_app_for_comparison_with_hand_implementations(
                                                  benchmark,
                                                  250, # period
                                                  input_size,
                                                  batch_size,
                                                  dop,
                                                  'throughput'
                                              ))
                    else:
                        status_code &= status('Create source file from templates...',
                                              templates.create_app_for_throughput_experiments(
                                                  benchmark,
                                                  250, # period,
                                                  input_size,
                                                  rel_dead,
                                                  wcet,
                                                  dop,
                                                  True, # with_batching
                                                  batch_size
                                              ))
                    if status_code:
                        status_code &= status('Compiling... ', compilation.compile_farm())
                    else:
                        cprint('ERROR: Could not generate source code for a sample application', 'red')
                        exit(0)
                    # Run experiment
                    if status_code:
                        execution_status, out = execution.execute_farm()
                        status_code &= status('Executing... ', execution_status)
                        print(out)
                    else:
                        cprint('ERROR: Could not compile a sample application', 'red')
                        exit(0)
                    # Prepare results
                    if status_code:
                        internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
                        # Minimum period (max. throughput) achieved with this batch size.
                        period = processing.compute_interarrival_time(output, batch_size, dop)
                        # Check if the application could successfully compiled and run
                        status_message('Compilation and execution was successful')
                    else:
                        cprint('ERROR: Could not execute a sample application', 'red')
                        exit(0)
                    # Print the result to the console
                    status_message('Found min. period: ' + str(period))
                    # Store the result in the database
                    sample = ThroughputWithHandImplementations(
                        sample_application_name=benchmark_in_config,
                        input_size             =input_size,
                        relative_deadline      =rel_dead,
                        worker_wcet            =wcet,
                        dop                    =dop,
                        is_hand_implementation =is_hand_implementation,
                        sample_count           =sample_count+1,
                        batch_size             =batch_size,
                        min_period             =period)
                    # Save result
                    session.add(sample)
                    session.commit()
                    sample_count += 1
                experiment_count += 1
# Disabled experiment kept for reference; the string literals below are
# intentional no-op statements, not executed code.
'''The below experiment likely does not make sense'''
'''def throughput_loss_due_to_non_optimal_batch_size_experiments(session):
    experiments = session.query(BatchSizeModelAccuracySample).all()
    for experiment in experiments:
        app_name = experiment.sample_application_name
        input_size = experiment.input_size
        relative_deadline = experiment.relative_deadline
        worker_wcet = experiment.worker_wcet
        period = experiment.period
        is_oracle = experiment.is_oracle
        batch_size = experiment.batch_size
    print(experiments[0].sample_application_name)'''
# Script entry point: forward the command-line arguments (minus the program
# name) to main(), which is expected to be defined earlier in this file.
if __name__ == '__main__':
    main(sys.argv[1:])
"paul.felix.metzger@googlemail.com"
] | paul.felix.metzger@googlemail.com |
a4602ae0c9d998aa113b60a67e1c797c50032958 | b70c4f87f4b3283cde0f391524242dc242cb5beb | /movie_recommendation/websaver/settings.py | 25f4792a030ed306335f235c61d52cf9a9432fff | [] | no_license | livjung/2020-GJAI-Movie-Recommendation-System | 92472c1078db959d7361cf206a1dc5a271432e4e | 9a5b886ebad16693af48932a84fa1621e2808bcc | refs/heads/master | 2023-03-21T03:06:30.492229 | 2021-01-31T00:42:03 | 2021-01-31T00:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,093 | py | """
Django settings for websaver project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is hardcoded and committed; for production it
# should be read from the environment or a secrets store.
SECRET_KEY = '(1n6p+3#c%zsm*$c1q!v-s(zdfj0$&^-ek+*9o@z0*lol(_&nr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG=True Django only serves localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'pre_data',  # project-local app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'websaver.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'websaver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"lyh940830@gmail.com"
] | lyh940830@gmail.com |
456bc7c2e4291c8b91065e0e14a4b865109bd42a | abaf81acfb8595724bca019f2036c456d32b8ee2 | /ex32.py | 5ed21f99f5304baa50b6dc2e4a18848cf4a2d6ee | [] | no_license | atticdweller/pythonthehardway | a6e13a4c81ee8a3538d3a29a4dd55e114dba1865 | 2514404c85f2ec26ff0274cc30d58847eed82ef3 | refs/heads/master | 2021-01-16T00:09:18.505887 | 2017-08-26T18:08:52 | 2017-08-26T18:08:52 | 99,956,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this frist kind of for-loop goes through a list
for number in the_count:
print "This is count %d" % number
# same as above
for fruit in fruits:
print "A fruit of type: %s" % fruit
# also we can go through mixed lists too
# notice we hav eot use %r since we don't know what's in it
for i in change:
print "i got %r" % i
# we can also build lists, first start with an empty one
elements = []
# then use the range function to 0 to 5 counts
for i in range(0,6):
print "Adding %d to the list." % i
# append is a function that lists understand
elements.append(i)
# now we can print them out too
for i in elements:
print "Element was: %d" % i | [
"christopher.lobello@gmail.com"
] | christopher.lobello@gmail.com |
2f0a611da567bf2a6e1eedcb7042f1a475d9f211 | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /ubertool/exposure_output.py | 4a6a41f37b05e878207260f2803b50a2a59f17da | [] | no_license | hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,183 | py | import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
import cgi
import cgitb
cgitb.enable()
import datetime
from ubertool.exposure import Exposure
import logging
class UbertoolExposureConfigurationPage(webapp.RequestHandler):
    """Handles POSTs of the exposure configuration form.

    Reads the submitted form fields, stores them on a new or existing
    Exposure entity keyed by the current user and configuration name, and
    redirects to the aquatic toxicity page.
    """
    def post(self):
        logger = logging.getLogger("UbertoolExposureConfigurationPage")
        form = cgi.FieldStorage()
        config_name = str(form.getvalue('config_name'))
        user = users.get_current_user()
        # Reuse an existing configuration with the same name for this user,
        # or create a fresh one.
        q = db.Query(Exposure)
        q.filter('user =',user)
        q.filter("config_name =", config_name)
        exposure = q.get()
        if exposure is None:
            exposure = Exposure()
        if user:
            logger.info(user.user_id())
            exposure.user = user
        exposure.config_name = config_name
        exposure.cas_number = str(form.getvalue('cas_number'))
        exposure.formulated_product_name = form.getvalue('formulated_product_name')
        exposure.met_file = form.getvalue('metfile')
        exposure.przm_scenario = form.getvalue('PRZM_scenario')
        exposure.exams_environment_file = form.getvalue('EXAMS_environment_file')
        # NOTE(review): 'application_mathod' looks misspelled but must match the
        # HTML form field name -- verify against the template before changing.
        exposure.application_method = form.getvalue('application_mathod')
        exposure.app_type = form.getvalue('app_type')
        exposure.weight_of_one_granule = float(form.getvalue('weight_of_one_granule'))
        exposure.wetted_in = bool(form.getvalue('wetted_in'))
        exposure.incorporation_depth = float(form.getvalue('incorporation_depth'))
        exposure.application_kg_rate = float(form.getvalue('application_kg_rate'))
        exposure.application_lbs_rate = float(form.getvalue('application_lbs_rate'))
        exposure.application_rate_per_use = float(form.getvalue('application_rate_per_use'))
        logger.info(form.getvalue("application_date"))
        #TODO This is NASTY we should consider using Date Chooser or something with only one valid output
        # Expects an ISO-like 'YYYY-MM-DD' string; anything else raises here.
        app_data = form.getvalue('application_date')
        app_data_parts = app_data.split("-")
        exposure.application_date = datetime.date(int(app_data_parts[0]),int(app_data_parts[1]),int(app_data_parts[2]))
        exposure.interval_between_applications = float(form.getvalue('interval_between_applications'))
        exposure.application_efficiency = float(form.getvalue('application_efficiency'))
        exposure.percent_incorporated = float(form.getvalue('percent_incorporated'))
        exposure.spray_drift = float(form.getvalue('spray_drift'))
        exposure.runoff = float(form.getvalue('runoff'))
        exposure.one_in_ten_peak_exposure_concentration = float(form.getvalue('one_in_ten_peak_exposure_concentration'))
        exposure.one_in_ten_four_day_average_exposure_concentration = float(form.getvalue('one_in_ten_four_day_average_exposure_concentration'))
        exposure.one_in_ten_twentyone_day_average_exposure_concentration = float(form.getvalue('one_in_ten_twentyone_day_average_exposure_concentration'))
        exposure.one_in_ten_sixty_day_average_exposure_concentration = float(form.getvalue('one_in_ten_sixty_day_average_exposure_concentration'))
        exposure.one_in_ten_ninety_day_average_exposure_concentration = float(form.getvalue('one_in_ten_ninety_day_average_exposure_concentration'))
        exposure.maximum_peak_exposure_concentration = float(form.getvalue('maximum_peak_exposure_concentration'))
        exposure.maximum_four_day_average_exposure_concentration = float(form.getvalue('maximum_four_day_average_exposure_concentration'))
        exposure.maximum_twentyone_day_average_exposure_concentration = float(form.getvalue('maximum_twentyone_day_average_exposure_concentration'))
        exposure.maximum_sixty_day_average_exposure_concentration = float(form.getvalue('maximum_sixty_day_average_exposure_concentration'))
        exposure.maximum_ninety_day_average_exposure_concentration = float(form.getvalue('maximum_ninety_day_average_exposure_concentration'))
        exposure.pore_water_peak_exposure_concentration = float(form.getvalue('pore_water_peak_exposure_concentration'))
        exposure.pore_water_four_day_average_exposure_concentration = float(form.getvalue('pore_water_four_day_average_exposure_concentration'))
        exposure.pore_water_twentyone_day_average_exposure_concentration = float(form.getvalue('pore_water_twentyone_day_average_exposure_concentration'))
        exposure.pore_water_sixty_day_average_exposure_concentration = float(form.getvalue('pore_water_sixty_day_average_exposure_concentration'))
        exposure.pore_water_ninety_day_average_exposure_concentration = float(form.getvalue('pore_water_ninety_day_average_exposure_concentration'))
        exposure.frac_pest_surface = float(form.getvalue('frac_pest_surface'))
        # Persist the entity and move on to the next configuration step.
        exposure.put()
        self.redirect("aquatic_toxicity.html")
# Route every path to the configuration handler.
app = webapp.WSGIApplication([('/.*', UbertoolExposureConfigurationPage)], debug=True)
def main():
    """CGI entry point: hand the WSGI application to the App Engine runner."""
    run_wsgi_app(app)
if __name__ == '__main__':
    main()
| [
"hongtao510@gmail.com"
] | hongtao510@gmail.com |
38b657507fa9116655cd0f1e6c4c24ea7c348d49 | a8a5b9c9c526b600b0b8395a1eaf4044355d6ad9 | /01_Basic/30_Output02(1032).py | 2a3d1b328802e1d3bc48038b9cab25e5e223e60f | [] | no_license | kiteB/CodeUp | a342e40720290758de3fcfff961813250eee9541 | f485f6c50a252e9cb6449c39a872a73561468415 | refs/heads/master | 2023-02-08T15:57:20.557421 | 2020-12-31T08:35:58 | 2020-12-31T08:35:58 | 323,678,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | # 10진수 정수를 입력받아 16진수로 출력하기
a = int(input())
print('%x' %a) | [
"69155170+kiteB@users.noreply.github.com"
] | 69155170+kiteB@users.noreply.github.com |
feddb7230239b94f3212c8d53a646858b9d6224c | 7612f63baab7d872294a64f227af70b5f5b57b58 | /longtutorial/snippets/migrations/0001_initial.py | 1a581773a0326cc4838226c03fa60bdbd67c2733 | [] | no_license | kailIII/DjangoRestAPITest | 560565f864f8677b6858036a6fe50944ae2be593 | 32a9057b57fc7f51404404f45670ff00e045b572 | refs/heads/master | 2020-05-29T11:44:30.672207 | 2016-02-06T04:02:40 | 2016-02-06T04:02:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,230 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the snippets app: creates the ``Snippet`` model.

    The very long ``choices`` lists on the ``language`` and ``style`` fields
    were captured from Pygments' lexer/style registries when this migration
    was generated; regenerate the migration rather than editing them by hand.
    """
    # First migration of the app, so there is nothing to depend on.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Snippet',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(default='', max_length=100, blank=True)),
                ('code', models.TextField()),
                ('linenos', models.BooleanField(default=False)),
                # Pygments lexer short name -> display name (generated list).
                ('language', models.CharField(default='python', max_length=100, choices=[(b'abap', b'ABAP'), (b'abnf', b'ABNF'), (b'ada', b'Ada'), (b'adl', b'ADL'), (b'agda', b'Agda'), (b'ahk', b'autohotkey'), (b'alloy', b'Alloy'), (b'antlr', b'ANTLR'), (b'antlr-as', b'ANTLR With ActionScript Target'), (b'antlr-cpp', b'ANTLR With CPP Target'), (b'antlr-csharp', b'ANTLR With C# Target'), (b'antlr-java', b'ANTLR With Java Target'), (b'antlr-objc', b'ANTLR With ObjectiveC Target'), (b'antlr-perl', b'ANTLR With Perl Target'), (b'antlr-python', b'ANTLR With Python Target'), (b'antlr-ruby', b'ANTLR With Ruby Target'), (b'apacheconf', b'ApacheConf'), (b'apl', b'APL'), (b'applescript', b'AppleScript'), (b'arduino', b'Arduino'), (b'as', b'ActionScript'), (b'as3', b'ActionScript 3'), (b'aspectj', b'AspectJ'), (b'aspx-cs', b'aspx-cs'), (b'aspx-vb', b'aspx-vb'), (b'asy', b'Asymptote'), (b'at', b'AmbientTalk'), (b'autoit', b'AutoIt'), (b'awk', b'Awk'), (b'basemake', b'Base Makefile'), (b'bash', b'Bash'), (b'bat', b'Batchfile'), (b'bbcode', b'BBCode'), (b'bc', b'BC'), (b'befunge', b'Befunge'), (b'blitzbasic', b'BlitzBasic'), (b'blitzmax', b'BlitzMax'), (b'bnf', b'BNF'), (b'boo', b'Boo'), (b'boogie', b'Boogie'), (b'brainfuck', b'Brainfuck'), (b'bro', b'Bro'), (b'bugs', b'BUGS'), (b'c', b'C'), (b'c-objdump', b'c-objdump'), (b'ca65', b'ca65 assembler'), (b'cadl', b'cADL'), (b'camkes', b'CAmkES'), (b'cbmbas', b'CBM BASIC V2'), (b'ceylon', b'Ceylon'), (b'cfc', b'Coldfusion CFC'), (b'cfengine3', b'CFEngine3'), (b'cfm', b'Coldfusion HTML'), (b'cfs', b'cfstatement'), (b'chai', b'ChaiScript'), (b'chapel', b'Chapel'), (b'cheetah', b'Cheetah'), (b'cirru', b'Cirru'), (b'clay', b'Clay'), (b'clojure', b'Clojure'), (b'clojurescript', b'ClojureScript'), (b'cmake', b'CMake'), (b'cobol', b'COBOL'), (b'cobolfree', b'COBOLFree'), (b'coffee-script', b'CoffeeScript'), (b'common-lisp', b'Common Lisp'), (b'componentpascal', b'Component Pascal'), (b'console', b'Bash Session'), (b'control', b'Debian Control file'), 
(b'coq', b'Coq'), (b'cpp', b'C++'), (b'cpp-objdump', b'cpp-objdump'), (b'cpsa', b'CPSA'), (b'crmsh', b'Crmsh'), (b'croc', b'Croc'), (b'cryptol', b'Cryptol'), (b'csharp', b'C#'), (b'csound', b'Csound Orchestra'), (b'csound-document', b'Csound Document'), (b'csound-score', b'Csound Score'), (b'css', b'CSS'), (b'css+django', b'CSS+Django/Jinja'), (b'css+erb', b'CSS+Ruby'), (b'css+genshitext', b'CSS+Genshi Text'), (b'css+lasso', b'CSS+Lasso'), (b'css+mako', b'CSS+Mako'), (b'css+mozpreproc', b'CSS+mozpreproc'), (b'css+myghty', b'CSS+Myghty'), (b'css+php', b'CSS+PHP'), (b'css+smarty', b'CSS+Smarty'), (b'cucumber', b'Gherkin'), (b'cuda', b'CUDA'), (b'cypher', b'Cypher'), (b'cython', b'Cython'), (b'd', b'D'), (b'd-objdump', b'd-objdump'), (b'dart', b'Dart'), (b'delphi', b'Delphi'), (b'dg', b'dg'), (b'diff', b'Diff'), (b'django', b'Django/Jinja'), (b'docker', b'Docker'), (b'doscon', b'MSDOS Session'), (b'dpatch', b'Darcs Patch'), (b'dtd', b'DTD'), (b'duel', b'Duel'), (b'dylan', b'Dylan'), (b'dylan-console', b'Dylan session'), (b'dylan-lid', b'DylanLID'), (b'earl-grey', b'Earl Grey'), (b'easytrieve', b'Easytrieve'), (b'ebnf', b'EBNF'), (b'ec', b'eC'), (b'ecl', b'ECL'), (b'eiffel', b'Eiffel'), (b'elixir', b'Elixir'), (b'elm', b'Elm'), (b'emacs', b'EmacsLisp'), (b'erb', b'ERB'), (b'erl', b'Erlang erl session'), (b'erlang', b'Erlang'), (b'evoque', b'Evoque'), (b'ezhil', b'Ezhil'), (b'factor', b'Factor'), (b'fan', b'Fantom'), (b'fancy', b'Fancy'), (b'felix', b'Felix'), (b'fish', b'Fish'), (b'fortran', b'Fortran'), (b'fortranfixed', b'FortranFixed'), (b'foxpro', b'FoxPro'), (b'fsharp', b'FSharp'), (b'gap', b'GAP'), (b'gas', b'GAS'), (b'genshi', b'Genshi'), (b'genshitext', b'Genshi Text'), (b'glsl', b'GLSL'), (b'gnuplot', b'Gnuplot'), (b'go', b'Go'), (b'golo', b'Golo'), (b'gooddata-cl', b'GoodData-CL'), (b'gosu', b'Gosu'), (b'groff', b'Groff'), (b'groovy', b'Groovy'), (b'gst', b'Gosu Template'), (b'haml', b'Haml'), (b'handlebars', b'Handlebars'), (b'haskell', b'Haskell'), 
(b'haxeml', b'Hxml'), (b'hexdump', b'Hexdump'), (b'html', b'HTML'), (b'html+cheetah', b'HTML+Cheetah'), (b'html+django', b'HTML+Django/Jinja'), (b'html+evoque', b'HTML+Evoque'), (b'html+genshi', b'HTML+Genshi'), (b'html+handlebars', b'HTML+Handlebars'), (b'html+lasso', b'HTML+Lasso'), (b'html+mako', b'HTML+Mako'), (b'html+myghty', b'HTML+Myghty'), (b'html+php', b'HTML+PHP'), (b'html+smarty', b'HTML+Smarty'), (b'html+twig', b'HTML+Twig'), (b'html+velocity', b'HTML+Velocity'), (b'http', b'HTTP'), (b'hx', b'Haxe'), (b'hybris', b'Hybris'), (b'hylang', b'Hy'), (b'i6t', b'Inform 6 template'), (b'idl', b'IDL'), (b'idris', b'Idris'), (b'iex', b'Elixir iex session'), (b'igor', b'Igor'), (b'inform6', b'Inform 6'), (b'inform7', b'Inform 7'), (b'ini', b'INI'), (b'io', b'Io'), (b'ioke', b'Ioke'), (b'irc', b'IRC logs'), (b'isabelle', b'Isabelle'), (b'j', b'J'), (b'jade', b'Jade'), (b'jags', b'JAGS'), (b'jasmin', b'Jasmin'), (b'java', b'Java'), (b'javascript+mozpreproc', b'Javascript+mozpreproc'), (b'jcl', b'JCL'), (b'jlcon', b'Julia console'), (b'js', b'JavaScript'), (b'js+cheetah', b'JavaScript+Cheetah'), (b'js+django', b'JavaScript+Django/Jinja'), (b'js+erb', b'JavaScript+Ruby'), (b'js+genshitext', b'JavaScript+Genshi Text'), (b'js+lasso', b'JavaScript+Lasso'), (b'js+mako', b'JavaScript+Mako'), (b'js+myghty', b'JavaScript+Myghty'), (b'js+php', b'JavaScript+PHP'), (b'js+smarty', b'JavaScript+Smarty'), (b'json', b'JSON'), (b'jsonld', b'JSON-LD'), (b'jsp', b'Java Server Page'), (b'julia', b'Julia'), (b'kal', b'Kal'), (b'kconfig', b'Kconfig'), (b'koka', b'Koka'), (b'kotlin', b'Kotlin'), (b'lagda', b'Literate Agda'), (b'lasso', b'Lasso'), (b'lcry', b'Literate Cryptol'), (b'lean', b'Lean'), (b'less', b'LessCss'), (b'lhs', b'Literate Haskell'), (b'lidr', b'Literate Idris'), (b'lighty', b'Lighttpd configuration file'), (b'limbo', b'Limbo'), (b'liquid', b'liquid'), (b'live-script', b'LiveScript'), (b'llvm', b'LLVM'), (b'logos', b'Logos'), (b'logtalk', b'Logtalk'), (b'lsl', b'LSL'), 
(b'lua', b'Lua'), (b'make', b'Makefile'), (b'mako', b'Mako'), (b'maql', b'MAQL'), (b'mask', b'Mask'), (b'mason', b'Mason'), (b'mathematica', b'Mathematica'), (b'matlab', b'Matlab'), (b'matlabsession', b'Matlab session'), (b'minid', b'MiniD'), (b'modelica', b'Modelica'), (b'modula2', b'Modula-2'), (b'monkey', b'Monkey'), (b'moocode', b'MOOCode'), (b'moon', b'MoonScript'), (b'mozhashpreproc', b'mozhashpreproc'), (b'mozpercentpreproc', b'mozpercentpreproc'), (b'mql', b'MQL'), (b'mscgen', b'Mscgen'), (b'mupad', b'MuPAD'), (b'mxml', b'MXML'), (b'myghty', b'Myghty'), (b'mysql', b'MySQL'), (b'nasm', b'NASM'), (b'nemerle', b'Nemerle'), (b'nesc', b'nesC'), (b'newlisp', b'NewLisp'), (b'newspeak', b'Newspeak'), (b'nginx', b'Nginx configuration file'), (b'nimrod', b'Nimrod'), (b'nit', b'Nit'), (b'nixos', b'Nix'), (b'nsis', b'NSIS'), (b'numpy', b'NumPy'), (b'objdump', b'objdump'), (b'objdump-nasm', b'objdump-nasm'), (b'objective-c', b'Objective-C'), (b'objective-c++', b'Objective-C++'), (b'objective-j', b'Objective-J'), (b'ocaml', b'OCaml'), (b'octave', b'Octave'), (b'odin', b'ODIN'), (b'ooc', b'Ooc'), (b'opa', b'Opa'), (b'openedge', b'OpenEdge ABL'), (b'pacmanconf', b'PacmanConf'), (b'pan', b'Pan'), (b'parasail', b'ParaSail'), (b'pawn', b'Pawn'), (b'perl', b'Perl'), (b'perl6', b'Perl6'), (b'php', b'PHP'), (b'pig', b'Pig'), (b'pike', b'Pike'), (b'pkgconfig', b'PkgConfig'), (b'plpgsql', b'PL/pgSQL'), (b'postgresql', b'PostgreSQL SQL dialect'), (b'postscript', b'PostScript'), (b'pot', b'Gettext Catalog'), (b'pov', b'POVRay'), (b'powershell', b'PowerShell'), (b'praat', b'Praat'), (b'prolog', b'Prolog'), (b'properties', b'Properties'), (b'protobuf', b'Protocol Buffer'), (b'ps1con', b'PowerShell Session'), (b'psql', b'PostgreSQL console (psql)'), (b'puppet', b'Puppet'), (b'py3tb', b'Python 3.0 Traceback'), (b'pycon', b'Python console session'), (b'pypylog', b'PyPy Log'), (b'pytb', b'Python Traceback'), (b'python', b'Python'), (b'python3', b'Python 3'), (b'qbasic', b'QBasic'), 
(b'qml', b'QML'), (b'qvto', b'QVTO'), (b'racket', b'Racket'), (b'ragel', b'Ragel'), (b'ragel-c', b'Ragel in C Host'), (b'ragel-cpp', b'Ragel in CPP Host'), (b'ragel-d', b'Ragel in D Host'), (b'ragel-em', b'Embedded Ragel'), (b'ragel-java', b'Ragel in Java Host'), (b'ragel-objc', b'Ragel in Objective C Host'), (b'ragel-ruby', b'Ragel in Ruby Host'), (b'raw', b'Raw token data'), (b'rb', b'Ruby'), (b'rbcon', b'Ruby irb session'), (b'rconsole', b'RConsole'), (b'rd', b'Rd'), (b'rebol', b'REBOL'), (b'red', b'Red'), (b'redcode', b'Redcode'), (b'registry', b'reg'), (b'resource', b'ResourceBundle'), (b'rexx', b'Rexx'), (b'rhtml', b'RHTML'), (b'roboconf-graph', b'Roboconf Graph'), (b'roboconf-instances', b'Roboconf Instances'), (b'robotframework', b'RobotFramework'), (b'rql', b'RQL'), (b'rsl', b'RSL'), (b'rst', b'reStructuredText'), (b'rts', b'TrafficScript'), (b'rust', b'Rust'), (b'sass', b'Sass'), (b'sc', b'SuperCollider'), (b'scala', b'Scala'), (b'scaml', b'Scaml'), (b'scheme', b'Scheme'), (b'scilab', b'Scilab'), (b'scss', b'SCSS'), (b'shen', b'Shen'), (b'slim', b'Slim'), (b'smali', b'Smali'), (b'smalltalk', b'Smalltalk'), (b'smarty', b'Smarty'), (b'sml', b'Standard ML'), (b'snobol', b'Snobol'), (b'sourceslist', b'Debian Sourcelist'), (b'sp', b'SourcePawn'), (b'sparql', b'SPARQL'), (b'spec', b'RPMSpec'), (b'splus', b'S'), (b'sql', b'SQL'), (b'sqlite3', b'sqlite3con'), (b'squidconf', b'SquidConf'), (b'ssp', b'Scalate Server Page'), (b'stan', b'Stan'), (b'swift', b'Swift'), (b'swig', b'SWIG'), (b'systemverilog', b'systemverilog'), (b'tads3', b'TADS 3'), (b'tap', b'TAP'), (b'tcl', b'Tcl'), (b'tcsh', b'Tcsh'), (b'tcshcon', b'Tcsh Session'), (b'tea', b'Tea'), (b'termcap', b'Termcap'), (b'terminfo', b'Terminfo'), (b'terraform', b'Terraform'), (b'tex', b'TeX'), (b'text', b'Text only'), (b'thrift', b'Thrift'), (b'todotxt', b'Todotxt'), (b'trac-wiki', b'MoinMoin/Trac Wiki markup'), (b'treetop', b'Treetop'), (b'ts', b'TypeScript'), (b'turtle', b'Turtle'), (b'twig', b'Twig'), 
(b'urbiscript', b'UrbiScript'), (b'vala', b'Vala'), (b'vb.net', b'VB.net'), (b'vctreestatus', b'VCTreeStatus'), (b'velocity', b'Velocity'), (b'verilog', b'verilog'), (b'vgl', b'VGL'), (b'vhdl', b'vhdl'), (b'vim', b'VimL'), (b'x10', b'X10'), (b'xml', b'XML'), (b'xml+cheetah', b'XML+Cheetah'), (b'xml+django', b'XML+Django/Jinja'), (b'xml+erb', b'XML+Ruby'), (b'xml+evoque', b'XML+Evoque'), (b'xml+lasso', b'XML+Lasso'), (b'xml+mako', b'XML+Mako'), (b'xml+myghty', b'XML+Myghty'), (b'xml+php', b'XML+PHP'), (b'xml+smarty', b'XML+Smarty'), (b'xml+velocity', b'XML+Velocity'), (b'xquery', b'XQuery'), (b'xslt', b'XSLT'), (b'xtend', b'Xtend'), (b'xul+mozpreproc', b'XUL+mozpreproc'), (b'yaml', b'YAML'), (b'yaml+jinja', b'YAML+Jinja'), (b'zephir', b'Zephir')])),
                # Pygments highlight style names (generated list).
                ('style', models.CharField(default='friendly', max_length=100, choices=[(b'algol', b'algol'), (b'algol_nu', b'algol_nu'), (b'autumn', b'autumn'), (b'borland', b'borland'), (b'bw', b'bw'), (b'colorful', b'colorful'), (b'default', b'default'), (b'emacs', b'emacs'), (b'friendly', b'friendly'), (b'fruity', b'fruity'), (b'igor', b'igor'), (b'lovelace', b'lovelace'), (b'manni', b'manni'), (b'monokai', b'monokai'), (b'murphy', b'murphy'), (b'native', b'native'), (b'paraiso-dark', b'paraiso-dark'), (b'paraiso-light', b'paraiso-light'), (b'pastie', b'pastie'), (b'perldoc', b'perldoc'), (b'rrt', b'rrt'), (b'tango', b'tango'), (b'trac', b'trac'), (b'vim', b'vim'), (b'vs', b'vs'), (b'xcode', b'xcode')])),
            ],
            options={
                # Snippets are listed oldest-first.
                'ordering': ('created',),
            },
        ),
    ]
| [
"nemolina@calpoly.edu"
] | nemolina@calpoly.edu |
ed294ffdc99c3f295aee3f9ac3eccd050ed5366b | 34c0f0f6cda1b08505bed786593db747b5a9ec9e | /src/chapter_03/exercise_09.py | 0b19430aabd0bc1f51fb7405b307f80e7d3bc5d4 | [] | no_license | martinpbarber/python-workout | 6c86e62b94cb361d62ad5006bac099fe2447c513 | 0e9bcde2508268560786003d5063efd213370288 | refs/heads/master | 2023-01-20T04:56:43.453169 | 2020-11-12T20:02:58 | 2020-11-12T20:02:58 | 302,702,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | """ exercise_09 """
def firstlast(sequence):
    """Return a sequence made of the first and last items of *sequence*.

    Slicing (rather than indexing) keeps the result the same type as the
    input and makes the empty sequence a safe no-op.
    """
    return sequence[:1] + sequence[-1:]
| [
"MartinPB@opensource.gov"
] | MartinPB@opensource.gov |
630f7309fa95c5a1adb8003686e09debea69cb29 | 36ce02342d0857d4038e6a15dcf0760b83cddb7e | /maze_env.py | 37332cebe9ee054fbf3b0003c7f01ce778f91534 | [
"MIT"
] | permissive | luwis93choi/RL-Random_Maze_Solver | 775e85a3d238f44c9692e58d96bd10793e70783a | 355e8d509129f56717931792976e89bc03d12a0b | refs/heads/main | 2023-03-24T13:36:15.768286 | 2021-03-20T01:56:25 | 2021-03-20T01:56:25 | 339,687,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,451 | py | import turtle as t
import random
import numpy as np
import copy
from maze_drawer import Maze_drawer
from maze_generator import Maze_generator
import time
import cv2 as cv
import math
class Maze():
    """Randomly generated grid-maze environment for RL agents.

    Cell encodings, as read/written by this class:
      * ``self.maze``      -- ground truth grid from Maze_generator:
                              0 = free, 1 = agent, >= 3 = obstacle
                              (2 is presumably the target marker -- TODO
                              confirm against the generator).
      * ``self.local_map`` -- the agent's partial view: 0.5 = unexplored,
                              1 = agent, 2 = target, 3 = discovered obstacle,
                              values <= 0 = breadcrumbs on visited cells.

    Bug fix: ``within_bounds`` previously used an inclusive upper bound while
    every caller passes an array length (``shape[0]``/``shape[1]``) and indexes
    immediately after the check, so scanning or stepping past the last
    row/column raised IndexError.  The upper bound is now exclusive.
    """

    # Discrete action id -> (d_row, d_col) movement delta.
    _ACTION_DELTAS = {
        0: (0, 1), 1: (0, -1),
        2: (1, 0), 3: (1, 1), 4: (1, -1),
        5: (-1, 0), 6: (-1, 1), 7: (-1, -1),
    }

    def __init__(self, height, width, detection_range=2, obstacle_occupancy_prob=0.3):
        self.height = height
        self.width = width

        # Agent starts at the top-left corner; the generator may adjust poses.
        self.curr_agent_pose = [0, 0]
        # Breadcrumb value to leave on the cell being vacated next move.
        self.prev_point_val = 0

        # Target defaults to the bottom-right corner.
        self.target_pose = [self.height - 1, self.width - 1]

        # Generate a valid (solvable) maze; the generator uses A* internally.
        self.obstacle_occupancy_prob = obstacle_occupancy_prob
        self.maze_generator = Maze_generator(height=self.height, width=self.width,
                                             agent_pose=self.curr_agent_pose,
                                             target_pose=self.target_pose,
                                             obstacle_occupancy_prob=self.obstacle_occupancy_prob)
        self.maze, self.curr_agent_pose, self.target_pose, self.optimal_path = self.maze_generator.generate()

        self.maze_drawer = Maze_drawer(self.maze)

        # Local (partially observed) map: everything starts unexplored (0.5).
        self.local_map = 0.5 * np.ones([self.maze.shape[0], self.maze.shape[1]])
        self.local_map[self.curr_agent_pose[0], self.curr_agent_pose[1]] = 1
        self.local_map[self.target_pose[0], self.target_pose[1]] = 2

        self.detection_range = detection_range
        self._reveal_surroundings()

        self.reward = 0.0
        self.done = 0
        self.collision_count = 0

        # Manhattan components of the distance to the target.
        self.dy = abs(self.target_pose[0] - self.curr_agent_pose[0])
        self.dx = abs(self.target_pose[1] - self.curr_agent_pose[1])
        self.shortest_dy = self.dy
        self.shortest_dx = self.dx

        self.maze_drawer.update_maze(self.local_map, curr_pose=self.curr_agent_pose)

    def reset(self):
        """Regenerate the maze and wipe the agent's local knowledge.

        Returns the flattened local map as the initial observation.
        """
        self.curr_agent_pose = [0, 0]
        self.prev_point_val = 0
        self.target_pose = [self.height - 1, self.width - 1]

        self.maze_generator = Maze_generator(height=self.height, width=self.width,
                                             agent_pose=self.curr_agent_pose,
                                             target_pose=self.target_pose,
                                             obstacle_occupancy_prob=self.obstacle_occupancy_prob)
        self.maze, self.curr_agent_pose, self.target_pose, self.optimal_path = self.maze_generator.generate()

        self.local_map = 0.5 * np.ones([self.maze.shape[0], self.maze.shape[1]])
        self.local_map[self.curr_agent_pose[0], self.curr_agent_pose[1]] = 1
        self.local_map[self.target_pose[0], self.target_pose[1]] = 2
        self._reveal_surroundings()

        self.reward = 0.0
        self.done = 0
        self.collision_count = 0

        states = self.local_map.flatten()
        self.maze_drawer.update_maze(self.local_map, curr_pose=self.curr_agent_pose)
        return states

    def within_bounds(self, value, low, high):
        """Return True iff ``low <= value < high``.

        The upper bound is exclusive (bug fix): callers pass an array length
        as ``high`` and index right after the check, so the old inclusive
        comparison let ``value == high`` through and crashed with IndexError.
        """
        return (low <= value) and (value < high)

    def _reveal_surroundings(self):
        """Copy obstacles within ``detection_range`` of the agent onto the local map."""
        for i in range(-self.detection_range, self.detection_range + 1):
            for j in range(-self.detection_range, self.detection_range + 1):
                row = self.curr_agent_pose[0] + i
                col = self.curr_agent_pose[1] + j
                if i == 0 and j == 0:
                    self.local_map[row][col] = 1  # the agent's own cell
                elif self.within_bounds(row, 0, self.maze.shape[0]) and self.within_bounds(col, 0, self.maze.shape[1]):
                    if self.maze[row][col] == 3:
                        self.local_map[row][col] = 3

    def maze_update(self, dheight=0, dwidth=0):
        """Try to move the agent by (dheight, dwidth).

        An out-of-bounds or obstacle destination costs -10 reward and counts
        as a collision (the agent stays put).  Otherwise the agent moves,
        leaves a breadcrumb on the vacated cell, and may earn an exploration
        bonus for entering a previously unseen free cell.
        """
        dest_row = self.curr_agent_pose[0] + dheight
        dest_col = self.curr_agent_pose[1] + dwidth

        # Collision: destination outside the grid or occupied by an obstacle.
        # The short-circuit keeps the maze indexing safe.
        if (not self.within_bounds(dest_row, 0, self.maze.shape[0])
                or not self.within_bounds(dest_col, 0, self.maze.shape[1])
                or self.maze[dest_row][dest_col] >= 3):
            self.reward += -10
            self.collision_count += 1
            self.maze_drawer.update_maze(self.local_map, curr_pose=self.curr_agent_pose)
            return

        prev_pose = copy.deepcopy(self.curr_agent_pose)
        self.curr_agent_pose[0] = dest_row
        self.curr_agent_pose[1] = dest_col

        # Vacate the old cell and drop the breadcrumb value on it.
        self.maze[prev_pose[0]][prev_pose[1]] = 0
        self.local_map[prev_pose[0]][prev_pose[1]] = self.prev_point_val
        # Breadcrumb for the *next* move: current cell's value minus one, so
        # repeatedly revisited cells accumulate increasingly negative marks.
        self.prev_point_val = self.local_map[self.curr_agent_pose[0]][self.curr_agent_pose[1]] - 1.0

        self.dy = abs(self.target_pose[0] - self.curr_agent_pose[0])
        self.dx = abs(self.target_pose[1] - self.curr_agent_pose[1])

        if self.maze[self.curr_agent_pose[0]][self.curr_agent_pose[1]] == 0:
            cell = self.local_map[self.curr_agent_pose[0]][self.curr_agent_pose[1]]
            if cell == 0.5:
                # Unexplored cell: bonus grows as the agent closes in on the target.
                self.reward += math.sqrt((self.height - self.dy) ** 2 + (self.width - self.dx) ** 2)
            elif cell < 0.5:
                # Revisited cell: its (negative) breadcrumb value is the penalty.
                self.reward += cell

        self.maze[self.curr_agent_pose[0]][self.curr_agent_pose[1]] = 1
        self.local_map[self.curr_agent_pose[0]][self.curr_agent_pose[1]] = 1
        self.maze_drawer.update_maze(self.local_map, curr_pose=self.curr_agent_pose)

    def step(self, action):
        """Advance one time step.

        ``action`` 0..7 selects one of the 8 neighbouring cells (see
        ``_ACTION_DELTAS``).  Returns ``(flattened local map, reward,
        done flag, success flag)``.
        """
        self.reward = 0.0
        self.done = 0
        success = False

        delta = self._ACTION_DELTAS.get(action)
        if delta is not None:  # unknown actions are silently ignored (original behaviour)
            self.maze_update(dheight=delta[0], dwidth=delta[1])
            self.reward += -0.01  # small per-step cost to discourage dawdling

        # Success when the agent is within one cell of the target.  The target
        # sits at the bottom-right corner, so both differences are >= 0 and
        # the missing abs() is harmless -- TODO confirm the generator always
        # keeps the target there.
        if ((self.target_pose[0] - self.curr_agent_pose[0]) <= 1) and ((self.target_pose[1] - self.curr_agent_pose[1]) <= 1):
            self.done = 1
            self.reward += 1000
            success = True
            self.maze_drawer.update_maze(self.local_map, curr_pose=self.curr_agent_pose)

        self._reveal_surroundings()

        states = self.local_map.flatten()
        return states, self.reward, self.done, success
| [
"luwis93choi@hotmail.com"
] | luwis93choi@hotmail.com |
fc800c8f7e819f042f3564af19ae7750b6c7654c | 3437a6ca4add38baa5eec34dee26eb991899bfb3 | /shisu_code/src/utils.py | 1784e94723dd21b844d2f476b0a255217c580414 | [] | no_license | Xiangtuo/Human_Protein | 4635f33caa15ced56e4be187682698f8a5afeac0 | 65d098ab3cc634326c5f70e4f3c975d0419279a1 | refs/heads/master | 2020-09-27T13:00:42.212301 | 2019-01-12T15:29:01 | 2019-01-12T15:29:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,759 | py | from lightai.core import *
from .metric import F1
def get_mean(dl):
    """Per-channel mean over every image in *dl* (channels-last batches, moved to GPU)."""
    batch_means = []
    for batch, _ in dl:
        # NHWC -> NCHW, then flatten the spatial dims per channel.
        flat = batch.cuda().float().permute(0, 3, 1, 2)
        flat = flat.view(flat.shape[0], flat.shape[1], -1)
        batch_means.append(flat.mean(dim=-1))
    return torch.cat(batch_means).mean(dim=0)
def get_std(mean, dl):
    """Per-channel standard deviation around *mean* over every image in *dl*."""
    col_mean = mean.view(-1, 1)  # broadcast against (batch, C, H*W)
    sq_devs = []
    for batch, _ in dl:
        flat = batch.cuda().float().permute(0, 3, 1, 2)
        flat = flat.view(flat.shape[0], flat.shape[1], -1)
        sq_devs.append(((flat - col_mean) ** 2).mean(dim=-1))
    return torch.cat(sq_devs).mean(dim=0) ** 0.5
def get_idx_from_target(df, target):
    """Indices of rows whose space-separated 'Target' field contains *target*."""
    matched = []
    for idx, label_str in zip(df.index, df['Target']):
        if any(int(tok) == target for tok in label_str.split()):
            matched.append(idx)
    return matched
def get_cls_weight(df):
    """Log-frequency class weights for the 28 classes, scaled so max is 1."""
    counts = np.array([len(get_idx_from_target(df, c)) for c in range(28)])
    weight = np.log(counts) / counts  # rarer classes get larger weights
    return weight / weight.max()
def assign_weight(df, weights=None):
    """Add a per-row 'weight' column: sum of class weights of each row's targets, max-normalised."""
    df['weight'] = 0.0
    if weights is None:
        weights = get_cls_weight(df)
    for idx, row in df.iterrows():
        df.loc[idx, 'weight'] = sum(weights[int(t)] for t in row['Target'].split())
    df.weight = df.weight / df.weight.max()
def create_k_fold(k, df):
    """Shuffle the rows and tag each with a fold id 0..k-1 (round-robin)."""
    df['fold'] = 0.0
    shuffled = df.iloc[np.random.permutation(len(df))]
    shuffled['fold'] = (list(range(k)) * (len(shuffled) // k + 1))[:len(shuffled)]
    return shuffled
def make_rgb(img_id, img_fold):
    """Stack the red/green/blue channel files of *img_id* into one HxWx3 array."""
    fold = Path(img_fold)
    channels = [cv2.imread(str(fold / f'{img_id}_{color}.png'), -1)
                for color in ('red', 'green', 'blue')]
    return np.stack(channels, axis=-1)
def score_wrt_threshold_per_cls(logits, targets):
scores = []
thresholds = np.linspace(0, 1, num=100, endpoint=False)
for threshold in thresholds:
predict = (logits.sigmoid() > threshold).float()
tp = (predict*targets).sum(dim=0) # shape (28,)
precision = tp/(predict.sum(dim=0) + 1e-8)
recall = tp/(targets.sum(dim=0) + 1e-8)
f1 = 2*(precision*recall/(precision+recall+1e-8))
scores.append(f1)
scores = torch.stack(scores).permute(1, 0).numpy()
return scores
def score_wrt_threshold(logits, targets):
    """Overall F1 (via metric.F1) at 100 equally spaced thresholds in [0, 1)."""
    metrics = []
    for t in np.linspace(0, 1, num=100, endpoint=False):
        metric = F1(t)
        metric(logits, targets)  # accumulate stats for this threshold
        metrics.append(metric)
    return np.array([m.res() for m in metrics])
def resize(sz, src, dst):
    """Resize every image under *src* to sz x sz and write PNGs into *dst*.

    src, dst: folder paths. Uses 6 worker processes.
    NOTE(review): mapping a local closure over a ProcessPoolExecutor may not
    pickle under the 'spawn' start method -- works under 'fork'; confirm.
    """
    src = Path(src)
    dst = Path(dst)

    def shrink_one(img_path):
        img = cv2.imread(str(img_path), 0)
        cv2.imwrite(str(dst / img_path.parts[-1].replace('jpg', 'png')),
                    cv2.resize(img, (sz, sz)))

    with ProcessPoolExecutor(6) as pool:
        pool.map(shrink_one, src.iterdir())
def p_tp_vs_tn(logits, targets):
    """Split sigmoid probabilities into those at positive vs negative labels."""
    probs = logits.sigmoid()
    positives = probs.masked_select(targets == 1).numpy()
    negatives = probs.masked_select(targets == 0).numpy()
    return positives, negatives
def p_wrt_test(model, test_dl):
    """Flattened sigmoid outputs of *model* over every test batch (no grad)."""
    model.eval()
    probs = []
    with torch.no_grad():
        for img in test_dl:
            out = model(img.cuda()).sigmoid().view(-1).cpu().float()
            probs.append(out)
    return torch.cat(probs).numpy()
def val_vs_test(model, val_dl, test_dl):
    """Overlay normalised histograms of predicted probabilities: val vs test.

    Bug fix: the original called ``p_tp_vs_tn(model, val_dl)``, but that
    helper's signature is ``(logits, targets)``, so the call raised at
    runtime.  Logits are now computed first via ``get_logits``.
    """
    logits, targets = get_logits(model, val_dl)
    val_p_tp, val_p_tn = p_tp_vs_tn(logits, targets)
    val_p = np.concatenate((val_p_tp, val_p_tn))
    test_p = p_wrt_test(model, test_dl)
    plt.figure(figsize=(9, 9))
    # weights=1/len normalises each histogram so the two sets are comparable.
    plt.hist(val_p, log=True, bins=30, alpha=0.5,
             weights=np.ones_like(val_p) / len(val_p), label='val')
    plt.hist(test_p, log=True, bins=30, alpha=0.5,
             weights=np.ones_like(test_p) / len(test_p), label='test')
    plt.legend()
def tp_vs_tn(logits, targets):
    """Histogram predicted probabilities at positive vs negative labels; return the bin counts."""
    positives, negatives = p_tp_vs_tn(logits, targets)
    plt.figure(figsize=(9, 9))
    tn_counts = plt.hist(negatives, log=True, bins=30, alpha=0.5)[0]
    tp_counts = plt.hist(positives, log=True, bins=30, alpha=0.5)[0]
    return tp_counts, tn_counts
def c_p_tp_vs_tn(logits, targets):
    """Per-class raw logits split by label.

    NOTE: unlike p_tp_vs_tn, no sigmoid is applied here -- raw logits are
    returned (original behaviour kept).
    """
    pos_per_cls = []
    neg_per_cls = []
    for c in range(28):
        col = logits[:, c]
        label = targets[:, c]
        pos_per_cls.append(col[label == 1].numpy())
        neg_per_cls.append(col[label == 0].numpy())
    return pos_per_cls, neg_per_cls
def c_tp_vs_tn(logits, targets):
    """One histogram per class comparing logits at positive vs negative labels."""
    tp_cls, tn_cls = c_p_tp_vs_tn(logits, targets)
    _, axes = plt.subplots(28, 1, figsize=(6, 6 * 28))
    for c, (ax, tp, tn) in enumerate(zip(axes, tp_cls, tn_cls)):
        # Shared bin edges so the two distributions line up.
        combined = np.concatenate([tp, tn])
        edges = np.linspace(combined.min(), combined.max(), 50)
        ax.hist(tn, edges, log=True, label='tn', alpha=0.5)
        ax.hist(tp, edges, log=True, label='tp', alpha=0.5)
        ax.legend()
        ax.set_title(c)
def tsfm_contrast(ds, aug):
    """Show 2 random samples: original (left column) vs augmented (right column)."""
    n_rows = 2
    n_cols = 2
    cell_sz = 8
    _, axes = plt.subplots(n_rows, n_cols, figsize=(cell_sz * n_cols, cell_sz * n_rows))
    for ax_pair in axes:
        sample = ds[np.random.randint(0, len(ds))][0]
        ax_pair[0].imshow(sample[:, :, :3])  # only the first 3 channels render as RGB
        ax_pair[1].imshow(aug(image=sample)['image'][:, :, :3])
def mis_classify(logits, targets):
    """Return (probabilities at positive labels, probabilities at negative labels)."""
    probs = logits.sigmoid()
    return probs * targets, probs * (1 - targets)
def get_logits(model, val_dl):
    """Run *model* over the validation loader; return (logits, targets) on CPU."""
    model.eval()
    all_logits = []
    all_targets = []
    with torch.no_grad():
        for img, target in val_dl:
            all_logits.append(model(img.cuda()))
            all_targets.append(target)
    return torch.cat(all_logits).cpu().float(), torch.cat(all_targets)
def most_wrong(logits, targets, val_df):
    """Rows of *val_df* sorted by average miss on their positive labels (worst first)."""
    # Miss per positive label, averaged over each sample's positive count.
    miss = (1 - logits.sigmoid()) * targets / targets.sum(dim=1).view(-1, 1)
    per_sample = miss.sum(dim=1)
    ranked, order = torch.sort(per_sample, descending=True)
    ranked_df = val_df.iloc[order.numpy()]
    ranked_df['wrong'] = ranked.numpy()
    return ranked_df
| [
"noreply@github.com"
] | Xiangtuo.noreply@github.com |
b6e501f0fd72a177f85bfe9ef637b059aea6081b | 21da2b1bbf1a8b4cf8bbaeb38713bb6b35984c37 | /PycharmProjects/assesmwnt/questions/book.py | 7901f19e66067284b532c3b5f24d1cbc7c03d29b | [] | no_license | gopukrish100/gopz | 8063486f0094d093ebfb38c0cc83a861a7921b13 | a0e7796c152e8aac41f2624856aaca9567b69269 | refs/heads/master | 2020-06-27T15:48:05.039213 | 2019-08-01T07:17:27 | 2019-08-01T07:17:27 | 199,991,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | class Book:
def __init__(self,pages):
self.pgs=pages
def __add__(self, other,):
bk=Book(self.pgs+other.pgs)
return bk
def __str__(self):
return str(self.pgs)
def __sub__(self, other):
bk=Book(self.pgs-other.pgs)
return bk
def __mul__(self, other):
return self.pgs*other.pgs
def __truediv__(self, other):
return self.pgs/other.pgs
# Demo: exercise every overloaded operator on three books.
b1, b2, b3 = Book(82), Book(45), Book(35)
print(b1 + b2 + b3)
print(b1 - b2 - b3)
print(b1 * b2)
print(b1 / b2)
| [
"gopukrish100@gmail.com"
] | gopukrish100@gmail.com |
12ca5557bb7ff7cb672a333e5046f3ac4ec2c9e2 | 96a640c96640cf4b3dfaeea7981a5027200d07a4 | /tube_exam.spec | e36c82ff39c2872c39f25dd738eb397a7edc3b7a | [] | no_license | AaronGe88/TubeShapeExam | 78cfbf5c5ab147aef396d177721d13fb742f1b0b | d2c423e7a6cf66dc672bd36e632f6afe1323cc79 | refs/heads/master | 2021-01-01T17:42:38.279946 | 2016-09-23T14:17:21 | 2016-09-23T14:17:21 | 42,048,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | spec | # -*- mode: python -*-
# PyInstaller build specification for tube_exam (typically generated by
# pyi-makespec and consumed by `pyinstaller tube_exam.spec`).
block_cipher = None  # no bytecode-encryption key is used
# Analysis: scan the entry script and collect imports, binaries and data.
a = Analysis(['tube_exam.py'],
             pathex=['C:\\Users\\Noah\\Documents\\GitHub\\TubeShapeExam'],
             binaries=None,
             datas=None,
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
# PYZ: the compressed archive of the pure-Python modules found above.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# EXE: one windowed executable (console=False) with a custom icon.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          name='tube_exam',
          debug=False,
          strip=False,
          upx=True,
          console=False , icon='C:\\Users\\Noah\\Desktop\\zh2.ico')
| [
"noahge88@163.com"
] | noahge88@163.com |
7f1ec15ab9ef6757f179eacf232738d26043bf92 | c5e4577bbb6ab388f7bd252ae37239aedf7ac27e | /env/bin/pyrsa-sign | f8c4d4f3cfb2174509ce8f144e6c42c26524fa2f | [] | no_license | ChanJin0801/CabbagePrice-MachineLearning | 22b7aea5fe470849473e8fd76b8833fce3ac9f0b | 036a011e4d78c6105881fef1247d4551509733b2 | refs/heads/master | 2021-06-28T12:23:59.093854 | 2020-11-30T18:25:29 | 2020-11-30T18:25:29 | 183,192,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/Users/parkchanjin/PycharmProjects/untitled/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import sign
if __name__ == '__main__':
    # setuptools console-script shim: drop any "-script.py"/"-script.pyw"/
    # ".exe" wrapper suffix from argv[0] before dispatching to the rsa CLI.
    stripped = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = stripped
    sys.exit(sign())
| [
"parkchanjin@Parks-MacBook-Pro.local"
] | parkchanjin@Parks-MacBook-Pro.local | |
b0853a9aba65d24c4142d61fcce38fcedb426468 | 2420a09930fcc1a0d3c67a0791be70ddee418f4a | /Kth_Largest_Element_in_an_Array.py | d08f8e38b151d423cded627522ff355833c7db5b | [] | no_license | Superbeet/LeetCode | eff8c2562fb5724b89bc2b05ab230a21b67a9e5a | a1b14fc7ecab09a838d70e0130ece27fb0fef7fd | refs/heads/master | 2020-04-06T03:34:10.973739 | 2018-02-13T00:57:06 | 2018-02-13T00:57:06 | 42,485,335 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | # Use Bubble k times - Time Complexity: O(nk)
class Solution3(object):
    def findKthLargest(self, nums, k):
        """Kth largest via k bubble passes that float the k largest to the end.

        Time: O(n*k).  Mutates *nums* in place (original behaviour kept).
        Fix: ``xrange`` is Python-2-only; ``range`` behaves identically here
        and also runs on Python 3.
        """
        if not nums:
            return None
        size = len(nums)
        for i in range(k):
            # One pass bubbles the current maximum of the unsorted prefix
            # to position size-1-i.
            for j in range(size - 1 - i):
                if nums[j] > nums[j + 1]:
                    nums[j], nums[j + 1] = nums[j + 1], nums[j]
        return nums[-k]
# Time complexity: O(k + (n-k)Logk) <~> O(nlogk)
import heapq
class MinHeap(object):
    """Bounded min-heap that keeps only the *k* largest values pushed so far."""

    def __init__(self, k):
        self.k = k       # capacity
        self.data = []   # heap storage; data[0] is the smallest kept value

    def push(self, element):
        if len(self.data) >= self.k:
            # Full: only a value larger than the current minimum displaces it.
            if element > self.data[0]:
                heapq.heapreplace(self.data, element)
        else:
            heapq.heappush(self.data, element)

    def pop(self):
        """Remove and return the smallest kept value."""
        return heapq.heappop(self.data)
class Solution(object):
    def findKthLargest(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int

        Push everything through a size-k MinHeap; its minimum is then the
        kth largest.  Time: O(k + (n-k)*log k).
        Fix: ``xrange`` is Python-2-only; iterating the list directly is
        behaviourally identical and also runs on Python 3.
        """
        if not nums:
            return None
        heap = MinHeap(k)
        for value in nums:
            heap.push(value)
        return heap.pop()
class MaxHeap(object):
def __init__(self, k):
self.k = k
self.data = []
def push(self, element):
element = -element
if len(self.data)<self.k:
heapq.heappush(self.data, element)
else:
if element>self.data[0]:
heapq.heapreplace(self.data, element)
def pop(self):
return -heapq.heappop(self.data)
class Solution2(object):
def findKthLargest(self, nums, k):
if not nums:
return None
size = len(nums)
heap = MaxHeap(size)
for i in xrange(0, size):
heap.push(nums[i])
for j in xrange(k-1):
heap.pop()
return heap.pop()
sol = Solution()
sol2 = Solution2()
sol3 = Solution3()
nums = [3,2,1,5,6,4,11,8,7]
print sol.findKthLargest(nums, 2)
print sol2.findKthLargest(nums, 2)
print sol3.findKthLargest(nums, 2)
| [
"aslan.yeh2010@gmail.com"
] | aslan.yeh2010@gmail.com |
e9b0d83166433673108606dc3ca407e5877e0e82 | 8cc9c59c24464df769fdc3f0d6bc1b295aba1001 | /aiwolfpy/Indigo/agents5_base.py | 4ee9d10ad6016df87296f0b099b42093426427b1 | [] | no_license | kaoru-k/AI-Indigo | 28a1ef2d17a8afccf56b42d2f82bcab9c2a240b5 | 71dfe7c639d5dd96a46d4d93c6f8534855746ba7 | refs/heads/master | 2020-03-27T02:23:02.301879 | 2018-08-23T03:18:34 | 2018-08-23T03:18:34 | 145,786,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,562 | py | import pandas as pd
import random
class Agents5_base(object):
def __init__(self):
self.prob = pd.read_csv('result_prob.csv',index_col='VERB')
self.roleEst = pd.DataFrame(
{'WEREWOLF': [1.0,1.0,1.0,1.0,1.0],
'POSSESSED': [1.0,1.0,1.0,1.0,1.0],
'SEER' : [1.0,1.0,1.0,1.0,1.0],
'VILLAGER' : [1.0,1.0,1.0,1.0,1.0],},
columns = ['WEREWOLF','POSSESSED','SEER','VILLAGER'],
index = [1,2,3,4,5])
self.lastGame = 'newtral'
def initialize(self, base_info, game_setting):
self.game_setting = game_setting
self.base_info = base_info
self.agentIdx = self.base_info['agentIdx']
self.idxlist = []
for k in base_info['statusMap'].keys():
self.idxlist.append(int(k))
self.roleEst = pd.DataFrame(
{'WEREWOLF': [1.0,1.0,1.0,1.0,1.0],
'POSSESSED': [1.0,1.0,1.0,1.0,1.0],
'SEER' : [1.0,1.0,1.0,1.0,1.0],
'VILLAGER' : [1.0,1.0,1.0,1.0,1.0],},
columns = ['WEREWOLF','POSSESSED','SEER','VILLAGER'],
index = self.idxlist)
for i in self.idxlist:
for column in self.roleEst.columns:
if i == self.agentIdx:
if column == base_info['myRole']:
self.roleEst.at[i,column] = 1.0
else:
self.roleEst.at[i,column] = 0.0
elif column == base_info['myRole']:
self.roleEst.at[i,column] = 0.0
self.firstTalk = False # 初日の挨拶をしたかどうか
self.dayGreeting = False # 1日の始まりに挨拶をしたか
self.esttalk1 = False # ESTIMATE発言をしたかどうか1
self.esttalk2 = False # ESTIMATE発言をしたかどうか2
self.seeridx = None # 占い結果を報告した人の番号
self.divdidx = None # DIVINEDしてきたプレイヤー番号
self.divrepo = False # 占い結果を報告したかどうか
self.divresult = '' # 占い結果
self.liedividx = None # 嘘の占い結果を言った対象のID
self.talkFrom = -1 # 誰かから話しかけられたか(デフォルト:-1)
self.talkContent = '' # 話しかけられた内容
self.foundwolf = None # 自分が占い師の時のみ 狼を見つけた場合の狼のプレイヤー番号
self.foundhuman = None # 自分が占い師の時のみ 人間を見つけた場合のそのプレイヤー番号
self.possessedCO = None # 自分が狼の時のみ 狂人COしたプレイヤー番号
self.maybepossessed = None # 自分が狼/村人時のみ 占い結果的に狂人なプレイヤー番号
self.maybeseer = None # 自分が狼/村人時のみ 自分を狼の占い結果を言ったプレイヤー番号
self.estlist ={
self.idxlist[0] : '',
self.idxlist[1] : '',
self.idxlist[2] : '',
self.idxlist[3] : '',
self.idxlist[4] : '',
} # ESTIMATEした役職名
self.coedlist ={
self.idxlist[0] : '',
self.idxlist[1] : '',
self.idxlist[2] : '',
self.idxlist[3] : '',
self.idxlist[4] : '',
} # COMINGOUTした役職名
self.divdlist = {
self.idxlist[0] : '',
self.idxlist[1] : '',
self.idxlist[2] : '',
self.idxlist[3] : '',
self.idxlist[4] : '',
} # DIVINEされた種族名
self.gamefin = False # ゲームがfinishしたかどうか
print('My agentidx = {}, role = {}'.format(self.agentIdx, self.base_info['myRole']))
def update(self,base_info, diff_data, request):
if request == 'DAILY_INITIALIZE':
for i in range(diff_data.shape[0]):
# DIVINE
if diff_data['type'][i] == 'divine':
self.divresult = diff_data['text'][i]
# POSSESSED
if self.base_info['myRole'] == 'POSSESSED':
while True:
i = random.choice(self.idxlist)
if self.probably('SEER') != i and i != self.agentIdx:
self.divresult = 'divine Agent[' + '{0:02d}'.format(i) + '] WEREWOLF'
break
self.base_info = base_info
# print(base_info)
def read_talklog(self, gamedf, i, t):
content = t.split()
# 文頭にアンカーが付いているときの処理
if content[0][:8] == '>>Agent[':
if int(content[0][8:10]) == self.agentIdx:
self.talkFrom = gamedf.agent[i]
self.talkContent = content[1:]
content = content[1:]
if content[0] == 'ESTIMATE':
self.estlist[gamedf.agent[i]] = content[2]
if gamedf.agent[i] != self.agentIdx:
self.update_est(gamedf.agent[i],content[0] + '(' + content[2] + ')')
elif content[0] == 'COMINGOUT':
self.coedlist[gamedf.agent[i]] = content[2]
if gamedf.agent[i] != self.agentIdx:
self.update_est(gamedf.agent[i],content[0] + '(' + content[2] + ')')
elif content[0] == 'VOTE':
if gamedf.agent[i] != self.agentIdx:
self.update_est(gamedf.agent[i],'VOTE')
elif content[0] == 'DIVINED':
n = int(content[1][6:8])
if self.divdlist[n] == '':
self.divdlist[n] = content[2]
elif self.divdlist[n] == 'HUMAN' and content[2] == 'HUMAN':
self.divdlist[n] = 'HUMAN_EX'
elif self.divdlist[n] == 'HUMAN' and content[2] == 'WEREWOLF':
self.divdlist[n] = 'PANDA'
elif self.divdlist[n] == 'WEREWOLF' and content[2] == 'HUMAN':
self.divdlist[n] = 'PANDA'
elif self.divdlist[n] == 'WEREWOLF' and content[2] == 'WEREWOLF':
self.divdlist[n] = 'WEREWOLF_EX'
if gamedf.agent[i] != self.agentIdx:
self.seeridx = gamedf.agent[i]
self.update_est(gamedf.agent[i],'DIVINED(' + content[2] + ')')
if self.base_info['myRole'] == 'WEREWOLF':
if n == self.agentIdx:
if content[2] == 'WEREWOLF':
self.maybeseer = gamedf.agent[i]
elif content[2] == 'HUMAN':
self.maybepossessed = gamedf.agent[i]
elif n != self.agentIdx:
if content[2] == 'WEREWOLF':
self.maybepossessed = gamedf.agent[i]
elif self.base_info['myRole'] == 'VILLAGER':
if n == self.agentIdx:
if content[2] == 'WEREWOLF':
self.maybepossessed = gamedf.agent[i]
elif content[0] == 'Over' or content == 'Skip':
if gamedf.agent[i] != self.agentIdx:
self.update_est(gamedf.agent[i],content[0])
else:
pass
'''
役職推定データを更新
'''
def update_est(self,i,text):
for role in ['WEREWOLF', 'POSSESSED', 'SEER', 'VILLAGER']:
self.roleEst.at[i,role] *= self.prob.at[text,role]
def dayStart(self):
self.divrepo = False
self.gamefin = False
self.dayGreeting = False
def finish(self):
if self.gamefin == False:
self.gamefin = True
print(self.base_info['myRole'] ,self.base_info['statusMap'][str(self.agentIdx)],flush=True)
if self.base_info['myRole'] == 'WEREWOLF' and self.base_info['statusMap'][str(self.agentIdx)] == 'ALIVE':
self.lastGame = 'win'
elif self.base_info['statusMap'][str(self.agentIdx)] == 'DEAD':
self.lastGame = 'lose'
'''
推定結果に応じて行動を選択する
'''
def action(self, cb, act):
# print(act)
if act == 'talk':
# 0日目
if self.base_info['day'] == 0:
return cb.over()
# 1日目
elif self.base_info['day'] == 1:
if self.base_info['myRole'] == 'WEREWOLF':
if self.divdlist[self.agentIdx][:8] == 'WEREWOLF' and self.coedlist[self.agentIdx] == '':
self.coedlist[self.agentIdx] = 'VILLAGER'
return cb.comingout(self.agentIdx, 'VILLAGER')
elif self.divdlist[self.agentIdx][:8] == 'WEREWOLF' and self.esttalk1 == False and self.maybeseer != None:
self.esttalk1 = True
self.estlist[self.agentIdx] = 'WEREWOLF'
return cb.estimate(self.maybeseer,'WEREWOLF')
elif self.maybepossessed != None and self.esttalk2 == False:
self.esttalk2 = True
return cb.estimate(self.maybepossessed,'SEER')
else:
return cb.skip()
if self.base_info['myRole'] == 'VILLAGER':
if self.divdlist[self.agentIdx][:8] == 'WEREWOLF' and self.coedlist[self.agentIdx] == '':
self.coedlist[self.agentIdx] = 'VILLAGER'
return cb.comingout(self.agentIdx, 'VILLAGER')
elif self.esttalk1 == False and self.maybepossessed != None:
self.esttalk1 = True
return cb.estimate(self.maybepossessed , 'POSSESSED')
else:
return cb.skip()
elif self.base_info['myRole'] == 'POSSESSED':
#if self.seeridx is not None:
if self.coedlist[self.agentIdx] == '':
self.coedlist[self.agentIdx] = 'SEER'
return cb.comingout(self.agentIdx, 'SEER')
elif self.divrepo == False:
for i in self.idxlist:
if self.divdlist[i][:8] == 'WEREWOLF':
while True:
j = random.choice(self.idxlist)
#if j == i and j != self.agentIdx:
# self.divrepo = True
# return cb.divined(j,'HUMAN')
if j != i and j != self.agentIdx and j != self.probably('SEER'):
self.divrepo = True
self.liedividx = j
return cb.divined(j,'WEREWOLF')
elif self.divdlist[i][:8] == 'HUMAN':
while True:
j = random.choice(self.idxlist)
if j != self.probably('WEREWOLF') and j != self.probably('SEER') and j != self.agentIdx:
self.divrepo = True
self.liedividx = j
return cb.divined(j,'WEREWOLF')
return cb.skip()
elif self.esttalk1 == False:
for i in self.idxlist:
if self.coedlist[i] == 'SEER' and i != self.agentIdx:
self.esttalk1 = True
return cb.estimate(i,'POSSESSED')
return cb.skip()
elif self.esttalk2 == False:
for i in self.idxlist:
if self.coedlist[i] == 'WEREWOLF' and i != self.agentIdx and i != self.liedividx:
self.esttalk2 = True
return cb.estimate(i,'VILLAGER')
return cb.skip()
else:
return cb.skip()
#else:
# return cb.skip()
elif self.base_info['myRole'] == 'SEER':
if True: # self.seeridx is not None:
if self.coedlist[self.agentIdx] == '':
self.coedlist[self.agentIdx] = 'SEER'
return cb.comingout(self.agentIdx, 'SEER')
elif self.divrepo == False:
self.divrepo = True
d = self.divresult.split()
if d[2] == 'WEREWOLF':
self.foundwolf = int(d[1][6:8])
# 後追加するなら
# ・初日占い先が占いCOした時の分岐
elif d[2] == 'HUMAN':
self.foundhuman = int(d[1][6:8])
#if self.coedlist[int(d[1][6:8])] != 'SEER':
# return cb.divined(int(d[1][6:8]),d[2])
#elif self.coedlist[int(d[1][6:8])] == 'SEER':
# if d[2] == 'HUMAN':
# i = random.choice(self.idxlist)
# if i != int(d[1][6:8]) and self.divdlist[i] != 'WEREWOLF' and i == self.probably('WEREWOLF'):
# return cb.divined(i,'WEREWOLF')
# elif d[2] == 'WEREWOLF':
# return cb.divined(int(d[1][6:8]),d[2])
return cb.divined(int(d[1][6:8]),d[2])
elif self.esttalk1 == False:
for i in self.idxlist:
if self.coedlist[i] == 'SEER' and i != self.agentIdx :
self.esttalk1 = True
if i != self.foundwolf:
return cb.estimate(i,'POSSESSED')
else:
return cb.estimate(i,'WEREWOLF')
return cb.skip()
elif self.esttalk2 == False:
for i in self.idxlist:
if self.divdlist[i] == 'WEREWOLF' and self.divdlist[i] != self.foundwolf:
self.esttalk2 = True
return cb.estimate(i,'VILLAGER')
return cb.skip()
else:
return cb.skip()
else:
return cb.skip()
else:
return cb.skip()
# 2日目
else:
if self.base_info['myRole'] == 'SEER' and self.coedlist[self.agentIdx] != 'WEREWOLF' and self.coedlist[self.agentIdx] != 'POSSESSED':
if self.divrepo == False:
self.divrepo = True
d = self.divresult.split()
return cb.divined(int(d[1][6:8]),d[2])
# elif self.base_info['statusMap'][str(self.probably('POSSESSED'))] == 'ALIVE':
# self.coedlist[self.agentIdx] = 'WEREWOLF'
# return cb.comingout(self.agentIdx, 'WEREWOLF')
# else:
# self.coedlist[self.agentIdx] = 'POSSESSED'
# return cb.comingout(self.agentIdx, 'POSSESSED')
else:
return cb.skip()
elif self.base_info['myRole'] == 'POSSESSED' and self.coedlist[self.agentIdx] != 'POSSESSED' :
self.coedlist[self.agentIdx] = 'POSSESSED'
return cb.comingout(self.agentIdx, 'POSSESSED')
elif self.base_info['myRole'] == 'WEREWOLF':
for i in self.idxlist:
if self.coedlist[i] == 'POSSESSED' and i != self.agentIdx and self.possessedCO == None and i == self.maybepossessed:
self.possessedCO = i
return cb.comingout(self.agentIdx, 'WEREWOLF')
else:
break
if self.divdlist[self.agentIdx][:8] == 'WEREWOLF' and self.coedlist[self.agentIdx] == '':
self.coedlist[self.agentIdx] = 'VILLAGER'
return cb.comingout(self.agentIdx, 'VILLAGER')
elif self.divdlist[self.agentIdx][:8] == 'WEREWOLF' and self.esttalk1 == False:
self.esttalk1 = True
self.estlist[self.agentIdx] = 'WEREWOLF'
return cb.estimate(self.seeridx,'WEREWOLF')
else:
return cb.skip()
elif self.base_info['myRole'] == 'VILLAGER' and self.coedlist[self.agentIdx] != 'WEREWOLF' and self.coedlist[self.agentIdx] != 'POSSESSED':
# if self.base_info['statusMap'][str(self.probably('POSSESSED'))] == 'ALIVE':
# self.coedlist[self.agentIdx] = 'WEREWOLF'
# return cb.comingout(self.agentIdx, 'WEREWOLF')
# else:
# self.coedlist[self.agentIdx] = 'POSSESSED'
# return cb.comingout(self.agentIdx, 'POSSESSED')
if self.divdlist[self.agentIdx][:8] == 'WEREWOLF' and self.coedlist[self.agentIdx] == '':
self.coedlist[self.agentIdx] = 'VILLAGER'
return cb.comingout(self.agentIdx, 'VILLAGER')
else:
return cb.skip()
else:
return cb.skip()
elif act == 'vote':
if self.base_info['myRole'] == 'WEREWOLF':
p = self.probably('POSSESSED')
for i in self.idxlist:
if self.base_info['day'] == 2 and (self.maybepossessed != None or self.possessedCO != None):
if i != self.agentIdx and i != self.maybepossessed and i != self.possessedCO:
return i
elif (self.divdlist[i] == 'WEREWOLF' or self.divdlist[i] != 'WEREWOLF_EX') and i != self.agentIdx and i != p:
return i
elif self.divdlist[i] == 'HUMAN' or self.divdlist[i] == 'HUMAN_EX':
if i != p and i != self.agentIdx and i != p:
return i
while True:
i = random.choice(self.idxlist)
if i != p:
return i
elif self.base_info['myRole'] == 'POSSESSED':
p = self.probably('WEREWOLF')
while True:
i = random.choice(self.idxlist)# 修正 自分の占い結果はdivd にはいっているか?
if self.base_info['day'] == 1:
if (self.divdlist[i] != 'WEREWOLF' or self.divdlist != 'WEREWOLF_EX') and i != p and i != self.agentIdx:
return i
elif self.base_info['day'] == 2:
if self.coedlist[i] != 'WEREWOLF' and i != p and i != self.agentIdx:
return i
if i != p and i != self.agentIdx:
return i
elif self.base_info['myRole'] == 'SEER':
if self.foundwolf != None:
return self.foundwolf
# elif 'WEREWOLF_EX' in self.divdlist:
# for i in self.idxlist:
# if self.divdlist[i] == 'WEREWOLF_EX' and i != self.agentIdx:
# return i
# elif 'WEREWOLF' in self.divdlist or self.divdlist != 'WEREWOLF_EX':
# for i in self.idxlist:
# if (self.divdlist[i] == 'WEREWOLF' or self.divdlist[i] != 'WEREWOLF_EX') and i != self.agentIdx and i == self.foundwolf:
# return i
for i in self.idxlist:
if i == self.probably('WEREWOLF') and i != self.foundhuman and i != self.agentIdx:
return i
for i in self.idxlist:
if i != self.foundhuman and i != self.agentIdx:
return i
elif self.base_info['myRole'] == 'VILLAGER':
for i in self.idxlist:
if self.divdlist[i] == 'WEREWOLF_EX' and i != self.agentIdx:
return i
for i in self.idxlist:
if self.divdlist[i] == 'WEREWOLF' and i != self.agentIdx and i == self.probably('WEREWOLF'):
return i
for i in self.idxlist:
if self.divdlist[i] == 'PANDA' and i != self.agentIdx and i == self.probably('WEREWOLF'):
return i
return self.probably('WEREWOLF')
elif act == 'divine':
if self.base_info['day'] == 0:
while True:
i = random.choice(self.idxlist)
if i != self.agentIdx:
return i
else:
if self.foundwolf != None:
while True:
i = random.choice(self.idxlist)
if i != self.agentIdx and i != self.foundwolf:
return i
elif self.foundhuman != None:
while True:
i = random.choice(self.idxlist)
if i != self.agentIdx and i != self.foundwolf and i != self.foundhuman:
return i
else:
while True:
i = random.choice(self.idxlist)
if i != self.agentIdx :
return i
elif act == 'attack':
# while True:
# i = random.choice(self.idxlist)
# if i != self.agentIdx and i != self.probably('POSSESSED'):
# return i
if self.base_info['day'] == 1:
if self.maybepossessed != None:
if 'HUMAN' in self.divdlist or 'HUMAN_EX' in self.divdlist:
for i in self.idxlist:
if (self.divdlist[i] == 'HUMAN' or self.divdlist[i] == 'HUMAN_EX') and i != self.maybepossessed:
return i
else:
while True:
i = random.choice(self.idxlist)
if i != self.maybepossessed and self.probably('POSSESSED'):
return i
elif 'HUMAN' in self.divdlist or 'HUMAN_EX' in self.divdlist:
for i in self.idxlist:
if (self.divdlist[i] == 'HUMAN' or self.divdlist[i] == 'HUMAN_EX') and i != self.probably('POSSESSED'):
return i
else:
return self.probably('SEER')
elif self.base_info['day'] == 2:
if self.maybeseer != None:
for i in self.idxlist:
if i == self.maybeseer:
return i
elif self.maybepossessed != None:
for i in self.idxlist:
if i != self.maybepossessed:
return i
else:
#p = self.probably('POSSESSED')
#for i in self.idxlist:
# if i != p and i == self.agentIdx:
# return i
return self.probably('SEER')
def probably(self,role):
return self.roleEst.idxmax()[role] | [
"kaoru_kimura@outlook.com"
] | kaoru_kimura@outlook.com |
2f10b24870ef56af83a067e78e716114893fecd4 | 48f755943173648d2a2b3650fc8a6f1e610c5d63 | /python/util/send_mail_file.py | 4572a11df74f8ded422a281a06452551e61b6036 | [] | no_license | dreamhighqiu/python-selenium | 2893e32ce17e4d68bf28d1485ff405575819dd2d | 8090c8d7d28ba7be46212168a19a6ea2ee99de23 | refs/heads/master | 2021-05-09T07:27:25.053479 | 2018-02-01T03:38:55 | 2018-02-01T03:38:55 | 119,361,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | #-*-coding:utf-8-*-
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import os
import sys
from config import readConfig
reload(sys)
sys.setdefaultencoding('utf8')
class Send_Mail_file(object):
def get_report_file(self,report_path):
'''第三步:获取最新的测试报告'''
lists = os.listdir(report_path)
lists.sort(key=lambda fn: os.path.getmtime(os.path.join(report_path, fn)))
print (u'最新测试生成的报告: ' + lists[-1])
# 找到最新生成的报告文件
report_file = os.path.join(report_path, lists[-1])
return report_file
def send_mail(self,sender, psw, receiver, smtpserver, report_file,port=None):
'''第四步:发送最新的测试报告内容'''
with open(report_file, "rb") as f:
mail_body = f.read()
# 定义邮件内容
msg = MIMEMultipart()
body = MIMEText(mail_body, _subtype='html', _charset='utf-8')
msg['Subject'] = u"自动化测试报告"
msg["from"] = sender
# msg["to"] = ";".join(receiver)
msg["to"] = receiver
msg.attach(body)
# 添加附件
att = MIMEText(open(report_file, "rb").read(), "base64", "utf-8")
att["Content-Type"] = "application/octet-stream"
att["Content-Disposition"] = 'attachment; filename= "report.html"'
msg.attach(att)
if port != None:
smtp = smtplib.SMTP_SSL(smtpserver, port)
else:
smtp = smtplib.SMTP()
smtp.connect(smtpserver)
# 用户名密码
smtp.login(sender, psw)
smtp.sendmail(sender, receiver, msg.as_string())
smtp.quit()
print('test report email has send out !')
if __name__ == "__main__":
cur_path = os.path.dirname(os.getcwd())
report_path = os.path.join(cur_path, "report") # 用例文件夹
mail = Send_Mail_file()
report_file = mail.get_report_file(report_path) # 3获取最新的测试报告
sender = readConfig.sender
psw = readConfig.psw
smtp_server = readConfig.smtp_server
port = readConfig.port
receiver = readConfig.receiver
mail.send_mail(sender, psw, receiver, smtp_server, report_file,port ) # 4最后一步发送报告 | [
"qiuyunxia@GZIT003.ycf.com"
] | qiuyunxia@GZIT003.ycf.com |
4593348a596f67161e5cd0bdda5887485b8eabcb | 108517cfbd1b6843ed6ab4b030ddd1bac13d0c70 | /src/hero.py | de63143d0f59492af1107518f873811567924226 | [] | no_license | lefranco/pnethack | 58adf54bc69347d27c4d3b3c6904382ba5793759 | 5e4f00d2b0bd1be33607131db209affdb79f0390 | refs/heads/master | 2022-12-19T05:18:53.053443 | 2020-09-26T19:57:57 | 2020-09-26T19:57:57 | 298,888,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,542 | py | #!/usr/bin/env python3
"""
File : hero.py
Stuff related to the hero (the monster controlled by the player) only.
"""
import typing
import constants
import myrandom
import abstractlevel
import alignment
import monsters
import mapping
class Attribute:
""" Attribute class """
# will be superseded
short_name = "à"
def __init__(self, value: int) -> None:
self._value = value
self._value_min = 3
self._value_max = 25
def __str__(self) -> str:
return f"{type(self).short_name}:{self._value}"
def improve(self) -> None:
""" improve """
if self._value == self._value_max:
return
self._value += 1
def worsen(self) -> None:
""" worsen """
if self._value < self._value_min:
return
self._value -= 1
@property
def value(self) -> int:
""" property """
return self._value
class AttributeStrength(Attribute):
""" Strength """
name = "Strength"
short_name = "St"
def __init__(self, value: int) -> None:
Attribute.__init__(self, value)
self._value_max = 30
def to_hit_bonus(self) -> int:
""" to hit bonus from strength """
if self._value <= 5:
return -2
if self._value <= 7:
return -1
if self._value <= 16:
return 0
if self._value <= 19:
return 1
if self._value <= 22:
return 2
return 3
def damage_bonus(self) -> int:
""" damage bonus from strength """
if self._value <= 5:
return -1
if self._value <= 15:
return 0
if self._value <= 17:
return 1
if self._value <= 18:
return 2
if self._value <= 20:
return 3
if self._value <= 21:
return 4
if self._value <= 22:
return 5
return 6
def __str__(self) -> str:
if self._value <= 18:
strength_value = str(self._value)
elif self._value == 19:
strength_value = "18/50"
elif self._value == 20:
strength_value = "18/63"
elif self._value == 21:
strength_value = "18/83"
elif self._value == 22:
strength_value = "18/95"
elif self._value == 23:
strength_value = "18:/**"
else:
strength_value = str(self._value - 5)
return f"{type(self).short_name}:{strength_value}"
class AttributeDexterity(Attribute):
""" Dexterity """
name = "Dexterity"
short_name = "Dx"
class AttributeConstitution(Attribute):
""" Constitution """
name = "Constitution"
short_name = "Co"
class AttributeIntelligence(Attribute):
""" Intelligence """
name = "Intelligence"
short_name = "In"
class AttributeWisdom(Attribute):
""" Wisdom """
name = "Wisdom"
short_name = "Wi"
class AttributeCharisma(Attribute):
""" Charisma """
name = "Charisma"
short_name = "Ch"
class Attributes:
""" Attributes """
def __init__(self, strength: int, dexterity: int, constitution: int, intelligence: int,
wisdom: int, charisma: int) -> None:
self._strength = AttributeStrength(strength)
self._dexterity = AttributeDexterity(dexterity)
self._constitution = AttributeConstitution(constitution)
self._intelligence = AttributeIntelligence(intelligence)
self._wisdom = AttributeWisdom(wisdom)
self._charisma = AttributeCharisma(charisma)
@property
def strength(self) -> AttributeStrength:
""" property """
return self._strength
def __str__(self) -> str:
return f"{self._strength} {self._dexterity} {self._constitution} " f"{self._intelligence} {self._wisdom} {self._charisma}"
class ExperiencePoints:
""" Experience points """
name = "Experience Points"
short_name = "Exp"
def __init__(self, value: int) -> None:
self._value = value
def gain(self, increase: int) -> None:
""" gains some XPs """
self._value += increase
def __str__(self) -> str:
return f"{type(self).short_name}:{self._value}"
class Score:
""" Score """
name = "Score"
short_name = "S"
def __init__(self, value: int) -> None:
self._value = value
def gain(self, increase: int) -> None:
""" Gain of score """
self._value += increase
def __str__(self) -> str:
return f"{type(self).short_name}:{self._value}"
class Luck:
""" Score """
def __init__(self) -> None:
self._value = 0
def gain(self, increase: int) -> None:
""" Gain of luck """
self._value += increase
def lose(self, increase: int) -> None:
""" Loss of luck """
self._value -= increase
@property
def value(self) -> int:
""" property """
return self._value
class HeroParentClass:
""" This class is parent to classes HeroClass, HeroRace and GenericHero """
def __init__(self) -> None:
# put here stuff that need to be visible by race, role...
# chances of success of actions (percent)
self._success_secret_detection = 25.0
class GenericHero(monsters.Monster, HeroParentClass):
""" Generic Hero class """
def __init__(self, dungeon_level: abstractlevel.AbstractLevel, position: typing.Tuple[int, int], hero_alignment: alignment.Alignment, money_given: int) -> None:
monsters.Monster.__init__(self, monsters.MonsterTypeEnum.HERO, dungeon_level, position, money_given)
HeroParentClass.__init__(self)
# mapping memory
self._mapping_memory: typing.Dict[int, typing.Dict[typing.Tuple[int, int], bool]] = dict()
# attributes
self._hero_name = constants.HERO_NAME
self._attributes = Attributes(myrandom.dice("3d6"), myrandom.dice("3d6"), myrandom.dice("3d6"), myrandom.dice("3d6"), myrandom.dice("3d6"), myrandom.dice("3d6"))
self._xpoints = ExperiencePoints(0)
self._score = Score(0)
self._luck = Luck()
self._hero_alignment = hero_alignment
def note_mapping(self, level: abstractlevel.AbstractLevel, the_mapping: mapping.Mapping) -> None:
""" Store mapping info hero has in his head """
self._mapping_memory[level.identifier] = the_mapping.has_seen
def recall_mapping(self, level: abstractlevel.AbstractLevel) -> typing.Dict[typing.Tuple[int, int], bool]:
""" The hero changes level, note what he know the level he is leaving """
return self._mapping_memory[level.identifier]
def give_status(self, game_turn: int) -> typing.Tuple[str, str, str]:
""" Extracts all status information about the hero """
# pylint: disable=no-member
line1 = f"{self._hero_name} the {self.race_name} {self.role_name} {self._attributes} {self._hero_alignment} {self._score}" # type: ignore
level = f"{self._dungeon_level.depth}{self._dungeon_level.branch}" if self._dungeon_level else "<outside>"
line2 = f"{level} {self._purse} {self._hit_points} {self._power_points} {self._armourclass} {self._xpoints} {game_turn}"
line3 = "Bon pied, bon oeil !"
return (line1, line2, line3)
def enlightment(self) -> typing.List[str]:
""" extract all enlightment information """
table = list()
table.append(f"Chances of secret detection = {self._success_secret_detection}")
return table
def strength_value(self) -> int:
""" strength """
return self.attributes.strength.value
def proba_secret_detection_value(self) -> float:
""" proba secret detection """
return self._success_secret_detection
def luck_value(self) -> int:
""" luck """
return self._luck.value
@property
def hero_alignment(self) -> alignment.Alignment:
""" property """
return self._hero_alignment
@property
def attributes(self) -> Attributes:
""" property """
return self._attributes
def create_hero_class(hero_race: typing.Type, hero_class: typing.Type) -> typing.Type: # type: ignore
""" Important function : create a hero class that inherits from hero, its class, its race """
hero_class_name = f"{hero_race.race_name}_{hero_class.role_name}"
return type(hero_class_name, (GenericHero, hero_class, hero_race), {})
if __name__ == '__main__':
assert False, "Do not run this script"
| [
"jeremie.lefrancois@gmail.com"
] | jeremie.lefrancois@gmail.com |
cea8f85549e20e56b361532625210c10df856781 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2818/60900/249172.py | 7ab8f518b83de81d5c00a7ebae67bc19775a6307 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | str1 = input()
str2 = input()
data1 = str1.split(" ")
chapter = str2.split(" ")
subject = (int)(data1[0])
time = (int)(data1[1])
total = 0
temp = 0
index = 0
while len(chapter)!=0:
temp = (int)(chapter[0])*time
index = 0
for i in range (0,len(chapter)):
if(temp>(int)(chapter[i])*time):
temp = (int)(chapter[i])*time
index = i
total = total+temp
del chapter[index]
if time!=1:
time = time-1
print(total)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
a2a507da9523f362a5fdf59ea6a41aee4088fd67 | 7324045a684d667ce2bff445851953ca4b73540c | /renthop.py | 14ff9f66f039bbb33c75d804ddd7d3cbce8494e2 | [] | no_license | yhyhUMich/renthop_2sigma | 67ab98b2a38134f24a336f7e31995b3878b55c4e | 23127291b73ad61ab25cdef68aa32bd5ee76151b | refs/heads/master | 2021-01-19T01:18:24.838155 | 2017-04-17T22:05:23 | 2017-04-17T22:05:23 | 87,239,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,397 | py | import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
import random
from math import exp
import xgboost as xgb
import os
# --- Script setup -----------------------------------------------------------
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
os.chdir("C:\\Users\\yuan\\Desktop\\renthop_2sigma")
print("hang's code")
# Fix both RNGs so the noise added during target encoding is reproducible.
random.seed(321)
np.random.seed(321)
# Load the competition data (JSON records of rental listings).
X_train = pd.read_json("./train.json")
X_test = pd.read_json("./test.json")
# Encode the target as integers: low=0, medium=1, high=2.
interest_level_map = {'low': 0, 'medium': 1, 'high': 2}
X_train.interest_level = X_train.interest_level.apply(lambda x: interest_level_map[x])
# Test rows have no label; use -1 as a placeholder so the column exists.
X_test["interest_level"] = -1
# Add feature variables.
# Normalize all the feature tokens.
def normal_features(x):
    """Normalize a listing's raw feature list into one space-separated string.

    Each feature is lower-cased and its internal spaces replaced by
    underscores so it becomes a single token for CountVectorizer.
    Some listings pack several features into one string joined by '*'
    (e.g. "Cats Allowed * Dogs Allowed"); those are split into separate
    tokens first.

    Parameters
    ----------
    x : iterable of str
        Raw feature strings for one listing.

    Returns
    -------
    str
        Space-separated normalized tokens,
        e.g. ["Hardwood Floors"] -> "hardwood_floors".
    """
    fea_list = []
    for fea in x:
        if "*" in fea:
            for part in fea.split("*"):
                part = part.strip()
                if not part:
                    # Skip empty fragments from leading/trailing/double '*'
                    # (the original appended them, producing stray spaces).
                    continue
                fea_list.append("_".join(part.lower().split(" ")))
        else:
            # No '*' present: the original also split on '*' here, which is a
            # no-op (dead code) — just normalize the whole string.
            fea_list.append("_".join(fea.lower().split(" ")))
    return " ".join(fea_list)
# Normalize the free-form feature lists on both splits.
X_train.features = X_train.features.apply(normal_features)
X_test.features = X_test.features.apply(normal_features)
# Bag-of-words over the normalized feature tokens; keep the 150 most frequent.
feature_transform = CountVectorizer(stop_words='english', max_features=150)
# Fit on train+test together so both splits share one vocabulary.
feature_transform.fit(list(X_train.features) + list(X_test.features))
# Class frequencies used later as the prior for target encoding.
train_size = len(X_train)
low_count = len(X_train[X_train.interest_level == 0])
medium_count = len(X_train[X_train.interest_level == 1])
high_count = len(X_train[X_train.interest_level == 2])
def find_objects_with_only_one_record(feature_name):
    """Return the values of *feature_name* seen exactly once in train+test.

    Concatenates the column from both module-level data sets, counts how
    often each value occurs, and keeps only the singleton values so they
    can later be collapsed into one shared group ("-1").
    """
    combined = pd.concat(
        [X_train[feature_name].reset_index(), X_test[feature_name].reset_index()]
    )
    counts = combined.groupby(feature_name, as_index=False).count()
    singletons = counts[counts['index'] == 1]
    return singletons
# Precompute the singleton groups for the high-cardinality categoricals so
# transform_data can collapse them into a shared "-1" bucket.
managers_with_one_lot = find_objects_with_only_one_record('manager_id')
buildings_with_one_lot = find_objects_with_only_one_record('building_id')
addresses_with_one_lot = find_objects_with_only_one_record('display_address')
# NOTE(review): street_address is also high-cardinality but gets no
# singleton treatment here — presumably intentional, worth confirming.
# Build the feature matrix.
def transform_data(X):
    """Build the engineered feature matrix for one split (train or test).

    Relies on module-level state: ``feature_transform`` (fitted
    CountVectorizer), the class counts (``low_count`` etc.), and the
    ``*_with_one_lot`` singleton tables. Mutates and returns *X*.
    """
    # Expand the fitted bag-of-words over 'features' into dense columns.
    feat_sparse = feature_transform.transform(X["features"])
    vocabulary = feature_transform.vocabulary_
    del X['features']
    X1 = pd.DataFrame([pd.Series(feat_sparse[i].toarray().ravel()) for i in np.arange(feat_sparse.shape[0])])
    X1.columns = list(sorted(vocabulary.keys()))
    X = pd.concat([X.reset_index(), X1.reset_index()], axis=1)
    # Drop the original listing index produced by reset_index.
    del X['index']
    # Simple count/date features.
    X["num_photos"] = X["photos"].apply(len)
    X['created'] = pd.to_datetime(X["created"])
    X["num_description_words"] = X["description"].apply(lambda x: len(x.split(" ")))
    # Price-per-room ratios; clamp 0 rooms to 1 to avoid division by zero.
    # NOTE(review): this overwrites the bedrooms/bathrooms columns in place.
    X.loc[X.loc[:,"bedrooms"] == 0, "bedrooms"] = 1
    X['price_per_bed'] = X['price'] / X['bedrooms']
    X.loc[X.loc[:,"bathrooms"] == 0, "bathrooms"] = 1
    X['price_per_bath'] = X['price'] / X['bathrooms']
    X['price_per_room'] = X['price'] / (X['bathrooms'] + X['bedrooms'])
    # One-hot columns of the target, consumed by the target-encoding step.
    # On the test split interest_level is -1, so all three stay 0.
    X['low'] = 0
    X.loc[X['interest_level'] == 0, 'low'] = 1
    X['medium'] = 0
    X.loc[X['interest_level'] == 1, 'medium'] = 1
    X['high'] = 0
    X.loc[X['interest_level'] == 2, 'high'] = 1
    # Normalize addresses for grouping.
    X['display_address'] = X['display_address'].apply(lambda x: x.lower().strip())
    X['street_address'] = X['street_address'].apply(lambda x: x.lower().strip())
    # Global class priors — the fallback prediction for unseen categories.
    X['pred0_low'] = low_count * 1.0 / train_size
    X['pred0_medium'] = medium_count * 1.0 / train_size
    X['pred0_high'] = high_count * 1.0 / train_size
    # Collapse categories observed only once (across train+test) into "-1".
    X.loc[X['manager_id'].isin(managers_with_one_lot['manager_id'].ravel()),
          'manager_id'] = "-1"
    X.loc[X['building_id'].isin(buildings_with_one_lot['building_id'].ravel()),
          'building_id'] = "-1"
    X.loc[X['display_address'].isin(addresses_with_one_lot['display_address'].ravel()),
          'display_address'] = "-1"
    return X
print("Start transforming X")
X_train = transform_data(X_train)
X_test = transform_data(X_test)
# Integer labels for stratification below.
y = X_train['interest_level'].ravel()
# Smoothing hyperparameters for categorical_average's shrinkage weight
# beta = 1 / (g + exp((cnt - k) / f)); r_k scales the multiplicative noise.
# lambda_val, when set, overrides the computed beta with a constant.
lambda_val = None
k = 5.0
f = 1.0
r_k = 0.01
g = 1.0
def categorical_average(variable, y, pred_0, feature_name):
    """Smoothed, noise-regularized target-mean encoding of *variable*.

    Writes a new column *feature_name* into the module-level X_train
    (out-of-fold, via 5-fold CV to avoid leakage) and X_test (encoded
    from the full training set).

    Parameters
    ----------
    variable : str
        Categorical column to encode (e.g. 'manager_id').
    y : str
        Binary target column ('low'/'medium'/'high'). Shadows the global y.
    pred_0 : str
        Column holding the global class prior, used as the fallback and
        the shrinkage target.
    feature_name : str
        Name of the encoded column to create.

    Uses globals: X_train, X_test, lambda_val, k, f, g, r_k.
    """
    def calculate_average(sub1, sub2):
        # Per-category stats computed on sub1, then merged onto sub2.
        s = pd.DataFrame(data={
            variable: sub1.groupby(variable, as_index=False).count()[variable],
            'sumy': sub1.groupby(variable, as_index=False).sum()['y'],
            'avgY': sub1.groupby(variable, as_index=False).mean()['y'],
            'cnt': sub1.groupby(variable, as_index=False).count()['y']
        })
        tmp = sub2.merge(s.reset_index(), how='left', left_on=variable, right_on=variable)
        del tmp['index']
        # Categories unseen in sub1: treat as zero observations.
        tmp.loc[pd.isnull(tmp['cnt']), ['cnt', 'sumy']] = 0.0
        def compute_beta(row):
            # Shrinkage weight toward the prior; large counts (>=200) get
            # beta ~ 0 (cnt forced to inf), i.e. trust the category mean.
            cnt = row['cnt'] if row['cnt'] < 200 else float('inf')
            return 1.0 / (g + exp((cnt - k) / f))
        if lambda_val is not None:
            tmp['beta'] = lambda_val
        else:
            tmp['beta'] = tmp.apply(compute_beta, axis=1)
        # Blend category mean with the global prior.
        tmp['adj_avg'] = tmp.apply(lambda row: (1.0 - row['beta']) * row['avgY'] + row['beta'] * row['pred_0'],
                                   axis=1)
        # Unseen categories fall back to the prior entirely.
        tmp.loc[pd.isnull(tmp['avgY']), 'avgY'] = tmp.loc[pd.isnull(tmp['avgY']), 'pred_0']
        tmp.loc[pd.isnull(tmp['adj_avg']), 'adj_avg'] = tmp.loc[pd.isnull(tmp['adj_avg']), 'pred_0']
        # Small multiplicative noise (+-r_k/2) as extra regularization.
        tmp['random'] = np.random.uniform(size=len(tmp))
        tmp['adj_avg'] = tmp.apply(lambda row: row['adj_avg'] * (1 + (row['random'] - 0.5) * r_k),
                                   axis=1)
        return tmp['adj_avg'].ravel()
    # Out-of-fold encoding for the training set: each fold's values are
    # computed from the other folds only.
    k_fold = StratifiedKFold(5)
    X_train[feature_name] = -999
    for (train_index, cv_index) in k_fold.split(np.zeros(len(X_train)), X_train['interest_level'].ravel()):
        sub = pd.DataFrame(data={variable: X_train[variable],
                                 'y': X_train[y],
                                 'pred_0': X_train[pred_0]})
        sub1 = sub.iloc[train_index]
        sub2 = sub.iloc[cv_index]
        X_train.loc[cv_index, feature_name] = calculate_average(sub1, sub2)
    # Test set: encode from the full training set.
    sub1 = pd.DataFrame(data={variable: X_train[variable],
                              'y': X_train[y],
                              'pred_0': X_train[pred_0]})
    sub2 = pd.DataFrame(data={variable: X_test[variable],
                              'y': X_test[y],
                              'pred_0': X_test[pred_0]})
    X_test.loc[:, feature_name] = calculate_average(sub1, sub2)
def normalize_high_cordiality_data():
    """Add smoothed target-encoding columns for the high-cardinality id features.

    For each of building_id / manager_id, encodes the 'medium' and 'high'
    targets (in that order) via categorical_average, creating columns named
    '<id>_mean_medium' and '<id>_mean_high'.
    """
    for column in ("building_id", "manager_id"):
        for level in ("medium", "high"):
            categorical_average(column, level, "pred0_" + level,
                                column + "_mean_" + level)
def transform_categorical_data():
    """Label-encode the id/address columns of X_train and X_test in place.

    Each encoder is fitted on the union of both splits so train and test
    share one consistent integer vocabulary per column.
    """
    for col in ('building_id', 'manager_id', 'display_address', 'street_address'):
        enc = LabelEncoder()
        enc.fit(list(X_train[col]) + list(X_test[col]))
        X_train[col] = enc.transform(X_train[col].ravel())
        X_test[col] = enc.transform(X_test[col].ravel())
def remove_columns(X):
    """Drop raw / leak-prone columns from DataFrame *X* in place.

    Removes the unengineered photo/description columns, the per-class dummy
    and prior columns, the target itself and the raw timestamp, leaving only
    model-ready features.  Raises KeyError if any column is missing, matching
    the previous per-column ``del`` behavior.
    """
    columns = ["photos", "pred0_high", "pred0_low", "pred0_medium",
               "description", "low", "medium", "high",
               "interest_level", "created"]
    # One vectorized drop instead of a del-loop (same KeyError contract).
    X.drop(columns, axis=1, inplace=True)
print("Normalizing high cordiality data...")
# Target-encode the high-cardinality ids, label-encode the categorical
# columns, then drop raw/leaky columns before training.
normalize_high_cordiality_data()
transform_categorical_data()
remove_columns(X_train)
remove_columns(X_test)
print("Start fitting...")
# XGBoost configuration for 3-class softmax probabilities.
param = {}
param['objective'] = 'multi:softprob'  # emit one probability per class
param['eta'] = 0.02  # learning rate (small eta pairs with many rounds)
param['max_depth'] = 4
param['silent'] = 0
param['num_class'] = 3  # low / medium / high
param['eval_metric'] = "mlogloss"
param['min_child_weight'] = 1
param['subsample'] = 0.7  # row subsampling per tree
param['colsample_bytree'] = 0.7  # column subsampling per tree
param['seed'] = 321
param['nthread'] = 8
num_rounds = 2000
xgtrain = xgb.DMatrix(X_train, label=y)
clf = xgb.train(param, xgtrain, num_rounds)
# Optional train-set scoring kept for debugging; enable if needed.
#pred_train = clf.predict(xgtrain)
#train_sub = pd.DataFrame(data={'listing_id': X_train['listing_id'].ravel()})
#train_sub['low'] = pred_train[:, 0]
#train_sub['medium'] = pred_train[:, 1]
#train_sub['high'] = pred_train[:, 2]
print("Fitted")
def prepare_submission(model):
    """Score X_test with *model* and write submission.csv (listing_id + class probs)."""
    probabilities = model.predict(xgb.DMatrix(X_test))
    submission = pd.DataFrame(data={'listing_id': X_test['listing_id'].ravel()})
    # Column order in predict() output is (low, medium, high).
    for idx, level in enumerate(('low', 'medium', 'high')):
        submission[level] = probabilities[:, idx]
    submission.to_csv("submission.csv", index=False, header=True)
#prepare_submission(clf)
#check_importance = pd.DataFrame(list(zip(list(dict_yh.keys()), list(dict_yh.values()))), columns=["key","value"])
#check_importance.sort("value") | [
"hangyuan@umich.edu"
] | hangyuan@umich.edu |
21610adcf332d720d04f4d26788b6caca4289ec7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/200/usersdata/273/81828/submittedfiles/al15.py | 1656fb72d68d08049b3e4bfbe2bfaff5a11427c5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | # -*- coding: utf-8 -*
if numero*0.5=(numero%100) + numero//100:
print(numero)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
01d0f066ebccfbcc3429bb92eb4c58c7288e5c33 | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/beyondcorp/v1alpha/client_gateway_iam_member.py | e411f2bc786043e5ec3c549e1882babd6062d57d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 11,706 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import iam as _iam
__all__ = ['ClientGatewayIamMemberArgs', 'ClientGatewayIamMember']
@pulumi.input_type
class ClientGatewayIamMemberArgs:
    # Auto-generated by the Pulumi SDK Generator.  The @pulumi.input_type
    # decorator introspects the properties below, so property names must stay
    # in sync with the provider schema — prefer regenerating over hand-editing.
    def __init__(__self__, *,
                 member: pulumi.Input[str],
                 name: pulumi.Input[str],
                 role: pulumi.Input[str],
                 condition: Optional[pulumi.Input['_iam.v1.ConditionArgs']] = None):
        """
        The set of arguments for constructing a ClientGatewayIamMember resource.
        :param pulumi.Input[str] member: Identity that will be granted the privilege in role. The entry can have one of the following values:
               * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
               * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
               * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
               * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        :param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
        :param pulumi.Input[str] role: The role that should be applied.
        :param pulumi.Input['_iam.v1.ConditionArgs'] condition: An IAM Condition for a given binding.
        """
        pulumi.set(__self__, "member", member)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "role", role)
        # condition is optional; only store it when supplied.
        if condition is not None:
            pulumi.set(__self__, "condition", condition)

    @property
    @pulumi.getter
    def member(self) -> pulumi.Input[str]:
        """
        Identity that will be granted the privilege in role. The entry can have one of the following values:
        * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
        * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
        * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
        * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        """
        return pulumi.get(self, "member")

    @member.setter
    def member(self, value: pulumi.Input[str]):
        pulumi.set(self, "member", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the resource to manage IAM policies for.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def role(self) -> pulumi.Input[str]:
        """
        The role that should be applied.
        """
        return pulumi.get(self, "role")

    @role.setter
    def role(self, value: pulumi.Input[str]):
        pulumi.set(self, "role", value)

    @property
    @pulumi.getter
    def condition(self) -> Optional[pulumi.Input['_iam.v1.ConditionArgs']]:
        """
        An IAM Condition for a given binding.
        """
        return pulumi.get(self, "condition")

    @condition.setter
    def condition(self, value: Optional[pulumi.Input['_iam.v1.ConditionArgs']]):
        pulumi.set(self, "condition", value)
class ClientGatewayIamMember(pulumi.CustomResource):
    """IAM member binding for a BeyondCorp client gateway (beyondcorp/v1alpha).

    Auto-generated by the Pulumi SDK Generator; prefer regenerating over
    hand-editing.  Sets the access control policy on the specified resource.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
                 member: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 role: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']] condition: An IAM Condition for a given binding.
        :param pulumi.Input[str] member: Identity that will be granted the privilege in role. The entry can have one of the following values:
               * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
               * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
               * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
               * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        :param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
        :param pulumi.Input[str] role: The role that should be applied.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ClientGatewayIamMemberArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.

        :param str resource_name: The name of the resource.
        :param ClientGatewayIamMemberArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher: accept either the Args-object form or keyword form and
        # forward both to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ClientGatewayIamMemberArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
                       member: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       role: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating a new resource: __props__ must not be supplied and the
            # required inputs are validated (unless adopting via opts.urn).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ClientGatewayIamMemberArgs.__new__(ClientGatewayIamMemberArgs)

            __props__.__dict__["condition"] = condition
            if member is None and not opts.urn:
                raise TypeError("Missing required property 'member'")
            __props__.__dict__["member"] = member
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            if role is None and not opts.urn:
                raise TypeError("Missing required property 'role'")
            __props__.__dict__["role"] = role
            # Output-only properties start unset.
            __props__.__dict__["etag"] = None
            __props__.__dict__["project"] = None
        super(ClientGatewayIamMember, __self__).__init__(
            'google-native:beyondcorp/v1alpha:ClientGatewayIamMember',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ClientGatewayIamMember':
        """
        Get an existing ClientGatewayIamMember resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are populated from the provider during the lookup.
        __props__ = ClientGatewayIamMemberArgs.__new__(ClientGatewayIamMemberArgs)

        __props__.__dict__["condition"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["member"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["project"] = None
        __props__.__dict__["role"] = None
        return ClientGatewayIamMember(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def condition(self) -> pulumi.Output[Optional['_iam.v1.outputs.Condition']]:
        """
        An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
        """
        return pulumi.get(self, "condition")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        The etag of the resource's IAM policy.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def member(self) -> pulumi.Output[str]:
        """
        Identity that will be granted the privilege in role. The entry can have one of the following values:
        * user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
        * serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
        * group:{emailid}: An email address that represents a Google group. For example, admins@example.com.
        * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        """
        return pulumi.get(self, "member")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource to manage IAM policies for.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The project in which the resource belongs. If it is not provided, a default will be supplied.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter
    def role(self) -> pulumi.Output[str]:
        """
        The role that should be applied.
        """
        return pulumi.get(self, "role")
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
c73ec83d2bc16f0e985a6026dd20b6c6936d08f1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/2212.py | d949b82062698cadca5cf074e35b0245522ff71b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | test_input1 = 'ejp mysljylc kd kxveddknmc re jsicpdrysi'
# Google Code Jam 2012 "Speaking in Tongues" (Python 2 script).
# The substitution cipher is reconstructed from the three sample cases of the
# problem statement; test_input1/test_output1 are defined just above.
test_input2 = 'rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd'
test_input3 = 'de kr kd eoya kw aej tysr re ujdr lkgc jv'
test_output1 = 'our language is impossible to understand'
test_output2 = 'there are twenty six factorial possibilities'
test_output3 = 'so it is okay if you want to just give up'
# Build the garbled-char -> plain-char translation table from the samples.
mapping = {}
for (x, y) in zip(test_input1, test_output1):
    mapping[x] = y
for (x, y) in zip(test_input2, test_output2):
    mapping[x] = y
for (x, y) in zip(test_input3, test_output3):
    mapping[x] = y
# 'q' and 'z' never occur in the samples; the puzzle implies they swap.
mapping['q'] = 'z'
mapping['z'] = 'q'
# First stdin line: number of test cases; then one garbled line per case.
ntc = int(raw_input())
for i in xrange(0, ntc):
    sentence = list(raw_input())
    for j in xrange(0, len(sentence)):
        sentence[j] = mapping[sentence[j]]
    print 'Case #%d: %s'%(i+1, "".join(sentence))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a60b1660c9c9d05518519c0bc81ac5231a8c424b | 32876216bace45f4355f8025ef4982cacce21456 | /esmigrate/internals/script_parser.py | 0c508cc02e6d2dac9787248e73039a5d4de3f02a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mehdirahman88/elastic-migrate | cbc16cb260ea2be04c7d177df0de0bc384d53498 | c097c9672dca4d407b755a5d9262f0f7f661f980 | refs/heads/main | 2023-07-24T11:30:45.188139 | 2021-07-13T19:01:52 | 2021-07-13T19:01:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,403 | py | # -*- coding: utf-8 -*-
import re
from urllib.parse import urlparse
from esmigrate.commons import (
Command,
is_valid_json,
JSON_HEADER,
is_valid_ndjson,
NDJSON_HEADER,
http_verbs,
)
from esmigrate.commons.helpers import construct_path
from esmigrate.contexts import ContextConfig
from esmigrate.exceptions import (
InvalidCommandScriptError,
InvalidCommandVerbError,
ContextObjectNotSetError,
InvalidCommandPathError,
InvalidCommandBodyError,
)
class ScriptParser(object):
    """Parses a migration script into a stream of HTTP ``Command`` objects.

    A script is a sequence of non-blank lines; a command line starts with an
    HTTP verb followed by a relative path, and may be followed by a JSON or
    NDJSON body extending until the next command line.
    """

    def __init__(self, ctx: ContextConfig = None):
        # Execution context (provides es_host); may be injected later.
        self._ctx = ctx
        # Matches "<VERB> <rest-of-line>" at line start, case-insensitively.
        self._pattern = re.compile(rf"^({'|'.join(http_verbs)})\s+(.*)$", re.M | re.I)

    def init_ctx(self, ctx: ContextConfig):
        """Late-bind the execution context."""
        self._ctx = ctx

    def get_commands(self, script_text: str):
        """Yield one Command per verb/path(+optional body) group in *script_text*.

        Raises ContextObjectNotSetError, InvalidCommandScriptError,
        InvalidCommandVerbError, InvalidCommandPathError or
        InvalidCommandBodyError on malformed input.
        """
        if self._ctx is None:
            raise ContextObjectNotSetError("Context was not set")
        # Strip whitespace and discard blank lines up front.
        stripped_lines = [line.strip() for line in script_text.split("\n") if len(line.strip()) > 0]
        # Indices of lines that begin a new command.
        occurs = [idx for idx, line in enumerate(stripped_lines) if self._pattern.match(line)]
        if len(occurs) == 0 or occurs[0] != 0:
            # The very first non-blank line must be a command.
            raise InvalidCommandScriptError(f"Unexpected command found: {stripped_lines[0].split()[0]}")
        # Sentinel index so the last command's body slice reaches the end.
        occurs.append(len(stripped_lines))
        for idx in range(len(occurs) - 1):
            cmdline = occurs[idx]
            m = self._pattern.match(stripped_lines[cmdline])
            verb, path = m.group(1).strip(), m.group(2).strip()
            # NOTE(review): the regex matches case-insensitively but this
            # membership check is case-sensitive against http_verbs — a
            # lower-case verb would match above yet be rejected here; confirm
            # that is intended.
            if verb not in http_verbs:
                raise InvalidCommandVerbError(f"Unexpected verb found: {verb}")
            # Only relative paths are allowed; absolute URLs are an error.
            parsed_path = urlparse(path)
            if parsed_path.scheme or parsed_path.netloc:
                raise InvalidCommandPathError(f"Unexpected URL scheme found: {path}")
            path = construct_path(self._ctx.es_host, path)
            cmdnext = occurs[idx + 1]
            if cmdline + 1 >= cmdnext:
                # No body lines between this command and the next.
                body, head = None, None
            else:
                body = "\n".join(stripped_lines[cmdline + 1 : cmdnext])
                # Choose the Content-Type header from the body's format.
                if is_valid_json(body):
                    head = JSON_HEADER
                elif is_valid_ndjson(body):
                    head = NDJSON_HEADER
                else:
                    raise InvalidCommandBodyError(f"Expected a {JSON_HEADER} or {NDJSON_HEADER} body")
            yield Command(verb, path, body, head)
| [
"zobayer1@gmail.com"
] | zobayer1@gmail.com |
f3eaa01fc3b08c4f71d713dd3d283df3717cf87b | 561c43f03b37ce246debc945cfbf72b56c1194d1 | /utils/tree.py | b2a6674303f973dec7a390c6975713747971a3f8 | [] | no_license | MatienkoAndrew/expert-system | 4cbc544f45da5e9f6482519123eebfebe0f8c846 | 1cba50df5e490497de7855c82a96f4d74a65c596 | refs/heads/main | 2023-02-12T13:28:28.639027 | 2021-01-12T12:10:33 | 2021-01-12T12:10:33 | 328,449,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | import re
from .node import ConnectorType, ConnectorNode, AtomNode, NegativeNode
from .rpn_parser import ImplyType, OPERATORS
from .color import Color
LST_OP = {'+': ConnectorType.AND, '|': ConnectorType.OR, '^': ConnectorType.XOR}
class Validation:
    """Pairs the two sides of an implication so it can be re-checked later."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __repr__(self):
        return '.left {} .right {}'.format(self.left, self.right)

    def validate(self):
        """Raise if the implication is violated (antecedent True, consequent False)."""
        antecedent = self.left.decide()
        consequent = self.right.decide()
        if antecedent is True and consequent is False:
            raise BaseException(f'{Color.WARNING}ERROR: {self} (True => False) is invalid{Color.END}')
class Tree:
    """Holds the atoms and connector nodes of an inference graph.

    NOTE(review): node semantics (decide(), add_operand(), state_fixed)
    live in the .node module — comments below describe only what this file
    itself shows.
    """

    def __init__(self):
        self.atoms = {}  # atom name -> AtomNode
        self.connectors = []  # connector nodes created while parsing rules
        self.valid_data = []  # Validation pairs re-checked after each query
        # Root AND-connector every atom gets attached to.
        self.root_node = ConnectorNode(ConnectorType.AND, self)
        self.root_node.is_root = True

    def init_atom(self, atom_name: str):
        """Return the AtomNode for *atom_name*, creating and registering it on first use."""
        atom = self.atoms.get(atom_name)
        if atom is None:
            atom = AtomNode(atom_name, self)
            self.atoms[atom_name] = atom
            self.root_node.add_operand(atom)
        return atom

    def create_connect(self, type1: ConnectorType):
        """Create a fresh connector node of the given type (caller wires it up)."""
        return ConnectorNode(type1, self)

    def set_atom_state(self, atom_name: str, value):
        """Set the truth state of a known atom; a True value is also pinned as fixed."""
        atom = self.atoms.get(atom_name)
        if atom is None:
            raise BaseException(f"{Color.WARNING}{atom_name} doesn't have any known atom{Color.END}")
        atom.state = value
        if value is True:
            atom.state_fixed = True

    def decide_query(self, query: str):
        """Resolve one queried atom to True/False and re-validate all implications.

        An undecidable atom (decide() returning None) is forced to False.
        """
        atom = self.atoms.get(query)
        if atom is None:
            raise BaseException(f"{Color.WARNING}[ERROR] The query {query} doesn't know any atoms{Color.END}")
        res = atom.decide()
        if res is None:
            atom.set_state(False, True)
            res = False
        self.check_validation()
        return res

    def check_validation(self):
        """Run every recorded Validation; raises if any implication is violated."""
        for valid in self.valid_data:
            valid.validate()
class RPNTree(Tree):
    """Tree built from rules already converted to reverse-polish notation.

    Construction wires up the atoms, the initial fact states and the
    implication connectors in one pass.
    """

    def __init__(self, atoms: list, rpn_rules: list, facts: list):
        super(RPNTree, self).__init__()
        self.init_atoms_list(atoms)
        self.set_atoms_state(rpn_rules, facts)
        self.set_relations(rpn_rules)

    def init_atoms_list(self, atoms: list):
        """Create (and register) an AtomNode for every atom name."""
        for atom in atoms:
            self.atoms[atom] = self.init_atom(atom)

    def set_atoms_state(self, rpn_rules: list, facts: list):
        """Mark conclusion atoms as undetermined (None), then given facts as True."""
        conclusion_atoms = []
        for atom in rpn_rules:
            conclusion_atoms += re.findall(r'[A-Z]', atom.right)
            # An equivalence rule can conclude in both directions.
            if atom.type is ImplyType.EQUAL:
                conclusion_atoms += re.findall(r'[A-Z]', atom.left)
        conclusion_atoms = list(set(conclusion_atoms))
        for atom in conclusion_atoms:
            self.set_atom_state(atom, None)
        for atom in facts:
            self.set_atom_state(atom, True)

    def set_relations(self, rpn_rules: list):
        """Attach IMPLY connectors (both directions for EQUAL rules) and record Validations."""
        # BUG FIX: was `if len(self.atoms) is 0:` — identity comparison with
        # an int literal is implementation-defined (SyntaxWarning on 3.8+);
        # use an emptiness test instead.
        if not self.atoms:
            raise BaseException(f'{Color.WARNING}Tree is empty{Color.END}')
        for rule in rpn_rules:
            left = self.get_relations(rule.left)
            right = self.get_relations(rule.right)
            imply_connect = self.create_connect(ConnectorType.IMPLY)
            right.add_child(imply_connect)
            imply_connect.add_operand(left)
            self.valid_data.append(Validation(left, right))
            if rule.type is ImplyType.EQUAL:
                # Equivalence: also wire the reverse implication.
                imply_connect1 = self.create_connect(ConnectorType.IMPLY)
                left.add_child(imply_connect1)
                imply_connect1.add_operand(right)
                self.valid_data.append(Validation(right, left))

    def get_relations(self, rule: str):
        """Fold one RPN rule string into a node graph and return its root node.

        Consecutive operands of the same associative operator are merged into
        a single n-ary connector instead of a chain of binary ones.
        """
        stack = []
        for ch in rule:
            if ch not in OPERATORS:
                stack.append(self.atoms[ch])
            elif ch == '!':
                # Negation wraps the node currently on top of the stack.
                child = stack.pop()
                connector_not = NegativeNode(child)
                child.operand_parents.append(connector_not)
                stack.append(connector_not)
            else:
                pop0 = stack.pop()
                pop1 = stack.pop()
                if isinstance(pop0, ConnectorNode) and pop0.type is LST_OP[ch]:
                    # Same-type connector on top: fold the operand in and
                    # drop the stale tail entry (re-appended below).
                    pop0.add_operand(pop1)
                    new_connector = pop0
                    self.connectors.pop()
                elif isinstance(pop1, ConnectorNode) and pop1.type is LST_OP[ch]:
                    pop1.add_operand(pop0)
                    new_connector = pop1
                    self.connectors.pop()
                else:
                    connector_ch = self.create_connect(LST_OP[ch])
                    connector_ch.add_operands([pop0, pop1])
                    new_connector = connector_ch
                self.connectors.append(new_connector)
                stack.append(new_connector)
        return stack.pop()
| [
"noreply@github.com"
] | MatienkoAndrew.noreply@github.com |
20fcd613d8ca052621c0bbc42237785874523141 | 5bf1f84c0e6488411b0e259b458899bbc2ea056e | /analysis_vis/scripts/PlotFracShells.py | f4174f322dcd62c1a0e52e4d5397a102b523c643 | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | arubenstein/deep_seq | 793857a497420daab4703624e1b873f5feea19cc | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | refs/heads/master | 2020-05-21T19:10:39.970685 | 2018-07-31T18:07:45 | 2018-07-31T18:07:45 | 60,021,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | #!/usr/bin/env python
"""bar plot to plot fraction shell cleaved"""
from plot import conv as pconv
from plot import bar
from general_seq import seq_IO
import argparse
import numpy as np
def main(sequence_ratio_file, width, height, pattern, legend):
    """Plot the median fraction cleaved per shell as a bar chart with error bars.

    sequence_ratio_file -- input text file; each row is a sequence followed by
                           one value per shell (column 0 is the sequence).
    width, height       -- figure dimensions forwarded to pconv.save_fig.
    pattern, legend     -- accepted for CLI compatibility; currently unused.
    """
    sequences = seq_IO.read_sequences(sequence_ratio_file, additional_params=True)
    # Transpose the per-sequence rows into one value-list per shell
    # (skipping column 0, which holds the sequence itself).
    shell_data = []
    for shell in range(1, len(sequences[0])):
        shell_data.append([seq[shell] for seq in sequences])
    avg = []
    std = []
    for sd in shell_data:
        avg.append(np.median(sd))
        std.append(np.std(sd))
    # BUG FIX: tick labels were hard-coded to xrange(1, 4) (exactly 3 shells);
    # label one tick per shell actually present in the data.
    label = range(1, len(shell_data) + 1)
    err = std
    fig, axarr = pconv.create_ax(1, 1, shx=True, shy=True)
    # NOTE(review): the color list still assumes at most 3 shells — confirm
    # how bar.draw_actual_plot cycles colors before feeding more shells.
    bar.draw_actual_plot(axarr[0, 0], avg, ['lightsteelblue', 'lightblue', 'darkgray'],
                         "", "Shell", "Fraction Cleaved", tick_label=label, yerr=err)
    pconv.save_fig(fig, sequence_ratio_file, "plot", width, height, tight=True, size=10)
if __name__ == "__main__":
    # Command-line entry point; every option is optional.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument ('--sequence_ratio_file', help="Text file that contains name of sequences, avg ratio, std. and label.")
    # NOTE(review): width/height arrive as strings (or None) and are passed
    # to pconv.save_fig unconverted — confirm save_fig handles that.
    parser.add_argument ('--width')
    parser.add_argument ('--height')
    # pattern/legend are parsed but currently unused by main().
    parser.add_argument ('--pattern', action='store_true', default=False)
    parser.add_argument ('--legend', action='store_true', default=False)

    args = parser.parse_args()

    main(args.sequence_ratio_file, args.width, args.height, args.pattern, args.legend)
| [
"aliza.rubenstein@gmail.com"
] | aliza.rubenstein@gmail.com |
78f88c2bfb07503a16ff2fcf20e54fc5ed3d0790 | 470b8b6915abbaed3ddf3835934cf98c5220053b | /GHubScraper/solution/main.py | 7fc3eaa6e887cbca6410d1d69028ddcbc99761e9 | [] | no_license | daveyo89/github_scraper | fdedc0b378cdbbd25af81a312c4e4f95b4977e26 | 850e16a550b20ab6d3b7cea67ed5047396038171 | refs/heads/master | 2022-12-13T14:32:21.232563 | 2019-02-23T17:03:03 | 2019-02-23T17:03:03 | 168,829,507 | 1 | 0 | null | 2022-12-08T01:34:57 | 2019-02-02T12:29:12 | Python | UTF-8 | Python | false | false | 8,693 | py | import bs4 as bs
import urllib.request
import requests
import csv
import traceback
from tqdm import tqdm
import time
import argparse
import os
import sys
"""The code is heavily commented due to it being a "homework" and all. If something is still not clear, don't worry,
it is obviously my fault. For manual testing besides the default github scraping I used mine, I encourage you,
to try it on yours as well. If I did well here, it is harder to produce an error than a result.
Enjoy! """
class Main:
    """Scrapes the public repository list of a GitHub user into a CSV file.

    Resolution order for username/output file: command-line args (module-level
    ``args`` dict) first, then constructor parameters, then defaults.
    NOTE(review): the class reads the module-level ``ap`` and ``args`` objects
    created in the ``while True:`` block *below* this class — Main() must only
    be instantiated after that block has run.
    """

    # Get number of pages at initialisation (was not part of task, but no errors with valid github page.)
    # I know __init__ is a bit too long like this, but I choose this solution to make the class ready-to-use as soon as
    # it is instantiated.
    # If you don't want to use defaults, give github username to url parameter like so: Main(url="githubusername")
    # or url=https://github.com/githubusername.
    def __init__(self, username="github", output_file="results.csv"):
        """Resolve username, output filename and results directory.

        NOTE(review): if *username* is None/short AND args['name'] stays None
        when running as a script, self.username is never assigned and later
        access raises AttributeError — confirm whether that path is reachable.
        """
        if username is not None and len(str(username)) > 1:
            self.username = username.strip()
        # Check if user comes from python console:
        if ap.prog != os.path.basename(__file__) and args['name'] is None:
            args['name'] = "github"
            # On win cmd this puts the results to solution/GHubScraper/results/
            self.file_path = os.getcwd() + "/GHubScraper/solution/results/"
        elif ap.prog != os.path.basename(__file__):
            self.file_path = os.getcwd() + "/GHubScraper/solution/results/"
        else:
            self.file_path = os.getcwd() + "/results/"
        # If we have a name from argparse or default.
        if args['name'] is not None:
            self.username = args['name'].strip()
        # Get arg or default.
        if args['file'] is not None:
            self.output_file = args['file'].strip()
        else:
            self.output_file = output_file
        # Init total pages here to silence minor errors on the side.
        self.total_pages = 1

    def __str__(self):
        # In case someone is deeply curious.
        return f"Getting info from the repositories of this user: {str(self.username)}, saving them to " \
               f"{str(self.output_file)}"

    def __repr__(self):
        return f"Default user: github\nCurrent user: {str(self.username)}\nCurrent target filename: {self.output_file}"

    def check_url(self):
        """Return a valid ``https://github.com/<user>?page=1`` URL.

        Prompts interactively until the profile exists (HTTP status < 300).
        Side effect: self.username is repurposed to hold the URL minus its
        trailing page number, so later code can append page indices.
        """
        url = f"https://github.com/{str(self.username)}?page=1"
        # In case of a non-existent user or a bad url we ask for new input.
        if self.username is None or requests.head(url).status_code >= 300:
            while requests.head(url).status_code >= 300:
                # Get new username by input.
                new_username = str(input("Enter valid username or leave blank for default. (github/github)")).strip()
                if new_username == "":
                    new_username = "github"
                url = "https://github.com/" + new_username + "?page=1"
            # Preparing multi page compatibility with given username.
            self.username = url[:-1]
            return url
        else:
            # Preparing multi page comp. with url.
            self.username = url[:-1]
            return url

    def get_first_page(self):
        """Probe page 1: detect total page count, prepare the results CSV.

        Creates the results directory, (re)writes the output file with the
        header row, and prints a short status summary.
        """
        # Url we get back will surely be valid github url.
        url = self.check_url()
        output_file = self.output_file
        # Read url.
        sauce = urllib.request.urlopen(url).read()
        # Couldn't think of a better, or more efficient way to get max page number,
        # however task was only for github/github so it could've been 10.
        total_pages = bs.BeautifulSoup(sauce, 'html.parser').find('em', {'data-total-pages': [int]})
        if total_pages is not None:
            # If target user has less than two pages of repositories, data-total-pages will be non-existent in html.
            self.total_pages = int(total_pages.get('data-total-pages'))
        else:
            # Github pages work with github/"username?page=1" even if there is only 1 page.
            self.total_pages = 1
        # Make sure output file has .csv extension.
        if self.output_file[-4:] != ".csv":
            self.output_file += ".csv"
        os.makedirs(self.file_path, exist_ok=True)
        # Excessive use of utf-8 encoding prevents encoding errors.
        # Also had to change pycharm default encoding settings to utf-8, but it's a windows only thing as far as I know.
        with open(f"{self.file_path}{self.output_file}", 'w', encoding="utf-8", newline='') as f:
            wr = csv.writer(f)
            # Make sure we delete existing results plus adding column names in first row.
            wr.writerow(["name", "description", "language", "tags"])
        print("\nGit scraper initialised, ready for duty!")
        print(f"URL: {url}\nTotal pages: {self.total_pages}\nOutput filename: {output_file}\n")
        # Little time so the user can read printed info.
        time.sleep(1.5)

    def scraping(self):
        """Iterate every repository page and append one CSV row per repository."""
        print("Scraping sequence activated...\n")
        # Progress bar because it's fancy and useful. I Love Progressbar.
        for i in tqdm(range(self.total_pages)):
            if self.total_pages == 1:
                # Take away ?page= from url to replace it with repositories seen below.
                url = self.username[:-5] + "tab=repositories"
            else:
                # Or adding ?page=n to url.
                url = self.username + f"{str(i + 1)}"
            # Scraping starts here with the prepared url.
            sauce = urllib.request.urlopen(url).read()
            soup = bs.BeautifulSoup(sauce, 'html.parser')
            # Get all repository blocks and iterate by divs.
            for div in soup.find_all('li', {'itemprop': ['owns']}):
                try:
                    tags = []
                    # In each div we look for the info we need.
                    repo_name = div.find('a', {'itemprop': ['name codeRepository']}).text.strip()
                    for tag in div.find_all('a', {'class': ['topic-tag topic-tag-link f6 my-1']}):
                        if tag is not None:
                            tags.append(tag.text.strip())
                        else:
                            tags.append(" ")
                    repo_desc = div.find('p', {'itemprop': ['description']})
                    if repo_desc is not None:
                        repo_desc = repo_desc.text.strip()
                    else:
                        repo_desc = " "
                    repo_lang = div.find('span', {'itemprop': ['programmingLanguage']})
                    if repo_lang is not None:
                        repo_lang = repo_lang.text.strip()
                    else:
                        repo_lang = " "
                    # Put findings in a list.
                    results = [repo_name, repo_desc, repo_lang, tags]
                    # Then write it as a line.
                    with open(f"{self.file_path}{self.output_file}", 'a', encoding="utf-8", newline='') as f:
                        writer = csv.writer(f)
                        writer.writerow(results)
                    # Using 'with' statement makes sure we close resources after use.
                    # Uncomment to see data flowing in on console.
                    # print(
                    #     f"\n Repo name: {repo_name}\n Short description: {repo_desc}\n"
                    #     f" Programming Language: {repo_lang}\n Tags: {tags}\n")
                except (UnicodeEncodeError, TypeError) as e:
                    # NOTE(review): traceback.print_exc takes a *limit*, not the
                    # exception object — this call is likely meant to be
                    # traceback.print_exc(); confirm before relying on it.
                    traceback.print_exc(e)
                    continue
# Script driver: parse CLI args, then run one full scrape and exit.
# (The while/break wrapper only runs once; it exists so the body could be
# re-armed for repeated runs without restructuring.)
while True:
    # Argparse catches given arguments within cmd/terminal, also "ap.prog" will equal given parameter from python shell,
    # making it losing its name "main". I couldn't find an elegant workaround yet, so I use it now as an indicator
    # that someone is trying to give arguments through shell commands.. Please contact me if you have a solution.
    ap = argparse.ArgumentParser()
    ap.add_argument("-n", "--name", required=False,
                    help="Github username.")
    ap.add_argument("-f", "--file", required=False,
                    help="Output filename.")
    # parse_known_args so stray interpreter arguments don't abort the run.
    args, unknown = ap.parse_known_args()
    args = vars(args)
    # args = vars(ap.parse_args())
    # input("Press Enter to start David's github scraper : ")
    print("Starting scraper....\n")
    time.sleep(2.0)
    # Main() reads the ap/args globals created just above.
    main = Main()
    main.get_first_page()
    main.scraping()
    print(f"\nScraping finished, please find your results in \"{main.output_file}\"")
    time.sleep(1.5)
    break
| [
"j.tothdavid@gmail.com"
] | j.tothdavid@gmail.com |
bc8469924be1f68ac9d4b7d73aa1a8558c5923b6 | 5d639e4743ad3b958b0134aef58e0781f696bcf5 | /SQRDSUB.py | 7a04a887c0800c792bb972c95a73d586eed6e7a2 | [] | no_license | amberbhanarkar/CompetitiveProgramming | 5601cf9734dedb1a2a80d37fcf93a810ac038ccd | 6e516f05d8d4a9ca946734927d043af43af42798 | refs/heads/master | 2021-07-09T10:43:32.802055 | 2020-10-10T10:20:26 | 2020-10-10T10:20:26 | 190,732,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | '''import math
T = int(input())
for i in range(0,T):
N = int(input())
A = list(map(int,input().split()))
lis = ''.join(map(str, A))
sz = len(lis)
cnt = 0
for i in range(0,sz):
product = 1
for j in range(i,sz):
product *= ord(lis[j])-ord('0')
if product%4!=2:
cnt+=1
print(cnt)
'''
import itertools
def allSubArrays(xs):
    """Yield every contiguous, non-empty sub-list of xs.

    Each (start, stop) index pair with start < stop produces one slice, so
    sub-arrays are yielded ordered by start index, then by length.
    """
    bounds = range(len(xs) + 1)
    for start, stop in itertools.combinations(bounds, 2):
        yield xs[start:stop]


a = list(allSubArrays([2, 3, 4, 5]))
| [
"noreply@github.com"
] | amberbhanarkar.noreply@github.com |
35e250ddb36f9bda71a9edb9402cff3dc7b06ecd | 1b9075ffea7d4b846d42981b41be44238c371202 | /tags/2007-EOL/applications/multimedia/xsane/actions.py | a5dcf88f3f4b48317cf764f6179f90f66eb3cf6d | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    """Patch the lcms include path and run ./configure for the xsane build."""
    # Rewrite the include so the header is found under lcms/ — presumably
    # matching this distribution's lcms packaging layout; TODO confirm.
    pisitools.dosed("src/xsane.h", "# include \"lcms.h\"", "# include \"lcms/lcms.h\"")
    # Compiler/linker must also be pointed at the relocated lcms files
    # before configure runs, so the feature test for lcms succeeds.
    shelltools.export("CXXFLAGS", "%s -I/usr/include/lcms" % get.CXXFLAGS())
    shelltools.export("LDFLAGS", "%s -L/usr/lib -llcms" % get.LDFLAGS())
    autotools.configure("--enable-gtk2 \
                        --enable-nls \
                        --enable-jpeg \
                        --enable-png \
                        --enable-tiff \
                        --enable-gimp \
                        --enable-lcms \
                        --disable-sanetest \
                        --disable-gimptest \
                        --disable-gtktest")
def build():
    """Compile xsane with the default make target."""
    autotools.make()
def install():
    """Install xsane into the package image and register it as a GIMP plug-in."""
    autotools.install()
    # Make xsane symlink. Now, it is seen as a plugin in gimp.
    pisitools.dosym("/usr/bin/xsane", "/usr/lib/gimp/2.0/plug-ins/xsane")
    pisitools.dodoc("xsane.*")
    # upstream installs an empty/unwanted sbin dir; drop it from the package
    pisitools.removeDir("/usr/sbin")
| [
"turkay.eren@gmail.com"
] | turkay.eren@gmail.com |
dfacb1524091b4aea9b2999d26309ec4c9b6d79b | eb3150b60206484398e0fe479d8bbff9794b8565 | /Task_2.py | ffac63a2684022ae8210890e20f2e790e64362f8 | [] | no_license | ABROLAB/Basecamp-Technical-Tasks | f0d67b436f76a36354bef85167f596e7bc7b9319 | 0beacf0b4857581e0a82525681d44a4e1102424a | refs/heads/main | 2023-06-23T18:57:39.509833 | 2021-07-20T22:59:28 | 2021-07-20T22:59:28 | 387,595,189 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | '''
Write a function that accepts a positive integer
and determines if it is a prime number. The
function should return true if it is a prime
number or false if it isn’t.
'''
def Determine_if_Prime():
    """Read a positive integer from stdin and return True iff it is prime.

    Returns:
        bool: True for primes (2, 3, 5, ...), False for 0, 1 and composites.

    Bug fixed: the original iterated ``range(2, n // 2 + 1)``, which is empty
    for n == 3, so 3 (a prime) was reported as False.  Trial division now
    runs up to sqrt(n), which is both correct and faster.
    """
    input_num = int(input("Enter your value: "))
    if input_num < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    if input_num == 2:
        return True
    if input_num % 2 == 0:
        # Even numbers > 2 are composite; lets the loop test odds only.
        return False
    for candidate in range(3, int(input_num ** 0.5) + 1, 2):
        if input_num % candidate == 0:
            return False
    return True
# Demo driver: prompt once and print the primality verdict.
print("Will display True if number is prime else false")
Result = Determine_if_Prime()
print(Result)
"noreply@github.com"
] | ABROLAB.noreply@github.com |
1697c0111932a0c9cad342f698ed370b0c72284d | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/histogram/_outsidetextfont.py | 91c57eed75e3073c405ed483e18e2d95722ed640 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 1,566 | py | import _plotly_utils.basevalidators
class OutsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `histogram.outsidetextfont` compound property.

    Appears to be code-generated (standard Plotly validator shape); if so,
    changes belong in the generator, not here.  `data_docs` is consumed at
    runtime to build user-facing help text, so its content must not change.
    """

    def __init__(
        self, plotly_name="outsidetextfont", parent_name="histogram", **kwargs
    ):
        super(OutsidetextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the graph-object class this validator coerces values into.
            data_class_str=kwargs.pop("data_class_str", "Outsidetextfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            size

""",
            ),
            **kwargs
        )
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
28ed6a07f4024bbe8bc6f8f5b3dd52d0fda40afa | 37d1d3db1327f7f5da4551143a306d037ea2a836 | /app.py | 947690e15aea2e111eac1e161a01cf42c1c1e6d0 | [] | no_license | webclinic017/Option-Payoff | e3a738779a736b4fe7578761be0f27e54bafe12b | 6b0fd3993490a30432966b84ba881ae27919fd08 | refs/heads/master | 2023-03-25T17:20:45.228452 | 2021-03-11T19:10:54 | 2021-03-11T19:10:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,109 | py | import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
import plotly.io as pio
from plotly.subplots import make_subplots
from dash.dependencies import State, Input, Output, ALL
from dash.exceptions import PreventUpdate
from Opt import Strategies
from Stock import Stock
import pandas as pd
import os
# Module-level singletons shared by every callback below.
stock = Stock()          # market-data access (tickers, option chains)
strategy = Strategies()  # mutable portfolio/strategy state
app = dash.Dash(
    __name__,
    meta_tags=[
        {
            "name": "viewport",
            "content": "width=device-width, initial-scale=1, maximum-scale=1.0, user-scalable=no",
        }
    ],
)
server = app.server
# Callbacks reference components created later by other callbacks.
app.config["suppress_callback_exceptions"] = True
# Inline styles for the Buy/Sell and Calls/Puts tab bars.
tabs_styles = {
    'height': '44px'
}
tab_style = {
    'borderBottom': '1px solid #d6d6d6',
    'padding': '6px',
    'fontWeight': 'bold'
}
tab_selected_style = {
    'borderTop': '1px solid #d6d6d6',
    'borderBottom': '1px solid #d6d6d6',
    'backgroundColor': '#119DFF',
    'color': 'white',
    'padding': '6px'
}
def build_upper_left_panel():
    """Build the left half of the UI: ticker/expiry/strategy selectors plus
    the four (initially empty) option-chain table containers.

    Component ids created here (ticker-select, expiry-select,
    strategy-select, *-stats-container, tabs-*, dummy-*) are the wiring
    targets of the callbacks defined later in this module.
    """
    return html.Div(
        id="upper-left",
        className="six columns select-area",
        children=[
            html.P(
                className="section-title",
                children="",
            ),
            html.Div(
                className="control-row-1",
                children=[
                    html.Div(
                        id="ticker-select-outer",
                        children=[
                            html.Label("Select a Ticker"),
                            dcc.Dropdown(
                                id="ticker-select",
                                options=stock.tickers_select,
                                value="AAPL",
                            ),
                        ],
                    ),
                    html.Div(
                        id="select-metric-outer",
                        children=[
                            html.Label("Choose an Expiry"),
                            # options are filled by the update_ticker callback
                            dcc.Dropdown(
                                id="expiry-select",
                                # options=Stock.get_options_expirations(),
                            ),
                        ],
                    ),
                ],
            ),
            html.Div(
                id="region-select-outer",
                className="control-row-2",
                children=[
                    html.Label("Pick a Strategy"),
                    html.Div(
                        id="region-select-dropdown-outer",
                        children=dcc.Dropdown(
                            id="strategy-select", multi=False, searchable=True,
                            options=strategy.strategies_select
                        ),
                    ),
                ],
            ),
            html.Div(id="strategy-desc"),
            html.Ul(id="strategy-action"),
            # dummy-* elements are callback plumbing, not visible content
            html.Ul(id="dummy-output"),
            html.Ul(id="dummy-output-2"),
            html.Ul(id="dummy-input", hidden=True),
            html.Div(
                id="table-container",
                className="table-container",
                children=[
                    html.Div(
                        id="table-upper",
                        children=[
                            html.P(""),
                            html.P(id="underlying-price"),
                            html.Div(id="tabs-area-1", children=[dcc.Tabs(id='tabs-buy-sell', value='tab-buy', children=[
                                dcc.Tab(
                                    label='Buy', value='tab-buy', style=tab_style, selected_style=tab_selected_style),
                                dcc.Tab(label='Sell', value='tab-sell',
                                        style=tab_style, selected_style=tab_selected_style),
                            ], style=tabs_styles), ]),
                            html.Div(id="tabs-area-2", children=[
                                dcc.Tabs(id='tabs-example', value='calls', children=[
                                    dcc.Tab(
                                        label='Calls', value='calls', style=tab_style, selected_style=tab_selected_style),
                                    dcc.Tab(
                                        label='Puts', value='puts', style=tab_style, selected_style=tab_selected_style),
                                ], style=tabs_styles)]),
                            html.Div(id="moneyness", children=[
                                html.P("In the Money", id="itm"),
                                html.P("Out the Money", id="otm"),
                                html.P("At the Money", id="atm")
                            ]),
                            html.P(stock.live_price_comment),
                            html.P("Select Options Below", id="select-text-anim"),
                            # One container per buy/sell x call/put combination;
                            # render_content toggles which one is visible.
                            dcc.Loading(children=html.Div(
                                id="buy-calls-stats-container")),
                            dcc.Loading(children=html.Div(
                                id="sell-calls-stats-container")),
                            dcc.Loading(children=html.Div(
                                id="buy-puts-stats-container")),
                            dcc.Loading(children=html.Div(
                                id="sell-puts-stats-container")),
                            # dash_table.DataTable(
                            #     id="option-chain-table"
                            # )
                        ],
                    ),
                ],
            ),
        ],
    )
# Page layout: banner on top, then the selector panel (left) next to the
# payoff and greeks graphs (right).  Both graphs start hidden until a
# portfolio exists.
app.layout = html.Div(
    className="container scalable",
    children=[
        html.Div(
            id="banner",
            className="banner",
            children=[
                html.H6("Option Strategy & Payoff Calculator"),
            ],
        ),
        html.Div(
            id="upper-container",
            className="row",
            children=[
                build_upper_left_panel(),
                html.Div(
                    id="geo-map-outer",
                    className="six columns graph-area",
                    children=[
                        html.P(
                            id="map-title",
                            children="Option Payoff"
                        ),
                        html.Div(
                            id="geo-map-loading-outer",
                            children=[
                                dcc.Loading(
                                    id="loading",
                                    children=dcc.Graph(
                                        id="option-payoff-graph",
                                        style={"display": "none"}
                                    ),
                                ),
                            ],
                        ),
                        html.Div(
                            id="map-loading-outer",
                            children=[
                                dcc.Loading(
                                    id="loading-2",
                                    children=[dcc.Graph(
                                        id="option-greek-graph",
                                        style={"display": "none"}
                                    ),
                                        # NOTE(review): no callback in this file
                                        # ever updates option-greek-graph-gamma.
                                        dcc.Graph(
                                        id="option-greek-graph-gamma",
                                        style={"display": "none"}
                                    )
                                    ],
                                )
                            ],
                        ),
                    ],
                ),
            ],
        ),
    ],
)
"""@app.callback([Output("buy-calls-stats-container","style")],
Input('tabs-buy-sell', 'value'))
def render_content(tab):
if tab == 'tab-buy':
strategy.direction = '1'
return {'display': 'block'}
elif tab == 'tab-sell':
strategy.direction = '-1'
return {'display': 'none'}"""
@app.callback([Output("buy-calls-stats-container", "style"), Output("sell-calls-stats-container", "style"),
               Output("buy-puts-stats-container", "style"), Output("sell-puts-stats-container", "style")],
              [Input('tabs-example', 'value'), Input('tabs-buy-sell', 'value')])
def render_content(tab_pc, tab_bs):
    """Show exactly one option-chain table for the active tab combination.

    Output order is: buy calls, sell calls, buy puts, sell puts.  As a side
    effect, records the trade direction on the shared strategy object
    ('1' = buy, '-1' = sell).
    """
    routing = {
        ('calls', 'tab-buy'): ('1', 0),
        ('calls', 'tab-sell'): ('-1', 1),
        ('puts', 'tab-buy'): ('1', 2),
        ('puts', 'tab-sell'): ('-1', 3),
    }
    choice = routing.get((tab_pc, tab_bs))
    if choice is None:
        # Unknown tab pair: leave direction and styles untouched.
        return None
    direction, visible_slot = choice
    strategy.direction = direction
    shown = {'display': 'block'}
    hidden = {'display': 'none'}
    return tuple(shown if slot == visible_slot else hidden for slot in range(4))
@app.callback([Output("expiry-select", "options"), Output("expiry-select", "value"), Output("underlying-price", "children")],
              [Input("ticker-select", "value")])
def update_ticker(ticker):
    """Refresh the expiry dropdown and the underlying-price label after a
    new ticker is chosen, and clear any previously selected options."""
    stock.update_ticker(ticker)
    expiry_options = stock.get_options_expirations()
    # Pre-select the third listed expiry.
    default_expiry = expiry_options[2]["value"]
    price_label = f"Underlying Price: {stock.underlying}"
    # A ticker change invalidates whatever was in the portfolio.
    strategy.reset()
    return expiry_options, default_expiry, price_label
def _chain_table(table_id, chain, atm_idx, below_color, above_color):
    """Build one option-chain DataTable.

    Args:
        table_id: Dash component id (referenced by other callbacks — do not change).
        chain: DataFrame of the formatted calls or puts chain.
        atm_idx: row index of the at-the-money strike in `chain`.
        below_color / above_color: row background for strikes below/above the
            ATM strike (green = in the money, red = out of the money; which is
            which depends on call vs put).
    """
    atm_strike = chain["Strike"].iloc[atm_idx]
    return dash_table.DataTable(
        id=table_id,
        columns=[{"name": col, "id": col} for col in chain.columns],
        data=chain.to_dict('records'),
        row_selectable='multi',
        selected_rows=[],
        page_size=17,
        # Open the table on the page containing the at-the-money strike.
        page_current=atm_idx // 17,
        style_cell={"background-color": "#242a3b", "color": "#7b7d8d"},
        style_header={"background-color": "#1f2536", "padding": "0px 5px"},
        style_data_conditional=[
            # Highlight the ATM row in blue; color the rest by moneyness.
            {
                'if': {'filter_query': '{Strike} = %s' % atm_strike},
                'backgroundColor': '#0074D9',
                'color': 'white'
            },
            {
                'if': {'filter_query': '{Strike} < %s' % atm_strike},
                'backgroundColor': below_color,
                'color': 'white'
            },
            {
                'if': {'filter_query': '{Strike} > %s' % atm_strike},
                'backgroundColor': above_color,
                'color': 'white'
            },
        ],
    )


@app.callback([Output("buy-calls-stats-container", "children"), Output("sell-calls-stats-container", "children"),
               Output("buy-puts-stats-container", "children"), Output("sell-puts-stats-container", "children")],
              [Input("expiry-select", "value")]
              )
def update_expiry(expiry):
    """Rebuild all four option-chain tables for the chosen expiry.

    Previously this function repeated the ~35-line DataTable construction
    four times; the shared `_chain_table` helper removes that duplication.
    Table ids and color rules are unchanged: calls color strikes below ATM
    green (ITM) and above red (OTM); puts are the mirror image.
    """
    stock.update_expiry_date(expiry)
    calls, puts, c_strike_idx, p_strike_idx = stock.get_calls_and_puts_formated(
        expiry_date=expiry)
    # A new expiry invalidates any options already in the portfolio.
    strategy.reset()
    return (
        _chain_table("buy-call-stats-table", calls, c_strike_idx, 'green', 'red'),
        _chain_table("sell-call-stats-table", calls, c_strike_idx, 'green', 'red'),
        _chain_table("buy-puts-stats-table", puts, p_strike_idx, 'red', 'green'),
        _chain_table("sell-puts-stats-table", puts, p_strike_idx, 'red', 'green'),
    )
@app.callback(
    Output("strategy-desc", "children"),
    Output("strategy-action", "children"),
    [Input("strategy-select", "value")])
def update_strategy(strat):
    """Render the selected strategy's description and its list of legs."""
    if not strat:
        # Nothing selected yet: clear both panels.
        return '', []
    info = strategy.strategies_descs[strat]
    steps = [html.Li(className="strategy-action", children=[step])
             for step in info['action']]
    return "Description: " + info['desc'], steps
@app.callback(
    [Output("dummy-output", "children"), Output("option-payoff-graph", "figure"), Output("option-payoff-graph", "style"),
     Output("option-greek-graph", "figure"), Output("option-greek-graph", "style")],
    [Input('buy-call-stats-table', 'selected_rows'), Input('sell-call-stats-table', 'selected_rows'),
     Input('buy-puts-stats-table',
           'selected_rows'), Input('sell-puts-stats-table', 'selected_rows'),
     Input({'type': "delete", 'index': ALL}, 'n_clicks'), Input("strategy-desc", "children")]
)
def select_option_from_chain(buy_call_selected_rows, sell_call_selected_rows, buy_puts_selected_rows, sell_puts_selected_rows, n_clicks, strategy_state):
    """Maintain the option portfolio from table selections and delete clicks.

    Triggered by: a row (de)selection in any of the four chain tables, a
    click on a leg's delete button, or a strategy change.  Mutates the
    module-level `strategy` portfolio and re-renders the leg list plus the
    payoff and greeks figures via `update_frontend_choices`.
    """
    ctx = dash.callback_context
    # A strategy change clears the portfolio and hides both graphs.
    if strategy_state and 'strategy-desc' in ctx.triggered[0]["prop_id"]:
        print("STRAT")
        print("strategy state: ", ctx.triggered)
        strategy.reset()
        # BUG FIX: this callback declares five Outputs, but the original
        # returned only four values here ('', {}, '', ''), which made Dash
        # raise on every strategy change.  Return the full empty state.
        return '', {}, {"display": "none"}, {}, {"display": "none"}
    if ctx.triggered and 'selected_rows' in ctx.triggered[0]["prop_id"]:
        print("trigger ", ctx.triggered)
        # Component id of the table that fired, e.g. "buy-call-stats-table".
        call_or_put = ctx.triggered[0]["prop_id"].split('.')[0]
        print("c or p", call_or_put)
        # For each table: take the most recently selected row, build the
        # matching option leg, and add it — but only when the active
        # buy/sell tab agrees with the table (direction '1' buy / '-1' sell).
        if call_or_put == "buy-call-stats-table":
            if buy_call_selected_rows:
                idx = buy_call_selected_rows[-1]
                row = stock.calls_formatted.iloc[idx]
                if strategy.direction == '1':
                    # Buys are priced at the Ask.
                    opt = strategy.create_option(
                        strategy.direction, "call", row["Strike"], row["Ask"], row["Implied Volatility"], "BC_%s" % (idx), stock.underlying)
                    strategy.add_option_to_portfolio(opt)
        elif call_or_put == "sell-call-stats-table":
            if sell_call_selected_rows:
                idx = sell_call_selected_rows[-1]
                row = stock.calls_formatted.iloc[idx]
                if strategy.direction == '-1':
                    # Sells are priced at the Bid.
                    opt = strategy.create_option(
                        strategy.direction, "call", row["Strike"], row["Bid"], row["Implied Volatility"], "SC_%s" % (idx), stock.underlying)
                    strategy.add_option_to_portfolio(opt)
        elif call_or_put == "buy-puts-stats-table":
            if buy_puts_selected_rows:
                idx = buy_puts_selected_rows[-1]
                row = stock.puts_formatted.iloc[idx]
                if strategy.direction == '1':
                    opt = strategy.create_option(
                        strategy.direction, "put", row["Strike"], row["Ask"], row["Implied Volatility"], "BP_%s" % (idx), stock.underlying)
                    strategy.add_option_to_portfolio(opt)
        elif call_or_put == "sell-puts-stats-table":
            if sell_puts_selected_rows:
                idx = sell_puts_selected_rows[-1]
                row = stock.puts_formatted.iloc[idx]
                if strategy.direction == '-1':
                    opt = strategy.create_option(
                        strategy.direction, "put", row["Strike"], row["Bid"], row["Implied Volatility"], "SP_%s" % (idx), stock.underlying)
                    strategy.add_option_to_portfolio(opt)
        print("Current Portfolio: ", strategy.current_portfolio)
    if n_clicks and ctx.triggered and 'delete' in ctx.triggered[0]["prop_id"]:
        if ctx.triggered[0]["value"] > 0:
            # SECURITY NOTE(review): prop_id comes from the browser; eval of
            # it is risky.  Dash serializes pattern-matching ids as JSON, so
            # json.loads would be the safe parse here — confirm and switch.
            ctx_trig = eval(ctx.triggered[0]['prop_id'].split('.')[0])
            opt_idx = ctx_trig["index"]
            print("Removing ", opt_idx)
            strategy.remove_option_from_portfolio(opt_idx)
    return update_frontend_choices()
def update_frontend_choices():
    """Render the current portfolio as a leg list plus payoff/greeks figures.

    Returns the 5-tuple consumed by select_option_from_chain's Outputs:
    (leg list items, payoff figure, payoff style, greeks figure,
    greeks style).  With an empty portfolio both figures are {} and hidden.
    """
    options_text_list = []
    # One <li> per leg, each with a pattern-matched delete button whose
    # index is the leg's option_id.
    for j, i in enumerate(strategy.current_portfolio):
        options_text_list.append(html.Li(id=strategy.current_portfolio[i].option_id, className="li-port-selection",
                                         children=[strategy.option_to_text(strategy.current_portfolio[i]),
                                                   html.Button(id={'type': "delete",
                                                                   'index': strategy.current_portfolio[i].option_id}, n_clicks=0, className="opt-del-btn")]
                                         ))
    print(['delete-%s' % (i)for i in strategy.current_portfolio])
    # Defaults for an empty portfolio: no figures, graphs hidden.
    fig = {}
    fig_style = {"display": "none"}
    fig_greeks = {}
    fig_greeks_style = {"display": "none"}
    if options_text_list:
        # Center the payoff view around the mean strike of the legs.
        ave_strike = sum(
            [strategy.current_portfolio[i].strike for i in strategy.current_portfolio])/len(strategy.current_portfolio)
        print("Average Strike: ", ave_strike)
        payoff = strategy.calculate_portfolio_payoff()
        # Underlying-price axis: 0 .. 2x the current spot.
        S = [p for p in range(0, int(stock.underlying*2))]
        fig = px.line(x=S, y=payoff, template="plotly_dark")
        """
        if abs(min(payoff)) > abs(max(payoff)):
            if payoff_count[payoff[0]] > 1: ## If the min is displayed along horizontal line rather than bottom of slope
                fig.update_layout(xaxis=dict(range=[int(stock.underlying*0.9), int(stock.underlying*1.1)]),
                                  yaxis=dict(range=[int(min(payoff)-15), int(max(payoff)+50)]))
            else:
                fig.update_layout(xaxis=dict(range=[int(stock.underlying*0.9), int(stock.underlying*1.1)]),
                                  yaxis=dict(range=[int(min(payoff)*0.1), int(max(payoff)+50)]))
        else:
            fig.update_layout(xaxis=dict(range=[int(stock.underlying*0.9), int(stock.underlying*1.1)]),
                              yaxis=dict(range=[int(min(payoff)-15), int(max(payoff)*0.1)]))"""
        # fig.update_layout(xaxis=dict(range=[int(stock.underlying*0.9), int(stock.underlying*1.1)]),
        #                   yaxis=dict(range=[-30, 30]))
        fig.update_layout(xaxis=dict(range=[int(ave_strike*0.9), int(ave_strike*1.1)]),
                          yaxis=dict(range=[-30, 30]))
        fig.update_yaxes(title_text="Profit/Loss")
        fig.update_xaxes(title_text="Underlying Price")
        # Annotate max gain / max loss in the top-left corner of the plot.
        max_gain = strategy.max_gain(payoff)
        max_loss = strategy.max_loss(payoff)
        fig.add_annotation(text=max_gain,
                           xref="paper", yref="paper",
                           x=0.1,y=1,
                           showarrow=False)
        fig.add_annotation(text=max_loss,
                           xref="paper", yref="paper",
                           x=0.1,y=0.9,
                           showarrow=False)
        greeks = strategy.calculate_portfolio_greeks(
            stock.current_date, stock.expiry_date)
        deltas_X = [p for p in range(0, int(stock.underlying*2))]
        fig_greeks = greek_subplots(greeks, deltas_X)
        # Un-hide both graphs now that there is something to show.
        fig_style = {}
        fig_greeks_style = {}
    return options_text_list, fig,fig_style, fig_greeks, fig_greeks_style
def greek_subplots(greeks, X_axis):
    """Build a dark-themed 2x2 figure of Delta / Gamma / Vega / Theta.

    Each panel plots one greek against the underlying-price axis X_axis and
    overlays two dotted guide lines crossing at the current spot price.
    """
    slots = {"delta": (1, 1), "gamma": (1, 2), "vega": (2, 1), "theta": (2, 2)}
    figure = make_subplots(
        rows=2, cols=2,
        subplot_titles=("Delta", "Gamma", "Vega", "Theta"))
    for greek_name, (row, col) in slots.items():
        figure.add_trace(go.Scatter(x=X_axis, y=greeks[greek_name]),
                         row=row, col=col)
    figure.update_layout(showlegend=False, template="plotly_dark")
    spot = int(stock.underlying)
    for greek_name in greeks:
        row, col = slots[greek_name]
        # Greek value at the current spot price.
        level = greeks[greek_name][spot]
        # Horizontal dotted line from the axis out to the spot...
        figure.add_shape(go.layout.Shape(type="line", x0=0, y0=level,
                                         x1=spot, y1=level, line=dict(dash="dot")),
                         row=row, col=col)
        # ...and a vertical dotted line from zero up to that value.
        figure.add_shape(go.layout.Shape(type="line", x0=spot, y0=0,
                                         x1=spot, y1=level, line=dict(dash="dot")),
                         row=row, col=col)
    return figure
"""@app.callback(Output('dummy-output-2', 'style'),
[Input('dummy-output','children')])
def make_button_callbacks(child):
print("Change to dummy output")"""
"""
@app.callback(
Output('dummy-output-2', 'children'),
[Input({'type':"delete",'index':ALL}, 'n_clicks')]
)
def delete_button(*args):
print("inside delete button")
print("BUTTON ",args)
ctx = dash.callback_context
print("ctx trig: ", ctx.triggered)
#if args[0] > 0:
# opt_id = args[1]
# strategy.remove_option_from_portfolio(opt_id)
# update_frontend_choices()
"""
"""
def return_choices(strategy_dict):
res = []
buy = strategy_dict["Buy"]
sell = strategy_dict["Sell"]
for c in buy["calls"]:
row = stock.calls_formatted.iloc[c]
res.append('+1 %s Call for %s '%(row["Strike"],row["Ask"])
for c in buy["calls"]:
row = stock.calls_formatted.iloc[c]
res.append('+1 %s Call for %s '%(row["Strike"],row["Ask"])
"""
if __name__ == "__main__":
    # debug=True enables hot reload and the in-browser debugger; turn off in production.
    app.run_server(debug=True)
| [
"dlspy@hotmail.com"
] | dlspy@hotmail.com |
7a3131ae28be4405ce5a794b47ed688f2fecf0cb | 71b11008ab0455dd9fd2c47107f8a27e08febb27 | /04、 python编程/day06/3-code/06-函数的返回值.py | 7adbf30fba433ca7320decfaec8f19bc9ce11693 | [] | no_license | zmh19941223/heimatest2021 | 49ce328f8ce763df0dd67ed1d26eb553fd9e7da4 | 3d2e9e3551a199bda9945df2b957a9bc70d78f64 | refs/heads/main | 2023-08-25T17:03:31.519976 | 2021-10-18T05:07:03 | 2021-10-18T05:07:03 | 418,348,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # # 我们没有使用过函数 带返回值
# print("hello python")
# # 对于没有返回值的函数,调用方法,直接函数名(参数)
# # len是有返回值的函数
# a = len("hello python") # 会把一个值返回给调用者
# print(a)
# print(len("hello python"))
def my_sum(a, b):
    """Return the sum of a and b (demonstrates a function return value)."""
    return a + b


# Calling my_sum gives the caller its return value, captured here in num1.
num1 = my_sum(2, 3)
print(num1)
# The return value can also be passed straight into another call.
print(my_sum(5, 6))
"1780858508@qq.com"
] | 1780858508@qq.com |
d6e56c15b0f194f7b909df2a5188ab2032d0ca47 | 801ee167e5490ec43f9ad141e3ec0d43c6dddb7b | /venv/bin/pilfile.py | e25cf57c5c556fecd724ee126b3b2f10c4c2a629 | [] | no_license | yoshi2095/Online-shop | bd9c76485e70c3c985b36ef4db0c6b6fb9d131ea | 0c536b6d904357d0ca30f5528580895221077cb6 | refs/heads/master | 2021-01-20T09:45:02.203095 | 2017-05-04T21:29:51 | 2017-05-04T21:29:51 | 90,279,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | #!/home/yoshi2095/Desktop/Desktop/ebooks/Django_By_Example/Django_By_Example_Code/9/venv/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import sys
from PIL import Image
# No arguments: print usage and exit (legacy PIL convention uses status 1).
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print("  -f  list supported file formats")
    print("  -i  show associated info and tile data")
    print("  -v  verify file headers")
    print("  -q  quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)
try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)
# Mode flags toggled by the options below (0/1 used as booleans).
verbose = quiet = verify = 0
for o, a in opt:
    if o == "-f":
        # -f lists every registered format and exits immediately.
        Image.init()
        # NOTE(review): `id` shadows the builtin; harmless since we exit below.
        id = sorted(Image.ID)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        Image.DEBUG += 1
def globfix(files):
    """Expand shell wildcards on Windows, where the shell does not do it.

    On every other platform the argument list is returned unchanged.
    """
    if sys.platform != "win32":
        return files
    expanded = []
    for pattern in files:
        if glob.has_magic(pattern):
            expanded.extend(glob.glob(pattern))
        else:
            expanded.append(pattern)
    return expanded
# Identify each file: print format, size and mode; optionally dump info/tile
# data (-i) and verify headers (-v).  Errors are reported, never fatal.
for file in globfix(args):
    try:
        im = Image.open(file)
        print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # `except Exception` would be safer, kept as-is in this legacy script.
            except:
                if not quiet:
                    print("failed to verify image", end=' ')
                print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        if not quiet:
            print(file, "failed:", v)
    except:
        import traceback
        if not quiet:
            print(file, "failed:", "unexpected error")
        traceback.print_exc(file=sys.stdout)
| [
"arora4.yoshi@gmail.com"
] | arora4.yoshi@gmail.com |
390337c20108c028311e9b8d4a7ad2dee312ecbb | 54e6a67e42ac9b075af108e7b06dbe8aebf0ca4f | /1. Python/block.py | d6c4b99ff1013e996eb35ae6ea2eb681c68d6181 | [] | no_license | hrishikeshathalye/PPL-Assignments | e41702cdf8126b4e815f88722d2aa93e65846f40 | 1c5b0dffb011400e17d73d76c94f12a8dcc250e8 | refs/heads/master | 2022-07-14T11:00:14.295014 | 2020-05-19T07:50:32 | 2020-05-19T07:50:32 | 264,370,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | host = r"/etc/hosts"
redirect = "127.0.0.1"
def block():
    """Prompt for a comma-separated list of sites and block each one by
    appending a loopback-redirect line to the hosts file.

    Fix: entries are now stripped and empties dropped, so input like
    "a.com, b.com" no longer writes a hosts line with a leading space, and
    pressing Enter on an empty prompt writes nothing.
    """
    inp = input("Enter list of website URLs to block seperated by comma:\n")
    sites = [site.strip() for site in inp.split(',') if site.strip()]
    with open(host, "r+") as file:
        content = file.read()
        for site in sites:
            # NOTE(review): substring test — "foo.com" also matches an existing
            # "www.foo.com" line; confirm this de-dup behaviour is intended.
            if site not in content:
                # File position is at EOF after read(), so this appends.
                file.write(redirect + " " + site + "\n")
def unblock():
    """Prompt for a comma-separated list of sites and remove every hosts-file
    line that mentions one of them.

    Bug fixed: with empty input the original produced ``sites == ['']`` and
    ``'' in line`` is True for every line, so the WHOLE hosts file was
    erased.  Entries are now stripped and empty ones discarded, so empty
    input leaves the file untouched.
    """
    inp = input("Enter list of website URLs to unblock seperated by comma:\n")
    sites = [site.strip() for site in inp.split(',') if site.strip()]
    with open(host, "r+") as file:
        content = file.readlines()
        file.seek(0)
        for line in content:
            # Keep every line that does not mention a site being unblocked.
            if not any(site in line for site in sites):
                file.write(line)
        # Drop whatever is left of the old, longer file contents.
        file.truncate()
# Minimal menu: 1 blocks sites, 2 unblocks, anything else is rejected.
# (Non-numeric input still raises ValueError, as before.)
selection = int(input("1. Block Sites\n2. Unblock Sites\n"))
if selection == 1:
    block()
elif selection == 2:
    unblock()
else:
    print("Invalid Option!")
| [
"hathalye7@gmail.com"
] | hathalye7@gmail.com |
8ff917ca510cfe98a72c0f7dbf65d10c62872997 | 2351d66f6bf3a6dd05f4ea2edc65599999c89b82 | /ObjectData004_Item.py | f6f2d0e696da70b11550825e426450d6ce260590 | [] | no_license | sddragon616/2DGame_Programming_Python | 990e20d7d4d137e623fe02d812965a340308afff | 92a36a8914e2564b502986e7677e8280ac5dac29 | refs/heads/master | 2021-09-25T09:25:29.040917 | 2018-10-20T11:59:51 | 2018-10-20T11:59:51 | 103,901,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | import json
from pico2d import *
# Load the item stat table once at import time.  A `with` block guarantees
# the handle is closed even if json.load raises (the original closed it
# manually and leaked the handle on a parse error).  The Item_data_file
# name stays bound for backward compatibility with any existing references.
with open('UnitData\\Item.json', 'r') as Item_data_file:
    Item_Data = json.load(Item_data_file)
class Item:
    """Base class for a stackable, consumable inventory item."""

    def __init__(self, numbers):
        # Sprite and sound assets are lazily loaded by subclasses.
        self.image = None
        self.number = numbers
        self.use_sound = None

    def use(self):
        """Consume one unit of the stack.

        Returns False when the stack is empty (the item cannot be used);
        otherwise plays the use sound (if loaded), decrements the count
        and returns True.
        """
        if self.number <= 0:
            return False
        if self.use_sound is not None:
            self.use_sound.play()
        self.number -= 1
        return True
class HpPotion(Item):
    """Potion that restores HP when drunk."""

    def __init__(self, numbers):
        super().__init__(numbers)
        self.healing = Item_Data['HP_Potion']['Healing']
        # Load shared assets only once.
        if self.image is None:
            self.image = load_image('Resource_Image\\Item001_HP_Potion.png')
        if self.use_sound is None:
            self.use_sound = load_wav('Resource_Sound\\Effect_Sound\\Drinking.wav')
            self.use_sound.set_volume(64)

    def use(self, user):
        """Drink one potion (if any remain) and heal the user's HP."""
        if super().use():
            user.hp_heal(self.healing)
class MpPotion(Item):
    """Potion that restores MP when drunk."""

    def __init__(self, numbers):
        super().__init__(numbers)
        self.healing = Item_Data['MP_Potion']['Healing']
        # Load shared assets only once.
        if self.image is None:
            self.image = load_image('Resource_Image\\Item002_MP_Potion.png')
        if self.use_sound is None:
            self.use_sound = load_wav('Resource_Sound\\Effect_Sound\\Drinking.wav')
            self.use_sound.set_volume(64)

    def use(self, user):
        """Drink one potion (if any remain) and restore the user's MP."""
        if super().use():
            user.mp_heal(self.healing)
class StaminaPotion(Item):
    """Potion that restores stamina (SP) when drunk."""

    def __init__(self, numbers):
        super().__init__(numbers)
        self.healing = Item_Data['SP_Potion']['Healing']
        # Load shared assets only once.
        if self.image is None:
            self.image = load_image('Resource_Image\\Item003_Stamina_Potion.png')
        if self.use_sound is None:
            self.use_sound = load_wav('Resource_Sound\\Effect_Sound\\Drinking.wav')
            self.use_sound.set_volume(64)

    def use(self, user):
        """Drink one potion (if any remain) and restore the user's stamina."""
        if super().use():
            user.sp_heal(self.healing)
| [
"ksdragon616@naver.com"
] | ksdragon616@naver.com |
21be7459ba6b302c4c38d2f3f41a099bd02b6231 | d6409d7008e4b8e78ed4f4cb6d3260097687a401 | /bot/TTS.py | 4d6be8c1a31eed5ad1134f687ce14e6404533ab7 | [] | no_license | gabrielDonnantuoni/Discord-Music-Bot | d69d0ea6ad3db2b76fbc6e3f110a97a38344d5f5 | f78ec013d99a88e0152a45c2042d8b4732d04097 | refs/heads/main | 2023-08-03T18:02:19.200840 | 2021-09-17T23:06:16 | 2021-09-17T23:06:16 | 406,581,546 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | import gtts
from discord.ext import commands
import discord
class TTS(commands.Cog):
    """Discord cog intended to speak text in a voice channel (unfinished)."""

    def __init__(self, bot):
        # Bot instance this cog is attached to.
        self.bot = bot

    @commands.command()
    async def diz(self, ctx, *, args=''):
        # BUG(review): `t` is undefined — looks like an unfinished
        # gtts.gTTS(args) call; TODO complete before use.
        tts = t
        # BUG(review): `filename` and `ffmpeg_options` are undefined and the
        # audio source is discarded instead of being played; TODO finish.
        discord.FFmpegPCMAudio(filename, **ffmpeg_options)
| [
"gabrieldonnantuoni@gmail.com"
] | gabrieldonnantuoni@gmail.com |
f3ca38f34ec5874a1060fa758044d3557344e6b8 | 51ae5874dbb2e62d4cefc20f897f09c148d9d817 | /dj_ecommerce/wsgi.py | efbdbae0c7d576ac65c973e7437f6257eaac38ec | [] | no_license | MehediEhteshum/dj-ecommerce | 90178df1d8659a25bb00cf33f3114c848cd9341f | 9ff1e94228bfc95da2f9922d24e356890365ede6 | refs/heads/master | 2023-08-31T22:10:36.922146 | 2023-08-26T07:20:45 | 2023-08-26T07:20:45 | 322,723,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for dj_ecommerce project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj_ecommerce.settings')
application = get_wsgi_application()
| [
"me69oshan@gmail.com"
] | me69oshan@gmail.com |
4ef3847e2ddeb8e512a14747caf084bdbd7d87ae | 32ad93209ea9be15eb324257006057931058f1e8 | /check_junos_clusterled.py | 05165961020741bf04991e6175fb7c9edda35f1b | [] | no_license | azielke/junos-checks | 0603d6c3355cf7a1da117cccad6d01f6f584f2a0 | 10949dd5c9fed6614e8f1875ba4933871b117e06 | refs/heads/master | 2023-08-04T17:56:55.614019 | 2021-09-14T11:00:12 | 2021-09-14T11:00:12 | 406,351,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | #!/opt/monitoring/plugins/junos-checks/venv/bin/python3
from jnpr.junos import Device
from pprint import pprint
import re
import sys
import getopt
def usage():
print("check_junos_clusterled.py <-H|--host> <-u|--user> <-p|--pasword>")
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:],
"H:u:p:h",
["host=","user=","password=","help"]
)
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(3)
router = None
user = None
password = None
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(3)
elif opt in ("-H", "--host"):
router = arg
elif opt in ("-u", "--user"):
user = arg
elif opt in ("-p", "--password"):
password = arg
else:
usage()
sys.exit(3)
if router is None or user is None or password is None:
usage()
sys.exit(3)
dev = Device(host=router, user=user, password=password)
try:
dev.open()
except Exception as err:
print("Cannot connect: %s"%err)
sys.exit(3)
res = dev.rpc.get_chassis_cluster_information()
dev.close()
rc = 0
leds = []
status_re = re.compile('\(Status: (.*)\)')
for item in res.xpath('.//multi-routing-engine-item'):
re = item.xpath('.//re-name')[0].text
color = item.xpath('.//chassis-cluster-information/chassis-cluster-led-information/current-led-color')[0].text
leds.append("%s: %s"%(re, color))
if color == "Green":
pass
elif color == "Amber":
rc = 2
else:
print("Unknown color: %s: %s"%(re, color))
sys.exit(3)
status = None
if rc == 0:
status = "OK"
elif rc == 1:
status = "Warning"
elif rc == 2:
status = "Critical"
else:
status = "Unknown"
print("HA %s: "%(status) + ', '.join(leds))
sys.exit(rc)
| [
"azielke@vegasystems.de"
] | azielke@vegasystems.de |
c68626bb709ed9ea5dd78ce4744d9767acf23822 | 9449315c3bd99e319d4c0ebe8e7d4c023afa1c00 | /frontend/urls.py | 538586cb3b1aa205208f9d8074d5f35d89b2397a | [] | no_license | djym77/phytorganic | 14e8860c072cc8c274f6e99e080fbbb1c27b6467 | e0be8c2275c68ca2d5dceb2034040612197a9ad5 | refs/heads/master | 2020-09-20T05:59:04.877237 | 2019-09-19T11:22:32 | 2019-09-19T11:22:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from django.urls import path, re_path
from .views import *
app_name = 'frontend'
urlpatterns = [
path('frontend/menu/<str:page_name>', FrontMenuView.as_view(), name = 'front-menu'),
path('member-migration', MigrationView.as_view(), name = "member-migration"),
path('landing', WelcomeView.as_view(), name = ""),
path('', HomeView.as_view(), name = "home"),
path('about', AboutView.as_view(), name = "about"),
path('products', ProductListView.as_view(), name = "products"),
path('products/<str:slug>', ProductDetailsView.as_view(), name = "product"),
path('faq', FAQView.as_view(), name = "faq"),
path('about', AboutView.as_view(), name = "about"),
re_path(r'^ajax/validate_username/$', views.validate_username, name = 'validate_username'),
re_path(r'^ajax/validate_sponsor/$', views.validate_sponsor, name = 'validate_sponsor'),
re_path(r'^ajax/validate_email/$', views.validate_email, name = 'validate_email'),
re_path(r'^ajax/validate_placement_name/$', views.validate_placement_name, name = 'validate_placement_name'),
re_path(r'^ajax/validate_placement_mode/$', views.validate_placement_mode, name = 'validate_placement_mode'),
]
| [
"55067005+git-kessededieu@users.noreply.github.com"
] | 55067005+git-kessededieu@users.noreply.github.com |
d63bde31dcfe959195289dfa61022199ab9133a9 | 2778561f666f270484eb1f3565d6878b1c8add65 | /loancalulator.py | b3448a83321973b70c75ecfec4ae7091d9153909 | [] | no_license | georgevincent91/Python-GUI-Learning | a75fb16b7c072b3c72529b53cdf9c3aabd2d6069 | fb5f6c383fc47813b753fbfe9c707c286a878756 | refs/heads/master | 2016-09-05T19:03:46.413175 | 2015-06-24T07:18:32 | 2015-06-24T07:18:32 | 37,968,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | #!/usr/bin/python3
from Tkinter import *
fields = ('Annual Rate', 'Number of Payments', 'Loan Principle', 'Monthly Payment', 'Remaining Loan')
def monthly_payment(entries):
# period rate:
r = (float(entries['Annual Rate'].get()) / 100) / 12
print("r", r)
# principal loan:
loan = float(entries['Loan Principle'].get())
n = float(entries['Number of Payments'].get())
remaining_loan = float(entries['Remaining Loan'].get())
q = (1 + r)** n
monthly = r * ( (q * loan - remaining_loan) / ( q - 1 ))
monthly = ("%8.2f" % monthly).strip()
entries['Monthly Payment'].delete(0,END)
entries['Monthly Payment'].insert(0, monthly )
print("Monthly Payment: %f" % monthly)
def final_balance(entries):
# period rate:
r = (float(entries['Annual Rate'].get()) / 100) / 12
print("r", r)
# principal loan:
loan = float(entries['Loan Principle'].get())
n = float(entries['Number of Payments'].get())
q = (1 + r)** n
monthly = float(entries['Monthly Payment'].get())
q = (1 + r)** n
remaining = q * loan - ( (q - 1) / r) * monthly
remaining = ("%8.2f" % remaining).strip()
entries['Remaining Loan'].delete(0,END)
entries['Remaining Loan'].insert(0, remaining )
print("Remaining Loan: %f" % remaining)
def makeform(root, fields):
entries = {}
for field in fields:
row = Frame(root)
lab = Label(row, width=22, text=field+": ", anchor='w')
ent = Entry(row)
ent.insert(0,"0")
row.pack(side=TOP, fill=X, padx=5, pady=5)
lab.pack(side=LEFT)
ent.pack(side=RIGHT, expand=YES, fill=X)
entries[field] = ent
return entries
if __name__ == '__main__':
root = Tk()
ents = makeform(root, fields)
root.bind('<Return>', (lambda event, e=ents: fetch(e)))
b1 = Button(root, text='Final Balance',
command=(lambda e=ents: final_balance(e)))
b1.pack(side=LEFT, padx=5, pady=5)
b2 = Button(root, text='Monthly Payment',
command=(lambda e=ents: monthly_payment(e)))
b2.pack(side=LEFT, padx=5, pady=5)
b3 = Button(root, text='Quit', command=root.quit)
b3.pack(side=LEFT, padx=5, pady=5)
root.mainloop() | [
"George@Bits-213"
] | George@Bits-213 |
ce27a165316ce461b682625bb761c33ca84d3514 | d5fd8ae33e781e3dc52dadd2552da041ddfd4e81 | /Projeto-Fechadura-master/server.py | 25c1dc4b24b1715de7d78bfae6a7b3baad6286b0 | [] | no_license | marcusv-fs/fechadura | fb8cb325a2a4acda642afb1889ae27996bc3375e | 15ab4fcbd55e4f1265f08f61d0c599c6e67efbea | refs/heads/master | 2022-10-04T18:59:22.129135 | 2020-06-05T01:40:05 | 2020-06-05T01:40:05 | 269,503,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | # -*- coding: utf-8 -*-
import socket
import threading
from datetime import datetime
def trata_cliente(conexao, cliente):
mensagem = conexao.recv(100)
mensagem = mensagem.decode()
cont = 0
with open('Permissoes.json') as mac:
arquivo = mac.readlines()
for i in range(len(arquivo)):
arquivo[i] = arquivo[i].strip('\n')
for i in arquivo:
if mensagem == i:
cont = 1
if cont == 1:
texto = "Abrir"
else:
texto = 'O Dispositivo {} não está cadastrado!'.format(mensagem)
btexto = bytes(texto)
conexao.send(btexto)
print(texto, "{}".format(datetime.now()))
conexao.close()
if __name__ == '__main__':
r = input("Deseja cadastrar ou remover um endereço? \n"
"Digite c para cadastrar\n"
"Digite r para remover\n"
"Digite i inicializar o servidor\n")
while r != 'i':
if r == 'c':
with open('Permissoes.json', 'a') as mac:
mc = input("Digite o MAC do dispositivo à ser adicionado: \n")
mac.write("{}\n".format(mc))
with open('Permissoes.json') as mac:
arquivo = mac.readlines()
print(arquivo)
r = input("Deseja cadastrar ou remover um endereço? \n"
"Digite c para cadastrar\n"
"Digite r para remover\n"
"Digite i inicializar o servidor\n")
if r == 'r':
with open('Permissoes.json') as mac:
arquivo = mac.readlines()
with open('Permissoes.json', 'w') as mac:
mc = input("Digite o MAC do dispositivo à ser removido: \n")
arquivo.remove("{}\n".format(mc))
print(arquivo)
for i in arquivo:
mac.write(i)
r = input("Deseja cadastrar ou remover um endereço? \n"
"Digite c para cadastrar\n"
"Digite r para remover\n"
"Digite i inicializar o servidor\n")
print("Iniciando servidor às: {}".format(datetime.now()))
host = '' # Endereco IP do Servidor
porta = 5003 # Porta que o Servidor esta
soquete = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
origem = (host, porta)
soquete.bind(origem)
soquete.listen(0)
while True:
tc = threading.Thread(target=trata_cliente, args=soquete.accept())
tc.start()
| [
"noreply@github.com"
] | marcusv-fs.noreply@github.com |
eb10463a7e5d49a7ad97ed88dba552351fa256bc | fc28b92ee5609e9697a29df7b7db929caab860e4 | /flip_the_coin.py | 8afa8919e72c9d665fd2e0bc40998da8d0bc6359 | [] | no_license | gunjeet210/Flip-the-Coin | 4ac36382596180ad9764ab8f6b313458e7311ef5 | 219907d445aa905df4c3ad16f82f87358b868099 | refs/heads/master | 2021-01-09T01:17:38.439988 | 2020-02-21T18:07:45 | 2020-02-21T18:07:45 | 242,201,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import random, time
print("Flip the coin")
while True:
outcome=random.randint(1,2)
if(outcome == 1):
print("Heads")
elif(outcome == 2):
print("Tails")
time.sleep(1)
| [
"noreply@github.com"
] | gunjeet210.noreply@github.com |
35402b7014185c6925b7c2facb9d7f38dab62952 | 0a996368d6fff548b77daed59e46b5b957ab20f7 | /get_star_fusion_cmds.py | db61f1792553785a00d8272cc0f8de753a0d2d23 | [] | no_license | lincoln-harris/process_new_cells | e3740de76c3289651074ae229e6b4ca65ee4ced1 | 2e8b871c317363f1a6b570c9ce802499ffa83524 | refs/heads/master | 2021-03-05T20:22:02.841586 | 2020-04-02T19:37:26 | 2020-04-02T19:37:26 | 246,150,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import os
roots = []
for f in os.listdir('fastq/'):
root = f.split('_')[0] + '_' + f.split('_')[1]
if root in roots:
continue
else:
fq1 = root + '_R1.fastq.gz'
fq2 = root + '_R2.fastq.gz'
cmd = '/usr/local/src/STAR-Fusion/STAR-Fusion --left_fq ' + fq1 + ' --right_fq ' + fq2 + ' --genome_lib_dir `pwd`/ctat_genome_lib_build_dir -O StarFusionOut/' + root + '/ --FusionInspector validate --examine_coding_effect --denovo_reconstruct --CPU 8'
print(cmd)
roots.append(root) | [
"lincoln.harris@czbiohub.org"
] | lincoln.harris@czbiohub.org |
f974a8e5a82c58897eb4a600cd6ac6b1571c4c74 | 8e036faa8824e2d8da53c5a40089eb5f171216a2 | /Dietary Info/backup.py | ed0678e5bc9db03840cf58980deb6f61968f132a | [] | no_license | Lookuptothemoon/DietaryInfo-hacknyu | 3b603e21685b257de2fc3fe9b4e021f2235ebd24 | 772207bd542359cdd483d62157a9c06ecd003d46 | refs/heads/master | 2021-07-06T16:00:11.293442 | 2020-08-29T02:19:40 | 2020-08-29T02:19:40 | 175,728,585 | 0 | 0 | null | 2020-08-26T05:42:30 | 2019-03-15T01:43:30 | Python | UTF-8 | Python | false | false | 1,839 | py | from flask import Flask, render_template, request, jsonify, redirect, url_for
import requests
import json
app = Flask(__name__)
@app.route("/", methods=['GET'])
def index():
return render_template('index.html')
@app.route("/barcode", methods=['GET','POST'])
def get_barcode():
if request.method == 'POST':
barcode = request.form['javascript_data']
# get JSON file for product
barcode_url = ("http://api.walmartlabs.com/v1/search?query=%s&format=json&apiKey=" + walmartKey) % barcode
barcode_response = requests.get(barcode_url)
barcode_data = barcode_response.json()
# prints id of barcodes if there are any
walmartKey = 'jmrjysyvwn77dsnsrmktru5e'
message = ""
if(barcode_data['numItems'] == 0):
return jsonify(error="This item can not be found")
else:
# get first id of product with barcode
nutritionixKey = "420146a314d511263c1c33479a8fed23"
item_id = barcode_data["items"][0]["itemId"]
id_url = ("http://api.walmartlabs.com/v1/items/%s?format=json&apiKey=" + walmartKey) % item_id
id_response = requests.get(id_url)
id_data = id_response.json()
product_info = id_data["name"]
data = {
"appId":"c2ea5310",
"appKey": nutritionixKey,
"query": product_info,
"fields":["item_name","brand_name","nf_calories","nf_serving_size_qty","nf_serving_size_unit"],
"sort":{
"field":"_score",
"order":"desc"
},
"filters":{
"item_type":2
}
}
product_url = "https://api.nutritionix.com/v1_1/search/%s?" % product_info
r = requests.get(url=product_url, data=data)
product_data = r.text
#return render_template('index.html', message=product_data)
return product_data
else:
index()
if __name__ == '__main__':
app.run()
'''
http://api.walmartlabs.com/v1/search?query=[BARCODE]]&format=json&apiKey=jmrjysyvwn77dsnsrmktru5e
''' | [
"luna.e.ruiz@gmail.com"
] | luna.e.ruiz@gmail.com |
3d20037e829dd3f789988f1af0e175ae21e00067 | 27a1a28e219beafad1fa21d328452597df51fabe | /megadiff.py | f7a96e8aa7ab288db96c87dad072e9f6cefc0d04 | [] | no_license | DeaconSeals/notebook_template_diff | a8f49714c065678235c147b6b272d44a4ce526a5 | d471f8a9020ff4fe30dcb8d1acd49f244197acf8 | refs/heads/main | 2023-08-03T12:59:10.032067 | 2021-09-17T00:46:59 | 2021-09-17T00:46:59 | 407,327,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,825 | py | import argparse
from glob import glob
import jupytext
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('base', type=str)
parser.add_argument('students', type=str, nargs='+')
args = parser.parse_args()
rel_path = args.base #'../../GPacBase/'
file_exclude = ['.pyc', '.pyo']
provided_code = set()
assert Path(rel_path).is_dir(), f"Directory {rel_path} doesn't exist!"
filenames = glob(rel_path + '**/*' + '.py' + '*', recursive=True)
filenames = [i for i in filenames if not any([ex for ex in file_exclude if ex in i])]
for filename in filenames:
with open(filename) as file:
for line in file:
code = line.strip()
if code != '':
provided_code.add(code)
filenames = glob(rel_path + '**/*' + '.ipynb' + '*', recursive=True)
filenames = [i for i in filenames if not any([ex for ex in file_exclude if ex in i])]
for filename in filenames:
notebook = jupytext.read(filename, fmt='py')
for cell in notebook['cells']:
if cell['cell_type'] == 'code':
source = cell['source']
for line in source.split('\n'):
code = line.strip()
if code != '':
provided_code.add(code)
for student_dir in args.students:
filenames = glob(student_dir + '**/*' + '.py' + '*', recursive=True)
filenames = [i for i in filenames if not any([ex for ex in file_exclude if ex in i])]
for filename in filenames:
cleanfile = list()
with open(filename) as file:
for line in file:
code = line.strip()
if code != '' and code not in provided_code:
cleanfile.append(code)
localfile = filename.lstrip('./')
localdir = localfile.rpartition('/')[0]
if localdir != '':
Path(f'./diff/{localdir}').mkdir(parents=True,exist_ok=True)
if cleanfile:
with open(f'./diff/{localfile}', mode='w') as file:
for i in range(len(cleanfile)-1):
cleanfile[i] = f'{cleanfile[i]}\n'
for line in cleanfile:
file.write(line)
filenames = glob(student_dir + '**/*' + '.ipynb' + '*', recursive=True)
filenames = [i for i in filenames if not any([ex for ex in file_exclude if ex in i])]
for filename in filenames:
cleanfile = list()
notebook = jupytext.read(filename, fmt='py')
for cell in notebook['cells']:
if cell['cell_type'] == 'code':
source = cell['source']
for line in source.split('\n'):
code = line.strip()
if code != '' and code not in provided_code:
cleanfile.append(code)
localfile = filename.lstrip('./')
localdir = localfile.rpartition('/')[0]
if localdir != '':
Path(f'./diff/{localdir}').mkdir(parents=True,exist_ok=True)
if cleanfile:
with open(f'./diff/{localfile}', mode='w') as file:
for i in range(len(cleanfile)-1):
cleanfile[i] = f'{cleanfile[i]}\n'
for line in cleanfile:
file.write(line) | [
"deaconseals18@gmail.com"
] | deaconseals18@gmail.com |
98d962d303e316845b4a01a0847eb8e0c36ade3c | e75a40843a8738b84bd529a549c45776d09e70d9 | /samples/openapi3/client/petstore/python/test/test_outer_enum.py | aa195260019e50c396a5107af8708f89aed3f908 | [
"Apache-2.0"
] | permissive | OpenAPITools/openapi-generator | 3478dbf8e8319977269e2e84e0bf9960233146e3 | 8c2de11ac2f268836ac9bf0906b8bb6b4013c92d | refs/heads/master | 2023-09-02T11:26:28.189499 | 2023-09-02T02:21:04 | 2023-09-02T02:21:04 | 133,134,007 | 17,729 | 6,577 | Apache-2.0 | 2023-09-14T19:45:32 | 2018-05-12T09:57:56 | Java | UTF-8 | Python | false | false | 816 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import petstore_api
from petstore_api.models.outer_enum import OuterEnum # noqa: E501
from petstore_api.rest import ApiException
class TestOuterEnum(unittest.TestCase):
"""OuterEnum unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOuterEnum(self):
"""Test OuterEnum"""
inst = OuterEnum("placed")
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | OpenAPITools.noreply@github.com |
6bbd7506cb05eb4e4065865fdd18cc17fcea1b2b | 8bccc05fcb3cfc6ed93991927a514a96f53f7ec0 | /example_extender/add_mention_dummy_extender.py | de5c32d684d6884597a818c80c3c1a1b17752451 | [
"MIT"
] | permissive | afcarl/QuestionAnsweringGCN | 54101c38549405d65ef22e38fed9e5bd58122ada | e9c1987b40a553f0619fa796f692c8880de32846 | refs/heads/master | 2020-03-20T10:35:55.729170 | 2018-06-07T11:45:12 | 2018-06-07T11:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | import numpy as np
from example_reader.graph_reader.edge_type_utils import EdgeTypeUtils
class AddMentionDummyExtender:
relation_index = None
vertex_index = None
inner = None
def __init__(self, inner, relation_index, vertex_index):
self.inner = inner
self.relation_index = relation_index
self.vertex_index = vertex_index
self.edge_type_utils = EdgeTypeUtils()
def extend(self, example):
example = self.inner.extend(example)
if not example.has_mentions():
return example
mention_vertices = [None]*len(example.mentions)
mention_edges = [None]*len(example.mentions)
graph_vertex_count = example.count_vertices()
for i, mention in enumerate(example.mentions):
mention_vertices[i] = self.vertex_index.index("<mention_dummy>")
mention.dummy_index = graph_vertex_count + i
mention_edges[i] = [mention.dummy_index,
self.relation_index.index("<dummy_to_mention>"),
mention.entity_index]
mention_vertices = np.array(mention_vertices)
mention_vertex_types = np.array([[0, 0, 1, 0, 0, 0] for _ in mention_vertices], dtype=np.float32)
mention_edges = np.array(mention_edges)
example.graph.add_vertices(mention_vertices, mention_vertex_types)
example.graph.edge_types[self.edge_type_utils.index_of("mention_dummy")] = np.arange(len(mention_edges), dtype=np.int32) + example.graph.edges.shape[0]
example.graph.add_edges(mention_edges)
return example
| [
"michael.sejr@gmail.com"
] | michael.sejr@gmail.com |
b6df2c47c2e660f59205c497b027827cc1e83442 | 52e83d67c8b76f83278b61a4c0787abebfa2423c | /DeepLense/Shubham Jain/pipelines/beginner/features/redshifts_lens_and_source.py | f7fbc9325206394e42474457af943383399ac661 | [] | no_license | mlsft/gsc_tasks- | 3935142c93cebc978ff35e3f37486438c4dceeed | 84b62aa04f2333d26f8f95a7c0b24c3922bac647 | refs/heads/master | 2022-04-13T16:22:18.054908 | 2020-04-14T11:59:45 | 2020-04-14T11:59:45 | 249,394,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,271 | py | import autofit as af
import autolens as al
### PIPELINE DESCRIPTION ###
# In this pipeline, we'll demonstrate passing redshifts to a pipeline - which means that the results and images of this
# pipeline will be returned in physical unit_label (e.g. lengths in kpcs as well as arcsec, luminosities in magnitudes,
# masses in solMass, etc).
# The redshift of the lens and source are input parameters of all pipelines, and they take default values of 0.5 and
# 1.0. Thus, *all* pipelines will return physical values assuming these fiducial values if no other values are
# specified. Care must be taken interpreting the distances and masses if these redshifts are not correct or if the
# true redshifts of the lens and / or source galaxies are unknown.
# We'll perform a basic analysis which fits a lensed source galaxy using a parametric light profile where
# the lens's light is omitted. This pipeline uses two phases:
# Phase 1:
# Description: Fit the lens mass model and source light profile using x1 source.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: None
# Notes: Inputs the pipeline default redshifts where the lens has redshift 0.5, source 1.0.
# Phase 1:
# Description: Fit the lens and source model again..
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: Lens mass (model -> phase 1), source light (model -> phase 1)
# Notes: Manually over-rides the lens redshift to 1.0 and source redshift to 2.0, to illustrate the different results.
def make_pipeline(phase_folders=None, redshift_lens=0.5, redshift_source=1.0):
### SETUP PIPELINE & PHASE NAMES, TAGS AND PATHS ###
# We setup the pipeline name using the tagging module. In this case, the pipeline name is not given a tag and
# will be the string specified below. However, its good practise to use the 'tag.' function below, incase
# a pipeline does use customized tag names.
pipeline_name = "pipeline__feature"
pipeline_tag = "redshifts"
# Unlike other features, the redshifts of the lens and source do not change the setup tag and phase path. Thus,
# our output will simply go to the phase path:
# phase_path = 'phase_name/setup'
# This function uses the phase folders and pipeline name to set up the output directory structure,
# e.g. 'autolens_workspace/output/pipeline_name/pipeline_tag/phase_name/phase_tag//'
phase_folders.append(pipeline_name)
phase_folders.append(pipeline_tag)
### PHASE 1 ###
# In phase 1, we fit the lens galaxy's mass and one source galaxy, where we:
# 1) Use the input value of redshifts from the pipeline.
mass = af.PriorModel(al.mp.EllipticalIsothermal)
mass.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.1)
mass.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.1)
phase1 = al.PhaseImaging(
phase_name="phase_1__x1_source",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=redshift_lens, mass=mass, shear=al.mp.ExternalShear
),
source_0=al.GalaxyModel(
redshift=redshift_source, sersic=al.lp.EllipticalSersic
),
),
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 80
phase1.optimizer.sampling_efficiency = 0.2
### PHASE 2 ###
# In phase 2, we fit the lens galaxy's mass and two source galaxies, where we:
# 1) Use manually specified new values of redshifts for the lens and source galaxies.
phase2 = al.PhaseImaging(
phase_name="phase_2__x2_source",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=1.0,
mass=phase1.result.model.galaxies.lens.mass,
shear=phase1.result.model.galaxies.lens.shear,
),
source=al.GalaxyModel(
redshift=2.0, sersic=phase1.result.model.galaxies.source.sersic
),
),
)
phase2.optimizer.const_efficiency_mode = True
phase2.optimizer.n_live_points = 50
phase2.optimizer.sampling_efficiency = 0.3
return al.PipelineDataset(pipeline_name, phase1, phase2)
| [
"alihariri@MacBook-Air-de-Ali.local"
] | alihariri@MacBook-Air-de-Ali.local |
8c35acf2054e58652195c1bb39c97e6e4ac9ded4 | 1145b96df89a34bee419c070a286239b06d5564d | /AI/chunm18_estes17_old.py | 9218143512005a360830010fc50c2c248303ba0e | [] | no_license | NalaniKai/AI-HW5 | 4e7b514ef41927d5a30ee0c018408757026f7872 | bda1ca3117797d05685ccf4d4a9581e0097c42cd | refs/heads/master | 2020-11-29T12:00:17.654018 | 2017-04-13T21:21:46 | 2017-04-13T21:21:46 | 87,498,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,428 | py | import random
import sys
import math as math
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__)))) # nopep8
from Player import Player
import Constants as c
from Construction import CONSTR_STATS, Construction
from Ant import UNIT_STATS, Ant
from Move import Move
from GameState import addCoords, subtractCoords, GameState
import AIPlayerUtils as utils
import unittest
from Location import Location
from Inventory import Inventory
from Building import Building
import unittest
class AIPlayer(Player):
"""
Description:
The responsibility of this class is to interact with the game
by deciding a valid move based on a given game state. This class has
methods that will be implemented by students in Dr. Nuxoll's AI course.
Variables:
playerId - The id of the player.
"""
def __init__(self, inputPlayerId):
"""
Creates a new Player
Parameters:
inputPlayerId - The id to give the new player (int)
"""
super(AIPlayer, self).__init__(inputPlayerId, "Clever")
self.weights_changed = False
self.same_count_limit = 75
self.same_count = 0
self.sum_in = 0
self.network_inputs = []
self.network_weights = []
self.bias = 1
self.bias_weight = 1
self.dLim = 3 #depth limit
self.searchMult = 2.5 #generate search limits for depths
self.searchLim = [] #search limit size control
for i in range(self.dLim+1):
self.searchLim.append((i)*self.searchMult)
#@staticmethod
def score_state(self, state):
"""
score_state: Compute a 'goodness' score of a given state for the current player.
The score is computed by tallying up a total number of possible 'points',
as well as a number of 'good' points.
Various elements are weighted heavier than others, by providing more points.
Some metrics, like food difference, is weighted by difference between the two
players.
Note: This is a staticmethod, it can be called without instanceing this class.
Parameters:
state - GameState to score.
"""
enemy_id = 1 - self.playerId
our_inv = state.inventories[self.playerId]
enemy_inv = [inv for inv in state.inventories if inv.player == enemy_id].pop()
we_win = 1.0
enemy_win = 0.0
our_food = our_inv.foodCount
enemy_food = enemy_inv.foodCount
food_difference = abs(our_food - enemy_food)
our_anthill = our_inv.getAnthill()
our_tunnel = our_inv.getTunnels()
food_drop_offs = []
if len(our_tunnel) != 0:
food_drop_offs.append(our_tunnel[0].coords)
food_drop_offs.append(our_anthill.coords)
enemy_anthill = enemy_inv.getAnthill()
our_queen = our_inv.getQueen()
enemy_queen = enemy_inv.getQueen()
food = utils.getConstrList(state, None, (c.FOOD,))
# Total points possible
total_points = 1
# Good points earned
good_points = 0
# Initial win condition checks:
if (our_food == c.FOOD_GOAL or
enemy_queen is None or
enemy_anthill.captureHealth == 0):
return we_win
# Initial lose condition checks:
if (enemy_food == c.FOOD_GOAL or
our_queen is None or
our_anthill.captureHealth == 0):
return enemy_win
# Score food
total_points += (our_food + enemy_food) * 50
good_points += our_food * 50
# Differences over, say, 3 are weighted heavier
if food_difference > 3:
total_points += food_difference * 100
if our_food > enemy_food:
good_points += food_difference * 100
# Carrying food is good
food_move = 150
our_workers = [ant for ant in our_inv.ants if ant.type == c.WORKER]
# Food drop off points
dropping_off = [
ant for ant in our_workers if ant.coords in food_drop_offs and ant.carrying]
# Depositing food is even better!
if len(dropping_off) != 0:
total_points += food_move * 80
good_points += food_move * 80
picking_up = [
ant for ant in our_workers if ant.coords in food]
if len(picking_up) != 0:
total_points += food_move * 50
good_points += food_move * 50
# Worker movement
for ant in our_workers:
ant_x = ant.coords[0]
ant_y = ant.coords[1]
for enemy in enemy_inv.ants:
if ((abs(ant_x - enemy.coords[0]) > 3) and
(abs(ant_y - enemy.coords[1]) > 3)):
good_points += 30
total_points += 30
if ant.carrying and ant not in dropping_off:
# Good if carrying ants move toward a drop off.
total_points += food_move
good_points += food_move
for dist in range(2, 6):
for dropoff in food_drop_offs:
if ((abs(ant_x - dropoff[0]) < dist) and
(abs(ant_y - dropoff[1]) < dist)):
good_points += food_move - (dist * 25)
total_points += food_move - (dist * 25)
else:
if food != []:
for f in food:
x_dist = abs(ant_x - f.coords[0])
y_dist = abs(ant_y - f.coords[1])
# weighted more if closer to food
for dist in range(2, 7):
if x_dist < dist and y_dist < dist:
good_points += 70 - (dist * 10)
total_points += 70 - (dist * 10)
if len(our_workers) < 3:
good_points += 800
total_points += 800
# Raw ant numbers comparison
total_points += (len(our_inv.ants) + len(enemy_inv.ants)) * 10
good_points += len(our_inv.ants) * 10
# Weighted ant types
# Workers, first 3 are worth 10, the rest are penalized
enemy_workers = [ant for ant in enemy_inv.ants if ant.type == c.WORKER]
if len(our_workers) <= 3:
total_points += len(our_workers) * 10
good_points += len(our_workers) * 10
else:
return 0.001
total_points += len(enemy_workers) * 50
# prefer workers to not leave home range
our_range = [(x, y) for x in xrange(10) for y in xrange(5)]
if len([ant for ant in our_workers if ant.coords not in our_range]) != 0:
return .001
# Offensive ants
# Let's just say each ant is worth 20x its cost for now
offensive = [c.SOLDIER, c.R_SOLDIER, c.DRONE]
our_offense = [ant for ant in our_inv.ants if ant.type in offensive]
enemy_offense = [
ant for ant in enemy_inv.ants if ant.type in offensive]
for ant in our_offense:
ant_x = ant.coords[0]
ant_y = ant.coords[1]
attack_move = 150
good_points += UNIT_STATS[ant.type][c.COST] * 20
total_points += UNIT_STATS[ant.type][c.COST] * 20
if ant.type == c.R_SOLDIER:
good_points += 300
total_points += 300
if ant.type == c.DRONE:
good_points -= UNIT_STATS[ant.type][c.COST] * 20
total_points -= UNIT_STATS[ant.type][c.COST] * 20
# good if on enemy anthill
if ant.coords == enemy_anthill.coords:
total_points += 100
good_points += 100
for enemy_ant in enemy_inv.ants:
enemy_x = enemy_ant.coords[0]
enemy_y = enemy_ant.coords[1]
x_dist = abs(ant_x - enemy_x)
y_dist = abs(ant_y - enemy_y)
# good if attacker ant attacks
if x_dist + y_dist == 1:
good_points += attack_move * 2
total_points += attack_move * 2
# weighted more if closer to attacking
for dist in xrange(1, 8):
if x_dist < dist and y_dist < dist:
good_points += attack_move - (dist * 20)
total_points += attack_move - (dist * 20)
for ant in enemy_offense:
total_points += UNIT_STATS[ant.type][c.COST] * 60
# Stop building if we have more than 5 ants
if len(our_inv.ants) > 5:
return .001
# Queen stuff
# Queen healths, big deal, each HP is worth 100!
total_points += (our_queen.health + enemy_queen.health) * 100
good_points += our_queen.health * 100
queen_coords = our_queen.coords
if queen_coords in food_drop_offs or queen_coords[1] > 2 or queen_coords in food:
# Stay off food_drop_offs and away from the front lines.
total_points += 80
# queen attacks if under threat
for enemy_ant in enemy_inv.ants:
enemy_x = enemy_ant.coords[0]
enemy_y = enemy_ant.coords[1]
x_dist = abs(queen_coords[0] - enemy_x)
y_dist = abs(queen_coords[1] - enemy_y)
if (x_dist + y_dist) == 1:
good_points += 100
total_points += 100
# Anthill stuff
total_points += (our_anthill.captureHealth +
enemy_anthill.captureHealth) * 100
good_points += our_anthill.captureHealth * 100
return float(good_points) / float(total_points)
def evaluate_nodes(self, nodes, agents_turn):
"""Evalute a list of Nodes and returns the best score."""
if agents_turn:
return max(nodes, key=lambda node: node.score)
return min(nodes, key=lambda node: node.score)
def getPlacement(self, currentState):
    """
    getPlacement:
        Action taken on setup phase 1 and setup phase 2 of the game.
        In setup phase 1, return 11 coordinates on our side of the board
        (anthill, tunnel and 9 grass pieces).  In setup phase 2, return
        2 coordinates on the opponent's side for the food sources.

    Parameters:
        currentState - The current state of the game at the time the Game
            is requesting a placement from the player. (GameState)

    Return: If setup phase 1: list of eleven 2-tuples of ints
            If setup phase 2: list of two 2-tuples of ints
    """
    if currentState.phase == c.SETUP_PHASE_1:  # stuff on my side
        return self._random_placements(currentState, 11, 0, 3)
    elif currentState.phase == c.SETUP_PHASE_2:  # stuff on foe's side
        return self._random_placements(currentState, 2, 6, 9)
    else:
        return [(0, 0)]

def _random_placements(self, currentState, numToPlace, yMin, yMax):
    """Pick numToPlace distinct empty coordinates with y in [yMin, yMax].

    Bug fixed: the original contained the statement
    `currentState.board[x][y].constr is True`, a no-op identity
    comparison (presumably meant as an assignment to mark the square
    occupied).  The duplicate-coordinate check below already guarantees
    distinct placements, so the no-op is simply removed rather than
    turned into a mutation of the caller's state.
    """
    moves = []
    while len(moves) < numToPlace:
        # Choose any x location, and a y location within the allowed rows.
        x = random.randint(0, 9)
        y = random.randint(yMin, yMax)
        # Use the space only if it is empty and not already chosen.
        if currentState.board[x][y].constr is None and (x, y) not in moves:
            moves.append((x, y))
    return moves
def adjust_weights(self, target, actual):
    """
    Back-propagate the prediction error through the single-layer network.

    Applies the perceptron delta rule with a sigmoid derivative term:
    delta = error * g * (1 - g), then nudges each weight by
    delta * input.

    Parameters:
        target - the output of the eval function score_state
        actual - the output of the neural network
    """
    g = self.calc_g()
    delta = (target - actual) * g * (1 - g)
    for idx, (weight, signal) in enumerate(zip(self.network_weights,
                                               self.network_inputs)):
        updated = weight + delta * signal
        # Keep the previous weight if the update would zero it out.
        if updated != 0.0:
            self.network_weights[idx] = updated
def calc_g(self):
    """Return the logistic sigmoid of self.sum_in.

    Numerically stable form: the original computed
    1 / (1 + exp(-sum_in)), which raises OverflowError in math.exp for
    sum_in below roughly -710.  Using the algebraically equivalent
    exp(x) / (1 + exp(x)) branch for negative inputs avoids that while
    returning identical values everywhere else.
    """
    x = self.sum_in
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # exp(-x) would overflow for very negative x; this form underflows
    # gracefully to 0.0 instead.
    z = math.exp(x)
    return z / (1 + z)
def getMove(self, currentState):
    """
    Pick the agent's next move.

    Wraps the current state in a root Node and runs the alpha-beta
    search (expand) down to the configured depth limit self.dLim.

    Parameters:
        currentState - The current state of the game at the time the Game
            is requesting a move from the player. (GameState)

    Return: the chosen Move; an END-turn Move when the search returns
        nothing.
    """
    root = Node(None, currentState)
    root.beta = -2
    root.alpha = 2
    self.weights_changed = False
    chosen = self.expand(root, self.dLim, True, -2, 2)
    return chosen if chosen is not None else Move(c.END, None, None)
def getAttack(self, currentState, attackingAnt, enemyLocations):
"""
Description:
Gets the attack to be made from the Player
Parameters:
current_state - The current state of the game at the time the
Game is requesting a move from the player. (GameState)
attackingAnt - A clone of the ant currently making the attack. (Ant)
enemyLocation - A list of coordinate locations for valid attacks
(i.e. enemies within range) ([list of 2-tuples of ints])
Return: A coordinate that matches one of the entries of enemyLocations.
((int,int))
"""
# Attack a random enemy.
return enemyLocations[random.randint(0, len(enemyLocations) - 1)]
def neural_network(self, currentState):
    """
    Encode currentState as network inputs and evaluate the single-layer
    perceptron.

    On first use (when the weight list length differs from the input
    length) the weights are seeded from a pre-trained constant table.

    Parameters:
        currentState - the game state being looked at

    Return: sigmoid activation of the weighted input sum.
    """
    self.fill_inputs(currentState)
    correct_weights = [-0.19989987869, 0.575296537021, -0.790157016078, 0.122348581121, 0.439977493331, 0.0945533634476, 0.193257595531,
                       0.608610635316, -0.547091789577, -0.556154410349, 0.664053540487, 1.28211813459, -2.3348782998, -0.630480359892, -0.658038267324,
                       0.279114055434, 0.285908744918, 0.288456116063, -0.674821787348, 0.196087050865, 0.239985502563, 0.766111417156,
                       -0.644112336034, 0.262415516703, -0.567031131381, -0.0227533193014, 0.321295510228, 0.394677078019, 0.43694189521, -0.886360727604,
                       0.116928308988, 0.280371143609, -0.680139242929, 0.274653505746]
    if len(self.network_weights) != len(self.network_inputs):
        # Seed one pre-trained weight per input.
        for idx in range(len(self.network_inputs)):
            self.network_weights.append(correct_weights[idx])
    # Weighted sum of the inputs feeding the output node.
    self.sum_in = sum(w * signal for w, signal
                      in zip(self.network_weights, self.network_inputs))
    return self.calc_g()
def fill_inputs(self, currentState):
    """Rebuild self.network_inputs from currentState.

    For our player and then the opponent, appends one-hot bucket
    encodings of: food count, queen health, anthill capture health,
    worker count and offensive-ant count (in that order).
    """
    self.network_inputs = []
    offensive_types = (c.SOLDIER, c.R_SOLDIER, c.DRONE)
    for player in (self.playerId, 1 - self.playerId):
        inv = currentState.inventories[player]
        worker_count = sum(1 for ant in inv.ants if ant.type == c.WORKER)
        attacker_count = sum(1 for ant in inv.ants
                             if ant.type in offensive_types)
        self.food_health_inputs(inv.foodCount, 3, 6)
        self.food_health_inputs(inv.getQueen().health, 3, 6)
        self.food_health_inputs(inv.getAnthill().captureHealth, 1, 2)
        self.ant_inputs(worker_count, 0, 1, 3)
        self.ant_inputs(attacker_count, 0, 1, 3)
def ant_inputs(self, obj, val1, val2, val3):
    """Append a 4-wide one-hot bucket encoding of the count `obj`.

    Buckets: <= val1, <= val2, <= val3, and above val3.
    """
    if obj <= val1:
        bucket = 0
    elif obj <= val2:
        bucket = 1
    elif obj <= val3:
        bucket = 2
    else:
        bucket = 3
    encoding = [0, 0, 0, 0]
    encoding[bucket] = 1
    self.insert_inputs(4, encoding)
def food_health_inputs(self, obj, val1, val2):
    """Append a 3-wide one-hot bucket encoding of the quantity `obj`.

    Buckets: <= val1, <= val2, and above val2.
    """
    if obj <= val1:
        pattern = [1, 0, 0]
    elif obj <= val2:
        pattern = [0, 1, 0]
    else:
        pattern = [0, 0, 1]
    self.insert_inputs(3, pattern)
def insert_inputs(self, size, vals):
    """Append the first `size` entries of vals to the network inputs."""
    self.network_inputs.extend(vals[:size])
def expand(self, node, depth, maxPlayer, a, b):
    '''
    Description: Recursive minimax-style search with alpha-beta cutoffs
        that looks for the move producing the best state at the depth
        limit.  Moves are pruned randomly (proportionally to the
        remaining depth) to keep the branching factor manageable, and
        every generated child state is also used to train the neural
        network online against the hand-written evaluation function.
    Parameters:
        node - Node wrapping the state to expand
        depth - remaining search depth (self.dLim at the root)
        maxPlayer - True when this level maximises for the agent
        a, b - alpha and beta bounds for pruning
    Return: at the root (depth == self.dLim) the best Move;
        at interior nodes and leaves, the best score found.
    '''
    # if depth = 0 or node is terminal return heuristic
    if depth == 0:
        node.score = self.score_state(node.nextState)
        return node.score
    #get all possible moves for the current player
    moves = utils.listAllLegalMoves(node.nextState)
    # Drop tunnel builds and worker moves along queen-unsafe paths.
    badmoves = []
    for i in moves:
        if (i.moveType == c.BUILD and i.buildType == c.TUNNEL) or \
           (i.moveType == c.MOVE_ANT and not utils.isPathOkForQueen(i.coordList) and\
           utils.getAntAt(node.nextState, i.coordList[0]).type == c.WORKER):
            badmoves.append(i)
    for m in badmoves:
        moves.remove(m)
    # prune moves randomly
    random.shuffle(moves)
    # NOTE(review): Python-2 integer division; under Python 3 this slice
    # index becomes a float and raises TypeError (the file also uses
    # xrange elsewhere, so Py2 appears intended).
    moves = moves[0:(len(moves)*depth)/self.dLim]
    #generate a list of all next game states
    gameStates = []
    for m in moves:
        gameStates.append(self.getNextStateAdversarial(node.nextState,m))
    childrentemp = []
    children = []
    random.shuffle(gameStates)
    self.same_count = 0
    for n in range(len(gameStates)):
        score = self.score_state(gameStates[n]) #eval ftn
        network_score = self.neural_network(gameStates[n]) #call neural network
        # Online training: nudge the network toward the evaluation
        # function whenever the two disagree by more than 0.03.
        if(abs(score - network_score) > .03):
            self.weights_changed = True
            self.adjust_weights(score, network_score)
        else:
            self.same_count += 1
            # Dump the weights once the network agrees often enough.
            if(self.same_count > self.same_count_limit):
                print("weights start")
                for x in range(0, len(self.network_weights)):
                    print(self.network_weights[x])
                print("weights end")
        childrentemp.append([score,Node(moves[n], gameStates[n], score, node)])
    # Keep only the best searchLim[depth] children: highest scores when
    # it is our turn, lowest when it is the opponent's.
    childrentemp = sorted(childrentemp, key=lambda x: x[0])
    if self.playerId == node.nextState.whoseTurn:
        childrentemp = reversed(childrentemp)
    for n in childrentemp:
        if len(children) >= self.searchLim[depth]:
            break
        children.append(n[1])
    random.shuffle(children)
    # if depth = 0 or node is terminal return heuristic
    if len(children) == 0:
        node.score = self.score_state(node.nextState)
        return node.score
    if maxPlayer:
        node.score = -2
        for child in children:
            v = self.expand(child, depth - 1, child.nextState.whoseTurn == self.playerId, a,b)
            node.score = max(v, node.score)
            if node.score >= b:
                # Beta cutoff.
                if depth == self.dLim:
                    # NOTE(review): 'childnode' is not defined anywhere in
                    # this method — this raises NameError if the cutoff
                    # fires at the root; probably meant child.move.  Left
                    # unchanged pending confirmation.
                    return childnode.move
                return node.score
            a = max(a, child.score)
        if depth == self.dLim:
            return self.evaluate_nodes(children, True).move
        return self.evaluate_nodes(children, True).score#node.score
    else:
        node.score = 2
        for child in children:
            v = self.expand(child, depth - 1, child.nextState.whoseTurn == self.playerId,a,b)
            node.score = min(v, node.score)
            if node.score <= a:
                # Alpha cutoff.
                return node.score
            b = min(b, node.score)
        return self.evaluate_nodes(children, False).score
def getNextStateAdversarial(self, currentState, move):
    '''
    Version of getNextStateAdversarial that calls this class' getNextState
    Description: This is the same as getNextState (above) except that it properly
    updates the hasMoved property on ants and the END move is processed correctly.
    Parameters:
        currentState - A clone of the current state (GameState)
        move - The move that the agent would take (Move)
    Return: A clone of what the state would look like if the move was made
    '''
    # variables I will need
    nextState = self.getNextState(currentState, move)
    myInv = utils.getCurrPlayerInventory(nextState)
    myAnts = myInv.ants
    # If an ant is moved update their coordinates and has moved
    if move.moveType == c.MOVE_ANT:
        startingCoord = move.coordList[0]
        # NOTE(review): getNextState has already moved this ant to its
        # destination, so a lookup by the *starting* coordinate may never
        # match — confirm whether coordList[-1] was intended.
        for ant in myAnts:
            if ant.coords == startingCoord:
                ant.hasMoved = True
    elif move.moveType == c.END:
        # Ending the turn clears every move flag and hands the turn over.
        for ant in myAnts:
            ant.hasMoved = False
        nextState.whoseTurn = 1 - currentState.whoseTurn;
    return nextState
@staticmethod
def getNextState(currentState, move):
    """
    Version of genNextState with food carrying bug fixed.
    Description: Creates a copy of the given state and modifies the inventories in
    it to reflect what they would look like after a given move. For efficiency,
    only the inventories are modified and the board is set to None. The original
    (given) state is not modified.
    Parameters:
        currentState - A clone of the current state (GameState)
        move - The move that the agent would take (Move)
    Return: A clone of what the state would look like if the move was made
    """
    # variables I will need
    myGameState = currentState.fastclone()
    myInv = utils.getCurrPlayerInventory(myGameState)
    me = myGameState.whoseTurn
    myAnts = myInv.ants
    # If enemy ant is on my anthill or tunnel update capture health
    myTunnels = myInv.getTunnels()
    myAntHill = myInv.getAnthill()
    for myTunnel in myTunnels:
        ant = utils.getAntAt(myGameState, myTunnel.coords)
        if ant is not None:
            opponentsAnts = myGameState.inventories[not me].ants
            if ant in opponentsAnts:
                myTunnel.captureHealth -= 1
    if utils.getAntAt(myGameState, myAntHill.coords) is not None:
        ant = utils.getAntAt(myGameState, myAntHill.coords)
        opponentsAnts = myGameState.inventories[not me].ants
        if ant in opponentsAnts:
            myAntHill.captureHealth -= 1
    # If an ant is built update list of ants
    antTypes = [c.WORKER, c.DRONE, c.SOLDIER, c.R_SOLDIER]
    if move.moveType == c.BUILD:
        if move.buildType in antTypes:
            ant = Ant(myInv.getAnthill().coords, move.buildType, me)
            myInv.ants.append(ant)
            # Update food count depending on ant built
            if move.buildType == c.WORKER:
                myInv.foodCount -= 1
            elif move.buildType == c.DRONE or move.buildType == c.R_SOLDIER:
                myInv.foodCount -= 2
            elif move.buildType == c.SOLDIER:
                myInv.foodCount -= 3
    # If a building is built update list of buildings and the update food
    # count
    if move.moveType == c.BUILD:
        if move.buildType == c.TUNNEL:
            building = Construction(move.coordList[0], move.buildType)
            myInv.constrs.append(building)
            myInv.foodCount -= 3
    # If an ant is moved update their coordinates and has moved
    if move.moveType == c.MOVE_ANT:
        newCoord = move.coordList[len(move.coordList) - 1]
        startingCoord = move.coordList[0]
        for ant in myAnts:
            if ant.coords == startingCoord:
                ant.coords = newCoord
                ant.hasMoved = False
                # If an ant is carrying food and ends on the anthill or tunnel
                # drop the food
                if ant.carrying and ant.coords == myInv.getAnthill().coords:
                    myInv.foodCount += 1
                    # ant.carrying = False
                    # NOTE(review): the carrying flag is deliberately left
                    # True (the reset is commented out) — presumably the
                    # "carrying bug fix" named in the docstring; confirm.
                for tunnels in myTunnels:
                    if ant.carrying and (ant.coords == tunnels.coords):
                        myInv.foodCount += 1
                        # ant.carrying = False
                # If an ant doesn't have food and ends on the food grab
                # food
                if not ant.carrying:
                    foods = utils.getConstrList(
                        myGameState, None, (c.FOOD,))
                    for food in foods:
                        if food.coords == ant.coords:
                            ant.carrying = True
                # If my ant is close to an enemy ant attack it
                # (workers never attack)
                if ant.type == c.WORKER:
                    continue
                adjacentTiles = utils.listAdjacent(ant.coords)
                for adj in adjacentTiles:
                    # If ant is adjacent my ant
                    if utils.getAntAt(myGameState, adj) is not None:
                        closeAnt = utils.getAntAt(myGameState, adj)
                        if closeAnt.player != me:  # if the ant is not me
                            closeAnt.health = closeAnt.health - \
                                UNIT_STATS[ant.type][c.ATTACK]  # attack
                            # If an enemy is attacked and looses all its health remove it from the other players
                            # inventory
                            if closeAnt.health <= 0:
                                enemyAnts = myGameState.inventories[
                                    not me].ants
                                for enemy in enemyAnts:
                                    if closeAnt.coords == enemy.coords:
                                        myGameState.inventories[
                                            not me].ants.remove(enemy)
                            # If attacked an ant already don't attack any
                            # more
                            break
    return myGameState
##
# Node: a single node in AIPlayer's search tree.
##
class Node:
    """
    Search-tree node.

    Bundles the Move that produced nextState with that state's heuristic
    score, plus parent/child links and the alpha/beta bounds used by
    AIPlayer.expand().
    """

    def __init__(self, move=None, nextState=None, score=0, parent=None, child=None):
        self.move = move
        self.nextState = nextState
        self.score = score
        self.parent = parent
        self.child = child
        self.beta = None
        self.alpha = None
class Unit_Tests(unittest.TestCase):
    """Sanity tests for AIPlayer's neural-network helpers.

    A minimal playable GameState is built by the setup helpers below
    (setup_state / place_items / setup_play / create_state) instead of
    running a full game.
    """

    def test_neural_network(self):
        # The network output is a sigmoid activation, so it must be a float.
        ai = AIPlayer(0)
        self.state = self.create_state(ai)
        output = ai.neural_network(self.state)
        self.assertTrue(type(output) is float)

    def test_adjust_weights(self):
        #target - o/p out neural net, actual - o/p of eval ftn
        ai = AIPlayer(0)
        self.state = self.create_state(ai)
        output_neural = ai.neural_network(self.state)
        output_eval_ftn = ai.score_state(self.state)
        ai.adjust_weights(output_neural, output_eval_ftn)
        # Every weight must still be a float after back-propagation.
        for x in range(0, len(ai.network_weights)):
            self.assertTrue(type(ai.network_weights[x]) is float)

    def setup_state(self):
        # Build an empty board and empty inventories in setup phase 1.
        board = [[Location((col, row)) for row in xrange(0,c.BOARD_LENGTH)] for col in xrange(0,c.BOARD_LENGTH)]
        p1Inventory = Inventory(c.PLAYER_ONE, [], [], 0)
        p2Inventory = Inventory(c.PLAYER_TWO, [], [], 0)
        neutralInventory = Inventory(c.NEUTRAL, [], [], 0)
        return GameState(board, [p1Inventory, p2Inventory, neutralInventory], c.SETUP_PHASE_1, c.PLAYER_ONE)

    def place_items(self, piece, constrsToPlace, state):
        #translate coords to match player
        piece = state.coordLookup(piece, state.whoseTurn)
        #get construction to place
        constr = constrsToPlace.pop(0)
        #give constr its coords
        constr.coords = piece
        #put constr on board
        state.board[piece[0]][piece[1]].constr = constr
        if constr.type == c.ANTHILL or constr.type == c.TUNNEL:
            #update the inventory
            state.inventories[state.whoseTurn].constrs.append(constr)
        else: #grass and food
            state.inventories[c.NEUTRAL].constrs.append(constr)

    def setup_play(self, state):
        # Place each player's queen on their anthill and a worker on
        # their tunnel, give starting food, then enter the play phase.
        p1inventory = state.inventories[c.PLAYER_ONE]
        p2inventory = state.inventories[c.PLAYER_TWO]
        #get anthill coords
        p1AnthillCoords = p1inventory.constrs[0].coords
        p2AnthillCoords = p2inventory.constrs[0].coords
        #get tunnel coords
        p1TunnelCoords = p1inventory.constrs[1].coords
        p2TunnelCoords = p2inventory.constrs[1].coords
        #create queen and worker ants
        p1Queen = Ant(p1AnthillCoords, c.QUEEN, c.PLAYER_ONE)
        p2Queen = Ant(p2AnthillCoords, c.QUEEN, c.PLAYER_TWO)
        p1Worker = Ant(p1TunnelCoords, c.WORKER, c.PLAYER_ONE)
        p2Worker = Ant(p2TunnelCoords, c.WORKER, c.PLAYER_TWO)
        #put ants on board
        state.board[p1Queen.coords[0]][p1Queen.coords[1]].ant = p1Queen
        state.board[p2Queen.coords[0]][p2Queen.coords[1]].ant = p2Queen
        state.board[p1Worker.coords[0]][p1Worker.coords[1]].ant = p1Worker
        state.board[p2Worker.coords[0]][p2Worker.coords[1]].ant = p2Worker
        #add the queens to the inventories
        p1inventory.ants.append(p1Queen)
        p2inventory.ants.append(p2Queen)
        p1inventory.ants.append(p1Worker)
        p2inventory.ants.append(p2Worker)
        #give the players the initial food
        p1inventory.foodCount = 1
        p2inventory.foodCount = 1
        #change to play phase
        state.phase = c.PLAY_PHASE

    def create_state(self, ai):
        """Drive the AI's getPlacement through both setup phases and
        return a ready-to-play GameState (player one to move)."""
        self.state = self.setup_state()
        players = [c.PLAYER_ONE, c.PLAYER_TWO]
        for player in players:
            self.state.whoseTurn = player
            constrsToPlace = []
            constrsToPlace += [Building(None, c.ANTHILL, player)]
            constrsToPlace += [Building(None, c.TUNNEL, player)]
            constrsToPlace += [Construction(None, c.GRASS) for i in xrange(0,9)]
            setup = ai.getPlacement(self.state)
            for piece in setup:
                self.place_items(piece, constrsToPlace, self.state)
            self.state.flipBoard()
        self.state.phase = c.SETUP_PHASE_2
        for player in players:
            self.state.whoseTurn = player
            constrsToPlace = []
            constrsToPlace += [Construction(None, c.FOOD) for i in xrange(0,2)]
            setup = ai.getPlacement(self.state)
            for food in setup:
                self.place_items(food, constrsToPlace, self.state)
            self.state.flipBoard()
        self.setup_play(self.state)
        self.state.whoseTurn = c.PLAYER_ONE
        return self.state
def main():
    """Entry point: run this module's unittest suite."""
    unittest.main()


if __name__ == '__main__':
    main()
| [
"megan.nalani@gmail.com"
] | megan.nalani@gmail.com |
4856bde0b0b864ee66218ab2cf5abb1934f118c2 | 27bdcba25df8b2416783d8a1229bfce08dc77189 | /tests/util/httpretty/test_decorator.py | d2ccd74525dfd97109047417dea28c64ee280b8a | [
"Apache-2.0"
] | permissive | BenjamenMeyer/stackInABox | 5fbeab6aac38c52d5360f9dbabb9101447e32eb5 | 15586e61a2013b6f4997c652e8412a1784f8fc93 | refs/heads/master | 2022-04-01T01:04:33.103603 | 2021-01-09T05:52:55 | 2021-01-09T05:52:55 | 30,074,880 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,522 | py | """
Stack-In-A-Box: Basic Test
"""
import collections
import sys
import types
import unittest
import requests
from stackinabox.util.httpretty import decorator
from tests.util import base
from tests.utils.services import AdvancedService
from tests.utils.hello import HelloService
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorErrors(base.UtilTestCase):
    """Error paths of the httpretty activate decorator."""

    def test_basic(self):
        # process_service must reject objects that are not stackinabox
        # services when raise_on_type is set.
        decor_instance = decorator.activate('localhost')
        with self.assertRaises(TypeError):
            decor_instance.process_service({}, raise_on_type=True)

    @decorator.stack_activate('localhost', HelloService())
    def test_deprecated(self):
        # The deprecated stack_activate alias must still wire up the service.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecorator(base.UtilTestCase):
    """Happy-path tests for the httpretty activate decorator."""

    @decorator.activate('localhost', HelloService())
    def test_basic(self):
        # The decorator registers HelloService under http://localhost/.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')

    @decorator.activate('localhost', HelloService(),
                        200, value='Hello')
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra decorator args/kwargs are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)

    @decorator.activate('localhost', HelloService(),
                        200, value='Hello',
                        access_services="stack")
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services exposes the registered service dict via `stack`.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyAdvancedWithDecorator(base.UtilTestCase):
    """Exercises AdvancedService's routes through the decorator."""

    @decorator.activate('localhost', AdvancedService())
    def test_basic(self):
        res = requests.get('http://localhost/advanced/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
        res = requests.get('http://localhost/advanced/h')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Good-Bye')
        # Query parameters are echoed back per-key by the service.
        expected_result = {
            'bob': 'bob: Good-Bye alice',
            'alice': 'alice: Good-Bye bob',
            'joe': 'joe: Good-Bye jane'
        }
        res = requests.get('http://localhost/advanced/g?bob=alice;'
                           'alice=bob&joe=jane')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.json(), expected_result)
        # Regex-routed endpoint: all-digit ids match, underscore does not.
        res = requests.get('http://localhost/advanced/1234567890')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'okay')
        res = requests.get('http://localhost/advanced/_234567890')
        self.assertEqual(res.status_code, 595)
        # Unsupported HTTP method and unknown sub-service error codes.
        res = requests.put('http://localhost/advanced/h')
        self.assertEqual(res.status_code, 405)
        res = requests.put('http://localhost/advanced2/i')
        self.assertEqual(res.status_code, 597)
def httpretty_generator():
    """Yield the test services one at a time (generator-input variant)."""
    yield HelloService()
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorAndGenerator(base.UtilTestCase):
    """Same decorator tests, but services supplied via a generator."""

    def test_verify_generator(self):
        # Sanity check: the fixture really is a generator.
        self.assertIsInstance(httpretty_generator(), types.GeneratorType)

    @decorator.activate(
        'localhost',
        httpretty_generator()
    )
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')

    @decorator.activate(
        'localhost',
        httpretty_generator(),
        200, value='Hello'
    )
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra decorator args/kwargs are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)

    @decorator.activate(
        'localhost',
        httpretty_generator(),
        200, value='Hello',
        access_services="stack"
    )
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services exposes the registered service dict via `stack`.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
def httpretty_list():
    """Return the test services as a list (list-input variant)."""
    return [
        HelloService()
    ]
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorAndList(base.UtilTestCase):
    """Same decorator tests, but services supplied via a list."""

    def test_verify_list(self):
        # NOTE(review): collections.Iterable moved to collections.abc and
        # was removed in Python 3.10; harmless here because this class is
        # skipped on Python 3 anyway — confirm before porting.
        self.assertIsInstance(httpretty_list(), collections.Iterable)

    @decorator.activate(
        'localhost',
        httpretty_list()
    )
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')

    @decorator.activate(
        'localhost',
        httpretty_list(),
        200, value='Hello'
    )
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra decorator args/kwargs are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)

    @decorator.activate(
        'localhost',
        httpretty_list(),
        200, value='Hello',
        access_services="stack"
    )
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services exposes the registered service dict via `stack`.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
| [
"bm_witness@yahoo.com"
] | bm_witness@yahoo.com |
b6ee240936b79843ff0fcec2a5ea0a3952af3fb6 | fd86f86950d1ee18336b391a5d7e77825fa284ab | /orders/migrations/0024_auto_20191206_1005.py | 9b7d116defdcc574496dd875f55d301ee9cc729c | [] | no_license | vijay7979/proj3 | 03c45ef33ce1f2196a1e76dfd5a7b1975003b052 | 43774d8984361366421321db372d8542451a1c60 | refs/heads/master | 2022-12-06T17:55:15.152442 | 2020-01-22T08:10:41 | 2020-01-22T08:10:41 | 231,880,467 | 0 | 0 | null | 2022-11-22T02:24:48 | 2020-01-05T07:01:14 | Python | UTF-8 | Python | false | false | 692 | py | # Generated by Django 2.1.5 on 2019-12-06 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-12-06).

    Drops the unused ``size`` field from Pasta and Salad and adds an
    optional ``additions`` foreign key to Sub.
    """

    dependencies = [
        ('orders', '0023_auto_20191205_2124'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='pasta',
            name='size',
        ),
        migrations.RemoveField(
            model_name='salad',
            name='size',
        ),
        migrations.AddField(
            model_name='sub',
            name='additions',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='orders.Addition'),
        ),
    ]
| [
"vijay@saturnsystems.com.my"
] | vijay@saturnsystems.com.my |
49925c76cad1aa2bce0fe243f7a98de849a7f0b7 | e8315fb57c1ca507d413c7db88631d28bbd8a4a8 | /post/models.py | a23e00294de8616fd2104645721c96c09e0a1670 | [] | no_license | Ogeneral/website | a4119b05d91209518c928ef6af357ad921719a8d | ff65bb45c75ee66759462d9c5ccc8f302c90144d | refs/heads/master | 2020-08-17T16:20:25.350802 | 2019-10-17T23:09:50 | 2019-10-17T23:09:50 | 215,676,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
    """A user-submitted blog post."""

    # Display name typed by the author (free text; not linked to auth.User
    # even though django.contrib.auth.models.User is imported above).
    Your_name = models.CharField(max_length=200)
    # Set once, automatically, when the row is first created.
    pub_date = models.DateTimeField('date published', auto_now_add=True)
    Your_Post_title = models.CharField(max_length=200)
    Your_Post = models.TextField(max_length=2000)
    # Manually-managed integer primary key (not an AutoField); callers must
    # supply unique ids themselves.  NOTE(review): confirm this is intended.
    id = models.IntegerField(primary_key=True)

    def __str__(self):
        # NOTE(review): returns the post *body*, not the title — confirm
        # this is the intended admin/listing representation.
        return self.Your_Post
class Comment(models.Model):
    """A comment attached to a Post."""

    name = models.CharField(max_length=42)
    text = models.TextField()
    # NOTE(review): OneToOneField allows at most one comment per post;
    # confirm a ForeignKey (many comments per post) was not intended.
    post = models.OneToOneField(Post, on_delete=models.CASCADE)
    # Set once, automatically, when the comment is created.
    created_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.text
| [
"adesiyanoladipo@gmail.com"
] | adesiyanoladipo@gmail.com |
44169eb3847ec870ceabdbf7343dc31c946a5041 | ba2eea85cef560c54d5cb4af0e4f2c7c3ee3eb2f | /nesfr3_workspace/catkin_ws/devel/lib/python2.7/dist-packages/hdl_people_tracking/msg/__init__.py | 25469b939f284ee573515fba8efc8d24068cf7de | [] | no_license | HiSeun/nesfr3_selfwork | 0c782597ffd66d736d53ae05594d23fa7f1d9a85 | 855d43117a235462335c6693b334e7a6235d1d31 | refs/heads/master | 2023-02-08T07:33:15.637998 | 2021-01-05T08:48:45 | 2021-01-05T08:48:45 | 326,935,430 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from ._Cluster import *
from ._ClusterArray import *
from ._Track import *
from ._TrackArray import *
| [
"ksw05088@naver.com"
] | ksw05088@naver.com |
fd9be4ed5b8e96adc9b7c7179e75e6a561966ed9 | e8020d8d1c4503193b92b351a1a550de1a0f5cb0 | /features/steps/filter_games.py | 3313eb448c875d5e5d35a60c4c642d564a82e0ec | [] | no_license | gejurier/AcceptanceTesting | 4b50311a7045bb839fdad0cfde9c5ab93ae0c9d9 | 95832306c180eb8a5d6fbb21454b6a4f91a6663d | refs/heads/main | 2023-07-10T07:15:04.974784 | 2021-08-15T01:58:12 | 2021-08-15T01:58:12 | 396,184,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | from behave import *
from src.Game import *
from src.Catalogue import *
#Condiciones antes de empezar cualquier STEP
def before_scenario(context, scenario):
    # NOTE(review): rebinding the local name `context` has no effect on the
    # behave context object, so this hook is currently a no-op; behave also
    # only invokes before_scenario from environment.py, not from a steps
    # module — confirm intent.
    context = {}
@given("a set of games")
def step_impl(context):
game_list = []
for row in context.table:
elenco = []
idiomas = []
game = Game(row['NAME'], row['RELEASE DATE'], row['DEVELOPER'], row['RATE'])
game_list.append(game)
context.games = game_list
@given('the user enters the name: {name}')
def step_impl(context, name):
    # Remember the search term for the When step.
    context.name = name
@given('the user selects the rating: {rating}')
def step_impl(context, rating):
    # Remember the selected rating (still a string here) for the When step.
    context.rating = rating
@given('the user enters a studio: {studio}')
def step_impl(context, studio):
    # Remember the studio/developer filter for the When step.
    context.studio = studio
@when("the user search games by {criteria}")
def step_impl(context, criteria):
if(criteria == 'name'):
result, message = get_game_name(context.games, context.name)
print(result)
context.result = result
context.message = message
elif(criteria == 'rating'):
result, message, error = get_game_rating(context.games, context.rating)
print(result)
context.result = result
context.message = message
if error!=None:
context.message = error
else:
context.error = error
elif(criteria == 'studio'):
result, message = get_game_developer(context.games, context.studio)
print(result)
context.result = result
context.message = message
@then("{total} games will match")
def step_impl(context, total):
assert len(context.result) == int(total)
@then("the names of these games are")
def step_impl(context):
expected_games = True
result_games = []
for row in context.table:
result_games.append(row['NAME'])
for game in context.result:
if game.name not in result_games:
print("No game " + game.name)
expected_games = False
assert expected_games is True
@then("the following message is displayed: {message}")
def step_impl(context, message):
print(message)
print(context.message)
assert context.message == message
| [
"julianariera1998@gmail.com"
] | julianariera1998@gmail.com |
63dc99d84353e4b326e0d3cee7bc532a9b973e2d | d16dd92e46304afd6fe026861a64efd25687b1be | /05 Funções/04.py | 52480a5f982888071699d186525d79043d1d93b0 | [] | no_license | carlosalbertoestrela/Lista-de-exercios-Python | 6c51eb131e79dc5c50b55816209cfe6326f3a591 | f7edd962c6b79ab621d624d2a23e20de1e0635ee | refs/heads/master | 2022-12-12T11:36:06.236246 | 2020-09-18T02:06:21 | 2020-09-18T02:06:21 | 292,004,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
04) Faça um programa, com uma função que necessite de um argumento.
A função retorna o valor de caractere ‘P’, se seu argumento for positivo, e ‘N’, se seu argumento for zero ou negativo.
"""
def teste_p_n(num):
if num > 0:
return 'P'
else:
return 'N'
# Read a number from the user and report 'P' (positive) or 'N'
# (zero/negative).
n = int(input('Digite um número para saber se é N - negativo, P - positivo: '))
print(f'O resultado é: {teste_p_n(n)}')
| [
"carlos.alberto-estrela@hotmail.com"
] | carlos.alberto-estrela@hotmail.com |
e83de71bd10a5bcc7fd572a2a34f005b4128c01d | d5c50527791c1514ab3674273036f036d78d1d3c | /assignment1/cs231n/classifiers/softmax.py | e33356ed54cb3fd5f4e1b69f3d56fad5c4621a27 | [] | no_license | Jaycolas/CS231n | d6c408d02a164ac460ffdef1988c6e4b4e441215 | 6170ca7a1898f3d328110751db3e315f6194dd0f | refs/heads/master | 2021-01-21T06:33:01.586491 | 2017-03-07T06:41:42 | 2017-03-07T06:41:42 | 83,252,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    loss = 0.0
    dW = np.zeros_like(W)
    num_classes = W.shape[1]
    num_train = X.shape[0]

    for i in range(num_train):
        # Unnormalized log-probabilities for example i, shifted by their
        # max for numeric stability (does not change the softmax output).
        f = X[i].dot(W)
        f -= np.max(f)
        # Hoisted out of the inner class loop: the original recomputed
        # np.exp(f) and its sum for every class of every example.
        exp_f = np.exp(f)
        sum_exp = np.sum(exp_f)
        for j in range(num_classes):
            # d(loss_i)/dW[:, j] = (p_j - 1{j == y_i}) * x_i
            prob_j = exp_f[j] / sum_exp
            dW[:, j] += (prob_j - (1.0 if j == y[i] else 0.0)) * X[i]
        # Cross-entropy loss of the correct class.
        loss += -np.log(exp_f[y[i]] / sum_exp)

    # Average over the batch and add L2 regularization.
    loss = loss / num_train + 0.5 * reg * np.sum(W * W)
    dW = dW / num_train + reg * W
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
  """
  Softmax loss function, vectorized version.

  Inputs and outputs are the same as softmax_loss_naive.
  """
  num_train = X.shape[0]

  # Row-wise scores; subtract each row's max so exp() cannot overflow.
  scores = X.dot(W)
  scores = scores - np.max(scores, axis=1, keepdims=True)

  # Softmax probabilities, shape (N, C).
  exp_scores = np.exp(scores)
  probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

  # Cross-entropy: mean negative log-probability of the correct class,
  # plus 0.5 * reg * ||W||^2 regularization.
  correct_logprobs = -np.log(probs[np.arange(num_train), y])
  loss = np.sum(correct_logprobs) / num_train + 0.5 * reg * np.sum(W * W)

  # Gradient of the scores: probabilities with 1 subtracted at the true class.
  dscores = probs
  dscores[np.arange(num_train), y] -= 1
  dW = X.T.dot(dscores) / num_train + reg * W

  return loss, dW
| [
"nangeya@126.com"
] | nangeya@126.com |
f9de6b21825748128663735e57396329fedbf62c | 4e0f222e54e6e3bfa50d6d1e2dae967375837a94 | /djangoprojects/urls.py | 8f7f0bc53e584271da144e8f593067d3df9dec68 | [] | no_license | halfwitted/calculator-django | 533d8dac4729a7ed62cb11b1a70dc9760b2be719 | f0704be5675025c8049db2a028776fea3796ca7c | refs/heads/master | 2023-02-02T06:20:25.841830 | 2020-12-20T05:55:22 | 2020-12-20T05:55:22 | 322,900,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | """djangoprojects URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Project-level URL routing table: Django admin plus the calculator app.
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): no trailing slash here, unlike 'admin/' above — confirm
    # this matches how clients request the calculator app's URLs.
    path('calcapp',include('calculatorapp.urls'))
]
| [
"abhiabi.fm@gmail.com"
] | abhiabi.fm@gmail.com |
def403f049d5e996b41cb5806ab74dcfb8c5e0d6 | d0d00a6f4a6a4aa1e49390d4bd787ffcf06fafa7 | /api/api.py | daff4b6e6d06f350ce9a568fa9465a00f72a0283 | [] | no_license | mrantry/flask-react-app | 47fd06b42b0e6ed1b489be93ae7c131077c3c4bd | 772445fb366e7ff5f2b72e00fad9d3a36d2b94cc | refs/heads/main | 2023-01-19T22:44:43.620850 | 2020-11-11T04:29:48 | 2020-11-11T04:29:48 | 309,777,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,995 | py | from flask import Flask, request
import sqlite3
import uuid
from werkzeug.exceptions import HTTPException
import json
import csv
app = Flask(__name__)
@app.route('/initdb')
def init_db():
    """(Re)create the `movies` table from scratch; any existing data is dropped."""
    conn = sqlite3.connect('movies.db')
    print("Opened database successfully");
    # clear out db if table already exists...
    conn.execute(
        'DROP TABLE IF EXISTS movies'
    )
    # All columns are TEXT; `id` is a UUID assigned by the server on insert.
    conn.execute(
        'CREATE TABLE movies (' +
        'id TEXT NOT NULL PRIMARY KEY, ' +
        'release_year TEXT, ' +
        'title TEXT, ' +
        'origin TEXT, ' +
        'director TEXT,' +
        'cast TEXT,' +
        'genre TEXT,' +
        'wiki_page TEXT,' +
        'plot TEXT' +
        ')'
    )
    conn.close()
    return 'database initialized', 200
# TODO: Allow specification of file location
@app.route('/populatedb')
def populate_db():
    """Load movies from movie_plots.csv into the database, adding a UUID per row.

    The first CSV row is assumed to be a header and is skipped.
    Returns 200 on success, 500 with the exception text on failure.
    """
    try:
        con = sqlite3.connect("movies.db")
        cur = con.cursor()
        filename = "movie_plots.csv"
        # `with` guarantees the CSV file handle is closed even on failure;
        # the original opened the file and never closed it.
        with open(filename) as movie_plots:
            rows = csv.reader(movie_plots)
            movies_with_ids = [[str(uuid.uuid1())] + row for row in rows]
        print(movies_with_ids[0])
        # [1:] skips the header row.
        cur.executemany("INSERT INTO movies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", movies_with_ids[1:])
        con.commit()
        con.close()
        return "db has been populated from csv", 200
    except Exception as e:
        return f'an exception has occured in populating the database: {e}', 500
def _movie_row_to_dict(row):
    """Map a full `movies` table row to its JSON-friendly dict form."""
    return {
        "id": row[0],
        "release_year": row[1],
        "title": row[2],
        "origin": row[3],
        "director": row[4],
        "cast": row[5],
        "genre": row[6],
        "wiki": row[7],
        "plot": row[8],
    }


@app.route('/movies', defaults={'movie_id': None}, methods=['GET'])
@app.route('/movies/<movie_id>')
def get_movie_by_id(movie_id):
    """Return one movie (all columns) or a summary list of movies.

    With a movie_id the full record is returned; without one, either all
    summaries or a page of 50 when the optional ?page= query arg is given.
    """
    con = sqlite3.connect('movies.db')
    cur = con.cursor()
    if movie_id is not None:
        # Parameterized query: the original interpolated movie_id directly
        # into the SQL string, which allowed SQL injection.
        cur.execute('SELECT * FROM movies WHERE id=?', (movie_id,))
        data = cur.fetchall()
        con.close()
        return {'content': [_movie_row_to_dict(item) for item in data]}

    pagenumber = request.args.get('page')
    if pagenumber is not None:
        cur.execute(
            'SELECT id, release_year, title, origin, director, genre '
            'FROM movies ORDER BY id LIMIT 50 OFFSET ?;',
            ((int(pagenumber) - 1) * 50,))
    else:
        cur.execute('SELECT id, release_year, title, origin, director, genre FROM movies ORDER BY id;')
    data = cur.fetchall()
    con.close()
    formatted = [
        {
            "id": item[0],
            "release_year": item[1],
            "title": item[2],
            "origin": item[3],
            "director": item[4],
            "genre": item[5],
        }
        for item in data
    ]
    return {'content': formatted, "page": pagenumber}
@app.route('/movies', methods=['POST'])
def create_movie():
    """Insert a new movie from the JSON request body and return the stored row.

    Expects keys: releaseYear, title, origin, director, cast, genre, wiki, plot.
    Returns 200 with the created record, or 500 on any failure (e.g. missing key).
    """
    body = request.get_json()
    try:
        con = sqlite3.connect("movies.db")
        cur = con.cursor()
        # Primary key is generated server-side, never taken from the client.
        movie_id = str(uuid.uuid1())
        new_movie = [
            movie_id,
            body["releaseYear"],
            body["title"],
            body["origin"],
            body["director"],
            body["cast"],
            body["genre"],
            body["wiki"],
            body["plot"]
        ]
        cur.execute("INSERT INTO movies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", new_movie)
        # Read the row back so the response reflects exactly what was stored.
        cur.execute(f'SELECT * FROM movies where ID=\'{movie_id}\'')
        data = cur.fetchall()
        con.commit()
        con.close()
        formatted = []
        for item in data:
            formatted.append(
                {
                    "id": item[0],
                    "release_year": item[1],
                    "title": item[2],
                    "origin": item[3],
                    "director": item[4],
                    "cast": item[5],
                    "genre": item[6],
                    "wiki": item[7],
                    "plot": item[8],
                }
            )
        print(formatted)
        return {'content': formatted}, 200
    except Exception as e:
        return f'error creating movie: {e}', 500
@app.route('/movies/<movie_id>', methods=['PUT'])
def update_movie(movie_id):
    """Replace every field of the movie `movie_id` with the JSON body's values
    and return the updated record.

    Expects keys: releaseYear, title, origin, director, cast, genre, wiki, plot.
    """
    body = request.get_json()
    try:
        con = sqlite3.connect("movies.db")
        cur = con.cursor()
        new_movie = [
            body["releaseYear"],
            body["title"],
            body["origin"],
            body["director"],
            body["cast"],
            body["genre"],
            body["wiki"],
            body["plot"],
            movie_id
        ]
        sql = """
            UPDATE movies
            SET release_year = ? ,
                title = ? ,
                origin = ? ,
                director = ? ,
                cast = ? ,
                genre = ? ,
                wiki_page = ? ,
                plot = ?
            WHERE id = ?"""
        cur.execute(sql, new_movie)
        # Parameterized read-back (the original interpolated movie_id into
        # the SQL string, allowing SQL injection).
        cur.execute('SELECT * FROM movies WHERE id=?', (movie_id,))
        data = cur.fetchall()
        con.commit()
        con.close()
        formatted = []
        for item in data:
            formatted.append(
                {
                    "id": item[0],
                    "release_year": item[1],
                    "title": item[2],
                    "origin": item[3],
                    "director": item[4],
                    "cast": item[5],
                    "genre": item[6],
                    "wiki": item[7],
                    "plot": item[8],
                }
            )
        return {'content': formatted}, 200
    except Exception as e:
        # Original said "creating" here and returned an implicit 200 on failure.
        return f'error updating movie: {e}', 500
@app.route('/movies/<movie_id>', methods=['DELETE'])
def delete_movie(movie_id):
    """Delete the movie with the given id.

    Returns 200 even when no row matched (idempotent delete), 500 on error.
    """
    # Note: the original parsed a JSON body it never used (forcing DELETE
    # requests to carry JSON) and had an unreachable trailing return; both
    # have been removed.
    try:
        con = sqlite3.connect("movies.db")
        cur = con.cursor()
        # Parameterized to prevent SQL injection (original used an f-string).
        cur.execute('DELETE FROM movies WHERE id=?', (movie_id,))
        con.commit()
        con.close()
        return f'deleted movie with id: {movie_id}', 200
    except Exception as e:
        # Original returned the error text with an implicit 200 status.
        return f'error deleting movie: {e}', 500
@app.route('/search/<search_string>', methods=['GET'])
def search(search_string):
    """Prefix-search movie titles; returns summaries, 50 per page.

    Optional ?page= query arg selects the page (default 1 — the original
    crashed with int(None) when it was omitted).
    """
    try:
        con = sqlite3.connect("movies.db")
        cur = con.cursor()
        page = int(request.args.get('page', 1))
        # Bind the user input instead of interpolating it into the SQL
        # (the original f-string allowed SQL injection); LIKE wildcards in
        # the input are escaped so they match literally.
        pattern = search_string.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_') + '%'
        cur.execute(
            "SELECT id, release_year, title, origin, director, genre "
            "FROM movies WHERE title LIKE ? ESCAPE '\\' ORDER BY title LIMIT 50 OFFSET ?;",
            (pattern, (page - 1) * 50))
        data = cur.fetchall()
        con.close()
        formatted = []
        for item in data:
            formatted.append(
                {
                    "id": item[0],
                    "release_year": item[1],
                    "title": item[2],
                    "origin": item[3],
                    "director": item[4],
                    "genre": item[5]
                }
            )
        return {'content': formatted}
    except Exception as e:
        return f'error searching for movie: {e}', 500
@app.errorhandler(HTTPException)
def handle_bad_request(e):
    """Render every HTTPException as a JSON body instead of the default HTML page."""
    # Start from the exception's standard response (keeps the status code).
    response = e.get_response()
    # replace the body with JSON
    response.data = json.dumps({
        "code": e.code,
        "name": e.name,
        "description": e.description,
    })
    response.content_type = "application/json"
    return response
| [
"mrantry@gmail.com"
] | mrantry@gmail.com |
cafca80bf7bddc1cd15435d72b69eb647c4067eb | 71ca661b74aa7b9662ad1dc54fe807b63bc9c4bc | /ids.py | e0bec233d4e010246fb91d67c064ccdb747da3b4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | estuans/fsrip | d3a8dca3120942ed5239bdf21b2e87f810b733a3 | 161493e76d359c8bf2ebab8eeb42e5cf42f1b7b8 | refs/heads/master | 2021-05-09T10:47:32.794141 | 2015-05-27T15:27:42 | 2015-05-27T15:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | #!/opt/local/bin/python3.3
# {"id":"0000",
# "t":{
# "fsmd":{
# "fs":{"byteOffset":1048576,"blockSize":512,"fsID":"f7c7b628","partName":"part-0-0"},
# "path":"",
# "name":{"flags":"Allocated","meta_addr":4,"meta_seq":0,"name":"Documents","shrt_name":"DOCUME~1","type":"Folder"},
# "meta":{"addr":4,"accessed":"2012-08-07T04:00:00Z","content_len":8,"created":"2012-08-07T22:13:53.84Z","metadata":"1970-01-01T00:00:00Z","flags":"Allocated, Used","gid":0,"mode":511,"modified":"2012-08-07T22:13:52Z","nlink":1,"seq":0,"size":8192,"type":"Folder","uid":0,
# "attrs":[
# {"flags":"In Use, Non-resident","id":0,"name":"","size":8192,"type":1,"rd_buf_size":0,"nrd_allocsize":8192,"nrd_compsize":0,"nrd_initsize":8192,"nrd_skiplen":0,
# "nrd_runs":[
# {"addr":3880,"flags":0,"len":8,"offset":0},
# {"addr":618808,"flags":0,"len":8,"offset":8}
# ]
# }
# ]
# }
# }
# }
# }
import sys
import json
# Read fsrip JSON records (one per line) from stdin and print
# "<id>\t<path><name>" for each. Records missing a required field are
# reported on stderr and skipped — the original only warned and then fell
# through, crashing with a KeyError on the very field it had just reported
# as missing. Also avoids shadowing the builtins `input` and `id`.
for raw_line in sys.stdin:
  line = raw_line.strip()
  metadata = json.loads(line)
  if 'id' not in metadata:
    print("** no id on %s" % (line), file=sys.stderr)
    continue
  record_id = metadata['id']
  if 't' not in metadata:
    print("** no t on %s" % (line), file=sys.stderr)
    continue
  t = metadata['t']
  if 'fsmd' not in t:
    print("** no fsmd in t on %s" % (line), file=sys.stderr)
    continue
  fsmd = t['fsmd']
  if 'path' not in fsmd:
    print("** no path in fsmd on %s" % (line), file=sys.stderr)
    continue
  if 'name' not in fsmd or 'name' not in fsmd['name']:
    print("** no name in fsmd on %s" % (line), file=sys.stderr)
    continue
  print("%s\t%s" % (record_id, fsmd['path'] + fsmd['name']['name']))
| [
"jonathan.l.stewart@acm.org"
] | jonathan.l.stewart@acm.org |
8746be1fd3b410f5feea5dc25408026a13c2840a | b5445f9a1f3597472f47df89696465bca7735406 | /app/program.py | fbad7d4d00617fce1af32fa10d72252d695d045e | [
"MIT"
] | permissive | mikeckennedy/pyramid-web-builder-python-gui | 8af5a4dde9ff1bd6173f789464b67bdaba8bd3fa | d842e116730e9b0ed9daaf1125e1fb6e2b3ea40e | refs/heads/master | 2021-05-03T11:00:32.390158 | 2018-02-17T16:12:56 | 2018-02-17T16:12:56 | 120,542,873 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | import cookiecutter.main
import sys
from gooey import Gooey, GooeyParser
from utils import to_project_style
@Gooey(
    program_name='Pyramid app builder',
    program_description='Create a Pyramid web app',
    show_success_modal=False,
    requires_shell=False)
def main():
    """Collect options from the Gooey-generated GUI, then generate the project."""
    info = get_user_values()
    proj_dir = build_app(info)
    print("Project created: {}".format(proj_dir))
def get_user_values():
    """Build the Gooey argument form and return the parsed user selections.

    Returns a namespace with: template, project_name, template_language,
    working_dir.
    """
    parser = GooeyParser()
    parser.add_argument(dest='template',
                        metavar='Project type',
                        help="Type of Pyramid project",
                        choices=['Starter', "Talk Python Entrepreneur's", 'SQLAlchemy', 'SubstanceD', 'ZODB'])
    parser.add_argument('project_name',
                        metavar='Project name',
                        help="The user-visible name of your project")
    parser.add_argument(
        dest='template_language',
        metavar='Template language',
        widget='Dropdown',
        choices=["jinja2", "chameleon", "mako"]
    )
    parser.add_argument(
        dest="working_dir",
        metavar='Output directory',
        help='Directory for project',
        widget="DirChooser")

    # Gooey re-invokes the script with the GUI selections as CLI arguments.
    sysargs = sys.argv[1:]
    args = parser.parse_args(sysargs)
    return args
def template_to_url(template_name: str) -> str:
    """Map a GUI project-type choice to its cookiecutter template repo URL.

    Raises:
        Exception: if *template_name* is not a known project type.
    """
    # Dispatch table instead of an if/elif chain. Also handles the 'ZODB'
    # choice offered by the GUI, which the original chain did not map and
    # therefore rejected with "Unknown template type".
    urls = {
        'Starter': 'https://github.com/Pylons/pyramid-cookiecutter-starter',
        'SQLAlchemy': 'https://github.com/Pylons/pyramid-cookiecutter-alchemy',
        'SubstanceD': 'https://github.com/Pylons/substanced-cookiecutter',
        "Talk Python Entrepreneur's": 'https://github.com/mikeckennedy/cookiecutter-pyramid-talk-python-starter',
        'ZODB': 'https://github.com/Pylons/pyramid-cookiecutter-zodb',
    }
    try:
        return urls[template_name]
    except KeyError:
        raise Exception("Unknown template type")
def build_app(info):
    """Run cookiecutter non-interactively with the user's selections.

    :param info: parsed namespace from get_user_values
    :return: path of the generated project directory
    """
    template = template_to_url(info.template)
    proj_dir = cookiecutter.main.cookiecutter(
        template,
        no_input=True,
        output_dir=info.working_dir,
        # Placeholder contact/integration values; the generated project is
        # expected to be edited afterwards.
        extra_context={
            'project_name': info.project_name,
            'repo_name': to_project_style(info.project_name),
            'template_language': info.template_language,
            "project_slug": to_project_style(info.project_name),
            "contact_name": "Company Name",
            "domain_name": "yourcompany.com",
            "contact_email": "contact@company.com",
            "description": "",
            "integrations": "",
            "mailchimp_api": "",
            "mailchimp_list_id": "",
            "outbound_smtp_username": "",
            "outbound_smtp_password": "",
            "outbound_smtp_server": "",
            "outbound_smtp_port": "587",
            "rollbar_access_token": ""
        }
    )
    return proj_dir
if __name__ == '__main__':
sys.exit(main())
| [
"mikeckennedy@gmail.com"
] | mikeckennedy@gmail.com |
73f8a9ab6ec9a7c2c33e70014762322b82ac8488 | 45829716d3dfe520f5f1efdd4088b1de3baf447f | /ChoicesModelApi/app/src/route_choice_model.py | eee96cac83460b4e9fa3652d93ca60a02c69a1a2 | [] | no_license | SanTransport/My-TRAC-choice-model-API | c48037f14396948eea00b97d2f268b50b03f52e3 | 077399c631289356af4b8baf57a2753db008d952 | refs/heads/master | 2020-12-23T19:13:50.240972 | 2020-02-19T13:44:11 | 2020-02-19T13:44:11 | 237,245,095 | 0 | 0 | null | 2020-01-30T15:39:28 | 2020-01-30T15:39:28 | null | UTF-8 | Python | false | false | 22,374 | py | '''
Pretty similar to the mode/departure time choice models. Assumes a maximum of
10 itineraries. Parses the request_reply field in user_chooses_route for model
estimation. For preditcion, expects a URL describing all attributes as in below
example:
http://localhost:5000/choice/route/?user_country=NL
&transitTime_0=1500&transitTime_1=3000&transitTime_2=0&transitTime_3=0&transitTime_4=0&transitTime_5=0&transitTime_6=0&transitTime_7=0&transitTime_8=0&transitTime_9=0
&transfers_0=1&transfers_1=0&transfers_2=0&transfers_3=0&transfers_4=0&transfers_5=0&transfers_6=0&transfers_7=0&transfers_8=0&transfers_9=0
&waitingTime_0=1000&waitingTime_1=300&waitingTime_2=0&waitingTime_3=0&waitingTime_4=0&waitingTime_5=0&waitingTime_6=0&waitingTime_7=0&waitingTime_8=0&waitingTime_9=0
&routeAvail_0=1&routeAvail_1=1&routeAvail_2=0&routeAvail_3=0&routeAvail_4=0&routeAvail_5=0&routeAvail_6=0&routeAvail_7=0&routeAvail_8=0&routeAvail_9=0
&user_birthday=19861224&user_gender=1&user_income=1&user_traveller_type=0&user_often_pt=2&trip_id=567&mytrac_id=765
'''
import database_connection_route
import pandas as pd
import biogeme.database as db
import biogeme.biogeme as bio
import biogeme.models as models
import collections
from datetime import datetime
from biogeme.expressions import *
import os
import numpy as np
import json
class RouteChoice:
    def __init__(self):
        # No state to initialize; models are built per call.
        return
    def __del__(self):
        # Best-effort removal of estimation by-products; a destructor must
        # never raise, so all errors are swallowed.
        try:
            self.__cleanup_after_model_training()
        except:
            pass
        return None
def __cleanup_after_model_training(self):
for file in os.listdir(os.getcwd()):
if os.path.isfile(file) and file.startswith("logitEstimation"):
# print(file, ' deleted.') # Debug print
os.remove(file)
os.remove('headers.py')
def __birthday_to_age(self, birthday):
'''
Takes a birthday in YYYMMDD format and returns a classification for age, ranging from 1 to 6.
18-24 1
25-34 2
35-44 3
45-54 4
55-64 5
≥65 6
:param birthday: A string or int variable for birthday YYYYMMDD
:return: An int from 1 to 6, representing the age category of the user
'''
age = (datetime.now() - datetime.strptime(str(int(birthday)), "%Y%m%d")).days / 365
if age < 25:
age = 1
elif age < 35:
age = 2
elif age < 45:
age = 3
elif age < 55:
age = 4
elif age < 65:
age = 5
else:
age = 6
return age
    def connect_to_db(self, database_name, country):
        """
        connect to a database, for a given country. return a pandas dataframe.
        :param database_name: the name of the database
        :param country: the code of the country (i.e. GR, ES, NL, PT)
        :return: a pandas dataframe that is readable by biogeme
        """
        # connect to the database
        sql = database_connection_route.DatabaseConnection(database_name)
        if country == 'ES': # FIXME remove the workaround, once we have data for ES
            country = 'GR'
        # select data from the database, for the country that is currently being estimated
        sql.run_query(sql.select_data, {
            'table_name': 'user_chooses_route',
            'columns': [
                'user_id',
                'request_reply',
                'user_choice'
            ],
            'where_cols': [],
            'where_values': []
        })
        routeData = pd.DataFrame(data=sql.output).transpose()

        maxItineraries = 10 # NOTE: maxItineraries fixed to 10; will also be used for BIOGEME code in the utility definition
        # Get route attributes assuming a maximum of 10 alternatives.
        # Each request_reply is a JSON list of itineraries (presumably an OTP
        # trip-planner reply — TODO confirm); unused itinerary slots stay 0
        # with routeAvail = 0 so Biogeme treats them as unavailable.
        transitTime = np.zeros([len(routeData),maxItineraries])
        transfers = np.zeros([len(routeData),maxItineraries])
        waitingTime = np.zeros([len(routeData),maxItineraries])
        routeAvail = np.zeros([len(routeData),maxItineraries])
        numItineraries = np.zeros([len(routeData),1],dtype=int)
        for i in range(len(routeData)):
            replyJson = json.loads(routeData['request_reply'].iloc[i])
            numItineraries[i] = len(replyJson)
            for j in range(numItineraries[i][0]):
                transitTime[i,j] = replyJson[j]['transitTime']
                transfers[i,j] = replyJson[j]['transfers']
                waitingTime[i,j] = replyJson[j]['waitingTime']
                routeAvail[i,j] = 1
        # Put route attributes in the same dataframe, one wide column per
        # (attribute, itinerary) pair — matching the Variable names used in
        # estimate_model.
        routeData = pd.concat([
            routeData,
            pd.DataFrame(data=numItineraries,columns=['numItineraries']),
            pd.DataFrame(data=transitTime,columns=['transitTime_'+str(i) for i in range(maxItineraries)]),
            pd.DataFrame(data=transfers,columns=['transfers_'+str(i) for i in range(maxItineraries)]),
            pd.DataFrame(data=waitingTime,columns=['waitingTime_'+str(i) for i in range(maxItineraries)]),
            pd.DataFrame(data=routeAvail,columns=['routeAvail_'+str(i) for i in range(maxItineraries)])],
            axis=1)
        routeData = routeData.loc[routeData['user_choice']!=-1] # removing replies where no user choice is available

        sql.run_query(sql.select_data, {
            'table_name': 'user',
            'columns': [
                'user_id',
                'user_birthday',
                'user_gender', # 2 categories: male, female
                'user_income', # 3 categories: low, medium, high
                'user_often_pt', # 4 categories: never, rarely, 1-2times/week, daily
                'user_traveller_type' # 2 categories: work/edu, other
            ],
            'where_cols': [
                'user_country'
            ],
            'where_values': [
                country
            ]
        })
        userData = pd.DataFrame(data=sql.output).transpose()
        userData = userData.loc[(userData['user_id']!=0)&(userData['user_birthday']!=0)] # remove invalid users and users with missing information
        # from birthday to age category: ceil((age - 25) / 10) + 1, floored
        # at 1 — same 6 buckets as __birthday_to_age.
        userData['AGE'] = np.ceil(
            (
                (
                    (datetime.now()-pd.to_datetime(userData['user_birthday'],format='%Y%m%d',errors='coerce')
                ).dt.days/(365))-25)/10)+1
        userData.loc[userData['AGE']<1,'AGE'] = 1
        # store data in a pandas dataframe, readable by biogeme
        pandas_df_for_specified_country = pd.merge(routeData,userData,on='user_id') # only users of the said country will remain; others will be filtered out with this merge
        pandas_df_for_specified_country['AGE'] = pandas_df_for_specified_country['AGE'].astype(int)
        pandas_df_for_specified_country['user_id'] = pandas_df_for_specified_country['user_id'].astype(int)
        # Biogeme alternatives are numbered 1..10, while user_choice is
        # stored 0-based — hence the +1.
        pandas_df_for_specified_country['user_choice'] = pandas_df_for_specified_country['user_choice'].astype(int)+1
        pandas_df_for_specified_country = pandas_df_for_specified_country.drop(columns=['request_reply'])
        return pandas_df_for_specified_country
def evaluate_model(self, pandas_df_for_specified_country, model):
# Estimation of probabilities for each alternative on aggregate. Simulate / forecast.
def print_route_shares(routename, routenumber):
seriesObj = simresults.apply(lambda x: True if x['Actual choice'] == routenumber else False, axis=1)
REAL = len(seriesObj[seriesObj == True].index)
seriesObj = simresults.apply(lambda x: True if x['Simulated choice'] == routenumber else False, axis=1)
SIMU = len(seriesObj[seriesObj == True].index)
shares = (routename, '--> Real:' + "{0:.1%}".format(REAL / simresults.shape[0]) +
'| Simu:' + "{0:.1%}".format(SIMU / simresults.shape[0]))
print(shares)
biosim = bio.BIOGEME(db.Database('estimationdb', pandas_df_for_specified_country), model.structure)
biosim.modelName = "simulated_model"
simresults = biosim.simulate(model.betas)
# Add a column containing the suggestion from the model
simresults['Simulated choice'] = simresults.idxmax(axis=1)
# Add a column containing the actual choice of the individual
simresults['Actual choice'] = pandas_df_for_specified_country['user_choice'].to_numpy()
# Add a column which compares the predicted against the RP choice (correct prediction = 1, wrong prediction = 0)
simresults['Correct prediction'] = np.where(simresults['Simulated choice'] == simresults['Actual choice'], 1, 0)
return {'Model prediction accuracy': "{0:.1%}".format(simresults['Correct prediction'].mean()),
'Rho-square': "{0:.3}".format(model.results.getGeneralStatistics()['Rho-square-bar for the init. model'][0])}
def estimate_model(self, pandas_df_for_specified_country, country):
'''
:param pandas_df_for_specified_country:
:param country:
:return: The estimated model, in a variable with 3 attributes: betas, structure, results.
'''
mypanda = pandas_df_for_specified_country
# create the respective database (needed for biogeme)
estimationdb = db.Database('estimationdb', mypanda)
print('Training Route Choice model for', country)
# NOTE: max number of itineraries fixed to 10
# Beta variables (i.e. coefficients) - alternative specific
bTotalIvt = Beta('bTotalIvt',0,-1000,1000,0) # transitTime in OTP reply
bTransfers = Beta('bTransfers',0,-1000,1000,0) # transfers in OTP reply
bTotalWt = Beta('bTotalWt',0,-1000,1000,0) # waitingTime in OTP reply
# Beta variables (i.e. coefficients) - traveller
bAge = Beta('bAge',0,-1000,1000,0) # Age
bTrFreq = Beta('bTrFreq',0,-1000,1000,0) # PT trip frequency
bGender2 = Beta('bGender2',0,-1000,1000,0) # Gender: female
bPurp2 = Beta('bPurp2',0,-1000,1000,0) # trip purpose: others (non-commuting)
bNetInc1 = Beta('bNetInc1',0,-1000,1000,0) # income level: low
bNetInc3 = Beta('bNetInc3',0,-1000,1000,0) # income level: high
# Variables: choice situation
transitTime_0 = Variable('transitTime_0')
transitTime_1 = Variable('transitTime_1')
transitTime_2 = Variable('transitTime_2')
transitTime_3 = Variable('transitTime_3')
transitTime_4 = Variable('transitTime_4')
transitTime_5 = Variable('transitTime_5')
transitTime_6 = Variable('transitTime_6')
transitTime_7 = Variable('transitTime_7')
transitTime_8 = Variable('transitTime_8')
transitTime_9 = Variable('transitTime_9')
transfers_0 = Variable('transfers_0')
transfers_1 = Variable('transfers_1')
transfers_2 = Variable('transfers_2')
transfers_3 = Variable('transfers_3')
transfers_4 = Variable('transfers_4')
transfers_5 = Variable('transfers_5')
transfers_6 = Variable('transfers_6')
transfers_7 = Variable('transfers_7')
transfers_8 = Variable('transfers_8')
transfers_9 = Variable('transfers_9')
waitingTime_0 = Variable('waitingTime_0')
waitingTime_1 = Variable('waitingTime_1')
waitingTime_2 = Variable('waitingTime_2')
waitingTime_3 = Variable('waitingTime_3')
waitingTime_4 = Variable('waitingTime_4')
waitingTime_5 = Variable('waitingTime_5')
waitingTime_6 = Variable('waitingTime_6')
waitingTime_7 = Variable('waitingTime_7')
waitingTime_8 = Variable('waitingTime_8')
waitingTime_9 = Variable('waitingTime_9')
# Variables: alternative availability
routeAvail_0 = Variable('routeAvail_0')
routeAvail_1 = Variable('routeAvail_1')
routeAvail_2 = Variable('routeAvail_2')
routeAvail_3 = Variable('routeAvail_3')
routeAvail_4 = Variable('routeAvail_4')
routeAvail_5 = Variable('routeAvail_5')
routeAvail_6 = Variable('routeAvail_6')
routeAvail_7 = Variable('routeAvail_7')
routeAvail_8 = Variable('routeAvail_8')
routeAvail_9 = Variable('routeAvail_9')
# Variables: personal
# NOTE: Currently unused!!!
# Personal variables may be used when there is reasonable intuition
# that different socio-demographics will consider an attribute differently
user_traveller_type = Variable('user_traveller_type')
AGE = Variable('AGE')
user_gender = Variable('user_gender')
user_often_pt = Variable('user_often_pt')
user_income = Variable('user_income')
user_choice = Variable('user_choice')
if country == 'GR' or country == 'ES': # FIXME create a separate model for ES
### Definition of utility functions - one for each alternative:
V_0 = bTotalIvt * transitTime_0 + \
bTransfers * transfers_0 + \
bTotalWt * waitingTime_0
V_1 = bTotalIvt * transitTime_1 + \
bTransfers * transfers_1 + \
bTotalWt * waitingTime_1
V_2 = bTotalIvt * transitTime_2 + \
bTransfers * transfers_2 + \
bTotalWt * waitingTime_2
V_3 = bTotalIvt * transitTime_3 + \
bTransfers * transfers_3 + \
bTotalWt * waitingTime_3
V_4 = bTotalIvt * transitTime_4 + \
bTransfers * transfers_4 + \
bTotalWt * waitingTime_4
V_5 = bTotalIvt * transitTime_5 + \
bTransfers * transfers_5 + \
bTotalWt * waitingTime_5
V_6 = bTotalIvt * transitTime_6 + \
bTransfers * transfers_6 + \
bTotalWt * waitingTime_6
V_7 = bTotalIvt * transitTime_7 + \
bTransfers * transfers_7 + \
bTotalWt * waitingTime_7
V_8 = bTotalIvt * transitTime_8 + \
bTransfers * transfers_8 + \
bTotalWt * waitingTime_8
V_9 = bTotalIvt * transitTime_9 + \
bTransfers * transfers_9 + \
bTotalWt * waitingTime_9
# Associate the availability conditions with the alternatives. (Does not really apply on ToD modelling)
av = {1: routeAvail_0,
2: routeAvail_1,
3: routeAvail_2,
4: routeAvail_3,
5: routeAvail_4,
6: routeAvail_5,
7: routeAvail_6,
8: routeAvail_7,
9: routeAvail_8,
10: routeAvail_9}
# Associate utility functions with the numbering of alternatives
V = {1: V_0,
2: V_1,
3: V_2,
4: V_3,
5: V_4,
6: V_5,
7: V_6,
8: V_7,
9: V_8,
10: V_9,
}
elif country == 'NL':
### Definition of utility functions - one for each alternative:
V_0 = bTotalIvt * transitTime_0 + \
bTransfers * transfers_0 + \
bTotalWt * waitingTime_0
V_1 = bTotalIvt * transitTime_1 + \
bTransfers * transfers_1 + \
bTotalWt * waitingTime_1
V_2 = bTotalIvt * transitTime_2 + \
bTransfers * transfers_2 + \
bTotalWt * waitingTime_2
V_3 = bTotalIvt * transitTime_3 + \
bTransfers * transfers_3 + \
bTotalWt * waitingTime_3
V_4 = bTotalIvt * transitTime_4 + \
bTransfers * transfers_4 + \
bTotalWt * waitingTime_4
V_5 = bTotalIvt * transitTime_5 + \
bTransfers * transfers_5 + \
bTotalWt * waitingTime_5
V_6 = bTotalIvt * transitTime_6 + \
bTransfers * transfers_6 + \
bTotalWt * waitingTime_6
V_7 = bTotalIvt * transitTime_7 + \
bTransfers * transfers_7 + \
bTotalWt * waitingTime_7
V_8 = bTotalIvt * transitTime_8 + \
bTransfers * transfers_8 + \
bTotalWt * waitingTime_8
V_9 = bTotalIvt * transitTime_9 + \
bTransfers * transfers_9 + \
bTotalWt * waitingTime_9
# Associate the availability conditions with the alternatives. (Does not really apply on ToD modelling)
av = {1: routeAvail_0,
2: routeAvail_1,
3: routeAvail_2,
4: routeAvail_3,
5: routeAvail_4,
6: routeAvail_5,
7: routeAvail_6,
8: routeAvail_7,
9: routeAvail_8,
10: routeAvail_9}
# Associate utility functions with the numbering of alternatives
V = {1: V_0,
2: V_1,
3: V_2,
4: V_3,
5: V_4,
6: V_5,
7: V_6,
8: V_7,
9: V_8,
10: V_9,
}
elif country == 'PT':
### Definition of utility functions - one for each alternative:
V_0 = bTotalIvt * transitTime_0 + \
bTransfers * transfers_0 + \
bTotalWt * waitingTime_0
V_1 = bTotalIvt * transitTime_1 + \
bTransfers * transfers_1 + \
bTotalWt * waitingTime_1
V_2 = bTotalIvt * transitTime_2 + \
bTransfers * transfers_2 + \
bTotalWt * waitingTime_2
V_3 = bTotalIvt * transitTime_3 + \
bTransfers * transfers_3 + \
bTotalWt * waitingTime_3
V_4 = bTotalIvt * transitTime_4 + \
bTransfers * transfers_4 + \
bTotalWt * waitingTime_4
V_5 = bTotalIvt * transitTime_5 + \
bTransfers * transfers_5 + \
bTotalWt * waitingTime_5
V_6 = bTotalIvt * transitTime_6 + \
bTransfers * transfers_6 + \
bTotalWt * waitingTime_6
V_7 = bTotalIvt * transitTime_7 + \
bTransfers * transfers_7 + \
bTotalWt * waitingTime_7
V_8 = bTotalIvt * transitTime_8 + \
bTransfers * transfers_8 + \
bTotalWt * waitingTime_8
V_9 = bTotalIvt * transitTime_9 + \
bTransfers * transfers_9 + \
bTotalWt * waitingTime_9
# Associate the availability conditions with the alternatives. (Does not really apply on ToD modelling)
av = {1: routeAvail_0,
2: routeAvail_1,
3: routeAvail_2,
4: routeAvail_3,
5: routeAvail_4,
6: routeAvail_5,
7: routeAvail_6,
8: routeAvail_7,
9: routeAvail_8,
10: routeAvail_9}
# Associate utility functions with the numbering of alternatives
V = {1: V_0,
2: V_1,
3: V_2,
4: V_3,
5: V_4,
6: V_5,
7: V_6,
8: V_7,
9: V_8,
10: V_9,
}
else:
V = 1
av = 1
print('There is no model specification for ', country)
# The choice model is a log logit, with availability conditions
logprob = bioLogLogit(util=V, av=av, choice=user_choice)
biogeme = bio.BIOGEME(database=estimationdb, formulas=logprob)
biogeme.modelName = "logitEstimation"
# Create the outputs of the estimation and store in a namedtuple (= Model)
results = biogeme.estimate()
betas = results.getBetaValues() # To be used later for the simulation of the model
structure = {1: models.logit(V, av, 1),
2: models.logit(V, av, 2),
3: models.logit(V, av, 3),
4: models.logit(V, av, 4),
5: models.logit(V, av, 5),
6: models.logit(V, av, 6),
7: models.logit(V, av, 7),
8: models.logit(V, av, 8),
9: models.logit(V, av, 9),
10: models.logit(V, av, 10)}
Output = collections.namedtuple('Output', ['betas', 'structure', 'results'])
Model = Output(betas, structure, results)
self.__cleanup_after_model_training()
# print(self.evaluate_model(pandas_df_for_specified_country, Model))
return Model
    def predict(self, trip_data, model_for_specified_country):
        """Simulate the estimated model for one trip and return the choice
        probability of each of the 10 candidate routes.

        :param trip_data: one-row dataframe with the route attributes and the
            traveller's user_birthday (AGE is derived here)
        :param model_for_specified_country: namedtuple from estimate_model
        :return: dict route_0..route_9 -> simulated probability
        """
        #FIXME: check
        trip_data['AGE'] = self.__birthday_to_age(trip_data['user_birthday'])
        # The trip is stored in a biogeme database, since it is required by Biogeme in order for it to function
        tripdb = db.Database("SuggestionDB", trip_data)
        # Simulate / forecast
        biosuggest = bio.BIOGEME(tripdb, model_for_specified_country.structure)
        biosuggest.modelName = "suggestion_to_user"
        suggestionresults = biosuggest.simulate(model_for_specified_country.betas)
        # Get the column index number of the max probability. This is My-TRAC's recommendation. Store it in a new col.
        suggestionresults['Recommendation'] = suggestionresults.idxmax(axis=1)
        suggestion = suggestionresults.values[0]
        return {'route_0': suggestion[0],
                'route_1': suggestion[1],
                'route_2': suggestion[2],
                'route_3': suggestion[3],
                'route_4': suggestion[4],
                'route_5': suggestion[5],
                'route_6': suggestion[6],
                'route_7': suggestion[7],
                'route_8': suggestion[8],
                'route_9': suggestion[9],
                }
"s.shelat@tudelft.nl"
] | s.shelat@tudelft.nl |
411345c0f65612ba6ffbc7676affbf602610f570 | b639cc785f3e548c77090fb8d2bc35d5aebfa27c | /tests/test_patterns/test_des.py | 79d37243bb32adcaed183884512f9af4dcd4d33f | [] | no_license | jmcarp/neurotrends | 92b7c33a0fe7a216af4cbbb5d4d26f8ee051286e | 724c06f6a31ecfe37780b51038b3367cd501be37 | refs/heads/master | 2016-09-05T15:49:35.435697 | 2014-11-02T04:27:21 | 2014-11-02T04:27:21 | 6,889,235 | 6 | 3 | null | 2014-10-19T18:33:44 | 2012-11-27T19:15:19 | Python | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
import pytest
from neurotrends.pattern import des
from . import check_taggers
@pytest.mark.parametrize('input, expected', [
# Positives
('block design', {}),
('blocked paradigm', {}),
('epoch based', {}),
('epoched analysis', {}),
# PMID 21625502
('we used a blocked factorial design', {}),
])
def test_block(input, expected):
check_taggers([des.block], input, expected)
| [
"jm.carp@gmail.com"
] | jm.carp@gmail.com |
2b19976d56131f88072e56b83b4b12483db4afa8 | 774af99a0091e1b0e26dc8bb21909c610e76b990 | /findtest.py | 603bbcd50aa89856dcf70c84b35c4378ed018d36 | [
"MIT"
] | permissive | ConstantFun/finding_lanes | b53ca6c7db974ab36b93c516011e4d6124f7ffc0 | ebf361c51fb489166a9b591c502c70d56169cbba | refs/heads/master | 2020-04-01T07:21:44.835929 | 2018-10-19T11:05:12 | 2018-10-19T11:05:12 | 152,987,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | # Do relevant imports
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# Read in and grayscale the image
image = mpimg.imread('test_images/solidWhiteCurve.jpg')
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
masked_edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 1
theta = np.pi/180
threshold = 1
min_line_length = 10
max_line_gap = 1
line_image = np.copy(image)*0 #creating a blank to draw lines on
# Run Hough on edge detected image
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
min_line_length, max_line_gap)
# Iterate over the output "lines" and draw lines on the blank
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# Create a "color" binary image to combine with line image
color_edges = np.dstack((masked_edges, masked_edges, masked_edges))
# Draw the lines on the edge image
combo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
plt.imshow(combo)
| [
"m.kingeryjp@gmail.com"
] | m.kingeryjp@gmail.com |
50ed4c1e4c8f3a3d0004a7364916f829ebeb823e | e831c22c8834030c22c54b63034e655e395d4efe | /171-ExcelSheetColumnNumber.py | b0ecffe70dcf519041cda5b5ec7b971faf11ca34 | [] | no_license | szhmery/leetcode | a5eb1a393422b21f9fd4304b3bdc4a9db557858c | 9fcd1ec0686db45d24e2c52a7987d58c6ef545a0 | refs/heads/master | 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | class Solution:
def titleToNumber(self, columnTitle: str) -> int:
ans = 0
for char in columnTitle:
num = ord(char) - ord('A') + 1
ans = ans * 26 + num
return ans
if __name__ == '__main__':
solution = Solution()
result = solution.titleToNumber("FXSHRXW")
print(result)
result = solution.titleToNumber("ZY")
print(result) | [
"szhmery@gmail.com"
] | szhmery@gmail.com |
90b7348d06a42580f3926bcd866c1b08c03a1841 | c66e8c4bdc4cceaa3b62cb50879ccd48e58a9b5f | /app/main.py | 759e6849b1e9f3c5957270514f5920236c628d23 | [] | no_license | hopeaktian/smart_factory | 779d47abf6344555cd437842ab011de10735a9ea | 31cd6a74f8f65b3ea0c5339690e0003e95c6d9e1 | refs/heads/master | 2020-03-28T10:56:16.587472 | 2019-05-01T15:00:14 | 2019-05-01T15:00:14 | 148,161,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,069 | py | # -*- coding: utf-8 -*-
import datetime, os, time
from flask import Flask, render_template, request, flash, session, redirect, url_for
from config import DevConfig
from werkzeug.utils import secure_filename
# from flask_sqlalchemy import SQLAlchemy
from models import db
# from sqlalchemy.sql.expression import not_, or_
app = Flask(__name__)
app.config.from_object(DevConfig)
db.init_app(app)
# 自定义jinja过滤器
def time_format(l):
return str(l)[:-7]
app.add_template_filter(time_format, 'format_time')
# UPLOAD_FOLDER = "./static/Upload_File/"
# ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# 路由部分
def checkuser():
if 'username' in session:
global user
user = User.query.filter_by(Username=session['username']).first()
@app.route('/')
def index():
if 'username' in session:
global user
user = User.query.filter_by(Username=session['username']).first()
return render_template('user.html', title=u'智慧工厂', userlogin_name=session['username'], user=user)
return render_template('user.html', title=u"智慧工厂")
@app.route('/phone')
def phone():
return render_template('phone.html')
# @app.route('/MP_verify_FcIWodemzDz6J6Op.txt')
# def weixin():
# return render_template('MP_verify_FcIWodemzDz6J6Op.txt')
#
#
# @app.errorhandler(404)
# def page_not_found(e):
# if 'username' in session:
# global user
# user = User.query.filter_by(Username=session['username']).first()
# return render_template('error.html', title=u"错误", user=user, userlogin_name=session['username']), 404
# return render_template('error.html', title=u"错误"), 404
#
# @app.errorhandler(500)
# def page_not_found(e):
# if 'username' in session:
# global user
# user = User.query.filter_by(Username=session['username']).first()
# return render_template('error.html', title=u"错误", user=user), 500
# return render_template('error.html', title=u"错误"), 500
#
# @app.route('/messagewall', methods=['GET', 'POST'])
# def messagewall():
# global success
# global lenth
# global userlogin_name
# success = 0 #评论初始值为0即失败
# lenth = 0
# allCri = Criticism.query.order_by(Criticism.Id.desc()).all()
# lenth = len(allCri)
#
# if 'username' in session:
# user = User.query.filter_by(Username=session['username']).first()
# userlogin_name = session['username']
# if request.method == 'POST':
# Criticismfrosql = Criticism(request.form.get("nickname"), request.form.get("criticism"))
# db.session.add(Criticismfrosql)
# db.session.commit()
# success = 1
# allCri = Criticism.query.order_by(Criticism.Id.desc()).all()
# lenth = len(allCri)
#
# return render_template('messagewall.html', title=u"留言墙", success=success, allCri=allCri, lenth=lenth, userlogin_name=session['username'], user=user)
# else:
# if request.method == 'POST':
# Criticismfrosql = Criticism(request.form.get("nickname"), request.form.get("criticism"))
# db.session.add(Criticismfrosql)
# db.session.commit()
# success = 1
# allCri = Criticism.query.order_by(Criticism.Id.desc()).all()
# lenth = len(allCri)
#
# return render_template('messagewall.html', title=u"留言墙", success=success, allCri=allCri, lenth=lenth)
#
#
# @app.route('/login', methods=['GET', 'POST'])
# def login():
# checkuser()
# global log
# global status
# log = 0
# status = 1
# if request.method == 'POST':
# userlogin_name = request.form.get("name_login")
# userlogin_password = request.form.get("password_login")
#
# # if User.query.filter(or_(User.Username==userlogin_name, User.Email==userlogin_name)).all() and User.query.filter(User.Password==userlogin_password) :
# # return render_template('index2.html', userlogin_name=userlogin_name)
# # print "Success"
# user = User.query.filter_by(Username=userlogin_name).first()
# if user is not None and user.Password==userlogin_password:
# status = 1
# log = 1
# # flash(u'登陆成功', category="success")
# session['username'] = userlogin_name
#
# return render_template('index2.html', userlogin_name=session['username'], log=log, title=u"登陆", user=user)
#
# else:
# # flash(u'用户名或密码错误!', category="danger")
# status = 0
# return render_template('login2.html', status=status, log=log, title=u"登陆", user=user)
# return render_template('login2.html', title=u"登陆", log=log)
#
#
# @app.route('/logout', methods=['GET', 'POST'])
# def logout():
# session.pop('username', None)
# return redirect(url_for('user'))
#
# @app.route('/register', methods=['GET', 'POST'])
# def register():
# global exist
# global flag
# global repassword
# global password_lenth
# repassword = 1
# password_lenth = 1
# exist = 0
# flag = 0
# if request.method == 'POST':
# new_username = request.form.get("Name")
#
# if User.query.filter_by(Username=new_username).all():
# exist = 1
# # flash(u"注册失败!!用户名已存在! 换个更个性的用户名吧 -_-", category="danger")
# elif request.form.get("Password") != request.form.get("repassword"):
# repassword = 0
# return render_template('register.html', exist=exist, flag=flag, title=u"注册", repassword=repassword)
# elif len(request.form.get("Password")) < 6:
#
# password_lenth = 0
# return render_template('register.html', exist=exist, flag=flag, title=u"注册", password_lenth=password_lenth)
#
# else:
# user_forsql = User(new_username, request.form.get("Password"), request.form.get("Gender"), request.form.get("Email"), request.form.get("Tel"))
# db.session.add(user_forsql)
# db.session.commit()
# flag = 1
# # flash("恭喜您!注册成功", category="success")
# return render_template('register.html', exist=exist, flag=flag, title=u"注册")
#
# # 发订单
# @app.route('/order', methods=['GET', 'POST'])
# def order(success=0):
# checkuser()
# global now_time
# now_time = datetime.datetime.now()
#
#
# if 'username' not in session:
# return render_template('notlogin.html', title=u"创建订单")
# elif request.method == 'POST' and not request.files.has_key("inputFile"):
#
# user_now = User.query.filter_by(Username=session['username']).first()
# new_order = Order(request.form.get("title"))
# new_order.Details = request.form.get("detials")
# new_order.Dead_Date = request.form.get("diedate")
# new_order.Finish = 0
# new_order.User_id = user_now.Id
#
# db.session.add(new_order)
# db.session.commit()
# success = 1
#
# return render_template('order.html', title=u"创建订单", userlogin_name=session['username'], user=user, success=success, now_time=now_time)
#
# elif request.method == 'POST':
# user_now = User.query.filter_by(Username=session['username']).first() # 数据库查询并取得当前用户对象
# new_order = Order(request.form.get("title")) # 数据库实例化新的Order对象
#
# new_order.Details = request.form.get("detials")
# new_order.Dead_Date = request.form.get("diedate")
# new_order.Finish = 0
# new_order.User_id = user_now.Id
#
#
# db.session.add(new_order)
# db.session.commit() # 先将除了图片的信息提交数据库,以免下面图片Id无法获取
#
# file = request.files['inputFile']
# filename = file.filename
# index_point = filename.user(".")
# filename = str(new_order.Id)+filename[index_point:]
# basepath = os.path.dirname(__file__)
# upload_path = os.path.join(basepath, 'static/Upload_File', secure_filename(filename))
# file.save(upload_path)
#
# new_order.Picture_Name = filename
# db.session.add(new_order)
# db.session.commit() # 再将图片信息提交数据库
#
# success = 1
#
# return render_template('order.html', title=u"创建订单", userlogin_name=session['username'], user=user, success=success, now_time=now_time)
#
# # 任务大厅展示
# @app.route('/orderwall', methods=['GET', 'POST'])
# def orderwall():
# # now_time = float(time.mktime(datetime.datetime.now().timetuple()))
# global datetime
# datetime = datetime
#
# checkuser()
# allorderwall = Order.query.order_by(Order.Id.desc()).all()
# # user = User.query.all()
# lenth = Order.query.count()
#
# if 'username' in session:
# return render_template('orderwall.html', title=u"任务广场",allorderwall=allorderwall, lenth=lenth, userlogin_name=session['username'], user=user, datetime=datetime)
# return render_template('orderwall.html', title=u"任务广场",allorderwall=allorderwall, lenth=lenth, datetime=datetime)
#
# @app.route('/join', methods=['GET', 'POST'])
# def join():
# checkuser()
# if 'username' in session:
# return render_template("join.html", title=u"加入我们", user=user)
# return render_template("join.html", title=u"加入我们")
#
# @app.route('/school_net', methods=['GET', 'POST'])
# def shcool_net():
# checkuser()
# if 'username' in session:
# return render_template("net_tech.html", title=u"校园网共享教程", user=user)
# return render_template("net_tech.html", title=u"校园网共享教程")
#
#
# # 订单详情
# @app.route('/orderwall/<int:order_id>', methods=['GET', 'POST'])
# def showdetails(order_id):
# global datetime
# datetime = datetime
# checkuser()
# AboutOrder = Order.query.filter_by(Id=order_id).first()
# if 'username' in session:
# return render_template('OrderDetails.html', title=u"任务详情", AboutOrder=AboutOrder, userlogin_name=session['username'], user=user, datetime=datetime)
# return render_template('OrderDetails.html', title=u"任务详情", AboutOrder=AboutOrder, datetime=datetime)
#
# # 确认接单
# @app.route('/orderwall/<int:order_id>/confirm', methods=['GET', 'POST'])
# def getorder(order_id):
# checkuser()
# got_success = 0
# AboutOrder = Order.query.filter_by(Id=order_id).first()
# if 'username' not in session:
# return render_template('notlogin.html', title=u"请先登陆")
# elif request.method == 'POST':
# if request.form.get("confirm") == "1":
# get_user = User.query.filter_by(Username=session['username']).first()
# AboutOrder.Got_id = get_user.Id
# AboutOrder.Got_Date = datetime.datetime.now()
# db.session.add(AboutOrder)
# db.session.commit()
# got_success = 1
# return redirect(url_for('takein', user_id=get_user.Id))
# else:
# return redirect(url_for('orderwall'))
#
# return render_template('confirmorder.html', title=u"确认接单", AboutOrder=AboutOrder, userlogin_name=session['username'], got_success=got_success, user=user)
#
# @app.route('/user/<int:user_id>/sendout', methods=['POST', 'GET'])
# def sendout(user_id):
# checkuser()
#
# AboutOrder = Order.query.filter_by(User_id=user_id).order_by(Order.Id.desc()).all()
# lenth = len(AboutOrder)
# if request.method == "POST":
# user_order = Order.query.filter_by(Id=request.form.get("order_id")).first()
# if request.form.get("cancel") == "1": #取消订单
# user_order.Finish = request.form.get("cancel")
# db.session.add(user_order)
# db.session.commit()
# else:
# user_order.Finish = request.form.get("finish") #确认收货
# db.session.add(user_order)
# db.session.commit()
# return render_template('sendout.html', title=u"发出订单", AboutOrder=AboutOrder, lenth=lenth, userlogin_name=session['username'], user=user)
#
# @app.route('/user/<int:user_id>/takein')
# def takein(user_id):
# checkuser()
#
# AboutOrder = Order.query.filter_by(Got_id=user_id).order_by(Order.Id.desc()).all()
# lenth = len(AboutOrder)
# return render_template('takein.html', title=u"接受订单", AboutOrder=AboutOrder, lenth=lenth, userlogin_name=session['username'], user=user)
if __name__ == '__main__':
app.run(host='192.168.3.5', port=80)
| [
"hopeaktian@gmail.com"
] | hopeaktian@gmail.com |
f4b2a1dbd9240673bd7048d07490b2712b5479ef | 4578b30c433510cf370d51475ec11cac9c3de1cb | /serpent/analytics_client.py | f7cc26e803be8a25bf0c6da550b983ec00c7ca18 | [
"MIT"
] | permissive | SerpentAI/SerpentAI | 0a5b2d567b50388722c3a3c5152555ce94256c49 | 00a487dd088c6ca2528d025f3273c0a796efe210 | refs/heads/dev | 2023-03-08T14:14:07.171435 | 2020-05-22T22:34:09 | 2020-05-22T22:34:09 | 88,444,621 | 7,216 | 950 | MIT | 2020-07-15T00:41:35 | 2017-04-16T21:48:39 | Python | UTF-8 | Python | false | false | 1,395 | py | from redis import StrictRedis
from datetime import datetime
from pprint import pprint
from serpent.config import config
import json
class AnalyticsClientError(BaseException):
pass
class AnalyticsClient:
def __init__(self, project_key=None):
if project_key is None:
raise AnalyticsClientError("'project_key' kwarg is expected...")
self.project_key = project_key
self.redis_client = StrictRedis(**config["redis"])
self.broadcast = config["analytics"].get("broadcast", False)
self.debug = config["analytics"].get("debug", False)
self.event_whitelist = config["analytics"].get("event_whitelist")
@property
def redis_key(self):
return f"SERPENT:{self.project_key}:EVENTS"
def track(self, event_key=None, data=None, timestamp=None, is_persistable=True):
if self.event_whitelist is None or event_key in self.event_whitelist:
event = {
"project_key": self.project_key,
"event_key": event_key,
"data": data,
"timestamp": timestamp if timestamp is not None else datetime.utcnow().isoformat(),
"is_persistable": is_persistable
}
if self.debug:
pprint(event)
if self.broadcast:
self.redis_client.lpush(self.redis_key, json.dumps(event))
| [
"info@nicholasbrochu.com"
] | info@nicholasbrochu.com |
6f69f90944511a4dd09b85444b506dbc254f8afb | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsattributedstring.py | b340f942563363e10e7fc3227d6dd2846b890741 | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,273 | py | import objc
import Foundation
import AppKit
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSAttributedString(TestCase):
def testMethodsFoundation(self):
self.assertArgIsOut(
AppKit.NSAttributedString.attributesAtIndex_effectiveRange_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.attributesAtIndex_longestEffectiveRange_inRange_,
1,
)
self.assertArgIsOut(
AppKit.NSAttributedString.attribute_atIndex_longestEffectiveRange_inRange_,
2,
)
self.assertResultIsBOOL(AppKit.NSAttributedString.isEqualToAttributedString_)
def testConstantsAppKit(self):
self.assertIsInstance(AppKit.NSManagerDocumentAttribute, str)
self.assertIsInstance(AppKit.NSFontAttributeName, str)
self.assertIsInstance(AppKit.NSParagraphStyleAttributeName, str)
self.assertIsInstance(AppKit.NSForegroundColorAttributeName, str)
self.assertIsInstance(AppKit.NSUnderlineStyleAttributeName, str)
self.assertIsInstance(AppKit.NSSuperscriptAttributeName, str)
self.assertIsInstance(AppKit.NSBackgroundColorAttributeName, str)
self.assertIsInstance(AppKit.NSAttachmentAttributeName, str)
self.assertIsInstance(AppKit.NSLigatureAttributeName, str)
self.assertIsInstance(AppKit.NSBaselineOffsetAttributeName, str)
self.assertIsInstance(AppKit.NSKernAttributeName, str)
self.assertIsInstance(AppKit.NSLinkAttributeName, str)
self.assertIsInstance(AppKit.NSStrokeWidthAttributeName, str)
self.assertIsInstance(AppKit.NSStrokeColorAttributeName, str)
self.assertIsInstance(AppKit.NSUnderlineColorAttributeName, str)
self.assertIsInstance(AppKit.NSStrikethroughStyleAttributeName, str)
self.assertIsInstance(AppKit.NSStrikethroughColorAttributeName, str)
self.assertIsInstance(AppKit.NSShadowAttributeName, str)
self.assertIsInstance(AppKit.NSObliquenessAttributeName, str)
self.assertIsInstance(AppKit.NSExpansionAttributeName, str)
self.assertIsInstance(AppKit.NSCursorAttributeName, str)
self.assertIsInstance(AppKit.NSToolTipAttributeName, str)
self.assertIsInstance(AppKit.NSCharacterShapeAttributeName, str)
self.assertIsInstance(AppKit.NSGlyphInfoAttributeName, str)
self.assertIsInstance(AppKit.NSMarkedClauseSegmentAttributeName, str)
self.assertIsInstance(AppKit.NSSpellingStateAttributeName, str)
self.assertEqual(AppKit.NSUnderlineStyleNone, 0x00)
self.assertEqual(AppKit.NSUnderlineStyleSingle, 0x01)
self.assertEqual(AppKit.NSUnderlineStyleThick, 0x02)
self.assertEqual(AppKit.NSUnderlineStyleDouble, 0x09)
self.assertEqual(AppKit.NSUnderlinePatternSolid, 0x0000)
self.assertEqual(AppKit.NSUnderlinePatternDot, 0x0100)
self.assertEqual(AppKit.NSUnderlinePatternDash, 0x0200)
self.assertEqual(AppKit.NSUnderlinePatternDashDot, 0x0300)
self.assertEqual(AppKit.NSUnderlinePatternDashDotDot, 0x0400)
self.assertIsInstance(AppKit.NSUnderlineByWordMask, int)
self.assertEqual(AppKit.NSSpellingStateSpellingFlag, 1)
self.assertEqual(AppKit.NSSpellingStateGrammarFlag, 2)
self.assertIsInstance(AppKit.NSPlainTextDocumentType, str)
self.assertIsInstance(AppKit.NSRTFTextDocumentType, str)
self.assertIsInstance(AppKit.NSRTFDTextDocumentType, str)
self.assertIsInstance(AppKit.NSMacSimpleTextDocumentType, str)
self.assertIsInstance(AppKit.NSHTMLTextDocumentType, str)
self.assertIsInstance(AppKit.NSDocFormatTextDocumentType, str)
self.assertIsInstance(AppKit.NSWordMLTextDocumentType, str)
self.assertIsInstance(AppKit.NSWebArchiveTextDocumentType, str)
self.assertIsInstance(AppKit.NSOfficeOpenXMLTextDocumentType, str)
self.assertIsInstance(AppKit.NSOpenDocumentTextDocumentType, str)
self.assertIsInstance(AppKit.NSPaperSizeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSLeftMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSRightMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSTopMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSBottomMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSViewSizeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSViewZoomDocumentAttribute, str)
self.assertIsInstance(AppKit.NSViewModeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSDocumentTypeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSReadOnlyDocumentAttribute, str)
self.assertIsInstance(AppKit.NSConvertedDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCocoaVersionDocumentAttribute, str)
self.assertIsInstance(AppKit.NSBackgroundColorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSHyphenationFactorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSDefaultTabIntervalDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCharacterEncodingDocumentAttribute, str)
self.assertIsInstance(AppKit.NSTitleDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCompanyDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCopyrightDocumentAttribute, str)
self.assertIsInstance(AppKit.NSSubjectDocumentAttribute, str)
self.assertIsInstance(AppKit.NSAuthorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSKeywordsDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCommentDocumentAttribute, str)
self.assertIsInstance(AppKit.NSEditorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCreationTimeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSModificationTimeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSExcludedElementsDocumentAttribute, str)
self.assertIsInstance(AppKit.NSTextEncodingNameDocumentAttribute, str)
self.assertIsInstance(AppKit.NSPrefixSpacesDocumentAttribute, str)
self.assertIsInstance(AppKit.NSDocumentTypeDocumentOption, str)
self.assertIsInstance(AppKit.NSDefaultAttributesDocumentOption, str)
self.assertIsInstance(AppKit.NSCharacterEncodingDocumentOption, str)
self.assertIsInstance(AppKit.NSTextEncodingNameDocumentOption, str)
self.assertIsInstance(AppKit.NSBaseURLDocumentOption, str)
self.assertIsInstance(AppKit.NSTimeoutDocumentOption, str)
self.assertIsInstance(AppKit.NSWebPreferencesDocumentOption, str)
self.assertIsInstance(AppKit.NSWebResourceLoadDelegateDocumentOption, str)
self.assertIsInstance(AppKit.NSTextSizeMultiplierDocumentOption, str)
self.assertEqual(AppKit.NSNoUnderlineStyle, 0)
self.assertEqual(AppKit.NSSingleUnderlineStyle, 1)
self.assertIsInstance(AppKit.NSUnderlineStrikethroughMask, int)
def testMethodsAppKit(self):
self.assertResultIsBOOL(AppKit.NSAttributedString.containsAttachments)
self.assertArgIsBOOL(AppKit.NSAttributedString.nextWordFromIndex_forward_, 1)
self.assertArgIsOut(AppKit.NSAttributedString.URLAtIndex_effectiveRange_, 1)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithURL_options_documentAttributes_error_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithURL_options_documentAttributes_error_, 3
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithData_options_documentAttributes_error_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithData_options_documentAttributes_error_, 3
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithPath_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithURL_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithRTF_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithRTFD_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithHTML_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithHTML_options_documentAttributes_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithHTML_baseURL_documentAttributes_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithRTFDFileWrapper_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.dataFromRange_documentAttributes_error_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.fileWrapperFromRange_documentAttributes_error_, 2
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_error_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_error_,
2,
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_error_,
3,
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_error_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_error_,
2,
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_error_,
3,
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_, 2
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_, 2
)
@min_os_level("10.6")
def testConstants10_6(self):
self.assertEqual(AppKit.NSAttributedStringEnumerationReverse, 1 << 1)
self.assertEqual(
AppKit.NSAttributedStringEnumerationLongestEffectiveRangeNotRequired,
1 << 20,
)
self.assertIsInstance(AppKit.NSWritingDirectionAttributeName, str)
self.assertIsInstance(AppKit.NSFileTypeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCategoryDocumentAttribute, str)
self.assertIsInstance(AppKit.NSFileTypeDocumentOption, str)
@min_os_level("10.7")
def testConstants10_7(self):
self.assertIsInstance(AppKit.NSVerticalGlyphFormAttributeName, str)
self.assertIsInstance(AppKit.NSTextLayoutSectionOrientation, str)
self.assertIsInstance(AppKit.NSTextLayoutSectionRange, str)
self.assertIsInstance(AppKit.NSTextLayoutSectionsAttribute, str)
@min_os_level("10.8")
def testConstants10_8(self):
self.assertIsInstance(AppKit.NSTextAlternativesAttributeName, str)
self.assertIsInstance(AppKit.NSUsesScreenFontsDocumentAttribute, str)
@min_os_level("10.10")
def testConstants10_10(self):
self.assertIsInstance(AppKit.NSTextEffectAttributeName, str)
self.assertIsInstance(AppKit.NSTextEffectLetterpressStyle, str)
@min_os_level("12.0")
def test_constants12_0(self):
self.assertEqual(Foundation.NSInlinePresentationIntentEmphasized, 1 << 0)
self.assertEqual(
Foundation.NSInlinePresentationIntentStronglyEmphasized, 1 << 1
)
self.assertEqual(Foundation.NSInlinePresentationIntentCode, 1 << 2)
self.assertEqual(Foundation.NSInlinePresentationIntentStrikethrough, 1 << 5)
self.assertEqual(Foundation.NSInlinePresentationIntentSoftBreak, 1 << 6)
self.assertEqual(Foundation.NSInlinePresentationIntentLineBreak, 1 << 7)
self.assertEqual(Foundation.NSInlinePresentationIntentInlineHTML, 1 << 8)
self.assertEqual(Foundation.NSInlinePresentationIntentBlockHTML, 1 << 9)
self.assertIsInstance(Foundation.NSInlinePresentationIntentAttributeName, str)
self.assertIsInstance(Foundation.NSAlternateDescriptionAttributeName, str)
self.assertIsInstance(Foundation.NSImageURLAttributeName, str)
self.assertIsInstance(Foundation.NSLanguageIdentifierAttributeName, str)
self.assertEqual(
Foundation.NSAttributedStringMarkdownParsingFailureReturnError, 0
)
self.assertEqual(
Foundation.NSAttributedStringMarkdownParsingFailureReturnPartiallyParsedIfPossible,
1,
)
self.assertEqual(Foundation.NSAttributedStringMarkdownInterpretedSyntaxFull, 0)
self.assertEqual(
Foundation.NSAttributedStringMarkdownInterpretedSyntaxInlineOnly, 1
)
self.assertEqual(
Foundation.NSAttributedStringFormattingInsertArgumentAttributesWithoutMerging,
1 << 0,
)
self.assertEqual(
Foundation.NSAttributedStringFormattingApplyReplacementIndexAttribute,
1 << 1,
)
self.assertIsInstance(Foundation.NSReplacementIndexAttributeName, str)
self.assertIsInstance(Foundation.NSMorphologyAttributeName, str)
self.assertIsInstance(Foundation.NSInflectionRuleAttributeName, str)
self.assertIsInstance(Foundation.NSPresentationIntentAttributeName, str)
self.assertIsInstance(Foundation.NSInflectionAlternativeAttributeName, str)
self.assertEqual(Foundation.NSPresentationIntentKindParagraph, 0)
self.assertEqual(Foundation.NSPresentationIntentKindHeader, 1)
self.assertEqual(Foundation.NSPresentationIntentKindOrderedList, 2)
self.assertEqual(Foundation.NSPresentationIntentKindUnorderedList, 3)
self.assertEqual(Foundation.NSPresentationIntentKindListItem, 4)
self.assertEqual(Foundation.NSPresentationIntentKindCodeBlock, 5)
self.assertEqual(Foundation.NSPresentationIntentKindBlockQuote, 6)
self.assertEqual(Foundation.NSPresentationIntentKindThematicBreak, 7)
self.assertEqual(Foundation.NSPresentationIntentKindTable, 8)
self.assertEqual(Foundation.NSPresentationIntentKindTableHeaderRow, 9)
self.assertEqual(Foundation.NSPresentationIntentKindTableRow, 10)
self.assertEqual(Foundation.NSPresentationIntentKindTableCell, 11)
self.assertEqual(Foundation.NSPresentationIntentTableColumnAlignmentLeft, 0)
self.assertEqual(Foundation.NSPresentationIntentTableColumnAlignmentCenter, 1)
self.assertEqual(Foundation.NSPresentationIntentTableColumnAlignmentRight, 2)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertArgHasType(
AppKit.NSAttributedString.enumerateAttributesInRange_options_usingBlock_,
0,
AppKit.NSRange.__typestr__,
)
self.assertArgIsBlock(
AppKit.NSAttributedString.enumerateAttributesInRange_options_usingBlock_,
2,
b"v@" + AppKit.NSRange.__typestr__ + b"o^" + objc._C_NSBOOL,
)
self.assertArgHasType(
AppKit.NSAttributedString.enumerateAttribute_inRange_options_usingBlock_,
1,
AppKit.NSRange.__typestr__,
)
self.assertArgIsBlock(
AppKit.NSAttributedString.enumerateAttribute_inRange_options_usingBlock_,
3,
b"v@" + AppKit.NSRange.__typestr__ + b"o^" + objc._C_NSBOOL,
)
@min_os_level("12.0")
def test_methods12_0(self):
self.assertResultIsBOOL(
Foundation.NSAttributedStringMarkdownParsingOptions.allowsExtendedAttributes
)
self.assertArgIsBOOL(
Foundation.NSAttributedStringMarkdownParsingOptions.setAllowsExtendedAttributes_,
0,
)
self.assertArgIsOut(
Foundation.NSAttributedString.initWithContentsOfMarkdownFileAtURL_options_baseURL_error_,
3,
)
self.assertArgIsOut(
Foundation.NSAttributedString.initWithMarkdown_options_baseURL_error_, 3
)
self.assertArgIsOut(
Foundation.NSAttributedString.initWithMarkdownString_options_baseURL_error_,
3,
)
self.assertArgIsPrintf(
Foundation.NSAttributedString.initWithFormat_options_locale_, 0
)
self.assertArgIsPrintf(
Foundation.NSAttributedString.localizedAttributedStringWithFormat_, 0
)
self.assertArgIsPrintf(
Foundation.NSAttributedString.localizedAttributedStringWithFormat_options_,
0,
)
self.assertArgIsPrintf(
Foundation.NSMutableAttributedString.appendLocalizedFormat_, 0
)
self.assertResultIsBOOL(
Foundation.NSPresentationIntent.isEquivalentToPresentationIntent_
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
8b15a616e85162f478947d14f75d41b23052b9be | b91dbe904d464923d51b5402a080854c57ce0aa7 | /scripts/stop_vm.py | f3c2a61bb0927e993cd966ec8e376db38be47620 | [] | no_license | ESGF/esgf-jenkins | 0c18822456604e6a161e52bf435ea42996c1ae56 | 3c29189831346761ebfee2c48d88fd3f42fa27b1 | refs/heads/master | 2021-06-01T16:17:25.285427 | 2021-02-08T23:49:32 | 2021-02-08T23:49:32 | 129,452,815 | 0 | 1 | null | 2021-02-08T23:49:33 | 2018-04-13T20:59:07 | Python | UTF-8 | Python | false | false | 697 | py | import sys
import os
import argparse
import re
# Make the sibling "modules" directory importable before pulling in the
# project helpers (Util, vm_util) below.
this_dir = os.path.abspath(os.path.dirname(__file__))
modules_dir = os.path.join(this_dir, '..', 'modules')
sys.path.append(modules_dir)
from Util import *
from vm_util import *
# Fixed help text: this script *stops* a VM; the original description said
# "prepare a virtual machine" (copied from a sibling script).
parser = argparse.ArgumentParser(description="stop a virtual machine",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-H", "--vm_host", help="vm_host", required=True)
parser.add_argument("-x", "--vmx", help="full path vmx file name", required=True)
args = parser.parse_args()
vm_host = args.vm_host
vmx = args.vmx
# stop vm if running, and exit with the helper's return code so callers
# (e.g. Jenkins) can detect failure.
ret_code = stop_vm_if_running(vm_host, vmx)
sys.exit(ret_code)
| [
"muryanto1@ML-9516678.llnl.gov"
] | muryanto1@ML-9516678.llnl.gov |
cdc62e0661ae30c80e83b7d35e680840195d3461 | 2929a5acbe52994cf2f961ed120374b7b330d074 | /form5/migrations/0008_auto_20200724_1433.py | 30b1610c3e20398521e7651d662281109a24371c | [] | no_license | orhunakar01/larasolar01 | a52135747676c587f6dfd98c67bf4c4a323dc448 | 18e12ecd5adc086da56b956a7f8da33f0723c84a | refs/heads/master | 2022-12-04T16:06:32.983099 | 2020-08-26T06:45:03 | 2020-08-26T06:45:03 | 290,418,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Generated by Django 3.0.8 on 2020-07-24 11:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the ``dosya`` file field of ``form5``.

    Adds a database index and sets the (Turkish) verbose name shown in
    admin forms. Depends on migration 0007 of the same app.
    """
    dependencies = [
        ('form5', '0007_auto_20200724_1430'),
    ]
    operations = [
        migrations.AlterField(
            model_name='form5',
            name='dosya',
            # db_index=True adds an index on the stored file path.
            field=models.FileField(db_index=True, upload_to='', verbose_name='Fatura PDF Ekleyiniz.'),
        ),
    ]
| [
"orhunakar@yandex.com"
] | orhunakar@yandex.com |
fb15d31c5e6405b39011d5a052b68586cdb57a9a | 97608e007540a6507d323beb10ad80213a409180 | /Binary Trees and BST/Level order traversal in spiral form/code.py | ad78d1ffae09e21f074c7d637492c830f607fefd | [] | no_license | saurabh23kgp/Python-codes-for-Standard-Programming-Questions | ca748a8b9cf6a4ff0cd8caf384327574bee82cfd | 5c6cdc1ab9973c115bf70d6ef4142f81ce431d7b | refs/heads/master | 2020-12-23T17:05:58.162011 | 2020-07-26T08:34:32 | 2020-07-26T08:34:32 | 237,211,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | def printSpiral(root):
    # Spiral (zig-zag) level-order traversal using two stacks: nodes popped
    # from stack1 push their children onto stack2 and vice versa, so the
    # print direction flips on every level.
    stack1=[]
    stack2=[]
    if(root==None):
        return
    stack1.append(root)
    while(len(stack1)!=0 or len(stack2)!=0):
        while(len(stack1)!=0):
            top=stack1.pop(len(stack1)-1)
            # Push right child first so the next level pops left-to-right.
            if top.right:
                stack2.append(top.right)
            if top.left:
                stack2.append(top.left)
            print(top.data, end=' ')
        while(len(stack2)!=0):
            top=stack2.pop(len(stack2)-1)
            # Push left child first so the level after pops right-to-left.
            if top.left:
                stack1.append(top.left)
            if top.right:
                stack1.append(top.right)
            print(top.data, end=' ')
| [
"noreply@github.com"
] | saurabh23kgp.noreply@github.com |
e8bd886a3bdc6cc1e1d74870cc517a83b8118279 | 51885da54b320351bfea42c7dd629f41985454cd | /abc198/e.py | 4bad4cd9760be8cf70992b7142d358622bb251b8 | [] | no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | #
# abc198 e
#
import sys
from io import StringIO
import unittest
from collections import deque
class TestClass(unittest.TestCase):
    """I/O-based tests: feed sample input to resolve() and compare stdout."""
    def assertIO(self, input, output):
        # `input`/`output` shadow the builtins, but only inside this method.
        # Temporarily swap the std streams so resolve() reads the sample
        # input and writes into a capture buffer.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the final character (trailing newline)
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_入力例_1(self):
        """Sample case 1 from the problem statement."""
        input = """6
2 7 1 8 2 8
1 2
3 6
3 2
4 3
2 5"""
        output = """1
2
3
4
6"""
        self.assertIO(input, output)
    def test_入力例_2(self):
        """Sample case 2 from the problem statement."""
        input = """10
3 1 4 1 5 9 2 6 5 3
1 2
2 3
3 4
4 5
5 6
6 7
7 8
8 9
9 10"""
        output = """1
2
3
5
6
7
8"""
        self.assertIO(input, output)
def resolve():
    # NOTE(review): this solution is unfinished -- it parses the input and
    # builds the adjacency list G, but never computes or prints an answer
    # (the sample tests above cannot pass yet).
    N = int(input())
    C = list(map(int, input().split()))
    AB = [list(map(int, input().split())) for _ in range(N-1)]
    # [[]*N for ...] is effectively [[] for _ in range(N)]: []*N is still [].
    G = [[]*N for _ in range(N)]
    for a, b in AB:
        G[a-1].append(b-1)
        G[b-1].append(a-1)
if __name__ == "__main__":
    # unittest.main()
    # resolve()
    # Both entry points are commented out; `pass` is required because an
    # `if` suite containing only comments is a SyntaxError.
    pass
| [
"mskt4440@gmail.com"
] | mskt4440@gmail.com |
a0bbe3046b1630dd3f350a6d8da6f98199236651 | be1d6aa78bd2750b55c3967390186651af3a5679 | /mysite/coderslab/urls.py | ca47a097eaa380d732314cab27915254d207679a | [] | no_license | polabernat/bibliotekaplyt | fa8b45501f9cb3d5eac6419185260f6a25f06018 | bab34225c4c5dd1adfc11b3a0af09bc8abf81ae2 | refs/heads/main | 2023-05-04T08:52:47.471902 | 2021-05-28T09:39:05 | 2021-05-28T09:39:05 | 339,998,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | """coderslab URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from exercises_app.views import LoginView, UserProfileView, ListUsersView, LogoutView, AddUserView, HistoryView, ListBandsView, show_band, BandAddView, AlbumAddView, SongAddView
# URL routes for the coderslab project: Django admin plus the
# exercises_app views (auth, user management, band/album/song CRUD).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('login/', LoginView.as_view()),
    path('list_users/', ListUsersView.as_view()),
    path('logout/', LogoutView.as_view()),
    path('add_user/', AddUserView.as_view()),
    path('history/', HistoryView.as_view()),
    path('user_profile/', UserProfileView.as_view()),
    path('list_bands/', ListBandsView.as_view()),
    # Fixed: route patterns must not start with '/' (Django check urls.W002);
    # the leading slash made this URL unreachable.
    path('show-band/<int:id>/', show_band),
    path('add_band/', BandAddView.as_view()),
    path('add_song/', SongAddView.as_view()),
    path('add_album/', AlbumAddView.as_view()),
]
| [
"polabernatx@gmail.com"
] | polabernatx@gmail.com |
aeb8541180e92184d5a876342cae582c52f814be | 85a551fd2db01fe422aae8b74b608e8c971def80 | /Data Structures and Algorithms/Algorithmic Toolbox/week6/knapsack.py | 11f06860939104f471dd1f448614f8e767e3e7ba | [] | no_license | stellacly0896/DataStructures-Algorithms | ede59113395b2c83af470bb31a5515ac9051e44f | 5a4654aef9376e0a2c06841e5624a8f9972ee4d1 | refs/heads/master | 2020-08-15T11:53:41.192806 | 2019-10-15T15:50:11 | 2019-10-15T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Uses python3
import sys
def optimal_weight(W, weights):
    """Return the maximum total weight <= W achievable with a subset of weights.

    Classic 0/1 knapsack where each item's value equals its weight.

    Fixes the original indexing bug: the table was allocated with
    len(weights)+1 rows but indexed as T[j][i] with j ranging up to W,
    which raised IndexError whenever W >= len(weights)+1.
    """
    n = len(weights)
    # T[i][j]: best achievable weight using the first i items with capacity j.
    T = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        w_i = weights[i - 1]
        for j in range(1, W + 1):
            T[i][j] = T[i - 1][j]  # option: skip item i
            if w_i <= j:           # option: take item i if it fits
                T[i][j] = max(T[i][j], T[i - 1][j - w_i] + w_i)
    return T[n][W]
if __name__ == '__main__':
    # Input format: "W n w1 ... wn" on stdin. Note `input` shadows the
    # builtin here, which is harmless in this short script.
    input = sys.stdin.read()
    W, n, *w = list(map(int, input.split()))
    print(optimal_weight(W, w))
| [
"noreply@github.com"
] | stellacly0896.noreply@github.com |
b91fc64b91e32a711570b9de2294a00efffe81b2 | 2995bc1f9a28aae8bf9f0974b3f38ff9926d87aa | /triangle/triangle.py | 676f51ed32098b6f021411f5d55632f9bff839de | [] | no_license | quanvh53/ktpm2013 | bf6fda39c1056935723479c44c277fc920537fb7 | ca64f2a3475b15592709f8a8ea068c5c371ab40e | refs/heads/master | 2020-05-31T08:31:47.241805 | 2013-10-19T16:55:37 | 2013-10-19T16:55:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | import math
def checkinput(a, b, c):
    """Return True when all three side lengths are real numbers.

    Generalized from the original float-only check: plain ints are valid
    side lengths too. bool is rejected explicitly even though it
    subclasses int.
    """
    return all(
        isinstance(v, (int, float)) and not isinstance(v, bool)
        for v in (a, b, c)
    )
def detect_triangle(a, b, c):
    """Classify the triangle with sides a, b, c.

    Returns one of the Vietnamese labels:
    "tam giac deu" (equilateral), "tam giac vuong can" (right isosceles),
    "tam giac can" (isosceles), "tam giac vuong" (right),
    "tam giac thuong" (scalene), "khong phai la tam giac" (not a
    triangle) or "loi dau vao" (invalid input).
    """
    if not checkinput(a, b, c):
        return "loi dau vao"
    if not (a + b > c and b + c > a and c + a > b):
        return "khong phai la tam giac"
    saiso = 0.00000001

    def vuong(hyp, leg1, leg2):
        # True when hyp matches the hypotenuse over leg1/leg2 within tolerance.
        return math.fabs(hyp - math.sqrt(leg1 ** 2 + leg2 ** 2)) <= saiso

    if a == b == c:
        return "tam giac deu"
    if a == b:
        return "tam giac vuong can" if vuong(c, a, b) else "tam giac can"
    if b == c:
        return "tam giac vuong can" if vuong(a, c, b) else "tam giac can"
    if c == a:
        return "tam giac vuong can" if vuong(b, a, c) else "tam giac can"
    # All sides distinct: right triangle if any side is the hypotenuse.
    if vuong(a, c, b) or vuong(b, a, c) or vuong(c, a, b):
        return "tam giac vuong"
    return "tam giac thuong"
| [
"quanvh90@gmail.com"
] | quanvh90@gmail.com |
c3f9bcc49bee8ec68162fad1d986ea68c37b9afd | 36c4fd01715ee2b495f0540e4a91709e513f407c | /my_expenses/asgi.py | 862ff845bc0fa972abac620cd8375856b2f30854 | [] | no_license | mjsphdev/my_expenses | 41f72f8f1cb240d039a769814e2259798c83421a | 4b2b7485a9eaa8c7460aba3ff7c34121e27c3346 | refs/heads/main | 2023-07-23T04:52:18.517589 | 2021-08-18T03:53:46 | 2021-08-18T03:53:46 | 396,293,312 | 0 | 0 | null | 2021-08-18T03:53:47 | 2021-08-15T09:21:23 | CSS | UTF-8 | Python | false | false | 399 | py | """
ASGI config for my_expenses project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project's settings module (unless the environment
# already set one), then expose the ASGI callable the server looks up.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_expenses.settings')
application = get_asgi_application()
| [
"colibaomarkjoseph@gmail.com"
] | colibaomarkjoseph@gmail.com |
d0021b1c204fd7b628962ab2187afb20653b301c | 76d5f1fca6e58e0547448fa1d80ebfb1751059e5 | /music163/__init__.py | 72958dab4bb8346b0348062f7a911510b9d7bde1 | [] | no_license | shenxuexin/spiders | 897cad179cafb88f0705c1c53807dd3eba95dba2 | fd521102d282f9045d830e43fe02ded42d71ac24 | refs/heads/main | 2023-02-09T22:41:01.526452 | 2021-01-06T03:32:27 | 2021-01-06T03:32:27 | 322,570,998 | 2 | 0 | null | 2021-01-06T03:32:28 | 2020-12-18T11:07:11 | Python | UTF-8 | Python | false | false | 139 | py | # -*- coding: utf-8 -*-
"""
@Author : shenxuexin
@Time : 2021/1/5 13:06
@File : __init__.py.py
@software:PyCharm
""" | [
"shenxuexin@ruc.edu.cn"
] | shenxuexin@ruc.edu.cn |
0589db5b1c71d413e91ef8dc821f3c27deb42db7 | 1c5f3433f96a68387e2eade17bde6635b5f27bdd | /cashback_api/sales/services.py | ea9795a7ecbd89544c8812a2ff19528ca6bf2196 | [] | no_license | guzzz/cashback-api-rest | d2eb92bcc58e9eeeeb3bd1c1b66c04b2b54ba4cd | 85bbea1859d9da2db5a03d1d545e032608847ade | refs/heads/main | 2023-01-19T21:48:59.228941 | 2020-11-25T04:31:45 | 2020-11-25T04:31:45 | 315,829,060 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 352 | py |
from decimal import Decimal
def check_cpf(cpf):
    """Return True only for the single CPF eligible for cashback."""
    return cpf == '153.509.460-56'
def calculate_cashback(value):
    """Return ``(percentage, formatted_amount)`` for a purchase value.

    Tiers: value <= 1000 -> 10%; 1000 < value < 1500 -> 15%; else 20%.
    The amount is formatted with two decimal places.

    Fix: the rates are now built from strings so the Decimals are exact;
    the original ``Decimal(0.10)`` inherited binary-float rounding error.
    """
    if value <= 1000:
        return 10, "{:.2f}".format(value * Decimal("0.10"))
    elif 1000 < value < 1500:
        return 15, "{:.2f}".format(value * Decimal("0.15"))
    else:
        return 20, "{:.2f}".format(value * Decimal("0.20"))
"daniel.guzman@finxi.com.br"
] | daniel.guzman@finxi.com.br |
780506e190b672d63cc1156c97bd4e2402b86cb1 | 0a973b3c6aad4518e17ff1eebd59a254fa07a757 | /specialize.py | 5d4c2d2885e8e973257f5b085e313e209e4e3688 | [] | no_license | YiningWang2/DC_power_prediction_rank21 | 946b8acef726aca562c4e143c1c16649efd1d993 | d5aff148c675619a0e71c9900dd28fd027d34f2a | refs/heads/master | 2022-10-06T11:14:52.591322 | 2019-12-16T03:18:31 | 2019-12-16T03:18:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import StratifiedKFold
import csv
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import lightgbm as lgb
from lightgbm import LGBMRegressor
from sklearn.tree import export_graphviz
import graphviz
import sys
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from func import get_hour, get_min, get_time, get_month, get_day, add_poly_features, add_plus_features, add_sub_features, add_div_features, dis2peak, lgb_train_actual_irradiance, lgb_train_actual_power, xgb_train_actual_irradiance, xgb_train_actual_power, normalize, shift_1, t_sne, pca, var_4_feature, mean_4_feature, max_4_feature, min_4_feature, daytime_var_4_feature, daytime_mean_4_feature, daytime_max_4_feature, daytime_min_4_feature, numerical_4_feature, daytime_feature, nighttime_feature, season_4_feature, specialize_2,specialize_3, specialize_4, data_missing_process1, data_missing_process2, data_missing_process3, data_missing_process4, data_selection, first_feature_engineering, second_feature_engineering, data_preprocessing
import time
def specialize(train_list_i, pred_label_final):
    """Apply the per-station post-processing that matches *train_list_i*.

    Fix: the original ignored the ``train_list_i`` parameter and read the
    undefined globals ``train_list`` and ``i`` instead (a NameError at
    call time unless the caller happened to define them).

    ``test_old`` is still taken from module scope -- presumably the test
    dataframe loaded by the caller (TODO: pass it explicitly).
    """
    ###specialize_2
    if train_list_i == 'train_2.csv':
        pred_label_final = specialize_2(test_old['month'], test_old['hour'], test_old['min'], pred_label_final)
    ###specialize_3
    if train_list_i == 'train_3.csv':
        pred_label_final = specialize_3(test_old['month'], test_old['hour'], test_old['min'], pred_label_final)
    ###specialize_4
    if train_list_i == 'train_4.csv':
        pred_label_final = specialize_4(test_old['month'], test_old['hour'], test_old['min'], pred_label_final)
    return pred_label_final
"394483135@qq.com"
] | 394483135@qq.com |
eed9894019e05eca7b30267d37c17455147ae279 | 52a3beeb07ad326115084a47a9e698efbaec054b | /horizon/.venv/lib/python2.7/site-packages/muranodashboard_org/api/packages.py | 30555b0805e18d567b9299fc0c686cec216987c7 | [
"Apache-2.0"
] | permissive | bopopescu/sample_scripts | 3dade0710ecdc8f9251dc60164747830f8de6877 | f9edce63c0a4d636f672702153662bd77bfd400d | refs/heads/master | 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null | UTF-8 | Python | false | false | 3,859 | py | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from django.conf import settings
import yaml
from muranodashboard import api
from muranodashboard.common import cache
from muranodashboard.dynamic_ui import yaql_expression
def package_list(request, marker=None, filters=None, paginate=False,
                 page_size=20, sort_dir=None, limit=None):
    """Return a ``(packages, has_more_data)`` pair from the Murano API.

    When *paginate* is true, one extra item is requested so the presence
    of a following page can be detected; the extra item is dropped and
    ``has_more_data`` is set instead.
    """
    limit = limit or getattr(settings, 'PACKAGES_LIMIT', 100)
    filters = filters or {}
    # Ask for one extra row when paginating to detect a next page.
    request_size = page_size + 1 if paginate else limit
    if marker:
        filters['marker'] = marker
    if sort_dir:
        filters['sort_dir'] = sort_dir
    packages_iter = api.muranoclient(request).packages.filter(
        limit=request_size, **filters)
    has_more_data = False
    if paginate:
        packages = list(itertools.islice(packages_iter, request_size))
        if len(packages) > page_size:
            packages.pop()
            has_more_data = True
    else:
        packages = list(packages_iter)
    return packages, has_more_data
def apps_that_inherit(request, fqn):
    """Return packages inheriting *fqn*; empty list unless Glare is enabled."""
    if not getattr(settings, 'MURANO_USE_GLARE', False):
        return []
    return api.muranoclient(request).packages.filter(inherits=fqn)
def app_by_fqn(request, fqn, catalog=True):
    """Return the first package with the given FQN, or None when absent."""
    apps = api.muranoclient(request).packages.filter(fqn=fqn, catalog=catalog)
    try:
        # Use the next() builtin (works on Python 2.6+ and Python 3)
        # instead of the Python 2-only .next() iterator method.
        return next(apps)
    except StopIteration:
        return None
def make_loader_cls():
    """Build a YAML Loader subclass that turns ``!yaql`` tags into
    YaqlExpression objects, both for explicit tags and (via the implicit
    resolver) for bare scalars that look like YAQL expressions.
    """
    class Loader(yaml.Loader):
        pass
    def yaql_constructor(loader, node):
        value = loader.construct_scalar(node)
        return yaql_expression.YaqlExpression(value)
    # workaround for PyYAML bug: http://pyyaml.org/ticket/221
    # Copy the resolver table so add_implicit_resolver below does not
    # mutate the table shared with the base yaml.Loader class.
    resolvers = {}
    for k, v in yaml.Loader.yaml_implicit_resolvers.items():
        resolvers[k] = v[:]
    Loader.yaml_implicit_resolvers = resolvers
    Loader.add_constructor(u'!yaql', yaql_constructor)
    Loader.add_implicit_resolver(
        u'!yaql', yaql_expression.YaqlExpression, None)
    return Loader
# Some API data calls below are cached. Note that not every package
# attribute getter should be cached - only immutable attributes can be
# cached safely. For example, it would be a mistake to cache the
# application name: it can be changed in Manage -> Packages, while the
# cache is immutable (its contents are fetched from the API only once).
@cache.with_cache('ui', 'ui.yaml')
def get_app_ui(request, app_id):
    """Fetch (and cache) the dynamic-UI definition of the package."""
    client = api.muranoclient(request)
    return client.packages.get_ui(app_id, make_loader_cls())
@cache.with_cache('logo', 'logo.png')
def get_app_logo(request, app_id):
    """Fetch (and cache) the package logo image."""
    client = api.muranoclient(request)
    return client.packages.get_logo(app_id)
@cache.with_cache('supplier_logo', 'supplier_logo.png')
def get_app_supplier_logo(request, app_id):
    """Fetch (and cache) the supplier logo image of the package."""
    client = api.muranoclient(request)
    return client.packages.get_supplier_logo(app_id)
@cache.with_cache('package_fqn')
def get_app_fqn(request, app_id):
    """Fetch (and cache) the fully qualified name of the package."""
    return api.muranoclient(request).packages.get(app_id).fully_qualified_name
@cache.with_cache('package_name')
def get_service_name(request, app_id):
    """Fetch (and cache) the display name of the package."""
    return api.muranoclient(request).packages.get(app_id).name
| [
"Suhaib.Chishti@exponential.com"
] | Suhaib.Chishti@exponential.com |
59084e9a817aa6f960d8e909db12bea043370a81 | 2d41d08982f792d4959b93abf114a75c3a8e9a6f | /Leetcode/two_sum.py | 6dff6ce84dc7729c1fc7f406bb022829b27e1233 | [] | no_license | codeAligned/Programming-Interviews-Problems | 391715470d2b8e1baa204c09243d91ef8382402e | 46d9eba09c4b23162127986b8adc9f8c5689f567 | refs/heads/master | 2021-05-29T03:54:33.860142 | 2015-03-05T18:23:36 | 2015-03-05T18:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | '''
https://oj.leetcode.com/problems/two-sum/
'''
class Solution:
    # @return a tuple, (index1, index2)
    def twoSum(self, num, target):
        """Return 1-based indices (i, j), i < j, of two entries summing to target.

        Rewritten as a single pass with a value -> index map:
        * ``dict.has_key()`` was removed in Python 3;
        * the original mapped value -> value and relied on ``list.index``,
          making it O(n^2); this version is O(n) and handles duplicates.
        Returns None when no pair exists (as the original did).
        """
        seen = {}  # value -> earliest index where it appeared
        for i, n in enumerate(num):
            complement = target - n
            if complement in seen:
                # seen[complement] < i by construction, so order is correct.
                return (seen[complement] + 1, i + 1)
            seen[n] = i
| [
"NicolasRichard3@gmail.com"
] | NicolasRichard3@gmail.com |
0160a4f6fba61d4c54432021f63c0951eec3b1f0 | aee7d5afe722157d42a60bf482b2a8314b7de27b | /startpage/urls.py | cb36b60de80a489e1cf273065304635647728511 | [] | no_license | nikolaiwiskow/adwords-automation-tool | 0c02cb849d8c001fe19c4af45061e05a71b88562 | 4ff90b1023ea52da640d5583d1f912e0a28f117d | refs/heads/master | 2020-05-29T09:35:56.402550 | 2019-05-28T17:02:53 | 2019-05-28T17:02:53 | 189,069,536 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.conf.urls import url, include
from . import views
urlpatterns = [
    # Site root -> the start page view.
    url(r"^$", views.startpage, name="startpage"),
] | [
"nikolai.wiskow@web.de"
] | nikolai.wiskow@web.de |
be5b5dfd52caa884259bb5fe6da787568439458f | 0800c01c7e3bd33889faf6366afa936e5be2583a | /mtg/mtg/middlewares.py | e19d90d8efd68446e0bf2dc6d0e423d9e4eb08ab | [] | no_license | stberg3/mtg_scrape | 4ce9279d1d0ae97114a1d5f3f441ee383b8363c4 | 975106a660e2bf6ed75013310d3bc8aa5520482c | refs/heads/master | 2021-01-25T04:35:12.255274 | 2017-07-29T20:13:36 | 2017-07-29T20:13:36 | 93,448,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class MtgSpiderMiddleware(object):
    """Spider middleware for the mtg project (pass-through template).

    Fix: the hook methods below were missing the ``self`` parameter, so
    Scrapy would have bound the instance to ``response``/``result``/...
    and every invocation would have misbehaved.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None
    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i
    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass
    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"stberg3@gmail.com"
] | stberg3@gmail.com |
82816f71d39b86ba1fbc28e7c5424832c39d87fc | 457d9b27a97f461de952ffa3b9c15691df6b64f7 | /venv/bin/flask | c64127dcb4426847b803600b6637e3ea090ef696 | [] | no_license | PerlaOvalle/todoflask | 63b140d2bfad8d17b0a5dfd3e4d7c32670315f6d | 7faf56be05c5a69bdae938274db79bf1f0b05a4b | refs/heads/master | 2023-03-23T05:21:09.455871 | 2021-03-25T07:49:15 | 2021-03-25T07:49:15 | 351,350,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/Users/perlaovalle/Documents/todoflask/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.pyw"/".exe" suffix that
    # setuptools-generated launchers may append, then run the Flask CLI
    # and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"perla.ovla@gmail.com"
] | perla.ovla@gmail.com | |
12a68d3c52fd7906308e791dffdf01e13c158e3e | 4e558281ab352b745e970936650e479bbb687982 | /examples/simple_example1.py | 5f9b339dddecc8625b94842f4f3f30d38ea07d00 | [
"MIT"
] | permissive | videoflow/videoflow | e3b84b3acd3591837d30ce51d5023f3dee9823f3 | c49d3fe6c814574bcda1a4e907ce52ea86e1617c | refs/heads/master | 2023-01-24T06:51:56.141621 | 2022-01-20T14:23:58 | 2022-01-20T14:23:58 | 181,554,939 | 1,065 | 96 | MIT | 2022-01-20T14:23:59 | 2019-04-15T19:47:22 | Python | UTF-8 | Python | false | false | 461 | py | from videoflow.core import Flow
from videoflow.producers import IntProducer
from videoflow.processors import IdentityProcessor, JoinerProcessor
from videoflow.consumers import CommandlineConsumer
# Demo pipeline: an integer producer feeds two chained identity
# processors; the joiner merges both intermediate streams and the
# consumer prints the joined values to the console.
# (IntProducer(0, 40, 0.1): presumably start/stop/interval -- confirm
# against the videoflow IntProducer documentation.)
producer = IntProducer(0, 40, 0.1)
identity = IdentityProcessor()(producer)
identity1 = IdentityProcessor()(identity)
joined = JoinerProcessor()(identity, identity1)
printer = CommandlineConsumer()(joined)
# Build the flow from sources to sinks, start it and wait for completion.
flow = Flow([producer], [printer])
flow.run()
flow.join()
| [
"jadielam@gmail.com"
] | jadielam@gmail.com |
bb4fb6df005a991c68abe7f40c9c555fd11d1cfa | a6a2818cf08f0f62ac1d0b6ddfbcc3ad363cf391 | /mutti/screen.py | 2e2341a8d3ca6b515ddd20b47f27ad7a1ea55c22 | [] | no_license | mkrieger1/mutti | 55519c4b2f65dc8a58646a6258655efa081f9f49 | aa24493d785706eaf7f9515d288203f86f0a7897 | refs/heads/master | 2021-06-10T15:57:44.773758 | 2016-12-15T16:43:09 | 2016-12-15T16:43:09 | 23,322,555 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from .panel import Panel, ExitLoop
from .status import Status
from .colors import color_attr
class Screen(Panel):
    """Top-level panel: one content child centered above a status bar."""
    _max_children = 2  # one of them is the statusbar
    def __init__(self):
        Panel.__init__(self)
        self.set_focus(True)
        # The status bar is adopted first, so it is children[0].
        statusbar = Status(width=80)
        self.adopt(statusbar)
        self.statusbar = statusbar
    def _get_size(self):
        """Aggregate (min_h, min_w, max_h, max_w) over all children."""
        min_height = sum(c.min_height for c in self.children)
        min_width = sum(c.min_width for c in self.children)
        max_height = sum(c.max_height for c in self.children)
        max_width = sum(c.max_width for c in self.children)
        return (min_height, min_width, max_height, max_width)
    def _layout(self, height, width):
        # children[1] is the content panel (the statusbar was adopted first).
        c = self.children[1]
        w = max(ch.min_width for ch in self.children)
        h = min(c.min_height, height-1)  # reserve one line for the statusbar
        # Center the content; clamp at the top-left if the screen is small.
        top = max((height-h) // 2, 0)
        left = max((width-w) // 2, 0)
        c.give_window(height=h, width=w, top=top, left=left)
        # The statusbar sits directly below the content, same width.
        self.statusbar.give_window(height=1, width=w, top=h+top, left=left)
    def _erase(self, height, width):
        # Repaint the whole background with a green "/~" fill pattern.
        self.win.erase()
        self.fill(height, width, '/~', color_attr("green"))
    def _draw(self, height, width):
        pass  # nothing to draw beyond the background and the children
    def _on_exit(self):
        pass  # hook for subclasses; invoked by _handle_key before quitting
    def _handle_key(self, key):
        # 'q'/'Q' quits the application; every other key is passed through.
        if key in list(map(ord, 'qQ')):
            self._on_exit()
            raise ExitLoop
        else:
            return key
| [
"michael.krieger@ziti.uni-heidelberg.de"
] | michael.krieger@ziti.uni-heidelberg.de |
d9a05db0b14042594d75ea9f4cadf8c67f31a6c5 | 0bb63518c921b55032205a1f242eea7e0e4b6c36 | /src/domain/data/about.py | f96e6edd39d1603e0544c7a53c74385e9f2cf1aa | [] | no_license | iosifsecheleadan/professor-pages | 938c97ff9f4df74d68c5177a9856d8cc331cdc51 | 7cede013bb7115e9ad7b01381768fe0833cfbdd8 | refs/heads/master | 2023-03-06T15:08:59.699362 | 2021-02-17T08:38:57 | 2021-02-17T08:38:57 | 320,370,157 | 0 | 0 | null | 2021-01-12T17:59:09 | 2020-12-10T19:29:13 | Python | UTF-8 | Python | false | false | 1,445 | py | from src.domain.data.base import DataEntity, DataException
from src.domain.data.document import Document
# todo add repository method : add_about_document
from src.domain.view.factory import material_card, title
class About(DataEntity):
    """Collection of documents rendered in the "About" page section."""

    def __init__(self, documents: list):
        super(About, self).__init__(0)
        self.documents = documents

    @staticmethod
    def from_csv(csv_string: str):
        """Build an About entity from ``id,doc1,doc2,...`` CSV text."""
        fields = csv_string.strip().split(",")
        if len(fields) < 2:
            raise DataException("Invalid CSV Format")
        docs = [Document.from_csv(location) for location in fields[1:]]
        entity = About(docs)
        entity.set_id(int(fields[0]))
        return entity

    def to_csv(self) -> str:
        """Serialize back to ``id,doc1,doc2,...`` CSV text."""
        parts = [str(self.get_id())]
        parts.extend(document.to_csv() for document in self.documents)
        return ",".join(parts)

    def to_html(self) -> str:
        """Render the section as a material card with an "About" title."""
        body = title("About")
        body += "".join(document.to_html() for document in self.documents)
        return material_card(body)

    def __str__(self) -> str:
        lines = ["About Documents:"]
        lines.extend(f"About {document}" for document in self.documents)
        return "\n".join(lines)

    def __eq__(self, other: object) -> bool:
        return isinstance(other, About) and super().__eq__(other)
| [
"iosifsecheleadan@protonmail.com"
] | iosifsecheleadan@protonmail.com |
97362b796c1ce1bd97928be6cc13dd07c41d4c2d | f56a1c0d5af368c38bf4d05e0f251dc525c84ba4 | /pulser-core/pulser/sequence/__init__.py | 4bdbb3dc8d67b8e8ded462f237e96e09c4c1e808 | [
"Apache-2.0"
] | permissive | pasqal-io/Pulser | 11197eb386057f4fbbd4ed254bd874fb134f5cd3 | 2315989dbfbf0e90a8701e9e7b537f18388b404a | refs/heads/develop | 2023-08-30T13:38:50.553841 | 2023-08-29T13:07:10 | 2023-08-29T13:07:10 | 294,159,001 | 123 | 56 | Apache-2.0 | 2023-09-14T16:16:56 | 2020-09-09T15:49:00 | Python | UTF-8 | Python | false | false | 688 | py | # Copyright 2022 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the sequence class definition."""
from pulser.sequence.sequence import Sequence
| [
"noreply@github.com"
] | pasqal-io.noreply@github.com |
9a573f0bab4668cbfca93a5bf4cf5e907b30f55d | d60c01c1a89bc99d3a773676861fd570d1841c00 | /arc/arc004/A/main.py | 186bfb9b93ec49ba15a299d7afa64bee77fc2ff4 | [] | no_license | sunbear0226/atcoder-workspace | 4f1b87605235e350b01413d0f33d8389b1ec6179 | f45b8f0e0e1af3d01f34531623a33aa46909ff75 | refs/heads/master | 2022-09-23T19:14:01.042248 | 2020-05-31T15:11:10 | 2020-05-31T15:11:10 | 265,442,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | #!/usr/bin/env python3
import sys
import math
def solve(N: int, x: "List[int]", y: "List[int]"):
    """Print the largest pairwise Euclidean distance among the N points."""
    ans = 0
    # Distance is symmetric, so each unordered pair only needs to be
    # examined once; this halves the work of the original full i/j loop
    # without changing the printed result.
    for i in range(N):
        for j in range(i + 1, N):
            xd = abs(x[i] - x[j])
            yd = abs(y[i] - y[j])
            ans = max(ans, math.sqrt(xd**2 + yd**2))
    print(ans)
    return
# Generated by 1.1.6 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read whitespace-separated input from stdin and delegate to solve()."""
    def iterate_tokens():
        # Lazily yield whitespace-separated tokens from stdin.
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    N = int(next(tokens))  # type: int
    # (N - 1 - 0 + 1) is just N; this form was emitted by atcoder-tools.
    x = [int()] * (N - 1 - 0 + 1)  # type: "List[int]"
    y = [int()] * (N - 1 - 0 + 1)  # type: "List[int]"
    for i in range(N - 1 - 0 + 1):
        x[i] = int(next(tokens))
        y[i] = int(next(tokens))
    solve(N, x, y)
if __name__ == '__main__':
    main()  # run only when executed as a script
| [
"bump.sunbear@gmail.com"
] | bump.sunbear@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.