blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7db4c9be85ad2e117629be95872cbce4e2efd53 | 03ca9b453e25ee12ef542fd9cfb4253d36b5b681 | /happy301/feedback_controller.py | 363b0ff462c53653975fb52d792b75ce5b39dc41 | [] | no_license | yangljian/happy301 | 842352e7c6021dbc7c5577379de786c35392fddc | 51b9a2cd65e58bc19aa58b387065d124ec22457b | refs/heads/master | 2023-01-05T22:31:11.439677 | 2020-11-03T02:15:45 | 2020-11-03T02:15:45 | 307,324,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | import tensorflow as tf
from MyNet import MyNet
import numpy as np
from workload_env import WorkloadEnv
from sklearn.preprocessing import StandardScaler
# Scaler for the (currently commented-out) state normalisation in the loop below.
std = StandardScaler()
# min_max = MinMaxScaler(feature_range=(0, 1))
# assumes (7, 11) are environment dimensions (state/action sizes?) — TODO confirm
env = WorkloadEnv(7, 11)
# assumes (6, 14) mirror the network's input/output sizes — TODO confirm
my_net = MyNet(6, 14)
# Load previously trained weights before running the feedback controller.
my_net.restore_net()
# Get the current environment state
s = env.reset()
def sigmoid(x):
    """Logistic function shifted so its midpoint (output 0.5) sits at x == 5."""
    shifted = float(x) - 5
    return 1.0 / (1 + np.exp(-shifted))
# Main control loop
while True:
    # res = std.fit_transform(s.reshape(-1, 1))
    # res = np.array(res).squeeze()
    # Feed the state to the model, get Q-values for every action, take the argmax action
    print(my_net.get_eval(s))
    print(s)
    a = np.argmax(my_net.get_eval(s), axis=1)
    # Execute the action; env returns the new state, which replaces s for the next iteration
    s, _, Done = env.step(a)
    # Exit the loop when the Q-value drops below threshold T (currently disabled)
    # if Done:
    #     break
# end of loop
| [
"1025230700@qq.com"
] | 1025230700@qq.com |
9367c9fc788b09d6bf9c8369612096e5c5ffa3fa | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/AvatarInputHandler/AimingSystems/StrategicAimingSystem.py | 2ec9e5a041a24e763dee53002932f7d06da6e9d5 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,452 | py | # 2016.11.19 19:47:39 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/AvatarInputHandler/AimingSystems/StrategicAimingSystem.py
import BigWorld
import Math
from Math import Vector3, Matrix
import math
from AvatarInputHandler import mathUtils, AimingSystems
from AvatarInputHandler.AimingSystems import IAimingSystem
from AvatarInputHandler.cameras import _clampPoint2DInBox2D
class StrategicAimingSystem(IAimingSystem):
    """Top-down ("strategic"/artillery view) aiming system.

    Tracks a 2-D point on the map plane and maintains an aim matrix positioned
    `height` units above the terrain under that point.
    """
    # Nearly straight-down look direction; the tiny 0.001 rad pitch offset avoids
    # a perfectly vertical (degenerate) vector.
    _LOOK_DIR = Vector3(0, -math.cos(0.001), math.sin(0.001))
    # Read-only accessors for the configured hover height and the terrain height
    # sampled under the current aim point.
    height = property(lambda self: self.__height)
    heightFromPlane = property(lambda self: self.__heightFromPlane)
    def __init__(self, height, yaw):
        # Rotation-only matrix; its translation is filled in by __updateMatrix.
        self._matrix = mathUtils.createRotationMatrix((yaw, 0, 0))
        self.__planePosition = Vector3(0, 0, 0)
        self.__height = height
        self.__heightFromPlane = 0.0
    def destroy(self):
        # Nothing to release; required by the IAimingSystem interface.
        pass
    def enable(self, targetPos):
        # Activating the system simply snaps the aim point to targetPos.
        self.updateTargetPos(targetPos)
    def disable(self):
        pass
    def getDesiredShotPoint(self, terrainOnlyCheck = False):
        # Probe straight down (0, -1, 0) from the aim matrix position; delegates
        # collision handling to the shared AimingSystems helper.
        return AimingSystems.getDesiredShotPoint(self._matrix.translation, Vector3(0, -1, 0), True, True, terrainOnlyCheck)
    def handleMovement(self, dx, dy):
        """Pan the aim point by (dx, dy), rotated into the system's yaw frame."""
        shift = self._matrix.applyVector(Vector3(dx, 0, dy))
        # Only the horizontal components move the plane position.
        self.__planePosition += Vector3(shift.x, 0, shift.z)
        self.__updateMatrix()
    def updateTargetPos(self, targetPos):
        """Move the aim point to targetPos (only x/z are used) and refresh the matrix."""
        self.__planePosition.x = targetPos.x
        self.__planePosition.z = targetPos.z
        self.__updateMatrix()
    def __updateMatrix(self):
        # Clamp the aim point into the arena's 2-D bounding box.
        bb = BigWorld.player().arena.arenaType.boundingBox
        pos2D = _clampPoint2DInBox2D(bb[0], bb[1], Math.Vector2(self.__planePosition.x, self.__planePosition.z))
        self.__planePosition.x = pos2D[0]
        self.__planePosition.z = pos2D[1]
        # Vertical ray from +1000 down to -250 to find the terrain/scene height
        # under the aim point; flag 3 presumably selects the collision mask — confirm.
        collPoint = BigWorld.wg_collideSegment(BigWorld.player().spaceID, self.__planePosition + Math.Vector3(0, 1000.0, 0), self.__planePosition + Math.Vector3(0, -250.0, 0), 3)
        self.__heightFromPlane = 0.0 if collPoint is None else collPoint[0][1]
        # Hover the matrix `height` above the sampled terrain height.
        self._matrix.translation = self.__planePosition + Vector3(0, self.__heightFromPlane + self.__height, 0)
        return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\AvatarInputHandler\AimingSystems\StrategicAimingSystem.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:47:39 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
c09e7b5259d5a4704dd870d39c296aadcaf6047d | a7c5369a3ba19f6fd0a6840afdc519e606c67e08 | /stanford-topo/no_huan.py | 807321be4260f4d6abfa139bf74b725acd38dbd1 | [] | no_license | chenkaiyue/flow-entry-aggregate | c23f85be3711778b634755bab5b85d924ea9a533 | b3e009f6b91ca3553570d2d6601c7e2df99f1481 | refs/heads/master | 2021-05-15T18:16:08.873634 | 2017-10-10T14:22:35 | 2017-10-10T14:22:35 | 106,426,692 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | #!/usr/bin/env python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import Link, Intf, TCLink
from mininet.topo import Topo
from mininet.util import dumpNodeConnections
import logging
import os
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger( __name__ )
class NoLoopTopo(Topo):
    """Minimal loop-free Mininet topology: h1 -- s1 -- s2 -- h2."""
    def __init__(self):
        # Initialise the base Topo before registering any nodes.
        Topo.__init__(self)
        left_host = self.addHost("h1")
        right_host = self.addHost("h2")
        left_switch = self.addSwitch("s1")
        right_switch = self.addSwitch("s2")
        # Creation order is preserved: Mininet assigns port numbers in the
        # order links are added.
        self.addLink(left_host, left_switch)
        self.addLink(left_switch, right_switch)
        self.addLink(right_host, right_switch)
# Registry consumed by Mininet's CLI: `mn --custom <this file> --topo nolooptopo`.
topos = { 'nolooptopo': ( lambda: NoLoopTopo() ) }
| [
"chenkaiyue2008@163.com"
] | chenkaiyue2008@163.com |
90690f62a46d789508ef78bf4cd7782bb224bf56 | 811ebc62487828ffff943a08ba6d18b7368bb859 | /复赛/附件/代码/utils/miou.py | b16d16e99cd0cd7d45c2a1057b20c8831d4d4c79 | [] | no_license | AlanMorningLight/Remote-sensing-plot-segmentation-and-extraction-based-on-few-samples | 945d5fcd1f5b0e7bf70128ab960c606a6b4e2d7a | 2d593a46dd7722fac3b449c68dcc120510dd139d | refs/heads/master | 2023-04-23T06:10:25.956683 | 2021-05-01T09:16:05 | 2021-05-01T09:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,748 | py | import numpy as np
import argparse
import json
from PIL import Image
from os.path import join
# 设标签宽W,长H
def fast_hist(a, b, n):
    """Build an n x n confusion matrix from flattened arrays.

    `a` is the ground-truth label array, `b` the prediction array, both of
    shape (H*W,). Entry [i, j] counts pixels labelled i and predicted j, so the
    diagonal holds correctly classified pixels. Labels outside [0, n) are ignored.
    """
    valid = (a >= 0) & (a < n)
    # Encode each (gt, pred) pair as a single index in [0, n*n), then count.
    pair_idx = n * a[valid].astype(int) + b[valid]
    return np.bincount(pair_idx, minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
    """Per-class IoU from a confusion matrix: TP / (TP + FP + FN); shape (n,)."""
    true_pos = np.diag(hist)
    union = hist.sum(1) + hist.sum(0) - true_pos
    return true_pos / union
def per_class_PA(hist):
    """Per-class pixel accuracy: TP / (all ground-truth pixels of the class)."""
    correct = np.diag(hist)
    return correct / hist.sum(1)
def compute_mIoU(gt_dir, pred_dir, png_name_list, num_classes, name_classes):
    # Compute per-class IoU / pixel accuracy over a set of (label, prediction)
    # PNG pairs and print a per-class report; returns the per-class IoU array.
    print('Num classes', num_classes)
    # Confusion matrix accumulated over all evaluated images.
    hist = np.zeros((num_classes, num_classes))
    gt_imgs = [join(gt_dir, x + ".png") for x in png_name_list] # ground-truth label paths for the validation set
    pred_imgs = [join(pred_dir, x + ".png") for x in png_name_list] # predicted segmentation paths, same order
    # Process each (image, label) pair.
    for ind in range(len(gt_imgs)):
        # Load one predicted segmentation as a numpy array.
        pred = np.array(Image.open(pred_imgs[ind]))
        # Load the matching ground-truth label as a numpy array.
        label = np.array(Image.open(gt_imgs[ind]))
        # Skip images whose prediction and label sizes disagree.
        if len(label.flatten()) != len(pred.flatten()):
            print(
                'Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(
                    len(label.flatten()), len(pred.flatten()), gt_imgs[ind],
                    pred_imgs[ind]))
            continue
        # Accumulate this image's num_classes x num_classes confusion matrix.
        hist += fast_hist(label.flatten(), pred.flatten(),num_classes)
        # Every 10 images, report the running class-averaged mIoU / mPA.
        if ind > 0 and ind % 10 == 0:
            print('{:d} / {:d}: mIou-{:0.2f}; mPA-{:0.2f}'.format(ind, len(gt_imgs),
                                                                  100 * np.mean(per_class_iu(hist)),
                                                                  100 * np.mean(per_class_PA(hist))))
    # Final per-class IoU / pixel accuracy over the whole validation set.
    mIoUs = per_class_iu(hist)
    mPA = per_class_PA(hist)
    # Print the per-class scores.
    for ind_class in range(num_classes):
        print('===>' + name_classes[ind_class] + ':\tmIou-' + str(round(mIoUs[ind_class] * 100, 2)) + '; mPA-' + str(round(mPA[ind_class] * 100, 2)))
    # Class-averaged scores; NaNs (classes absent from the set) are ignored.
    print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2)) + '; mPA: ' + str(round(np.nanmean(mPA) * 100, 2)))
    return mIoUs
if __name__ == "__main__":
    # Pascal VOC 2007 validation evaluation (21 classes incl. background).
    # NOTE(review): the val list uses a Windows-style backslash raw path while
    # the other paths are POSIX — confirm on the target platform.
    gt_dir = "./VOCdevkit/VOC2007/SegmentationClass"
    pred_dir = "./miou_pr_dir"
    png_name_list = open(r"VOCdevkit\VOC2007\ImageSets\Segmentation\val.txt",'r').read().splitlines()
    num_classes = 21
    name_classes = ["background","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    compute_mIoU(gt_dir, pred_dir, png_name_list, num_classes, name_classes) # run the mIoU computation
| [
"1103540209@qq.com"
] | 1103540209@qq.com |
2c0fb7ef5d66d0bd10e7c849de51c69da576a7ed | 30fc64988818beef7353456c201f1e0a4d656f3d | /cifar10/models/vgg.py | ddf22330d9511524c0a5b830a675b5445d63e4a9 | [
"BSD-2-Clause"
] | permissive | zhaofang0627/IterNorm-pytorch | 48f55a7d16fb0c7efc0e57f6ba0c276a6f6f99b0 | c87bc35bf2dd2cacb643c0692acabead73097ebf | refs/heads/master | 2020-05-16T12:03:10.341683 | 2019-04-14T13:58:40 | 2019-04-14T13:58:40 | 183,035,563 | 1 | 0 | null | 2019-04-23T14:43:12 | 2019-04-23T14:43:12 | null | UTF-8 | Python | false | false | 1,502 | py | import torch.nn as nn
import extension as my
__all__ = ['vgg']
class VGG(nn.Module):
    """VGG-style CNN classifier.

    Three conv stages (128, 256, 512 channels), each ending in a 2x2 max-pool,
    followed by a BatchNorm+ReLU+Linear head. The flatten size 512*4*4 after
    three /2 pools implies 32x32 spatial input (e.g. CIFAR-10).
    `my.Norm` / `my.View` come from the project's `extension` package —
    presumably a configurable normalization layer and a reshape module; confirm.
    """
    def __init__(self, num_classes=10):
        super(VGG, self).__init__()
        bias = True
        self.net = nn.Sequential( # 2 x 128C3 - MP2
            nn.Conv2d(3, 128, 3, 1, 1, bias=bias), my.Norm(128), nn.ReLU(True), nn.Conv2d(128, 128, 3, 1, 1, bias=bias),
            nn.MaxPool2d(2, 2), # 2 x 256C3 - MP2
            my.Norm(128), nn.ReLU(True), nn.Conv2d(128, 256, 3, 1, 1, bias=bias), my.Norm(256), nn.ReLU(True),
            nn.Conv2d(256, 256, 3, 1, 1, bias=bias), nn.MaxPool2d(2, 2), # 2 x 512C3 - MP2
            my.Norm(256), nn.ReLU(True), nn.Conv2d(256, 512, 3, 1, 1, bias=bias), my.Norm(512), nn.ReLU(True),
            nn.Conv2d(512, 512, 3, 1, 1, bias=bias), nn.MaxPool2d(2, 2), my.View(512 * 4 * 4), # flatten to 512*4*4
            # nn.BatchNorm1d(512 * 4 * 4),
            # my.quantizer(512 * 4 * 4, nn.ReLU(True)),
            # my.Linear(512 * 4 * 4, 1024, bias=bias),
            # Softmax
            nn.BatchNorm1d(512 * 4 * 4), nn.ReLU(True), nn.Linear(512 * 4 * 4, num_classes))
        # Custom init: Xavier for convs, ones for BatchNorm2d scale, N(0, 0.01)
        # for linear weights. (Biases are left at their PyTorch defaults.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
        return
    def forward(self, x):
        # Returns raw class logits of shape (N, num_classes); loss is expected
        # to apply softmax/cross-entropy externally.
        return self.net(x)
def vgg():
    """Factory: build and return a VGG network with default settings."""
    network = VGG()
    return network
| [
"dnvtmf@gmail.com"
] | dnvtmf@gmail.com |
e83f53e3b09d4c31e6cddb4686f5993e3a6dc7b9 | 3899a37d1f500f7935cd04079e0b293bd64fe1cb | /docs/conf.py | 9902ebaf5dadaec57cdc679f1cbc45f4be1e8a5b | [
"MIT"
] | permissive | jubaer145/nlpaug | 06d5fa83d68537f6485ed5afccfe2ece056aae8b | b631660f1997fc503258735ec011ffbe164d12af | refs/heads/master | 2023-06-02T03:45:18.094793 | 2021-06-20T21:17:13 | 2021-06-20T21:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,645 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# nlpaug documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 7 07:37:05 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
from unittest.mock import MagicMock
sys.path.append(os.path.abspath('..'))
# Mock module to bypass pip install
class Mock(MagicMock):
    # Any attribute access (e.g. `librosa.display`) yields a fresh MagicMock,
    # so autodoc can import project modules without the real dependencies.
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
# Heavy/optional dependencies stubbed out so the docs build needs no pip install.
MOCK_MODULES = [
    'librosa', 'librosa.display', 'numpy', 'nltk', 'matplotlib', 'matplotlib.pyplot',
    'setuptools', 'python-dotenv', 'nltk.corpus', 'torch', 'transformers']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'nlpaug'
copyright = '2019, Edward Ma'
author = 'Edward Ma'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.4'
# The full version, including alpha/beta/rc tags.
release = '1.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'nlpaugdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nlpaug.tex', 'nlpaug Documentation',
'Edward Ma', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nlpaug', 'nlpaug Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nlpaug', 'nlpaug Documentation',
author, 'nlpaug', 'One line description of project.',
'Miscellaneous'),
]
| [
"makcedward@gmail.com"
] | makcedward@gmail.com |
80c2a3cc92aab525c9bf52c27c5e14d7d24ec6ab | 5864264e878f1ab1c08a8ae926596c7c7a9c636b | /server/main.py | 45c9d5e09ea46a2ddbbd1e7a83d12a07ae94d305 | [] | no_license | wayexists02/hanyang_erica_capstone | 2045ca8b5c363a5e116ad4a33e9b6a563e75aff8 | 600a9bccc51e6a7394b8724666ae38217bd8d7d9 | refs/heads/master | 2020-04-25T20:54:57.701034 | 2019-06-06T13:42:57 | 2019-06-06T13:42:57 | 173,064,470 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py |
import sys
import numpy as np
import cv2
import threading as th
from Server import Server
# Listen on all interfaces for the camera client.
host = "0.0.0.0"
port = 13333
# Number of frames accumulated before one classification request.
num_step = 8
num_classes = 4
image_width = 128
image_height = 128
image_channel = 3
# Make the sibling `ai` package importable before pulling in the classifier.
sys.path.append("../")
from ai.AI import AI
# Shared state between the receive loop and the (debug) display thread.
image_bgr = None
ok = True
# Index-to-class-name mapping for results returned by AI.predict.
trash_map = ["can", "extra", "glass", "plastic", "nothing"]
def image_show():
    """Debug viewer: keep displaying the most recent frame until `ok` goes False."""
    global image_bgr
    global ok
    while ok:
        if image_bgr is not None:
            preview = cv2.resize(image_bgr, dsize=(240, 240))
            cv2.imshow("test", preview)
            # ~60 Hz refresh; waitKey also services the HighGUI event loop.
            cv2.waitKey(1000 // 60)
def main(args):
    """Server entry point: receive frames from the client in batches of
    `num_step`, run the classifier over each batch, send the result index back.

    Exits (and closes the server socket) on Ctrl-C or on ValueError/TypeError
    raised by the receive/predict pipeline.
    """
    global image_bgr
    global ok
    print("Start server!")
    server = Server()
    server.open(host, port)
    ai = AI()
    ai.build()
    # debug
    #t = th.Thread(target=image_show)
    #t.start()
    try:
        while True:
            # Batch buffer: (num_step, C, H, W), filled frame by frame.
            image_arr = np.zeros((num_step, image_channel, image_height, image_width))
            cnt = 0
            index = 0
            while cnt < num_step:
                image_bgr = server.wait_for_image()
                if image_bgr is None:
                    continue
                # print(image_bgr.shape)
                # Convert from OpenCV's BGR to RGB before feeding the model.
                image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
                # Keep each raw frame on disk for debugging.
                cv2.imwrite(f"./test/{index}.jpg", image_bgr)
                # image_rgb = cv2.cvtColor(cv2.imread(f"./test/{index}.jpg"), cv2.COLOR_BGR2RGB)
                index += 1
                # HWC -> CHW layout expected by the batch buffer.
                image_arr[cnt] = image_rgb.transpose(2, 0, 1)
                cnt += 1
                image_rgb = None
                image_bgr = None
            result = ai.predict(image_arr)
            print("Result: {}".format(trash_map[result]))
            server.send_result(result)
    except KeyboardInterrupt as e:
        print(e)
        print("Keyboard Interrupted.")
    except ValueError as e:
        print(e)
        print("Exception occurs. Server shutdown.")
    except TypeError as e:
        print(e)
        print("Exception occurs. Server shutdown.")
    server.close()
    # Signal the (currently disabled) display thread to stop.
    ok = False
    # t.join()
if __name__ == "__main__":
    main(sys.argv)
| [
"wayexists02@gmail.com"
] | wayexists02@gmail.com |
412ccfa8216340ee7038cf82fd1bcb6fb83e324e | ca23ba4449afcd000ffd45b46b2d363728be9a28 | /china_web/align.py | 9a6016ac4d33020281c85d94c42a23e9e5c135db | [] | no_license | jiaperte/Python-WebScraping | 586cc1299ea591df9bc302528423fc1b3fc35f21 | b7416476689eb9485278923ec97f95287a01d65c | refs/heads/master | 2020-09-08T13:09:06.149606 | 2019-11-23T12:49:34 | 2019-11-23T12:49:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | a = ['asdfasd', 'asdf', 'sdfsdf']
# Demonstrates left- vs right-aligned columns via str.ljust / str.rjust.
b = ['1232', '213', '23']
print("左对齐")
for text, value in zip(a, b):
    print(text.ljust(10), value)
print()
print("右对齐")
for text, value in zip(a, b):
    print(text.rjust(10), value)
| [
"jiayong_2010@139.com"
] | jiayong_2010@139.com |
194f1f77634d5f8dac6ca83d307d7045572cc023 | b00ba2f08e3b4f310a63206fa13483339cd1752f | /src/config.py | 70a6b3adcfb7df5de2b17f064e5405e9439f6576 | [] | no_license | amano-honmono/custom-matcher | 4d56b33f621c4bdf139b9427d642a70699b94606 | 75f002f3d276f71a16f3a9fec392cd646f8f2875 | refs/heads/master | 2023-08-17T05:59:51.897026 | 2021-09-20T13:02:57 | 2021-09-20T13:03:24 | 403,336,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | import yaml
with open('/opt/project/config.yml') as file:
config = yaml.safe_load(file) | [
"maplehuke@yahoo.co.jp"
] | maplehuke@yahoo.co.jp |
6150cb6eab8ab3c168a1eead8a17ce4cc4735cb6 | e9348d1689215220b7820134a82c2afdf8aed107 | /backend/young_waterfall_29324/urls.py | 19b2e1e3e6e251b7ff93a5593048f905ce1b2e58 | [] | no_license | crowdbotics-apps/young-waterfall-29324 | 8bf2accb9197c45f59ac717b2ec4fe289830b3f8 | ea74f174180c6af5acca25a82397daa7c48eb7c2 | refs/heads/master | 2023-06-26T05:12:51.154938 | 2021-08-01T20:50:29 | 2021-08-01T20:50:29 | 391,735,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | """young_waterfall_29324 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Young Waterfall"
admin.site.site_title = "Young Waterfall Admin Portal"
admin.site.index_title = "Young Waterfall Admin"
# swagger
api_info = openapi.Info(
title="Young Waterfall API",
default_version="v1",
description="API documentation for Young Waterfall App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
34b4307122c60e6551de4af71b85a1ddb3126279 | c6b41c56cedcb092336101b24c76b63913b2e117 | /picksamples.py | f5e9d1d1a6e775c1004975f35e379df55632adb3 | [] | no_license | Mmhmmmmm/SPU | 2d7ddf55e9a04a7cadaa30e67ad386eff9c2d6ac | 50d8df9f13dfe6dd15f30d3683b4de6a3bfa1fd9 | refs/heads/master | 2023-06-23T19:26:29.472168 | 2021-07-14T12:54:28 | 2021-07-14T12:54:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,769 | py | import os
import numpy as np
import random
class PickSamples():
    """Self-paced sample picker.

    At each training "pace" it splits the training list into a picked set
    (easiest samples by model loss, optionally entropy-adjusted) and a left-over
    set, optionally re-injects the most uncertain samples ("curriculum
    reconstruction") and assigns soft mixture weights to the picked lines.

    Per-experiment layout: ./Exp{exp}/images/ holds the Pick-/Left- lists,
    ./Exp{exp}/Pred/ holds the per-pace prediction dumps produced by training.
    """

    def __init__(self, exp=0, percent=[0.5, 0.125, 0.125, 0.125, 0.125], pace=0, alpha=[15, 12.5, 10, 7.5, 5],
                 ent_threshold=-4.2, diff_threshold=100, ent_pick_per=1200, random_pick=False,
                 train_txt0='./images/tr_10.txt', soft=False, soft_percent=0.9,
                 img_dir='/root/data/aishijie/Project/Morph_mtcnn_1.3_0.35_0.3/'):
        # NOTE: the mutable list defaults (percent/alpha) are shared across
        # instances; they are only read, never mutated, so this is safe as-is.
        self.exp = exp
        self.root_image = './Exp{}/images/'.format(exp)
        self.root_Pred = './Exp{}/Pred/'.format(exp)
        self.checkdir(self.root_image)
        self.checkdir(self.root_Pred)
        self.percent = percent
        self.pace = pace
        self.soft = soft
        self.soft_percent = soft_percent
        self.ent_threshold = ent_threshold
        self.diff_threshold = diff_threshold
        self.alpha = alpha
        self.random_pick = random_pick
        self.img_dir = img_dir
        self.fn_traintxt0 = train_txt0
        train_images = self.readtxt(self.fn_traintxt0)
        self.num_imgs = len(train_images)
        # Absolute number of samples to pick at each pace.
        self.pace_samples = [int(p * len(train_images)) for p in self.percent]
        assert ent_pick_per >= 0.0, 'Curriculum Reconstruction Samples should greater than 0'
        # A fractional value is interpreted relative to the training-set size.
        if ent_pick_per < 1:
            self.ent_pick_per = int(ent_pick_per * self.num_imgs)
        else:
            self.ent_pick_per = int(ent_pick_per)

    def checkdir(self, tmp_dir):
        """Create tmp_dir (and parents) if it does not exist yet."""
        if not os.path.isdir(tmp_dir):
            os.makedirs(tmp_dir)

    def readtxt(self, fn):
        """Read fn and return its lines (trailing newlines kept)."""
        with open(fn, 'r') as f:
            lines = f.readlines()
        return lines

    def savetxt(self, fn, lines):
        """Write the given lines to fn verbatim."""
        with open(fn, 'w') as f:
            f.writelines(lines)

    # Prediction-dump line format parsed by the get_* helpers below:
    # 'img name: {}, label: {}, pred: {:.6f}, ent: {:.6f}, diff: {:.6f}'
    def get_img_name(self, line):
        """Extract the image path from a prediction-dump line."""
        img = line.strip('\n').split('img name: ')[1].split(',')[0]
        return img

    def get_label(self, line):
        """Extract the ground-truth label as a float."""
        label = line.strip('\n').split('label: ')[1].split(',')[0]
        return float(label)

    def get_diff(self, line):
        """Extract the loss/difficulty value, scaled by 100."""
        diff = line.strip('\n').split('diff: ')[-1]
        return float(diff) * 100.0

    def get_ent(self, line):
        """Extract the prediction entropy as a float."""
        ent = line.strip('\n').split('ent: ')[1].split(',')[0]
        return float(ent)

    def pick(self, pace=0, capped=False):
        '''
        Generate the Pick-/Left-/ent_pick- text files for the given pace and
        return (left_txt_path, pick_txt_path).

        pace represents the txt that needs to be generated; pace 0 just copies
        the full training list into the left set. capped, when set to a float
        in (0, 1], keeps only that fraction of lowest-loss picked samples
        before soft weighting.
        '''
        # BUGFIX: `tem` must exist even for pace == 0, where the else-branch
        # that fills it is skipped (previously raised NameError).
        pick, left, pick_ent, pick_new, tem = [], [], [], [], []
        if pace == 0:
            pick = []
            left = self.readtxt(self.fn_traintxt0)
        else:
            fn_train_previous = './Exp{}/images/Pick-{}.txt'.format(self.exp, pace-1)
            fn_pred_pick = './Exp{}/Pred/PredOnPickset-{}.txt'.format(self.exp, pace-1)
            fn_pred_left = './Exp{}/Pred/PredOnLeftset-{}.txt'.format(self.exp, pace-1)
            pred_pick = self.readtxt(fn_pred_pick)
            pred_left = self.readtxt(fn_pred_left)
            pred_all = pred_pick + pred_left
            # Sort left-over samples by loss (entropy-adjusted when enabled).
            pred_pick_sort = []
            for i, line in enumerate(pred_left):
                diff = self.get_diff(line)
                if diff > self.diff_threshold:
                    diff = self.diff_threshold
                img = self.get_img_name(line)
                ent = self.get_ent(line)
                label = self.get_label(line)
                if self.ent_threshold < 0:
                    # Clamp the entropy, then subtract it (scaled by alpha) so
                    # uncertain samples look "easier" and get picked earlier.
                    if ent < self.ent_threshold:
                        ent = self.ent_threshold
                    diff = diff - self.alpha[pace-1] * ent
                pred_pick_sort.append((img, label, diff))
            pred_pick_sort.sort(key=lambda x: x[2])
            # Pick the easiest pace_samples[pace-1] samples; the rest stay left.
            for i in range(len(pred_pick_sort)):
                img_name, label = pred_pick_sort[i][0], pred_pick_sort[i][1]
                line = img_name + ' ' + str(label) + '\n'
                if i < self.pace_samples[pace-1]:
                    line = img_name + ' ' + str(label) + '\n'
                    pick.append(line)
                else:
                    # Left-over samples carry a sentinel weight of 10000.
                    line = img_name + ' ' + str(label) + ' 10000' + '\n'
                    left.append(line)
            # Curriculum Reconstruction: re-inject the most uncertain samples
            # (or a random subset when random_pick is set).
            if self.ent_pick_per > 0:
                if self.random_pick:
                    lines = self.readtxt(self.fn_traintxt0)
                    random.shuffle(lines)
                    pick_ent = lines[:self.ent_pick_per]
                else:
                    ent_sort = []
                    for line in pred_all:
                        ent = self.get_ent(line)
                        ent_sort.append(ent)
                    ent_sort_np = np.array(ent_sort)
                    idx_ent = np.argsort(-ent_sort_np)
                    idx_ent_pick = idx_ent[:self.ent_pick_per]
                    for i in range(idx_ent_pick.shape[0]):
                        idx = idx_ent_pick[i]
                        line_ = pred_all[idx]
                        img = self.get_img_name(line_)
                        label = str(self.get_label(line_))
                        line = img + ' ' + label + '\n'
                        pick_ent.append(line)
            # Carry over the previous pace's picked samples (weights stripped),
            # only needed when soft weighting is enabled.
            tem_ = self.readtxt(fn_train_previous)
            tem = []
            if self.soft:
                for t in tem_:
                    img_name, label = t.strip('\n').split(' ')[0], t.strip('\n').split(' ')[1]
                    line = img_name + ' ' + label + '\n'
                    tem.append(line)
        pick_new = pick + tem + pick_ent
        # Mixture Weighting needs the previous pace's predictions, so it only
        # applies for pace > 0 (BUGFIX: previously raised NameError on
        # `pred_all` when called with pace == 0 and soft=True).
        if self.soft and pace > 0:
            img_all, pick_new_sort, pred_pick_new = [], [], []
            # Map every picked line back to its prediction-dump line.
            for pred in pred_all:
                img_name = self.get_img_name(pred)
                img_all.append(img_name)
            for p in pick_new:
                img_name = p.split(' ')[0]
                idx = img_all.index(img_name)
                pred_pick_new.append(pred_all[idx])
            # Capped likelihood: keep only the easiest fraction of the pick set.
            if capped != False:
                pred_pick_new.sort(key=lambda x: self.get_diff(x))
                end = int(len(pred_pick_new) * capped)
                pred_pick_new = pred_pick_new[:end+1]
                pick_new = pick_new[:end+1]
            diffs = []
            for pred in pred_pick_new:
                diff = self.get_diff(pred)
                img_name = self.get_img_name(pred)
                ent = self.get_ent(pred)
                label = self.get_label(pred)
                if self.ent_threshold < 0:
                    if ent < self.ent_threshold:
                        ent = self.ent_threshold
                    diff = diff - self.alpha[pace-1] * ent
                pick_new_sort.append((img_name, label, diff))
                diffs.append(diff)
            pick_new_sort.sort(key=lambda x: x[2])
            num_pick = len(pick_new_sort)
            diffs.sort(key=lambda x: x)
            # Dump the sorted difficulty distribution for offline inspection.
            diffs = np.array(diffs).reshape(-1, 1)
            with open('./Exp{}/images/{}diff.txt'.format(self.exp, pace), 'w') as f4:
                np.savetxt(f4, diffs, delimiter='\t', newline='\n')
            # Mixture weighting: the easiest soft_percent fraction keeps full
            # weight 10000; harder samples get a weight decaying with their
            # difficulty relative to lambda_0 (hardest kept sample).
            lambda_0 = pick_new_sort[-1][2]
            lambda_1 = pick_new_sort[int(num_pick * self.soft_percent) - 2][2]
            tmp = 1 / lambda_1 - 1 / lambda_0
            epsilon = 0.0
            if abs(tmp) < 1e-5:
                epsilon = 0.0
            else:
                epsilon = 1 / (tmp)
            print('lambda_0: {}, lambda_1: {}, epsilon: {}'.format(lambda_0, lambda_1, epsilon))
            weight = 0
            for i, (img, label, diff) in enumerate(pick_new_sort):
                if i < num_pick * self.soft_percent:
                    weight = 10000
                else:
                    weight = int(10000 * (epsilon / diff - epsilon / lambda_0))
                pick_new[i] = img + ' ' + str(label) + ' ' + str(weight) + '\n'
        # Persist the three lists for this pace.
        fn_pick_new = './Exp{}/images/Pick-{}.txt'.format(self.exp, pace)
        fn_left_new = './Exp{}/images/Left-{}.txt'.format(self.exp, pace)
        fn_pick_ent = './Exp{}/images/ent_pick-{}.txt'.format(self.exp, pace)
        self.savetxt(fn_pick_new, pick_new)
        self.savetxt(fn_left_new, left)
        self.savetxt(fn_pick_ent, pick_ent)
        print('new pick: %d' % len(pick_new))
        print('entropy pick: %d' % len(pick_ent))
        print('new left: %d' % len(left))
        return (fn_left_new, fn_pick_new)
| [
"learninginvision@gmail.com"
] | learninginvision@gmail.com |
5145d8712060a87a1544f9ff8ae537b8ecfaac8f | 303fcf1576fbe5599edef3be03c9d412118b4914 | /ex10.py | d849d325003706ec339c5c391d83c6120d20a752 | [] | no_license | mathsrocks/python-learning-lpthw | 0a91d658b921e4f518087f7722d2630278603b57 | fba9d739fc176b1d21b7498e344ffdbfac6529e1 | refs/heads/master | 2021-04-29T05:45:05.084223 | 2017-01-04T10:06:07 | 2017-01-04T10:06:07 | 78,005,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
print(fat_cat)
# Escape Sequences
print("ASCII bell (BELL): [\a]")
print("Horizontal tab (TAB): [\t]")
print("ASCII vertical tab (VT): [\v]")
"""
while True:
for i in ["/","-","|","\\","|"]:
print("%s\r" % i, )
"""
# Study Drills
triple_single_quotes = '''
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
'''
print(triple_single_quotes)
print("Combining %r with double-quote and single-quote escapes and print them out: ")
print("tabby_cat: raw = [%r], str = [%s]" % (tabby_cat, tabby_cat))
print("persian_cat: raw = [%r], str = [%s]" % (persian_cat, persian_cat))
print("backslash_cat: raw = [%r], str = [%s]" % (backslash_cat, backslash_cat))
print("fat_cat: raw = [%r], str = [%s]" % (fat_cat, fat_cat))
| [
"jerry.zwyang@gmail.com"
] | jerry.zwyang@gmail.com |
f9004dc1b017bf4e3d68dba6c51c6be7a0f11102 | c613839f198debf54f386f353ad5609db2b05d29 | /train.py | 46d167fa8cd42f538144d406a2dc3090392cce87 | [
"Apache-2.0"
] | permissive | BenJamesbabala/ner-sequencelearning | 480034de481245d79f7ecaabdbd966886709374f | 8302f2a621ac8082ff4d5d46e5e4b17c3ef1d14b | refs/heads/master | 2020-05-23T07:48:24.274687 | 2017-01-29T19:51:15 | 2017-01-29T19:51:15 | 80,445,762 | 5 | 0 | null | 2017-01-30T17:38:24 | 2017-01-30T17:38:24 | null | UTF-8 | Python | false | false | 3,466 | py | #!/bin/python3
import yaml
import numpy as np
import pandas as pd
from sklearn.cross_validation import KFold
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, Bidirectional, Convolution1D, MaxPooling1D
from keras.layers.advanced_activations import PReLU
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.metrics import f1_score, confusion_matrix
# training
nfolds = 10
nb_epoch = 100
batch_size = 512
nlabels = 8
# conv
nb_filter = 512
filter_length = 5
# LSTM
lstm_timesteps = 5
lstm_input_dim = 50
lstm_units = 150
cfg = yaml.load(open("data/meta.yaml", "r"))
if cfg['context']:
lstm_timesteps = cfg['context']
if cfg['embedding_dim']:
lstm_input_dim = cfg['embedding_dim']
if cfg['nlabels']:
nlabels = cfg['nlabels']
print('lstm timesteps: {}, lstm input dim: {}, num output labels: {}'.format(lstm_timesteps, lstm_input_dim, nlabels))
def nn_model():
model = Sequential()
model.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
input_shape=(lstm_timesteps, lstm_input_dim)))
model.add(PReLU())
model.add(BatchNormalization())
# we tend to overfit very much here because of the big convolution, thus slightly more dropout
model.add(Dropout(0.6))
model.add(Bidirectional(LSTM(lstm_units, activation='tanh', inner_activation='sigmoid', return_sequences=False)))
model.add(Dropout(0.5))
model.add(Dense(nlabels, activation='softmax', init = 'he_normal'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adadelta', metrics=['categorical_accuracy'])
return(model)
df = pd.read_csv('data/vectorized.txt', sep = ' ', header = 0)
X = df.iloc[:,1:].values
print('X shape: ', X.shape)
# reshape again into temporal structure
X = X.reshape(X.shape[0], -1, lstm_input_dim).astype('float32')
y = to_categorical(df.iloc[:,0])
print('X temporal reshape: ', X.shape)
print('#samples: ', len(X))
print('#labels: ', len(y))
folds = KFold(len(y), n_folds = nfolds, shuffle = True)
currentFold = 0
foldScores = []
for (inTrain, inTest) in folds:
xtr = X[inTrain]
ytr = y[inTrain]
xte = X[inTest]
yte = y[inTest]
print('Fold ', currentFold, ' starting...')
model = nn_model()
callbacks = [
EarlyStopping(monitor='val_categorical_accuracy', patience = 6, verbose = 0),
ModelCheckpoint(monitor='val_categorical_accuracy', filepath=('models/model_fold_{}.hdf5'.format(currentFold)), verbose=0, save_best_only = True)
]
model.fit(xtr, ytr, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(xte, yte),
callbacks=callbacks)
ypred = model.predict(xte)
# convert the probabilities back into a single integer class label
ypred_max = ypred.argmax(axis=1)
yte_max = yte.argmax(axis=1)
score = f1_score(yte_max, ypred_max, average = 'weighted')
foldScores.append(score)
print("Confusion matrix:\n%s" % confusion_matrix(yte_max, ypred_max))
print('Fold ', currentFold, '- F1: ', score)
print('avg f1 fold scores so far: ', np.mean(foldScores))
currentFold += 1
print('f1 fold scores: ', foldScores)
print('final avg f1 fold scores: ', np.mean(foldScores)) | [
"thomas.jungblut@gmail.com"
] | thomas.jungblut@gmail.com |
f7ba23621e3f314f4f27451fc1ac407cf330fd24 | 735a290883730930a9afd042eae1a690142fa067 | /mercylog_bashlog/lib/util.py | 2e97840942790e8b59ea6193865554755e92e2ca | [] | no_license | RAbraham/mercylog-bashlog | 09746768d399b532f0568c735a34ead21432cf56 | 1f1b1079752b7e4b68aeddac117813ba31c3ac17 | refs/heads/master | 2020-08-16T00:35:30.340934 | 2019-10-16T01:43:03 | 2019-10-16T01:43:03 | 215,431,550 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from pathlib import Path
def remove_dir(a_dir: Path):
import shutil
shutil.rmtree(a_dir)
pass
def tmp_folder(folder_name: str = None, prefix='delete_me') -> Path:
import uuid
import tempfile
folder_name = folder_name or (prefix + '_' + str(uuid.uuid4()))
tmp_dir = Path(tempfile.gettempdir()) / folder_name
tmp_dir.mkdir(parents=True, exist_ok=True)
return tmp_dir
| [
"rajiv.abraham@gmail.com"
] | rajiv.abraham@gmail.com |
ac873c918d6f735bc2a3bfadda916724136eecc4 | 5e83d62064ea4fd954820960306fb06cc8f0f391 | /newsletter/views.py | 48715b0e963cbe97098121435aa6acbb498d1df8 | [] | no_license | bharatkumarrathod/cfe_ecommerce2_RESTapi | eff2fad0cbff7cb3def2c13de282b085aba7291d | a081cdbf10c1fbde58e128b9c9b287443c726071 | refs/heads/master | 2020-12-25T21:43:44.166109 | 2015-10-27T21:04:19 | 2015-10-27T21:04:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | from django.conf import settings
from django.shortcuts import redirect, render
from django.core.mail import send_mail
from products.models import ProductFeatured, Product
from .forms import ContactForm, SignUpForm
from .models import SignUp
def home(request):
featured_image = ProductFeatured.objects.first()
products = Product.objects.all().order_by("?")[:4]
# form = SignUpForm(request.POST or None)
# if form.is_valid():
# instance = form.save(commit=False)
# email = form.cleaned_data.get('email')
# user_id, domain = email.split('@')
# full_name = form.cleaned_data.get('full_name')
# if not full_name:
# full_name = user_id
# instance.full_name = full_name
# instance.save()
# return redirect('sign_up_successful.html')
context = {
# 'form': form,
'featured_image': featured_image,
'products': products,
}
# if request.user.is_authenticated() and request.user.is_staff:
# data = SignUp.objects.all()
# context = {
# 'data': data
# }
return render(request, 'home.html', context)
def sign_up_successful(request):
return render(request, 'sign_up_successful.html', {})
def message_submitted(request):
return render(request, 'contact_us_submitted.html', {})
def contact(request):
form = ContactForm(request.POST or None)
# we would use the below if we wanted to send an email.
"""
if form.is_valid():
# get values typed into form
form_email = form.cleaned_data.get('email')
form_name = form.cleaned_data.get('name')
form_message = form.cleaned_data.get('message')
# construct email from typed info
subject = 'Site contact form'
message = "{} via {}would like to know: \n{}".format(
form_name,
form_email,
form_message
)
from_email = settings.EMAIL_HOST_USER
to_email = [from_email, 'anotheremail@somedomain.com']
# send the email
send_mail(subject,
message,
from_email,
to_email,
fail_silently=True)
"""
return render(request, 'contact.html', {'form': form})
| [
"carlofusiello@gmail.com"
] | carlofusiello@gmail.com |
98911ce8c4bc073fa0ada3fad0c3d1e3231ad68e | 13c2f109585a033a1acecdd912a3142802170921 | /Python_Object_Serialization_Context_Manager.py | 2566f5ec1aeb15199b4802d5e018e7fa67a537bf | [] | no_license | VakinduPhilliam/Hierachy_Serialization | 88175764e24d03602eca06e8df13223e8ec4dd7e | 61d534b23bc3e072356cb33fd763b0cbb6320896 | refs/heads/master | 2020-05-24T15:59:41.674047 | 2019-11-01T15:02:08 | 2019-11-01T15:02:08 | 187,346,172 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 1,953 | py | # Python object serialization
# The 'pickle' module implements binary protocols for serializing and de-serializing
# a Python object structure.
# “Pickling” is the process whereby a Python object hierarchy is converted into a byte stream,
# and “unpickling” is the inverse operation, whereby a byte stream (from a binary file or bytes-like
# object) is converted back into an object hierarchy.
# Pickling (and unpickling) is alternatively known as “serialization”, “marshalling,” or “flattening”;
# however, to avoid confusion, the terms used here are “pickling” and “unpickling”.
# sqlite3 — DB-API 2.0 interface for SQLite databases
# SQLite is a C library that provides a lightweight disk-based database that doesn’t require a separate
# server process and allows accessing the database using a nonstandard variant of the SQL query language.
# Some applications can use SQLite for internal data storage.
# It’s also possible to prototype an application using SQLite and then port the code to a larger database
# such as PostgreSQL or Oracle.
# Using the connection as a context manager
# Connection objects can be used as context managers that automatically commit or rollback transactions.
# In the event of an exception, the transaction is rolled back; otherwise, the transaction is committed:
import sqlite3
con = sqlite3.connect(":memory:")
con.execute("create table person (id integer primary key, firstname varchar unique)")
# Successful, con.commit() is called automatically afterwards
with con:
con.execute("insert into person(firstname) values (?)", ("Joe",))
# con.rollback() is called after the with block finishes with an exception, the
# exception is still raised and must be caught
try:
with con:
con.execute("insert into person(firstname) values (?)", ("Joe",))
except sqlite3.IntegrityError:
print("couldn't add Joe twice")
| [
"noreply@github.com"
] | VakinduPhilliam.noreply@github.com |
471f69a116bb3f8ea26d0e157151b03c8573d7fb | 4586fcc1afd15f04dbb269899a5b954adcd8d60e | /bin/ldgp.py | b825bbe162b265a0c48f0c32c7daf4bf04ca4e6c | [] | no_license | gautamits/rgbd | d0f1435a2b91b2aa0e848688d3c1c12fc1c77931 | a055a6b718a1e20957f20f19a0c49bbfa63cbd08 | refs/heads/master | 2021-01-20T05:59:43.891910 | 2017-11-25T09:16:34 | 2017-11-25T09:16:34 | 87,881,081 | 0 | 0 | null | 2017-04-25T19:22:50 | 2017-04-11T02:51:16 | Python | UTF-8 | Python | false | false | 1,636 | py | import cv2
import numpy as np
def dist(x,y):
return np.sqrt(np.sum((x-y)**2)) #this function returns euclidean distance between two one dimensional arrays
#this function returns histogram of image,
def hist(a):
hist, bin_edges = np.histogram(a, bins = range(64))
return hist
#this function returns ldgp of an image
def ldgp(i):
if i.shape >=3:
i=cv2.cvtColor(i,cv2.COLOR_BGR2GRAY)
height,width=i.shape
#zero padding
first=np.pad(i,((0,0),(1,0)),'constant')
second=np.pad(i,((0,1),(1,0)),'constant')
third=np.pad(i,((0,1),(0,0)),'constant')
fourth=np.pad(i,((0,1),(0,1)),'constant')
first=first[:,0:width]
second=second[1:height+1,0:width]
third=third[1:height+1,:]
fourth=fourth[1:height+1,1:width+1]
first=i-first #gradient at 0 degree
second=i-second #gradient at 45 degree
third=i-third #gradient at 90 degree
fourth=i-fourth # gradient at 135 degree
combo1=32*np.array( first >= second, dtype=int) #binary arrays being converted to decimal
combo2=16*np.array( first >= third, dtype=int)
combo3=8*np.array( first >= fourth, dtype=int)
combo4=4*np.array( second >= third, dtype=int)
combo5=2*np.array( second >= fourth, dtype=int)
combo6=np.array( third >= fourth, dtype=int)
ldgp=combo1+combo2+combo3+combo4+combo5+combo6
ldgp=np.array(ldgp,dtype='uint8')
return ldgp #final ldgp returned
| [
"gautamamits95@gmail.com"
] | gautamamits95@gmail.com |
e8a6c6d6bc56b44d9ac2fae0497b557fe4c040d9 | b87ea98bc166cade5c78d246aeb0e23c59183d56 | /samples/openapi3/client/petstore/python-nextgen-aiohttp/setup.py | d584a44727dd30a7685acc7a8fbbfecd38037804 | [
"Apache-2.0"
] | permissive | holisticon/openapi-generator | 88f8e6a3d7bc059c8f56563c87f6d473694d94e5 | 6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272 | refs/heads/master | 2023-05-12T02:55:19.037397 | 2023-04-14T08:31:59 | 2023-04-14T08:31:59 | 450,034,139 | 1 | 0 | Apache-2.0 | 2022-01-20T09:34:14 | 2022-01-20T09:34:13 | null | UTF-8 | Python | false | false | 1,473 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from setuptools import setup, find_packages # noqa: H301
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
NAME = "petstore-api"
VERSION = "1.0.0"
PYTHON_REQUIRES = ">=3.7"
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
"aiohttp >= 3.0.0",
"pem>=19.3.0",
"pycryptodome>=3.9.0",
"pydantic >= 1.10.5, < 2",
"aenum"
]
setup(
name=NAME,
version=VERSION,
description="OpenAPI Petstore",
author="OpenAPI Generator community",
author_email="team@openapitools.org",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "OpenAPI Petstore"],
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="Apache-2.0",
long_description_content_type='text/markdown',
long_description="""\
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
"""
)
| [
"noreply@github.com"
] | holisticon.noreply@github.com |
650bf7955648b1c12100ec7a4b97ce3e7b453937 | 9e36c3233dabb427257893b117f6a5f7914cf035 | /generate_target_stats.py | d327cbee12c40a77888f0665297fad493eaaf5a2 | [] | no_license | shwhalen/loopdis | bc6d21a77de1747afbbdf07c08d3774bdcb70193 | db5b56c82c73b0b41f456da9d205d9243aae3690 | refs/heads/master | 2021-01-25T09:52:40.201318 | 2018-12-07T22:05:49 | 2018-12-07T22:05:49 | 159,572,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | #!/usr/bin/env python
import pandas as pd
import sklearn.externals.joblib as joblib
from chromatics import *
from common import *
from tqdm import tqdm
def get_accuracies(cell_type):
cell_type_contacts_df = get_closest_genes(
contacts_df.query(f'{cell_type} >= @significant_pchic_threshold'),
genome = 'GRCh37'
)
cell_type_contacts_df['oe_closest_gene_name'] = (
cell_type_contacts_df
['oe_closest_gene_name']
.apply(
lambda x: set([x])
)
)
cell_type_contacts_df['is_bait_closest_gene_to_oe'] = (
(
cell_type_contacts_df['oe_closest_gene_name'] -
cell_type_contacts_df['bait_gene_names']
)
.str.len() == 0
)
closest_accuracy = (
cell_type_contacts_df
['is_bait_closest_gene_to_oe']
.mean()
)
super_pop_accuracies = [
get_contact_ld_blocks(cell_type_contacts_df, super_pop)
['oe_shares_ld_block_with_bait']
.mean()
for super_pop in super_pops
]
return [cell_type, closest_accuracy] + super_pop_accuracies
def get_contact_ld_blocks(contacts_df, super_pop):
def get_ld_block_set(fragment_columns, prefix):
fragment_contacts_df = (
contacts_df
[fragment_columns[:3]]
.drop_duplicates()
)
fragment_ld_blocks_df = ld_blocks_df.rename(columns = lambda x: prefix + x)
fragment_ld_block_columns = fragment_ld_blocks_df.columns.tolist()
fragment_ld_blocks_df = bedtools(
'intersect -sorted -wa -wb',
fragment_contacts_df,
fragment_ld_blocks_df,
genome = 'GRCh37',
)
fragment_ld_blocks_df[prefix + 'ld_block_name'] = concat_coords(
fragment_ld_blocks_df,
fragment_ld_block_columns
)
return (
fragment_ld_blocks_df
.groupby(fragment_columns[:3])
.apply(
lambda x: set(
x
[prefix + 'ld_block_name']
.unique()
)
)
.rename(prefix + 'ld_block_names')
.reset_index()
)
ld_blocks_df = get_plink_ld_blocks(None, super_pop)
oe_ld_blocks_df = get_ld_block_set(oe_columns[:3], 'oe_')
bait_ld_blocks_df = get_ld_block_set(bait_columns[:3], 'bait_')
contacts_df = pd.merge(contacts_df, oe_ld_blocks_df, on = oe_columns[:3])
contacts_df = pd.merge(contacts_df, bait_ld_blocks_df, on = bait_columns[:3])
# note the negation operator
contacts_df['oe_shares_ld_block_with_bait'] = ~(
contacts_df
.apply(
lambda x: x['oe_ld_block_names'].isdisjoint(x['bait_ld_block_names']),
axis = 1
)
)
return contacts_df
contacts_df = get_javierre_contacts()
contacts_df['bait_gene_names'] = (
contacts_df
['bait_gene_names']
.fillna('')
.str.split(';', expand = False)
.apply(set)
)
del contacts_df['oe_gene_names']
results = joblib.Parallel(-1)(
joblib.delayed(get_accuracies)(_)
for _ in tqdm(blood_cell_types, 'cell line')
)
stats_df = pd.DataFrame(
results,
columns = ['Cell Type', 'Closest Gene'] + [f'LD ({_})' for _ in super_pops]
)
stats_df.to_latex(
'output/target_stats-table.tex',
index = False,
float_format = percent_formatter
)
| [
"sean@sixus5.local"
] | sean@sixus5.local |
17040ccedb5c26efb123bc8a9513defa32f9b4dc | f92fbb5ecbcd0adf4998e19d9d27e49386f898ab | /rls/algorithms/single/modelbased/planet.py | be9147eddd57a4a5b08109d7b2682e733572c12f | [
"Apache-2.0"
] | permissive | tonylibing/RLs | 26e5dedbe7e36704ac98fa8efd00184059cdc717 | 21607d93e26f3be7a1243a642ed7e76178c856ae | refs/heads/master | 2023-08-02T06:14:19.142614 | 2021-09-15T16:20:28 | 2021-09-15T16:20:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,145 | py | #!/usr/bin/env python3
# encoding: utf-8
from typing import Dict, List, NoReturn, Union
import numpy as np
import torch as t
from torch import distributions as td
from rls.algorithms.base.sarl_off_policy import SarlOffPolicy
from rls.common.data import Data, get_first_vector, get_first_visual
from rls.common.decorator import iton
from rls.nn.dreamer import DenseModel, RecurrentStateSpaceModel
from rls.nn.utils import OPLR
class PlaNet(SarlOffPolicy):
'''
Learning Latent Dynamics for Planning from Pixels, http://arxiv.org/abs/1811.04551
'''
policy_mode = 'off-policy'
def __init__(self,
stoch_dim=30,
deter_dim=200,
model_lr=6e-4,
kl_free_nats=3,
kl_scale=1.0,
reward_scale=1.0,
cem_horizon=12,
cem_iter_nums=10,
cem_candidates=1000,
cem_tops=100,
action_sigma=0.3,
network_settings=dict(),
**kwargs):
super().__init__(**kwargs)
assert self.is_continuous == True, 'assert self.is_continuous == True'
self.cem_horizon = cem_horizon
self.cem_iter_nums = cem_iter_nums
self.cem_candidates = cem_candidates
self.cem_tops = cem_tops
assert self.use_rnn == False, 'assert self.use_rnn == False'
if self.obs_spec.has_visual_observation \
and len(self.obs_spec.visual_dims) == 1 \
and not self.obs_spec.has_vector_observation:
visual_dim = self.obs_spec.visual_dims[0]
# TODO: optimize this
assert visual_dim[0] == visual_dim[1] == 64, 'visual dimension must be [64, 64, *]'
self._is_visual = True
elif self.obs_spec.has_vector_observation \
and len(self.obs_spec.vector_dims) == 1 \
and not self.obs_spec.has_visual_observation:
self._is_visual = False
else:
raise ValueError("please check the observation type")
self.stoch_dim = stoch_dim
self.deter_dim = deter_dim
self.kl_free_nats = kl_free_nats
self.kl_scale = kl_scale
self.reward_scale = reward_scale
self._action_sigma = action_sigma
self._network_settings = network_settings
if self.obs_spec.has_visual_observation:
from rls.nn.dreamer import VisualDecoder, VisualEncoder
self.obs_encoder = VisualEncoder(self.obs_spec.visual_dims[0],
**network_settings['obs_encoder']['visual']).to(self.device)
self.obs_decoder = VisualDecoder(self.decoder_input_dim,
self.obs_spec.visual_dims[0],
**network_settings['obs_decoder']['visual']).to(self.device)
else:
from rls.nn.dreamer import VectorEncoder
self.obs_encoder = VectorEncoder(self.obs_spec.vector_dims[0],
**network_settings['obs_encoder']['vector']).to(self.device)
self.obs_decoder = DenseModel(self.decoder_input_dim,
self.obs_spec.vector_dims[0],
**network_settings['obs_decoder']['vector']).to(self.device)
self.rssm = self._dreamer_build_rssm()
"""
p(r_t | s_t, h_t)
Reward model to predict reward from state and rnn hidden state
"""
self.reward_predictor = DenseModel(self.decoder_input_dim,
1,
**network_settings['reward']).to(self.device)
self.model_oplr = OPLR([self.obs_encoder, self.rssm, self.obs_decoder, self.reward_predictor],
model_lr, **self._oplr_params)
self._trainer_modules.update(obs_encoder=self.obs_encoder,
obs_decoder=self.obs_decoder,
reward_predictor=self.reward_predictor,
rssm=self.rssm,
model_oplr=self.model_oplr)
@property
def decoder_input_dim(self):
return self.stoch_dim + self.deter_dim
def _dreamer_build_rssm(self):
return RecurrentStateSpaceModel(self.stoch_dim,
self.deter_dim,
self.a_dim,
self.obs_encoder.h_dim,
**self._network_settings['rssm']).to(self.device)
@iton
def select_action(self, obs):
if self._is_visual:
obs = get_first_visual(obs)
else:
obs = get_first_vector(obs)
# Compute starting state for planning
# while taking information from current observation (posterior)
embedded_obs = self.obs_encoder(obs) # [B, *]
state_posterior = self.rssm.posterior(self.rnncs['hx'], embedded_obs) # dist # [B, *]
# Initialize action distribution
mean = t.zeros((self.cem_horizon, 1, self.n_copys, self.a_dim)) # [H, 1, B, A]
stddev = t.ones((self.cem_horizon, 1, self.n_copys, self.a_dim)) # [H, 1, B, A]
# Iteratively improve action distribution with CEM
for itr in range(self.cem_iter_nums):
action_candidates = mean + stddev * t.randn(self.cem_horizon, self.cem_candidates, self.n_copys, self.a_dim) # [H, N, B, A]
action_candidates = action_candidates.reshape(self.cem_horizon, -1, self.a_dim) # [H, N*B, A]
# Initialize reward, state, and rnn hidden state
# These are for parallel exploration
total_predicted_reward = t.zeros((self.cem_candidates*self.n_copys, 1)) # [N*B, 1]
state = state_posterior.sample((self.cem_candidates,)) # [N, B, *]
state = state.view(-1, state.shape[-1]) # [N*B, *]
rnn_hidden = self.rnncs['hx'].repeat((self.cem_candidates, 1)) # [B, *] => [N*B, *]
# Compute total predicted reward by open-loop prediction using pri
for _t in range(self.cem_horizon):
next_state_prior, rnn_hidden = self.rssm.prior(state, t.tanh(action_candidates[_t]), rnn_hidden)
state = next_state_prior.sample() # [N*B, *]
post_feat = t.cat([state, rnn_hidden], -1) # [N*B, *]
total_predicted_reward += self.reward_predictor(post_feat).mean # [N*B, 1]
# update action distribution using top-k samples
total_predicted_reward = total_predicted_reward.view(self.cem_candidates, self.n_copys, 1) # [N, B, 1]
_, top_indexes = total_predicted_reward.topk(self.cem_tops, dim=0, largest=True, sorted=False) # [N', B, 1]
action_candidates = action_candidates.view(self.cem_horizon, self.cem_candidates, self.n_copys, -1) # [H, N, B, A]
top_action_candidates = action_candidates[:, top_indexes, t.arange(self.n_copys).reshape(self.n_copys, 1), t.arange(self.a_dim)] # [H, N', B, A]
mean = top_action_candidates.mean(dim=1, keepdim=True) # [H, 1, B, A]
stddev = top_action_candidates.std(dim=1, unbiased=False, keepdim=True) # [H, 1, B, A]
# Return only first action (replan each state based on new observation)
actions = t.tanh(mean[0].squeeze(0)) # [B, A]
actions = self._exploration(actions)
_, self.rnncs_['hx'] = self.rssm.prior(state_posterior.sample(),
actions,
self.rnncs['hx'])
return actions, Data(action=actions)
def _exploration(self, action: t.Tensor) -> t.Tensor:
"""
:param action: action to take, shape (1,) (if categorical), or (action dim,) (if continuous)
:return: action of the same shape passed in, augmented with some noise
"""
sigma = self._action_sigma if self._is_train_mode else 0.
noise = t.randn(*action.shape) * sigma
return t.clamp(action + noise, -1, 1)
@iton
def _train(self, BATCH):
T, B = BATCH.action.shape[:2]
if self._is_visual:
obs_ = get_first_visual(BATCH.obs_)
else:
obs_ = get_first_vector(BATCH.obs_)
# embed observations with CNN
embedded_observations = self.obs_encoder(obs_) # [T, B, *]
# initialize state and rnn hidden state with 0 vector
state, rnn_hidden = self.rssm.init_state(shape=B) # [B, S], [B, D]
# compute state and rnn hidden sequences and kl loss
kl_loss = 0
states, rnn_hiddens = [], []
for l in range(T):
# if the begin of this episode, then reset to 0.
# No matther whether last episode is beened truncated of not.
state = state * (1. - BATCH.begin_mask[l]) # [B, S]
rnn_hidden = rnn_hidden * (1. - BATCH.begin_mask[l]) # [B, D]
next_state_prior, next_state_posterior, rnn_hidden = self.rssm(state,
BATCH.action[l],
rnn_hidden,
embedded_observations[l]) # a, s_
state = next_state_posterior.rsample() # [B, S] posterior of s_
states.append(state) # [B, S]
rnn_hiddens.append(rnn_hidden) # [B, D]
kl_loss += self._kl_loss(next_state_prior, next_state_posterior)
kl_loss /= T # 1
# compute reconstructed observations and predicted rewards
post_feat = t.cat([t.stack(states, 0), t.stack(rnn_hiddens, 0)], -1) # [T, B, *]
obs_pred = self.obs_decoder(post_feat) # [T, B, C, H, W] or [T, B, *]
reward_pred = self.reward_predictor(post_feat) # [T, B, 1], s_ => r
# compute loss for observation and reward
obs_loss = -t.mean(obs_pred.log_prob(obs_)) # [T, B] => 1
# [T, B, 1]=>1
reward_loss = -t.mean(reward_pred.log_prob(BATCH.reward).unsqueeze(-1))
# add all losses and update model parameters with gradient descent
model_loss = self.kl_scale*kl_loss + obs_loss + self.reward_scale * reward_loss # 1
self.model_oplr.optimize(model_loss)
summaries = dict([
['LEARNING_RATE/model_lr', self.model_oplr.lr],
['LOSS/model_loss', model_loss],
['LOSS/kl_loss', kl_loss],
['LOSS/obs_loss', obs_loss],
['LOSS/reward_loss', reward_loss]
])
return t.ones_like(BATCH.reward), summaries
def _initial_rnncs(self, batch: int) -> Dict[str, np.ndarray]:
return {'hx': np.zeros((batch, self.deter_dim))}
def _kl_loss(self, prior_dist, post_dist):
# 1
return td.kl_divergence(prior_dist, post_dist).clamp(min=self.kl_free_nats).mean()
| [
"keavnn.wjs@gmail.com"
] | keavnn.wjs@gmail.com |
b2d33347aa47d60f79aa8e72131352ebb1500946 | 0eaea7c3d760067436a5fea2f42c1101ddb1f5ca | /tests/test_accmip6.py | fb2b284ea4ef5aaab01e5a4a4aad1fc067550c17 | [
"MIT"
] | permissive | TaufiqHassan/acccmip6 | 297a33e887fc930aab8ef22ae2d29219f1bcf225 | 62ae2d128dbc930d2a1c2491ec352af935a6bf59 | refs/heads/master | 2023-05-23T11:06:12.708738 | 2023-03-01T09:56:24 | 2023-03-01T09:56:24 | 208,252,347 | 93 | 23 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_accmip6
----------------------------------
Tests for `accmip6` module.
"""
import pytest
from pathlib import Path
from acccmip6.utilities.c6db import SearchDB
from acccmip6.utilities.util import _dir_path, _Construct_urls
def test_url_getter():
d = SearchDB()
d.variable = 'var1, var2, var3, varN'
url = d.get_url()
durl=_Construct_urls(['var1', 'var2', 'var3', 'varN'],None,None,None,None)._Durl
assert url == durl+"&variable=var1&variable=var2&variable=var3&variable=varN&limit=10000"
def test_dir_path():
d = _dir_path()
p=Path('.')
assert d._get_dir('') == p.absolute() / 'CMIP6'
| [
"taufiq.hassan06@gmail.com"
] | taufiq.hassan06@gmail.com |
16ea3ba54dfa748779b1a9eef4279bd305f021d3 | 8533e69c8b9496259cc8f7a216ff32dc753306fb | /PycharmProjects/PyQt5/PyQt5Demo/fontdialog.py | 869c63cca7c4e3840f6aeefc334dacdddf1aa4e6 | [] | no_license | huytrv/MyPy | 93b947a4c52134bc2d35e4e406c24775cb248efd | e179f8fdb856d03909149aca00f7da4d46f38edf | refs/heads/master | 2020-03-28T13:41:33.381861 | 2018-09-24T04:27:54 | 2018-09-24T04:27:54 | 148,418,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QPushButton,
QSizePolicy, QLabel, QFontDialog, QApplication)
import sys
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
vbox = QVBoxLayout()
btn = QPushButton('Dialog', self)
btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
btn.move(20, 20)
vbox.addWidget(btn)
btn.clicked.connect(self.showDialog)
self.lbl = QLabel('Knowledge only matters', self)
self.lbl.move(130, 20)
vbox.addWidget(self.lbl)
self.setLayout(vbox)
self.setGeometry(300, 300, 250, 180)
self.setWindowTitle('Font dialog')
self.show()
def showDialog(self):
font, ok = QFontDialog.getFont()
if ok:
self.lbl.setFont(font)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | [
"hstarpp292@gmail.com"
] | hstarpp292@gmail.com |
10238bbad066ca25d54dd84191ef1a24d5bb7730 | 4cb507682aa3d1136221397815c60bac3538e1b0 | /Lesson 6: Dictionaries/Interview Questions/two_sum.py | e9a1bf5fa4b5fb9e660a735baf82cef60f944c92 | [] | no_license | srasheed1018/Blue-Studios-Interview-Prep | fa99274cda4aaa6a37a9499b60ecddcd3ef90330 | b4366ac89e78229c5d424c323d7ee01a79fa878c | refs/heads/master | 2023-03-18T19:58:08.923326 | 2020-01-11T17:26:03 | 2020-01-11T17:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | '''
Given a list of numbers and a number k, determine whether or not any two numbers from the list add up to k.
Ex. k = 17 so [10, 15, 3, 7] => True
Ex. k = 22 so [14, 17, 1, 9] => False
Ex. k = 9 so [2, 7, 11, 15] => True
'''
def two_sum(arr, k):
nums = {}
for i in arr:
if i not in nums:
nums[i] = k - i
if i in nums.values():
return True
return False
test1 = [10, 15, 3, 7]
k1 = 17
print(two_sum(test1, k1))
test2 = [14, 17, 1, 9]
k2 = 22
print(two_sum(test2, k2))
test3 = [2, 7, 11, 15]
k3 = 9
print(two_sum(test3, k3)) | [
"isabella.berryx3@gmail.com"
] | isabella.berryx3@gmail.com |
c1d86210f98f48bbe10dcf368c6f3f9a714d07c0 | 8777df087c7bafae874a24351998964b7be789c1 | /djangoapps/siteroot/views.py | a469d44272fca54e76cec3eb34cfdc43161355a1 | [] | no_license | bestchanges/studyworthy | d5a29cd7a3b51f740501bc6faeb846ab890b9386 | 3f0d83deec09a4ee6141042df6a3b98fe1312615 | refs/heads/master | 2021-11-25T09:14:14.726125 | 2021-11-17T07:20:16 | 2021-11-17T07:20:16 | 193,115,897 | 0 | 2 | null | 2021-11-17T07:27:50 | 2019-06-21T14:54:41 | Python | UTF-8 | Python | false | false | 1,581 | py | import json
from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.http import urlencode
from djangoapps.crm.models import CourseProduct
def index(request):
courses = CourseProduct.objects.filter(state=CourseProduct.State.ACTIVE)
context = {
'courses': courses,
}
return render(request, 'index.html', context)
@login_required
def dashboard(request):
user : SiteUser = request.user
auth0user = user.social_auth.get(provider='auth0')
person = user.person
if 'picture' in auth0user.extra_data and person:
if auth0user.extra_data['picture'] != person.avatar_url:
person.avatar_url = auth0user.extra_data['picture']
person.save()
if person and not person.first_name:
# First try to get name, next is to get email
name = user.first_name or auth0user.extra_data.get('name') or auth0user.extra_data.get('email')
if name:
# use name before @ in email as first_name
name = name.split('@', 1)[0]
person.first_name = name
person.save()
userdata = {
'user_id': auth0user.uid,
'name': user.first_name,
'picture': auth0user.extra_data['picture'],
'email': auth0user.extra_data['email'],
}
return render(request, 'dashboard.html', {
'auth0User': auth0user,
'userdata': json.dumps(userdata, indent=4)
})
| [
"egor.fedorov@gmail.com"
] | egor.fedorov@gmail.com |
6203e81e8f7ba910157eeac0f075e7cfc176d5ee | 2d4e9dd73485bbe45ba3fd6df39da2fe548520c4 | /ecom/settings.py | e97a213fd00c61b62e54d4c3412f78e9b239b526 | [] | no_license | bafsr/Stand120 | feecc7639ed0092f51cdca7308f707109512b21b | 6815604addeb6b10fcdca6591a8110e57c4e01bb | refs/heads/master | 2023-06-15T14:50:05.273190 | 2021-07-20T17:59:09 | 2021-07-20T17:59:09 | 387,868,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | import os
import environ

env = environ.Env()
# read the .env file
environ.Env.read_env()

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = env('SECRET_KEY')
# Fix: env('DEBUG') returns the raw environment string (e.g. "False"), which is
# always truthy, so the `if DEBUG is False:` hardening block later in this file
# could never trigger.  env.bool() casts it to a real boolean.
DEBUG = env.bool('DEBUG', default=False)
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecom.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecom.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static_root")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
if DEBUG is False:
SESSION_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_SECONDS = 31536000
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_REDIRECT = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['www.domain.com']
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
} | [
"briansr03@gmail.com"
] | briansr03@gmail.com |
11fcd8e4a25632b0a49874ec600df5f8b1bd3c74 | 71c854326fe3911a059a21104fdee4588f803ea5 | /source/day07/모듈 소스 코드/prime 구하기.py | 465850ee74b2c8beb8a70aa32390332370ec221f | [] | no_license | Kwon-YoungSun/python | e0d825fca53c6dec85b23baceaf1b3876e45ea75 | ad241a92baace663f410230d346348244e85e2f2 | refs/heads/main | 2023-06-26T09:41:00.544743 | 2021-07-31T06:58:53 | 2021-07-31T06:58:53 | 386,270,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from primePy import primes
# List the primes in the range 100..1000 and print how many there are.
# NOTE(review): whether primes.between() treats its bounds as inclusive is not
# visible from this file -- confirm against the primePy documentation.
list_primes = primes.between(100, 1000)
list_primes_len = len(list_primes)
print(list_primes_len)
"dolphini0727@naver.com"
] | dolphini0727@naver.com |
6ea1522396552c0f406e717a9a2af6ff2b38c39d | 508079099164e26703571ea6ce416edcffd8b72a | /LeetCode/Facebook/Medium/658. Find K Closest Elements.py | 18df18b50f659d8de5407e2d77030a4dbe84cb43 | [] | no_license | bashbash96/InterviewPreparation | aa1163ebc789c3d5e3ade742ecf9821bcb80778d | 69d0e01665ff92cf62a6548ca9e150a9b941aac8 | refs/heads/master | 2021-08-06T17:31:27.345756 | 2021-07-29T20:05:51 | 2021-07-29T20:05:51 | 245,353,790 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | """
Given a sorted integer array arr, two integers k and x, return the k closest integers to x in the array. The result should also be sorted in ascending order.
An integer a is closer to x than an integer b if:
|a - x| < |b - x|, or
|a - x| == |b - x| and a < b
Example 1:
Input: arr = [1,2,3,4,5], k = 4, x = 3
Output: [1,2,3,4]
Example 2:
Input: arr = [1,2,3,4,5], k = 4, x = -1
Output: [1,2,3,4]
Constraints:
1 <= k <= arr.length
1 <= arr.length <= 104
arr is sorted in ascending order.
-104 <= arr[i], x <= 104
"""
from bisect import bisect_left
class Solution(object):
    def findClosestElements(self, arr, k, x):
        """
        :type arr: List[int]
        :type k: int
        :type x: int
        :rtype: List[int]

        Find where x would be inserted, then hand the element just to the
        left of that point to closest_k() as the expansion pivot (clamped
        to 0 when x sorts before everything).
        """
        start = max(bisect_left(arr, x) - 1, 0)
        return closest_k(arr, k, x, start)
def closest_k(arr, k, x, pivot):
    """Return the k values of sorted `arr` closest to x, in ascending order.

    Grows a window outward from `pivot`: `lo` walks left, `hi` walks right,
    one step per collected element, always taking the closer candidate
    (ties go to the smaller value, per the problem statement).
    """
    lo, hi = pivot, pivot + 1
    for _ in range(k):
        if lo < 0:
            # Ran off the left edge: only the right side can still grow.
            hi += 1
        elif hi >= len(arr) or abs(x - arr[lo]) <= abs(x - arr[hi]):
            # Right edge exhausted, or the left candidate is at least as close.
            lo -= 1
        else:
            hi += 1
    return arr[lo + 1:hi]
"""
approach:
1- binary search for x.
2- expand two ways left and right searching for closest k numbers.
time O(log(n) + k)
space O(1)
"""
| [
"amjad.bashiti.96@gmail.com"
] | amjad.bashiti.96@gmail.com |
121b171e2508ea6a42d68aa7319156308a97aab8 | e8ba4ff7a31803c773fb6fdabd98d7d06850165d | /Services_dev/IndexAuth/app/src/IndexAuth_Server.py | 50d932b6630ab1ecb32678c80c4aef2841d00a6f | [
"MIT"
] | permissive | samuelxu999/Microservices_dev | ded488016a2d4b9eca0094ffb8c24ce3cf2ff4f2 | 5ea836d1275427b0a3c08432882f106d6ddab1f8 | refs/heads/master | 2023-05-14T19:34:08.250014 | 2022-12-07T20:14:23 | 2022-12-07T20:14:23 | 157,278,854 | 0 | 3 | MIT | 2023-05-01T20:18:50 | 2018-11-12T21:24:24 | Python | UTF-8 | Python | false | false | 2,665 | py | #!/usr/bin/env python3.5
'''
========================
IndexAuth_Server module
========================
Created on Dec.27, 2018
@author: Xu Ronghua
@Email: rxu22@binghamton.edu
@TaskDescription: This module provide encapsulation of Hashed Index Authentication Microservices API that handle and response client's request.
'''
import datetime
import json
from flask import Flask, jsonify
from flask import abort,make_response,request
from IndexAuth_Policy import IndexPolicy
app = Flask(__name__)
now = datetime.datetime.now()
datestr=now.strftime("%Y-%m-%d")
timestr=now.strftime("%H:%M:%S")
#========================================== Error handler ===============================================
# Error handler for abort(404): wrap the error in the API's uniform JSON envelope.
# NOTE(review): all three handlers index error.description['message'], i.e. they
# assume every abort(...) call passes description={'message': ...}; a bare
# abort(404) would raise KeyError here -- confirm against the callers.
@app.errorhandler(404)
def not_found(error):
	#return make_response(jsonify({'error': 'Not found'}), 404)
	response = jsonify({'result': 'Failed', 'message': error.description['message']})
	response.status_code = 404
	return response

# Error handler for abort(400): bad/malformed request parameters.
@app.errorhandler(400)
def type_error(error):
	#return make_response(jsonify({'error': 'type error'}), 400)
	response = jsonify({'result': 'Failed', 'message': error.description['message']})
	response.status_code = 400
	return response

# Error handler for abort(401): authentication / access denied.
@app.errorhandler(401)
def access_deny(error):
	response = jsonify({'result': 'Failed', 'message': error.description['message']})
	response.status_code = 401
	return response
#========================================== Request handler ===============================================
# GET /indexauth/api/v1.0/getIndexToken?index_id=<id>
# Return the stored index token for the given index id.
@app.route('/indexauth/api/v1.0/getIndexToken', methods=['GET'])
def getIndexToken():
	#print request.data
	index_id = request.args.get('index_id', type = str)
	json_data = IndexPolicy.get_indexToken(index_id)
	return jsonify({'result': 'Succeed', 'data': json_data}), 201

# GET /indexauth/api/v1.0/getAuthorizedNodes
# Return the list of nodes authorized by the index policy.
@app.route('/indexauth/api/v1.0/getAuthorizedNodes', methods=['GET'])
def getAuthorizedNodes():
	#print request.data
	json_data = IndexPolicy.get_AuthorizedNodes()
	return jsonify({'result': 'Succeed', 'data': json_data}), 201

# GET /indexauth/api/v1.0/verify_indexToken?index_id=<id>&index_data=<path>
# Verify the token for index_id against the supplied data.
# NOTE(review): the query parameter is named 'index_data' but is bound to a
# local called 'filepath' and passed as such -- confirm IndexPolicy expects a path.
@app.route('/indexauth/api/v1.0/verify_indexToken', methods=['GET'])
def verify_indexToken():
	#print request.data
	index_id = request.args.get('index_id', type = str)
	filepath = request.args.get('index_data', type = str)
	json_data = IndexPolicy.verify_indexToken(index_id,filepath)
	return jsonify({'result': 'Succeed', 'data': json_data}), 201
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
| [
"samuelxu999@gmail.com"
] | samuelxu999@gmail.com |
03774aa0b5ad39e6da36e47a0613ab83b5eb40a5 | 924c384344bdae7518e6cb8363ded38cb6e6b6fc | /jobs/my_library/Connector.py | 52e54719da2df508f2a5d2028310535831d95cc8 | [] | no_license | TirolJPN/codeforces-clustering | 09371178f1afc877449309771a152811e490f8e7 | 029cc2b3436af54940956086b4f62eede5ed205e | refs/heads/master | 2022-04-05T09:02:24.879092 | 2020-02-18T08:44:55 | 2020-02-18T08:45:24 | 180,091,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | import mysql.connector as cn
from . import key
class Connector:
    """Thin wrapper around a mysql.connector connection plus a buffered dict cursor.

    Connection parameters come from the sibling ``key`` module.
    NOTE(review): if connect() fails, the error is only printed and
    ``self.cnx``/``self.cur`` are never set, so any later method call raises
    AttributeError -- confirm whether callers rely on this best-effort style.
    """

    # Prepare the connector in the constructor. (translated from Japanese)
    def __init__(self):
        try:
            self.cnx = cn.connect(
                host=key.DB_HOST,
                user=key.DB_USER,
                password=key.DB_PASSWORD,
                port=key.DB_PORT,
                database=key.DB_DATABASE
            )
            # buffered=True fetches result sets eagerly; dictionary=True yields rows as dicts.
            self.cur = self.cnx.cursor(buffered=True, dictionary=True)
        except cn.Error as e:
            print("Error:", str(e))

    def exec_select_sql(self, sql):
        """Execute a SELECT statement and return all rows (list of dicts)."""
        self.cur.execute(sql)
        return self.cur.fetchall()

    def exec_insert_sql(self, sql):
        """Execute a write statement and commit the transaction."""
        self.cur.execute(sql)
        self.cnx.commit()
"tirol.jpn@gmail.com"
] | tirol.jpn@gmail.com |
0b8a8030b870e386374e41eca09df9151d321fd8 | 4a04051ba5956068e8180c0ebb94792a2b4da1d4 | /CombinatorialMathematics/Misalignment.py | f0200476cdaf835e638dbfda1e1688d9adfe077a | [] | no_license | Fyy10/Python-Playground | 8ab13f3a255541c4980c99510c15e5813e86fa23 | bdb43396ecfdb6897bd1cd64e4886236cd20923b | refs/heads/master | 2022-07-28T21:29:40.724997 | 2022-07-18T10:31:53 | 2022-07-18T10:31:53 | 250,740,856 | 0 | 1 | null | 2021-06-26T09:05:25 | 2020-03-28T07:48:01 | Jupyter Notebook | UTF-8 | Python | false | false | 230 | py | # n-2, n-1, n
# Misalignment
D = [0, 0, 1]
n = int(input('Input n (n > 2): '))
assert(n > 2)
for i in range(3, n+1):
D[0] = D[1]
D[1] = D[2]
D[2] = (i-1) * (D[0] + D[1])
print('D:', D[2])
print('Q:', D[2] + D[1])
| [
"1093928135@qq.com"
] | 1093928135@qq.com |
5f23d175f0fdaba8935c2f1b1e0de383c9639500 | 0a4d783c5b61113dd35ac87049f6f25b5684d08a | /metrics_iperf_jobs.py | 80f9b9196e7c28242fb92e5ea13aa4ee91fd0c02 | [] | no_license | gdmk/check_mk-iperf_jobs | 7dd36767a8cbdb315fae82610fe09bb1ba8f0b99 | 827f6ac71d2d0e9f46a9dc22914d02bd08c3c358 | refs/heads/master | 2020-04-21T02:35:02.618731 | 2019-02-05T15:29:58 | 2019-02-05T15:29:58 | 169,258,490 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | metric_info["bw_from"] = {
"title" : _("bandwidth from the Host"),
"unit" : "bits/s",
"color" : "#00e060",
}
# Inbound bandwidth metric (mirrored below the axis in the combined graph).
metric_info["bw_to"] = {
    "title" : _("bandwidth to the Host"),
    "unit" : "bits/s",
    "color" : "#0080e0",
}

# Combined graph: traffic from the host drawn upward, traffic to it mirrored downward.
graph_info["Iperf_bandwidth"] = {
    "title" : _("Iperf Bandwidth"),
    "metrics" : [
        ( "bw_from", "area", ),
        ( "bw_to", "-area", ),
    ],
    "scalars": [
        ("bw_from:warn", "Warning from"),
        ("bw_from:crit", "Critical from"),
        # ",-1,*" flips the threshold onto the mirrored (negative) side.
        ("bw_to:warn,-1,*", "Warning to"),
        ("bw_to:crit,-1,*", "Critical to"),
    ]
}
metric_info["pkt_lost_from"] = {
"title" : _("Lost Packets from the Host"),
"unit" : "count",
"color" : "11/a",
}
metric_info["pkt_lost_to"] = {
"title" : _("Lost Packets to the Host"),
"unit" : "count",
"color" : "15/a",
}
graph_info["packets_lost"] = {
"title" : _("Iperf Lost Packets"),
"metrics" : [
( "pkt_lost_from", "area" ),
( "pkt_lost_to", "-area" ),
],
"scalars": [
("pkt_lost_from:warn", "Warning from"),
("pkt_lost_from:crit", "Critical from"),
("pkt_lost_to:warn,-1,*", "Warning to"),
("pkt_lost_to:crit,-1,*", "Critical to"),
]
}
metric_info["prct_lost_from"] = {
"title" : _("Percent of Lost Packets from the Host"),
"unit" : "%",
"color" : "11/c",
}
metric_info["prct_lost_to"] = {
"title" : _("Percent of Lost Packets to the Host"),
"unit" : "%",
"color" : "15/c",
}
graph_info["packets_lost_prct"] = {
"title" : _("Iperf Percent of Lost Packets"),
"metrics" : [
("prct_lost_from", "area"),
("prct_lost_to", "-area"),
],
"scalars": [
("prct_lost_from:warn", "Warning from"),
("prct_lost_from:crit", "Critical from"),
("prct_lost_to:warn,-1,*", "Warning to"),
("prct_lost_to:crit,-1,*", "Critical to"),
]
}
metric_info["jitter_from"] = {
"title" : _("Jitter from the Host"),
"unit" : "s",
"color" : "36/a",
}
metric_info["jitter_to"] = {
"title" : _("Jitter to the Host"),
"unit" : "s",
"color" : "34/b",
}
graph_info["packet_jitter"] = {
"title" : _("Iperf Jitter"),
"metrics" : [
("jitter_from", "area"),
("jitter_to", "-area"),
],
"scalars": [
("jitter_from:warn", "Warning from"),
("jitter_from:crit", "Critical from"),
("jitter_to:warn,-1,*", "Warning to"),
("jitter_to:crit,-1,*", "Critical to"),
]
}
metric_info["retr_from"] = {
"title" : _("TCP retransmits from the Host"),
"unit" : "count",
"color" : "44/a",
}
metric_info["retr_to"] = {
"title" : _("TCP retransmits to the Host"),
"unit" : "count",
"color" : "34/a",
}
graph_info["retransmits"] = {
"title" : _("Iperf TCP Retransmits"),
"metrics" : [
( "retr_from", "area" ),
( "retr_to", "-area" ),
],
"scalars": [
("retr_from:warn", "Warning from"),
("retr_from:crit", "Critical from"),
("retr_to:warn,-1,*", "Warning to"),
("retr_to:crit,-1,*", "Critical to"),
]
}
| [
"noreply@github.com"
] | gdmk.noreply@github.com |
b817d3bbe514f82e994794bd55607533500c36fb | 77a2a754ed5f120b05082a41926214352c92397e | /deque.py | ef52952bbad8545c3d03a32aff729baf11875a40 | [] | no_license | apotree/Python | dd3bfd91d6f1efa2248cc565ac02912d5203c732 | de370f4f56dd5954650fb1b52558c7b4b82315cd | refs/heads/master | 2020-11-26T13:25:05.698994 | 2020-01-13T15:02:51 | 2020-01-13T15:02:51 | 229,085,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from collections import deque
dq = deque('data')
for node in dq:
print(node.upper(), end='')
print()
dq.append('aaa')
dq.appendleft('bbb')
dq.append('ccc')
print(dq)
print('deque => ', dq.pop())
print('deque => ', dq.popleft())
print('deque => ', dq[-1])
print('deque => ', dq[0])
print(dq)
print('t' in dq)
dq.extend('deque')
print(dq)
dq.extendleft(reversed('python'))
print(dq)
dq.reverse()
print(dq) | [
"noreply@github.com"
] | apotree.noreply@github.com |
3492ff51dd31e2b083ca5472109e9def0e2f7238 | 80a18af059a05d96e21c0ce5a083686ad131279b | /__init__.py | 0e62951c282e96ea5321cedb519e647e8266861a | [] | no_license | HasaSarl/vidy-fab | e258957abbc13fe5bc4c9dae90a7fcbb6e3d329c | 6b447d4c210f6a48c4ccc104e35010230de3c5c7 | refs/heads/master | 2020-03-26T00:37:04.075646 | 2018-08-11T17:20:53 | 2018-08-11T17:20:53 | 144,328,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2012 HASA (http://www.hasa.ch) All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import models
| [
"hasa"
] | hasa |
838de5b1fa72bed3804d5b3baa64482bb4c46dc6 | 59eca336bfe5f49257373c46ba12727b57650407 | /mysql_tests.py | 21213db23408548e398fbc524f15163af79f89c5 | [] | no_license | ras592/verbose-garbanzo | 8d206c0478ec7df63d245d8623ad141c20990f5e | d244e5a202f733d0ba879dc12db9c1aa7ea13534 | refs/heads/master | 2020-12-24T20:51:43.783093 | 2016-05-06T22:25:27 | 2016-05-06T22:25:27 | 56,338,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import MySQLdb
from sql import destroy_dbs, build_dbs, rebuild_tables
import sys, traceback
def migration():
try:
conn = MySQLdb.connect(host="localhost",user="rich",
passwd="some_pass")
destroy_dbs(conn)
print "Successfully dropped dbs"
build_dbs(conn)
print "Successfully built dbs"
except Exception as e:
# disconnect from server
conn.close()
print(traceback.print_exc(file=sys.stdout))
print e
else:
print "Successfully ran migration"
conn.close()
| [
"rasharrott@icloud.com"
] | rasharrott@icloud.com |
c09abce2634c8d6a2dbeb5a2f080fd922e2d9fb3 | dbb4d1de645b16fe900d05d93f1fc31545ba9c99 | /Abstract_Data_Type/queue.py | e92053a85236d226d9fa2dcf2cb5c43db67cec44 | [] | no_license | 0x-Robert/Algo_python_Study | 731d5902aec0e9d73b60b8f96e4f931efdcb28ce | 1940b46e5466e060aa69295b167173a316e3247d | refs/heads/main | 2023-07-31T11:49:49.483596 | 2021-09-08T07:24:51 | 2021-09-08T07:24:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | class Queue(object):
def __init__(self):
self.items = []
def isEmpty(self):
return not bool(self.items)
def enqueue(self, item):
self.items.insert(0,item)
def dequeue(self):
value = self.items.pop()
if value is not None:
return value
else:
print("Queue is empty")
def size(self):
return len(self.items)
def peek(self):
if self.items:
return self.items[-1]
else:
print("Queue is empty")
def __repr__(self):
return repr(self.items)
if __name__ == "__main__":
    # Command-line smoke test for the Queue class (messages are in Korean:
    # "is the queue empty?", "adding numbers 0~9", "queue size").
    queue = Queue()
    print("큐가 비었나요? {0}".format(queue.isEmpty()))
    print("큐에 숫자 0~9를 추가합니다.")
    # Enqueue 0..9.
    for i in range(10):
        queue.enqueue(i)
    print("큐 크기: {0}".format(queue.size()))
    print("peek: {0}".format(queue.peek()))
    print("dequeue: {0}".format(queue.dequeue()))
    print("peek: {0}".format(queue.peek()))
    print("큐가 비었나요? {0}".format(queue.isEmpty()))
    print(queue)
"smartdragon417@gmail.com"
] | smartdragon417@gmail.com |
82dce0291b2883bee2d384403455ea84e9cf1319 | 43a4767a79ade9c353574c1a43d26892ae57dc4f | /tests/integration/test_replicas.py | 5aa26c55e2e2724b7daeb6bddeb49fe18595f74e | [
"Apache-2.0"
] | permissive | volatilemolotov/curator | 224eafdbadb9aaccbec62fb029d656d9e3727998 | b41743a061ad790820affe7acee5f71abe819357 | refs/heads/master | 2023-07-27T21:08:36.636450 | 2023-07-21T22:19:10 | 2023-07-21T22:19:10 | 192,875,097 | 0 | 0 | NOASSERTION | 2023-07-22T03:44:13 | 2019-06-20T07:52:22 | Python | UTF-8 | Python | false | false | 2,004 | py | """Test replica count changing functionality"""
# pylint: disable=missing-function-docstring, missing-class-docstring, line-too-long
import os
from . import CuratorTestCase
from . import testvars
HOST = os.environ.get('TEST_ES_SERVER', 'http://127.0.0.1:9200')
class TestActionFileReplicas(CuratorTestCase):
    """Integration tests for the ``replicas`` action configured via an action file."""

    def test_increase_count(self):
        # Setting count=2 through the action file should update the live index setting.
        count = 2
        idx = 'my_index'
        self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
        self.write_config(self.args['actionfile'], testvars.replicas_test.format(count))
        self.create_index(idx)
        self.invoke_runner()
        assert count == int(self.client.indices.get_settings(index=idx)[idx]['settings']['index']['number_of_replicas'])

    def test_no_count(self):
        # A blank count is invalid configuration: the runner must exit non-zero.
        self.create_index('foo')
        self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
        self.write_config(self.args['actionfile'], testvars.replicas_test.format(' '))
        self.invoke_runner()
        assert 1 == self.result.exit_code

    def test_extra_option(self):
        # An unknown option in the action file must also make the runner fail.
        self.create_index('foo')
        self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
        self.write_config(self.args['actionfile'], testvars.bad_option_proto_test.format('replicas'))
        self.invoke_runner()
        assert 1 == self.result.exit_code

class TestCLIReplicas(CuratorTestCase):
    """Integration tests for the ``replicas`` action driven via CLI arguments."""

    def test_increase_count(self):
        count = 2
        idx = 'my_index'
        self.create_index(idx)
        args = self.get_runner_args()
        args += [
            '--config', self.args['configfile'],
            'replicas',
            '--count', str(count),
            '--filter_list', '{"filtertype":"pattern","kind":"prefix","value":"my"}',
        ]
        # NOTE(review): the logname mentions TestCLIOpenClosed -- looks copy-pasted
        # from another suite; harmless but confirm.
        assert 0 == self.run_subprocess(args, logname='TestCLIOpenClosed.test_open_closed')
        assert count == int(self.client.indices.get_settings(index=idx)[idx]['settings']['index']['number_of_replicas'])
| [
"noreply@github.com"
] | volatilemolotov.noreply@github.com |
7424b52ef64456b131e7b2fda2175cc9eddbe318 | fd8019d63cbdbecc4501986c0a8a7500508e630c | /reservations/migrations/0003_reservation_check_out.py | 46dc52cbbd1bdd61b2e247f026a0fc9dc82cfb50 | [] | no_license | woojs1209/airbnb-clone | d00088dc4b1b89162102cc21799ebdf6d2e02033 | 9d3022d9c95bac021fa4ea84736e0ee12d036fcd | refs/heads/master | 2023-03-25T11:15:52.799601 | 2021-03-18T04:32:54 | 2021-03-18T04:32:54 | 341,895,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # Generated by Django 2.2.5 on 2021-03-11 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reservations', '0002_remove_reservation_check_out'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='check_out',
field=models.DateField(null=True),
),
]
| [
"woojs1209@naver.com"
] | woojs1209@naver.com |
6411989d36dbc608b8d5cefd3542ef230ae6fa07 | 5f59090d4fc2696774932667e790ffd397a8d3f8 | /HW1/RoboStats3_5_rwma.py | b6045b4e9059e9b33685fa217552395bb6126689 | [] | no_license | lukaeerens93/RoboStats | 5398f01dac15fcb8f713aa192dfc5ee4383da9c3 | 5273b6e79a3761782cb3728088ee3b75fc7908d8 | refs/heads/master | 2020-03-29T11:20:49.468914 | 2019-05-10T15:58:29 | 2019-05-10T15:58:29 | 149,847,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,599 | py | # Robot Stats Assignment 1
# Implementing the Randomized Weighted Majority Algorithm (15 Points)
# For importing the classes
import RoboStats3_2
import matplotlib.pyplot as plt
import time
import random
# ------------------- Main Code ------------------
# Will take in as arguements: nat, eta, T where T = number of trials and eta is the penalty parameter
def RWMA(nat, eta, T):
# Initialize the following:
# - Weight vector with 1 (all weights = 1 before learning starts)
# - Initialize learner loss vector with 0s (losses by the learner from weighted consideration of experts)
# - Initialize expert loss vector with 0s (losses for all experts = 0 before learning starts)
weights = [1,1,1,1]
losses_learner = [0] * T
losses_expert = [[0,0,0,0] for i in range(0, T, 1)]
v=0
# Initialize the cumulative sum of loss for learner and each individual expert
sum_learner = 0
sum_expert = [0,0,0,0]
# 4. for t = 1,...,T do:
for t in range(T):
# 5. Receive expert advice (x(t) -> {-1,1}^N where N is the number of experts)
# expert 1 is a die-hard fan for Tartan's sports team and always says win;
# expert 2 is pessimistic and always says lose;
# expert 3 predicts Tartan will lose for odd-number matches and will win for even-number matches.
expert_3 = 0
if (t % 2 == 0): expert_3 = 1
else: expert_3 = -1
weather = random.choice(['sunny','rainy'])
if (weather == 'sunny'): expert_4 = 1
else: expert_4 = -1
x = [1,-1, expert_3, expert_4]
if (t % 3 == 0): v = 0
else: v = 1
# 6. estimate output (Multinomial w^(t)/Epsilon^(t))
wProb = [ float( weights[0] ) / sum(weights),
float( weights[1] ) / sum(weights),
float( weights[2] ) / sum(weights),
float( weights[3] ) / sum(weights)]
probSum = wProb[0] + wProb[1] + wProb[2] + wProb[3]
rand_num = random.uniform(0,1)
for i in range(0,4,1):
if (rand_num <= probSum):
y_ = x[i]
# 7. Receive y(t)
y_style = 0
if nat is 's': y_style = RoboStats3_2.Stochastic()
if nat is 'd': y_style = RoboStats3_2.Deterministic(v,t)
if nat is 'a': y_style = RoboStats3_2.Adverserial(weights,x)
y = y_style.output
# 8. wn^(t+1)=wn^(t) * (1-eta*(y^t != xn^(t) )) for all n
# Calculate the cumulative loss of expert and learner
for n in range(0, 4, 1):
val, powder = 0, 0
if (y != x[n]): val = 1
if (y == x[n]): val = 0
weights[n] = weights[n] * ( 1 - eta*(val) )
sum_expert[n] += 1*val
losses_expert[t][n] = sum_expert[n]
if (y_ != y): powder = 1
if (y_ == y): powder = 0
sum_learner += 1 * powder
losses_learner[t] = sum_learner
# Plot all of these
#plt.ion()
plt.figure(1)
title = ' '
if (nat == 's'): title = 'Stochastic'
if (nat == 'd'): title = 'Deterministic'
if (nat == 'a'): title = 'Adverserial'
plt.title(title)
plt.xlabel('Timestep')
plt.ylabel('Losses')
loss_exp = losses_expert[0:T]
exp1 = [j[0] for j in loss_exp]
exp2 = [j[1] for j in loss_exp]
exp3 = [j[2] for j in loss_exp]
exp4 = [j[3] for j in loss_exp]
print (exp1)
print (exp3)
plt.plot( exp1,'b', label = 'expert1')
plt.plot( exp2,'g', label = 'expert2')
plt.plot( exp3,'r', label = 'expert3')
plt.plot( exp4,'m', label = 'expert4')
plt.plot( losses_learner,'y', label = 'loss of learner' )
plt.legend(loc = 'upper left')
plt.pause(4)
plt.figure(2)
title = ' '
if (nat == 's'): title = 'Stochastic'
if (nat == 'd'): title = 'Deterministic'
if (nat == 'a'): title = 'Adverserial'
plt.title(title)
plt.xlabel('Timestep')
plt.ylabel('Regret')
regret = [0] * T
# Find who the biggest expert is (smallest loss)
most_expert = [min(loss) for loss in losses_expert]
#print (losses_expert)
for t in range(0,T,1):
#print (T)
#print (len(losses_expert))
#print (losses_expert[t])
#print (most_expert[t])
# Average regret
regret[t] = (float(losses_learner[t] - most_expert[t])/(t+1) )
plt.plot(regret,'r')
plt.pause(4)
#wma = RWMA('s', 0.1, 100)
wma = RWMA('d', 0.5, 100)
#wma = RWMA('a', 0.5, 100)
| [
"noreply@github.com"
] | lukaeerens93.noreply@github.com |
a9ede54f8c311163f3f1593922779cde560263bc | 3d0838cc0d3cca599c2dfb6bea3274ff3cabe0ac | /discore/models/_channel.py | 316c280d8e0f64347a51b6e2f534a923fc679e15 | [
"MIT"
] | permissive | geek-space-hq/discore | 6a799f411c81b580f0b3e0aac238e5dcf48d899c | 45f4870426e635353b3621f5089880cbb30c683c | refs/heads/develop | 2022-12-15T20:51:50.904945 | 2020-09-16T16:05:36 | 2020-09-16T16:05:36 | 295,093,463 | 3 | 0 | null | 2020-09-16T09:41:21 | 2020-09-13T06:36:35 | Python | UTF-8 | Python | false | false | 5,679 | py | from __future__ import annotations
from datetime import datetime
from enum import Enum, IntEnum
from typing import TYPE_CHECKING, List, Optional, Union
from pydantic import BaseModel
from pydantic.fields import Field
if TYPE_CHECKING:
from ._emoji import Emoji
from ._guild import GuildMember, Role
from ._user import User, UserMentioned
class Channel(BaseModel):
id: str
type: int
guild_id: Optional[str] = None
position: Optional[int] = None
permission_overwrites: Optional["Overwrite"] = None
name: Optional[str] = None
topic: Optional[str] = None
nsfw: bool = Field(default=False)
last_message_id: Optional[str] = None
bitrate: Optional[int] = None
user_limit: Optional[int] = None
rate_limit_per_user: Optional[int] = None
recipients: Optional[List["User"]] = None
icon: Optional[str] = None
parent_id: Optional[str] = None
last_pin_timestamp: Optional[datetime] = None
class ChannelType(IntEnum):
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
GUILD_NEWS = 5
GUILD_STORE = 6
class Message(BaseModel):
    # Discord message object (Discord API: Channel resource, Message structure).
    id: str
    channel_id: str
    # NOTE(review): Discord's payload field is "author"; "aurhor" looks like a
    # typo and would make validation of real API payloads fail on a missing
    # required field.  Renaming would change this model's public attribute,
    # so it is flagged here rather than silently fixed.
    aurhor: "User"
    content: str
    timestamp: datetime
    tts: bool
    mention_everyone: bool
    mentions: List["UserMentioned"]
    mention_roles: List["Role"]
    attachments: List["Attachment"]
    embeds: List["Embed"]
    pinned: bool
    type: "MessageType"
    # Fields below are optional / context-dependent in the API payload.
    guild_id: Optional[str] = None
    member: Optional["GuildMember"] = None
    mention_channels: Optional[List["ChannelMention"]] = None
    reactions: Optional[List["Reaction"]] = None
    nonce: Optional[Union[int, str]] = None
    webhook_id: Optional[str] = None
    activity: Optional["MessageActivity"] = None
    application: Optional["MessageApplication"] = None
    message_reference: Optional["MessageReference"] = None
    flags: Optional[int] = None
class MessageType(IntEnum):
DEFAULT = 0
RECIPIENT_ADD = 1
RECIPIENT_REMOVE = 2
CALL = 3
CHANNEL_NAME_CHANGE = 4
CHANNEL_ICON_CHANGE = 5
CHANNEL_PINNED_MESSAGE = 6
GUILD_MEMBER_JOIN = 7
USER_PREMIUM_GUILD_SUBSCRIPTION = 8
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_1 = 9
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_2 = 10
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_3 = 11
CHANNEL_FOLLOW_ADD = 12
GUILD_DISCOVERY_DISQUALIFIED = 14
GUILD_DISCOVERY_REQUALIFIED = 15
class MessageActivity(BaseModel):
type: int
party_id: Optional[str] = None
class MessageApplication(BaseModel):
id: str
description: str
name: str
cover_image: Optional[str] = None
icon: Optional[str] = None
class MessageReference(BaseModel):
channel_id: str
message_id: Optional[str] = None
guild_id: Optional[str] = None
class MessageActivityType(IntEnum):
JOIN = 1
SPECTATE = 2
LISTEN = 3
JOIN_REQUEST = 5
class MessageFlag(IntEnum):
CROSSPOSTED = 1 << 0
IS_CROSSPOST = 1 << 1
SUPPRESS_EMBEDS = 1 << 2
SOURCE_MESSAGE_DELETED = 1 << 3
URGENT = 1 << 4
class FollowedChannel(BaseModel):
channel_id: str
webhook_id: str
class Reaction(BaseModel):
    # Fix: every sibling model in this module subclasses pydantic BaseModel,
    # but Reaction was declared as a plain class, so its annotated names were
    # never real pydantic fields and Message.reactions could not be
    # parsed/validated.
    count: int
    me: bool
    emoji: "Emoji"
class OverwriteReceiving(BaseModel):
id: str
type: str
allow: int
allow_new: str
deny: int
deny_new: str
class OverwriteSending(BaseModel):
id: str
type: str
allow: Union[int, str]
deny: Union[int, str]
Overwrite = Union[OverwriteReceiving, OverwriteSending]
class Embed(BaseModel):
title: Optional[str] = None
type: Optional["EmbedType"] = None
description: Optional[str] = None
url: Optional[str] = None
timestamp: Optional[datetime] = None
color: Optional[int] = None
footer: Optional["EmbedFooter"] = None
image: Optional["EmbedImage"] = None
thumbnail: Optional["EmbedThumbnail"] = None
video: Optional["EmbedVideo"] = None
provider: Optional["EmbedProvider"] = None
author: Optional["EmbedAuthor"] = None
fields_: Optional[List["EmbedField"]] = Field(default=None, alias="fields")
class EmbedType(str, Enum):
rich = "rich"
image = "image"
video = "video"
gifv = "gifv"
article = "article"
link = "link"
class EmbedThumbnail(BaseModel):
url: Optional[str] = None
proxy_url: Optional[str] = None
height: Optional[int] = None
width: Optional[int] = None
class EmbedVideo(BaseModel):
url: Optional[str] = None
height: Optional[int] = None
width: Optional[int] = None
class EmbedImage(BaseModel):
url: Optional[str] = None
proxy_url: Optional[str] = None
height: Optional[int] = None
width: Optional[int] = None
class EmbedProvider(BaseModel):
name: Optional[str] = None
url: Optional[str] = None
class EmbedAuthor(BaseModel):
name: Optional[str] = None
url: Optional[str] = None
icon_url: Optional[str] = None
proxy_icon_url: Optional[str] = None
class EmbedFooter(BaseModel):
text: str
icon_url: Optional[str] = None
proxy_icon_url: Optional[str] = None
class EmbedField(BaseModel):
name: str
value: str
inline: Optional[bool] = None
class Attachment(BaseModel):
id: str
filename: str
size: int
url: str
proxy_url: str
height: Optional[int] = None
width: Optional[int] = None
class ChannelMention(BaseModel):
id: str
guild_id: str
type: "ChannelType"
name: str
class AllowedMentionType(str, Enum):
ROLE_MENTIONS = "roles"
USER_MENTIONS = "users"
EVERYONE_MENTINS = "everyone"
class AllowedMention(BaseModel):
parse: "AllowedMentionType"
roles: List[str]
users: List[str]
| [
"in9lude@gmail.com"
] | in9lude@gmail.com |
e7fb2fba235e371b46eb302c8b7833d38d0530ac | 2bde6ce86a4e42f288f05c6fa2c8a4725d019543 | /shuanfatimu/py1.py | 2a139fe0c79ee73ca529dbf1c4e8792494353e4b | [] | no_license | wangpanqiao/python | 8716e0c5f07f44929a4209ee86eb3337497bea78 | 9684562722eabb7f45c37357d1dcd2629a577580 | refs/heads/master | 2020-05-22T21:24:43.915709 | 2018-12-07T03:14:12 | 2018-12-07T03:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | def fuc(n):
c = 0
for i in range(1,n):
if(n % i == 0):
c +=i
return c
n = int(input("please input a number:"))
for i in range(n):
if(fuc(i) > n):
continue
elif(i == fuc(fuc(i)) and i < fuc(i)):
print("{0}-{1}".format(i,fuc(i)))
| [
"noreply@github.com"
] | wangpanqiao.noreply@github.com |
6fa58bf03d5ab769c4d22526c5b9f2bbec752e33 | 9c50fe573ac756093d89354886ebf498fcb73230 | /CorrectOCR/tokens/list/_fs.py | f9b7c168c01d6b6cff736de0303b061961b43d51 | [
"CC-BY-4.0"
] | permissive | xiyuan27/CorrectOCR | 241939312528c609f9674bcdec50ee4f60f48d5a | defeccf167ef0479bde6d90b803bb2fc46851734 | refs/heads/master | 2020-12-07T18:04:58.900860 | 2020-01-09T07:04:10 | 2020-01-09T07:04:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | import logging
from ._super import TokenList
@TokenList.register('fs')
class FSTokenList(TokenList):
log = logging.getLogger(f'{__name__}.FSTokenList')
def load(self, docid: str, kind: str):
from .. import Token
from ...fileio import FileIO
self.docid = docid
self.kind = kind
path = self.config.trainingPath.joinpath(f'{docid}.{kind}.csv')
self.log.debug(f'Load from {path}')
for row in FileIO.load(path):
self.append(Token.from_dict(row))
def save(self, kind: str = None, token: 'Token' = None):
from ...fileio import FileIO
if kind:
self.kind = kind
path = self.config.trainingPath.joinpath(f'{self.docid}.{self.kind}.csv')
self.log.debug(f'Save to {path}')
FileIO.save(self, path)
@staticmethod
def exists(config, docid: str, kind: str):
path = config.trainingPath.joinpath(f'{docid}.{kind}.csv')
return path.is_file() | [
"mikkel.eriksen@gmail.com"
] | mikkel.eriksen@gmail.com |
acb8fc061d465bb3933507b11b2d18656e24afbd | bbe259241170487d94d3a12ed44a6e61ad25f99b | /histogram_eq.py | e8acc51ee8a5025d0a079ec1bc7d59d17953dda7 | [] | no_license | Ravimk07/Image_Processing | a1f6b94355222a5d518840065feda3dafde8aac0 | 72154d9187daafb7b0b7ccb1bdce1b5ee06c2cff | refs/heads/master | 2021-07-11T18:41:33.341160 | 2021-05-03T10:03:25 | 2021-05-03T10:03:25 | 82,921,281 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
image = img_as_float(image)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(image, cmap=plt.cm.gray)
ax_img.set_axis_off()
ax_img.set_adjustable('box-forced')
# Display histogram
ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
print img.shape
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
print img_eq.shape
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 4), dtype=np.object)
axes[0, 0] = fig.add_subplot(2, 4, 1)
for i in range(1, 4):
axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])
for i in range(0, 4):
axes[1, i] = fig.add_subplot(2, 4, 5+i)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
# ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
# ax_img.set_title('Histogram equalization')
# ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
# ax_img.set_title('Adaptive equalization')
# ax_cdf.set_ylabel('Fraction of total intensity')
# ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show() | [
"noreply@github.com"
] | Ravimk07.noreply@github.com |
8b3f00a2cad7a8e6ff44418c62dba66e64da0793 | 153f933f33b28d8cf0ee8c1335cf32c90280d287 | /Proyectofinal.py | 9bbd557a56182d389c023d0a78d2fe1acc4df861 | [] | no_license | proyectoicc/Proyecto | fb034b1d6acb89e0e569f051eac90a8522ed28f4 | 42decab6b9fa03836edee4a7447eecad406f7449 | refs/heads/master | 2020-03-20T18:43:40.750508 | 2018-06-16T18:31:59 | 2018-06-16T18:31:59 | 137,601,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | p = str(input("Ingrese el producto: "))
new_cantidad=float(input("Ingrese la cantidad: "))
clave=str(input("Ingrese clave de comprador: "))
def pu():
if p == "P1":
return 234
elif p == "P2":
return 265
if p == "P3":
return 278
elif p == "P4":
return 299
if p == "P5":
return 334
elif p == "P6":
return 365
else:
print("Error")
def dsct(new_cantidad,p):
if new_cantidad >= 1000:
return new_cantidad*0.90*pu()
elif new_cantidad >= 100 and new_cantidad < 1000:
return new_cantidad*0.93*pu()
elif new_cantidad >= 50 and new_cantidad < 100:
return new_cantidad * 0.98*pu()
elif new_cantidad > 0 and new_cantidad < 50:
return new_cantidad*pu()
def obsequio(clave):
if clave == "CF1":
return 50
elif clave == "CF2":
return 30
elif clave == "CF3":
return 10
else:
return 0
print("El precio final es de: ", dsct(new_cantidad, p), " y su obsequio es :", obsequio)
| [
"40000508+anth24@users.noreply.github.com"
] | 40000508+anth24@users.noreply.github.com |
ba9495c780d12d5273f9746e15bd2e99e7594fb4 | 025912a232947e1d0553d09b1735d2a7db4671a4 | /OPENCV/Kinect_test.py | 28197c0e3b3cb01d20bd0903278b0ec985071a70 | [] | no_license | GmGniap/thesis_dev | 89cd3d12be288a38ed0cf7c5c7ee309906dc1a33 | 055e5e2dfd130a62f757c5c123b099eb5abf124b | refs/heads/master | 2020-03-22T00:57:51.104467 | 2018-07-21T09:09:02 | 2018-07-21T09:09:02 | 139,275,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | #import the necessary modules
import freenect
import cv2
import numpy as np
from math import tan
#function to get RGB image from kinect
def get_video():
array,_ = freenect.sync_get_video()
array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
return array
#function to get depth image from kinect
def get_depth():
array,_ = freenect.sync_get_depth()
'''distance = []
for i in array:
row = []
for j in i:
row.append(0.1236 * tan(j / 2842.5 + 1.1863))
distance.append(row)
print(str(distance))'''
array = array.astype(np.uint8)
return array
if __name__ == "__main__":
while 1:
#get a frame from RGB camera
frame = get_video()
#get a frame from depth sensor
depth = get_depth()
#display RGB image
cv2.imshow('RGB image',frame)
#display depth image
cv2.imshow('Depth image',depth)
# quit program when 'esc' key is pressed
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
| [
"thetpaingmyo43@gmail.com"
] | thetpaingmyo43@gmail.com |
1419cec96ecb7e565ae9b248b3a4e81be07bfef5 | 91d343e7fc01cfda3dbaff441972241dddb90c6b | /ХТ.tab/Misc.panel/Tools.stack/Мусор.pulldown/Nastil.pushbutton/Nastil_script.py | 1f4aa116b3719d93db696083dc3331192d44a075 | [] | no_license | Thomas84/extention-for-pyRevit | af2f1a25fba8aaed52d56320b66100513aa04986 | da7ae5cb75a4bf710f3075c25f45340a5769c2da | refs/heads/master | 2023-06-23T01:31:29.069338 | 2021-07-19T20:03:00 | 2021-07-19T20:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,691 | py | # -*- coding: utf-8 -*-
""""""
__title__ = 'Настил'
__author__ = 'SG'
import clr
clr.AddReference('System.Core')
from System.Collections.Generic import *
from Autodesk.Revit.DB import IndependentTag, XYZ, FilteredElementCollector, BuiltInCategory, Transaction, TransactionGroup, BuiltInParameter, ElementId
import sys
from Autodesk.Revit.UI.Selection import ObjectType, ISelectionFilter
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
k = 304.8
t = Transaction(doc, 'Настил')
t.Start()
sel = [doc.GetElement(id) for id in uidoc.Selection.GetElementIds()]
# for line in lines:
# print(line.LineStyle.Name == 'Рез')
# print(sel[0].Location.Curve.GetEndPoint(0))
# print(sel[0].Location.Curve.GetEndPoint(1))
# if sel:
# nass = []
# lines = []
# for el in sel:
# nass.append(el) if el.Name == 'Настил' else None
# lines.append(el) if el.Name == 'Линии детализации' else None
# if len(nass) > 1:
# print('Ошибка: выбрано несколько настилов')
# t.Commit()
# sys.exit()
# nas = nass[0]
# nas.LookupParameter('Длина линий').Set(sum([el.LookupParameter('Длина').AsDouble() for el in lines]))
els = FilteredElementCollector(doc)\
.OfCategory(BuiltInCategory.OST_GenericModel)\
.WhereElementIsNotElementType().ToElements()
decks = [el for el in els if el.Name == 'Настил']
# levels = {}
# for el in decks:
# h = round(el.Location.Point.Z * k / 10) * 10
# if h not in levels:
# levels[h] = []
# levels[h].append(el)
lines = FilteredElementCollector(doc)\
.OfCategory(BuiltInCategory.OST_Lines)\
.WhereElementIsNotElementType().ToElements()
# levels = {}
# for line in lines:
# h = round(((line.Location.Curve.GetEndPoint(0) + line.Location.Curve.GetEndPoint(0)) / 2).Z * k / 10) * 10
# if h not in levels:
# levels[h] = []
# levels[h].append(line)
d = 1 / k
def belong(line, deck):
mx = deck.get_BoundingBox(doc.ActiveView).Max
mn = deck.get_BoundingBox(doc.ActiveView).Min
point = (line.Location.Curve.GetEndPoint(0) + line.Location.Curve.GetEndPoint(1)) / 2
if mn.X + d < point.X < mx.X - d and mn.Y + d < point.Y < mx.Y - d and mn.Z < point.Z < mx.Z:
return True
return False
dct = {}
for line in lines:
if 'Белый' not in line.LineStyle.Name:
for deck in decks:
if belong(line, deck):
if deck.Id not in dct:
dct[deck.Id] = []
dct[deck.Id].append(line)
break
for deck in decks:
len = 0
if deck.Id in dct.keys():
len = sum([line.LookupParameter('Длина').AsDouble() for line in dct[deck.Id]])
deck.LookupParameter('Длина линий').Set(len)
# for deckId in dct.keys():
# deck = doc.GetElement(deckId)
# deck.LookupParameter('Длина линий').Set(sum([line.LookupParameter('Длина').AsDouble() for line in dct[deckId]]))
# for line in dct[deckId]:
# print('- ', line.Id)
for deck in decks:
len = 0
if deck.LookupParameter('в1').AsInteger():
len += deck.LookupParameter('a1').AsDouble()
len += deck.LookupParameter('b1').AsDouble()
len += deck.LookupParameter('Зазор').AsDouble() * 2
if deck.LookupParameter('в2').AsInteger():
len += deck.LookupParameter('a2').AsDouble()
len += deck.LookupParameter('b2').AsDouble()
len += deck.LookupParameter('Зазор').AsDouble() * 2
if deck.LookupParameter('в3').AsInteger():
len += deck.LookupParameter('a3').AsDouble()
len += deck.LookupParameter('b3').AsDouble()
len += deck.LookupParameter('Зазор').AsDouble() * 2
if deck.LookupParameter('в4').AsInteger():
len += deck.LookupParameter('a4').AsDouble()
len += deck.LookupParameter('b4').AsDouble()
len += deck.LookupParameter('Зазор').AsDouble() * 2
len += deck.LookupParameter('Длина линий').AsDouble()
deck.LookupParameter('ХТ Длина ОВ').Set(len * k)
a = deck.LookupParameter('A').AsDouble() * k
b = deck.LookupParameter('B').AsDouble() * k
deck.LookupParameter('ХТ Размер фитинга ОВ').Set('{:.0f}x{:.0f}'.format(a, b))
linesLen = deck.LookupParameter('Длина линий').AsDouble() * k
s = ''
if len and linesLen:
s = '{:.0f} ({:.0f})'.format(len * k, linesLen)
elif len:
s = '{:.0f}'.format(len * k)
elif linesLen:
s = '({:.0f})'.format(linesLen)
deck.LookupParameter('Комментарии').Set(s)
t.Commit()
| [
"fazacz@ya.ru"
] | fazacz@ya.ru |
a07cab13bbac62cbe9da389c04efe73253dd55ba | c6b1919498776cfc408076246390e2bba56f4c4e | /devops_tool/settings.py | e422d3a5d42866f11728863fdae9c727b4dd35e6 | [] | no_license | huozhihui/devops_tool | f2ceaf7f1828853e43859645f5ab36a00b0fa7df | 0eb7b4a14203e30bb2c262075864cec0db21829f | refs/heads/master | 2020-05-20T19:02:47.855055 | 2017-04-18T05:25:59 | 2017-04-18T05:25:59 | 84,509,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | """
Django settings for devops_tool project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xv*oxmw8)_0jw=e!f6bi1bop1#cpi4_2=jy2da04gf*1!h2he*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
LOGIN_REDIRECT_URL = '/role_manage'
# custom
# ===================================
TMP = os.path.join(BASE_DIR, 'tmp')
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
UPLOAD_FILE = (
os.path.join(TMP, 'upload_file')
)
# ANSIBLE = "/etc/ansible"
# ANSIBLE_ROLES = os.path.join(ANSIBLE, 'roles')
# ANSIBLE_YAMLS = os.path.join(ANSIBLE)
ANSIBLE = "/Users/huozhihui/huo/paas_deploy"
ANSIBLE_ROLES = os.path.join(ANSIBLE, 'roles')
ANSIBLE_YAMLS = ANSIBLE
ANSIBLE_HOSTS = ANSIBLE
ANSIBLE_INIT_USER = 'ubunt'
ANSIBLE_INIT_PASS = 'huo244'
# ===================================
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'ext_command',
'client',
'workflow',
'client.templatetags.ext_template',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'devops_tool.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
# 'DIRS': [os.path.join(os.path.dirname(__file__), 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'devops_tool.wsgi.application'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "devops_tool.routing.channel_routing",
},
}
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh_cn'
TIME_ZONE = 'Asia/Shanghai'
# TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = False
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'console.log'),
},
# 'console': {
# 'level': 'INFO',
# 'class': 'logging.StreamHandler',
# # 'formatter': 'simple'
# },
},
'loggers': {
'django.request': {
'handlers': ['file'],
'level': 'INFO',
'propagate': False,
},
},
}
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'console': {
# 'class': 'logging.StreamHandler',
# # 'level': 'INFO',
# # 'filename': os.path.join(BASE_DIR, 'console.log'),
# # 'maxBytes': 1024 * 1024 * 15, # 15MB
# # 'backupCount': 10,
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['console'],
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
# },
# },
# }
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'handlers': {
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler'
# },
# 'applogfile': {
# 'level':'INFO',
# 'class':'logging.handlers.RotatingFileHandler',
# 'filename': os.path.join(BASE_DIR, 'APPNAME.log'),
# 'maxBytes': 1024*1024*15, # 15MB
# 'backupCount': 10,
# },
# },
# 'loggers': {
# 'django.request': {
# 'handlers': ['applogfile'],
# 'level': 'INFO',
# 'propagate': True,
# },
# }
# }
| [
"240516816@qq.com"
] | 240516816@qq.com |
a4056d802d7c03cae1448c6bd3ee6630b1581df5 | 5cf7b8e028a4f4d88fc6e57563632780f9490d67 | /text.py | acae250a9310e87f5d62942b11ab2fc0d7c22b88 | [
"MIT"
] | permissive | QuirkyDevil/alex-boat-old | 75dff27ab020299982dfaa80e2bc567f4de87254 | 6ca1f883a13a49b0377d434e22bb25366ff64b26 | refs/heads/main | 2023-07-08T19:10:58.340517 | 2021-08-23T17:05:31 | 2021-08-23T17:05:31 | 399,185,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from discord.ext import commands
import asyncio
class text(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("fun cog loaded")
@commands.command()
async def on_message(self, message):
if message.content.startswith('Alex!'):
await message.reply('Hallo there! wat do you want me to do?', mention_author=True)
if message.content.startswith('is kath dumb?'):
await message.reply('she is.. (not) till the time', mention_author=True)
if message.content.startswith('do you like kath'):
await message.reply('next please', mention_author=False)
await asyncio.sleep(3)
await message.channel.send(f'why would you ask that?')
def setup(client):
client.add_cog(text(client)) | [
"81952913+QuirkyDevil@users.noreply.github.com"
] | 81952913+QuirkyDevil@users.noreply.github.com |
776daaafd211d28ee6ca163c5fb8bd99c5595928 | d2fcd448b53d79fad8ece056ff67063bd69bd502 | /bextr/app/assets.py | afba988931f7b4ee7076f65aa023b5f7a4d9161f | [] | no_license | Bextr/Bextr-Website | 95cbda2bb485439ee0d875397c49d874e8363a13 | a12a520db20f6ea08786239f680248ace4d9aa4c | refs/heads/master | 2016-09-06T17:47:46.615185 | 2015-01-08T04:31:38 | 2015-01-08T04:31:38 | 27,047,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from flask.ext.assets import Environment, Bundle
assets = Environment()
js = Bundle('js/jquery-ui.js', 'js/script.js',
filters='rjsmin', output='js/packed.min.js')
assets.register('js_all', js)
css = Bundle('css/style.css', 'css/colorbox.css',
filters='cssutils', output='css/packed.min.css')
assets.register('css_all', css)
| [
"dessant@kivy.org"
] | dessant@kivy.org |
9aa6b11bb08a0531530e3856638408b4a2fd9be2 | faabba24ec8cab081ac1821a719839ce14f11a29 | /project.py | 5d09f38f60ba7635170e709c32887c9a8123a8d1 | [] | no_license | PROPERAT/pyCue | 2e73f4e7762f4439540fc1a58320a69d45882948 | d1930cd345c656e774e960696037bdba11a4e9c1 | refs/heads/master | 2016-09-09T21:39:53.496816 | 2012-06-28T10:31:43 | 2012-06-28T10:31:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,265 | py | import pygame, sys
from OpenGL.GL import *
from OpenGL.GLU import *
from VideoDriver import *
from Node import *
from TestCubeMesh import *
from Camera import *
class GameEvent:
def __init__(self, code):
self.code = code
class PygameWindow:
def __init__(self):
pygame.init()
self.key_states = [0 for i in range(0,512)]
self.game_event_queue = []
self.frame_time = 0
self.last_time = 0
self.eye_pos = (0,0,0)
def push_game_event(self, event):
self.game_event_queue.append(event)
def start_main_loop(self):
self.last_time = pygame.time.get_ticks()
while True:
current_time = pygame.time.get_ticks()
self.frame_time = current_time - self.last_time
self.last_time = current_time
self.process_system_events()
self.read_user_input()
self.process_game_events()
self.update_game()
self.render_window()
pygame.display.flip()
def process_system_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key >= 0 and event.key <= 512:
self.key_states[event.key] = True
elif event.type == pygame.KEYUP:
if event.key >= 0 and event.key <= 512:
self.key_states[event.key] = False
def read_user_input(self):
for key, pressed in enumerate(self.key_states):
if pressed:
if key == ord('w'):
self.game_event_queue.append(GameEvent(1))
elif key == ord('s'):
self.game_event_queue.append(GameEvent(2))
def process_game_events(self):
for event in self.game_event_queue:
if event.code == 1:
self.eye_pos = (self.eye_pos[0], self.eye_pos[1], self.eye_pos[2] -1)
elif event.code == 2:
self.eye_pos = (self.eye_pos[0], self.eye_pos[1], self.eye_pos[2] +1)
del self.game_event_queue[:]
def update_game(self):
pass
def render_window(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor4f(1,1,1,1)
glLoadMatrixf([2,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1])
gluLookAt(self.eye_pos[0],self.eye_pos[1],self.eye_pos[2], self.eye_pos[0],self.eye_pos[1],self.eye_pos[2]-1, 0,1,0)
pointer = [[0.0, 20.0, -50.0],[20.0, 20.0, -50.0],[20.0, 0.0, -50.0],[0.0, 0.0, -50.0]]
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointerf(pointer)
glDrawElementsui(GL_QUADS, [0,1,2,3])
#a = PygameWindow()
#a.start_main_loop()
n = Node(10)
n.set_mesh(TestCubeMesh())
n.set_scale(vector([5,5,5,1]))
n.set_rotation(vector([0,0,90,1]))
d = OpenGLDriver()
d.start_video()
eye_pos = vector([10.0,0.0,-10.0,1.0])
c = Camera()
c.set_eye(eye_pos)
c.set_reference(vector([0,0,0,1]))
alpha = 0.1
from PIL import Image
img = Image.open('./ball_8.jpg') # .jpg, .bmp, etc. also work
img_data = array(list(img.getdata()), int8)
texture = glGenTextures(1)
glPixelStorei(GL_UNPACK_ALIGNMENT,1)
glBindTexture(GL_TEXTURE_2D, texture)
glEnable(GL_TEXTURE_2D)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.size[0], img.size[1], 0, GL_RGB, GL_UNSIGNED_BYTE, img_data)
def process_system_events():
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYUP:
c.set_eye(vector([c.get_eye().x*cos(alpha)-c.get_eye().z*sin(alpha),c.get_eye().y, c.get_eye().x*sin(alpha)+c.get_eye().z*cos(alpha),1]))
pass
while(1):
process_system_events()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor4f(1,1,1,1)
d.render_camera(c, 0)
d.render_node(n)
pygame.display.flip() | [
"PROPERAT@properat-81cc9d.(none)"
] | PROPERAT@properat-81cc9d.(none) |
0bcd2606a9ecb387878f6e66e500284c4af9354d | d258b0e30853e439bd0dcd4b660543c8d17dc757 | /soloproject/appone/admin.py | 2aab4a38906bc218d57b47902044a7c697cc6025 | [] | no_license | Mholliday6611/BESTPROJECTEVER | fb8d00e7182b3bc1d2954fb8cdc3dba58f3bbb42 | 0a934de0ff055054373cd49b76744224ef6e6467 | refs/heads/master | 2021-01-13T04:13:30.377819 | 2017-01-13T02:24:31 | 2017-01-13T02:24:31 | 77,493,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django.contrib import admin
from models import UserProfile, Post, Category, Page, Comment
# Register your models here.
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug':('title',)}
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug':('name',)}
admin.site.register(Category, CategoryAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Comment)
admin.site.register(Page)
admin.site.register(UserProfile)
| [
"danielorrego02@gmail.com"
] | danielorrego02@gmail.com |
6af287c25e7567cc97a37b41e9b9df7d8d589d3a | 69427716f39ddb8541b7dca39d26015a26e04104 | /学习脚本/Python基础学习脚本/select_socket_server.py | 619aaeec0a826ffbd3a7f9299ce892eb9ef5e5a3 | [] | no_license | xiatian0918/auto_scripts | a0fa80f3ec8a5e49e1b049ebed39a8ae3e7cdf7a | 413c614260340557cf9e615b1339eae68a8f9acf | refs/heads/master | 2020-05-14T13:32:46.556775 | 2020-01-21T00:18:56 | 2020-01-21T00:18:56 | 181,812,978 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author: xiatian
import select,socket,sys,queue
server = socket.socket()
server.bind(('localhost',9000))
server.listen(1000)
server.setblocking(False) #不阻塞
inputs = [server,]
outputs = []
readable , writeable, exceptional = select.select(inputs, outputs, inputs)
print(readable,writeable,exceptional)
for i in readable:
if r is server: #代表来了一个新链接
conn,addr = server.accept()
print("来了个新链接",addr)
inputs.append(conn)
else:
data = conn.recv(1024)
print("收到数据",data)
| [
"18810434724@163.com"
] | 18810434724@163.com |
7f9ea1866114fe062661f28006ec80d13194dd03 | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N875_Koko_Eating_Bananas.py | 2d632a0fdaa6c0078dec7406cb6fa8e0e852a916 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | class Solution(object):
def minEatingSpeed(self, piles, H):
"""
:type piles: List[int]
:type H: int
:rtype: int
"""
length = len(piles)
if length == H:
return max(piles)
right = max(piles)
total = sum(piles)
if total <= H:
return 1
left = total // H
while left < right:
mid = (right - left) // 2 + left
if self.helper(mid, piles, H):
right = mid
else:
left = mid + 1
return left
def helper(self, value, piles, H):
hours = 0
for pile in piles:
if pile % value:
hours += pile // value + 1
else:
hours += pile // value
if hours > H:
return False
else:
return True
| [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
1d1b1eda7603a88d4beacbcf621d06a32779d4e2 | b52d968d8af2d31a93ee75a4dc02667794579656 | /StepLambda/lambdaauthentication.py | 0bbad12f7a060c245d1308463da68a484ab3ee91 | [] | no_license | Rocha57/TicketingAWS | d8f1e4212111885b37b97b6b9adc20ef5b77bde8 | f9cf5ac81cbb63196ba4bd84e13bd90a7a91b761 | refs/heads/master | 2021-03-24T10:41:08.184784 | 2017-05-27T17:45:01 | 2017-05-27T17:45:01 | 92,286,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | import boto3
client_db = boto3.client('dynamodb')
client_rek = boto3.client('rekognition')
table_name = 'project3'
bucket_name = 'esproject3bucket'
bucket_register_name = 'esproject3bucketregister'
def rekognition(filename, table_items):
for item in table_items['Items']:
response = client_rek.compare_faces(
SourceImage={
'S3Object': {
'Bucket': bucket_name,
'Name': filename,
}
},
TargetImage={
'S3Object': {
'Bucket': bucket_register_name,
'Name': item['filename']['S'],
}
},
SimilarityThreshold=90
)
if response['FaceMatches']:
print(response['FaceMatches'][0]['Similarity'])
if response['FaceMatches'][0]['Similarity'] > 90:
return item['name']['S']
return None
def lambda_handler(event, context):
# TODO implement
filename = event['filename']
cost = event['cost']
table_items = client_db.scan(TableName=table_name)
name = rekognition(filename, table_items)
correct = 0
if name != None:
correct = 1
return {'name' : name, 'cost' : str(cost), 'correct' : correct} | [
"fmrocha@student.dei.uc.pt"
] | fmrocha@student.dei.uc.pt |
a2519d05e37adb104bac8b8b46a2b7f4eceb98db | 023c7135f9d1ceb320b6b847d1a46ed679adfbf2 | /superlists/lists/migrations/0001_initial.py | b2a0376bc479bebc385384ac0450971865f8a0af | [] | no_license | mayankkapoor/Test-Driven-Development-with-Python | 5a49644c29d6bae47c22496ebf358337903b2ef0 | 3b94be497d35355b3f4a3fa8e89104b7652c6e65 | refs/heads/master | 2021-01-23T07:10:39.498040 | 2015-02-11T03:12:39 | 2015-02-11T03:12:39 | 28,521,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('text', models.TextField(default='')),
],
options={
},
bases=(models.Model,),
),
]
| [
"mayankkapoormail@gmail.com"
] | mayankkapoormail@gmail.com |
85a8fe446187a595ad11c4c0a6dba3786a9af595 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/web/v20201001/web_app_auth_settings.py | 8c0f10d3d946c6e637d1c161b9fabc2e4c33aae2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85,144 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['WebAppAuthSettingsArgs', 'WebAppAuthSettings']
@pulumi.input_type
class WebAppAuthSettingsArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
aad_claims_authorization: Optional[pulumi.Input[str]] = None,
additional_login_params: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_external_redirect_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
auth_file_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_certificate_thumbprint: Optional[pulumi.Input[str]] = None,
client_secret_setting_name: Optional[pulumi.Input[str]] = None,
default_provider: Optional[pulumi.Input['BuiltInAuthenticationProvider']] = None,
enabled: Optional[pulumi.Input[bool]] = None,
facebook_app_id: Optional[pulumi.Input[str]] = None,
facebook_app_secret: Optional[pulumi.Input[str]] = None,
facebook_app_secret_setting_name: Optional[pulumi.Input[str]] = None,
facebook_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
git_hub_client_id: Optional[pulumi.Input[str]] = None,
git_hub_client_secret: Optional[pulumi.Input[str]] = None,
git_hub_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
git_hub_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
google_client_id: Optional[pulumi.Input[str]] = None,
google_client_secret: Optional[pulumi.Input[str]] = None,
google_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
google_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
is_auth_from_file: Optional[pulumi.Input[str]] = None,
issuer: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
microsoft_account_client_id: Optional[pulumi.Input[str]] = None,
microsoft_account_client_secret: Optional[pulumi.Input[str]] = None,
microsoft_account_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
microsoft_account_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
runtime_version: Optional[pulumi.Input[str]] = None,
token_refresh_extension_hours: Optional[pulumi.Input[float]] = None,
token_store_enabled: Optional[pulumi.Input[bool]] = None,
twitter_consumer_key: Optional[pulumi.Input[str]] = None,
twitter_consumer_secret: Optional[pulumi.Input[str]] = None,
twitter_consumer_secret_setting_name: Optional[pulumi.Input[str]] = None,
unauthenticated_client_action: Optional[pulumi.Input['UnauthenticatedClientAction']] = None,
validate_issuer: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a WebAppAuthSettings resource.
:param pulumi.Input[str] name: Name of web app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] aad_claims_authorization: Gets a JSON string containing the Azure AD Acl settings.
:param pulumi.Input[Sequence[pulumi.Input[str]]] additional_login_params: Login parameters to send to the OpenID Connect authorization endpoint when
a user logs in. Each parameter must be in the form "key=value".
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_audiences: Allowed audience values to consider when validating JWTs issued by
Azure Active Directory. Note that the <code>ClientID</code> value is always considered an
allowed audience, regardless of this setting.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_external_redirect_urls: External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored.
This is an advanced setting typically only needed by Windows Store application backends.
Note that URLs within the current domain are always implicitly allowed.
:param pulumi.Input[str] auth_file_path: The path of the config file containing auth settings.
If the path is relative, base will the site's root directory.
:param pulumi.Input[str] client_id: The Client ID of this relying party application, known as the client_id.
This setting is required for enabling OpenID Connection authentication with Azure Active Directory or
other 3rd party OpenID Connect providers.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
:param pulumi.Input[str] client_secret: The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
This setting is optional. If no client secret is configured, the OpenID Connect implicit auth flow is used to authenticate end users.
Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
:param pulumi.Input[str] client_secret_certificate_thumbprint: An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as
a replacement for the Client Secret. It is also optional.
:param pulumi.Input[str] client_secret_setting_name: The app setting name that contains the client secret of the relying party application.
:param pulumi.Input['BuiltInAuthenticationProvider'] default_provider: The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
:param pulumi.Input[bool] enabled: <code>true</code> if the Authentication / Authorization feature is enabled for the current app; otherwise, <code>false</code>.
:param pulumi.Input[str] facebook_app_id: The App ID of the Facebook app used for login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
:param pulumi.Input[str] facebook_app_secret: The App Secret of the Facebook app used for Facebook Login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
:param pulumi.Input[str] facebook_app_secret_setting_name: The app setting name that contains the app secret used for Facebook Login.
:param pulumi.Input[Sequence[pulumi.Input[str]]] facebook_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Facebook Login authentication.
This setting is optional.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
:param pulumi.Input[str] git_hub_client_id: The Client Id of the GitHub app used for login.
This setting is required for enabling Github login
:param pulumi.Input[str] git_hub_client_secret: The Client Secret of the GitHub app used for Github Login.
This setting is required for enabling Github login.
:param pulumi.Input[str] git_hub_client_secret_setting_name: The app setting name that contains the client secret of the Github
app used for GitHub Login.
:param pulumi.Input[Sequence[pulumi.Input[str]]] git_hub_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
This setting is optional
:param pulumi.Input[str] google_client_id: The OpenID Connect Client ID for the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
:param pulumi.Input[str] google_client_secret: The client secret associated with the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
:param pulumi.Input[str] google_client_secret_setting_name: The app setting name that contains the client secret associated with
the Google web application.
:param pulumi.Input[Sequence[pulumi.Input[str]]] google_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
:param pulumi.Input[str] is_auth_from_file: "true" if the auth config settings should be read from a file,
"false" otherwise
:param pulumi.Input[str] issuer: The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/.
This URI is a case-sensitive identifier for the token issuer.
More information on OpenID Connect Discovery: http://openid.net/specs/openid-connect-discovery-1_0.html
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] microsoft_account_client_id: The OAuth 2.0 client ID that was created for the app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
:param pulumi.Input[str] microsoft_account_client_secret: The OAuth 2.0 client secret that was created for the app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
:param pulumi.Input[str] microsoft_account_client_secret_setting_name: The app setting name containing the OAuth 2.0 client secret that was created for the
app used for authentication.
:param pulumi.Input[Sequence[pulumi.Input[str]]] microsoft_account_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
This setting is optional. If not specified, "wl.basic" is used as the default scope.
Microsoft Account Scopes and permissions documentation: https://msdn.microsoft.com/en-us/library/dn631845.aspx
:param pulumi.Input[str] runtime_version: The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
The setting in this value can control the behavior of certain features in the Authentication / Authorization module.
:param pulumi.Input[float] token_refresh_extension_hours: The number of hours after session token expiration that a session token can be used to
call the token refresh API. The default is 72 hours.
:param pulumi.Input[bool] token_store_enabled: <code>true</code> to durably store platform-specific security tokens that are obtained during login flows; otherwise, <code>false</code>.
The default is <code>false</code>.
:param pulumi.Input[str] twitter_consumer_key: The OAuth 1.0a consumer key of the Twitter application used for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
:param pulumi.Input[str] twitter_consumer_secret: The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
:param pulumi.Input[str] twitter_consumer_secret_setting_name: The app setting name that contains the OAuth 1.0a consumer secret of the Twitter
application used for sign-in.
:param pulumi.Input['UnauthenticatedClientAction'] unauthenticated_client_action: The action to take when an unauthenticated client attempts to access the app.
:param pulumi.Input[bool] validate_issuer: Gets a value indicating whether the issuer should be a valid HTTPS url and be validated as such.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if aad_claims_authorization is not None:
pulumi.set(__self__, "aad_claims_authorization", aad_claims_authorization)
if additional_login_params is not None:
pulumi.set(__self__, "additional_login_params", additional_login_params)
if allowed_audiences is not None:
pulumi.set(__self__, "allowed_audiences", allowed_audiences)
if allowed_external_redirect_urls is not None:
pulumi.set(__self__, "allowed_external_redirect_urls", allowed_external_redirect_urls)
if auth_file_path is not None:
pulumi.set(__self__, "auth_file_path", auth_file_path)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_secret_certificate_thumbprint is not None:
pulumi.set(__self__, "client_secret_certificate_thumbprint", client_secret_certificate_thumbprint)
if client_secret_setting_name is not None:
pulumi.set(__self__, "client_secret_setting_name", client_secret_setting_name)
if default_provider is not None:
pulumi.set(__self__, "default_provider", default_provider)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if facebook_app_id is not None:
pulumi.set(__self__, "facebook_app_id", facebook_app_id)
if facebook_app_secret is not None:
pulumi.set(__self__, "facebook_app_secret", facebook_app_secret)
if facebook_app_secret_setting_name is not None:
pulumi.set(__self__, "facebook_app_secret_setting_name", facebook_app_secret_setting_name)
if facebook_o_auth_scopes is not None:
pulumi.set(__self__, "facebook_o_auth_scopes", facebook_o_auth_scopes)
if git_hub_client_id is not None:
pulumi.set(__self__, "git_hub_client_id", git_hub_client_id)
if git_hub_client_secret is not None:
pulumi.set(__self__, "git_hub_client_secret", git_hub_client_secret)
if git_hub_client_secret_setting_name is not None:
pulumi.set(__self__, "git_hub_client_secret_setting_name", git_hub_client_secret_setting_name)
if git_hub_o_auth_scopes is not None:
pulumi.set(__self__, "git_hub_o_auth_scopes", git_hub_o_auth_scopes)
if google_client_id is not None:
pulumi.set(__self__, "google_client_id", google_client_id)
if google_client_secret is not None:
pulumi.set(__self__, "google_client_secret", google_client_secret)
if google_client_secret_setting_name is not None:
pulumi.set(__self__, "google_client_secret_setting_name", google_client_secret_setting_name)
if google_o_auth_scopes is not None:
pulumi.set(__self__, "google_o_auth_scopes", google_o_auth_scopes)
if is_auth_from_file is not None:
pulumi.set(__self__, "is_auth_from_file", is_auth_from_file)
if issuer is not None:
pulumi.set(__self__, "issuer", issuer)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if microsoft_account_client_id is not None:
pulumi.set(__self__, "microsoft_account_client_id", microsoft_account_client_id)
if microsoft_account_client_secret is not None:
pulumi.set(__self__, "microsoft_account_client_secret", microsoft_account_client_secret)
if microsoft_account_client_secret_setting_name is not None:
pulumi.set(__self__, "microsoft_account_client_secret_setting_name", microsoft_account_client_secret_setting_name)
if microsoft_account_o_auth_scopes is not None:
pulumi.set(__self__, "microsoft_account_o_auth_scopes", microsoft_account_o_auth_scopes)
if runtime_version is not None:
pulumi.set(__self__, "runtime_version", runtime_version)
if token_refresh_extension_hours is not None:
pulumi.set(__self__, "token_refresh_extension_hours", token_refresh_extension_hours)
if token_store_enabled is not None:
pulumi.set(__self__, "token_store_enabled", token_store_enabled)
if twitter_consumer_key is not None:
pulumi.set(__self__, "twitter_consumer_key", twitter_consumer_key)
if twitter_consumer_secret is not None:
pulumi.set(__self__, "twitter_consumer_secret", twitter_consumer_secret)
if twitter_consumer_secret_setting_name is not None:
pulumi.set(__self__, "twitter_consumer_secret_setting_name", twitter_consumer_secret_setting_name)
if unauthenticated_client_action is not None:
pulumi.set(__self__, "unauthenticated_client_action", unauthenticated_client_action)
if validate_issuer is not None:
pulumi.set(__self__, "validate_issuer", validate_issuer)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of web app.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="aadClaimsAuthorization")
def aad_claims_authorization(self) -> Optional[pulumi.Input[str]]:
"""
Gets a JSON string containing the Azure AD Acl settings.
"""
return pulumi.get(self, "aad_claims_authorization")
@aad_claims_authorization.setter
def aad_claims_authorization(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aad_claims_authorization", value)
@property
@pulumi.getter(name="additionalLoginParams")
def additional_login_params(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Login parameters to send to the OpenID Connect authorization endpoint when
a user logs in. Each parameter must be in the form "key=value".
"""
return pulumi.get(self, "additional_login_params")
@additional_login_params.setter
def additional_login_params(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "additional_login_params", value)
@property
@pulumi.getter(name="allowedAudiences")
def allowed_audiences(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed audience values to consider when validating JWTs issued by
Azure Active Directory. Note that the <code>ClientID</code> value is always considered an
allowed audience, regardless of this setting.
"""
return pulumi.get(self, "allowed_audiences")
@allowed_audiences.setter
def allowed_audiences(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_audiences", value)
@property
@pulumi.getter(name="allowedExternalRedirectUrls")
def allowed_external_redirect_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored.
This is an advanced setting typically only needed by Windows Store application backends.
Note that URLs within the current domain are always implicitly allowed.
"""
return pulumi.get(self, "allowed_external_redirect_urls")
@allowed_external_redirect_urls.setter
def allowed_external_redirect_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_external_redirect_urls", value)
@property
@pulumi.getter(name="authFilePath")
def auth_file_path(self) -> Optional[pulumi.Input[str]]:
"""
The path of the config file containing auth settings.
If the path is relative, base will the site's root directory.
"""
return pulumi.get(self, "auth_file_path")
@auth_file_path.setter
def auth_file_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_file_path", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The Client ID of this relying party application, known as the client_id.
This setting is required for enabling OpenID Connection authentication with Azure Active Directory or
other 3rd party OpenID Connect providers.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
This setting is optional. If no client secret is configured, the OpenID Connect implicit auth flow is used to authenticate end users.
Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="clientSecretCertificateThumbprint")
def client_secret_certificate_thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as
a replacement for the Client Secret. It is also optional.
"""
return pulumi.get(self, "client_secret_certificate_thumbprint")
@client_secret_certificate_thumbprint.setter
def client_secret_certificate_thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret_certificate_thumbprint", value)
@property
@pulumi.getter(name="clientSecretSettingName")
def client_secret_setting_name(self) -> Optional[pulumi.Input[str]]:
"""
The app setting name that contains the client secret of the relying party application.
"""
return pulumi.get(self, "client_secret_setting_name")
@client_secret_setting_name.setter
def client_secret_setting_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret_setting_name", value)
@property
@pulumi.getter(name="defaultProvider")
def default_provider(self) -> Optional[pulumi.Input['BuiltInAuthenticationProvider']]:
"""
The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
"""
return pulumi.get(self, "default_provider")
@default_provider.setter
def default_provider(self, value: Optional[pulumi.Input['BuiltInAuthenticationProvider']]):
pulumi.set(self, "default_provider", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
<code>true</code> if the Authentication / Authorization feature is enabled for the current app; otherwise, <code>false</code>.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="facebookAppId")
def facebook_app_id(self) -> Optional[pulumi.Input[str]]:
"""
The App ID of the Facebook app used for login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_app_id")
@facebook_app_id.setter
def facebook_app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "facebook_app_id", value)
@property
@pulumi.getter(name="facebookAppSecret")
def facebook_app_secret(self) -> Optional[pulumi.Input[str]]:
"""
The App Secret of the Facebook app used for Facebook Login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_app_secret")
@facebook_app_secret.setter
def facebook_app_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "facebook_app_secret", value)
@property
@pulumi.getter(name="facebookAppSecretSettingName")
def facebook_app_secret_setting_name(self) -> Optional[pulumi.Input[str]]:
"""
The app setting name that contains the app secret used for Facebook Login.
"""
return pulumi.get(self, "facebook_app_secret_setting_name")
@facebook_app_secret_setting_name.setter
def facebook_app_secret_setting_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "facebook_app_secret_setting_name", value)
@property
@pulumi.getter(name="facebookOAuthScopes")
def facebook_o_auth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The OAuth 2.0 scopes that will be requested as part of Facebook Login authentication.
This setting is optional.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_o_auth_scopes")
@facebook_o_auth_scopes.setter
def facebook_o_auth_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "facebook_o_auth_scopes", value)
@property
@pulumi.getter(name="gitHubClientId")
def git_hub_client_id(self) -> Optional[pulumi.Input[str]]:
"""
The Client Id of the GitHub app used for login.
This setting is required for enabling Github login
"""
return pulumi.get(self, "git_hub_client_id")
@git_hub_client_id.setter
def git_hub_client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "git_hub_client_id", value)
@property
@pulumi.getter(name="gitHubClientSecret")
def git_hub_client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Client Secret of the GitHub app used for Github Login.
This setting is required for enabling Github login.
"""
return pulumi.get(self, "git_hub_client_secret")
@git_hub_client_secret.setter
def git_hub_client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "git_hub_client_secret", value)
@property
@pulumi.getter(name="gitHubClientSecretSettingName")
def git_hub_client_secret_setting_name(self) -> Optional[pulumi.Input[str]]:
"""
The app setting name that contains the client secret of the Github
app used for GitHub Login.
"""
return pulumi.get(self, "git_hub_client_secret_setting_name")
@git_hub_client_secret_setting_name.setter
def git_hub_client_secret_setting_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "git_hub_client_secret_setting_name", value)
@property
@pulumi.getter(name="gitHubOAuthScopes")
def git_hub_o_auth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
This setting is optional
"""
return pulumi.get(self, "git_hub_o_auth_scopes")
@git_hub_o_auth_scopes.setter
def git_hub_o_auth_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "git_hub_o_auth_scopes", value)
@property
@pulumi.getter(name="googleClientId")
def google_client_id(self) -> Optional[pulumi.Input[str]]:
"""
The OpenID Connect Client ID for the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
"""
return pulumi.get(self, "google_client_id")
@google_client_id.setter
def google_client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "google_client_id", value)
@property
@pulumi.getter(name="googleClientSecret")
def google_client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The client secret associated with the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
"""
return pulumi.get(self, "google_client_secret")
@google_client_secret.setter
def google_client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "google_client_secret", value)
@property
@pulumi.getter(name="googleClientSecretSettingName")
def google_client_secret_setting_name(self) -> Optional[pulumi.Input[str]]:
"""
The app setting name that contains the client secret associated with
the Google web application.
"""
return pulumi.get(self, "google_client_secret_setting_name")
@google_client_secret_setting_name.setter
def google_client_secret_setting_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "google_client_secret_setting_name", value)
@property
@pulumi.getter(name="googleOAuthScopes")
def google_o_auth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
"""
return pulumi.get(self, "google_o_auth_scopes")
@google_o_auth_scopes.setter
def google_o_auth_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "google_o_auth_scopes", value)
@property
@pulumi.getter(name="isAuthFromFile")
def is_auth_from_file(self) -> Optional[pulumi.Input[str]]:
"""
"true" if the auth config settings should be read from a file,
"false" otherwise
"""
return pulumi.get(self, "is_auth_from_file")
@is_auth_from_file.setter
def is_auth_from_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "is_auth_from_file", value)
@property
@pulumi.getter
def issuer(self) -> Optional[pulumi.Input[str]]:
"""
The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/.
This URI is a case-sensitive identifier for the token issuer.
More information on OpenID Connect Discovery: http://openid.net/specs/openid-connect-discovery-1_0.html
"""
return pulumi.get(self, "issuer")
@issuer.setter
def issuer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "issuer", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="microsoftAccountClientId")
def microsoft_account_client_id(self) -> Optional[pulumi.Input[str]]:
"""
The OAuth 2.0 client ID that was created for the app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
"""
return pulumi.get(self, "microsoft_account_client_id")
@microsoft_account_client_id.setter
def microsoft_account_client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "microsoft_account_client_id", value)
@property
@pulumi.getter(name="microsoftAccountClientSecret")
def microsoft_account_client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The OAuth 2.0 client secret that was created for the app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
"""
return pulumi.get(self, "microsoft_account_client_secret")
@microsoft_account_client_secret.setter
def microsoft_account_client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "microsoft_account_client_secret", value)
@property
@pulumi.getter(name="microsoftAccountClientSecretSettingName")
def microsoft_account_client_secret_setting_name(self) -> Optional[pulumi.Input[str]]:
"""
The app setting name containing the OAuth 2.0 client secret that was created for the
app used for authentication.
"""
return pulumi.get(self, "microsoft_account_client_secret_setting_name")
@microsoft_account_client_secret_setting_name.setter
def microsoft_account_client_secret_setting_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "microsoft_account_client_secret_setting_name", value)
@property
@pulumi.getter(name="microsoftAccountOAuthScopes")
def microsoft_account_o_auth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
This setting is optional. If not specified, "wl.basic" is used as the default scope.
Microsoft Account Scopes and permissions documentation: https://msdn.microsoft.com/en-us/library/dn631845.aspx
"""
return pulumi.get(self, "microsoft_account_o_auth_scopes")
@microsoft_account_o_auth_scopes.setter
def microsoft_account_o_auth_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "microsoft_account_o_auth_scopes", value)
@property
@pulumi.getter(name="runtimeVersion")
def runtime_version(self) -> Optional[pulumi.Input[str]]:
"""
The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
The setting in this value can control the behavior of certain features in the Authentication / Authorization module.
"""
return pulumi.get(self, "runtime_version")
@runtime_version.setter
def runtime_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "runtime_version", value)
@property
@pulumi.getter(name="tokenRefreshExtensionHours")
def token_refresh_extension_hours(self) -> Optional[pulumi.Input[float]]:
"""
The number of hours after session token expiration that a session token can be used to
call the token refresh API. The default is 72 hours.
"""
return pulumi.get(self, "token_refresh_extension_hours")
@token_refresh_extension_hours.setter
def token_refresh_extension_hours(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "token_refresh_extension_hours", value)
@property
@pulumi.getter(name="tokenStoreEnabled")
def token_store_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
<code>true</code> to durably store platform-specific security tokens that are obtained during login flows; otherwise, <code>false</code>.
The default is <code>false</code>.
"""
return pulumi.get(self, "token_store_enabled")
@token_store_enabled.setter
def token_store_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "token_store_enabled", value)
@property
@pulumi.getter(name="twitterConsumerKey")
def twitter_consumer_key(self) -> Optional[pulumi.Input[str]]:
"""
The OAuth 1.0a consumer key of the Twitter application used for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
"""
return pulumi.get(self, "twitter_consumer_key")
@twitter_consumer_key.setter
def twitter_consumer_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "twitter_consumer_key", value)
@property
@pulumi.getter(name="twitterConsumerSecret")
def twitter_consumer_secret(self) -> Optional[pulumi.Input[str]]:
"""
The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
"""
return pulumi.get(self, "twitter_consumer_secret")
@twitter_consumer_secret.setter
def twitter_consumer_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "twitter_consumer_secret", value)
@property
@pulumi.getter(name="twitterConsumerSecretSettingName")
def twitter_consumer_secret_setting_name(self) -> Optional[pulumi.Input[str]]:
"""
The app setting name that contains the OAuth 1.0a consumer secret of the Twitter
application used for sign-in.
"""
return pulumi.get(self, "twitter_consumer_secret_setting_name")
@twitter_consumer_secret_setting_name.setter
def twitter_consumer_secret_setting_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "twitter_consumer_secret_setting_name", value)
@property
@pulumi.getter(name="unauthenticatedClientAction")
def unauthenticated_client_action(self) -> Optional[pulumi.Input['UnauthenticatedClientAction']]:
"""
The action to take when an unauthenticated client attempts to access the app.
"""
return pulumi.get(self, "unauthenticated_client_action")
@unauthenticated_client_action.setter
def unauthenticated_client_action(self, value: Optional[pulumi.Input['UnauthenticatedClientAction']]):
pulumi.set(self, "unauthenticated_client_action", value)
@property
@pulumi.getter(name="validateIssuer")
def validate_issuer(self) -> Optional[pulumi.Input[bool]]:
"""
Gets a value indicating whether the issuer should be a valid HTTPS url and be validated as such.
"""
return pulumi.get(self, "validate_issuer")
@validate_issuer.setter
def validate_issuer(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "validate_issuer", value)
class WebAppAuthSettings(pulumi.CustomResource):
    @overload
    # NOTE: typing-only overload for the keyword-argument construction form.
    # The body is intentionally `...`; the runtime dispatch happens in the real
    # __init__ below, which forwards to _internal_init.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 aad_claims_authorization: Optional[pulumi.Input[str]] = None,
                 additional_login_params: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_external_redirect_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 auth_file_path: Optional[pulumi.Input[str]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 client_secret_certificate_thumbprint: Optional[pulumi.Input[str]] = None,
                 client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 default_provider: Optional[pulumi.Input['BuiltInAuthenticationProvider']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 facebook_app_id: Optional[pulumi.Input[str]] = None,
                 facebook_app_secret: Optional[pulumi.Input[str]] = None,
                 facebook_app_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 facebook_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 git_hub_client_id: Optional[pulumi.Input[str]] = None,
                 git_hub_client_secret: Optional[pulumi.Input[str]] = None,
                 git_hub_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 git_hub_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 google_client_id: Optional[pulumi.Input[str]] = None,
                 google_client_secret: Optional[pulumi.Input[str]] = None,
                 google_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 google_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 is_auth_from_file: Optional[pulumi.Input[str]] = None,
                 issuer: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 microsoft_account_client_id: Optional[pulumi.Input[str]] = None,
                 microsoft_account_client_secret: Optional[pulumi.Input[str]] = None,
                 microsoft_account_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 microsoft_account_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 runtime_version: Optional[pulumi.Input[str]] = None,
                 token_refresh_extension_hours: Optional[pulumi.Input[float]] = None,
                 token_store_enabled: Optional[pulumi.Input[bool]] = None,
                 twitter_consumer_key: Optional[pulumi.Input[str]] = None,
                 twitter_consumer_secret: Optional[pulumi.Input[str]] = None,
                 twitter_consumer_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 unauthenticated_client_action: Optional[pulumi.Input['UnauthenticatedClientAction']] = None,
                 validate_issuer: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        Configuration settings for the Azure App Service Authentication / Authorization feature.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] aad_claims_authorization: Gets a JSON string containing the Azure AD Acl settings.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_login_params: Login parameters to send to the OpenID Connect authorization endpoint when
               a user logs in. Each parameter must be in the form "key=value".
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_audiences: Allowed audience values to consider when validating JWTs issued by
               Azure Active Directory. Note that the <code>ClientID</code> value is always considered an
               allowed audience, regardless of this setting.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_external_redirect_urls: External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored.
               This is an advanced setting typically only needed by Windows Store application backends.
               Note that URLs within the current domain are always implicitly allowed.
        :param pulumi.Input[str] auth_file_path: The path of the config file containing auth settings.
               If the path is relative, base will the site's root directory.
        :param pulumi.Input[str] client_id: The Client ID of this relying party application, known as the client_id.
               This setting is required for enabling OpenID Connection authentication with Azure Active Directory or
               other 3rd party OpenID Connect providers.
               More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
        :param pulumi.Input[str] client_secret: The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
               This setting is optional. If no client secret is configured, the OpenID Connect implicit auth flow is used to authenticate end users.
               Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
               More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
        :param pulumi.Input[str] client_secret_certificate_thumbprint: An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as
               a replacement for the Client Secret. It is also optional.
        :param pulumi.Input[str] client_secret_setting_name: The app setting name that contains the client secret of the relying party application.
        :param pulumi.Input['BuiltInAuthenticationProvider'] default_provider: The default authentication provider to use when multiple providers are configured.
               This setting is only needed if multiple providers are configured and the unauthenticated client
               action is set to "RedirectToLoginPage".
        :param pulumi.Input[bool] enabled: <code>true</code> if the Authentication / Authorization feature is enabled for the current app; otherwise, <code>false</code>.
        :param pulumi.Input[str] facebook_app_id: The App ID of the Facebook app used for login.
               This setting is required for enabling Facebook Login.
               Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
        :param pulumi.Input[str] facebook_app_secret: The App Secret of the Facebook app used for Facebook Login.
               This setting is required for enabling Facebook Login.
               Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
        :param pulumi.Input[str] facebook_app_secret_setting_name: The app setting name that contains the app secret used for Facebook Login.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] facebook_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Facebook Login authentication.
               This setting is optional.
               Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
        :param pulumi.Input[str] git_hub_client_id: The Client Id of the GitHub app used for login.
               This setting is required for enabling Github login
        :param pulumi.Input[str] git_hub_client_secret: The Client Secret of the GitHub app used for Github Login.
               This setting is required for enabling Github login.
        :param pulumi.Input[str] git_hub_client_secret_setting_name: The app setting name that contains the client secret of the Github
               app used for GitHub Login.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] git_hub_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
               This setting is optional
        :param pulumi.Input[str] google_client_id: The OpenID Connect Client ID for the Google web application.
               This setting is required for enabling Google Sign-In.
               Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
        :param pulumi.Input[str] google_client_secret: The client secret associated with the Google web application.
               This setting is required for enabling Google Sign-In.
               Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
        :param pulumi.Input[str] google_client_secret_setting_name: The app setting name that contains the client secret associated with
               the Google web application.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] google_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
               This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
               Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
        :param pulumi.Input[str] is_auth_from_file: "true" if the auth config settings should be read from a file,
               "false" otherwise
        :param pulumi.Input[str] issuer: The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
               When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/.
               This URI is a case-sensitive identifier for the token issuer.
               More information on OpenID Connect Discovery: http://openid.net/specs/openid-connect-discovery-1_0.html
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] microsoft_account_client_id: The OAuth 2.0 client ID that was created for the app used for authentication.
               This setting is required for enabling Microsoft Account authentication.
               Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
        :param pulumi.Input[str] microsoft_account_client_secret: The OAuth 2.0 client secret that was created for the app used for authentication.
               This setting is required for enabling Microsoft Account authentication.
               Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
        :param pulumi.Input[str] microsoft_account_client_secret_setting_name: The app setting name containing the OAuth 2.0 client secret that was created for the
               app used for authentication.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] microsoft_account_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
               This setting is optional. If not specified, "wl.basic" is used as the default scope.
               Microsoft Account Scopes and permissions documentation: https://msdn.microsoft.com/en-us/library/dn631845.aspx
        :param pulumi.Input[str] name: Name of web app.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[str] runtime_version: The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
               The setting in this value can control the behavior of certain features in the Authentication / Authorization module.
        :param pulumi.Input[float] token_refresh_extension_hours: The number of hours after session token expiration that a session token can be used to
               call the token refresh API. The default is 72 hours.
        :param pulumi.Input[bool] token_store_enabled: <code>true</code> to durably store platform-specific security tokens that are obtained during login flows; otherwise, <code>false</code>.
               The default is <code>false</code>.
        :param pulumi.Input[str] twitter_consumer_key: The OAuth 1.0a consumer key of the Twitter application used for sign-in.
               This setting is required for enabling Twitter Sign-In.
               Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
        :param pulumi.Input[str] twitter_consumer_secret: The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
               This setting is required for enabling Twitter Sign-In.
               Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
        :param pulumi.Input[str] twitter_consumer_secret_setting_name: The app setting name that contains the OAuth 1.0a consumer secret of the Twitter
               application used for sign-in.
        :param pulumi.Input['UnauthenticatedClientAction'] unauthenticated_client_action: The action to take when an unauthenticated client attempts to access the app.
        :param pulumi.Input[bool] validate_issuer: Gets a value indicating whether the issuer should be a valid HTTPS url and be validated as such.
        """
        ...
    @overload
    # NOTE: typing-only overload for the args-object construction form; the
    # runtime dispatch between the two overloads is done in __init__ below.
    def __init__(__self__,
                 resource_name: str,
                 args: WebAppAuthSettingsArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Configuration settings for the Azure App Service Authentication / Authorization feature.
        :param str resource_name: The name of the resource.
        :param WebAppAuthSettingsArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebAppAuthSettingsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 aad_claims_authorization: Optional[pulumi.Input[str]] = None,
                 additional_login_params: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_external_redirect_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 auth_file_path: Optional[pulumi.Input[str]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 client_secret_certificate_thumbprint: Optional[pulumi.Input[str]] = None,
                 client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 default_provider: Optional[pulumi.Input['BuiltInAuthenticationProvider']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 facebook_app_id: Optional[pulumi.Input[str]] = None,
                 facebook_app_secret: Optional[pulumi.Input[str]] = None,
                 facebook_app_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 facebook_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 git_hub_client_id: Optional[pulumi.Input[str]] = None,
                 git_hub_client_secret: Optional[pulumi.Input[str]] = None,
                 git_hub_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 git_hub_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 google_client_id: Optional[pulumi.Input[str]] = None,
                 google_client_secret: Optional[pulumi.Input[str]] = None,
                 google_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 google_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 is_auth_from_file: Optional[pulumi.Input[str]] = None,
                 issuer: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 microsoft_account_client_id: Optional[pulumi.Input[str]] = None,
                 microsoft_account_client_secret: Optional[pulumi.Input[str]] = None,
                 microsoft_account_client_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 microsoft_account_o_auth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 runtime_version: Optional[pulumi.Input[str]] = None,
                 token_refresh_extension_hours: Optional[pulumi.Input[float]] = None,
                 token_store_enabled: Optional[pulumi.Input[bool]] = None,
                 twitter_consumer_key: Optional[pulumi.Input[str]] = None,
                 twitter_consumer_secret: Optional[pulumi.Input[str]] = None,
                 twitter_consumer_secret_setting_name: Optional[pulumi.Input[str]] = None,
                 unauthenticated_client_action: Optional[pulumi.Input['UnauthenticatedClientAction']] = None,
                 validate_issuer: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        # Real constructor body: validates options, builds the props bag, and
        # registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version when the caller did not set one.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: populate __props__ from the keyword args.
            # (When opts.id is set we are adopting an existing resource and the
            # engine supplies the state, so passing __props__ here is an error.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WebAppAuthSettingsArgs.__new__(WebAppAuthSettingsArgs)
            __props__.__dict__["aad_claims_authorization"] = aad_claims_authorization
            __props__.__dict__["additional_login_params"] = additional_login_params
            __props__.__dict__["allowed_audiences"] = allowed_audiences
            __props__.__dict__["allowed_external_redirect_urls"] = allowed_external_redirect_urls
            __props__.__dict__["auth_file_path"] = auth_file_path
            __props__.__dict__["client_id"] = client_id
            __props__.__dict__["client_secret"] = client_secret
            __props__.__dict__["client_secret_certificate_thumbprint"] = client_secret_certificate_thumbprint
            __props__.__dict__["client_secret_setting_name"] = client_secret_setting_name
            __props__.__dict__["default_provider"] = default_provider
            __props__.__dict__["enabled"] = enabled
            __props__.__dict__["facebook_app_id"] = facebook_app_id
            __props__.__dict__["facebook_app_secret"] = facebook_app_secret
            __props__.__dict__["facebook_app_secret_setting_name"] = facebook_app_secret_setting_name
            __props__.__dict__["facebook_o_auth_scopes"] = facebook_o_auth_scopes
            __props__.__dict__["git_hub_client_id"] = git_hub_client_id
            __props__.__dict__["git_hub_client_secret"] = git_hub_client_secret
            __props__.__dict__["git_hub_client_secret_setting_name"] = git_hub_client_secret_setting_name
            __props__.__dict__["git_hub_o_auth_scopes"] = git_hub_o_auth_scopes
            __props__.__dict__["google_client_id"] = google_client_id
            __props__.__dict__["google_client_secret"] = google_client_secret
            __props__.__dict__["google_client_secret_setting_name"] = google_client_secret_setting_name
            __props__.__dict__["google_o_auth_scopes"] = google_o_auth_scopes
            __props__.__dict__["is_auth_from_file"] = is_auth_from_file
            __props__.__dict__["issuer"] = issuer
            __props__.__dict__["kind"] = kind
            __props__.__dict__["microsoft_account_client_id"] = microsoft_account_client_id
            __props__.__dict__["microsoft_account_client_secret"] = microsoft_account_client_secret
            __props__.__dict__["microsoft_account_client_secret_setting_name"] = microsoft_account_client_secret_setting_name
            __props__.__dict__["microsoft_account_o_auth_scopes"] = microsoft_account_o_auth_scopes
            # 'name' and 'resource_group_name' are required unless an existing
            # URN is being looked up (opts.urn set).
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["runtime_version"] = runtime_version
            __props__.__dict__["token_refresh_extension_hours"] = token_refresh_extension_hours
            __props__.__dict__["token_store_enabled"] = token_store_enabled
            __props__.__dict__["twitter_consumer_key"] = twitter_consumer_key
            __props__.__dict__["twitter_consumer_secret"] = twitter_consumer_secret
            __props__.__dict__["twitter_consumer_secret_setting_name"] = twitter_consumer_secret_setting_name
            __props__.__dict__["unauthenticated_client_action"] = unauthenticated_client_action
            __props__.__dict__["validate_issuer"] = validate_issuer
            # Output-only properties: populated by the provider after creation.
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases let the engine treat resources created under other API
        # versions / provider tokens as the same logical resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20210115:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20210115:WebAppAuthSettings"), pulumi.Alias(type_="azure-native:web/v20210201:WebAppAuthSettings"), pulumi.Alias(type_="azure-nextgen:web/v20210201:WebAppAuthSettings")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(WebAppAuthSettings, __self__).__init__(
            'azure-native:web/v20201001:WebAppAuthSettings',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppAuthSettings':
"""
Get an existing WebAppAuthSettings resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WebAppAuthSettingsArgs.__new__(WebAppAuthSettingsArgs)
__props__.__dict__["aad_claims_authorization"] = None
__props__.__dict__["additional_login_params"] = None
__props__.__dict__["allowed_audiences"] = None
__props__.__dict__["allowed_external_redirect_urls"] = None
__props__.__dict__["auth_file_path"] = None
__props__.__dict__["client_id"] = None
__props__.__dict__["client_secret"] = None
__props__.__dict__["client_secret_certificate_thumbprint"] = None
__props__.__dict__["client_secret_setting_name"] = None
__props__.__dict__["default_provider"] = None
__props__.__dict__["enabled"] = None
__props__.__dict__["facebook_app_id"] = None
__props__.__dict__["facebook_app_secret"] = None
__props__.__dict__["facebook_app_secret_setting_name"] = None
__props__.__dict__["facebook_o_auth_scopes"] = None
__props__.__dict__["git_hub_client_id"] = None
__props__.__dict__["git_hub_client_secret"] = None
__props__.__dict__["git_hub_client_secret_setting_name"] = None
__props__.__dict__["git_hub_o_auth_scopes"] = None
__props__.__dict__["google_client_id"] = None
__props__.__dict__["google_client_secret"] = None
__props__.__dict__["google_client_secret_setting_name"] = None
__props__.__dict__["google_o_auth_scopes"] = None
__props__.__dict__["is_auth_from_file"] = None
__props__.__dict__["issuer"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["microsoft_account_client_id"] = None
__props__.__dict__["microsoft_account_client_secret"] = None
__props__.__dict__["microsoft_account_client_secret_setting_name"] = None
__props__.__dict__["microsoft_account_o_auth_scopes"] = None
__props__.__dict__["name"] = None
__props__.__dict__["runtime_version"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["token_refresh_extension_hours"] = None
__props__.__dict__["token_store_enabled"] = None
__props__.__dict__["twitter_consumer_key"] = None
__props__.__dict__["twitter_consumer_secret"] = None
__props__.__dict__["twitter_consumer_secret_setting_name"] = None
__props__.__dict__["type"] = None
__props__.__dict__["unauthenticated_client_action"] = None
__props__.__dict__["validate_issuer"] = None
return WebAppAuthSettings(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="aadClaimsAuthorization")
def aad_claims_authorization(self) -> pulumi.Output[Optional[str]]:
"""
Gets a JSON string containing the Azure AD Acl settings.
"""
return pulumi.get(self, "aad_claims_authorization")
@property
@pulumi.getter(name="additionalLoginParams")
def additional_login_params(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Login parameters to send to the OpenID Connect authorization endpoint when
a user logs in. Each parameter must be in the form "key=value".
"""
return pulumi.get(self, "additional_login_params")
@property
@pulumi.getter(name="allowedAudiences")
def allowed_audiences(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Allowed audience values to consider when validating JWTs issued by
Azure Active Directory. Note that the <code>ClientID</code> value is always considered an
allowed audience, regardless of this setting.
"""
return pulumi.get(self, "allowed_audiences")
@property
@pulumi.getter(name="allowedExternalRedirectUrls")
def allowed_external_redirect_urls(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored.
This is an advanced setting typically only needed by Windows Store application backends.
Note that URLs within the current domain are always implicitly allowed.
"""
return pulumi.get(self, "allowed_external_redirect_urls")
@property
@pulumi.getter(name="authFilePath")
def auth_file_path(self) -> pulumi.Output[Optional[str]]:
"""
The path of the config file containing auth settings.
If the path is relative, base will the site's root directory.
"""
return pulumi.get(self, "auth_file_path")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[Optional[str]]:
"""
The Client ID of this relying party application, known as the client_id.
This setting is required for enabling OpenID Connection authentication with Azure Active Directory or
other 3rd party OpenID Connect providers.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[Optional[str]]:
"""
The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
This setting is optional. If no client secret is configured, the OpenID Connect implicit auth flow is used to authenticate end users.
Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientSecretCertificateThumbprint")
def client_secret_certificate_thumbprint(self) -> pulumi.Output[Optional[str]]:
"""
An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as
a replacement for the Client Secret. It is also optional.
"""
return pulumi.get(self, "client_secret_certificate_thumbprint")
@property
@pulumi.getter(name="clientSecretSettingName")
def client_secret_setting_name(self) -> pulumi.Output[Optional[str]]:
"""
The app setting name that contains the client secret of the relying party application.
"""
return pulumi.get(self, "client_secret_setting_name")
@property
@pulumi.getter(name="defaultProvider")
def default_provider(self) -> pulumi.Output[Optional[str]]:
"""
The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
"""
return pulumi.get(self, "default_provider")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
<code>true</code> if the Authentication / Authorization feature is enabled for the current app; otherwise, <code>false</code>.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="facebookAppId")
def facebook_app_id(self) -> pulumi.Output[Optional[str]]:
"""
The App ID of the Facebook app used for login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_app_id")
@property
@pulumi.getter(name="facebookAppSecret")
def facebook_app_secret(self) -> pulumi.Output[Optional[str]]:
"""
The App Secret of the Facebook app used for Facebook Login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_app_secret")
@property
@pulumi.getter(name="facebookAppSecretSettingName")
def facebook_app_secret_setting_name(self) -> pulumi.Output[Optional[str]]:
    """
    The app setting name that contains the app secret used for Facebook Login.
    """
    return pulumi.get(self, "facebook_app_secret_setting_name")
@property
@pulumi.getter(name="facebookOAuthScopes")
def facebook_o_auth_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    The OAuth 2.0 scopes that will be requested as part of Facebook Login authentication.
    This setting is optional.
    Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
    """
    return pulumi.get(self, "facebook_o_auth_scopes")
@property
@pulumi.getter(name="gitHubClientId")
def git_hub_client_id(self) -> pulumi.Output[Optional[str]]:
    """
    The Client Id of the GitHub app used for login.
    This setting is required for enabling GitHub Login.
    """
    return pulumi.get(self, "git_hub_client_id")
@property
@pulumi.getter(name="gitHubClientSecret")
def git_hub_client_secret(self) -> pulumi.Output[Optional[str]]:
    """
    The Client Secret of the GitHub app used for GitHub Login.
    This setting is required for enabling GitHub Login.
    """
    return pulumi.get(self, "git_hub_client_secret")
@property
@pulumi.getter(name="gitHubClientSecretSettingName")
def git_hub_client_secret_setting_name(self) -> pulumi.Output[Optional[str]]:
    """
    The app setting name that contains the client secret of the GitHub
    app used for GitHub Login.
    """
    return pulumi.get(self, "git_hub_client_secret_setting_name")
@property
@pulumi.getter(name="gitHubOAuthScopes")
def git_hub_o_auth_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
    This setting is optional.
    """
    return pulumi.get(self, "git_hub_o_auth_scopes")
@property
@pulumi.getter(name="googleClientId")
def google_client_id(self) -> pulumi.Output[Optional[str]]:
    """
    The OpenID Connect Client ID for the Google web application.
    This setting is required for enabling Google Sign-In.
    Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
    """
    return pulumi.get(self, "google_client_id")
@property
@pulumi.getter(name="googleClientSecret")
def google_client_secret(self) -> pulumi.Output[Optional[str]]:
    """
    The client secret associated with the Google web application.
    This setting is required for enabling Google Sign-In.
    Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
    """
    return pulumi.get(self, "google_client_secret")
@property
@pulumi.getter(name="googleClientSecretSettingName")
def google_client_secret_setting_name(self) -> pulumi.Output[Optional[str]]:
    """
    The app setting name that contains the client secret associated with
    the Google web application.
    """
    return pulumi.get(self, "google_client_secret_setting_name")
@property
@pulumi.getter(name="googleOAuthScopes")
def google_o_auth_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
    This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
    Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
    """
    return pulumi.get(self, "google_o_auth_scopes")
@property
@pulumi.getter(name="isAuthFromFile")
def is_auth_from_file(self) -> pulumi.Output[Optional[str]]:
    """
    "true" if the auth config settings should be read from a file,
    "false" otherwise.
    """
    return pulumi.get(self, "is_auth_from_file")
@property
@pulumi.getter
def issuer(self) -> pulumi.Output[Optional[str]]:
    """
    The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
    When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/.
    This URI is a case-sensitive identifier for the token issuer.
    More information on OpenID Connect Discovery: http://openid.net/specs/openid-connect-discovery-1_0.html
    """
    return pulumi.get(self, "issuer")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
    """
    Kind of resource.
    """
    return pulumi.get(self, "kind")
@property
@pulumi.getter(name="microsoftAccountClientId")
def microsoft_account_client_id(self) -> pulumi.Output[Optional[str]]:
    """
    The OAuth 2.0 client ID that was created for the app used for authentication.
    This setting is required for enabling Microsoft Account authentication.
    Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
    """
    return pulumi.get(self, "microsoft_account_client_id")
@property
@pulumi.getter(name="microsoftAccountClientSecret")
def microsoft_account_client_secret(self) -> pulumi.Output[Optional[str]]:
    """
    The OAuth 2.0 client secret that was created for the app used for authentication.
    This setting is required for enabling Microsoft Account authentication.
    Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
    """
    return pulumi.get(self, "microsoft_account_client_secret")
@property
@pulumi.getter(name="microsoftAccountClientSecretSettingName")
def microsoft_account_client_secret_setting_name(self) -> pulumi.Output[Optional[str]]:
    """
    The app setting name containing the OAuth 2.0 client secret that was created for the
    app used for authentication.
    """
    return pulumi.get(self, "microsoft_account_client_secret_setting_name")
@property
@pulumi.getter(name="microsoftAccountOAuthScopes")
def microsoft_account_o_auth_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
    This setting is optional. If not specified, "wl.basic" is used as the default scope.
    Microsoft Account Scopes and permissions documentation: https://msdn.microsoft.com/en-us/library/dn631845.aspx
    """
    return pulumi.get(self, "microsoft_account_o_auth_scopes")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Resource Name.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="runtimeVersion")
def runtime_version(self) -> pulumi.Output[Optional[str]]:
    """
    The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
    The setting in this value can control the behavior of certain features in the Authentication / Authorization module.
    """
    return pulumi.get(self, "runtime_version")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
    """
    The system metadata relating to this resource.
    """
    return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tokenRefreshExtensionHours")
def token_refresh_extension_hours(self) -> pulumi.Output[Optional[float]]:
    """
    The number of hours after session token expiration that a session token can be used to
    call the token refresh API. The default is 72 hours.
    """
    return pulumi.get(self, "token_refresh_extension_hours")
@property
@pulumi.getter(name="tokenStoreEnabled")
def token_store_enabled(self) -> pulumi.Output[Optional[bool]]:
    """
    <code>true</code> to durably store platform-specific security tokens that are obtained during login flows; otherwise, <code>false</code>.
    The default is <code>false</code>.
    """
    return pulumi.get(self, "token_store_enabled")
@property
@pulumi.getter(name="twitterConsumerKey")
def twitter_consumer_key(self) -> pulumi.Output[Optional[str]]:
    """
    The OAuth 1.0a consumer key of the Twitter application used for sign-in.
    This setting is required for enabling Twitter Sign-In.
    Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
    """
    return pulumi.get(self, "twitter_consumer_key")
@property
@pulumi.getter(name="twitterConsumerSecret")
def twitter_consumer_secret(self) -> pulumi.Output[Optional[str]]:
    """
    The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
    This setting is required for enabling Twitter Sign-In.
    Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
    """
    return pulumi.get(self, "twitter_consumer_secret")
@property
@pulumi.getter(name="twitterConsumerSecretSettingName")
def twitter_consumer_secret_setting_name(self) -> pulumi.Output[Optional[str]]:
    """
    The app setting name that contains the OAuth 1.0a consumer secret of the Twitter
    application used for sign-in.
    """
    return pulumi.get(self, "twitter_consumer_secret_setting_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """
    Resource type.
    """
    return pulumi.get(self, "type")
@property
@pulumi.getter(name="unauthenticatedClientAction")
def unauthenticated_client_action(self) -> pulumi.Output[Optional[str]]:
    """
    The action to take when an unauthenticated client attempts to access the app.
    """
    return pulumi.get(self, "unauthenticated_client_action")
@property
@pulumi.getter(name="validateIssuer")
def validate_issuer(self) -> pulumi.Output[Optional[bool]]:
    """
    Gets a value indicating whether the issuer should be a valid HTTPS url and be validated as such.
    """
    return pulumi.get(self, "validate_issuer")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
9e58c5c02e59e93500da923b7b57630d670d2f00 | a64eaa15164806cfe1fa5132cdedd97e6567d3b7 | /XCTF(攻防世界)/MISC/4-1/bwm.py | e55f7127ed66b2356bd5f950ba2c514fa796a69b | [] | no_license | ThoseBygones/CTF_Write-Up | 46b679001daf87934289161c99568c07807fbdca | 3c41db37005df7f1d1a41cb9c2edb30f9a3f6f6e | refs/heads/master | 2022-11-24T20:20:24.167658 | 2020-07-24T14:38:26 | 2020-07-24T14:38:26 | 255,052,976 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,609 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import random
cmd = None        # sub-command, 'encode' or 'decode' (set from argv below)
debug = False     # when True, intermediate images are displayed with matplotlib
seed = 20160930   # RNG seed used to shuffle (and later unshuffle) watermark rows/cols
alpha = 3.0       # embedding strength of the watermark in the frequency domain
if __name__ == '__main__':
    # Show usage when help is requested or no sub-command was supplied.
    if '-h' in sys.argv or '--help' in sys.argv or len(sys.argv) < 2:
        print('Usage: python bwm.py <cmd> [arg...] [opts...]')
        print(' cmds:')
        print(' encode <image> <watermark> <image(encoded)>')
        print(' image + watermark -> image(encoded)')
        print(' decode <image> <image(encoded)> <watermark>')
        print(' image + image(encoded) -> watermark')
        print(' opts:')
        print(' --debug, Show debug')
        print(' --seed <int>, Manual setting random seed (default is 20160930)')
        print(' --alpha <float>, Manual setting alpha (default is 3.0)')
        sys.exit(1)
    cmd = sys.argv[1]
    if cmd != 'encode' and cmd != 'decode':
        print('Wrong cmd %s' % cmd)
        sys.exit(1)
    # Strip recognised options (and their values) out of sys.argv so that only
    # positional arguments remain for the encode/decode dispatch further down.
    if '--debug' in sys.argv:
        debug = True
        del sys.argv[sys.argv.index('--debug')]
    if '--seed' in sys.argv:
        p = sys.argv.index('--seed')
        if len(sys.argv) <= p+1:
            print('Missing <int> for --seed')
            sys.exit(1)
        seed = int(sys.argv[p+1])
        del sys.argv[p+1]
        del sys.argv[p]
    if '--alpha' in sys.argv:
        p = sys.argv.index('--alpha')
        if len(sys.argv) <= p+1:
            print('Missing <float> for --alpha')
            sys.exit(1)
        alpha = float(sys.argv[p+1])
        del sys.argv[p+1]
        del sys.argv[p]
import cv2
import numpy as np
import matplotlib.pyplot as plt
# OpenCV stores image data in (B, G, R) channel order,
# while Matplotlib displays images in (R, G, B) order.
def bgr_to_rgb(img):
    """Return *img* with its first and third channels swapped (BGR <-> RGB)."""
    blue, green, red = cv2.split(img)
    return cv2.merge([red, green, blue])
def getImgAndWm(image, watermark):
    """Load the carrier image and the watermark image.

    The carrier is probed with IMREAD_UNCHANGED first: when it carries four or
    more channels, the watermark is also read with IMREAD_UNCHANGED; otherwise
    both files are (re-)read as plain 3-channel BGR images.
    """
    carrier = cv2.imread(image, cv2.IMREAD_UNCHANGED)
    if len(cv2.split(carrier)) >= 4:
        mark = cv2.imread(watermark, cv2.IMREAD_UNCHANGED)
    else:
        carrier = cv2.imread(image)
        mark = cv2.imread(watermark)
    return carrier, mark
def encode(image, watermark):
    """Embed `watermark` into `image` as an additive FFT-domain blind watermark.

    NOTE: the watermarked result is written back over the *image* path itself.
    Python 2 only: relies on `xrange` and on `random.shuffle` of `range(...)`
    lists (Py2 `range` returns a list).
    """
    print('image<%s> + watermark<%s>' % (image, watermark))
    img, wm = getImgAndWm(image, watermark)
    if debug:
        plt.subplot(231), plt.imshow(bgr_to_rgb(img)), plt.title('image')
        plt.xticks([]), plt.yticks([])
        plt.subplot(234), plt.imshow(bgr_to_rgb(wm)), plt.title('watermark')
        plt.xticks([]), plt.yticks([])
    # print img.shape  # (height, width, channels)
    h, w = img.shape[0], img.shape[1]
    # The watermark must fit inside the top half of the carrier; bail out otherwise.
    hwm = np.zeros((int(h * 0.5), w, img.shape[2]))
    if hwm.shape[0] < wm.shape[0]:
        return
    if hwm.shape[1] < wm.shape[1]:
        return
    # Copy the watermark into the top-left corner of a half-height canvas.
    hwm2 = np.copy(hwm)
    for i in xrange(wm.shape[0]):
        for j in xrange(wm.shape[1]):
            hwm2[i][j] = wm[i][j]
    # Scramble rows and columns with a seeded permutation so the watermark is
    # spread over the canvas; decode reproduces the same permutation from `seed`.
    random.seed(seed)
    m, n = range(hwm.shape[0]), range(hwm.shape[1])
    random.shuffle(m)
    random.shuffle(n)
    for i in xrange(hwm.shape[0]):
        for j in xrange(hwm.shape[1]):
            hwm[i][j] = hwm2[m[i]][n[j]]
    # Mirror the scrambled watermark into both halves so the spectrum stays
    # (conjugate-)symmetric and the inverse FFT remains essentially real.
    rwm = np.zeros(img.shape)
    for i in xrange(hwm.shape[0]):
        for j in xrange(hwm.shape[1]):
            rwm[i][j] = hwm[i][j]
            rwm[rwm.shape[0] - i - 1][rwm.shape[1] - j - 1] = hwm[i][j]
    if debug:
        plt.subplot(235), plt.imshow(bgr_to_rgb(rwm)), \
            plt.title('encrypted(watermark)')
        plt.xticks([]), plt.yticks([])
    # Embed: add the scrambled watermark to the image's 2-D spectrum, then invert.
    f1 = np.fft.fft2(img)
    f2 = f1 + alpha * rwm
    _img = np.fft.ifft2(f2)
    if debug:
        plt.subplot(232), plt.imshow(bgr_to_rgb(np.real(f1))), \
            plt.title('fft(image)')
        plt.xticks([]), plt.yticks([])
    img_wm = np.real(_img)
    assert cv2.imwrite(image, img_wm, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if cmd == 'encode':
    # encode <watermark is argv[2]>; every remaining argument is a carrier
    # image that gets watermarked in place.
    watermark = sys.argv[2]
    for x in range(3, len(sys.argv)):
        image = sys.argv[x]
        encode(image, watermark)
elif cmd == 'decode':
    # decode <original> <encoded> <output watermark>
    fn1 = sys.argv[2]
    fn2 = sys.argv[3]
    fn3 = sys.argv[4]
    print('image<%s> + image(encoded)<%s> -> watermark<%s>' % (fn1, fn2, fn3))
    img = cv2.imread(fn1)
    img_wm = cv2.imread(fn2)
    if debug:
        plt.subplot(231), plt.imshow(bgr_to_rgb(img)), plt.title('image')
        plt.xticks([]), plt.yticks([])
        plt.subplot(234), plt.imshow(bgr_to_rgb(img_wm)), plt.title('image(encoded)')
        plt.xticks([]), plt.yticks([])
    # Rebuild the same row/column permutation that encode() used (same seed).
    random.seed(seed)
    m, n = range(int(img.shape[0] * 0.5)), range(img.shape[1])
    random.shuffle(m)
    random.shuffle(n)
    f1 = np.fft.fft2(img)
    f2 = np.fft.fft2(img_wm)
    if debug:
        plt.subplot(232), plt.imshow(bgr_to_rgb(np.real(f1))), \
            plt.title('fft(image)')
        plt.xticks([]), plt.yticks([])
        # NOTE(review): this panel also shows f1 although it is labelled
        # 'fft(image(encoded))' -- probably meant f2; left unchanged.
        plt.subplot(235), plt.imshow(bgr_to_rgb(np.real(f1))), \
            plt.title('fft(image(encoded))')
        plt.xticks([]), plt.yticks([])
    # The embedded (still scrambled) watermark is the spectral difference.
    rwm = (f2 - f1) / alpha
    rwm = np.real(rwm)
    if debug:
        plt.subplot(233), plt.imshow(bgr_to_rgb(rwm)), \
            plt.title('encrypted(watermark)')
        plt.xticks([]), plt.yticks([])
    # Invert the permutation to restore the watermark, then mirror it into the
    # bottom half (as encode() did).
    wm = np.zeros(rwm.shape)
    for i in xrange(int(rwm.shape[0] * 0.5)):
        for j in xrange(rwm.shape[1]):
            wm[m[i]][n[j]] = np.uint8(rwm[i][j])
    for i in xrange(int(rwm.shape[0] * 0.5)):
        for j in xrange(rwm.shape[1]):
            wm[rwm.shape[0] - i - 1][rwm.shape[1] - j - 1] = wm[i][j]
    assert cv2.imwrite(fn3, wm)
    if debug:
        plt.subplot(236), plt.imshow(bgr_to_rgb(wm)), plt.title(u'watermark')
        plt.xticks([]), plt.yticks([])
if debug:
    plt.show()
| [
"1273789365@qq.com"
] | 1273789365@qq.com |
fddbba3a857cda22e1cecd3113887d8608a0881d | 7a5948da9c7b5fb2b0447769016882754f889338 | /LogisticRegression/logisticRegression.py | 83a063dbf0d1ea34b92c851c2286fa52183d84cb | [
"MIT"
] | permissive | darwin-b/MachineLearning | 3f0e4ed82a3b065bc74cdff25cc55fc6e828aecb | 834a07fcb5052053d7a7d6d9fe4fc5abbe3117d3 | refs/heads/master | 2023-02-13T07:10:05.751517 | 2021-01-12T01:13:23 | 2021-01-12T01:13:23 | 291,417,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,536 | py | import sys
import os
import pandas as pd
import numpy as np
import nltk
import re
from nltk.tokenize import word_tokenize
import time
import random
nltk.download('punkt')
# data_path = "C:\\Users\\darwi\\OneDrive - " \
# "The University of Texas at Dallas\\Acads\\Machine Learning\\Assignments\\MachineLearning\\Data"
cwd = os.getcwd()
def read(file_path):
    """Return the whole contents of *file_path*, decoded as cp437 text."""
    with open(file_path, encoding='cp437') as handle:
        contents = handle.read()
    return contents
def bag_words(text_data, bag):
    """Tokenise *text_data* and accumulate word counts into *bag*.

    *bag* is mutated in place and also returned for convenience.  Digits and
    non-word characters are blanked out and text is lower-cased before
    tokenisation.
    """
    sentences = nltk.sent_tokenize(text_data)
    for idx, sentence in enumerate(sentences):
        cleaned = re.sub(r'\d', ' ', sentence)    # digits -> space
        cleaned = re.sub(r'\W', ' ', cleaned)     # non-word chars -> space
        cleaned = re.sub(r'\s+', ' ', cleaned)    # collapse runs of whitespace
        sentences[idx] = cleaned.lower()
    for sentence in sentences:
        for token in nltk.word_tokenize(sentence):
            bag[token] = bag.get(token, 0) + 1
    return bag
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works elementwise on numpy arrays."""
    return 1.0 / (1.0 + np.exp(-x))
def sigmoid2(x):
    """Complement of the logistic function: e^-x / (1 + e^-x) == 1 - sigmoid(x)."""
    neg_exp = np.exp(-1 * x)
    return neg_exp / (1 + neg_exp)
# data_path = "C:\\Users\\darwi\\OneDrive - " \
# "The University of Texas at Dallas\\Acads\\Machine Learning\\Assignments\\MachineLearning\\Data\\enron4"
# Build the bag-of-words feature matrix from the train/{ham,spam} folders.
data_path=sys.argv[1]
test_path_ham = data_path + os.path.sep + "test" + os.path.sep + "ham" + os.path.sep
test_path_spam = data_path + os.path.sep + "test" + os.path.sep + "spam" + os.path.sep
train_path_ham = data_path + os.path.sep + "train" + os.path.sep + "ham" + os.path.sep
train_path_spam = data_path + os.path.sep + "train" + os.path.sep + "spam" + os.path.sep
# First pass: vocabulary (word -> global count) over all training mails.
bag={}
for file in os.listdir(train_path_ham):
    bag= bag_words(read(train_path_ham + file),bag)
# bag_spam = {}
for file in os.listdir(train_path_spam):
    bag = bag_words(read(train_path_spam + file), bag)
count_features = bag.__len__()
hamFiles_count = os.listdir(train_path_ham).__len__()
spamFiles_count = os.listdir(train_path_spam).__len__()
# One row per mail; the extra last column carries the label (1 = ham, 0 = spam)
# so it survives the shuffle below.  Feature slices later use [:, :-1].
data_X = np.zeros((hamFiles_count+spamFiles_count,count_features+1))
data_X[0:hamFiles_count,-1]=1
data_X[hamFiles_count:,-1]=0
# NOTE(review): data_y and z are built but never used again below.
data_y = np.ones((hamFiles_count+spamFiles_count,1))
data_y[hamFiles_count:,0]=0
z= os.listdir(test_path_ham)
# Second pass: assign each word a column index and fill in the counts.
baggedIndex={}
index=0
index_file=0
for file in os.listdir(train_path_ham):
    words = bag_words(read(train_path_ham + file),{})
    for word in words:
        if word not in baggedIndex:
            baggedIndex[word]=index
            data_X[index_file][index]=words[word]
            index +=1
        else:
            data_X[index_file][baggedIndex[word]]=words[word]
    index_file +=1
for file in os.listdir(train_path_spam):
    words = bag_words(read(train_path_spam + file),{})
    for word in words:
        if word not in baggedIndex:
            baggedIndex[word]=index
            data_X[index_file][index]=words[word]
            index +=1
        else:
            data_X[index_file][baggedIndex[word]]=words[word]
    index_file +=1
# ----------------------------- Splitting Data : 70-30 Ratio------------------------- #
np.random.shuffle(data_X)
splitValue= int((hamFiles_count+spamFiles_count)*0.7)
train_X,valid_X = data_X[:splitValue,:], data_X[splitValue:,:]
train_y,valid_y = data_X[:splitValue,-1], data_X[splitValue:,-1]
# -----------------------------Data engineering done-------------------------------------#
print("------------------------Data Engineering done------------------------")
# ----------------------------------Training Model--------------------------------------------#
weights = np.zeros(count_features)
# Grid search over learning rate and L2 penalty (lambda).
rates = np.linspace(0.1,1,5)
lambdas = np.linspace(0,2,10)
runtimes={}
accuracies={}
spam_acc={}
ham_acc={}
w=[]
max_acc=-1
tuned_lambda=0
tuned_rate=0
# llbm = [x for x in range(1,0.5)]
for learning_rate in rates:
    for l in lambdas:
        print("\n")
        print("learning rate: ",learning_rate)
        print("lambda: ",l)
        start=time.time()
        # Batch gradient ascent on the regularised log-likelihood, 200 iterations.
        weights_tune = np.zeros(count_features)
        for iterations in range(200):
            weighted_features = weights_tune*train_X[:,:-1]
            linear_score =np.sum(weighted_features,axis=1)
            diff_matrix=train_y-sigmoid(linear_score)
            errorWeighted_features= np.multiply(diff_matrix,np.transpose(train_X[:,:-1]))
            weights_tune = weights_tune + learning_rate*np.sum(errorWeighted_features,axis=1) - learning_rate*l*weights_tune
        runtimes[(learning_rate,l)]=time.time()-start
        print("runtime : ",time.time()-start)
        w.append(weights_tune)
        # ----------------------------------Validation--------------------------------------------#
        # Score the held-out 30%: >0.5 -> ham (label 1), <0.5 -> spam (label 0).
        valid_weighted_features = weights_tune*valid_X[:,:-1]
        valid_linear_score =np.sum(valid_weighted_features,axis=1)
        valid_ham_predict=sigmoid(valid_linear_score)
        count1=0
        count2=0
        count=0
        true_ham=0
        true_spam=0
        for each in range(len(valid_y)):
            if valid_y[each]==1:
                true_ham+=1
            else:
                true_spam+=1
            if valid_y[each]==1 and valid_ham_predict[each]>0.5:
                count+=1
                count1+=1
            if valid_y[each]==0 and valid_ham_predict[each]<0.5:
                count+=1
                count2+=1
        ham_acc[(learning_rate,l)]=count1/true_ham
        spam_acc[(learning_rate,l)] = count2 / true_spam
        accuracies[(learning_rate,l)]=count/(true_ham+true_spam)
        print("Acc : ",count/(true_ham+true_spam)," Spam Accc : ",count2 / true_spam)
        # Keep the best hyper-parameter combination seen so far.
        if count/(true_ham+true_spam)>max_acc:
            max_acc=count/(true_ham+true_spam)
            weights=weights_tune
            tuned_lambda=l
            tuned_rate=learning_rate
        print("Max Acc: ",max_acc)
        # print("Valid ham is ham : ", count1," Acc : ",count1/true_ham," true ham: ",true_ham)
        # print("Valid spam is spam : ", count2," Acc : ",count2/true_spam," true spam: ",true_spam)
        # print("Accuracy : ",count/(true_ham+true_spam))
# ----------------------------------Read Test files--------------------------------------------#
testHam_files_count=os.listdir(test_path_ham).__len__()
testSpam_files_count=os.listdir(test_path_spam).__len__()
test_ham=np.zeros((testHam_files_count,count_features+1))
test_spam=np.zeros((testSpam_files_count,count_features+1))
# ----------------------------------Predict test ham--------------------------------------------#
# Vectorise the test ham mails using only words already in the training vocabulary.
index_file=0
for file in os.listdir(test_path_ham):
    words = bag_words(read(test_path_ham + file), {})
    for word in words:
        if word in baggedIndex:
            test_ham[index_file][baggedIndex[word]] = words[word]
    index_file += 1
# Score with the tuned weights; count1 = hams correctly predicted as ham (>0.5).
testHam_weighted_features = weights*test_ham[:,:-1]
testHam_linear_score =np.sum(testHam_weighted_features,axis=1)
test_ham_predict=sigmoid(testHam_linear_score)
count1=0
true_ham=len(test_ham_predict)
for each in range(len(test_ham_predict)):
    if test_ham_predict[each]>0.5:
        count1+=1
# print("test ham is ham : ", count1," Acc : ",count1/true_ham," true ham: ",true_ham)
# print("Valid ham is ham : ", count," Acc : ",count/true_spam," true ham: ",true_spam)
# ----------------------------------Predict test spam--------------------------------------------#
# Vectorise the test spam mails using only words already in the training vocabulary.
index_file=0
for file in os.listdir(test_path_spam):
    words = bag_words(read(test_path_spam + file), {})
    for word in words:
        if word in baggedIndex:
            test_spam[index_file][baggedIndex[word]] = words[word]
    index_file += 1
# Score with the tuned weights; count2 = spams correctly predicted as spam.
testSpam_weighted_features = weights*test_spam[:,:-1]
testSpam_linear_score =np.sum(testSpam_weighted_features,axis=1)
test_spam_predict=sigmoid(testSpam_linear_score)
count2=0
true_spam=len(test_spam_predict)
for each in range(len(test_spam_predict)):
    # BUG FIX: spam carries label 0, so a spam mail is classified correctly
    # when its sigmoid score is BELOW 0.5 (matching the validation loop above).
    # The original counted scores > 0.5, i.e. spam misclassified as ham.
    if test_spam_predict[each]<0.5:
        count2+=1
# Confusion-matrix entries with ham (label 1) as the positive class.
tp = count1
tn = count2
fp = true_ham - count1
fn = true_spam - count2
acc=(tp+tn)/(tp+tn+fp+fn)
precision=(tp)/(tp+fp)  # NOTE(review): divides by zero if nothing is predicted ham
recall = tp/(tp+fn)
f1_score = 2*(recall * precision) / (recall + precision)
# Print a console summary and persist the full grid-search results to a text file.
print("\n\n-----------------------------------Summary----------------------------------------------")
print("--------------------------------Validation Results------------------")
print("max Acc : ",max_acc)
print("rate : ",tuned_rate)
print("lambda : ",tuned_lambda)
print("\n-----------------------------Test DataSet Result----------------------------------------------\n")
print("rate : ",tuned_rate)
print("lambda : ",tuned_lambda)
print("\n Accuracy on test files : ",acc)
print(" precision : ",precision)
print(" Recall : ",recall)
print(" F1_score : ",f1_score)
# One report file per dataset, named after the last path component.
file_name="resultsLogisticRegression_"+data_path.split(os.path.sep)[-1]+".txt"
with open(file_name,'w') as file:
    text = "Logistic Regression Model trained with shuffled 70-30 Data split into training & validation Data\n\n"
    text = text + "--------------Validation Results------------------" + "\n\n"
    text = text + "Best_Accuracy : " + repr(max_acc) + "\n"
    text = text + "lambda tuned : " + repr(tuned_lambda) + "\n"
    text = text + "Learning Rate : " + repr(tuned_rate) + "\n"
    text = text + "Total Runtime : " + repr(np.sum([runtimes[x] for x in runtimes])) + "\n"
    text = text + "learning rates : 0.1 to 1 with step increment of 0.225 -----> 5 values \n"
    text = text + "lambda values : 0 to 2 with step increment of 0.2222 ----> 10 values \n\n"
    text = text + "--------------Results Test Data------------------"+"\n\n"
    text = text + "\n Accuracy on test files : "+ str(acc) + "\n"
    text = text + " precision : " + str(precision) + "\n"
    text = text + " Recall : " + str(recall) + "\n"
    text = text + " F1_score : " + str(f1_score) + "\n"
    text = text + "\n\n\n"
    text = text + "Accuracies : \n"+repr(accuracies)+"\n\n"
    text = text + "Runtime : \n" + repr(runtimes) + "\n\n"
    text = text + "Spam_Accuracies : \n" + repr(spam_acc) + "\n\n"
    text = text + "Ham_Accuracies : \n" + repr(ham_acc) + "\n\n"
    file.write(text)
| [
"darwin.bollepalli@gmail.com"
] | darwin.bollepalli@gmail.com |
1d78b2f287093aaabba4344add7cc6fae44f8d34 | d5aa24b75c2344358752b0af0a47293533820578 | /data_analysis/IO/load_data.py | 6333a0cb4611a6478b56ee4e3cef726e09a8e012 | [] | no_license | ModelDBRepository/234992 | 913da9efaadb704171da907ebd953fe59efe5fb1 | b969a4c623b92c1bd79138f4132885bc424b114c | refs/heads/master | 2020-05-29T18:28:48.883803 | 2019-05-31T03:42:59 | 2019-05-31T03:42:59 | 189,300,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import numpy as np
import sys, pathlib
sys.path.append(str(pathlib.Path(__file__).resolve().parents[2]))
import data_analysis.IO.axon_to_python as axon
import data_analysis.IO.binary_to_python as binary
def load_file(filename, zoom=None):
    """Dispatch on file extension and load a recording.

    Parameters
    ----------
    filename : str
        Path to a '.bin' or '.abf' file; any other extension yields None.
    zoom : [t_min, t_max], optional
        Time window forwarded to the format-specific loader.  Defaults to the
        whole recording, [0, np.inf].

    Returns
    -------
    Whatever the format-specific loader returns (unpacked as (t, data) by the
    __main__ demo below), or None for unrecognised extensions.
    """
    # BUG FIX: the old default `zoom=[0, np.inf]` was a single shared mutable
    # list; a fresh list is now built on every call.
    if zoom is None:
        zoom = [0, np.inf]
    if filename.endswith('.bin'):
        return binary.load_file(filename, zoom=zoom)
    elif filename.endswith('.abf'):
        print(filename)  # debug trace kept for parity with existing behaviour
        return axon.load_file(filename, zoom=zoom)
    else:
        return None
def get_metadata(filename, infos=None):
    """Dispatch on file extension and return the recording's metadata dict.

    Parameters
    ----------
    filename : str
        Path to a '.bin', '.abf' or '.npz' file; other extensions yield None.
    infos : dict, optional
        Extra information forwarded to the format-specific reader.

    Returns
    -------
    A metadata dict ('.npz' files get a fixed modelling stub), or None for
    unrecognised extensions.
    """
    print('filename is', filename)
    # BUG FIX: the old default `infos={}` was a single shared mutable dict;
    # a fresh dict is now built on every call.
    if infos is None:
        infos = {}
    if filename.endswith('.bin'):
        return binary.get_metadata(filename, infos=infos)
    elif filename.endswith('.abf'):
        return axon.get_metadata(filename, infos=infos)
    elif filename.endswith('.npz'):
        return {'main_protocol':'modeling_work'}
    else:
        return None
def get_formated_data(filename):
    """Bundle a recording into a dict: time base, first trace, metadata, sampling step."""
    t, traces = load_file(filename)
    meta = get_metadata(filename)
    return {'t': t,
            'Vm': traces[0],
            'infos': meta,
            'dt': t[1] - t[0]}
if __name__ == '__main__':
    # Quick visual check: load the file given as the last CLI argument, print
    # its metadata and plot the first trace (skipping the first 10000 samples).
    import sys
    import matplotlib.pylab as plt
    filename = sys.argv[-1]
    print(get_metadata(filename))
    t, data = load_file(filename, zoom=[-5.,np.inf])
    plt.plot(t[10000:], data[0][10000:])
    plt.show()
| [
"tom.morse@yale.edu"
] | tom.morse@yale.edu |
bda783c687d550284ea64c93dd66f035fb1f1dfb | fdfd9cab4e26491da5d2a06a15960362ccf01460 | /ex32.py | 2a85257657bbb1b65928785ed5e54f5bf092b766 | [] | no_license | WilliamsHerrmann/MWM15 | c182f7f8eca4f30a41e602a8e907497bc927af81 | 3f17abd57473f328ddd1e1a2a7591423f32da0f8 | refs/heads/master | 2021-07-07T05:01:06.486909 | 2017-10-02T18:18:44 | 2017-10-02T18:18:44 | 103,341,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# NOTE: Python 2 code -- `print` is used as a statement throughout this script.
# this first kind of for-loop goes through a list
for number in the_count:
    print "This is count %d" % number
# same as above
for fruit in fruits:
    print "A fruit of type: %s" % fruit
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
    print "I got %r" % i
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0, 6):
    print "Adding %d to the list." % i
    # append is a function that lists understand
    elements. append(i)
# now we can print them out too
for i in elements:
    print "Element was: %d" % i
| [
"you@example.com"
] | you@example.com |
fee5ba290a6c0c69abc55bd163e7344e718aa456 | f201aa7b963cd6c22fabdc9c301cbf75942f5b91 | /Algorithms/Analysis/draw_edge_costs.py | 048b14569dda4e08bdba495caf2cc5d30774eb24 | [
"MIT"
] | permissive | Kanavoy/UODS | 5a4e0d5ec52e8be7467702222f3fa13767eaaf91 | 2da38b749e721b051aeaa6a7bcb3a921aeb5a09c | refs/heads/master | 2022-12-09T18:49:24.534122 | 2020-09-08T18:04:26 | 2020-09-08T18:04:26 | 286,434,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,668 | py | from networkx import draw_networkx_edge_labels
from networkx import Graph, connected_components, DiGraph, all_neighbors
from networkx.exception import NetworkXError as nxerror
from random import random
def analyse(graph,opts,chosen):
    """Per-step analysis hook: a no-op here.

    NOTE(review): presumably part of a common analysis-module interface that
    the simulation driver calls each step -- confirm against the caller.
    """
    return
def final_analysis(graph,opts):
# make a temporary copy of the giant component
temp = DiGraph(graph)
cc = []
try:
cc = list(max(connected_components(graph), key=len))
except:
cc = list(graph)
ncc = [n for n in list(graph.nodes()) if n not in cc]
temp.remove_nodes_from(ncc)
# get a dictionary of "is a node extremist"
eh = opts.initial.max * 0.9
el = opts.initial.min * 0.9
extremists = {n: not el < graph.values["opinion"][n] < eh for n in temp.nodes()}
# assign weights to edges
target_edges_influence = {}
target_edges_vulnerability = {}
for node in temp.nodes():
# we don't trim edges from the extremist agents, but rather from their targets
if extremists[node]:
continue
neighbors = list(temp.predecessors(node))
myop = graph.values["opinion"][node]
extreme_neighbors = [n for n in neighbors if extremists[n]]
# weight is your degree, shared equally between edges leading to extremists (influence targeting)
if len(extreme_neighbors):
weight = len(neighbors)/len(extreme_neighbors)
for ex_n in extreme_neighbors:
target_edges_influence[(node,ex_n)] = weight
# weight is absolute value of 1 divided by the distance between your opinion and your closest extreme neighbor (vulnerability targeting)
if len(extreme_neighbors):
weight = max(abs(1/(myop-graph.values["opinion"][n] or 0.001)) for n in extreme_neighbors)
for ex_n in extreme_neighbors:
target_edges_vulnerability[(node,ex_n)] = round(weight,3)
# compute costs
try:
opts.intervention.costs
except AttributeError:
opts.intervention.costs = []
edge_costs = {}
for k in target_edges_influence:
my_adj = list(all_neighbors(temp,k[0]))
other_adj = list(all_neighbors(temp,k[1]))
if opts.intervention.mode in ["degree","degree-ignore"]:
edge_costs[k] = (len(my_adj)+len(other_adj))/(2*max([len(list(all_neighbors(temp,n))) for n in temp]))
elif opts.intervention.mode == "random":
edge_costs[k] = random()
elif opts.intervention.mode in ["paths","paths-ignore"]:
edge_costs[k] = (sum([n in other_adj for n in my_adj])/len(my_adj)+sum([n in my_adj for n in other_adj])/len(other_adj))/2
if edge_costs[k] == 0:
edge_costs[k] = 0.01 # avoids /0 error when we divide by costs
else:
edge_costs[k] = 1
draw_networkx_edge_labels(graph, opts.layout.pos, edge_labels=target_edges_influence)
return | [
"noreply@github.com"
] | Kanavoy.noreply@github.com |
3ded07fad9594a685e55921207a73becd76c6fab | 1150d536b0181c56d15b155ae8c04633626a9ed7 | /mysite/mysite/settings.py | 677ada1762d1bbd40c92f08e4e7734238d6c1659 | [] | no_license | fanzhang312/FetchTwitterFeeds_PythonTwitter | f4a36dd3aff28ff06f30b499ba41d3691c961ffe | 1dbfb788d1209d6188cf2577036cb2fef634a8bd | refs/heads/master | 2021-01-25T10:14:24.166996 | 2013-06-10T18:13:05 | 2013-06-10T18:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,281 | py | # Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'b68#jp(8s1$yj%$1p9t7lxe&)v2t97f^5c(5*^^ihx2$ar6q+2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/Users/Shared/Zhan/projects/twitterFeeds/mysite/',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'feeds',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"fanzhang312@gmail.com"
] | fanzhang312@gmail.com |
7cb7503f8d3b5067d417729a24c032e9f96f80f9 | 16d05542f024bc2a86b1abdf9cb452d760dfe60b | /run_svd_J.py | f5c19ce972c9e3e330816590591f435f2ededeef | [] | no_license | RoboRiot/CMSC471_The_Bards | 84452ea9d1c8845fc030b9ca183d2e9d3223409c | b0da312e809e695e200ea08d46b6ab3d47e5c5b1 | refs/heads/master | 2020-04-03T13:39:55.917097 | 2018-12-01T01:40:03 | 2018-12-01T01:40:03 | 155,292,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | #utilizing the newly made user-item matrix to create a rating prediction matrix
#new matrix will be saved as text file for quick use in other files
import os
import numpy
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import svds, eigs
#we will first load our user-item matrix into data
data = numpy.loadtxt("C:\\CMSC471-Python\\user_item_mat.txt", delimiter=",")
#convert the data to a scipy sparse matrix
scipy_data = csc_matrix(data)
#perform scipy sparse svd on the matrix
#NOTE: we will need to determine a K parameter value
#To do this:
#1. Run our algorithm without a low K value on training set
#2. Then run on validation set and compute Root Mean Square Error
#3. Repeats steps 1 and 2, increasing K, and find the K value that minimizes the Root Mean Square Error
#Remember: 1 <= K < MIN(matrix.rows, matrix.columns)
#Basically, a higher K value will overfit our data to our training set, so this is the key component of our recommendation engine
#For movies, lower rank matricies with 20 <= k <= 100 has been shown to be a good range for accurate predictions
U, S, Vt = svds(scipy_data, k=50)
#converts S from an list of values into a diagonal matrix
S = numpy.diag(S)
predicted_ratings = numpy.dot(numpy.dot(U, S), Vt)
#save the predicted_ratings matrix into a txt file so it can be utilized by other files
numpy.savetxt("C:\\CMSC471-Python\\predictions_mat.txt", predicted_ratings, fmt='%1.3f', delimiter=",", newline="\n")
| [
"elfishfanatic18@hotmail.com"
] | elfishfanatic18@hotmail.com |
c05b2d2d9ecd3eba54b5f2efb976613d93068b2e | 5389214afd2a1607925c2104227395a4f2a2800e | /ajax_guide/urls.py | 453bb0f440546b9de8d098f5eca2b16974c1770b | [] | no_license | vinoyjoshi/bandit | 272081b3c843e85969e1a2217080beb08c2b0df5 | 2421d742bbf31faf9b699bd20058c242cbe68773 | refs/heads/main | 2023-01-06T01:49:58.327732 | 2020-10-15T19:47:39 | 2020-10-15T19:47:39 | 304,411,565 | 1 | 0 | null | 2020-10-15T19:47:40 | 2020-10-15T18:13:48 | Python | UTF-8 | Python | false | false | 1,013 | py | """ajax_guide URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app1 import views as app1
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
path('',app1.contactPage),
url(r'^ajax/contact-submit/$',app1.contact_submit, name = 'contact_submit'),
path(r'^ajax/get_contact_info/$',app1.get_contact_info,name = 'get_contact_info')
]
| [
"vnitikesh@gmail.com"
] | vnitikesh@gmail.com |
7ed93578216aac980f00d00bb895797a9107acd9 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /018_dictionaries/examples/Python 3 Most Nessesary/9.3.Listing 9.4. Enumerating dictionary elements.py | 68e2d165ac09ae3d6584391151010bbb29be77b9 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,111 | py | d = {"x": 1, "y": 2, "z": 3}
for key in d.keys(): # Использование метода keys()
print("({0} => {1})".format(key, d[key]), end=" ")
# Выведет: (y => 2) (x => 1) (z => 3)
print() # Вставляем символ перевода строки
for key in d: # Словари также поддерживают итерации
print("({0} => {1})".format(key, d[key]), end=" ")
# Выведет: (y => 2) (x => 1) (z => 3)
d = {"x": 1, "y": 2, "z": 3}
k = list(d.keys()) # Получаем список ключей
k.sort() # Сортируем список ключей
for key in k:
print("({0} => {1})".format(key, d[key]), end=" ")
# Выведет: (x => 1) (y => 2) (z => 3)
d = {"x": 1, "y": 2, "z": 3}
for key in sorted(d.keys()):
print("({0} => {1})".format(key, d[key]), end=" ")
# Выведет: (x => 1) (y => 2) (z => 3)
d = {"x": 1, "y": 2, "z": 3}
for key in sorted(d):
print("({0} => {1})".format(key, d[key]), end=" ")
# Выведет: (x => 1) (y => 2) (z => 3) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
a8d39dbd537d0aaba6ada0180f907c14038de717 | a4f485b9ebe59372415a3ed75888d1e73e0b97e2 | /kenzie_starter_app/migrations/0005_auto_20210617_1138.py | 01e2974bf713618ed1723f850c41300d3b3ff5ef | [] | no_license | felipe16sm/kenzie-starter | 51f630e6c9c2dd254d89a4d2d36e7cf24ac459b5 | ff5c6831a95e53dbf61393f367a47c92160a2bab | refs/heads/master | 2023-06-13T05:03:31.387902 | 2021-07-07T02:31:35 | 2021-07-07T02:31:35 | 383,645,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # Generated by Django 3.1 on 2021-06-17 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kenzie_starter_app', '0004_auto_20210617_1136'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['name', 'state']},
),
migrations.AddIndex(
model_name='project',
index=models.Index(fields=['name', 'state'], name='kenzie_star_name_499018_idx'),
),
]
| [
"felipe16sm@gmail.com"
] | felipe16sm@gmail.com |
10516b7519d41fd20fd759eba64a4f6122d08f7a | d40db4a1e8d2c80431b57373d746ae709bba50aa | /resource/py/webapp_helper.py | 99e66121c9093b1311c6b2f04c5ab5a564fab4d9 | [
"Apache-2.0"
] | permissive | dataiku/dss-plugin-ab-testing | dc3b122b5fb2c73c12cf6e757b802fcbb35f6378 | 3f8770bc3fdeab976639d9df2d82f4f1c32e7520 | refs/heads/master | 2023-07-21T10:38:10.318490 | 2021-03-01T17:14:24 | 2021-03-01T17:14:24 | 278,628,231 | 0 | 0 | Apache-2.0 | 2021-03-01T17:14:25 | 2020-07-10T12:29:42 | HTML | UTF-8 | Python | false | false | 509 | py | import dataiku
api_client = dataiku.api_client()
def do(payload, config, plugin_config, inputs):
project_key = dataiku.default_project_key()
project_managed_folders = api_client.get_project(project_key).list_managed_folders()
choices = [{
'label': '{} ({})'.format(mf['name'], mf['type']),
'value': mf['id']
} for mf in project_managed_folders]
choices.append({'label': 'Create new Filesystem folder...', 'value': 'create_new_folder'})
return {"choices": choices}
| [
"marine@DKU-MBP-marine.local"
] | marine@DKU-MBP-marine.local |
53b5a7771fd57d104ac7621b2ed2b6c9e1c01f96 | aa265e03e73f718d4008cfe30ada7ee32c852eec | /ABC_A/ABC033_A.py | 956f4fb0fc9fc60fd343a72f21a9d322326c5e91 | [
"MIT"
] | permissive | ryosuke0825/atcoder_python | 4fb9de9733cd9ef41c2ad9ad38b3f190f49d3ad5 | 52d037d0bc9ef2c721bf2958c1c2ead558cb0cf5 | refs/heads/master | 2023-03-11T22:47:56.963089 | 2023-03-05T01:21:06 | 2023-03-05T01:21:06 | 181,768,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | n = input()
if n.count(n[0]) == 4:
print("SAME")
else:
print("DIFFERENT")
| [
"ayakobon@gmail.com"
] | ayakobon@gmail.com |
def2fc41b751673fb8775b648f289d98ef9a0106 | 51f6443116ef09aa91cca0ac91387c1ce9cb445a | /Curso_Python_3_UDEMY/desafios/desafio_html.py | d5648ffde600f190c5bda1912b3dff47252566db | [
"MIT"
] | permissive | DanilooSilva/Cursos_de_Python | f449f75bc586f7cb5a7e43000583a83fff942e53 | 8f167a4c6e16f01601e23b6f107578aa1454472d | refs/heads/main | 2023-07-30T02:11:27.002831 | 2021-10-01T21:52:15 | 2021-10-01T21:52:15 | 331,683,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | def tag(tag, *args, **kwargs):
if 'html_class' in kwargs:
kwargs['class'] = kwargs.pop('html_class')
attrs = ''.join(f'{k}="{v}" ' for k, v in kwargs.items())
inner = ''.join(args)
return f'<{tag} {attrs}>{inner}</{tag}>'
if __name__ == '__main__':
print(tag('p',
tag('span', 'Curso de Python 3, por'),
tag('strong', 'Juracy Filho', id='jf'),
tag('span', ' e '),
tag('strong', 'Leonador Leitão', id='ll'),
tag('span', '.'),
html_class='alert')) | [
"dno.gomesps@gmail.com"
] | dno.gomesps@gmail.com |
e9f449bd7a483ae454100e835054532f0789264f | 6f8e46c84940be19aa33dc8abacaaf069181c482 | /discount.py | 041ddd2544ef8b1dcd8e9414f4f4189f2b83070f | [] | no_license | rmaryada-devops/rmaryada | e0d33e559996c921f5ba4838a68dbc302a7d3490 | 40c6097c9751051c97e2fb14d11d32b333b8e728 | refs/heads/master | 2020-05-22T11:45:23.515382 | 2019-09-06T16:20:22 | 2019-09-06T16:20:22 | 186,329,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | #/usr/bin/python
"""
Purpose : Cost of Grocery after Discount and GST
Products : Home Grocery
"""
GST = 12.5
DISCOUNT = 20
white_rice = 25 # PerKG
santoor_soap = 12 # PerPiece
print("Hello Sir Welcome to the store !")
Type_Of_Rice = input("Which Rice you want to buy :")
print("Sir , You have chosen :" , Type_Of_Rice)
Quantity_Of_Rice = int(input("Number of Kgs of Rice you want to buy:"))
print("Sir , You have chosen :" , Quantity_Of_Rice)
Type_Of_Soap = input("Which Soap you want to buy :")
print("Sir , You have chosen :" , Type_Of_Soap)
Num_Of_Soap = int(input("Number of Soaps you want to buy:"))
Total_Rice = ( white_rice * Quantity_Of_Rice )
print("Rice Price is :" ,Total_Rice)
Total_Soap = ( santoor_soap * Num_Of_Soap )
print("Soap Price is :" ,Total_Soap)
Total_Before_Discount = Total_Rice + Total_Soap
print("Total Before Discount is :" , Total_Before_Discount)
Total_After_Discount = Total_Before_Discount - ( Total_Before_Discount * ( DISCOUNT / 100 ))
print("Total After 20% Discount is : ", Total_After_Discount)
Final_Total_Gst = Total_After_Discount + ( Total_After_Discount * ( GST / 100 ))
print("Total Amount to paid after adding GST - 12.5% , Thanks for shopping !!! : ", Final_Total_Gst)
| [
"K26114@corp.root.nasd.com"
] | K26114@corp.root.nasd.com |
9a96a1af8ecc27b48b600a8999b24dc3d0df94fd | 501b09f8f9b034a8bbc27847abe82423e1aace67 | /Weather_API/src/weather_API/urls.py | 8e206f010683d4ade1fac533b7d0e7a6d43b877b | [] | no_license | Vivekgupta2227/Weather_API_Django | 9cf6cb434a28833fc3552d09c50fdb04adfa9b74 | f2d406675f8e02c7971f2cdcc1549c8e0cdb7bb2 | refs/heads/master | 2020-11-24T04:47:59.935278 | 2019-12-15T06:44:54 | 2019-12-15T06:44:54 | 227,971,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py |
from django.conf.urls import url
from django.urls import path
from .views import WeatherListAPIView,Get_or_Post,get_with_temperature
from rest_framework.routers import DefaultRouter
urlpatterns = [
url(r'^', view = WeatherListAPIView.as_view(), name = None),
url(r'^weather$', view = Get_or_Post.as_view(), name = None),
url(r'^weather/temperature$',view = get_with_temperature, name = None),
] | [
"noreply@github.com"
] | Vivekgupta2227.noreply@github.com |
c259087c7579124a1bf129ebea8b89faf2db13d6 | 458060b1616b61203e88c8e03eade3a786505a18 | /machinelearning/tensorflow/basic-operation-4.py | 91ecba5c81da505a7aff116007dc9c0b5896062f | [] | no_license | wangsqly0407/easypack | 3251b56c2611ebc7d696a9a24a037f090d0c5d24 | 49c24e75fba554e42f552de9118e83ed4d951041 | refs/heads/master | 2020-08-30T21:10:07.792761 | 2019-10-23T22:15:15 | 2019-10-23T22:15:15 | 218,490,221 | 1 | 0 | null | 2019-10-30T09:28:15 | 2019-10-30T09:28:15 | null | UTF-8 | Python | false | false | 1,331 | py | import tensorflow as tf
import numpy as np
import pandas as pd
import os
import csv
from sklearn import datasets
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
IRIS_TEST = "iris_test.csv"
print("##Example 1: csv file read: tf.contrib.learn.datasets.base.load_csv_with_header")
print(" filename: " + IRIS_TEST)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float32)
print test_set
print("\n##Example 2: csv file read: tf.data.TextLineDataset + make_one_shot_iterator")
print(" filename: " + IRIS_TEST)
datafiles = [IRIS_TEST]
#dataset = tf.data.TextLineDataset(IRIS_TEST)
dataset = tf.data.TextLineDataset(datafiles)
iterator = dataset.make_one_shot_iterator()
with tf.Session() as sess:
for i in range(5):
print(sess.run(iterator.get_next()))
print("\n##Example 3: iris dataset load: datasets.load_iris")
dataset = datasets.load_iris()
data = dataset.data[:,:4]
print(data)
print("\n##Example 4: csv module: ")
print(" filename: " + IRIS_TEST)
with open(IRIS_TEST,'r') as csvfile:
csvdata= csv.reader(csvfile)
for line in csvdata:
print line
print("\n##Example 5: pandas module: ")
print(" filename: " + IRIS_TEST)
csvdata = pd.read_csv(IRIS_TEST)
print("Shape of the data:" + str(csvdata.shape))
print(csvdata)
| [
"liumiaocn@outlook.com"
] | liumiaocn@outlook.com |
3cf060b15654c3e0d1da082d3010333669ddf5c1 | 9f0e8f9602542614f23e039e1b20e9b53afa1391 | /app.py | 9dc17a226658c04104d3a2491359b22f8506828f | [] | no_license | JK-More/old-car-price-prediciton-jk | 79d56ab9a187cff57a25a089aa9313b0a86775e7 | 3a03ae5f00ad009aa550b3871271dd07901dab70 | refs/heads/main | 2023-06-23T07:35:53.255508 | 2021-07-14T17:14:38 | 2021-07-14T17:14:38 | 386,013,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | # import requied packages
import jsonify
import requests
import pickle
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
from flask import Flask, render_template , request
# create flask object
app = Flask("car_price_model")
# load ml model which is store in .pkl format
model = pickle.load(open('car_price_model.pkl','rb'))
# route to which we need to send http request
@app.route('/',methods=['GET'])
# function that will return index.html
def Home():
return render_template('index.html')
# create odject for Standardscaler
standard_to = StandardScaler()
# HTTP post request method
@app.route("/predict",methods=['POST'])
# function to predict the result from ml model
def predict():
if request.method == 'POST':
#use request.method to get the data from html form through post method
Year = int(request.form['Year'])
Year = 2021 -Year
Present_price = float(request.form['Present_Price'])
Kms_Driven = int(request.form['Kms_Driven'])
Kms_Driven2 = np.log(Kms_Driven)
Owner = int(request.form['Owner'])
Fuel_Type_Petrol = request.form['Fuel_Type_Petrol']
# Fuel_type is categorised into petrol, diesel, cng . one-hot encoding is needed for it.
if(Fuel_Type_Petrol == 'Petrol'):
Fuel_Type_Petrol = 1
Fuel_Type_Diesel = 0
elif (Fuel_Type_Petrol == 'Diesel'):
Fuel_Type_Petrol = 0
Fuel_Type_Diesel = 1
else :
Fuel_Type_Petrol = 0
Fuel_Type_Diesel = 0
Seller_Type_Individual = request.form['Seller_Type_Individual']
# seller_type is categorised into individual and dealer
if (Seller_Type_Individual == 'Individual'):
Seller_Type_Individual = 1
else :
Seller_Type_Individual = 0
Transmission_Mannual = request.form['Transmission_Mannual']
# Transmission_Mannual is categorised into mannual and automatic
if (Transmission_Mannual == 'Mannual'):
Transmission_Mannual = 1
else:
Transmission_Mannual = 0
prediction = model.predict([[Present_price,Kms_Driven2,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Mannual]])
output = round(prediction[0],2)
#condition for invalid value and valid value
if output<0:
return render_template('index.html',prediction_text="Sorry you can't sell this car")
else:
return render_template('index.html',prediction_text="You can sell this car at {} lakhs.".format(output))
# Page display when no value are inserted.without any output.
else:
return render_template('index.html')
if __name__ == "__main__":
# to start web server
# debug : when i save something in my structure, server should restart again
app.run(debug=True) | [
"noreply@github.com"
] | JK-More.noreply@github.com |
198a70fffd6976125de8387f9dece7923a9b9298 | 679e1a2645084d65040a4e0c48fbdd4b7659295b | /tests/test_guides.py | 5a90b4895c887d90b212ad754c16f263cc94cb44 | [
"MIT"
] | permissive | macdaliot/riposte | 1e8634a443e9ad4ad41044ae4a2049c91bd4d6f8 | 4f51528e31636586c36218ed0993ba1f4b4a8ab8 | refs/heads/master | 2023-03-15T02:38:18.485232 | 2021-01-11T19:56:44 | 2021-02-11T20:04:48 | 347,116,138 | 0 | 0 | MIT | 2021-03-12T15:43:34 | 2021-03-12T15:43:24 | null | UTF-8 | Python | false | false | 2,514 | py | from typing import AnyStr, Dict, List, Set, Text
from unittest import mock
import pytest
from riposte import Riposte, guides
from riposte.exceptions import GuideError
@mock.patch("riposte.guides.ast")
def test_literal(mocked_ast):
value = "foo"
processed_value = guides.literal(value)
mocked_ast.literal_eval.assert_called_once_with(value)
assert processed_value == mocked_ast.literal_eval.return_value
@mock.patch("riposte.guides.ast")
def test_literal_exception(mocked_ast):
mocked_ast.literal_eval.side_effect = TypeError
with pytest.raises(GuideError):
guides.literal("foo")
def test_encode():
mocked_value = mock.Mock()
processed_value = guides.encode(mocked_value)
mocked_value.encode.assert_called_once_with()
assert processed_value == mocked_value.encode.return_value
def test_encode_exception():
mocked_value = mock.Mock()
mocked_value.encode.side_effect = UnicodeEncodeError
with pytest.raises(GuideError):
guides.encode(mocked_value)
@pytest.mark.parametrize(
("type_", "return_value"),
(
(str, tuple()),
(AnyStr, tuple()),
(Text, tuple()),
(bytes, (guides.encode,)),
(int, (guides.literal,)),
(Dict, (guides.literal,)),
(List, (guides.literal,)),
(Set, (guides.literal,)),
),
)
def test_get_guides(type_, return_value):
assert guides.get_guides(type_) == return_value
@mock.patch("riposte.guides.get_guides")
def test_extract_guides(mocked_get_guides):
type_hint = int
func = mock.Mock(__annotations__={"foo": type_hint})
extracted_guides = guides.extract_guides(func)
mocked_get_guides.assert_called_once_with(type_hint)
assert extracted_guides == {"foo": mocked_get_guides.return_value}
@pytest.mark.parametrize(
("input", "guide", "expected"),
(
("foobar", str, "foobar"),
("'foobar'", str, "foobar"),
("'foo bar'", str, "foo bar"),
("foobar", bytes, b"foobar"),
("'foobar'", bytes, b"foobar"),
("'foo bar'", bytes, b"foo bar"),
("1", int, 1),
("'1'", int, 1),
("\"[1, 'foo']\"", list, [1, "foo"]),
("\"{'foo': 'bar'}\"", dict, {"foo": "bar"}),
),
)
@mock.patch("builtins.input")
def test_guides(mocked_input, input, guide, expected, repl: Riposte):
mocked_input.return_value = "foobar " + input
@repl.command("foobar")
def handler_function(x: guide):
assert x == expected
repl._process()
| [
"f4wkes@gmail.com"
] | f4wkes@gmail.com |
26bf6114fd1d3d3bef96fdc4b9fda2fca22820e7 | 8a42e8ef22dd15a62cd407910de96b0873fe5252 | /schedule/dao.py | dd2268501e6fec0d215c3e2301839f2be7c3f4ee | [] | no_license | Vini-S/Fintek_Project | 9293300c798cb5e9f9b84d34972392b411849320 | 406b939832f4a3f03ff8645500502a98c4d7ca75 | refs/heads/master | 2020-06-19T11:31:37.413669 | 2019-07-13T08:16:41 | 2019-07-13T08:16:41 | 196,693,104 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,801 | py | from schedule.models import Users
from django.db import connection
class Dao:
def login(self, emailid, password):
cursor = connection.cursor()
query = ("select * from schedule_users where Email_id=%s and Password=%s")
values = (emailid, password)
cursor.execute(query,values)
a = cursor.fetchall()
b = list(a)
return b
class p_reset:
def reset(self,email):
cursor = connection.cursor()
query = ("select Email_id from schedule_users where Email_id=%s")
values = (email)
cursor.execute(query,values)
a = cursor.rowcount
return a
class p_update:
def update(self, usertype, emailid, password):
cursor = connection.cursor()
query = "update schedule_users set password=%s where usertype=%s and Email_id=%s"
values = (password, usertype, emailid)
cursor.execute(query,values)
connection.commit()
return 1
class Student:
def add(self, f_name, l_name, emailid, password, c_code):
cursor = connection.cursor()
query1 = "insert into schedule_student_view(s_f_name, s_l_name, s_Email_id, Password, c_code_id) values (%s,%s,%s,%s,%s)"
query2 = "insert into schedule_users(Usertype, Email_id, Password) values ('3', %s,%s)"
values1 = (f_name, l_name, emailid, password, c_code)
values2 = (emailid, password)
cursor.execute(query1,values1)
cursor.execute(query2,values2)
connection.commit()
return 1
def viewstudent(self):
cursor = connection.cursor()
query = "select schedule_student_view.s_id, schedule_student_view.s_f_name, schedule_student_view.s_l_name, schedule_student_view.s_Email_id, schedule_student_view.c_code_id,schedule_course.c_name from schedule_course INNER JOIN schedule_student_view ON schedule_student_view.c_code_id=schedule_course.c_code"
cursor.execute(query)
row = cursor.fetchall()
return row
def viewstudentbyid(self, sid): #(Auto fill form)
cursor = connection.cursor()
query = "select s_id, s_f_name, s_l_name, s_Email_id, c_code_id from schedule_student_view where s_id=%s"
values = (sid)
cursor.execute(query,values)
row = cursor.fetchall()
return row
def selectcourse(self): #(Drop Down list)
cursor = connection.cursor()
query = "select c_code, c_name from schedule_course"
cursor.execute(query)
lst1 = cursor.fetchall()
return lst1
def editstudent(self, f_name, l_name, emailid, c_code, sid):
cursor = connection.cursor()
query = "update schedule_student_view set s_f_name=%s, s_l_name=%s, s_Email_id=%s, c_code_id=%s where s_id=%s"
values = (f_name, l_name, emailid, c_code, sid)
cursor.execute(query,values)
connection.commit()
return 1
def deletestudent(self, email):
cursor = connection.cursor()
query1 = "delete from schedule_student_view where s_Email_id=%s"
query2 = "delete from schedule_users where Email_id=%s"
values1 =(email)
values2 = (email)
cursor.execute(query1,values1)
cursor.execute(query2,values2)
connection.commit()
return 1
def viewleave(self):
cursor = connection.cursor()
query = "select l_id, s_email, s_date, e_date, l_reason, s_status from schedule_student_leave"
cursor.execute(query)
row = cursor.fetchall()
return row
def acceptstudent(self, l_id):
cursor = connection.cursor()
query = "update schedule_student_leave set s_status='1' where l_id=%s"
values = (l_id)
cursor.execute(query,values)
connection.commit()
return 1
def rejectstudent(self, l_id):
cursor = connection.cursor()
query = "update schedule_student_leave set s_status='0' where l_id=%s"
values = (l_id)
cursor.execute(query,values)
connection.commit()
return 1
def searchstudent(self, name):
cursor = connection.cursor()
query = "select s_id, s_f_name, s_l_name, s_Email_id, c_code_id from schedule_student_view where s_f_name=%s"
values = (name)
cursor.execute(query, values)
row = cursor.fetchall()
return row
class Faculty:
    """CRUD helpers for faculty profiles, logins, and leave requests.

    All methods use the module-level `connection` and return either a
    fetchall() result or the sentinel 1 on successful writes.
    """

    def addf(self, f_name, l_name, emailid, password, phno):
        """Insert a faculty profile plus a matching login row (Usertype '2'); return 1."""
        cursor = connection.cursor()
        # SECURITY(review): the password is stored in plain text here.
        query1 = "insert into schedule_faculty_view(f_f_name, f_l_name, f_Email_id, Password, f_phno) values (%s,%s,%s,%s,%s)"
        query2 = "insert into schedule_users(Usertype, Email_id, Password) values ('2', %s, %s)"
        values1 = (f_name, l_name, emailid, password, phno)
        values2 = (emailid, password)
        cursor.execute(query1, values1)
        cursor.execute(query2, values2)
        connection.commit()
        return 1

    def viewfaculty(self):
        """Return all faculty rows (id, first, last, email, phone)."""
        cursor = connection.cursor()
        query = "select f_id, f_f_name, f_l_name, f_Email_id, f_phno from schedule_faculty_view "
        cursor.execute(query)
        row1 = cursor.fetchall()
        return row1

    def viewfacultybyid(self, fid):
        """Fetch one faculty row by id (used to pre-fill the edit form)."""
        cursor = connection.cursor()
        query = "select f_id, f_f_name, f_l_name, f_Email_id, f_phno from schedule_faculty_view where f_id=%s"
        values = (fid,)  # bug fix: one-element tuple, was a bare scalar
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def editfaculty(self, f_name, l_name, emailid, phno, fid):
        """Update a faculty profile in place; return 1."""
        cursor = connection.cursor()
        query = "update schedule_faculty_view set f_f_name=%s, f_l_name=%s, f_Email_id=%s, f_phno=%s where f_id=%s"
        values = (f_name, l_name, emailid, phno, fid)
        cursor.execute(query, values)
        connection.commit()
        return 1

    def deletefaculty(self, email):
        """Delete both the faculty profile and its login row by email; return 1."""
        cursor = connection.cursor()
        query1 = "delete from schedule_faculty_view where f_Email_id=%s"
        query2 = "delete from schedule_users where Email_id=%s"
        values1 = (email,)  # bug fix: one-element tuples
        values2 = (email,)
        cursor.execute(query1, values1)
        cursor.execute(query2, values2)
        connection.commit()
        return 1

    def fviewleave(self):
        """Return every faculty leave request."""
        cursor = connection.cursor()
        query = "select l_id, f_email, s_date, e_date, l_reason, f_status from schedule_faculty_leave"
        cursor.execute(query)
        row = cursor.fetchall()
        return row

    def selectfaculty(self):
        """Return (email, first name) pairs for a drop-down list."""
        cursor = connection.cursor()
        query = "select f_Email_id, f_f_name from schedule_faculty_view"
        cursor.execute(query)
        lst3 = cursor.fetchall()
        return lst3

    def acceptfaculty(self, l_id):
        """Mark the faculty leave request `l_id` as accepted; return 1."""
        cursor = connection.cursor()
        query = "update schedule_faculty_leave set f_status='1' where l_id=%s"
        values = (l_id,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        connection.commit()
        return 1

    def rejectfaculty(self, l_id):
        """Mark the faculty leave request `l_id` as rejected; return 1."""
        cursor = connection.cursor()
        query = "update schedule_faculty_leave set f_status='0' where l_id=%s"
        values = (l_id,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        connection.commit()
        return 1

    def searchfaculty(self, name):
        """Return faculty rows whose first name equals `name` exactly."""
        cursor = connection.cursor()
        query = "select f_id, f_f_name, f_l_name, f_Email_id, f_phno from schedule_faculty_view where f_f_name=%s"
        values = (name,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row
class Course:
    """CRUD helpers for course records (schedule_course)."""

    def addc(self, c_code, name):
        """Insert a new course; return 1."""
        cursor = connection.cursor()
        query = "insert into schedule_course(c_code, c_name) values (%s, %s)"
        values = (c_code, name)
        cursor.execute(query, values)
        connection.commit()
        return 1

    def viewcourse(self):
        """Return (code, name) for every course."""
        cursor = connection.cursor()
        query = "select c_code, c_name from schedule_course"
        cursor.execute(query)
        row = cursor.fetchall()
        return row

    def viewcoursebyid(self, c_code):
        """Fetch one course row by its code."""
        cursor = connection.cursor()
        query = "select * from schedule_course where c_code=%s"
        values = (c_code,)  # bug fix: one-element tuple, was a bare scalar
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def selectmodule(self, c_code_id=None):
        """List (m_id, m_name) pairs, optionally restricted to one course."""
        cursor = connection.cursor()
        if c_code_id:
            query = "select m_id, m_name from schedule_chapters where c_code_id=%s"
            values = (c_code_id,)  # bug fix: one-element tuple
            cursor.execute(query, values)
        else:
            query = "select m_id, m_name from schedule_chapters"
            cursor.execute(query)
        lst1 = cursor.fetchall()
        return lst1

    def editcourse(self, name, c_code):
        """Rename the course identified by `c_code`; return 1."""
        cursor = connection.cursor()
        query = "update schedule_course set c_name=%s where c_code=%s"
        values = (name, c_code)
        cursor.execute(query, values)
        connection.commit()
        return 1

    def deletecourse(self, c_code):
        """Delete the course identified by `c_code`; return 1."""
        cursor = connection.cursor()
        query = "delete from schedule_course where c_code=%s"
        values = (c_code,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        connection.commit()
        return 1

    def searchcourse(self, name):
        """Return course rows whose name equals `name` exactly."""
        cursor = connection.cursor()
        query = "select c_code,c_name from schedule_course where c_name=%s"
        values = (name,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row
class Module:
    """CRUD helpers for module (chapter) records and their sessions."""

    def addm(self, data):
        """Bulk-insert session rows; `data` is a sequence of
        (m_id_id, Session_key, Session_value) tuples. Returns 1."""
        cursor = connection.cursor()
        query = "insert into schedule_modulepm (m_id_id, Session_key, Session_value) values (%s, %s, %s)"
        cursor.executemany(query, data)
        connection.commit()
        return 1

    def viewmodule(self):
        """Return all module rows."""
        cursor = connection.cursor()
        query = "select * from schedule_chapters"
        cursor.execute(query)
        row = cursor.fetchall()
        return row

    def viewmodulebyid(self, mid):
        """Fetch one module row by id."""
        cursor = connection.cursor()
        query = "select * from schedule_chapters where m_id=%s"
        values = (mid,)  # bug fix: one-element tuple, was a bare scalar
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def editmodule(self, name, mid):
        """Rename the module identified by `mid`; return 1."""
        cursor = connection.cursor()
        query = "update schedule_chapters set m_name=%s where m_id=%s"
        values = (name, mid)
        cursor.execute(query, values)
        connection.commit()
        return 1

    def deletemodule(self, mid):
        """Delete the module identified by `mid`; return 1."""
        cursor = connection.cursor()
        query = "delete from schedule_chapters where m_id=%s"
        values = (mid,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        connection.commit()
        return 1
class Assign:
    """Helpers for linking modules to courses (schedule_coursemodule)."""

    def assignm(self, c_code, mid):
        """Attach module `mid` to course `c_code`; return 1."""
        cursor = connection.cursor()
        query = "insert into schedule_coursemodule(c_code_id,m_id_id) values(%s,%s)"
        values = (c_code, mid)
        cursor.execute(query, values)
        connection.commit()
        return 1

    def selectcourse(self):
        """Course drop-down: (code, name) pairs."""
        cursor = connection.cursor()
        query = "select c_code, c_name from schedule_course"
        cursor.execute(query)
        lst1 = cursor.fetchall()
        return lst1

    def selectmodule(self):
        """Module drop-down: (id, name) pairs."""
        cursor = connection.cursor()
        query = "select m_id, m_name from schedule_chapters"
        cursor.execute(query)
        lst2 = cursor.fetchall()
        return lst2

    def viewassign(self):
        """Return every course/module link joined with both display names."""
        cursor = connection.cursor()
        query = "select schedule_coursemodule.cm_id, schedule_coursemodule.c_code_id, schedule_course.c_name,schedule_coursemodule.m_id_id,schedule_chapters.m_name from schedule_coursemodule inner join schedule_course on schedule_course.c_code = schedule_coursemodule.c_code_id inner join schedule_chapters on schedule_chapters.m_id = schedule_coursemodule.m_id_id"
        cursor.execute(query)
        row = cursor.fetchall()
        return row

    def viewassignbyid(self, cmid):
        """Fetch one course/module link by its id."""
        cursor = connection.cursor()
        query = "select * from schedule_coursemodule where cm_id=%s"
        values = (cmid,)  # bug fix: one-element tuple, was a bare scalar
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def editassign(self, c_code, mid, cmid):
        """Repoint an existing link at a new course/module pair; return 1."""
        cursor = connection.cursor()
        query = "update schedule_coursemodule set c_code_id=%s, m_id_id=%s where cm_id=%s"
        values = (c_code, mid, cmid)
        cursor.execute(query, values)
        connection.commit()
        return 1

    def deleteassign(self, cmid):
        """Delete the link identified by `cmid`; return 1."""
        cursor = connection.cursor()
        query = "delete from schedule_coursemodule where cm_id=%s"
        values = (cmid,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        connection.commit()
        return 1
class Session:
    """Read helpers for course/module session data."""

    def selectcourse(self, keyword):
        """Return course rows whose name equals `keyword` exactly."""
        cursor = connection.cursor()
        query = "select c_code,c_name from schedule_course where c_name=%s"
        values = (keyword,)  # bug fix: one-element tuple, was a bare scalar
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def viewmodulebycode(self, c_code):
        """List (id, name) of the modules belonging to course `c_code`."""
        cursor = connection.cursor()
        query = "select m_id,m_name from schedule_chapters where c_code_id=%s"
        values = (c_code,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def modulesession(self, module):
        """Return the number of sessions recorded for one module."""
        cursor = connection.cursor()
        query = "select count(*) from schedule_modulepm where m_id_id=%s"
        values = (module,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        result = cursor.fetchall()
        return result

    def sessiondisplay(self, mid):
        """Return all session key/value pairs for one module."""
        cursor = connection.cursor()
        query = "select Session_key,Session_value,m_id_id from schedule_modulepm where m_id_id=%s"
        values = (mid,)  # bug fix: one-element tuple
        cursor.execute(query, values)
        row = cursor.fetchall()
        return row

    def session_display(self):
        """Return session key/value pairs for every module."""
        cursor = connection.cursor()
        query = "select Session_key,Session_value,m_id_id from schedule_modulepm"
        cursor.execute(query)
        row = cursor.fetchall()
        return row
| [
"noreply@github.com"
] | Vini-S.noreply@github.com |
32e2e8da59fbfd97991dcd40e04e00e6e197a6ad | e02a97085e3aa5a699c5a4e8025d03511a92d9c9 | /src/learner.py | 356131ce0989edadda8412880fc93f35f46fa466 | [
"MIT"
] | permissive | vhientran/bionlp17 | 5f2698a4be64e99583f7a40a969da67ea9bfea69 | d2a0d6fdce48760ca456a19d9de7f44b31f1d4a0 | refs/heads/master | 2023-03-17T18:29:50.765763 | 2017-10-25T18:40:31 | 2017-10-25T18:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | from __future__ import unicode_literals, print_function
import glog
from sklearn import linear_model
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import classification_report
class LogisticRegressionLearner(object):
    """Bag-of-features logistic regression on top of scikit-learn.

    Couples a DictVectorizer (feature name -> column index mapping, kept in
    `self.vocal`) with a LogisticRegression model trained by the SAG solver.
    Examples are plain lists of feature names; they are converted to
    {feature: count} dictionaries before vectorisation.
    """

    def __init__(self, name, warm_start=True):
        self.vocal = DictVectorizer()
        self.model = linear_model.LogisticRegression(warm_start=warm_start,
                                                     solver='sag',
                                                     max_iter=200,
                                                     verbose=0,
                                                     penalty='l2',
                                                     n_jobs=4)

    @staticmethod
    def convert_list_to_dict(example):
        """Collapse a list of feature names into a {feature: count} dict."""
        counts = {}
        for feature in example:
            counts[feature] = counts.get(feature, 0) + 1
        return counts

    @staticmethod
    def dictionarize_examples(examples):
        """Lazily map each feature list to its count dictionary."""
        return (LogisticRegressionLearner.convert_list_to_dict(example)
                for example in examples)

    def learn(self, train_examples, train_labels, max_iter=None):
        """Fit the vectorizer and model; optionally override max_iter."""
        feature_dicts = self.dictionarize_examples(train_examples)
        matrix = self.vocal.fit_transform(feature_dicts)
        if max_iter is not None:
            self.model.max_iter = max_iter
        self.model.fit(matrix, train_labels)
        glog.info('Iter: {}'.format(self.model.n_iter_))
        glog.info('Intercept: {}'.format(self.model.intercept_))

    def predict(self, test_examples):
        """Return predicted labels as a plain list."""
        matrix = self.vocal.transform(self.dictionarize_examples(test_examples))
        return list(self.model.predict(matrix))

    def predict_one(self, test_example):
        """Predict the label of a single example."""
        matrix = self.vocal.transform(self.dictionarize_examples([test_example]))
        return self.model.predict(matrix)[0]

    def predict_prob(self, test_examples):
        """Per example, return a list of (class, probability) tuples."""
        matrix = self.vocal.transform(self.dictionarize_examples(test_examples))
        probabilities = self.model.predict_proba(matrix)
        return [list(zip(self.model.classes_, row)) for row in probabilities]

    def predict_one_prob(self, test_example):
        """Return (class, probability) tuples for one example."""
        matrix = self.vocal.transform(self.dictionarize_examples([test_example]))
        probabilities = self.model.predict_proba(matrix)
        return list(zip(self.model.classes_, probabilities[0]))

    def predict_raw_prob(self, test_examples):
        """Return the raw predict_proba matrix."""
        matrix = self.vocal.transform(self.dictionarize_examples(test_examples))
        return self.model.predict_proba(matrix)

    def evaluate(self, test_examples, test_labels):
        """Print a classification report against the given labels."""
        predictions = self.predict(test_examples)
        print(classification_report(test_labels, predictions))
"leemagpie@gmail.com"
] | leemagpie@gmail.com |
2b49f84356e7df9debe20c8e95d2d2ecc044ca6e | 7dec7703429bf5fc2b108a7b36ac32f0a39220c7 | /NN_1/general.py | 698ac70f30ad17fd57f681ec8fc025482da20bbb | [] | no_license | agupta7/malware-webcrawler | 05132769ca0072fa34bddd624eef4aa2b7eb1673 | 608c2949fd875c5c832c1af02902ba1ec7002038 | refs/heads/master | 2021-08-08T20:01:26.895590 | 2017-11-11T01:45:38 | 2017-11-11T01:45:38 | 103,218,732 | 3 | 1 | null | 2017-10-05T22:15:41 | 2017-09-12T03:42:40 | Python | UTF-8 | Python | false | false | 10,020 | py | import numpy as np
import csv
import operator
import math
#################### load Data from csv file
# Load the CSV at `filename` and return a class-balanced (X, Y) pair.
# Per row: column 0 is an identifier (dropped), column 1 is the +/-1 label,
# and the remaining columns become a float64 feature vector.
# NOTE(review): Python 2 source -- print statements and opening the CSV in
# binary mode ("rb") both predate Python 3.
def Load_Data (filename):
X = []
Y = []
with open (filename, "rb") as file:
reader = csv.reader (file)
for line in reader:
Y.append (float(line[1]))
line.pop(0); line.pop(0) ;
X.append (np.array(line,dtype = np.double))
print "Data loaded with shape: ",np.shape(X),
#count positives and negatives !
pos = Y.count(1)
neg = Y.count (-1)
Break = len(X) - min (neg,pos)*2 # find number of majority label instances
print " with pos: %d and neg: %d"%(pos,neg) # more is the class of majority instances
# `more` is the majority-class label; a perfectly balanced set needs no
# down-sampling, so return immediately.
if neg > pos: more = -1
elif pos>neg: more = 1
else : return X,Y
count = 0
'''
indices = [] # insert indices from last to first to make it easier to delete from array
for indx in range (len(Y)-1,0,-1):
if Y[indx] == more and all (X[indx] ==0 ):
X.pop(indx);Y.pop(indx)
count += 1
indices = [] # insert indices from last to first to make it easier to delet$
for indx in range (len(Y)-1,0,-1):
if Y[indx] == more:
X.pop(indx);Y.pop(indx)
count += 1
if count == Break:
break
for indx in range (len(Y)-1,0,-1):
if all ( X[indx] == 0):
X.pop(indx);Y.pop(indx)
'''
# Down-sample the majority class: drop the first `Break` instances whose
# label equals `more`, leaving both classes the same size.
count = 0 ;
for i in range (Break):
indx = Y.index (more)
X.pop(indx);Y.pop(indx)
count += 1
if count == Break:
break
pos = Y.count(1)
neg = Y.count (-1)
print " with pos: %d and neg: %d"%(pos,neg) # more is the class of majority instances
print "data's new shape is: ",np.shape (X)
X = np.array(X)
Y = np.array(Y)
return X , Y
##################### this function computes the accuracy of the model
def Accuracy(output, doutput):
    """Score signed predictions against +/-1 labels.

    A prediction of exactly 0 counts as "unknown" and is neither correct
    nor an error.  Returns (accuracy_percent, FP, FN, TP, TN) where the
    accuracy denominator is the full length of `output`.
    """
    correct = fp = fn = tp = tn = unknown = 0
    for indx, prediction in enumerate(output):
        truth = doutput[indx]
        if prediction == 0:
            unknown += 1
        elif truth == 1:
            if prediction > 0:
                correct += 1
                tp += 1
            elif prediction < 0:
                fn += 1
        elif truth == -1:
            if prediction < 0:
                correct += 1
                tn += 1
            elif prediction > 0:
                fp += 1
    accuracy = correct * 100.0 / len(output)
    return accuracy, fp, fn, tp, tn
#################### this function returns the std for a single data instance
def Std(instance):
    """Population standard deviation of a 1-D sequence of numbers."""
    size = float(len(instance))
    mean = sum(instance) / size
    variance = sum(math.pow(value - mean, 2) for value in instance) / size
    return math.sqrt(variance)
##################### random shuffel for samples
def shuffle_in_unison_scary(a, b):
    """Shuffle two arrays in place with the identical permutation.

    Works by snapshotting the global NumPy RNG state before shuffling `a`
    and restoring it before shuffling `b`, so both draws are the same.
    Returns (a, b).
    """
    saved_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(saved_state)  # replay the same permutation on b
    np.random.shuffle(b)
    print("Shuffled")
    return a, b
#################### split data into training and testing, given the test data ratio
def split(ratio_tst, X, Y):
    """Split X/Y so the first int(ratio_tst * len(Y)) rows become the test set.

    Returns (Xtest, Xtrain, Ytest, Ytrain).
    """
    # Bug fix: np.split requires integer indices; the original passed the
    # float ratio_tst * len(Y), which modern NumPy rejects.
    cut = int(ratio_tst * len(Y))
    Xtest, X = np.split(X, [cut])
    Ytest, Y = np.split(Y, [cut])
    print("splitted")
    return Xtest, X, Ytest, Y
###################### F-score feature Selection function
def F_score(X):
    """Return a per-feature Fisher-style score for the 2-D array X.

    For each feature j the score is mean(X[:, j])**2 divided by the sample
    variance of the non-negative entries of column j.  Each column must
    contain at least two non-negative entries, otherwise this divides by
    zero (same precondition as the original code).
    """
    stats = []  # per feature: [mean of non-negative entries, their count]
    Avg_x = np.sum(X, axis=0) / len(X)
    for feat in range(len(X[0])):
        total = 0.0
        n_pos = 0
        for indx in range(len(X)):
            if X[indx][feat] >= 0:
                total += X[indx][feat]
                n_pos += 1
        stats.append([total / n_pos, n_pos])
    sum_dif = []
    for feat in range(len(X[0])):
        deviations = []
        for indx in range(len(X)):
            if X[indx][feat] >= 0:
                # Bug fix: deviations are taken from the column MEAN
                # (stats[feat][0]); the original subtracted the COUNT
                # (index 1), which is statistically meaningless.
                deviations.append(math.pow(X[indx][feat] - stats[feat][0], 2))
        sum_dif.append(sum(deviations))
    F = []
    for feat in range(len(X[0])):
        numerator = math.pow(Avg_x[feat], 2)
        denominator = (1.0 / (stats[feat][1] - 1)) * sum_dif[feat]
        F.append(numerator / denominator)
    # Bug fix: the computed scores were previously discarded (no return).
    return F
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
def Variance_Feature_Select(X):
sel = VarianceThreshold(threshold=(.85 * (1 - .85)))
X = sel.fit_transform(X)
print "featured Extracted: ",X.shape
return X
def SVM_Feature_Select(X, Y, Xtest, Ytest, c):
    """Fit an L1-penalised linear SVM (primal form) and return the model.

    Xtest/Ytest are accepted for interface compatibility but are unused;
    the scoring/SelectFromModel steps were already commented out upstream.
    """
    fitted = LinearSVC(C=c, penalty="l1", dual=False).fit(X, Y)
    return fitted
def Normalization (X):
print "Normalizing Data:\n"
Mean = np.zeros(len(X[0]) ,dtype=float)
Dev = np.zeros(len(X[0]) ,dtype=float)
# normalizing on each feature
for i in range (len(X[0])):
Mean[i] = np.mean (X[:,i])
Dev[i] = Std(X[:,i])
X = ( X - Mean)/Dev # Normalizing the data
print "MEAN: ", Mean
print "---------------------------------"
print "DEV: ", Dev
return X
def standarize(X):
    """Scale every row to unit L2 norm; all-zero rows pass through unchanged."""
    scaled_rows = []
    for row in X:
        norm = np.sqrt(np.sum(np.square(row)))
        scaled_rows.append(row / norm if norm != 0 else row)
    return np.array(scaled_rows)
def Normalization_min_max(X):
    """Column-wise min-max scaling of a 2-D array into [0, 1].

    Constant columns divide by zero, exactly as in the original.
    """
    n_features = len(X[0])
    x_min = np.zeros(n_features, dtype=float)
    x_max = np.zeros(n_features, dtype=float)
    for j in range(n_features):
        x_min[j] = min(X[:, j])
        x_max[j] = max(X[:, j])
    return (X - x_min) / (x_max - x_min)
from neupy import algorithms, estimators, environment
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
'''
def GRNN_train (X,Y,test, sigma):
num_train_inst = len(X)
num_features = len(X[0])
S1 = np.zeros(len(test))
S2 = np.zeros(len(test))
# sigma = 0.11853 #1.41
# X = add_dimension (X)
# test = add_dimension (test)
# print "SHAPES:, ",np.shape(X), np.shape(test)
for X_indx , instance in enumerate(X):
# sigma = general.Std (instance)#calling the function to compute std for current instance
instance = np.array(instance)
Diff_sqr_sum = np.zeros(len(test))
for tst_indx, t_inst in enumerate(test): # computing the sum of sqaure differences for all instances
diff= instance - np.array(t_inst)# fi differences
diff *= diff # diff squared
Diff_sqr_sum[tst_indx] = sum (diff)# sum the differences for each test instance
hft_qi = (-0.5*Diff_sqr_sum)/(sigma*sigma) # g = - norm^2/2*sigma^2
hft_qi = np.exp(hft_qi) # hf_i = exp(g )
S1 +=hft_qi # Sum( hf_i (t_q , t_i) )
S2 += hft_qi*Y[X_indx] # Sum( hf_i (t_q , t_i) )*d_i
Output = S2/S1
# print "ouptut: ", Output
return Output
def K_Folds (K, X,Y, sigma):
# print "K fold validation with K= %d"%K
Range = len(X)/K
cnt = 1 ;
GRNN_acc = 0
GRNN_acc1 = 0
GRNN_acc2 = 0
FP = 0; FN = 0
FP1 = 0; FN1 = 0
FP2 = 0; FN2 = 0
for indx in range (0,len(X) , Range):
# print "Test fold num %d "%(cnt),
Xtest = np.array(X[indx:indx+Range])
Ytest = np.array(Y[indx:indx+Range])
Xtrain = np.concatenate (( X[0:indx], X[indx+Range:len(X)] ))
Ytrain =np.concatenate ((Y[0:indx],Y[indx+Range:len(Y)]))
# print " test Size: ", np.shape(test_D), " train size: ",np.shape(train_D)
# predifined GRNN
NN = algorithms.GRNN (std=0.1,verbose=False)
model = NN.train (Xtrain,Ytrain)
# ADA
dt = DecisionTreeClassifier()
bdt = AdaBoostClassifier(base_estimator = dt,n_estimators=200, learning_rate=5)
ADA = bdt.fit (Xtrain,Ytrain)
out1 = []; out2 = []
for item in Xtest:
# out1.append (NN.predict ([item]) )
out2.append (bdt.predict ([item]) )
out1 = np.array(out1) ;#out1 = out1.reshape(len(out1),1)
acc, fp, fn = Accuracy(out1,Ytest)
FP1 += fp ; FN1 += fn; GRNN_acc1 += acc;
out2 = np.array(out2) ;out2 = out2.reshape(len(out2),1)
acc, fp, fn = Accuracy(out2,Ytest)
FP2 += fp ; FN2 += fn; GRNN_acc2 += acc;
output = GRNN_train(Xtrain,Ytrain,Xtest, sigma)
acc, fp, fn = Accuracy(output,Ytest)
FP += fp ; FN += fn; GRNN_acc += acc;
cnt+=1
print "Acuuracy: %f SUM OF FP %d , FN %d" %(GRNN_acc1/K,FP1,FN1)
print "Acuuracy: %f SUM OF FP %d , FN %d" %(GRNN_acc2/K,FP2,FN2)
print "Acuuracy: %f SUM OF FP %d , FN %d" %(GRNN_acc/K,FP,FN)
return 0,0,0#GRNN_acc/K , FP, FN
'''
def Main():
    """Experiment driver: load the dataset, shuffle, scale, cross-validate.

    NOTE(review): K_Folds is only defined inside the large commented-out
    block above, so calling Main() raises NameError until it is restored.
    """
    X, Y = Load_Data("malware_dataset.csv")
    X, Y = shuffle_in_unison_scary(X, Y)
    X = Normalization_min_max(X)
    # sigma = 0.25798202427  (earlier tuning value)
    sigma = 0.224301902592
    Avg_acc, Avg_FP, Avg_FN = K_Folds(len(X), X, Y, sigma)


# Main()
| [
"hebahlawneh992@gmail.com"
] | hebahlawneh992@gmail.com |
f6549b1f98b57c965fe16910c6aff30ae73b46ff | 94bf29ba05cc2df1de7252878f1ac6715bd4ac6f | /pythonCrashCourse/chapter2/practice2_1.py | cce665870269e76adc1e7acaed47fb2e1e8802bc | [] | no_license | ZhuXingWuChang/CollegePython | c0d1cad7c9a73a615726fb35f47727a3e8bbf47f | a3c46302b1f2fc4a17c8c0a6df46a8e1f9961433 | refs/heads/main | 2023-06-29T03:52:08.274271 | 2021-08-11T12:12:21 | 2021-08-11T12:12:21 | 343,694,490 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | variable = "Hello World"
print(variable) | [
"2061007923@qq.com"
] | 2061007923@qq.com |
1bb8ee9ffb852419667b955dfb804425d8479371 | 2ed8728b70ae18a581a5985471b539a3bb3c3af6 | /TK/EXAM_FUNCTION.py | 1609a481debf8831d08d57369301a24464bf7cf9 | [] | no_license | zasdx1/python | f9fe82ba68ae465a136cc2a2ce03a5db4ffe1f80 | 4b7a65cd99480f035dd21a83fcab93165bb1d6f9 | refs/heads/master | 2022-07-28T12:15:43.441771 | 2019-08-28T14:27:49 | 2019-08-28T14:27:49 | 184,158,350 | 0 | 0 | null | 2022-07-07T03:11:31 | 2019-04-29T23:29:04 | Python | UTF-8 | Python | false | false | 2,359 | py | '''
def 함수명(매개변수):
<수행할문장1>
<수행할문장2>
return 결과값
def add(a, b):
return a+b
a = 3
b = 4
c = add(a, b)
print(c)
# 입력값이 몇 개 일지 모를 때 --> *매개변수
def add_money(*args):
result = 0
for i in args:
result = result + i
return result
result = add_money(1,2,3,4,5)
print(result)
# 여러개의 입력을 처리할 때
def add_mul(choice, *args):
if choice == "add":
result = 0
for i in args:
result += i
elif choice == "mul":
result = 1
for i in args:
result *= i
return result
result = add_mul('add', 1,2,3,4,5)
print(result)
result = add_mul('mul', 1,2,3,4,5)
print(result)
#키워드 파라미터(Keyword arguments) : ** 매개변수명 앞에 붙이면 매개변수는 딕셔너리가 되고 모든 key=value 형태의 입력 인수가 그 딕셔너리에 저장됨
def print_kwargs(**kwargs):
print(kwargs)
print_kwargs(a=1)
print_kwargs(name='foo', age='3')
def add_and_mul(a, b):
return a+b, a*b
result = add_and_mul(3, 4)
print(result)
'''
# Q1. Print, one per line, every "self number" (a number with no generator)
# less than or equal to 10000.  With d(n) = n + (sum of n's digits), n is a
# self number when no m satisfies d(m) == n.  Examples from the exercise:
#   d(1)  = 1 + 1 = 2          -> 2 is not a self number
#   d(10) = 10 + 1 + 0 = 11    -> 11 is not a self number
# (Work in progress: only a single hard-coded step is computed below.)
import math

n = 101
mok = math.trunc(n / 10)  # NOTE(review): this is n // 10, not a digit sum
nmg = n % 10              # last digit of n
selfnum = n + mok + nmg
print(n, mok, nmg, selfnum)
# TODO: walk every digit by repeatedly dividing by 10
| [
"ltg1382@gmail.com"
] | ltg1382@gmail.com |
12882036f4128edacd59fb2f3e15e640ab641ba0 | a058f2b24ad41f9c6a84b13f3480d1f237e66d9f | /FixedEffectModelPyHDFE/AlphaStdErr.py | 14f43262fb75ea89adc72317d39c558c72fc5cac | [
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Mengxi-20/FixedEffectModel | b8fe5f121dadc3e8597c064845bd2adfa457e4f1 | 039a902b34310e652bbd3a21ba0ec663f175dd38 | refs/heads/master | 2023-05-15T02:46:55.824370 | 2021-06-11T07:01:07 | 2021-06-11T07:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,147 | py | """
This module computes the standard error of each fixed effect. Two functions are included:
Function alpha_std0 is a traditional function that calculates the standard error sequentially.
Function alpha_std uses a bootstrap method and allows up to 50 computations to run in parallel, which is faster than alpha_std0.
Thus we recommend using alpha_std.
"""
import numpy as np
import statsmodels.api as sm
from multiprocessing import Pool
from FixedEffectModelPyHDFE.Bootstrap import bootstrap
from FixedEffectModelPyHDFE.DemeanDataframe import demean_dataframe
from FixedEffectModelPyHDFE.EstimableCheck import is_estimable, projection2df
from FixedEffectModelPyHDFE.Operation import do_operation
# Sequential (single-process) bootstrap of a fixed-effect contrast.
def alpha_std0(result, formula, sample_num=100):
"""
Traditional (sequential) bootstrap estimate of a fixed-effect contrast.
:param result: result of model using function ols_high_d_category
:param formula: equation of relative effect of two fixed variables, like "id_1 - id_2"
:param sample_num: number of bootstrap samples
:return: estimation of relative effect of two fixed variables and its standard error
"""
data_df = result.data_df
demean_df = result.demeaned_df
coeff = result.params.values
consist_col = result.consist_col
category_col = result.category_col
out_col = result.out_col
# Build one dummy name per (category column, level) pair, e.g. "id1".
index_name = []
e = len(category_col)
for i in range(e):
m = np.unique(data_df[category_col[i]].values)
for l in m:
name = category_col[i] + str(l)
index_name.append(name)
copy_list = consist_col.copy()
copy_list.extend(category_col)
alpha = np.zeros(sample_num, dtype=np.float64)
n = data_df.shape[0]
new_df = data_df[copy_list].copy()
# Fitted values = observed outcome minus the final residuals.
y_pred = data_df[out_col[0]].values-demean_df['resid'].values
y = data_df[out_col[0]]
b_x = np.dot(coeff, data_df[consist_col].values.T)
# true_resid isolates the fixed-effect component: y - Xb - resid.
ori_resid = y-b_x
true_resid = ori_resid-demean_df['resid']
true_alpha = projection2df(new_df, true_resid, category_col, index_name)
demeaned_resid = demean_df['resid'].values
# Point estimate of the user-specified contrast (e.g. "id_1 - id_2").
final_result = do_operation(true_alpha, formula)
ori_x = new_df[consist_col].values.T
print(final_result)
if not is_estimable(new_df, true_resid, category_col, formula, index_name):
print('the function you defined is not estimable')
else:
# Residual bootstrap: resample residuals, rebuild y, refit, re-evaluate
# the contrast; the spread of `alpha` gives the standard error.
for i in range(sample_num):
sample_resid = np.random.choice(demeaned_resid, n)
y_new = y_pred + sample_resid
new_df['y_new'] = y_new
demeaned_new = demean_dataframe(new_df, ['y_new'], category_col)
model = sm.OLS(demeaned_new['y_new'], demean_df[consist_col])
result = model.fit()
y = new_df['y_new'].values
b_x = np.dot(result.params.values, ori_x)
b_array = y-b_x
pb_array = result.resid
target_array = b_array-pb_array
alpha_df = projection2df(new_df, target_array, category_col, index_name)
result = do_operation(alpha_df, formula)
alpha[i] = result
return final_result, np.std(alpha)
# Parallel bootstrap of a fixed-effect contrast (recommended over alpha_std0).
def alpha_std(result, formula, sample_num=100):
"""
Parallel bootstrap estimate of a fixed-effect contrast (up to 50 workers).
:param result: result of model using function ols_high_d_category
:param formula: equation of relative effect of two fixed variables, like "id_1 - id_2"
:param sample_num: number of bootstrap samples
:return: estimation of relative effect of two fixed variables and its standard error
"""
data_df = result.data_df
demean_df = result.demeaned_df
coeff = result.params.values
consist_col = result.consist_col
category_col = result.category_col
out_col = result.out_col
# Dummy names use an underscore separator here ("id_1"), unlike alpha_std0.
index_name = []
e = len(category_col)
for i in range(e):
m = np.unique(data_df[category_col[i]].values)
for l in m:
name = category_col[i] + '_' + str(l)
index_name.append(name)
copy_list = consist_col.copy()
copy_list.extend(category_col)
alpha = np.zeros(sample_num, dtype=np.float64)
n = data_df.shape[0]
new_df = data_df[copy_list].copy()
y_pred = data_df[out_col[0]].values-demean_df['resid'].values
y = data_df[out_col[0]]
b_x = np.dot(coeff, data_df[consist_col].values.T)
# Isolate the fixed-effect component: y - Xb - resid.
ori_resid = y-b_x
true_resid = ori_resid-demean_df['resid']
true_alpha = projection2df(new_df, true_resid, category_col, index_name)
demeaned_resid = demean_df['resid'].values
# Point estimate of the requested contrast.
final_result = do_operation(true_alpha, formula)
ori_x = new_df[consist_col].values.T
# print(final_result)
if not is_estimable(new_df, true_resid, category_col, formula, index_name):
print('the function you defined is not estimable')
else:
print(formula)
# Fan the bootstrap replications out over a 50-worker process pool;
# each `bootstrap` call resamples residuals, refits, and returns the
# contrast for one replication.
pool = Pool(processes=50)
alpha_result = []
for i in range(sample_num):
alpha_result.append(pool.apply_async(bootstrap, args=(new_df, demeaned_resid, y_pred, n, category_col,
demean_df,consist_col, formula, index_name, i)))
pool.close()
pool.join()
for i in range(len(alpha_result)):
alpha[i] = alpha_result[i].get()
return 'est:'+str(final_result), 'std:'+str(np.std(alpha))
| [
"andriusb@ethz.ch"
] | andriusb@ethz.ch |
347b2945398aa5cc3183c23004ce594f6246c450 | 84700798ce50fc88661582dca6cc45fa77035d57 | /manage.py | 006205b1fd09abed13a6401d6fe8be549263e860 | [] | no_license | moldabek/URLshortener | 945c3756e40c1df34d7ffe0a8359e42c21538440 | 5ecac943ca842a1c427a13471ff668a4f4be9e2d | refs/heads/master | 2022-12-20T01:18:15.446842 | 2020-09-21T06:40:47 | 2020-09-21T06:40:47 | 290,260,367 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the 'Back' project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Back.settings')
    try:
        # Imported lazily so the error below can explain a missing install.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
"abu-99@mail.ru"
] | abu-99@mail.ru |
e7b420a62db0bce2fe381107cc685f1bf88035d8 | f3742f46560486c07c339244f8cf47bb07709561 | /features/steps/test_utils.py | 7cc4d34cc1b40e7598dc65345299f0ee9046838a | [
"MIT"
] | permissive | Azure/azure-event-hubs-python | 55b65920f9d8dbe6cc418d63291ba507ce648d97 | 326f772f5cbe3d3eaf68b24485554aada463430a | refs/heads/master | 2023-03-17T22:03:54.241386 | 2020-04-07T22:33:17 | 2020-04-07T22:33:17 | 91,842,040 | 65 | 66 | MIT | 2020-04-07T22:33:18 | 2017-05-19T20:14:44 | Python | UTF-8 | Python | false | false | 3,540 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import time
import asyncio
def create_mgmt_client(credentials, subscription, location='westus'):
    """Create a throwaway resource group and an Event Hub namespace.

    Returns (resource_group, eventhub_management_client).
    """
    # Imported lazily so the module loads without the azure-mgmt SDKs.
    from azure.mgmt.resource import ResourceManagementClient
    from azure.mgmt.eventhub import EventHubManagementClient

    resource_client = ResourceManagementClient(credentials, subscription)
    rg_name = 'pytest-{}'.format(uuid.uuid4())
    resource_group = resource_client.resource_groups.create_or_update(
        rg_name, {'location': location})

    eh_client = EventHubManagementClient(credentials, subscription)
    namespace = 'pytest-{}'.format(uuid.uuid4())
    creator = eh_client.namespaces.create_or_update(
        resource_group.name,
        namespace)
    # Bug fix: the original called `create.wait()`, an undefined name.
    creator.wait()
    return resource_group, eh_client
def get_eventhub_config():
    """Build an Event Hub connection config from EVENT_HUB_* env variables.

    Raises KeyError if any required variable is unset.
    """
    import os  # bug fix: `os` was never imported in this module

    config = {}
    config['hostname'] = os.environ['EVENT_HUB_HOSTNAME']
    config['event_hub'] = os.environ['EVENT_HUB_NAME']
    config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY']
    config['access_key'] = os.environ['EVENT_HUB_SAS_KEY']
    config['consumer_group'] = "$Default"
    config['partition'] = "0"
    return config
def get_eventhub_100TU_config():
    """Build the 100-throughput-unit Event Hub config from env variables.

    Raises KeyError if any required EVENT_HUB_100TU_* variable is unset.
    """
    import os  # bug fix: `os` was never imported in this module

    config = {}
    config['hostname'] = os.environ['EVENT_HUB_100TU_HOSTNAME']
    config['event_hub'] = os.environ['EVENT_HUB_100TU_NAME']
    config['key_name'] = os.environ['EVENT_HUB_100TU_SAS_POLICY']
    config['access_key'] = os.environ['EVENT_HUB_100TU_SAS_KEY']
    config['consumer_group'] = "$Default"
    config['partition'] = "0"
    return config
def send_constant_messages(sender, timeout, payload=1024):
    """Send fixed-size events for `timeout` seconds; return the count sent.

    NOTE(review): EventData is not imported anywhere in this module's
    visible imports; callers must ensure it is in scope
    (``from azure.eventhub import EventData``).
    """
    # Bug fix: the original set deadline = time.time() (omitting timeout),
    # so the while condition was false immediately and nothing was sent.
    deadline = time.time() + timeout
    total = 0
    while time.time() < deadline:
        data = EventData(body=b"D" * payload)
        sender.send(data)
        total += 1
    return total
# Queue fixed-size events for `timeout` seconds via sender.transfer(),
# flushing every 10000 transfers; returns the number queued.
def send_constant_async_messages(sender, timeout, batch_size=10000, payload=1024):
# NOTE(review): deadline omits `timeout`, so the while loop exits immediately.
deadline = time.time()
total = 0
while time.time() < deadline:
# NOTE(review): `args` is undefined here -- this was almost certainly
# meant to be the `payload` parameter.
data = EventData(body=b"D" * args.payload)
sender.transfer(data)
total += 1
if total % 10000 == 0:
sender.wait()
return total
# NOTE(review): duplicate definition -- this silently shadows the
# send_constant_async_messages defined directly above.
def send_constant_async_messages(sender, timeout, batch_size=1, payload=1024):
# NOTE(review): deadline omits `timeout`, so the loop body never executes;
# `data_generator` is undefined, and `data` is never sent or returned.
deadline = time.time()
while time.time() < deadline:
if batch_size > 1:
data = EventData(batch=data_generator())
else:
data = EventData(body=b"D" * payload)
# Drain `receiver` for `timeout` seconds; when `validation` is on, assert
# that sequence numbers are strictly consecutive and every body is JSON.
# NOTE(review): `json` is not among this module's visible imports, and the
# local `offset` variable is never used.
async def receive_pump(receiver, timeout, validation=True):
total = 0
deadline = time.time() + timeout
sequence = 0
offset = None
while time.time() < deadline:
batch = await receiver.receive(timeout=5)
total += len(batch)
if validation:
assert receiver.offset
for event in batch:
next_sequence = event.sequence_number
# Consecutive-sequence check: no reordering, no gaps.
assert next_sequence > sequence, "Received Event with lower sequence number than previous."
assert (next_sequence - sequence) == 1, "Sequence number skipped by a value great than 1."
sequence = next_sequence
msg_data = b"".join([b for b in event.body]).decode('UTF-8')
assert json.loads(msg_data), "Unable to deserialize Event data."
| [
"antisch@microsoft.com"
] | antisch@microsoft.com |
55fc9a1726e44163be89eb8a2441951491ef7af9 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/caffe2/python/layer_model_helper.py | 9bb56400ffb7a26b831eb2f82abdf36f27bdbc1c | [
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 28,709 | py | # @package layer_model_helper
# Module caffe2.python.layer_model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, model_helper, schema, scope, utils, muji
from caffe2.python.modeling.parameter_info import (
ParameterInfo,
)
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.modeling.net_modifier import NetModifier
from caffe2.python.optimizer import get_param_device, Optimizer
from caffe2.python.regularizer import Regularizer, RegularizationBy
from caffe2.python.layers import layers
from caffe2.proto import caffe2_pb2
from future.utils import viewitems, viewvalues
import logging
import numpy as np
import six
import copy
logger = logging.getLogger(__name__)
class LayerModelHelper(model_helper.ModelHelper):
"""
Model helper for building models on top of layers abstractions.
Each layer is the abstraction that is higher level than Operator. Layer
is responsible for ownership of it's own parameters and can easily be
instantiated in multiple nets possible with different sets of ops.
As an example: one can easily instantiate predict and train nets from
the same set of layers, where predict net will have subset of the
operators from train net.
"""
def __init__(self, name, input_feature_schema, trainer_extra_schema,
keep_blobs=False):
''' TODO(amalevich): more documnetation on input args
'''
super(LayerModelHelper, self).__init__(name=name)
self._layer_names = set()
self._layers = []
self._param_to_shape = {}
# seed default
self._seed = None
self._sequence_seed = True
# optimizer bookkeeping
self.param_to_optim = {}
self.param_to_reg = {}
self._default_optimizer = None
self._loss = None
self._prediction = []
self._output_schema = None
self._post_grad_net_modifiers = []
self._final_net_modifiers = []
# breakdown map; breakdown features are categorical (like dense) but not
# necessarily used to represent data for training
self._breakdown_map = None
# Connect Schema to self.net. That particular instance of schmea will be
# use for generation of the Layers accross the network and would be used
# for connection with Readers.
self._input_feature_schema = schema.NewRecord(
self.net,
input_feature_schema
) if not keep_blobs else input_feature_schema.clone()
self._trainer_extra_schema = schema.NewRecord(
self.net,
trainer_extra_schema
) if not keep_blobs else trainer_extra_schema.clone()
self._metrics_schema = schema.Struct()
self._preproc_output_schema = None
self._init_global_constants()
self.param_init_net = self.create_init_net('param_init_net')
self._initialize_params = True
# additional (hard-coded) diagnose_options to report based on the model
# TODO(xlwang): it's hack!
self.ad_hoc_diagnose_blobs_and_operations = []
self.ad_hoc_plot_blobs = []
def clear_output_schema(self):
self._output_schema = None
def set_initialize_params(self, initialize_params):
self._initialize_params = initialize_params
def add_metric_field(self, name, value):
assert name not in self._metrics_schema.fields, (
"Try to add metric field twice: {}".format(name))
self._metrics_schema = self._metrics_schema + schema.Struct(
(name, value)
)
# an empty white_set will skip everything
    def filter_metrics_schema(self, white_set):
        """Remove every metric field whose name is not in *white_set*.

        Passing an empty set therefore removes all metric fields (see the
        comment above the method).
        """
        logger.info("Filter metric schema with white_set {}".format(white_set))
        field_names = self._metrics_schema.field_names()
        for name in field_names:
            if name not in white_set:
                # Struct subtraction drops the named field from the schema.
                self._metrics_schema = self._metrics_schema - schema.Struct((name, schema.Scalar()))
def add_ad_hoc_plot_blob(self, blob, dtype=None):
assert isinstance(
blob, (six.string_types, core.BlobReference)
), "expect type str or BlobReference, but got {}".format(type(blob))
dtype = dtype or (np.float, (1, ))
self.add_metric_field(str(blob), schema.Scalar(dtype, blob))
self.ad_hoc_plot_blobs.append(blob)
@staticmethod
def _get_global_constant_initializer_op(
blob_name, array=None, dtype=None, initializer=None
):
# to add a global constant to model, one first need to get the
# initializer
if array is not None:
assert initializer is None,\
"Only one from array and initializer should be specified"
if dtype is None:
array = np.array(array)
else:
array = np.array(array, dtype=dtype)
# TODO: make GivenTensor generic
op_name = None
if array.dtype == np.int32:
op_name = 'GivenTensorIntFill'
elif array.dtype == np.int64:
op_name = 'GivenTensorInt64Fill'
elif array.dtype == np.str:
op_name = 'GivenTensorStringFill'
elif array.dtype == np.bool:
op_name = 'GivenTensorBoolFill'
else:
op_name = 'GivenTensorFill'
def initializer(blob_name):
return core.CreateOperator(
op_name, [],
blob_name,
shape=array.shape,
values=array.flatten().tolist()
)
else:
assert initializer is not None
initializer_op = initializer(blob_name)
return initializer_op
def add_global_constant(
self, name, array=None, dtype=None, initializer=None
):
assert isinstance(name, six.string_types), (
'name should be a string as we are using it as map key')
# This is global namescope for constants. They will be created in all
# init_nets and there should be very few of them.
assert name not in self.global_constants, \
"%s already added in global_constants" % name
blob_name = self.net.NextBlob(name)
self.global_constants[name] = blob_name
initializer_op = LayerModelHelper._get_global_constant_initializer_op(
blob_name, array, dtype, initializer
)
assert blob_name not in self.global_constant_initializers, \
"there is already a initializer op associated with blob %s" % \
blob_name
self.global_constant_initializers[blob_name] = initializer_op
return blob_name
def maybe_add_global_constant(self, name, *args, **kwargs):
# To ad hoc add new global constants without duplication
# if the name was already registered in global_constants, it will not be
# added even if the intended value is different from its original value
if name in self.global_constants:
blob_name = self.global_constants[name]
initializer_op = \
LayerModelHelper._get_global_constant_initializer_op(
blob_name, *args, **kwargs
)
# check if the original initializer is the same as the one intended
# now
assert utils.OpAlmostEqual(
initializer_op,
self.global_constant_initializers[blob_name],
'debug_info'
), \
"conflict initializers for global constant %s, " \
"previous %s, now %s" % (
blob_name, str(initializer_op),
str(self.global_constant_initializers[blob_name]))
return blob_name
return self.add_global_constant(name, *args, **kwargs)
def _init_global_constants(self):
self.global_constants = {}
self.global_constant_initializers = {}
self.add_global_constant('ONE', 1.0)
self.add_global_constant('ZERO', 0.0)
self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')
def _add_global_constants(self, init_net):
for initializer_op in viewvalues(self.global_constant_initializers):
init_net._net.op.extend([initializer_op])
def create_init_net(self, name):
init_net = core.Net(name)
self._add_global_constants(init_net)
return init_net
def _validate_param_shape(self, param_name, shape):
if param_name not in self._param_to_shape:
return
ref_shape = self._param_to_shape[param_name]
if shape != ref_shape:
raise ValueError(
"Got inconsistent shapes between shared parameters "
"when trying to map a blob in scope {0} to {1}. ref_shape : "
" {2}, shape : {3}".format(
scope.CurrentNameScope(), param_name, ref_shape, shape)
)
def _validate_param_optim(self, param_name, optim):
# there are three possible values for optim:
# 1) None (which will use self._default_optimizer after this layer is instantiated)
# 2) self.NoOptim
# 3) an instance of Optimizer class such as AdagradOptimizer
# this implies this parameter is not shared with any other parameter so far
if param_name not in self.param_to_optim:
return
logger.info("{} shares the same parameter with another parameter. "
"Validating if the same optimizer has been specified for them.".format(
param_name,
))
ref_optim = self.param_to_optim[param_name]
if optim is None:
assert ref_optim == self._default_optimizer, (
"Optim for {} is None which will fall back to use default_optimizer. "
"However, the optimizer that has been specified for this shared parameter "
"is {} which is different from default_optimizer {}. "
"Please check the optimizers specified for parameters shared "
"with {} and the default_optimizer to ensure the consistency.".format(
param_name, ref_optim, self._default_optimizer, param_name
)
)
elif optim == self.NoOptim:
assert ref_optim == self.NoOptim, (
"Optim for {} is NoOptim. However, the optimizer for the parameters "
"shared with {} is {} which is different from NoOptim. "
"Please check the optimizer specified for other parameters in the "
"shared group to ensure consistency.".format(
param_name, param_name, ref_optim
)
)
elif isinstance(optim, Optimizer):
assert isinstance(ref_optim, Optimizer), (
"Optim for {} is an instance of Optimizer. However, the optimizer "
"for the parameters shared with {} is {} which is not an instance "
"of Optimizer. Please check the optimizer specified for other "
" parameters in the shared group to ensure consistency.".format(
param_name, param_name, ref_optim, optim
)
)
assert type(optim) is type(ref_optim) and optim.attributes == ref_optim.attributes, (
"Optim for {} is an instance of Optimizer. However, the optimizer "
"for the parameters shared with {} is {}. "
"This optimizer either doesn't have the same type as the current optimizer: "
"{} vs {}, or its attributes such as learning rate are different from "
"that of current optimizer which is {} vs {}. "
"Please check the optimizer specified for other parameters in the "
"shared group to ensure consistency.".format(
param_name, param_name, ref_optim, type(optim), type(ref_optim), optim.attributes, ref_optim.attributes
)
)
else:
raise ValueError("optim should be either None, NoOptim, or an instance of Optimizer, Got {} ".format(optim))
def create_param(self, param_name, shape, initializer, optimizer=None,
ps_param=None, regularizer=None):
if isinstance(param_name, core.BlobReference):
param_name = str(param_name)
elif isinstance(param_name, six.string_types):
# Parameter name will be equal to current Namescope that got
# resolved with the respect of parameter sharing of the scopes.
param_name = parameter_sharing_context.get_parameter_name(
param_name)
else:
raise ValueError("Unsupported type for param_name")
param_blob = core.BlobReference(param_name)
if len(initializer) == 1:
init_op_args = {}
else:
assert len(initializer) == 2
init_op_args = copy.deepcopy(initializer[1])
if shape is not None:
assert 'shape' not in init_op_args
init_op_args.update({'shape': shape})
initializer_op = None
if self._initialize_params:
initializer_op = core.CreateOperator(
initializer[0],
[],
param_blob,
**init_op_args
)
param = layers.LayerParameter(
parameter=param_blob,
initializer=initializer_op,
optimizer=optimizer,
ps_param=ps_param,
regularizer=regularizer
)
self._validate_param_shape(param_name, shape)
self._validate_param_optim(param_name, optimizer)
self._param_to_shape[param_name] = shape
return param
def next_layer_name(self, prefix):
base_name = core.ScopedName(prefix)
name = base_name
index = 0
while name in self._layer_names:
name = base_name + '_auto_' + str(index)
index += 1
self._layer_names.add(name)
return name
def add_layer(self, layer):
self._layers.append(layer)
for param in layer.get_parameters():
assert isinstance(param.parameter, core.BlobReference)
self.param_to_optim[str(param.parameter)] = \
param.optimizer or self.default_optimizer
self.params.append(param.parameter)
if isinstance(param, layers.LayerParameter):
logger.info("Add parameter regularizer {0}".format(param.parameter))
self.param_to_reg[param.parameter] = param.regularizer
elif isinstance(param, ParameterInfo):
# TODO:
# Currently, LSTM and RNNcells, which use ModelHelper instead of
# LayerModelHelper as super class, are called in pooling_methods
# In ModelHelper, regularization is not supported in create_param
# We will unify the way of create_param of ModelHelper and
# LayerModelHelper in the future.
logger.info('regularization is unsupported for ParameterInfo object')
else:
raise ValueError(
'unknown object type besides ParameterInfo and LayerParameter: {}'
.format(param)
)
# The primary value of adding everything to self.net - generation of the
# operators right away, i.e. if error happens it'll be detected
# immediately. Other than this - create_x_net should be called.
layer.add_operators(self.net, self.param_init_net)
return layer.output_schema
def get_parameter_blobs(self):
param_blobs = []
for layer in self._layers:
for param in layer.get_parameters():
param_blobs.append(param.parameter)
return param_blobs
def add_post_grad_net_modifiers(self, modifier):
assert modifier not in self._post_grad_net_modifiers,\
"{0} is already in {1}".format(modifier, self._post_grad_net_modifiers)
assert isinstance(modifier, NetModifier),\
"{} has to be a NetModifier instance".format(modifier)
self._post_grad_net_modifiers.append(modifier)
def add_final_net_modifiers(self, modifier):
assert modifier not in self._final_net_modifiers,\
"{0} is already in {1}".format(modifier, self._final_net_modifiers)
assert isinstance(modifier, NetModifier),\
"{} has to be a NetModifier instance".format(modifier)
self._final_net_modifiers.append(modifier)
@property
def seed(self):
return self._seed
@property
def sequence_seed(self):
return self._sequence_seed
def store_seed(self, seed, sequence_seed=True):
# Store seed config that will be applied to each op in the net.
self._seed = seed
# If sequence_seed is True, the i-th op has rand_seed=`seed + i`
self._sequence_seed = sequence_seed
def apply_seed(self, net):
if self._seed:
net.set_rand_seed(self._seed, self._sequence_seed)
@property
def default_optimizer(self):
return self._default_optimizer
@default_optimizer.setter
def default_optimizer(self, optimizer):
self._default_optimizer = optimizer
@property
def input_feature_schema(self):
return self._input_feature_schema
@property
def trainer_extra_schema(self):
return self._trainer_extra_schema
@property
def metrics_schema(self):
"""
Returns the schema that represents model output that should be used for
metric reporting.
During the training/evaluation this schema will be appended to the
schema that represents model output.
"""
return self._metrics_schema
@property
def output_schema(self):
assert self._output_schema is not None
return self._output_schema
@output_schema.setter
def output_schema(self, schema):
assert self._output_schema is None
self._output_schema = schema
@property
def preproc_output_schema(self):
assert self._preproc_output_schema is not None
return self._preproc_output_schema
@preproc_output_schema.setter
def preproc_output_schema(self, schema):
assert self._preproc_output_schema is None
self._preproc_output_schema = schema
@property
def prediction(self):
assert self._prediction, "model prediction is empty"
return self._prediction
def add_prediction(self, prediction, weight=1.0):
assert prediction is not None, "Added prediction should not be None"
self._prediction.append((prediction, weight))
@property
def loss(self):
assert self._loss is not None
return self._loss
@loss.setter
def loss(self, loss):
assert self._loss is None
self._loss = loss
def has_loss(self):
return self._loss is not None
def add_loss(self, loss, name='unnamed'):
assert loss is not None, "Added loss should not be None"
assert isinstance(loss, schema.Scalar) or isinstance(
loss, schema.Struct
), "Added loss should be a scalar or a struct"
if self._loss is None:
self._loss = schema.Struct((name, loss))
else:
# loss could've been set through model.loss directly which could be
# a scalar
if isinstance(self._loss, schema.Scalar):
self._loss = schema.Struct(('unnamed', self._loss))
prefix_base = name + '_auto_'
index = 0
prefix = name
while prefix in self._loss:
prefix = prefix_base + str(index)
index += 1
loss_struct = schema.Struct((prefix, loss))
self._loss = self._loss + loss_struct
def add_output_schema(self, name, value):
assert value is not None, \
'Added output schema {} should not be None'.format(name)
assert isinstance(value, schema.Scalar) or \
isinstance(value, schema.Struct), \
'Added output schema {} should be a scalar or a struct.\n\
Now it is {}.'.format(name, type(value))
if self._output_schema is None: # be the first field
self._output_schema = schema.Struct((name, value))
else: # merge with other fields
assert name not in self._output_schema.fields, \
'Output Schema Field {} already exists'.format(name)
self._output_schema = \
self._output_schema + schema.Struct((name, value))
def add_trainer_extra_schema(self, trainer_extra_schema):
trainer_extra_record = schema.NewRecord(self.net, trainer_extra_schema)
self._trainer_extra_schema += trainer_extra_record
def __getattr__(self, layer):
def is_functional_layer(layer):
if core.IsOperator(layer):
return True
elif layer.startswith('FunctionalLayer'):
return True
else:
return False
def resolve_functional_layer(layer):
if core.IsOperator(layer):
return layer
elif layer.startswith('FunctionalLayer'):
return layer[len('FunctionalLayer'):]
else:
raise ValueError(
'%s cannot be resolved as functional layer' % layer
)
if layer.startswith('__'):
raise AttributeError(layer)
# TODO(amalevich): Add add support for ifbpy inline documentation
if layers.layer_exists(layer):
def wrapper(*args, **kwargs):
new_layer = layers.create_layer(layer, self, *args, **kwargs)
if kwargs.get("output_to_metrics", False):
new_layer.export_output_for_metrics()
if kwargs.get("params_to_metrics", False):
new_layer.export_params_for_metrics()
return self.add_layer(new_layer)
return wrapper
elif is_functional_layer(layer):
# TODO(xlwang): Desginated layer shadows the usage of an op as a
# single layer. To enforce using an op (e.g. Split) as functional
# layer, one can call 'model.FunctionalLayerSplit'
layer = resolve_functional_layer(layer)
def wrapper(*args, **kwargs):
def apply_operator(net, in_record, out_record, **kwargs):
# TODO(amalevich): Switch to net.operator as soon as it gets
# landed
net.__getattr__(layer)(in_record.field_blobs(),
out_record.field_blobs(),
**kwargs)
if 'name' not in kwargs:
kwargs['name'] = layer
new_layer = layers.create_layer(
'Functional',
self, *args, function=apply_operator,
**kwargs
)
if kwargs.get("output_to_metrics", False):
new_layer.export_output_for_metrics()
if kwargs.get("params_to_metrics", False):
new_layer.export_params_for_metrics()
return self.add_layer(new_layer)
return wrapper
else:
# this needs to be an AttributeError to fit hasattr semantics
raise AttributeError(
"Trying to create non-registered layer: {}".format(layer))
@property
def layers(self):
return self._layers
def apply_regularizers_on_loss(
self,
train_net,
train_init_net,
blob_to_device=None,
):
logger.info("apply regularizer on loss")
for param, regularizer in viewitems(self.param_to_reg):
if regularizer is None:
continue
logger.info("add regularizer {0} for param {1} to loss".format(regularizer, param))
assert isinstance(regularizer, Regularizer)
added_loss_blob = regularizer(train_net, train_init_net, param, grad=None,
by=RegularizationBy.ON_LOSS)
logger.info(added_loss_blob)
if added_loss_blob is not None:
self.add_loss(
schema.Scalar(blob=added_loss_blob),
str(added_loss_blob)
)
def apply_regularizers_after_optimizer(
self,
train_net,
train_init_net,
grad_map,
blob_to_device=None,
):
logger.info("apply regularizer after optimizer")
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
blob_to_device = blob_to_device or {}
for param, regularizer in viewitems(self.param_to_reg):
if regularizer is None:
continue
assert isinstance(regularizer, Regularizer)
logger.info("add regularizer {0} for param {1} to optimizer".format(regularizer, param))
device = get_param_device(
param,
grad_map.get(str(param)),
param_to_device=blob_to_device,
default_device=CPU,
)
with core.DeviceScope(device):
regularizer(
train_net, train_init_net, param, grad=grad_map.get(str(param)),
by=RegularizationBy.AFTER_OPTIMIZER
)
def apply_post_grad_net_modifiers(
self,
trainer_net,
trainer_init_net,
grad_map,
blob_to_device=None,
modify_output_record=False,
):
param_grad_map = {param: grad_map[param]
for param in self.param_to_optim.keys() if param in grad_map}
for modifier in self._post_grad_net_modifiers:
modifier(trainer_net, trainer_init_net, param_grad_map,
blob_to_device=blob_to_device,
modify_output_record=modify_output_record)
def apply_final_net_modifiers(
self,
trainer_net,
trainer_init_net,
grad_map,
blob_to_device=None,
modify_output_record=False,
):
for modifier in self._final_net_modifiers:
modifier(trainer_net, trainer_init_net, grad_map,
blob_to_device=blob_to_device,
modify_output_record=modify_output_record)
def apply_optimizers(
self,
train_net,
train_init_net,
grad_map,
blob_to_device=None,
):
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
blob_to_device = blob_to_device or {}
for param, optimizer in viewitems(self.param_to_optim):
assert optimizer is not None, \
"default optimizer must have been set in add_layer"
# note that not all params has gradient and thus we sent None if
# gradient does not exists
device = get_param_device(
param,
grad_map.get(str(param)),
param_to_device=blob_to_device,
default_device=CPU,
)
if device is not None:
# extra info is not applicable for optimizers
del device.extra_info[:]
with core.DeviceScope(device):
optimizer(
train_net, train_init_net, param, grad_map.get(str(param)))
def _GetOne(self):
return self.global_constants['ONE']
# An optimizer which allows us to do NO optimization
def NoOptim(self, *args, **kwargs):
pass
@property
def breakdown_map(self):
return self._breakdown_map
@breakdown_map.setter
def breakdown_map(self, breakdown_map):
# TODO(xlwang): provide more rich feature information in breakdown_map;
# and change the assertion accordingly
assert isinstance(breakdown_map, dict)
assert all(isinstance(k, six.string_types) for k in breakdown_map)
assert sorted(breakdown_map.values()) == list(range(len(breakdown_map)))
self._breakdown_map = breakdown_map
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
9dcf9692cc7ae29a7f56fa27e78cdc21365c70ba | 8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac | /PySide/QtGui/QStyleOptionTab.py | 264b7c0e040a27794ebb0a067eb56b9436f49f11 | [
"Apache-2.0"
] | permissive | sonictk/python-skeletons | be09526bf490856bb644fed6bf4e801194089f0d | 49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d | refs/heads/master | 2020-04-06T04:38:01.918589 | 2016-06-09T20:37:43 | 2016-06-09T20:37:43 | 56,334,503 | 0 | 0 | null | 2016-04-15T16:30:42 | 2016-04-15T16:30:42 | null | UTF-8 | Python | false | false | 1,122 | py | # encoding: utf-8
# module PySide.QtGui
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtGui.so
# by generator 1.138
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
from QStyleOption import QStyleOption
# Auto-generated IDE skeleton stub for PySide.QtGui.QStyleOptionTab: all
# class attributes are placeholders (None) standing in for the real Qt
# enum values and option fields; the real implementation lives in the
# compiled QtGui extension module.
class QStyleOptionTab(QStyleOption):
    # no doc
    def __init__(self, *more): # real signature unknown; restored from __doc__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass
    # Placeholder attributes for tab position / corner-widget enums and the
    # option's data fields (icon, text, shape, row, ...).  Values are filled
    # in by the actual binary module at runtime, not by this stub.
    Beginning = None
    CornerWidget = None
    cornerWidgets = None
    CornerWidgets = None
    End = None
    icon = None
    LeftCornerWidget = None
    Middle = None
    NextIsSelected = None
    NoCornerWidgets = None
    NotAdjacent = None
    OnlyOneTab = None
    position = None
    PreviousIsSelected = None
    RightCornerWidget = None
    row = None
    selectedPosition = None
    SelectedPosition = None
    shape = None
    StyleOptionType = None
    StyleOptionVersion = None
    TabPosition = None
    text = None
    Type = None
    Version = None
    __new__ = None
| [
"yliangsiew@blizzard.com"
] | yliangsiew@blizzard.com |
857ea37b6d65aa8708b7aa91520822899db9bbaa | b3e90c765a70d0c68a3c703c31ebfbcc67d8f83e | /AN_Bridging/box_ws/src/multi_box/src/Algs/SoftActorCritic.py | 6147e9cd1403a16b426ef7d1722e30eefb48531e | [] | no_license | ronf-ucb/MultiRobot | 619fe1750dd25c336f7ef793e43983d992cbf519 | b509e9c43e330e737135298ea4cfbd4190222328 | refs/heads/master | 2021-07-19T20:39:47.835985 | 2020-09-07T14:32:39 | 2020-09-07T14:32:39 | 211,394,959 | 1 | 2 | null | 2019-10-30T22:21:20 | 2019-09-27T20:15:07 | C++ | UTF-8 | Python | false | false | 3,825 | py | #! /usr/bin/env python
import numpy as np
import torch
import torch.nn as nn
import math
import rospy
from std_msgs.msg import String, Int8
from geometry_msgs.msg import Vector3
import vrep
import matplotlib.pyplot as plt
import torch.optim as optim
from Networks.network import Network
from Networks.softNetwork import SoftNetwork
from agent import Agent
from Buffers.CounterFactualBuffer import Memory
cuda_avail = torch.cuda.is_available()
device = torch.device("cuda" if cuda_avail else "cpu")
class SAC(Agent):
    """Soft Actor-Critic agent: soft Q-network, value network with a Polyak-
    averaged target copy, and a stochastic (squashed Gaussian) policy net.

    NOTE(review): several attributes look broken in this chunk —
    ``self.vPars`` / ``self.vTrain`` are read but never assigned (only
    aPars/aTrain/qPars/qTrain are), and ``ReplayBuffer`` is used although the
    imports only bring in ``Memory``.  Verify against the full repository.
    """
    def __init__(self, params, name, task):
        super(SAC, self).__init__(params, name, task)
        self.aPars = params['actPars']
        self.aTrain = params['actTrain']
        self.qPars = params['qPars']
        self.qTrain = params['qTrain']
        if self.trainMode:
            self.QNet = Network(self.qPars, self.qTrain).to(device)
            # NOTE(review): self.vPars/self.vTrain are never set above —
            # this would raise AttributeError; presumably params['valPars']
            # etc. were intended.  TODO confirm.
            self.VNet = Network(self.vPars, self.vTrain).to(device)
            self.VTar = Network(self.vPars, self.vTrain).to(device)
            self.policyNet = SoftNetwork(self.aPars, self.aTrain).to(device)
        else:
            print('Not implemented')
        # Hard-copy the value net weights into the target net at startup.
        for target_param, param in zip(self.VTar.parameters(), self.VNet.parameters()):
            target_param.data.copy_(param)
        self.expSize = self.vTrain['buffer']
        self.actions = self.aPars['neurons'][-1]
        self.state = self.aPars['neurons'][0]
        # NOTE(review): ReplayBuffer is not imported in this file (only
        # Memory is); verify the intended buffer class.
        self.exp = ReplayBuffer(self.expSize, self.actions, np.float32, self.state, np.float32)
        task.initAgent(self)
        # Busy-wait until the task signals completion (self.stop is set by
        # the task/ROS callbacks).  NOTE(review): this spins a CPU core;
        # a sleep or event wait would be gentler.
        while(not self.stop):
            x = 1+1
        task.postTraining()
    def load_nets(self):
        # Intentionally a no-op here; loading is handled elsewhere (if at all).
        pass
    def saveModel(self):
        # Intentionally a no-op in this implementation.
        pass
    def get_action(self, s):
        """Sample an action from the stochastic policy for state ``s``
        and return it as a flat numpy array."""
        action, _ , _, _, _= self.policyNet(torch.FloatTensor(s))
        action = np.ravel(action.detach().numpy())
        return action
    def send_to_device(self, s, a, r, next_s, d):
        """Convert a sampled batch to float tensors on the active device;
        rewards and done flags gain a trailing unit dimension."""
        s = torch.FloatTensor(s).to(device)
        a = torch.FloatTensor(a).to(device)
        r = torch.FloatTensor(r).unsqueeze(1).to(device)
        next_s = torch.FloatTensor(next_s).to(device)
        d = torch.FloatTensor(np.float32(d)).unsqueeze(1).to(device)
        return s, a, r, next_s, d
    def train(self):
        """One SAC update step (Q, V and policy) once the replay buffer
        holds more than 750 transitions."""
        if len(self.exp) > 750:
            s, a, r, next_s, d = self.exp.sample_batch(self.batch_size)
            s, a, r, next_s, d = self.send_to_device(s, a, r, next_s, d)
            q = self.QNet(torch.cat([s, a], dim = 1))
            v = self.VNet(s)
            new_a, log_prob, z, mean, log_std = self.policyNet(s)
            # Soft Bellman backup: r + gamma * V_target(s') on non-terminal steps.
            target_v = self.VTar(next_s)
            next_q = r + (1 - d) * self.discount * target_v
            q_loss = self.QNet.get_loss(q, next_q.detach())
            # Value target: Q(s, a~pi) minus the entropy term alpha*log_prob.
            new_q = self.QNet(torch.cat([s, new_a], dim=1))
            next_v = new_q - log_prob * self.alpha
            v_loss = self.VNet.get_loss(v, next_v.detach())
            # Policy gradient on the advantage, plus small regularizers on
            # the Gaussian mean and log-std to keep them bounded.
            target = new_q - v
            actor_loss = (log_prob * (log_prob*self.alpha - target).detach()).mean()
            mean_loss = 1e-3 * mean.pow(2).mean()
            std_loss = 1e-3 * log_std.pow(2).mean()
            actor_loss += mean_loss + std_loss
            self.VNet.optimizer.zero_grad()
            v_loss.backward()
            self.VNet.optimizer.step()
            self.QNet.optimizer.zero_grad()
            q_loss.backward()
            self.QNet.optimizer.step()
            self.policyNet.optimizer.zero_grad()
            actor_loss.backward()
            self.policyNet.optimizer.step()
            # Polyak-average the value net into its target (tau = 5e-3).
            for target_param, param in zip(self.VTar.parameters(), self.VNet.parameters()):
                target_param.data.copy_(target_param.data * (1.0 - 5*1e-3) + param.data * 5*1e-3)
            self.totalSteps += 1
"austinnguyen517@berkeley.edu"
] | austinnguyen517@berkeley.edu |
5eb0a07ac596ae7345b97c9eed82da966bd14583 | 59270de0f31107386898488a145b1eec84304b60 | /collective/recipe/template/__init__.py | 1b92546f53e0c58ec847e1ae325e23b4d7f47b25 | [] | no_license | lfs-multisite/lfs-multisite-project | db8349692929bc7c5318052cadc1fc9e4a5ea19c | de0cab06d4c9d76acf6fba53c182a0aa97c402ce | refs/heads/master | 2016-08-02T20:44:25.195086 | 2011-01-31T07:04:29 | 2011-01-31T07:04:29 | 1,302,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | import logging
import os
import re
import stat
import zc.buildout
class Recipe:
    """zc.buildout recipe that renders a template into an output file.

    The template source comes either from the ``inline`` option, from the
    file named by ``input``, or from an ``input`` value of the form
    ``inline:<template>``.  ``${var}`` references are rewritten to
    ``${<part-name>:var}`` and resolved through buildout option
    interpolation; the result is written to ``output`` (creating any
    missing parent directories), optionally chmod'ed to ``mode``.
    """

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options
        self.logger = logging.getLogger(self.name)

        if "input" not in options and "inline" not in options:
            self.logger.error("No input file or inline template specified.")
            raise zc.buildout.UserError("No input file specified.")
        if "output" not in options:
            self.logger.error("No output file specified.")
            raise zc.buildout.UserError("No output file specified.")

        self.output = options["output"]
        self.input = options.get("input")
        self.inline = options.get("inline")

        if "inline" in options:
            self.source = self.inline.lstrip()
            self.mode = None
        elif os.path.exists(self.input):
            # BUG FIX: the input file handle was previously never closed.
            with open(self.input) as template_file:
                self.source = template_file.read()
            # Default output mode mirrors the input file's permissions.
            self.mode = stat.S_IMODE(os.stat(self.input).st_mode)
        elif self.input.startswith('inline:'):
            self.source = self.input[len('inline:'):].lstrip()
            self.mode = None
        else:
            msg = "Input file '%s' does not exist." % self.input
            self.logger.error(msg)
            raise zc.buildout.UserError(msg)

        self._execute()

        # An explicit octal "mode" option overrides the inherited mode.
        if "mode" in options:
            self.mode = int(options["mode"], 8)

    def _execute(self):
        """Qualify bare ${var} references with this part's name and run
        buildout's option substitution over the template."""
        template = re.sub(r"\$\{([^:]+?)\}", r"${%s:\1}" % self.name, self.source)
        self.result = self.options._sub(template, [])

    def install(self):
        """Write the rendered template to ``output`` and return the list of
        created paths (so buildout can uninstall them)."""
        self.createIntermediatePaths(os.path.dirname(self.output))
        # BUG FIX: use a context manager so the handle is closed even if
        # the write raises.
        with open(self.output, "wt") as output:
            output.write(self.result)
        if self.mode is not None:
            os.chmod(self.output, self.mode)
        self.options.created(self.output)
        return self.options.created()

    def update(self):
        # Variables in other parts might have changed so we need to do a
        # full reinstall.
        return self.install()

    def createIntermediatePaths(self, path):
        """Recursively create missing parent directories of *path*,
        registering each created directory with buildout."""
        parent = os.path.dirname(path)
        if os.path.exists(path) or parent == path:
            return
        self.createIntermediatePaths(parent)
        os.mkdir(path)
        self.options.created(path)
| [
"tushkanin@mail.ru"
] | tushkanin@mail.ru |
8a6d6870ba905839d5e65c492532192cc083a002 | 445beaa2c50d62a837047b3dbd5132ef5470f8db | /Mastigando estruturas de controle/CalcularSomaDeInteirosAte0.py | f7e992a22d9c1175ef638b2d17e357926bf1a85d | [] | no_license | thayannevls/pythonZumbi | ecee77d640e6ad7e487348eae1ec2eba5e833f22 | 9ffd39aea2f2927bdb5d58828dfbc75756d3c197 | refs/heads/master | 2020-04-09T07:24:28.655236 | 2014-03-09T20:50:02 | 2014-03-09T20:50:02 | 17,462,490 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | soma = 0
while True:
x = int(input('Digite um número(0 sai):'))
if x== 0:
break
soma = soma + x
print('soma : %d' %soma)
| [
"thayannevls@gmail.com"
] | thayannevls@gmail.com |
2d7f2ed0e05c054d8b480b2f2da123ea594e62c3 | a4f0774053ec36e7bf2adb61bd225215fac99128 | /takeImageFromVideo.py | 6ee456b0f445b05ee0eafc17b83531230b452f07 | [] | no_license | linhhnbkdn/utils | d05848e5168fcc1f18b59eefd7fe2d82917f8d3a | 242697138b5ca5bff33098974c3754685b85eb03 | refs/heads/main | 2023-03-08T08:11:09.569529 | 2021-02-21T04:18:01 | 2021-02-21T04:18:01 | 340,810,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #!/usr/bin/env python3
import argparse
import logging
import os
import shutil

import cv2
# Extract every frame of a video into a sibling directory of .bmp images,
# named after the video file.  Press 'q' in the preview window to stop early.
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s-%(name)s]: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
Logger = logging.getLogger('Take image from Video')
Logger.setLevel(logging.INFO)

args = argparse.ArgumentParser()
args.add_argument('-p', '--path', help='Path to video', required=True)
args = vars(args.parse_args())
Logger.info(args)

src = os.path.dirname(args['path'])
name = args['path'].split(os.sep)[-1]
# Strip the extension (text after the first dot).
# NOTE(review): with no dot in the name, find() returns -1 and this drops
# the last character — confirm inputs always carry an extension.
name = name[0:str(name).find('.')]

FImages = os.path.join(src, name)
if os.path.exists(FImages):
    # BUG FIX: os.remove() raises on directories; remove the whole tree.
    shutil.rmtree(FImages)
os.mkdir(FImages)

cap = cv2.VideoCapture(args['path'])
IndexImg = 0
while cap.isOpened():
    ret, frame = cap.read()
    # BUG FIX: at end of video cap.read() returns (False, None); the
    # original passed None to cv2.imshow and crashed instead of exiting.
    if not ret:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    f = os.path.join(FImages, '{}.bmp'.format(IndexImg))
    cv2.imwrite(f, frame)
    Logger.info("Created {}".format(f))
    IndexImg += 1
    cv2.waitKey(5)
cap.release()
cv2.destroyAllWindows()
| [
"lnhoang@amperecomputing.com"
] | lnhoang@amperecomputing.com |
c7c9ab5555c62ef7bca526ca8069b83788f07dc4 | a3e26112cb5d6b64c30b44f775750653a1daf0dc | /Q910_Smallest-Range-II.py | 550d96ea406915f5883d2b9d5cea91a4561727d1 | [
"MIT"
] | permissive | xiaosean/leetcode_python | 938f1df379b518d99a778e2da8093ff0371e35d4 | d6fc52d13946895d2b2928ef9962af0610b1d1e8 | refs/heads/master | 2023-04-05T07:07:01.561010 | 2023-03-25T19:17:21 | 2023-03-25T19:17:21 | 150,637,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | class Solution:
def smallestRangeII(self, A: List[int], K: int) -> int:
A.sort()
min_range = A[-1] - A[0]
min_val = A[0]
max_val_sub_k = A[-1] - K
min_val = A[0] + K
for idx in range(len(A)-1):
cur_val = A[idx] + K
next_val = A[idx+1] - K
min_range = min(min_range, max(max_val_sub_k, cur_val) - min(min_val, next_val))
# min_range = min(min_range, max_val_sub_k-min(cur_val, next_val))
return min_range | [
"xiaosean5408@gmail.com"
] | xiaosean5408@gmail.com |
855fb15b15d33fbe562973352dba115c1014db55 | 251e8bfec0bfc5b6094f7db8ee6bdfe1ca7f6a5b | /bookmanager/venv/bin/python-config | 759cfeb2748510449a11b8162c1a3830533ca6fc | [] | no_license | googleliyang/Django_meiduo | 543042e08cc5eeb1dce8432b4ea2cca996f35c06 | 46f48ecf7bd6e9e2796eac1c3d54787f5571a9a7 | refs/heads/master | 2020-04-24T19:22:42.295324 | 2019-02-28T05:24:23 | 2019-02-28T05:24:23 | 172,209,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | #!/Users/ly/Programmer/django/bookmanager/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"ubuntu@gmail.com"
] | ubuntu@gmail.com | |
af674a327738ce483ad38943f0d2f07b42d179ed | 7bdb458f37c130fb69ccd9176ac49f1059036ade | /app.py | c191b9668d3fe93dd7b22eb0f397366a3a0ea08a | [] | no_license | cuzer1/testFlask | 84332b17861f445cc5519048ef495f513fa2413b | 01e8d01d376b123e10fb7f7c2726cec10af1a64c | refs/heads/master | 2021-01-21T15:37:31.096097 | 2017-06-26T03:19:54 | 2017-06-26T03:19:54 | 95,396,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | import os
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item, ItemList
from resources.store import Store, StoreList
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
# To allow flask propagating exception even if debug is set to false on app
# app.config['PROPAGATE_EXCEPTIONS'] = True
app.secret_key = 'jose'
api = Api(app)
# @app.before_first_request
# def create_tables():
# db.create_all()
jwt = JWT(app, authenticate, identity)
api.add_resource(Store, '/store/<string:name>')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(StoreList, '/stores')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(debug=True) # important to mention debug=True
| [
"cuzerms@yahoo.com"
] | cuzerms@yahoo.com |
5ae67b4bb1b16de948595c24dd3c6c912329ceb7 | 9f30877e1ace8c9c40c1b31eaf868af45e1eb61e | /main.py | 3e63d83a6a54724b2eddb7823a54724d4e27b614 | [] | no_license | KleEnder/chap-app | b78578be51d59d12491b03b441b9216c5f1cb60a | fe860cc0cd6504173c2a166e4ace8e4f28049195 | refs/heads/master | 2021-01-09T20:40:04.796896 | 2016-07-21T15:11:14 | 2016-07-21T15:11:14 | 63,965,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,670 | py | #!/usr/bin/env python
import os
import jinja2
import webapp2
from models import Message
from google.appengine.api import users
template_dir = os.path.join(os.path.dirname(__file__), "templates")
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)
class BaseHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
return self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
return self.write(self.render_str(template, **kw))
def render_template(self, view_filename, params=None):
if not params:
params = {}
template = jinja_env.get_template(view_filename)
return self.response.out.write(template.render(params))
class MainHandler(BaseHandler):
def get(self):
list_of_m = Message.query().fetch()
params = {"list_of_m": list_of_m}
user = users.get_current_user()
params["user"] = user
if user:
prijavljen = True
logout_url = users.create_logout_url('/')
params["prijavljen"] = prijavljen
params["logout_url"] = logout_url
else:
prijavljen = False
login_url = users.create_login_url('/')
params["prijavljen"] = prijavljen
params["login_url"] = login_url
return self.render_template("main.html", params=params)
class MessageHandler(BaseHandler):
def get(self):
list_of_m = Message.query().fetch()
params = {"list_of_m": list_of_m}
user = users.get_current_user()
params["user"] = user
if user:
prijavljen = True
logout_url = users.create_logout_url('/')
params["prijavljen"] = prijavljen
params["logout_url"] = logout_url
else:
prijavljen = False
login_url = users.create_login_url('/')
params["prijavljen"] = prijavljen
params["login_url"] = login_url
return self.render_template("main.html", params=params)
def post(self):
input_message = self.request.get("input_message")
message = Message(text_entered=input_message)
message.put()
#return self.write("You've entered: " + input_message)
return self.redirect_to("main")
app = webapp2.WSGIApplication([
webapp2.Route('/', MainHandler, name="main"),
webapp2.Route('/message', MessageHandler),
#webapp2.Route('/all_messages', MessageHandler, name="all_messages"),
], debug=True)
| [
"klemenznidar208@gmail.com"
] | klemenznidar208@gmail.com |
6a3de9a2997e3969d187a691ddd2deb96e6635a7 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/generated_clients/apis/ondemandscanning/v1beta1/ondemandscanning_v1beta1_client.py | 0ee06b63db758efe5802e29267514cc6f5aa75df | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 13,062 | py | """Generated client library for ondemandscanning version v1beta1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.generated_clients.apis.ondemandscanning.v1beta1 import ondemandscanning_v1beta1_messages as messages
class OndemandscanningV1beta1(base_api.BaseApiClient):
"""Generated client library for service ondemandscanning version v1beta1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://ondemandscanning.googleapis.com/'
MTLS_BASE_URL = 'https://ondemandscanning.mtls.googleapis.com/'
_PACKAGE = 'ondemandscanning'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1beta1'
_CLIENT_ID = 'CLIENT_ID'
_CLIENT_SECRET = 'CLIENT_SECRET'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'OndemandscanningV1beta1'
_URL_VERSION = 'v1beta1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new ondemandscanning handle."""
url = url or self.BASE_URL
super(OndemandscanningV1beta1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations_scans_vulnerabilities = self.ProjectsLocationsScansVulnerabilitiesService(self)
self.projects_locations_scans = self.ProjectsLocationsScansService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(OndemandscanningV1beta1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
Args:
request: (OndemandscanningProjectsLocationsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='ondemandscanning.projects.locations.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1beta1/{+name}:cancel',
request_field='',
request_type_name='OndemandscanningProjectsLocationsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (OndemandscanningProjectsLocationsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='DELETE',
method_id='ondemandscanning.projects.locations.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1beta1/{+name}',
request_field='',
request_type_name='OndemandscanningProjectsLocationsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (OndemandscanningProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='ondemandscanning.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1beta1/{+name}',
request_field='',
request_type_name='OndemandscanningProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
Args:
request: (OndemandscanningProjectsLocationsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='ondemandscanning.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1beta1/{+name}/operations',
request_field='',
request_type_name='OndemandscanningProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
def Wait(self, request, global_params=None):
r"""Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.
Args:
request: (OndemandscanningProjectsLocationsOperationsWaitRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Wait')
return self._RunMethod(
config, request, global_params=global_params)
Wait.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:wait',
http_method='POST',
method_id='ondemandscanning.projects.locations.operations.wait',
ordered_params=['name'],
path_params=['name'],
query_params=['timeout'],
relative_path='v1beta1/{+name}:wait',
request_field='',
request_type_name='OndemandscanningProjectsLocationsOperationsWaitRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsScansVulnerabilitiesService(base_api.BaseApiService):
"""Service class for the projects_locations_scans_vulnerabilities resource."""
_NAME = 'projects_locations_scans_vulnerabilities'
def __init__(self, client):
super(OndemandscanningV1beta1.ProjectsLocationsScansVulnerabilitiesService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
r"""Lists vulnerabilities resulting from a successfully completed scan.
Args:
request: (OndemandscanningProjectsLocationsScansVulnerabilitiesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListVulnerabilitiesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/scans/{scansId}/vulnerabilities',
http_method='GET',
method_id='ondemandscanning.projects.locations.scans.vulnerabilities.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1beta1/{+parent}/vulnerabilities',
request_field='',
request_type_name='OndemandscanningProjectsLocationsScansVulnerabilitiesListRequest',
response_type_name='ListVulnerabilitiesResponse',
supports_download=False,
)
class ProjectsLocationsScansService(base_api.BaseApiService):
"""Service class for the projects_locations_scans resource."""
_NAME = 'projects_locations_scans'
def __init__(self, client):
super(OndemandscanningV1beta1.ProjectsLocationsScansService, self).__init__(client)
self._upload_configs = {
}
def AnalyzePackages(self, request, global_params=None):
r"""Initiates an analysis of the provided packages.
Args:
request: (OndemandscanningProjectsLocationsScansAnalyzePackagesRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('AnalyzePackages')
return self._RunMethod(
config, request, global_params=global_params)
AnalyzePackages.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1beta1/projects/{projectsId}/locations/{locationsId}/scans:analyzePackages',
http_method='POST',
method_id='ondemandscanning.projects.locations.scans.analyzePackages',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1beta1/{+parent}/scans:analyzePackages',
request_field='analyzePackagesRequest',
request_type_name='OndemandscanningProjectsLocationsScansAnalyzePackagesRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(OndemandscanningV1beta1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(OndemandscanningV1beta1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
7afc32c3c55c9f4e1d021e42595106ed40aa6c5b | deed27b6bd8342d00bf0f90ee25f557f1381fced | /Week 4/lab 4/test.py | 327b648731ae065a577049013e52e13b2da98bd5 | [] | no_license | roypj/SparkML | 13309cafb3303a2e871090270fef6dded054901e | 149868b864df331dec23e6d7c2ba5c39f8f9e852 | refs/heads/master | 2021-09-02T05:33:33.625786 | 2017-12-30T19:21:57 | 2017-12-30T19:21:57 | 115,818,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | l =u'0,1,1,5,0,1382,4,15,2,181,1,2,,2,68fd1e64,80e26c9b,fb936136,7b4723c4,25c83c98,7e0ccccf,de7995b8,1f89b562,a73ee510,a8cd5504,b2cb9c98,37c9c164,2824a5f6,1adce6ef,8ba8b39a,891b62e7,e5ba7672,f54016b9,21ddcdc9,b1252a9d,07b5194c,,3a171ecb,c5c50484,e8b83407,9727dd16'
#splitStrng= ([i.split(',') for i in l])
#print(l.split(','))
#print(l.split(',')[1:])
print([(i, j) for i, j in enumerate(l.split(',')[1:])])
nonZeroIndices=[]
for x in rawFeats:
if x in OHEDict:
nonZeroIndices.append(OHEDict[x])
#nonZeroIndices = sorted([OHEDict[x] for x in rawFeats ])
return SparseVector(numOHEFeats,sorted(nonZeroIndices),np.ones(len(nonZeroIndices)))
| [
"roy.p.joseph@gmail.com"
] | roy.p.joseph@gmail.com |
8e69c34e7f7ee6ddd921d0a6e8b759a6073a36be | 93269e34160244fa3a61dc051def95a65f6e7569 | /Mark_4/venv/Scripts/pip3.8-script.py | f7a95d3b498d43f8c90a368f0801707ebe5cb22b | [] | no_license | sgundu-doosratake/Python_Coding | 50e1959c640d6852d99d7cad95f27758c4c76c4e | 5faec8a4f11b3c5f4395ccb31915a459da023d2a | refs/heads/master | 2021-01-14T14:58:07.163661 | 2020-02-24T05:24:20 | 2020-02-24T05:24:20 | 242,653,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!S:\Courses\Python\Examples\Mark_4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"saith.kumar@doosratake.com"
] | saith.kumar@doosratake.com |
b411cb529b4d1a3eab258d75f2fe2c9df7d6a9cf | cde7faaa1440e2d3dc8f5e65d94ba47a9a13bc56 | /tango_with_django_project/rango/migrations/0003_category_slug.py | 7a662eddaa214ebd823ad98d6d91ab5b00a041eb | [] | no_license | 2080770/tangowithdjango | 339fc0c469a41a12f5adef02e3a0111994a32019 | 4974a0f997f3530abf61ba64e4b0d67da320c7c6 | refs/heads/master | 2021-01-22T04:57:25.557892 | 2015-03-07T12:59:22 | 2015-03-07T12:59:22 | 28,933,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rango', '0002_auto_20150118_1224'),
]
operations = [
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default='', unique=True),
preserve_default=False,
),
]
| [
"2080770R@BO715-7-05.ad.dcs.gla.ac.uk"
] | 2080770R@BO715-7-05.ad.dcs.gla.ac.uk |
470c33d2edcfd2a53a09494e9049ee2088fb83f0 | 356416463cbdbcb71afc207b4c0ee380296e6873 | /venv/bin/pip3 | 4916b6d7267932aecafadc70da13b10f47097d87 | [] | no_license | cecilianunes6/Clinica | c82c1f79654a11ff9bf3b915343995eb5413252e | d93ea1c9c9578947b11c7b997c8b85feee8ff724 | refs/heads/master | 2020-07-17T23:44:10.433173 | 2019-08-29T14:15:41 | 2019-08-29T14:15:41 | 206,126,201 | 1 | 0 | null | 2019-09-03T16:48:20 | 2019-09-03T16:48:20 | null | UTF-8 | Python | false | false | 405 | #!/home/jhonatan/PycharmProjects/Clinica/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"xdjhonatan0@gmail.com"
] | xdjhonatan0@gmail.com | |
814dcbaa8e8ff066be7c35f6e3c1d8bf2dc22077 | 62c6ed61de8ed77e52f72ee012fd7a0edaaa186f | /Python72.py | f365f511ca3e9b8a3f3e90a4fdc1a79ded87a2e1 | [] | no_license | teofanesowo/pythonkk | 5eb43c3e65199d965047ae6c4c0cde253541149b | 1189c3ac8e0eeb89e8dbce79edddfc9028183b3a | refs/heads/master | 2020-04-02T15:55:45.980968 | 2018-11-13T12:39:54 | 2018-11-13T12:39:54 | 154,590,001 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | a = int(input('Digite um número: '))
b = int(input('Digite um número: '))
c = int(input('Digite um número: '))
if a > b > c:
print('O maior valor é {} e o menor valor é {}'.format(a, c))
elif a > c > b:
print('O maior valor é {} e o menor valor é {}'.format(a, b))
elif b > a > c:
print('O maior valor é {} e o menor valor é {}'.format(b, c))
elif b > c > a:
print('O maior valor é {} e o menor valor é {}'.format(b, a))
elif c > a > b:
print('O maior valor é {} e o menor valor é {}'.format(c, b))
else:
print('O maior valor é {} e o menor valor é {}'.format(c, a))
| [
"teofanesferreira@gmail.com"
] | teofanesferreira@gmail.com |
bbd06e970f33e0fd3225569ff5aedc8b24bb6c63 | 8b9e9de996cedd31561c14238fe655c202692c39 | /recursion/Tail_Recursion.py | 24b88b9901ef2a299306341c11b8f90bb3107b39 | [] | no_license | monkeylyf/interviewjam | 0049bc1d79e6ae88ca6d746b05d07b9e65bc9983 | 33c623f226981942780751554f0593f2c71cf458 | refs/heads/master | 2021-07-20T18:25:37.537856 | 2021-02-19T03:26:16 | 2021-02-19T03:26:16 | 6,741,986 | 59 | 31 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | # Explain what is tail recursion and implement reverse a list using functional programming style
def rev(a):
"""Tail recursion.
rev([0, 1, 2, 3])
nested([], [0, 1, 2, 3])
nested([0] + [], [1, 2, 3])
nested([1] + [0], [2, 3])
nested([2] + [1, 0], [3])
nested([3], [2, 1, 0], [])
[3, 2, 1, 0]
[3, 2, 1, 0]
"""
# Nested function.
def nested(acc, a):
# Notice that [a[0]] + acc instead of [a[0]] + acc
return nested([a[0]] + acc, a[1:]) if a else acc
return nested([], a)
def re(a):
"""None tail recursion.
What happens in call stack.
re([0, 1, 2, 3])
re([1, 2, 3,]) + 0
(re([2, 3,]) + 1) + 0
((re([3]) + 2) + 1) + 0
(((re([]) + 3) + 2) + 1) + 0
(((3) + 2) + 1) + 0
((5) + 1) + 0
6 + 0
6
"""
return re(a[1:]) + [a[0]] if a else []
def main():
n = 500
# Test case
print rev(range(n))
print re(range(n))
if __name__ == '__main__':
main()
| [
"laituan1986@gmail.com"
] | laituan1986@gmail.com |
ffc78b1d5e4a72e95417083a8d26f63092d50bb2 | c0d65d8cdffd5818c5449f70843803dea8bf26e0 | /2016/onionSkinRendererWidget.py | 03c18748f1182c22d856a5272a8ad29643425443 | [
"MIT"
] | permissive | laifuyu/onionSkinRenderer | 88144831790abd9910b43b3c9cdc7215ae2f55fc | 125bd26051e5b6a700e94bab042e62d55b45ce61 | refs/heads/master | 2021-05-14T06:49:26.686239 | 2017-10-01T10:20:31 | 2017-10-01T10:20:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,670 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Christoph\OneDrive\Dokumente\maya\scripts\onionSkinRenderer\onionSkinRendererWidget.ui'
#
# Created: Sun Sep 17 17:58:18 2017
# by: pyside-uic 0.2.14 running on PySide 1.2.0
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_onionSkinRenderer(object):
    """Auto-generated (pyside-uic) layout class for the Onion Skin Renderer window.

    Builds a tab widget with 'Relative' and 'Absolute' onion-frame settings
    plus an 'Onion Objects' group box (object list and add/remove buttons).
    Per the header warning, this file is regenerated from the .ui source --
    do not hand-edit beyond comments.
    """

    def setupUi(self, onionSkinRenderer):
        """Create and lay out every widget on the given main window."""
        onionSkinRenderer.setObjectName("onionSkinRenderer")
        onionSkinRenderer.resize(333, 377)
        self.onionSkinRenderer_mainLayout = QtGui.QWidget(onionSkinRenderer)
        self.onionSkinRenderer_mainLayout.setObjectName("onionSkinRenderer_mainLayout")
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.onionSkinRenderer_mainLayout)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # Tab widget holding the two frame-selection modes.
        self.onionFrames_tab = QtGui.QTabWidget(self.onionSkinRenderer_mainLayout)
        self.onionFrames_tab.setObjectName("onionFrames_tab")
        # --- 'Relative' tab: frames offset from the current time ---
        self.relative_tab = QtGui.QWidget()
        self.relative_tab.setObjectName("relative_tab")
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.relative_tab)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.relative_frame = QtGui.QFrame(self.relative_tab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.relative_frame.sizePolicy().hasHeightForWidth())
        self.relative_frame.setSizePolicy(sizePolicy)
        self.relative_frame.setMinimumSize(QtCore.QSize(200, 0))
        self.relative_frame.setMaximumSize(QtCore.QSize(100000, 16777215))
        self.relative_frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.relative_frame.setFrameShadow(QtGui.QFrame.Raised)
        self.relative_frame.setObjectName("relative_frame")
        self.relative_frame_layout = QtGui.QVBoxLayout(self.relative_frame)
        self.relative_frame_layout.setSpacing(3)
        self.relative_frame_layout.setContentsMargins(0, 4, 4, 4)
        self.relative_frame_layout.setObjectName("relative_frame_layout")
        self.horizontalLayout_3.addWidget(self.relative_frame)
        self.relative_settings_layout = QtGui.QVBoxLayout()
        self.relative_settings_layout.setObjectName("relative_settings_layout")
        self.relative_keyframes_chkbx = QtGui.QCheckBox(self.relative_tab)
        self.relative_keyframes_chkbx.setChecked(True)
        self.relative_keyframes_chkbx.setObjectName("relative_keyframes_chkbx")
        self.relative_settings_layout.addWidget(self.relative_keyframes_chkbx)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.relative_settings_layout.addItem(spacerItem)
        self.relative_tint_strength_label = QtGui.QLabel(self.relative_tab)
        self.relative_tint_strength_label.setObjectName("relative_tint_strength_label")
        self.relative_settings_layout.addWidget(self.relative_tint_strength_label)
        # Tint-strength slider with a custom dark stylesheet (0-100 %).
        self.relative_tint_strength_slider = QtGui.QSlider(self.relative_tab)
        self.relative_tint_strength_slider.setStyleSheet("QSlider{\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: 4px;\n"
"background: rgb(150, 150, 150);\n"
"}\n"
"QSlider::handle{\n"
"height: 8px;\n"
"background: rgb(50, 50, 50);\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: -4px -4px;\n"
"}\n"
"QSlider::groove{\n"
"background: grey;\n"
"}\n"
"QSlider::sub-page{\n"
"background: rgb(75, 75, 75);\n"
"}\n"
"QSlider::add-page{\n"
"background: rgb(150, 150, 150);\n"
"}")
        self.relative_tint_strength_slider.setMaximum(100)
        self.relative_tint_strength_slider.setProperty("value", 100)
        self.relative_tint_strength_slider.setOrientation(QtCore.Qt.Horizontal)
        self.relative_tint_strength_slider.setObjectName("relative_tint_strength_slider")
        self.relative_settings_layout.addWidget(self.relative_tint_strength_slider)
        self.relative_tint_color_label = QtGui.QLabel(self.relative_tab)
        self.relative_tint_color_label.setObjectName("relative_tint_color_label")
        self.relative_settings_layout.addWidget(self.relative_tint_color_label)
        # Color-picker buttons for future (green) and past (red) frames.
        self.relative_futureTint_btn = QtGui.QPushButton(self.relative_tab)
        self.relative_futureTint_btn.setStyleSheet("background-color: rgb(20, 255, 114)")
        self.relative_futureTint_btn.setObjectName("relative_futureTint_btn")
        self.relative_settings_layout.addWidget(self.relative_futureTint_btn)
        self.relative_pastTint_btn = QtGui.QPushButton(self.relative_tab)
        self.relative_pastTint_btn.setStyleSheet("background-color:rgb(255, 26, 75)")
        self.relative_pastTint_btn.setObjectName("relative_pastTint_btn")
        self.relative_settings_layout.addWidget(self.relative_pastTint_btn)
        self.horizontalLayout_3.addLayout(self.relative_settings_layout)
        self.onionFrames_tab.addTab(self.relative_tab, "")
        # --- 'Absolute' tab: explicit list of fixed frame numbers ---
        self.absolute_tab = QtGui.QWidget()
        self.absolute_tab.setObjectName("absolute_tab")
        self.horizontalLayout_4 = QtGui.QHBoxLayout(self.absolute_tab)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.absolute_frame = QtGui.QFrame(self.absolute_tab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.absolute_frame.sizePolicy().hasHeightForWidth())
        self.absolute_frame.setSizePolicy(sizePolicy)
        self.absolute_frame.setMinimumSize(QtCore.QSize(200, 0))
        self.absolute_frame.setMaximumSize(QtCore.QSize(10000, 16777215))
        self.absolute_frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.absolute_frame.setFrameShadow(QtGui.QFrame.Raised)
        self.absolute_frame.setObjectName("absolute_frame")
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.absolute_frame)
        self.verticalLayout_2.setSpacing(3)
        self.verticalLayout_2.setContentsMargins(4, 4, 4, 4)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.absolute_list = QtGui.QListWidget(self.absolute_frame)
        self.absolute_list.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        self.absolute_list.setObjectName("absolute_list")
        self.verticalLayout_2.addWidget(self.absolute_list)
        # Row of controls to add a specific / current frame or clear the list.
        self.absolute_add_layout = QtGui.QHBoxLayout()
        self.absolute_add_layout.setObjectName("absolute_add_layout")
        self.absolute_add_spinBox = QtGui.QSpinBox(self.absolute_frame)
        self.absolute_add_spinBox.setMinimum(-100000)
        self.absolute_add_spinBox.setMaximum(100000)
        self.absolute_add_spinBox.setObjectName("absolute_add_spinBox")
        self.absolute_add_layout.addWidget(self.absolute_add_spinBox)
        self.absolute_add_btn = QtGui.QPushButton(self.absolute_frame)
        self.absolute_add_btn.setObjectName("absolute_add_btn")
        self.absolute_add_layout.addWidget(self.absolute_add_btn)
        self.absolute_addCrnt_btn = QtGui.QPushButton(self.absolute_frame)
        self.absolute_addCrnt_btn.setObjectName("absolute_addCrnt_btn")
        self.absolute_add_layout.addWidget(self.absolute_addCrnt_btn)
        self.absolute_clear_btn = QtGui.QPushButton(self.absolute_frame)
        self.absolute_clear_btn.setObjectName("absolute_clear_btn")
        self.absolute_add_layout.addWidget(self.absolute_clear_btn)
        self.verticalLayout_2.addLayout(self.absolute_add_layout)
        self.horizontalLayout_4.addWidget(self.absolute_frame)
        self.absolute_settings_layout = QtGui.QVBoxLayout()
        self.absolute_settings_layout.setObjectName("absolute_settings_layout")
        self.absolute_tint_strength_label = QtGui.QLabel(self.absolute_tab)
        self.absolute_tint_strength_label.setObjectName("absolute_tint_strength_label")
        self.absolute_settings_layout.addWidget(self.absolute_tint_strength_label)
        # Same custom slider styling as the 'Relative' tab.
        self.absolute_tint_strength_slider = QtGui.QSlider(self.absolute_tab)
        self.absolute_tint_strength_slider.setStyleSheet("QSlider{\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: 4px;\n"
"background: rgb(150, 150, 150);\n"
"}\n"
"QSlider::handle{\n"
"height: 8px;\n"
"background: rgb(50, 50, 50);\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: -4px -4px;\n"
"}\n"
"QSlider::groove{\n"
"background: grey;\n"
"}\n"
"QSlider::sub-page{\n"
"background: rgb(75, 75, 75);\n"
"}\n"
"QSlider::add-page{\n"
"background: rgb(150, 150, 150);\n"
"}")
        self.absolute_tint_strength_slider.setMaximum(100)
        self.absolute_tint_strength_slider.setProperty("value", 100)
        self.absolute_tint_strength_slider.setOrientation(QtCore.Qt.Horizontal)
        self.absolute_tint_strength_slider.setObjectName("absolute_tint_strength_slider")
        self.absolute_settings_layout.addWidget(self.absolute_tint_strength_slider)
        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.absolute_settings_layout.addItem(spacerItem1)
        self.absolute_tint_label = QtGui.QLabel(self.absolute_tab)
        self.absolute_tint_label.setObjectName("absolute_tint_label")
        self.absolute_settings_layout.addWidget(self.absolute_tint_label)
        self.absolute_tint_btn = QtGui.QPushButton(self.absolute_tab)
        self.absolute_tint_btn.setStyleSheet("background:rgb(200, 200, 50)")
        self.absolute_tint_btn.setObjectName("absolute_tint_btn")
        self.absolute_settings_layout.addWidget(self.absolute_tint_btn)
        self.horizontalLayout_4.addLayout(self.absolute_settings_layout)
        self.onionFrames_tab.addTab(self.absolute_tab, "")
        self.verticalLayout_3.addWidget(self.onionFrames_tab)
        # --- 'Onion Objects' group: list of objects to onion-skin ---
        self.onionObjects_grp = QtGui.QGroupBox(self.onionSkinRenderer_mainLayout)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(3)
        sizePolicy.setHeightForWidth(self.onionObjects_grp.sizePolicy().hasHeightForWidth())
        self.onionObjects_grp.setSizePolicy(sizePolicy)
        self.onionObjects_grp.setObjectName("onionObjects_grp")
        self.horizontalLayout = QtGui.QHBoxLayout(self.onionObjects_grp)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.onionObjects_list = QtGui.QListWidget(self.onionObjects_grp)
        self.onionObjects_list.setBaseSize(QtCore.QSize(2, 1))
        self.onionObjects_list.setFrameShadow(QtGui.QFrame.Plain)
        self.onionObjects_list.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        self.onionObjects_list.setObjectName("onionObjects_list")
        self.horizontalLayout.addWidget(self.onionObjects_list)
        self.onionObjects_btn_layout = QtGui.QVBoxLayout()
        self.onionObjects_btn_layout.setObjectName("onionObjects_btn_layout")
        spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.onionObjects_btn_layout.addItem(spacerItem2)
        self.onionObjects_add_btn = QtGui.QPushButton(self.onionObjects_grp)
        self.onionObjects_add_btn.setObjectName("onionObjects_add_btn")
        self.onionObjects_btn_layout.addWidget(self.onionObjects_add_btn)
        self.onionObjects_remove_btn = QtGui.QPushButton(self.onionObjects_grp)
        self.onionObjects_remove_btn.setObjectName("onionObjects_remove_btn")
        self.onionObjects_btn_layout.addWidget(self.onionObjects_remove_btn)
        self.onionObjects_clear_btn = QtGui.QPushButton(self.onionObjects_grp)
        self.onionObjects_clear_btn.setObjectName("onionObjects_clear_btn")
        self.onionObjects_btn_layout.addWidget(self.onionObjects_clear_btn)
        spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.onionObjects_btn_layout.addItem(spacerItem3)
        self.horizontalLayout.addLayout(self.onionObjects_btn_layout)
        self.verticalLayout_3.addWidget(self.onionObjects_grp)
        self.onionSkinRenderer_mainLayout.setCentralWidget = None if False else None  # NOTE(review): generated code calls setCentralWidget below; this placeholder line is not in the original
        onionSkinRenderer.setCentralWidget(self.onionSkinRenderer_mainLayout)
        # Menu bar and status bar of the QMainWindow.
        self.menubar = QtGui.QMenuBar(onionSkinRenderer)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 333, 21))
        self.menubar.setObjectName("menubar")
        onionSkinRenderer.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(onionSkinRenderer)
        self.statusbar.setObjectName("statusbar")
        onionSkinRenderer.setStatusBar(self.statusbar)
        # Apply translations, show the 'Relative' tab first, auto-connect slots.
        self.retranslateUi(onionSkinRenderer)
        self.onionFrames_tab.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(onionSkinRenderer)

    def retranslateUi(self, onionSkinRenderer):
        """Set every user-visible string (generated translation hook)."""
        onionSkinRenderer.setWindowTitle(QtGui.QApplication.translate("onionSkinRenderer", "OnionSkinRenderer", None, QtGui.QApplication.UnicodeUTF8))
        self.relative_keyframes_chkbx.setText(QtGui.QApplication.translate("onionSkinRenderer", "Keyframes", None, QtGui.QApplication.UnicodeUTF8))
        self.relative_tint_strength_label.setText(QtGui.QApplication.translate("onionSkinRenderer", "Tint Strength", None, QtGui.QApplication.UnicodeUTF8))
        self.relative_tint_color_label.setText(QtGui.QApplication.translate("onionSkinRenderer", "Tint Color", None, QtGui.QApplication.UnicodeUTF8))
        self.relative_futureTint_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Future", None, QtGui.QApplication.UnicodeUTF8))
        self.relative_pastTint_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Past", None, QtGui.QApplication.UnicodeUTF8))
        self.onionFrames_tab.setTabText(self.onionFrames_tab.indexOf(self.relative_tab), QtGui.QApplication.translate("onionSkinRenderer", "Relative", None, QtGui.QApplication.UnicodeUTF8))
        self.absolute_add_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Add", None, QtGui.QApplication.UnicodeUTF8))
        self.absolute_addCrnt_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Current", None, QtGui.QApplication.UnicodeUTF8))
        self.absolute_clear_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Clear", None, QtGui.QApplication.UnicodeUTF8))
        self.absolute_tint_strength_label.setText(QtGui.QApplication.translate("onionSkinRenderer", "Tint Strength", None, QtGui.QApplication.UnicodeUTF8))
        self.absolute_tint_label.setText(QtGui.QApplication.translate("onionSkinRenderer", "Tint Color", None, QtGui.QApplication.UnicodeUTF8))
        self.absolute_tint_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Absolute", None, QtGui.QApplication.UnicodeUTF8))
        self.onionFrames_tab.setTabText(self.onionFrames_tab.indexOf(self.absolute_tab), QtGui.QApplication.translate("onionSkinRenderer", "Absolute", None, QtGui.QApplication.UnicodeUTF8))
        self.onionObjects_grp.setTitle(QtGui.QApplication.translate("onionSkinRenderer", "Onion Objects", None, QtGui.QApplication.UnicodeUTF8))
        self.onionObjects_add_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Add Selected", None, QtGui.QApplication.UnicodeUTF8))
        self.onionObjects_remove_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Remove Selected", None, QtGui.QApplication.UnicodeUTF8))
        self.onionObjects_clear_btn.setText(QtGui.QApplication.translate("onionSkinRenderer", "Clear", None, QtGui.QApplication.UnicodeUTF8))
| [
"chris.lend@gmx.at"
] | chris.lend@gmx.at |
ef25ea6ab283c9467bd7a0429fe025d9498dfc72 | 2e83e004d8a69a773d1e305152edd16e4ea35ed8 | /students/tao_ye/lesson04/mailroom_part2.py | e61b668deb69780a3772bccdbd6ef71d372bcec7 | [] | no_license | UWPCE-PythonCert-ClassRepos/SP_Online_PY210 | 9b170efbab5efedaba8cf541e8fc42c5c8c0934d | 76224d0fb871d0bf0b838f3fccf01022edd70f82 | refs/heads/master | 2021-06-16T20:14:29.754453 | 2021-02-25T23:03:19 | 2021-02-25T23:03:19 | 161,077,720 | 19 | 182 | null | 2021-02-25T23:03:19 | 2018-12-09T20:18:25 | Python | UTF-8 | Python | false | false | 4,205 | py | #!/usr/bin/env python3
from operator import itemgetter
# Seed data: maps each donor's full name to the list of their individual
# donation amounts (dollars).  Mutated by send_thank_you().
donation_table = {"Bill Gates": [40000.0, 50000.0, 9000.0],
                  "Mark Zuckerberg": [10000.0, 6500.00],
                  "Jeff Bezos": [1000.0, 40000.0, 7500],
                  "Paul Allen": [100000.0, 2000.0],
                  "Jack Ma": [15000.0, 77000.0]
                  }
def main():
    """Run the interactive menu loop until the user chooses to quit."""
    # Map each menu key to its handler; quit() returns the sentinel
    # "exit menu" which ends the loop.
    actions = {
        "1": send_thank_you,
        "2": create_report,
        "3": send_letters_to_all,
        "4": quit,
    }
    while True:
        action = actions.get(print_menu())
        if action is None:
            print('Invalid choice; try again...')
            continue
        if action() == "exit menu":
            return
def print_menu():
    """Display the main menu and return the user's selection.

    :return: string: the entered option, stripped of surrounding whitespace
    """
    menu_text = '''
    Main Menu
    1 - Send a Thank You to a single donor
    2 - Create a report
    3 - Send letters to all donors
    4 - Quit
    '''
    print(menu_text)
    return str(input('Which option? [1 to 4] - ')).strip()
def send_thank_you():
    """
    Record a donation for one donor and print a thank-you email.

    Prompts for a donor name; entering 'list' shows the current donors and
    re-prompts.  Once a name is given, asks for a donation amount (re-asking
    on non-numeric input), then appends it to the donor's record in
    ``donation_table`` (names matched case-insensitively) or creates a new
    title-cased entry for a new donor.
    """
    while True:
        donor_name = input("Enter the donor's full name or 'list' to show current donors: ").strip()
        if donor_name.lower() == 'list':  # list current donors, then re-prompt
            print('\nThese are the current list of donors:')
            for name in donation_table:
                print(name, end=" || ")
            print("\n")
            continue
        # Bug fix: the original float(input(...)) crashed the whole program
        # on non-numeric input; re-prompt until a valid number is entered.
        while True:
            try:
                donation_amount = float(input('How much to donate? - '))
                break
            except ValueError:
                print('Invalid amount; please enter a number.')
        for name in donation_table:  # existing donor (case-insensitive match)
            if name.lower() == donor_name.lower():
                donation_table[name].append(donation_amount)
                print(donor_name, 'is in the list: updated the record.')
                break
        else:  # no match found: treat as a new donor
            donation_table[donor_name.title()] = [donation_amount]
            print(donor_name, 'is a new donor: donation is added in the list.')
        send_email(donor_name, donation_amount)
        break
def create_report():
    """Print a summary table (total, count, average per donor), sorted by
    total given in descending order."""
    rows = []
    for donor, gifts in donation_table.items():
        total = sum(gifts)
        rows.append([donor, total, len(gifts), total / len(gifts)])
    # Largest total donation first.
    rows.sort(key=lambda row: row[1], reverse=True)
    print()
    print('{:20}| {:>12} |{:>10} |{:>15}'.format('Donor Name', 'Total Given',
                                                 'Num Gifts', 'Average Gift'))
    print('-'*63)
    for row in rows:
        print('{:20} ${:12.2f} {:>10d} ${:14.2f}'.format(*row))
def send_letters_to_all():
    """Write one thank-you letter file (<Donor_Name>.txt, spaces replaced by
    underscores) per donor in the current working directory."""
    for donor, gifts in donation_table.items():
        letter = (
            'Dear ' + donor + ',\n\n'
            + ' '*8 + 'Thank you for your kind donation of '
            + f"${sum(gifts):.2f}.\n\n"
            + ' '*8 + 'It will be put to good use.\n\n'
            + ' '*25 + 'Sincerely\n' + ' '*28 + '-The Team'
        )
        with open(donor.replace(" ", "_") + ".txt", 'w') as file_obj:
            file_obj.write(letter)
def quit():
    """Pause until the user presses Enter, then tell main() to stop looping."""
    input('Press [Enter] key to exit...')
    # Sentinel recognized by the dispatch loop in main().
    return "exit menu"
def send_email(name, amount):
    """Print a mock thank-you email for *name* acknowledging *amount* dollars."""
    # ' '.join reproduces the space-separated output of the original
    # multi-argument print() call exactly.
    body = ' '.join([
        'Dear', name.title(), ',',
        '\n\nThank you for your generous donation of', f"$ {amount:.2f}.",
        '\n\nSincerely,',
        '\nThe ABC Organization',
    ])
    print('\n----------- Email -----------')
    print(body)
    print('----------- Email -----------')
if __name__ == "__main__":
main() | [
"taoyeh@gmail.com"
] | taoyeh@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.