id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
4986680 |
import sys
import random
class PathFinder:
    """Greedy depth-first path search over a 2-D board.

    A cell is traversable when it holds ``None``; any other value marks it
    as occupied.  ``pathFind`` returns a list of move strings ("LEFT",
    "RIGHT", "UP", "DOWN") leading from ``start`` to ``end``, or ``False``
    when no path exists.

    NOTE(review): ``self.visited`` is never reset, so each instance
    supports a single search; reuse requires a fresh ``PathFinder``.
    """

    def __init__(self):
        # Cells already explored during the current search.
        self.visited = []

    def pathFind(self, start, end, board):
        """Recursively search for a path from ``start`` to ``end``.

        :param start: (row, col) tuple of the current cell.
        :param end: (row, col) tuple of the destination cell.
        :param board: 2-D list; ``None`` marks a free cell.
        :return: list of move strings, or ``False`` if no path exists.
        """
        if start[0] == end[0] and start[1] == end[1]:
            return []  # reached the destination: no further moves needed
        # Idiom fix: `in` instead of `.count(...) > 0` (same semantics, one scan).
        if start in self.visited:
            return False
        if start[0] < 0 or start[0] >= len(board) or start[1] < 0 or start[1] >= len(board[start[0]]):
            return False  # out of bounds
        # The starting cell itself is exempt from the occupancy check.
        if len(self.visited) > 0 and board[start[0]][start[1]] is not None:
            return False  # occupied cell
        self.visited.append(start)
        left = (start[0] - 1, start[1])
        right = (start[0] + 1, start[1])
        up = (start[0], start[1] - 1)
        down = (start[0], start[1] + 1)
        choices = [left, right, up, down]
        # Greedy ordering: try the neighbour closest to the goal first
        # (squared Euclidean distance; stable sort keeps the L/R/U/D order on ties).
        choices.sort(key=lambda e: (e[0] - end[0]) ** 2.0 + (e[1] - end[1]) ** 2.0)
        options = []
        for point in choices:
            path = self.pathFind(point, end, board)
            # `is not False` so an empty (already-complete) path still counts.
            if path is not False:
                options.append([point, path])
        if not options:
            return False
        # Prefer the shortest of the successful sub-paths.
        options.sort(key=lambda e: len(e[1]))
        best_point, best_path = options[0]
        if best_point == left:
            best_path.insert(0, "LEFT")
        elif best_point == right:
            best_path.insert(0, "RIGHT")
        elif best_point == up:
            best_path.insert(0, "UP")
        elif best_point == down:
            best_path.insert(0, "DOWN")
        return best_path
| StarcoderdataPython |
8178937 | """Report configuration for the analysis"""
import os
import shutil
from pathlib import Path
from policy_sentry.shared.constants import AUDIT_DIRECTORY_PATH
def create_default_report_config_file():
    """
    Copy the default report config file into the user's audit config directory.

    Essentially:
    cp $MODULE_DIR/policy_sentry/shared/data/audit/report-config.yml ~/policy_sentry/audit/report-config.yml
    """
    existing_report_config_file = 'report-config.yml'
    # AUDIT_DIRECTORY_PATH is presumably slash-terminated — TODO confirm;
    # plain concatenation preserved from the original for that reason.
    target_report_config_file_path = AUDIT_DIRECTORY_PATH + existing_report_config_file
    # FIX: the original passed a single pre-concatenated string to
    # os.path.join(), which is a no-op; join the path components properly.
    existing_report_config_file_path = os.path.join(
        str(Path(os.path.dirname(__file__)).parent),
        'shared', 'data', 'audit', existing_report_config_file)
    shutil.copy(existing_report_config_file_path,
                target_report_config_file_path)
    print(
        f"Copying overrides file {existing_report_config_file} to {target_report_config_file_path}")
| StarcoderdataPython |
115683 | <gh_stars>1-10
"""
Autofit a Spectra
------------------
"""
import cana
import matplotlib.pyplot as plt
# First load an spectrum, we will just gonna use one from the available datasets.
# you can do: spec = cana.loadspec('path to your spectrum file')
# See spec.py Spectrum class for spec attributes
spec = cana.datasets.getspectrum('000334', ref='primass')
# fitting the spectrum, you can give the minimal and maximal order that could
# be fitted. Default: degree_min=1, degree_max=12.
specfit, coefs = spec.autofit()
# Plotting
plt.plot(spec.w, spec.r, c='0.3', lw=1, label='Spectrum')
plt.plot(specfit.w, specfit.r, c='r', label='Fit')
plt.legend()
plt.show | StarcoderdataPython |
311516 | """You're a wizard, Harry."""
def register(bot):
    """Register the magic/mystery listeners on *bot*."""
    # "magic <thing>" (bot must be mentioned): sparkle the given thing.
    bot.listen(r'^magic ?(.*)$', magic, require_mention=True)
    # Passive triggers: the word "mystery", or questions of the form
    # "why doesn't X work" / "why isn't/aren't X working" / "how does X work".
    bot.listen(r'\bmystery\b|'
               r"\bwhy (do(es)?n't .+ work|(is|are)n't .+ working)\b|"
               r'\bhow do(es)? .+ work\b', mystery)
def _magic(thing):
return '(ノ゚ο゚)ノミ★゜・。。・゜゜・。{} 。・゜☆゜・。。・゜'.format(thing)
def magic(bot, msg):
    """(ノ゚ο゚)ノミ★゜・。。・゜"""
    # Use the captured argument when present, otherwise sparkle "magic" itself.
    requested = msg.match.group(1) or 'magic'
    msg.respond(_magic(requested), ping=False)
def mystery(bot, msg):
    """~it is a mystery~"""
    link = 'https://mystery.fuqu.jp/'
    msg.respond(_magic(link), ping=False)
| StarcoderdataPython |
1674253 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class CreateJobRequest(AbstractModel):
    """CreateJob request structure."""

    def __init__(self):
        """
        :param Name: Job name
        :type Name: str
        :param Cluster: Cluster that runs the job (see https://cloud.tencent.com/document/product/851/17317)
        :type Cluster: str
        :param RuntimeVersion: Runtime environment of the job (see https://cloud.tencent.com/document/product/851/17320)
        :type RuntimeVersion: str
        :param PackageDir: Mounted paths; NFS, CFS and COS are supported (COS only in the TI-A custom environment)
        :type PackageDir: list of str
        :param Command: Job start command
        :type Command: list of str
        :param Args: Job start arguments
        :type Args: list of str
        :param ScaleTier: Scale configuration of the job (see https://cloud.tencent.com/document/product/851/17319)
        :type ScaleTier: str
        :param MasterType: Master machine type; required when ScaleTier is `CUSTOM`
        :type MasterType: str
        :param WorkerType: Worker machine type; required when ScaleTier is `CUSTOM`
        :type WorkerType: str
        :param ParameterServerType: Parameter-server machine type; required when ScaleTier is `CUSTOM`
        :type ParameterServerType: str
        :param WorkerCount: Number of worker machines; required when ScaleTier is `CUSTOM`
        :type WorkerCount: int
        :param ParameterServerCount: Number of parameter-server machines; required when ScaleTier is `CUSTOM`
        :type ParameterServerCount: int
        :param Debug: Enable debug mode; defaults to false
        :type Debug: bool
        :param RuntimeConf: Extra runtime configuration
        :type RuntimeConf: list of str
        """
        self.Name = None
        self.Cluster = None
        self.RuntimeVersion = None
        self.PackageDir = None
        self.Command = None
        self.Args = None
        self.ScaleTier = None
        self.MasterType = None
        self.WorkerType = None
        self.ParameterServerType = None
        self.WorkerCount = None
        self.ParameterServerCount = None
        self.Debug = None
        self.RuntimeConf = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Cluster = params.get("Cluster")
        self.RuntimeVersion = params.get("RuntimeVersion")
        self.PackageDir = params.get("PackageDir")
        self.Command = params.get("Command")
        self.Args = params.get("Args")
        self.ScaleTier = params.get("ScaleTier")
        self.MasterType = params.get("MasterType")
        self.WorkerType = params.get("WorkerType")
        self.ParameterServerType = params.get("ParameterServerType")
        self.WorkerCount = params.get("WorkerCount")
        self.ParameterServerCount = params.get("ParameterServerCount")
        self.Debug = params.get("Debug")
        self.RuntimeConf = params.get("RuntimeConf")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class CreateJobResponse(AbstractModel):
    """CreateJob response structure."""

    def __init__(self):
        """
        :param Job: Training job information
        :type Job: :class:`tencentcloud.tia.v20180226.models.Job`
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Job = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        if params.get("Job") is not None:
            self.Job = Job()
            self.Job._deserialize(params.get("Job"))
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class CreateModelRequest(AbstractModel):
    """CreateModel request structure."""

    def __init__(self):
        """
        :param Name: Model name
        :type Name: str
        :param Model: Path of the model file to deploy
        :type Model: str
        :param Description: Description of the model
        :type Description: str
        :param Cluster: Name of the target cluster; required in cluster mode
        :type Cluster: str
        :param RuntimeVersion: Tag of the serving runtime image (see https://cloud.tencent.com/document/product/851/17320#serving-.E7.8E.AF.E5.A2.83)
        :type RuntimeVersion: str
        :param Replicas: Number of model replicas to deploy; optional in cluster mode
        :type Replicas: int
        :param Expose: Expose externally or internally; defaults to external; optional in cluster mode
        :type Expose: str
        :param ServType: Deployment mode: `serverless` for serverless mode, otherwise the serving
            scale in cluster mode, e.g. `2U4G1P` (see https://cloud.tencent.com/document/product/851/17319)
        :type ServType: str
        :param RuntimeConf: Optional extra configuration for serverless mode (see https://cloud.tencent.com/document/product/851/17049)
        :type RuntimeConf: list of str
        """
        self.Name = None
        self.Model = None
        self.Description = None
        self.Cluster = None
        self.RuntimeVersion = None
        self.Replicas = None
        self.Expose = None
        self.ServType = None
        self.RuntimeConf = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Model = params.get("Model")
        self.Description = params.get("Description")
        self.Cluster = params.get("Cluster")
        self.RuntimeVersion = params.get("RuntimeVersion")
        self.Replicas = params.get("Replicas")
        self.Expose = params.get("Expose")
        self.ServType = params.get("ServType")
        self.RuntimeConf = params.get("RuntimeConf")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class CreateModelResponse(AbstractModel):
    """CreateModel response structure."""

    def __init__(self):
        """
        :param Model: Detailed model information
        :type Model: :class:`tencentcloud.tia.v20180226.models.Model`
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Model = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        if params.get("Model") is not None:
            self.Model = Model()
            self.Model._deserialize(params.get("Model"))
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DeleteJobRequest(AbstractModel):
    """DeleteJob request structure."""

    def __init__(self):
        """
        :param Name: Job name
        :type Name: str
        :param Cluster: Cluster that runs the job
        :type Cluster: str
        """
        self.Name = None
        self.Cluster = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Cluster = params.get("Cluster")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DeleteJobResponse(AbstractModel):
    """DeleteJob response structure."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DeleteModelRequest(AbstractModel):
    """DeleteModel request structure."""

    def __init__(self):
        """
        :param Name: Name of the model to delete
        :type Name: str
        :param Cluster: Name of the cluster the model lives in; required in cluster mode
        :type Cluster: str
        :param ServType: Model type: `serverless` for serverless mode, otherwise cluster mode
        :type ServType: str
        """
        self.Name = None
        self.Cluster = None
        self.ServType = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Cluster = params.get("Cluster")
        self.ServType = params.get("ServType")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DeleteModelResponse(AbstractModel):
    """DeleteModel response structure."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeJobRequest(AbstractModel):
    """DescribeJob request structure."""

    def __init__(self):
        """
        :param Name: Job name
        :type Name: str
        :param Cluster: Cluster that runs the job
        :type Cluster: str
        """
        self.Name = None
        self.Cluster = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Cluster = params.get("Cluster")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeJobResponse(AbstractModel):
    """DescribeJob response structure."""

    def __init__(self):
        """
        :param Job: Training job information
        :type Job: :class:`tencentcloud.tia.v20180226.models.Job`
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Job = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        if params.get("Job") is not None:
            self.Job = Job()
            self.Job._deserialize(params.get("Job"))
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeModelRequest(AbstractModel):
    """DescribeModel request structure."""

    def __init__(self):
        """
        :param Name: Model name
        :type Name: str
        :param Cluster: Name of the cluster the model lives in; required in cluster mode
        :type Cluster: str
        :param ServType: Model type: `serverless` for serverless mode, otherwise cluster mode
        :type ServType: str
        """
        self.Name = None
        self.Cluster = None
        self.ServType = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Cluster = params.get("Cluster")
        self.ServType = params.get("ServType")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeModelResponse(AbstractModel):
    """DescribeModel response structure."""

    def __init__(self):
        """
        :param Model: Model information
        :type Model: :class:`tencentcloud.tia.v20180226.models.Model`
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Model = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        if params.get("Model") is not None:
            self.Model = Model()
            self.Model._deserialize(params.get("Model"))
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class InstallAgentRequest(AbstractModel):
    """InstallAgent request structure."""

    def __init__(self):
        """
        :param Cluster: Cluster name
        :type Cluster: str
        :param TiaVersion: Agent version, used for agent installs on private clusters; defaults to "private-training"
        :type TiaVersion: str
        :param Update: Whether updating the agent is allowed
        :type Update: bool
        """
        self.Cluster = None
        self.TiaVersion = None
        self.Update = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Cluster = params.get("Cluster")
        self.TiaVersion = params.get("TiaVersion")
        self.Update = params.get("Update")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class InstallAgentResponse(AbstractModel):
    """InstallAgent response structure."""

    def __init__(self):
        """
        :param TiaVersion: Agent version, used for agent installs on private clusters
        :type TiaVersion: str
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.TiaVersion = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.TiaVersion = params.get("TiaVersion")
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class Job(AbstractModel):
    """Training job information."""

    def __init__(self):
        """
        :param Name: Job name
        :type Name: str
        :param CreateTime: Creation time, format: 2006-01-02 15:04:05.999999999 -0700 MST
        :type CreateTime: str
        :param StartTime: Start time, format: 2006-01-02 15:04:05.999999999 -0700 MST
        :type StartTime: str
        :param EndTime: End time, format: 2006-01-02 15:04:05.999999999 -0700 MST
        :type EndTime: str
        :param State: Job state; one of Created, Running, Succeeded, Failed
        :type State: str
        :param Message: State message
        :type Message: str
        :param ScaleTier: Scale configuration of the job
        :type ScaleTier: str
        :param MasterType: Master machine type (when ScaleTier is Custom)
        :type MasterType: str
        :param WorkerType: Worker machine type (when ScaleTier is Custom)
        :type WorkerType: str
        :param ParameterServerType: Parameter-server machine type (when ScaleTier is Custom)
        :type ParameterServerType: str
        :param WorkerCount: Number of worker machines (when ScaleTier is Custom)
        :type WorkerCount: int
        :param ParameterServerCount: Number of parameter-server machines (when ScaleTier is Custom)
        :type ParameterServerCount: int
        :param PackageDir: Mounted paths
        :type PackageDir: list of str
        :param Command: Job start command
        :type Command: list of str
        :param Args: Job start arguments
        :type Args: list of str
        :param Cluster: Cluster that runs the job
        :type Cluster: str
        :param RuntimeVersion: Runtime environment of the job
        :type RuntimeVersion: str
        :param DelTime: Deletion time, format: 2006-01-02 15:04:05.999999999 -0700 MST
        :type DelTime: str
        :param AppId: AppId that created the job
        :type AppId: int
        :param Uin: Uin that created the job
        :type Uin: str
        :param Debug: Whether the job was created in debug mode
        :type Debug: bool
        :param RuntimeConf: Extra runtime configuration
        :type RuntimeConf: list of str
        :param Id: Job Id
        :type Id: str
        """
        self.Name = None
        self.CreateTime = None
        self.StartTime = None
        self.EndTime = None
        self.State = None
        self.Message = None
        self.ScaleTier = None
        self.MasterType = None
        self.WorkerType = None
        self.ParameterServerType = None
        self.WorkerCount = None
        self.ParameterServerCount = None
        self.PackageDir = None
        self.Command = None
        self.Args = None
        self.Cluster = None
        self.RuntimeVersion = None
        self.DelTime = None
        self.AppId = None
        self.Uin = None
        self.Debug = None
        self.RuntimeConf = None
        self.Id = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.CreateTime = params.get("CreateTime")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.State = params.get("State")
        self.Message = params.get("Message")
        self.ScaleTier = params.get("ScaleTier")
        self.MasterType = params.get("MasterType")
        self.WorkerType = params.get("WorkerType")
        self.ParameterServerType = params.get("ParameterServerType")
        self.WorkerCount = params.get("WorkerCount")
        self.ParameterServerCount = params.get("ParameterServerCount")
        self.PackageDir = params.get("PackageDir")
        self.Command = params.get("Command")
        self.Args = params.get("Args")
        self.Cluster = params.get("Cluster")
        self.RuntimeVersion = params.get("RuntimeVersion")
        self.DelTime = params.get("DelTime")
        self.AppId = params.get("AppId")
        self.Uin = params.get("Uin")
        self.Debug = params.get("Debug")
        self.RuntimeConf = params.get("RuntimeConf")
        self.Id = params.get("Id")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class ListJobsRequest(AbstractModel):
    """ListJobs request structure."""

    def __init__(self):
        """
        :param Cluster: Cluster that runs the jobs
        :type Cluster: str
        :param Limit: Pagination: number of entries to return
        :type Limit: int
        :param Offset: Pagination: starting position
        :type Offset: int
        """
        self.Cluster = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Cluster = params.get("Cluster")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class ListJobsResponse(AbstractModel):
    """ListJobs response structure."""

    def __init__(self):
        """
        :param Jobs: List of training jobs
        :type Jobs: list of Job
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Jobs = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        if params.get("Jobs") is not None:
            self.Jobs = []
            for item in params.get("Jobs"):
                obj = Job()
                obj._deserialize(item)
                self.Jobs.append(obj)
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class ListModelsRequest(AbstractModel):
    """ListModels request structure."""

    def __init__(self):
        """
        :param Cluster: Cluster the models are deployed on; required in cluster mode
        :type Cluster: str
        :param Limit: Pagination: maximum number of entries to return
        :type Limit: int
        :param Offset: Pagination: starting position
        :type Offset: int
        :param ServType: Deployment type: `serverless` for serverless mode, otherwise cluster mode
        :type ServType: str
        """
        self.Cluster = None
        self.Limit = None
        self.Offset = None
        self.ServType = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Cluster = params.get("Cluster")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.ServType = params.get("ServType")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class ListModelsResponse(AbstractModel):
    """ListModels response structure."""

    def __init__(self):
        """
        :param Models: Array of Model entries describing every model
        :type Models: list of Model
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Models = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        if params.get("Models") is not None:
            self.Models = []
            for item in params.get("Models"):
                obj = Model()
                obj._deserialize(item)
                self.Models.append(obj)
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class Log(AbstractModel):
    """A single log entry."""

    def __init__(self):
        """
        :param ContainerName: Container name
        :type ContainerName: str
        :param Log: Log content
        :type Log: str
        :param Namespace: Namespace name
        :type Namespace: str
        :param PodId: Pod Id
        :type PodId: str
        :param PodName: Pod name
        :type PodName: str
        :param Time: Log timestamp, format: "2018-07-02T09:10:04.916553368Z"
        :type Time: str
        """
        self.ContainerName = None
        self.Log = None
        self.Namespace = None
        self.PodId = None
        self.PodName = None
        self.Time = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.ContainerName = params.get("ContainerName")
        self.Log = params.get("Log")
        self.Namespace = params.get("Namespace")
        self.PodId = params.get("PodId")
        self.PodName = params.get("PodName")
        self.Time = params.get("Time")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class Model(AbstractModel):
    """Detailed description of a deployed model.  Example:

    "Model": {
        "Name": "test-model",
        "Description": "test-model",
        "Cluster": "ap-beijing",
        "Model": "cos://test-1255502019.cos.ap-shanghai.myqcloud.com/example:/data/mnist",
        "RuntimeVersion": "tiaserv-1.6.0-cpu",
        "CreateTime": "2018-04-26 15:59:25 +0800 CST",
        "State": "Running",
        "ServingUrl": "172.16.58.3",
        "Message": "Deployment does not have minimum availability.",
        "AppId": 1255502019,
        "ServType": "1U2G0P"
    },
    """

    def __init__(self):
        """
        :param Name: Model name
        :type Name: str
        :param Description: Model description
        :type Description: str
        :param Cluster: Cluster name
        :type Cluster: str
        :param Model: Model location
        :type Model: str
        :param RuntimeVersion: Runtime environment tag
        :type RuntimeVersion: str
        :param CreateTime: Model creation time
        :type CreateTime: str
        :param State: Model running state
        :type State: str
        :param ServingUrl: URL serving the model
        :type ServingUrl: str
        :param Message: Related message
        :type Message: str
        :param AppId: AppId
        :type AppId: int
        :param ServType: Machine type
        :type ServType: str
        :param Expose: How the model is exposed
        :type Expose: str
        :param Replicas: Number of deployed replicas
        :type Replicas: int
        :param Id: Model Id
        :type Id: str
        :param Uin: Uin that created the model
        :type Uin: str
        :param DelTime: Deletion time, format: 2006-01-02 15:04:05.999999999 -0700 MST
        :type DelTime: str
        """
        self.Name = None
        self.Description = None
        self.Cluster = None
        self.Model = None
        self.RuntimeVersion = None
        self.CreateTime = None
        self.State = None
        self.ServingUrl = None
        self.Message = None
        self.AppId = None
        self.ServType = None
        self.Expose = None
        self.Replicas = None
        self.Id = None
        self.Uin = None
        self.DelTime = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Name = params.get("Name")
        self.Description = params.get("Description")
        self.Cluster = params.get("Cluster")
        self.Model = params.get("Model")
        self.RuntimeVersion = params.get("RuntimeVersion")
        self.CreateTime = params.get("CreateTime")
        self.State = params.get("State")
        self.ServingUrl = params.get("ServingUrl")
        self.Message = params.get("Message")
        self.AppId = params.get("AppId")
        self.ServType = params.get("ServType")
        self.Expose = params.get("Expose")
        self.Replicas = params.get("Replicas")
        self.Id = params.get("Id")
        self.Uin = params.get("Uin")
        self.DelTime = params.get("DelTime")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class QueryLogsRequest(AbstractModel):
    """QueryLogs request structure."""

    def __init__(self):
        """
        :param JobName: Job name
        :type JobName: str
        :param Cluster: Name of the cluster the job runs on
        :type Cluster: str
        :param StartTime: Query start time, format: 2019-01-01 00:00:00
        :type StartTime: str
        :param EndTime: Query end time, format: 2019-01-01 00:00:00
        :type EndTime: str
        :param Limit: Maximum number of log entries returned per call
        :type Limit: int
        :param Context: Pass the Context from the previous response to fetch subsequent
            logs; at most 10000 entries can be paged through this way
        :type Context: str
        """
        self.JobName = None
        self.Cluster = None
        self.StartTime = None
        self.EndTime = None
        self.Limit = None
        self.Context = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.JobName = params.get("JobName")
        self.Cluster = params.get("Cluster")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Limit = params.get("Limit")
        self.Context = params.get("Context")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class QueryLogsResponse(AbstractModel):
    """QueryLogs response structure."""

    def __init__(self):
        """
        :param Context: Query context, used to fetch more logs
        :type Context: str
        :param Logs: List of log entries
        :type Logs: list of Log
        :param Listover: Whether every matching log entry has been returned
        :type Listover: bool
        :param RequestId: Unique request ID, returned with every request; needed when reporting an issue
        :type RequestId: str
        """
        self.Context = None
        self.Logs = None
        self.Listover = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognised keys."""
        self.Context = params.get("Context")
        if params.get("Logs") is not None:
            self.Logs = []
            for item in params.get("Logs"):
                obj = Log()
                obj._deserialize(item)
                self.Logs.append(obj)
        self.Listover = params.get("Listover")
        self.RequestId = params.get("RequestId")
        # Typo fixes vs. original: "memeber_set" -> set difference, "fileds" -> "fields".
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
| StarcoderdataPython |
1898063 | from app.auth.auth_bearer import JWTBearer
from app.models.lightning import (
Invoice,
LightningInfoLite,
LnInfo,
Payment,
PaymentRequest,
SendCoinsInput,
SendCoinsResponse,
WalletBalance,
)
from app.repositories.lightning import (
add_invoice,
decode_pay_request,
get_ln_info,
get_ln_info_lite,
get_wallet_balance,
send_coins,
send_payment,
)
from app.routers.lightning_docs import (
get_balance_response_desc,
send_coins_desc,
send_payment_desc,
)
from fastapi import APIRouter, HTTPException, Query, status
from fastapi.params import Depends
# Shared prefix used for both the URL path and the route names of this router.
_PREFIX = "lightning"
router = APIRouter(prefix=f"/{_PREFIX}", tags=["Lightning"])
@router.post(
    "/add-invoice",
    name=f"{_PREFIX}.add-invoice",
    summary="Addinvoice adds a new Invoice to the database.",
    description="For additional information see [LND docs](https://api.lightning.community/#addinvoice)",
    dependencies=[Depends(JWTBearer())],
    response_model=Invoice,
)
async def addinvoice(
    value_msat: int, memo: str = "", expiry: int = 3600, is_keysend: bool = False
):
    """Create a new invoice and return it; 501 if the backend lacks support."""
    try:
        return await add_invoice(memo, value_msat, expiry, is_keysend)
    except HTTPException as r:
        # BUG FIX: fastapi.HTTPException has no `.reason` attribute — the
        # original `detail=r.reason` raised AttributeError.  Use `.detail`,
        # consistent with the other handlers in this router.
        raise HTTPException(r.status_code, detail=r.detail)
    except NotImplementedError as r:
        raise HTTPException(status.HTTP_501_NOT_IMPLEMENTED, detail=r.args[0])
@router.get(
    "/get-balance",
    name=f"{_PREFIX}.get-balance",
    summary="Get the current on chain and channel balances of the lighting wallet.",
    response_description=get_balance_response_desc,
    dependencies=[Depends(JWTBearer())],
    response_model=WalletBalance,
)
async def getwalletbalance():
    # No docstring on purpose: FastAPI would surface one as the OpenAPI
    # description, which this endpoint does not define.
    try:
        return await get_wallet_balance()
    except HTTPException as r:
        # BUG FIX: fastapi.HTTPException has no `.reason` attribute — the
        # original `detail=r.reason` raised AttributeError.  Use `.detail`.
        raise HTTPException(r.status_code, detail=r.detail)
    except NotImplementedError as r:
        raise HTTPException(status.HTTP_501_NOT_IMPLEMENTED, detail=r.args[0])
@router.post(
    "/send-coins",
    name=f"{_PREFIX}.send-coins",
    summary="Attempt to send on-chain funds.",
    description=send_coins_desc,
    response_description="Either an error or a SendCoinsResponse object on success",
    dependencies=[Depends(JWTBearer())],
    response_model=SendCoinsResponse,
)
async def send_coins_path(input: SendCoinsInput):
    """Send on-chain funds as described by *input*; 501 if unsupported."""
    try:
        result = await send_coins(input=input)
    except HTTPException as exc:
        raise HTTPException(exc.status_code, detail=exc.detail)
    except NotImplementedError as exc:
        raise HTTPException(status.HTTP_501_NOT_IMPLEMENTED, detail=exc.args[0])
    return result
@router.post(
    "/send-payment",
    name=f"{_PREFIX}.send-payment",
    summary="Attempt to pay a payment request.",
    description=send_payment_desc,
    response_description="Either an error or a Payment object on success",
    dependencies=[Depends(JWTBearer())],
    response_model=Payment,
)
async def sendpayment(
    pay_req: str, timeout_seconds: int = 5, fee_limit_msat: int = 8000
):
    """Pay *pay_req* within the given timeout and fee limit; 501 if unsupported."""
    try:
        result = await send_payment(pay_req, timeout_seconds, fee_limit_msat)
    except HTTPException as exc:
        raise HTTPException(exc.status_code, detail=exc.detail)
    except NotImplementedError as exc:
        raise HTTPException(status.HTTP_501_NOT_IMPLEMENTED, detail=exc.args[0])
    return result
@router.get(
    "/get-info",
    name=f"{_PREFIX}.get-info",
    summary="Request information about the currently running lightning node.",
    response_description="Either an error or a LnInfo object on success",
    dependencies=[Depends(JWTBearer())],
    response_model=LnInfo,
)
async def get_info():
    # No docstring on purpose: FastAPI would surface one as the OpenAPI
    # description, which this endpoint does not define.
    try:
        result = await get_ln_info()
    except HTTPException as exc:
        raise HTTPException(exc.status_code, detail=exc.detail)
    except NotImplementedError as exc:
        raise HTTPException(status.HTTP_501_NOT_IMPLEMENTED, detail=exc.args[0])
    return result
@router.get(
    "/get-info-lite",
    name=f"{_PREFIX}.get-info-lite",
    summary="Get lightweight current lightning info. Less verbose version of /lightning/get-info",
    dependencies=[Depends(JWTBearer())],
    status_code=status.HTTP_200_OK,
    response_model=LightningInfoLite,
)
async def get_ln_info_lite_path():
    # No docstring on purpose: FastAPI would surface one as the OpenAPI
    # description, which this endpoint does not define.
    try:
        return await get_ln_info_lite()
    except HTTPException as r:
        # BUG FIX: fastapi.HTTPException has no `.reason` attribute — the
        # original `detail=r.reason` raised AttributeError.  Use `.detail`.
        raise HTTPException(r.status_code, detail=r.detail)
    except NotImplementedError as r:
        raise HTTPException(status.HTTP_501_NOT_IMPLEMENTED, detail=r.args[0])
@router.get(
    "/decode-pay-req",
    name=f"{_PREFIX}.decode-pay-req",
    summary="DecodePayReq takes an encoded payment request string and attempts to decode it, returning a full description of the conditions encoded within the payment request.",
    response_model=PaymentRequest,
    response_description="A fully decoded payment request or a HTTP status 400 if the payment request cannot be decoded.",
    dependencies=[Depends(JWTBearer())],
)
async def get_decode_pay_request(
    pay_req: str = Query(..., description="The payment request string to be decoded")
):
    """Decode a BOLT11 payment request string into its fields.

    Plain passthrough with no local error handling; per the route's
    response_description, undecodable input presumably surfaces as an
    HTTP 400 raised inside decode_pay_request — TODO confirm.
    """
    return await decode_pay_request(pay_req)
| StarcoderdataPython |
8058342 | from __future__ import absolute_import
from __future__ import division
import random
import numpy as np
from blt_net.cascademv2.core.utils import data_augment
from blt_net.cascademv2.core.utils.cython_bbox import bbox_overlaps
from blt_net.cascademv2.core.utils.bbox_process import compute_targets
from blt_net.cascademv2.core.utils.bbox import box_op
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
    """Enumerate one anchor per aspect ratio, preserving the reference area."""
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _ratio_enum2(anchor, ratios):
    """Enumerate one anchor per aspect ratio, keeping the reference width fixed.

    Unlike _ratio_enum, the width stays constant and only the height is
    derived from each ratio.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    widths = np.full(len(ratios), float(w))
    heights = np.round(widths / ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """Enumerate one anchor per scale factor around the reference center."""
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
def get_anchors(img_width, img_height, feat_map_sizes, anchor_box_scales, anchor_ratios):
    """Build all anchors over a 4-level feature pyramid (strides 8/16/32/64).

    Args:
        img_width, img_height: input image size; anchors are clipped to it.
        feat_map_sizes: per-level (height, width) of the feature maps.
        anchor_box_scales: per-level anchor scales in image pixels.
        anchor_ratios: per-level aspect ratios.

    Returns:
        (anchors, num_anchors): ``anchors`` is a (sum_i H_i*W_i*A_i, 5)
        array whose last column is a validity flag initialised to 1;
        ``num_anchors`` holds the per-level anchor count A_i.
    """
    downscale = np.asarray([[8],[16],[32],[64]])
    ancs = []
    # BUG FIX: ``np.int`` was a deprecated alias of the builtin ``int`` and
    # was removed in NumPy 1.24; use ``int`` directly.
    num_anchors = np.zeros((len(downscale)), dtype=int)
    for layer in range(len(downscale)):
        anchor_scales = anchor_box_scales[layer] / downscale[layer]
        base_anchor = np.array([1, 1, downscale[layer], downscale[layer]]) - 1
        ratio_anchors = _ratio_enum2(base_anchor, anchor_ratios[layer])
        anchors = np.vstack([_scale_enum(ratio_anchors[i, :], anchor_scales)
                             for i in range(ratio_anchors.shape[0])])
        num_anchors[layer] = len(anchors)
        output_width, output_height = feat_map_sizes[layer][1], feat_map_sizes[layer][0]
        # Replicate the base anchors at every feature-map cell (image pixels).
        shift_x = np.arange(output_width) * downscale[layer]
        shift_y = np.arange(output_height) * downscale[layer]
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
        all_anchors = np.expand_dims(anchors, axis=0) + np.expand_dims(shifts, axis=0).transpose((1, 0, 2))
        all_anchors = np.reshape(all_anchors, (-1, 4))
        # Clip anchors to the image bounds.
        all_anchors[:, 0][all_anchors[:, 0] < 0] = 0
        all_anchors[:, 1][all_anchors[:, 1] < 0] = 0
        all_anchors[:, 2][all_anchors[:, 2] >= img_width] = img_width - 1
        all_anchors[:, 3][all_anchors[:, 3] >= img_height] = img_height - 1
        # Append the validity-flag column (1 = usable).
        all_anchors = np.concatenate((all_anchors, np.ones((all_anchors.shape[0], 1))), axis=-1)
        ancs.append(all_anchors)
    return np.concatenate(ancs, axis=0), num_anchors
def calc_target(C, num_bboxes, gta, ignoreareas, proposals, igthre=0.5, posthre=0.5, negthre=0.3, img=[], roi_stride=0):
    """Assign classification and regression targets to anchor proposals.

    Args:
        C: training config; only ``C.classifier_regr_std`` is read here.
        num_bboxes: number of ground-truth boxes in ``gta``.
        gta: (num_bboxes, 4) ground-truth boxes.
        ignoreareas: regions whose overlapping anchors are excluded.
        proposals: (N, 5) anchors; last column is a validity flag.
        igthre: ignore-overlap threshold for dropping anchors.
        posthre: IoU at or above which an anchor is positive.
        negthre: IoU below which an unassigned anchor is negative.
        img: optional image for (commented-out) debug visualisation.
        roi_stride: passed through to filter_negboxes.

    Returns:
        (y_alf_cls, y_alf_regr), each with a leading batch axis of 1:
        y_alf_cls stacks [positive flag, negative flag]; y_alf_regr stacks
        [positive flag, 4 regression targets].

    NOTE: ``np.float`` was a deprecated alias of the builtin ``float`` and
    was removed in NumPy 1.24; the dtype arguments below now use ``float``.
    """
    proposals = proposals.copy()
    # calculate the valid anchors (without those in the ignore areas and outside the image)
    if len(ignoreareas) > 0:
        ignore_overlap = box_op(np.ascontiguousarray(proposals[:, :4], dtype=float),
                                np.ascontiguousarray(ignoreareas, dtype=float))
        ignore_sum = np.sum(ignore_overlap, axis=1)
        proposals[ignore_sum > igthre, -1] = 0
        #debugging code
        ignored_proposal_index_arr = np.where(ignore_sum > igthre)[0]
        ignored_anchors = proposals[ignored_proposal_index_arr, :4]
        # utils.show_anchors(input_image[i].copy(), (ignored_anchors, gta), 'level 2: positive anchors vs. gt', color_arr=((0, 255, 255), (255, 255, 0)))
    from blt_net.cascademv2.core.utils.bbox_process import filter_negboxes
    dont_keep = filter_negboxes(proposals, roi_stride)
    proposals[dont_keep, -1] = 0
    # get the indexes of the anchors that are not overlapping the ignore areas
    valid_idxs = np.where(proposals[:, -1] == 1)[0]
    # initialise empty output objectives
    y_alf_overlap = np.zeros((proposals.shape[0], 1))
    y_alf_negindex = np.zeros((proposals.shape[0], 1))
    y_is_box_valid = np.zeros((proposals.shape[0], 1))
    y_alf_regr = np.zeros((proposals.shape[0], 4))
    # remove anchors that are overlapping the ignore regions
    valid_anchors = proposals[valid_idxs, :]
    valid_alf_overlap = np.zeros((valid_anchors.shape[0], 1))
    valid_is_box_valid = np.zeros((valid_anchors.shape[0], 1))
    valid_alf_regr = np.zeros((valid_anchors.shape[0], 4))
    # get for training only anchors that overlap the ground truth, the rest is defined as negative (below a IoU threshold)
    # if there are any GTs
    if num_bboxes > 0:
        valid_overlap = bbox_overlaps(np.ascontiguousarray(valid_anchors, dtype=float),
                                      np.ascontiguousarray(gta, dtype=float))
        # find for every anchor to which gt box is the closest
        argmax_overlaps = valid_overlap.argmax(axis=1)
        # for all valid anchors get the IOU of the anchors maximum overlap with GT
        max_overlaps = valid_overlap[np.arange(len(valid_idxs)), argmax_overlaps]
        # for each GT get the anchor index that maximum overlaps it
        gt_argmax_overlaps = valid_overlap.argmax(axis=0)
        # for each GT get the IOU of the anchor that maximum overlaps it
        gt_max_overlaps = valid_overlap[gt_argmax_overlaps, np.arange(num_bboxes)]
        # for each GT mark the anchor that maximally overlaps it (regardless of its IOU) as positive
        gt_argmax_overlaps = np.where(valid_overlap == gt_max_overlaps)[0]
        valid_alf_overlap[gt_argmax_overlaps] = 1
        # get only the valid anchors that are above a specific IOU given threshold
        valid_alf_overlap[max_overlaps >= posthre] = 1
        # in any case get the three most overlapping anchors to each GT
        for i in range(len(gta)):
            inds = valid_overlap[:, i].ravel().argsort()[-3:]
            valid_alf_overlap[inds] = 1
        # get positives labels
        fg_inds = np.where(valid_alf_overlap == 1)[0]
        valid_is_box_valid[fg_inds] = 1
        anchor_box = valid_anchors[fg_inds, :4]
        gt_box = gta[argmax_overlaps[fg_inds], :]
        # if len(img)>0:
        #     show_anchors(img.copy(), (anchor_box.copy(), gta.copy()), 'level 1: pos. anchors vs. gt', color_arr=((0, 255, 255), (255, 255, 0)))
        # calculate regression values to GTs only for positive anchors
        valid_alf_regr[fg_inds, :] = compute_targets(anchor_box, gt_box, C.classifier_regr_std, std=True)
        bg_inds = np.where((max_overlaps < negthre) & (valid_is_box_valid.reshape((-1)) == 0))[0]
        valid_is_box_valid[bg_inds] = 1
        # transform to the original overlap and validbox
        y_alf_overlap[valid_idxs, :] = valid_alf_overlap
        y_is_box_valid[valid_idxs, :] = valid_is_box_valid
        y_alf_regr[valid_idxs, :] = valid_alf_regr
        y_alf_negindex = y_is_box_valid - y_alf_overlap
    # else: #(TODO check if this is needed)
    #    y_alf_negindex[valid_idxs, :] = np.ones((valid_idxs.shape[0], 1))
    # set the y[0] to contain positive examples and y[1] to contain negative examples
    y_alf_cls = np.expand_dims(np.concatenate([y_alf_overlap, y_alf_negindex], axis=1), axis=0)
    y_alf_regr = np.expand_dims(np.concatenate([y_alf_overlap, y_alf_regr], axis=1), axis=0)
    return y_alf_cls, y_alf_regr
def calc_targets(C, num_bboxes, gta, ignoreareas, proposals, igthre=0.5, posthre=0.5, negthre=0.3, img=[], roi_stride=0):
    """Compatibility wrapper that delegates directly to :func:`calc_target`."""
    return calc_target(C, num_bboxes, gta, ignoreareas, proposals,
                       igthre=igthre, posthre=posthre, negthre=negthre,
                       img=img, roi_stride=roi_stride)
def calc_target_multilayer(C, img_data, anchors, igthre=0.5, posthre=0.5, negthre=0.3, img=[]):
    """Compute anchor targets for one image over all pyramid levels.

    Args:
        C: training configuration, passed through to calc_targets.
        img_data: per-image dict; reads the 'bboxes' (ground truth) and
            'ignoreareas' keys.
        anchors: (N, 5) anchor array as produced by get_anchors.
        igthre, posthre, negthre: IoU thresholds for ignore/positive/negative.
        img: optional image used downstream for debug visualisation.
    """
    # by default all anchors are valid (last column)
    all_anchors = np.copy(anchors)
    num_bboxes = len(img_data['bboxes'])
    gta = np.copy(img_data['bboxes'])
    ignoreareas = img_data['ignoreareas']
    return calc_targets(C, num_bboxes, gta, ignoreareas, all_anchors, igthre=igthre, posthre=posthre, negthre=negthre, img=img)
# Generating the training data: an infinite generator over shuffled images.
def get_target(anchors, all_img_data, C, batchsize=4, net='2step', igthre=0.5,posthre=0.5, negthre=0.3):
    """Yield training batches (images plus anchor cls/regr targets) forever.

    Shuffles ``all_img_data`` in place at start and again whenever fewer than
    ``batchsize`` images remain. Each image is augmented, target-encoded, and
    mean-subtracted per channel before batching.

    Yields:
        For net == '2step': (images, [y_cls, y_regr], img_data, orig_images);
        otherwise: (images, [y_cls, y_regr]).
    """
    random.shuffle(all_img_data)
    current = 0
    while True:
        x_img_batch, y_cls_batch, y_regr_batch, img_data_batch, orig_img_batch = [], [], [], [], []
        # Reshuffle once the remaining images cannot fill a whole batch.
        if current >= len(all_img_data)-batchsize:
            random.shuffle(all_img_data)
            current = 0
        for img_data in all_img_data[current:current+batchsize]:
        #for img_data in all_img_data[0:1]:
            # augment1: brightness
            # augment2: horizontal flip, coordinates
            img_data, x_img = data_augment.augment(img_data, C, augment_brightness=C.augmentBrightness, augment_crop=C.augmentCrop)
            orig_img_batch.append(x_img)
            y_cls, y_regr = calc_target_multilayer(C, img_data, anchors, igthre=igthre, posthre=posthre, negthre=negthre, img=x_img)
            # Per-channel mean subtraction (channel order follows
            # C.img_channel_mean — presumably BGR; confirm against loader).
            x_img = x_img.astype(np.float32)
            x_img[:, :, 0] -= C.img_channel_mean[0]
            x_img[:, :, 1] -= C.img_channel_mean[1]
            x_img[:, :, 2] -= C.img_channel_mean[2]
            x_img = np.expand_dims(x_img, axis=0)
            x_img_batch.append(x_img)
            y_cls_batch.append(y_cls)
            y_regr_batch.append(y_regr)
            img_data_batch.append(img_data)
        x_img_batch = np.concatenate(np.array(x_img_batch),axis=0)
        y_cls_batch = np.concatenate(np.array(y_cls_batch), axis=0)
        y_regr_batch = np.concatenate(np.array(y_regr_batch), axis=0)
        current += batchsize
        if net == '2step':
            yield np.copy(x_img_batch), [np.copy(y_cls_batch), np.copy(y_regr_batch)], np.copy(img_data_batch), np.copy(orig_img_batch)
        else:
            yield np.copy(x_img_batch), [np.copy(y_cls_batch), np.copy(y_regr_batch)]
| StarcoderdataPython |
3329099 | <filename>cloud_browser/cloud/config.py<gh_stars>10-100
"""Cloud configuration."""
class Config(object):
    """Lazily construct and cache the cloud connection selected in settings.

    The backend (Apache Libcloud, AWS, Google Storage, Rackspace, or a
    filesystem mock) is chosen via ``CLOUD_BROWSER_DATASTORE`` plus the
    matching credential settings.
    """

    __connection_obj = None  # Cached connection instance.
    __connection_cls = None  # Cached connection class.
    __connection_fn = None   # Cached zero-argument connection factory.

    @classmethod
    def from_settings(cls):
        """Create configuration from Django settings or environment.

        Returns a ``(connection_class, factory)`` tuple and caches both
        on the class.

        :raises ImproperlyConfigured: if no usable credentials are found
            for the configured datastore.
        """
        from cloud_browser.app_settings import settings
        from django.core.exceptions import ImproperlyConfigured

        conn_cls = conn_fn = None
        datastore = settings.CLOUD_BROWSER_DATASTORE

        # ``datastore`` is a single value, so the branches are mutually
        # exclusive; the original mixed ``if`` and ``elif`` here — use one
        # consistent if/elif chain.
        if datastore == "ApacheLibcloud":
            from cloud_browser.cloud.apache_libcloud import ApacheLibcloudConnection

            provider = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER
            account = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT
            secret_key = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY
            host = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_HOST
            port = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_PORT
            # Any setting value other than the literal string "False" is secure.
            secure = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_SECURE != "False"
            if provider and account and secret_key:
                conn_cls = ApacheLibcloudConnection

                def conn_fn():
                    return ApacheLibcloudConnection(
                        provider, account, secret_key,
                        host=host, port=port, secure=secure)

        elif datastore == "AWS":
            from cloud_browser.cloud.aws import AwsConnection

            account = settings.CLOUD_BROWSER_AWS_ACCOUNT
            secret_key = settings.CLOUD_BROWSER_AWS_SECRET_KEY
            if account and secret_key:
                conn_cls = AwsConnection

                def conn_fn():
                    return AwsConnection(account, secret_key)

        elif datastore == "Google":
            from cloud_browser.cloud.google import GsConnection

            account = settings.CLOUD_BROWSER_GS_ACCOUNT
            secret_key = settings.CLOUD_BROWSER_GS_SECRET_KEY
            if account and secret_key:
                conn_cls = GsConnection

                def conn_fn():
                    return GsConnection(account, secret_key)

        elif datastore == "Rackspace":
            account = settings.CLOUD_BROWSER_RACKSPACE_ACCOUNT
            secret_key = settings.CLOUD_BROWSER_RACKSPACE_SECRET_KEY
            servicenet = settings.CLOUD_BROWSER_RACKSPACE_SERVICENET
            authurl = settings.CLOUD_BROWSER_RACKSPACE_AUTHURL
            if account and secret_key:
                from cloud_browser.cloud.rackspace import RackspaceConnection

                conn_cls = RackspaceConnection

                def conn_fn():
                    return RackspaceConnection(
                        account, secret_key,
                        servicenet=servicenet, authurl=authurl)

        elif datastore == "Filesystem":
            # Mock filesystem
            root = settings.CLOUD_BROWSER_FILESYSTEM_ROOT
            if root is not None:
                from cloud_browser.cloud.fs import FilesystemConnection

                conn_cls = FilesystemConnection

                def conn_fn():
                    return FilesystemConnection(root)

        if conn_cls is None:
            raise ImproperlyConfigured(
                "No suitable credentials found for datastore: %s." % datastore
            )

        # Wrap as staticmethod so storing the factory on the class does not
        # turn it into a bound method; attribute access unwraps it again.
        conn_fn = staticmethod(conn_fn)

        # Directly cache attributes.
        cls.__connection_cls = conn_cls
        cls.__connection_fn = conn_fn
        return conn_cls, conn_fn

    @classmethod
    def get_connection_cls(cls):
        """Return connection class.

        :rtype: :class:`type`
        """
        if cls.__connection_cls is None:
            cls.__connection_cls, _ = cls.from_settings()
        return cls.__connection_cls

    @classmethod
    def get_connection(cls):
        """Return connection object.

        :rtype: :class:`cloud_browser.cloud.base.CloudConnection`
        """
        if cls.__connection_obj is None:
            if cls.__connection_fn is None:
                _, cls.__connection_fn = cls.from_settings()
            cls.__connection_obj = cls.__connection_fn()
        return cls.__connection_obj
| StarcoderdataPython |
4924454 | import collections
import datetime
import functools
import os
import urllib.parse
from urllib.parse import parse_qsl, urlparse, urlencode
from flask import (
Flask,
make_response,
render_template,
request,
send_file,
send_from_directory,
)
import hyperlink
import smartypants
from werkzeug.middleware.profiler import ProfilerMiddleware
from docstore.documents import find_original_filename, read_documents
from docstore.tag_cloud import TagCloud
from docstore.tag_list import render_tag_list
from docstore.text_utils import hostname, pretty_date
def tags_with_prefix(document, prefix):
    """Return the document's tags that start with *prefix*, in order."""
    matching = []
    for tag in document.tags:
        if tag.startswith(prefix):
            matching.append(tag)
    return matching
def tags_without_prefix(document, prefix):
    """Return the document's tags that do not start with *prefix*, in order."""
    remaining = []
    for tag in document.tags:
        if not tag.startswith(prefix):
            remaining.append(tag)
    return remaining
def url_without_sortby(u):
    """Return URL string *u* with any ``sortBy`` query parameter removed."""
    url = hyperlink.URL.from_text(u)
    return str(url.remove("sortBy"))
def serve_file(*, root, shard, filename):
    """
    Serves a file which has been saved in docstore.

    This adds the Content-Disposition header to the response, so files
    are downloaded with the original filename they were uploaded as,
    rather than the normalised filename.

    :param root: docstore root directory (contains the ``files/`` tree).
    :param shard: two-level shard directory the file lives under.
    :param filename: normalised on-disk filename within the shard.
    """
    path = os.path.abspath(os.path.join(root, "files", shard, filename))
    response = make_response(send_file(path))
    # Look up the filename the user originally uploaded for this path.
    original_filename = find_original_filename(root, path=path)
    # See https://stackoverflow.com/a/49481671/1558022 for UTF-8 encoding
    encoded_filename = urllib.parse.quote(original_filename, encoding="utf-8")
    response.headers["Content-Disposition"] = f"filename*=utf-8''{encoded_filename}"
    return response
def create_app(title, root, thumbnail_width):
    """Build and return the docstore Flask application.

    :param title: page title rendered on the document list.
    :param root: docstore root directory (``files/`` and ``thumbnails/``).
    :param thumbnail_width: pixel width exposed to templates via app config.
    """
    app = Flask(__name__)
    app.config["THUMBNAIL_WIDTH"] = thumbnail_width
    # Trim whitespace around Jinja block tags in templates.
    app.jinja_env.trim_blocks = True
    app.jinja_env.lstrip_blocks = True
    # Template filters used by index.html.
    app.jinja_env.filters["hostname"] = hostname
    app.jinja_env.filters["pretty_date"] = lambda d: pretty_date(
        d, now=datetime.datetime.now()
    )
    app.jinja_env.filters["render_tag_list"] = render_tag_list
    app.jinja_env.filters["smartypants"] = smartypants.smartypants
    app.jinja_env.filters["url_without_sortby"] = url_without_sortby
    app.jinja_env.filters["tags_with_prefix"] = tags_with_prefix
    app.jinja_env.filters["tags_without_prefix"] = tags_without_prefix
    @app.route("/")
    def list_documents():
        """List documents filtered by ?tag=... and sorted by ?sortBy=..."""
        request_tags = set(request.args.getlist("tag"))
        # Keep only documents carrying every requested tag.
        documents = [
            doc for doc in read_documents(root) if request_tags.issubset(set(doc.tags))
        ]
        tag_tally = collections.Counter()
        for doc in documents:
            for t in doc.tags:
                tag_tally[t] += 1
        try:
            page = int(request.args["page"])
        except KeyError:
            page = 1
        sort_by = request.args.get("sortBy", "date (newest first)")
        if sort_by.startswith("date"):
            sort_key = lambda d: d.date_saved
        elif sort_by.startswith("title"):
            sort_key = lambda d: d.title.lower()
        else:
            raise ValueError(f"Unrecognised sortBy query parameter: {sort_by}")
        # "newest first" / "Z to A" both mean descending order.
        if sort_by in {"date (newest first)", "title (Z to A)"}:
            sort_reverse = True
        else:
            sort_reverse = False
        html = render_template(
            "index.html",
            documents=sorted(documents, key=sort_key, reverse=sort_reverse),
            request_tags=request_tags,
            query_string=tuple(parse_qsl(urlparse(request.url).query)),
            tag_tally=tag_tally,
            title=title,
            page=page,
            sort_by=sort_by,
            TagCloud=TagCloud,
        )
        return html
    @app.route("/thumbnails/<shard>/<filename>")
    def thumbnails(shard, filename):
        """Serve a pre-generated thumbnail from the sharded thumbnail tree."""
        return send_from_directory(
            os.path.abspath(os.path.join(root, "thumbnails", shard)), filename
        )
    # Original files are served with their upload filename via serve_file.
    app.add_url_rule(
        rule="/files/<shard>/<filename>",
        view_func=lambda shard, filename: serve_file(
            root=root, shard=shard, filename=filename
        ),
    )
    # The filters below take the query string as a hashable tuple of
    # (key, value) pairs, which is what makes lru_cache applicable.
    @app.template_filter("add_tag")
    @functools.lru_cache()
    def add_tag(query_string, tag):
        """Return a query string with *tag* added and paging reset."""
        return "?" + urlencode(
            [(k, v) for k, v in query_string if k != "page"] + [("tag", tag)]
        )
    @app.template_filter("remove_tag")
    def remove_tag(query_string, tag):
        """Return a query string with the given tag filter removed."""
        return "?" + urlencode(
            [(k, v) for k, v in query_string if (k, v) != ("tag", tag)]
        )
    @app.template_filter("set_page")
    @functools.lru_cache()
    def set_page(query_string, page):
        """Return a query string pointing at *page* (page 1 is implicit)."""
        pageless_qs = [(k, v) for k, v in query_string if k != "page"]
        if page == 1:
            return "?" + urlencode(pageless_qs)
        else:
            return "?" + urlencode(pageless_qs + [("page", page)])
    return app
def run_profiler(*, host, port, **kwargs):  # pragma: no cover
    """Run the app under Werkzeug's profiler (top 30 calls per request)."""
    app = create_app(**kwargs)
    app.config["PROFILE"] = True
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
    app.run(host=host, port=port, debug=True)
def run_server(*, host, port, debug, **kwargs):  # pragma: no cover
    """Create the app with *kwargs* and serve it with Flask's dev server."""
    app = create_app(**kwargs)
    app.run(host=host, port=port, debug=debug)
| StarcoderdataPython |
4856743 | <filename>wagtail_headlessing/apps.py
from django.apps import AppConfig
class SourcecraftingWagtailConfig(AppConfig):
    """Django application configuration for the wagtail_headlessing package."""
    # Dotted module path Django uses to locate and label the app.
    name = 'wagtail_headlessing'
| StarcoderdataPython |
6412574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 <EMAIL>
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from setuptools import setup
import os
# Load the version number from xusbboot/version.py so the package metadata
# stays in sync with the source; fall back to a pinned default if the file
# cannot be read (e.g. during an isolated build).
__version__ = "0.0.2"
try:
    fields = {}
    with open(os.path.join("xusbboot", "version.py")) as f:
        # version.py defines __version__; execute it in an isolated namespace.
        exec(f.read(), fields)
    __version__ = fields["__version__"]
except (IOError, OSError, KeyError, SyntaxError):
    # NOTE(review): previously a bare ``except`` fell back to the Python 2-only
    # execfile(); keep the pinned default instead.
    pass

setup(
    name="xusbboot",
    # BUG FIX: the version loaded above was previously ignored and the value
    # was hard-coded here.
    version=__version__,
    description="Python library for xmega xusb bootloader",
    url="http://github.com/ahtn/xusb-boot",
    author="jem",
    author_email="<EMAIL>",
    license="MIT",
    packages=["xusbboot"],
    install_requires=["hexdump", "intelhex", "easyhid"],
    keywords=["xmega", "usb", "hid", "avr", "bootloader"],
    scripts=["bin/xusbboot-cli"],
    zip_safe=False,
)
| StarcoderdataPython |
104182 | import struct
from suitcase.fields import BaseField
from suitcase.fields import BaseStructField
from suitcase.fields import BaseFixedByteSequence
class SLFloat32(BaseStructField):
    """Little-endian 32-bit IEEE 754 float field.

    NOTE(review): "Signed" in the original docstring is redundant — IEEE
    floats are inherently signed; class name kept for compatibility.
    """
    PACK_FORMAT = UNPACK_FORMAT = b"<f"
    def unpack(self, data, **kwargs):
        """Decode 4 little-endian bytes into a float; extra kwargs are
        accepted for interface compatibility and ignored."""
        self._value = struct.unpack(self.UNPACK_FORMAT, data)[0]
class UBInt32Sequence(BaseFixedByteSequence):
    """A sequence of unsigned, big-endian 32 bit integers.

    :param length: Number of 32-bit integers in sequence.
    :type length: Integer
    """
    def __init__(self, length, **kwargs):
        # The lambda builds a struct format of `l` big-endian uint32s,
        # e.g. length 3 -> ">III".
        super().__init__(lambda l: ">" + "I" * l, length, **kwargs)
        self.bytes_required = length * 4  # 4 bytes per 32-bit integer
class FixedLengthString(BaseField):
    """A string of a fixed number of bytes.

    The specified number of bytes are read and then any null
    bytes are stripped from the result.

    :param length: Number of bytes to read.
    :type length: Integer
    """
    def __init__(self, length, **kwargs):
        super().__init__(**kwargs)
        self.length = length
    @property
    def bytes_required(self):
        """Number of bytes to read from stream."""
        return self.length
    def pack(self, stream):
        """Write the value with NUL padding stripped from both ends."""
        stream.write(self._value.strip(b'\0'))
    def unpack(self, data):
        """Store *data* with NUL padding stripped from both ends."""
        self._value = data.strip(b'\0')
| StarcoderdataPython |
6640618 | from algorithmx import *
| StarcoderdataPython |
8172400 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
## GenSemaphore class
# base class for Semaphore
from base.Sequence import Sequence
class GenSemaphore(Sequence):
    """Base class for generator-thread semaphores backed by shared memory.

    Allocates a shared physical address holding the semaphore counter and
    maps it into the generator thread's virtual address space. Subclasses
    implement the acquire/release hooks.
    """
    def __init__(self, aGenThread, aName, aCounter, **kwargs):
        super().__init__(aGenThread)
        self.mAddrReg = None # register for address
        self.mCounterReg = None # register for counter
        self.mStatusReg = None # register for status
        self.mSemaVA = None # semaphore virtual address
        self.mVaAttr = kwargs.get('MemAttrImpl', 'Normal_WBWA') # semaphore va attribute
        self.mName = aName # semaphore name
        self.mCounter = aCounter # semaphore initial value
        self.mBank = kwargs.get('Bank', 0) # which bank to allocate semaphore
        self.mSize = kwargs.get('Size', 8) # semaphore size
        self.mSharedPA = None # physical address allocated by genSemaphore()
        self.mSharePA = None # legacy misspelling of mSharedPA; kept for backward compatibility
        self.mReverseEndian = None # Whether or not Reverse data endian
        self.setup()
    def _acquireSemaphore(self):
        """Hook: acquire the semaphore; implemented by subclasses."""
        pass
    def _releaseSemaphore(self):
        """Hook: release the semaphore; implemented by subclasses."""
        pass
    def _reloadSemaphore(self):
        """(Re)generate the shared semaphore and load its VA into mAddrReg."""
        (self.mSharedPA, self.mReverseEndian, valid) = self.genThread.genSemaphore(self.mName, self.mCounter, self.mBank, self.mSize) # Shared PA has been initialized with the counter
        if not valid:
            # BUG FIX: this message previously read self.mSharePA — a typo
            # that is always None here — so the %x format raised TypeError
            # and masked the real failure.
            self.error("Thread %d failed to generate semaphore as the PA 0x%x is out of address size" %(self._threadId(), self.mSharedPA))
        self.mSemaVA = self.genVAforPA(Size=self.mSize, Align=self.mSize, Type="D",
                    PA = self.mSharedPA, Bank=self.mBank, MemAttrImpl=self.mVaAttr, CanAlias=1)
        # Sanity check the mapped VA (mask value inherited from original —
        # TODO confirm the intended valid-VA range).
        if (self.mSemaVA & 0x00ffffffffff0000) == 0:
            self.error("ERROR VA=%x is invalid"%self.mSemaVA)
        shared_va_page_info = self.getPageInfo(self.mSemaVA, "VA", self.mBank)
        if not shared_va_page_info["Page"]["MemoryAttr"] == self.mVaAttr:
            self.error("ERROR VA=%x is set to %s instead of %s"%(self.mSemaVA,shared_va_page_info["Page"]["MemoryAttr"],self.mVaAttr))
        self.notice("Thread %d map va 0x%x to [%d] pa 0x%x" % (self._threadId(), self.mSemaVA, self.mBank, self.mSharedPA))
        # NOTE(review): LoadGPR64 is not imported in this module's visible
        # imports — confirm it is provided by the framework at runtime.
        load_gpr = LoadGPR64(self.genThread)
        load_gpr.load(self.mAddrReg, self.mSemaVA)
    def _threadId(self):
        """Return the owning generator thread's id."""
        return self.genThread.genThreadID
    def _handleLowPower(self):
        """While GenMode bit 9 (low power) is set, clear it and re-execute
        from the current PC."""
        gen_mode = self.getPEstate("GenMode")
        while gen_mode & (1 << 9): # low power mode
            restart_pc = self.getPEstate("PC")
            gen_mode &= ~(1 << 9)
            self.setPEstate("GenMode", gen_mode)
            self.genSequence("ReExecution", {"Address" : restart_pc})
            gen_mode = self.getPEstate("GenMode")
| StarcoderdataPython |
class _MessageError(Exception):
    """Common base for this module's exceptions.

    BUG FIX: the original classes stored ``message`` without calling
    ``Exception.__init__``, so ``str(exc)`` was empty and ``exc.args``
    was ``()``. The constructor signature and the ``message`` attribute
    are unchanged for backward compatibility.
    """
    def __init__(self, message):
        super().__init__(message)
        self.message = message


class KeyNotFound(_MessageError):
    """Raised when a required key is missing."""


class NoMethodsGiven(_MessageError):
    """Raised when no methods were supplied to an operation."""


class FileNotFound(_MessageError):
    """Raised when a referenced file cannot be located."""


class StrategyNotSupported(_MessageError):
    """Raised when an unsupported strategy is requested."""


class NoFunctionsProvided(_MessageError):
    """Raised when no functions were provided to an operation."""


class NoneParameterError(_MessageError):
    """Raised when a parameter that must not be None is None."""


class AndroSignatureError(_MessageError):
    """Raised when a signature does not match the expected Androguard pattern."""


class SootSignatureError(_MessageError):
    """Raised when a signature does not match the expected Soot pattern."""


class ToolNotFoundError(_MessageError):
    """Raised when a required external tool is not installed."""


class InvalidFileError(_MessageError):
    """Raised when a file is not of the expected type."""
# Messages
# The '{}' placeholders are filled via str.format() with the offending
# key/parameter/signature/tool/file name at raise time.
# NOTE(review): ABSTRAC_METHOD has a typo in its name (missing 'T');
# renaming would break existing importers, so it is left as-is.
ABSTRAC_METHOD = 'This method is abstract like and needs to be implemented by subclasses to be used'
NONE_PARAM = 'parameter \'{}\' cannot be None'
ANDRO_SIG = 'The signature \'{}\' extracted does not respect the expected Androguard pattern'
SOOT_SIG = 'The signature \'{}\' extracted does not respect the expected Soot pattern'
TOOL_NOT_FOUND = 'Could not find \'{}\' on this computer, please install it and try again'
FILE_NOT_FOUND = 'Could not find \'{}\' on this machine, maybe wrong path?'
INVALID_FILE = 'The file provided is not of the correct type. File must be .\'{}\' like.'
1612257 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 09:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Certificate.cer_amount to Certificate.amount (schema-only)."""
    dependencies = [
        ('practiceapp', '0003_auto_20160714_1653'),
    ]
    operations = [
        migrations.RenameField(
            model_name='certificate',
            old_name='cer_amount',
            new_name='amount',
        ),
    ]
| StarcoderdataPython |
5133222 | <reponame>altryne/django-unslashed
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.test import TestCase, Client
from django.middleware.common import CommonMiddleware
from unslashed.middleware import RemoveSlashMiddleware
class RemoveSlashMiddlewareTest(TestCase):
    """Integration tests for RemoveSlashMiddleware via Django's test client."""
    def setUp(self):
        # Fresh client per test.
        self.client = Client()
    def test_permanent_redirect_to_non_slashed(self):
        """Trailing-slash URLs get a 301 to the unslashed equivalent."""
        response = self.client.get('/testapps/', follow=False)
        self.assertIsInstance(response, HttpResponsePermanentRedirect)
        self.assertFalse(response['Location'].endswith('/'))
        response = self.client.get('/testapps/1/', follow=False)
        self.assertIsInstance(response, HttpResponsePermanentRedirect)
        self.assertFalse(response['Location'].endswith('/'))
    def test_no_redirect_when_slash_url_is_valid(self):
        """A URL that legitimately ends in a slash is served, not redirected."""
        response = self.client.get('/testapps/1/urlendsinslash/', follow=False)
        self.assertIsInstance(response, HttpResponse)
        self.assertEqual(response.status_code, 200)
    def test_no_redirect_when_slashed_and_unslashed_invalid(self):
        """404s stay 404s when neither variant of the URL resolves."""
        response = self.client.get('/testapps/invalid/', follow=False)
        self.assertNotIsInstance(response, HttpResponsePermanentRedirect)
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/testapps/invalid', follow=False)
        self.assertNotIsInstance(response, HttpResponsePermanentRedirect)
        self.assertEqual(response.status_code, 404)
    def tearDown(self):
        del self.client
11308684 | <gh_stars>1-10
from google_images_download import google_images_download
import sys
# first install https://github.com/hardikvasa/google-images-download
# usage python get_my_images.py "bear, smelly cat" 200
response = google_images_download.googleimagesdownload()

# BUG FIX: sys.argv[1:2] and "sys.argv[2:] or 100" produced *lists*, but the
# library expects "keywords" as a comma-separated string and "limit" as an
# integer (see the usage example above).
if len(sys.argv) < 2:
    sys.exit('usage: python get_my_images.py "keyword1, keyword2" [limit]')
keywords = sys.argv[1]
limit = int(sys.argv[2]) if len(sys.argv) > 2 else 100

arguments = {"keywords": keywords, "limit": limit, "print_urls": True}
response.download(arguments)
| StarcoderdataPython |
167634 | <filename>model/simplebase_model.py
import tensorflow as tf # noqa
import numpy as np # noqa
from ..utils import nn # noqa
import transforms as trans
import conditionals as conds # noqa
import likelihoods as likes
from ..model import model as mod
class SimpleBaseModel(mod.Model):
    """Flow model: invertible transformations over an iid base distribution.

    Covariates are mapped through a chain of invertible transformations; the
    likelihood is the base-distribution density of the transformed values
    plus the log-determinant of the Jacobian. Sampling inverts the chain.
    """
    def __init__(self, transformations,
                 preproc_func=None, base_distribution='gaussian',
                 sample_size=128, sample_size_n=1000,
                 trans_conditioning=True,
                 ):
        """
        Args:
            transformations: list of transformation functions that take input
                (and possibly conditioning) variables to transform and return
                output, logdet of Jacobian, and inverse for transformation.
            preproc_func: optional callable mapping raw inputs to
                (preprocessed_inputs, inverse_preproc_fn); None disables
                preprocessing.
            base_distribution: name of the iid base density used for the
                transformed covariates (default 'gaussian').
            sample_size: number of sample batches drawn when building the
                sampler.
            sample_size_n: number of points per sample batch.
            trans_conditioning: whether conditioning values are fed to the
                transformations.
        """
        # Parameters
        self.transformations = transformations
        self.base_distribution = base_distribution
        self.sample_size = sample_size
        self.sample_size_n = sample_size_n
        self.preproc_func = preproc_func
        self.trans_conditioning = trans_conditioning
    def build_graph(self, inputs, conditioning=None,
                    sampler_conditioning=None, forward_tensors=None):
        """Build the TF graph; returns (log-likelihoods, sampler tensor).

        ``inputs`` is expected to be rank 3 (the code reads its shape[2] as
        the covariate dimension d).
        """
        print('Building {} Graph,\n\tconditioning {}'.format(
            'SimpleBase', conditioning))
        # Place holder for model input.
        if self.preproc_func is not None:
            inputs, inv_preproc = self.preproc_func(inputs)
        else:
            inv_preproc = None
        N = tf.shape(inputs)[0]
        self.d = int(inputs.get_shape()[2])
        # Sampling with extraneous conditioning values.
        if sampler_conditioning is None:
            sampler_conditioning = conditioning
        else:
            # Allows for sampling procedure to be independent from any
            # placeholder/input.
            assert conditioning is not None  # Need to also train conditioning.
        # Do transformation on input variables.
        with tf.variable_scope('transformations') as trans_scope:
            self.z, self.logdet, self.invmap = trans.transformer(
                inputs, self.transformations,
                conditioning if self.trans_conditioning else None,
                forward_tensors=forward_tensors
            )
        # Get the likelihood of covariates all iid according to base distro
        # Note: the 3 below is for the weight, mu, sigma param of mixture
        # component and not dimensionality.
        self.llikes = self.logdet
        with tf.variable_scope('conditionals'):
            # Treat as N x nd flat covariates
            flat_z = tf.reshape(self.z, shape=(N, -1, 1))
            # Zeroed (weight, mu, sigma) params = the standard base density.
            std_params = tf.tile(tf.zeros_like(flat_z), [1, 1, 3])
            # Get likelihood with base distribution
            self.llikes += tf.reduce_sum(likes.mixture_likelihoods(
                std_params, flat_z, self.base_distribution), -1)
        # Sample all tensor dimensions iid from base distribution
        total_dims = self.sample_size*self.sample_size_n*self.d
        self.z_samples = tf.reshape(
            conds.sample_mm(
                tf.zeros(shape=(total_dims, 3), dtype=tf.float32),
                self.base_distribution),
            (self.sample_size, self.sample_size_n, self.d))
        # Invert to get samples back in original space.
        with tf.variable_scope(trans_scope, reuse=True):
            self.sampler = self.invmap(
                self.z_samples,
                sampler_conditioning if self.trans_conditioning else None)
        if inv_preproc is not None:
            self.sampler = inv_preproc(self.sampler)
        return self.llikes, self.sampler
| StarcoderdataPython |
11210019 | #!/usr/bin/env python
"""
B.5 Macros for text
"""
from plasTeX import Command, Environment, sourceChildren
# Simple TeX text macros that render as a fixed (often empty) string.
class frenchspacing(Command):
    unicode = u''
class nonfrenchspacing(Command):
    unicode = u''
class normalbaselines(Command):
    unicode = u''
class lq(Command):
    # Left single quotation mark (U+2018).
    # NOTE(review): ``unichr`` is Python 2-only; on Python 3 this raises
    # NameError at import time — confirm whether ``chr`` should be used.
    unicode = unichr(8216)
class rq(Command):
    # Right single quotation mark (U+2019); same unichr caveat as lq.
    unicode = unichr(8217)
class lbrack(Command):
    unicode = u'['
class rbrack(Command):
    unicode = u']'
class space(Command):
    unicode = u' '
class empty(Command):
    unicode = u''
class null(Command):
    unicode = u''
class bgroup(Command):
    """TeX explicit group opener ('{'): pushes a grouping context and
    absorbs following tokens as children until the group closes."""
    def invoke(self, tex):
        # Open a new grouping context on the document's context stack.
        self.ownerDocument.context.push()
    def digest(self, tokens):
        # Absorb the tokens that belong to us
        for item in tokens:
            if item.nodeType == Command.ELEMENT_NODE:
                # A sectioning command ends the group implicitly; push it
                # back so the section machinery sees it.
                if item.level < self.ENDSECTIONS_LEVEL:
                    tokens.push(item)
                    break
                if isinstance(item, (egroup,endgroup)):
                    break
            # A token from a shallower context also terminates the group.
            if item.contextDepth < self.contextDepth:
                tokens.push(item)
                break
            item.parentNode = self
            item.digest(tokens)
            self.appendChild(item)
        self.paragraphs(force=False)
    @property
    def source(self):
        # Reconstruct the TeX source; an empty group renders as just '{'.
        if self.hasChildNodes():
            return '{%s}' % sourceChildren(self)
        return '{'
class begingroup(bgroup):
    """\\begingroup behaves like an explicit '{' group opener."""
    pass
class egroup(Command):
    """TeX explicit group closer ('}'): pops the grouping context."""
    unicode = u''
    def invoke(self, tex):
        self.ownerDocument.context.pop()
    @property
    def source(self):
        return '}'
    def digest(self, tokens):
        # Nothing to absorb; the matching bgroup consumed the content.
        return
class endgroup(egroup):
    """\\endgroup behaves like an explicit '}' group closer."""
    unicode = u''
class obeyspaces(Command):
    unicode = u''
# Plain-TeX looping primitives; rendered as nothing in the output.
class loop(Command):
    args = 'var:Tok'
    unicode = u''
class iterate(Command):
    unicode = u''
class repeat(Command):
    unicode = u''
# Horizontal spacing macros (no special handling needed here).
class enskip(Command):
    pass
class enspace(Command):
    pass
class quad(Command):
    pass
class qquad(Command):
    pass
class thinspace(Command):
    pass
class negthinspace(Command):
    pass
# Glue / vertical spacing macros.
class hglue(Command):
    pass
class vglue(Command):
    pass
class topglue(Command):
    pass
class nointerlineskip(Command):
    pass
class offinterlineskip(Command):
    pass
class smallskip(Command):
    pass
class medskip(Command):
    pass
class bigskip(Command):
    pass
# Line/page breaking macros.
class TeXBreak(Command):
    # Named TeXBreak because 'break' is a Python keyword.
    macroName = 'break'
    unicode = u''
class allowbreak(Command):
    unicode = u''
class ControlSpace(Command):
    # The active tilde (non-breaking space).
    macroName = 'active::~'
class slash(Command):
    pass
class filbreak(Command):
    pass
class goodbreak(Command):
    pass
class eject(Command):
    unicode = u''
class supereject(Command):
    unicode = u''
class removelastskip(Command):
    pass
class smallbreak(Command):
    pass
class medbreak(Command):
    pass
class bigbreak(Command):
    pass
# Box / line layout macros; 'self' argument captures the macro's content.
class line(Command):
    pass
class leftline(Command):
    args = 'self'
class llap(Command):
    args = 'self'
class centerline(Command):
    args = 'self'
class underbar(Command):
    args = 'self'
class hang(Command):
    pass
class textindent(Command):
    args = 'self'
class narrower(Command):
    pass
class raggedright(Environment):
    pass
#
# Accents are done in the LaTeX package
#
| StarcoderdataPython |
8007789 | import matplotlib
import matplotlib.pyplot as plt
from calculate import *
# Sample the unit interval at 100 points and evaluate nn(a, k) for the three
# curvature parameters k in {0, 1, -1}.
a = np.arange(0, 1, 1 / 100)

# Build each curve in one pass; the original grew the arrays with np.append
# inside a loop, which reallocates every iteration (quadratic).
test0 = np.array([nn(x, 0) for x in a])
test1 = np.array([nn(x, 1) for x in a])
testm1 = np.array([nn(x, -1) for x in a])

# Draw tick marks inside the axes.
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
plt.figure(figsize=(9, 9))
# plt.text(0.2,3.0,'$k=-1$',fontsize='14',ha='left',wrap=True)
plt.plot(a, test0, linewidth=1, color='r', label='k=0')
plt.plot(a, test1, linewidth=1, color='b', label='k=1')
plt.plot(a, testm1, linewidth=1, color='g', label='k=-1')
plt.legend()
plt.savefig('/users/dingding 1/desktop/homework9.26.eps')
plt.show()
| StarcoderdataPython |
11226312 | from Src.graph_algos import nd2vec
from Src.n2v_parser import nd2vec_parser
from Src.utilities import read_graph,tab_printer
#main function where all intialization and triggering happens
def nd2vec_main(args):
    '''
    Pipeline for representational learning for all nodes in a graph:
    print the settings, load the graph, precompute transition probabilities,
    simulate random walks and train the embeddings.
    '''
    tab_printer(args)
    graph = read_graph(args.input, args.weighted, args.directed)
    model = nd2vec(args, graph)
    model.prep_trans_prob()
    random_walks = model.simulate_walks(args.num_walks, args.walk_length)
    model.generate_nd2vec_embeddings(random_walks)
# Script entry point: parse CLI arguments and run the node2vec pipeline.
if __name__ == "__main__":
    args = nd2vec_parser()
    nd2vec_main(args)
| StarcoderdataPython |
1856402 | from django.contrib import admin
from .models import Item
# Register your models here.
class ItemAdmin(admin.ModelAdmin):
    """Admin options for Item: the timestamp fields are shown read-only."""
    readonly_fields = ('added', 'modified',)


# Register Item together with its options class.  The original called
# admin.site.register(Item) before ItemAdmin was defined and never passed it,
# so readonly_fields had no effect.
admin.site.register(Item, ItemAdmin)
| StarcoderdataPython |
8124550 | <reponame>Danieltry/calculadora1
# NOTE: Python 2 syntax (print statement, raw_input); under Python 3 this
# would be print("cualquiercosa") and input("mensaje").
print "cualquiercosa"
raw_input("mensaje")
| StarcoderdataPython |
11331438 | <reponame>Semicheche/foa_frappe_docker<gh_stars>1-10
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
	"""Regenerate the ``remark`` of every Journal Entry created since the
	v3 validation patch ran; entries missing mandatory data are skipped."""
	reference_date = guess_reference_date()
	names = frappe.db.sql_list("""select name from `tabJournal Entry`
		where date(creation)>=%s""", reference_date)
	for name in names:
		entry = frappe.get_doc("Journal Entry", name)
		try:
			entry.create_remarks()
		except frappe.MandatoryError:
			pass
		else:
			frappe.db.set_value("Journal Entry", entry.name, "remark", entry.remark)
def guess_reference_date():
	"""Return the creation time of the v3 validation patch run, falling back
	to a fixed date when no Patch Log row exists."""
	logged = frappe.db.get_value(
		"Patch Log", {"patch": "erpnext.patches.v4_0.validate_v3_patch"}, "creation")
	return logged or "2014-05-06"
| StarcoderdataPython |
213719 | <reponame>sciris/openpyexcel<gh_stars>1-10
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
from openpyexcel.descriptors.serialisable import Serialisable
from openpyexcel.descriptors import (
Sequence
)
from openpyexcel.descriptors.excel import (
Relation,
)
class ExternalReference(Serialisable):
    """One externally linked workbook reference, serialised as an
    ``<externalReference>`` element carrying a relationship id."""
    tagname = "externalReference"
    # Relation() is a descriptor handling the r:id attribute.
    id = Relation()
    def __init__(self, id):
        self.id = id
| StarcoderdataPython |
11304825 | <reponame>concurrentlabs/laguna
#!/usr/bin/env python
#
import os
import yaml
# This import statement MUST come after the lock
BASE_DIR = os.path.join(os.path.dirname(__file__), '..')
def read():
    """Parse config.yaml and servers.yaml and publish every setting as an
    attribute of the ``config`` module.

    On a YAML parse error the problem is logged as CRITICAL; attributes set
    before the error remain in place.
    """
    import config
    # ``with`` guarantees both handles are closed even on error.  The original
    # used the Python-2-only file() builtin and opened the second file outside
    # any try block, leaking the first handle if that open failed.
    with open('%s/config.yaml' % BASE_DIR, 'r') as config_stream, \
         open('%s/servers.yaml' % BASE_DIR, 'r') as server_conf_stream:
        try:
            # Config section...
            # IMPORTANT - config.yaml should be ../config.yaml and servers.yaml should be ../servers.yaml on Red Hat!
            # safe_load refuses arbitrary-object tags; plain settings files
            # never need the full loader.
            configs = yaml.safe_load(config_stream)
            config.debug = configs["debug"]
            config.port = configs["port"]
            config.http_response_codes = {}
            config.http_response_codes = configs["http_response_codes"]
            config.logging = configs["logging"]
            config.log_file = configs["log_file"]
            config.errors_log_file = configs["errors_log_file"]
            config.log_format = configs["log_format"]
            config.log_date_format = configs["log_date_format"]
            config.log_when = configs["log_when"]
            config.log_utc = configs["log_utc"]
            config.log_rotation = configs["log_rotation"]
            config.log_backups = configs["log_backups"]
            config.cookie_expires = {}
            config.cookie_expires = configs["cookie_expires"]
            config.alert_email = {}
            config.alert_email = configs["alert_email"]
            config.api_ip_allow_range = []
            config.api_ip_allow_range = configs["api_ip_allow_range"]
            config.keys = {}
            config.keys = configs["keys"]
            config.msg_code = {}
            config.msg_code = configs["msg_code"]
            # Server section...
            server_conf = yaml.safe_load(server_conf_stream)
            config.server_defaults = {}
            config.server_defaults = server_conf["server_defaults"]
            config.server_default_port = ":" + str(config.server_defaults["port"]) # Must be present in the servers.yaml file.
            if config.server_default_port == ":80" or config.server_default_port == ":":
                config.server_default_port = "" # Set it to empty since it's not needed for browsers/players.
            config.server_ports = {}
            config.server_ports = server_conf["server_ports"] # 0MQ port - default to 5555
            config.servers_ip = []
            config.servers_ip = server_conf["servers"]
            config.servers_interface_ip = []
            config.servers_interface_ip = server_conf["servers_interface_ip"]
            config.servers_server_config = {}
            config.servers_server_config = server_conf["server_config"]
            config.servers_purge = {}
            config.servers_purge = server_conf["purge"]
        except yaml.YAMLError as e:
            from log import log
            log("Config Error - %s" % e, "CRITICAL")
| StarcoderdataPython |
9754544 | import os
import collections
import json
import torch
import torchvision
import numpy as np
import scipy.misc as m
import scipy.io as io
import matplotlib.pyplot as plt
import cv2
import torchvision.transforms as transforms
from PIL import Image
from tqdm import tqdm
from torch.utils import data
def get_data_path(name):
    """Return the ``data_path`` configured for dataset *name* in config.json.

    The original opened the file without ever closing the handle; ``with``
    closes it deterministically, and json.load reads the stream directly.
    """
    with open('config.json') as fp:
        data = json.load(fp)
    return data[name]['data_path']
class pascalVOCLoader(data.Dataset):
    """Pascal VOC 2012 segmentation dataset (21 classes incl. background).

    Yields (image, label) pairs; when is_transform is set, images are resized
    and converted to CHW float tensors and labels to long tensors with the
    255 border value remapped to -1.
    """
    def __init__(self, root, split="train", is_transform=False, img_size=224, image_transform=None):
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.n_classes = 21
        # Accept either a square size or an explicit (H, W) tuple.
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        # Per-channel means subtracted in transform() -- presumably the BGR
        # Caffe/VGG means; TODO confirm.
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.image_transform = image_transform
        # Read the split's image-id list (one name per line).
        file_list = []
        with open(root + '/ImageSets/Segmentation/' + split + '.txt', 'r') as f:
            lines = f.readlines()
            filenames = [l.strip() for l in lines]
        N = len(filenames)
        print('Loading image and label filenames...\n')
        for i in range(N):
            file_list.append(filenames[i])
        self.files = file_list
    def __len__(self):
        return len(self.files)
    def __getitem__(self, index):
        # Load the JPEG image and its pre-encoded (class-id) PNG label mask.
        img_name = self.files[index]
        img_path = self.root + '/JPEGImages/' + img_name + '.jpg'
        lbl_path = self.root + '/SegmentationClass/pre_encoded/' + img_name + '.png'
        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)
        lbl = m.imread(lbl_path)
        lbl = np.array(lbl, dtype=np.int32)
        if self.is_transform:
            img, lbl = self.transform(img, lbl)
        return img, lbl
    def transform(self, img, lbl):
        if self.image_transform is None:
            # Default path: RGB->BGR, mean-subtract, resize, scale to [0,1],
            # HWC->CHW, then to a float tensor.
            img = img[:, :, ::-1]
            img = img.astype(np.float64)
            img -= self.mean
            img = cv2.resize(img, self.img_size, interpolation=cv2.INTER_CUBIC)
            img = img.astype(float) / 255.0
            img = img.transpose(2, 0, 1)
            img = torch.from_numpy(img).float()
        else:
            # Caller-supplied transform handles normalisation/tensorisation.
            img = cv2.resize(img, self.img_size, interpolation=cv2.INTER_CUBIC).astype(np.int32)
            img = self.image_transform(img)
            # img = img.transpose(2, 0, 1) ## No need to do this transpose here anymore,
            # as it is done internally within ToTensor() function, inside image_transform.
            # img = torch.from_numpy(img).float() # It has already been converted to tensor using transforms.ToTensor() in image_transform.
        # 255 marks void/border pixels; remap to -1 so losses can ignore them.
        lbl[lbl==255] = -1
        # NOTE(review): labels are only resized for the train split -- confirm
        # that evaluation deliberately uses full-resolution masks.
        if self.split=='train':
            lbl = lbl.astype(float)
            lbl = cv2.resize(lbl, self.img_size, interpolation=cv2.INTER_NEAREST)
            lbl = lbl.astype(int)
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
    def get_pascal_labels(self):
        # One RGB colour per class, indexed by class id (0 = background).
        return np.asarray([[0,0,0], [128,0,0], [0,128,0], [128,128,0], [0,0,128], [128,0,128],
                              [0,128,128], [128,128,128], [64,0,0], [192,0,0], [64,128,0], [192,128,0],
                              [64,0,128], [192,0,128], [64,128,128], [192,128,128], [0, 64,0], [128, 64, 0],
                              [0,192,0], [128,192,0], [0,64,128]])
    def encode_segmap(self, mask):
        # RGB colour mask -> integer class-id mask.
        mask = mask.astype(int)
        label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
        for i, label in enumerate(self.get_pascal_labels()):
            label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = i
        label_mask = label_mask.astype(int)
        return label_mask
    def decode_segmap(self, temp, plot=False):
        # Integer class-id mask -> RGB image; optionally shown with pyplot
        # (in which case nothing is returned).
        label_colours = self.get_pascal_labels()
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = label_colours[l, 0]
            g[temp == l] = label_colours[l, 1]
            b[temp == l] = label_colours[l, 2]
        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r
        rgb[:, :, 1] = g
        rgb[:, :, 2] = b
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb
if __name__ == '__main__':
    # Smoke test: show one batch of images and a decoded label mask.
    local_path = '/home/gpu_users/meetshah/segdata/pascal/VOCdevkit/VOC2012'
    dst = pascalVOCLoader(local_path, is_transform=True)
    trainloader = data.DataLoader(dst, batch_size=4)
    # NOTE(review): the loop variable shadows the ``data`` module imported at
    # the top of the file, and labels.numpy()[i+1] with i == 0 displays mask
    # index 1 rather than 0 -- confirm both are intentional.
    for i, data in enumerate(trainloader):
        imgs, labels = data
        if i == 0:
            img = torchvision.utils.make_grid(imgs).numpy()
            img = np.transpose(img, (1, 2, 0))
            img = img[:, :, ::-1]
            plt.imshow(img)
            plt.show()
            plt.imshow(dst.decode_segmap(labels.numpy()[i+1]))
            plt.show()
| StarcoderdataPython |
73494 | from ..ops import *
class Translator(object):
    """
    A translator wraps a physical operator and provides the compilation logic.
    It follows the producer/consumer model.
    It also contains information about the lineage it needs to capture.
    """
    # Class-level counter used to hand out a unique id per instance.
    _id = 0
    def __init__(self, op):
        self.id = Translator._id
        Translator._id += 1
        self.op = op
        self.child_translator = None
        self.parent_translator = None
        # Lineage bookkeeping flags/state:
        self.l_materialize = False  # keep lineage indexes after compilation?
        self.l_capture = False      # capture lineage for this operator?
        self.l_prev_translator = None # previous translator that contains lindexes
        self.lindex = None
        self.lindexes = []
    @property
    def propagated_lindexes(self):
        """
        Lineage indexes this translator exposes to its ancestors, e.g. in
            Join
            / \
            Join C
            |  |
            A  B
        Subclasses (e.g. RightTranslator) combine child lindexes here.
        """
        return self.lindexes
    def prepare(self, c, p, pipeline):
        # Link this translator into its pipeline between child c and parent p.
        self.child_translator = c
        self.parent_translator = p
        self.pipeline = pipeline
    def is_type(self, klasses):
        # True if self is an instance of any of the given classes.
        if not isinstance(klasses, list):
            klasses = [klasses]
        return any(isinstance(self, k) for k in klasses)
    def produce(self, ctx):
        # Producer hook: overridden by concrete translators.
        pass
    def consume(self, ctx):
        # Consumer hook: overridden by concrete translators.
        pass
    def compile_expr(self, ctx, e):
        """
        @return var name containing expression result
        """
        raise Exception("Not implemented")
    def compile_exprs(self, ctx, exprs):
        """
        @return [varname,] list of expression results
        """
        raise Exception("Not implemented")
    def compile_new_tuple(self, ctx, schema):
        """
        @return varname containing the new tuple
        """
        raise Exception("Not implemented")
    def clean_prev_lineage_indexes(self):
        """
        Clean up (delete) previous lineage indexes,
        if they are not materialized
        """
        if self.l_capture and self.l_prev_translator:
            self.l_prev_translator.clean_lineage_indexes()
    def clean_lineage_indexes(self):
        # Drop non-materialized lineage state, recursing into propagated
        # lindexes and clearing a join's build (left) side as well.
        if self.l_capture and not self.l_materialize:
            for lindex in self.propagated_lindexes:
                lindex.clean_lineage_indexes()
            self.lindex = None
            self.lindexes = []
            if hasattr(self, "left") and self.left:
                self.left.lindex = None
                self.left.lindexes = []
    def pretty_print(self):
        return self.op.pretty_print()
    def __str__(self):
        return "%s: %s" % (self.id, self.__class__.__name__)
class BottomTranslator(Translator):
    """
    Unary operators that are pipeline breakers (groupby, orderby)
    are split into bottom and top translators.
    Bottom is responsible for buffering tuples in an appropriate data structure
    (hashtable for groupby, list for orderby)
    """
    def __init__(self, op):
        super(BottomTranslator, self).__init__(op)
        self.l_i = None  # source rid (cf. TopTranslator)
class TopTranslator(Translator):
    """
    Top is responsible for processing and walking the populated data structure
    from Bottom and generating output tuples for its parent translators
    """
    def __init__(self, op, bottom):
        super(TopTranslator, self).__init__(op)
        self.bottom = bottom
        self.l_i = None # source rid
        self.l_o = None # output rid
    def initialize_lineage_indexes(self, ctx):
        # Hook: set up lineage indexes before walking (overridden as needed).
        pass
    def populate_lineage_indexes(self, ctx, v_bucket):
        # Hook: record lineage for one bucket of buffered tuples.
        pass
class LeftTranslator(Translator):
    """
    Binary join operators are split into a left and right side.
    For hash joins, the left translator is a pipeline breaker that
    collects tuples in a hash table.
    For theta joins, the left is just a loop
    """
    def __init__(self, op):
        super(LeftTranslator, self).__init__(op)
        self.l_i = None  # source rid (cf. TopTranslator)
class RightTranslator(Translator):
    """
    Iterates over the right side of the join and probes the left side.
    """
    def __init__(self, op, left):
        super(RightTranslator, self).__init__(op)
        assert op.is_type(Join)
        self.left = left
        self.l_i = None
        self.l_o = None
    @property
    def propagated_lindexes(self):
        # Lineage indexes flow from the build (left) side first, then ours;
        # a fresh list is returned so callers cannot mutate our state.
        return list(self.left.propagated_lindexes) + list(self.lindexes)
| StarcoderdataPython |
3469501 | <gh_stars>0
#!/usr/bin/env python3
# Print the inventory file verbatim; the context manager guarantees the
# handle is closed even if read() raises (the original closed it manually).
with open('inventory.json', 'r') as f:
    print(f.read(), end="")
| StarcoderdataPython |
3249502 | <filename>codility/caterpillar_method_count_distinct_slices.py<gh_stars>1-10
# https://app.codility.com/demo/results/training2AX89J-FPF/
def solution(M, A):
    """Count the distinct slices of A (values in 0..M) with no repeated value.

    Caterpillar method: for each tail, advance head while A[head] has not yet
    been seen in the current window; every advance adds (head - tail + 1) new
    slices ending at head.  The running total is capped at 1_000_000_000 as
    the task requires.  O(len(A)) time, O(M) extra space.

    (The original interleaved debug prints in the inner loop; they are
    removed here.)
    """
    in_current_slice = [False] * (M + 1)
    total_slices = 0
    head = 0
    for tail in range(len(A)):
        # Grow the window [tail, head) while it stays duplicate-free.
        while head < len(A) and not in_current_slice[A[head]]:
            in_current_slice[A[head]] = True
            total_slices += head - tail + 1
            head += 1
            total_slices = min(total_slices, 1000000000)
        # Shrink from the left: A[tail] leaves the window.
        in_current_slice[A[tail]] = False
    return total_slices
if __name__ == '__main__':
    # Example from the problem statement; expected answer is 9.
    result = solution(6, [3, 4, 5, 5, 2])
    print("Solution " + str(result))
"""
Input - [3, 4, 5, 5, 2]
For this tail..0
For this head..0
total up to here 1
For this head..1
total up to here 3
For this head..2
total up to here 6
For this tail..1
For this tail..2
For this tail..3
For this head..3
total up to here 7
For this head..4
total up to here 9
For this tail..4
Solution 9
"""
| StarcoderdataPython |
383040 | <filename>main.py<gh_stars>0
import pandas as pd
import re
# Read the saved HTML page and pull out the third table, which holds the
# training-site listing.
with open('input.html', 'r') as f:
    html_content = f.read()
result = pd.read_html(html_content)[2]
places = result.Weiterbildungsstätten
# Strip the trailing "~<distance>" suffix from every address.
trimmed_places_list = [re.search(r'(.*)\s~.*', p)[1] for p in places]
# Emit a one-column CSV with every address quoted.
with open('out.csv', 'w') as f:
    f.write('Weiterbildungsstätten\n')
    for place in trimmed_places_list:
        f.write('"{}"\n'.format(place))
| StarcoderdataPython |
11332999 | from pathlib import Path
from typing import Dict, List
from tqdm import trange
from bs4 import BeautifulSoup
import requests
from rich import print
import gpxpy
import gpxpy.gpx
import yaml
import re
# Settings ---------------------------------------------------------------------
# (cache name, listing URL template, highest pagination offset to fetch).
# NOTE(review): the first template also contains a literal {user} placeholder,
# but download_entry_list only formats {skip} -- confirm this URL ever worked.
pages = [
    ("bergrebell", "https://www.hikr.org/user/{user}/tour/?skip={skip}", 760),
    ("hikr", "https://www.hikr.org/filter.php?skip={skip}&act=filter&a=alp&ai=1&aa=4", 3000),
]
# German difficulty-column titles on hikr.org -> internal activity keys.
diff_titles = {
    "Wandern Schwierigkeit": "hiking",
    "Hochtouren Schwierigkeit": "mountaineering",
    "Klettern Schwierigkeit": "climbing",
    "Klettersteig Schwierigkeit": "via-ferrata",
    "Ski Schwierigkeit": "ski",
}
# ------------------------------------------------------------------------------
def dms2dec(dms_str: str) -> float:
    """Convert a degrees-minutes-seconds string to signed decimal degrees.

    Whitespace is ignored; a hemisphere letter s/S/w/W makes the result
    negative.  Missing minutes/seconds default to zero.  The regex literals
    are now raw strings (the original's '\\D+' relied on an invalid escape
    that Python 3 deprecates).
    """
    dms_str = re.sub(r'\s', '', dms_str)
    sign = -1 if re.search(r'[swSW]', dms_str) else 1
    # Up to four numeric runs: degrees, minutes, seconds, fractional seconds.
    numbers = [*filter(len, re.split(r'\D+', dms_str, maxsplit=4))]
    degree = numbers[0]
    minute = numbers[1] if len(numbers) >= 2 else '0'
    second = numbers[2] if len(numbers) >= 3 else '0'
    frac_seconds = numbers[3] if len(numbers) >= 4 else '0'
    second += "." + frac_seconds
    return sign * (int(degree) + float(minute) / 60 + float(second) / 3600)
def get_coordinates(url: str) -> Dict[str, str]:
    """Scrape one hikr waypoint page and map its title to its coordinates."""
    page = BeautifulSoup(requests.get(url).content, "html.parser")
    title = str(page.find("h1").string).strip()
    coord_cell = page.find(id="sidebar_swiss").find("td", class_="div13", string="Koordinaten: ")
    coordinates = str(coord_cell.next_sibling.string).strip()
    return {title: coordinates}
def cache_entry(url: str) -> List[Dict[str, str]]:
    """Fetch one tour page and resolve coordinates for each listed waypoint."""
    print(url)
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    waypoint_cell = soup.find("td", class_="fiche_rando_b", string="Wegpunkte:").next_sibling
    return [get_coordinates(link["href"]) for link in waypoint_cell.find_all("a")]
def download_entry_list(name: str, url: str, max_skip: int):
    """Scrape the paginated hikr tour list (or reload it from the YAML cache)
    and back-fill waypoint coordinates for every entry.

    Fix: the cached branch previously ``return``ed immediately, leaving dead
    code behind it and making the waypoint back-fill below unreachable for
    cached files; it now just loads ``entries`` and falls through.
    """
    cache_file: Path = Path(f"../../content/maps/{name}.yaml")
    def save(entries):
        # Persist the current state so an interrupted scrape can resume.
        with open(cache_file, "w") as fp:
            yaml.dump(entries, fp)
    if cache_file.exists():
        with open(cache_file, "r") as fp:
            entries = yaml.load(fp, Loader=yaml.SafeLoader)
    else:
        entries = []
        # Listing pages advance in steps of 20 entries.
        for skip in trange(0, max_skip + 20, 20):
            soup = BeautifulSoup(requests.get(url.format(skip=skip)).content, "html.parser")
            for result in soup.find_all("div", class_="content-list-intern"):
                entries.append({"url": result.find_next("a")["href"]})
                # Record every difficulty rating shown for this tour.
                for k, v in diff_titles.items():
                    r = result.find_next("span", attrs={"title": k})
                    if r is not None:
                        entries[-1][v] = r.contents[0].strip()
        save(entries)
    for i in range(len(entries)):
        if "waypoints" not in entries[i]:
            try:
                entries[i]["waypoints"] = cache_entry(entries[i]["url"])
            except Exception:
                # Best-effort: skip this entry, it will be retried next run.
                # (Was a bare except:, which also swallowed KeyboardInterrupt.)
                continue
            save(entries)
    save(entries)
    return entries
def get_tracks(entries: Dict[str, str], difficulty: str):
    """Build a GPX document with one track per entry matching *difficulty*."""
    gpx = gpxpy.gpx.GPX()
    for entry in entries:
        # Only entries with the requested rating and resolved waypoints.
        if entry.get("mountaineering", "") != difficulty or "waypoints" not in entry:
            continue
        segment = gpxpy.gpx.GPXTrackSegment()
        for waypoint in entry["waypoints"]:
            for title, dms in waypoint.items():
                lat, lon = [dms2dec(part) for part in dms.split(",")]
                segment.points.append(gpxpy.gpx.GPXTrackPoint(lat, lon, name=title))
        track = gpxpy.gpx.GPXTrack()
        track.segments.append(segment)
        gpx.tracks.append(track)
    return gpx
# Scrape each configured page and write one GPX file per difficulty grade.
for name, url, max_skip in pages:
    entries = download_entry_list(name, url, max_skip)
    # This is a str template ({difficulty} filled below), not a Path.
    gpx_file: str = f"../../content/maps/{name}_{{difficulty}}.gpx"
    for difficulty in ["L", "WS-", "WS", "WS+"]:
        gpx = get_tracks(entries, difficulty)
        with open(gpx_file.format(difficulty=difficulty), "w") as fp:
fp.write(gpx.to_xml()) | StarcoderdataPython |
1706388 | <reponame>maxuewei2/word2vec<filename>src/test_lr_ovr.py
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
# Load node embeddings: first line is a header, each remaining line is
# "<node-id> <float> <float> ...".
X = []
ids = []
with open('../data/gw.emb') as f:
    f.readline()  # skip the header line
    for raw in f:
        parts = raw.strip().split(' ')
        ids.append(int(parts[0]))
        X.append([float(v) for v in parts[1:]])
# Load node labels: each line is "<node-id> <label>".
yd = {}
with open('../data/usa-airports.group') as f:
    for raw in f:
        node, label = map(int, raw.strip().split(' '))
        yd[node] = label
y = [yd[node] for node in ids]
# Simple ordered 80/20 split (no shuffling).
train_ratio = 0.8
train_num = int(len(X) * train_ratio)
test_num = len(X) - train_num
train_X, test_X = X[:train_num], X[train_num:]
train_y, test_y = y[:train_num], y[train_num:]
# One-vs-rest logistic regression on the embeddings.
clf = LogisticRegression(random_state=0, max_iter=100000, solver='saga', penalty='l2')
clf = OneVsRestClassifier(clf).fit(train_X, train_y)
pred = clf.predict(test_X)
print(train_X)
print(train_y)
print(test_X)
print(test_y)
print(pred)
print(accuracy_score(test_y, pred))
#print(clf.coef_)
#print(clf.intercept_) | StarcoderdataPython |
11393260 | <reponame>HelkeBaeyens/read
# English tenses will be determined
from nltk import word_tokenize, pos_tag
import unittest
import re
def determine_tense_input(sentence):
    """Return a list of tense labels detected in *sentence*.

    The sentence is lower-cased, tokenised and POS-tagged (Penn Treebank
    tags).  Three regex tables are tried from most to least specific:
    word_TAG patterns, word-only patterns, then tag-only patterns; later
    tables are only consulted when the earlier ones matched nothing.

    Fix: the negative future-perfect patterns were mislabelled
    "future_perfect_affirmative"; they now yield "future_perfect_negative".
    """
    tense = []
    text = word_tokenize(sentence.lower())
    tagged_tup = pos_tag(text)
    # "pair" instead of the original "tuple", which shadowed the builtin.
    tags = [pair[1] for pair in tagged_tup]
    words = [pair[0] for pair in tagged_tup]
    tags_strings = " ".join(tags)
    words_strings = " ".join(words)
    tags_words_strings = " ".join([pair[0] + "_" + pair[1] for pair in tagged_tup])
    # Tag-only patterns (least specific).
    dict_tenses1 = {\
    r"(PRP.*VBD)" : "past_simple_affirmative",
    r"(PRP.*VBP)|(VBZ.*\._\.)" : "present_simple_affirmative"
    }
    # Word-only patterns.
    dict_tenses2 = {\
    r"(do not)|(does not)|(am not)|(is not)|(cannot)|(can not)|(do n't)|(ca n't)|(am n't)" : "present_simple_negative",
    r"(did not)|(did n't)|(didn't)" : "past_simple_negative",
    r"(has been)|(have been)" : "present_perfect_affirmative",
    r"(has not been)|(has n't been)|(hasn't been)|(have not been)|(haven't been)|(have n't been)" : "present_perfect_negative",
    }
    # word_TAG patterns (most specific).
    dict_tenses3 = {\
    r"(do.*PRP.*VB.*\?)|(does.*PRP.*VB.*\?)|(do.*PRP.*VBP.*\?)|(does.*PRP.*VBP.*\?)" : "present_simple_interrogative",
    r"(did.*PRP.*VB.*\?)|(did.*PRP.*VBP.*\?)" : "past_simple_interrogative",
    r"(is.*VBG.*\._\.)|(am.*VBG.*\._\.)|(are.*VBG.*\._\.)" : "present_continuous_affirmative",
    r"(was.*VBG.*\._\.)|(were.*VBG.*\._\.)" : "past_continuous_affirmative",
    r"(is.*n't.*VBG.*\._\.)|(is.*not.*VBG.*\._\.)|(are.*n't.*VBG.*\._\.)|(are.*not.*VBG.*\._\.)|('m.*not.*VBG.*\._\.)|(am.*not.*VBG.*\._\.)" : "present_continuous_negative",
    r"(was.*n't.*VBG.*\._\.)|(was.*not.*VBG.*\._\.)|(wasn't.*VBG.*\._\.)|(were.*n't.*VBG*.\._\.)|(were.*not.*VBG.*\._\.)|(weren't.*VBG.*\._\.)" : "past_continuous_negative",
    r"(is.*PRP.*VBG.*\?)|(am.*PRP.*VBG.*\?)|(are.*PRP.*VBG.*\?)" : "present_continuous_interrogative",
    r"(was.*PRP.*VBG.*\?)|(were.*PRP.*VBG.*\?)" : "past_continuous_interrogative",
    r"(have.*PRP.*been.*\?)|(has.*PRP.*been.*\?)" : "present_perfect_interrogative",
    r"(have.*been.*VBG.*\._\.)|(has.*been.*VBG.*\._\.)|('ve.*been.*VBG.*\._\.)" : "present_perfect_continuous_affirmative",
    r"(have.*not.*been.*VBG.*\._\.)|(haven't.*been.*VBG.*\._\.)|(have.*n't.*been.*VBG.*\._\.)|(has.*not.*been.*VBG.*\._\.)|(hasn't.*been.*VBG.*\._\.)|(has.*n't.*been.*VBG.*\._\.)" : "present_perfect_continuous_negative",
    r"(have.*PRP.*been.*VBG.*\?)|(has.*PRP.*been.*VBG.*\?)" : "present_perfect_continuous_interrogative",
    r"(had.*VBN.*\._\.)" : "past_perfect_affirmative",
    r"(had.*not.*VBN.*\._\.)|(hadn't.*VBN.*\._\.)|(had.*n't.*VBN.*\._\.)" : "past_perfect_negative",
    r"(had.*PRP.*VBN.*\?)" : "past_perfect_interrogative",
    r"(had.*been.*VBG.*\._\.)" : "past_perfect_continuous_affirmative",
    r"(had.*not.*been.*VBG.*\._\.)|(had.*n't.*been.*VBG.*\._\.)|(hadn't.*been.*VBG.*\._\.)" : "past_perfect_continuous_negative",
    r"(had.*PRP.*been.*VBG.*\?)" : "past_perfect_continuous_interrogative",
    r"(will.*VB.*\._\.)" : "future_simple_affirmative",
    r"(wo.*n't.*VB.*\._\.)|(will.*not.*VB.*\._\.)|(will.*n't.*\._\.)" : "future_simple_negative",
    r"(will.*PRP.*VB.*\?)" : "future_simple_interrogative",
    r"(will.*be.*VBG.*\._\.)" : "future_continuous_affirmative",
    r"(won't.*be.*VBG.*\._\.)| (will.*not.*VBG.*\._\.)|(will.*n't.*VBG.*\._\.)" :"future_continuous_negative",
    r"(will.*PRP.*be.*VBG.*\?)" : "future_continuous_interrogative",
    r"(will.*have.*VBN.*\._\.)" : "future_perfect_affirmative",
    # Fixed: these negative patterns were labelled "future_perfect_affirmative".
    r"(won't.*have.*VBN.*\._\.)|(will.*not.*have.*VBN.*\._\.)|(will.*n't have.*VBN.*\._\.)" : "future_perfect_negative",
    r"(will.*PRP.*have.*VBN.*\?)" : "future_perfect_interrogative",
    r"(will.*have.*been.*VBG.*\._\.)" : "future_perfect_continuous_affirmative",
    r"(won't.*have.*been.*VBG.*\._\.)|(will.*not.*have.*been.*VBG.*\._\.)|(will.*n't.*have.*been.*VBG.*\._\.)" : "future_perfect_continuous_negative",
    r"(will.*PRP.*have.*been.*VBG.*\?)" : "future_perfect_continuous_interrogative",
    r"(would.*VB.*\._\.)" : "conditional_affirmative",
    r"(wouldn't_VB.*\._\.)|(would.*not_VB.*\._\.)|(would.*n't_VB.*\._\.)" : "conditional_negative",
    r"(would.*PRP.*VB.*\?)" : "conditional_interrogative",
    r"(would.*be.*VBG.*\._\.)" :"conditional_continuous_affirmative",
    r"(wouldn't.*be.*VBG.*\._\.)|(would.*not.*VBG.*\._\.)|(would.*n't.*VBG.*\._\.)" : "conditional_continuous_negative",
    r"(would.*PRP.*be.*VBG.*\?)" : "conditional_continuous_interrogative",
    r"(would.*have.*VBN.*\._\.)" : "conditional_perfect_affirmative",
    r"(wouldn't.*have.*VNB.*\._\.)|(would.*not.*have.*VBN.*\._\.)|(would.*n't.*have.*VBN.*\._\.)" : "conditional_perfect_negative",
    r"(would.*PRP.*have.*VBG.*\?)" : "conditional_perfect_interrogative",
    r"(is.*going.*TO.*VBN.*\._\.)|('s.*going.*TO.*VBN.*\._\.)|(am.*going.*TO.*VBN.*\._\.)|(are.*going.*TO.*VBN.*\._\.)|('re.*going.*TO.*VBN.*\._\.)|('m.*going.*TO.*VBN.*\._\.)" : "future_going_to_affirmative",
    r"(is.*n't.*going.*TO.*VBN.*\._\.)|(is.*not.*going.*TO.*VBN.*\._\.)|(are.*n't.*going.*TO.*VBN.*\._\.)|(are.*not.*going.*TO.*VBN.*\._\.)|('m.*not.*going.*TO.*VBN.*\._\.)|(am.*not.*going.*TO.*VBN.*\._\.)" : "future_going_to_negative",
    r"(is.*PRP.*going.*TO.*VBN.*\?)|(am.*PRP.*going.*TO.*VBN.*\?)|('s.*PRP.*going.*TO.*VBN.*\?)|('m.*PRP.*going.*TO.*VBN.*\?)|(are.*PRP.*going.*TO.*VBN.*\?)|('re.*PRP.*going.*TO.*VBN.*\?)" : "future_going_to_interrogative"
    }
    # Try the tables from most to least specific; stop at the first that hits.
    for tense_regex in dict_tenses3:
        if re.search(tense_regex, tags_words_strings):
            tense.append(dict_tenses3[tense_regex])
    if not tense:
        for tense_regex in dict_tenses2:
            if re.search(tense_regex, words_strings):
                tense.append(dict_tenses2[tense_regex])
    if not tense:
        for tense_regex in dict_tenses1:
            if re.search(tense_regex, tags_strings):
                tense.append(dict_tenses1[tense_regex])
    return tense
def level_of_tenses(tense):
    """Map a tense label (as produced by determine_tense_input) to its CEFR
    level, or None for unknown labels.

    Fix: the table contained the key "future_perfect_affirmative" twice (a
    duplicate dict key, so the second silently overwrote the first) and had
    no entry at all for "future_perfect_negative".
    """
    dict_level = {
        "past_simple_affirmative":"A1",
        "present_simple_affirmative":"A1",
        "present_simple_negative":"A1",
        "past_simple_negative":"A1",
        "present_perfect_affirmative":"A2",
        "present_perfect_negative":"A2",
        "present_simple_interrogative":"A1",
        "past_simple_interrogative":"A1",
        "present_continuous_affirmative":"A1",
        "past_continuous_affirmative":"A2",
        "present_continuous_negative":"A1",
        "past_continuous_negative":"A2",
        "present_continuous_interrogative":"A1",
        "past_continuous_interrogative":"A2",
        "present_perfect_interrogative":"A2",
        "present_perfect_continuous_affirmative":"B2",
        "present_perfect_continuous_negative":"B2",
        "present_perfect_continuous_interrogative":"B2",
        "past_perfect_affirmative":"B1",
        "past_perfect_negative":"B1",
        "past_perfect_interrogative":"B1",
        "past_perfect_continuous_affirmative":"B2",
        "past_perfect_continuous_negative":"B2",
        "past_perfect_continuous_interrogative":"B2",
        "future_simple_affirmative":"A2",
        "future_simple_negative":"A2",
        "future_simple_interrogative":"A2",
        "future_continuous_affirmative":"A2",
        "future_continuous_negative":"A2",
        "future_continuous_interrogative":"A2",
        "future_perfect_affirmative":"B2",
        "future_perfect_negative":"B2",
        "future_perfect_interrogative":"B2",
        "future_perfect_continuous_affirmative":"B2",
        "future_perfect_continuous_negative":"B2",
        "future_perfect_continuous_interrogative":"B2",
        "conditional_affirmative":"A1",
        "conditional_negative":"A1",
        "conditional_interrogative":"A1",
        "conditional_continuous_affirmative":"C1",
        "conditional_continuous_negative":"C1",
        "conditional_continuous_interrogative":"C1",
        "conditional_perfect_affirmative":"C1",
        "conditional_perfect_negative":"C1",
        "conditional_perfect_interrogative":"C1",
        "future_going_to_affirmative":"A2",
        "future_going_to_negative":"A2",
        "future_going_to_interrogative":"A2",
    }
    return dict_level.get(tense)
class My_test(unittest.TestCase):
def test_psa(self):
self.assertEqual(determine_tense_input("They have a car."), ["present_simple_affirmative"])
def test_psn(self):
self.assertEqual(determine_tense_input("They don't have a car."), ["present_simple_negative"])
def test_psn1(self):
self.assertEqual(determine_tense_input("They Don't have a car."), ["present_simple_negative"])
def test_psi(self):
self.assertEqual(determine_tense_input("Do they have a car?"), ["present_simple_interrogative"])
def test_pca(self):
self.assertEqual(determine_tense_input("He is reading now."), ["present_continuous_affirmative"])
def test_pcn(self):
self.assertEqual(determine_tense_input("He isn't reading now."), ["present_continuous_negative"])
def test_pci(self):
self.assertEqual(determine_tense_input("Is he reading now?"), ["present_continuous_interrogative"])
def test_pasa(self):
self.assertEqual(determine_tense_input("They saw a movie."), ["past_simple_affirmative"])
def test_pasn(self):
self.assertEqual(determine_tense_input("They didn't see a movie."), ["past_simple_negative"])
def test_pasi(self):
self.assertEqual(determine_tense_input("Did they see a movie?"), ["past_simple_interrogative"])
def test_paca(self):
self.assertEqual(determine_tense_input("It was snowing."), ["past_continuous_affirmative"])
def test_pacn(self):
self.assertEqual(determine_tense_input("It wasn't snowing."), ["past_continuous_negative"])
def test_paci(self):
self.assertEqual(determine_tense_input("Was it snowing?"), ["past_continuous_interrogative"])
def test_ppa(self):
self.assertEqual(determine_tense_input("We have been there."), ["present_perfect_affirmative"])
def test_ppn(self):
self.assertEqual(determine_tense_input("We haven't been there."), ["present_perfect_negative"])
def test_ppi(self):
self.assertEqual(determine_tense_input("Have we been there?"), ["present_perfect_interrogative"])
def test_ppca(self):
self.assertEqual(determine_tense_input("You have been working hard."), ["present_perfect_continuous_affirmative"])
def test_ppcn(self):
self.assertEqual(determine_tense_input("You haven't been working hard."), ["present_perfect_continuous_negative"])
    # --- Tense-detection expectations --------------------------------------
    # Each test feeds one sentence to determine_tense_input() and asserts the
    # single tense label it should return.  The expected label strings must
    # match the implementation byte-for-byte, including its apparent
    # misspellings (see the review notes below) -- do not "fix" them here
    # without also checking the implementation.
    def test_ppci(self):
        self.assertEqual(determine_tense_input("Have you been working hard?"), ["present_perfect_continuous_interrogative"])
    def test_papa(self):
        self.assertEqual(determine_tense_input("They had left for France."), ["past_perfect_affirmative"])
    def test_papn(self):
        self.assertEqual(determine_tense_input("They hadn't left for France."), ["past_perfect_negative"])
    def test_papi(self):
        self.assertEqual(determine_tense_input("Had they left for France?"), ["past_perfect_interrogative"])
    #NLTK tagged 'left' as VBD, whereas it should be VBN: TODO: bug report
    def test_papca(self):
        self.assertEqual(determine_tense_input("She had been waiting for him."), ["past_perfect_continuous_affirmative"])
    def test_papcn(self):
        self.assertEqual(determine_tense_input("She hadn't been waiting for him."), ["past_perfect_continuous_negative"])
    def test_papci(self):
        self.assertEqual(determine_tense_input("Had she been waiting for him?"), ["past_perfect_continuous_interrogative"])
    def test_fsa(self):
        self.assertEqual(determine_tense_input("It will snow this Winter."), ["future_simple_affirmative"])
    def test_fsn(self):
        self.assertEqual(determine_tense_input("It won't snow this Winter."), ["future_simple_negative"])
    # NOTE(review): "interogative" below is misspelled; presumably the
    # implementation emits the same misspelled label -- confirm before fixing.
    def test_fsi(self):
        self.assertEqual(determine_tense_input("Will it snow this Winter?"), ["future_simple_interogative"])
    def test_fca(self):
        self.assertEqual(determine_tense_input("She will be travelling."), ["future_continuous_affirmative"])
    def test_fcn(self):
        self.assertEqual(determine_tense_input("She won't be travelling."), ["future_continuous_negative"])
    def test_fci(self):
        self.assertEqual(determine_tense_input("Will she be travelling?"), ["future_continuous_interrogative"])
    def test_fpa(self):
        self.assertEqual(determine_tense_input("He will have arrived."), ["future_perfect_affirmative"])
    def test_fpn(self):
        self.assertEqual(determine_tense_input("He won't have arrived"), ["future_perfect_negative"])
    def test_fpi(self):
        self.assertEqual(determine_tense_input("Will he have arrived?"), ["future_perfect_interrogative"])
    def test_fpca(self):
        self.assertEqual(determine_tense_input("You will have been working."), ["future_perfect_continuous_affirmative"])
    def test_fpcn(self):
        self.assertEqual(determine_tense_input("You won't have been working."), ["future_perfect_continuous_negative"])
    def test_fpci(self):
        self.assertEqual(determine_tense_input("Will you have been working?"), ["future_perfect_continuous_interrogative"])
    def test_ca(self):
        self.assertEqual(determine_tense_input("I would fly there."), ["conditional_affirmative"])
    def test_cn(self):
        self.assertEqual(determine_tense_input("I wouldn't fly there."), ["conditional_negative"])
    def test_ci(self):
        self.assertEqual(determine_tense_input("Would you fly there?"), ["conditional_interrogative"])
    def test_cca(self):
        self.assertEqual(determine_tense_input("They would be sleeping now."), ["conditional_continuous_affirmative"])
    def test_ccn(self):
        self.assertEqual(determine_tense_input("They wouldn't be sleeping now."), ["conditional_continuous_negative"])
    def test_cci(self):
        self.assertEqual(determine_tense_input("Would they be sleeping now?"), ["conditional_continuous_interrogative"])
    def test_cpa(self):
        self.assertEqual(determine_tense_input("She would have been there."), ["conditional_perfect_affirmative"])
    # NOTE(review): "conditional perfect_negative" below has a space where a
    # sibling label uses an underscore; presumably mirrored in the
    # implementation -- confirm before normalising.
    def test_cpn(self):
        self.assertEqual(determine_tense_input("She wouldn't have been there."), ["conditional perfect_negative"])
    def test_cpi(self):
        self.assertEqual(determine_tense_input("Would she have been there?"), ["conditional_perfect_interrogative"])
    def test_futa(self):
        self.assertEqual(determine_tense_input("She is going to get married."), ["future_going_to_affirmative"])
    def test_futn(self):
        self.assertEqual(determine_tense_input("She isn't going to get married."), ["future_going_to_negative"])
    def test_futi(self):
        self.assertEqual(determine_tense_input("Is she going to get married?"), ["future_going_to_interrogative"])
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
73971 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = []
import json
import sqlite3
if __name__ == "__main__":
    # Load the scraped AAS abstracts and insert them into the local SQLite DB.
    with open("aas/abstracts.json") as f:
        data = json.load(f)
    with sqlite3.connect("aas/aas.db") as conn:
        c = conn.cursor()
        for doc in data:
            # session id.
            # Look up (or lazily create) the session row for this abstract.
            c.execute("select id from sessions where aas_id=?",
                      (doc["session_id"], ))
            session_id = c.fetchone()
            if session_id is None:
                # First time we see this session: insert a stub row (title
                # unknown at this point), then re-query for its rowid.
                c.execute("""insert into sessions(aas_id,title,date,room,type)
                            values(?,?,?,?,?)
                        """, (doc["session_id"], None, doc["date"],
                              doc["room"], doc["type"]))
                conn.commit()
                c.execute("select id from sessions where aas_id=?",
                          (doc["session_id"], ))
                session_id = c.fetchone()
            session_id = session_id[0]
            # abstract.
            # Counts are stored as a JSON blob in a text column.
            c.execute("""insert into abstracts(aas_id,session_id,title,
                                abstract,counts)
                        values(?,?,?,?,?)
                    """, (doc["id"], session_id, doc["title"],
                          doc["abstract"], json.dumps(doc["counts"])))
            conn.commit()
            c.execute("select id from abstracts where aas_id=?", (doc["id"], ))
            abstract_id = c.fetchone()[0]
            # authors.
            # One row per author, linked back to the abstract just inserted.
            c.executemany("insert into authors(abstract_id,name) values(?,?)",
                          [(abstract_id, author) for author in doc["authors"]])
| StarcoderdataPython |
1754614 | <reponame>c-goldschmidt/AoC_2018
from collections import defaultdict
from day import Day
class Pots:
    """Sparse row of plant pots for AoC 2018 day 12 (1-D cellular automaton).

    ``state`` maps pot index -> '#' (plant) or '.' (empty); indices never
    touched read as empty via the defaultdict default.
    """
    def __init__(self, initial_state, state_map):
        self.state = defaultdict(lambda: '.')
        for i in range(len(initial_state)):
            self.state[i] = initial_state[i]
        # 5-character neighbourhood pattern -> next value of the centre pot.
        self.state_map = state_map
    def range_string(self, pivot):
        # Five-character neighbourhood centred on ``pivot``.
        # NOTE: reading a missing key from the defaultdict *inserts* it, so
        # calling this widens ``self.state`` as a side effect; ``apply``'s
        # index range below depends on those extra keys being present.
        result = self.state[pivot - 2]
        result += self.state[pivot - 1]
        result += self.state[pivot]
        result += self.state[pivot + 1]
        result += self.state[pivot + 2]
        return result
    def apply(self):
        """Advance the automaton one generation."""
        indices = list(sorted(self.state.keys()))
        next_state = defaultdict(lambda: '.')
        # Scan two pots beyond the currently tracked span; ``indices``
        # already includes keys inserted by earlier range_string() reads.
        for index in range(indices[0] - 2, indices[-1] + 2):
            next_state[index] = self.state_map.get(self.range_string(index), '.')
        self.state = next_state
    def get_active(self):
        """Sorted-by-insertion list of indices that currently hold a plant."""
        return [key for key, value in self.state.items() if value == '#']
    def minmax(self):
        """Trim tracked keys to the [min, max] span of live plants."""
        active = self.get_active()
        min_index = min(active)
        max_index = max(active)
        indices = list(sorted(self.state.keys()))
        for index in indices:
            if index < min_index or index > max_index:
                del self.state[index]
    def sum(self):
        """Sum of the indices of all pots containing a plant (puzzle answer)."""
        return sum([key for key, value in self.state.items() if value == '#'])
class Day12(Day):
    """AoC 2018 day 12 solution built on the shared ``Day`` harness."""
    def parse(self, content):
        """Split input into the initial pot string and the pattern -> result map."""
        lines = super().parse(content)
        init = lines[0].replace('initial state: ', '')
        changes = [line.split(' => ') for line in lines[2:]]
        return init, {cng[0]: cng[1] for cng in changes}
    def part1(self):
        """Sum of live pot indices after exactly 20 generations."""
        pots = Pots(self.input[0], self.input[1])
        for generation in range(20):
            pots.apply()
            pots.minmax()
        return pots.sum()
    def part2(self):
        """Extrapolate to 50 billion generations.

        Runs until the live pattern becomes a fixed shape that merely shifts
        right by one pot per generation, then each remaining generation adds
        ``len(active)`` to the index sum.
        """
        pots = Pots(self.input[0], self.input[1])
        pots.minmax()
        prev = None
        # -1 because one apply() has already happened when the stability
        # check for generation g fires inside the loop body.
        target = 50_000_000_000 - 1
        for generation in range(target):
            pots.apply()
            pots.minmax()
            active = tuple(pots.get_active())
            if self._is_stable(prev, active):
                steps_to_end = target - generation
                return pots.sum() + steps_to_end * len(active)
            prev = active
        return None
    @staticmethod
    def _is_stable(prev, active):
        # Stable means: same number of plants, every index advanced by +1.
        if not prev or len(prev) != len(active):
            return
        return all((active[index] -1 == prev[index] for index in range(len(active))))
| StarcoderdataPython |
1811248 | <reponame>Arbupa/DAS_Sistemas
import abc
class archivoComponent(metaclass=abc.ABCMeta):
    """Abstract component of the composite file-system demo.

    NOTE(review): the accessors below read ``self.name`` / ``self.type``,
    but the ``directory`` subclass only sets ``self.nombre`` / ``self.tipo``
    (and ``Hoja`` only sets ``self.tipo``), so calling them can raise
    AttributeError -- confirm which attribute names are intended.
    """
    @abc.abstractmethod
    def path(self):
        pass
    def get_Name(self):
        return self.name # name of the file
    def get_Type(self):
        return self.type # directory or file
class directory(archivoComponent):
    """Composite node: a directory that can hold files and other directories."""
    def __init__(self, nombre):
        self.directorios = []
        # Keep the original Spanish attributes and mirror them onto the
        # ``name``/``type`` attributes that the base-class accessors
        # (get_Name/get_Type) actually read; without this, path() raised
        # AttributeError.
        self.nombre = nombre
        self.name = nombre
        self.tipo = 'Directorio'
        self.type = self.tipo
    def path(self):
        """Return this directory's description followed by its children's.

        The original returned before the recursion, leaving the child loop
        unreachable; the children are now included as intended.
        """
        lines = [f'Nombre: {self.get_Name()}\n Tipo: {self.get_Type()}']
        for child in self.directorios:
            lines.append(child.path())
        return '\n'.join(lines)
    def agregar(self, archive: archivoComponent):
        """Add a child component."""
        self.directorios.append(archive)
    def eliminar(self, archive: archivoComponent):
        """Remove a child component (raises ValueError if absent)."""
        self.directorios.remove(archive)
    def limpiar(self):
        """Drop all children."""
        print("Clearing all the paths")
        self.directorios = []
class Hoja(archivoComponent):
    """Leaf node: a plain file with a name and an extension."""
    def __init__(self, nombre, extension):
        self.name = nombre
        self.extension = extension
        # Mirror ``tipo`` onto ``type`` so the base-class get_Type() (which
        # reads ``self.type``) works instead of raising AttributeError.
        self.tipo = 'Archivo'
        self.type = self.tipo
    def path(self):
        """Describe this file.

        Implements the abstract method; without it the class could not be
        instantiated at all (ABCMeta raises TypeError), which broke the
        demo script that builds the tree.
        """
        return f'Nombre: {self.get_Name()}\n Tipo: {self.get_Type()}'
    def directorio(self):
        # Kept for backward compatibility with existing callers.
        return self.path()
    def get_Extension(self):
        return self.extension
class SistemaDeArchivos:
    """Thin facade over the root component of the composite file tree."""
    def __init__(self, archive: archivoComponent):
        self.archive = archive
    def printArchive(self):
        # Delegates to the component; note the return value of path() is
        # discarded here.
        self.archive.path()
if __name__ == '__main__':
    # Build the demo directory tree (the different files that make up the
    # directory structure).
    root = directory('/')
    etc = directory('/etc')
    var = directory('/var')
    usr = directory('/usr')
    include = directory('/include')
    home = directory('/home')
    users = directory('/users')
    salguer = directory('/salguer')
    documentos = directory('/documentos')
    archivo1 = Hoja('ensayo', 'txt')
    tarea = Hoja('presentacion', 'txt')
    tarea2 = Hoja('DAS', 'txt')
    # Wire the tree: /, /etc, /var, /usr/include, /home/users/salguer/...
    root.agregar(etc)
    root.agregar(var)
    root.agregar(usr)
    root.agregar(home)
    usr.agregar(include)
    home.agregar(users)
    users.agregar(salguer)
    salguer.agregar(archivo1)
    salguer.agregar(documentos)
    salguer.agregar(tarea)
    salguer.agregar(tarea2)
    root.path()
| StarcoderdataPython |
4860268 | import pytest
import os
import os.path as osp
import shutil as sh
import dataset_split.dir_utils as dir_utils
THIS_PATH = osp.join(os.getcwd(), 'dataset_split', 'test')
SAFE_PATH = osp.join(THIS_PATH, 'test-utils')
TEST_PATH = osp.join(THIS_PATH, 'test-utils-exec')
TEST_DIRS = ['OMG', 'ROFL', 'XOXO', '.SNEAKY']
ORIGINAL_DIRS = ['folder1', 'folder2']
@pytest.fixture(autouse=True)
def clean_mess():
    """Give every test a pristine working copy of the fixture tree."""
    # Before each test: copy the pristine tree into the working location.
    sh.copytree(SAFE_PATH, TEST_PATH)
    yield
    # After each test: delete the working tree so tests can't interfere.
    sh.rmtree(TEST_PATH)
def test_list_dirs():
    """list_dirs returns exactly the child directories of each folder."""
    expected_fruit = {'Apple', 'Pineapple', 'Orange'}
    expected_food = {'Spaghetti', 'Steak', 'Rice'}
    listed_fruit = dir_utils.list_dirs(osp.join(TEST_PATH, 'folder1'))
    listed_food = dir_utils.list_dirs(osp.join(TEST_PATH, 'folder2'))
    assert len(listed_fruit) == len(expected_fruit)
    assert len(listed_food) == len(expected_food)
    assert set(listed_fruit) == expected_fruit
    assert set(listed_food) == expected_food
def test_create_dirs():
    """create_dirs adds the requested directories next to the existing ones."""
    dir_utils.create_dirs(TEST_PATH, TEST_DIRS)
    found = set(os.listdir(TEST_PATH))
    wanted = set(TEST_DIRS) | set(ORIGINAL_DIRS)
    assert len(wanted) == len(found)
    assert wanted == found
def test_remove_dirs():
    """remove_dirs deletes exactly the named directories, leaving the rest."""
    for name in TEST_DIRS:
        os.mkdir(osp.join(TEST_PATH, name))
    dir_utils.remove_dirs(TEST_PATH, TEST_DIRS)
    remaining = set(os.listdir(TEST_PATH))
    assert set(ORIGINAL_DIRS) == remaining
| StarcoderdataPython |
8169680 | <gh_stars>1-10
import os
from spaceone.inventory.libs.common_parser import *
from spaceone.inventory.libs.schema.dynamic_widget import ChartWidget, CardWidget
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, ListDyField, EnumDyField, SearchField, SizeField
from spaceone.inventory.libs.schema.resource import CloudServiceTypeResource, CloudServiceTypeResponse, CloudServiceTypeMeta
# Widget (dashboard) definitions are shipped as YAML files next to this module.
current_dir = os.path.abspath(os.path.dirname(__file__))
total_count_conf = os.path.join(current_dir, 'widget/total_count.yaml')
storage_total_size_conf = os.path.join(current_dir, 'widget/storage_total_size.yaml')
item_total_count_conf = os.path.join(current_dir, 'widget/item_total_count.yaml')
read_capacity_total_conf = os.path.join(current_dir, 'widget/read_capacity_total.yaml')
write_capacity_total_conf = os.path.join(current_dir, 'widget/write_capacity_total.yaml')
table_count_by_region_conf = os.path.join(current_dir, 'widget/table_count_by_region.yaml')
table_count_by_account_conf = os.path.join(current_dir, 'widget/table_count_by_account.yaml')
top_table_size_conf = os.path.join(current_dir, 'widget/top_table_size.yaml')
# Cloud service type registration for AWS DynamoDB tables.
cst_table = CloudServiceTypeResource()
cst_table.name = 'Table'
cst_table.provider = 'aws'
cst_table.group = 'DynamoDB'
cst_table.labels = ['Database']
cst_table.is_primary = True
cst_table.is_major = True
cst_table.service_code = 'AmazonDynamoDB'
cst_table.tags = {
    'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/Amazon-DynamoDB.svg',
}
# Console metadata: table columns, search filters and dashboard widgets.
cst_table._metadata = CloudServiceTypeMeta.set_meta(
    fields=[
        EnumDyField.data_source('Status', 'data.table_status', default_state={
            'safe': ['ACTIVE'],
            'warning': ['CREATING', 'UPDATING', 'DELETING', 'ARCHIVING'],
            'alert': ['INACCESSIBLE_ENCRYPTION_CREDENTIALS', 'ARCHIVED']
        }),
        TextDyField.data_source('Partition Key', 'data.partition_key_display'),
        TextDyField.data_source('Sort Key', 'data.sort_key_display'),
        TextDyField.data_source('Indexes', 'data.index_count'),
        TextDyField.data_source('Total read capacity', 'data.total_read_capacity'),
        TextDyField.data_source('Total write capacity', 'data.total_write_capacity'),
        ListDyField.data_source('Auto Scaling', 'data.auto_scaling_policies', default_badge={'type': 'outline'}),
        # Fields marked is_optional are hidden by default in the console.
        TextDyField.data_source('Table ID', 'data.table_id', options={
            'is_optional': True
        }),
        TextDyField.data_source('ARN', 'data.table_arn', options={
            'is_optional': True
        }),
        TextDyField.data_source('Encryption Type', 'data.encryption_type', options={
            'is_optional': True
        }),
        SizeField.data_source('Table Size', 'instance_size', options={
            'is_optional': True
        }),
        TextDyField.data_source('Item Count', 'data.item_count', options={
            'is_optional': True
        }),
        TextDyField.data_source('Time to Live Status', 'data.time_to_live.time_to_live_status', options={
            'is_optional': True
        }),
        TextDyField.data_source('Billing Mode', 'data.billing_mode_summary.billing_mode', options={
            'is_optional': True
        })
    ],
    search=[
        SearchField.set(name='Table ARN', key='data.table_arn'),
        SearchField.set(name='Table Status', key='data.table_status',
                        enums={
                            'ACTIVE': {'label': 'ACTIVE', 'icon': {'color': 'green.500'}},
                            'CREATING': {'label': 'CREATING', 'icon': {'color': 'yellow.500'}},
                            'UPDATING': {'label': 'UPDATING', 'icon': {'color': 'yellow.500'}},
                            'DELETING': {'label': 'DELETING', 'icon': {'color': 'yellow.500'}},
                            'ARCHIVING': {'label': 'ARCHIVING', 'icon': {'color': 'yellow.500'}},
                            'INACCESSIBLE_ENCRYPTION_CREDENTIALS': {'label': 'INACCESSIBLE_ENCRYPTION_CREDENTIALS',
                                                                    'icon': {'color': 'red.500'}},
                            'ARCHIVED': {'label': 'ARCHIVED', 'icon': {'color': 'red.500'}}
                        }),
        SearchField.set(name='Storage Size (Bytes)', key='instance_size', data_type='integer'),
        SearchField.set(name='Item Count', key='data.item_count', data_type='integer'),
    ],
    widget=[
        CardWidget.set(**get_data_from_yaml(total_count_conf)),
        CardWidget.set(**get_data_from_yaml(storage_total_size_conf)),
        CardWidget.set(**get_data_from_yaml(item_total_count_conf)),
        CardWidget.set(**get_data_from_yaml(read_capacity_total_conf)),
        CardWidget.set(**get_data_from_yaml(write_capacity_total_conf)),
        ChartWidget.set(**get_data_from_yaml(table_count_by_region_conf)),
        ChartWidget.set(**get_data_from_yaml(table_count_by_account_conf)),
        ChartWidget.set(**get_data_from_yaml(top_table_size_conf)),
    ]
)
# Exported list consumed by the collector framework.
CLOUD_SERVICE_TYPES = [
    CloudServiceTypeResponse({'resource': cst_table}),
]
| StarcoderdataPython |
9753562 | from global_data import db
from sqlalchemy.orm import relationship
class SessionModel(db.Model):
    """ORM model for a course session run by an instructor.

    All mutating helpers return plain dicts so callers can serialise the
    outcome directly.  Failed commits now roll the SQLAlchemy session back
    (previously the bare ``except:`` swallowed every error and left the
    session in a broken state).
    """
    __tablename__ = 'sessions'
    id = db.Column(db.Integer, primary_key = True, nullable=True)
    instructor_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
    # One-to-many link to enrolled students; deletes are handled by the DB
    # (passive_deletes) rather than ORM cascades.
    students = relationship('SessionStudent', backref='sessions', passive_deletes=True)
    def save_to_db(self):
        """Persist this session; report success and the new primary key."""
        try:
            db.session.add(self)
            db.session.commit()
            return {'status': True,
                    'id': self.id}
        except Exception:
            db.session.rollback()  # keep the session usable after a failed commit
            return {'status': False}
    @classmethod
    def find_by_instructor_id(cls, instructor_id):
        """Return the first session for *instructor_id*, or None."""
        return cls.query.filter_by(instructor_id=instructor_id).first()
    @classmethod
    def find_by_course_id(cls, course_id):
        """Return the first session for *course_id*, or None."""
        return cls.query.filter_by(course_id=course_id).first()
    @classmethod
    def return_all(cls):
        """Return every session id as {'sessions': [{'id': ...}, ...]}."""
        def to_json(x):
            return {
                'id': x.id,
            }
        return {'sessions': [to_json(x) for x in SessionModel.query.all()]}
    @classmethod
    def delete_all(cls):
        """Delete every session row; report how many rows were removed."""
        try:
            num_rows_deleted = db.session.query(cls).delete()
            db.session.commit()
            return {'message': '{} row(s) deleted'.format(num_rows_deleted)}
        except Exception:
            db.session.rollback()
            return {'message': 'Something went wrong'}
    @classmethod
    def delete(cls, instructor_id):
        """Delete the session(s) owned by *instructor_id*."""
        try:
            db.session.query(cls).filter_by(instructor_id = instructor_id).delete()
            db.session.commit()
            return {'message': 'Session deleted.'}
        except Exception:
            db.session.rollback()
            return {'message': 'Something went wrong'}
    @classmethod
    def update_notification(cls, instructor_id, notification):
        """Set the notification text on the instructor's session.

        NOTE(review): no ``notification`` column is declared on this model in
        the visible code, so the assignment below may only set a transient
        Python attribute that never persists -- confirm the column exists.
        """
        try:
            ses = db.session.query(cls).filter_by(instructor_id = instructor_id).first()
            ses.notification = notification
            db.session.commit()
            return {'message': 'Instructor notification updated.'}
        except Exception:
            db.session.rollback()
            return {'message': 'Something went wrong.'}
| StarcoderdataPython |
6688157 | from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase, TestCase
class PatientWebDriver(WebDriver):
    """Firefox WebDriver extended with tolerant lookup and wait helpers."""
    def element_exists(self, css):
        """
        Checks if a certain element exists in the DOM Tree.

        Returns the element if found, ``False`` otherwise (never raises).
        """
        try:
            return self.find_element_by_css_selector(css)
        except NoSuchElementException:
            return False
    def find_css(self, css_selector):
        """
        Shortcut to find elements by CSS. Returns either a list or
        singleton

        Raises NoSuchElementException when nothing matches; returns the
        single element when exactly one matches, else the list of matches.
        """
        elems = self.find_elements_by_css_selector(css_selector)
        found = len(elems)
        if found == 1:
            return elems[0]
        elif not elems:
            raise NoSuchElementException(css_selector)
        return elems
    def wait_for_css(self, css_selector, timeout=7):
        """
        Shortcut for WebDriverWait

        Blocks up to *timeout* seconds until the selector matches.
        """
        return WebDriverWait(self, timeout).until(lambda driver : driver.find_css(css_selector))
class BaseSeleniumTestCase(LiveServerTestCase):
    """Live-server test case that owns one shared browser per test class."""
    @classmethod
    def setUpClass(cls):
        # One browser instance for the whole class; torn down in tearDownClass.
        cls.selenium = PatientWebDriver()
        super(BaseSeleniumTestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(BaseSeleniumTestCase, cls).tearDownClass()
    def open(self, url):
        """Navigate the browser to *url* relative to the live test server."""
        self.selenium.get("%s%s" % (self.live_server_url, url))
class YawdAdminBaseSeleniumTestCase(BaseSeleniumTestCase):
    """Selenium helpers for driving the yawd-admin (Django admin) UI.

    Subclasses must set ``username`` and ``password`` before calling
    :meth:`login`.
    """
    username = ''
    password = ''
    def add_inline(self, prefix):
        """Click 'add another' for *prefix* inlines; return the new row index."""
        last = int(self.selenium.find_css('#%s-wrapper .inline-related' % prefix)[-2].get_attribute('id').replace('%s-' % prefix, ''))
        self.selenium.find_css('#%s-wrapper .add-row a' % prefix).click()
        WebDriverWait(self.selenium, 10).until(lambda d: d.element_exists('#%s-%s' % (prefix, (last+1))))
        return last + 1
    def close_modal_inline(self, prefix):
        """Dismiss the modal for *prefix* and wait until it is hidden."""
        self.selenium.find_css('#modal-wrapper-%s .modal-footer button' % prefix).click()
        WebDriverWait(self.selenium, 10).until(lambda d: not d.find_css('#modal-wrapper-%s' % prefix).is_displayed())
    def fill_modal_inlines(self, prefix, callback, items, add=True):
        """
        if `add` is set to False, the modal form is expected to exist
        in the DOM, otherwise the 'Add another inline' link will be clicked.
        You can provide an empty dict among the items list. For example
        if you don't want the second inline to be edited, your items list
        could look like this:

        .. code-block:: python

            items = [{'title': 'whatever', ... },
                     {},
                     {'title': 'whatever'}]
        """
        c=0
        for i in items:
            if i:
                self.open_modal_inline('%s-%s' % (prefix, c))
                callback(inline_prefix=c, **i)
                self.close_modal_inline('%s-%s' % (prefix, c))
            if add:
                c = self.add_inline(prefix)
            else:
                c += 1
    def fill_input_text(self, el, value):
        """Replace the text of input element *el* with *value*."""
        el.clear()
        el.send_keys(value)
    def login(self):
        """Log into the admin with the class credentials."""
        self.open(reverse('admin:index'))
        self.selenium.find_css('#id_username').send_keys(self.username)
        # Fixed: an anonymisation placeholder (`<PASSWORD>`) had replaced the
        # password expression, which was a syntax error; type the configured
        # class-level password instead.
        self.selenium.find_css('#id_password').send_keys(self.password)
        self.selenium.find_element_by_xpath('//input[@value="Log in"]').click()
        self.selenium.wait_for_css("#content-main")
    def logout(self):
        """Log out through the user dropdown menu."""
        self.selenium.find_css('#logged-user-menu .dropdown-toggle').click()
        self.selenium.find_css('#logout-link').click()
        self.selenium.wait_for_css('.page-header')
    def open_modal_inline(self, prefix):
        """Open the modal editor for the *prefix* inline and wait for it."""
        self.selenium.find_element_by_xpath('//a[@class="inline-modal" and @href="#modal-wrapper-%s"]' % prefix).click()
        WebDriverWait(self.selenium, 10).until(lambda d: d.find_css('#modal-wrapper-%s' % prefix).is_displayed())
    def save_and_continue(self):
        """Submit the change form with 'Save and continue editing'."""
        self.selenium.find_css('input[name="_continue"]').click()
        self.selenium.wait_for_css("#content-main")
    def save(self):
        """Submit the change form with 'Save'."""
        self.selenium.find_css('input[name="_save"]').click()
        self.selenium.wait_for_css("#content-main")
    def delete(self):
        """Delete the current object, confirming the admin prompt."""
        self.selenium.find_element_by_xpath('//a[text()="Delete" and contains(@class, "deletelink")]').click()
        self.selenium.wait_for_css("#content-main")
        self.selenium.find_element_by_xpath('//input[@type="submit"]').click()
        self.selenium.wait_for_css("#content-main")
    def do_action(self, value):
        """Run the changelist bulk action identified by *value*."""
        action_select = Select(self.selenium.find_css('select[name="action"]'))
        action_select.select_by_value(value)
        self.selenium.find_css('button[name="index"]').click()
        self.selenium.wait_for_css("#content-main")
    def check_selected_action(self, id):
        """Tick the changelist row checkbox whose value is *id*."""
        self.selenium.wait_for_css('input[name="_selected_action"]')
        self.selenium.find_element_by_xpath('//input[@value="%s" and @name="_selected_action"]' % id).click()
    def confirm_action(self):
        """Confirm the pending bulk action on the confirmation page."""
        self.selenium.find_css('input[type="submit"]').click()
        self.selenium.wait_for_css('#content-main')
class PermissionTestCase(TestCase):
    """
    Hit every standard admin view of a model and assert that each responds
    with the expected HTTP status (e.g. 200 on success, 403 permission denied).
    """
    def naive_admin_check(self, model_prefix, expected_status, item=1):
        # (view suffix, reverse() args) for each stock admin view, requested
        # in the same order as before: changelist, change, history, delete, add.
        admin_views = (
            ('changelist', ()),
            ('change', (item,)),
            ('history', (item,)),
            ('delete', (item,)),
            ('add', ()),
        )
        for suffix, view_args in admin_views:
            url = reverse('admin:%s_%s' % (model_prefix, suffix), args=view_args)
            self.assertEqual(self.client.get(url).status_code, expected_status)
| StarcoderdataPython |
8137836 | <reponame>calmisential/SkeNetch
import torch
import torch.nn as nn
from utils.auto_padding import same_padding
class DeformableConv2d(nn.Module):
    """
    Deformable convolution (v2 when ``modulation=True``).
    Ref: https://github.com/dontLoveBugs/Deformable_ConvNet_pytorch/blob/master/network/deform_conv/deform_conv_v2.py
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=False, modulation=False):
        super(DeformableConv2d, self).__init__()
        self.k = kernel_size
        self.zero_padding = nn.ZeroPad2d(padding=same_padding(kernel_size, stride))
        self.s = stride
        self.modulation = modulation
        if self.modulation:
            # Each kernel position gets its own modulation weight (v2).
            self.m_conv = nn.Conv2d(in_channels=in_channels, out_channels=kernel_size * kernel_size,
                                    kernel_size=kernel_size,
                                    stride=stride, padding=same_padding(kernel_size, stride))
        # Predicts a (y, x) offset for every kernel position.
        self.p_conv = nn.Conv2d(in_channels=in_channels, out_channels=2 * kernel_size * kernel_size,
                                kernel_size=kernel_size,
                                stride=stride, padding=same_padding(kernel_size, stride))
        # The convolution actually applied at the end.  Its stride equals the
        # kernel size because each output location has been expanded into its
        # k*k sampled points in ``x_offset`` (see _reshape_x_offset).
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              stride=kernel_size, bias=bias)
    def forward(self, x):
        """
        x : torch.Tensor, shape: (B, C, H, W)

        Returns the deformably-convolved feature map.
        """
        offset = self.p_conv(x)  # (B, 2k^2, H, W)
        if self.modulation:
            m = torch.sigmoid(self.m_conv(x))  # (B, k^2, H, W)
        data_type = offset.data.type()
        N = self.k * self.k
        x = self.zero_padding(x)
        # Absolute (fractional) sampling coordinates for every kernel position.
        p = self._get_p(offset, data_type)  # shape: (batch_size, 2N, out_h, out_w)
        p = p.contiguous().permute(0, 2, 3, 1)  # shape: (batch_size, out_h, out_w, 2N)
        # Integer corner coordinates for bilinear interpolation.
        q_lt = p.detach().floor()
        q_rb = q_lt + 1
        # top-left, shape: (batch_size, out_h, out_w, 2N)
        q_lt = torch.cat(tensors=[
            torch.clamp(input=q_lt[..., :N], min=0, max=x.size(2)-1),
            torch.clamp(input=q_lt[..., N:], min=0, max=x.size(3)-1)
        ], dim=-1).long()
        # bottom-right, shape: (batch_size, out_h, out_w, 2N)
        q_rb = torch.cat(tensors=[
            torch.clamp(input=q_rb[..., :N], min=0, max=x.size(2) - 1),
            torch.clamp(input=q_rb[..., N:], min=0, max=x.size(3)-1)
        ], dim=-1).long()
        # bottom-left, shape: (batch_size, out_h, out_w, 2N)
        q_lb = torch.cat([q_lt[..., :N], q_rb[..., N:]], dim=-1)
        # top-right, shape: (batch_size, out_h, out_w, 2N)
        q_rt = torch.cat([q_rb[..., :N], q_lt[..., N:]], dim=-1)
        # Clamp the fractional coordinates into the padded image bounds.
        p = torch.cat([torch.clamp(p[..., :N], 0, x.size(2) - 1), torch.clamp(p[..., N:], 0, x.size(3) - 1)], dim=-1)
        # bilinear kernel (b, h, w, N)
        g_lt = (1 + (q_lt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_lt[..., N:].type_as(p) - p[..., N:]))
        g_rb = (1 - (q_rb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_rb[..., N:].type_as(p) - p[..., N:]))
        g_lb = (1 + (q_lb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_lb[..., N:].type_as(p) - p[..., N:]))
        g_rt = (1 - (q_rt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_rt[..., N:].type_as(p) - p[..., N:]))
        # Gather the feature values at the four corners: (b, c, h, w, N)
        x_q_lt = self._get_x_q(x, q_lt, N)
        x_q_rb = self._get_x_q(x, q_rb, N)
        x_q_lb = self._get_x_q(x, q_lb, N)
        x_q_rt = self._get_x_q(x, q_rt, N)
        # Bilinear interpolation: (b, c, h, w, N)
        x_offset = g_lt.unsqueeze(dim=1) * x_q_lt + \
                   g_rb.unsqueeze(dim=1) * x_q_rb + \
                   g_lb.unsqueeze(dim=1) * x_q_lb + \
                   g_rt.unsqueeze(dim=1) * x_q_rt
        # modulation
        if self.modulation:
            m = m.contiguous().permute(0, 2, 3, 1)
            m = m.unsqueeze(dim=1)
            m = torch.cat([m for _ in range(x_offset.size(1))], dim=1)
            x_offset *= m
        x_offset = self._reshape_x_offset(x_offset, self.k)  # (b, c, h * k, w * k)
        out = self.conv(x_offset)
        return out
    def _init_weights(self):
        # NOTE(review): this initialiser is never called in the visible code
        # (neither __init__ nor forward invokes it) -- confirm callers exist.
        # Initialise the offsets to zero so training starts as a regular conv.
        nn.init.constant_(self.p_conv.weight, 0)
        if self.modulation:
            # Initialise all kernel-position modulation weights to 1.
            nn.init.constant_(self.m_conv.weight, 1)
    def _get_p(self, offset, data_type):
        """Absolute sampling positions = centre grid + fixed kernel offsets + learned offsets."""
        h, w = offset.shape[2:]
        p0 = self._get_p0(h, w, data_type)
        pn = self._get_pn(data_type)
        p = p0 + pn + offset
        return p
    def _get_p0(self, out_h, out_w, data_type):
        # Map every output-feature-map location to the coordinate of its
        # kernel centre in the (padded) input feature map.
        N = self.k * self.k
        # kernel centre offset
        kc = self.k // 2
        # p0_y shape: (out_h, out_w), p0_x shape: (out_h, out_w)
        p0_y, p0_x = torch.meshgrid(
            torch.arange(start=kc, end=out_h * self.s + kc, step=self.s),
            torch.arange(start=kc, end=out_w * self.s + kc, step=self.s)
        )
        # shape: (1, N, out_h, out_w)
        p0_y = p0_y.flatten().view(1, 1, out_h, out_w).repeat(1, N, 1, 1)
        p0_x = p0_x.flatten().view(1, 1, out_h, out_w).repeat(1, N, 1, 1)
        # (1, 2N, out_h, out_w)
        p0 = torch.cat(tensors=[p0_y, p0_x], dim=1).type(data_type)
        return p0
    def _get_pn(self, data_type):
        # Fixed offset of each kernel position relative to the kernel centre.
        N = self.k * self.k
        # shape: (k, k), (k, k)
        pn_y, pn_x = torch.meshgrid(
            torch.arange(start=-(self.k//2), end=self.k//2+1, step=1),
            torch.arange(start=-(self.k//2), end=self.k//2+1, step=1)
        )
        pn_y = pn_y.flatten().view(1, N, 1, 1)
        pn_x = pn_x.flatten().view(1, N, 1, 1)
        # (1, 2N, 1, 1)
        pn = torch.cat(tensors=[pn_y, pn_x], dim=1).type(data_type)
        return pn
    def _get_x_q(self, x, q, N):
        """Gather feature values of ``x`` at integer coordinates ``q`` -> (b, c, h, w, N)."""
        b, h, w, _ = q.size()
        padded_w = x.size(3)
        c = x.size(1)
        # (b, c, h*w)
        x = x.contiguous().view(b, c, -1)
        # (b, h, w, N): flattened index y*W + x into the padded feature map
        index = q[..., :N] * padded_w + q[..., N:]  # offset_x*w + offset_y
        # (b, c, h*w*N)
        index = index.contiguous().unsqueeze(dim=1).expand(-1, c, -1, -1, -1).contiguous().view(b, c, -1)
        x_offset = x.gather(dim=-1, index=index).contiguous().view(b, c, h, w, N)
        return x_offset
    @staticmethod
    def _reshape_x_offset(x_offset, ks):
        """Unfold (b, c, h, w, k*k) samples into a (b, c, h*k, w*k) mosaic."""
        b, c, h, w, N = x_offset.size()
        x_offset = torch.cat([x_offset[..., s:s + ks].contiguous().view(b, c, h, w * ks) for s in range(0, N, ks)],
                             dim=-1)
        x_offset = x_offset.contiguous().view(b, c, h * ks, w * ks)
        return x_offset
| StarcoderdataPython |
8024192 | <filename>tests/test_game.py
import time
import unittest
from tests import initialize_screenshot, initialize_video
from tft import game, tracker, main, handler, debugger, utils
Test1080PDefaultScreenshot = "/Users/henry/Downloads/TFT Screenshots/board_1080_1.png"
Test1440PDefaultScreenshot = "/Users/henry/Downloads/TFT Screenshots/board_1440_5.png"
Test1080PDefaultRecording = "/Users/henry/Downloads/TFT Screenshots/video_1080_3_short_3.mp4"
Test1440PDefaultRecording = "/Users/henry/Downloads/TFT Screenshots/video_1440_3.mkv"
def generate_player_list():
    """Return the fixed eight-name placeholder roster used by the tests."""
    return tuple(f"player{i}" for i in range(1, 9))
class TestGame(unittest.TestCase):
    """End-to-end checks driving the TFT parser against saved media.

    NOTE(review): the fixture paths above are hard-coded to a developer's
    machine, so these tests only run where those files exist -- consider
    relocating the fixtures into the repository.
    """
    def test_parse_state_1080p_screenshot(self):
        test_screenshot(self, Test1080PDefaultScreenshot)
    def test_parse_state_1440p_screenshot(self):
        test_screenshot(self, Test1440PDefaultScreenshot)
    def test_parse_state_1440p_video(self):
        gameDebugger = debugger.Debugger()
        gameWindow = initialize_video(Test1440PDefaultRecording)
        main.main(gameWindow, gameDebugger, f"test/testing_1440_{utils.generate_file_name()}")
    def test_parse_state_1080p_video(self):
        gameDebugger = debugger.Debugger()
        gameWindow = initialize_video(Test1080PDefaultRecording)
        main.main(gameWindow, gameDebugger, f"test/testing_1080_{utils.generate_file_name()}")
def test_screenshot(testcase, file_name):
    """Shared helper: run one saved screenshot through the parse pipeline."""
    gameWindow, gameBoard = initialize_screenshot(file_name)
    gameTracker = tracker.Tracker([], file_name=None)
    gameHandler = handler.Handler(gameTracker.getEntryQueue())
    img = gameWindow.captureWindow()
    game.parse_state(img, gameBoard, gameTracker, gameHandler)
    # Presumably gives the tracker's background consumers time to drain the
    # entry queue before the test process exits -- confirm whether a join()
    # on the tracker would be more reliable than a fixed sleep.
    time.sleep(10)
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8020720 | # %%
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from transformers import (
AutoTokenizer,
AutoModelForQuestionAnswering,
)
from captum.attr import visualization as viz
from captum.attr import (
IntegratedGradients,
LayerConductance,
LayerIntegratedGradients,
LayerActivation,
)
from captum.attr import (
configure_interpretable_embedding_layer,
remove_interpretable_embedding_layer,
)
# %%
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# The first step is to fine-tune BERT model on SQUAD dataset. This can be easiy accomplished by following the steps described in hugging face's official web site: https://github.com/huggingface/transformers#run_squadpy-fine-tuning-on-squad-for-question-answering
#
# Note that the fine-tuning is done on a `bert-base-uncased` pre-trained model.
# After we pretrain the model, we can load the tokenizer and pre-trained BERT model using the commands described below.
# %%
# load model
tokenizer = AutoTokenizer.from_pretrained("csarron/bert-base-uncased-squad-v1")
model = AutoModelForQuestionAnswering.from_pretrained(
"csarron/bert-base-uncased-squad-v1", output_attentions=True
)
model.eval()
model.zero_grad()
# A helper function to perform forward pass of the model and make predictions.
# %%
def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):
    """Forward the QA model and unpack (start_logits, end_logits, attentions)."""
    outputs = model(
        inputs,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        attention_mask=attention_mask,
    )
    return outputs.start_logits, outputs.end_logits, outputs.attentions
# Defining a custom forward function that will allow us to access the start and end positions of our prediction using `position` input argument.
# %%
def squad_pos_forward_func(
    inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0
):
    """Custom forward for attribution over input embeddings.

    ``position`` selects which logits to attribute (per the note above:
    0 -> start position, 1 -> end position; the model output is indexed
    by integer -- confirm against the installed transformers version).
    Returns the per-example maximum logit, shape (batch,).
    """
    pred = model(
        inputs_embeds=inputs,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        attention_mask=attention_mask,
    )
    pred = pred[position]  # pick start (0) or end (1) logits
    return pred.max(1).values
# Let's define some variables and functions that will help us to compute the attribution of attention matrices for specific output such as start or end positions of the prediction.
#
# To do so, we need to define baselines / references, numericalize both the baselines and the inputs. We will define helper functions to achieve that.
#
# The cell below defines numericalized special tokens that will be later used for constructing inputs and corresponding baselines/references.
# %%
# Numericalized special tokens used to build inputs and their baselines.
ref_token_id = tokenizer.pad_token_id # A token used for generating token reference
sep_token_id = (
    tokenizer.sep_token_id
)  # A token used as a separator between question and text and it is also added to the end of the text.
cls_token_id = (
    tokenizer.cls_token_id
)  # A token used for prepending to the concatenated question-text word sequence
# Below we define a set of helper function for constructing references / baselines for word tokens, token types and position ids.
# %%
def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):
    """Numericalize a question/text pair and a PAD-token baseline of identical layout.

    Returns (input_ids, ref_input_ids, question_length) with the id tensors
    batched (shape ``(1, L)``) on the module-level ``device``.
    """
    q_ids = tokenizer.encode(question, add_special_tokens=False)
    t_ids = tokenizer.encode(text, add_special_tokens=False)
    # Real input: [CLS] question [SEP] text [SEP]
    input_ids = [cls_token_id, *q_ids, sep_token_id, *t_ids, sep_token_id]
    # Baseline: same special tokens, all content tokens replaced by PAD.
    ref_input_ids = [
        cls_token_id,
        *([ref_token_id] * len(q_ids)),
        sep_token_id,
        *([ref_token_id] * len(t_ids)),
        sep_token_id,
    ]
    return (
        torch.tensor([input_ids], device=device),
        torch.tensor([ref_input_ids], device=device),
        len(q_ids),
    )
def construct_input_ref_token_type_pair(input_ids, sep_ind=0):
    """Token-type ids (0 up to and including ``sep_ind``, 1 after) plus an all-zero baseline."""
    seq_len = input_ids.size(1)
    segment_flags = [0 if pos <= sep_ind else 1 for pos in range(seq_len)]
    token_type_ids = torch.tensor([segment_flags], device=device)
    ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)  # * -1
    return token_type_ids, ref_token_type_ids
def construct_input_ref_pos_id_pair(input_ids):
    """Position ids 0..L-1 and an all-zero baseline, expanded to the batch shape."""
    seq_length = input_ids.size(1)
    pos = torch.arange(seq_length, dtype=torch.long, device=device)
    # we could potentially also use random permutation with `torch.randperm(seq_length, device=device)`
    ref_pos = torch.zeros(seq_length, dtype=torch.long, device=device)
    return pos.unsqueeze(0).expand_as(input_ids), ref_pos.unsqueeze(0).expand_as(input_ids)
def construct_attention_mask(input_ids):
    """All-ones attention mask matching ``input_ids`` (no padding positions)."""
    return torch.ones(input_ids.shape, dtype=input_ids.dtype, device=input_ids.device)
def construct_whole_bert_embeddings(
    input_ids,
    ref_input_ids,
    token_type_ids=None,
    ref_token_type_ids=None,
    position_ids=None,
    ref_position_ids=None,
):
    """Map input and reference token ids to word embeddings.

    Relies on the module-level ``interpretable_embedding`` wrapper
    (configure_interpretable_embedding_layer over the word-embedding layer).
    The token-type / position arguments are accepted for symmetry with the
    other helpers but are currently unused here.
    """
    input_embeddings = interpretable_embedding.indices_to_embeddings(input_ids)
    ref_input_embeddings = interpretable_embedding.indices_to_embeddings(ref_input_ids)
    return input_embeddings, ref_input_embeddings
# Let's define the `question - text` pair that we'd like to use as an input for our Bert model and interpret what the model was focusing on when predicting an answer to the question from given input text
# %%
question, text = (
"What is important to us?",
"It is important to us to include, empower and support humans of all kinds.",
)
# Let's numericalize the question, the input text and generate corresponding baselines / references for all three sub-embeddings (word, token type and position embeddings) types using our helper functions defined above.
# %%
input_ids, ref_input_ids, sep_id = construct_input_ref_pair(
question, text, ref_token_id, sep_token_id, cls_token_id
)
token_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(
input_ids, sep_id
)
position_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)
attention_mask = construct_attention_mask(input_ids)
indices = input_ids[0].detach().tolist()
all_tokens = tokenizer.convert_ids_to_tokens(indices)
# Also, let's define the ground truth for prediction's start and end positions.
# In[10]:
ground_truth = "to include, empower and support humans of all kinds"
ground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)
ground_truth_end_ind = indices.index(ground_truth_tokens[-1])
ground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1
# Now let's make predictions using input, token type, position id and a default attention mask.
# In[11]:
start_scores, end_scores, output_attentions = predict(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask,
)
print("Question: ", question)
print(
"Predicted Answer: ",
" ".join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores) + 1]),
)
# # Visualizing Attention Matrices
# `output_attentions` represent attention matrices aka attention probabilities for all 12 layers and all 12 heads. It represents softmax-normalized dot-product between the key and query vectors. In the literature (https://www.aclweb.org/anthology/W19-4828.pdf) it has been used as an importance indicator of how much a token attends / relates to another token in the text. In case of translation for example it is a good indicator of how much a token in one language attends to the corresponding translation in another language. In case of Question Answering model it indicates which tokens attend / relate to each other in question, text or answer segment.
#
# Since `output_attentions` contains the layers in a list, we will stack them in order to move everything into a tensor.
# In[12]:
# shape -> layer x batch x head x seq_len x seq_len
output_attentions_all = torch.stack(output_attentions)
# #### A helper function for visualizing Token-To-Token matices
# Below helper function will be used for visualizing token-to-token relation / attention scores for all heads in a given layer or for all layers across all heads.
# In[13]:
def visualize_token2token_scores(scores_mat, x_label_name="Head"):
    """Plot a grid of token-to-token heatmaps, one subplot per head (or layer).

    scores_mat: iterable of square (seq_len x seq_len) score matrices; both
    axes are labelled with the module-level ``all_tokens`` sequence.
    x_label_name: caption prefix for each subplot ("Head" or "Layer").
    """
    fig = plt.figure(figsize=(20, 20))
    for idx, scores in enumerate(scores_mat):
        scores_np = np.array(scores)
        ax = fig.add_subplot(4, 3, idx + 1)
        # Draw the attention/attribution weights for this head/layer.
        # (Fix: the converted array was previously computed but unused.)
        im = ax.imshow(scores_np, cmap="viridis")
        fontdict = {"fontsize": 10}
        ax.set_xticks(range(len(all_tokens)))
        ax.set_yticks(range(len(all_tokens)))
        ax.set_xticklabels(all_tokens, fontdict=fontdict, rotation=90)
        ax.set_yticklabels(all_tokens, fontdict=fontdict)
        ax.set_xlabel("{} {}".format(x_label_name, idx + 1))
        fig.colorbar(im, fraction=0.046, pad=0.04)
    plt.tight_layout()
    plt.show()
# #### A helper function for visualizing Token-To-Head matrices
# Below helper function will be used for visualizing the importance scores for tokens across all heads in all layers.
# In[14]:
def visualize_token2head_scores(scores_mat):
    """Plot per-layer heatmaps of token importance scores across heads.

    scores_mat: iterable with one (num_heads x seq_len) matrix per layer;
    the x axis is labelled with the module-level ``all_tokens``, the y axis
    with head indices.
    """
    fig = plt.figure(figsize=(30, 50))
    for idx, scores in enumerate(scores_mat):
        scores_np = np.array(scores)
        ax = fig.add_subplot(6, 2, idx + 1)
        # append the attention weights
        im = ax.matshow(scores_np, cmap="viridis")
        fontdict = {"fontsize": 20}
        ax.set_xticks(range(len(all_tokens)))
        ax.set_yticks(range(len(scores)))
        ax.set_xticklabels(all_tokens, fontdict=fontdict, rotation=90)
        ax.set_yticklabels(range(len(scores)), fontdict=fontdict)
        ax.set_xlabel("Layer {}".format(idx + 1))
        fig.colorbar(im, fraction=0.046, pad=0.04)
    plt.tight_layout()
    plt.show()
# Let's examine a specific layer. For that reason we will define a fixed layer id that will be used for visualization purposes. The users are free to change this layer if they want to examine a different one.
#
# In[15]:
layer = 11
# Visualizing attention matrices for a selected layer `layer`.
# In[16]:
visualize_token2token_scores(
output_attentions_all[layer].squeeze().detach().cpu().numpy()
)
# Based on the visualizations above we observe that there is a high attention set along the diagonals and on an uninformative token such as `[SEP]`. This is something that was observed in previous papers which indicates that attention matrices aren't always a good indicator of finding which tokens are more important or which token is related to which. We observe similar pattern when we examine another layer.
# In the cell below we compute and visualize L2 norm across head axis for all 12 layer. This provides a summary for each layer across all heads.
# Defining normalization function depending on pytorch version.
# In[17]:
# Pick the tensor-norm implementation once, for use throughout the script.
# Fix: the previous check compared version *strings* lexicographically
# (torch.__version__ >= "1.7.0"), which mis-classifies e.g. "1.10.0".
# Probing for the torch.linalg API directly is version-proof.
try:
    norm_fn = torch.linalg.norm
except AttributeError:  # torch < 1.7 has no torch.linalg module
    norm_fn = torch.norm
# In[18]:
visualize_token2token_scores(
norm_fn(output_attentions_all, dim=2).squeeze().detach().cpu().numpy(),
x_label_name="Layer",
)
# Based on the visualiziation above we can convince ourselves that attention scores aren't trustworthy measures of importances for token-to-token relations across all layers. We see strong signal along the diagonal and for the `[SEP]` and `[CLS]` tokens. These signals, however, aren't true indicators of what semantic the model learns.
#
# # Visualizing attribution / importance scores
# In the cells below we visualize the attribution scores of attention matrices for the start and end position positions prediction and compare with the actual attention matrices. To do so, first of all, we compute the attribution scores using LayerConductance algorithm similar to Part 1.
# A helper function to summarize attributions for each word token in the sequence.
# In[19]:
def summarize_attributions(attributions):
    """Collapse per-embedding-dimension attributions to one score per token.

    Sums over the hidden dimension, drops the batch axis, then normalizes
    the resulting vector by its norm (module-level ``norm_fn``).
    """
    per_token = attributions.sum(dim=-1).squeeze(0)
    return per_token / norm_fn(per_token)
# # Interpreting BertLayer Outputs and Self-Attention Matrices in each Layer
# Now let's look into the layers of our network. More specifically we would like to look into the distribution of attribution scores for each token across all layers and attribution matrices for each head in all layers in Bert model.
# We do that using one of the layer attribution algorithms, namely, layer conductance. However, we encourage you to try out and compare the results with other algorithms as well.
#
#
# Let's configure `InterpretableEmbeddingsBase` again, in this case in order to interpret the layers of our model.
# In[ ]:
interpretable_embedding = configure_interpretable_embedding_layer(
model, "bert.embeddings.word_embeddings"
)
# Let's iterate over all layers and compute the attributions w.r.t. all tokens in the input and attention matrices.
#
# Note: Since below code is iterating over all layers it can take over 5 seconds. Please be patient!
# In[21]:
layer_attrs_start = []
layer_attrs_end = []
layer_attn_mat_start = []
layer_attn_mat_end = []
input_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(
input_ids,
ref_input_ids,
token_type_ids=token_type_ids,
ref_token_type_ids=ref_token_type_ids,
position_ids=position_ids,
ref_position_ids=ref_position_ids,
)
for i in range(model.config.num_hidden_layers):
lc = LayerConductance(squad_pos_forward_func, model.bert.encoder.layer[i])
layer_attributions_start = lc.attribute(
inputs=input_embeddings,
baselines=ref_input_embeddings,
additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),
)
layer_attributions_end = lc.attribute(
inputs=input_embeddings,
baselines=ref_input_embeddings,
additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),
)
layer_attrs_start.append(summarize_attributions(layer_attributions_start[0]))
layer_attrs_end.append(summarize_attributions(layer_attributions_end[0]))
layer_attn_mat_start.append(layer_attributions_start[1])
layer_attn_mat_end.append(layer_attributions_end[1])
# In[22]:
# layer x seq_len
layer_attrs_start = torch.stack(layer_attrs_start)
# layer x seq_len
layer_attrs_end = torch.stack(layer_attrs_end)
# layer x batch x head x seq_len x seq_len
layer_attn_mat_start = torch.stack(layer_attn_mat_start)
# layer x batch x head x seq_len x seq_len
layer_attn_mat_end = torch.stack(layer_attn_mat_end)
# As a reminder of Part 1 we visualize the heatmaps of the attributions for the outputs of all 12 layers in the plots below. The outputs of 12 layers are also known as context layer which represents the dot product between the attribution matrices and value vector.
#
# The plot below represents a heatmap of attributions across all layers and tokens for the start position prediction.
#
# Note that here we do not have information about different heads. Heads related information will be examined separately when we visualize the attribution scores of the attention matrices w.r.t. the start or end position predictions.
#
# It is interesting to observe that the question word `what` gains increasingly high attribution from layer one to ten. In the last two layers that importance is slowly diminishing.
# In contrary to `what` token, many other tokens have negative or close to zero attribution in the first 6 layers.
#
# We start seeing slightly higher attribution in tokens `important`, `us` and `to`. Interestingly token `important` is also assigned high attribution score which is remarkably high in the fifth and sixth layers.
#
# Lastly, our correctly predicted token `to` gains increasingly high positive attribution especially in the last two layers.
#
# In[23]:
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(
layer_attrs_start.cpu().detach().numpy(),
xticklabels=xticklabels,
yticklabels=yticklabels,
linewidth=0.2,
)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
# Now let's examine the heat map of the attributions for the end position prediction. In the case of end position prediction we again observe high attribution scores for the token `what` in the last 11 layers.
# Correctly predicted end token `kinds` has positive attribution across all layers and it is especially prominent in the last two layers. It's also interesting to observe that `humans` token also has relatively high attribution score in the last two layers.
# In[24]:
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(
layer_attrs_end.cpu().detach().numpy(),
xticklabels=xticklabels,
yticklabels=yticklabels,
linewidth=0.2,
) # , annot=True
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
# It is interesting to note that when we compare the heat maps of start and end position, overall the colors for start position prediction on the map have darker intensities. This implies that there are less tokens that attribute positively to the start position prediction and there are more tokens which are negative indicators or signals of start position prediction.
# # Interpreting Attribution Scores for Attention Matrices
# In this section we visualize the attribution scores of start and end position predictions w.r.t. attention matrices.
# Note that each layer has 12 heads, hence attention matrices. We will first visualize for a specific layer and head, later we will summarize across all heads in order to gain a bigger picture.
#
# Below we visualize the attribution scores of 12 heads for selected layer `layer` for start position prediction.
# In[25]:
visualize_token2token_scores(
layer_attn_mat_start[layer].squeeze().cpu().detach().numpy()
)
# As we can see from the visualizations above, in contrary to attention scores the attributions of specific target w.r.t. to those scores are more meaningful and most importantly, they do not attend to `[SEP]` token or show diagonal patterns. We observe that heads 4, 9, 12 and 2 show strong relationship between `what` and `it` tokens when predicting start position, head 10 and 11 between `it` and `it`, heads 8 between `important` and `to` and head 1 between `to` and `what`. Note that `to` token is the start position of the answer token. It is also important to mention that these observations are for a selected `layer`. We can change the index of selected `layer` and examine interesting relationships in other layers.
# In the cell below we visualize the attention attribution scores normalized across the head axis.
# In[26]:
visualize_token2token_scores(
norm_fn(layer_attn_mat_start, dim=2).squeeze().detach().cpu().numpy(),
x_label_name="Layer",
)
# By looking at the visualizations above we can see that the model pays attention to very specific handpicked relationships when making a sprediction for start position. Most notably in the layers 10, 7, 11 and 4 it focuses more on the relationships between `it` and `is`, `important` and `to`.
# Now let's run the same experiments for the end position prediction. Below we visualize the attribution scorese of attention matrices for the end position prediction for the selected `layer`.
# In[27]:
visualize_token2token_scores(layer_attn_mat_end[layer].squeeze().cpu().detach().numpy())
# As we can see from the visualizations above that for the end position prediction we have stronger attention towards the end of the answer token `kinds`. Here we can see stronger connection between `humans` and `kinds` in the 11th head, `it` and `em`, `power`, `and` in the 5th, 6th and 8th heads. The connections between `it` and `what` are also strong in first couple and 10th heads.
# Similar to start position let's visualize the norm across all heads for each layer.
# In[28]:
visualize_token2token_scores(
norm_fn(layer_attn_mat_end, dim=2).squeeze().detach().cpu().numpy(),
x_label_name="Layer",
)
# As we can see from the visualizations above for the end position prediction there is a relation learnt between `[SEP]` and `.` in first and second layers. Also we observe that `it` token is strongly related to `what`, `important` and `to`.
# # Computing and Visualizing Vector Norms
# In this section of the tutorial we will compute Vector norms for activation layers such as ||f(x)||, ||α * f(x)|| and ||Σαf(x)|| as also described in the: https://arxiv.org/pdf/2004.10102.pdf
#
# As also shown in the paper mentioned above, normalized activations are better indicators of importance scores than the attention scores however they aren't as indicative as the attribution scores. This is because normalized activations ||f(x)|| and ||α * f(x)|| aren't attributed to a specific output prediction. From our results we can also see that according to those normalized scores `[SEP]` tokens are insignificant.
# Below we define / extract all parameters that we need to computation vector norms.
# In[29]:
output_attentions_all_shape = output_attentions_all.shape
batch = output_attentions_all_shape[1]
num_heads = output_attentions_all_shape[2]
head_size = 64
all_head_size = 768
# In order to compute above mentioned norms we need to get access to dense layer's weights and value vector of the self attention layer.
# #### Getting Access to Value Activations
# Let's define the list of all layers for which we would like to access Value Activations.
# In[30]:
layers = [
model.bert.encoder.layer[layer].attention.self.value
for layer in range(len(model.bert.encoder.layer))
]
# We use `Captum`'s LayerActivation algorithm to access the outputs of all `layers`.
# In[31]:
la = LayerActivation(squad_pos_forward_func, layers)
value_layer_acts = la.attribute(
input_embeddings,
additional_forward_args=(token_type_ids, position_ids, attention_mask),
)
# shape -> layer x batch x seq_len x all_head_size
value_layer_acts = torch.stack(value_layer_acts)
# In the cell below we perform several transformations with the value layer activations and bring it to the shape so that we can compute different norms. The transformations are done the same way as it is described in the original paper and corresponding github implementation.
# In[32]:
new_x_shape = value_layer_acts.size()[:-1] + (num_heads, head_size)
value_layer_acts = value_layer_acts.view(*new_x_shape)
# layer x batch x neum_heads x 1 x head_size
value_layer_acts = value_layer_acts.permute(0, 1, 3, 2, 4)
value_layer_acts = value_layer_acts.permute(0, 1, 3, 2, 4).contiguous()
value_layer_acts_shape = value_layer_acts.size()
# layer x batch x seq_length x num_heads x 1 x head_size
value_layer_acts = value_layer_acts.view(
value_layer_acts_shape[:-1]
+ (
1,
value_layer_acts_shape[-1],
)
)
print("value_layer_acts: ", value_layer_acts.shape)
# #### Getting Access to Dense Features
# Now let's transform dense features so that we can use them to compute ||f(x)|| and ||α * f(x)||.
# In[33]:
dense_acts = torch.stack(
[dlayer.attention.output.dense.weight for dlayer in model.bert.encoder.layer]
)
dense_acts = dense_acts.view(len(layers), all_head_size, num_heads, head_size)
# layer x num_heads x head_size x all_head_size
dense_acts = dense_acts.permute(0, 2, 3, 1).contiguous()
# Computing f(x) score by multiplying the value vector with the weights of the dense vector for all layers.
# In[34]:
# layers, batch, seq_length, num_heads, 1, all_head_size
f_x = torch.stack(
[
value_layer_acts_i.matmul(dense_acts_i)
for value_layer_acts_i, dense_acts_i in zip(value_layer_acts, dense_acts)
]
)
f_x.shape
# In[35]:
# layer x batch x seq_length x num_heads x 1 x all_head_size)
f_x_shape = f_x.size()
f_x = f_x.view(f_x_shape[:-2] + (f_x_shape[-1],))
f_x = f_x.permute(0, 1, 3, 2, 4).contiguous()
# (layers x batch, num_heads, seq_length, all_head_size)
f_x_shape = f_x.size()
# (layers x batch, num_heads, seq_length)
f_x_norm = norm_fn(f_x, dim=-1)
# Now let's visualize ||f(x)|| scores for all layers and examine the distribution of those scores.
# In[36]:
visualize_token2head_scores(f_x_norm.squeeze().detach().cpu().numpy())
# When we examine ||f(x)|| scores for all layers we can easily see that the `[SEP]` token receives the lowest score across all layers. This is one of the conclusions that the original paper came to. In terms of other tokens we can see that the heads in different layers focus on different parts of the input sentence.
# Now let's compute ||α * f_x||. This computation is performed using the original paper's technique with the help of einsum operator.
# In[37]:
# layer x batch x num_heads x seq_length x seq_length x all_head_size
alpha_f_x = torch.einsum("lbhks,lbhsd->lbhksd", output_attentions_all, f_x)
# layer x batch x num_heads x seq_length x seq_length
alpha_f_x_norm = norm_fn(alpha_f_x, dim=-1)
# Let's now visualize ||α * f_x|| scores for the layer with index `layer`.
# In[38]:
visualize_token2token_scores(alpha_f_x_norm[layer].squeeze().detach().cpu().numpy())
# As we can see from the visualizations above there is no strong attention to `[SEP]` or `[CLS]` tokens. Some of the heads show diagonal patterns and some of them show strong attention between specific pairs of tokens.
# Now let's compute the summed norm across `num_heads` axis ||Σαf(x)|| and visualize normalized scores for each layer.
# In[39]:
summed_alpha_f_x = alpha_f_x.sum(dim=2)
# layers x batch x seq_length x seq_length
summed_alpha_f_x_norm = norm_fn(summed_alpha_f_x, dim=-1)
# In[40]:
visualize_token2token_scores(
summed_alpha_f_x_norm.squeeze().cpu().detach().numpy(), x_label_name="Layer"
)
# Above visualizations also confirm that the attention scores aren't concentrated on the tokens such as `[CLS]`, `[SEP]` and `.` however we see stronger signals along the diagonals and some patches of stronger signals between certain parts of the text including some tokens in the question part that are relevant in the answer piece.
# It is important to mention that all experiments were performed for one input sample, namely, `sentence`. In the papers we often see aggregation of the results across multiple samples. For further analysis and more convincing propositions we recommend to conduct the experiments across multiple input samples. In addition to that it would be also interesting to look into the correlation of heads in layer and across different layers.
# %%
| StarcoderdataPython |
1974336 | """The Pygame backend of the renderer.
Defines a pygame surface that implements the
FramebufferSurface interface.
"""
import math
import typing
try:
import pygame # type: ignore
except ImportError:
SUPPORTED = False
else:
SUPPORTED = True
class PygameSurface:
    """A pygame surface, used when rendering the game to a pygame window.
    Implements the renderer's FramebufferSurface interface.
    __init__ takes a pygame.Surface instance; if you want that to be
    a new window, use pygame.display.set_mode(...). Please reference
    the PyGame documentation for more information.
    """
    def __init__(self, surface: pygame.Surface):
        """Initializes this surface, by passing a pygame Surface to it.
        You can create a pygame Surface by using pygame.display.set_mode.
        """
        # The wrapped pygame surface all drawing operations target.
        self.surf = surface
    def get_size(self) -> typing.Tuple[int, int]:
        """Returns the size of the pygame window.
        For all intents and purposes, this is the size
        that the renderer uses.
        NOTE(review): this queries the display window, not self.surf
        (surf.get_size()); correct only while `surface` is the display
        surface — confirm before using off-screen surfaces.
        """
        return pygame.display.get_window_size()
    def _rgb_color(
        self, rgb: typing.Tuple[float, float, float]
    ) -> typing.Tuple[int, int, int]:
        """Converts a color from floating point (0.0-1.0) to 8-bit (0-255).
        Out-of-range components are clamped rather than wrapped.
        """
        col_r, col_g, col_b = rgb
        return (
            min(255, max(0, math.floor(col_r * 255.0))),
            min(255, max(0, math.floor(col_g * 255.0))),
            min(255, max(0, math.floor(col_b * 255.0))),
        )
    def plot_pixel(self, x: int, y: int, rgb: typing.Tuple[float, float, float]):
        """Plots a pixel to the Pygame window.
        Plots an RGB pixel at the specified position with the
        specified colour, within the pygame window.
        """
        rgb_int = self._rgb_color(rgb)
        self.surf.set_at((x, y), rgb_int)
    def plot_rect(
        self,
        xy1: typing.Tuple[int, int],
        xy2: typing.Tuple[int, int],
        rgb: typing.Tuple[float, float, float],
    ):
        """Fills a rectangular region with a given colour.
        This implementation uses pygame's utilities to fill
        a rectangle of the specified colour between the
        specified corners.
        NOTE(review): pygame rects are (left, top, width, height), so
        xy2 is effectively treated here as a size, not a second corner
        as the docstring says — confirm what callers actually pass.
        """
        self.surf.fill(self._rgb_color(rgb), (*xy1, *xy2))
    def update(self):
        """Updates the pygame surface (flips the display buffers)."""
        pygame.display.flip()
| StarcoderdataPython |
1805945 | <reponame>Thib17/tailon<filename>tasks.py
# -*- coding: utf-8; -*-
import json
import subprocess as sub
from glob import glob
from pathlib import Path
from time import time
import re
from invoke import run, task
from webassets.filter import register_filter, Filter
from webassets.loaders import YAMLLoader
#-----------------------------------------------------------------------------
LOGDIR = Path('./logs')
LOGSIM_PID = Path('/tmp/python-tailon-logsim.pid')
LOGSIM_FILES = [
LOGDIR / 'nginx/access.log',
LOGDIR / 'nginx/error.log',
LOGDIR / 'apache/www.tailon.org/access.log',
LOGDIR / 'apache/www.tailon.org/error.log',
]
BOWERBIN = Path('node_modules/bower/bin/bower')
BOWERDIR = Path('bower_components')
ASSETDIR = Path('tailon/assets')
#-----------------------------------------------------------------------------
# Invoke tasks.
#-----------------------------------------------------------------------------
@task
def logsim_start(
    ctx,
    update_msec='100,2000',
    truncate_msec='10000,20000',
    rate='1,5', seed=None,
    pid=str(LOGSIM_PID)
):
    """Start the background log-simulator daemon.

    Writes random log lines to the files in LOGSIM_FILES; the msec/rate
    arguments are 'min,max' ranges passed straight through to
    tests/logsim.py. The seed defaults to the current time, so every run
    produces a different stream.
    """
    seed = seed if seed else str(time())
    files = ' '.join(str(i) for i in LOGSIM_FILES)
    print('writing random log lines to:')
    print(' \n'.join(' - %s' % i for i in LOGSIM_FILES))
    # vars() interpolates the local names (update_msec, files, ...) into the command.
    cmd = '''\
    python tests/logsim.py \
    --update-msec {update_msec} \
    --truncate-msec {truncate_msec} \
    --rate {rate} \
    --pid {pid} \
    --seed {seed} \
    --daemon start \
    {files}
    '''.format(**vars())
    run(cmd)
@task
def logsim_stop(ctx):
    """Stop a previously started log-simulator daemon."""
    run('python tests/logsim.py --daemon stop')
@task
def logsim(ctx):
    """Run tailon in the foreground against the simulated log files."""
    files = ' '.join(str(i) for i in LOGSIM_FILES)
    sub.check_call('python -m tailon.main -d -a -f {}'.format(files), shell=True)
@task
def test(ctx):
    """Run the pytest suite (pty=True keeps live, colored output)."""
    run('py.test -sv tests', pty=True)
@task(aliases=['lsbower'])
def list_bowerfiles(ctx):
    """Print the resolved 'main' files of all installed bower components."""
    for source in bowerfiles():
        print(source)
@task
def collectstatic(ctx):
    """Copy bower component main files into the assets vendor directory."""
    # Copy bower main files to the vendor dir.
    for source in bowerfiles():
        # Drop the leading 'bower_components' segment from the source path.
        dest = Path(ASSETDIR/'vendor', *source.parts[1:])
        run('install -vD {} {}'.format(source, dest))
@task
def cleanstatic(ctx):
    """Delete all generated static files under assets/gen and assets/fonts.

    Dotfiles (e.g. .gitkeep placeholders) are left in place.
    """
    for subdir in ('gen', 'fonts'):
        for entry in (ASSETDIR / subdir).glob('*'):
            if entry.name.startswith('.'):
                continue
            print('unkink: %s' % entry)
            entry.unlink()
@task
def compile_typescript(ctx, debug=False):
    """Compile all TypeScript sources into one bundled Main.js (+ source map).

    NOTE(review): the ``debug`` flag is currently unused — confirm whether
    it was meant to toggle source maps or minification.
    """
    dst = ASSETDIR / 'gen/Main.js'
    src = ' '.join(map(str, Path('tailon/assets/js/').glob('*.ts')))
    cmd = 'node_modules/typescript/bin/tsc --pretty --out %s --sourceMap %s'
    print('* Compiling typescript to %s' % dst)
    run(cmd % (dst, src))
@task(pre=[compile_typescript])
def webassets(ctx, debug=False, expire=True, replace=False):
    """Build the webassets bundles and optionally splice the generated
    css/js tags into the base template.

    debug:   build unminified bundles.
    expire:  append cache-busting query strings to asset urls.
    replace: rewrite tailon/templates/base.html in place between the
             WEBASSETS placeholder comments.
    """
    # Register our custom webassets filter.
    register_filter(ConsoleLogFilter)
    #--------------------------------------------------------------------------
    # Copy fonts to webassets dir.
    print('* Copying fonts to %s' % ASSETDIR)
    fonts = [
        'tailon/assets/vendor/components-font-awesome/fonts/fontawesome-webfont.eot',
        'tailon/assets/vendor/components-font-awesome/fonts/fontawesome-webfont.svg',
        'tailon/assets/vendor/components-font-awesome/fonts/fontawesome-webfont.ttf',
        'tailon/assets/vendor/components-font-awesome/fonts/fontawesome-webfont.woff',
        'tailon/assets/vendor/components-font-awesome/fonts/fontawesome-webfont.woff2',
    ]
    run('rsync -v {} {}'.format(' '.join(fonts), ASSETDIR / 'fonts'))
    #--------------------------------------------------------------------------
    # Load webassets environment.
    env = YAMLLoader('./webassets.yaml').load_environment()
    env.debug = debug
    env.url_expire = expire
    #--------------------------------------------------------------------------
    # Generate css/js urls (flattening each bundle's url list into tags).
    css_urls = [env['external-css'], env['selectize-css'], env['internal-css']]
    css_urls = [url_to_link(url) for urls in css_urls for url in urls.urls()]
    js_urls = [env['external-js'], env['internal-js']]
    js_urls = [url_to_script(url) for urls in js_urls for url in urls.urls()]
    print()
    print('* URLs css:')
    print(''.join((i.lstrip() for i in css_urls)))
    print('* URLs js:')
    print(''.join((i.lstrip() for i in js_urls)))
    if replace:
        sedplaceholder('tailon/templates/base.html', '<!-- WEBASSETS CSS -->', css_urls)
        sedplaceholder('tailon/templates/base.html', '<!-- WEBASSETS JS -->', js_urls)
#-----------------------------------------------------------------------------
# Utility functions.
#-----------------------------------------------------------------------------
def sedplaceholder(filename, placeholder, replacement, indent=6):
    """Replace the lines between the first two *placeholder* marker lines.

    Scans *filename* for the first two lines whose stripped content equals
    *placeholder*, replaces everything between them with *replacement*
    (each line re-indented by *indent* spaces) and rewrites the file in
    place.

    Raises ValueError if fewer than two marker lines are found.
    """
    with open(filename) as fh:
        lines = fh.readlines()

    start, end = None, None
    for n, line in enumerate(lines):
        if line.strip() == placeholder:
            # First marker opens the region, second one closes it.
            # Fix: compare against None — the previous truthiness checks
            # (`if not start`, `if start and end`) misbehaved when a marker
            # sat on line 0, clobbering the rest of the file.
            if start is None:
                start = n
                continue
            if end is None:
                end = n
        if start is not None and end is not None:
            break

    if start is None or end is None:
        raise ValueError('placeholder %r not found twice in %s' % (placeholder, filename))

    lines[start + 1:end] = ['%s%s' % (' ' * indent, i) for i in replacement]
    with open(filename, 'w') as fh:
        fh.write(''.join(lines))
def url_to_link(url):
    """Render *url* as an indented, newline-terminated HTML stylesheet tag.

    {{root}} is left verbatim as a template placeholder for the app root.
    """
    return " <link rel='stylesheet' href='{{root}}%s'>\n" % url
def url_to_script(url):
    """Render *url* as an indented, newline-terminated HTML script tag.

    {{root}} is left verbatim as a template placeholder for the app root.
    """
    return " <script src='{{root}}%s'></script>\n" % url
def bowerfiles():
    """Return the 'main' files of all installed bower components as Paths.

    Shells out to ``bower list --paths --json``; the reported values may be
    single paths, lists of paths, or glob patterns, so the result is
    flattened and glob-expanded before conversion to Path objects.
    """
    res = run('%s list --paths --json' % BOWERBIN, hide='out')
    res = json.loads(res.stdout)
    # Flatten the output of `bower list` and expand globs.
    main = ([i] if not isinstance(i, list) else i for i in res.values())
    main = (glob(j) for i in main for j in i)
    main = [Path(j) for i in main for j in i]
    return main
def vendorfiles():
    """Yield the destination vendor path for every bower main file.

    Mirrors the path mapping used by collectstatic (drops the leading
    'bower_components' segment and roots the file under ASSETDIR).
    """
    for source in bowerfiles():
        yield Path(ASSETDIR, *source.parts[1:])
class ConsoleLogFilter(Filter):
    '''
    A webassets filter that removes calls to console.log in non-debug builds.
    '''
    name = 'rmconsole'
    def output(self, _in, out, **kwargs):
        # NOTE(review): the non-greedy pattern only removes console.log/warn
        # calls that start and end on one line, and would also strip matching
        # text inside string literals — confirm that is acceptable here.
        for line in _in:
            line = re.sub(r'console\.(log|warn)\(.*?\);', '', line)
            out.write(line)
| StarcoderdataPython |
6558650 | <filename>tests/days/Day10Test.py
import unittest
from ac2020.days.Day10 import Day10
class Day10Test(unittest.TestCase):
    """Tests for the Day 10 solver using the Advent of Code 2020 examples."""
    def test_empty_input(self):
        """Empty input yields 'No result' for both parts."""
        day = Day10()
        day._set_input('')
        self.assertEqual('No result', day.part1())
        self.assertEqual('No result', day.part2())
    def test_correct_input(self):
        """Small example from the puzzle statement: expects 35 and 8."""
        day = Day10()
        day._set_input('16\n10\n15\n5\n1\n11\n7\n19\n6\n12\n4')
        self.assertEqual('35', day.part1())
        self.assertEqual('8', day.part2())
    def test_more_complex_correct_input(self):
        """Larger example from the puzzle statement: expects 220 and 19208."""
        day = Day10()
        day._set_input('28\n33\n18\n42\n31\n14\n46\n20\n48\n47\n24\n23\n49\n45\n19\n38\n39\n11\n1\n32\n25\n35\n8\n17\n7\n9\n4\n2\n34\n10\n3')
        self.assertEqual('220', day.part1())
        self.assertEqual('19208', day.part2())
6497390 | <reponame>status-im/eth2.0-specs<filename>test_libs/pyspec/eth2spec/test/helpers/bitfields.py
def set_bitfield_bit(bitfield, i):
    """
    Return a copy of ``bitfield`` with bit ``i`` set to ``1``.

    Bit ``i`` lives in byte ``i // 8`` at position ``i % 8`` (LSB-first
    within each byte). The input bytes object is not modified.
    """
    byte_pos, bit_pos = divmod(i, 8)
    updated = bytearray(bitfield)
    updated[byte_pos] |= 1 << bit_pos
    return bytes(updated)
6677327 |
import smart_imports
smart_imports.all()
class Config(django_apps.AppConfig):
    """Django application config for the game chronicle app."""
    name = 'the_tale.game.chronicle'
    label = 'chronicle'
    verbose_name = 'chronicle'
    def ready(self):
        """Hook run once the app registry is fully populated.

        The import is needed only for its side effect of registering the
        chronicle signal processors.
        """
        from . import signal_processors
        pass
| StarcoderdataPython |
1752017 | # ==============================================================================
# This file is part of the SPNC project under the Apache License v2.0 by the
# Embedded Systems and Applications Group, TU Darmstadt.
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
import numpy as np
import os
import time
from spnc.cpu import CPUCompiler
from xspn.serialization.binary.BinarySerialization import BinaryDeserializer
def test_vector_fashion_mnist():
    """End-to-end check of the vectorized CPU kernel against reference outputs.

    Deserializes a binary SPN model, compiles it with vectorization enabled
    and evaluates 10000 single-sample batches, comparing each result with a
    precomputed reference value.

    NOTE(review): despite the function name, the model and reference data
    are the NLTCS files shipped next to this script — confirm whether the
    name should say nltcs instead of fashion_mnist.
    """
    if not CPUCompiler.isVectorizationSupported():
        print("Test not supported by the compiler installation")
        return 0
    # Locate test resources located in same directory as this script.
    scriptPath = os.path.realpath(os.path.dirname(__file__))
    # Deserialize model
    model = BinaryDeserializer(os.path.join(scriptPath, "nltcs_100_200_2_10_8_8_1_True.bin")).deserialize_from_file()
    spn = model.root
    inputs = np.genfromtxt(os.path.join(scriptPath, "input.csv"), delimiter=",", dtype="float64")
    reference = np.genfromtxt(os.path.join(scriptPath, "nltcs_100_200_2_10_8_8_1_True_output.csv"), delimiter=",",
                              dtype="float64")
    reference = reference.reshape(10000)
    # Compile the kernel.
    compiler = CPUCompiler(vectorize=True, computeInLogSpace=True)
    kernel = compiler.compile_ll(spn=spn, batchSize=1, supportMarginal=False)
    # Execute the compiled Kernel, timing only the kernel invocations.
    time_sum = 0
    for i in range(len(reference)):
        # Check the computation results against the reference
        start = time.time()
        result = compiler.execute(kernel, inputs=np.array([inputs[i]]))
        time_sum = time_sum + time.time() - start
        if not np.isclose(result, reference[i]):
            print(f"\nevaluation #{i} failed: result: {result[0]:16.8f}, reference: {reference[i]:16.8f}")
            raise AssertionError()
    print(f"\nExecution of {len(reference)} samples took {time_sum} seconds.")
if __name__ == "__main__":
test_vector_fashion_mnist()
print("COMPUTATION OK")
| StarcoderdataPython |
3383742 | import numpy
import math
from scipy.optimize import root
from math import *
# Interactive banner and user input for the sizing calculation.
print('')
print('LIQUIDS PIPE SIZING CALCULATIONS')
print('')
print('INPUT DATA')
print('')
# Module-level globals consumed by the calculation functions below.
q = float(input('Please introduce liquid flow rate (US gpm): '))
ro = float(input('Please introduce liquid density (lb/ft3): '))
vi = float(input('Please introduce liquid viscosity (cP): '))
di = float(input('Please introduce pipe inner diameter (inches): '))
ru = float(input('Please introduce pipe roughness (inches): '))
def run():
    """Compute and print every pipe-sizing result from the global inputs."""
    print('')
    print('RESULTS')
    print('')
    flow_area_ft2 = area(di)
    print("Pipe flow area is " + str(round(flow_area_ft2, 4)) + " ft2")
    vel_fps = velocity(q)
    print("Flow velocity is " + str(round(vel_fps, 2)) + " ft/s")
    re = Reynolds(ro, vi)
    print("Reynolds number is " + str(round(re, 0)))
    rel_rough = rr(ru, di)
    print("Relative roughness is " + str(round(rel_rough, 6)))
    friction = fac()
    print("Darcy friction factor is " + str(round(friction, 5)))
    pressure_drop = dp(ro, di)
    print('Droop pressure is ' + str(round(pressure_drop, 5)) + ' psi/100ft')
def area(di):
    """Pipe cross-sectional flow area in ft^2; ``di`` is the inner diameter in inches."""
    diameter_ft = di / 12
    return 0.25 * math.pi * diameter_ft ** 2
def velocity(q):
    """Mean flow velocity in ft/s for a flow rate ``q`` in US gpm.

    Uses the module globals ``di`` via :func:`area`; 0.00222801 converts
    US gpm to ft^3/s.
    """
    return (0.00222801 * q) / area(di)
def Reynolds(ro, vi):
    """Reynolds number for density ``ro`` (lb/ft3) and viscosity ``vi`` (cP).

    Reads the module globals ``q`` and ``di``; 0.000671969 converts
    centipoise to lb/(ft*s).
    """
    v = velocity(q)
    mu = vi * 0.000671969
    return ro * v * (di / 12) / mu
def rr(ru, di):
    """Relative roughness (dimensionless): absolute roughness over diameter, both in inches."""
    return ru / di
def fac():
    """Return the Darcy friction factor for the global flow conditions.

    Re < 4000: laminar formula f = 64/Re.
    Otherwise: Colebrook-White equation solved by fixed-point iteration
    (five iterations from an initial guess of 0.01, matching the original
    scheme).
    """
    reynolds_number = Reynolds(ro, vi)
    relative_roughness = rr(ru, di)
    if reynolds_number < 4000:
        # NOTE(review): everything below Re=4000 (including the transitional
        # regime) is treated as laminar here -- confirm this is intended.
        return 64 / reynolds_number
    # Fixed-point iteration on 1/sqrt(f) = -2*log10(eps/3.7 + 2.51/(Re*sqrt(f))).
    # The original loop carried a useless manual "i = i + 1" (the for loop
    # rebinds i anyway) and two shadow variables f1/f2; both removed.
    f = 0.01
    for _ in range(5):
        f = 1/(-2*log((relative_roughness/3.7) + (2.51/(reynolds_number*sqrt(f))),10))**2
    return f
def dp(ro, di):
    """Frictional pressure drop in psi per 100 ft of pipe (Darcy-Weisbach form)."""
    v = velocity(q)
    f = fac()
    gc = 32.17  # gravitational conversion constant, lbm*ft/(lbf*s^2)
    return (25/6) * f * ro * v ** 2 / (gc * di)
# Run the sizing calculation when executed as a script.
if __name__ == "__main__":
    run ()
| StarcoderdataPython |
1833043 | from functools import wraps
from unittest.mock import patch
from ninja.signature import is_async
def mock_signal_call(signal: str, called: bool = True):
    """Decorator asserting whether ``ninja_extra.signals.<signal>.send`` fired.

    Works for both sync and async test functions; ``called`` selects whether
    the signal is expected to have been sent.
    """
    target = f"ninja_extra.signals.{signal}.send"

    def decorator(func):
        if is_async(func):
            async def inner(*args, **kwargs):
                with patch(target) as send_mock:
                    await func(*args, **kwargs)
                    assert send_mock.called == called
        else:
            def inner(*args, **kwargs):
                with patch(target) as send_mock:
                    func(*args, **kwargs)
                    assert send_mock.called == called
        return wraps(func)(inner)

    return decorator
def mock_log_call(level: str, called: bool = True):
    """Decorator asserting whether ``ninja_extra.logger.request_logger.<level>``
    was invoked while the wrapped (sync or async) test ran."""
    target = f"ninja_extra.logger.request_logger.{level.lower()}"

    def decorator(func):
        if is_async(func):
            async def inner(*args, **kwargs):
                with patch(target) as log_mock:
                    await func(*args, **kwargs)
                    assert log_mock.called == called
        else:
            def inner(*args, **kwargs):
                with patch(target) as log_mock:
                    func(*args, **kwargs)
                    assert log_mock.called == called
        return wraps(func)(inner)

    return decorator
| StarcoderdataPython |
3543415 | """Plots."""
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from computations import lamb_vdp1, lamb_vpp1, lamb_p1cond1, lamb_p1cond2,\
lamb_vdp2, lamb_vpp2, lamb_p2cond1, lamb_p2cond2
def plot_optimal_policies(δ, ρ, γ, rh, rl, xaxis="δ", yaxis="γ", prec=100,
                          riskyoptcolor="orange", cautiousoptcolor="blue",
                          ax=None):
    """Color the (xaxis, yaxis) plane by which policy is optimal.

    The two selected parameters are swept over [0, value] grids while the
    others stay fixed. Cells are painted ``riskyoptcolor`` where the risky
    policy yields the higher prosperous-state value, ``cautiousoptcolor``
    otherwise.

    Parameters
    ----------
    δ, ρ, γ, rh, rl : float
        Collapse probability, recovery probability, discount factor,
        high reward and low reward.
    xaxis, yaxis : str
        Names of the two parameters to sweep (default "δ" and "γ").
    prec : int
        Grid resolution per axis (default 100).
    riskyoptcolor, cautiousoptcolor : str
        Colors for the regions where the respective policy is optimal.
    ax : None or axis object
        Target axis; a new figure is created when None.
    """
    # Insertion order of this dict must stay (δ, ρ, γ, rh, rl): the
    # lambdified value functions are called positionally via *values().
    params = {"δ": δ, "ρ": ρ, "γ": γ, "rh": rh, "rl": rl}
    X, Y = np.meshgrid(np.linspace(0, params[xaxis], prec),
                       np.linspace(0, params[yaxis], prec))
    params[xaxis], params[yaxis] = X, Y
    # Prosperous-state values of the risky (1) and cautious (2) policy.
    value_risky = lamb_vpp1(*params.values())
    value_cautious = lamb_vpp2(*params.values())
    cautious_is_better = (value_risky < value_cautious).astype(int)
    cmap = mpl.colors.ListedColormap([riskyoptcolor, cautiousoptcolor])
    if ax is None:
        fig, ax = plt.subplots()
    ax.pcolormesh(X, Y, cautious_is_better, cmap=cmap, vmin=0, vmax=1)
    ax.set_xlabel(xaxis)
    ax.set_ylabel(yaxis)
def plot_acceptal_states(δ, ρ, γ, rh, rl, rmin, state="degraded",
                         xaxis="δ", yaxis="γ", prec=100,
                         nonacceptcolor="Pink",
                         riskyacceptcolor="Lightblue",
                         cautiousacceptcolor="Yellow",
                         bothacceptcolor="Seagreen",
                         ax=None):
    """Colorcode parameter regions by which policy is acceptable in ``state``.

    A policy is acceptable when its value in the given state exceeds the
    threshold ``rmin``; the plane is painted with the four colors according
    to which of the two policies qualify.

    Parameters
    ----------
    δ, ρ, γ, rh, rl : float
        Collapse probability, recovery probability, discount factor,
        high reward and low reward.
    rmin : float
        The minimal acceptable reward value.
    state : str
        Either "prosperous" or "degraded".
    xaxis, yaxis : str
        The parameters swept on the plot axes (default "δ" and "γ").
    prec : int
        Grid resolution per axis (default 100).
    nonacceptcolor, riskyacceptcolor, cautiousacceptcolor, bothacceptcolor : str
        Colors for the four acceptance regions.
    ax : None or axis object
        Target axis; a new figure is created when None.
    """
    # Fixed: the original compared the strings with ``is`` (object identity),
    # which is not guaranteed to hold for equal string values.
    assert state in ("prosperous", "degraded")
    # Order matters: lambdified functions are called positionally below.
    params = {"δ": δ, "ρ": ρ, "γ": γ, "rh": rh, "rl": rl}
    # Getting x and y
    x = np.linspace(0, params[xaxis], prec)
    y = np.linspace(0, params[yaxis], prec)
    X, Y = np.meshgrid(x, y)
    params[xaxis] = X
    params[yaxis] = Y
    ones = np.ones_like(X)
    # Obtaining values for the requested state.
    value_functions = {"prosperous": [lamb_vpp1, lamb_vpp2],
                       "degraded": [lamb_vdp1, lamb_vdp2]}
    vp1 = value_functions[state][0](*params.values())
    vp2 = value_functions[state][1](*params.values())
    # Encoding (comment fixed; policy 1 is the risky one):
    # 0: no policy acceptable, 1: only risky policy acceptable,
    # 2: only cautious policy acceptable, 3: both policies acceptable
    p1_accept = ((vp1 > rmin)*ones).astype(int)
    p2_accept = ((vp2 > rmin)*ones).astype(int)
    p2_accept[p2_accept != 0] += 1
    data = p1_accept + p2_accept
    # colormap indexed by the encoding above
    colors = [nonacceptcolor, riskyacceptcolor,
              cautiousacceptcolor, bothacceptcolor]
    cmap = mpl.colors.ListedColormap(colors)
    # plot
    if ax is None:
        fig, ax = plt.subplots()
    ax.pcolormesh(X, Y, data, cmap=cmap, vmin=0, vmax=3)
    ax.set_xlabel(xaxis)
    ax.set_ylabel(yaxis)
def iplot_acceptal_states(δ, ρ, γ, rh, rl, rmin, state="degraded",
                          xaxis="δ", yaxis="γ", prec=100):
    """Interactive wrapper around :func:`plot_acceptal_states`.

    Uses a fixed yellow-based palette where the red/green channels are dimmed
    by an overlay factor to mark which policies are acceptable. All
    parameters mirror :func:`plot_acceptal_states`; see there for details.
    """
    dim = 0.6  # per-policy channel dimming factor
    palette = {
        "both": (1.0, 1.0, 0.0),
        "cautious": (1.0 - dim, 1.0, 0.0),
        "risky": (1.0, 1.0 - dim, 0.0),
        "none": (1.0 - dim, 1.0 - dim, 0.0),
    }
    plot_acceptal_states(δ, ρ, γ, rh, rl, rmin, state=state,
                         nonacceptcolor=palette["none"],
                         riskyacceptcolor=palette["risky"],
                         cautiousacceptcolor=palette["cautious"],
                         bothacceptcolor=palette["both"],
                         xaxis=xaxis, yaxis=yaxis, prec=prec, ax=None)
def plot_sustainble_policies(δ, ρ, γ, rh, rl, rmin,
                             nonsuscolor="Red", riskysuscolor="Lightblue",
                             cautioussuscolor="Lightgreen", bothsuscolor="Green",
                             xaxis="δ", yaxis="γ", prec=100, ax=None):
    """Colorcode parameter regions by which policy is sustainable.

    The risky policy (1) is sustainable when acceptable (value >= ``rmin``)
    in BOTH the prosperous and the degraded state; the cautious policy (2)
    only needs to be acceptable in the prosperous state.

    Parameters
    ----------
    δ, ρ, γ, rh, rl : float
        Collapse probability, recovery probability, discount factor,
        high reward and low reward.
    rmin : float
        The minimal acceptable reward value.
    nonsuscolor, riskysuscolor, cautioussuscolor, bothsuscolor : str
        Colors for: neither / only risky / only cautious / both sustainable.
    xaxis, yaxis : str
        The parameters swept on the plot axes (default "δ" and "γ").
    prec : int
        Grid resolution per axis (default 100).
    ax : None or axis object
        Target axis; a new figure is created when None.
    """
    # Order matters: lambdified functions are called positionally below.
    params = {"δ": δ, "ρ": ρ, "γ": γ, "rh": rh, "rl": rl}
    x = np.linspace(0, params[xaxis], prec)
    y = np.linspace(0, params[yaxis], prec)
    X, Y = np.meshgrid(x, y)
    params[xaxis] = X
    params[yaxis] = Y
    ones = np.ones_like(X)
    # Value functions (vdp2 was computed but never used; removed).
    vpp1 = lamb_vpp1(*params.values())
    vpp2 = lamb_vpp2(*params.values())
    vdp1 = lamb_vdp1(*params.values())
    # 0: no policy sustainable, 1: only risky policy sustainable,
    # 2: only cautious policy sustainable, 3: both policies sustainable
    p_p1_accept = ((vpp1 >= rmin)*ones).astype(int)
    d_p1_accept = ((vdp1 >= rmin)*ones).astype(int)
    p_p2_accept = ((vpp2 >= rmin)*ones).astype(int)
    p_p2_accept[p_p2_accept != 0] += 1
    p1_sus = p_p1_accept * d_p1_accept  # only when both states are acceptable
    p2_sus = p_p2_accept  # only prosperous counts for policy 2
    data = p1_sus + p2_sus
    # colormap indexed by the encoding above
    colors = [nonsuscolor, riskysuscolor, cautioussuscolor, bothsuscolor]
    cmap = mpl.colors.ListedColormap(colors)
    if ax is None:
        fig, ax = plt.subplots()
    ax.pcolormesh(X, Y, data, cmap=cmap, vmin=0, vmax=3)
    ax.set_xlabel(xaxis)
    ax.set_ylabel(yaxis)
def plot_SOS_policies(δ, ρ, γ, rh, rl, xaxis="δ", yaxis="γ", prec=100,
                      cautiousafecolor="darkblue",
                      ax=None):
    """Color the (xaxis, yaxis) plane by which policy is safe (SOS).

    Under the safe-operating-space criterion only the cautious policy is ever
    safe, so the whole plane is painted ``cautiousafecolor``; no value
    functions need to be evaluated.

    Parameters
    ----------
    δ, ρ, γ, rh, rl : float
        Collapse probability, recovery probability, discount factor,
        high reward and low reward.
    xaxis, yaxis : str
        The parameters spanning the plot axes (default "δ" and "γ").
    prec : int
        Grid resolution per axis (default 100).
    cautiousafecolor : str
        Color for the (everywhere) cautious-safe region.
    ax : None or axis object
        Target axis; a new figure is created when None.
    """
    params = {"δ": δ, "ρ": ρ, "γ": γ, "rh": rh, "rl": rl}
    X, Y = np.meshgrid(np.linspace(0, params[xaxis], prec),
                       np.linspace(0, params[yaxis], prec))
    params[xaxis], params[yaxis] = X, Y
    cmap = mpl.colors.ListedColormap(["yellow", cautiousafecolor])
    if ax is None:
        fig, ax = plt.subplots()
    # Constant field of ones -> every cell maps to the second color.
    ax.pcolormesh(X, Y, np.ones_like(X), cmap=cmap, vmin=0, vmax=1)
    ax.set_xlabel(xaxis)
    ax.set_ylabel(yaxis)
def plot_policies(δ, ρ, γ, rh, rl, rmin,
                  xaxis="δ", yaxis="γ", prec=100):
    """Side-by-side classification of both policies under the three paradigms.

    Left panel: optimality, middle: sustainability, right: safe operating
    space. See :func:`plot_optimal_policies`,
    :func:`plot_sustainble_policies` and :func:`plot_SOS_policies`.
    """
    dim = 0.6  # shared channel dimming used to build the palette
    opt_risky = (1.0, dim, 0)
    opt_cautious = (1.0, 0, dim)
    sus_both = (dim, dim, 1.0)
    sus_cautious = (0.0, dim, 1.0)
    sus_risky = (dim, 0.0, 1.0)
    sus_none = (0.0, 0.0, 1.0)
    sos_cautious = (0.0, 1.0, dim)
    fig, axes = plt.subplots(1, 3, sharey='row', figsize=(6.5, 2))
    plot_optimal_policies(δ, ρ, γ, rh, rl, xaxis=xaxis, yaxis=yaxis,
                          prec=prec, riskyoptcolor=opt_risky,
                          cautiousoptcolor=opt_cautious, ax=axes[0])
    plot_sustainble_policies(δ, ρ, γ, rh, rl, rmin, xaxis=xaxis,
                             yaxis=yaxis, prec=prec,
                             bothsuscolor=sus_both,
                             cautioussuscolor=sus_cautious,
                             riskysuscolor=sus_risky,
                             nonsuscolor=sus_none,
                             ax=axes[1])
    plot_SOS_policies(δ, ρ, γ, rh, rl, xaxis=xaxis, yaxis=yaxis,
                      prec=prec, cautiousafecolor=sos_cautious,
                      ax=axes[2])
def _plot_PolicyCombinations(δ, ρ, γ, rh, rl, rmin, policy="risky",
                             xaxis="δ", yaxis="γ", prec=100, ax=None):
    """Plot the opt/sus/SOS combination map for one policy.

    The cell value encodes a 3-bit combination (optimal, sustainable, SOS):
    0 = none, 1 = SOS only, 2 = sus only, 3 = sus+SOS, 4 = opt only,
    5 = opt+SOS, 6 = opt+sus, 7 = all three.

    Parameters mirror :func:`plot_sustainble_policies`; ``policy`` selects
    "risky" or "safe".
    """
    # Order matters: lambdified functions are called positionally below.
    params = {"δ": δ, "ρ": ρ, "γ": γ, "rh": rh, "rl": rl}
    x = np.linspace(0, params[xaxis], prec)
    y = np.linspace(0, params[yaxis], prec)
    X, Y = np.meshgrid(x, y)
    params[xaxis] = X
    params[yaxis] = Y
    ones = np.ones_like(X)
    # Value functions (the unused vdp2 evaluation was removed).
    vpp1 = lamb_vpp1(*params.values())
    vpp2 = lamb_vpp2(*params.values())
    vdp1 = lamb_vdp1(*params.values())
    p_p1_accept = ((vpp1 >= rmin)*ones).astype(int)
    d_p1_accept = ((vdp1 >= rmin)*ones).astype(int)
    p_p2_accept = ((vpp2*ones >= rmin)).astype(int)
    p1_sus = p_p1_accept * d_p1_accept  # only when both states are acceptable
    p2_sus = p_p2_accept  # only prosperous counts for policy 2
    p1_opt = (vpp1 > vpp2).astype(int)
    p2_opt = (vpp2 > vpp1).astype(int)
    # SOS: the risky policy is never safe, the cautious one always is.
    p1_SOS = 0 * ones
    p2_SOS = ones
    # NOTE(review): any other ``policy`` value leaves ``data`` unbound and
    # raises NameError below -- kept for backward compatibility.
    if policy == "risky":
        data = 4*p1_opt + 2*p1_sus + p1_SOS
    elif policy == "safe":
        data = 4*p2_opt + 2*p2_sus + p2_SOS
    # 8-color RGB cube keyed by the 3-bit combination encoding above.
    cv = 200/255.
    colors = [(0., 0., 0.), (0., cv, 0.), (0., 0., cv),
              (0., cv, cv), (cv, 0., 0.), (cv, cv, 0.),
              (cv, 0., cv), (cv, cv, cv)]
    cmap = mpl.colors.ListedColormap(colors)
    if ax is None:
        fig, ax = plt.subplots()
    ax.pcolormesh(X, Y, data, cmap=cmap, vmin=0, vmax=7)
    ax.set_xlabel(xaxis)
    ax.set_ylabel(yaxis)
def plot_PolicyCombinations(δ, ρ, γ, rh, rl, rmin,
                            xaxis="δ", yaxis="γ", prec=100):
    """Plot the paradigm combinations of both policies side by side."""
    fig, axes = plt.subplots(1, 2, sharey='row', figsize=(6.5, 3))
    panels = ((axes[0], "risky", "risky policy"),
              (axes[1], "safe", "safe policy"))
    for axis, which, title in panels:
        _plot_PolicyCombinations(δ, ρ, γ, rh, rl, rmin, policy=which,
                                 xaxis=xaxis, yaxis=yaxis, prec=prec,
                                 ax=axis)
        axis.set_title(title)
def plot_PolicyCombinations_withUncertainty(δ, ρ, γ, rh, rl, rmin,
                                            xaxis="δ", yaxis="γ",
                                            plotprec=101, uprec=11,
                                            policy=None, ax=None):
    """Plot paradigm-combination maps averaged over parameter uncertainty.

    Every parameter is a ``(low, high)`` interval. The two axis parameters
    are swept at resolution ``plotprec``; every other non-degenerate
    interval is sampled at resolution ``uprec`` and the opt/sus/SOS
    indicator fields are averaged over those uncertainty dimensions,
    yielding per-cell RGB intensities (R=opt, G=SOS, B=sus).

    With ``policy=None`` both policies are drawn side by side, otherwise
    only "risky" or "safe" into ``ax``.
    """
    def get_linspace(param, name):
        # Axis parameters must be genuine intervals; other parameters may
        # collapse to a scalar when low == high.
        if name == xaxis or name == yaxis:
            assert param[0] != param[1], "Parameters and Axis not consistent"
            plinspace = np.linspace(param[0], param[1], plotprec)
        else:
            if param[0] == param[1]:
                plinspace = param[0]
            else:
                plinspace = np.linspace(param[0], param[1], uprec)
        return plinspace
    # Order matters: lambdified functions are called positionally below.
    params = {"δ": δ, "ρ": ρ, "γ": γ, "rh": rh, "rl": rl, "rmin": rmin}
    paramsl = {name: get_linspace(params[name], name) for name in params}
    PARAMS = np.meshgrid(paramsl["δ"], paramsl["ρ"], paramsl["γ"],
                         paramsl["rh"], paramsl["rl"], indexing="ij")
    # Trailing axis reserved for rmin broadcasting (vdp2 was unused; removed).
    vpp1 = lamb_vpp1(*PARAMS)[:,:,:,:,:,np.newaxis]
    vpp2 = lamb_vpp2(*PARAMS)[:,:,:,:,:,np.newaxis]
    vdp1 = lamb_vdp1(*PARAMS)[:,:,:,:,:,np.newaxis]
    # NOTE(review): type(...) == float deliberately kept from the original;
    # isinstance would also admit np.float64 and change behavior.
    if type(paramsl["rmin"]) == float:
        rmin = paramsl["rmin"]
    else:
        rmin = paramsl["rmin"][np.newaxis, np.newaxis, np.newaxis, np.newaxis,
                               np.newaxis, :]
    rminsize = 1 if type(rmin) == float else len(rmin)
    ones = np.ones(PARAMS[0].shape + (rminsize,))
    p_p1_accept = ((vpp1 >= rmin)).astype(int)
    d_p1_accept = ((vdp1 >= rmin)).astype(int)
    p_p2_accept = ((vpp2*ones >= rmin)).astype(int)
    p1_sus = p_p1_accept * d_p1_accept  # only when both states are acceptable
    p2_sus = p_p2_accept  # only prosperous counts for policy 2
    p1_opt = (vpp1 > vpp2).astype(int)
    p2_opt = (vpp2 > vpp1).astype(int)
    p1_SOS = 0 * ones
    p2_SOS = ones
    # Axes to average over = everything except the two plot axes.
    xpos = np.where(np.array(list(params.keys())) == xaxis)[0][0]
    ypos = np.where(np.array(list(params.keys())) == yaxis)[0][0]
    otherpos = tuple(set(range(6)) - set((xpos, ypos)))
    print(otherpos)  # NOTE(review): leftover debug output -- consider removing
    # Create RGB imshow arrays: channel = mean indicator over uncertainty.
    cv = 200/255.
    p1data = np.zeros((plotprec, plotprec, 3))
    p1data[:, :, 0] = p1_opt.mean(axis=otherpos) * cv
    p1data[:, :, 1] = p1_SOS.mean(axis=otherpos) * cv
    p1data[:, :, 2] = p1_sus.mean(axis=otherpos) * cv
    p2data = np.zeros((plotprec, plotprec, 3))
    p2data[:, :, 0] = p2_opt.mean(axis=otherpos) * cv
    p2data[:, :, 1] = p2_SOS.mean(axis=otherpos) * cv
    p2data[:, :, 2] = p2_sus.mean(axis=otherpos) * cv
    if xpos < ypos:
        print("swapping axes")  # NOTE(review): leftover debug output
        p1data = p1data.swapaxes(0, 1)
        p2data = p2data.swapaxes(0, 1)
    ext = [params[xaxis][0],params[xaxis][1],
           params[yaxis][0],params[yaxis][1]]
    # ------------------------------------------------------------
    # Plotting
    # ------------------------------------------------------------
    # Fixed: compare against None with ``is`` rather than ``==``.
    if policy is None:
        fig, ax = plt.subplots(1, 2, sharey='row', figsize=(6.5, 3))
        ax[0].imshow(p1data, origin="lower",
                     interpolation='none', aspect="auto",
                     extent=ext)
        ax[1].imshow(p2data, origin="lower",
                     interpolation='none', aspect="auto",
                     extent=ext)
        ax[0].set_ylabel(yaxis)
        ax[0].set_xlabel(xaxis)
        ax[1].set_xlabel(xaxis)
    else:
        if ax is None:
            fig, ax = plt.subplots()
        data = p1data if policy == "risky" else p2data
        ax.imshow(data, origin="lower",
                  interpolation='none', aspect="auto",
                  extent=ext)
        ax.set_xlabel(xaxis)
        ax.set_ylabel(yaxis)
def plot_ParadigmVolumes(prec=31, ax=None):
    """Bar chart of the parameter-space volume of each paradigm combination.

    Sweeps δ, ρ, γ, rl and rmin over dense grids (rh fixed at 1.0), encodes
    each grid cell for each policy as a 3-bit (opt, sus, SOS) combination,
    histograms both policies' encodings and plots the normalized volumes as
    horizontal bars.

    Parameters
    ----------
    prec : int
        Grid resolution per swept parameter (default 31).
    ax : None or axis object
        Target axis; a new figure is created when None.
    """
    δ = (0, 1.0)
    ρ = (0, 1.0)
    γ = (0.0, 0.9999)
    rl = (0.0, 1.0)
    rmin = (0.0, 1.0)
    # rh is fixed to 1.0 below; order matters for the positional calls.
    params = {"δ": δ, "ρ": ρ, "γ": γ, #"rh": rh,
              "rl": rl, "rmin": rmin}
    paramsl = {name: np.linspace(params[name][0], params[name][1], prec)
               for name in params}
    PARAMS = np.meshgrid(paramsl["δ"], paramsl["ρ"], paramsl["γ"],
                         1.0, paramsl["rl"], indexing="ij")
    # Trailing axis reserved for rmin broadcasting (vdp2 was unused; removed).
    vpp1 = lamb_vpp1(*PARAMS)[:,:,:,:,:,np.newaxis]
    vpp2 = lamb_vpp2(*PARAMS)[:,:,:,:,:,np.newaxis]
    vdp1 = lamb_vdp1(*PARAMS)[:,:,:,:,:,np.newaxis]
    rmin = paramsl["rmin"][np.newaxis, np.newaxis, np.newaxis,
                           np.newaxis,np.newaxis, :]
    ones = np.ones(PARAMS[0].shape + (prec,))
    p_p1_accept = ((vpp1 >= rmin)).astype(int)
    d_p1_accept = ((vdp1 >= rmin)).astype(int)
    p_p2_accept = ((vpp2*ones >= rmin)).astype(int)
    p1_sus = p_p1_accept * d_p1_accept  # only when both states are acceptable
    p2_sus = p_p2_accept  # only prosperous counts for policy 2
    p1_opt = (vpp1 > vpp2).astype(int)
    p2_opt = (vpp2 > vpp1).astype(int)
    # SOS: the risky policy is never safe, the cautious one always is.
    p1_SOS = 0 * ones
    p2_SOS = ones
    # 3-bit encodings per cell; histogram over all 8 combinations.
    pol1 = 4*p1_opt + 2*p1_sus + p1_SOS
    pol2 = 4*p2_opt + 2*p2_sus + p2_SOS
    h1 = np.histogram(pol1.flatten(), bins=[0,1,2,3,4,5,6,7,8])[0]
    h2 = np.histogram(pol2.flatten(), bins=[0,1,2,3,4,5,6,7,8])[0]
    # policy combinations: opt, sus, SOS
    # 0 = non opt, non sus, non SOS ... 7 = opt, sus, SOS (binary encoding)
    sizes = {"non opt, non sus, non SOS": h1[0] + h2[0],
             "non opt, non sus, SOS" : h1[1] + h2[1],
             "non opt, sus, non SOS" : h1[2] + h2[2],
             "non opt, sus, SOS" : h1[3] + h2[3],
             "opt, non sus, non SOS" : h1[4] + h2[4],
             "opt, non sus, SOS" : h1[5] + h2[5],
             "opt, sus, non SOS" : h1[6] + h2[6],
             "opt, sus, SOS" : h1[7] + h2[7]}
    # RGB cube colors matching the combination encoding.
    cv = 200/255.
    colors = {"non opt, non sus, non SOS": (0., 0., 0.),
              "non opt, non sus, SOS" : (0., cv, 0.),
              "non opt, sus, non SOS" : (0., 0., cv),
              "non opt, sus, SOS" : (0., cv, cv),
              "opt, non sus, non SOS" : (cv, 0., 0.),
              "opt, non sus, SOS" : (cv, cv, 0.),
              "opt, sus, non SOS" : (cv, 0., cv),
              "opt, sus, SOS" : (cv, cv, cv)}
    # Bar order: single-paradigm, two-paradigm, all, none.
    keylist = ["opt, non sus, non SOS", "non opt, sus, non SOS",
               "non opt, non sus, SOS",
               "opt, sus, non SOS", "opt, non sus, SOS", "non opt, sus, SOS",
               "opt, sus, SOS", "non opt, non sus, non SOS"]
    sizeslist = np.array([sizes[k] for k in keylist])
    sizeslist = sizeslist / np.sum(sizeslist)
    colorslist = [colors[k] for k in keylist]
    poslist = [9,8,7,5.5,4.5,3.5,2,1]  # vertical bar positions with group gaps
    if ax is None:
        fig, ax = plt.subplots()
    ax.barh(poslist, sizeslist, height=0.9, color=colorslist)
    # Per-bar label colors (white on the dark first/last bars).
    colors2 = ["w", "k", "k",
               "k", "k", "k",
               "k", "w"]
    labels = ["opt, not sus, not safe", "sus, not opt, not safe",
              "safe, not opt, not sus",
              "opt, sus, not safe", "opt, safe, not sus", "sus, safe, not opt",
              "opt, sus, safe", "not opt, not sus, not safe"]
    hapos = ["right", "left", "right",
             "left", "left", "left",
             "right", "right"]
    leftpos = [sizeslist[0], sizeslist[1], sizeslist[2],
               sizeslist[3], sizeslist[4], sizeslist[5],
               sizeslist[6], sizeslist[7]]
    for i in range(len(keylist)):
        ax.annotate(" " + labels[i] + " ", (leftpos[i], poslist[i]),
                    xycoords="data", ha=hapos[i], va="center",
                    color=colors2[i], fontsize=11.5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.set_yticks([])
    ax.set_xlabel("Normalized parameter space volume of paradigms combinations")
1935688 | """modify sites and tags array fields
Revision ID: <KEY>
Revises: 844fbeba4059
Create Date: 2020-12-07 21:12:51.918148
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'  # this migration's id (placeholder from dataset redaction -- verify)
down_revision = '844fbeba4059'  # parent revision in the Alembic chain
branch_labels = None  # no named branch
depends_on = None  # no cross-branch dependency
def upgrade():
    """Replace the singular ``tags.site`` column with a ``tags.sites`` integer array."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tags', sa.Column('sites', postgresql.ARRAY(sa.Integer()), nullable=True))
    # NOTE(review): dropping the old column discards any existing site values.
    op.drop_column('tags', 'site')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original ``tags.site`` array column and drop ``tags.sites``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tags', sa.Column('site', postgresql.ARRAY(sa.INTEGER()), autoincrement=False, nullable=True))
    # NOTE(review): data written to ``sites`` after the upgrade is discarded here.
    op.drop_column('tags', 'sites')
    # ### end Alembic commands ###
# ### end Alembic commands ###
| StarcoderdataPython |
314495 | from beet import Context
def beet_default(ctx: Context):
    """Exercise scoreboard objective generation at the root and inside a
    nested ``foo/bar`` namespace (call order fixes the generated names)."""
    ctx.generate.objective()
    ctx.generate.objective("{hash}", "something")
    ctx.generate.objective("hello", criterion="playerKillCount")
    ctx.generate.objective("world", display="Something")
    nested = ctx.generate["foo"]["bar"]
    nested.objective()
    nested.objective("{hash}", "something")
    nested.objective("hello", criterion="playerKillCount")
    nested.objective("world", display="Something")
    # Raw-JSON display name on a hashed objective.
    ctx.generate.objective(
        "{hash}", "something", display={"text": "Update name", "color": "red"}
    )
| StarcoderdataPython |
4909252 | from .version import version_info as VERSION
from .version import version_str as __version__
# Public API of this package: the version tuple and its string form.
__all__ = ['VERSION', '__version__']
| StarcoderdataPython |
# Count symbol-id frequencies in UAD-0015.txt, normalize by the number of
# symbol slots, and write the values as one comma-separated line to a_1.txt.
NUM_SYMBOLS = 325

# Frequency table for every symbol id 0..324 (ids absent from the file stay 0).
sys_word = {x: 0 for x in range(NUM_SYMBOLS)}

# "with" guarantees the handle is closed; read-only mode replaces the
# original "r+" (the file was never written to).
with open("UAD-0015.txt") as infile:
    words = infile.read().split()

for word in words:
    sys_word[int(word)] += 1

# Normalize each count by the table size (the original "int(325)" was a no-op).
for x in range(NUM_SYMBOLS):
    sys_word[x] = sys_word[x] / NUM_SYMBOLS

# Emit the values comma-separated, newline-terminated, no trailing comma.
# (The original tested the last index with "x is 324" -- identity comparison
# on an int literal, which is unreliable; join avoids the test entirely.)
with open("a_1.txt", "w") as outfile:
    outfile.write(",".join(str(sys_word[x]) for x in range(NUM_SYMBOLS)) + "\n")

print(sys_word)
| StarcoderdataPython |
4838457 | # Generated by Django 3.0.7 on 2020-06-29 02:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: clinic intake information and patient/specimen records."""

    # First migration of the app: creates tables from scratch.
    initial = True
    dependencies = [
    ]
    operations = [
        # Clinic-side intake form, keyed by patient name; captures prior-test
        # history, diagnosis and the requested EGFR test panel.
        migrations.CreateModel(
            name='Clinic_information',
            fields=[
                ('clinic_name', models.CharField(help_text="Enter the patient's name", max_length=20, primary_key=True, serialize=False, verbose_name='Patient Name:')),
                ('previous', models.CharField(choices=[('Yes', 'Yes'), ('No', 'No')], default='N/A', help_text='Has patient preformed p-EGFR test in Sanomics previously?', max_length=3)),
                ('spid', models.CharField(blank=True, help_text="If yes, Enter the patient's Sanomics Patient ID", max_length=10)),
                ('diagnosis', models.CharField(blank=True, choices=[('Confirmed', 'Confirmed'), ('Pending', 'Pending')], help_text='Diagnosis of Lund Cancer:', max_length=10)),
                ('celltype', models.CharField(blank=True, choices=[('Adenocarcinoma', 'Adenocarcinoma'), ('Squamous Carcinoma', 'Squamous Carcinoma'), ('Adenosquamous Carcinoma', 'Adenosquamous Carcinoma'), ('Others', 'Others')], help_text='If confirmed, Histological Cell Type:', max_length=30)),
                ('other', models.CharField(blank=True, help_text='If choose others, please state:', max_length=30)),
                ('stage', models.CharField(blank=True, choices=[('I', 'I'), ('II', 'II'), ('III', 'III'), ('IV', 'IV')], max_length=5, verbose_name='Stage of Disease')),
                ('status', models.CharField(blank=True, choices=[('Treatment Naive', 'Treatment Naive'), ('On Chemotherapy', 'On Chemotherapy'), ('On Tyrosine Kinase Inhibitor(s)(TKI)', 'On Tyrosine Kinase Inhibitor(s)(TKI)')], max_length=100, verbose_name='Treatment Status')),
                ('additional', models.CharField(blank=True, default='', max_length=100, verbose_name='Additional Information')),
                ('request', models.CharField(blank=True, choices=[('Exon 19 Deletions + Exon 21 L858R + Exon 20 T790M', 'a) Exon 19 Deletions + Exon 21 L858R + Exon 20 T790M'), ('#Exon 20 C797S(Test for resistance to T790M-targeting EGFR TKI)', 'b) #Exon 20 C797S(Test for resistance to T790M-targeting EGFR TKI)'), ('Exon 19 Deletions + Exon 21 L858R + Exon 20 T790M + #Exon 20 C797S(Test for resistance to T790M-targeting EGFR TKI)', 'a) + b)')], help_text='Remarks: TWO Streck tubes of specimen are required for a) OR a) + b)\n ONE Streck tuve of specimen is required for b) only', max_length=200)),
            ],
        ),
        # Patient demographics and specimen details; nullable FK so records
        # survive deletion of the linked Clinic_information row (SET_NULL).
        migrations.CreateModel(
            name='Patients',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('specimen_type', models.CharField(blank=True, choices=[('Blood', 'Blood'), ('Pleural Fluid', 'Pleural Fluid'), ('others', 'Others')], max_length=20, verbose_name='Specimen Type *')),
                ('other_type', models.CharField(blank=True, help_text='If you choose others, please state:', max_length=30)),
                ('collectiondate', models.CharField(help_text='YYYY/MM/DD', max_length=10, verbose_name='Collection Date *')),
                ('collectiontime', models.CharField(help_text='HH:MM', max_length=5, verbose_name='Collection Time *')),
                ('name', models.CharField(help_text="Enter the patient's name", max_length=30, verbose_name='Full Name *')),
                ('hkid', models.CharField(help_text="Enter the patient's HKID/Passport No.", max_length=20, verbose_name='HKID/National ID/Passport ID *')),
                ('ethnicity', models.CharField(blank=True, help_text="Enter the patient's Ethnicity", max_length=20, verbose_name='Ethnicity ')),
                ('sex', models.CharField(help_text="Enter the patient's sex", max_length=1, verbose_name='Sex *')),
                ('dob', models.DateField(verbose_name='Date of Birth *')),
                ('referral', models.CharField(help_text='Enter the Referral Site', max_length=20, verbose_name='Referral Site *')),
                ('clinicid', models.CharField(blank=True, help_text='Enter the Clinic ID', max_length=10)),
                ('doctor', models.CharField(help_text='Enter the Referral Doctor', max_length=30, verbose_name='Referral Doctor *')),
                ('phone', models.CharField(blank=True, help_text='Enter the phone number', max_length=15)),
                ('fax', models.CharField(blank=True, help_text='Enter the fax number', max_length=30)),
                ('clinic', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Clinic_information')),
            ],
        ),
    ]
| StarcoderdataPython |
3360239 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : utils.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 10/06/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
import torch
__all__ = ['canonize_monitors', 'update_from_loss_module']
def canonize_monitors(monitors):
    """Normalize a monitor dict in place.

    Each value may be:
      * a list of numbers -> replaced by their mean,
      * a list of ``(value, weight)`` pairs -> replaced by the weighted mean,
      * a plain float (including the results above) -> wrapped in a
        0-dim ``torch.Tensor``.
    """
    for k, v in monitors.items():
        if isinstance(monitors[k], list):
            if not monitors[k]:
                # Fixed: an empty list used to raise IndexError at the
                # ``monitors[k][0]`` probe below; define its average as 0.0,
                # matching what sum([])/max(0, eps) would have produced.
                monitors[k] = 0.0
            elif isinstance(monitors[k][0], tuple) and len(monitors[k][0]) == 2:
                # Weighted mean; the epsilon guards against zero total weight.
                monitors[k] = sum([a * b for a, b in monitors[k]]) / max(sum([b for _, b in monitors[k]]), 1e-6)
            else:
                monitors[k] = sum(v) / max(len(v), 1e-3)
        if isinstance(monitors[k], float):
            monitors[k] = torch.tensor(monitors[k])
def update_from_loss_module(monitors, output_dict, loss_update):
    """Merge a loss module's ``(monitors, outputs)`` pair into the given dicts in place."""
    extra_monitors, extra_outputs = loss_update
    monitors.update(extra_monitors)
    output_dict.update(extra_outputs)
| StarcoderdataPython |
9708193 | import json
import os
import time
import mock
from pecan import set_config
from pecan.testing import load_test_app
from bm_instance_agent.common import utils as bm_utils
from bm_instance_agent.systems import base as driver_base
from bm_instance_agent.tests import base
from bm_instance_agent.tests.unit import fake
def wait_func_called(mock_func, time_count=0):
    """Poll ``mock_func.assert_called()`` every 0.1s for up to ~3s.

    Returns as soon as the mock reports a call; re-raises the final
    AssertionError once the time budget (starting from ``time_count``)
    is exhausted.
    """
    while True:
        try:
            mock_func.assert_called()
            return
        except AssertionError:
            if time_count >= 3:
                raise
            time.sleep(0.1)
            time_count += 0.1
class ApiTestBase(base.TestCase):
    """Base test case that boots a Pecan test app from the agent's API config."""
    def setUp(self):
        super(ApiTestBase, self).setUp()
        # Load the WSGI test app using the API package's pecan config file.
        self.app = load_test_app(os.path.join(
            os.path.dirname(__file__),
            '../../api/config.py'))
    def tearDown(self):
        super(ApiTestBase, self).tearDown()
        # Reset pecan's global configuration so test cases stay isolated.
        set_config({}, overwrite=True)
class ApiTest(ApiTestBase):
    @mock.patch('bm_instance_agent.systems.base.SystemDriverBase.ping')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_ping(self, mock_post, mock_driv, mock_ping):
        """POST /v2/ping returns 200 and reports success via the async callback."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_ping.return_value = None
        # Call the api
        data = {
            'bmInstance': fake.BM_INSTANCE1
        }
        resp = self.app.post('/v2/ping',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # The agent replies asynchronously; wait until the callback fires.
        wait_func_called(mock_post)
        # Expected callback target/headers/body for the ping result.
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None,
            'ping': {
                'bmInstanceUuid': '7b432900-c0ad-47e7-b1c7-01b74961c235'
            }
        }
        mock_post.assert_called_once_with(url, headers, body)
    @mock.patch('bm_instance_agent.manager.AgentManager.reboot')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_reboot(self, mock_post, mock_driv, mock_reboot):
        """POST /v2/reboot returns 200, posts success and calls manager.reboot."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_reboot.return_value = None
        # NOTE(review): runs test_ping first, presumably to prime agent state.
        self.test_ping()
        # Call the api
        data = {'bmInstance': fake.BM_INSTANCE1}
        resp = self.app.post('/v2/reboot',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # Wait the thread work done
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager call is correct (bm instance keys snake-cased).
        mock_reboot.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1))
    @mock.patch('bm_instance_agent.manager.AgentManager.stop')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_stop(self, mock_post, mock_driv, mock_stop):
        """POST /v2/stop returns 200, posts success and calls manager.stop."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_stop.return_value = None
        self.test_ping()
        # Call the api
        data = {'bmInstance': fake.BM_INSTANCE1}
        resp = self.app.post('/v2/stop',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # Wait the thread work done
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager call is correct.
        mock_stop.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1))
    @mock.patch('bm_instance_agent.manager.AgentManager.attach_volume')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_volume_attach(self, mock_post, mock_driv, mock_vol_attach):
        """POST /v2/volume/attach returns 200 and forwards instance + volume."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_vol_attach.return_value = None
        self.test_ping()
        # Call the api
        data = {'bmInstance': fake.BM_INSTANCE1, 'volume': fake.VOLUME1}
        resp = self.app.post('/v2/volume/attach',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # Wait the thread work done
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager call is correct (both payloads snake-cased).
        mock_vol_attach.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1),
            volume=bm_utils.camel_obj_to_snake(fake.VOLUME1))
    @mock.patch('bm_instance_agent.manager.AgentManager.detach_volume')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_volume_detach(self, mock_post, mock_driv, mock_vol_detach):
        """POST /v2/volume/detach returns 200, posts a success callback, and
        forwards instance + volume (snake_case) to AgentManager.detach_volume."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_vol_detach.return_value = None
        self.test_ping()
        # Call the api
        data = {'bmInstance': fake.BM_INSTANCE1, 'volume': fake.VOLUME1}
        resp = self.app.post('/v2/volume/detach',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # The handler completes in the background; block until the
        # callback _post has actually been invoked.
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager handler received the converted payloads
        mock_vol_detach.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1),
            volume=bm_utils.camel_obj_to_snake(fake.VOLUME1))
    @mock.patch('bm_instance_agent.manager.AgentManager.attach_port')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_port_attach(self, mock_post, mock_driv, mock_port_attach):
        """POST /v2/nic/attach returns 200, posts a success callback, and
        forwards instance + port (snake_case) to AgentManager.attach_port."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_port_attach.return_value = None
        self.test_ping()
        # Call the api
        data = {'bmInstance': fake.BM_INSTANCE1, 'nic': fake.PORT1}
        resp = self.app.post('/v2/nic/attach',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # The handler completes in the background; block until the
        # callback _post has actually been invoked.
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager handler received the converted payloads
        mock_port_attach.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1),
            port=bm_utils.camel_obj_to_snake(fake.PORT1))
    @mock.patch('bm_instance_agent.manager.AgentManager.detach_port')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_port_detach(self, mock_post, mock_driv, mock_port_detach):
        """POST /v2/nic/detach returns 200, posts a success callback, and
        forwards instance + port (snake_case) to AgentManager.detach_port."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_port_detach.return_value = None
        self.test_ping()
        # Call the api
        data = {'bmInstance': fake.BM_INSTANCE1, 'nic': fake.PORT1}
        resp = self.app.post('/v2/nic/detach',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # The handler completes in the background; block until the
        # callback _post has actually been invoked.
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager handler received the converted payloads
        mock_port_detach.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1),
            port=bm_utils.camel_obj_to_snake(fake.PORT1))
    @mock.patch('bm_instance_agent.manager.AgentManager.update_default_route')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_default_route_change(self, mock_post,
                                  mock_driv, mock_update_route):
        """POST /v2/defaultRoute/change returns 200, posts a success
        callback, and passes old/new ports to update_default_route."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_update_route.return_value = None
        self.test_ping()
        # Call the api
        data = {
            'bmInstance': fake.BM_INSTANCE1,
            'oldDefault': fake.PORT1,
            'newDefault': fake.PORT2
        }
        resp = self.app.post('/v2/defaultRoute/change',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # The handler completes in the background; block until the
        # callback _post has actually been invoked.
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None}
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager handler received the converted payloads
        mock_update_route.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1),
            old_default_port=bm_utils.camel_obj_to_snake(fake.PORT1),
            new_default_port=bm_utils.camel_obj_to_snake(fake.PORT2))
    @mock.patch('bm_instance_agent.manager.AgentManager.update_password')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    @mock.patch('bm_instance_agent.api.utils._post')
    def test_password_change(self, mock_post, mock_driv, mock_update_passwd):
        """POST /v2/password/change returns 200, posts a success callback,
        and forwards instance + new password to update_password."""
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_update_passwd.return_value = None
        self.test_ping()
        # Call the api
        data = {
            'password': '<PASSWORD>',
            'bmInstance': fake.BM_INSTANCE1
        }
        resp = self.app.post('/v2/password/change',
                             headers=fake.HEADERS,
                             params=json.dumps(data))
        self.assertEqual(200, resp.status_int)
        # The handler completes in the background; block until the
        # callback _post has actually been invoked.
        wait_func_called(mock_post)
        url = 'http://127.0.0.1:8080'
        headers = {'taskuuid': 'd123d27c-4b4f-4368-a33c-acfc5d9eaf2c'}
        body = {
            'success': True,
            'error': None
        }
        calls = [mock.call(url, headers, body)]
        mock_post.assert_has_calls(calls)
        # Assert the manager handler received the converted payloads
        mock_update_passwd.assert_called_once_with(
            bm_instance=bm_utils.camel_obj_to_snake(fake.BM_INSTANCE1),
            password='<PASSWORD>')
    @mock.patch('bm_instance_agent.manager.AgentManager.console')
    @mock.patch('bm_instance_agent.manager.AgentManager._load_driver')
    def test_console(self, mock_driv, mock_console):
        """GET /v2/console/prepare returns the console port from the
        manager, merged with the success/error envelope.

        Unlike the POST endpoints this one responds synchronously, so no
        callback _post mock is needed.
        """
        mock_driv.return_value = driver_base.SystemDriverBase()
        mock_console.return_value = {'port': 12345}
        self.test_ping()
        # Call the api
        resp = self.app.get('/v2/console/prepare', headers=fake.HEADERS)
        self.assertEqual(200, resp.status_int)
        data = {
            'success': True,
            'error': None,
            'port': 12345}
        self.assertEqual(data, resp.json_body)
        # Assert the console handler was invoked
        mock_console.assert_called()
| StarcoderdataPython |
8123893 | <reponame>p-montero/py-ans<filename>class2/ex1_c.py
#!/usr/bin/env python
'''
Minimal import demo: pulls ``phello`` out of the local ``my_func`` module
under the alias ``p`` and calls it once when the script is run.
'''
from my_func import phello as p
p()  # presumably prints a greeting; the behavior lives in my_func.phello
| StarcoderdataPython |
3524797 | <reponame>etdv-thevoid/pokemon-rgb-enhanced
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Use this tool to dump an asm file for a new source code or disassembly project.
usage:
from dump_sections import dump_sections
output = dump_sections("../../butt.gbc")
file_handler = open("main.asm", "w")
file_handler.write(output)
file_handler.close()
You can also use this script from the shell, where it will look for
"baserom.gbc" in the current working path or whatever file path you pass in the
first positional argument.
"""
import os
import sys
import argparse
def upper_hex(input):
    """Return *input* as an uppercase hexadecimal string.

    0 (or the string "0") maps to "0"; values up to 0xF render as a single
    digit, and anything larger as at least two zero-padded digits.
    """
    if input in (0, "0"):
        return "0"
    if input <= 0xF:
        return "%X" % input
    return "%02X" % input
def format_bank_number(address, bank_size=0x4000):
    """
    Returns a str of the hex number of the bank based on the address.

    Fix: use floor division. On Python 3, ``address / bank_size`` yields a
    float, and upper_hex's "%x" formatting raises TypeError on floats.
    """
    return upper_hex(address // bank_size)
def calculate_bank_quantity(path, bank_size=0x4000):
    """Return how many *bank_size*-byte banks fit in the file at *path*.

    The result is a float, so a partial trailing bank shows up as a
    fractional part.
    """
    size_in_bytes = os.lstat(path).st_size
    return float(size_in_bytes) / bank_size
def dump_section(bank_number, separator="\n\n"):
    """Return an rgbds SECTION header line for *bank_number*.

    Bank 0 (int or "0") becomes the HOME section; any other bank becomes a
    DATA section pinned to that bank. For non-zero banks *bank_number* must
    be a (hex) string, since it is concatenated into the header directly.
    """
    if bank_number in (0, "0"):
        header = "SECTION \"bank0\",HOME"
    else:
        header = ("SECTION \"bank" + bank_number
                  + "\",DATA,BANK[$" + bank_number + "]")
    return header + separator
def dump_incbin_for_section(address, bank_size=0x4000, baserom="baserom.gbc", separator="\n\n"):
    """Return an INCBIN directive covering one whole bank.

    The directive pulls *bank_size* bytes starting at *address* out of
    *baserom*, with both numbers rendered as uppercase hex by upper_hex.
    """
    directive = "INCBIN \"{rom}\",${start},${length}".format(
        rom=baserom,
        start=upper_hex(address),
        length=upper_hex(bank_size),
    )
    return directive + separator
def dump_sections(path, bank_size=0x4000, initial_bank=0, last_bank=None, separator="\n\n"):
    """
    Returns a str of assembly source code. The source code delineates each
    SECTION and includes bytes from the file specified by baserom.

    Fixes: (1) calculate_bank_quantity() returns a float, which range()
    rejects on Python 3 — truncate to an int (partial trailing banks are
    skipped, matching the old integer-truncation behavior); (2) use an
    explicit ``is None`` check so a caller-provided last_bank of 0 is
    honored instead of being recomputed.
    """
    if last_bank is None:
        last_bank = int(calculate_bank_quantity(path, bank_size=bank_size))
    if last_bank < initial_bank:
        raise Exception("last_bank must be greater than or equal to initial_bank")

    baserom_name = os.path.basename(path)

    output = ""
    for bank_number in range(initial_bank, last_bank):
        address = bank_number * bank_size

        # get a formatted hex number of the bank based on the address
        formatted_bank_number = format_bank_number(address, bank_size=bank_size)

        # SECTION header for this bank
        output += dump_section(formatted_bank_number, separator=separator)

        # INCBIN a range of bytes from the ROM
        output += dump_incbin_for_section(address, bank_size=bank_size, baserom=baserom_name, separator=separator)

    # normalize trailing whitespace to exactly one newline
    if output[-2:] == "\n\n":
        output = output[:-2]
    output += "\n"

    return output
if __name__ == "__main__":
    # CLI entry point: optional positional ROM path, defaulting to
    # ./baserom.gbc, then dump the generated asm to stdout.
    parser = argparse.ArgumentParser()
    parser.add_argument("rompath", nargs="?", metavar="rompath", type=str)
    args = parser.parse_args()

    # default to "baserom.gbc" in the current working directory
    baserom = "baserom.gbc"

    # but let the user override the default
    if args.rompath:
        baserom = args.rompath

    # generate some asm
    output = dump_sections(baserom)

    # dump everything to stdout
    sys.stdout.write(output)
| StarcoderdataPython |
8151643 | from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from app import app
# Dash page layout for the "Evaluate" tab: a Markdown summary of model
# behavior followed by a pre-rendered figure exported from the random
# forest training run.
layout = [dcc.Markdown("""
### Evaluate
The distribution of predictions closely matches the true distribution of incomes, with slight overpredictions around the median and underpredictions of the few high income outliers.
Unsurprisingly, median income of an area is highly correlated with unemployment and poverty rates. High levels of Professional and Services employment greatly boost incomes.
The model predicts highest incomes for areas with average commute times around 40 minutes, presumably reflecting the prosperity of suburbs compared to urban and rural communities.
"""),
html.Img(src='/assets/randomforest_title.png', style={'width': '80%'})]
| StarcoderdataPython |
8136330 | # --------------------------------------------------------
# Tensorflow TIN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import numpy as np
import tensorflow as tf
import argparse
import pickle
import json
import ipdb
import os
from ult.config import cfg
from networks.TIN_HICO_with_part import ResNet50
from models.test_Solver_HICO_DET_binary import test_net
os.environ['CUDA_VISIBLE_DEVICES'] = '2'  # expose only GPU 2 to TensorFlow (old comment wrongly said GPU 0)
def parse_args():
    """Parse command-line options for testing TIN on the HICO dataset.

    Returns the argparse namespace with fields ``iteration``, ``model``,
    ``object_thres`` and ``human_thres``.
    """
    parser = argparse.ArgumentParser(description='Test TIN on HICO dataset')
    option_specs = [
        ('--num_iteration', 'iteration', 'Specify which weight to load',
         200000, int),
        ('--model', 'model', 'Select model',
         'TIN_10w_with_part', str),
        ('--object_thres', 'object_thres', 'Object threshold',
         0.3, float),
        ('--human_thres', 'human_thres', 'Human threshold',
         0.8, float),
    ]
    for flag, dest, help_text, default, value_type in option_specs:
        parser.add_argument(flag, dest=dest, help=help_text,
                            default=default, type=value_type)
    return parser.parse_args()
if __name__ == '__main__':

    args = parse_args()

    # Pre-computed Faster R-CNN detections (with pose/part features) for
    # the HICO-DET test split.
    Test_RCNN = pickle.load(open('Data/Test_Faster_RCNN_R-50-PFN_2x_HICO_DET_with_pose_with_part.pkl', "rb"))

    # pretrain model: checkpoint selected via --model / --num_iteration
    weight = cfg.ROOT_DIR + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'

    print('Human thres = ' + str(args.human_thres) + ', Object thres = ' + str(args.object_thres) + ', iter = ' + str(
        args.iteration) + ', path = ' + weight)

    # Detection results are pickled here for later evaluation.
    output_file = '-Results/' + str(args.iteration) + '_' + args.model + '.pkl'
    print('output file = ', output_file)

    # init session (allow_growth avoids grabbing all GPU memory up front)
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)

    # Build the network graph in test mode and restore the checkpoint.
    net = ResNet50()
    net.create_architecture(False)

    saver = tf.train.Saver()
    saver.restore(sess, weight)

    print('Pre-trained weights loaded.')

    test_net(sess, net, Test_RCNN, output_file, args.object_thres, args.human_thres)
    sess.close()
| StarcoderdataPython |
6516090 | <reponame>Louquinze/auto-sklearn
import os
import sys
import unittest
from autosklearn.pipeline.components.base import find_components, \
AutoSklearnClassificationAlgorithm
# Make the sibling dummy_components package importable regardless of the
# working directory this test file is run from.
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(this_dir)
class TestBase(unittest.TestCase):
    """Tests for auto-sklearn's component auto-discovery helper."""

    def test_find_components(self):
        """find_components should locate exactly the two dummy classifier
        components shipped in the local ``dummy_components`` package."""
        c = find_components('dummy_components',
                            os.path.join(this_dir, 'dummy_components'),
                            AutoSklearnClassificationAlgorithm)
        print('COMPONENTS: %s' % repr(c))
        self.assertEqual(len(c), 2)
        self.assertEqual(c['dummy_component_1'].__name__, 'DummyComponent1')
        self.assertEqual(c['dummy_component_2'].__name__, 'DummyComponent2')
| StarcoderdataPython |
303700 | import matplotlib.pyplot as plt
import cv2
# Side-by-side comparison figure for x3 super-resolution: the raw
# low-resolution DIV2K input, plain bicubic upscaling, and the EDSR model
# output, saved as Result_report.png.
lr = cv2.imread('../datasets/DIV2K/DIV2K_test_lr_unknown/11.png')
edsr = cv2.imread('../experiment/x3_Dila_ensemble/results-Demo/11_x3_SR.png')

# OpenCV returns HxWxC arrays; the SR image fixes the target size.
lh, lw, _ = lr.shape
h, w, _ = edsr.shape

# fig, axs = plt.subplots(2, 3, num='Result X3', figsize=(10, 9))
# fig.suptitle('X3, {}x{} -> {}x{}'.format(lw, lh, w, h))
# axs[0, 0].set_title('Original')
# axs[0, 0].imshow(cv2.cvtColor(lr, cv2.COLOR_BGR2RGB))
# axs[0, 1].set_title('NEAREST')
# axs[0, 1].imshow(cv2.cvtColor(cv2.resize(
#     lr, (w, h), interpolation=cv2.INTER_NEAREST), cv2.COLOR_BGR2RGB))
# axs[0, 2].set_title('LINEAR')
# axs[0, 2].imshow(cv2.cvtColor(cv2.resize(
#     lr, (w, h), interpolation=cv2.INTER_LINEAR), cv2.COLOR_BGR2RGB))
# axs[1, 0].set_title('CUBIC')
# axs[1, 0].imshow(cv2.cvtColor(cv2.resize(
#     lr, (w, h), interpolation=cv2.INTER_CUBIC), cv2.COLOR_BGR2RGB))
# axs[1, 1].set_title('AREA')
# axs[1, 1].imshow(cv2.cvtColor(cv2.resize(
#     lr, (w, h), interpolation=cv2.INTER_AREA), cv2.COLOR_BGR2RGB))
# axs[1, 2].set_title('EDSR')
# axs[1, 2].imshow(cv2.cvtColor(edsr, cv2.COLOR_BGR2RGB))

fig, axs = plt.subplots(1, 3, num='Result X3', figsize=(10, 5))
fig.suptitle('X3, {}x{} -> {}x{}'.format(lw, lh, w, h))
axs[0].set_title('Original')
# cv2 loads BGR; convert to RGB for matplotlib display.
axs[0].imshow(cv2.cvtColor(lr, cv2.COLOR_BGR2RGB))
axs[1].set_title('CUBIC')
axs[1].imshow(cv2.cvtColor(cv2.resize(
    lr, (w, h), interpolation=cv2.INTER_CUBIC), cv2.COLOR_BGR2RGB))
axs[2].set_title('EDSR')
axs[2].imshow(cv2.cvtColor(edsr, cv2.COLOR_BGR2RGB))
fig.savefig('./Result_report.png')
| StarcoderdataPython |
6486168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.BaseWebResponse import BaseWebResponse
class AntfortuneYebEntityequityVerifyResponse(AlipayResponse):
    """Response wrapper for the antfortune.yeb.entityequity.verify API.

    Exposes the payload as a BaseWebResponse via the ``response`` property,
    coercing raw dicts from the gateway into BaseWebResponse objects.
    """

    def __init__(self):
        super(AntfortuneYebEntityequityVerifyResponse, self).__init__()
        self._response = None  # BaseWebResponse, populated after parsing

    @property
    def response(self):
        return self._response

    @response.setter
    def response(self, value):
        # Accept either a ready BaseWebResponse or the raw dict form.
        if isinstance(value, BaseWebResponse):
            self._response = value
        else:
            self._response = BaseWebResponse.from_alipay_dict(value)

    def parse_response_content(self, response_content):
        """Parse raw content via the base class, then extract 'response'."""
        response = super(AntfortuneYebEntityequityVerifyResponse, self).parse_response_content(response_content)
        if 'response' in response:
            self.response = response['response']
| StarcoderdataPython |
5174355 |
import re
from depccg.combinator import ja_default_binary_rules, unary_rule
from depccg.cat import Category
from depccg.tree import Tree
from depccg.token import Token
# Map each rule marker used in the Japanese CCGBank dump ('SSEQ', '>', '<',
# the composition variants, ...) to the corresponding depccg binary rule.
combinators = {sign: rule for rule, sign in zip(
    ja_default_binary_rules,
    ['SSEQ', '>', '<', '>B', '<B1', '<B2', '<B3', '<B4', '>Bx1', '>Bx2', '>Bx3'])
}

# The unary markers all share the single generic unary rule.
for sign in ['ADNext', 'ADNint', 'ADV0', 'ADV1', 'ADV2']:
    combinators[sign] = unary_rule()

# Matches '{...}' dependency annotations embedded in category strings.
DEPENDENCY = re.compile(r'{.+?}')
def read_ccgbank(filepath):
    """Yield ``(id, tokens, tree)`` for each non-empty line of a Japanese
    CCGBank file.

    ``id`` is the zero-based line number as a string; ``tokens`` and
    ``tree`` come from _JaCCGLineReader.

    Fix: open the file in a ``with`` block so the handle is closed even if
    the consumer abandons the generator (the original leaked it).
    """
    with open(filepath) as ccgbank_file:
        for i, line in enumerate(ccgbank_file):
            line = line.strip()
            if len(line) == 0:
                continue
            tree, tokens = _JaCCGLineReader(line).parse()
            yield str(i), tokens, tree
class _JaCCGLineReader(object):
    """Recursive-descent parser for one line of Japanese CCGBank output.

    A line is a bracketed tree such as ``{> CAT {...} {...}}``; leaves
    carry a category plus ``surf/base/pos1/pos2`` token fields. Parsing
    collects Token objects in reading order and returns a depccg Tree.
    """
    def __init__(self, line):
        self.lang = 'ja'
        self.line = line
        self.index = 0  # cursor into self.line
        self.word_id = -1  # index of the most recently read terminal
        self.tokens = []  # Token objects gathered while parsing
    def next(self, target):
        """Consume and return the text up to (excluding) the next *target*."""
        end = self.line.find(target, self.index)
        res = self.line[self.index:end]
        self.index = end + 1
        return res
    def check(self, text, offset=0):
        """Raise unless the character at cursor (+offset) equals *text*."""
        if self.line[self.index + offset] != text:
            raise RuntimeError('AutoLineReader.check catches parse error')
    def peek(self):
        """Return the character at the cursor without consuming it."""
        return self.line[self.index]
    def parse(self):
        """Parse the whole line; return (root_tree, tokens)."""
        res = self.next_node()
        return res, self.tokens
    @property
    def next_node(self):
        """Return the parser method for the node starting at the cursor.

        If the text right after '{' names a known combinator it is an
        internal node, otherwise a leaf.
        """
        end = self.line.find(' ', self.index)
        if self.line[self.index+1:end] in combinators:
            return self.parse_tree
        else:
            return self.parse_leaf
    def parse_leaf(self):
        """Parse a terminal ``{CAT_... surf/base/pos1/pos2}`` node."""
        self.word_id += 1
        self.check('{')
        cat = self.next(' ')[1:]
        # Strip the trailing '_...' feature suffix. NOTE(review): if no '_'
        # is present, find() returns -1 and this chops the last character —
        # confirm all leaf categories carry a suffix.
        cat = cat[:cat.find('_')]
        cat = DEPENDENCY.sub('', cat)  # drop {...} dependency annotations
        cat = Category.parse(cat)
        surf, base, pos1, pos2 = self.next('}')[:-1].split('/')
        token = Token(surf=surf, base=base, pos1=pos1, pos2=pos2)
        self.tokens.append(token)
        return Tree.make_terminal(surf, cat, self.word_id, self.lang)
    def parse_tree(self):
        """Parse an internal ``{OP CAT child...}`` node (1 or 2 children)."""
        self.check('{')
        op = self.next(' ')
        op = combinators[op[1:]]
        cat = DEPENDENCY.sub('', self.next(' '))
        cat = Category.parse(cat)
        self.check('{')
        children = []
        while self.peek() != '}':
            children.append(self.next_node())
            if self.peek() == ' ':
                self.next(' ')
        self.next('}')
        if len(children) == 1:
            return Tree.make_unary(cat, children[0], self.lang)
        else:
            assert len(children) == 2, f'failed to parse, invalid number of children: {self.line}'
            left, right = children
            # The combinator decides which child heads the constituent.
            left_is_head = op.head_is_left(left.cat, right.cat)
            return Tree.make_binary(cat, left_is_head, left, right, op, self.lang)
| StarcoderdataPython |
1845332 | # Generated by Django 3.1.6 on 2021-03-14 18:12
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``path_to_activities_on_device`` field from the Settings
    model (the field introduced by the 0004 migration this depends on)."""

    dependencies = [
        ("wkz", "0004_settings_path_to_activities_on_device"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="settings",
            name="path_to_activities_on_device",
        ),
    ]
| StarcoderdataPython |
8094562 | <reponame>hangg7/deformable-kernels
#!/usr/bin/env python3
#
# File : cond_conv.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 12/25/2019
#
# Distributed under terms of the MIT license.
import torch
from apex import amp
from torch import nn
class CondConv2d(nn.Module):
    """Conditionally parameterized convolution (CondConv).

    Keeps ``num_experts`` expert kernel banks and, for every input sample,
    mixes them with sigmoid gates computed from globally averaged features.
    The per-sample mixed kernels are applied in a single grouped conv2d
    call by folding the batch dimension into the channel dimension.
    """

    def __init__(self, num_experts, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super().__init__()
        self.num_experts = num_experts
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # Bug fix: `padding` was accepted but never stored, so forward()
        # and extra_repr() crashed with AttributeError.
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode
        assert not bias

        # Bug fix: torch.tensor(a, b, c, d) is invalid (it expects data,
        # not a shape). Allocate the expert weight bank explicitly and give
        # it a standard conv initialization.
        self.weight = nn.Parameter(
            torch.empty(
                num_experts * out_channels,
                in_channels // self.groups,
                kernel_size,
                kernel_size,
            )
        )
        nn.init.kaiming_normal_(self.weight)

        # Routing head: one sigmoid gate per expert, fed by pooled features.
        self.fc = nn.Linear(in_channels, num_experts)
        self.fc.zero_init = True

    @amp.float_function
    def dynaic_inference(self, x, weight):
        """Return per-sample kernels as a gated mixture of expert kernels.

        NOTE(review): the ``weight`` argument is kept for interface
        compatibility but is unused; the expert bank ``self.weight`` is
        what gets mixed. Runs in fp32 under apex amp.
        """
        n = x.shape[0]
        avg_x = x.mean((2, 3))  # global average pool -> (n, in_channels)
        gate_x = torch.sigmoid(self.fc(avg_x))  # (n, num_experts) gates
        # Mix expert banks per sample, then reshape into a stacked kernel
        # of shape (n * out_channels, in_channels // groups, k, k).
        weight = torch.mm(
            gate_x,
            self.weight.reshape(self.num_experts, -1)
        ).reshape(
            n * self.out_channels,
            self.in_channels // self.groups,
            self.kernel_size,
            self.kernel_size,
        )
        return weight

    def forward(self, x):
        n, _, h, w = x.shape
        weight = self.dynaic_inference(x, self.weight)
        # Fold the batch into channels and use grouped convolution so each
        # sample is convolved with its own mixed kernel in one call.
        # Bug fix: F.conv2d takes no `padding_mode` kwarg (that is an
        # nn.Conv2d option), and `self.padding` was previously unset.
        out = nn.functional.conv2d(
            x.reshape(1, n * self.in_channels, h, w),
            weight,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=n * self.groups,
        )
        out = out.reshape(n, self.out_channels, *out.shape[2:])
        return out

    def extra_repr(self):
        # Bug fix: the original referenced nonexistent `scale`/`zero_point`
        # attributes (KeyError in .format) and called len() on int padding.
        s = ('{num_experts}, {in_channels}, {out_channels}'
             ', kernel_size={kernel_size}, stride={stride}')
        if self.padding != 0:
            s += ', padding={padding}'
        if self.dilation != 1:
            s += ', dilation={dilation}'
        if self.groups != 1:
            s += ', groups={groups}'
        return s.format(**self.__dict__)
| StarcoderdataPython |
5195191 | <reponame>dharjani/flask-restapi-aws
import os

from dotenv import load_dotenv

# Load variables from a local .env file into the environment before reading.
load_dotenv()

# S3 settings consumed by the Flask REST API; each is None when unset.
S3_BUCKET = os.getenv("S3_BUCKET")  # target bucket name
S3_KEY = os.getenv("S3_KEY")  # presumably the AWS access key id
S3_SECRET = os.getenv("S3_SECRET_ACCESS_KEY")  # AWS secret access key
S3_URL_PREFIX = os.getenv("S3_URL_PREFIX") | StarcoderdataPython |
4994367 | <gh_stars>0
# -*- coding:utf-8 -*-
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the manifest_xml.py module."""
from __future__ import print_function
import unittest
import error
import manifest_xml
class ManifestValidateFilePaths(unittest.TestCase):
    """Check _ValidateFilePaths helper.

    This doesn't access a real filesystem.
    """

    def check_both(self, *args):
        """Run the same validation for both the copyfile and linkfile modes."""
        manifest_xml.XmlManifest._ValidateFilePaths('copyfile', *args)
        manifest_xml.XmlManifest._ValidateFilePaths('linkfile', *args)

    def test_normal_path(self):
        """Make sure good paths are accepted."""
        self.check_both('foo', 'bar')
        self.check_both('foo/bar', 'bar')
        self.check_both('foo', 'bar/bar')
        self.check_both('foo/bar', 'bar/bar')

    def test_symlink_targets(self):
        """Some extra checks for symlinks."""
        def check(*args):
            manifest_xml.XmlManifest._ValidateFilePaths('linkfile', *args)

        # We allow symlinks to end in a slash since we allow them to point to dirs
        # in general.  Technically the slash isn't necessary.
        check('foo/', 'bar')
        # We allow a single '.' to get a reference to the project itself.
        check('.', 'bar')

    def test_bad_paths(self):
        """Make sure bad paths (src & dest) are rejected."""
        PATHS = (
            '..',
            '../',
            './',
            'foo/',
            './foo',
            '../foo',
            'foo/./bar',
            'foo/../../bar',
            '/foo',
            './../foo',
            '.git/foo',
            # Check case folding.
            '.GIT/foo',
            'blah/.git/foo',
            '.repo/foo',
            '.repoconfig',
            # Block ~ due to 8.3 filenames on Windows filesystems.
            '~',
            'foo~',
            'blah/foo~',
            # Block Unicode characters that get normalized out by filesystems.
            u'foo\u200Cbar',
        )
        # Each bad path must be rejected whether it is source or destination.
        for path in PATHS:
            self.assertRaises(
                error.ManifestInvalidPathError, self.check_both, path, 'a')
            self.assertRaises(
                error.ManifestInvalidPathError, self.check_both, 'a', path)
| StarcoderdataPython |
6510530 | <filename>peleenet/components/train/src/peleenet.py
import argparse
import datetime
import json
import math
import os
import pickle
import shutil
from collections import OrderedDict
from random import randrange
from typing import List, Tuple
import numpy as np # type: ignore
import tensorflow as tf # type: ignore
from PIL import Image # type: ignore
from tensorflow.keras import Sequential, regularizers # type: ignore
from tensorflow.keras.callbacks import (ModelCheckpoint, # type: ignore
ReduceLROnPlateau)
from tensorflow.keras.layers import (Activation, # type: ignore
AveragePooling2D, BatchNormalization,
Concatenate, Conv2D, Dense, Dropout,
Flatten, GlobalAveragePooling2D, Input,
MaxPool2D)
from tensorflow.keras.models import Model # type: ignore
import tensorflow_datasets as tfds # type: ignore
class _DenseLayer(Model):
    """PeleeNet two-way dense layer: two bottleneck branches whose outputs
    are concatenated with the input (dense connectivity).

    Each branch contributes growth_rate/2 channels; branch 2 stacks two
    3x3 convs for a larger receptive field. NOTE(review): ``drop_rate`` is
    accepted but never used inside this layer.
    """
    def __init__(self, num_input_features, growth_rate, bottleneck_width, drop_rate):
        super(_DenseLayer, self).__init__()

        # Each of the two branches produces half of the requested growth.
        growth_rate: int = int(growth_rate / 2)
        inter_channel: int = int(growth_rate * bottleneck_width / 4) * 4

        # Cap the bottleneck width so it never exceeds half the input width
        # (kept as a multiple of 4).
        if inter_channel > num_input_features / 2:
            inter_channel = int(num_input_features / 8) * 4
            print(f'adjusting inter_channel to {inter_channel}')

        # Branch 1: 1x1 bottleneck -> 3x3 conv.
        self.branch1a = BasicConv2D(inter_channel, kernel_size=1, padding="same")
        self.branch1b = BasicConv2D(growth_rate, kernel_size=3, padding="same")

        # Branch 2: 1x1 bottleneck -> two stacked 3x3 convs.
        self.branch2a = BasicConv2D(inter_channel, kernel_size=1, padding="same")
        self.branch2b = BasicConv2D(growth_rate, kernel_size=3, padding="same")
        self.branch2c = BasicConv2D(growth_rate, kernel_size=3, padding="same")

    def call(self, x):
        branch1 = self.branch1a(x)
        branch1 = self.branch1b(branch1)

        branch2 = self.branch2a(x)
        branch2 = self.branch2b(branch2)
        branch2 = self.branch2c(branch2)

        # Dense connectivity: keep the input alongside both branch outputs.
        return Concatenate()([x, branch1, branch2])
class _DenseBlock(Sequential):
    """A stack of ``num_layers`` two-way dense layers (one PeleeNet stage)."""
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            # Layer i sees the original features plus everything grown so far.
            layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
            self.add(layer)
class _StemBlock(Model):
    """PeleeNet stem: cheap two-branch downsampling before the dense stages.

    One branch downsamples with a 1x1 bottleneck plus strided 3x3 conv, the
    other with max pooling; their concatenation is fused by a 1x1 conv.
    """
    def __init__(self, num_init_features):
        super(_StemBlock, self).__init__()

        num_stem_features = int(num_init_features/2)

        self.stem1 = BasicConv2D(out_channels=num_init_features, kernel_size=3, strides=2)
        self.stem2a = BasicConv2D(out_channels=num_stem_features, kernel_size=1, strides=1)
        self.stem2b = BasicConv2D(out_channels=num_init_features, kernel_size=3, strides=2)
        self.stem3 = BasicConv2D(out_channels=num_init_features, kernel_size=1, strides=1)
        self.pool = MaxPool2D(2)

    def call(self, x):
        out = self.stem1(x)

        branch2 = self.stem2a(out)
        branch2 = self.stem2b(branch2)
        branch1 = self.pool(out)

        # Bug fix: the original concatenated branch1 with itself, silently
        # discarding the conv branch (branch2 was computed but never used).
        out = Concatenate()([branch1, branch2])
        out = self.stem3(out)

        return out
class BasicConv2D(Model):
    """Conv2D + BatchNorm (+ optional ReLU), bias-free, with L2 weight decay.

    Extra keyword arguments (kernel_size, strides, padding, ...) are passed
    straight through to the underlying Conv2D layer.
    """
    def __init__(self, out_channels, activation=True, **kwargs):
        super(BasicConv2D, self).__init__()
        self.conv = Conv2D(filters=out_channels, use_bias=False, kernel_initializer='glorot_uniform', kernel_regularizer=tf.keras.regularizers.l2(5e-4), **kwargs)
        self.norm = BatchNormalization()
        self.activation = activation  # apply ReLU after BN when True

    def call(self, x):
        x = self.conv(x)
        x = self.norm(x)
        if self.activation:
            return Activation('relu')(x)
        else:
            return x
class PeleeNet(Model):
    """PeleeNet backbone: a stem block followed by four dense stages.

    Each stage is a _DenseBlock plus a 1x1 transition conv; every stage
    except the last also halves spatial resolution with average pooling.
    The head is global average pooling, dropout, and a Dense classifier.
    """
    def __init__(self, growth_rate=32, block_config=[3,4,8,6], num_init_features=32,
                 bottleneck_width=[1,2,4,4], drop_rate=0.5, num_classes=1000):
        super(PeleeNet, self).__init__()

        # NOTE(review): Sequential is given a single layer here rather than
        # a list — confirm this constructs as intended on the TF version in
        # use.
        self.features = Sequential(
            _StemBlock(num_init_features))

        # Per-stage growth rate: either one shared value or one per stage.
        if type(growth_rate) is list:
            growth_rates = growth_rate
            assert len(growth_rates) == 4
        else:
            growth_rates = [growth_rate] * 4

        # Per-stage bottleneck width, same broadcasting convention.
        if type(bottleneck_width) is list:
            bottleneck_widths = bottleneck_width
            assert len(bottleneck_widths) == 4
        else:
            bottleneck_widths = [bottleneck_width] * 4

        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bottleneck_widths[i], growth_rate=growth_rates[i], drop_rate=drop_rate)
            self.features.add(block)
            num_features = num_features + num_layers * growth_rates[i]

            # 1x1 transition conv after every dense block.
            self.features.add(BasicConv2D(num_features, kernel_size=1, strides=1))

            # Downsample between stages, but not after the final one.
            if i != len(block_config) - 1:
                self.features.add(AveragePooling2D(2))
                num_features = num_features  # no-op, kept from the original port

        # Dense layer
        self.classifier = Dense(num_classes, kernel_initializer='glorot_uniform')
        self.drop_rate = drop_rate

    def call(self, x):
        features = self.features(x)
        out = GlobalAveragePooling2D()(features)

        if self.drop_rate > 0:
            out = Dropout(self.drop_rate)(out)

        out = self.classifier(out)
        return out
class ImageAugmentation:
    """Callable that resizes every (image, label) pair to 224x224.

    Some datasets contain variable-sized images; this normalizes them to
    the (224, 224, 3) input PeleeNet expects. Labels pass through.
    """

    def __call__(self, image, label):
        resized = tf.image.resize(
            image, [224, 224], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        return resized, label
class TrainingImageAugmentation:
    """Training-time preprocessing: scale pixels to [0, 1], upscale, random
    horizontal flip, resize to 224x224, and log sample images to
    TensorBoard via ``log_dir``.

    NOTE(review): ``batch_size`` is stored but never used here.
    """
    def __init__(self, log_dir: str, max_images: int, name: str,
                 input_size: int, scale_img: int, resize: int,
                 batch_size: int):
        self.file_writer = tf.summary.create_file_writer(log_dir)
        self.max_images: int = max_images  # images per summary step
        self.name: str = name  # TensorBoard tag
        self.input_size: int = input_size  # native dataset image size
        self.resize: int = resize  # extra pixels added before final resize
        self.batch_size: int = batch_size
        self.scale_img: int = scale_img  # upscale factor applied first
        self._counter: int = 0  # summary step counter

    def __call__(self, image, label):
        image = tf.cast(image, tf.float32) / 255.0
        #aug_img = tf.image.per_image_standardization(image)
        # Upscale to (input_size * scale_img + resize) square before flipping.
        aug_img = tf.image.resize(image, (((self.input_size * self.scale_img) + self.resize), ((self.input_size * self.scale_img) + self.resize)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        #aug_img = tf.image.random_crop(aug_img, size=(224, 224, 3))
        aug_img = tf.image.random_flip_left_right(aug_img)
        aug_img = tf.image.resize(aug_img, [224, 224], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        # Log a sample of the augmented batch for visual inspection.
        with self.file_writer.as_default():
            tf.summary.image(
                self.name,
                aug_img,
                step=self._counter,
                max_outputs = self.max_images
            )
            self._counter += 1

        return aug_img, label
class TestingImageAugmentation:
    """Evaluation-time preprocessing: same scaling/resize pipeline as
    TrainingImageAugmentation but without random flips, plus TensorBoard
    image logging.

    NOTE(review): ``batch_size`` is stored but never used here.
    """
    def __init__(self, log_dir: str, max_images: int, name: str,
                 input_size: int, scale_img: int, resize: int,
                 batch_size: int) -> None:
        self.file_writer = tf.summary.create_file_writer(log_dir)
        self.max_images: int = max_images  # images per summary step
        self.name: str = name  # TensorBoard tag
        self.input_size: int = input_size  # native dataset image size
        self.resize: int = resize  # extra pixels added before final resize
        self.batch_size: int = batch_size
        self.scale_img: int = scale_img  # upscale factor applied first
        self._counter: int = 0  # summary step counter

    def __call__(self, image: tf.data.Dataset, label: tf.data.Dataset) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
        img: tf.data.Dataset = tf.cast(image, tf.float32) / 255.0
        #aug_img = tf.image.per_image_standardization(image)
        # Deterministic resize pipeline (no random ops at test time).
        aug_img: tf.data.Dataset = tf.image.resize(img, (((self.input_size * self.scale_img) + self.resize), ((self.input_size * self.scale_img) + self.resize)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        aug_img = tf.image.resize(aug_img, [224, 224], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        # Log a sample of the preprocessed batch for visual inspection.
        with self.file_writer.as_default():
            tf.summary.image(
                self.name,
                aug_img,
                step=self._counter,
                max_outputs = self.max_images
            )
            self._counter += 1

        return aug_img, label
def main():
    """Entry point for the PeleeNet trainer.

    Parses CLI arguments, prepares the model/log directories, builds the
    tf.data input pipelines (with image augmentation), and runs a custom
    training/evaluation loop with TensorBoard logging and per-epoch
    checkpointing.
    """
    parser = argparse.ArgumentParser(description='PeleeNet Trainer')
    parser.add_argument('--input_dir', help="Directory containing training data (eg. /workspace/data)")
    parser.add_argument('--output_dir', help="Directory to save model to disk (eg. /tmp/model_dir)")
    parser.add_argument('--epochs', help="Number of training epochs")
    parser.add_argument('--model_name', help="Name of the model being trained")
    parser.add_argument('--model_version', help="Version of the model (eg. 1.0.0 (versioning scheme independent))")
    parser.add_argument('--data_augment', help="Enable or disable data augmentation")
    parser.add_argument('--resize', help="Resize training data (eg. 32 (where original image size is (224,224) this would resize the image to (256, 256)))")
    parser.add_argument('--scale_img', help="Factor by which to scale the input image (eg. 7 (if the input image is 32x32x3 (HWC) the output would be (224,224,3)))")
    parser.add_argument('--crop_pct', help="Percentage to center crop training image (eg. 0.5 will center crop to the middle 50% of pixels in the image)")
    parser.add_argument('--subtract_pixel_mean', help="Enable or disable subtracting the pixel mean from input image batches")
    parser.add_argument('--batch_size', help="Batch size for batching training data (eg. 128)")
    parser.add_argument('--learning_rate', help="Learning rate to use with the optimizer we choose on our model (eg. 1e-3 or 0.003)")
    parser.add_argument('--momentum', help="Momentum to use for the SGD Optimizer")
    parser.add_argument('--lr_patience', help='Number of epochs with no improvement after which learning rate will be reduced. (eg. 5)')
    parser.add_argument('--dropout', help="Percentage of dropout to add to the network (eg .5 == 50% dropout rate")
    #parser.add_argument('--dataset_split', nargs='+', type=float, help="What splits to use for partitioning data between training, validation, and test (eg. 0.7 0.15 0.15) (repsectively))")
    parser.add_argument('--growth_rate', help="Growth Rate as defined in the PeleeNet paper (eg. 32)")
    parser.add_argument('--bottle_neck_width', nargs="+", type=str, help="Bottle Neck Width as defined in the PeleeNet paper (eg. 1 2 4 4)")
    parser.add_argument('--num_classes', help="Number of classes contained within a dataset. (eg. 1000 for ImageNet)")
    parser.add_argument('--input_size', help="Input size of the dataset (eg. 224 for images with (224,224,3) dimensions)")
    parser.add_argument('--prefetch_size', help="Number of batches to prefetch for model training (eg. 5)")
    parser.add_argument('--shuffle_buffer', help="Number of data points to add to shuffle buffer (eg. 10000)")
    args = parser.parse_args()
    # All arguments arrive as strings; coerce them to the types used below.
    EPOCHS = int(args.epochs)
    LEARNING_RATE = float(args.learning_rate)
    MOMENTUM = float(args.momentum)
    #TODO(ehenry): Make dataset augmentation optional as an argument for hyperparameter sweeps
    DATA_AUGMENTATION = args.data_augment
    RESIZE = int(args.resize)
    SCALE_IMG = int(args.scale_img)
    DROPOUT = float(args.dropout)
    PATIENCE = int(args.lr_patience)
    INPUT_DIR = str(args.input_dir)
    NUM_CLASSES = int(args.num_classes)
    INPUT_SIZE = int(args.input_size)
    PREFETCH_SIZE = int(args.prefetch_size)
    SHUFFLE_BUFFER = int(args.shuffle_buffer)
    OUTPUT_DIR = str(args.output_dir)
    MODEL_NAME = str(args.model_name)
    #TODO(ehenry): This can likely be combined with the DATA_AUGMENTATION flag above.
    # It should be made optional for via command line argument for hyperparameter sweeps
    # NOTE(review): CROP_PERCENT is assigned but never used later in this function.
    if args.crop_pct:
        CROP_PERCENT = float(args.crop_pct)
    else:
        pass
    if args.growth_rate:
        GROWTH_RATE = int(args.growth_rate)
    else:
        GROWTH_RATE = 32
    if args.bottle_neck_width:
        BOTTLENECK_WIDTH = list(args.bottle_neck_width)
    else:
        BOTTLENECK_WIDTH = [1,2,4,4]
    # TODO(ehenry) For data management, is there a way we can automate this process
    #              for users whom use our platform(s)? Something to investigate when
    #              looking into what data management means? API calls to FS served by
    #              Dell EMC storage array?
    MODEL_VERSION = args.model_version
    # Layout: <output_dir>/<model_name>/<model_version>/{ckpt,logs}
    MODEL_DIRECTORY = os.path.join(args.output_dir, MODEL_NAME, MODEL_VERSION)
    checkpoint_dir = os.path.join(MODEL_DIRECTORY, 'ckpt')
    tensorboard_dir = os.path.join(MODEL_DIRECTORY, 'logs')
    current_time: str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/train'
    train_img_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/train/images'
    test_img_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/test/images'
    test_log_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/test'
    if args.batch_size:
        BATCH_SIZE = int(args.batch_size)
    else:
        BATCH_SIZE = 128
    #TODO(ehenry) clean up this logic for directory creation
    print(MODEL_DIRECTORY)
    if os.path.isdir(MODEL_DIRECTORY) == False:
        # NOTE(review): MODEL_DIRECTORY already ends in MODEL_VERSION, so this
        # creates an extra nested <version>/<version> directory — confirm intended.
        os.makedirs(os.path.join(MODEL_DIRECTORY, MODEL_VERSION))
        os.mkdir(checkpoint_dir)
        os.mkdir(tensorboard_dir)
        print(f"Training Log Directory : {train_log_dir}")
        print(f"Testing Log Directory : {test_log_dir}")
        # NOTE(review): validation_log_dir is only defined on this branch; when the
        # model directory already exists, the later create_file_writer(validation_log_dir)
        # call raises NameError — verify.
        validation_log_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/validation'
        os.makedirs(train_log_dir)
        os.makedirs(test_log_dir)
        os.makedirs(validation_log_dir)
    else:
        print(f"Model {MODEL_NAME} Version {MODEL_VERSION} already exists!")
    #TODO(ehenry): Implement logic to write metadata files for use in Kubeflow pipelines
    # This specific example will allow for spawning a TensorBoard instance within Kubernetes
    # from the Kubeflow Pipelines UI
    metadata = {
        "outputs": [{
            "type": "tensorboard",
            "source": train_log_dir,
        }]
    }
    #TODO(ehenry): Define logic for saving model metadata to the metadata module included with Kubeflow
    with open('/mlpipeline-ui-metadata.json', 'w') as f:
        json.dump(metadata, f)
    # dataset_splits = args.dataset_split
    # TRAIN_SIZE = dataset_splits[0]
    # VALIDATION_SIZE = dataset_splits[1]
    # TEST_SIZE = dataset_splits[2]
    # load data
    #TODO(ehenry): Break out data loading functionality into separate module
    # image_net = tfds.builder("cifar10")
    # download_config = tfds.download.DownloadConfig()
    # download_config.manual_dir="/home/tom/tensorflow_datasets"
    # #download_config.extract_dir="/mnt/tensorflow_datasets"
    # download_config.compute_stats="skip"
    # image_net.download_and_prepare(download_config=download_config)
    # # image_net.as_dataset()
    # # image_net_train, image_net_valid = image_net['train'], image_net['valid']
    # NOTE(review): both splits load "train", so the evaluation set below is the
    # training set; this likely should be ["train", "test"] — confirm.
    (train, test), info = tfds.load("cifar100",
                                    split=["train", "train"],
                                    shuffle_files=True,
                                    as_supervised=True,
                                    with_info=True,
                                    data_dir=INPUT_DIR)
    #TODO(ehenry): Match learning rate scheduler to peleenet paper -- for now using peicewiseconstantdecay
    def lr_scheduler(init_lr: float, num_epochs: int, iterations_per_epoch: int, iterations: int) -> Tuple[List, List]:
        """Scheduler for use in reducing learning rate during training as outlined in the original PeleeNet Paper
        Arguments:
            init_lr {float} -- Initial learning rate
            num_epochs {int} -- Total number of training epochs
            iterations_per_epoch {int} -- Total number of iterations_per_epoch (total number of training examples)
            iterations {int} -- Total number of steps per epoch
        Returns:
            Tuple[List, List] -- List of boundaires and list of values for use in optimization object
        """
        # Lists of boundaries and values for use in PiecewiseConstantDecay learning rate
        boundaries = []
        values = []
        learning_rate: float = init_lr
        T_total: int = num_epochs * iterations_per_epoch
        # NOTE(review): loops over the module/function-level EPOCHS instead of the
        # num_epochs parameter — confirm intended.
        for i in range(EPOCHS):
            for e in range(iterations):
                T_cur: int = (i % num_epochs) * iterations_per_epoch + e
                lr: float = 0.5 * learning_rate * (1 + math.cos(math.pi * T_cur / T_total))
                # NOTE(review): boundaries are epoch indices repeated once per iteration,
                # so the list is not strictly increasing as PiecewiseConstantDecay
                # requires — verify before wiring lr_schedule into the optimizer.
                boundaries.append(i+1)
                values.append(lr)
        return boundaries[:-1], values
    # Augmentation callables: one for per-image preprocessing, two batch-level
    # ones that also log augmented images to TensorBoard.
    BATCH_SIZE_AUGMENTATION = ImageAugmentation()
    TRAIN_AUGMENTATION = TrainingImageAugmentation(train_img_dir, max_images=5, name="Augmented Training Images",
                                                   input_size=INPUT_SIZE, scale_img=SCALE_IMG, resize=RESIZE,
                                                   batch_size=BATCH_SIZE)
    TEST_AUGMENTATION = TestingImageAugmentation(test_img_dir, max_images=5, name="Augmented Testing Images",
                                                 input_size=INPUT_SIZE, scale_img=SCALE_IMG, resize=RESIZE,
                                                 batch_size=BATCH_SIZE)
    # Shuffle our dataset, and reshuffle after each epoch
    train = train.shuffle(SHUFFLE_BUFFER, reshuffle_each_iteration=True)
    # Resize all images, create training batches, augment the images, and prefetch batches
    train = train.map(BATCH_SIZE_AUGMENTATION, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(BATCH_SIZE, drop_remainder=True).map(TRAIN_AUGMENTATION, num_parallel_calls=tf.data.experimental.AUTOTUNE).prefetch(PREFETCH_SIZE)
    # Resize all images, create testing batches, augment the images, and prefetch batches
    test = test.map(BATCH_SIZE_AUGMENTATION, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(BATCH_SIZE, drop_remainder=True).map(TEST_AUGMENTATION, num_parallel_calls=tf.data.experimental.AUTOTUNE).prefetch(PREFETCH_SIZE)
    #test = test.prefetch(PREFETCH_SIZE)
    print(f"Model output directory : {args.output_dir}/{args.model_name}")
    # Loss object for use in tracking model loss on train/validation/test datasets
    loss_object = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    # Quick hack for using PiecewiseConstantDecay for learning rate decay
    # Ideally we'd want to implement our own LearningRateSchedule here, but
    # this should work for now...
    lr_boundaries, lr_values = lr_scheduler(init_lr=LEARNING_RATE, num_epochs=EPOCHS, iterations_per_epoch=info.splits['train'].num_examples, iterations=info.splits['train'].num_examples//BATCH_SIZE)
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=lr_boundaries, values=lr_values)
    # Optimizer (this one is pretty straight forward)
    # NOTE(review): lr_schedule above is never passed to the optimizer; training
    # runs at the constant LEARNING_RATE — confirm intended.
    optimizer = tf.optimizers.SGD(learning_rate=LEARNING_RATE, momentum=MOMENTUM)
    # Metrics for tracking train, validation, and test loss during training
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    validation_loss = tf.keras.metrics.Mean(name='validation_loss')
    test_loss = tf.keras.metrics.Mean(name='test_loss')
    # Train accuracy metric for use in model training
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    # Test accuracy metric for use in model training
    test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
    # Create an instance of out model
    model = PeleeNet(bottleneck_width=BOTTLENECK_WIDTH, growth_rate=GROWTH_RATE, drop_rate=DROPOUT, num_classes=NUM_CLASSES)
    # Export a graph trace so the model graph can be inspected in TensorBoard.
    graph_writer = tf.summary.create_file_writer(train_log_dir)
    tf.summary.trace_on(graph=True, profiler=True)
    with graph_writer.as_default():
        tf.summary.trace_export(
            name='peleenet_trace',
            step=0,
            profiler_outdir=train_log_dir
        )
    @tf.function
    def train_step(images: tf.data.Dataset.batch, labels: tf.data.Dataset.batch):
        """Training step for our model
        Arguments:
            images {tf.data.Dataset.batch} -- Single batch of images for model training
            labels {tf.data.Dataset.batch} -- Single batch of labels for model training
        """
        with tf.GradientTape() as tape:
            predictions = model(images, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        train_accuracy(labels, predictions)
        train_loss(loss)
    # @tf.function
    # def validation_step(images: tf.data.Dataset.batch, labels: tf.data.Dataset.batch):
    #     """[summary]
    #     Arguments:
    #         images {tf.data.Dataset.batch} -- Batch of validation images
    #         labels {tf.data.Dataset.batch} -- Batch of validation labels
    #     """
    #     predictions = model(images)
    #     v_loss = loss_object(labels, predictions)
    #     validation_loss(v_loss)
    @tf.function
    def test_step(images: tf.data.Dataset.batch, labels: tf.data.Dataset.batch):
        """Test step for use with training our model
        Arguments:
            images {tf.data.Dataset.batch} -- Batch of testing images
            labels {tf.data.Dataset.batch} -- Batch of testing labels
        """
        # Get model preidctions
        predictions = model(images, training=False)
        # Get test dataset loss
        t_loss = loss_object(labels, predictions)
        # Set test dataset accuracy
        test_accuracy(labels, predictions)
        # Set test dataset loss
        test_loss(t_loss)
    # Checkpoint object for use in training pipeline
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
    checkpoint_path = checkpoint_dir + '/'
    # Checkpoint manager for managing checkpoints during training
    checkpoint_manager = tf.train.CheckpointManager(checkpoint, directory=checkpoint_path, max_to_keep=5)
    # model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=LEARNING_RATE, momentum=MOMENTUM),
    #               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    #               metrics=['acc'])
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir, monitor='val_loss', verbose=1,
    #                                                 mode='auto', save_best_only=True, save_freq='epoch', save_weights_only=False)
    # tensorboard = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir, histogram_freq=20, write_graph=True,
    #                                              update_freq='batch', profile_batch=2)
    # lr_plateau = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.25, patience=PATIENCE,
    #                                                   verbose=1, mode='auto', cooldown=0, min_lr=0.0001)
    # print('Fit model...')
    # history = model.fit(train,
    #                     epochs=EPOCHS,
    #                     validation_data=test,
    #                     callbacks=[lr_plateau, checkpoint, tensorboard])
    # Create summary writers for writing values for visualization in TensorBoard
    #TODO(ehenry) Implement validation dataset logic
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    test_summary_writer = tf.summary.create_file_writer(test_log_dir)
    validation_summary_writer =tf.summary.create_file_writer(validation_log_dir)
    # Metrics to use with progress bar for model training and testing
    # NOTE(review): missing comma after 'test_accuracy' — implicit string
    # concatenation yields a single metric name 'test_accuracylearning_rate'.
    progbar_metrics = ['train_loss', 'val_loss', 'test_loss',
                       'train_accuracy', 'val_accuracy', 'test_accuracy'
                       'learning_rate']
    for epoch in range(EPOCHS):
        print(f"\nStarting epoch number {(epoch + 1)}...")
        # Progress bar for tracking training and testing
        bar = tf.keras.utils.Progbar(target=info.splits['train'].num_examples//BATCH_SIZE, unit_name="step", stateful_metrics=progbar_metrics)
        # Iterate over training dataset batches
        for step, (images, labels) in enumerate(train):
            # Evaluate BATCH_SIZE of images and take a gradient step
            train_step(images, labels)
            # Update the progress bar for CLI output
            bar.update(step, values=[('train_loss', train_loss.result()), ('train_accuracy', train_accuracy.result())])
        # Write per epoch training results to Tensorboard summary files
        with train_summary_writer.as_default():
            tf.summary.scalar('loss', train_loss.result(), step=epoch)
            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
            tf.summary.scalar('learning_rate', optimizer.learning_rate.numpy(), step=epoch)
        # Iterate over test dataset batches
        for step, (images, labels) in enumerate(test):
            # Evaluate the model in BATCH_SIZE of images
            test_step(images, labels)
        # Write per epoch training results to Tensorboard summary files
        with test_summary_writer.as_default():
            tf.summary.scalar('loss', test_loss.result(), step=epoch)
            tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
        # Update progres bar with results of test dataset evaluation
        bar.update(info.splits['train'].num_examples//BATCH_SIZE, values=[('test_loss', test_loss.result()), ('test_accuracy', test_accuracy.result())])
        # Checkpoint the model to disk
        checkpoint_manager.save(checkpoint_number=None)
        # Reset metric states for each epoch
        train_loss.reset_states()
        train_accuracy.reset_states()
        validation_loss.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()
    #TODO(ehenry): Evaluate if this is necessary with model checkpointing...
    model.save((MODEL_DIRECTORY + "/" + "PELEENET" + "-" + str(MODEL_VERSION)))
    # with open('/output.txt', 'w') as f:
    #     f.write(args.output_dir)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
194367 | <reponame>gimait/pycozmo<filename>examples/procedural_face_show.py<gh_stars>1-10
#!/usr/bin/env python
import pycozmo
def main():
    """Render and display a 128x64 procedural Cozmo face with default parameters."""
    face = pycozmo.procedural_face.ProceduralFace()
    face.render().show()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6639583 | <filename>PYTHON/skyscrapper.py
class Solution:
    def getSkyline(self, buildings: 'List[List[int]]') -> 'List[List[int]]':
        """Solve the skyline problem by divide and conquer.

        Splits the building list in half, solves each half recursively,
        and merges the two partial skylines (merge-sort structure).
        """
        count = len(buildings)
        # Base cases: no buildings, or a single rectangle.
        if count == 0:
            return []
        if count == 1:
            left_x, right_x, height = buildings[0]
            return [[left_x, height], [right_x, 0]]
        mid = count // 2
        return self.merge_skylines(self.getSkyline(buildings[:mid]),
                                   self.getSkyline(buildings[mid:]))

    def merge_skylines(self, left, right):
        """Merge two skylines into one list of key points."""
        merged = []

        def emit(x, y):
            # Overwrite the last key point on a vertical change,
            # otherwise append a new one.
            if merged and merged[-1][0] == x:
                merged[-1][1] = y
            else:
                merged.append([x, y])

        def drain(idx, points, current):
            # Copy the remaining key points of one skyline, skipping
            # heights equal to the running height.
            for k in range(idx, len(points)):
                x, y = points[k]
                if current != y:
                    emit(x, y)
                    current = y

        i = j = 0
        cur = left_height = right_height = 0
        # Sweep while both skylines still have key points.
        while i < len(left) and j < len(right):
            if left[i][0] < right[j][0]:
                x, left_height = left[i]
                i += 1
            else:
                x, right_height = right[j]
                j += 1
            top = max(left_height, right_height)
            if cur != top:
                emit(x, top)
                cur = top
        # Exactly one of these has leftover points.
        drain(i, left, cur)
        drain(j, right, cur)
        return merged
| StarcoderdataPython |
319496 | from p10_requests import *
# FOO and the math module both come from the wildcard import of
# p10_requests above (demonstrates what `import *` drags into scope).
print(FOO)
print(math.pi)
| StarcoderdataPython |
4972866 | import unittest
from src.gilded_rose import GildedRose, Item
class BackstagePassTest(unittest.TestCase):
    """Quality-update rules for backstage-pass items in the Gilded Rose kata.

    Rules under test: quality rises by 1 (sell_in > 10), by 2 (6..10),
    by 3 (1..5), is capped at 50, and drops to 0 once sell_in <= 0.
    """

    def setUp(self):
        # Canonical item name; previously defined here but re-hardcoded in
        # every test — now used consistently via _updated_quality.
        self.backstage_pass_name = "Backstage passes to a TAFKAL80ETC concert"

    def _updated_quality(self, sell_in, quality):
        """Run one update cycle on a single backstage pass; return its new quality."""
        items = [Item(self.backstage_pass_name, sell_in=sell_in, quality=quality)]
        GildedRose(items).update_quality()
        return items[0].quality

    def test_should_increase_quality_by_1_when_sell_in_date_is_more_than_10(self):
        self.assertEqual(1, self._updated_quality(11, 0))

    def test_should_not_increase_quality_when_quality_is_50(self):
        self.assertEqual(50, self._updated_quality(11, 50))

    def test_should_increase_quality_by_2_when_sell_in_date_is_less_than_equal_to_10(self):
        self.assertEqual(22, self._updated_quality(10, 20))

    def test_should_not_increase_quality_above_50_when_quality_is_49_and_sell_in_date_is_less_than_equal_to_10(self):
        self.assertEqual(50, self._updated_quality(10, 49))

    def test_should_increase_quality_by_2_when_sell_in_date_is_more_than_5(self):
        self.assertEqual(22, self._updated_quality(6, 20))

    def test_should_increase_quality_by_3_when_sell_in_date_is_less_than_equal_to_5(self):
        self.assertEqual(23, self._updated_quality(5, 20))

    def test_should_not_increase_quality_above_50_when_quality_is_48_and_sell_in_date_is_less_than_equal_to_5(self):
        self.assertEqual(50, self._updated_quality(5, 48))

    def test_should_not_increase_quality_above_50_when_quality_is_49_and_sell_in_date_is_less_than_equal_to_5(self):
        self.assertEqual(50, self._updated_quality(5, 49))

    def test_should_not_increase_quality_above_50_when_quality_is_50_and_sell_in_date_is_less_than_equal_to_5(self):
        self.assertEqual(50, self._updated_quality(5, 50))

    def test_quality_should_be_0_when_sell_in_date_is_less_than_0(self):
        self.assertEqual(0, self._updated_quality(-1, 20))

    def test_quality_should_be_0_when_sell_in_date_is_0(self):
        self.assertEqual(0, self._updated_quality(0, 20))
| StarcoderdataPython |
1686955 | <filename>mk_outFoV_rates_resps.py
import numpy as np
from scipy import optimize, stats, interpolate
from astropy.io import fits
from astropy.table import Table
import os
import argparse
import logging, traceback
import healpy as hp
from config import rt_dir
from ray_trace_funcs import RayTraces
from event2dpi_funcs import det2dpis, mask_detxy
from models import Source_Model_InFoV, Source_Model_InOutFoV
from response import Swift_Mask_Interactions, bldmask2batxys, get_fixture_struct, dpi_shape
from coord_conv_funcs import imxy2theta_phi, theta_phi2imxy
from flux_models import Cutoff_Plaw_Flux, Plaw_Flux
from Polygons import Polygon2D
def cli():
    """Parse the job-splitting command line options and return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--Njobs', type=int, default=None,
                        help="Number of jobs being run")
    parser.add_argument('--job_id', type=int, default=-1,
                        help="Job ID number")
    return parser.parse_args()
def get_bldmask_alldets():
    """Build a boolean (173, 286) DPI mask selecting every real detector cell.

    The detector plane is a 16x16 grid of sandwiches: each sandwich spans
    16 detx columns (with a 2-column gap between sandwiches) and 8 dety
    rows (with a 3-row gap).

    Returns:
        np.ndarray of bool, shape (173, 286): True on good detector cells.
    """
    detxs_by_sand0 = np.arange(0, 286 - 15, 18)
    detxs_by_sand1 = detxs_by_sand0 + 15
    detys_by_sand0 = np.arange(0, 173 - 7, 11)
    detys_by_sand1 = detys_by_sand0 + 7
    # np.int was removed in NumPy >= 1.24; the builtin int is equivalent here.
    all_good_detxs = np.ravel([np.arange(detxs_by_sand0[i], detxs_by_sand1[i] + 1,
                                         1, dtype=int) for i in range(16)])
    all_good_detys = np.ravel([np.arange(detys_by_sand0[i], detys_by_sand1[i] + 1,
                                         1, dtype=int) for i in range(16)])
    detxax = np.arange(286, dtype=int)
    detyax = np.arange(173, dtype=int)
    detx_dpi, dety_dpi = np.meshgrid(detxax, detyax)
    bl_alldets = np.isin(detx_dpi, all_good_detxs) & np.isin(dety_dpi, all_good_detys)
    return bl_alldets
def get_in_out_rates4EpeakGamma(sig_mod, Epeak, gamma):
    """Mean normalized rate per detector, inside and outside the coded FoV.

    Sets the spectral parameters on sig_mod (unit amplitude), then averages
    the normalized rate DPIs over the in-FoV and out-of-FoV detector sets.

    Returns:
        (in_fov_rates, out_fov_rates): per-energy-bin mean rates.
    """
    sig_mod.set_flux_params({'A': 1.0, 'Epeak': Epeak, 'gamma': gamma})
    mask_obj = sig_mod.resp_obj.mask_obj
    # In FoV: the mask intersects the detector AND the fixture transmission
    # (column 10) is essentially unity.
    in_fov_bl = mask_obj.does_int_mask & (mask_obj.fix_trans[:, 10] > 0.99)
    out_fov_bl = ~in_fov_bl
    rate_dpis = sig_mod.normed_rate_dpis
    in_fov_rates = np.sum(rate_dpis[:, in_fov_bl], axis=1) / np.sum(in_fov_bl)
    out_fov_rates = np.sum(rate_dpis[:, out_fov_bl], axis=1) / np.sum(out_fov_bl)
    return in_fov_rates, out_fov_rates
def get_in_out_rates(sig_mod):
    """Scan a grid of (Epeak, gamma) spectral shapes and collect rates.

    Builds a 23x21 grid of Epeak (log-spaced 10..~1585 keV) by gamma
    (-0.2..2.3) and records the in/out-of-FoV mean rates for each point.

    Returns:
        list of dicts with keys 'Epeak', 'gamma', 'RatesIn', 'RatesOut'.
    """
    # Python-2-only `print x` statements converted to print() calls.
    Epeaks = np.logspace(1, 3.2, 11*2 + 1)
    print(Epeaks)
    gammas = np.linspace(-0.2, 2.3, 4*5 + 1)
    print(gammas)
    Gs = np.meshgrid(Epeaks, gammas)
    Epeaks = Gs[0].ravel()
    gammas = Gs[1].ravel()
    print(len(Epeaks))
    Npnts = len(Epeaks)
    res_dicts = []
    for j in range(Npnts):
        res_dict = {'Epeak': Epeaks[j], 'gamma': gammas[j]}
        in_fov_rates, out_fov_rates = get_in_out_rates4EpeakGamma(sig_mod, Epeaks[j], gammas[j])
        res_dict['RatesIn'] = in_fov_rates
        res_dict['RatesOut'] = out_fov_rates
        res_dicts.append(res_dict)
    return res_dicts
def mk_in_out_rates_tab_masks(sig_mod, theta, phi):
    """Point sig_mod at (theta, phi) and tabulate in/out-of-FoV rates.

    Returns:
        (tab, mask_in, mask_out): astropy Table of grid results plus the
        full-DPI boolean masks, or (None, None, None) when too many
        detectors fall inside the FoV for the out-of-FoV treatment.
    """
    dpi_shape = (173, 286)
    sig_mod.set_theta_phi(theta, phi)
    # In FoV: mask intersects the detector and fixture transmission
    # (column 10) is essentially unity.
    in_fov_bl = (sig_mod.resp_obj.mask_obj.does_int_mask) & \
                (sig_mod.resp_obj.mask_obj.fix_trans[:, 10] > 0.99)
    out_fov_bl = ~in_fov_bl
    in_ndets = np.sum(in_fov_bl)
    if in_ndets > 100:
        # Python-2-only print statement converted to a print() call.
        print("%d dets in FoV" % (in_ndets))
        return None, None, None
    # np.bool was removed in NumPy >= 1.24; the builtin bool is equivalent.
    mask_in = np.zeros(dpi_shape, dtype=bool)
    mask_out = np.zeros(dpi_shape, dtype=bool)
    mask_in[sig_mod.bl_dmask] = in_fov_bl
    mask_out[sig_mod.bl_dmask] = out_fov_bl
    res_dicts = get_in_out_rates(sig_mod)
    tab = Table(data=res_dicts)
    return tab, mask_in, mask_out
def mk_npz_file_in_out_rates(sig_mod, hp_ind):
    """Compute in/out-of-FoV rates for one healpix pixel and save a .npz file.

    Converts the nside=4 nested healpix index to (theta, phi), tabulates
    the rate grid, and writes the arrays plus detector masks to disk.
    Returns early (writing nothing) when the pixel is rejected.
    """
    dname = '/gpfs/scratch/jjd330/bat_data/rates_resps_outFoV2/'
    phi, lat = hp.pix2ang(2**2, hp_ind, lonlat=True, nest=True)
    theta = 90.0 - lat
    tab, mask_in, mask_out = mk_in_out_rates_tab_masks(sig_mod, theta, phi)
    if tab is None:
        return
    fname = 'resp_hpind_%d_' % (hp_ind)
    Epeak = tab['Epeak']
    gamma = tab['gamma']
    RatesIn = tab['RatesIn']
    RatesOut = tab['RatesOut']
    save_fname = os.path.join(dname, fname)
    # Python-2-only print statement converted to a print() call.
    print(save_fname)
    np.savez(save_fname, RatesIn=RatesIn, RatesOut=RatesOut, Epeak=Epeak,
             gamma=gamma, mask_in=mask_in, mask_out=mask_out)
def main(args):
    """Compute out-of-FoV rate responses for this job's share of sky points.

    Sets up logging, the energy bins, ray traces and the detector mask,
    selects the nside=4 healpix pixels with theta > 40 deg (outside the
    coded FoV), and processes the slice assigned to args.job_id out of
    args.Njobs jobs.
    """
    fname = 'mk_outFoV_resp' + '_' + str(args.job_id)
    logging.basicConfig(filename=fname + '.log', level=logging.DEBUG,
                        format='%(asctime)s-' '%(levelname)s- %(message)s')
    # Energy bin edges: fixed low-energy edges then log-spaced up to 350 keV.
    ebins0 = np.array([15.0, 24.0, 35.0, 48.0, 64.0])
    ebins0 = np.append(ebins0, np.logspace(np.log10(84.0), np.log10(500.0), 5 + 1))[:-1]
    ebins0 = np.round(ebins0, decimals=1)[:-1]
    ebins1 = np.append(ebins0[1:], [350.0])
    flux_mod = Cutoff_Plaw_Flux(E0=100.0)
    rt_obj = RayTraces(rt_dir, max_nbytes=1e9)
    bl_alldets = get_bldmask_alldets()
    # Healpix grid (nside=4, nested) restricted to theta > 40 deg.
    # np.int was removed in NumPy >= 1.24; the builtin int is equivalent.
    hpinds = np.arange(hp.nside2npix(2**2), dtype=int)
    phis, lats = hp.pix2ang(2**2, hpinds, lonlat=True, nest=True)
    thetas = 90.0 - lats
    bl = (thetas > 40.0)
    hp_inds = hpinds[bl]
    phis = phis[bl]
    thetas = thetas[bl]
    Ntot_pnts = len(hp_inds)
    Npnts2do = 1 + int(Ntot_pnts / args.Njobs)
    i0 = Npnts2do * args.job_id
    # Clamp the upper bound: the last job's slice previously ran past the
    # end of hp_inds and raised an IndexError.
    i1 = min(i0 + Npnts2do, Ntot_pnts)
    for i in range(i0, i1):
        logging.info("Starting %d of %d points" % (1 + i - i0, Npnts2do))
        logging.info("hp_ind: %d" % (hp_inds[i]))
        logging.info("theta, phi: %.3f, %.3f" % (thetas[i], phis[i]))
        # Fresh model per pixel (matches the original behavior).
        sig_mod = Source_Model_InOutFoV(flux_mod, [ebins0, ebins1], bl_alldets, rt_obj)
        mk_npz_file_in_out_rates(sig_mod, hp_inds[i])


if __name__ == "__main__":
    args = cli()
    main(args)
| StarcoderdataPython |
# Identifiers used by the test suite.
# NOTE(review): names suggest these refer to example app/model fixtures —
# confirm against the tests that import them.
TESTS_BUNDLE_MODEL = 'ExampleBundle'
TESTS_BUNDLE_APP = 'example_bundle'
TESTS_DYNAMIC_APP = 'example_dynamic_models'
| StarcoderdataPython |
3453092 | import tensorflow as tf
from base import hyperparams as base_hp
class Hyperparams(base_hp.Hyperparams):
    """Experiment hyperparameters, overriding fields of the base config."""
    dtype = tf.float32
    # Batch sizes used for training and for the logit passes.
    batch_size = 64
    logit_batch_size = 32
    # Dimensionality of the input and of the latent code z.
    input_size = 2
    z_size = 2
    # Learning rates (names suggest one per sub-network) — see the training code.
    lr_autoencoder = 0.0001
    lr_decoder = 0.0001
    lr_disc = 0.0001
    # Latent prior: distribution family and its support bound.
    z_dist_type = 'uniform' # ['uniform', 'normal', 'sphere']
    z_bounds = 4.0
    # show_visual_while_training = True
    # Flags enabling the individual training objectives.
    train_generator_adv = True
    train_autoencoder = True
    train_batch_logits = True
    train_sample_logits = True
    # Dataset and model selectors plus the experiment name used for outputs.
    dataloader = 'four_gaussian_sym'
    model = 'bcgan'
    exp_name = 'trial'
| StarcoderdataPython |
8100986 | from screenplay_pdf_to_json.utils import isCharacter
import pytest
def createMockContent(text):
    """Wrap a text string in a content dict at a fixed mock page position."""
    content = {"x": 225, "y": 4}
    content["text"] = text
    return content
def setupMultiplecharacters(characters):
    """Classify each heading with isCharacter after wrapping it in mock content."""
    return [isCharacter(createMockContent(heading)) for heading in characters]
def assertGroup(characters, expectedcharacters):
    """Assert element-wise equality for as many entries as `characters` has.

    Extra entries in `expectedcharacters` are ignored (same as the
    original while-loop implementation).
    """
    for index, actual in enumerate(characters):
        assert actual == expectedcharacters[index]
def test_correct_characrters():
    """Valid character headings should all be classified as characters."""
    # Fix: a missing comma previously fused 'JEFFERSON (INTO P.A.)' and
    # 'D.A.' into one string, so only 3 of the 4 headings were checked.
    characters = [
        'JOHN',
        'JEFFERSON (INTO P.A.)',
        'D.A.',
        'COBB (reads aloud)'
    ]
    characters = setupMultiplecharacters(characters)
    expectedcharacters = [
        True,
        True,
        True,
        True
    ]
    assertGroup(characters, expectedcharacters)
def test_incorrect_characters():
    """Non-character lines (questions, transitions, dialogue) are rejected."""
    characters = [
        'DOING?',
        'CUT BACK TO:',
        'FADE TO BLACK:',
        'I...',
        'I’m <NAME>. I’m in your O.S. lab.'
    ]
    characters = setupMultiplecharacters(characters)
    assert isCharacter({
        "x": 180,
        "y": 100,
        "text": "I WILL SPLIT UP MY FATHER'S",
    }) == False
    # Fix: the expected list had a stray sixth False that assertGroup
    # silently ignored; keep exactly one entry per heading above.
    expectedcharacters = [
        False,
        False,
        False,
        False,
        False
    ]
    assertGroup(characters, expectedcharacters)
| StarcoderdataPython |
6586200 | <gh_stars>0
from django.shortcuts import render, redirect
from django.http import HttpResponse
#from .models import ToDoList, Item
from good_things_that_happened.models import GoodThingThatHappened
from .models import ProfileAccess
from django.contrib.auth.models import User
def profile_for_self(request):
    """Render the logged-in user's own profile page."""
    if not request.user.is_authenticated:
        return redirect('/login/')
    viewer = request.user
    # Profiles this user follows (has been granted access to).
    followed = ProfileAccess.objects.filter(access_for=viewer)
    context = {
        'good_things_that_happened': GoodThingThatHappened.objects.filter(
            who_its_about=viewer).order_by('-created_at'),
        # NOTE: template expects this (misspelled) key name.
        'profiles_you_can_acccess': followed,
        'is_for_self': True,
        'who_can_access': ProfileAccess.objects.filter(profile_for=viewer),
        'following_users_good_things': GoodThingThatHappened.objects.filter(
            who_its_about__in=followed.values('profile_for')).order_by('-created_at'),
    }
    return render(request, "profiles/profile.html", context)
def profile_for_other(request, username):
    """Render another user's profile, if the viewer has been granted access."""
    # Fix: the original performed this authentication check twice, with two
    # different redirect targets ('/login/' vs '/login'); keep one check,
    # matching the sibling views' '/login/' target. The user lookup is now
    # done after the auth check.
    if not request.user.is_authenticated:
        return redirect('/login/')
    profile_for = User.objects.get(username=username)
    can_access = ProfileAccess.objects.filter(profile_for=profile_for, access_for=request.user)
    if not can_access:
        return HttpResponse('Access denied')
    context = {'good_things_that_happened': GoodThingThatHappened.objects.filter(who_its_about=profile_for).order_by('-created_at')}
    context['profile_for'] = profile_for
    # NOTE: template expects this (misspelled) key name.
    context['profiles_you_can_acccess'] = ProfileAccess.objects.filter(access_for=request.user)
    context['who_can_access'] = ProfileAccess.objects.filter(profile_for=profile_for)
    context['is_for_self'] = False
    return render(request, "profiles/profile.html", context)
def add_user_who_can_access_your_profile(request):
    """Grant another user (named in POST 'add_username') access to the current user's profile."""
    # Fix: unlike its sibling views, this view had no authentication guard;
    # an anonymous request would reach the database layer.
    if not request.user.is_authenticated:
        return redirect('/login/')
    profile_access = ProfileAccess(
        profile_for=request.user,
        access_for=User.objects.get(username=request.POST.get('add_username')))
    profile_access.save()
    return redirect('/profile')
11233623 | import pytest
from vcx.error import ErrorCode, VcxError
from vcx.common import error_message
def test_error():
    # Pin the InvalidJson error code to its numeric value from libvcx.
    assert ErrorCode.InvalidJson == 1016
def test_c_error_msg():
    # Code 0 should map to the native library's success message.
    assert error_message(0) == 'Success'
def test_all_error_codes():
    """Every known ErrorCode has a real message; one past the highest is unknown."""
    # Fix 1: the local was named `max`, shadowing the builtin.
    # Fix 2: the loop compared the VcxError *object* to a string, which is
    # always unequal — compare .error_msg, as the surrounding asserts do.
    highest = 0
    assert VcxError(1079).error_msg == "Wallet Not Found"
    for e in ErrorCode:
        assert VcxError(int(e)).error_msg != "Unknown Error"
        # NOTE(review): assumes ErrorCode iterates in ascending value order —
        # confirm; otherwise track the max explicitly.
        highest = int(e)
    assert VcxError(highest + 1).error_msg == "Unknown Error"
| StarcoderdataPython |
def decryptBacon(cipher):
    """Decode a Baconian cipher string and print four reading variants.

    The classic 24-letter Bacon alphabet merges i/j and u/v, so the four
    printed lines cover each substitution combination. Any 5-character
    chunk that is not a valid code decodes to '?'.

    Fix: the original checked `chunk in code` (substring) before scanning
    for an exact match, so a short trailing chunk that happened to be a
    substring of some code was silently dropped — it now decodes to '?'.
    """
    bacon = ['AAAAA', 'AAAAB', 'AAABA', 'AAABB', 'AABAA', 'AABAB', 'AABBA', 'AABBB',
             'ABAAA', 'ABAAB', 'ABABA', 'ABABB', 'ABBAA', 'ABBAB', 'ABBBA', 'ABBBB',
             'BAAAA', 'BAAAB', 'BAABA', 'BAABB', 'BABAA', 'BABAB', 'BABBA', 'BABBB']
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm',
                'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'w', 'x', 'y', 'z']
    # Direct code -> letter table replaces the original O(n) scans per chunk.
    code_to_letter = dict(zip(bacon, alphabet))
    chunk_size = 5
    chunks = [cipher[i:i + chunk_size] for i in range(0, len(cipher), chunk_size)]
    res = ''.join(code_to_letter.get(chunk.upper(), '?') for chunk in chunks)
    iv = res.replace('u', 'v')
    ju = res.replace('i', 'j')
    jv = res.replace('i', 'j').replace('u', 'v')
    print('[1]', res)
    print('[2]', iv)
    print('[3]', ju)
    print('[4]', jv)


if __name__ == '__main__':
    cipher = """BAAABABABAAABAAABBAABAABAAABAAABBAAAABBAABABBAAAAABAABBABAABAABAAABAAAABBABAABBAABAAAAAAAA"""
    decryptBacon(cipher)
# OpenMMLab-style fine-tuning config: ViT-B/16 on ImageNet, inheriting the
# model/dataset/schedule/runtime defaults from the _base_ files below.
_base_ = [
    '../_base_/models/vit-base-p16_ft.py',
    '../_base_/datasets/imagenet.py',
    '../_base_/schedules/adamw_coslr-100e_in1k.py',
    '../_base_/default_runtime.py',
]
# dataset
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
    dict(
        type='RandomAug',
        input_size=224,
        color_jitter=None,
        auto_augment='rand-m9-mstd0.5-inc1',
        interpolation='bicubic',
        re_prob=0.25,
        re_mode='pixel',
        re_count=1,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225))
]
test_pipeline = [
    dict(type='Resize', size=256, interpolation=3),
    dict(type='CenterCrop', size=224),
    dict(type='ToTensor'),
    dict(type='Normalize', **img_norm_cfg)
]
data = dict(
    samples_per_gpu=128,
    drop_last=False,
    workers_per_gpu=32,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline))
# model
model = dict(backbone=dict(init_cfg=dict()))
# optimizer
optimizer = dict(
    # Linearly scaled base LR: 1e-3 for an effective batch of 1024,
    # referenced to a batch of 256.
    lr=1e-3 * 1024 / 256,
    # No weight decay on norms, biases, positional embedding, or cls token.
    paramwise_options={
        'norm': dict(weight_decay=0.),
        'bias': dict(weight_decay=0.),
        'pos_embed': dict(weight_decay=0.),
        'cls_token': dict(weight_decay=0.)
    },
    constructor='TransformerFinetuneConstructor',
    model_type='vit',
    layer_decay=0.65)
# learning policy
lr_config = dict(
    policy='StepFixCosineAnnealing',
    min_lr=1e-6,
    warmup='linear',
    warmup_iters=5,
    warmup_ratio=1e-4,
    warmup_by_epoch=True,
    by_epoch=False)
# runtime
checkpoint_config = dict(interval=1, max_keep_ckpts=3, out_dir='')
persistent_workers = True
log_config = dict(
    interval=100, hooks=[
        dict(type='TextLoggerHook'),
    ])
| StarcoderdataPython |
1681206 | <gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DatabasesOperations(object):
"""DatabasesOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar extension_name: The name of the operation to perform. Constant value: "import".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.extension_name = "import"
self.config = config
    def _import_method_initial(
            self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST of the long-running server-level import.

        Returns the deserialized ``ImportExportResponse`` on HTTP 200, or
        ``None`` when the service answers 202 (operation still in flight).
        Raises ``CloudError`` for any other status code.
        """
        api_version = "2014-04-01"

        # Construct URL
        url = self.import_method.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ImportRequest')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            # Attach the service-side request id to aid support diagnostics.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ImportExportResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def import_method(
self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Imports a bacpac into a new database. .
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param parameters: The required parameters for importing a Bacpac into
a database.
:type parameters: ~azure.mgmt.sql.models.ImportRequest
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ImportExportResponse or
ClientRawResponse<ImportExportResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.ImportExportResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.ImportExportResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._import_method_initial(
resource_group_name=resource_group_name,
server_name=server_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ImportExportResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
import_method.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/import'}
    def _create_import_operation_initial(
            self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT that imports a bacpac into an existing database.

        Returns the deserialized ``ImportExportResponse`` on HTTP 201, or
        ``None`` when the service answers 202 (operation still in flight).
        Raises ``CloudError`` for any other status code.
        """
        api_version = "2014-04-01"

        # Construct URL
        url = self.create_import_operation.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'extensionName': self._serialize.url("self.extension_name", self.extension_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ImportExtensionRequest')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [201, 202]:
            # Attach the service-side request id to aid support diagnostics.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 201:
            deserialized = self._deserialize('ImportExportResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def create_import_operation(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates an import operation that imports a bacpac into an existing
database. The existing database must be empty.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to import into
:type database_name: str
:param parameters: The required parameters for importing a Bacpac into
a database.
:type parameters: ~azure.mgmt.sql.models.ImportExtensionRequest
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ImportExportResponse or
ClientRawResponse<ImportExportResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.ImportExportResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.ImportExportResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_import_operation_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ImportExportResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_import_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extensions/{extensionName}'}
    def _export_initial(
            self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST of the long-running database export.

        Returns the deserialized ``ImportExportResponse`` on HTTP 200, or
        ``None`` when the service answers 202 (operation still in flight).
        Raises ``CloudError`` for any other status code.
        """
        api_version = "2014-04-01"

        # Construct URL
        url = self.export.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ExportRequest')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            # Attach the service-side request id to aid support diagnostics.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ImportExportResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def export(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Exports a database to a bacpac.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to be exported.
:type database_name: str
:param parameters: The required parameters for exporting a database.
:type parameters: ~azure.mgmt.sql.models.ExportRequest
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ImportExportResponse or
ClientRawResponse<ImportExportResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.ImportExportResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.ImportExportResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._export_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ImportExportResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
export.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/export'}
    def list_metrics(
            self, resource_group_name, server_name, database_name, filter, custom_headers=None, raw=False, **operation_config):
        """Returns database metrics.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param filter: An OData filter expression that describes a subset of
         metrics to return.
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Metric
        :rtype:
         ~azure.mgmt.sql.models.MetricPaged[~azure.mgmt.sql.models.Metric]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # NOTE: `filter` shadows the builtin, but it is part of the generated
        # public signature and must stay.
        api_version = "2014-04-01"

        def prepare_request(next_link=None):
            # First page: build the full URL; follow-up pages reuse the
            # service-provided next_link verbatim (it already embeds the query).
            if not next_link:
                # Construct URL
                url = self.list_metrics.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'databaseName': self._serialize.url("database_name", database_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Called by the paged iterator once per page fetch.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        # MetricPaged drives internal_paging lazily as the caller iterates.
        deserialized = models.MetricPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/metrics'}
    def list_metric_definitions(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Returns database metric definitions.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of MetricDefinition
        :rtype:
         ~azure.mgmt.sql.models.MetricDefinitionPaged[~azure.mgmt.sql.models.MetricDefinition]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        api_version = "2014-04-01"

        def prepare_request(next_link=None):
            # First page: build the full URL; follow-up pages reuse the
            # service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list_metric_definitions.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'databaseName': self._serialize.url("database_name", database_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Called by the paged iterator once per page fetch.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        # MetricDefinitionPaged drives internal_paging lazily during iteration.
        deserialized = models.MetricDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_metric_definitions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/metricDefinitions'}
    def list_by_server(
            self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of databases.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Database
        :rtype:
         ~azure.mgmt.sql.models.DatabasePaged[~azure.mgmt.sql.models.Database]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Newer API version than the import/export/metrics operations above.
        api_version = "2017-10-01-preview"

        def prepare_request(next_link=None):
            # First page: build the full URL; follow-up pages reuse the
            # service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list_by_server.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Called by the paged iterator once per page fetch.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        # DatabasePaged drives internal_paging lazily during iteration.
        deserialized = models.DatabasePaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases'}
    def get(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Gets a database.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Database or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.sql.models.Database or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        api_version = "2017-10-01-preview"

        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Database', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'}
    def _create_or_update_initial(
            self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT of the long-running database create/update.

        Returns the deserialized ``Database`` on HTTP 200 (updated) or 201
        (created), or ``None`` when the service answers 202 (operation still
        in flight). Raises ``CloudError`` for any other status code.
        """
        api_version = "2017-10-01-preview"

        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'Database')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 201, 202]:
            # Attach the service-side request id to aid support diagnostics.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Database', response)
        if response.status_code == 201:
            deserialized = self._deserialize('Database', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def create_or_update(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a new database or updates an existing database.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:param parameters: The requested database resource state.
:type parameters: ~azure.mgmt.sql.models.Database
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Database or
ClientRawResponse<Database> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.Database]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.Database]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Database', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'}
    def _delete_initial(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE of the long-running database deletion.

        Returns ``None`` (or the raw response when ``raw=True``); the body is
        empty for DELETE. Raises ``CloudError`` on unexpected status codes.
        """
        api_version = "2017-10-01-preview"

        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (no Accept/Content-Type: DELETE carries no body)
        header_parameters = {}
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # 204 means the database was already gone; treated as success.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def delete(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Deletes the database.

    Kicks off the server-side delete via the initial request, then wraps
    the response in an ``LROPoller`` that tracks the long-running
    operation to completion.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database.
    :type database_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw response so the poller can inspect the HTTP
    # status and headers of the initial call.
    initial_response = self._delete_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        database_name=database_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def extract_result(response):
        # Delete produces no body; hand back the raw envelope only when asked.
        if raw:
            return ClientRawResponse(None, response)

    delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Resolve the polling strategy: default ARM polling, disabled, or a
    # caller-supplied strategy object.
    if polling is True:
        polling_method = ARMPolling(delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling
    return LROPoller(self._client, initial_response, extract_result, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'}
def _update_initial(
        self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
    # Initial call of the update long-running operation: PATCHes the
    # database once and returns; update() polls the operation to completion.
    api_version = "2017-10-01-preview"
    # Construct URL
    url = self.update.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(parameters, 'DatabaseUpdate')
    # Construct and send request
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # Only a 200 carries the updated Database body; 202 means the
    # operation is still in progress server-side.
    if response.status_code == 200:
        deserialized = self._deserialize('Database', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def update(
        self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Updates an existing database.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database.
    :type database_name: str
    :param parameters: The requested database resource state.
    :type parameters: ~azure.mgmt.sql.models.DatabaseUpdate
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns Database or
     ClientRawResponse<Database> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.Database]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.Database]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw response so the poller can inspect the HTTP
    # status and headers of the initial call.
    raw_result = self._update_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        database_name=database_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Called once the LRO finishes: deserialize the final Database.
        deserialized = self._deserialize('Database', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'}
def list_by_elastic_pool(
        self, resource_group_name, server_name, elastic_pool_name, custom_headers=None, raw=False, **operation_config):
    """Gets a list of databases in an elastic pool.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param elastic_pool_name: The name of the elastic pool.
    :type elastic_pool_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of Database
    :rtype:
     ~azure.mgmt.sql.models.DatabasePaged[~azure.mgmt.sql.models.Database]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2017-10-01-preview"

    def prepare_request(next_link=None):
        # First page is built from the URL template; subsequent pages
        # follow the server-provided next link verbatim (it already
        # embeds the query string, hence the empty query dict).
        if not next_link:
            # Construct URL
            url = self.list_by_elastic_pool.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'serverName': self._serialize.url("server_name", server_name, 'str'),
                'elasticPoolName': self._serialize.url("elastic_pool_name", elastic_pool_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def internal_paging(next_link=None):
        # Fetch one page; the paged collection calls this lazily.
        request = prepare_request(next_link)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response

    # Deserialize response
    header_dict = None
    if raw:
        header_dict = {}
    # DatabasePaged pulls pages on demand through internal_paging.
    deserialized = models.DatabasePaged(internal_paging, self._deserialize.dependencies, header_dict)
    return deserialized
list_by_elastic_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/databases'}
def _pause_initial(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
    # Initial call of the pause long-running operation: POSTs to the
    # /pause action once and returns; pause() polls to completion.
    api_version = "2017-10-01-preview"
    # Construct URL
    url = self.pause.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # Only a 200 carries the Database body; 202 means the operation is
    # still in progress server-side.
    if response.status_code == 200:
        deserialized = self._deserialize('Database', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def pause(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Pauses a database.

    Issues the initial pause request and returns an ``LROPoller`` that
    resolves to the paused ``Database`` resource.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database to be paused.
    :type database_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns Database or
     ClientRawResponse<Database> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.Database]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.Database]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw response so the poller can inspect the HTTP
    # status and headers of the initial call.
    initial_response = self._pause_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        database_name=database_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def extract_database(response):
        # The finished operation yields the paused database resource.
        database = self._deserialize('Database', response)
        if raw:
            return ClientRawResponse(database, response)
        return database

    delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Resolve the polling strategy: default ARM polling, disabled, or a
    # caller-supplied strategy object.
    if polling is True:
        polling_method = ARMPolling(delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling
    return LROPoller(self._client, initial_response, extract_database, polling_method)
pause.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/pause'}
def _resume_initial(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
    # Initial call of the resume long-running operation: POSTs to the
    # /resume action once and returns; resume() polls to completion.
    api_version = "2017-10-01-preview"
    # Construct URL
    url = self.resume.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # Only a 200 carries the Database body; 202 means the operation is
    # still in progress server-side.
    if response.status_code == 200:
        deserialized = self._deserialize('Database', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def resume(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Resumes a database.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database to be resumed.
    :type database_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns Database or
     ClientRawResponse<Database> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.Database]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.sql.models.Database]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw response so the poller can inspect the HTTP
    # status and headers of the initial call.
    raw_result = self._resume_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        database_name=database_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Called once the LRO finishes: deserialize the final Database.
        deserialized = self._deserialize('Database', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
resume.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/resume'}
def _upgrade_data_warehouse_initial(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
    # Initial call of the upgrade long-running operation: POSTs to the
    # /upgradeDataWarehouse action once; upgrade_data_warehouse() polls
    # the operation to completion.
    api_version = "2017-10-01-preview"
    # Construct URL
    url = self.upgrade_data_warehouse.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    # No response body for this action; only the raw envelope on request.
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def upgrade_data_warehouse(
        self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Upgrades a data warehouse.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database to be upgraded.
    :type database_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw response so the poller can inspect the HTTP
    # status and headers of the initial call.
    raw_result = self._upgrade_data_warehouse_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        database_name=database_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # The upgrade action has no result body; only the raw envelope
        # is handed back on request.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
upgrade_data_warehouse.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/upgradeDataWarehouse'}
def rename(
        self, resource_group_name, server_name, database_name, id, custom_headers=None, raw=False, **operation_config):
    """Renames a database.

    Posts a ``ResourceMoveDefinition`` to the database's ``/move``
    action. This is a synchronous operation (no poller is returned).

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database to rename.
    :type database_name: str
    :param id: The target ID for the resource
    :type id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # NOTE: ``id`` shadows the builtin, but it is part of the public
    # signature and cannot be renamed without breaking keyword callers.
    parameters = models.ResourceMoveDefinition(id=id)
    api_version = "2017-10-01-preview"

    # Build the request URL from the operation's URL template.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.rename.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble the request headers.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the move definition and POST it.
    body_content = self._serialize.body(parameters, 'ResourceMoveDefinition')
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # No response body; hand back the raw envelope only when asked.
    if raw:
        return ClientRawResponse(None, response)
rename.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/move'}
def _failover_initial(
        self, resource_group_name, server_name, database_name, replica_type=None, custom_headers=None, raw=False, **operation_config):
    # Initial call of the failover long-running operation: POSTs to the
    # /failover action once; failover() polls the operation to completion.
    # Note this action uses a newer API version than the other operations.
    api_version = "2018-06-01-preview"
    # Construct URL
    url = self.failover.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    # replicaType is optional and omitted from the query when not given.
    if replica_type is not None:
        query_parameters['replicaType'] = self._serialize.query("replica_type", replica_type, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    # No response body for this action; only the raw envelope on request.
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def failover(
        self, resource_group_name, server_name, database_name, replica_type=None, custom_headers=None, raw=False, polling=True, **operation_config):
    """Failovers a database.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param database_name: The name of the database to failover.
    :type database_name: str
    :param replica_type: The type of replica to be failed over. Possible
     values include: 'Primary', 'ReadableSecondary'
    :type replica_type: str or ~azure.mgmt.sql.models.ReplicaType
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always request the raw response so the poller can inspect the HTTP
    # status and headers of the initial call.
    raw_result = self._failover_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        database_name=database_name,
        replica_type=replica_type,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # The failover action has no result body; only the raw envelope
        # is handed back on request.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/failover'}
| StarcoderdataPython |
12847193 | import os
import asyncio
import concurrent.futures
import requests
import aiohttp
from mlimages.util.file_api import FileAPI
import mlimages.util.log_api as LogAPI
class API():
    """Base class for image-gathering APIs.

    Downloads images in parallel with asyncio/aiohttp (optionally through
    an HTTP proxy) and stores them below ``data_root`` via ``FileAPI``.
    Subclasses implement ``_gather`` with the source-specific logic.
    """

    def __init__(self, data_root, proxy="", proxy_user="", proxy_password="", parallel=-1, limit=-1, timeout=10, debug=False):
        """
        :param data_root: root folder downloaded files are stored under.
        :param proxy: proxy URL ("" disables the proxy).
        :param proxy_user: proxy auth user name ("" disables proxy auth).
        :param proxy_password: proxy auth password.
        :param parallel: max concurrent downloads (non-positive -> default 4).
        :param limit: stop after this many successful downloads per folder
            (non-positive -> unlimited).
        :param timeout: per-image fetch timeout in seconds.
        :param debug: enables debug-level logging.
        """
        self.file_api = FileAPI(data_root)
        self.proxy = proxy
        self.proxy_user = proxy_user
        # Fix: the original line assigned an unresolved placeholder token
        # here (a syntax error); store the constructor argument instead.
        self.proxy_password = proxy_password
        self.parallel = parallel if parallel > 0 else 4
        self.limit = limit
        self.timeout = timeout
        self.logger = LogAPI.create_logger(type(self).__name__, debug)

    def _gather(self):
        # NotImplementedError is the idiomatic signal for a required
        # override (still an Exception subclass, so callers are unaffected).
        raise NotImplementedError("API subclasses have to implement the _gather method")

    def create_session(self, loop):
        """Create an aiohttp session bound to ``loop``.

        The connector is chosen from the proxy configuration: authenticated
        proxy, anonymous proxy, or a plain TCP connector. The connection
        pool is capped at ``self.parallel``.
        """
        conn = None
        if self.proxy and self.proxy_user:
            conn = aiohttp.ProxyConnector(
                loop=loop,
                limit=self.parallel,
                proxy=self.proxy,
                proxy_auth=aiohttp.BasicAuth(self.proxy_user, self.proxy_password)
            )
        elif self.proxy:
            conn = aiohttp.ProxyConnector(loop=loop, limit=self.parallel, proxy=self.proxy)
        else:
            conn = aiohttp.TCPConnector(loop=loop, limit=self.parallel)
        session = aiohttp.ClientSession(connector=conn)
        return session

    async def _download_images(self, session, relative, image_urls):
        """Download ``image_urls`` into the ``relative`` folder.

        URLs are fetched in batches of ``self.parallel`` concurrent
        requests; failed fetches are counted as misses, not errors.
        """
        self.file_api.prepare_dir(relative)
        successed = 0
        for urls in [image_urls[i:i + self.parallel] for i in range(0, len(image_urls), self.parallel)]:
            done, pendings = await asyncio.wait([self.fetch_image(session, relative, u) for u in urls])
            for d in done:
                try:
                    successed += 1 if d.result() else 0
                except Exception:
                    # fetch_image already logged the cause; a failed task
                    # simply does not count toward the limit.
                    pass
            # Fix: treat a non-positive limit as "no limit". Previously the
            # default limit=-1 made this condition true immediately, so
            # downloading stopped after the very first batch.
            if 0 < self.limit <= successed:
                break

    async def fetch_image(self, session, relative, image_url):
        """Fetch a single image; return True if it was saved to disk.

        All errors are logged and swallowed so one bad URL cannot abort a
        whole batch.
        """
        fname = self.file_api.get_file_name(image_url)
        p = os.path.join(relative, fname)
        fetched = False
        try:
            with aiohttp.Timeout(self.timeout):
                async with session.get(image_url) as r:
                    # Only accept a 200 whose final URL still names the same
                    # file — presumably to reject redirects to placeholder
                    # images (TODO confirm against the image sources used).
                    if r.status == 200 and self.file_api.get_file_name(r.url) == fname:
                        c = await r.read()
                        if c:
                            with open(self.file_api.to_abs(p), "wb") as f:
                                f.write(c)
                                fetched = True
        except FileNotFoundError:
            self.logger.error("{0} is not found.".format(p))
        except concurrent.futures._base.TimeoutError:
            self.logger.warning("{0} is timeouted.".format(image_url))
        except Exception as ex:
            self.logger.warning("fetch image is failed. url: {0}, cause: {1}".format(image_url, str(ex)))
        return fetched

    def download_dataset(self, url, relative):
        """Stream a (possibly large) dataset file from ``url`` to ``relative``."""
        r = requests.get(url, stream=True)
        if r.ok:
            with self.file_api.open_with_mkdir(relative) as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # skip keep-alive chunks
                        f.write(chunk)
                # Flush/fsync once after the transfer instead of per chunk
                # (per-chunk fsync made every download needlessly slow).
                f.flush()
                os.fsync(f.fileno())
| StarcoderdataPython |
5102197 | <reponame>CAST-projects/Extension-SDK
import unittest
from cast.analysers.test import MainframeTestAnalysis
class Test(unittest.TestCase):
    """Checks the MyCompany_COBOL_Rules.useBinaryForDepending quality rule
    against a compliant and a non-compliant sample COBOL program."""

    # Shared analysis configuration, hoisted out of the individual tests
    # (both tests previously duplicated the whole setup sequence).
    EXTENSION = r'C:\ProgramData\CAST\CAST\Extensions\com.castsoftware.mainframe.1.0.0-alpha1'
    RULE = 'MyCompany_COBOL_Rules.useBinaryForDepending'

    def _analyse(self, selection):
        """Run a mainframe test analysis on ``selection``.

        Returns the ``(analysis, program)`` pair for the saved program
        PGM1, asserting that the program object was produced.
        """
        analysis = MainframeTestAnalysis()
        analysis.add_dependency(self.EXTENSION)
        analysis.add_selection(selection)
        # analysis.set_verbose()
        analysis.run()
        program = analysis.get_object_by_name('PGM1', 'CAST_COBOL_SavedProgram')
        self.assertTrue(program)
        return analysis, program

    def test_ok1(self):
        # Compliant program: the rule must report no violation.
        analysis, program = self._analyse('binary_depending/pgmok1.cob')
        self.assertFalse(analysis.get_violations(program, self.RULE))

    def test_ko1(self):
        # Non-compliant program: exactly one violation is expected, on the
        # data declaration (occurs depending on a non-binary data) that
        # starts at line 6.
        analysis, program = self._analyse('binary_depending/pgmko1.cob')
        violations = analysis.get_violations(program, self.RULE)
        self.assertEqual(1, len(violations))
        violation = violations[0]
        # print(violation.position)
        self.assertEqual(6, violation.position.begin_line)
# Allow running this module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
5145065 | <filename>ship/fmp/datunits/isisunit.py<gh_stars>1-10
"""
Summary:
Contains the AUnit, CommentUnit, HeaderUnit and UnknownSection
classes.
The AUnit is an abstract base class for all types of Isis unit read
in through the ISIS data file. All section types that are built should
inherit from the AUnit baseclass.
Author:
<NAME>
Created:
01 Apr 2016
Copyright:
<NAME> 2016
TODO:
Updates:
"""
from __future__ import unicode_literals
import hashlib
import uuid
import random
import copy
# from abc import ABCMeta, abstractmethod
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
from ship.datastructures import DATA_TYPES as dt
from ship.fmp.headdata import HeadDataItem
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class AUnit(object):
    """Abstract base class for all ISIS dat file units (River, Junction, ...).

    Subclasses should override readUnitData() and getData() so reading and
    writing are specific to the unit's own layout. If they are not
    overridden, the raw data is simply stored and returned unchanged (see
    UnknownUnit).

    Subclasses that hold row data should call setDummyRow() on each of
    their RowDataCollections; otherwise, if the user adds no rows, FMP will
    throw errors.
    """

    def __init__(self, **kwargs):
        """Set the defaults for all unit specific variables.

        Args:
            **kwargs: may contain 'name' (unit label) and 'name_ds'
                (downstream label); both default to 'unknown'.
        """
        self._name = kwargs.get('name', 'unknown')        # Unit label
        self._name_ds = kwargs.get('name_ds', 'unknown')  # Unit downstream label

        # Catch-all data store used by units such as UnknownSection.
        # Classes that override readUnitData()/getData() normally ignore
        # this and use row_data and head_data instead.
        self._data = None

        self._unit_type = 'Unknown'      # e.g. 'River'
        self._unit_category = 'Unknown'  # e.g. 'Bridge' for type 'Usbpr'

        # {str: RowDataCollection} - main row data store. In a RiverUnit,
        # for example, this holds the RowDataObjects for CHAINAGE,
        # ELEVATION, etc.
        self.row_data = {}

        # Values that always appear at set locations in the unit data,
        # usually at the top of the unit in the .dat file (e.g. slope,
        # distance for a RiverUnit).
        self.head_data = {}

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def name_ds(self):
        return self._name_ds

    @name_ds.setter
    def name_ds(self, value):
        self._name_ds = value

    @property
    def has_ics(self):
        """True if this unit declares any initial condition labels."""
        return bool(self.icLabels())

    @property
    def has_row_data(self):
        """True if this unit holds any row data collections."""
        return bool(self.row_data)

    @property
    def unit_type(self):
        return self._unit_type

    @property
    def unit_category(self):
        return self._unit_category

    def icLabels(self):
        """Return the initial conditions label names for this unit.

        Should be overridden by any subclass that has initial conditions;
        e.g. a BridgeUnit returns its upstream and downstream label names.

        Return:
            list - of initial condition label names (empty by default).
        """
        return []

    def linkLabels(self):
        """Dict of all the unit names that this unit references.

        Can be used to identify which other units are directly associated
        with this one in some way (e.g. spills/laterals for a RiverUnit,
        remote labels for a bridge, many more for a JunctionUnit).

        Return:
            dict - containing all referenced names.
        """
        return {'name': self._name}

    def copy(self):
        """Return a deep copy of this unit with its own memory allocation."""
        return copy.deepcopy(self)

    def rowDataObject(self, key, rowdata_key='main'):
        """Return one of the row data objects held by this unit.

        Note:
            The returned object is mutable, so changes made to it are
            reflected in this unit's row_data.

        Args:
            key (int): ROW_DATA_TYPES key of the data object requested.
            rowdata_key (str): key to a RowDataCollection in row_data.

        Returns:
            The DataObject the key points to, or None if the unit holds no
            row data.

        Raises:
            KeyError: if key or rowdata_key don't exist.
        """
        if not self.has_row_data:
            return None
        return self.row_data[rowdata_key].dataObject(key)

    def row(self, index, rowdata_key='main'):
        """Get the data values in a particular row by index.

        Args:
            index (int): the index of the row to return.
            rowdata_key (str): key to a RowDataCollection in row_data.

        Return:
            dict - values for the requested row, or None if the unit holds
            no row data.
        """
        if not self.has_row_data:
            return None
        return self.row_data[rowdata_key].rowAsDict(index)

    def getData(self):
        """Return the unit data formatted for an ISIS .dat file.

        Note:
            Subclasses must override this to restore the data to the format
            required by the dat file.

        Returns:
            list of str - formatted for writing to a .dat file.
        """
        raise NotImplementedError

    def readUnitData(self, data, file_line, **kwargs):
        """Read the raw unit data supplied into this object.

        Called by the FmpUnitFactory when constructing the unit from the
        dat file. This default hook just stores the data parsed in the
        factory's buildUnit() unchanged - exactly what UnknownUnit needs.
        Subclasses should override it to populate head_data and, where
        relevant, create row data objects.

        Args:
            data (list): raw data for the section as supplied to the class.
            file_line (int): current line number in the dat file.
        """
        self.head_data['all'] = data

    def deleteRow(self, index, rowdata_key='main', **kwargs):
        """Remove a data row from one of the RowDataCollections.

        **kwargs are passed on to RowDataCollection.deleteRow.

        Raises:
            IndexError: if index is out of bounds for the collection.
        """
        if index < 0 or index >= self.row_data[rowdata_key].numberOfRows():
            raise IndexError(
                'Given index is outside bounds of the "{}" row_data'.format(rowdata_key))

        self.row_data[rowdata_key].deleteRow(index, **kwargs)

    def updateRow(self, row_vals, index, rowdata_key='main', **kwargs):
        """Update an existing data row in one of the RowDataCollections.

        **kwargs are passed on to RowDataCollection.updateRow.

        Args:
            row_vals (dict): named values to update, as stipulated by the
                concrete unit's collection setup.
            index (int): index of the row to update.
            rowdata_key (str): name of the RowDataCollection to update.

        Raises:
            IndexError: if index is out of bounds for the collection.
        """
        if index >= self.row_data[rowdata_key].numberOfRows():
            raise IndexError('Given index is outside bounds of row_collection data')

        # Call the row collection update method to change the row in place.
        self.row_data[rowdata_key].updateRow(row_vals=row_vals, index=index, **kwargs)

    def addRow(self, row_vals, rowdata_key='main', index=None, **kwargs):
        """Add a new data row to one of the row data collections.

        The row_vals are passed directly to RowDataCollection.addRow; it is
        the concrete class' responsibility to ensure they are the values
        expected by its collection and to set any defaults. If they are not
        as expected, a ValueError will be raised by the collection.

        **kwargs are passed on to RowDataCollection.addRow.

        Args:
            row_vals (dict): named values required for adding the row.
            rowdata_key (str): name of the RowDataCollection to add to.
            index (int): index at which to insert the row. If None, or
                beyond the end of the collection, the row is appended.
        """
        # If index is >= record length it gets set to None and is appended
        if index is not None and index >= self.row_data[rowdata_key].numberOfRows():
            index = None
        if index is None:
            index = self.row_data[rowdata_key].numberOfRows()

        self.row_data[rowdata_key].addRow(row_vals, index, **kwargs)

    def checkIncreases(self, data_obj, value, index):
        """Check that prev_value <= value <= next_value in data_obj.

        Note:
            The ARowDataObject class accepts a callback function called
            update_callback which is called whenever an item is added or
            updated. That is how this method is generally used.

        Args:
            data_obj (RowDataObject): containing the values to check against.
            value (float | int): the value to check.
            index (int): index to check adjacent values against. If None
                the last index in the collection is assumed.

        Raises:
            ValueError: if value is less than its predecessor or greater
                than its successor.
        """
        details = self._getAdjacentDataObjDetails(data_obj, value, index)
        # NOTE(review): truthiness means a falsy adjacent value (e.g. 0)
        # skips its check - confirm this is intended before tightening to
        # an "is not None" test.
        if details['prev_value']:
            if not value >= details['prev_value']:
                raise ValueError('VALUE must be > prev index and < next index.')
        if details['next_value']:
            if not value <= details['next_value']:
                raise ValueError('VALUE must be > prev index and < next index.')

    def _getAdjacentDataObjDetails(self, data_obj, value, index):
        """Safely fetch the values adjacent to index in an ADataRowObject.

        Note:
            'index' in the returned dict is the given index unless it was
            None, in which case it is the maximum index. All other values
            are set to None where they do not exist.

        Args:
            data_obj (RowDataObject): containing the values to check against.
            value (float | int): the value being checked/inserted.
            index (int): the index being checked. If None the maximum index
                is assumed.

        Return:
            dict - with 'index', 'prev_value', 'prev_index', 'next_value'
            and 'next_index' keys.

        Raises:
            ValueError: if index < 0.
        """
        prev_value = None
        next_value = None
        prev_index = None
        next_index = None
        if index is None:
            index = data_obj._max
        if index < 0:
            raise ValueError('Index must be > 0')
        if index > 0:
            prev_index = index - 1
            prev_value = data_obj[prev_index]
        # The "next" value is the one currently at index - i.e. the value
        # that would follow after an insertion at this index.
        if index < data_obj._max:
            next_index = index
            next_value = data_obj[next_index]

        return {'index': index,
                'prev_value': prev_value, 'prev_index': prev_index,
                'next_value': next_value, 'next_index': next_index}
class UnknownUnit(AUnit):
    """Catch-all unit for unknown parts of the .dat file.

    Used for all sections of the isis dat file that have no dedicated unit
    class. It has no knowledge of the file section it holds: it stores the
    data without altering its state and returns it in exactly the same
    format it was given.

    It is created whenever the DatLoader finds parts of the dat file that
    it doesn't know how to load (i.e. there is no *Unit defined for it) and
    holds everything until a recognised section is reached - so it may
    actually contain several units' worth of data.
    """
    FILE_KEY = 'UNKNOWN'
    FILE_KEY2 = None

    def __init__(self, **kwargs):
        """Constructor."""
        super(UnknownUnit, self).__init__(**kwargs)
        self._unit_type = 'unknown'
        self._unit_category = 'unknown'
        # uuid4 guarantees a unique label. The previous scheme hashed a
        # random int in [-500, 500], giving only 1001 possible names and a
        # real risk of label collisions between units.
        self._name = 'unknown_' + str(uuid.uuid4())

    def getData(self):
        return self.head_data['all']

    def readUnitData(self, data):
        self.head_data['all'] = data
class CommentUnit(AUnit):
    """Holds the data in COMMENT sections of the .dat file.

    Very similar to UnknownSection: it grabs the data between the COMMENT
    tags and stores it, then writes the same data back out in the same
    format with the COMMENT tags around it.
    """
    # Class constants
    UNIT_TYPE = 'comment'
    UNIT_CATEGORY = 'meta'
    FILE_KEY = 'COMMENT'
    FILE_KEY2 = None

    def __init__(self, **kwargs):
        """Constructor.

        Args:
            **kwargs: may contain 'text', the initial comment text.
        """
        super(CommentUnit, self).__init__(**kwargs)
        text = kwargs.get('text', '')
        self._unit_type = CommentUnit.UNIT_TYPE
        self._unit_category = CommentUnit.UNIT_CATEGORY
        # uuid4 guarantees a unique label. The previous md5-of-random-int
        # scheme could only produce 1001 distinct names.
        self._name = 'comment_' + str(uuid.uuid4())
        self.has_datarows = True
        self.data = []
        if not text.strip() == '':
            self.addCommentText(text)

    def addCommentText(self, text):
        """Append the given (possibly multi-line) text to the comment rows."""
        text = text.split('\n')
        self.no_of_rows = int(len(self.data) + len(text))
        for t in text:
            self.data.append(t.strip())

    def readUnitData(self, data, file_line):
        """Read a COMMENT section: a row-count line followed by that many rows.

        Returns:
            int - index of the last line read.
        """
        file_line += 1
        self.no_of_rows = int(data[file_line].strip())
        file_line += 1
        # Consume exactly no_of_rows comment lines. (The previous version
        # looped over an unused range index and kept an unused 'line' local.)
        for _ in range(self.no_of_rows):
            self.data.append(data[file_line].strip())
            file_line += 1
        return file_line - 1

    def getData(self):
        """Format the comment for writing back to the .dat file."""
        output = []
        output.append('{:<10}'.format('COMMENT'))
        output.append('{:>10}'.format(self.no_of_rows))
        for d in self.data:
            output.append(d)
        # Never emit more rows than the declared count (tag + count + rows).
        if len(output) > self.no_of_rows + 2:
            output = output[:self.no_of_rows + 2]
        return output
class HeaderUnit(AUnit):
    """Deals with the data file values at the top of the .dat file.

    These contain the global variables for the model such as water
    temperature, key matrix coefficients and the total number of nodes.
    There is only ever one of these units in every dat file - at the very
    top - so it seems convenient to put it in this module.
    """
    # Class constants
    UNIT_TYPE = 'header'
    UNIT_CATEGORY = 'meta'
    FILE_KEY = 'HEADER'
    FILE_KEY2 = None

    def __init__(self, **kwargs):
        """Constructor."""
        super(HeaderUnit, self).__init__(**kwargs)
        self._unit_type = HeaderUnit.UNIT_TYPE
        self._unit_category = HeaderUnit.UNIT_CATEGORY
        self._name = 'header'

        # NOTE(review): several defaults look inconsistent with their
        # initial values (fr_lower 0.750/default 0.9, fr_upper
        # 0.900/default 0.75, flow 0.010/default 0.10) - confirm against
        # the FMP dat file specification.
        self.head_data = {
            'name': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'revision': HeadDataItem('#REVISION#1', '{:>10}', 1, 0, dtype=dt.STRING),
            'node_count': HeadDataItem(0, '{:>10}', 2, 0, dtype=dt.INT),
            'fr_lower': HeadDataItem(0.750, '{:>10}', 2, 1, dtype=dt.FLOAT, dps=3, default=0.9),
            'fr_upper': HeadDataItem(0.900, '{:>10}', 2, 2, dtype=dt.FLOAT, dps=3, default=0.75),
            'min_depth': HeadDataItem(0.100, '{:>10}', 2, 3, dtype=dt.FLOAT, dps=3, default=0.01),
            'direct_method': HeadDataItem(0.001, '{:>10}', 2, 4, dtype=dt.FLOAT, dps=3, default=0.001),
            'label_length': HeadDataItem(12, '{:>10}', 2, 5, dtype=dt.INT),
            'water_temp': HeadDataItem(10.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3, default=10),
            'flow': HeadDataItem(0.010, '{:>10}', 3, 1, dtype=dt.FLOAT, dps=3, default=0.10),
            'head': HeadDataItem(0.010, '{:>10}', 3, 2, dtype=dt.FLOAT, dps=3, default=0.10),
            'math_damp': HeadDataItem(0.700, '{:>10}', 3, 3, dtype=dt.FLOAT, dps=3, default=0.7),
            'pivot': HeadDataItem(0.100, '{:>10}', 3, 4, dtype=dt.FLOAT, dps=3, default=0.1),
            'relax': HeadDataItem(0.700, '{:>10}', 3, 5, dtype=dt.FLOAT, dps=3, default=0.7),
            'dummy': HeadDataItem(0.000, '{:>10}', 3, 6, dtype=dt.FLOAT, dps=3, default=0.0),
            'roughness': HeadDataItem('', '{:>10}', 5, 0, dtype=dt.STRING),
        }

    def readUnitData(self, unit_data, file_line):
        """Reads the given data into the object.

        Args:
            unit_data (list): The raw file data to be processed.

        Returns:
            int - the line number to continue reading the file from.
        """
        self.head_data['name'].value = unit_data[0].strip()
        self.head_data['revision'].value = unit_data[1].strip()
        # Lines 2 and 3 hold fixed-width (10 char) columns.
        self.head_data['node_count'].value = unit_data[2][:10].strip()
        self.head_data['fr_lower'].value = unit_data[2][10:20].strip()
        self.head_data['fr_upper'].value = unit_data[2][20:30].strip()
        self.head_data['min_depth'].value = unit_data[2][30:40].strip()
        self.head_data['direct_method'].value = unit_data[2][40:50].strip()
        self.head_data['label_length'].value = unit_data[2][50:60].strip()
        self.head_data['water_temp'].value = unit_data[3][:10].strip()
        self.head_data['flow'].value = unit_data[3][10:20].strip()
        self.head_data['head'].value = unit_data[3][20:30].strip()
        self.head_data['math_damp'].value = unit_data[3][30:40].strip()
        self.head_data['pivot'].value = unit_data[3][40:50].strip()
        self.head_data['relax'].value = unit_data[3][50:60].strip()
        self.head_data['dummy'].value = unit_data[3][60:70].strip()
        self.head_data['roughness'].value = unit_data[5].strip()
        return file_line + 7

    def getData(self):
        """Getter for the formatted data to write back to the .dat file.

        Returns:
            List - data formatted for writing to the new dat file.
        """
        out = []
        out.append(self.head_data['name'].value)
        # BUG FIX: the key order previously listed the nonexistent key
        # 'unknown' (instead of 'label_length'), which made this method
        # raise KeyError on every call.
        key_order = ['revision', 'node_count', 'fr_lower', 'fr_upper', 'min_depth',
                     'direct_method', 'label_length', 'water_temp', 'flow', 'head',
                     'math_damp', 'pivot', 'relax', 'dummy']
        for k in key_order:
            out.append(self.head_data[k].format(True))
        out = ''.join(out).split('\n')
        out.append('RAD FILE')
        out.append(self.head_data['roughness'].format())
        out.append('END GENERAL')
        return out
| StarcoderdataPython |
#! /usr/bin/env python3
"""Generate CORDIC test vectors: an input file plus reference phase/result files."""
import math

REPEAT = 4       # number of packets to generate
PACKET_LEN = 4   # samples per packet

# Context managers guarantee the files are closed even on error.
with open('input.txt', 'w') as fin, \
     open('ref_phase.txt', 'w') as fphase, \
     open('ref_result.txt', 'w') as fres:
    feedback = 0
    d = 1
    for _ in range(REPEAT):
        for i in range(PACKET_LEN):
            d2 = d * 2**8  # format is 7Q8 fixed point
            s = "{0:04x}".format(d2)
            # Write the fixed-point sample to the input file.
            print(s, file=fin)
            # Compute the polar coordinates. (The magnitude previously
            # shadowed the outer loop variable 'r'.)
            magnitude = math.sqrt(d*d + feedback*feedback)
            theta = math.atan2(d, feedback)
            theta2 = theta / 3.14 * 180
            if i == PACKET_LEN-1:
                # Last sample of the packet: reset the feedback and record
                # the accumulated magnitude as the cell's reference result.
                tlast = 1
                feedback = 0
                print(magnitude, file=fres)
            else:
                tlast = 0
                feedback = magnitude
            print(-theta2, tlast, file=fphase)
            d += 1
| StarcoderdataPython |
5095216 | from __future__ import absolute_import
from __future__ import unicode_literals
from flask.views import MethodView
from flask import request
from .util import camel_to_underscore
from werkzeug.exceptions import NotImplemented
from logging import getLogger
import six
class WebHook(MethodView):
    """Base flask view for webhook endpoints.

    Subclasses implement event() to extract the event name from a request,
    plus one method per event name they wish to handle; POSTs are
    dispatched to the method named after the event.
    """

    def __init__(self, logger=None):
        self.logger = logger if logger else getLogger('webhooks')

    def event(self, request):
        """Returns the event name from the request information.
        """
        raise NotImplementedError('Subclasses must implement the event method.')

    def post(self):
        """Dispatch a POSTed webhook to the handler method named after its event."""
        event = self.event(request)
        if not hasattr(self, event):
            # werkzeug's NotImplemented exception -> HTTP 501 response
            raise NotImplemented('No method implemented for event %s.' % event)

        # Get a dict of POSTed data. Later sources (form, then args)
        # override earlier ones (json) for duplicate keys.
        data = {k: d[k] for d in [request.json, request.form, request.args] for k in six.iterkeys(d or {})}
        self.logger.debug('Received %s event with the following data:\n %s' % (event, repr(data)))

        # Call the method with the merged request data as parameter
        return getattr(self, event)(data)
class WebHooks:
    """Provides methods to define webhooks for a flask app"""

    def __init__(self, app=None):
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Bind this extension to a flask app and reset the handler registry."""
        self.app = app
        self.handlers = {}

    def add_handler(self, name, cls):
        """Set a webhook base class to handle requests for a specified type.

        Example:
        ```
        app = Flask(__name__)
        webhooks = WebHooks(app)
        webhooks.add_handler('github', GitHubWebHook)

        @webhooks.hook('/url/for/hook', handler='github')
        class MyHook
            def issues(self, data):
                pass
            def commit_comment(self, data):
                pass
        ```
        """
        self.handlers[name] = cls

    def hook(self, prefix, handler=None):
        """Decorator for creating a webhook from a generic class.

        If a handler name is given, its registered class is used as the
        base; otherwise the app's WEBHOOKS_DEFAULT_HANDLER config (or
        WebHook) is used.

        @webhooks.hook('/url/for/hook', handler='github')
        class MyHook
            def issues(self, data):
                pass
        """
        def wrapper(cls):
            # Save the original init so it can still run after rebasing
            clsinit = getattr(cls, '__init__', lambda self: None)
            # Resolve the base class: named handler -> app config -> WebHook
            basecls = self.handlers.get(handler, self.app.config.get('WEBHOOKS_DEFAULT_HANDLER', WebHook))
            # Dirty trick: rebuild the decorated class with basecls as its base
            cls = type(cls.__name__, (basecls,), dict(cls.__dict__))
            # Capture this extension instance for use inside __init__ below
            hook = self

            def __init__(self, *args, **kwargs):
                # Call the (new) base class constructor with the app logger
                super(cls, self).__init__(logger=hook.app.logger)
                # Then run the original class' initialisation
                clsinit(self, *args, **kwargs)
            cls.__init__ = __init__
            # Register the view on the app under the given URL prefix
            self.app.add_url_rule(prefix, view_func=cls.as_view(camel_to_underscore(cls.__name__)))
            return cls
        return wrapper
| StarcoderdataPython |
9626188 | from flask_app import app, db, Classroom, Teacher, Student, Activity, Question, HelpingHand, HelpingHandLog
from flask_login import current_user, login_required
from flask import redirect, render_template, url_for, request, flash
from functools import wraps
from datetime import datetime
################################################################################
# Definition of student accesses to helping-hands
# Each app.route should use a login verification
# @login_required
################################################################################
@app.route("/student/", methods=["GET", "POST"])
@login_required
def student():
    """Render the student landing page."""
    template_html = "student/index.html"
    return render_template(
        template_html
    )
@app.route("/student/link-to-teacher/", methods=["GET", "POST"])
@login_required
def link_to_teacher():
    """Let a logged-in student register themselves with a teacher by username."""
    template_html = "student/link_to_teacher.html"
    if request.method == "GET":
        return render_template(template_html)
    # POST from here on. Admin accounts represent teachers, so only plain
    # student accounts may link themselves.
    if current_user.admin:
        flash('Ne fonctionne que pour les élèves')
        return redirect('/student/')
    teacher_username = request.form["teacher_username"]
    teacher = Teacher.query.filter_by(username=teacher_username).first()
    if teacher is None:
        flash("Le professeur demandé n'existe pas !")
        return render_template(template_html)
    # Avoid duplicate links to the same teacher.
    if current_user in teacher.students:
        flash(f"Vous vous êtes déjà signalé à {teacher.name} {teacher.surname} !")
        return render_template(template_html)
    teacher.students.append(current_user)
    db.session.commit()
    flash(f"Vous vous êtes bien signalé à {teacher.name} {teacher.surname}")
    return redirect(url_for('student'))
@app.route("/student/show-helping-hand/<classroom_id>/<activity_id>/<question_id>/<helping_hand_id>/")
@login_required
def show_helping_hand(classroom_id, activity_id, question_id, helping_hand_id):
    """Display one helping hand after validating the whole access chain.

    The classroom must belong to the current user, the activity to the
    classroom, the question to the activity and the helping hand to the
    question; any broken link flashes a message and redirects back to the
    student page.
    """
    classroom = Classroom.query.filter_by(id=classroom_id).first()
    if (classroom is None) or (not classroom in current_user.classrooms):
        flash("Classe non accessible")
        return redirect(url_for('student'))
    # check if access to activity
    activity = Activity.query.filter_by(id=activity_id).first()
    if (activity is None) or (not activity in classroom.activities):
        flash("Activité non diponible")
        return redirect(url_for('student'))
    question = Question.query.filter_by(id=question_id).first()
    if (question is None) or (not question in activity.questions):
        flash("Question non diponible")
        return redirect(url_for('student'))
    helping_hand = HelpingHand.query.filter_by(id=helping_hand_id).first()
    if (helping_hand is None) or (not helping_hand in question.helping_hands):
        flash("Coup de pouce non diponible")
        return redirect(url_for('student'))
    # log the access only for real students - an admin here is a teacher
    # testing their own activities, so no log entry is recorded
    if not current_user.admin:
        # NOTE(review): datetime.now() is timezone-naive - confirm the
        # server's local time is the intended timestamp.
        log = HelpingHandLog(
            timestamp=datetime.now(),
            helping_hand=helping_hand,
            student=current_user
        )
        db.session.add(log)
        db.session.commit()
    template_html = "student/show_helping_hand.html"
    return render_template(
        template_html,
        activity=activity,
        question=question,
        helping_hand=helping_hand,
        classroom=classroom
    )
@app.route("/student/access-activity/<classroom_id>/<activity_id>/")
@login_required
def access_activity(classroom_id, activity_id):
    """Render an activity page after checking classroom and activity access."""
    classroom = Classroom.query.filter_by(id=classroom_id).first()
    if (classroom is None) or (not classroom in current_user.classrooms):
        flash("Classe non accessible")
        return redirect(url_for('student'))
    # check if access to activity
    activity = Activity.query.filter_by(id=activity_id).first()
    if (activity is None) or (not activity in classroom.activities):
        flash("Activité non diponible")
        return redirect(url_for('student'))
    template_html = "student/access_activity.html"
    return render_template(
        template_html,
        activity=activity,
        classroom=classroom
    )
5113204 | <gh_stars>1-10
import cv2
import numpy as np

# White canvas with a filled red circle in the centre (BGR colour order).
circle = np.zeros((512, 512, 3), dtype = np.uint8) + 255
cv2.circle(circle, (256, 256), 50, color = (0, 0, 255), thickness = -1)

# White canvas with a filled green rectangle over the same area.
rectangle = np.zeros((512, 512, 3), dtype = np.uint8) + 255
cv2.rectangle(rectangle, (200, 200), (300, 300), color=(0,255,0), thickness=-1)

# cv2.add performs a saturating (clipped at 255) per-pixel sum,
# demonstrated by printing the centre pixel.
add = cv2.add(circle, rectangle)
print(add[256, 256])

cv2.imshow("circle", circle)
cv2.imshow("reclangle", rectangle)  # NOTE(review): window title typo 'reclangle' (runtime string, left as-is)
cv2.imshow("add", add)

cv2.waitKey(0)
cv2.destroyAllWindows()
4828324 | from graphql.core.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLInt,
GraphQLNonNull,
GraphQLList,
GraphQLObjectType,
GraphQLString,
GraphQLField
)
class ConnectionConfig(object):
    '''
    Configuration bundle for connectionDefinitions(): the connection name,
    the GraphQL node type, and optional extra fields (plain dicts or
    thunks) for the generated edge and connection types.
    '''
    def __init__(self, name, nodeType, edgeFields=None, connectionFields=None):
        self.name = name
        self.nodeType = nodeType
        self.edgeFields = edgeFields
        self.connectionFields = connectionFields
class GraphQLConnection(object):
    """Simple holder pairing a generated edge type with its connection type."""
    def __init__(self, edgeType, connectionType):
        self.edgeType = edgeType
        self.connectionType = connectionType
# Standard Relay cursor-pagination arguments for a connection field:
# before/after are opaque cursors, first/last are page sizes.
connectionArgs = {
    'before': GraphQLArgument(GraphQLString),
    'after': GraphQLArgument(GraphQLString),
    'first': GraphQLArgument(GraphQLInt),
    'last': GraphQLArgument(GraphQLInt),
}
def resolveMaybeThunk(f):
    """Return f() if *f* is callable (a thunk), otherwise return f itself."""
    # callable() is the idiomatic form of hasattr(f, '__call__').
    return f() if callable(f) else f
def connectionDefinitions(*args, **kwargs):
    """Build Relay-style Edge and Connection GraphQL types for a node type.

    Accepts either a single ConnectionConfig positional argument or the
    ConnectionConfig constructor arguments directly.

    Returns:
        GraphQLConnection holding the generated edgeType and connectionType.
    """
    if len(args) == 1 and not kwargs and isinstance(args[0], ConnectionConfig):
        config = args[0]
    else:
        config = ConnectionConfig(*args, **kwargs)

    name, nodeType = config.name, config.nodeType
    edgeFields = config.edgeFields or {}
    connectionFields = config.connectionFields or {}

    edgeType = GraphQLObjectType(
        name+'Edge',
        description='An edge in a connection.',
        # fields is a thunk so the extra fields (themselves possibly
        # thunks) are only resolved when the schema is built.
        fields=lambda: dict({
            'node': GraphQLField(
                nodeType,
                description='The item at the end of the edge',
            ),
            'cursor': GraphQLField(
                GraphQLNonNull(GraphQLString),
                description='A cursor for use in pagination',
            )
        }, **resolveMaybeThunk(edgeFields))
    )
    connectionType = GraphQLObjectType(
        name+'Connection',
        description='A connection to a list of items.',
        fields=lambda: dict({
            'pageInfo': GraphQLField(
                GraphQLNonNull(pageInfoType),
                description='The Information to aid in pagination',
            ),
            'edges': GraphQLField(
                GraphQLList(edgeType),
                description='Information to aid in pagination.',
            )
        }, **resolveMaybeThunk(connectionFields))
    )
    return GraphQLConnection(edgeType, connectionType)
# The common PageInfo type shared by every connection type generated by
# connectionDefinitions(), per the Relay connection specification.
pageInfoType = GraphQLObjectType(
    'PageInfo',
    description='Information about pagination in a connection.',
    fields=lambda: {
        'hasNextPage': GraphQLField(
            GraphQLNonNull(GraphQLBoolean),
            description='When paginating forwards, are there more items?',
        ),
        'hasPreviousPage': GraphQLField(
            GraphQLNonNull(GraphQLBoolean),
            description='When paginating backwards, are there more items?',
        ),
        'startCursor': GraphQLField(
            GraphQLString,
            description='When paginating backwards, the cursor to continue.',
        ),
        'endCursor': GraphQLField(
            GraphQLString,
            description='When paginating forwards, the cursor to continue.',
        ),
    }
)
| StarcoderdataPython |
9728634 | <filename>tvdordrecht/swimtest/apps.py<gh_stars>0
from django.apps import AppConfig
class SwimTestConfig(AppConfig):
    """Django app configuration for the swimtest ('Zwemtest') app."""
    name = 'swimtest'
    verbose_name = "Zwemtest"
1753147 | <filename>Lesson4/line_plot_with_ggplot.py
from pandas import *
from ggplot import *
import pandas
def lineplot(hr_year_csv):
    """Plot homeruns per year as a red line-and-point ggplot chart.

    Args:
        hr_year_csv: path to a csv file with two columns - 'HR' (the number
            of homerun hits) and 'yearID' (the year in which they were hit).
            Sample data: https://www.dropbox.com/s/awgdal71hc1u06d/hr_year.csv

    Returns:
        A ggplot object with red points connected by red lines, HR by year.
        (See https://github.com/yhat/ggplot/ for the plotting library.)
    """
    data = pandas.read_csv(hr_year_csv)
    gg = ggplot(data, aes(data['yearID'], data['HR'])) + geom_point(color='red') + geom_line(color='red')
    return gg
4926961 | <reponame>olavosamp/kaggle_isic_2020
# Anatomical site categories appearing in the dataset metadata.
metadata_anatom_categories = ("torso", "lower extremity", "upper extremity",
                              "head/neck", "palms/soles", "oral/genital")

# ImageNet per-channel statistics (RGB) for input normalization.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

# Matplotlib default figure sizes, in inches.
MPL_FIG_SIZE = (18, 9)
MPL_FIG_SIZE_SMALL = (8, 4)
9620865 | <filename>exchangepair.py
#!/usr/bin/env python3
class ExchangePair:
    """A pair of exchanges being compared for arbitrage opportunities."""

    def __init__(self, cutoff, exchange0, exchange1):
        self.exchange0 = exchange0
        self.exchange1 = exchange1
        # Running average of the price difference over roughly the past
        # two hours, per traded currency.
        self.runningAverages = {}
        for key in exchange0.wallets.keys():
            self.runningAverages[key] = -0.25
        self.last = 0.2 - cutoff / 2

    def __getitem__(self, index):
        """Allow pair[0] / pair[1] access to the two exchanges."""
        if index == 0:
            return self.exchange0
        elif index == 1:
            return self.exchange1
        else:
            raise IndexError("Exchange Pair contains indices 0 and 1")

    def get_diff(self, key):
        """Return the percentage price difference for *key* across the pair.

        Which exchange is treated as the buy side depends on which one's
        arbitrar wallet is its value wallet.
        """
        buy_exchange = (0 if self[0].arbitrar == self[0].valueWallet else 1)
        if buy_exchange == 0:
            price1 = self[0].get_buy_price_for(key)
            price2 = self[1].get_sell_price()
        else:
            price1 = self[0].get_sell_price()
            price2 = self[1].get_buy_price_for(key)
        return self.calculate_diff(key, price1, price2)

    def calculate_diff(self, key, price1, price2):
        """Return (price2 - price1) as a percentage of the lower price.

        Also prints which exchange currently has the higher price.
        """
        symbol = key + "-" + self[0].arbitrar.currency
        diff = price2 - price1
        # BUG FIX: the original denominator ternary
        # (price1 if price1 < price2 else price1) evaluated to price1 in
        # BOTH branches; normalise by the lower of the two prices instead.
        diffp = diff / min(price1, price2) * 100
        # Print higher first
        print(symbol, ":", (self[0].get_name() if diff < 0 else self[1].get_name()).ljust(6),
              str("%.3f" % diffp).rjust(6) + "%\n")
        return diffp
4863232 |
# ! Desafio 61
# ! Refaça o desafio 051, lendo o primeiro termo e a razão de uma PA, mostrando os 10 primeiros termos da progressão usando a estrutura while. | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.