seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
1457996365 | n = int(input())
# Codeforces garland problem: color n lamps with the 7 rainbow colors.
colors = "ROYGBIV"
res = ""
# Base coloring: cycle through the palette.
for i in range(n):
    res += colors[i % 7]
# For n > 7 the cyclic tail can clash with the wrap-around back to the
# start of the garland, so the last three positions are re-chosen.
if len(res) > 7:
    for idx, c in enumerate(res[-3:], n-3):
        # Candidate colors: those not used in the 3 positions before idx
        # nor in the prefix window that position idx wraps around to.
        # NOTE(review): the slice arithmetic here is intricate — verify
        # against the original problem's adjacency constraints.
        cs = ''.join([ x if x not in res[idx-3:idx] + res[:3-(n-idx)+1] else '' for x in colors])
        if idx < 7:
            # Small-index case: additionally avoid every color used so far.
            s = set(res[:idx])
            cs = [c for c in cs if c not in s]
        # Replace position idx with the first admissible color; positions
        # after idx are rebuilt by the remaining loop iterations.
        res = res[:idx] + cs[0]
print(res) | userr2232/PC | Codeforces/B/78.py | 78.py | py | 388 | python | en | code | 1 | github-code | 13 |
74175411538 | import main
import sys
# Table-driven checks for main.get_miss_you_response: each entry pairs the
# Alexa-style intent slots with the exact output speech expected back.
miss_you_tests = [
    {
        'expected': 'Andy loves you so much!',
        'slots': {
            'person': { 'value': 'me' },
            'verb': {'value': 'loves'}
        }
    },
    {
        'expected': 'Andy adores you so much!',
        'slots': {
            'person': { 'value': 'you' },
            'verb': {'value': 'adores'}
        }
    },
    {
        'expected': 'Andy misses Melanie so much!',
        'slots': {
            'person': { 'value': 'Melanie' },
            'verb': {'value': 'misses'}
        }
    }
]
failure = False
for test in miss_you_tests:
    expected = test['expected']
    # Minimal Alexa intent envelope: only the 'slots' key is consulted.
    intent = {'slots': test['slots']}
    actual = main.get_miss_you_response(intent)['response']['outputSpeech']['text']
    print(expected == actual, 'expected:', expected, 'actual:', actual)
    if expected != actual:
        failure = True
# Exit non-zero so a CI runner registers the failure.
if failure:
    sys.exit(1)
| andrewmacheret/a1-alexa-functions | src/lambda/test.py | test.py | py | 817 | python | en | code | 1 | github-code | 13 |
18713673673 | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def isSubStructure(A: TreeNode, B: TreeNode) -> bool:
    """Return True if tree B is a substructure of tree A.

    By this problem's convention an empty B (or empty A) is never a
    substructure, so the function answers False when either root is missing.
    """
    if not A or not B:
        return False

    def matches_from(root, sub) -> bool:
        # sub exhausted: every node of B matched along this path.
        if not sub:
            return True
        # Mismatch, or A ran out of nodes while B still has some.
        if not root or root.val != sub.val:
            return False
        return matches_from(root.left, sub.left) and matches_from(root.right, sub.right)

    # Either B hangs at the current root, or somewhere in a subtree of A.
    return (matches_from(A, B)
            or isSubStructure(A.left, B)
            or isSubStructure(A.right, B))
if __name__ == "__main__" :
    # Tree A:      3
    #             / \
    #            5   1
    #           / \  /
    #          6  2 7
    node = TreeNode(3)
    node.left = TreeNode(5)
    node.right = TreeNode(1)
    node.left.left = TreeNode(6)
    node.left.right = TreeNode(2)
    node.right.left = TreeNode(7)
    # Tree B:      1
    #             /
    #            7
    node1 = TreeNode(1)
    node1.left = TreeNode(7)
    # B matches the subtree of A rooted at the node with value 1 -> True.
    result = isSubStructure(node,node1)
    print(result)
| russellgao/algorithm | dailyQuestion/2020/2020-10/10-04/python/solution.py | solution.py | py | 808 | python | en | code | 3 | github-code | 13 |
74831756496 | # -*- coding: utf-8 -*-
'''
概念:一种保存数据的格式
作用:可以保存本地的json 文件,也可以将json串进行传输,
通常将json称为轻量级的传输方式
json文件组成:
{} 代表对象
[] 代表列表
: 代表键值对
, 分隔两个部分
'''
'''
1.loads针对内存对象,即将Python内置数据序列化为字串
2.load针对文件句柄
3.dumps
4.dump
json.dumps : dict转成str
json.dump是将python数据保存成json
json.loads:str转成dict
json.load是读取json数据
'''
import json
# A JSON document held as a plain Python string (JSON text, not a dict yet).
jsonStr = '''{
"name":"菜鸟教程",
"url":"www.runoob.com",
"slogan":"学的不仅是技术,更是梦想!"
}'''
# json.loads: deserialize a JSON-formatted string into the matching Python
# object (a dict for a JSON object).
jsonData = json.loads(jsonStr)
print(jsonData)
print(type(jsonData))
print(jsonData["name"])
# json.dumps: serialize a Python object into a JSON-formatted string.
jsonData2 = {
    "name":"cainiaojiaocheng",
    "url":"www.runoob.com",
    "slogan":"dream"
}
jsonStr2 = json.dumps(jsonData2)
print(jsonStr2)
print(type(jsonStr2))
# json.load: read and parse a local JSON file (opened in binary mode).
path1 = r"F:\Python\Test\test.json"
with open(path1,"rb") as f:
    data = json.load(f)
    print(data)
    print(type(data))
# json.dump: write a Python object out to a local JSON file.
path2 = r"F:\Python\Test\test2.json"
jsonData3 = {
    "name":"cainiaojiaocheng",
    "url":"www.runoob.com",
    "slogan":"dream"
}
with open(path2,"w") as f:
    json.dump(jsonData3,f)
| AWangHe/Python-basis | 18.PaChong/7.json讲解.py | 7.json讲解.py | py | 1,549 | python | zh | code | 0 | github-code | 13 |
8390677815 | class Settings(object):
'''存储所有设置的类'''
# 定义画面帧率
    def __init__(self):
        '''Initialize the game's static settings.'''
        # Target frames per second for the game loop.
        self.FRAME_RATE = 60
        # Screen dimensions in pixels and background fill color (RGB).
        self.screen_width = 600
        self.screen_height = 800
        self.bg_color = (230, 230, 230)
        # Ship movement speed, in pixels per frame.
        self.ship_speed_factor = 8
        # Bullet speeds and geometry; factor2 is presumably a slower bullet
        # variant — TODO confirm against the code that reads it.
        self.bullet_speed_factor = 12
        self.bullet_speed_factor2 = 6
        self.bullet_width = 5
        self.bullet_height = 10
        self.bullet_color = (255, 100, 100)
        # Alien movement speed, in pixels per frame.
        self.alien_speed_factor = 4
| yiyayiyayoaaa/pygame-alien | setting.py | setting.py | py | 537 | python | en | code | 0 | github-code | 13 |
25522881623 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Description: MySQLDB 工具
Author: jingyu
Date: 2022-01-16 14:23:43
LastEditors: Please set LastEditors
LastEditTime: 2022-01-19 01:05:11
'''
from .BaseDB import BaseDB
import MySQLdb
class MySQLDB(BaseDB):
def __init__(self, host, port, user, passwd, db, charset='utf8', autocommit=True):
super().__init__(host, port, user, passwd, db)
self._db_conf['charset'] = charset
self.conn = None
self.cursor = None
self._autocommit = autocommit
self._table = None
self.last_execute_sql = None
self.conn = self.get_conn()
self.cursor = self.get_cursor(MySQLdb.cursors.DictCursor)
    def get_conn(self):
        """Return a live MySQL connection, creating it on first use.

        An existing connection is ping()ed first, so a dropped connection
        raises here rather than failing later on the next query.
        """
        if self.conn is not None:
            self.conn.ping()
            return self.conn
        try:
            self.conn = MySQLdb.connect(**self._db_conf)
        except Exception as e:
            raise e
        return self.conn
    def get_cursor(self, cursor_type=None):
        """Return a new cursor, (re)establishing the connection if needed.

        cursor_type: a MySQLdb cursor class, e.g. MySQLdb.cursors.DictCursor;
        None yields the driver's default cursor.
        """
        if not self.conn:
            self.get_conn()
        else:
            # Verify the existing connection is still alive.
            self.conn.ping()
        try:
            self.cursor = self.conn.cursor(cursor_type)
        except Exception as e:
            raise e
        return self.cursor
def get_last_sql(self):
return self.last_execute_sql
def get_all_tables(self):
show_sql = "SHOW TABLES"
tables = self.execute(show_sql)
return [ v for table in tables for k, v in table.items()]
'''
简单实现创建表
input:
{
"id": "INT UNSIGNED AUTO_INCREMENT PRIMARY KEY",
"name": "varchar(50) NOT NULL DEFAULT ''"
....
}
'''
def create_table(self, table, col_items={}):
create_sql = ""
for column, property in col_items.items():
create_sql += """
`{column}` {property},""".format(column=column, property=property)
create_sql = create_sql.strip()[:-1]
if create_sql:
create_sql = """
CREATE TABLE IF NOT EXISTS `{table}` (
{create_body}
)
""".format(table=table, create_body=create_sql)
return self.execute(create_sql)
def drop_table(self, table):
delete_table_sql = """DROP TABLE IF EXISTS `{table}`""".format(table=table)
return self.execute(delete_table_sql)
def get_table_indexes(self, table):
index_sql = """SHOW INDEX FROM {table}""".format(table=table)
idxs = self.execute(index_sql)
exists_index = {}
for idx in idxs:
key_name =idx['Key_name']
index_type = idx['Index_type']
column = idx['Column_name']
non_unique = idx['Non_unique']
if key_name == "PRIMARY":
type = "PRIMARY"
elif index_type == "FULLTEXT":
type = "FULLTEXT"
elif non_unique == 0:
type = "UNIQUE"
else:
type = "NORMAL"
if key_name in exists_index:
exists_index[key_name]['column'].append(column)
else:
exists_index[key_name] = {
'index_name': key_name,
'column': [column],
'type': type
}
return list(exists_index.values())
'''
创建索引
indexes structure
[{"index_name": "idx_name", "column": ["column1", ...], "type": "NORMAL"}, ...]
特别说明
# 当创建主键时, index_name 的值需为PRIMARY
type:
NORMAL UNIQUE FULLTEXT PRIMARY
若原表存在相同的索引, 则修改
'''
def create_indexes_on_table(self, table, indexes, cover=False):
if not indexes:
return
exists_idx_list = self.get_table_indexes(table)
exists_indexes = {}
for idx in exists_idx_list:
exists_indexes[idx['index_name']] = idx
index_sql = ""
for index in indexes:
idx_name = index['index_name']
columns = index['column']
type = index['type']
column_str = self.format_select_fields(columns)
if idx_name in exists_indexes:
# 原来存在相同的索引
if cover:
# 删除
if type == "PRIMARY":
index_sql += """
DROP INDEX PRIMARY KEY,
ADD PRIMARY KEY ({column_str}),""".format(column_str=column_str)
else:
type_str = ""
if type in ["UNIQUE", "FULLTEXT"]:
type_str = type
index_sql += """
DROP INDEX `{idx_name}`,
ADD {type_str} INDEX `{idx_name}`({column_str}),""".format(idx_name=idx_name, type_str=type_str,column_str=column_str)
else:
print("索引重复, 且不覆盖: ", table, index)
else:
if type == "PRIMARY":
index_sql += """
ADD PRIMARY KEY ({column_str}),""".format(column_str=column_str)
else:
type_str = ""
if type in ["UNIQUE", "FULLTEXT"]:
type_str = type
index_sql += """
ADD {type_str} INDEX `{idx_name}`({column_str}),""".format(idx_name=idx_name, type_str=type_str,column_str=column_str)
index_sql = index_sql.strip()[:-1]
if index_sql:
index_sql = "ALTER TABLE `%s` " % table + index_sql
return self.execute(index_sql)
'''
根据传入的索引名进行删除, 传入空不删除所有的索引
input:
["PRIMARY", 'idx1', .... ],
说明:
删除主键需要写定 PRIMARY
'''
def drop_indexes_on_table(self, table, indexes):
if isinstance(indexes, str):
indexes = [indexes]
exists_idx_list = self.get_table_indexes(table)
exists_indexes = {}
for idx in exists_idx_list:
exists_indexes[idx['index_name']] = idx
drop_idx_sql = ""
for idx_name in indexes:
if idx_name in exists_indexes:
type = exists_indexes[idx_name]['type']
if type == "PRIMARY":
drop_idx_sql += """
DROP PRIMARY KEY,
"""
else:
drop_idx_sql += """
DROP INDEX `{idx_name}`,""".format(idx_name=idx_name)
else:
print("%s does not exist index[%s]" % (table, idx_name))
drop_idx_sql = drop_idx_sql.strip()[:-1]
if drop_idx_sql:
drop_idx_sql = "ALTER TABLE %s " % table + drop_idx_sql
return self.execute(drop_idx_sql)
# 获取表的字段
def get_table_fields(self, table):
try:
self.cursor.execute("SHOW FIELDS FROM %s" % table)
fields = list([each['Field'] for each in self.cursor.fetchall()])
return fields
except Exception as e:
raise e
'''
格式化投射字段
input: ['filed1', 'field2 as f2', ....]
output: `field1`, `field2` as `f2`
'''
def format_select_fields(self, fields):
fields_str = ",".join([
"`{0}` as `{1}`".format(*field.split(' as ')) if ' as ' in field else "`%s`" % field for field in fields
])
return fields_str
'''
将数据格式化成sql语句需要代替的形式
'''
def format_data(self, data, where_data=None, defaults={}, is_many=False):
if is_many:
fields = list(data[0].keys())
fields.sort()
data_list = []
if where_data is None:
where_data = []
for i, each in enumerate(data):
cur_where_data = {}
if 0 <= i < len(where_data):
cur_where_data = where_data[i]
cur_where_keys = list(cur_where_data.keys())
data_list.append(
tuple(
[
each.get(field) if not self._is_value_empty(each.get(field))
else self._deal_default( defaults.get(field) if defaults else None )
for field in fields
]
+
[
cur_where_data.get(key) for key in cur_where_keys
]
)
)
return data_list
else:
fields = list(data.keys())
fields.sort()
format_data = [
data.get(field) if not self._is_value_empty(data.get(field))
else self._deal_default( defaults.get(field) if defaults else '' )
for field in fields
]
if where_data:
# 涉及到索引问题, 保持原配置顺序
where_keys = list(where_data.keys())
format_data += [ where_data[key] for key in where_keys ]
return tuple(format_data)
'''
select 查询语句
input:
table -> table_name
fields -> [] or ['col1', 'col2'],
where_data -> 'where id < 15 limit 3'
'''
def select(self, table, fields, where_data=None):
if fields:
fields_str = self.format_select_fields(fields)
else:
fields_str = "*"
if where_data:
select_sql = "SELECT %s FROM %s %s" % (fields_str, table, where_data)
else:
select_sql = "SELECT %s FROM %s " % (fields_str, table)
return self.execute(select_sql)
'''
根据封装好的数据格式, 插入数据
input:
1. table_name, {"c1": "v1", "c2": "v2"}
2. table_name, [{"c1": "v1", "c2": "v2"}, {"c1": "v3", "c2": "v4"}, ...]
'''
def insert(self, table, insert_data, batch=5000):
is_many = self._is_data_many(insert_data)
fields = list(insert_data[0].keys() if is_many else insert_data.keys())
fields.sort()
insert_fields_str = self.format_select_fields(fields)
value_s_str = ",".join(["%s" for i in range(len(fields))])
format_data = self.format_data(insert_data,is_many=is_many)
insert_sql = "INSERT INTO `%s`(%s) VALUES(%s)" % (table, insert_fields_str, value_s_str)
try:
self.last_execute_sql = insert_sql
if is_many:
index = 0
while index < len(insert_data):
self.cursor.executemany(insert_sql, format_data[index: index + batch])
index += batch
else:
self.cursor.execute(insert_sql, format_data)
if self._autocommit:
self.conn.commit()
except Exception as e:
self.conn.rollback()
raise e
'''
更新语句
input
1. table_name {set_c1: v1, ...} {where_c1:w1, ...}
2. table_name [{set_c1: v11, ...}, {set_c1: v12, ...}] [{where_c1:w11, ...}, {where_c1:w12, ...}]
关于 default_data:
在 set_data 复制时, 若字段为None, 可以从default_data中找默认值,并处理
'''
def update(self, table, set_data, where_data, default_data={}):
is_many = self._is_data_many(set_data)
if not is_many:
set_data = [set_data]
where_data = [where_data]
format_data = self.format_data(set_data, where_data, default_data, is_many=True)
try:
for i in range(len(set_data)):
fields = list(set_data[i].keys())
fields.sort()
set_str = ",".join([ "`%s` = %s" % (field, "'%s'" if isinstance(set_data[i].get(field), str) else "%s") for field in fields ])
where_fields = list(where_data[i].keys())
where_str = " AND ".join([ "`%s` = %s" % (field, "'%s'" if isinstance(where_data[i].get(field), str) else "%s") for field in where_fields ])
update_sql = "UPDATE `%s` SET %s WHERE %s" % (table, set_str, where_str)
update_sql = update_sql % format_data[i]
self.last_execute_sql = update_sql
self.cursor.execute(update_sql)
if self._autocommit:
self.conn.commit()
except Exception as e:
self.conn.rollback()
raise e
def delete(self, table, where_data=None):
if where_data:
delete_sql = "DELETE FROM `%s` %s " % (table, where_data)
else:
delete_sql = "DELETE FROM `%s` " % table
return self.execute(delete_sql)
# 执行原生sql
def execute(self, sql):
try:
self.last_execute_sql = sql
self.cursor.execute(sql)
if self._autocommit:
self.conn.commit
return self.cursor.fetchall()
except Exception as e:
raise e
if __name__ == "__main__":
config = {
"host": "localhost",
"port": 3306,
"user": "root",
"passwd": "root",
"db": "msyql_learn"
}
db = MySQLDB(**config)
| jingyucute/docker-flask-celery | py_toolkits/db/MySQLDB.py | MySQLDB.py | py | 13,579 | python | en | code | 0 | github-code | 13 |
20683127771 | import random
import random

print("welcome to rock paper and scissor project")
print()
# Rock Paper Scissors ASCII Art
# Rock
rock = ("""
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
""")
# Paper
paper = ("""
_______
---' ____)____
______)
_______)
_______)
---.__________)
""")
# Scissors
scissor = ("""
_______
---' ____)____
______)
__________)
(____)
---.__(___)
""")
# Encoding: 0 = rock, 1 = paper, 2 = scissor.  Every (user, computer)
# pair is enumerated explicitly below.
user_choice = int(input("Enter your choice '0' for rock '1' for paper and '2' for scissor: "))
computer_chosen = random.randint(0, 2)
if user_choice == 0 and computer_chosen == 2:
    print("you chose: ", rock, "\nComputer chose: \n", scissor, " You win")
elif computer_chosen == 0 and user_choice == 2:
    print("you chose: ", scissor, "\nComputer chose: \n", rock, " You loose")
elif user_choice == 0 and computer_chosen == 0:
    print("you chose: ", rock, "\n computer chose: \n", rock, " Tie")
elif user_choice == 2 and computer_chosen == 1:
    print("you chose: ", scissor, "\nComputer chose: \n", paper, " You win")
elif computer_chosen == 2 and user_choice == 1:
    # BUG FIX: the original printed the hands swapped for this case —
    # the user picked paper and the computer scissor.
    print("you chose: ", paper, "\nComputer chose: \n", scissor, " You loose")
elif user_choice == 1 and computer_chosen == 1:
    print("you chose: ", paper, "\n computer chose: \n", paper, " Tie")
elif user_choice == 1 and computer_chosen == 0:
    print("you chose :", paper, "\ncomputer chose: \n", rock, " you win")
elif computer_chosen == 1 and user_choice == 0:
    print("you chose: ", rock, "\nComputer chose: \n", paper, " you loose")
elif user_choice == 2 and computer_chosen == 2:
    # BUG FIX: the original condition re-tested (2, 0), which is already
    # handled above, leaving the scissor-vs-scissor tie unreachable.
    print("you chose: ", scissor, "\n computer chose: \n", scissor, " Tie")
else:
    # Robustness: the original fell through silently for out-of-range input.
    print("Invalid choice: enter 0, 1 or 2.")
| Ayaz-75/Updated-game-Rock-Scissor-and-paper | rock_paper_scissors.py | rock_paper_scissors.py | py | 1,710 | python | en | code | 0 | github-code | 13 |
70213127698 | #!/usr/bin/python
# input files
# ####################
# Root of the HDD SPC working area; the Windows path below is the
# alternative used on the development machine.
BASE_DIR = '/home/jcchen/hgst/hdd_spc'
# BASE_DIR = 'D:/HGST/MFG/processing/HDD_WEBSPC_CR'
CR_FOLDER = BASE_DIR + '/PN16575'
# Excel spec sheets driving the extraction.
SPECIFICATION_XLS = CR_FOLDER + '/spec' + '/HDD SPC Monitoring Parameter Specification rev.5.5.xlsx'
MASTER_XLS = CR_FOLDER + '/spec' + '/C050_HDDWebSPC2_SPCID_Master_List_4.4.xls'
EXTRACTION_FILE = BASE_DIR + "/repo" + "/etc/extraction.xml"
SPCIDS_FILE = CR_FOLDER + '/spcids_pure.txt'
MASTER_PROFILEIDS_FILE = CR_FOLDER + '/profileIds.txt'
DATASOURCE_FILE = "datasource.ini"
# For evidence_gen.py
LOG_DIR = CR_FOLDER + "/ut/log"
SQLRET_DIR = CR_FOLDER + '/ut/sqlret'
UT_LOG = CR_FOLDER + '/ut/ut.log'
# output files
# ####################
MAIN_OUT = CR_FOLDER + "/exs_out.txt"
MODELSET_OUT = CR_FOLDER + "/modelset_out.txt"
EVIDENCE_OUT = CR_FOLDER + "/evidence_out.txt"
MASTER_OUT = CR_FOLDER + "/master_out.txt"
| jccode/spcrobot | settings.py | settings.py | py | 907 | python | en | code | 0 | github-code | 13 |
70114096019 | """Tests for Institute api"""
from django.urls import resolve, reverse
from rest_framework.test import APIClient
from rest_framework import status
from institute import views
from .test_base import TestData
INSTITUTE_URL = reverse("instituteapi:institute-list")
def get_detail_url(pk):
    """Return the institute-detail URL for the given primary key."""
    return reverse("instituteapi:institute-detail", args=[pk])
class InstituteAPITest(TestData):
    """Exercise CRUD operations of the Institute API.

    TestData (the base fixture class) is expected to create exactly one
    institute, available as ``self.institute`` — TODO confirm in test_base.
    """
    def setUp(self):
        # Fresh unauthenticated client plus canned payloads for each verb.
        self.client = APIClient()
        self.valid_payload = {
            "id": "INS002",
            "name": "Test Institute 2",
            "address": "Test Address 2",
            "contact_person": "Test Contact Person 2",
            "status": "Active",
            "branches": "Test Branches 2",
        }
        self.put_payload = {
            "id": "INS001",
            "name": "Test Institute Edited",
            "address": "Test Address Edited",
            "contact_person": "Test Contact Person...",
            "status": "Active",
            "branches": "Test Branches",
        }
        self.patch_payload = {
            "status": "InActive",
        }
    def test_list_institutes(self):
        """Test retrieving a list of institutes"""
        res = self.client.get(INSTITUTE_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # The fixture creates a single institute.
        self.assertEqual(len(res.json()), 1)
    def test_retrieve_institute(self):
        """Test retrieving a institute"""
        url = get_detail_url(self.institute.pk)
        res = self.client.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
    def test_create_institute(self):
        """Test creating a institute"""
        res = self.client.post(INSTITUTE_URL, self.valid_payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertEqual(res.json()["id"], "INS002")
    def test_update_institute(self):
        """Test updating a institute with valid payload (full PUT)"""
        url = get_detail_url(self.institute.pk)
        res = self.client.put(url, self.put_payload)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.json()["name"], "Test Institute Edited")
    def test_partial_update_institute(self):
        """Test partial update a institute with valid payload (PATCH)"""
        url = get_detail_url(self.institute.pk)
        res = self.client.patch(url, self.patch_payload)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.json()["status"], "InActive")
    def test_delete_institute(self):
        """Test deleting a institute"""
        url = get_detail_url(self.institute.pk)
        res = self.client.delete(url)
        self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
| himansrivastava/django-logging | institute/tests/test_institute.py | test_institute.py | py | 2,802 | python | en | code | 0 | github-code | 13 |
3079525479 | import numpy as np
from calc_covariance_matrix import elements_of_covariance_matrix
from estimators_fabric import EstimatorsFabric
from source_trace import SourceTrace
class CTATrace:
"""Класс, описывающий трассу ЕМТ"""
__slots__ = ("number",
"ticks",
"coordinates",
"velocities",
"type",
"coordinate_covariance_matrix",
"head_source_trace",
"additional_source_trace_array")
def __init__(self, head_source_trace: SourceTrace) -> None:
# Номер трассы
self.number = 0
# Текущее время
self.ticks = 0
# Координаты
self.coordinates = np.zeros(3)
# Скорость
self.velocities = np.zeros(3)
# Тип трассы
self.type = None
# Ковариационная матрица координат
self.coordinate_covariance_matrix = np.eye(3)
# Трасса головного источника
self.head_source_trace = head_source_trace
# Массив трасс дополнительных источников
self.additional_source_trace_array = []
def __repr__(self) -> str:
return f"Трасса ЕМТ номера {self.number!r} с количеством источников {len(self.all_source_traces)!r}. " \
f"Объект класса {self.__class__.__name__} по адресу в памяти {hex(id(self))}"
@property
def registration(self) -> list:
"""Регистрируем номер, координаты, скорость, элементы ковариационной матрицы, количество источников по трассе ЕМТ
:return: Региструриуемые величины в виде одномерного массива
:rtype: list
"""
return [self.number, *self.coordinates.tolist(), *self.velocities.tolist(),
*elements_of_covariance_matrix(self.coordinate_covariance_matrix), len(self.all_source_traces)]
@property
def all_source_traces(self) -> list:
"""
:return: Список всех трасс источников
:rtype: list
"""
return [self.head_source_trace] + self.additional_source_trace_array
def must_identify_with_source_trace(self, trace: SourceTrace) -> bool:
"""Проверяет нужно ли отождествление с этой трассой источника.
Проверка идёт по номерам МФР: от одного МФР не отождествляем
:param trace: Трасса источника - кандидат для отождествления
:type trace: SourceTrace
:return: Признак нужно ли отождествление
:rtype: bool
"""
return not any(trace.mfr_number == source_trace.mfr_number for source_trace in self.all_source_traces)
def must_identify_with_cta_trace(self, cta_trace) -> bool:
"""Проверяет нужно ли отождествление с этой трассой ЕМТ
Проверка идёт по номером МФР: от одного МФР не отождествляем
:param cta_trace: Трасса ЕМТ - кандидат на отождествление
:type cta_trace: CTATrace
:return: Признак нужно ли отожедствление
:rtype: bool
"""
cta_trace: CTATrace
return not any(self_source_trace.mfr_number == cta_trace_source_trace.mfr_number
for self_source_trace in self.all_source_traces
for cta_trace_source_trace in cta_trace.all_source_traces)
def add_new_source_trace(self, source_trace: SourceTrace) -> None:
"""Добавление наиболее близкой трассы из всех отождествившихся в массив дополнительных трасс ЕМТ
:param source_trace: Новый источник по трассе ЕМТ
:type source_trace: SourceTrace
:return: None
"""
# Добавляем информацию от трассе ЕМТ в трассу источника
source_trace.append_cta_info_and_number(num=self.number, is_head=False)
# Добавляем трассу источника как дополнительную
self.additional_source_trace_array.append(source_trace)
def del_additional_source_trace(self, source_trace: SourceTrace) -> None:
"""Удаление дополнительного источника трассы
:param source_trace: Дополнительная трасса, от которой нужно избавиться
:type source_trace: SourceTrace
:return: None
"""
# Убираем информацию о трассе ЕМТ и номере в трассе источника
source_trace.delete_cta_info_and_number()
# Удаляем трассу истояника из состава ЕМТ
self.additional_source_trace_array.remove(source_trace)
def sort_sources(self) -> None:
"""Сортировка источников трасс, корректировка признаков, головного и дополнительных источников
:return: None
"""
# Сначала сортируем по признаку пеленга, потом по АС, далее по времени оценки координат
all_sorted_source_traces = sorted(self.all_source_traces, key=self.sort_key_function, reverse=True)
# Запоминаем головную трассу и дополнительные источники
self.head_source_trace, *self.additional_source_trace_array = all_sorted_source_traces
# Выставляем признаки головного источника
self.head_source_trace.is_head_source = True
for source_trace in self.additional_source_trace_array:
source_trace.is_head_source = False
@staticmethod
def sort_key_function(trace: SourceTrace) -> tuple:
"""Функция для сортировки трасс источников, применяется к каждой трассе источника, входящей в трассу ЕМТ
:param trace: Трасса источника
:type trace: SourceTrace
:return: В порядке важности признаки сортировки: признак АШП, признак АС, время оценки координат
:rtype: tuple
"""
return not trace.is_bearing, trace.is_auto_tracking, trace.estimate_tick
def delete_sources_traces(self):
"""Удаление трасс источников
:return: None
"""
for source_trace in self.all_source_traces:
source_trace.delete_cta_info_and_number()
self.head_source_trace = None
self.additional_source_trace_array = []
def change_numbers(self, num: int) -> None:
"""Изменение номера трассы ЕМТ и связанных номеров трасс источников
:param num: Номер трассы ЕМТ
:type num: int
:return: None
"""
self.number = num
for source_trace in self.all_source_traces:
source_trace.cta_number = self.number
def calculate_self_data(self) -> None:
"""Получение итоговой оценки координат, скорости и ковариационной матрицы
:return: None
"""
estimator = EstimatorsFabric.generate(self.all_source_traces)
self.coordinates = estimator.coordinates
self.velocities = estimator.velocities
self.coordinate_covariance_matrix = estimator.coordinates_covariance_matrix
| Igor9rov/TrackingAndIdentificationModel | Model/ModelCP/cta_trace.py | cta_trace.py | py | 8,244 | python | ru | code | 0 | github-code | 13 |
6627891139 | from modulefinder import packagePathMap
from fastapi import APIRouter, Response, Depends, HTTPException, status
from starlette.status import HTTP_201_CREATED
from schemas.input_schema import InputSchema
from config.db import engine
from models.input import inputs
from typing import List
import secrets
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from pydantic import BaseModel
input_router = APIRouter()
security = HTTPBasic()
class InputCreationRequest(BaseModel):
field_1: str
author: str
description: str
my_numeric_field: float
memory_db=[]
def get_current_username(credentials: HTTPBasicCredentials = Depends(security)):
    """FastAPI dependency enforcing HTTP Basic auth against fixed credentials.

    Uses secrets.compare_digest for constant-time comparison.  NOTE(review):
    the username/password are hard-coded; move them to configuration or a
    secret store before production use.
    """
    correct_username = secrets.compare_digest(credentials.username, "german")
    correct_password = secrets.compare_digest(credentials.password, "123456")
    if not (correct_username and correct_password):
        # 401 with the WWW-Authenticate header prompts the browser to retry.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect user or password",
            headers={"WWW-Authenticate": "Basic"},
        )
    return credentials.username
@input_router.get("/users/me")
def read_current_user(username: str = Depends(get_current_username)):
return {"username": username}
# @input_router.get("/api/input", response_model=List[InputCreationRequest])
# def get_input():
# with engine.connect() as conn:
# result = conn.execute(inputs.select()).fetchall()
# return result
@input_router.get("/data/{input_id}", response_model=InputCreationRequest)
def get_inputs(input_id: str):
with engine.connect() as conn:
result = conn.execute(inputs.select().where(inputs.c.id == input_id)).first()
return result
@input_router.post("/input/field_1", status_code=HTTP_201_CREATED)
def save_input_field_1(request: InputCreationRequest):
with engine.connect() as conn:
new_input = []
new_input.append("")
for a in request:
if a[1] == request.field_1:
F = request.field_1.upper()
new_input.append(F)
else:
new_input.append(a[1])
conn.execute(inputs.insert().values(new_input))
return Response(status_code=HTTP_201_CREATED)
@input_router.post("/input/author", status_code=HTTP_201_CREATED)
def save_input_author(request: InputCreationRequest):
with engine.connect() as conn:
new_input = []
new_input.append("")
for a in request:
if a[1] == request.author:
F = request.author.upper()
new_input.append(F)
else:
new_input.append(a[1])
conn.execute(inputs.insert().values(new_input))
return Response(status_code=HTTP_201_CREATED)
@input_router.post("/input/description", status_code=HTTP_201_CREATED)
def save_input_description(request: InputCreationRequest):
with engine.connect() as conn:
new_input = []
new_input.append("")
for a in request:
if a[1] == request.description:
F = request.description.upper()
new_input.append(F)
else:
new_input.append(a[1])
conn.execute(inputs.insert().values(new_input))
return Response(status_code=HTTP_201_CREATED)
@input_router.post("/input/my_numeric_field", status_code=HTTP_201_CREATED)
def save_input_my_numeric_field():
return {"error": "my_numeric_field no es un campo valido para convertir a mayuscula"}
@input_router.post("/input/random_field", status_code=HTTP_201_CREATED)
def save_input_random_field():
return {"error": "random_field no es un campo valido para convertir a mayuscula"}
| neof0x/proyecto | routes/input.py | input.py | py | 3,764 | python | en | code | 1 | github-code | 13 |
27022026503 | import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import math
import random
import skimage.io
import skimage.transform
import skimage.filters
import skimage.feature
import skimage.morphology
import scipy.ndimage.morphology
import skimage.measure
import skimage.color
import sklearn.cluster
import collections
import sklearn.metrics
import matplotlib.cm
import gap_statistic
import multiprocessing
import functools
import errno
import warnings
from mpl_toolkits.mplot3d import Axes3D
from copy import copy
import pickle
def background_entropy(img):
    """
    Estimate background statistics from the four border strips of an image.

    Parameters
    ----------
    img : np.array(2D) - 2D array representing image

    Returns
    -------
    float : mean of the four border crops, averaged
    float : std of the four border crops, averaged
    """
    height, width = img.shape
    # Strip thickness: one tenth of the shorter image side.
    margin = int(min(height, width) / 10)
    crops = [
        img[0:margin, 0:width],                              # top strip
        img[margin:height - margin, 0:margin],               # left strip
        img[height - margin:height, 0:width],                # bottom strip
        img[margin:height - margin, width - margin:width],   # right strip
    ]
    mean = (np.mean(crops[0]) + np.mean(crops[1]) + np.mean(crops[2]) + np.mean(crops[3])) / 4
    std = (np.std(crops[0]) + np.std(crops[1]) + np.std(crops[2]) + np.std(crops[3])) / 4
    return mean, std
def process_image(path_save_pics, site_nr, pic, channel='gray eye', noise_filter='gaussian', edge_detection='sobel', noise_filter2 = 'gaussian',
                  thresholding='otsu', closing='closing', fill_holes='fill holes',
                  filter_params=[None, 5, 5, 5, 5, 2.1, None, 0],plot=False):
    """
    Main image-segmentation pipeline: channel selection -> denoise ->
    edge detection -> denoise -> threshold -> morphological closing ->
    hole filling.  Returns the final binary mask.

    Parameters
    ----------
    path_save_pics : str - path to location where processed images will be saved
    site_nr : int - index of LIDT image in a dataset
    pic : np.array() - 3D array representing image
    channel : str - possible values: 'rgb r','rgb g','rgb b','hsv h','hsv s',
        'hsv v','gray eye','gray equal','normalised','original', None
    noise_filter : str - possible values: 'gaussian','median','mean', None
    edge_detection : str - possible values: 'sobel','canny','entropy',
        'background', None
    noise_filter2 : str - possible values: 'gaussian','median','mean', None
    thresholding : str - possible values: 'otsu >','otsu <','yen >','yen <',
        'manual >','manual <','mean >','mean <','minimum >','minimum <', None
    closing : str - possible values: 'closing', None
    fill_holes : str - possible values: 'fill holes', None
    filter_params : list - per-stage numeric parameters; indices used here:
        [1] first-denoise size/sigma, [2] edge-detection parameter (sigma,
        disk radius or DBSCAN eps), [3] second-denoise size/sigma,
        [4] closing disk radius, [5] manual-threshold std multiplier,
        [7] manual-threshold sign flag.  NOTE(review): mutable default
        argument - safe only because the list is never mutated here.
    plot : bool - if True, show an 8-panel figure of all pipeline stages

    Returns
    -------
    np.array() : processed (binary) image

    NOTE(review): each stage's output variable is assigned only inside its
    ``if stage != None`` block, so passing None for an earlier stage makes a
    later one raise NameError - the pipeline assumes all stages are enabled.
    """
    titles_list = ['original']
    pics_list = [pic]
    # --- Stage 1: channel / colour-space selection --------------------------
    if channel != None:
        if channel == 'rgb r':
            pic_channels = pic[:, :, 0]
        elif channel == 'rgb g':
            pic_channels = pic[:, :, 1]
        elif channel == 'rgb b':
            pic_channels = pic[:, :, 2]
        elif channel == 'hsv h':
            pic_channels = skimage.color.rgb2hsv(pic)[:, :, 0]
        elif channel == 'hsv s':
            pic_channels = skimage.color.rgb2hsv(pic)[:, :, 1]
        elif channel == 'hsv v':
            pic_channels = skimage.color.rgb2hsv(pic)[:, :, 2]
        elif channel == 'gray eye':
            # Luminance-weighted grayscale conversion.
            pic_channels = skimage.color.rgb2gray(pic)
        elif channel == 'gray equal':
            # Unweighted average of the three channels.
            pic_channels = np.average(pic, axis=2)
        elif channel == 'normalised':
            # Divide by per-channel means, then keep the darkest channel.
            normalized = (pic.astype(float) / np.mean(pic, axis=(0, 1)))
            pic_channels = (np.min(normalized, axis=-1) * 200).astype(np.uint8)
        elif channel == 'original':
            pic_channels = pic
        else:
            pic_channels = np.average(pic, axis=2)
        titles_list.append("channel")
        pics_list.append(pic_channels)
    # --- Stage 2: first denoising pass --------------------------------------
    if noise_filter != None:
        if noise_filter == 'gaussian':
            noise = skimage.filters.gaussian(pic_channels, sigma=filter_params[1], mode= 'nearest')
        elif noise_filter == 'median':
            noise = skimage.filters.median(skimage.img_as_ubyte(pic_channels), selem=skimage.morphology.disk(int((filter_params[1]))))
        elif noise_filter == 'mean':
            noise = skimage.filters.rank.mean(skimage.img_as_float(pic_channels/pic_channels.max()), selem=skimage.morphology.disk(int(filter_params[1])))
        else:
            noise = pic_channels
        titles_list.append("noiseFilter" )
        pics_list.append(noise)
    # --- Stage 3: edge / object detection -----------------------------------
    if edge_detection != None:
        if edge_detection == 'sobel':
            edge = skimage.filters.sobel(noise, mask=None)
        elif edge_detection == 'canny':
            edge = skimage.feature.canny(skimage.img_as_ubyte(noise), sigma=float(filter_params[2]))
        elif edge_detection == 'entropy':
            edge = skimage.filters.rank.entropy(skimage.img_as_float(noise/255), selem=skimage.morphology.disk(int(filter_params[2])))
        elif edge_detection == 'background':
            # Model the background colour cloud with DBSCAN on a subsampled
            # copy; every pixel whose colour lies outside the convex hull of
            # the background cluster is flagged as foreground.
            # NOTE(review): this branch indexes `noise` with 3 dimensions,
            # so it assumes channel='original' (RGB) upstream - TODO confirm.
            pic = (noise * 255).astype(np.uint8)
            print("filter_params[6]", filter_params[6])
            dalis = pic[::10, ::10, :]
            # Stretch the subsample by 20% around its median to widen the hull.
            subset = ((dalis - np.median(dalis, axis=[0, 1])) * 1.2 + np.median(dalis, axis=[0, 1])).astype(np.uint8)
            data_background = pd.DataFrame(
                ({'R': subset[:, :, 0].flatten(), 'G': subset[:, :, 1].flatten(), 'B': subset[:, :, 2].flatten()}))
            data_all = pd.DataFrame(
                ({'R': pic[:, :, 0].flatten(), 'G': pic[:, :, 1].flatten(), 'B': pic[:, :, 2].flatten()}))
            print("Min_samples", dalis.size, int(0.3*dalis.size))
            klasifikatorius = sklearn.cluster.DBSCAN(
                eps=float(filter_params[2]), min_samples=int(0.3*dalis.size), metric='euclidean').fit(data_background)
            data_background['Labels'] = klasifikatorius.labels_
            print(data_background['Labels'].value_counts())
            if len(np.unique(data_background['Labels'])) != 1:
                data_background = data_background[data_background['Labels'] != -1] # Delete points classified as outliers
                # NOTE(review): scipy.spatial is not imported at module level
                # (only scipy.ndimage.morphology is) - verify this attribute
                # is available at runtime.
                hull = scipy.spatial.Delaunay(data_background[['R', 'G', 'B']].values)
                outside = hull.find_simplex(data_all[['R', 'G', 'B']].values) < 0
                edge = outside.reshape(pic.shape[0], pic.shape[1])
            else:
                # DBSCAN found a single label (all noise or all one cluster):
                # fall back to an all-background mask.
                print("Bad DBSCAN cloud clustering")
                edge = np.zeros((pic.shape[0], pic.shape[1]))
        else:
            edge = noise
        titles_list.append("edgeDetection")
        pics_list.append(edge)
    # --- Stage 4: second denoising pass (smooths the edge map) --------------
    if noise_filter2 != None:
        if noise_filter2 == 'gaussian':
            noise2 = skimage.filters.gaussian(edge, sigma=float(filter_params[3]))
        elif noise_filter2 == 'median':
            # NOTE(review): `mask=` here (vs `selem=` in the first median
            # pass above) looks unintentional - confirm against the
            # skimage.filters.median signature.
            noise2 = skimage.filters.median(skimage.img_as_ubyte(edge), mask=skimage.morphology.disk(int(filter_params[3])))
        elif noise_filter2 == 'mean':
            noise2 = skimage.filters.rank.mean(skimage.img_as_ubyte(edge/edge.max()), selem=skimage.morphology.disk(int(filter_params[3])))
        else:
            noise2 = edge
        titles_list.append("noiseFilter2")
        pics_list.append(noise2)
    # --- Stage 5: binarisation ----------------------------------------------
    if thresholding != None:
        # http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_thresholding.html
        if len(np.unique(noise2)) == 1:
            # Flat image: any automatic threshold would fail, pass through.
            thresh = noise2
        else:
            if thresholding == 'otsu >':
                tre = skimage.filters.threshold_otsu(skimage.color.rgb2gray(noise2.astype(float)))
                thresh = noise2 >= tre
            elif thresholding == 'otsu <':
                tre = skimage.filters.threshold_otsu(skimage.color.rgb2gray(noise2.astype(float)))
                thresh = noise2 <= tre
            elif thresholding == 'yen >':
                tre = skimage.filters.threshold_yen(noise2, nbins=1256)
                thresh = noise2 > tre
            elif thresholding == 'yen <':
                tre = skimage.filters.threshold_yen(noise2, nbins=1256)
                thresh = noise2 < tre
            elif thresholding == 'manual >':
                # Threshold = border-strip mean +/- k * border-strip std;
                # filter_params[7] selects the sign, filter_params[5] is k.
                if filter_params[7]:
                    edge_mean,edge_std = background_entropy(edge)
                    tre = edge_mean - float(filter_params[5]) * edge_std
                    thresh = noise2 >= tre
                else:
                    edge_mean,edge_std = background_entropy(edge)
                    tre = edge_mean + float(filter_params[5]) * edge_std
                    thresh = noise2 >= tre
            elif thresholding == 'manual <':
                if filter_params[7]:
                    edge_mean,edge_std = background_entropy(edge)
                    tre = edge_mean - float(filter_params[5]) * edge_std
                    thresh = noise2 <= tre
                else:
                    edge_mean,edge_std = background_entropy(edge)
                    tre = edge_mean + float(filter_params[5]) * edge_std
                    thresh = noise2 <= tre
            elif thresholding == 'mean >':
                tre = skimage.filters.threshold_mean(noise2)
                thresh = noise2 >= tre
            elif thresholding == 'mean <':
                tre = skimage.filters.threshold_mean(noise2)
                thresh = noise2 <= tre
            elif thresholding == 'minimum >':
                tre = skimage.filters.threshold_minimum(noise2, nbins=256, max_iter=10000)
                thresh = noise2 >= tre
            elif thresholding == 'minimum <':
                tre = skimage.filters.threshold_minimum(noise2, nbins=256, max_iter=10000)
                thresh = noise2 <= tre
            else:
                thresh = noise2
        titles_list.append("thresholding")
        pics_list.append(thresh)
    # --- Stage 6: morphological closing -------------------------------------
    if closing != None:
        if closing == 'closing':
            bin_closing = skimage.morphology.binary_closing(thresh, selem=skimage.morphology.disk(int(filter_params[4])))
        else:
            bin_closing = thresh
        titles_list.append("binClosing")
        pics_list.append(bin_closing)
    # --- Stage 7: fill enclosed holes ---------------------------------------
    if fill_holes != None:
        if fill_holes == 'fill holes':
            bin_fill = scipy.ndimage.morphology.binary_fill_holes(bin_closing).astype(int)
        else:
            bin_fill = bin_closing
        titles_list.append("fillHoles")
        pics_list.append(bin_fill)
    # --- Optional 8-panel visualisation of every pipeline stage -------------
    if plot:
        fig, axes = plt.subplots(2, 4, figsize=(6, 6))
        ax0, ax1, ax2, ax3, ax4, ax5, ax6, ax7 = axes.flatten()
        ax0.set_title(titles_list[0])
        ax0.axis('off')
        img0 = ax0.imshow(pics_list[0])
        ax1.set_title(titles_list[1])
        ax1.axis('off')
        img1 = ax1.imshow(pics_list[1])
        ax2.set_title(titles_list[2])
        ax2.axis('off')
        img2 = ax2.imshow(pics_list[2])
        ax3.set_title(titles_list[3])
        ax3.axis('off')
        img3 = ax3.imshow(pics_list[3])
        # The 'background' edge map is boolean - a colorbar is meaningless.
        if edge_detection != 'background':
            fig.colorbar(img3, ax=ax3)
        ax4.set_title(titles_list[4])
        ax4.axis('off')
        img4 = ax4.imshow(pics_list[4])
        if edge_detection != 'background':
            fig.colorbar(img4, ax=ax4)
        ax5.set_title(titles_list[5])
        ax5.axis('off')
        ax5.imshow(pics_list[5])
        ax6.set_title(titles_list[6])
        ax6.axis('off')
        ax6.imshow(pics_list[6])
        ax7.set_title(titles_list[7])
        ax7.axis('off')
        ax7.imshow(pics_list[7])
        plt.axis('off')
        plt.suptitle("Site: %s" % str(site_nr))
        plt.show(block=False)
    return bin_fill
def make_circle(points):
    """Compute the smallest enclosing circle of *points* (Welzl's algorithm).

    Note: returns None for an empty input; a single point yields a circle
    of radius 0.

    Parameters
    ----------
    points : list - a sequence of (x, y) pairs of floats or ints.

    Returns
    -------
    tuple : (x, y, r) floats describing the minimal enclosing circle,
        or None when *points* is empty.
    """
    # Work on a float copy in randomized order: the incremental algorithm
    # runs in expected linear time on shuffled input.
    pts = [(float(px), float(py)) for (px, py) in points]
    random.shuffle(pts)

    circle = None
    for idx, pt in enumerate(pts):
        # Grow the circle only when the next point falls outside it.
        if circle is None or not is_in_circle(circle, pt):
            circle = _make_circle_one_point(pts[: idx + 1], pt)
    return circle
def _make_circle_one_point(points, p):
    """Smallest circle enclosing *points* with *p* known to lie on the boundary.

    Parameters
    ----------
    points : list - a sequence of (x, y) pairs of floats or ints.
    p : tuple - (x, y) of the known boundary point.

    Returns
    -------
    tuple : (x, y, r) floats describing the circle.
    """
    # Start from the degenerate circle centred on p.
    circle = (p[0], p[1], 0.0)
    for idx, q in enumerate(points):
        if is_in_circle(circle, q):
            continue
        if circle[2] == 0.0:
            # Two boundary points so far: their segment is a diameter.
            circle = make_diameter(p, q)
        else:
            # q is a second boundary point: recurse with both pinned.
            circle = _make_circle_two_points(points[: idx + 1], p, q)
    return circle
def _make_circle_two_points(points, p, q):
    """Smallest circle enclosing *points* with *p* and *q* on the boundary.

    Parameters
    ----------
    points : list - a sequence of (x, y) pairs of floats or ints.
    p, q : tuple - (x, y) of the two known boundary points.

    Returns
    -------
    tuple : (x, y, r) floats describing the circle.
    """
    circ = make_diameter(p, q)
    left = None
    right = None
    px, py = p
    qx, qy = q

    # For every point outside the two-point circle, form a circumcircle and
    # keep the extreme candidate on each side of the directed line p -> q.
    for r in points:
        if is_in_circle(circ, r):
            continue
        cross = _cross_product(px, py, qx, qy, r[0], r[1])
        c = make_circumcircle(p, q, r)
        if c is None:
            continue  # collinear triple: no circumcircle
        c_cross = _cross_product(px, py, qx, qy, c[0], c[1])
        if cross > 0.0:
            if left is None or c_cross > _cross_product(px, py, qx, qy, left[0], left[1]):
                left = c
        elif cross < 0.0:
            if right is None or c_cross < _cross_product(px, py, qx, qy, right[0], right[1]):
                right = c

    # Pick whichever candidate exists; prefer the smaller radius when both do.
    if left is None and right is None:
        return circ
    if left is None:
        return right
    if right is None:
        return left
    return left if left[2] <= right[2] else right
def make_circumcircle(p0, p1, p2):
    """Return the circumscribed circle of the triangle (p0, p1, p2).

    Coordinates are translated towards their bounding-box centre before the
    determinant is evaluated, which improves numerical stability.

    Parameters
    ----------
    p0, p1, p2 : tuple - (x, y) pairs of floats or ints.

    Returns
    -------
    tuple : (x, y, r) centre coordinates and radius, or None when the three
        points are collinear (degenerate triangle).
    """
    (ax, ay), (bx, by), (cx, cy) = p0, p1, p2
    # Shift the origin to the bounding-box centre for numerical stability.
    ox = (min(ax, bx, cx) + max(ax, bx, cx)) / 2.0
    oy = (min(ay, by, cy) + max(ay, by, cy)) / 2.0
    ax, ay = ax - ox, ay - oy
    bx, by = bx - ox, by - oy
    cx, cy = cx - ox, cy - oy
    d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0
    if d == 0.0:
        return None  # collinear points: no unique circumcircle
    sq_a = ax * ax + ay * ay
    sq_b = bx * bx + by * by
    sq_c = cx * cx + cy * cy
    x = ox + (sq_a * (by - cy) + sq_b * (cy - ay) + sq_c * (ay - by)) / d
    y = oy + (sq_a * (cx - bx) + sq_b * (ax - cx) + sq_c * (bx - ax)) / d
    # Radius = farthest vertex distance, guarding against rounding asymmetry.
    radius = max(math.hypot(x - vx, y - vy) for vx, vy in (p0, p1, p2))
    return (x, y, radius)
def make_diameter(p0, p1):
    """Return the smallest circle having the segment p0-p1 as a diameter.

    Returns (cx, cy, r): the midpoint of the segment and its half-length
    (the larger of the two endpoint distances, guarding against
    floating-point asymmetry).
    """
    cx = (p0[0] + p1[0]) / 2.0
    cy = (p0[1] + p1[1]) / 2.0
    return (cx, cy, max(math.hypot(cx - q[0], cy - q[1]) for q in (p0, p1)))
# Tiny multiplicative slack applied to the radius in containment tests.
_MULTIPLICATIVE_EPSILON = 1 + 1e-14


def is_in_circle(c, p):
    """Return True when point *p* lies inside circle *c* (with tolerance).

    Parameters
    ----------
    c : tuple or None - (x, y, r) circle; None counts as "no circle".
    p : sequence - (x, y) point coordinates.

    Returns
    -------
    bool : True if the point is within the (slightly inflated) circle.
    """
    if c is None:
        return False
    # Inflate the radius so points sitting exactly on the boundary are
    # accepted despite floating-point rounding.
    return math.hypot(p[0] - c[0], p[1] - c[1]) <= c[2] * _MULTIPLICATIVE_EPSILON
# Returns twice the signed area of the triangle defined by (x0, y0), (x1, y1), (x2, y2).
def _cross_product(x0, y0, x1, y1, x2, y2):
    """2D cross product of vectors (p1 - p0) and (p2 - p0).

    Positive when p2 lies to the left of the directed line p0 -> p1,
    negative to the right, zero when collinear.
    """
    dx1, dy1 = x1 - x0, y1 - y0
    dx2, dy2 = x2 - x0, y2 - y0
    return dx1 * dy2 - dy1 * dx2
def Three_D(masked_image, centers, plot):
    """
    Scatter-plot the RGB values of a masked image in 3D together with the
    cluster centres.

    Parameters
    ----------
    masked_image : np.array - image array; presumably float values in [0, 1]
        since channels are multiplied by 255 below - TODO confirm
    centers : np.array - cluster centres in 0-255 RGB space
    plot : bool - figure is drawn only when True (sampling happens anyway)
    """
    # define number of points to visualize
    points_to_visualise = 20000
    r = masked_image[:, :, 0].flatten() *255
    g = masked_image[:, :, 1].flatten() *255
    b = masked_image[:, :, 2].flatten() *255
    rgb = [(i, j, k) for i, j, k in zip(r, g, b)]
    # Exact-black pixels are treated as masked-out background and dropped.
    rgb2 = list(filter(lambda x: x != (0, 0, 0), rgb)) # loosing (0,0,0) elements
    # take random points for better visualisation
    # NOTE(review): if every pixel is black, rgb2 is empty and zip(*rgb2)
    # raises - verify callers always pass a non-empty masked object.
    if len(rgb2) >= points_to_visualise:
        ind = np.random.choice(range(len(rgb2)), points_to_visualise)
        rgb2 = [rgb2[i] for i in ind]
        r, g, b = zip(*rgb2)
    else:
        r, g, b = zip(*rgb2)
    if plot == True:
        fig5, ax5 = plt.subplots(figsize=(10, 6))
        ax5 = fig5.add_subplot(111, projection='3d')
        # Colour each sample by its own RGB value.
        ax5.scatter(r, g, b, marker='o', c=np.array(rgb2) / 255, alpha=0.3, s=4)
        for j in list(range(0, len(centers), 1)):  # Plot the centers of clusters
            ax5.scatter(centers[j][0], centers[j][1], centers[j][2],
                        c=centers[j] / 255, marker='+', alpha=1.0, s=100)
        ax5.set_xlabel('X-axis')
        ax5.set_ylabel('Y-axis')
        ax5.set_zlabel('Z-axis')
        plt.title("3D color values scatterplot")
        plt.show()
def plot_centers(centers):
    """Show the cluster-centre colours as a horizontal row of swatches.

    Parameters
    ----------
    centers : np.array - cluster centres in 0-255 RGB space, ordered by
        cluster frequency (most common first).
    """
    extra_part = 0.0
    patches = []
    fig1 = plt.figure("Object's main colors by frequency")
    ax3 = fig1.add_subplot(111)
    for j in list(range(0, len(centers), 1)):
        # One unit-wide rectangle per centre, shifted right each iteration.
        rect = mpatches.Rectangle(
            (0.1 + extra_part, 0.1), 0.9, 0.9,
            facecolor=centers[j] / 255)
        patches.append(rect)
        extra_part += 1.0
    for p in patches:
        ax3.add_patch(p)
    ax3.set_axis_off()
    ax3.set_frame_on(False)
    plt.axis([0, len(centers), 0, 1])
    plt.tight_layout()
    plt.show()
def most_common_colors(image, image_mask=None, plot_feature=False, cluster_num=2, silhouette_threshold=0.6,
                       size_threshold=1400, scale=True, scale_to_px=50, k_min=2, k_max=5):
    """
    Cluster (K-means) colors in the image to find the most common colors.

    Parameters
    ----------
    image : np.array - image of interest
    image_mask : np.array - mask of the image (currently only used for the
        optional plot; the colour clustering runs on the whole image)
    plot_feature : bool - if True: silhouette graph and swatches are drawn
    cluster_num : int - number of K-means clusters actually used (see NOTE)
    silhouette_threshold : int - threshold to define the number of clusters
        (only in the disabled silhouette path)
    size_threshold : int - minimal image size to use silhouette evaluation
    scale : bool - if True: scale image so the longest side is <= scale_to_px
    scale_to_px : int - maximum pixel count for the longest image border
    k_min : int - minimal number of K-means clusters (disabled path)
    k_max : int - maximum number of K-means clusters (disabled path)

    Returns
    -------
    np.array : cluster centers, sorted by descending cluster size
    np.array : relative size of each cluster (fractions summing to 1)
    object : fitted sklearn KMeans object
    """
    if scale:
        # Scale image so that the longest side is no more than scale_to_px
        scale_factor = scale_to_px / max(image.shape) if max(image.shape) > scale_to_px else 1
        image = skimage.img_as_ubyte(skimage.transform.rescale(image, scale_factor))
        if image_mask is not None:
            image_mask = skimage.img_as_ubyte(skimage.transform.rescale(image_mask, scale_factor))
    # Run classifier for each number of clusters
    image_array = image.reshape((image.shape[0] * image.shape[1], 3))
    # Commented out when the "original pic" option was introduced:
    #if image_mask is not None:
    #    image_array = image_array[image_mask.reshape(image_mask.shape[0] * image_mask.shape[1]).astype(bool), :]
    bug = True
    # TODO: Fix bug on macOS - sklearn.metrics.silhouette_score doesn't work in multiprocessing app
    # NOTE(review): `bug = True` permanently disables the silhouette-based
    # model selection below; a fixed-size KMeans(cluster_num) is always used.
    if not bug and cluster_num != 1:
        classifiers = [sklearn.cluster.KMeans(n_clusters=n).fit(image_array) for n in np.arange(k_min, k_max)]
        # Find best classifier according to silhouette score
        sillhouette = [sklearn.metrics.silhouette_score(image_array, clas.labels_, metric='euclidean') for clas in
                       classifiers]
        if (np.amax(sillhouette) > silhouette_threshold) or ((image.shape[0] * image.shape[1]) >= size_threshold):
            best_classifier = classifiers[np.argmax(sillhouette)]
        else:
            best_classifier = sklearn.cluster.KMeans(n_clusters=1).fit(image_array)
    else:
        best_classifier = sklearn.cluster.KMeans(n_clusters=cluster_num).fit(image_array)
        # Placeholder scores so the silhouette plot below still works.
        sillhouette = [0.6, 0.5, 0.4]
    # Sort cluster centers according to cluster size
    _, counts = np.unique(best_classifier.labels_, return_counts=True)
    sort_index = np.argsort(counts)[::-1]
    cluster_centers = best_classifier.cluster_centers_[sort_index]
    cluster_size = counts[sort_index] / np.sum(counts)
    if plot_feature:
        plt.figure()
        plt.title("Silhouette variance score for different number of clusters")
        plt.plot(np.arange(k_min, k_max), sillhouette)
        plt.figure()
        plt.imshow(image * image_mask[:, :, None])
        plot_centers(cluster_centers)
    return cluster_centers, cluster_size, best_classifier
def get_geometrical_features(image_mask, features=None):
    """
    Extract geometrical features from a single-object binary mask.

    Parameters
    ----------
    image_mask : np.array - image mask of interest (must contain exactly
        one labelled object; only the first region is used)
    features : list - feature names to extract; see the default list below.
        NOTE(review): the 'Oval' and 'Rect' branches reuse variables
        (`circle`, `minr..maxc`) computed by the 'Area/Circle diff' and
        'Area/Square diff' branches - when those features are present they
        must appear EARLIER in the list, otherwise a NameError is raised.

    Returns
    -------
    pd.DataFrame : one row with all the extracted features of the object
        ('Oval'/'Rect' columns hold matplotlib patch objects).
    """
    if features is None:
        features = ['Shapes Area', 'Shapes Perimeter',
                    'Centroids row', 'Centroids column', 'Eccentricity', 'Orientation',
                    'Area/Square diff', 'Area/Circle diff', 'Object Perimeter/Circle Perimeter',
                    'Oval', 'Rect', 'Bounding'] # Default features
    region = skimage.measure.regionprops(image_mask)[0]
    features_dict = collections.OrderedDict({})
    # Saving one object information in the image
    x, y = region.coords.T # x-row, y-column
    feat_num = 0 # to maintain that Three_D() enter once
    for feature in features:
        if feature == 'Shapes Area':
            feature_value = region.area # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Shapes Perimeter':
            feature_value = region.perimeter # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Centroids row':
            feature_value = np.round(region.centroid[0], decimals=2) # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Centroids column':
            feature_value = np.round(region.centroid[1], decimals=2) # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Eccentricity':
            feature_value = region.eccentricity # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Orientation':
            feature_value = region.orientation # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Area/Square diff':
            # draw invisible rectangle around segmented objects to get the percentage object_area/square around the object
            minr, minc, maxr, maxc = region.bbox
            rect_area = (maxc - minc) * (maxr - minr)
            feature_value = region.area / rect_area # Get feature value
            features_dict[feature] = [feature_value]
        elif (feature == 'Area/Circle diff'):
            # Minimal enclosing circle of the object's pixels.
            circle = (make_circle(region.coords))
            # Calculating the region.area/the circle area
            circle_area = math.pi * circle[2] ** 2
            circle_diff = region.area / circle_area
            feature_value = circle_diff # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Object Perimeter/Circle Perimeter': # a = Co/Ca (Ca - equivalent_diameter perimeter)
            circle_perimeter = region.equivalent_diameter * math.pi
            feature_value = region.perimeter / circle_perimeter
            features_dict[feature] = [feature_value]
        elif feature == 'Oval':
            # Reuses `circle` from the 'Area/Circle diff' branch when that
            # feature was requested (and already processed).
            if 'Area/Circle diff' not in features:
                circle = (make_circle(region.coords))
            # Note (column, row) order for matplotlib patch coordinates.
            oval = mpatches.Circle((circle[1], circle[0]), radius=circle[2], fill=False, edgecolor='pink',
                                   linewidth=1)
            feature_value = oval # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Rect':
            # Same pattern: reuses bbox from 'Area/Square diff' when present.
            if 'Area/Square diff' not in features:
                minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='salmon', linewidth=2)
            feature_value = rect # Get feature value
            features_dict[feature] = [feature_value]
        elif feature == 'Bounding':
            # Bounding-box coordinates are stored as scalars (not lists);
            # pandas broadcasts them across the single row built below.
            minr, minc, maxr, maxc = region.bbox
            features_dict['Min row'] = minr
            features_dict['Min column'] = minc
            features_dict['Max row'] = maxr
            features_dict['Max column'] = maxc
    features_dataframe = pd.DataFrame(features_dict)
    return features_dataframe
def get_features_for_region(image, image_mask, features_dict, features=None, plot_feature=False,
                            entropy_kernel=skimage.morphology.disk(5)):
    """
    Extract color/texture features for the single object in *image_mask*.

    Parameters
    ----------
    image : np.array - image of interest
    image_mask : np.array - image mask (contains only one object of interest)
    features_dict : dict - pre-populated features; new features are added
        to it in place (a pandas DataFrame also works, via column assignment)
    features : list - feature names to extract; see the default list below
    plot_feature : bool - if True, plot extracted features
    entropy_kernel : object - structuring element for the entropy filter
        (evaluated once at import time, shared by all calls)

    Returns
    -------
    pd.DataFrame : all the extracted features regarding the object
    np.array() : cropped image (object bounding box)
    np.array() : mask corresponding to the cropped image

    NOTE(review): `cropped_image`/`cropped_mask` are assigned only when at
    least one colour feature ('Red part' ... 'Moments') is requested; with a
    list containing none of them the final return raises NameError - verify
    callers always include a colour feature.
    """
    if features is None:
        features = ['Red part', 'Green part', 'Blue part', 'RGB clusters number',
                    'Color inertia', 'Texture', 'Moments',
                    'Gray', 'Entropy', 'Sobel', 'Masked_image'] # Default features
    region = skimage.measure.regionprops(image_mask)[0]
    # Zero out everything outside the object.
    masked_image = image_mask[:, :, None] * image
    # Saving one object information in the image
    x, y = region.coords.T # x-row, y-column
    feat_num = 0 # to maintain that Three_D() enter once
    for feature in features:
        if ((feature == 'Red part') or (feature == 'Green part') or (feature == 'Blue part')
                or (feature == 'RGB clusters number') or (feature == 'Color inertia')
                or (feature == 'Texture') or (feature == 'Moments')):
            if feat_num == 0:
                # First colour feature: crop to the bounding box and cluster
                # the object and background colours once; later colour
                # features reuse these results.
                minr, minc, maxr, maxc = region.bbox
                cropped_image = image[minr:maxr, minc:maxc]
                cropped_mask = image_mask[minr:maxr, minc:maxc]
                cropped_image_mask = cropped_mask[:, :, None] * cropped_image
                centers, counts, classifier = most_common_colors(cropped_image, cropped_mask, plot_feature,
                                                                 cluster_num=1, silhouette_threshold=0.6)
                cropped_mask_background = np.logical_not(cropped_mask).astype(bool)
                background_centers, background_counts, _ = most_common_colors(cropped_image, cropped_mask_background,
                                                                              plot_feature, cluster_num=1,
                                                                              silhouette_threshold=0.6)
                # Euclidean RGB distance between object and background centres;
                # used below to pick the centre most distinct from background.
                color_distance = (np.sqrt(np.sum((np.absolute(np.subtract(centers, background_centers)) ** 2), axis=1)))
                feat_num += 1
                if plot_feature: # Plotting 3D scatter graph of color values and their centers by kmeans
                    Three_D(cropped_image_mask, centers, plot_feature)
            if feature == 'Red part': #taking the main color of the object
                features_dict[feature] = centers[:, 0].tolist()[np.argmax(color_distance)]
            elif feature == 'Green part':
                features_dict[feature] = centers[:, 1].tolist()[np.argmax(color_distance)]
            elif feature == 'Blue part':
                features_dict[feature] = centers[:, 2].tolist()[np.argmax(color_distance)]
            elif feature == 'RGB clusters number':
                features_dict[feature] = len(centers[:, 0].tolist())
            elif feature == 'Color inertia':
                # KMeans within-cluster sum of squared distances.
                features_dict[feature] = classifier.inertia_
            elif feature == 'Texture':
                # Grey-level co-occurrence matrix statistics (distance 2, angle 0).
                gray_image = skimage.color.rgb2grey(cropped_image_mask)
                glcm_matrix = skimage.feature.greycomatrix(skimage.img_as_ubyte(gray_image), [2], [0], 256, symmetric=True, normed=True)
                features_dict['Texture dissimilarity'] = skimage.feature.greycoprops(glcm_matrix, 'dissimilarity')[0, 0]
                features_dict['Texture contrast'] = skimage.feature.greycoprops(glcm_matrix, 'contrast')[0, 0]
                features_dict['Texture homogeneity'] = skimage.feature.greycoprops(glcm_matrix, 'homogeneity')[0, 0]
                features_dict['Texture ASM'] = skimage.feature.greycoprops(glcm_matrix, 'ASM')[0, 0]
                features_dict['Texture energy'] = skimage.feature.greycoprops(glcm_matrix, 'energy')[0, 0]
                features_dict['Texture correlation'] = skimage.feature.greycoprops(glcm_matrix, 'correlation')[0, 0]
            elif feature == 'Moments':
                # Hu's seven invariant image moments of the grayscale object.
                gray_image = skimage.color.rgb2grey(cropped_image_mask)
                moments = skimage.measure.moments(gray_image)
                moments_normalised = skimage.measure.moments_normalized(moments)
                moments_hu = skimage.measure.moments_hu(moments_normalised)
                features_dict['First moment'] = moments_hu[0]
                features_dict['Second moment'] = moments_hu[1]
                features_dict['Third moment'] = moments_hu[2]
                features_dict['Fourth moment'] = moments_hu[3]
                features_dict['Fifth moment'] = moments_hu[4]
                features_dict['Sixth moment'] = moments_hu[5]
                features_dict['Seventh moment'] = moments_hu[6]
        # Features which need to use gray masked image
        elif ((feature == 'Gray') or (feature == 'Entropy') or (feature == 'Sobel') or (feature == 'Masked_image')):
            gray_masked = (skimage.color.rgb2gray(masked_image))
            if feature == 'Gray':
                # Statistics over the object's own pixels only.
                gray = gray_masked[x, y].flatten()
                feature_value = max(gray) # Get feature value
                features_dict['Gray Max'] = [feature_value]
                feature_value = min(gray) # Get feature value
                features_dict['Gray Min'] = [feature_value]
                feature_value = np.mean(gray) # Get feature value
                features_dict['Gray Mean'] = [feature_value]
                feature_value = np.std(gray) # Get feature value
                features_dict['Gray Std'] = [feature_value]
            elif feature == 'Entropy':
                entropy_image = skimage.filters.rank.entropy(gray_masked, selem=entropy_kernel, mask=image_mask)
                etr = entropy_image[x, y].flatten()
                feature_value = min(etr) # Get feature value
                features_dict['Entropy min'] = [feature_value]
                feature_value = max(etr) # Get feature value
                features_dict['Entropy max'] = [feature_value]
                feature_value = np.mean(etr) # Get feature value
                features_dict['Entropy mean'] = [feature_value]
                feature_value = np.std(etr) # Get feature value
                features_dict['Entropy std'] = [feature_value]
            elif feature == 'Sobel':
                sobel_image = skimage.filters.sobel(gray_masked, mask=image_mask)
                sob = sobel_image[x, y].flatten()
                feature_value = max(sob) # Get feature value
                features_dict['Sobel max'] = [feature_value]
                feature_value = np.mean(sob) # Get feature value
                features_dict['Sobel mean'] = [feature_value]
                feature_value = np.std(sob) # Get feature value
                features_dict['Sobel std'] = [feature_value]
            elif feature == 'Masked_image':
                # Whole grayscale masked frame (stripped again by callers).
                feature_value = gray_masked # Get feature value
                features_dict[feature] = [feature_value]
    features_dataframe = pd.DataFrame(features_dict)
    return features_dataframe, cropped_image, cropped_mask
def get_regions_from_image(path, path_save_pics, kwargs_features, channel='gray_eye', noise_filter='gaussian',
                           edge_detection='sobel', noise_filter2='gaussian', thresholding='otsu',
                           closing='closing', fill_holes='fill_holes', filter_params= [None,5,5,5,5,2.1,None,0],
                           plot_filters=False, plot_object=False, min_region_size=100, cropped_image_save=False):
    """
    Main function: segment one image into regions of interest and extract
    per-region features from both the image itself and its companion
    ('thumb' vs 'eq_hist') rendition.

    Parameters
    ----------
    path : str - path to image (file name must contain the site number)
    path_save_pics : str - path to location where processed images will be saved
    kwargs_features : dict - kwargs for get_features_for_region; its
        'features' list may be extended in place below (see NOTE)
    channel, noise_filter, edge_detection, noise_filter2, thresholding,
    closing, fill_holes : str - stage selectors, see process_image
    filter_params : list - per-stage parameters, see process_image
    plot_filters : bool - forward to process_image's stage plot
    plot_object : bool - plot all detected objects and their patches
    min_region_size : int - regions smaller than this (pixels) are skipped
    cropped_image_save : bool - save each region's bounding-box crop as PNG

    Returns
    -------
    pd.DataFrame : extracted information from the image (one row per region)
    img : np.array - preview image with each region painted its main colour
    patches : list - matplotlib circle/rectangle patches around the regions
    site_nr : str - site number parsed from the file name
    """
    # Site number is the first run of digits in the file name.
    site_nr = re.findall(r'\d+', path.split(os.sep)[-1])[0]
    image = skimage.img_as_ubyte(skimage.io.imread(path, ))
    bin_pic = process_image(path_save_pics, site_nr, image, channel, noise_filter, edge_detection, noise_filter2, thresholding,
                            closing, fill_holes, filter_params, plot_filters)
    # Label connected components of the binary segmentation.
    labeled_image, n_labels = skimage.measure.label(bin_pic, return_num=True)
    # define lists to hold extracted info
    image_regions = []
    patches = []
    # create mask
    mask = np.zeros((image.shape[0], image.shape[1]))
    img = np.zeros_like(image)
    number = 0
    # Load the companion rendition of the same site from the sibling
    # directory: 'thumb' images are paired with 'eq_hist' ones and vice versa.
    if path.rsplit(os.sep,2)[1] == 'thumb':
        eq_hist_site = os.path.join(os.path.join(path.rsplit(os.sep, 2)[0], 'eq_hist'), 'site{}.jpg'.format(site_nr))
        image2 = skimage.img_as_ubyte(skimage.io.imread(eq_hist_site, ))
    else:
        thumb_site = os.path.join(os.path.join(path.rsplit(os.sep, 2)[0], 'thumb'), 'site{}.jpg'.format(site_nr))
        image2 = skimage.img_as_ubyte(skimage.io.imread(thumb_site, ))
    for n in range(1, n_labels + 1): # leave zero as it is background
        if ((kwargs_features['features'] is not None) and (n == 1) and (plot_object == True)):
            # NOTE(review): mutates the caller-supplied feature list in place;
            # repeated calls with the same dict keep appending - verify.
            kwargs_features['features'].extend(['Masked_image', 'Oval', 'Rect', 'Bounding'])
        image_mask = (labeled_image == n).astype(np.uint8)
        region_area = np.sum(image_mask)
        feat = get_geometrical_features(image_mask, kwargs_features['features'])
        # Skip specks below min_region_size and blobs covering >75% of frame.
        if (region_area < min_region_size or region_area > 0.75 * (image.shape[0]*image.shape[1])):
            continue
        feat_image, cropped_image, cropped_mask = get_features_for_region(image, image_mask, feat, **kwargs_features)
        # Paint the region with its dominant colour in the preview image.
        img[labeled_image == n] = np.array([feat['Red part'], feat['Green part'], feat['Blue part']]).astype(np.uint8).T
        # Extract the same features again from the companion rendition.
        feat_image2, _, _ = get_features_for_region(image2, image_mask, collections.OrderedDict({}), **kwargs_features)
        # Accumulate the gray masks and strip non-scalar columns before
        # the region rows are concatenated into the output table.
        if 'Masked_image' in feat_image2:
            mask = np.sum([mask, feat_image2['Masked_image'][0]], axis=0)
            del feat_image2['Masked_image']
        if 'Masked_image' in feat_image:
            mask = np.sum([mask, feat_image['Masked_image'][0]], axis=0)
            del feat_image['Masked_image']
        if 'Oval' in feat_image:
            patches.append(feat['Oval'][0])
            del feat_image['Oval']
        if 'Rect' in feat_image:
            patches.append(feat['Rect'][0])
            del feat_image['Rect']
        if cropped_image_save:
            skimage.io.imsave(os.path.join(path_save_pics, 'cropped_site{}_{}.png'.format(site_nr, number)),
                              cropped_image)
        # Suffix the companion rendition's columns with its directory name.
        names = []
        if path.rsplit(os.sep, 2)[1] == 'thumb':
            oposite_name = 'eq_hist'
        else:
            oposite_name = 'thumb'
        for i in feat_image2.columns:
            names.append(i + " " + oposite_name)
        feat_image2.columns = names # change names of the image2 features columns
        feat_image.insert(0, 'Object in image nr', number)
        all_feat_image = pd.concat([feat_image, feat_image2], axis=1)
        number += 1
        image_regions.append(all_feat_image)
    if plot_object:
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.imshow(img.astype(np.uint8))
        for p in patches:
            # Patches can only live on one axes - add copies.
            new_p = copy(p)
            ax.add_patch(new_p)
        ax.set_axis_off()
        plt.tight_layout()
        plt.title("All objects of interest")
        plt.show(block=False)
    # pass the empty dataframe to concat in case if there is no objects of interest in the image
    if len(image_regions) == 0:
        features_dict = collections.OrderedDict({})
        features_dataframe = pd.DataFrame(data=features_dict)
        image_regions.append(features_dataframe)
    image_regions_dataframe = pd.concat(image_regions, ignore_index=True)
    image_regions_dataframe.insert(0, 'site nr', site_nr)
    return image_regions_dataframe, img, patches, site_nr
def get_regions_from_images_mp(directory_read, multi, path_save_pics, cores_number, kwargs_features, **kwargs_regions):
    '''
    Extract features from every 'site*' image in a directory, optionally in
    parallel, and persist the combined table as a CSV file.

    Parameters
    ----------
    directory_read : str - directory to read images from
    multi : bool - if True: use multiprocessing
    path_save_pics : str - directory where the CSV (and images) are saved
    cores_number : int - number of worker processes when multi is True
    kwargs_features : dict - forwarded to get_regions_from_image
    **kwargs_regions : additional keyword args for get_regions_from_image

    Returns
    -------
    pd.DataFrame : extracted information from all the images in the directory
    '''
    paths = [os.path.join(directory_read, fname) for fname in os.listdir(directory_read) if fname.lower().startswith('site')]
    if not paths:
        # Robustness: no matching images - return an empty table instead of
        # letting pd.concat raise on an empty sequence.
        dataset = pd.DataFrame()
        dataset.to_csv(os.path.join(path_save_pics, 'directory pictures data.csv'))
        return dataset
    if multi:
        with multiprocessing.Pool(processes=cores_number) as pool:
            results = pool.map(
                functools.partial(
                    get_regions_from_image,
                    path_save_pics=path_save_pics,
                    kwargs_features=kwargs_features,
                    **kwargs_regions),
                paths)
        # Each worker returns (dataframe, img, patches, site_nr); only the
        # dataframes are aggregated into the output table.
        frames, _imgs, _patches, _site_nrs = zip(*results)
    else:
        # BUGFIX: this branch previously appended the whole 4-tuple returned
        # by get_regions_from_image, which made the pd.concat below fail;
        # unpack it the same way as the parallel path.
        frames = []
        for image_path in paths:
            frame, _img, _patches, _site_nr = get_regions_from_image(
                image_path, path_save_pics, kwargs_features, **kwargs_regions)
            frames.append(frame)
    dataset = pd.concat(frames, ignore_index=True)
    dataset.to_csv(os.path.join(path_save_pics, 'directory pictures data.csv'))
    return dataset
import socket
# Simple TCP client: send one line of user input to a local server and
# print the server's reply.
# The with-block guarantees the socket is closed even on error.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect(('127.0.0.1', 4500))
    # Renamed from `str` to avoid shadowing the builtin.
    message = input('give a string')
    # sendall() guarantees the whole payload is transmitted (send() may
    # write only part of the buffer).
    s.sendall(message.encode('utf-8'))
    print(s.recv(1024).decode('utf-8'))
| AatirNadim/Socket-Programming | functional_socket/client.py | client.py | py | 205 | python | en | code | 0 | github-code | 13 |
39740395027 | import operator
from collections import defaultdict
from django.utils.translation import gettext_lazy as _
from django.forms import formset_factory
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import timezone
from .views_common import *
ItemsPerPage = 50
def getPaginator( request, page_key, items ):
paginator = Paginator( items, ItemsPerPage )
page = request.GET.get('page',None) or request.session.get(page_key,None)
try:
items = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = 1
items = paginator.page(page)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
page = paginator.num_pages
items = paginator.page(page)
request.session[page_key] = page
return items, paginator
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesList( request ):
validate_sequence( Series.objects.all() )
series = Series.objects.all().annotate(num_events=Count('seriescompetitionevent')).order_by( 'sequence' )
return render( request, 'series_list.html', locals() )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesNew( request, categoryFormatId=None ):
if categoryFormatId is None:
category_formats = CategoryFormat.objects.all()
return render( request, 'series_category_format_select.html', locals() )
category_format = get_object_or_404( CategoryFormat, pk=categoryFormatId )
series = Series( name=timezone.now().strftime("SeriesNew %H:%M:%S"), category_format=category_format )
series.save()
series.move_to( 0 )
return HttpResponseRedirect(getContext(request,'cancelUrl') + 'SeriesEdit/{}/'.format(series.id) )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCopy( request, seriesId ):
series = get_object_or_404( Series, pk=seriesId )
series.make_copy()
return HttpResponseRedirect(getContext(request,'cancelUrl'))
@autostrip
class SeriesForm( ModelForm ):
class Meta:
model = Series
fields = '__all__'
def __init__( self, *args, **kwargs ):
button_mask = kwargs.pop( 'button_mask', OK_BUTTON )
super().__init__(*args, **kwargs)
self.helper = FormHelper( self )
self.helper.form_action = '.'
self.helper.form_class = 'form-inline'
self.helper.layout = Layout(
Row(
Field('name', size=20),
HTML(_('To hide this Series from all users except Super, start the name with an underscore (_)') )
),
Row(
Field('description', size=40),
Field('image'),
),
Row(
Field('ranking_criteria'),
Field('best_results_to_consider'),
Field('must_have_completed'),
HTML(' '*4),
Field('consider_primes'),
),
Row( HTML('<hr/>') ),
Row(
HTML( '<strong>' ), HTML( _('Break Ties as follows:') ), HTML( '</strong>' ),
),
Row(
Field('consider_most_events_completed'),
HTML(' '*6 + '<strong>' ), HTML( _('then consider') ), HTML( '</strong>' + ' '*6),
Field('tie_breaking_rule'),
),
Row(
HTML( _("Finally, break remaining ties by the most recent event's results.") ),
),
Row( HTML('<hr/>') ),
Row(
Field('callup_max'), HTML( _('Specifies the maximum number of Start Wave Callups. If zero, this Series will not be used for Callups.') ),
),
Row(
Field('randomize_if_no_results'),
),
Row(
HTML( _('If False, athletes without Series results will not be included in Callups.') ), HTML('<br/>'),
HTML( _('If True, athletes without Series results will be assigned a random callup.') ),
),
Row( HTML('<hr/>') ),
Row(
Field('show_last_to_first'),
),
Field('category_format', type='hidden'),
Field('sequence', type='hidden'),
Field('custom_category_names', type='hidden'),
)
addFormButtons( self, button_mask )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesEdit( request, seriesId ):
series = get_object_or_404( Series, pk=seriesId )
if not series.seriesincludecategory_set.exists():
for category in series.category_format.category_set.all():
SeriesIncludeCategory( series=series, category=category ).save()
if series.ranking_criteria == 0 and not series.seriespointsstructure_set.exists():
SeriesPointsStructure( series=series ).save()
included_categories = [ic.category for ic in series.seriesincludecategory_set.all()]
excluded_categories = series.category_format.category_set.exclude( pk__in=[c.pk for c in included_categories] )
ces = list( series.seriescompetitionevent_set.all() )
competitions = defaultdict( list )
for ce in ces:
competitions[ce.event.competition].append( ce )
competitions = sorted( competitions.items(), key=lambda ce: ce[0].start_date, reverse=True )
for c, ces in competitions:
ces.sort( key=lambda ce:ce.event.date_time )
return GenericEdit( Series, request, series.id, SeriesForm, 'series_form.html', locals() )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesDetailEdit( request, seriesId ):
return GenericEdit( Series, request, seriesId, SeriesForm, 'generic_form.html', locals() )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesDelete( request, seriesId, confirmed=0 ):
series = get_object_or_404( Series, pk=seriesId )
if int(confirmed):
series.delete()
return HttpResponseRedirect( getContext(request,'cancelUrl') )
message = format_lazy( '{}: {}, {}', _('Delete'), series.name, series.description )
cancel_target = getContext(request,'cancelUrl')
target = getContext(request,'path') + '1/'
return render( request, 'are_you_sure.html', locals() )
#-----------------------------------------------------------------------
class EventSelectForm( Form ):
events = forms.MultipleChoiceField( label=_("Events to include from the Competition"), help_text=_('Ctrl-Click to Multi-select') )
def __init__( self, *args, **kwargs ):
series = kwargs.pop( 'series' )
competition = kwargs.pop( 'competition' )
button_mask = kwargs.pop( 'button_mask', OK_BUTTON )
super().__init__(*args, **kwargs)
categories = set( series.get_categories() )
events = [e for e in competition.get_events() if not set(e.get_categories()).isdisjoint(categories)]
events.sort( key=operator.attrgetter('date_time') )
self.fields['events'].choices = [('{}-{}'.format(e.event_type,e.id), '{}: {}'.format(e.date_time.strftime('%Y-%m-%d %H:%M'), e.name)) for e in events]
self.helper = FormHelper( self )
self.helper.form_action = '.'
self.helper.form_class = 'form-inline'
self.helper.layout = Layout(
Row(
Field('events', size=30),
)
)
addFormButtons( self, button_mask )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCompetitionAdd( request, seriesId, competitionId=None ):
page_key = 'series_competition_add_page'
series = get_object_or_404( Series, pk=seriesId )
if competitionId is not None:
competition = get_object_or_404( Competition, pk=competitionId )
series.remove_competition( competition )
default_points_structure = series.get_default_points_structure()
categories = set( series.get_categories() )
for e in competition.get_events():
if not set(e.get_categories()).isdisjoint(categories):
sce = SeriesCompetitionEvent( series=series, points_structure=default_points_structure )
sce.event = e
sce.save()
existing_competitions = set( series.get_competitions() )
competitions = Competition.objects.filter( category_format=series.category_format ).order_by('-start_date')
competitions, paginator = getPaginator( request, page_key, competitions )
return render( request, 'series_competitions_list.html', locals() )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCompetitionRemove( request, seriesId, competitionId, confirmed=0 ):
series = get_object_or_404( Series, pk=seriesId )
competition = get_object_or_404( Competition, pk=competitionId )
if int(confirmed):
series.remove_competition( competition )
return HttpResponseRedirect( getContext(request,'cancelUrl') )
message = format_lazy( '{}: {}:{}', _('Remove'), competition.date_range_year_str, competition.name )
cancel_target = getContext(request,'cancelUrl')
target = getContext(request,'path') + '1/'
return render( request, 'are_you_sure.html', locals() )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCompetitionRemoveAll( request, seriesId, confirmed=0 ):
series = get_object_or_404( Series, pk=seriesId )
if int(confirmed):
series.seriescompetitionevent_set.all().delete()
return HttpResponseRedirect( getContext(request,'cancelUrl') )
message = format_lazy( '{}: {}', _('Remove All Events from this Series'), series.name )
cancel_target = getContext(request,'cancelUrl')
target = getContext(request,'path') + '1/'
return render( request, 'are_you_sure.html', locals() )
def GetEventForm( series ):
class EventForm( Form ):
select = forms.BooleanField( required=False )
name = forms.CharField( widget = forms.HiddenInput(), required=False )
options = { 'choices':[(ps.pk, ps.name) for ps in series.seriespointsstructure_set.all()] }
if series.ranking_criteria != 0:
options['widget'] = forms.HiddenInput()
points_structure = forms.ChoiceField( **options )
et = forms.IntegerField( widget = forms.HiddenInput(), required=False )
pk = forms.IntegerField( widget = forms.HiddenInput(), required=False )
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
initial = kwargs.get( 'initial', {} )
if initial:
self.fields['label'] = initial['name']
return EventForm
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCompetitionEdit( request, seriesId, competitionId ):
series = get_object_or_404( Series, pk=seriesId )
competition = get_object_or_404( Competition, pk=competitionId )
default_ps = series.get_default_points_structure().pk
EventFormSet = formset_factory(GetEventForm(series), extra=0)
def get_form_set():
categories = set( series.get_categories() )
events = [e for e in competition.get_events() if not set(e.get_categories()).isdisjoint(categories)]
events.sort( key=operator.attrgetter('date_time') )
events_included = set( series.get_events_for_competition(competition) )
points_structure = {ce.event:ce.points_structure.pk for ce in series.seriescompetitionevent_set.all() if ce.event.competition == competition}
initial = [{
'select': e in events_included,
'name': timezone.localtime(e.date_time).strftime('%a %H:%M') + ': ' + e.name,
'points_structure': points_structure.get(e, default_ps),
'et': e.event_type,
'pk': e.pk,
} for e in events]
return EventFormSet( initial=initial )
if request.method == 'POST':
form_set = EventFormSet( request.POST )
if form_set.is_valid():
event_points = []
for f in form_set:
fields = f.cleaned_data
if not fields['select']:
continue
e = (EventMassStart, EventTT)[fields['et']].objects.get( pk=fields['pk'] )
ps = SeriesPointsStructure.objects.get( pk=fields['points_structure'] )
event_points.append( (e, ps) )
series.remove_competition( competition )
for e, ps in event_points:
sce = SeriesCompetitionEvent( series=series, points_structure=ps )
sce.event = e
sce.save()
return HttpResponseRedirect(getContext(request,'cancelUrl'))
else:
form_set = get_form_set()
return render( request, 'series_competition_events_form.html', locals() )
#-----------------------------------------------------------------------
@autostrip
class SeriesPointsStructureForm( ModelForm ):
class Meta:
model = SeriesPointsStructure
fields = '__all__'
def __init__( self, *args, **kwargs ):
button_mask = kwargs.pop( 'button_mask', OK_BUTTON )
super().__init__(*args, **kwargs)
self.helper = FormHelper( self )
self.helper.form_action = '.'
self.helper.form_class = 'form-inline'
self.helper.layout = Layout(
Row(
Field('name', size=20),
),
Row(
Field('points_for_place', size=60),
),
Row(
Field('finish_points'),
Field('dnf_points'),
Field('dns_points'),
),
Field('series', type='hidden'),
Field('sequence', type='hidden'),
)
addFormButtons( self, button_mask )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesPointsStructureNew( request, seriesId ):
series = get_object_or_404( Series, pk=seriesId )
return GenericNew( SeriesPointsStructure, request, SeriesPointsStructureForm, instance_fields={'series':series, 'name':timezone.now().strftime('Series Points %Y-%m-%f %H:%M:S')} )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesPointsStructureEdit( request, seriesPointsStructureId ):
return GenericEdit( SeriesPointsStructure, request, seriesPointsStructureId, SeriesPointsStructureForm )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesPointsStructureDelete( request, seriesPointsStructureId, confirmed=0 ):
series_points_structure = get_object_or_404( SeriesPointsStructure, pk=seriesPointsStructureId )
if int(confirmed):
series_points_structure.delete()
return HttpResponseRedirect( getContext(request,'cancelUrl') )
message = format_lazy( '{}: {}', _('Delete'), series_points_structure.name )
cancel_target = getContext(request,'cancelUrl')
target = getContext(request,'path') + '1/'
return render( request, 'are_you_sure.html', locals() )
#-----------------------------------------------------------------------
class CategorySelectForm( Form ):
categories = forms.MultipleChoiceField( label=_("Categories in the Series"), widget=forms.CheckboxSelectMultiple )
custom_categories = forms.MultipleChoiceField( label=_("Custom Categories in the Series"), required=False, widget=forms.CheckboxSelectMultiple )
def __init__( self, *args, **kwargs ):
series = kwargs.pop( 'series' )
custom_category_names = series.get_all_custom_category_names()
button_mask = kwargs.pop( 'button_mask', OK_BUTTON )
super().__init__(*args, **kwargs)
self.fields['categories'].choices = [
(c.id, format_lazy( '{}: {} ({})', c.get_gender_display(), c.code, c.description))
for c in series.category_format.category_set.all()
]
self.fields['custom_categories'].choices = [(i, cc) for i, cc in enumerate(custom_category_names)]
self.helper = FormHelper( self )
self.helper.form_action = '.'
self.helper.form_class = ''
self.helper.layout = Layout(
Row( Col(Field('categories', size=30), 6), Col(Field('custom_categories', size=30),6), ),
)
addFormButtons( self, button_mask )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCategoriesChange( request, seriesId ):
series = get_object_or_404( Series, pk=seriesId )
if request.method == 'POST':
form = CategorySelectForm( request.POST, button_mask=EDIT_BUTTONS, series=series )
if form.is_valid():
categories = form.cleaned_data['categories']
series.seriesincludecategory_set.all().delete()
for pk in categories:
series.seriesincludecategory_set.create( category=Category.objects.get(pk=pk) )
# Set the custom categories string.
cc_names = []
custom_category_names = series.get_all_custom_category_names()
for v in form.cleaned_data['custom_categories']:
try:
cc_names.append( custom_category_names[int(v)] )
except Exception:
pass
series.custom_category_names = ',\n'.join( cc_names )
series.save()
series.validate()
if 'ok-submit' in request.POST:
return HttpResponseRedirect(getContext(request,'cancelUrl'))
else:
name_id = {cc:i for i, cc in enumerate(series.get_all_custom_category_names())}
custom_category_i = [name_id[name] for name in series.custom_category_names.split(',\n') if name in name_id]
form = CategorySelectForm(
button_mask=EDIT_BUTTONS,
series=series,
initial={
'categories':[ic.category.id for ic in series.seriesincludecategory_set.all()],
'custom_categories':custom_category_i,
}
)
return render( request, 'generic_form.html', locals() )
#-----------------------------------------------------------------------
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesUpgradeProgressionNew( request, seriesId ):
series = get_object_or_404( Series, pk=seriesId )
up = SeriesUpgradeProgression( series=series )
up.save()
return HttpResponseRedirect( popPushUrl(request, 'SeriesUpgradeProgressionEdit', up.id) )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesUpgradeProgressionDelete( request, seriesUpgradeProgressionId, confirmed=0 ):
series_upgrade_progression = get_object_or_404( SeriesUpgradeProgression, pk=seriesUpgradeProgressionId )
if int(confirmed):
series_upgrade_progression.delete()
return HttpResponseRedirect( getContext(request,'cancelUrl') )
message = format_lazy( '{}: {}', _('Delete'), series_upgrade_progression.get_text() )
cancel_target = getContext(request,'cancelUrl')
target = getContext(request,'path') + '1/'
return render( request, 'are_you_sure.html', locals() )
class SeriesUpgradeProgressionForm( Form ):
factor = forms.FloatField( label=_('Points Carry-Forward Factor') )
def GetCategoryProgressionForm( series ):
class CategoryProgressionForm( Form ):
category = forms.ChoiceField( label=_('') )
def __init__( self, *args, **kwargs ):
super().__init__(*args, **kwargs)
self.fields['category'].choices = [(-1, '---')] + [
(c.pk, format_lazy('{}: {} - {}', c.get_gender_display(), c.code, c.description)) for c in series.category_format.category_set.all()
]
return CategoryProgressionForm
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesUpgradeProgressionEdit( request, seriesUpgradeProgressionId ):
series_upgrade_progression = get_object_or_404( SeriesUpgradeProgression, pk=seriesUpgradeProgressionId )
series = series_upgrade_progression.series
CategoryProgressionFormSet = formset_factory(GetCategoryProgressionForm(series), extra=6)
ucs = series_upgrade_progression.seriesupgradecategory_set
def get_form():
return SeriesUpgradeProgressionForm( initial={'factor':series_upgrade_progression.factor}, prefix='progression' )
def get_form_set():
initial = []
for uc in ucs.all():
initial.append( {'category': uc.category.id} )
return CategoryProgressionFormSet( initial=initial, prefix='categories' )
if request.method == 'POST':
if 'cancel-submit' in request.POST:
return HttpResponseRedirect(getContext(request,'cancelUrl'))
form = SeriesUpgradeProgressionForm( request.POST, prefix='progression' )
form_set = CategoryProgressionFormSet( request.POST, prefix='categories' )
if form.is_valid() and form_set.is_valid():
series_upgrade_progression.factor = abs( form.cleaned_data['factor'] )
seen = set([-1])
categories = []
for f in form_set:
fields = f.cleaned_data
category = int(fields['category'])
if category not in seen:
categories.append( category )
seen.add( category )
categories_cur = ucs.all().values_list('pk', flat=True)
if categories != categories_cur:
ucs.all().delete()
categories_lookup = Category.objects.in_bulk( categories )
for seq, pk in enumerate(categories):
SeriesUpgradeCategory( sequence=seq, category=categories_lookup[pk], upgrade_progression=series_upgrade_progression ).save()
if 'ok-submit' in request.POST:
return HttpResponseRedirect(getContext(request,'cancelUrl'))
form = get_form()
form_set = get_form_set()
else:
form = get_form()
form_set = get_form_set()
return render( request, 'series_upgrade_progression_form.html', locals() )
#-----------------------------------------------------------------------
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCategoryGroupNew( request, seriesId ):
series = get_object_or_404( Series, pk=seriesId )
cg = CategoryGroup( series=series, name=datetime.datetime.now().strftime('Category Group %H:%M:%S') )
cg.save()
return HttpResponseRedirect( popPushUrl(request, 'SeriesCategoryGroupEdit', cg.id) )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCategoryGroupDelete( request, categoryGroupId, confirmed=0 ):
category_group = get_object_or_404( CategoryGroup, pk=categoryGroupId )
if int(confirmed):
category_group.delete()
return HttpResponseRedirect( getContext(request,'cancelUrl') )
message = format_lazy( '{}: {}, {}', _('Delete'), category_group.name, category_group.get_text() )
cancel_target = getContext(request,'cancelUrl')
target = getContext(request,'path') + '1/'
return render( request, 'are_you_sure.html', locals() )
#-----------------------------------------------------------------------
class CategoryGroupForm( Form ):
name = forms.CharField( label=_('Name') )
categories = forms.MultipleChoiceField( label=_("Categories in the Group"), widget=forms.CheckboxSelectMultiple )
def __init__( self, *args, **kwargs ):
category_group = kwargs.pop( 'category_group' )
series = category_group.series
button_mask = kwargs.pop( 'button_mask', OK_BUTTON )
super().__init__(*args, **kwargs)
self.fields['categories'].choices = [(c.id, format_lazy('{}: {} {}', c.get_gender_display(), c.code, c.description)) for c in series.get_categories_not_in_groups()]
self.helper = FormHelper( self )
self.helper.form_action = '.'
self.helper.form_class = ''
self.helper.layout = Layout(
Row(
Field('name', size=30),
),
Row(
Field('categories', size=30),
),
)
addFormButtons( self, button_mask )
@access_validation()
@user_passes_test( lambda u: u.is_superuser )
def SeriesCategoryGroupEdit( request, categoryGroupId ):
category_group = get_object_or_404( CategoryGroup, pk=categoryGroupId )
series = category_group.series
if request.method == 'POST':
form = CategoryGroupForm( request.POST, button_mask=EDIT_BUTTONS, category_group=category_group )
if form.is_valid():
categories = form.cleaned_data['categories']
category_group.categorygroupelement_set.all().delete()
for pk in categories:
category_group.categorygroupelement_set.create( category=Category.objects.get(pk=pk) )
series.validate()
if 'ok-submit' in request.POST:
return HttpResponseRedirect(getContext(request,'cancelUrl'))
else:
form = CategoryGroupForm(
button_mask=EDIT_BUTTONS,
category_group=category_group,
initial={'name':category_group.name, 'categories':[ge.category.id for ge in category_group.categorygroupelement_set.all()]}
)
return render( request, 'generic_form.html', locals() )
| esitarski/RaceDB | core/series.py | series.py | py | 22,736 | python | en | code | 12 | github-code | 13 |
9475240082 | # "Here we would need to know the account position to know if we can make a trade or not.\n",
# "1. We need to know if we can afford to buy. This means do we have enough cash to make a buy\n",
#"2. We need to know how much we need to sell of the current position of the asset that
# we are interested in. How much do we sell the whole position or a percentage.\n",
import pandas as pd
import time
from alpaca.trading.client import TradingClient
from alpaca.trading.requests import MarketOrderRequest
from alpaca.trading.enums import OrderSide, TimeInForce
from my_secrets import APCA_API_KEY_ID, APCA_API_SECRET_KEY
#1
#api = tradeapi.REST(APCA_API_KEY_ID, APCA_API_SECRET_KEY, url)
#balance = api.get_account().cash
#print(f"Paper trading account balance: ${balance}")
#data
df = pd.read_csv('out.csv')
columns = ['Open', 'Close', 'Timestamp']
df.columns = columns
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
trading_client = TradingClient(APCA_API_KEY_ID, APCA_API_SECRET_KEY, paper=True)
#MACD formula
#when MACD is greater than signal, the price goes up
exp1 = df['Close'].ewm(span=12).mean()
exp2 = df['Close'].ewm(span=26).mean()
macd = exp1 - exp2
signal = macd.ewm(span=9).mean()
def generate_signals(df):
signals = []
for i in range(1, len(df)):
if macd.iloc[i] > signal.iloc[i] and macd.iloc[i-1] <= signal.iloc[i-1]:
order_data_buy = MarketOrderRequest(symbol="BTCUSD", qty=0.1, side=OrderSide.BUY, time_in_force=TimeInForce.GTC)
trading_client.submit_order(order_data=order_data_buy)
#signals.append('Buy') # Buy
elif macd.iloc[i] < signal.iloc[i] and macd.iloc[i-1] >= signal.iloc[i-1]:
order_data_sell = MarketOrderRequest(symbol="BTCUSD", qty=0.1, side=OrderSide.SELL, time_in_force=TimeInForce.GTC)
trading_client.submit_order(order_data=order_data_sell)
#signals.append('Sell') # Sell
else:
print('Hold') # Hold
return signals
while True:
generate_signals(df)
time.sleep(60)
| cpalmer712/Algo_trading | PaperTrading.py | PaperTrading.py | py | 2,050 | python | en | code | 0 | github-code | 13 |
12327917831 | import scrapy
from ..items import ScrapyProjectItem
class MaoyanSpider(scrapy.Spider):
name = "maoyan"
allowed_domains = ["maoyan.com"]
start_urls = ["https://m.maoyan.com/board/4"]
'''
pipeline中如果用with保存内容用w写入,在终端中用 -o data.csv测试不会内容覆盖,会进行附加操作 -> 'a'
'''
def parse(self, response):
'''
1. 终端中查看是否能请求到数据,这里没有请求到,首先想机器人协议,在setting当中关了还请求不到,添加请求头,还是请求不到,加cookies(scrapy当中要单独加),
不行,最终proxy。可以都写上,给优先级,我的理解是,按优先级,如果请求到数据就不会使用优先级低的 -->> headers > cookies > proxy 自己设置的优先级
'''
# print(response.text)
# print(response.request.headers)
divs = response.css('.board-card.clearfix') # 提取到所有的div标签
for div in divs:
name = div.css('.title::text').get()
star = div.css('.actors::text').get()
releasetime = div.css('.date::text').get()
score = div.css('.number::text').get()
# print(name)
yield ScrapyProjectItem(name=name, star=star, releasetime=releasetime, score=score)
# 也可以直接构建成字典{'name': name,'star': star, 'releasetime': releasetime, 'score': score}
'''
2. 在 items.py 数据结构当中 写入需要的数据字段 --->>> yield的数据经过items文件的数据结构处理变为相应的item对象(scrapy定义的字典对象)
'''
'''
3. 在pipeline当中写保存逻辑,然后在setting当中打开管道让yield的数据流进管道进行保存
--->>> 应该是yield的数据先经过items文件的数据结构处理变为相应的item对象(scrapy定义的字典对象)然后再进入管道,在pipeline文件当中直接使用item对象来进行相应的操作
'''
'''
😄 发现到最后打开了五个文件,爬虫文件和四个配置文件来进行协调,不再是单独使用一个文件进行编写了 😄
'''
| lll13508510371/Scrapping | Scrapy_Project/Scrapy_Project/spiders/maoyan.py | maoyan.py | py | 2,272 | python | zh | code | 0 | github-code | 13 |
37292548825 | # File: probe.py
# Description: Space probe that can be placed into an environment
# Last Modified: May 9, 2018
# Modified By: Sky Hoffert
from lib.sensor import TemperatureSensor
class Probe():
'''Probe that can be placed in an environment'''
def __init__(self, environment=None):
self._environment = environment
self._sensor_temperature = None
def add_sensor(self, type='', accuracy=0):
if type == 'temperature':
self._sensor_temperature = TemperatureSensor( environment=self._environment, accuracy=accuracy )
else:
raise Exception('Not implemented')
def sample(self, type=''):
if type == 'temperature':
return self._sensor_temperature.sample()
else:
raise Exception('Not implemented')
| skyhoffert/sandbox_sockets | lib/probe.py | probe.py | py | 820 | python | en | code | 0 | github-code | 13 |
8560083256 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import tensorflow as tf
from tensorflow.contrib import rnn
# In[2]:
import pandas as pd
graph = tf.Graph()
# In[3]:
data=pd.read_csv("samples/allAtt_onehot_large_train.csv")
dataT=pd.read_csv("samples/allAtt_onehot_large_test.csv")
print(data.head())
print(data.shape)
# In[4]:
hm_epochs=10
n_classes = 2
batch_size = 1
chunk_size=34
n_chunks=1
rnn_size=64
with graph.as_default():
x = tf.placeholder('float', [None, n_chunks,chunk_size])
y = tf.placeholder('float')
# In[5]:
def recurrent_neural_model(x):
layer = {'weights':tf.Variable(tf.random_normal([rnn_size,n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
x=tf.transpose(x,[1,0,2])
print("transpose",x)
x=tf.reshape(x,[-1,chunk_size])
print("reshape",x)
x=tf.split(x,n_chunks)
print("split",x)
lstm_cell = rnn.BasicLSTMCell(rnn_size)
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
output = tf.matmul(outputs[-1],layer['weights']) + layer['biases']
return output
# In[6]:
def train_neural_network(x):
prediction = recurrent_neural_model(x)
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y) )
optimizer = tf.train.AdamOptimizer().minimize(cost)
y_pred = tf.nn.softmax(logits=prediction)
saver=tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
hm_epochs=30
for epoch in range(hm_epochs):
epoch_loss = 0
for i in range(0,data.shape[0],batch_size):
epoch_x, epoch_y = data.iloc[i:i+batch_size,1:35].values,data.iloc[i:i+batch_size,35:].values
epoch_x=epoch_x.reshape((batch_size,n_chunks,chunk_size))
_, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of',hm_epochs,'loss:',epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy Train:',accuracy.eval({x:data.iloc[:,1:35].values.reshape((-1,n_chunks,chunk_size)),
y:data.iloc[:,35:].values}))
print('Accuracy Test:',accuracy.eval({x:dataT.iloc[:,1:35].values.reshape((-1,n_chunks,chunk_size)),
y:dataT.iloc[:,35:].values}))
saver.save(sess, "./model/model.ckpt")
# In[7]:
with graph.as_default():
train_neural_network(x)
# In[ ]:
| housan321/-deep_football | LSTM_predict_football/LSTM.py | LSTM.py | py | 2,658 | python | en | code | 0 | github-code | 13 |
15015644786 | from six.moves import reduce
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.metrics_impl import _streaming_confusion_matrix
def recall(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro'):
    """Multi-class recall as a TF streaming metric (value, update_op) pair.

    Args:
        labels      : (tf.int32 or tf.int64 Tensor) ground-truth labels.
        predictions : (tf.int32 or tf.int64 Tensor) predicted labels, same
                      shape as `labels`.
        num_classes : (int) total number of classes.
        pos_indices : (list) indices of the classes treated as positive;
                      defaults to all classes.
        weights     : (tf.int32 Tensor) optional mask compatible with
                      `labels`.
        average     : (str) 'micro', 'macro' or 'weighted' — see
                      `metrics_from_confusion_matrix` for the exact
                      averaging semantics.

    Returns:
        Tuple of (scalar float Tensor, update_op).
    """
    # Streaming confusion matrix: current value plus its update op.
    confusion, confusion_update = _streaming_confusion_matrix(
        labels, predictions, num_classes, weights)
    # Index 1 of (precision, recall, fbeta) is the recall.
    recall_value = metrics_from_confusion_matrix(
        confusion, pos_indices, average=average)[1]
    recall_update = metrics_from_confusion_matrix(
        confusion_update, pos_indices, average=average)[1]
    return recall_value, recall_update
def precision(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro'):
    """Multi-class precision as a TF streaming metric (value, update_op) pair.

    Args:
        labels      : (tf.int32 or tf.int64 Tensor) ground-truth labels.
        predictions : (tf.int32 or tf.int64 Tensor) predicted labels, same
                      shape as `labels`.
        num_classes : (int) total number of classes.
        pos_indices : (list) indices of the classes treated as positive;
                      defaults to all classes.
        weights     : (tf.int32 Tensor) optional mask compatible with
                      `labels`.
        average     : (str) 'micro', 'macro' or 'weighted' — see
                      `metrics_from_confusion_matrix` for the exact
                      averaging semantics.

    Returns:
        Tuple of (scalar float Tensor, update_op).
    """
    # Streaming confusion matrix: current value plus its update op.
    confusion, confusion_update = _streaming_confusion_matrix(
        labels, predictions, num_classes, weights)
    # Index 0 of (precision, recall, fbeta) is the precision.
    precision_value = metrics_from_confusion_matrix(
        confusion, pos_indices, average=average)[0]
    precision_update = metrics_from_confusion_matrix(
        confusion_update, pos_indices, average=average)[0]
    return precision_value, precision_update
def f1(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro'):
    """F1 score: `fbeta` with its default beta=1 (harmonic mean of
    precision and recall). See `fbeta` for parameter documentation."""
    return fbeta(labels, predictions, num_classes,
                 pos_indices=pos_indices, weights=weights, average=average)
def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro', beta=1):
    """Multi-class F-beta as a TF streaming metric (value, update_op) pair.

    Args:
        labels      : (tf.int32 or tf.int64 Tensor) ground-truth labels.
        predictions : (tf.int32 or tf.int64 Tensor) predicted labels, same
                      shape as `labels`.
        num_classes : (int) total number of classes.
        pos_indices : (list) indices of the classes treated as positive;
                      defaults to all classes.
        weights     : (tf.int32 Tensor) optional mask compatible with
                      `labels`.
        average     : (str) 'micro', 'macro' or 'weighted' — see
                      `metrics_from_confusion_matrix` for the exact
                      averaging semantics.
        beta        : (int) weight of precision in the harmonic mean
                      (beta=1 gives the F1 score).

    Returns:
        Tuple of (scalar float Tensor, update_op).
    """
    # Streaming confusion matrix: current value plus its update op.
    confusion, confusion_update = _streaming_confusion_matrix(
        labels, predictions, num_classes, weights)
    # Index 2 of (precision, recall, fbeta) is the fbeta score.
    fbeta_value = metrics_from_confusion_matrix(
        confusion, pos_indices, average=average, beta=beta)[2]
    fbeta_update = metrics_from_confusion_matrix(
        confusion_update, pos_indices, average=average, beta=beta)[2]
    return fbeta_value, fbeta_update
def safe_div(numerator, denominator):
    """Element-wise division that yields 0 wherever the denominator is 0."""
    num = tf.to_float(numerator)
    den = tf.to_float(denominator)
    zeros = tf.zeros_like(num, dtype=num.dtype)
    return tf.where(tf.equal(den, zeros), zeros, num / den)
def pr_re_fbeta(cm, pos_indices, beta=1):
    """Compute precision, recall and fbeta for the positive classes of a
    confusion matrix.

    :param cm         : (tf.Tensor) square confusion matrix, rows = gold, cols = predicted
    :param pos_indices: (List) indices treated as the positive classes
    :param beta       : (Int) weight of precision in the harmonic mean
    :return: tuple of (precision, recall, fbeta) scalar float Tensors
    """
    num_classes = cm.shape[0]
    neg_indices = [i for i in range(num_classes) if i not in pos_indices]

    # True positives: diagonal entries of the positive classes only.
    diag_mask = np.ones([num_classes, num_classes])
    diag_mask[neg_indices, neg_indices] = 0
    true_pos = tf.reduce_sum(tf.diag_part(cm * diag_mask))

    # Everything predicted as a positive class (columns).
    pred_mask = np.ones([num_classes, num_classes])
    pred_mask[:, neg_indices] = 0
    total_pred = tf.reduce_sum(cm * pred_mask)

    # Everything whose gold label is a positive class (rows).
    gold_mask = np.ones([num_classes, num_classes])
    gold_mask[neg_indices, :] = 0
    total_gold = tf.reduce_sum(cm * gold_mask)

    pr = safe_div(true_pos, total_pred)
    re = safe_div(true_pos, total_gold)
    fb = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)
    return pr, re, fb
def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro', beta=1):
    """
    Precision, Recall and fbeta computed from a confusion matrix.
    :param cm         : (tf.int32 Tensor of shape <num_classes, num_classes>) Streaming confusion matrix
    :param pos_indices: (List) indices of positive classes, default = all
    :param average    : (String) 'micro'    -> counts the total number of true positives, false
                                              positives, and false negatives for the classes in
                                              'pos_indices' and infer the metric from it.
                                 'macro'    -> will compute the metric separately for each class in
                                              'pos_indices' and average. Will not account for class imbalance.
                                 'weighted' -> will compute the metric separately for each class in
                                              'pos_indices' and perform a weighted average by the total
                                              number of true labels for each class.
    :param beta       : (Int) Weight of precision in harmonic mean, default = 1
    :return: Tuple of (precision, recall, fbeta) scalar float Tensors
    """
    num_classes = cm.shape[0]
    if pos_indices is None:
        # No explicit positives: treat every class as positive.
        pos_indices = [i for i in range(num_classes)]
    if average == 'micro':
        # Pool all positive classes into one binary problem.
        return pr_re_fbeta(cm, pos_indices, beta)
    elif average in {'macro', 'weighted'}:
        precisions, recalls, fbetas, n_golds = [], [], [], []
        for idx in pos_indices:
            # One-vs-rest metrics for this single class.
            pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)
            precisions.append(pr)
            recalls.append(re)
            fbetas.append(fbeta)
            # Number of gold examples of this class (sum of its row),
            # used as the weight for the 'weighted' average.
            cm_mask = np.zeros([num_classes, num_classes])
            cm_mask[idx, :] = 1
            n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))
        if average == 'macro':
            # Unweighted mean over classes.
            pr = tf.reduce_mean(precisions)
            re = tf.reduce_mean(recalls)
            fbeta = tf.reduce_mean(fbetas)
            return pr, re, fbeta
        if average == 'weighted':
            # Mean over classes weighted by gold support; safe_div guards
            # against an empty batch (n_gold == 0).
            n_gold = tf.reduce_sum(n_golds)
            pr_sum = sum(p * n for p, n in zip(precisions, n_golds))
            pr = safe_div(pr_sum, n_gold)
            re_sum = sum(r * n for r, n in zip(recalls, n_golds))
            re = safe_div(re_sum, n_gold)
            fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))
            fbeta = safe_div(fbeta_sum, n_gold)
            return pr, re, fbeta
    else:
        raise NotImplementedError()
def masked_conv1d_and_max(t, weights, filters, kernel_size):
    """
    Applies 1d convolution and a masked max-pooling over the second-to-last
    dimension.
    :param t          : (tf.Tensor) A tensor with at least 3 dimensions [d1, d2, ..., dn-1, dn]
    :param weights    : (tf.Tensor or tf.bool) A mask of shape [d1, d2, ..., dn-1]; zero/False
                        entries are treated as padding and excluded from the max
    :param filters    : (Int) number of filters
    :param kernel_size: (Int) kernel size for the temporal convolution
    :return: (tf.Tensor) A tensor of shape [d1, d2, ..., dn-2, filters]
    """
    # Get shape and parameters.  tf.shape gives the dynamic shape; t.shape
    # the static one (dn must be statically known for conv1d).
    shape = tf.shape(t)
    ndims = t.shape.ndims
    # NOTE(review): `reduce` is presumably functools.reduce imported elsewhere
    # in this file — confirm the import exists.
    dim1 = reduce(lambda x, y: x*y, [shape[i] for i in range(ndims - 2)])
    dim2 = shape[-2]
    dim3 = t.shape[-1]
    # Reshape weights to broadcast over the feature dimension.
    weights = tf.reshape(weights, shape=[dim1, dim2, 1])
    weights = tf.to_float(weights)
    # Collapse all leading dims into a single batch dim and zero out padding.
    flat_shape = [dim1, dim2, dim3]
    t = tf.reshape(t, shape=flat_shape)
    t *= weights
    # Apply convolution; re-mask because 'same' padding lets the kernel see
    # padded positions.
    t_conv = tf.layers.conv1d(t, filters, kernel_size, padding='same')
    t_conv *= weights
    # Masked max: push padded positions down to the per-row minimum so they
    # can never win the max (an all-padded row reduces to zero after masking).
    t_conv += (1. - weights) * tf.reduce_min(t_conv, axis=-2, keepdims=True)
    t_max = tf.reduce_max(t_conv, axis=-2)
    # Restore the original leading dimensions.
    final_shape = [shape[i] for i in range(ndims-2)] + [filters]
    t_max = tf.reshape(t_max, shape=final_shape)
    return t_max
| koc-lab/turkishlegalner | src/utilities.py | utilities.py | py | 10,552 | python | en | code | 1 | github-code | 13 |
16596022385 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import SignupForm, UserUpdateForm, ProfileImageUpdateForm
# Create your views here.
def register(request):
    """Handle user sign-up.

    GET renders an empty signup form; POST validates it, creates the
    account, flashes a success message and redirects to the login page.
    Invalid POST data re-renders the bound form with its errors.
    """
    if request.method != "POST":
        return render(request, "users/register.html", {"form": SignupForm()})
    form = SignupForm(request.POST)
    if not form.is_valid():
        return render(request, "users/register.html", {"form": form})
    form.save()
    username = form.cleaned_data.get("username")
    messages.success(request, f"Account has been created for {username}")
    return redirect("/login")
@login_required
def profile(request):
    """Display and update the logged-in user's account and profile image.

    GET renders both forms pre-filled from the current user; POST validates
    them together, saves, flashes a message and redirects back to the
    profile page (Post/Redirect/Get pattern).
    """
    if request.method == "POST":
        user_update_form = UserUpdateForm(request.POST, instance=request.user)
        profile_image_update_form = ProfileImageUpdateForm(
            request.POST, request.FILES, instance=request.user.profile
        )
        if user_update_form.is_valid() and profile_image_update_form.is_valid():
            user_update_form.save()
            profile_image_update_form.save()
            # Plain string literal: the original used an f-string with no
            # placeholders (flake8 F541); the message text is unchanged.
            messages.success(request, "Your account has been updated!")
            return redirect("/profile")
    else:
        user_update_form = UserUpdateForm(instance=request.user)
        profile_image_update_form = ProfileImageUpdateForm(
            instance=request.user.profile
        )
    context = {
        "user_update_form": user_update_form,
        "profile_image_update_form": profile_image_update_form,
    }
    return render(request, "users/profile.html", context)
| Ngotrangdh/Kanbancha | users/views.py | views.py | py | 1,616 | python | en | code | 0 | github-code | 13 |
17041435364 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.WithdrawExtend import WithdrawExtend
class AlipayFundWalletWithdrawModel(object):
    """Request model for the Alipay fund-wallet withdraw API.

    Plain value holder with property accessors; ``to_alipay_dict`` /
    ``from_alipay_dict`` convert to and from the wire-format dict.  The
    per-field dict conversion is data-driven (see ``_FIELDS``) instead of
    the original copy-pasted if-blocks; behavior is unchanged.
    """

    # Field names in wire order; also drives to/from dict conversion.
    _FIELDS = ('amount', 'biz_scene', 'order_title', 'out_biz_no',
               'product_code', 'user_wallet_id', 'withdraw_extend')

    def __init__(self):
        self._amount = None
        self._biz_scene = None
        self._order_title = None
        self._out_biz_no = None
        self._product_code = None
        self._user_wallet_id = None
        self._withdraw_extend = None

    @property
    def amount(self):
        return self._amount
    @amount.setter
    def amount(self, value):
        self._amount = value
    @property
    def biz_scene(self):
        return self._biz_scene
    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value
    @property
    def order_title(self):
        return self._order_title
    @order_title.setter
    def order_title(self, value):
        self._order_title = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def product_code(self):
        return self._product_code
    @product_code.setter
    def product_code(self, value):
        self._product_code = value
    @property
    def user_wallet_id(self):
        return self._user_wallet_id
    @user_wallet_id.setter
    def user_wallet_id(self, value):
        self._user_wallet_id = value
    @property
    def withdraw_extend(self):
        return self._withdraw_extend
    @withdraw_extend.setter
    def withdraw_extend(self, value):
        # Accept either a WithdrawExtend instance or its dict representation.
        if isinstance(value, WithdrawExtend):
            self._withdraw_extend = value
        else:
            self._withdraw_extend = WithdrawExtend.from_alipay_dict(value)

    def to_alipay_dict(self):
        """Return the wire-format dict; falsy fields are omitted."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire-format dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayFundWalletWithdrawModel()
        for name in AlipayFundWalletWithdrawModel._FIELDS:
            if name in d:
                # Assignment goes through the property setters, so
                # 'withdraw_extend' is converted to a WithdrawExtend object.
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFundWalletWithdrawModel.py | AlipayFundWalletWithdrawModel.py | py | 4,217 | python | en | code | 241 | github-code | 13 |
def collatzSequence():
    """Read an integer from stdin and print its Collatz sequence.

    Prints each successive value until the sequence reaches 1.  The
    original printed the terminal 1 twice (once inside the loop and once
    after it); this version prints it exactly once.
    """
    print('input a number')
    num = int(input())
    if num == 1:
        # The loop below never runs for 1; still show the terminal value.
        print(1)
    while num != 1:
        # Collatz step: halve even numbers, 3n+1 for odd numbers.
        num = num // 2 if num % 2 == 0 else 3 * num + 1
        print(num)


collatzSequence()
| Mandyiee/pythonPracticeProjects | collatzSequence.py | collatzSequence.py | py | 246 | python | en | code | 0 | github-code | 13 |
72545244819 | # create by fanfan on 2017/10/17 0017
# create by fanfan on 2017/10/17 0017
import numpy as np
import tensorflow as tf
import pickle
import re
from collections import Counter
import itertools
from HAN_network.Chinese_Sentiment_Classification import settings
import os
from tqdm import tqdm
PAD = '_PAD'
UNK = '_UNK'
def Q2B(uchar):
    '''Convert a full-width (quanjiao) character to its half-width form.

    The ideographic space (U+3000) maps to an ASCII space; other full-width
    forms are shifted down by 0xFEE0.  Characters that are not full-width
    are returned unchanged.
    '''
    code = ord(uchar)
    if code == 0x3000:
        # Ideographic space maps directly to the ASCII space.
        return chr(0x0020)
    code -= 0xfee0
    if 0x0020 <= code <= 0x7e:
        return chr(code)
    # Not a full-width form; keep the original character.
    return uchar
def replace_all(repls, text):
    """Replace every key of *repls* found in *text* with its mapped value."""
    pattern = '|'.join(re.escape(key) for key in repls)
    return re.sub(pattern, lambda match: repls[match.group(0)], text)
def split_sentence(txt):
sents = re.split(r'\n|\s|;|;|。|,|\.|,|\?|\!|||[=]{2,}|[.]{3,}|[─]{2,}|[\-]{2,}|~|、|╱|∥', txt)
sents = [c for s in sents for c in re.split(r'([^%]+[\d,.]+%)', s)]
sents = list(filter(None,sents))
return sents
def normalize_punctuation(text):
    """Normalize CJK/full-width punctuation in *text* to ASCII equivalents.

    Each group in ``cpun`` maps to the single replacement at the same index
    of ``epun``.  The regex-based substitution (previously delegated to
    replace_all) is inlined here.
    """
    cpun = [[' '],
            ['﹗', '!'],
            ['“', '゛', '〃', '′', '"'],
            ['”'],
            ['´', '‘', '’'],
            [';', '﹔'],
            ['《', '〈', '<'],
            ['》', '〉', '>'],
            ['﹑'],
            ['【', '『', '〔', '﹝', '「', '﹁'],
            ['】', '』', '〕', '﹞', '」', '﹂'],
            ['(', '「'],
            [')', '」'],
            ['﹖', '?'],
            ['︰', '﹕', ':'],
            ['・', '.', '·', '‧', '°'],
            ['●', '○', '▲', '◎', '◇', '■', '□', '※', '◆'],
            ['〜', '~', '∼'],
            ['︱', '│', '┼'],
            ['╱'],
            ['╲'],
            ['—', 'ー', '―', '‐', '−', '─', '﹣', '–', 'ㄧ', '-']]
    epun = [' ', '!', '"', '"', '\'', ';', '<', '>', '、', '[', ']', '(', ')', '?', ':', '・', '•', '~', '|', '/', '\\', '-']
    # Build the flat char -> replacement map; later groups win on duplicates,
    # matching the original insertion order.
    repls = {}
    for group, replacement in zip(cpun, epun):
        for ch in group:
            repls[ch] = replacement
    pattern = '|'.join(re.escape(key) for key in repls)
    return re.sub(pattern, lambda m: repls[m.group(0)], text)
def clean_str(txt):
    """Clean raw text: drop spaces, normalize punctuation, convert
    full-width characters to half-width."""
    stripped = txt.replace(" ", "")
    normalized = normalize_punctuation(stripped)
    return ''.join(Q2B(ch) for ch in normalized)
def build_vocab(sentences):
    """Build a vocabulary from an iterable of token sequences.

    Tokens are ordered by descending frequency (ties keep first-seen
    order).  Returns ``[vocabulary, vocabulary_inv]`` where ``vocabulary``
    maps token -> index and ``vocabulary_inv`` is the index -> token list.
    """
    counts = Counter(itertools.chain.from_iterable(sentences))
    vocabulary_inv = [token for token, _ in counts.most_common()]
    vocabulary = {token: idx for idx, token in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
#从pkl文件里面获取vocab
def get_vocab(path = settings.vocab_pkl):
    """Load the pickled character list at *path*.

    :param path: path to the vocabulary pickle (a list of characters)
    :return: tuple (vocab, char_list) where vocab maps char -> index
    :raises ValueError: if *path* does not exist or is a directory
    """
    if not os.path.exists(path) or os.path.isdir(path):
        raise ValueError("No file at {}".format(path))
    # Fix: the original did pickle.load(open(path, 'rb')) and leaked the
    # file handle; the context manager closes it deterministically.
    with open(path, 'rb') as handle:
        char_list = pickle.load(handle)
    vocab = dict(zip(char_list, range(len(char_list))))
    return vocab, char_list
#随机采样,以免训练数据不足
def upsampling(x, size):
    """Pad list *x* with randomly drawn elements until it has *size* entries.

    The original elements are kept as a prefix; extras are drawn from *x*.

    :param x   : list of samples
    :param size: desired total length
    :return: *x* itself if already long enough, otherwise a new list of
             length *size*
    """
    if len(x) >= size:
        return x
    diff_size = size - len(x)
    # Fix: the original always used replace=False, which makes
    # np.random.choice raise ValueError whenever more extras are needed
    # than there are source elements.  Fall back to sampling with
    # replacement only in that case, preserving the old behavior otherwise.
    with_replacement = diff_size > len(x)
    return x + list(np.random.choice(x, diff_size, replace=with_replacement))
def write_data(doc,label,out_f,vocab):
    """Serialize one labeled document into a tf.train.Example and write it.

    The document is cleaned, split into sentences, and truncated/padded to
    settings.max_doc_len sentences of settings.max_sentence_len characters
    each, then written to the open TFRecord writer *out_f*.

    :param doc  : raw document text (str)
    :param label: integer class label
    :param out_f: an open tf.python_io.TFRecordWriter
    :param vocab: dict mapping character -> integer id
    """
    doc = split_sentence(clean_str(doc))
    document_length = len(doc)
    # Per-sentence true lengths (0 for padded sentence slots).
    sentence_lengths = np.zeros((settings.max_doc_len,),dtype= np.int64)
    # NOTE(review): padded cells stay at 1 (np.ones) while unknown chars map
    # to id 0 via vocab.get(..., 0) — confirm ids 0/1 are reserved for
    # UNK/PAD in the vocabulary layout.
    data = np.ones((settings.max_doc_len * settings.max_sentence_len,),dtype=np.int64)
    doc_len = min(document_length,settings.max_doc_len)
    for j in range(doc_len):
        sent = doc[j]
        actual_len = len(sent)
        # Flat offset of sentence j in the 1-D data buffer.
        pos = j * settings.max_sentence_len
        sent_len = min(actual_len,settings.max_sentence_len)
        sentence_lengths[j] = sent_len
        data[pos:pos+sent_len] = [vocab.get(sent[k],0) for k in range(sent_len)]
    features = {'sentence_lengths':tf.train.Feature(int64_list=tf.train.Int64List(value=sentence_lengths)),
                'document_lengths':tf.train.Feature(int64_list=tf.train.Int64List(value=[doc_len])),
                'label':tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                'text':tf.train.Feature(int64_list=tf.train.Int64List(value=data))
                }
    example = tf.train.Example(features=tf.train.Features(feature=features))
    out_f.write(example.SerializeToString())
def build_dataset(pos_path = settings.pos_data_path,
                  neg_path = settings.neg_data_path):
    """Build balanced train/valid TFRecord datasets from raw text files.

    Positive and negative documents are split 90/10 into train/valid,
    upsampled so both classes contribute the same number of examples,
    shuffled via np.random.choice, and serialized with write_data().

    :param pos_path: path to the positive-class text file (one doc per line)
    :param neg_path: path to the negative-class text file (one doc per line)
    """
    # Close the input files deterministically (the original leaked them).
    with open(pos_path, encoding='utf-8') as f:
        pos_docs = f.readlines()
    with open(neg_path, encoding='utf-8') as f:
        neg_docs = f.readlines()
    # Load the vocabulary.
    vocab, _ = get_vocab(settings.vocab_pkl)
    pos_size = len(pos_docs)
    neg_size = len(neg_docs)
    pos_train_size = int(pos_size * 0.9)
    pos_valid_size = pos_size - pos_train_size
    neg_train_size = int(neg_size * 0.9)
    neg_valid_size = neg_size - neg_train_size
    train_path = os.path.join(settings.data_dir, 'train.tfrecords')
    valid_path = os.path.join(settings.data_dir, 'valid.tfrecords')
    with tf.python_io.TFRecordWriter(train_path) as out_f:
        train_size = max(pos_train_size, neg_train_size)
        pos_train_docs = np.random.choice(upsampling(pos_docs[:pos_train_size], train_size), train_size, replace=False)
        neg_train_docs = np.random.choice(upsampling(neg_docs[:neg_train_size], train_size), train_size, replace=False)
        print(len(pos_train_docs), len(neg_train_docs))
        for i in tqdm(range(train_size)):
            pos_row = pos_train_docs[i]
            neg_row = neg_train_docs[i]
            write_data(pos_row, 1, out_f, vocab)
            write_data(neg_row, 0, out_f, vocab)
    with tf.python_io.TFRecordWriter(valid_path) as out_f:
        valid_size = max(pos_valid_size, neg_valid_size)
        pos_valid_docs = np.random.choice(upsampling(pos_docs[pos_train_size:], valid_size), valid_size, replace=False)
        neg_valid_docs = np.random.choice(upsampling(neg_docs[neg_train_size:], valid_size), valid_size, replace=False)
        print(len(pos_valid_docs), len(neg_valid_docs))
        for i in tqdm(range(valid_size)):
            # Fix: the original indexed neg_valid_docs for pos_row (writing
            # negative documents with a positive label) and neg_train_docs
            # for neg_row (leaking training rows into the validation set).
            pos_row = pos_valid_docs[i]
            neg_row = neg_valid_docs[i]
            write_data(pos_row, 1, out_f, vocab)
            write_data(neg_row, 0, out_f, vocab)
    print('Done {} records, train {}, valid {}'.format(pos_size + neg_size,
                                                       pos_train_size + neg_train_size,
                                                       pos_valid_size + neg_valid_size))
class sentence_reader():
    """Iterable over all documents (positive then negative) as character
    lists, with spaces stripped and punctuation normalized."""

    def __init__(self):
        self.pos_docs = settings.pos_data_path
        self.neg_docs = settings.neg_data_path

    def _read(self, path):
        # One cleaned character-list per input line.
        for line in open(path, encoding='utf-8'):
            cleaned = normalize_punctuation(line.strip().replace(" ", ""))
            yield list(cleaned)

    def __iter__(self):
        yield from self._read(self.pos_docs)
        yield from self._read(self.neg_docs)
if __name__ == '__main__':
    # Manual switch for which preprocessing step to run; 0 -> (re)build the
    # TFRecord train/valid datasets.
    status = 0
    if status == 0:
        build_dataset()
| fanfanfeng/nlp_research | HAN_network/Chinese_Sentiment_Classification/data_util.py | data_util.py | py | 7,115 | python | en | code | 8 | github-code | 13 |
14058784795 | import numpy as np
import sys
def get_units(variable):
    """
    Returns a string of appropriate units for different variable

    Parameters
    -----------
        variable : string
            name of variable. Acceptable values are aperture, permeability, and transmissivity

    Returns
    ----------
        units : string
            appropriate units for provided variable
    """
    units_by_variable = {
        "aperture": "m",
        "permeability": "m^2",
        "transmissivity": "m^2/s",
    }
    if variable not in units_by_variable:
        error = "ERROR!!! The variable of choice '{0}' is not known in the function get_units()\nAcceptable names are aperture, permeability, and transmissivity\nExiting.".format(
            variable)
        sys.stderr.write(error)
        sys.exit(1)
    return units_by_variable[variable]
def check_key(dict, key):
    '''
    Checks if key is in dict

    Parameters
    -----------
        dict : dictionary
        key : string

    Returns
    ----------
        bool : bool
            True if key is in dictionary, False if not
    '''
    # Direct membership test replaces the if/else returning literal
    # True/False.  The parameter name shadows the builtin ``dict``; it is
    # kept unchanged for interface compatibility with existing callers.
    return key in dict
def load_fractures(filename, quiet):
    '''
    Loads fracture information from filename.

    Parameters
    -----------
        filename : string
            name of fracture radii file (two header lines, then one row per
            fracture: semi-axis 1, semi-axis 2, family id, ...)
        quiet : bool
            suppress the progress message when True

    Returns
    ----------
        r : array of doubles
            maximum radii of fractures
        family_id : array of ints
            family id for each fractures
        n : int
            number of fractures in the domain
    '''
    if not quiet:
        print("--> Loading Fracture information from {0}".format(filename))
    data = np.genfromtxt(filename, skip_header=2)
    family_id = (data[:, 2]).astype(int)
    n, _ = np.shape(data)
    # Radius is the larger of the two semi-axes; vectorized np.maximum
    # replaces the original element-wise Python loop.
    r = np.maximum(data[:, 0], data[:, 1])
    return r, family_id, n
def convert(x, source, target):
    '''
    converts between variables aperture, permeability, and transmissivity

    Parameters
    -----------
        x : numpy array
            input values
        source : string
            variable name of source
        target : string
            variable name of output

    Returns
    ----------
        y : numpy array
            array of converted values

    Notes
    -----
        permeability/Transmissivty are defined using the cubic law

        k = b^2/12

        T = (b^3 rho g)/(12 mu)
    '''
    mu = 8.9e-4  # dynamic viscosity of water at 20 degrees C, Pa*s
    g = 9.8  # gravity acceleration
    rho = 997  # water density
    pair = (source, target)
    if pair == ("aperture", "permeability"):
        return (x**2) / 12
    elif pair == ("aperture", "transmissivity"):
        return (x**3 * rho * g) / (12 * mu)
    elif pair == ("permeability", "aperture"):
        return np.sqrt(12.0 * x)
    elif pair == ("permeability", "transmissivity"):
        b = np.sqrt(12.0 * x)
        return (b * x * rho * g) / (12 * mu)
    elif pair == ("transmissivity", "aperture"):
        return ((x * 12 * mu) / (rho * g))**(1 / 3)
    elif pair == ("transmissivity", "permeability"):
        b = ((x * 12 * mu) / (rho * g))**(1 / 3)
        return (b**2) / 12
    # Any unknown source/target combination is a fatal error.
    error = "Error in conversion! Unknown name provided in convert. Either '{0}' or '{1}' is not known\nAcceptable names are aperture, permeability, and transmissivity\nExiting.\n".format(
        source, target)
    sys.stderr.write(error)
    sys.exit(1)
def log_normal(params, variable, number_of_fractures):
    """ Creates fracture-based log-normal values, number_of_fractures long.

    Parameters
    -----------
        params : dict
            Dictionary of parameters for the log-normal values. Must contain keys mu and sigma.
        variable : string
            name of values being generated. Acceptable values are aperture, permeability, and transmissivity
        number_of_fractures : int
            number of fractures in the DFN

    Returns
    ----------
        b : array
            aperture values
        perm : array
            permeability values
        T : array
            transmissivity values

    Notes
    ----------
        Values are generated for the variable provided; the two remaining
        variables are derived from those values via convert().
        Exactly one np.random.normal draw of length number_of_fractures is
        made, so seeded runs are reproducible.
    """
    print('--> Creating uncorrelated lognormal {0} values.'.format(variable))
    units = get_units(variable)
    print("--> Mean: {0} {1}".format(params["mu"], units))
    print("--> Log Variance: {0}".format(params["sigma"]))
    # NOTE(review): exp(log(mu) + sqrt(sigma)*N(0,1)) makes mu the MEDIAN of
    # the log-normal, not its mean — confirm the intended parameterization.
    if variable == "aperture":
        b = np.log(params["mu"]) * np.ones(number_of_fractures)
        perturbation = np.random.normal(0.0, 1.0, number_of_fractures)
        b = np.exp(b + np.sqrt(params["sigma"]) * perturbation)
        perm = convert(b, variable, "permeability")
        T = convert(b, variable, "transmissivity")
    elif variable == "permeability":
        perm = np.log(params["mu"]) * np.ones(number_of_fractures)
        perturbation = np.random.normal(0.0, 1.0, number_of_fractures)
        perm = np.exp(perm + np.sqrt(params["sigma"]) * perturbation)
        b = convert(perm, variable, "aperture")
        T = convert(perm, variable, "transmissivity")
    elif variable == "transmissivity":
        T = np.log(params["mu"]) * np.ones(number_of_fractures)
        perturbation = np.random.normal(0.0, 1.0, number_of_fractures)
        T = np.exp(T + np.sqrt(params["sigma"]) * perturbation)
        b = convert(T, variable, "aperture")
        perm = convert(T, variable, "permeability")
    else:
        error = "ERROR!!! The variable of choice '{0}'' is not known\nAcceptable names are aperture, permeability, and transmissivity\nExiting.\n".format(
            variable)
        sys.stderr.write(error)
        sys.exit(1)
    print('--> Complete\n')
    return b, perm, T
def correlated(params, variable, radii):
    """ Creates hydraulic properties perfectly correlated with fracture
    radius via the power law value = alpha * r^beta.

    Parameters
    -----------
        params : dict
            Must contain keys alpha and beta.
        variable : string
            name of values being generated. Acceptable values are aperture, permeability, and transmissivity
        radii : array
            array of fracture radii in the domain

    Returns
    ----------
        b : array
            aperture values
        perm : array
            permeability values
        T : array
            transmissivity values

    Notes
    ----------
        Values are generated for the requested variable; the other two are
        derived from it via convert().
    """
    print(
        '--> Creating Perfectly Correlated {0} values based on fracture radius.'
        .format(variable))
    # get_units() exits the program for an unknown variable name.
    units = get_units(variable)
    if variable == "aperture":
        print("b ={1}*r^{2} {3}".format(variable, params["alpha"],
                                        params["beta"], units))
    elif variable == "permeability":
        print("k ={1}*r^{2} {3}".format(variable, params["alpha"],
                                        params["beta"], units))
    elif variable == "transmissivity":
        print("T ={1}*r^{2} {3}".format(variable, params["alpha"],
                                        params["beta"], units))
    # The power law itself is identical for all three variables.
    values = params["alpha"] * radii**params["beta"]
    if variable == "aperture":
        b = values
        perm = convert(b, "aperture", "permeability")
        T = convert(b, "aperture", "transmissivity")
    elif variable == "permeability":
        perm = values
        b = convert(perm, "permeability", "aperture")
        T = convert(perm, "permeability", "transmissivity")
    elif variable == "transmissivity":
        T = values
        b = convert(T, "transmissivity", "aperture")
        perm = convert(T, "transmissivity", "permeability")
    print("--> Complete\n")
    return b, perm, T
def semi_correlated(params, variable, radii, number_of_fractures):
    """ Creates hydraulic properties of fractures based on a power-law
    relationship with fracture radius plus a log-normal noise term, e.g.
    log(T) = log(alpha*r^beta) + sqrt(sigma) * N(0,1).

    Parameters
    -----------
        params : dict
            Must contain keys alpha, beta and sigma.
        variable : string
            name of values being generated. Acceptable values are aperture, permeability, and transmissivity
        radii : array
            array of fracture radii in the domain
        number_of_fractures : int
            number of fractures in the DFN

    Returns
    ----------
        b : array
            aperture values
        perm : array
            permeability values
        T : array
            transmissivity values

    Notes
    ----------
        Values are generated for the requested variable; the other two are
        derived from it via convert().  Exactly one np.random.normal draw is
        made per call, so seeded runs are reproducible.
    """
    print("--> Creating Semi-Correlated {0} values based on fracture radius.".
          format(variable))
    print('--> Coefficient: {0}'.format(params["alpha"]))
    print('--> Exponent : {0}'.format(params["beta"]))
    print('--> Log Variance: {0}'.format(params["sigma"]))
    # NOTE(review): no else branch — an unknown variable name falls through
    # and raises NameError at the return; upstream validation in
    # generate_hydraulic_values is relied upon here.
    if variable == "aperture":
        b = params["alpha"] * radii**params["beta"]
        perturbation = np.random.normal(0.0, 1.0, number_of_fractures)
        b = np.exp(np.log(b) + np.sqrt(params["sigma"]) * perturbation)
        perm = convert(b, variable, "permeability")
        T = convert(b, variable, "transmissivity")
    elif variable == "permeability":
        perm = params["alpha"] * radii**params["beta"]
        perturbation = np.random.normal(0.0, 1.0, number_of_fractures)
        perm = np.exp(np.log(perm) + np.sqrt(params["sigma"]) * perturbation)
        b = convert(perm, variable, "aperture")
        T = convert(perm, variable, "transmissivity")
    elif variable == "transmissivity":
        T = params["alpha"] * radii**params["beta"]
        perturbation = np.random.normal(0.0, 1.0, number_of_fractures)
        T = np.exp(np.log(T) + np.sqrt(params["sigma"]) * perturbation)
        b = convert(T, variable, "aperture")
        perm = convert(T, variable, "permeability")
    print('--> Complete\n')
    return b, perm, T
def constant(params, variable, number_of_fractures):
    """ Creates hydraulic properties of fractures with a single constant
    value for every fracture.

    Parameters
    -----------
        params : dict
            Must contain key mu (the constant value).
        variable : string
            name of values being generated. Acceptable values are aperture, permeability, and transmissivity
        number_of_fractures : int
            number of fractures in the DFN

    Returns
    ----------
        b : array
            aperture values
        perm : array
            permeability values
        T : array
            transmissivity values

    Notes
    ----------
        Values are generated for the requested variable; the other two are
        derived from it via convert().
    """
    print("--> Creating constant {0} values.".format(variable))
    # get_units() exits the program for an unknown variable name.
    units = get_units(variable)
    print("--> Value: {0} {1}".format(params["mu"], units))
    values = params["mu"] * np.ones(number_of_fractures)
    if variable == "aperture":
        b = values
        perm = convert(b, "aperture", "permeability")
        T = convert(b, "aperture", "transmissivity")
    elif variable == "permeability":
        perm = values
        b = convert(perm, "permeability", "aperture")
        T = convert(perm, "permeability", "transmissivity")
    elif variable == "transmissivity":
        T = values
        b = convert(T, "transmissivity", "aperture")
        perm = convert(T, "transmissivity", "permeability")
    print('--> Complete\n')
    return b, perm, T
def dump_hydraulic_values(self, b, perm, T, prefix=None):
    """ Writes variable information to files.

    Parameters
    -----------
        self : object
            DFN Class (unused by the body; present so this can be bound as
            a method)
        b : array
            aperture values
        perm : array
            permeability values
        T : array
            transmissivity values
        prefix : string
            optional prefix for the output file names, e.g.
            prefix_aperture.dat and prefix_perm.dat

    Returns
    ----------
        None

    Notes
    ----------
        Reads fracture_info.dat from the current working directory and
        rewrites it (or prefix_fracture_info.dat) with the new perm and
        aperture values.  Fracture ids in the aperture/perm/transmissivity
        files are offset by -7, matching the dfnWorks material-id convention.
    """
    print("--> Dumping values to files")
    n = len(b)
    # Write out new aperture.dat
    if prefix is not None:
        aper_filename = prefix + '_aperture.dat'
        perm_filename = prefix + '_perm.dat'
        trans_filename = prefix + '_transmissivity.dat'
        frac_info_filename = prefix + '_fracture_info.dat'
    else:
        aper_filename = "aperture.dat"
        perm_filename = "perm.dat"
        trans_filename = "transmissivity.dat"
        frac_info_filename = "fracture_info.dat"

    # write aperture file
    print("--> Writing {0}".format(aper_filename))
    with open(aper_filename, 'w+') as fp:
        fp.write('aperture\n')
        for i in range(n):
            fp.write('-{0:d} 0 0 {1:0.5e}\n'.format(i + 7, b[i]))

    # write perm file (isotropic: the same value three times)
    print("--> Writing {0}".format(perm_filename))
    with open(perm_filename, 'w+') as fp:
        fp.write('permeability\n')
        for i in range(n):
            fp.write('-{0:d} 0 0 {1:0.5e} {1:0.5e} {1:0.5e}\n'.format(
                i + 7, perm[i]))

    # NOTE(review): the "transmissivty" header typo is written to the output
    # file; downstream readers may depend on it, so it is preserved.
    print(f"--> Writing {trans_filename}")
    with open(trans_filename, 'w+') as fp:
        fp.write('transmissivty\n')
        for i in range(n):
            fp.write('-{0:d} {1:0.5e}\n'.format(i + 7, T[i]))

    ## revise fracture_info.dat: keep the connection counts, refresh
    ## perm/aperture columns with the newly generated values.
    print(f"--> Writing {frac_info_filename}")
    connections = np.genfromtxt("fracture_info.dat",skip_header = 1)[:,0].astype(int)
    with open(frac_info_filename, "w+") as fp:
        fp.write("num_connections perm aperture\n")
        for i in range(n):
            fp.write(f"{connections[i]:d} {perm[i]:0.8e} {b[i]:0.8e}\n")
    print("--> Complete")
def generate_hydraulic_values(self,
                              variable,
                              relationship,
                              params,
                              radii_filename="radii_Final.dat",
                              family_id=None):
    """ Generates hydraulic property values for the fractures in the DFN.

    Parameters
    -----------
        self : object
            DFN Class
        variable : string
            base variable to generate: aperture, permeability, or
            transmissivity (the other two are derived from it)
        relationship : string
            name of functional relationship for apertures.
            options are log-normal, correlated, semi-correlated, and
            constant
        params : dictionary
            dictionary of parameters for functional relationship
        radii_filename : string
            fracture radii file produced by the generator
        family_id : int
            family id of fractures; if provided, entries of fractures
            outside that family are zeroed in the returned arrays

    Returns
    ----------
        b : array
            aperture values
        perm : array
            permeability values
        T : array
            transmissivity values
        (when family_id is given, only entries of that family are non-zero)

    Notes
    ----------
        See Hyman et al. 2016 "Fracture size and transmissivity correlations: Implications for transport simulations in sparse
        three-dimensional discrete fracture networks following a truncated power law distribution of fracture size" Water Resources Research for more details
    """

    # Check if the variable choice is defined
    variables = ["aperture", "permeability", "transmissivity"]
    if variable not in variables:
        error = "ERROR!!! The variable of choice '{0}'' is not known\nAcceptable names are {1}, {2}, {3}\nExiting.\n".format(
            variable, variables[0], variables[1], variables[2])
        sys.stderr.write(error)
        sys.exit(1)
    # else:
    #     print(
    #         "Creating aperture, permeability, and transmissivity based on {0}."
    #         .format(variable))

    # check if the function is defined
    functions = ["log-normal", "semi-correlated", "constant", "correlated"]
    if relationship not in functions:
        error = "ERROR!!! The provided relationship '{0}' is unknown\nAcceptable relationship are {1}, {2}, {3}, {4}\nExiting.\n".format(
            relationship, functions[0], functions[1], functions[2],
            functions[3])
        sys.stderr.write(error)
        sys.exit(1)
    # else:
    #     print(
    #         "Creating aperture, permeability, and transmissivity using the {0} function."
    #         .format(relationship))

    # Load Fracture information
    radii, families, number_of_fractures = load_fractures(radii_filename,
                                                          quiet=False)

    if family_id is not None:
        print(f"--> Working on Fracture Family {family_id}")
        # Early sanity check: bail out if the family has no fractures.
        idx = np.where(families == family_id)
        if len(idx[0]) == 0:
            error = f"ERROR!!! No fractures in the network are in the requested family. {family_id}.\nUser Rectangles = -1\nUser Ellipses = 0.\nStochastic Families > 0.\nExiting\n"
            sys.stderr.write(error)
            sys.exit(1)

    # Each branch validates the relationship-specific parameter keys before
    # dispatching to the matching generator.
    if relationship == "log-normal":
        keys = ["mu", "sigma"]
        for key in keys:
            if not check_key(params, key):
                error = "ERROR!!! The required key '{0}' was not found in the params dictionary\nExiting\n".format(
                    key)
                sys.stderr.write(error)
                sys.exit(1)
        b, perm, T = log_normal(params, variable, number_of_fractures)

    if relationship == "correlated":
        keys = ["alpha", "beta"]
        for key in keys:
            if not check_key(params, key):
                error = "ERROR!!! The required key '{0}' was not found in the params dictionary\nExiting\n".format(
                    key)
                sys.stderr.write(error)
                sys.exit(1)
        b, perm, T = correlated(params, variable, radii)

    if relationship == "semi-correlated":
        keys = ["alpha", "beta", "sigma"]
        for key in keys:
            if not check_key(params, key):
                error = "ERROR!!! The required key '{0}' was not found in the params dictionary\nExiting\n\n".format(
                    key)
                sys.stderr.write(error)
                sys.exit(1)
        b, perm, T = semi_correlated(params, variable, radii,
                                     number_of_fractures)
    if relationship == "constant":
        keys = ["mu"]
        for key in keys:
            if not check_key(params, key):
                error = "ERROR!!! The required key '{0}' was not found in the params dictionary\nExiting\n\n".format(
                    key)
                sys.stderr.write(error)
                sys.exit(1)
        b, perm, T = constant(params, variable, number_of_fractures)

    if family_id == None:
        return b, perm, T
    else:
        # Sent entries that are not in the requested family to None
        idx = np.where(families != family_id)
        b[idx] = 0
        T[idx] = 0
        perm[idx] = 0
        return b, perm, T
# if __name__ == '__main__':
# variable = "transmissivity"
# function = "correlated"
# params = {"alpha":6.7*10**-9,"beta":1.4}
# _,_,T1 = generate_hydraulic_values(variable,function,params,radii_filename="/Users/jhyman/Desktop/radii_Final.dat",family_id=1)
# function = "semi-correlated"
# params = {"alpha":6.3*10**-9,"beta":0.5,"sigma":1.0}
# _,_,T2 = generate_hydraulic_values(variable,function,params,radii_filename="/Users/jhyman/Desktop/radii_Final.dat",family_id=2)
# #combine values
# T = T1 + T2
# print(T)
# # convert to other variables
# perm = convert(T,"transmissivity","permeability")
# b = convert(T,"transmissivity","aperture")
# # write to file
# #dump_values("testing",b,perm,T)
| Laok123dawda/dfnworks | pydfnworks/pydfnworks/dfnGen/generation/hydraulic_properties.py | hydraulic_properties.py | py | 20,499 | python | en | code | 0 | github-code | 13 |
31904166783 | import torch
from torch.nn.functional import cross_entropy
class LargeMarginCosineLossLayer(torch.nn.Module):
    """Classification head intended to implement the Large Margin Cosine Loss (CosFace).

    Holds a (feature_dim, label_nums) weight matrix; ``forward`` scores each
    sample by cosine similarity against every class weight vector and returns
    the argmax prediction together with a margin-adjusted loss.
    """
    def __init__(self, feature_dim, label_nums, margin):
        super(LargeMarginCosineLossLayer, self).__init__()
        # One weight column per class label.
        self.params = torch.nn.Parameter(torch.randn((feature_dim, label_nums)))
        self.margin = margin
        self.softmax = torch.nn.Softmax(dim=1)
    def forward(self, feature_input, labels):
        # shape of feature_input is (bsz, hidden_size)
        # shape of labels is (bsz, label_nums)
        # ||w|| per class -> (1, label_nums); ||x|| per sample -> (bsz, 1).
        w = torch.norm(self.params, dim=0).unsqueeze(0)
        x = torch.norm(feature_input, dim=1).unsqueeze(-1)
        norm_v = torch.mm(x, w)
        # print(norm_v.shape)
        matmul = torch.mm(feature_input, self.params)
        # print(matmul.shape)
        # Cosine similarity between every sample and every class vector.
        normalized = matmul / norm_v
        predict = torch.argmax(normalized, dim=1)
        # The predicted class is derived from `normalized` (the cosine scores).
        # Below: compute the loss.
        # NOTE(review): subtracting the scalar margin from *all* logits is a
        # no-op under softmax (softmax is shift-invariant); canonical LMCL
        # subtracts the margin only at the target class -- confirm intent.
        logits = (normalized - self.margin)
        # NOTE(review): cross_entropy already applies log_softmax internally,
        # so applying Softmax here first computes a "double softmax" -- confirm.
        logits = self.softmax(logits)
        loss = cross_entropy(logits, labels)
        return predict, loss
| yangyubuaa/customize_loss | lmcl_loss.py | lmcl_loss.py | py | 1,112 | python | en | code | 1 | github-code | 13 |
38586767007 | from django import forms
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.utils.translation import ugettext_lazy as _
from django.template.loader import get_template
from django.template import Context
class ContactUs(forms.Form):
    """Contact form that e-mails the submitted message to the site owners."""

    your_name = forms.CharField(max_length=64, widget=forms.TextInput(attrs={'placeholder': _('Name')}))
    subject = forms.CharField(max_length=128, widget=forms.TextInput(attrs={'placeholder': _('Subject')}))
    email = forms.EmailField(max_length=128, widget=forms.TextInput(attrs={'placeholder': _('E-mail')}))
    message = forms.CharField(widget=forms.Textarea(attrs={'placeholder': _('Message')}))

    def process(self) -> object:
        """Validate the form and, on success, send the rendered message by e-mail.

        Returns a ``(form, success)`` tuple so callers can re-render the bound
        form when validation fails.
        """
        if not self.is_valid():
            return self, False

        cleaned = self.cleaned_data
        sender = cleaned['email']
        # The submitter is CC'd on their own message in addition to the owner.
        recipients = ['deniszorinets@gmail.com']
        if sender:
            recipients.append(sender)

        body = get_template('localization/contact_us.html').render(Context({
            'name': cleaned['your_name'],
            'message': cleaned['message'],
        }))
        send_mail(cleaned['subject'], body, sender, recipients)
        return self, True
| StepanPilchyck/Neutrino | static_page/forms.py | forms.py | py | 1,387 | python | en | code | 0 | github-code | 13 |
def word_frequencies(sentence, words_to_remove):
    """Return ``(word, percent)`` pairs for the words that survive removal.

    ``sentence`` is split on single spaces (matching the original script);
    every word found in ``words_to_remove`` is dropped.  Each surviving unique
    word, in first-occurrence order, is paired with its count among the kept
    words expressed as a percentage of the ORIGINAL word count.
    """
    original_words = sentence.split(' ')
    removed = set(words_to_remove)  # O(1) membership instead of list scans
    kept = [w for w in original_words if w not in removed]
    total = len(original_words)
    seen = set()
    result = []
    for word in kept:
        if word not in seen:
            seen.add(word)
            result.append((word, kept.count(word) / total * 100))
    return result


if __name__ == "__main__":
    # Guarding the interactive part makes the helper importable/testable;
    # behaviour when run as a script is unchanged.
    a = input("Enter the string ")
    b = input("Enter the word to be removed ")
    for word, freq in word_frequencies(a, b.split(' ')):
        print(word, freq)
| kishoredevr/Python | removing word in sen.py | removing word in sen.py | py | 315 | python | en | code | 0 | github-code | 13 |
43243076202 | #====================================================================================
# Lista CAP239B - Prof. Reinaldo Rosa
# Aluno: Leonardo Sattler Cassara
# Exercicio 7.2
#====================================================================================
# Importacoes
#------------------------------------------------------------------------------------
import sys
sys.path.append('../../../../signal_generator_codes/')
sys.path.append('../../../../statistical_analysis_codes/')
import stats_tools
import Specplus
import mfdfa_ss_m4
import pmnoise
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#====================================================================================
# Generating datasets - pmnoise group
#------------------------------------------------------------------------------------
# Base name for every output file of this exercise
files_name = './Exercicio7_2_grupo_pmnoise'
# List of families: series length n and p-model intermittency parameters
n = 8192
p_list = [.18, .23, .28, .32, .37, .42]
p_counter = 0
# Lists used to export the data (to csv)
params_list = []
moms_list = []  # NOTE(review): never filled nor exported -- appears unused
# Loop over the data families
for p in p_list:
    # Defining parameters: first three families are exogenous, rest endogenous
    if p_counter <=2:
        series_type='Exogenous'
        beta=0.7
    else:
        series_type='Endogenous'
        beta=0.4
    # Series counter (10 per family)
    p_counter = p_counter + 1
    counter = 1
    # Compute 10 data series for each family
    while counter <= 10:
        print('p, beta, serie:', p, beta, counter)
        # Generate one data instance (keep the second element returned by pmodel)
        data = pmnoise.pmodel(n, p, beta)[1]
        data = list(data)
        # Compute statistics (variance, skewness and kurtosis)
        var = stats_tools.variancia(data)
        skew = stats_tools.skewness(data)
        kur = stats_tools.kurtosis(data)
        alfa = Specplus.dfa1d(data, 1)[0]
        # NOTE(review): this overwrites the family beta (0.7/0.4) with the PSD
        # estimate, so series 2..10 feed the previous series' measured beta
        # back into pmodel -- confirm this feedback is intentional.
        beta = Specplus.psd(data)[5]
        Psi = mfdfa_ss_m4.mfdfa(data, 1, files_name+'_p_'+str(p))[0]
        # Store the parameters of this series in the export list
        params_list.append([n, p, counter, skew**2., kur, alfa, beta, Psi])
        counter = counter+1
#====================================================================================
# Exporting data - pmnoise group
#------------------------------------------------------------------------------------
# Writing the parameters to csv
params_frame = pd.DataFrame(params_list, columns=['N', 'p', 'Serie', 'Skewness_2', 'Kurtosis', 'Alfa', 'Beta', 'Psi'])
params_frame.to_csv(files_name + '_parametros.csv', index=False)
#====================================================================================
# FIM
#====================================================================================
| leosattler/Mat.-Comp.-I-B | Exercises/Exercise7/7.2/grupo_pmnoise/exercise_7_2_grupo_pmnoise.py | exercise_7_2_grupo_pmnoise.py | py | 2,855 | python | en | code | 0 | github-code | 13 |
# Load the Pima Indians diabetes CSV with pandas (from a local file -- the
# original "from URL" note was inaccurate) and print a quick summary.
from pandas import read_csv
# Relative path to the dataset; the file has no header row, so the column
# names are supplied explicitly.
filename = "../doc/pima-indians-diabetes.data.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
print(data.shape)   # (rows, columns)
print(data.dtypes)  # inferred column types
# First 20 rows for a quick visual check
peek = data.head(20)
print(peek)
| nhancv/ml-14days | src/day3.py | day3.py | py | 303 | python | en | code | 1 | github-code | 13 |
71605850259 | import logging
from time import sleep
import loggerScript
logger = logging.getLogger(__name__)
from flask import Flask, request
logger.warning("Created Flask and Request")
import json
logger.warning("Imported JSON")
from camera import Camera
logger.warning("Imported Camera")
from main import Main
logger.warning("Imported Main")
import sys
logger.warning("Imported Sys")
from threading import Thread
from data import Data
# Setup flask server
app = Flask(__name__)
logger.warning("Setup Flask")
# Shared backend instance used by every route below.
m = Main()
#Checks if user is angry
@app.route('/isangry', methods = ['POST'])
def isAngry():
    """POST /isangry: return {"isangry": <bool>} from the backend's detector."""
    logger.warning("CHECKING IF ANGRY")
    angry = m.isAngry()
    logger.warning(int(angry))
    return json.dumps({"isangry": angry})
#Dispatches a summary request to the application- or time-based summarizer
@app.route('/summarize', methods = ['POST'])
def returnSummary():
    """POST /summarize: summarise three emotions, grouped by application or by time."""
    logger.warning("RETURNING SUMMARY")
    payload = request.get_json()
    emotions = [payload[key] for key in ('emotion1', 'emotion2', 'emotion3')]
    logger.warning(payload)
    if payload['comparing'] == "Applications":
        logger.warning("Application Emotion Retreiving")
        summary = m.summarizeApps(*emotions)
    else:
        summary = m.summarizeTimes(*emotions)
    return json.dumps({"result": summary})
#Ends the current session
@app.route('/endsession', methods = ['POST'])
def endSession():
    """POST /endsession: close the current tracking session; returns an empty JSON list."""
    logger.warning("END SESSION")
    m.endSession()
    return json.dumps([])
#Takes a new image
@app.route('/takeimage', methods = ['POST'])
def takeImage():
    # NOTE(review): despite the route name, no capture happens here -- the
    # background captureImage thread does that; this returns the stored emotions.
    return json.dumps({"emotions": m.data.getEmotions()})
#Shuts down the server
@app.route('/kill', methods = ['POST'])
def kill():
    """POST /kill: persist collected data, then terminate the process.

    NOTE(review): quit() raises SystemExit inside the request handler; under
    the Flask dev server this ends the process -- confirm that is intended.
    """
    logger.warning("Closed Server")
    m.writeData()
    quit()
#Captures an image in a new thread
def captureImage():
    """Background worker: analyze frames forever at roughly 10 Hz."""
    while True:
        m.analyze()
        sleep(0.1)
#Creates a proper application
if __name__ == "__main__":
    logger.warning("Ready to accept")
    # Start the continuous capture/analysis worker before serving requests.
    # NOTE(review): the thread is non-daemon, so it keeps the process alive;
    # the /kill route is the intended shutdown path -- confirm.
    new_thread = Thread(target = captureImage)
    new_thread.start()
app.run(port=5000) | mehulgoel873/EMONITOR | backend/server.py | server.py | py | 2,116 | python | en | code | 0 | github-code | 13 |
37406432663 | import asyncio
from timeit import default_timer
from aiohttp import ClientSession
import aiohttp
import yaml
import aws_lambda_logging
import logging
import time
import botocore.config
import boto3
import slackweb
import ast
import datetime
import json
# JSON logger initialisation
logger = logging.getLogger()
from aws_lambda_logging import setup
setup('log-level')
# Parameters from SSM (retries disabled so a bad parameter fails fast)
cfg = botocore.config.Config(retries={'max_attempts': 0})
ssm = boto3.client('ssm',config=cfg,region_name='specify-region')
# Parameters for Slack Notification
# NOTE(review): notification defaults to "none", so the Slack client below is
# never initialised unless this template value is changed to "slack".
notification = "none"
if notification == "slack":
    slack_webhook = ssm.get_parameter(Name='slack_webhook',WithDecryption=True)
    # Attachment template; slack_notification() mutates this dict in place.
    slack_payload = {"fallback": "Weburl ping status critical","color": "#ff0000","author_name": "Pinger","title": "Weburl Ping Status","title_link": "https://api.slack.com/","text": "Weburl not responding, HTTP error code ~~~ERRORCODE~~~","fields": [{"title": "Priority","value": "High"},{"title": "View details logs in cloud watch","value":'https://specify-region.console.aws.amazon.com/cloudwatch/home?region=specify-region#logStream:group=%252Faws%252Flambda%252Fpinger'}],"footer": "Slack API","footer_icon": "https://platform.slack-edge.com/img/default_application_icon.png","ts": 1532746720.06}
    slack = slackweb.Slack(url=slack_webhook['Parameter']['Value'])
# Parameters & functions for state machine s3 bucket
state_machine_s3 = "s3-bucket-name"
s3 = boto3.resource("s3").Bucket(state_machine_s3)
# HACK: helper lambdas are monkey-patched onto the json/yaml modules to
# read/write objects directly from the state-machine S3 bucket.
json.load_s3 = lambda filename: json.load(s3.Object(key=filename).get()["Body"])
json.dump_s3 = lambda object, filename: s3.Object(key=filename).put(Body=json.dumps(object))
yaml.load_s3 = lambda filename: yaml.load(s3.Object(key=filename).get()["Body"])
# Last known state per URL, loaded once per Lambda container.
state_machine = json.load_s3('state_machine')
def slack_notification(url,status_code,status):
    """Send a status-transition notification to Slack.

    Mutates the shared global ``slack_payload`` template in place and posts
    it.  No-op unless the module-level ``notification`` flag is "slack".
    """
    if notification == "slack":
        if status == "StatusOK":
            # Recovery message: green attachment.
            attachments = []
            slack_payload['color'] = "#36a64f"
            slack_payload['text'] = '%s is back healthy and responding with status code %d' % (url, status_code)
            slack_payload['fallback'] = "Weburl ping status, Normal!!!"
            slack_payload['title_link'] = url
            slack_payload['fields'][0]['value'] = 'Normal'
            slack_payload['ts'] = time.time()
            attachments.append(slack_payload)
            slack.notify(attachments=attachments)
        else:
            # Failure message: red attachment with the error text and exit code.
            attachments = []
            slack_payload['color'] = "#ff0000"
            slack_payload['text'] = '%s is not healthy and failing with error message "%s" exit/status code %d' % (url, status, status_code)
            slack_payload['fallback'] = "Weburl ping status, Critical!!!"
            slack_payload['title_link'] = url
            slack_payload['fields'][0]['value'] = 'High'
            slack_payload['ts'] = time.time()
            attachments.append(slack_payload)
            slack.notify(attachments=attachments)
def ping_urls(urls):
    """Fetch response code and response time of web pages asynchronously."""
    start_time = default_timer()
    # NOTE(review): get_event_loop() outside a running loop is deprecated on
    # Python 3.10+ -- fine for the Lambda runtime this template targets.
    loop = asyncio.get_event_loop()  # event loop
    future = asyncio.ensure_future(fetch_all(urls))  # tasks to do
    loop.run_until_complete(future)  # loop until done
    tot_elapsed = default_timer() - start_time
    logger.info('{" TotalRuntime": %5.2f }' % (tot_elapsed))
async def fetch_all(urls):
    """Launch requests for all web pages concurrently in one ClientSession."""
    tasks = []
    # Per-URL start times stored as a function attribute so fetch() can
    # compute elapsed time.
    fetch.start_time = dict()  # dictionary of start times for each url
    async with ClientSession(connector=aiohttp.TCPConnector(ssl=False),timeout=aiohttp.ClientTimeout(total=60)) as session:
        for url in urls:
            # print("get status for %s" % (url))
            task = asyncio.ensure_future(fetch(url, session))
            tasks.append(task)  # create list of tasks
        _ = await asyncio.gather(*tasks)  # gather task responses
async def fetch(url, session):
    """Fetch a url, using specified ClientSession.

    Logs one JSON line per URL and records the outcome in the shared state
    machine.  Network failures use the sentinel status code 1 (no HTTP
    status available).
    """
    fetch.start_time[url] = default_timer()
    try:
        async with session.get(url) as response:
            resp = await response.read()
            elapsed = default_timer() - fetch.start_time[url]
            if response.status == 200:
                message = "StatusOK"
            else:
                message = "StatusNOK"
            logger.info('{"URL": "%s", "StatusCode": %d, "ResponseTime": %5.2f, "Message": "%s"}' % (url,response.status,elapsed,message))
            # print('{"URL": "%s", "StatusCode": %d, "ResponseTime": %5.2f, "Message": "%s"}' % (url,response.status,elapsed,message))
            await update_state_machine(url,response.status,message)
            return resp
    except aiohttp.InvalidURL as e:
        logger.critical('{"URL": "%s", "StatusCode": %d, "ResponseTime": %5.2f, "Message": "%s"}' % (url,1,0.0,"Error::InvalidURL"))
        await update_state_machine(url,1,"Error::InvalidURL")
        pass
    except asyncio.TimeoutError as e:
        logger.critical('{"URL": "%s", "StatusCode": %d, "ResponseTime": %5.2f, "Message": "%s"}' % (url,1,0.0,"Error::client timeout after waiting for 60 secs, Increase client timeout if this is excepted behaviour"))
        await update_state_machine(url,1,"Error::client timeout after waiting for 60 secs, Increase client timeout if this is excepted behaviour")
        pass
    except aiohttp.ClientError as e:
        logger.critical('{"URL": "%s", "StatusCode": %d, "ResponseTime": %5.2f, "Message": "%s"}' % (url,1,0.0,e))
        await update_state_machine(url,1,str(e))
        pass
async def update_state_machine(url, status_code, message):
    """Record *message* as the current state for *url*; notify Slack on transitions.

    A notification is sent when a known URL changes state, or when a new URL
    first appears in an unhealthy state.  Healthy first sightings are stored
    silently.
    """
    if url in state_machine:
        if state_machine[url] == message:
            return  # no state transition, nothing to do
        slack_notification(url, status_code, message)
        state_machine[url] = message
    else:
        # First time we see this URL: alert only if it is unhealthy, then store.
        if message != "StatusOK":
            state_machine[url] = message
            slack_notification(url, status_code, message)
        else:
            state_machine[url] = message
def ping_handler(event, context):
    """AWS Lambda entry point.

    Loads the monitor configuration from the state-machine S3 bucket and
    pings every configured URL.  *event* and *context* are the standard
    Lambda arguments and are unused here.
    """
    config = yaml.load_s3('config.yaml')
    # Identity test (`is not None`) instead of `!= None` -- PEP 8 idiom.
    if config['monitor']['urls'] is not None:
        ping_urls(config['monitor']['urls'])
json.dump_s3(state_machine,'state_machine') | Servana/aws-lambda-pinger | pinger_template.py | pinger_template.py | py | 6,411 | python | en | code | 4 | github-code | 13 |
29651880983 | from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'ManualInstallCruft',
'MarkLangpacksManuallyInstalledPlugin',
]
import logging
from janitor.plugincore.cruft import Cruft
from janitor.plugincore.i18n import setup_gettext
from janitor.plugincore.plugin import Plugin
_ = setup_gettext()
class ManualInstallCruft(Cruft):
    """Cruft item for a package that should be re-marked as manually installed."""
    def __init__(self, pkg):
        # `pkg` is a python-apt package object.
        self.pkg = pkg
    def get_prefix(self):
        return 'mark-manually-installed'
    def get_shortname(self):
        return self.pkg.name
    def get_description(self):
        return (_('%s needs to be marked as manually installed.') %
                self.pkg.name)
    def cleanup(self):
        # Keep the package installed and mark it as explicitly requested,
        # which clears apt's auto-installed flag.
        self.pkg.markKeep()
        self.pkg.markInstall()
class MarkLangpacksManuallyInstalledPlugin(Plugin):
    """Plugin to mark language packs as manually installed.

    This works around quirks in the hardy->intrepid upgrade.
    """
    def __init__(self):
        # Only run during the hardy post-dist-upgrade cache stage.
        self.condition = ['from_hardyPostDistUpgradeCache']
    def get_cruft(self):
        # language-support-* changed its dependencies from "recommends" to
        # "suggests" for language-pack-* - this means that apt will think they
        # are now auto-removalable if they got installed as a dep of
        # language-support-* - we fix this here
        # NOTE: reaches into private apt internals (_depcache/_pkg) to read
        # the auto-installed flag.
        cache = self.app.apt_cache
        for pkg in cache:
            if (pkg.name.startswith('language-pack-') and
                    not pkg.name.endswith('-base') and
                    cache._depcache.IsAutoInstalled(pkg._pkg) and
                    pkg.is_installed):
                # Then...
                logging.debug("setting '%s' to manual installed" % pkg.name)
                yield ManualInstallCruft(pkg)
| GalliumOS/update-manager | janitor/plugincore/plugins/langpack_manual_plugin.py | langpack_manual_plugin.py | py | 1,778 | python | en | code | 4 | github-code | 13 |
21891674834 | import grokpy
import unittest
from grok_test_case import GrokTestCase
from grokpy.exceptions import GrokError
class DataSourceFieldTestCase(GrokTestCase):
  """Unit tests for grokpy.DataSourceField: setters, validation, and getSpec()."""
  def setUp(self):
    self.f = grokpy.DataSourceField()
  def testGoodInstantiation(self):
    '''
    Basic object instantiation
    '''
    # Instantiate the Grok object
    f = grokpy.DataSourceField()
  def testIncompleteSettingsFail(self):
    '''
    If we haven't set all required values getSpec should raise an error
    '''
    self.assertRaises(GrokError, self.f.getSpec)
  def testSetName(self):
    '''
    Setting a name should work
    '''
    # Starts empty
    self.assertFalse(self.f.name)
    # Setting a very long name fails
    longName = ('This is a really long name, in fact one might say too long '
                'for something as simple as a field name. Who would have a '
                'name like this? Really it is insane. OMG this really has '
                'to be longer? Is that something that needs to be blah blah '
                'mumpty mumpty nurple nurple ting ting bang zap foo')
    self.assertRaises(GrokError, self.f.setName, longName)
    # Good set works
    name = 'foo'
    self.f.setName(name)
    self.assertEqual(self.f.name, name)
  def testSetType(self):
    '''
    Setting a type should work
    '''
    # Starts empty
    self.assertFalse(self.f.type)
    # Bad set fails
    self.assertRaises(GrokError, self.f.setType, 'foo')
    # Good set works
    dataType = grokpy.DataType.SCALAR
    self.f.setType(dataType)
    self.assertEqual(self.f.type, dataType)
  def testSetFlag(self):
    '''
    Setting a flag should work
    '''
    # Starts empty
    self.assertFalse(self.f.flag)
    # Bad set fails
    self.assertRaises(GrokError, self.f.setFlag, 'foo')
    # Good set succeeds
    flag = grokpy.DataFlag.TIMESTAMP
    self.f.setFlag(flag)
    self.assertEqual(self.f.flag, flag)
  def testSetAggregationFunction(self):
    '''
    Setting a function should work and catch various type mismatches
    '''
    # Test independance yay!
    f = grokpy.DataSourceField()
    # Starts empty
    self.assertFalse(f.aggregationFunction)
    # Early set fails
    self.assertRaises(GrokError,
                      f.setAggregationFunction, grokpy.AggregationFunction.SUM)
    # Set a valid type
    f.setType(grokpy.DataType.CATEGORY)
    # Mismatch with field type set fails
    self.assertRaises(GrokError,
                      f.setAggregationFunction, grokpy.AggregationFunction.SUM)
    # Good set succeeds
    aggFunc = grokpy.AggregationFunction.FIRST
    f.setAggregationFunction(aggFunc)
    self.assertEqual(f.aggregationFunction, aggFunc)
  def testSetMin(self):
    '''
    Setting min should work
    '''
    # Test independance yay!
    f = grokpy.DataSourceField()
    f.setType(grokpy.DataType.SCALAR)
    # Starts empty
    self.assertFalse(f.minValue)
    # Bad set fails
    self.assertRaises(GrokError, f.setMin, 'foo')
    # Good set works
    minValue = -33
    f.setMin(minValue)
    self.assertEqual(f.minValue, minValue)
  def testSetMax(self):
    '''
    Setting a max value should work
    '''
    # Test independance yay!
    f = grokpy.DataSourceField()
    f.setType(grokpy.DataType.SCALAR)
    # Starts empty
    self.assertFalse(f.maxValue)
    # Bad set fails
    self.assertRaises(GrokError, f.setMax, 'foo')
    # Good set works
    maxValue = 33.00009
    f.setMax(maxValue)
    self.assertEqual(f.maxValue, maxValue)
  def testGetSpec(self):
    '''
    Check the return value of a fully populated field spec
    '''
    # Test independance yay!
    f = grokpy.DataSourceField()
    name = 'Days of Joy'
    f.setName(name)
    dataType = grokpy.DataType.SCALAR
    f.setType(dataType)
    aggFunc = grokpy.AggregationFunction.SUM
    f.setAggregationFunction(aggFunc)
    minValue = -444
    maxValue = 444.2
    f.setMin(minValue)
    f.setMax(maxValue)
    # Expected serialized spec dict built from the values set above.
    expectedDict = {'max': maxValue,
                    'dataFormat': {'dataType': dataType},
                    'name': name,
                    'min': minValue,
                    'aggregationFunction': aggFunc}
    self.assertEqual(f.getSpec(), expectedDict)
if __name__ == '__main__':
  # Set debug = 1 to run a single test instead of the whole module.
  debug = 0
  if debug:
    single = unittest.TestSuite()
    # Fixed: this previously referenced ClientTestCase('testBadConnection'),
    # which is neither defined nor imported in this module and raised
    # NameError whenever debug was enabled.
    single.addTest(DataSourceFieldTestCase('testGoodInstantiation'))
    unittest.TextTestRunner().run(single)
  else:
    unittest.main()
| Komeil1978/grok-py | tests/unit/test_data_source_field.py | test_data_source_field.py | py | 4,424 | python | en | code | 0 | github-code | 13 |
30828672504 | import math, sys
import maya.api.OpenMaya as om
import maya.api.OpenMayaUI as omui
import maya.api.OpenMayaAnim as oma
import maya.api.OpenMayaRender as omr
from maya.OpenMaya import MGlobal
import pymel.core as pm
from zMayaTools.menus import Menu
from zMayaTools import maya_helpers, node_caching
# This is insane. There are two Python APIs in Maya, and both of them are missing lots of
# stuff, and you can't mix them except in specific careful ways.
import maya.OpenMayaRender as v1omr
# Legacy (v1) OpenGL function table, used for hit-testing in zRigHandleShapeUI.
glRenderer = v1omr.MHardwareRenderer.theRenderer()
glFT = glRenderer.glFunctionTable()
# Presence of this function tells Maya the plug-in uses the Python API 2.0.
def maya_useNewAPI(): pass
# Be careful when changing the order of these shapes. Their index is the value of the .shape
# enum, so this affects the file format.
def _make_pyramid():
    """A unit pyramid: 1x1 quad base on the Y=0 plane with an apex at Y=1."""
    apex = (+0.0, 1, -0.0)
    base = [
        (-0.5, 0, +0.5),
        (+0.5, 0, +0.5),
        (+0.5, 0, -0.5),
        (-0.5, 0, -0.5),
    ]
    # Four side triangles, each joining one base edge to the apex.  The edge
    # index pairs reproduce the original hand-written vertex order exactly.
    side_edges = [(0, 3), (1, 2), (3, 2), (1, 0)]
    tris = []
    for a, b in side_edges:
        tris.extend((base[a], base[b], apex))
    return {
        'quads': base,
        omr.MUIDrawManager.kTriangles: tris,
    }
def _make_ball():
    """A faceted "ball": axis-aligned square faces plus one corner triangle per octant."""
    points = []
    p1 = (1.0) / 2.0
    p2 = (0.5) / 2.0
    # Square faces, generated per axis sign; the same vertex pattern is
    # repeated for each axis permutation (x, y, z).
    for x in (1,-1):
        points.append((x*p1, -p2, -p2))
        points.append((x*p1, +p2, -p2))
        points.append((x*p1, +p2, +p2))
        points.append((x*p1, -p2, +p2))
        points.append((-p2, x*p1, -p2))
        points.append((+p2, x*p1, -p2))
        points.append((+p2, x*p1, +p2))
        points.append((-p2, x*p1, +p2))
        points.append((-p2, -p2, x*p1))
        points.append((+p2, -p2, x*p1))
        points.append((+p2, +p2, x*p1))
        points.append((-p2, +p2, x*p1))
        # Bevel faces between adjacent large faces.
        for y in (1,-1):
            points.append((-p2, x*+p2, y*+p1))
            points.append((+p2, x*+p2, y*+p1))
            points.append((+p2, x*+p1, y*+p2))
            points.append((-p2, x*+p1, y*+p2))
            points.append((x*+p2, -p2, y*+p1))
            points.append((x*+p2, +p2, y*+p1))
            points.append((x*+p1, +p2, y*+p2))
            points.append((x*+p1, -p2, y*+p2))
            points.append((x*+p2, y*+p1, -p2))
            points.append((x*+p2, y*+p1, +p2))
            points.append((x*+p1, y*+p2, +p2))
            points.append((x*+p1, y*+p2, -p2))
    # One corner triangle per octant (8 total).
    tris = []
    for x in (1, -1):
        for y in (1, -1):
            for z in (1, -1):
                tris.append((x*-p1, y*-p2, z*p2))
                tris.append((x*-p2, y*-p1, z*p2))
                tris.append((x*-p2, y*-p2, z*p1))
    return {
        'quads': points,
        omr.MUIDrawManager.kTriangles: tris,
    }
# A slightly larger shape that can sit around the others. This is useful for things like
# pivots.
def _make_orbit():
def make_box(x, y, z):
s = 1/6.0
box = [
(-s, -s, +s), # top
(+s, -s, +s),
(+s, -s, -s),
(-s, -s, -s),
(-s, +s, +s), # bottom
(+s, +s, +s),
(+s, +s, -s),
(-s, +s, -s),
(-s, -s, +s), # left
(-s, +s, +s),
(-s, +s, -s),
(-s, -s, -s),
(+s, -s, +s), # right
(+s, +s, +s),
(+s, +s, -s),
(+s, -s, -s),
(-s, +s, +s), # front
(+s, +s, +s),
(+s, -s, +s),
(-s, -s, +s),
(-s, +s, -s), # back
(+s, +s, -s),
(+s, -s, -s),
(-s, -s, -s),
]
result = []
for vx, vy, vz in box:
result.append((vx + x, vy + y, vz + z))
return result
boxes = []
boxes.extend(make_box(-1, 0, 0))
boxes.extend(make_box(+1, 0, 0))
boxes.extend(make_box( 0, 0,+1))
boxes.extend(make_box( 0, 0,-1))
return {
'quads': boxes
}
# Registry of built-in handle shapes.  The list index is the value stored in
# the node's .shape enum attribute (part of the file format), so only append
# new entries -- never reorder.
shapes = [{
    'name': 'Ball',
    'geometry': _make_ball(),
}, {
    'name': 'Pyramid',
    'geometry': _make_pyramid(),
}, {
    'name': 'Pivot',
    'geometry': _make_orbit(),
}]
def _convert_shape(shape):
    """Prepare a shape dict for drawing, in place: add wireframe edge lines,
    triangulate quads, and convert every point list to an MPointArray."""
    geometry = shape['geometry']
    lines = geometry.setdefault(omr.MUIDrawManager.kLines, [])
    # Add edge lines for quads.
    if 'quads' in geometry:
        quads = geometry['quads']
        for i in range(0, len(quads), 4):
            lines.append(quads[i+0])
            lines.append(quads[i+1])
            lines.append(quads[i+1])
            lines.append(quads[i+2])
            lines.append(quads[i+2])
            lines.append(quads[i+3])
            lines.append(quads[i+3])
            lines.append(quads[i+0])
    # Add edge lines for tris.
    if omr.MUIDrawManager.kTriangles in geometry:
        tris = geometry[omr.MUIDrawManager.kTriangles]
        for i in range(0, len(tris), 3):
            lines.append(tris[i+0])
            lines.append(tris[i+1])
            lines.append(tris[i+1])
            lines.append(tris[i+2])
            lines.append(tris[i+2])
            lines.append(tris[i+0])
    # Convert quads to tris (the draw manager only consumes triangle lists).
    if 'quads' in geometry:
        tris = geometry.setdefault(omr.MUIDrawManager.kTriangles, [])
        quads = geometry.pop('quads')
        for i in range(0, len(quads), 4):
            tris.append(quads[i+0])
            tris.append(quads[i+1])
            tris.append(quads[i+2])
            tris.append(quads[i+2])
            tris.append(quads[i+3])
            tris.append(quads[i+0])
    # Pack every primitive list into an MPointArray.
    for key, data in geometry.items():
        array = om.MPointArray()
        for point in data:
            array.append(om.MPoint(*point))
        geometry[key] = array
    return shape
# Convert all built-in shapes to their renderable form once at module load.
shapes = [_convert_shape(shape) for shape in shapes]
def _getCustomShape(node):
    # Return the shape connected to customMeshAttr.
    # Falls back to the first built-in shape when no mesh is connected.
    depNode = om.MFnDependencyNode(node)
    obj = depNode.userNode()
    dataBlock = obj.forceCache()
    meshHandle = dataBlock.inputValue(zRigHandle.customMeshAttr)
    try:
        it = om.MItMeshPolygon(meshHandle.asMesh())
    except RuntimeError:
        # We'll get "kInvalidParameter: Argument is a NULL pointer" if there's no
        # mesh connection. How do we check this?
        return shapes[0]['geometry']
    tris = []
    lines = []
    # NOTE(review): the loop target `face` from iterate_mesh is immediately
    # overwritten below -- the iterator is used only to advance `it`.
    for face in maya_helpers.iterate_mesh(it):
        face = it.getPoints(om.MSpace.kObject)
        # The data from the iterator doesn't stay valid, so make a copy of the point.
        face = [om.MPoint(v) for v in face]
        if len(face) == 3:
            tris.extend(face)
            lines.extend((face[0], face[1], face[1], face[2], face[2], face[0]))
        elif len(face) == 4:
            tris.extend((face[0], face[1], face[2], face[2], face[3], face[0]))
            lines.extend((face[0], face[1], face[1], face[2], face[2], face[3], face[3], face[0]))
        else:
            # We don't currently support meshes with more than four faces. We could
            # triangulate with MFnMesh.polyTriangulate, but I'm not sure it's worth
            # the bother.
            pass
    return {
        omr.MUIDrawManager.kTriangles: tris,
        omr.MUIDrawManager.kLines: lines,
    }
def getShapeBounds(shape):
    """Return an MBoundingBox enclosing every point of every primitive list in *shape*."""
    boundingBox = om.MBoundingBox()
    for item in shape.values():
        for point in item:
            boundingBox.expand(point)
    return boundingBox
def _transformShape(shape, transform):
    """Return a copy of *shape* with every point multiplied by *transform*."""
    return {
        key: om.MPointArray([point * transform for point in points])
        for key, points in shape.items()
    }
class zRigHandle(om.MPxSurfaceShape):
    """Custom shape node drawing a selectable rig-control handle.

    The geometry is one of the built-in `shapes` (chosen by the .shape enum)
    or a mesh connected to .inCustomMesh, transformed by .transform plus the
    local translate/rotate/scale attributes.  Computed geometry is cached in
    `self.shape` / `self.transformedShape` and invalidated from
    setDependentsDirty.
    """
    id = om.MTypeId(0x124743)
    drawDbClassification = "drawdb/geometry/zRigHandle"
    drawRegistrantId = "zRigHandlePlugin"
    def __init__(self):
        om.MPxSurfaceShape.__init__(self)
    @classmethod
    def creator(cls):
        return cls()
    @classmethod
    def initialize(cls):
        # Declare all node attributes (shape choice, custom mesh input,
        # transform, local TRS, colors/alphas, and x-ray toggle).
        nAttr = om.MFnNumericAttribute()
        enumAttr = om.MFnEnumAttribute()
        matAttr = om.MFnMatrixAttribute()
        uAttr = om.MFnUnitAttribute()
        typedAttr = om.MFnTypedAttribute()
        cls.shapeAttr = enumAttr.create('shape', 'sh', 0)
        enumAttr.addField('Custom', -1)
        for idx, shape in enumerate(shapes):
            enumAttr.addField(shape['name'], idx)
        enumAttr.channelBox = True
        cls.addAttribute(cls.shapeAttr)
        cls.customMeshAttr = typedAttr.create("inCustomMesh", "icm", om.MFnMeshData.kMesh)
        typedAttr.storable = False
        # The kReset constant is missing from the Python 2.0 API.
        typedAttr.disconnectBehavior = 1
        cls.addAttribute(cls.customMeshAttr)
        cls.transformAttr = matAttr.create('transform', 't', om.MFnMatrixAttribute.kFloat)
        matAttr.keyable = False
        cls.addAttribute(cls.transformAttr)
        localRotateX = uAttr.create('localRotateX', 'lrx', om.MFnUnitAttribute.kAngle, 0.0)
        localRotateY = uAttr.create('localRotateY', 'lry', om.MFnUnitAttribute.kAngle, 0.0)
        localRotateZ = uAttr.create('localRotateZ', 'lrz', om.MFnUnitAttribute.kAngle, 0.0)
        cls.localRotateAttr = nAttr.create('localRotate', 'lr', localRotateX, localRotateY, localRotateZ)
        nAttr.channelBox = True
        nAttr.keyable = False
        cls.addAttribute(cls.localRotateAttr)
        cls.localTranslateAttr = nAttr.createPoint('localPosition', 'lp')
        nAttr.channelBox = True
        nAttr.keyable = False
        cls.addAttribute(cls.localTranslateAttr)
        localScaleX = nAttr.create('localScaleX', 'lsx', om.MFnNumericData.kFloat, 1)
        localScaleY = nAttr.create('localScaleY', 'lsy', om.MFnNumericData.kFloat, 1)
        localScaleZ = nAttr.create('localScaleZ', 'lsz', om.MFnNumericData.kFloat, 1)
        cls.localScaleAttr = nAttr.create('localScale', 'ls', localScaleX, localScaleY, localScaleZ)
        nAttr.channelBox = True
        nAttr.keyable = False
        cls.addAttribute(cls.localScaleAttr)
        cls.colorAttr = nAttr.createColor('color', 'dc')
        nAttr.default = (.38,0,0.02)
        cls.addAttribute(cls.colorAttr)
        cls.alphaAttr = nAttr.create('alpha', 'a', om.MFnNumericData.kFloat, 0.333)
        nAttr.setSoftMin(0)
        nAttr.setSoftMax(1)
        nAttr.keyable = False
        cls.addAttribute(cls.alphaAttr)
        cls.borderColorAttr = nAttr.createColor('borderColor', 'bc')
        nAttr.default = (-1,-1,-1)
        cls.addAttribute(cls.borderColorAttr)
        cls.borderAlphaAttr = nAttr.create('borderAlpha', 'ba', om.MFnNumericData.kFloat, 1)
        nAttr.setSoftMin(0)
        nAttr.setSoftMax(1)
        nAttr.keyable = False
        cls.addAttribute(cls.borderAlphaAttr)
        cls.xrayAttr = nAttr.create('xray', 'xr', om.MFnNumericData.kBoolean, True)
        nAttr.keyable = False
        nAttr.channelBox = True
        cls.addAttribute(cls.xrayAttr)
    def postConstructor(self):
        self.isRenderable = True
        depNode = om.MFnDependencyNode(self.thisMObject())
        depNode.setName("rigHandleShape#");
    def setDependentsDirty(self, plug, affectedPlugs):
        # Invalidate caches and request a redraw when relevant plugs change.
        if plug.isChild:
            plug = plug.parent()
        if maya_helpers.plug_in_list(plug, self.transformAttr, self.localTranslateAttr, self.localRotateAttr, self.localScaleAttr):
            # Discard our transformed shape.
            if hasattr(self, 'transformedShape'): del self.transformedShape
        if maya_helpers.plug_in_list(plug,
                self.transformAttr, self.shapeAttr,
                self.localTranslateAttr, self.localRotateAttr, self.localScaleAttr,
                self.colorAttr, self.alphaAttr, self.borderColorAttr, self.borderAlphaAttr,
                self.xrayAttr, self.customMeshAttr):
            self.childChanged(self.kBoundingBoxChanged)
            omr.MRenderer.setGeometryDrawDirty(self.thisMObject(), True)
        if maya_helpers.plug_in_list(plug, self.shapeAttr, self.customMeshAttr):
            # Discard our shape cache. We can't set the new one now, since the new
            # plug value hasn't actually been set yet, so we'll do it on the next
            # render.
            if hasattr(self, 'transformedShape'): del self.transformedShape
            if hasattr(self, 'shape'): del self.shape
            self.childChanged(self.kBoundingBoxChanged)
        return super(zRigHandle, self).setDependentsDirty(plug, affectedPlugs)
    def getShapeSelectionMask(self):
        # Set both kSelectMeshes, so tumble on pivot sees the object, and kSelectJoints, so we're
        # higher priority for selection than meshes that are in front of us.  Xray alone won't do
        # this.
        mask = om.MSelectionMask()
        # mask.addMask(om.MSelectionMask.kSelectMeshes)
        mask.addMask(om.MSelectionMask.kSelectJoints)
        return mask
    def isBounded(self):
        return True
    def getShapeIdx(self):
        # Current value of the .shape enum (-1 means "Custom").
        return om.MPlug(self.thisMObject(), self.shapeAttr).asShort()
    def getShape(self):
        """Return the cached, locally-transformed shape, rebuilding it if stale."""
        # If the shape isn't cached, cache it now.
        if not hasattr(self, 'shape'):
            self.shape = self._getShapeFromPlug()
        if not hasattr(self, 'transformedShape'):
            shape = self.shape
            transform = self._getLocalTransform()
            self.transformedShape = _transformShape(shape, transform)
        return self.transformedShape
    def _getShapeFromPlug(self):
        # Pick the custom mesh (-1) or one of the built-in shapes.
        idx = self.getShapeIdx()
        if idx == -1:
            shape = _getCustomShape(self.thisMObject())
        else:
            shape = shapes[idx]['geometry']
        return shape
    def _getLocalTransform(self):
        """Compose .transform with the local translate/rotate/scale attributes."""
        node = self.thisMObject()
        transformPlug = om.MPlug(node, self.transformAttr)
        transform = om.MFnMatrixData(transformPlug.asMObject()).matrix()
        mat = om.MTransformationMatrix(transform)
        # Apply local translation.
        localTranslatePlug = om.MPlug(node, self.localTranslateAttr)
        localTranslation = om.MVector(*[localTranslatePlug.child(idx).asFloat() for idx in range(3)])
        mat.translateBy(localTranslation, om.MSpace.kObject)
        # Apply local rotation.
        localRotatePlug = om.MPlug(node, self.localRotateAttr)
        localRotatePlugs = [localRotatePlug.child(idx) for idx in range(3)]
        localRotate = om.MVector(*[localRotatePlugs[idx].asMAngle().asRadians() for idx in range(3)])
        mat.rotateBy(om.MEulerRotation(localRotate), om.MSpace.kObject)
        # Apply local scale.
        scalePlug = om.MPlug(node, self.localScaleAttr)
        scale = om.MFnNumericData(scalePlug.asMObject()).getData()
        mat.scaleBy(scale, om.MSpace.kObject)
        return mat.asMatrix()
    @property
    def xray(self):
        return om.MPlug(self.thisMObject(), self.xrayAttr).asBool()
    def boundingBox(self):
        return getShapeBounds(self.getShape())
def _hitTestShape(view, shape):
    # Hit test shape within view.
    # Uses legacy (v1) OpenGL selection: draw each primitive list in GL
    # select mode and check whether anything registered a hit.
    # NOTE(review): every list, including kLines, is submitted as
    # MGL_TRIANGLES -- confirm this is intended.
    for itemType, data in shape.items():
        view.beginSelect()
        glFT.glBegin(v1omr.MGL_TRIANGLES)
        for v in data:
            glFT.glVertex3f(v.x, v.y, v.z)
        glFT.glEnd()
        # Check the hit test.
        if view.endSelect() > 0:
            return True
    return False
# This object isn't created in 2016.5 VP2.
class zRigHandleShapeUI(omui.MPxSurfaceShapeUI):
    """Legacy-viewport (VP1) UI for zRigHandle: GL-based selection hit testing."""
    def __init__(self):
        omui.MPxSurfaceShapeUI.__init__(self)
    @staticmethod
    def creator():
        return zRigHandleShapeUI()
    def select(self, selectInfo, selectionList, worldSpaceSelectPts):
        """Hit-test the handle geometry and add it to the active selection."""
        shape = self.surfaceShape().getShape()
        # Hit test the selection against the shape.
        if not _hitTestShape(selectInfo.view(), shape):
            return False
        item = om.MSelectionList()
        item.add(selectInfo.selectPath())
        # Get the world space position of the node. We'll set the position of the selection here,
        # so the camera focuses on it.
        mat = item.getDagPath(0).inclusiveMatrix()
        transformation = om.MTransformationMatrix(mat)
        pos = transformation.translation(om.MSpace.kWorld)
        priorityMask = om.MSelectionMask(om.MSelectionMask.kSelectJoints)
        selectInfo.addSelection(item, om.MPoint(pos), selectionList, worldSpaceSelectPts, priorityMask, False)
        return True
def isPathSelected(objPath):
    """Return True if the given DAG path or its parent transform is in the
    active selection list."""
    selection = om.MGlobal.getActiveSelectionList()
    if selection.hasItem(objPath):
        return True
    # The user usually selects the transform above the shape, so check it too.
    parentPath = om.MDagPath(objPath)
    parentPath.pop()
    return selection.hasItem(parentPath)
class zRigHandleDrawOverride(omr.MPxDrawOverride):
    """Viewport 2.0 draw override: draws the handle's cached shape with the
    configured fill and border colors, optionally in X-ray mode."""
    @staticmethod
    def creator(obj):
        return zRigHandleDrawOverride(obj)
    def __init__(self, obj):
        super(zRigHandleDrawOverride, self).__init__(obj, None, False)
    def supportedDrawAPIs(self):
        return omr.MRenderer.kOpenGL | omr.MRenderer.kDirectX11 | omr.MRenderer.kOpenGLCoreProfile
    def isBounded(self, objPath, cameraPath):
        return True
    def boundingBox(self, objPath, cameraPath):
        depNode = om.MFnDependencyNode(objPath.node())
        obj = depNode.userNode()
        return obj.boundingBox()
    def disableInternalBoundingBoxDraw(self):
        return True
    def prepareForDraw(self, objPath, cameraPath, frameContext, oldData):
        """Cache the colors, X-ray flag and geometry needed for this draw pass."""
        depNode = om.MFnDependencyNode(objPath.node())
        obj = depNode.userNode()
        isSelected = isPathSelected(objPath)
        self.xray = obj.xray
        plug = om.MPlug(objPath.node(), zRigHandle.colorAttr)
        self.color = om.MColor(om.MFnNumericData(plug.asMObject()).getData())
        alpha = om.MPlug(objPath.node(), zRigHandle.alphaAttr).asFloat()
        self.color.a = alpha
        if isSelected:
            # Selected handles use Maya's standard wireframe highlight color.
            self.borderColor = omr.MGeometryUtilities.wireframeColor(objPath)
        else:
            plug = om.MPlug(objPath.node(), zRigHandle.borderColorAttr)
            self.borderColor = om.MColor(om.MFnNumericData(plug.asMObject()).getData())
            # If no color has been set and we're on the default of (-1,-1,-1), use the main color,
            # so in the common case where you want to use the same color you don't have to set both.
            if self.borderColor.r == -1 and self.borderColor.g == -1 and self.borderColor.b == -1:
                self.borderColor = om.MColor(self.color)
            self.borderColor.a = om.MPlug(objPath.node(), zRigHandle.borderAlphaAttr).asFloat()
        self.shape = obj.getShape()
    def hasUIDrawables(self):
        return True
    def addUIDrawables(self, objPath, drawManager, frameContext, data):
        """Draw the filled geometry, then the border lines, inside an optional
        X-ray block.  NOTE(review): the loop variable below shadows the `data`
        parameter; harmless here since the parameter is unused."""
        if self.xray:
            drawManager.beginDrawInXray()
        drawManager.beginDrawable()
        for itemType, data in self.shape.items():
            if itemType == omr.MUIDrawManager.kLines:
                # X-ray only
                continue
            drawManager.setColor(self.color)
            drawManager.mesh(itemType, data)
        # Border lines are drawn last, in the border color.
        lines = self.shape.get(omr.MUIDrawManager.kLines)
        if lines:
            drawManager.setColor(self.borderColor)
            drawManager.mesh(omr.MUIDrawManager.kLines, lines)
        drawManager.endDrawable()
        if self.xray:
            drawManager.endDrawInXray()
class PluginMenu(Menu):
    """Adds a "Rig Handle" entry to Maya's Create menu, right after Locator."""
    def _add_menu_items(self):
        super(PluginMenu, self)._add_menu_items()
        # Add "Rig Handle" after "Locator" in Create > Construction Aids.
        def create(arg):
            # Create the node and select its transform so it can be moved immediately.
            node = pm.createNode('zRigHandle')
            pm.select(node.getTransform())
        # Make sure the Create menu has been built before querying its items.
        pm.mel.eval('ModCreateMenu "mainCreateMenu"')
        menu = 'mainCreateMenu'
        menu_items = pm.menu(menu, q=True, ia=True)
        idx = self.find_item_with_command(menu_items, 'CreateLocator')
        self.add_menu_item('zRigHandle', label="Rig Handle", command=create,
                insertAfter=menu_items[idx], parent=menu,
                image='zRigHandle.png',
                annotation='Create a viewport rig handle',
                top_level_path='Rigging|Rig_Handle')
# Shared menu instance used by the plugin (un)initialize entry points below.
menu = PluginMenu()
def initializePlugin(obj):
    """Maya entry point: register the shape node, its VP2 draw override,
    the menu items, node caching and the display filter."""
    plugin = om.MFnPlugin(obj)
    plugin.registerShape('zRigHandle', zRigHandle.id, zRigHandle.creator, zRigHandle.initialize, zRigHandleShapeUI.creator, zRigHandle.drawDbClassification)
    omr.MDrawRegistry.registerDrawOverrideCreator(zRigHandle.drawDbClassification, zRigHandle.drawRegistrantId, zRigHandleDrawOverride.creator)
    menu.add_menu_items()
    node_caching.enable_caching_for_node_name('zRigHandle')
    pm.pluginDisplayFilter('zRigHandle', classification=zRigHandle.drawDbClassification, register=True, label='Rig Handles')
def uninitializePlugin(obj):
    """Maya entry point: undo everything initializePlugin registered."""
    plugin = om.MFnPlugin(obj)
    omr.MDrawRegistry.deregisterDrawOverrideCreator(zRigHandle.drawDbClassification, zRigHandle.drawRegistrantId)
    plugin.deregisterNode(zRigHandle.id)
    menu.remove_menu_items()
    node_caching.disable_caching_for_node_name('zRigHandle')
    pm.pluginDisplayFilter('zRigHandle', deregister=True)
| zewt/zMayaTools | plug-ins/zRigHandle.py | zRigHandle.py | py | 21,095 | python | en | code | 102 | github-code | 13 |
19478327688 | import string, random, os, json, msvcrt, time, encoder, decoder
# Hehecoin Miner
# Ryan Earll
# ryangearll@gmail.com
# @ryanxea
VERSION = "1.5"
DATEUPDATED = "6/22/2017"
os.system('color a')
def getKey():
    """Read and return the entire contents of key.txt as a string."""
    with open("key.txt", "r") as key_file:
        contents = key_file.read()
    return contents
def clearScreen(): # Clears the screen and prints the header at the top
    # NOTE(review): 'cls' is Windows-only; the script also uses msvcrt, so it
    # is Windows-specific by design.
    os.system('cls')
    print("Hehecoin Miner v" + VERSION + ", Last Updated " + DATEUPDATED + "\n\n")
def exitProgram(d = None):
    """Prompt the user before exiting.  If a wallet dict is given, it is
    encrypted and saved to wallet.txt first."""
    if d is not None:
        updateWallet(d)
    input("\nPress enter to exit...\n")
    quit()
def updateWallet(d):
    """Encrypt the wallet dict with the key and write it to wallet.txt."""
    encrypted = encoder.encode(getKey(), json.dumps(d))
    with open("wallet.txt", "w") as wallet_file:
        wallet_file.write(encrypted)
def getWallet():
    """Read wallet.txt, decrypt it with the key, and return the wallet dict."""
    with open("wallet.txt", "r") as wallet_file:
        encrypted = wallet_file.read()
    return json.loads(decoder.decode(getKey(), encrypted))
def mine(): # Starts mining for hehecoin
    """Mine coins until a key is pressed (Windows-only, via msvcrt).

    Each pass generates a random 4096-character string and awards one coin for
    every position that extends a run of four identical characters.  The wallet
    is re-encrypted and saved after every pass.
    """
    clearScreen()
    d = getWallet()
    print("Server handshake verified, initializing mining process...")
    for _ in range(4):
        time.sleep(1)
        print("...")
    print("Beginning mining process. Press any button to stop.\n")
    found = 0
    start = time.time()
    print("Coins found: " + str(found) + "    Current balance: " + str(d["Coins"]))
    while True:
        search = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(4096))
        # Start at 3: the x-1..x-3 lookbehind must never go negative.  The
        # original `range(len(search))` wrapped around via negative indexing
        # and could falsely match "runs" spanning the string boundary.
        for x in range(3, len(search)):
            if search[x]==search[x-1] and search[x]==search[x-2] and search[x]==search[x-3]:
                d["Coins"] += 1
                found += 1
                clearScreen()
                print("Server handshake verified, initializing mining process...\n...\n...\n...\n...\nBeginning mining process. Press any button to stop.\n")
                print("Coins found: " + str(found) + "    Current balance: " + str(d["Coins"]))
        # Persist the balance after each pass so progress survives a crash.
        updateWallet(d)
        end = time.time()
        diff = end - start
        clearScreen()
        print("Server handshake verified, initializing mining process...\n...\n...\n...\n...\nBeginning mining process. Press any button to stop.\n")
        print("Coins found: " + str(found) + "    Current balance: " + str(d["Coins"]))
        print("\n\nElapsed Time: {:0.1f} seconds.".format(diff))
        time.sleep(0.2)
        if msvcrt.kbhit():break
# Start the "main" function
clearScreen()
# Both the encrypted wallet and its key must exist before mining can start.
if not os.path.isfile("wallet.txt"):
    print("No wallet.txt found in current directory, please move it here or use Hehecoin Core to create a new one.")
    exitProgram()
if not os.path.isfile("key.txt"):
    print("\nkey.txt not found in current directory. Please move it here and restart Hehecoin Miner.")
    exitProgram()
# Simple text menu loop: 1 mines, 2 quits; anything else redraws the menu.
while True:
    clearScreen()
    i = input("\n1. Mine\n2. Exit\n")
    if i=="2":quit()
    elif i=="1":mine()
| dxeheh/hehecoin | miner.py | miner.py | py | 3,124 | python | en | code | 0 | github-code | 13 |
27847044124 | from django.shortcuts import render, get_object_or_404
from .models import Book
from .forms import BookForm
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .serializers import BookSerializer
from django.http.response import JsonResponse
# Create your views here.
# API METHODS
@csrf_exempt
def BookAPI(request, id=0):
    """Read-only list endpoint: GET returns every book serialized as JSON.

    The `id` parameter is accepted for URL-conf compatibility but unused.
    Create/update/delete handling previously lived here as commented-out dead
    code and has been removed; any non-GET method gets an explanatory message.
    """
    if request.method == 'GET':
        books = Book.objects.all()
        books_serializer = BookSerializer(books, many=True)
        # safe=False allows a top-level JSON array in the response.
        return JsonResponse(books_serializer.data, safe=False)
    return JsonResponse("Only Get Method is allowed", safe=False)
@csrf_exempt
def BookDetailsAPI(request, id=0):
    """GET a single book by id as JSON, bumping its read_counter as a side effect."""
    if(request.method == 'GET'):
        obj = Book.objects.get(id=id)
        obj.read_counter = obj.read_counter+1
        obj.save()
        # Re-query as values() to get a plain JSON-serializable dict
        # (including the counter value just saved).
        json_obj = Book.objects.filter(id=id).values()
        return JsonResponse(json_obj[0], safe=False)
    return JsonResponse("Try using GET Method", safe=False)
@csrf_exempt
def SortHomePage(request, sort_by):
    """Return every book as JSON, ordered by the given model field name."""
    if request.method != 'GET':
        return JsonResponse("Try using GET Method", safe=False)
    ordered_books = Book.objects.all().order_by(sort_by)
    serializer = BookSerializer(ordered_books, many=True)
    return JsonResponse(serializer.data, safe=False)
def HomePage(request):
    """Render the home page with books sorted by popularity (most-read first)."""
    books_by_popularity = Book.objects.all().order_by('-read_counter')
    return render(request, "books/Home.html", {'object_list': books_by_popularity})
def BookDetails(request, pk):
    """Render one book's detail page, bumping its read_counter (404 if absent)."""
    book = get_object_or_404(Book, id=pk)
    book.read_counter += 1
    book.save()
    return render(request, "books/Book_details.html", {"object": book})
| ruthwik34/BookApp-Backend-Django | books/views.py | views.py | py | 2,833 | python | en | code | 0 | github-code | 13 |
17113947004 | import logging.config
import sys
from os import environ, makedirs, path
from agr_literature_service.lit_processing.data_ingest.utils.file_processing_utils import load_pubmed_resource_basic
from agr_literature_service.lit_processing.utils.sqlalchemy_utils import create_postgres_session
from agr_literature_service.lit_processing.data_ingest.pubmed_ingest.generate_pubmed_nlm_resource import (populate_from_url, populate_nlm_info,
generate_json)
from agr_literature_service.lit_processing.data_ingest.utils.file_processing_utils import save_resource_file
from agr_literature_service.lit_processing.utils.sqlalchemy_utils import sqlalchemy_load_ref_xref
from agr_literature_service.lit_processing.data_ingest.post_resource_to_db import post_resources
from agr_literature_service.api.user import set_global_user_id
from agr_literature_service.lit_processing.utils.tmp_files_utils import init_tmp_dir
logging.basicConfig(level=logging.INFO,
stream=sys.stdout,
format= '%(asctime)s - %(levelname)s - {%(module)s %(funcName)s:%(lineno)d} - %(message)s', # noqa E251
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
init_tmp_dir()
def update_resource_pubmed_nlm(set_user=None):
    """
    download J_Medline file, convert to json, compare to existing resources, post new ones to api and database
    """
    db_session = create_postgres_session(False)
    if set_user:
        # Attribute the changes to this script's name (filename minus .py).
        scriptNm = path.basename(__file__).replace(".py", "")
        set_global_user_id(db_session, scriptNm)
    upload_to_s3 = True
    # Download the NLM resource file and regenerate the JSON snapshot.
    file_data = populate_from_url()
    nlm_info = populate_nlm_info(file_data)
    generate_json(nlm_info, upload_to_s3)
    pubmed_by_nlm, nlm_by_issn = load_pubmed_resource_basic()
    xref_ref, ref_xref_valid, ref_xref_obsolete = sqlalchemy_load_ref_xref('resource')
    # Only resources without an existing NLM cross-reference are created.
    resources_to_create = dict()
    for nlm in pubmed_by_nlm:
        if 'NLM' in xref_ref and nlm in xref_ref['NLM'] and xref_ref['NLM'][nlm] is not None:
            logger.info(f"{nlm} already {xref_ref['NLM'][nlm]}")
        else:
            logger.info(f"create {nlm}")
            resources_to_create[nlm] = pubmed_by_nlm[nlm]
    base_path = environ.get('XML_PATH', "")
    json_storage_path = base_path + 'sanitized_resource_json/'
    if not path.exists(json_storage_path):
        makedirs(json_storage_path)
    # Write the sanitized JSON, then post the new resources to the database.
    save_resource_file(json_storage_path, resources_to_create, 'NLM')
    post_resources(db_session, 'sanitized_resource_json', 'NLM')
    db_session.close()
if __name__ == "__main__":
    """
    process nlm updates from medline to database
    """
    # NOTE(review): the string above is a no-op statement, not a docstring.
    logger.info("start update_resource_pubmed_nlm")
    set_user = 1
    update_resource_pubmed_nlm(set_user)
    logger.info("end update_resource_pubmed_nlm")
| alliance-genome/agr_literature_service | agr_literature_service/lit_processing/data_ingest/pubmed_ingest/pubmed_update_resources_nlm.py | pubmed_update_resources_nlm.py | py | 2,900 | python | en | code | 1 | github-code | 13 |
3342518713 | import sys
sys.path.append("..")
import extend
from ctypes import *
import os
import numpy as np
from subprocess import check_output
import tempfile
import platform
# Compile times.c into a shared library for the current platform, then load it
# with ctypes.  -fPIC is required for shared objects on Linux.
if platform.system()=="Linux":
    check_output("gcc -std=c99 -fPIC -c times.c", shell=True)
    check_output("gcc -shared times.o -o times.so", shell=True)
    times = cdll.LoadLibrary(os.getcwd()+"/times.so")
else:
    check_output("gcc -std=c99 -c times.c", shell=True)
    check_output("gcc -shared times.o -o times.dll", shell=True)
    times = cdll.LoadLibrary(os.getcwd()+"/times.dll")
print(times.multiply(3,3))
# Declare the C signature of trace(int** matrix, int n) -> int.
trace=times.trace
trace.restype=c_int
trace.argtypes=[extend.c_2Darray,c_int]
print(trace(extend.to_c_2Darray(np.random.randint(4,size=[4,4])),4))
# Redirect C-level stdout into a temp file so the output can be read back.
with tempfile.NamedTemporaryFile(mode='r', delete=False) as temp:
    with extend.stdout_redirected(temp.name):
        tr = trace(extend.to_c_2Darray(np.random.randint(4,size=[4,4])),4)
    a=np.loadtxt(temp.name)
print(a)
print(tr)
| szsdk/KSS | test/extend_test.py | extend_test.py | py | 970 | python | en | code | 0 | github-code | 13 |
9985390320 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import codecs
from logging import FileHandler
import logging.config
basedir = os.path.abspath(os.path.dirname(__file__))
logdir = os.path.join(basedir, 'logs')
logini_path = os.path.join(basedir, 'log.ini')
if not os.path.exists(logdir):
os.mkdir(logdir)
class SafeFileHandler(FileHandler):
    """A FileHandler that re-opens its log file with a date suffix whenever the
    day changes, or the dated file disappears (e.g. external rotation), so each
    day's records land in their own file."""

    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        self.suffix = "%Y-%m-%d"       # strftime format appended to the filename
        self.suffix_time = ""          # date suffix currently applied to baseFilename
        self.delay = delay

    def emit(self, record):
        """
        Emit a record, rolling over to a freshly dated file first if needed.
        """
        try:
            if self.check_baseFilename(record):
                self.build_baseFilename()
            FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Was a bare `except:`; Exception keeps the intent (delegate to the
            # logging error handler) without also catching e.g. GeneratorExit.
            self.handleError(record)

    def check_baseFilename(self, record):
        """
        Return 1 if rollover should occur (the date changed, or the current
        dated file is missing), else 0.  *record* is unused but kept so the
        signature mirrors the rollover hooks in logging handlers.
        """
        timeTuple = time.localtime()
        # build_baseFilename() already appends the suffix to baseFilename, so
        # test baseFilename itself.  The original appended the suffix a second
        # time here, so the exists() test always failed and the stream was
        # closed and reopened on every single emit.
        if (self.suffix_time != time.strftime(self.suffix, timeTuple) or not
                os.path.exists(self.baseFilename)):
            return 1
        else:
            return 0

    def build_baseFilename(self):
        """
        do builder; in this case,
        old time stamp is removed from filename and
        a new time stamp is append to the filename
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # remove old suffix
        if self.suffix_time != "":
            index = self.baseFilename.find("."+self.suffix_time)
            if index == -1:
                index = self.baseFilename.rfind(".")
            self.baseFilename = self.baseFilename[:index]
        # add new suffix
        currentTimeTuple = time.localtime()
        self.suffix_time = time.strftime(self.suffix, currentTimeTuple)
        self.baseFilename = self.baseFilename + "." + self.suffix_time
        # After the first rollover, always append rather than truncate.
        self.mode = 'a'
        if not self.delay:
            self.stream = self._open()
# Load handler/formatter wiring from log.ini and grab this app's logger.
logging.config.fileConfig(logini_path)
logger = logging.getLogger('metadataserver')
# taken from https://stackoverflow.com/questions/6234405/logging-uncaught-exceptions-in-python
def handle_exception(exc_type, exc_value, exc_traceback):
    """Log uncaught exceptions through the app logger instead of stderr,
    while letting Ctrl-C behave normally."""
    if issubclass(exc_type, KeyboardInterrupt):
        # Defer to the default hook so the interpreter still exits as usual.
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

sys.excepthook = handle_exception
| abrance/mine | wait/autotest.2020.03.16/log.py | log.py | py | 2,976 | python | en | code | 0 | github-code | 13 |
26881003251 | from lxml import html
import requests
import sys
import re
import urllib
import cgi
menuURLs = []
# Get kanji/katakana pairs by crawling the デジタル大辞泉 (Digital Daijisen)
# dictionary on kotobank.jp.  To crawl another dictionary on the site, change
# "4032" to that dictionary's number of keyword-index pages and replace
# "daijisen" with the dictionary's name.
for i in range(1,4032):
    menuURLs.append("https://kotobank.jp/dictionary/daijisen/"+str(i))
URLs = []
# Crawl every keyword-index page and collect the per-entry URLs.
# (Removed the unused `lastIsSansyou` local and narrowed the bare except.)
for menuURL in menuURLs:
    try:
        page = requests.get(menuURL)
        tree = html.fromstring(page.text)
        for result in tree.xpath('//div/div/div/div/section/ul/li/a'):
            URLs.append(result.attrib['href'])
            # Periodically record progress so a long crawl can be monitored.
            if len(URLs)%1000 == 0:
                with open('status.txt','w') as f:
                    f.write("len(URLs): "+str(len(URLs))+"\n")
    except Exception:
        # A network/parse failure on one index page shouldn't stop the crawl.
        with open('status.txt','w') as f:
            f.write("Error, But Continue\n")
# Visit each entry page, extract "kanji(reading)" from the <title>, and append
# it to dictionary.csv.
entries = 0
for url in URLs:
    try:
        page = requests.get("https://kotobank.jp"+url)
        tree = html.fromstring(page.text)
        for result in tree.xpath('//*'):
            if result.tag == "title":
                lp=result.text.find("(")
                rp=result.text.find(")")
                key = result.text[0:lp]
                with open('dictionary.csv','a+') as f:
                    f.write(key+","+result.text[lp+1:rp]+"\n")
                entries += 1
                # BUG FIX: the original referenced `len(dictionary)` here, but
                # `dictionary` was never defined -- a NameError fired on every
                # page and was silently eaten by the bare except below.  Track
                # an explicit counter instead.
                if entries%1000 == 0:
                    with open('status.txt','w') as f:
                        f.write("entries: "+str(entries)+"\n")
                break
    except Exception:
        with open('status.txt','w') as f:
            f.write("Error, But Continue\n")
class BinarySearchTree(object):
    """Binary search tree: values <= a node live in its left subtree,
    values > it live in its right subtree."""

    def __init__(self, root):
        # root: the top BinarySearchTreeNode of the tree (may be None for empty).
        self.root = root

    def insert(self, data):
        """Insert *data* as a new leaf, preserving the BST invariant
        (duplicates are placed in the left subtree)."""
        bstn = self.root
        while bstn is not None:  # idiom fix: was `!= None`
            # If the data is less than the current nodes data go to the left
            # down the tree because of the BST invariant.
            if data <= bstn.data:
                # If we find a node with an empty left child then put the data
                # into a new bstn and insert it into that spot in the tree.
                if bstn.left is None:
                    bstn.left = BinarySearchTreeNode(data)
                    break
                else:
                    bstn = bstn.left
            else:
                # Mirror image: strictly greater values descend to the right.
                if bstn.right is None:
                    bstn.right = BinarySearchTreeNode(data)
                    break
                else:
                    bstn = bstn.right

    def remove(self, data):
        # TODO: removal is not implemented yet.
        pass

    def is_in_tree(self, data):
        """Return True if *data* is stored somewhere in the tree."""
        bstn = self.root
        while bstn is not None:  # idiom fix: was `!= None`
            if bstn.data == data:
                return True
            # Descend following the same rule insert() uses, so any equal
            # value inserted earlier will be found on the left.
            if data <= bstn.data:
                bstn = bstn.left
            else:
                bstn = bstn.right
        return False
def size(node):
    """Return the number of nodes in the subtree rooted at *node* (0 for None)."""
    if node is None:
        return 0
    return 1 + size(node.left) + size(node.right)
def is_bst(node, min_data, max_data):
    '''
    Check if binary tree is a binary search tree.
    Return True if the tree is a binary search tree.
    Return False if the tree is not a binary search tree.
    '''
    if node is None:
        return True
    # This node's value must lie strictly above min_data and at or below
    # max_data; a None bound means "unbounded" on that side.
    too_small = min_data is not None and node.data <= min_data
    too_large = max_data is not None and node.data > max_data
    if too_small or too_large:
        return False
    # Left values stay <= this node; right values must be > it.
    return (is_bst(node.left, min_data, node.data)
            and is_bst(node.right, node.data, max_data))
class BinarySearchTreeNode(object):
    """A single tree node: a stored value plus left/right child links."""

    def __init__(self, data):
        self.data = data
        # Children start empty; BinarySearchTree.insert() fills them in later.
        self.left = None
        self.right = None
| rwerthman/citc | 4.5/python/binary_tree.py | binary_tree.py | py | 2,430 | python | en | code | 0 | github-code | 13 |
74663821777 | # Nama : Agung Mubarok
# NIM : 0110120196
# Kelas: Sistem Informasi - 05
def jumlah_batas(nums, batas):
    """Return the sum of the values in *nums* that are strictly greater
    than *batas*."""
    return sum(value for value in nums if value > batas)
# ==========================================================
# ==========================================================
def list_nonvokal(s):
    """Return the characters of *s* that are not vowels (either case),
    preserving their original order."""
    vowels = "aiueoAIUEO"
    return [ch for ch in s if ch not in vowels]
# ==========================================================
# ==========================================================
def list_modify(alist, command, position, value=None):
    """Mutate *alist* in place and return it.

    command 'add'    + position 'start'/'end': insert *value* at that end.
    command 'remove' + position 'start'/'end': drop the element at that end.
    Any other combination leaves the list untouched and returns None.
    """
    if command == "add":
        if position == "start":
            alist.insert(0, value)
            return alist
        if position == "end":
            alist.append(value)
            return alist
    elif command == "remove":
        if position == "start":
            alist.pop(0)
            return alist
        if position == "end":
            alist.pop()
            return alist
# ==========================================================
# ==========================================================
# From this line down to the bottom of the file, the code only exercises the
# functions defined above; there is no need to modify this section.
# When run, the program prints example function calls together with the
# expected solutions -- compare each printed result against its solution.
def test():
    """Print each exercise function's result next to its expected answer
    ("solusi") so they can be compared by eye."""
    r = jumlah_batas([8, 7, 6, 10, 1], 5)
    print(f"jumlah_batas([8, 7, 6, 10, 1], 5) = {r} \n(solusi: 31)")
    print()
    r = jumlah_batas([1, -7, -10, 1], -5)
    print(f"jumlah_batas([1, -7, -10, 1], -5) = {r} \n(solusi: 2)")
    print()
    r = list_nonvokal('Halo')
    print(f"list_nonvokal('Halo') = {r} \n(solusi: ['H', 'l'])")
    print()
    r = list_nonvokal('AAAAAooooo')
    print(f"list_nonvokal('AAAAAooooo') = {r} \n(solusi: [])")
    print()
    r = list_nonvokal('Saya cinta programming')
    print(f"list_nonvokal('Saya cinta programming') = {r} \n(solusi: ['S', 'y', ' ', 'c', 'n', 't', ' ', 'p', 'r', 'g', 'r', 'm', 'm', 'n', 'g'])")
    print()
    r = list_modify(['ayam', 'ikan', 'kucing'], 'add', 'start', 'bebek')
    print(f"list_modify(['ayam', 'ikan', 'kucing'], 'add', 'start', 'bebek') = {r} \n(solusi: ['bebek', 'ayam', 'ikan', 'kucing'])")
    print()
    r = list_modify(['ayam', 'ikan', 'kucing'], 'add', 'end', 'bebek')
    print(f"list_modify(['ayam', 'ikan', 'kucing'], 'add', 'end', 'bebek') = {r} \n(solusi: ['ayam', 'ikan', 'kucing', 'bebek'])")
    print()
    r = list_modify(['ayam', 'ikan', 'kucing'], 'remove', 'start')
    print(f"list_modify(['ayam', 'ikan', 'kucing'], 'remove', 'start') = {r} \n(solusi: ['ikan', 'kucing'])")
    print()
    r = list_modify(['ayam', 'ikan', 'kucing'], 'remove', 'end')
    print(f"list_modify(['ayam', 'ikan', 'kucing'], 'remove', 'end') = {r} \n(solusi: ['ayam', 'ikan'])")
    print()
# Run the self-checks when executed as a script.
if __name__ == '__main__':
    test()
34424984701 | # Project 3 - Calcudoku Solver
#
# Name: Justin Mo
# Instructor: Brian Jones
# Section: 17
from solver_funcs import *
def get_cages():
    """Prompt for the cage count, then read one whitespace-separated line of
    integers per cage.  Returns a list of int lists, one per cage."""
    cage_count = int(input("Number of cages: "))
    cages = []
    # The original prompts are numbered from 0, so use the loop index directly.
    for cage_idx in range(cage_count):
        raw = input("Cage number {:d}: ".format(cage_idx))
        cages.append([int(token) for token in raw.split()])
    return cages
def main():
    """Brute-force the 5x5 Calcudoku: try values 1-5 cell by cell, backtracking
    whenever the partial grid violates a row, column or cage constraint, and
    finally print the solved grid plus check/backtrack statistics."""
    cages = get_cages()
    puzzle = [[0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0]]
    checks = 0
    backtracks = 0
    index = 0
    while index < 5:
        value = 1
        inner_index = 0
        while inner_index < 5:
            puzzle[index][inner_index] = value
            print(value)
            # NOTE(review): `check_cages_valid_2 == False` compares the
            # function OBJECT to False (always False) -- the function is never
            # called, so only check_cages_valid() gates this branch.  Confirm
            # whether it should be check_cages_valid_2(puzzle, cages).
            if check_cages_valid(puzzle, cages) == False and check_cages_valid_2 == False:
                # Constraint violated: clear the cell and step back.
                puzzle[index][inner_index] = 0
                if inner_index == 0:
                    inner_index = 4
                    index -= 1
                    backtracks += 1
                else:
                    inner_index -= 1
                    backtracks += 1
                checks += 1
                if value < 5:
                    value += 1
                    checks += 1
                    while value > 5:
                        if inner_index == 0:
                            value = 0
                            index -= 1
                            inner_index = 4
                            backtracks += 1
                            value += 1
                        else:
                            value = 0
                            inner_index -= 1
                            value += 1
                            backtracks += 1
                else:
                    if inner_index != 0:
                        inner_index -= 1
                        backtracks += 1
                    else:
                        index -= 1
                        inner_index = 4
                        backtracks += 1
            else:
                if check_columns_valid(puzzle) == True and check_rows_valid(puzzle) == True:
                    # Cell accepted: move on to the next column.
                    value = 1
                    inner_index += 1
                    checks += 1
                else:
                    # Row/column clash: try the next value, wrapping backwards
                    # when all five values are exhausted.
                    value += 1
                    checks += 1
                    if inner_index == 0:
                        value = 0
                        index -= 1
                        inner_index = 4
                        backtracks += 1
                        value += 1
                    else:
                        value = 0
                        inner_index -= 1
                        value += 1
                        backtracks += 1
        index += 1
    #check_columns_valid(puzzle)
    #check_rows_valid(puzzle)
    #check_cages_valid(puzzle, cages)
    #check_cages_valid_1(puzzle, cages)
    print("")
    print("Solution:")
    print("")
    print(puzzle[0])
    print(puzzle[1])
    print(puzzle[2])
    print(puzzle[3])
    print(puzzle[4])
    print("")
    print("checks: {:d} backtracks: {:d}".format(checks, backtracks))
# Run the solver when executed as a script.
if __name__ == '__main__':
    main()
| ohnoitsjmo/cpe101 | cpe101projects/101project03/solver_draft.py | solver_draft.py | py | 3,082 | python | en | code | 0 | github-code | 13 |
74852811856 | # -*- coding: utf-8 -*-
import csv
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from geonames.downloader import Downloader
from geonames.models import Country
logger = logging.getLogger(__name__)
# municipality_levels is a dictionary that tells for some country which adm level holds the municipalities
# http://www.statoids.com/
from .synchgeonames import municipality_levels
class Command(BaseCommand):
    help = '''Synchronize countries data from GeoNames
    '''
    def handle(self, *args, **options):
        """Download GeoNames' countryInfo.txt and upsert a Country row per
        entry, attaching the known municipality adm-level where available."""
        base_url = 'https://download.geonames.org/export/dump/'
        # Let's import countries:
        country_dict = {}
        downloader = Downloader()
        # download() returns truthy when the file is available locally
        # (force=False keeps a previously downloaded copy).
        if downloader.download(
            source=base_url + "countryInfo.txt",
            destination=settings.GEONAMES_DEST_PATH + "countryInfo.txt",
            force=False
        ):
            # import the country file
            # NOTE(review): the try wraps the whole loop, so the first bad row
            # aborts the remaining imports; also `row[0][0]` raises IndexError
            # on any empty first field.  Confirm whether per-row recovery is
            # wanted here.
            try:
                with open(settings.GEONAMES_DEST_PATH + "countryInfo.txt", 'r') as geonames_file:
                    csv_reader = csv.reader(geonames_file, delimiter='\t', quotechar="\\")
                    for row in csv_reader:
                        # Lines starting with '#' are comments in countryInfo.txt.
                        if row[0][0] != "#":
                            if Country.objects.filter(code=row[0]).exists():
                                c = Country.objects.get(code=row[0])
                            else:
                                c = Country(code=row[0])
                            c.name=row[4]
                            if c.code in municipality_levels.keys():
                                c.municipality_levels = municipality_levels[c.code]
                            c.save()
                            country_dict[row[0]] = c
            except Exception as ex:
                logger.error("Error %s" % str(ex))
| davidegalletti/django_geonames_cities | geonames/management/commands/synchgeonamescountries.py | synchgeonamescountries.py | py | 1,873 | python | en | code | 0 | github-code | 13 |
36581291392 | """
Define and check rules for a film object.
"""
# Rules for the film object
# NOTE(review): two keys carry typos ('inlude_unkown_runtime') which the check
# functions below rely on; renaming would require updating them in lockstep.
parse_options = {
    'type': 'film',
    'runtime_min': 80,
    'runtime_max': 140,
    'inlude_unkown_runtime': False,
    # Score bounds are strings on a 0-10 scale; check_score() multiplies them
    # by 10 to compare against 0-100 ratings.
    'score_range_min': '6.9',
    'score_range_max': '10.0',
    'include_unknown_score': False,
    'year_range_oldest': 1990,
    'year_range_newest': 2019,
    'wanted_genres': ['drama'],
    'unwanted_genres': ['romance', 'musical','horror', 'documentary'],
    # Whether add or remove a film
    # whose genre neither in wanted_genres nor unwanted_genres list
    'add_not_unwanted_&_not_wanted': True,
    'include_watched': False
}
def check_runtime(film_runtime):
    """Return True if the runtime lies inside the configured [min, max] window.

    An 'unknown' runtime passes or fails according to the config flag.
    """
    if film_runtime == 'unknown':
        return parse_options['inlude_unkown_runtime']
    low = parse_options['runtime_min']
    high = parse_options['runtime_max']
    return low <= film_runtime <= high
def check_genre(film_genre_list):
    """Return True if the film's genres pass the wanted/unwanted filters."""
    lowered = [genre.lower() for genre in film_genre_list]
    # Any unwanted genre disqualifies the film outright.
    for genre in lowered:
        if genre in parse_options['unwanted_genres']:
            return False
    wanted = parse_options['wanted_genres']
    # With no wanted list configured, everything not unwanted passes.
    if not wanted:
        return True
    for genre in lowered:
        if genre in wanted:
            return True
    # Neither wanted nor unwanted: behavior is configurable.
    return parse_options['add_not_unwanted_&_not_wanted']
def check_score(score_range):
    """Return True if the score is inside the configured range.

    Config bounds are '6.9'-style strings on a 0-10 scale; they are scaled by
    10 so the comparison works against 0-100 ratings.
    """
    if score_range == 'unknown':
        return parse_options['include_unknown_score']
    low = float(parse_options['score_range_min']) * 10
    high = float(parse_options['score_range_max']) * 10
    return low <= score_range <= high
def check_year(year_range):
    """Return True if the (possibly string) year is inside the configured window."""
    year = int(year_range)
    return parse_options['year_range_oldest'] <= year <= parse_options['year_range_newest']
def check_type(film_type):
    """Return True if the item's type matches the filter ('both' matches everything)."""
    wanted_type = parse_options['type']
    return wanted_type == 'both' or wanted_type == film_type
def watched_included():
    # Whether already-watched films should be kept in the results.
    return parse_options['include_watched']
def check_film_object(film_object, watched_films=None):
    """Apply every configured rule to *film_object*; True means keep it.

    Rules are checked in the same order as before, with short-circuiting
    preserved, so a failing early rule never evaluates the later ones.
    """
    if not (check_runtime(film_object.runtime)
            and check_genre(film_object.genres)
            and check_score(film_object.rating)):
        return False
    # The year window only applies to films; other types skip it.
    if film_object.type == 'film' and not check_year(film_object.year):
        return False
    if not check_type(film_object.type):
        return False
    # Optionally drop titles the user has already seen.
    if watched_films is not None and film_object.name in watched_films:
        return False
    # Every rule passed.
    return True
| hastagAB/Awesome-Python-Scripts | IMDBQuerier/parser_config.py | parser_config.py | py | 2,690 | python | en | code | 1,776 | github-code | 13 |
71690603219 | from accounts.decorators import admin_only, allowed_users, unauthorized_user
from django.shortcuts import render, redirect
from .models import *
from .forms import *
from django.forms import inlineformset_factory
from .filters import OrderFilter
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
# NOTE(review): these module-level querysets are immediately shadowed -- the
# `products` and `customers` view functions below rebind the same names, and
# each view builds its own queryset anyway.  Confirm they are still needed.
orders = Order.objects.all()
products = Product.objects.all()
customers = Customer.objects.all()
# Create your views here.
@login_required(login_url='login')
@admin_only
def home(request):
    """Admin dashboard: all customers and orders plus summary counts."""
    customers = Customer.objects.all()
    orders = Order.objects.all()
    total_orders = orders.count()
    total_customers = customers.count()
    # Per-status breakdowns shown on the dashboard cards.
    delivered = orders.filter(status='Delivered').count()
    pending = orders.filter(status='Pending').count()
    context = {
        'orders': orders,
        'customers': customers,
        'total_orders': total_orders,
        'total_customers': total_customers,
        'delivered': delivered,
        'pending': pending
    }
    return render(request, "accounts/index.html", context)
@login_required(login_url='login')
def products(request):
    """Render the product list along with the total price of all products."""
    products = Product.objects.all()
    # Replaces the manual accumulation loop; the queryset is iterated once and
    # also passed to the template, so summing in Python is fine here.
    total_price = sum(product.price for product in products)
    context = {
        'products': products,
        'total_price': total_price
    }
    return render(request, 'accounts/products.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def customers(request, id):
    """Customer detail page: the customer, their total order count, and a
    filterable list of their orders."""
    selected_customer = Customer.objects.get(id=id)
    orders_by_customer = Order.objects.filter(
        customer=selected_customer.id).count()
    orders = Order.objects.filter(customer=selected_customer.id).all()
    # # the filtering
    # Apply the query-string filters from the search form.
    myFilter = OrderFilter(request.GET, queryset=orders)
    orders = myFilter.qs
    # context
    context = {
        'selected_customer': selected_customer,
        'orders_by_customer': orders_by_customer,
        # 'products': products,
        'orders': orders,
        'myFilter': myFilter,
    }
    return render(request, 'accounts/customers.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def create_order(request, pk):
OrderFormSet = inlineformset_factory(
Customer, Order, fields=('product', 'status'), extra=10)
selected_customer = Customer.objects.get(id=pk)
formset = OrderFormSet(queryset=Order.objects.none(),
instance=selected_customer)
form = OrderForm(initial={'customer': selected_customer})
# checking for submission
if request.method == 'POST':
formset = OrderFormSet(request.POST, instance=selected_customer)
if formset.is_valid():
formset.save()
return redirect('/')
context = {
'formset': formset,
'selected_customer': selected_customer
}
return render(request, 'accounts/order_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def update_order(request, pk):
order = Order.objects.get(pk=pk)
form = OrderForm(instance=order)
if request.method == 'POST':
form = OrderForm(request.POST, instance=order)
if form.is_valid():
form.save()
return redirect('/')
context = {
'form': form
}
return render(request, 'accounts/order_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def create_customer(request):
form = CustomerForm()
if request.method == 'POST':
form = CustomerForm(request.POST)
if form.is_valid():
form.save()
return redirect('/')
context = {
'form': form
}
return render(request, 'accounts/order_form.html', context)
# confirming delete decision
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def confirm_delete(request, pk):
order = Order.objects.get(id=pk)
context = {
'order': order
}
return render(request, 'accounts/delete_confirm.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def delete(request, pk):
order = Order.objects.get(id=pk)
order.delete()
return redirect('/')
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def userpage(request):
orders = request.user.customer.order_set.all()
total_orders = orders.count()
# total_customers = customers.count()
delivered = orders.filter(status='Delivered').count()
pending = orders.filter(status='Pending').count()
context = {
'orders': orders,
'total_orders': total_orders,
# 'total_customers': total_customers,
'delivered': delivered,
'pending': pending
}
return render(request, 'accounts/user.html', context)
@unauthorized_user
def register(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
# group = Group.objects.get(name='customer')
# user.groups.add(group)
# Customer.objects.create(
# user=user, name=user.username, email=user.email)
messages.success(request, f"Account Created for {username}")
return redirect('login')
context = {
'form': form
}
return render(request, 'accounts/register.html', context)
@unauthorized_user
def loginpage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Username or Password Incorrect')
return render(request, 'accounts/login.html')
def logoutUser(request):
logout(request)
return redirect('login')
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def accountSettings(request):
customer = request.user.customer
form = CustomerForm(instance=customer)
if request.method == 'POST':
form = CustomerForm(request.POST, request.FILES, instance=customer)
if form.is_valid():
form.save()
return redirect('account')
context = {'form': form}
return render(request, 'accounts/account_settings.html', context)
| Joepolymath/customer-management-system | accounts/views.py | views.py | py | 6,780 | python | en | code | 0 | github-code | 13 |
23606167769 | #@ type: compute
#@ dependents:
#@ - func2
#@ - func3
#@ corunning:
#@ mem1:
#@ trans: mem1
#@ type: rdma
import struct
import pickle
from typing import List
TRAIN_SIZE = 60000
SIZE = 28
BATCH_SIZE = 1000
OUTPUT = "data/image_store"
def binarization(img: List[List[int]]) -> List[List[int]]:
"""
对于给定的一张 const.SIZE * const.SIZE 大小的灰度图片
将其二值化:大于等于平均灰度的置为 1,其它置为 0
"""
average = sum(sum(row) for row in img) / (SIZE * SIZE)
for i in range(SIZE):
for j in range(SIZE):
img[i][j] = (1 if img[i][j] >= average else 0)
return img
def compression(img: List[List[int]], group_size = 16) -> List[int]:
"""
对于给定的一张 const.SIZE * const.SIZE 大小的二值化图片
将其按照行优先的顺序,每 group_size 个 bit 压缩成一个数,得到一个一维列表,便于存储和相似度计算
"""
span = sum(img, [])
compressed_img = list()
for i in range(0, SIZE * SIZE, group_size):
segment = "".join(str(d) for d in span[i:i+group_size])
compressed_img.append(int(segment, 2))
return compressed_img
def sendToServer(output, data, remoteIndex):
with open(output, "r+b") as fin:
dataBytes = pickle.dumps(data)
length = len(dataBytes)
lenBytes = struct.pack('@I', length)
fin.seek(remoteIndex, 0)
print(fin.tell())
fin.write(lenBytes)
print(fin.tell())
remoteIndex += 4
fin.seek(remoteIndex, 0)
fin.write(dataBytes)
print(fin.tell())
remoteIndex += length
return remoteIndex
def test():
images = list()
filename = "data/mnist_images"
item_count = TRAIN_SIZE
remoteIndex = 0
count = 0
with open(OUTPUT, "r+b") as fin:
fin.seek(remoteIndex, 0)
countBytes = struct.pack('@I', count)
fin.write(countBytes)
remoteIndex += 4
with open(filename, "rb") as fin:
assert int.from_bytes(fin.read(4), byteorder="big") == 0x00000803
N = int.from_bytes(fin.read(4), byteorder="big")
assert N == item_count
assert int.from_bytes(fin.read(4), byteorder="big") == SIZE
assert int.from_bytes(fin.read(4), byteorder="big") == SIZE
for t in range(N):
if t % 1000 == 0:
print(f"read_mnist_image() index = {t}")
if t % BATCH_SIZE == 0 and t != 0:
remoteIndex = sendToServer(OUTPUT, images, remoteIndex)
count += 1
images.clear()
img = [[0] * SIZE for _ in range(SIZE)]
# 依次读入图片的每一个 byte
for i in range(SIZE):
for j in range(SIZE):
img[i][j] = int.from_bytes(fin.read(1), byteorder="big")
# 二值化 + 压缩
img = binarization(img)
img = compression(img)
images.append(img)
if len(images) != 0:
remoteIndex = sendToServer(OUTPUT, images, remoteIndex)
count += 1
images.clear()
with open(OUTPUT, "r+b") as fin:
fin.seek(0, 0)
fin.write(struct.pack('@I', count))
return {}
if __name__ == "__main__":
test()
| zerotrac/CSE291_mnist | Mnist_test/func1.py | func1.py | py | 3,365 | python | en | code | 2 | github-code | 13 |
11228614174 | # hard
class Solution:
def distinctSubseqII(self, s: str) -> int:
def _inner_func(depth, couples, s, prefix=''):
level_couple = []
for index, one in enumerate(s):
couple = prefix + one
if couple not in couples:
couples.add(couple)
if depth > 1:
for couple in level_couple:
_inner_func(depth - 1, couples, s[index + 1:], prefix + one)
couples = set()
_inner_func(len(s), couples, s)
return len(couples) % (10 ** 9 + 7)
if __name__ == '__main__':
test_list = ['abc', 'aba', 'aaa', 'adbae']
# test_list = ["pcrdhwdxmqdznbenhwjsenjhvulyve"]
for one in test_list:
print(one, 'count :', Solution().distinctSubseqII(one))
| Tritium-leo/leetcode_pratice | _doing_job/test_940.py | test_940.py | py | 798 | python | en | code | 1 | github-code | 13 |
44250666811 | # coding: utf-8
from wordcloud import WordCloud
import jieba
# 打开文本
text = open('lyb.txt').read()
# 加载停用词表
stop_word_set = set()
with open('stop_words.txt', 'r') as f:
for line in f:
word = line.strip()
if word not in stop_word_set:
stop_word_set.add(word)
print(stop_word_set)
# 中文分词
jieba.load_userdict('dict.txt')
words = jieba.cut(text)
# 去停用词
words_clean = []
for word in words:
if word not in stop_word_set:
words_clean.append(word)
text = ' '.join(words_clean)
# 生成对象
wc = WordCloud(font_path='Hiragino.ttf', width=800, height=600, mode='RGBA', background_color=None).generate(text)
# 保存到文件
wc.to_file('2.png')
| ylhao/wordcloud | lyb_2.py | lyb_2.py | py | 733 | python | en | code | 1 | github-code | 13 |
11262615875 | import hashlib
import json
import logging
from tamperfree.browser import ProxiedBrowser
from tamperfree.tor_process import TorProcess
logger = logging.getLogger(__name__)
class MismatchedHashes(object):
def __init__(self, hashes, wrong_hashes):
self.hashes = hashes
self.wrong_hashes = wrong_hashes
def __str__(self):
return "The following hashes do not match with the stamped hashes:\n{}".\
format("\n".join([k + " " + v + "(expected: " + self.hashes[k] + ")" for k,v in self.wrong_hashes]))
class MissingHashes(object):
def __init__(self, missing_hashes):
self.hashes = missing_hashes
def __str__(self):
return "Missing hashes:\n{}".\
format("\n".join([str(v) for v in self.hashes]))
class ExtraneousHashes(object):
def __init__(self, new_hashes):
self.hashes = new_hashes
def __str__(self):
return "Extraneous hashes:\n{}".\
format("\n".join([str(v) for v in self.hashes]))
class SiteContentStamp(object):
def __init__(self, hashes = None):
self.hashes = dict()
if hashes:
for k,v in hashes:
self.hashes[k] = v
def verify_against(self, other):
"""
Look for differences between the hashes in this stamp and the other stamp.
Looks for missing or new hashes, in case content has been deleted or added.
Then it looks for hashes that do not match. The goal here is to try and
explain the results as best as possible so that we know why something has
gone wrong.
Returns true and an empty list if the verification succeeded. Otherwise
returns false and a list of reasons why the verification failed.
"""
# O(n2)
new_hashes = []
wrong_hashes = []
for k, v in other.hashes.iteritems():
if k not in self.hashes:
new_hashes.append((k, v))
elif self.hashes[k] != v:
wrong_hashes.append((k, v))
missing_hashes = [k for k in self.hashes if k not in other.hashes]
reasons = []
if new_hashes:
reasons.append(ExtraneousHashes(new_hashes))
if missing_hashes:
reasons.append(MissingHashes(missing_hashes))
if wrong_hashes:
reasons.append(MismatchedHashes(self.hashes, wrong_hashes))
return len(reasons) == 0, reasons
def add(self, _b):
h = hashlib.sha256()
h.update(_b.body)
hash = h.hexdigest()
self.hashes[_b.request.path] = hash
logger.info("Added hash " + str((_b.request.path, hash)))
return hash
def __str__(self):
return "\n".join([str(h) for h in self.hashes.iteritems()])
def save(self, file):
json.dump({ "hashes": list(self.hashes.iteritems()) }, open(file, "w"))
def _object_hook(dct):
return SiteContentStamp(hashes=dct['hashes'])
def load(file):
return json.load(open(file), object_hook=_object_hook)
def fetch_hashes(dir, url, tor_port = 9150, launch_tor = True):
# Fetches the hashes for a single url.
stamp = SiteContentStamp()
def fetch_stamps():
with ProxiedBrowser(dir, tor_port=tor_port) as b:
r = b.get(url)
for _r in r:
stamp.add(_r)
if launch_tor:
with TorProcess(dir, port=tor_port):
fetch_stamps()
else:
fetch_stamps()
return stamp
| Tethik/tamperfree | tamperfree/verify.py | verify.py | py | 3,463 | python | en | code | 1 | github-code | 13 |
13629366547 |
from typing import Dict, Optional
from ..extraCode.location import HexPoint, Resource
from ..extraCode.util import isNotNone, JsonSerializable, ArgumentMissingError, NotSetupException, AlreadySetupException
from ..extraCode.modifiers import Placeable, Ownable, Purchaseable
from ..playerCode.player import Player
from ..playerCode.turn import Turn
class Road(Placeable, Purchaseable, Ownable, JsonSerializable):
_cost = {
Resource.Lumber: 1,
Resource.Brick: 1
}
_isLineFeature = True
def __init__(self, point1: HexPoint = None, point2: HexPoint = None, **kwargs) -> None:
'''
If both point1 and point2 are None, then it assumes that this is an unplaced Road.
If both are not None, this is a placed road
'''
self.__point1: Optional[HexPoint] = point1
self.__point2: Optional[HexPoint] = point2
hasLocation = False
if point1 is not None and point2 is not None:
# fully specified
hasLocation = True
elif point1 is not None:
# specified only point1
raise ArgumentMissingError('__init__', 'point2')
elif point2 is not None:
# specified only point2
raise ArgumentMissingError('__init__', 'point1')
super().__init__(isPlaced=hasLocation, **kwargs)
def __str__(self):
if self._isPlaced:
return f"Road({self.__point1, self.__point2})"
else:
return f"Road()"
def place(self, point1: HexPoint, point2: HexPoint, turn: Turn):
if self._isPlaced:
raise AlreadySetupException("This road has already been placed")
self.__point1 = point1.copy()
self.__point2 = point2.copy()
turn.gameMap.addLineElement(self, turn) # raises ActionError
self._place()
@property
def point1(self) -> HexPoint:
if self.__point1 is None:
raise NotSetupException("This road hasn't been given a position yet")
return self.__point1
@property
def point2(self) -> HexPoint:
if self.__point2 is None:
raise NotSetupException("This road hasn't been given a position yet")
return self.__point2
def toJsonSerializable(self) -> Dict[str, object]:
return {
'point1': self.__point1,
'point2': self.__point2,
**super().toJsonSerializable()
} | hydrogen602/settlersPy | gameServer/mapCode/lineMapFeatures.py | lineMapFeatures.py | py | 2,445 | python | en | code | 0 | github-code | 13 |
22237183726 | from __future__ import absolute_import, division, print_function
import pytest
from inspire_hal.factory import create_app
@pytest.fixture(scope='session')
def app():
"""
Deprecated: do not use this fixture for new tests, unless for very
specific use cases. Use `isolated_app` instead.
Flask application with demosite data and without any database isolation:
any db transaction performed during the tests are persisted into the db.
Creates a Flask application with a simple testing configuration,
then creates an application context and inside of it recreates
all databases and indices from the fixtures. Finally it yields,
so that all tests that explicitly use the ``app`` fixture have
access to an application context.
See: http://flask.pocoo.org/docs/0.12/appcontext/.
"""
app = create_app(
DEBUG=False,
SECRET_KEY='secret!',
TESTING=True,
)
with app.app_context() as app:
yield app
| inspirehep/inspire-hal | tests/unit/conftest.py | conftest.py | py | 985 | python | en | code | 0 | github-code | 13 |
31078506343 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advent of Code 2020 day 15 module."""
try:
from tqdm import tqdm
HAS_TQDM = True
except ImportError:
HAS_TQDM = False
def run(start):
numbers = {}
turn = 0
for n in start:
yield n
numbers[n] = (None, turn)
turn += 1
next_ = 0
while True:
yield next_
if next_ in numbers:
new = (numbers[next_][1], turn)
numbers[next_] = new
next_ = new[1] - new[0]
else:
numbers[next_] = (None, turn)
next_ = 0
turn += 1
def process(puzzle_input, verbose=False):
p1 = p2 = None
nums = run([int(n) for n in puzzle_input[0].split(",")])
if HAS_TQDM:
r = tqdm(range(30000000))
else:
r = range(30000000)
for i in r:
n = next(nums)
if i == 2019:
p1 = n
p2 = n
return p1, p2
def main():
"""Main entry point."""
import argparse
import fileinput
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='input file to read ("-" for stdin)')
parser.add_argument('-v', '--verbose', '-d', '--debug',
action='store_true', dest='verbose', help='verbose output')
args = parser.parse_args()
try:
puzzle_input = [line.strip() for line in fileinput.input(args.infile) if line.strip()]
p1, p2 = process(puzzle_input, verbose=args.verbose)
print(f'Part one: {p1}')
print(f'Part two: {p2}')
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| pmrowla/aoc2020 | day15.py | day15.py | py | 1,618 | python | en | code | 0 | github-code | 13 |
39740109887 | import os
import datetime
import xlsxwriter
from django.utils.translation import gettext_lazy as _
from . import utils
from .models import *
from .add_excel_info import add_excel_info
data_headers = (
'Bib', 'Status', 'Date',
'LastName', 'FirstName',
'Gender',
'DOB',
'City', 'StateProv',
'License',
'UCIID',
)
def write_row_data( ws, row, row_data, format = None ):
if format is None:
for col, d in enumerate(row_data):
ws.write( row, col, d )
else:
if isinstance(format, list):
col_format = { col:f for col, f in enumerate(format) }
default_format = None
else:
col_format = {}
default_format = format
for col, d in enumerate(row_data):
f = col_format.get(col, default_format)
if f is not None:
ws.write( row, col, d, f )
else:
ws.write( row, col, d )
return row + 1
def get_number_set_excel( nses ):
output = BytesIO()
wb = xlsxwriter.Workbook( output, {'in_memory': True} )
title_format = wb.add_format( dict(bold = True) )
ws = wb.add_worksheet('Number Set Bibs')
row = write_row_data( ws, 0, data_headers, title_format )
for nse in nses:
lh = nse.license_holder
data = [
nse.bib,
'Lost' if nse.date_lost else 'Held',
nse.date_lost.strftime('%Y-%m-%d') if nse.date_lost else '',
lh.last_name,
lh.first_name,
lh.get_gender_display(),
lh.date_of_birth.strftime('%Y-%m-%d'),
lh.city,
lh.state_prov,
lh.license_code,
lh.uci_id,
]
row = write_row_data( ws, row, data )
add_excel_info( wb )
wb.close()
return output.getvalue()
| esitarski/RaceDB | core/get_number_set_excel.py | get_number_set_excel.py | py | 1,543 | python | en | code | 12 | github-code | 13 |
70170628177 | from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
import random
from graph_algo import uniq_edges
import numpy as np
from functools import reduce
import operator
import yajl
from torchvision import transforms
class pairwise_dataset(Dataset) :
'''Uses image_list and adjacency_list for similar pairs. For each
image in similar_pair, randomly generates a dissimilar pair. (1:2)
positive to negative samples.
Labels may be initialized as an ordered pair: 0: similar, 1: dissimilar
'''
def __init__(self, adjacency, image_list,
labels=[0, 1],
transform = None,
dissimilar_fn = None) :
self.adjacency = adjacency
self.image_list = image_list
self.labels = torch.tensor(labels).float()
self.transform = transform
self.dissimilar = dissimilar_fn
if self.dissimilar is None :
self.dissimilar = self.find_dissimilar
self.init_pairs()
def init_pairs(self) :
pairs = uniq_edges(self.adjacency) #gives me a numpy array (N, 2)
flat_pairs = pairs.reshape([-1])
undef = np.full_like(flat_pairs, -1)
more_pairs = np.stack([flat_pairs, undef], axis=1)
self.pairs = np.concatenate([pairs, more_pairs], axis=0)
def __len__(self):
return len(self.pairs)
def __getitem__(self, index) :
x1, x2 = self.pairs[index]
y = int(x2 == -1)
if y != 0 :
x2 = self.dissimilar(x1)
y = self.labels[y]
# lg.info((x1, x2))
x1 = self.load_image(x1)
x2 = self.load_image(x2)
if self.transform :
x1 = self.transform(x1)
x2 = self.transform(x2)
return y, (x1, x2)
def find_dissimilar(self, index) :
indices = set((int(i) for i in self.adjacency.keys()))
# lg.info("indices(%d): %s", len(list(indices)), sorted(list(indices)))
# lg.info("index: %s", index)
# lg.info("adjacency(%d): %s", index, self.adjacency[str(index)])
indices = indices - set(self.adjacency[str(index)] + [int(index)])
indices = list(indices)
# lg.info("indices(%d): %s", len(indices)), sorted(indices))
return random.choice(indices)
def load_image(self, image_index) :
image_name = self.image_list[image_index]
return Image.open(image_name)
class triplet_dataset(pairwise_dataset) :
'''Uses image_list and adjacency_list for similar pairs. For each
image in similar_pair, randomly generates a dissimilar pair. (1:2)
positive to negative samples.
'''
def __init__(self, *args, **kwargs) :
super().__init__(*args, **kwargs)
def init_pairs(self) :
self.pairs = uniq_edges(self.adjacency) #gives me a numpy array (N, 2)
def __len__(self):
return 2 * self.pairs.shape[0]
def __getitem__(self, index) :
i = index // self.pairs.shape[0]
index = index % self.pairs.shape[0]
if i > 0:
x_pos, x = self.pairs[index]
else :
x, x_pos = self.pairs[index]
x_neg = self.dissimilar(x)
x = self.load_image(x)
x_pos = self.load_image(x_pos)
x_neg = self.load_image(x_neg)
if self.transform :
x = self.transform(x)
x_pos = self.transform(x_pos)
x_neg = self.transform(x_neg)
return self.labels, (x, x_pos, x_neg)
class Create(object) :
def __init__(self, base_module) :
self.base_module = base_module
def __call__(self, adjacency, image_list, labels, transform):
with open(adjacency, 'r') as J :
adjacency = yajl.load(J)
with open(image_list, 'r') as J :
image_list = yajl.load(J)['image_list']
transforms = {
'sketch_transform': sketch_transform
}
transform = transforms.get(transform, sketch_transform)()
return self.base_module(adjacency, image_list, labels, transform)
def flatten(inp_list) :
return reduce(operator.concat, inp_list)
def sketch_transform() :
return transforms.Compose([
transforms.Grayscale(3),
transforms.Resize(224),
transforms.RandomCrop(224),
transforms.ToTensor(),
transforms.Lambda(lambda x: 255 - x)
])
if __name__ == '__main__' :
# To Test
# -----------------------------------
# combinations_dataset(similar_pairs, image_list,
# transform = None,
# dissimilar_fn = None)
# Logging:
# -----------------------------------
import logging as lg
lg.basicConfig(level=lg.INFO, format='%(levelname)-8s: %(message)s')
from graph_algo import edge_to_adjacency
# With transforms
# -----------------------------------
from torchvision.transforms import Compose, Grayscale, ToTensor
from torchvision.transforms import Resize, RandomCrop
T = Compose([Grayscale(), Resize(224), RandomCrop(224), ToTensor()])
## Json Loader
# -----------------------------------
import yajl
combinations_json = '/home/bvr/data/pytosine/k_nearest/20180526-153919/combinations.json'
with open(combinations_json, 'r') as J :
similar_pairs = yajl.load(J)['combinations']
lg.info('Loaded similar pairs: size:%s', len(similar_pairs))
adjacency = edge_to_adjacency(similar_pairs)
# TODO: include edge_to_adjacency before tangle
image_list_json = '/home/bvr/data/pytosine/k_nearest/20180521-205730/image_list.json'
image_list_key = 'image_list'
with open(image_list_json, 'r') as J :
image_list = yajl.load(J)[image_list_key]
lg.info('Loaded image_list: size:%s', len(image_list))
def test_dataset(dataset_name) :
global adjacency, image_list, T
dataset = dataset_name(
adjacency, image_list,
transform = T,
labels=[np.array([1, 0]), np.array([0, 1])])
dataloader = DataLoader(
dataset, shuffle=True, batch_size = 64
)
for i, (y, x) in enumerate(dataloader) :
lg.info('sizes: len(y), y[0].size, len(x), x[0].size: %s, %s, %s, %s',
len(y), y[0].size(), len(x), x[0].size())
test_dataset(pairwise_dataset)
test_dataset(triplet_dataset)
| bvraghav/rivet | dataset.py | dataset.py | py | 5,913 | python | en | code | 1 | github-code | 13 |
6606344232 | __author__ = "Timo Konu \n Severi Jääskeläinen \n Samuel Kaiponen \n Heta " \
"Rekilä \n Sinikka Siironen \n Juhani Sundell"
__version__ = "2.0"
import widgets.gui_utils as gutils
import widgets.binding as bnd
from modules.element import Element
from typing import Set
from typing import List
from PyQt5 import uic
from PyQt5 import QtWidgets
class DepthProfileIgnoreElements(QtWidgets.QDialog):
"""
Dialog for ignoring elements in a depth profile.
"""
included_in_graph = bnd.bind("tree_elements")
included_in_ratio = bnd.bind("tree_ratio")
@property
def ignored_from_graph(self):
try:
return self._get_ignored(set(self.included_in_graph))
except AttributeError:
return set()
@property
def ignored_from_ratio(self):
try:
return self._get_ignored(set(self.included_in_ratio))
except AttributeError:
return set()
def _get_ignored(self, included):
return {
elem for elem in self._elements if elem not in included
}
def __init__(self, elements: List[Element], ignored_graph: Set[Element],
ignored_ratio: Set[Element]):
"""Init the dialog.
Args:
elements: A list of elements in Depth Profile.
ignored_graph: A list of elements ignored previously for the graph.
ignored_ratio: A list of elements ignored previously for ratio
calculation.
"""
super().__init__()
uic.loadUi(gutils.get_ui_dir() / "ui_depth_profile_ignored.ui", self)
self._elements = sorted(set(elements))
self.button_ok.clicked.connect(self.accept)
self.button_cancel.clicked.connect(self.reject)
# Fill the trees
gutils.fill_tree(
self.tree_elements.invisibleRootItem(), self._elements)
gutils.fill_tree(
self.tree_ratio.invisibleRootItem(), self._elements)
self.included_in_graph = set(
elem for elem in self._elements if elem not in ignored_graph
)
self.included_in_ratio = set(
elem for elem in self._elements if elem not in ignored_ratio
)
| JYU-IBA/potku | dialogs/measurement/depth_profile_ignore_elements.py | depth_profile_ignore_elements.py | py | 2,234 | python | en | code | 7 | github-code | 13 |
42275566699 | """
Test ion functionality
"""
import astropy.units as u
import numpy as np
import pytest
import fiasco
from fiasco.util.exceptions import MissingDatasetException
# Common temperature grid shared by all fixtures: 100 log-spaced points, 1e5-1e8 K
temperature = np.logspace(5, 8, 100)*u.K
@pytest.fixture
def ion(hdf5_dbase_root):
    """Fe V ion evaluated on the module-level temperature grid."""
    fe5 = fiasco.Ion('Fe 5', temperature, hdf5_dbase_root=hdf5_dbase_root)
    return fe5
@pytest.fixture
def another_ion(hdf5_dbase_root):
    """Fe VI ion evaluated on the module-level temperature grid."""
    fe6 = fiasco.Ion('Fe 6', temperature, hdf5_dbase_root=hdf5_dbase_root)
    return fe6
@pytest.fixture
def h1(hdf5_dbase_root):
    """Neutral hydrogen ion evaluated on the module-level temperature grid."""
    hydrogen = fiasco.Ion('H 1', temperature, hdf5_dbase_root=hdf5_dbase_root)
    return hydrogen
@pytest.fixture
def fe10(hdf5_dbase_root):
    """Fe X ion evaluated on the module-level temperature grid."""
    iron_ten = fiasco.Ion('Fe 10', temperature, hdf5_dbase_root=hdf5_dbase_root)
    return iron_ten
@pytest.fixture
def c6(hdf5_dbase_root):
    """C VI ion evaluated on the module-level temperature grid."""
    carbon_six = fiasco.Ion('C VI', temperature, hdf5_dbase_root=hdf5_dbase_root)
    return carbon_six
@pytest.fixture
def fe20(hdf5_dbase_root):
    """Fe XX ion on the module-level temperature grid.

    This ion was added because it has reclvl and cilvl files, which are
    needed to test the level-resolved rate correction factor.
    """
    iron_twenty = fiasco.Ion('Fe XX', temperature, hdf5_dbase_root=hdf5_dbase_root)
    return iron_twenty
def test_new_instance(ion):
    """_new_instance copies kwargs/temperature and applies selective overrides."""
    original_abundance = ion._instance_kwargs['abundance_filename']
    # Plain copy: every instance kwarg and the temperature carry over.
    copy = ion._new_instance()
    assert all(copy._instance_kwargs[key] == ion._instance_kwargs[key]
               for key in copy._instance_kwargs)
    assert u.allclose(copy.temperature, ion.temperature, rtol=0)
    # Overriding the temperature only affects the new instance's temperature.
    truncated = ion._new_instance(temperature=ion.temperature[:1])
    assert u.allclose(truncated.temperature, ion.temperature[:1])
    # Overriding a kwarg must not mutate the original ion.
    overridden = ion._new_instance(abundance_filename='sun_coronal_1992_feldman')
    assert overridden._instance_kwargs['abundance_filename'] == 'sun_coronal_1992_feldman'
    assert ion._instance_kwargs['abundance_filename'] == original_abundance
def test_level_indexing(ion):
    """Integer, slice, and fancy indexing should all yield Level objects."""
    # Integer index -> a single Level
    ground = ion[0]
    assert isinstance(ground, fiasco.Level)
    assert repr(ground) == repr(fiasco.Level(0, ion._elvlc))
    # Slice -> list of Levels
    sliced = ion[:5]
    assert isinstance(sliced, list) and len(sliced) == 5
    assert isinstance(sliced[0], fiasco.Level)
    assert repr(sliced[2]) == repr(fiasco.Level(2, ion._elvlc))
    # Fancy (list) indexing -> list of Levels
    fancy = ion[[1, 5, 10]]
    assert isinstance(fancy, list) and len(fancy) == 3
    assert isinstance(fancy[0], fiasco.Level)
    assert repr(fancy[2]) == repr(fiasco.Level(10, ion._elvlc))
def test_level(ion):
    """Spot-check quantum numbers exposed by the ground level of Fe V."""
    ground = ion[0]
    assert isinstance(ground, fiasco.Level)
    assert ground.multiplicity == 5
    assert ground.total_angular_momentum == 0
    assert ground.orbital_angular_momentum_label == 'D'
def test_repr(ion):
    """The repr should mention the ion name."""
    assert 'Fe 5' in repr(ion)
def test_repr_scalar_temp(hdf5_dbase_root):
    """The repr also works for an ion built with a scalar temperature."""
    scalar_ion = fiasco.Ion('Fe 5', 1e6 * u.K, hdf5_dbase_root=hdf5_dbase_root)
    assert 'Fe 5' in repr(scalar_ion)
def test_ion_properties(ion):
    """Basic identification attributes of Fe V."""
    expected = (26, 'iron', 'Fe', 'Fe 5')
    actual = (ion.atomic_number, ion.element_name, ion.atomic_symbol, ion.ion_name)
    assert actual == expected
def test_level_properties(ion):
    """The ground level exposes the expected attributes."""
    ground = ion[0]
    for attribute in ('level', 'energy', 'configuration'):
        assert hasattr(ground, attribute)
def test_scalar_temperature(hdf5_dbase_root):
    """A scalar temperature still yields a length-1 ioneq array."""
    hydrogen = fiasco.Ion('H 1', 1 * u.MK, hdf5_dbase_root=hdf5_dbase_root)
    frac = hydrogen.ioneq
    assert frac.shape == (1,)
    dset = hydrogen._ioneq[hydrogen._dset_names['ioneq_filename']]
    # 1 MK coincides with a tabulated node, so the value should match the data.
    idx = np.where(dset['temperature'] == hydrogen.temperature)
    assert u.allclose(frac, dset['ionization_fraction'][idx])
def test_no_elvlc_raises_index_error(hdf5_dbase_root):
    """Indexing an ion that has no elvlc data (H II) raises IndexError."""
    with pytest.raises(IndexError):
        proton = fiasco.Ion('H 2', temperature, hdf5_dbase_root=hdf5_dbase_root)
        proton[0]
def test_ioneq(ion):
    """ioneq evaluated at the tabulated temperature nodes reproduces the data."""
    dset = ion._ioneq[ion._dset_names['ioneq_filename']]
    at_nodes = ion._new_instance(temperature=dset['temperature'])
    assert u.allclose(at_nodes.ioneq, dset['ionization_fraction'], rtol=1e-6)
def test_ioneq_positive(ion):
    """Ionization fractions are never negative."""
    assert (ion.ioneq >= 0).all()
def test_ioneq_out_bounds_is_nan(ion):
    """Temperatures outside the tabulated range give NaN ionization fractions."""
    nodes = ion._ioneq[ion._dset_names['ioneq_filename']]['temperature']
    # One point just below the first node and one far above the last node.
    outside = nodes[[0, -1]] + [-100, 1e6] * u.K
    out_of_bounds_ion = ion._new_instance(temperature=outside)
    assert np.isnan(out_of_bounds_ion.ioneq).all()
def test_formation_temeprature(ion):
    # NOTE(review): "temeprature" is a typo in the test name; renaming would
    # change the collected test id, so it is only flagged here.
    # Formation temperature is the grid point where the ionization fraction peaks.
    assert ion.formation_temperature == ion.temperature[np.argmax(ion.ioneq)]
def test_abundance(ion):
    """Abundance is float64 and matches the recorded regression value."""
    abundance = ion.abundance
    assert abundance.dtype == np.dtype('float64')
    # Regression value only; it has not been independently verified.
    assert u.allclose(abundance, 0.0001258925411794166)
def test_proton_collision(fe10):
    """Spot-check proton (de)excitation rates of Fe X against regression values."""
    excitation = fe10.proton_collision_excitation_rate
    assert u.allclose(excitation[0, 0], 4.69587161e-13 * u.cm**3 / u.s)
    deexcitation = fe10.proton_collision_deexcitation_rate
    assert u.allclose(deexcitation[0, 0], 1.17688025e-12 * u.cm**3 / u.s)
def test_missing_abundance(hdf5_dbase_root):
    """Requesting an abundance absent from the chosen dataset raises KeyError."""
    lithium = fiasco.Ion('Li 1',
                         temperature,
                         abundance_filename='sun_coronal_1992_feldman',
                         hdf5_dbase_root=hdf5_dbase_root)
    with pytest.raises(KeyError):
        lithium.abundance
def test_ip(ion):
    """Ionization potential is float64 and matches the regression value."""
    ip = ion.ip
    assert ip.dtype == np.dtype('float64')
    # Regression value only; it has not been independently verified.
    assert u.allclose(ip, 1.2017997435751017e-10 * u.erg)
def test_missing_ip(hdf5_dbase_root):
    """Fe XXVII lacks ionization potential data, so .ip must raise."""
    bare_iron = fiasco.Ion('Fe 27', temperature, hdf5_dbase_root=hdf5_dbase_root)
    with pytest.raises(MissingDatasetException):
        bare_iron.ip
def test_level_populations(ion):
    """Populations have shape (T, n_density, n_levels) and are normalized."""
    populations = ion.level_populations(1e8 * u.cm**-3)
    assert populations.shape == ion.temperature.shape + (1,) + ion._elvlc['level'].shape
    # Regression value only; it has not been independently verified.
    assert u.allclose(populations[0, 0, 0], 0.011643747849652244)
    # The populations must sum to 1 over levels at every temperature.
    assert u.allclose(populations.squeeze().sum(axis=1), 1, atol=None, rtol=1e-15)
def test_level_populations_proton_data_toggle(ion):
    """Fe V has no psplups data, so the proton toggle must be a no-op."""
    with_protons = ion.level_populations(1e9*u.cm**(-3), include_protons=True)
    without_protons = ion.level_populations(1e9*u.cm**(-3), include_protons=False)
    assert u.allclose(with_protons, without_protons, atol=0, rtol=0)
def test_contribution_function(ion):
    """Contribution function has shape (T, n_density, n_transitions)."""
    goft = ion.contribution_function(1e7 * u.cm**-3)
    assert goft.shape == ion.temperature.shape + (1,) + ion._wgfa['wavelength'].shape
    # Regression value only; it has not been independently verified.
    assert u.allclose(goft[0, 0, 0], 2.08668713e-30 * u.cm**3 * u.erg / u.s)
def test_emissivity_shape(c6):
    """Check emissivity shapes for scalar, array, and coupled densities.

    C VI is used deliberately: it has a psplups file, so proton rates are
    computed too, exercising the dedicated code path for coupled
    density/temperature. Emissivity is the highest-level product that
    manipulates the density, so this implicitly covers the contribution
    function as well.
    """
    line_wavelengths = c6.transitions.wavelength[~c6.transitions.is_twophoton]
    # Scalar density, no coupling
    emiss = c6.emissivity(1e9 * u.cm**(-3))
    assert emiss.shape == c6.temperature.shape + (1,) + line_wavelengths.shape
    # Density array, no coupling
    density = [1e8, 1e9, 1e10] * u.cm**(-3)
    emiss = c6.emissivity(density)
    assert emiss.shape == c6.temperature.shape + density.shape + line_wavelengths.shape
    # Density coupled to temperature via a constant pressure
    pressure = 1e15 * u.K * u.cm**(-3)
    emiss = c6.emissivity(pressure / c6.temperature, couple_density_to_temperature=True)
    assert emiss.shape == c6.temperature.shape + (1,) + line_wavelengths.shape
def test_coupling_unequal_dimensions_exception(ion):
with pytest.raises(ValueError, match='Temperature and density must be of equal length'):
_ = ion.level_populations([1e7, 1e8]*u.cm**(-3), couple_density_to_temperature=True)
@pytest.fixture
def pops_with_correction(fe20):
    """Level populations with the level-resolved rate correction (the default)."""
    return fe20.level_populations(1e9*u.cm**(-3)).squeeze()
@pytest.fixture
def pops_no_correction(fe20):
    """Level populations with the level-resolved rate correction disabled."""
    return fe20.level_populations(1e9*u.cm**(-3),
                                  include_level_resolved_rate_correction=False).squeeze()
def test_level_populations_normalized(pops_no_correction, pops_with_correction):
    """Populations sum to unity at every temperature, with or without correction."""
    assert u.allclose(pops_with_correction.sum(axis=1), 1, atol=None, rtol=1e-15)
    assert u.allclose(pops_no_correction.sum(axis=1), 1, atol=None, rtol=1e-15)
def test_level_populations_correction(fe20, pops_no_correction, pops_with_correction):
    """The rate correction changes only the corrected levels (up to renormalization)."""
    # Test level-resolved correction applied to correct levels
    i_corrected = np.unique(np.concatenate([fe20._cilvl['upper_level'], fe20._reclvl['upper_level']]))
    # upper_level values are 1-based; convert to 0-based array indices.
    i_corrected -= 1
    # This tests that, for at least some portion of the temperature axis, the populations are
    # significantly different for each corrected level
    pops_equal = u.isclose(pops_with_correction[:, i_corrected], pops_no_correction[:, i_corrected],
                           atol=0.0, rtol=1e-5)
    assert ~np.all(np.all(pops_equal, axis=0))
    # All other levels should be unchanged (with some tolerance for renormalization)
    is_uncorrected = np.ones(pops_no_correction.shape[-1], dtype=bool)
    is_uncorrected[i_corrected] = False
    i_uncorrected = np.where(is_uncorrected)
    assert u.allclose(pops_with_correction[:, i_uncorrected], pops_no_correction[:, i_uncorrected],
                      atol=0.0, rtol=1e-5)
def test_emissivity(ion):
    """Emissivity has shape (temperature, density, wavelength)."""
    emm = ion.emissivity(1e7 * u.cm**-3)
    assert emm.shape == ion.temperature.shape + (1, ) + ion._wgfa['wavelength'].shape
    # This value has not been tested for correctness
    assert u.allclose(emm[0, 0, 0], 2.08668713e-16 * u.erg / u.cm**3 / u.s)
@pytest.mark.parametrize('em', [
    1e29 * u.cm**-5,
    [1e29] * u.cm**-5,
    1e29 * np.ones(temperature.shape) * u.cm**-5,
])
def test_intensity(ion, em):
    """Intensity shape is correct for scalar, array, and coupled densities."""
    wave_shape = ion._wgfa['wavelength'].shape
    intens = ion.intensity(1e7 * u.cm**-3, em)
    assert intens.shape == ion.temperature.shape + (1, ) + wave_shape
    # Test density varying along independent axis
    density = [1e7, 1e9, 1e10] * u.cm**(-3)
    intens = ion.intensity(density, em)
    assert intens.shape == ion.temperature.shape + density.shape + wave_shape
    # Test density varying along same axis as temperature
    density = 1e15 * u.K * u.cm**(-3) / ion.temperature
    intens = ion.intensity(density, em, couple_density_to_temperature=True)
    assert intens.shape == ion.temperature.shape + (1, ) + wave_shape
def test_excitation_autoionization_rate(ion):
    """Excitation autoionization rate is defined on the temperature grid."""
    rate = ion.excitation_autoionization_rate
    assert rate.shape == ion.temperature.shape
    # This value has not been tested for correctness
    assert u.allclose(rate[0], 1.14821255e-12 * u.cm**3 / u.s)
def test_dielectronic_recombination_rate(ion):
    """Dielectronic recombination rate is defined on the temperature grid."""
    rate = ion.dielectronic_recombination_rate
    assert rate.shape == ion.temperature.shape
    # This value has not been tested for correctness
    assert u.allclose(rate[0], 1.60593802e-11 * u.cm**3 / u.s)
def test_free_free(ion):
    """Free-free (bremsstrahlung) emission is defined per temperature."""
    emission = ion.free_free(200 * u.Angstrom)
    assert emission.shape == ion.temperature.shape + (1, )
    # This value has not been tested for correctness
    assert u.allclose(emission[0], 1.72804216e-29 * u.cm**3 * u.erg / u.Angstrom / u.s)
def test_free_bound(ion):
    """Free-bound (radiative recombination) emission is defined per temperature."""
    emission = ion.free_bound(200 * u.Angstrom)
    assert emission.shape == ion.temperature.shape + (1, )
    # This value has not been tested for correctness
    assert u.allclose(emission[0, 0], 9.7902609e-26 * u.cm**3 * u.erg / u.Angstrom / u.s)
def test_free_bound_no_recombining(h1):
    """Free-bound emission still works when the recombining ion has no data."""
    # This is test the case where there is no data available for the recombining
    # ion (H 2)
    emission = h1.free_bound(200 * u.Angstrom)
    assert emission.shape == h1.temperature.shape + (1, )
    # This value has not been tested for correctness
    assert u.allclose(emission[0, 0], 1.9611545671496785e-28 * u.cm**3 * u.erg / u.Angstrom / u.s)
def test_add_ions(ion, another_ion):
    """Adding two ions yields an IonCollection preserving operand order."""
    collection = ion + another_ion
    assert isinstance(collection, fiasco.IonCollection)
    assert collection[0] == ion
    assert collection[1] == another_ion
def test_radd_ions(ion, another_ion):
    """Reflected addition also yields an IonCollection preserving operand order."""
    collection = another_ion + ion
    assert isinstance(collection, fiasco.IonCollection)
    assert collection[1] == ion
    assert collection[0] == another_ion
def test_transitions(ion):
    """Transitions accessor exposes consistent per-transition arrays."""
    trans = ion.transitions
    assert isinstance(trans, fiasco.Transitions)
    assert len(trans) == 361
    # These values have not been tested for correctness
    assert not trans.is_twophoton[0]
    assert trans.is_observed[0]
    assert u.allclose(trans.A[0], 0.000155 / u.s)
    assert u.allclose(trans.wavelength[0], 703729.75 * u.Angstrom)
    assert u.allclose(trans.upper_level[0], 2)
    assert u.allclose(trans.lower_level[0], 1)
    assert u.allclose(trans.delta_energy[0], 2.82273956e-14 * u.erg)
def test_create_ion_without_units_raises_units_error(hdf5_dbase_root):
    """A bare (unitless) temperature array raises TypeError."""
    with pytest.raises(TypeError):
        fiasco.Ion('Fe 5', temperature.value, hdf5_dbase_root=hdf5_dbase_root)
def test_create_ion_with_wrong_units_raises_unit_conversion_error(hdf5_dbase_root):
    """A temperature with non-convertible units raises UnitsError."""
    with pytest.raises(u.UnitsError):
        fiasco.Ion('Fe 5', temperature.value*u.s, hdf5_dbase_root=hdf5_dbase_root)
def test_indexing_no_levels(hdf5_dbase_root):
    """Iterating an ion without level data is empty; indexing raises IndexError."""
    ion = fiasco.Ion('Fe 1', temperature, hdf5_dbase_root=hdf5_dbase_root)
    print(ion)
    assert [l for l in ion] == []
    with pytest.raises(IndexError, match='No energy levels available for Fe 1'):
        ion[0]
def test_repr_no_levels(hdf5_dbase_root):
    """
    Ensures the repr can be printed without errors even when
    no energy level or transition information is available.
    """
    assert fiasco.Ion('Fe 1', temperature, hdf5_dbase_root=hdf5_dbase_root).__repr__
def test_next_ion(ion):
    """next_ion() increments the ionization stage for the same element."""
    next_ion = ion.next_ion()
    assert next_ion.ionization_stage == ion.ionization_stage + 1
    assert next_ion.atomic_number == ion.atomic_number
def test_previous_ion(ion):
    """previous_ion() decrements the ionization stage for the same element."""
    prev_ion = ion.previous_ion()
    assert prev_ion.ionization_stage == ion.ionization_stage - 1
    assert prev_ion.atomic_number == ion.atomic_number
| wtbarnes/fiasco | fiasco/tests/test_ion.py | test_ion.py | py | 14,663 | python | en | code | 18 | github-code | 13 |
24769689499 | """Collision_Callback.py
This is the base component for all collision components. Because it calls
owner's on_collision callback, it allows components derived from it to react to
that callback. It also takes care of tracking sprite group names, so that PUG
can give the user a nice dropdown of all group names used in the scene.
"""
from weakref import ref
from pug.component import *
from pig.PigDirector import PigDirector
from pig.editor.agui.group_dropdown import GroupDropdown, register_group, \
unregister_group
from pig.components import SpriteComponent
from pig import Sprite
class Collision_Callback( SpriteComponent):
    """Object's "on_collision" method is called when it collides
    arguments: on_collision( toSprite, fromSprite, toGroup, my_group)"""
    #component_info
    _set = 'pig'  # component set shown in the PUG editor
    _type = 'collision'  # component category
    _class_list = [Sprite]  # object classes this component can be attached to
    # attributes: ['name','desc'] or ['name', agui, {'doc':'desc', extra info}]
    _collision_list = [
            ['their_group', GroupDropdown, {'doc':"Group to collide with"}],
            ['my_group', GroupDropdown,
                {'doc':
                "The group this object joins and uses for collision tests"}]
            ]
    _field_list = _collision_list
    # defaults
    _their_group = "colliders"
    _my_group = "colliders"
    @component_method
    def on_added_to_scene(self):
        "Register for object.on_collision callback when object added to scene"
        if self._my_group:
            self.owner.join_collision_group( self._my_group)
        if self._their_group:
            # ignore_duplicate=True so re-adding the object does not register
            # the same callback twice.
            PigDirector.scene.register_collision_callback( self.owner,
                                                   self.owner.on_collision,
                                                   self.my_group,
                                                   self.their_group,
                                                   ignore_duplicate=True)
    @component_method
    def on_collision(self, toSprite, fromSprite, toGroup, my_group):
        "This component doesn't do anything in the on_collision callback"
        # Subclasses derived from this component override this hook.
        pass
    ### track all available groups for editor dropdowns
    ### all the functions below are for the editor and aren't really part of
    ### collision callbacks
    def __init__(self, *args, **kwargs):
        # A weakref to self is used in the group-registration key so a dead
        # component does not keep group names alive in the editor dropdowns.
        self.ref = ref(self)
        Component.__init__(self, *args, **kwargs)
    def __del__(self):
        "__del__(): when component is deleted, unregister groups from gui"
        if self.ref():
            unregister_group( (self.ref, "their_group"))
            unregister_group( (self.ref, "my_group"))
    def get_their_group(self):
        return self._their_group
    def set_their_group(self, group):
        # Keep the editor's list of known group names up to date.
        register_group( (self.ref, "their_group"), group)
        self._their_group = group
    their_group = property (get_their_group, set_their_group)
    def get_my_group(self):
        return self._my_group
    def set_my_group(self, group):
        register_group( (self.ref, "my_group"), group)
        self._my_group = group
    my_group = property (get_my_group, set_my_group)
# Make the component available to the PUG component registry.
register_component( Collision_Callback)
| sunsp1der/pug | pig/components/collision/Collision_Callback.py | Collision_Callback.py | py | 3,307 | python | en | code | 0 | github-code | 13 |
2547526877 | from packages.Board import Board
class Game:
    """Top-level game controller: builds the board, wires the input
    handlers, and starts the (blocking) main loop."""
    def __init__(self):
        print('Game instance')
        self.board = Board()
        # Initial demo position; '' marks an empty square.
        self.board.draw_squares([
            'o', 'o', 'X',
            '', 'X', 'o',
            'X', '', 'x'
        ])
        self.board.on_mouse_click(self._mouse_click_handler)
        self.board.on_keyboard_press(self._keyboard_press_handler)
        # Enters the board's event loop; does not return until the game ends.
        self.board.loop()
    def _mouse_click_handler(self, coordinate):
        """Log the clicked board coordinate and play the click sound."""
        print('Clicked:', coordinate)
        self.board.play_sound()
    def _keyboard_press_handler(self, keyinfo):
        """Log the pressed key."""
        print('Pressed:', keyinfo)
| rafaeltmbr/tic-tac-toe | src/packages/Game.py | Game.py | py | 613 | python | en | code | 0 | github-code | 13 |
5948004290 | import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
import pickle
#load data
# NOTE(review): `error_bad_lines` is deprecated/removed in newer pandas
# (use `on_bad_lines='skip'`) -- confirm the pinned pandas version.
users = pd.read_csv('datasets/BX-Users.csv', sep=";", error_bad_lines=False, encoding='latin-1')
books = pd.read_csv('datasets/BX-Books.csv', sep=";", error_bad_lines=False, encoding='latin-1')
rating = pd.read_csv('datasets/BX-Book-Ratings.csv', sep=";", error_bad_lines=False, encoding='latin-1')
#pre-processing data: keep relevant book columns and normalize column names
books = books[['ISBN', 'Book-Title', 'Book-Author', 'Year-Of-Publication', 'Publisher']]
books.rename(
    columns={'Book-Title': 'title', 'Book-Author': 'author', 'Year-Of-Publication': 'year', 'Publisher': 'publisher'},
    inplace=True)
users.rename(columns={'User-ID': 'user_id', 'Location': 'location', 'Age': 'age'}, inplace=True)
rating.rename(columns={'User-ID': 'user_id', 'Book-Rating': 'rating'}, inplace=True)
#Exploratory Data Analysis
#extract users with ratings of more than 200
x = rating['user_id'].value_counts() > 200
y = x[x].index
rating = rating[rating['user_id'].isin(y)]
#merge ratings with books
rating_with_books = rating.merge(books, on='ISBN')
number_rating = rating_with_books.groupby('title')['rating'].count().reset_index() ## total rating of a book
number_rating.rename(columns={'rating': 'number of rating'},
                     inplace=True) # feature engineering : changing the column names
final_ratings = rating_with_books.merge(number_rating, on='title')
#extract books that have received more than 50 ratings
final_ratings = final_ratings[
    final_ratings['number of rating'] >= 50]
final_ratings.drop_duplicates(['user_id', 'title'], inplace=True)
#creating pivot table: rows = book titles, columns = users, values = ratings
book_pivot = final_ratings.pivot_table(columns='user_id', index='title', values='rating')
book_pivot.fillna(0, inplace=True)
#Modeling
book_sparse = csr_matrix(book_pivot)
model = NearestNeighbors(algorithm='brute') #train the nearest neighbors algorithm.
model.fit(book_sparse)
# Query the neighbours of one example book (pivot row 237).
distances, suggestions = model.kneighbors(book_pivot.iloc[237, :].values.reshape(1, -1))
# kneighbors returns a (1, k) array, so this loop runs once and prints all
# k suggested titles at once.
for i in range(len(suggestions)):
    print(book_pivot.index[suggestions[i]])
def recommend(ID):
    """Print and return the titles of books most similar to one book.

    Parameters
    ----------
    ID : int
        Row index into ``book_pivot`` identifying the query book.

    Returns
    -------
    list
        The recommended titles, excluding the query book itself. (The
        function previously returned ``None``; returning the list is
        backward-compatible and makes the result reusable.)
    """
    distances, suggestions = model.kneighbors(
        book_pivot.iloc[ID, :].values.reshape(1, -1))
    # kneighbors returns arrays of shape (1, n_neighbors); take the row.
    suggestions = suggestions[0]
    # The nearest neighbour is always the query book itself, so skip it.
    titles = [book_pivot.index[idx] for idx in suggestions[1:]]
    for title in titles:
        print(title)
    return titles
# Smoke-test the recommender on one example book.
recommend(1)
# Persist the fitted nearest-neighbours model; `with` guarantees the file
# is closed even if pickling fails (the old open/dump/close leaked the
# handle on error).
with open("book_recommender.pkl", "wb") as pickle_out:
    pickle.dump(model, pickle_out)
| tenwang10/Library | RecommendationSystem.py | RecommendationSystem.py | py | 2,477 | python | en | code | 0 | github-code | 13 |
3302411007 | from collections import Counter
from numpy import arange, delete, setdiff1d
from sklearn.model_selection import train_test_split as sk_train_test_split
from sklearn.utils import safe_indexing
from text_categorizer import pickle_manager
from text_categorizer.constants import random_state
from text_categorizer.logger import logger
def train_test_split(corpus, classifications, test_size, preprocessed_data_file, force, indexes_to_remove):
    """Split *corpus*/*classifications* into training and test subsets.

    Subset indexes stored in *preprocessed_data_file* are reused unless
    *force* is set or the stored metadata is incomplete, in which case a new
    stratified split is generated and persisted. Examples listed in
    *indexes_to_remove* are excluded from both subsets.

    Returns ``(corpus_train, corpus_test, classifications_train,
    classifications_test)``.
    """
    metadata = pickle_manager.get_docs_metadata(preprocessed_data_file)
    projected_test_size = metadata.get('test_size')
    training_set_indexes = metadata.get('training_set_indexes')
    test_set_indexes = metadata.get('test_set_indexes')
    # Regenerate when forced or when any piece of split metadata is absent.
    perform_split = force or projected_test_size is None or training_set_indexes is None or test_set_indexes is None
    if perform_split:
        logger.info("Generating new training and test subsets.")
        m = _train_test_split(metadata, test_size, classifications, indexes_to_remove)
        pickle_manager.set_docs_metadata(metadata=m, filename=preprocessed_data_file)
        # Re-read so the values below reflect exactly what was persisted.
        metadata = pickle_manager.get_docs_metadata(preprocessed_data_file)
        projected_test_size = metadata.get('test_size')
        training_set_indexes = metadata.get('training_set_indexes')
        test_set_indexes = metadata.get('test_set_indexes')
    else:
        logger.info("Using training and test subsets chosen in a previous execution.")
        # Warn when the stored split no longer matches the requested
        # parameters or the current dataset size.
        if projected_test_size != test_size or len(training_set_indexes) + len(test_set_indexes) != len(classifications) - len(indexes_to_remove):
            actual_test_size = len(test_set_indexes) / (len(classifications) - len(indexes_to_remove))
            logger.warning("The test subset corresponds to %s%% of the dataset instead of %s%%. The regeneration of the subsets can be enabled in the configuration file." % (actual_test_size, test_size))
        if not _is_stratified(classifications, metadata, indexes_to_remove):
            logger.warning("The training and test subsets are not correctly stratified. Are you using the correct classification column and ignoring the same examples? The regeneration of the subsets can be enabled in the configuration file.")
    return get_train_test(corpus, classifications, training_set_indexes, test_set_indexes, indexes_to_remove)
def get_train_test(corpus, classifications, training_set_indexes, test_set_indexes, indexes_to_remove):
    """Select the training and test subsets of *corpus* and *classifications*.

    Indexes listed in *indexes_to_remove* are dropped from both subsets.
    Returns ``(corpus_train, corpus_test, classifications_train,
    classifications_test)``.
    """
    # The stored index lists must not contain duplicates.
    assert len(training_set_indexes) == len(set(training_set_indexes))
    assert len(test_set_indexes) == len(set(test_set_indexes))
    # Drop any example flagged for removal from both subsets.
    kept_train = setdiff1d(training_set_indexes, indexes_to_remove, assume_unique=True)
    kept_test = setdiff1d(test_set_indexes, indexes_to_remove, assume_unique=True)
    return (safe_indexing(corpus, kept_train),
            safe_indexing(corpus, kept_test),
            safe_indexing(classifications, kept_train),
            safe_indexing(classifications, kept_test))
def _train_test_split(metadata, test_size, classifications, indexes_to_remove):
    """Return a copy of *metadata* holding a fresh stratified train/test split.

    Examples in *indexes_to_remove* are excluded before splitting; the split
    is stratified on the remaining class labels and seeded with the shared
    ``random_state`` so it is reproducible.
    """
    m = metadata.copy()
    m['test_size'] = test_size
    idxs = arange(len(classifications))
    idxs = setdiff1d(idxs, indexes_to_remove, assume_unique=True)
    # Labels of the examples that remain after removal; used for stratification.
    class_labels = delete(classifications, indexes_to_remove)
    train_idxs, test_idxs = sk_train_test_split(idxs, test_size=test_size, random_state=random_state, shuffle=True, stratify=class_labels)
    m['training_set_indexes'] = train_idxs
    m['test_set_indexes'] = test_idxs
    return m
def _is_stratified(classifications, metadata, indexes_to_remove):
    """Check that the stored split has the class distribution a fresh
    stratified split of the same size would produce.

    Per-class label counts (not exact indexes) are compared, so this detects
    a mismatched classification column or removal list, not mere reordering.
    """
    train_labels = safe_indexing(classifications, metadata['training_set_indexes'])
    test_labels = safe_indexing(classifications, metadata['test_set_indexes'])
    class_labels = delete(classifications, indexes_to_remove)
    actual_test_size = len(test_labels) / len(class_labels)
    # Regenerate a reference split of the same proportions for comparison.
    m = _train_test_split(metadata, actual_test_size, classifications, indexes_to_remove)
    expected_train_labels = safe_indexing(classifications, m['training_set_indexes'])
    expected_test_labels = safe_indexing(classifications, m['test_set_indexes'])
    return Counter(train_labels) == Counter(expected_train_labels) and Counter(test_labels) == Counter(expected_test_labels)
| LuisVilarBarbosa/TextCategorizer | text_categorizer/train_test_split.py | train_test_split.py | py | 4,312 | python | en | code | 0 | github-code | 13 |
40740259957 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# Встроенные модули
import time, sys, subprocess
from threading import Thread
# Внешние модули
try:
import psycopg2
except ModuleNotFoundError as err:
print(err)
sys.exit(1)
# Внутренние модули
try:
from mod_common import *
except ModuleNotFoundError as err:
print(err)
sys.exit(1)
# Class that reads per-IP traffic from nftables (comments translated to English)
class traffic_nftables(Thread):
    # Initial parameters
    def __init__(self, threads_list, todolist):
        super().__init__()
        self.daemon = True
        self.threads_list = threads_list
        self.todolist = todolist
    # Thread that reads traffic counters from nftables and updates the database
    def run(self):
        # Write to the log file
        log_write('Thread traffic_nftables running')
        try:
            # Connect to the database
            conn_pg = psycopg2.connect(database='nifard', user=get_config('DatabaseUserName'), password=get_config('DatabasePassword') )
        except psycopg2.DatabaseError as error:
            log_write(error)
            sys.exit(1)
        nft_counters_reset = False
        # Main loop: read per-IP traffic figures from nftables
        while not app_work.empty():
            # Check that the 'traffic' table exists
            if subprocess.call('nft list tables | grep traffic',stdout=subprocess.PIPE, shell=True) == 0:
                # At hour "00", reset the nftables traffic counters (once)
                if (str(datetime.now()).split(':')[0]).split()[1] == "00" and not nft_counters_reset:
                    result = subprocess.check_output('nft reset counters', shell=True).decode()
                    nft_counters_reset = True
                    log_write('Counters in nftables reseted')
                # At hour "01", arm the reset flag again for the next day
                if (str(datetime.now()).split(':')[0]).split()[1] == "01":
                    nft_counters_reset = False
                # Fetch traffic data for all IP addresses
                result = subprocess.check_output('nft list counters | head -n -2 | tail +2 | xargs | tr "{" " " | sed "s/} /\\n/g" | cut -d" " -f2,8', shell=True).decode()
                for line in result.splitlines():
                    # Keep only IP addresses matching the ADUserIPMask prefix
                    if line.find(get_config('ADUserIPMask')) != -1:
                        if app_work.empty(): break # Re-check for thread shutdown
                        ip_addr = line.split()[0] # IP address
                        traffic_nft = line.split()[1] # Traffic value from nftables
                        # Look up the selected IP address in the database
                        cursor = conn_pg.cursor()
                        try:
                            cursor.execute("select ip,traffic from users where ip = %s;", (ip_addr,))
                        except psycopg2.DatabaseError as error:
                            log_write(error)
                            sys.exit(1)
                        conn_pg.commit()
                        rows = cursor.fetchall()
                        for row in rows:
                            traffic_db = row[1] # Traffic value from the database
                            break
                        # If the IP address exists and its traffic changed, update it
                        if rows and (int(traffic_db) != int(traffic_nft)):
                            try:
                                cursor.execute("update users set traffic = %s where ip = %s;", (traffic_nft,ip_addr,))
                            except psycopg2.DatabaseError as error:
                                log_write(error)
                                sys.exit(1)
                            conn_pg.commit()
                        cursor.close()
            # Sleep between polls, waking early on shutdown
            for tick in range(5):
                if app_work.empty():
                    break
                time.sleep(1)
            if app_work.empty(): break # Re-check for thread shutdown
        conn_pg.close()
        # Write to the log file
        log_write('Thread traffic_nftables stopped')
        # Remove this thread from the thread list
        self.threads_list.get()
| surarim/nifard | mod_traffic_nftables.py | mod_traffic_nftables.py | py | 4,215 | python | ru | code | 0 | github-code | 13 |
14734375605 | '''
328. Odd Even Linked List
Solution:
'''
class Solution:
    def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Rearrange the list so nodes at odd positions (1st, 3rd, ...) come
        first, followed by nodes at even positions, preserving the relative
        order inside each group. O(n) time, O(1) extra space."""
        # Lists with fewer than three nodes are already in odd-even order.
        if not head or not head.next or not head.next.next:
            return head
        odd_tail = head
        even_tail = head.next
        even_head = even_tail
        # Weave the remaining nodes onto the two chains in alternation.
        while even_tail and even_tail.next:
            odd_tail.next = even_tail.next
            odd_tail = odd_tail.next
            even_tail.next = odd_tail.next
            even_tail = even_tail.next
        # Append the even chain after the odd chain.
        odd_tail.next = even_head
        return head
74166916819 | """
This script takes csv file of the drone trajectory dataset with the following header
timestamp,tx,ty,tz,qx,qy,qz,qw
* it takes only the tx,ty,tz points and and populate two numpy arrays,
u_in: (N, 3*window_size_u)
y_meas; (N, 3*window_size_y)
* then it saves the prepared dataset to be trained by the DynoNet network
* It is assumed that the data points are sampled evenly in time
* Load u_in and y_meas from the saved file
loaded_data = np.load("data_processed.npz")
u_in_loaded = loaded_data["u_in"]
y_meas_loaded = loaded_data["y_meas"]
print("Loaded u_in shape:", u_in_loaded.shape)
print("Loaded y_meas shape:", y_meas_loaded.shape)
"""
import numpy as np
import csv
import os
import sys
import argparse
def read_csv_files_to_numpy(directory_path):
    """Load every ``.csv``/``.txt`` file in *directory_path* into a numpy array.

    Each file must have a header row followed by rows whose first four
    columns are numeric (timestamp, x, y, z).

    Returns a dict mapping file name (without extension) to an (N, 4) float
    array, or ``None`` when the directory holds no matching files. Files
    that fail to parse are reported and skipped (best effort).
    """
    data_files = [f for f in os.listdir(directory_path)
                  if f.endswith(('.csv', '.txt'))]
    if not data_files:
        print("No CSV files (.cvs , .txt) found in the specified directory.")
        return None
    pos_arrays = {}
    for data_file in data_files:
        full_path = os.path.join(directory_path, data_file)
        try:
            with open(full_path, 'r') as handle:
                reader = csv.reader(handle)
                next(reader)  # skip the header row
                pos_arrays[data_file[:-4]] = np.array(
                    [row[0:4] for row in reader], dtype=float)
        except Exception as exc:
            print(f"Error reading {data_file}: {exc}")
    print(f"Number of datasets: {len(pos_arrays)}")
    return pos_arrays
def process_data(data, dt=0.1, inp_len=10, out_len=5):
    """Slice stamped position datasets into sliding input/output windows.

    Parameters
    ----------
    data : dict[str, numpy.ndarray]
        Mapping of dataset name to an (N, 4) array of rows ``[t, x, y, z]``.
    dt : float
        Sampling time in seconds (default 0.1); samples are assumed evenly
        spaced in time.
    inp_len : int
        Length (in points) of each input trajectory sample.
    out_len : int
        Length (in points) of each output/predicted trajectory sample.

    Returns
    -------
    tuple of numpy.ndarray
        ``(merged positions, position inputs, position outputs,
        velocity inputs, velocity outputs)``; each window is flattened to a
        row of ``3 * window_length`` values (x, y, z per point).
    """
    merged_pos_in=[]
    merged_pos_out = []
    merged_vel_in = []
    merged_vel_out = []
    merged_pos_dataset = []
    for file_name, array_data in data.items():
        # print(f"\nProcessing data from '{file_name}.csv' ...")
        # print(f"Size of dataset {file_name}: {len(array_data)} \n")
        merged_pos_dataset.append(array_data)
        # Estimate velocity by finite differences of consecutive samples.
        Pnow = array_data[1:,:]
        Plast = array_data[:-1, :]
        deltas = Pnow-Plast # [dt, dx, dy, dz]
        # print("deltas:", deltas)
        V=np.array([Pnow[:,0], deltas[:,1]/deltas[:,0], deltas[:,2]/deltas[:,0], deltas[:,3]/deltas[:,0] ])
        V = V.T # [vx, vy, vz]
        # Process the dataset into u_in and y_meas arrays
        # NOTE(review): freq is computed but never used.
        freq = 1/dt # Sampling freq Hz
        window_size_u = int(inp_len) # Window size for u_in
        window_size_y = int(out_len) # Window size for y_meas
        pos_in = []
        vel_in = []
        pos_out = []
        vel_out = []
        # NOTE(review): exit(1) aborts the whole process on a short dataset;
        # consider raising ValueError instead so callers can recover.
        if (len(array_data) < (window_size_u+window_size_y)):
            print("Dataset does not have enough points {} < {}".format(len(array_data),window_size_y+window_size_u))
            exit(1)
        # position trajectory: column 0 (timestamp) is dropped by the 1: slice
        for i in range(window_size_u, len(array_data) - window_size_y):
            pos_in.append(array_data[i - window_size_u : i, 1:].flatten())
            pos_out.append(array_data[i : i + window_size_y, 1:].flatten())
        pos_in = np.array(pos_in)
        pos_in = pos_in.astype(np.float32)
        pos_out = np.array(pos_out)
        pos_out = pos_out.astype(np.float32)
        merged_pos_in.append(pos_in)
        merged_pos_out.append(pos_out)
        # print(f"position datasets shape in {file_name}: {array_data.shape}")
        # print(f"pos_in shape in {file_name}: {pos_in.shape}")
        # print(f"pos_out shape in {file_name}: {pos_out.shape}")
        # print(f"Type of pos_in in {file_name}: {pos_in.dtype}")
        # print(f"Type of pos_out in {file_name}: {pos_out.dtype}")
        if (len(V) < (window_size_u+window_size_y)):
            print("Velocity dataset does not have enough points {} < {}".format(len(V),window_size_y+window_size_u))
            exit(1)
        # velocity trrajectory
        for i in range(window_size_u, len(V) - window_size_y):
            vel_in.append(V[i - window_size_u : i, 1:].flatten())
            vel_out.append(V[i : i + window_size_y, 1:].flatten())
        vel_in = np.array(vel_in)
        vel_in = vel_in.astype(np.float32)
        vel_out = np.array(vel_out)
        vel_out = vel_out.astype(np.float32)
        merged_vel_in.append(vel_in)
        merged_vel_out.append(vel_out)
        # print(f"velocity datasets shape in {file_name}: {V.shape}")
        # print(f"vel_in shape in {file_name}: {vel_in.shape}")
        # print(f"vel_out shape in {file_name}: {vel_out.shape}")
        # print(f"Type of vel_in in {file_name}: {vel_in.dtype}")
        # print(f"Type of vel_out in {file_name}: {vel_out.dtype}")
    # merge all lists into numpy arrays
    merged_pos_dataset_np = np.concatenate(merged_pos_dataset, axis=0)
    merged_pos_in_np = np.concatenate(merged_pos_in, axis=0)
    merged_pos_out_np = np.concatenate(merged_pos_out, axis=0)
    merged_vel_in_np = np.concatenate(merged_vel_in, axis=0)
    merged_vel_out_np = np.concatenate(merged_vel_out, axis=0)
    print(f"Number of samples in the concatenated original position dataset = {len(merged_pos_dataset_np)}")
    print(f"Number of samples of processed datasets = {len(merged_pos_in_np)}")
    print(f"shape of merged_pos_in_np: {merged_pos_in_np.shape}")
    print(f"Sampling time: {dt} seconds")
    print(f"Input trajectory lenght: {inp_len} points")
    print(f"Output trajectory length: {out_len} points")
    return merged_pos_dataset_np, merged_pos_in_np, merged_pos_out_np, merged_vel_in_np, merged_vel_out_np
# # Load the CSV file into a NumPy array
# filename = "indoor_forward_7_davis_with_gt.txt"
# file_path = os.path.join("resampled_100ms_dataset", filename)
# out_file=""
# dt = np.array(0.1)
# with open(file_path, "r") as file:
# csv_reader = csv.reader(file)
# headers = next(csv_reader) # Read the header row
# pos_dataset = np.array([row[0:4] for row in csv_reader], dtype=float)
# # Estimate velocity
# Pnow = pos_dataset[1:,:]
# Plast = pos_dataset[:-1, :]
# deltas = Pnow-Plast # [dt, dx, dy, dz]
# # print("deltas:", deltas)
# V=np.array([Pnow[:,0], deltas[:,1]/deltas[:,0], deltas[:,2]/deltas[:,0], deltas[:,3]/deltas[:,0] ])
# V = V.T # [vx, vy, vz]
# # Process the dataset into u_in and y_meas arrays
# freq = 10 # Sampling freq Hz
# window_size_u = freq # Window size for u_in
# window_size_y = int(freq/2) # Window size for y_meas
# pos_in = []
# vel_in = []
# pos_out = []
# vel_out = []
# if (len(pos_dataset) < (window_size_u+window_size_y)):
# print("Dataset does not have enough points {} < {}".format(len(dataset),window_size_y+window_size_u))
# exit(1)
# # position trajectory
# for i in range(window_size_u, len(pos_dataset) - window_size_y):
# pos_in.append(pos_dataset[i - window_size_u : i, 1:].flatten())
# pos_out.append(pos_dataset[i : i + window_size_y, 1:].flatten())
# pos_in = np.array(pos_in)
# pos_in = pos_in.astype(np.float32)
# pos_out = np.array(pos_out)
# pos_out = pos_out.astype(np.float32)
# print("position datasets shape:", pos_dataset.shape)
# print("pos_in shape:", pos_in.shape)
# print("pos_out shape:", pos_out.shape)
# print("Type of pos_in:", pos_in.dtype)
# print("Type of pos_out", pos_out.dtype)
# if (len(V) < (window_size_u+window_size_y)):
# print("Velocity dataset does not have enough points {} < {}".format(len(V),window_size_y+window_size_u))
# exit(1)
# # velocity trrajectory
# for i in range(window_size_u, len(V) - window_size_y):
# vel_in.append(V[i - window_size_u : i, 1:].flatten())
# vel_out.append(V[i : i + window_size_y, 1:].flatten())
# vel_in = np.array(vel_in)
# vel_in = vel_in.astype(np.float32)
# vel_out = np.array(vel_out)
# vel_out = vel_out.astype(np.float32)
# print("velocity datasets shape:", V.shape)
# print("vel_in shape:", vel_in.shape)
# print("vel_out shape:", vel_out.shape)
# print("Type of vel_in:", vel_in.dtype)
# print("Type of vel_out", vel_out.dtype)
# # Save to a file
# output_dir = "dynonet_datasets"
# # Specify the directory name you want to check/create
# directory_name = "my_directory"
# # Get the current directory path
# current_directory = os.getcwd()
# # Concatenate the current directory path with the directory name
# directory_path = os.path.join(current_directory, output_dir)
# # Check if the directory already exists
# if not os.path.exists(directory_path):
# # Create the directory if it doesn't exist
# os.makedirs(directory_path)
# print("Directory created:", directory_path)
# else:
# print("Directory already exists:", directory_path)
# output_file = directory_path+"/indoor_forward_7_davis.npz"
# np.savez(output_file, pos_dataset=pos_dataset, pos_in=pos_in, pos_out=pos_out, vel_in=vel_in, vel_out=vel_out, dt=dt)
# print("Arrays saved to", output_file)
# # Load data example
# # loaded_data = np.load("data_processed.npz")
# # u_in_loaded = loaded_data["u_in"]
# # y_meas_loaded = loaded_data["y_meas"]
# # print("Loaded u_in shape:", u_in_loaded.shape)
# # print("Loaded y_meas shape:", y_meas_loaded.shape)
def main():
    """Placeholder entry point; the actual work happens under __main__ below."""
    pass
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Read datasets from CSV files from a directory and prepare them fro Dynonet.")
    parser.add_argument("input_directory", help="Path to the directory containing CSV files.")
    parser.add_argument("-o", "--output_directory", help="Optional path to save the concatenated numpy array as a CSV file.")
    parser.add_argument("-of", "--output_file", help="Optional name of the file that contains the processed dataset as .npz .")
    parser.add_argument("-iL", "--input_length", type=int, help="Lenght of each input trajectory sample")
    parser.add_argument("-oL", "--output_length", type=int, help="Lenght of each output/predicted trajectory")
    parser.add_argument("-dt", "--sampling_time", type=float, default=0.1, help="Sampling time in seconds")
    args = parser.parse_args()
    # argparse already applies the 0.1 s default, so use the parsed value
    # directly. The old `if args.sampling_time:` guard left `dt` undefined
    # when the user passed `-dt 0`.
    dt = args.sampling_time
    # Fall back to the documented defaults when the lengths are omitted
    # (argparse leaves them as None).
    input_length = args.input_length if args.input_length else 10
    output_length = args.output_length if args.output_length else 5
    directory_path = args.input_directory
    data = read_csv_files_to_numpy(directory_path)
    # BUG FIX: forward the command-line sampling time to process_data
    # (it was previously hard-coded to 0.1 even when -dt was given).
    merged_pos_dataset_np, merged_pos_in_np, merged_pos_out_np, merged_vel_in_np, merged_vel_out_np = process_data(data=data, dt=dt, inp_len=input_length, out_len=output_length)
    # Save the processed arrays to a .npz file if an output directory is provided
    if args.output_directory:
        output_file = args.output_directory+"/dynonet_dataset.npz"
        if args.output_file:
            output_file = args.output_directory+"/"+args.output_file+".npz"
        np.savez(output_file, pos_dataset=merged_pos_dataset_np, pos_in=merged_pos_in_np, pos_out=merged_pos_out_np, vel_in=merged_vel_in_np, vel_out=merged_vel_out_np, dt=dt)
        print("Arrays saved to", output_file)
| mzahana/dynonet_trajectory_prediction | prep_dynonet_dataset.py | prep_dynonet_dataset.py | py | 11,254 | python | en | code | 1 | github-code | 13 |
def compute_bmi(height_cm, weight_kg):
    """Return the body-mass index for a height in centimetres and a weight
    in kilograms."""
    height_m = height_cm / 100
    return weight_kg / (height_m * height_m)


def bmi_category(bmi):
    """Return the weight category for a BMI value (same thresholds and
    output strings as before; 'Severly' typo fixed)."""
    if bmi < 16:
        return "Severely underweight"
    elif bmi <= 18.5:
        return "Underweight"
    elif bmi <= 25:
        return "Normal"
    elif bmi <= 30:
        return "overweight"
    else:
        return "obese"


if __name__ == "__main__":
    # int() mirrors the original behaviour of accepting whole numbers only.
    h = int(input("What is your height (cm)?"))
    w = int(input("What is your weight (kg)?"))
    BMI = compute_bmi(h, w)
    print("BMI: ", BMI)
    print(bmi_category(BMI))
print ("obese") | Nampq281/phamquynam-fundamentals-c4e21 | session02/homework02/serious_1.py | serious_1.py | py | 336 | python | en | code | 0 | github-code | 13 |
498785317 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as imagemodels
import torch.utils.model_zoo as model_zoo
from torchvision import models
from cfg.Pretrain.config import cfg
class Resnet18(imagemodels.ResNet):
    """ResNet-18 feature extractor: the classifier head (avgpool + fc) is
    dropped and a 1x1 convolution maps the 512-channel feature map to
    ``embedding_dim`` channels."""
    def __init__(self, embedding_dim=1024, pretrained=False):
        super(Resnet18, self).__init__(imagemodels.resnet.BasicBlock, [2, 2, 2, 2])
        if pretrained:
            # Load ImageNet weights before discarding the classifier head.
            self.load_state_dict(model_zoo.load_url(imagemodels.resnet.model_urls['resnet18']))
        # Drop the classification head; only convolutional features are used.
        self.avgpool = None
        self.fc = None
        # 1x1 conv projecting the 512 feature channels to the embedding size.
        self.embedder = nn.Conv2d(512, embedding_dim, kernel_size=1, stride=1, padding=0)
        self.embedding_dim = embedding_dim
        self.pretrained = pretrained
    def forward(self, x):
        """Return the embedded convolutional feature map for images ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.embedder(x)
        return x
class Resnet34(imagemodels.ResNet):
    """ResNet-34 feature extractor: classifier head removed, 1x1 conv
    projects the 512-channel feature map to ``embedding_dim`` channels."""
    def __init__(self, embedding_dim=1024, pretrained=False):
        super(Resnet34, self).__init__(imagemodels.resnet.BasicBlock, [3, 4, 6, 3])
        if pretrained:
            # Load ImageNet weights before discarding the classifier head.
            self.load_state_dict(model_zoo.load_url(imagemodels.resnet.model_urls['resnet34']))
        self.avgpool = None
        self.fc = None
        self.embedder = nn.Conv2d(512, embedding_dim, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Return the embedded convolutional feature map for images ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.embedder(x)
        return x
class Resnet50(imagemodels.ResNet):
    """ResNet-50 (Bottleneck) backbone truncated before pooling."""

    def __init__(self, embedding_dim=1024, pretrained=False):
        super(Resnet50, self).__init__(imagemodels.resnet.Bottleneck, [3, 4, 6, 3])
        if pretrained:
            self.load_state_dict(model_zoo.load_url(imagemodels.resnet.model_urls['resnet50']))
        # Drop the classification head; only convolutional features are used.
        self.avgpool = None
        self.fc = None
        # Bottleneck stages end with 2048 channels (vs 512 for BasicBlock nets).
        self.embedder = nn.Conv2d(2048, embedding_dim, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Run the ResNet stem + 4 stages, then the embedding projection."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.embedder(x)
        return x
class VGG16(nn.Module):
    """VGG-16 feature extractor with the final max-pool replaced by an embedding conv."""

    def __init__(self, embedding_dim=1024, pretrained=False):
        super(VGG16, self).__init__()
        seed_model = imagemodels.__dict__['vgg16'](pretrained=pretrained).features
        seed_model = nn.Sequential(*list(seed_model.children())[:-1]) # remove final maxpool
        last_layer_index = len(list(seed_model.children()))
        # Append a 3x3 conv mapping 512 channels to embedding_dim.
        seed_model.add_module(str(last_layer_index),
                              nn.Conv2d(512, embedding_dim, kernel_size=(3,3), stride=(1,1), padding=(1,1)))
        self.image_model = seed_model

    def forward(self, x):
        """Return the spatial embedding map for a batch of images."""
        x = self.image_model(x)
        return x
class Inception_v3(nn.Module):
    """Frozen, pretrained Inception-v3 used as a global image-feature extractor.

    All parameters are frozen (requires_grad=False); forward returns the
    2048-dim pooled feature per image.
    """

    def __init__(self):
        super(Inception_v3, self).__init__()
        model = models.inception_v3()
        url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
        model.load_state_dict(model_zoo.load_url(url))
        # Freeze the whole backbone — it is used purely for feature extraction.
        for param in model.parameters():
            param.requires_grad = False
        print('Load pretrained model from ', url)
        # print(model)
        self.define_module(model)

    def define_module(self, model):
        """Copy the pretrained sub-modules onto this wrapper (drops the classifier)."""
        self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3
        self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3
        self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3
        self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1
        self.Conv2d_4a_3x3 = model.Conv2d_4a_3x3
        self.Mixed_5b = model.Mixed_5b
        self.Mixed_5c = model.Mixed_5c
        self.Mixed_5d = model.Mixed_5d
        self.Mixed_6a = model.Mixed_6a
        self.Mixed_6b = model.Mixed_6b
        self.Mixed_6c = model.Mixed_6c
        self.Mixed_6d = model.Mixed_6d
        self.Mixed_6e = model.Mixed_6e
        self.Mixed_7a = model.Mixed_7a
        self.Mixed_7b = model.Mixed_7b
        self.Mixed_7c = model.Mixed_7c
        # self.emb_features = conv1x1(768, self.nef)
        # self.emb_cnn_code = nn.Linear(2048, cfg.SPEECH.embedding_dim)

    # def init_trainable_weights(self):
    #     initrange = 0.1
    #     # self.emb_features.weight.data.uniform_(-initrange, initrange)
    #     self.emb_cnn_code.weight.data.uniform_(-initrange, initrange)

    def forward(self, x):
        """Return a (batch, 2048) global feature tensor for input images."""
        features = None
        # --> fixed-size input: batch x 3 x 299 x 299
        x = nn.functional.interpolate(x,size=(299, 299), mode='bilinear', align_corners=False) # up- or down-sample to the required size
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        # image region features
        # features = x
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        # x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        # x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        # x = x.view(x.size(0), -1)  # for visual_feature_extraction.py use this as the output
        # Global average pool over the spatial dimensions.
        x = x.mean(dim=(2,3))
        # 2048
        # global image features
        # cnn_code = self.emb_cnn_code(x)
        # 512
        # if features is not None:
        #     features = self.emb_features(features)
        return x #nn.functional.normalize(x, p=2, dim=1) #cnn_code #1024
# in input of this network is the image feature
# extracted from the pre-trained model
class LINEAR_ENCODER(nn.Module):
    """Single linear projection of image features, L2-normalised on output."""

    def __init__(self):
        super(LINEAR_ENCODER, self).__init__()
        self.L1 = nn.Linear(cfg.IMGF.input_dim, cfg.IMGF.embedding_dim)

    def init_trainable_weights(self):
        """Uniformly initialise the projection weights in [-0.1, 0.1]."""
        self.L1.weight.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        # Squeeze a singleton middle dimension: (B, 1, D) -> (B, D).
        features = input.squeeze(1) if len(input.shape) == 3 else input
        return nn.functional.normalize(self.L1(features), p=2, dim=1)
class LINEAR_ENCODER_2(nn.Module):
    """Two-layer MLP encoder: Linear -> BatchNorm -> ReLU -> Linear -> ReLU -> L2-norm.

    NOTE(review): self.b2 is constructed but never applied in forward —
    confirm whether the second BatchNorm was meant to be used.
    """

    def __init__(self):
        super(LINEAR_ENCODER_2,self).__init__()
        self.L1 = nn.Linear(cfg.IMGF.input_dim,cfg.IMGF.hid_dim)
        self.L2 = nn.Linear(cfg.IMGF.hid_dim,cfg.IMGF.embedding_dim)
        self.b1 = nn.BatchNorm1d(cfg.IMGF.hid_dim)
        self.b2 = nn.BatchNorm1d(cfg.IMGF.embedding_dim)  # unused in forward (see class note)
        self.relu = nn.ReLU()

    def init_trainable_weights(self):
        """Uniformly initialise both linear layers' weights in [-0.1, 0.1]."""
        initrange = 0.1
        # self.emb_features.weight.data.uniform_(-initrange, initrange)
        self.L1.weight.data.uniform_(-initrange, initrange)
        self.L2.weight.data.uniform_(-initrange, initrange)

    def forward(self, input):
        # Squeeze a singleton middle dimension: (B, 1, D) -> (B, D).
        if len(input.shape)==3:
            input = input.squeeze(1)
        x = self.L1(input)
        x = self.b1(x)
        x = self.relu(x)
        x = self.L2(x)
        x = self.relu(x)
        return nn.functional.normalize(x,p=2,dim=1)
class LINEAR_DECODER(nn.Module):
    """Linear map from the embedding space back to the raw feature space."""

    def __init__(self):
        super(LINEAR_DECODER, self).__init__()
        self.L1 = nn.Linear(cfg.IMGF.embedding_dim, cfg.IMGF.input_dim)

    def init_trainable_weights(self):
        """Uniformly initialise the weights in [-0.1, 0.1]."""
        self.L1.weight.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        return self.L1(input)
class LINEAR_DECODER_2(nn.Module):
    """Two-layer MLP decoder mapping embeddings back to raw image features.

    Bug fixes relative to the original:
    * ``super()`` was called with ``LINEAR_DECODER`` instead of
      ``LINEAR_DECODER_2``, raising a TypeError on instantiation.
    * ``init_trainable_weights`` called the misspelled Tensor method
      ``unifrom_`` instead of ``uniform_``.
    """

    def __init__(self):
        super(LINEAR_DECODER_2, self).__init__()
        self.L1 = nn.Linear(cfg.IMGF.embedding_dim, cfg.IMGF.hid_dim)
        self.L2 = nn.Linear(cfg.IMGF.hid_dim, cfg.IMGF.input_dim)
        self.relu = nn.ReLU()

    def init_trainable_weights(self):
        """Uniformly initialise both linear layers' weights in [-0.1, 0.1]."""
        initrange = 0.1
        self.L1.weight.data.uniform_(-initrange, initrange)
        self.L2.weight.data.uniform_(-initrange, initrange)

    def forward(self, input):
        # Squeeze a singleton middle dimension: (B, 1, D) -> (B, D).
        if len(input.shape) == 3:
            input = input.squeeze(1)
        x = self.L1(input)
        x = self.relu(x)
        x = self.L2(x)
        return x
31084184932 |
from treeBinarySearch import NodeABB
if __name__ == '__main__':
    def preOrdemArbin(node:NodeABB):
        """Pre-order traversal printing each node's value (root, left, right)."""
        if node is not None:
            print(node._data)
            if node._esq is not None:
                preOrdemArbin(node._esq)
            if node._dir is not None:
                preOrdemArbin(node._dir)
    def preOrdemArbin2(node:NodeABB):
        """Pre-order traversal using the accessor methods instead of the fields.

        NOTE(review): the recursive calls go to preOrdemArbin (not
        preOrdemArbin2) — likely a typo, although the printed output is the
        same.
        """
        if node is not None:
            print(node.raizArbin())
            if node.esqArbin() is not None:
                preOrdemArbin(node.esqArbin())
            if node.dirArbin() is not None:
                preOrdemArbin(node.dirArbin())
"""1) int pesoArbin( Arbin a){...}
Calcular e retornar o peso de uma árvore binária ( número de elementos da árvore).
Obs: a complexidade desta função é O(N)
"""
def pesoArbin(arbin:NodeABB):
#arbin vazia retornar zero
# ponto de parada
if(arbin is None): # arbin.vaziaArbin()
return 0
else:
return(1 + pesoArbin(arbin.esqArbin()) + pesoArbin(arbin.dirArbin()))
#return(1 + pesoArbin(arbin._esq) + pesoArbin(arbin._dir))
"""2) int estaArbin( Arbin a, TipoA elem){...}
Verificar se um elemento está presente em uma árvore binária.
Obs: a complexidade desta função é O(N) se a árvore estiver degenerada e O(log N) se a
árvore estiver balanceada(cheia).
"""
    def estaArbin(arbin:NodeABB, elem):
        """Return True if elem is present in the BST rooted at arbin.

        O(log N) on a balanced tree, O(N) if degenerate.
        """
        # Empty tree: elem cannot be present.
        # elem == root: found.
        # Otherwise use the BST property: descend left or right.
        if(arbin is None): #arbin.vaziaArbin()
            return False
        elif(arbin.raizArbin() == elem): #arbin._data == elem
            return True
        elif(elem < arbin.raizArbin()): # elem < root: search the left subtree
            return estaArbin(arbin.esqArbin(), elem) #arbin._esq
        else: # elem > arbin.raizArbin()
            return estaArbin(arbin.dirArbin(), elem)
        # else: # if arbin were an unordered binary tree:
        # return(estaArbin(arbin.esqArbin()) or estaArbin(arbin.dirArbin()))
    # BST insertion: returns the (possibly new) root of the subtree.
    def insArbinBusca(arbin:NodeABB, elem):
        """Insert elem into the BST rooted at arbin; duplicates are ignored."""
        # Empty subtree: create the node here.
        if arbin is None:
            arbin = NodeABB(elem)
        elif(elem < arbin.raizArbin()): # elem smaller than the root: insert left
            arbin._esq = insArbinBusca(arbin.esqArbin(), elem)
            #arbin.setEsqArbin(insArbinBusca(arbin.esqArbin(), elem))
        elif(elem > arbin.raizArbin()): # elem larger than the root: insert right
            arbin._dir = insArbinBusca(arbin.dirArbin(), elem)
            #arbin.setDirArbin(insArbinBusca(arbin.dirArbin(), elem))
        return arbin
def altura(arbin:NodeABB):
if arbin is None:
return -1
else:
alturaEsquerda = altura(arbin.esqArbin())
alturaDireita = altura(arbin.dirArbin())
if alturaEsquerda > alturaDireita:
return alturaEsquerda + 1
else:
return alturaDireita + 1
def nivelElemento(arbin:NodeABB, elem):
if arbin is None:
return -1
elif elem == arbin.raizArbin():
return 0
elif (elem < arbin.raizArbin() and estaArbin(arbin._esq, elem)):
return nivelElemento(arbin._esq, elem) + 1
    #---------------------------------------------------------------------
    # driver code exercising the functions implemented above
    #---------------------------------------------------------------------
    nodee = NodeABB()
    # NOTE(review): vaziaArbin is referenced without parentheses, so this
    # tests the truthiness of the bound method (always True) — confirm.
    if(nodee.vaziaArbin):
        print("nodee ta vazio")
    #arbin = None
    arbin = NodeABB(10) # node that is the root of the tree
    preOrdemArbin(arbin)
    # insert 5: create a node holding 5
    node = NodeABB(5)
    # attach this node to arbin
    arbin.add(node)
    # insert 15
    arbin.add(NodeABB(15))
    preOrdemArbin(arbin)
    print('numero elementos = ', pesoArbin(arbin))

    # Build a BST and test membership / weight / level lookups.
    abb = None
    abb = insArbinBusca(abb, 100)
    abb = insArbinBusca(abb, 45)
    abb = insArbinBusca(abb, 200)
    abb = insArbinBusca(abb, 300)
    abb = insArbinBusca(abb, 250)
    preOrdemArbin(abb)
    if(estaArbin(abb, 300)):
        print('300 esta na abb')
    else:
        print('300 NAO esta na abb')
    if(estaArbin(abb, 50)):
        print('50 esta na abb')
    else:
        print('50 NAO esta na abb')
    print('peso de abb = ', pesoArbin(abb))
    a = nivelElemento(abb,300)
    print(a)
| vlrosa-dev/estrutura-dados-python | 06-struct-data-TreeBinarySearch/main.py | main.py | py | 4,488 | python | pt | code | 1 | github-code | 13 |
70010089937 | #battle_functons.py
import random
from colorama import init, Fore, Style
init(autoreset=True)
import textwrap
import shutil
from adventure_pkg.character_functions import Character
from adventure_pkg.monster_battle_functions import Monster
columns, _ = shutil.get_terminal_size()
class Combat_Actions:
    """Combat wrapper around a Character: attack/damage rolls and HP tracking.

    Bug fixes relative to the original:
    * damage(): the target's melee modifier was computed AFTER the damage
      total was already calculated and printed, so it was never applied.
    * take_damage(): returned the bound ``damage`` method instead of a value.
    * attack(): the weapon damage was rolled twice (two independent random
      rolls) and a post-hit re-clamp of the roll was computed but never used.
    """

    # Faces of a twenty-sided die used for attack rolls.
    d20 = [x + 1 for x in range(20)]

    def __init__(self, character, armor_class, hit_points, abilities):
        self.character = character
        self.armor_class = armor_class
        self.total_hp = hit_points
        self.abilities = abilities

    def attack(self, target):
        """Roll one attack against target; return the damage dealt (0 on a miss)."""
        print("")
        print(Fore.GREEN + Style.BRIGHT + textwrap.fill(f"Hero: {self.character.char_class}", width=columns))
        print(Fore.GREEN + Style.BRIGHT + textwrap.fill(f"{self.character.abilities}", width=columns))
        print(Fore.GREEN + Style.BRIGHT + textwrap.fill(f"HP: {self.character.total_hp}", width=columns))
        print("")
        atk_roll = random.choice(self.d20)
        total_tohit = atk_roll + self.character.calculate_tohit(self.character.equipment['bab'])
        # Roll weapon damage exactly once and apply the Strength modifier.
        damage_roll = self.character.calculate_weapon_modifier(self.character.equipment['weapon'])
        strength_modifier = self.character.calculate_modifier(self.character.abilities.Strength)
        total_damage = max(1, damage_roll) + strength_modifier
        print(textwrap.fill(f"{Fore.YELLOW + Style.BRIGHT}{self.character.char_class} attacks using a {self.character.equipment['weapon']} and rolls a {atk_roll} for a total of {total_tohit} to hit", width=columns))
        # Resolve the defender's armor class for every supported target type.
        if isinstance(target, Combat_Actions):
            armor_class = target.character.calculate_armor_bonus(target.character.equipment['armor'], target.character.equipment['shield'])
        elif isinstance(target, Character):
            armor_class = target.calculate_armor_bonus(target.equipment['armor'], target.equipment['shield'])
        elif isinstance(target, Monster):
            armor_class = target.armor_class
        else:
            print("Invalid target type. Expected a Character, Combat_Actions, or Monster object.")
            return 0  # Return 0 to indicate a miss
        if total_tohit >= armor_class:
            print(f"{Fore.BLUE + Style.NORMAL}You hit {target.name} and roll weapon damage of {damage_roll}, for a total of {total_damage} damage")
            if atk_roll == 20:
                # Natural 20: critical hit, double damage.
                print(Fore.GREEN + Style.BRIGHT + textwrap.fill(f"Wow! You Critically Hit the {target.name} for {total_damage * 2}!", width=columns))
                return total_damage * 2
            return total_damage
        print(textwrap.fill(f"{Fore.MAGENTA + Style.BRIGHT}You missed the attack on the {target.name}!", width=columns))
        if atk_roll == 1:
            print(Fore.GREEN + Style.BRIGHT + textwrap.fill(f"A baby {target.name} is more threatening than that!!!!", width=columns))
        return 0  # Return 0 to indicate a miss

    def damage(self, target_details):
        """Roll 1d6 + the target's melee modifier and report the damage dealt."""
        # Determine the modifier BEFORE rolling so it is actually applied.
        melee_mod = 0
        if isinstance(target_details, Monster):
            melee_mod = target_details.damage
        elif isinstance(target_details, dict) and 'damage' in target_details:
            melee_mod = target_details['damage']
        dmg_roll = random.randint(1, 6)
        damage = dmg_roll + melee_mod
        print(textwrap.fill(f"{self.character.char_class} rolls a {dmg_roll} for a total of {damage} for damage", width=columns))
        # NOTE(review): subscripting assumes a dict-like target; a Monster
        # instance would raise here — confirm the intended target type.
        print(textwrap.fill(f"You dealt {damage} damage to {target_details['name']}!", width=columns))
        if dmg_roll == 6:
            print("You hit for maximum damage!")
        elif dmg_roll == 1:
            print("My Yorkie hits harder than that!")
        return damage

    def take_damage(self, damage):
        """Subtract damage from this combatant's HP; return the damage applied."""
        self.total_hp -= damage
        if self.total_hp <= 0:
            print(f"{Fore.MAGENTA + Style.BRIGHT}{self.character.char_class} has been defeated!")
        return damage

    def is_alive(self):
        """True while the combatant still has hit points left."""
        return self.total_hp > 0
25542540141 | import os
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Literal
from tinydb import Query, TinyDB
@dataclass
class Registrador:
    """A registered Telegram user (deputy or aide) stored in TinyDB."""
    nomeuser: str  # Name the user wants to be called by
    user_ident: int  # Telegram user id
    data: str  # Date the registration was made
    rep_dep: str  # If an aide, which deputy they represent
    is_writable: bool  # Whether the user may write content
    tipo: Literal['acessor', 'deputado'] = 'deputado'  # Role classification

    def as_dict(self):
        """Return the record as a plain dict (for TinyDB insertion)."""
        return asdict(self)
def inserir_dado(usuario, user_ident, data, rep_dep, is_wrritable, types):
    """Insert a new Registrador record; return its TinyDB document id."""
    registro = Registrador(usuario, user_ident, data, rep_dep, is_wrritable, types)
    banco = TinyDB(Path(__file__).parent / 'deputados.db', indent=4)
    return banco.insert(registro.as_dict())
def editar_dado(key_s, valor_act, user_idds):
    """Set field *key_s* to *valor_act* on the record matching user_ident."""
    banco = TinyDB(Path(__file__).parent / 'deputados.db', indent=4)
    registro = Query()
    banco.update({key_s: valor_act}, registro.user_ident == user_idds)
def todo_banco():
    """Return every record stored in the database."""
    return TinyDB(Path(__file__).parent / 'deputados.db', indent=4).all()
def remover_elm(nomeuser):
    """Remove records whose user name matches *nomeuser*; return removed doc ids.

    Bug fix: the query used the non-existent field 'nometelegram'; the
    Registrador dataclass stores the name under 'nomeuser', so the removal
    never matched anything.
    """
    Loc = Query()
    db_path = Path(__file__).parent / 'deputados.db'
    db = TinyDB(db_path, indent=4)
    return(db.remove(Loc.nomeuser == nomeuser))
def limpar_banco():
    """Delete every record from the database."""
    TinyDB(Path(__file__).parent / 'deputados.db', indent=4).truncate()
def buscar_id_user(id_tuser):
    """Return the first record whose user_ident equals *id_tuser*, or None."""
    registro = Query()
    banco = TinyDB(Path(__file__).parent / 'deputados.db', indent=4)
    return banco.get(registro.user_ident == id_tuser)
if __name__ == '__main__':
    ...
    # Ad-hoc manual test: wipe the database. The commented lines below
    # exercise insertion and id lookup.
    limpar_banco()
    # inserir_dado("Joao Gomes", 123456, "14/03/2023", "", False, 'deputado')
    # user = buscar_id_user(123456)
    # print(user["data"])
17058191374 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PrintModel(object):
    """Printer configuration value object for the Alipay OpenAPI.

    Serialization bug fix: ``to_alipay_dict`` previously used truthiness
    checks, so falsy-but-set values (``enable=False``, empty strings, 0)
    were silently dropped from the payload. Fields are now emitted whenever
    they are not None.
    """

    # Serializable fields; the attribute name doubles as the wire key.
    _FIELDS = ('device_id', 'enable', 'name', 'printer_id', 'printer_type')

    def __init__(self):
        self._device_id = None
        self._enable = None
        self._name = None
        self._printer_id = None
        self._printer_type = None

    @property
    def device_id(self):
        return self._device_id

    @device_id.setter
    def device_id(self, value):
        self._device_id = value

    @property
    def enable(self):
        return self._enable

    @enable.setter
    def enable(self, value):
        self._enable = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def printer_id(self):
        return self._printer_id

    @printer_id.setter
    def printer_id(self, value):
        self._printer_id = value

    @property
    def printer_type(self):
        return self._printer_type

    @printer_type.setter
    def printer_type(self, value):
        self._printer_type = value

    def to_alipay_dict(self):
        """Serialize all non-None fields, delegating to nested to_alipay_dict()."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value is None:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a PrintModel from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = PrintModel()
        for field in PrintModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/PrintModel.py | PrintModel.py | py | 2,738 | python | en | code | 241 | github-code | 13 |
# Read item categories, then N "item - product - quantity:X;quality:Y" lines,
# and report totals plus the products registered under each item.
bunker_dict = {items : {} for items in input().split(', ')}
for _ in range(int(input())):
    item, product, params = input().split(' - ')
    # Parse "quantity:X;quality:Y" into two ints.
    quan, qual = [ int(x[x.index(':') + 1:]) for x in params.split(';')]
    bunker_dict[item][product] = (quan, qual)
print('Count of items:', sum([sum(x[0] for x in tuple(v.values())) for k, v in bunker_dict.items()]))
# NOTE(review): the average divides total quality by the number of item
# categories, not by the number of products — confirm against the spec.
print(f'Average quality: {sum([sum(x[1] for x in tuple(v.values())) for k, v in bunker_dict.items()]) / len(bunker_dict):.2f}')
[print(f'{items} -> { ", ".join([x for x in bunker_dict[items].keys()])}') for items in bunker_dict]
#--------------------------------------------------------------------(2)-------------------------------------------------
# Variant 2: accumulate totals while reading; only product names are kept
# (empty tuples) since the per-product numbers are not needed for output.
bunker_dict = {items : {} for items in input().split(', ')}
quantity , quality = 0, 0
for _ in range(int(input())):
    item, product, params = input().split(' - ')
    quan, qual = [ int(x[x.index(':') + 1:]) for x in params.split(';')]
    bunker_dict[item][product] = ()
    quantity += quan
    quality += qual
# NOTE(review): as in variant 1, the average divides by the number of item
# categories rather than the number of products — confirm against the spec.
print(f'Count of items: {quantity}\nAverage quality: {quality / len(bunker_dict):.2f}')
[print(f'{k} -> { ", ".join([x for x in v.keys()])}') for k, v in bunker_dict.items()]
4321611571 | ###############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
###############################################################################
from FinTestCases import FinTestCases, globalTestCaseMode
from financepy.utils.date import set_date_format
from financepy.utils.date import DateFormatTypes
from financepy.utils.date import Date, date_range
import numpy as np
import time
import sys
sys.path.append("..")
testCases = FinTestCases(__file__, globalTestCaseMode)
###############################################################################
set_date_format(DateFormatTypes.UK_LONGEST)
def test_Date():
    """Exercise Date construction, string parsing, CDS roll dates and IMM dates."""
    start_date = Date(1, 1, 2018)

    assert Date(1, 1, 2018) == Date.from_string('1-1-2018', '%d-%m-%Y')

    testCases.header("DATE", "MONTHS", "CDS DATE")

    # Roll forward 0..119 months and print the following CDS date each time.
    for num_months in range(0, 120):
        next_cds_date = start_date.next_cds_date(num_months)
        testCases.print(str(start_date), num_months, str(next_cds_date))

    start_date = Date(1, 1, 2018)

    testCases.header("STARTDATE", "MONTHS", "CDS DATE")

    # Step one day at a time through a year and print the next IMM date.
    for num_months in range(0, 365):
        start_date = start_date.add_days(1)
        next_imm_date = start_date.next_imm_date()
        testCases.print(num_months, str(start_date), str(next_imm_date))
###############################################################################
def test_DateTenors():
    """Print the date reached from 23-Feb-2018 for a spread of tenor strings."""
    start_date = Date(23, 2, 2018)

    testCases.header("TENOR", "DATE")

    # Same tenors and order as before, covering days/weeks/months/years in
    # both upper and lower case.
    for tenor in ("5d", "7D", "1W", "4W", "1M", "24M", "2Y", "10y", "0m", "20Y"):
        testCases.print(tenor, start_date.add_tenor(tenor))
###############################################################################
def test_DateRange():
    """Generate date schedules over short windows with default and explicit tenors."""
    start_date = Date(1, 1, 2010)

    testCases.header("Tenor", "Dates")

    end_date = start_date.add_days(3)
    tenor = "Default"
    testCases.print(tenor, date_range(start_date, end_date))

    end_date = start_date.add_days(20)
    tenor = "1W"
    testCases.print(tenor, date_range(start_date, end_date, tenor))

    tenor = "7D"
    testCases.print(tenor, date_range(start_date, end_date, tenor))

    testCases.header("Case", "Dates")

    # Degenerate cases: zero-length range and reversed endpoints.
    case = "Same start_date"
    testCases.print(case, date_range(start_date, start_date))
    case = "start_date before end_date"
    testCases.print(case, date_range(end_date, start_date))
###############################################################################
def test_DateAddMonths():
    """Add a list of month offsets to a fixed start date and print each result."""
    start_date = Date(1, 1, 2010)

    testCases.header("Months", "Dates")

    months = [1, 3, 6, 9, 12, 24, 36, 48, 60]

    dates = start_date.add_months(months)

    testCases.header("DATES", "DATE")
    for dt in dates:
        testCases.print("DATE", dt)
###############################################################################
def test_DateAddYears():
    """add_years must accept lists, integer arrays and fractional-year arrays."""
    start_date = Date(1, 1, 2010)

    testCases.header("Years", "Dates")

    # Plain Python list of whole years.
    years = [1, 3, 5, 7, 10]
    dates1 = start_date.add_years(years)
    for dt in dates1:
        testCases.print("DATES1", dt)

    # Integer numpy array.
    years = np.array([1, 3, 5, 7, 10])
    dates2 = start_date.add_years(years)
    for dt in dates2:
        testCases.print("DATES2", dt)

    # Fractional years.
    years = np.array([1.5, 3.25, 5.75, 7.25, 10.0])
    dates3 = start_date.add_years(years)

    for dt in dates3:
        testCases.print("DATES3", dt)

    # Fractional years perturbed by a few days (dt = one day in years).
    dt = 1.0/365.0
    years = np.array([1.5+2.0*dt, 3.5-6*dt, 5.75+3*dt, 7.25+dt, 10.0+dt])
    dates4 = start_date.add_years(years)

    for dt in dates4:
        testCases.print("DATES4", dt)
###############################################################################
def test_DateSpeed():
    """Time the construction of many Date objects and report the list's size."""
    num_steps = 100
    date_list = []

    t0 = time.time()
    for _ in range(num_steps):
        date_list.append(Date(1, 1, 2010))
    elapsed = time.time() - t0

    testCases.header("LABEL", "TIME")
    testCases.print("TIMING", elapsed)

    testCases.print("Mem:", sys.getsizeof(date_list))
###############################################################################
def test_DateFormat():
    """Render one date under every supported DateFormatTypes setting."""
    dt = Date(20, 10, 2019)

    testCases.header("FORMAT", "DATE")

    for format_type in DateFormatTypes:
        set_date_format(format_type)
        testCases.print(format_type.name, dt)
###############################################################################
def test_IntraDay():
    """Check date subtraction (in days) for dates carrying intraday times."""
    testCases.header("Date1", "Date2", "Diff")

    # Whole days apart, midnight times.
    d1 = Date(20, 10, 2019, 0, 0, 0)
    d2 = Date(25, 10, 2019, 0, 0, 0)
    diff = d2 - d1
    testCases.print(d1, d2, diff)
    testCases.print(d1._excel_date, d2._excel_date, diff)

    ###########################################################################

    # Same dates with differing hours/minutes.
    d1 = Date(20, 10, 2019, 10, 0, 0)
    d2 = Date(25, 10, 2019, 10, 25, 0)
    diff = d2 - d1
    testCases.print(d1, d2, diff)
    testCases.print(d1._excel_date, d2._excel_date, diff)

    ###########################################################################

    # Same calendar day, sub-day difference only.
    d1 = Date(20, 10, 2019, 10, 0, 0)
    d2 = Date(20, 10, 2019, 10, 25, 30)
    diff = d2 - d1
    testCases.print(d1, d2, diff)
    testCases.print(d1._excel_date, d2._excel_date, diff)

    ###########################################################################

    # One day apart plus a sub-day component including seconds.
    d1 = Date(19, 10, 2019, 10, 0, 0)
    d2 = Date(20, 10, 2019, 10, 25, 40)
    diff = d2 - d1
    testCases.print(d1, d2, diff)
    testCases.print(d1._excel_date, d2._excel_date, diff)
###############################################################################
def test_DateEOM():
    """is_eom must recognise month ends (incl. leap Februaries); eom() must land on one."""
    # Dates that are already the last day of their month.
    month_end_dates = [
        (29, 2, 2000), (28, 2, 2001), (29, 2, 2004), (28, 2, 2005),
        (31, 3, 2003), (30, 4, 2004), (31, 5, 2004), (31, 12, 2010),
    ]
    for d, m, y in month_end_dates:
        assert(Date(d, m, y).is_eom() is True)

    # Mid-month dates: eom() must map each onto a genuine month end.
    mid_month_dates = [
        (2, 2, 2000), (24, 2, 2001), (22, 2, 2004), (1, 2, 2005),
        (1, 3, 2003), (3, 4, 2004), (5, 5, 2004), (7, 12, 2010),
    ]
    for d, m, y in mid_month_dates:
        assert(Date(d, m, y).eom().is_eom() is True)
###############################################################################
import datetime
from financepy.utils import from_datetime
def test_add_weekdays():
    """add_weekdays must skip weekends in both directions from a Sunday."""
    today = datetime.date(2022,2,13) # Sunday 13th Feb
    next_weekday = from_datetime(today).add_weekdays(1)
    last_weekday = from_datetime(today).add_weekdays(-1)

    # One weekday either side of a Sunday lands on Friday / Monday.
    assert( (last_weekday == Date(11, 2, 2022)) is True)
    assert( (next_weekday == Date(14, 2, 2022)) is True)

    today = datetime.date(2022,2,13) # Sunday 13th Feb
    next_weekday = from_datetime(today).add_weekdays(7)
    last_weekday = from_datetime(today).add_weekdays(-7)

    # Seven weekdays span a full week plus the skipped weekend days.
    assert( (last_weekday == Date(3, 2, 2022)) is True)
    assert( (next_weekday == Date(22, 2, 2022)) is True)
###############################################################################
# Run the whole suite once, timing the batch, then restore the date format.
test_add_weekdays()

start = time.time()
test_Date()
test_DateTenors()
test_DateRange()
test_DateAddMonths()
test_DateAddYears()
test_DateSpeed()
test_DateFormat()
test_IntraDay()
test_DateEOM()
end = time.time()
elapsed = end - start

# print("Elapsed time:", elapsed)

testCases.compareTestCases()

set_date_format(DateFormatTypes.UK_LONG)
| domokane/FinancePy | tests_golden/TestFinDate.py | TestFinDate.py | py | 8,660 | python | en | code | 1,701 | github-code | 13 |
72857924819 | #User function Template for python3
class Solution:
    # Greedy strategy: walk the values in ascending order and claim the
    # smallest still-free variant of each value, trying v-K, then v, then
    # v+K. Starting small maximises the number of distinct results.
    # TC: O(N log N) for the sort, SC: O(N)
    def distinctElements(self, N, K, A):
        """Return the max count of distinct values after shifting each A[i] by -K, 0 or +K."""
        A.sort()
        taken = set()
        for v in A:
            for candidate in (v - K, v, v + K):
                if candidate not in taken:
                    taken.add(candidate)
                    break
        return len(taken)
33760309721 | import numpy as n, matplotlib.pyplot as p
from mpl_toolkits.axes_grid.axislines import SubplotZero
from random import random
# fig = p.figure(1)
# ax = SubplotZero(fig, 111)
# fig.add_subplot(ax)
# for direction in ["xzero", "yzero"]:
# ax.axis[direction].set_axisline_style("-|>")
# ax.axis[direction].set_visible(True)
# for direction in ["left", "right", "bottom", "top"]:
# ax.axis[direction].set_visible(False)
def gen_color(l=1):
    """Return an (l, 3) array of random RGB triples, components in [0, 1)."""
    return n.array([(random(), random(), random()) for _ in range(l)])
mark = {'1':'+','-':'x','2':'d','3':'^','4':'s'}
colors_temp = gen_color(l=10)
def set_color(sep):
    """Derive a randomised colour from a separation label.

    NOTE(review): relies on the module-level ``colors_temp`` palette and on
    *sep* having single-digit fields at positions 0, 1, -2, -1 with the sign
    character at index 3 — confirm against the 'sep' strings built below.
    """
    # a and c are parsed but never used below.
    a,b,c,d = int(sep[0]),int(sep[1]),int(sep[-2]),int(sep[-1])
    if sep[3] == '-':
        dc = d+b
    else:
        dc = -d+b
    return n.sqrt(colors_temp[dc]*random())
# Load the whitespace-delimited results table; the first row supplies field names.
arr = n.genfromtxt('something.csv', dtype=None,delimiter=' ',names=True)
dt = arr['dT']
##############
# Fold negative time delays onto the positive axis.
dt = n.abs(dt)
##############
corr = arr['peak']
sep = []
# Combine the two separation columns into "sep:sep2" labels for the legend.
for s1, s2 in zip(arr['sep'],arr['sep2']):
    sep.append(str(s1)+':'+str(s2))
mult = arr['mult']
colors = gen_color(len(sep))
fig = p.figure()
ax1 = fig.add_subplot(211)
# Top panel: raw correlation peaks vs |time delay|, one marker per separation.
for a,b,c,d in zip(dt,corr,colors,sep):
    marker = mark[d[0]]
    ax1.scatter(n.abs(a), b,color=c,label=d,marker=marker,s=40)
ax1.grid()
ax1.xaxis.set_ticks(n.arange(0, 0.225, 0.025))
ax1.set_ylabel('Raw Correlation [Normalized to 1]')
ax1.set_xlim([-0.01,0.2])
#ax1.set_title("Raw Peak Heights")
p.setp(ax1.get_xticklabels(), visible=False)
p.legend(scatterpoints=1,ncol=5,fontsize=10,loc=1,frameon=False)
ax2 = fig.add_subplot(212,sharex=ax1)
# Normalise multiplicities by the sep-10,10 baseline count.
mult = mult/float(3136) #sep10,10
# Bottom panel: correlations weighted by the normalised multiplicities.
for a,b,b2,c,d in zip(dt,corr,mult,colors,sep):
    marker = mark[d[0]]
    #ax2.scatter(n.abs(a), b*b2,color=set_color(d),label=d,marker=marker,s=40)
    ax2.scatter(n.abs(a), b*b2,color=c,label=d,marker=marker,s=40)
ax2.xaxis.set_ticks(n.arange(0, 0.225, 0.025))
ax2.grid()
ax2.set_xlim([-0.01,0.2])
#ax2.set_title("Corrected for Multiplicities")
ax2.set_xlabel('Time Delay [Sidereal Day]')
ax2.set_ylabel('Correlation [Weighted by Multiplicities]')
p.show()
16402778170 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy(wxnacy@gmail.com)
# Description:
import hashlib
from typing import (
List
)
__all__ = ['md5', 'md5file', 'sha1', 'sha256', 'sha512', 'short']
code_map = (
'a' , 'b' , 'c' , 'd' , 'e' , 'f' , 'g' , 'h' ,
'i' , 'j' , 'k' , 'l' , 'm' , 'n' , 'o' , 'p' ,
'q' , 'r' , 's' , 't' , 'u' , 'v' , 'w' , 'x' ,
'y' , 'z' , '0' , '1' , '2' , '3' , '4' , '5' ,
'6' , '7' , '8' , '9' , 'A' , 'B' , 'C' , 'D' ,
'E' , 'F' , 'G' , 'H' , 'I' , 'J' , 'K' , 'L' ,
'M' , 'N' , 'O' , 'P' , 'Q' , 'R' , 'S' , 'T' ,
'U' , 'V' , 'W' , 'X' , 'Y' , 'Z'
)
def short(long_url: str) -> List[str]:
    """Generate four 6-character short-link candidates for *long_url*.

    The URL's MD5 hex digest is split into four 8-hex-char (32-bit) words;
    each word is folded into six characters drawn from ``code_map``.

    NOTE(review): the masks use 0x3D (0b111101), so alphabet indices with
    bit 1 set are never produced and part of the 62-character alphabet is
    unused. This matches the widely circulated short-URL snippet — confirm
    it is intentional before changing.
    """
    hkeys = []
    hex_text = md5(long_url)
    for i in range(0, 4):
        # Take the i-th 32-bit word of the digest.
        n = int(hex_text[i*8:(i+1)*8], 16)
        v = []
        e = 0
        for j in range(0, 5):
            x = 0x0000003D & n
            # Collect one "escape" bit per 6-bit step into e.
            e |= ((0x00000002 & n ) >> 1) << j
            v.insert(0, code_map[x])
            n = n >> 6
        # Fold the remaining high bits into e and prepend its character.
        e |= n << 5
        v.insert(0, code_map[e & 0x0000003D])
        hkeys.append(''.join(v))
    return hkeys
def sha1(text: str) -> str:
    """Return the hex SHA-1 digest of *text* (UTF-8 encoded)."""
    return hashlib.sha1(text.encode()).hexdigest()
def sha256(text: str) -> str:
    """Return the hex SHA-256 digest of *text* (UTF-8 encoded)."""
    return hashlib.sha256(text.encode()).hexdigest()
def sha512(text: str) -> str:
    """Return the hex-encoded SHA-512 digest of *text* (UTF-8 encoded).

    Fixes the misleading local name ``sha1`` used by the old implementation
    and the docstring claim that a hash object was returned.
    """
    return hashlib.sha512(text.encode()).hexdigest()
def md5(text: str) -> str:
    """Return the hex-encoded MD5 digest of *text* (UTF-8 encoded).

    Fixes the misleading local name ``sha1`` used by the old implementation
    and the docstring claim that a hash object was returned.
    """
    return hashlib.md5(text.encode()).hexdigest()
def md5file(filepath: str) -> str:
    """Return the hex MD5 digest of the file at *filepath*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(filepath, 'rb') as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
| wxnacy/wpy | wpy/hashs.py | hashs.py | py | 2,090 | python | en | code | 0 | github-code | 13 |
38462324919 | import numpy as np
import pytest
from hypothesis import given, HealthCheck, settings
from PyDynamic.signals import Signal
from test.test_signal_class.conftest import signal_inputs
@given(signal_inputs())
@settings(
    deadline=None,
    suppress_health_check=[
        *settings.default.suppress_health_check,
        HealthCheck.too_slow,
    ],
)
@pytest.mark.slow
def test_signal_class_raise_not_implemented_multivariate_signal(inputs):
    """Signal must reject multivariate values with NotImplementedError."""
    # Append a trailing singleton axis so the hypothesis-built (univariate)
    # values array looks multivariate.
    inputs["values"] = inputs["values"][..., np.newaxis]
    with pytest.raises(
        NotImplementedError,
        match=r"Signal: Multivariate signals are not implemented yet.",
    ):
        Signal(**inputs)
@given(signal_inputs(ensure_time_step_to_be_float=True))
@settings(
    deadline=None,
    suppress_health_check=[
        *settings.default.suppress_health_check,
        HealthCheck.too_slow,
    ],
)
@pytest.mark.slow
def test_signal_raise_value_error_on_non_matching_sampling_freq_and_time_step(inputs):
    """Signal must reject Fs and Ts that are not multiplicative inverses."""
    # Overwrite Fs with Ts so Fs * Ts is (almost surely) far from 1.
    inputs["Fs"] = inputs["Ts"]
    with pytest.raises(
        ValueError,
        match=r"Signal: Sampling interval and sampling frequency are assumed to be "
        r"approximately multiplicative inverse to each other.*",
    ):
        Signal(**inputs)
| Met4FoF/Code | PyDynamic/test/test_signal_class/test_signal_raise_error_on_wrong_inputs.py | test_signal_raise_error_on_wrong_inputs.py | py | 1,226 | python | en | code | 0 | github-code | 13 |
22686978712 | # Name:
# Section:
# strings_and_lists.py
from tokenize import Double
print("********** Exercise 2.7 **********")
def sum_all(number_list):
    """Return the sum of every number in *number_list* (0 for an empty list)."""
    running = 0
    index = 0
    while index < len(number_list):
        running += number_list[index]
        index += 1
    return running
# Test cases
print ("sum_all of [4, 3, 6] is:", sum_all([4, 3, 6]))
print ("sum_all of [1, 2, 3, 4] is:", sum_all([1, 2, 3, 4]))
def cumulative_sum(number_list):
    """Replace each element of *number_list* with the running total up to it.

    The list is modified in place and also returned; e.g. [4, 3, 6] becomes
    [4, 7, 13].  Works correctly with duplicate values, which the previous
    ``list.index``-based lookup mishandled (``index`` always finds the first
    occurrence of a repeated value, so duplicates were summed incorrectly).
    """
    running = 0
    for position, value in enumerate(number_list):
        running += value
        number_list[position] = running
    return number_list
# Test Cases
##### YOUR CODE HERE #####
print("Cumulative sum of [4,3,6]: ", cumulative_sum([4, 3, 6]))
print ("********** Exercise 2.8 **********")
def report_card():
    """Interactively collect class names/grades, then print a report card.

    Prompts for the number of classes, then a name and a grade per class;
    prints each pair followed by the overall average.
    """
    count = int(input("How many classes did you take? "))
    names = []
    grades = []
    taken = 0
    while taken < count:
        names.append(input("What is the name of this classs? "))
        grades.append(float(input("What was your grade in this class? ")))
        taken += 1
    print("REPORT CARD: ")
    for course, grade in zip(names, grades):
        print(course, " - ", grade)
    total = 0
    for grade in grades:
        total += grade
    print("OVER ALL GRADE: ", total / count)
# report_card()
# Test Cases
## In comments, show the output of one run of your function.
print ("********** Exercise 2.9 **********")
# Write any helper functions you need here.
##### YOUR CODE HERE #####
vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
def pig_latin(word):
    """Print and return the pig-latin form of *word*.

    Standard rules: a word starting with a vowel simply gets the suffix
    "hay"; a word starting with a consonant has its first letter moved to
    the end, followed by "ay".  The previous version applied each rule to
    the opposite case.  The converted string is returned as well as printed
    so callers can use the result.
    """
    vowel_letters = "aeiouAEIOU"
    if word and word[0] in vowel_letters:
        result = word + "hay"
    else:
        result = word[1:] + word[:1] + "ay"
    print(result)
    return result
# Test Cases
##### YOUR CODE HERE #####
pig_latin("Omer")
pig_latin("Sheeeesh")
pig_latin("A.Hitler is pog")
print ("********** Exercise 2.10 **********")
# Test Cases
##### YOUR CODE HERE #####
newlist = [x**3 for x in range(0, 10)]
print(newlist)
coinFlips = [x + y for x in ["H", "T"] for y in ["H", "T"]]
print(coinFlips)
def vowelsL(word):
    # Print every vowel occurring in *word*, ordered by the module-level
    # ``vowels`` list (not by position in the word); a letter appears once
    # per occurrence in the word.
    word = list(word)
    vowelsList = [x for x in vowels for y in word if x == y]
    print(vowelsList)
vowelsL("Omer")
vowelsL("AMARICA IS SHIT")
listC = [x*y for x in [10,20,30] for y in [1,2,3]]
print(listC)
# ********** Exercise OPT.1 **********
# If you do any work for this problem, submit it here | omerAtique/Python | Python_MIT_courseware_Homeworks_and_projects/stringsAndLists.py | stringsAndLists.py | py | 2,990 | python | en | code | 0 | github-code | 13 |
37631096172 | # author: ICL-U
"""
Validate the weakness detection.
"""
import argparse
import io
import logging
import sys
import subprocess
import os
import shutil
from shapely.geometry import Point # pylint: disable=import-error
from shapely.geometry.polygon import Polygon # pylint: disable=import-error
from deeplab_mgr import DeeplabMgr, raw_image_pos_to_deeplab_pos
from json_utils import read_json_file
from image_utils import get_image_size
from nn_labels import (DRIVENET_CLASS_IDS, DEEPLAB_CLASS_ID_TO_YOLO_CLASS_ID,
YoloLabel)
from yolo_bbox import yolo_format_in_bbox
from find_roi_by_lane import find_roi_by_lane_instance
REPO_DIR = subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
REPO_DIR = REPO_DIR.decode("utf-8").strip()
UTILITIES_IOU_DIR = os.path.join(REPO_DIR, "src", "utilities", "iou")
sys.path.append(UTILITIES_IOU_DIR)
from iou_utils import calc_iou5 # pylint: disable=import-error
class Validator():
    """Validate the weakness-detection pipeline on a list of images.

    For every image in ``weak_image_list``, YOLO detections are compared
    against EfficientDet (and optionally DeepLab) predictions and the
    ground-truth labels, accumulating true/false positives and negatives.
    """
    def __init__(self, yolo_result_json, coef, weak_image_list, iou_threshold, with_deeplab, save_files, with_roi):
        # ``coef`` selects which EfficientDet variant (d<coef>) produced the
        # per-image prediction JSON files read later.
        self.iou_threshold = iou_threshold
        self.coef = coef
        self.save_files = save_files
        self.with_deeplab = with_deeplab
        self.with_roi = with_roi
        self.yolo_result = self.get_yolo_result(yolo_result_json)
        with io.open(weak_image_list, encoding="utf-8") as _fp:
            contents = _fp.read()
        self.weak_images = [_.strip() for _ in contents.splitlines()]
    def get_yolo_result(self, json_file):
        """Read YOLO detections into {filename: [[class, l, t, r, b], ...]}."""
        yolo_result = {}
        for doc in read_json_file(json_file):
            filename = doc["filename"]
            img_width, img_height = get_image_size(filename)
            yolo_result[filename] = []
            for obj in doc["objects"]:
                class_id = obj["class_id"]
                # Only keep classes DriveNet cares about.
                if class_id not in DRIVENET_CLASS_IDS:
                    continue
                bbox = [class_id]
                bbox += yolo_format_in_bbox(
                    obj["relative_coordinates"]["center_x"],
                    obj["relative_coordinates"]["center_y"],
                    obj["relative_coordinates"]["width"],
                    obj["relative_coordinates"]["height"],
                    img_width, img_height)
                yolo_result[filename].append(bbox)
        return yolo_result
    def get_deeplab_results(self, filename):
        """Load the DeepLab label map stored next to *filename*."""
        png = filename[:-4] + "_deeplab_labels.png"
        return DeeplabMgr(png)
    def get_yolo_bboxes(self, filename):
        """Return the pre-loaded YOLO bounding boxes for *filename*."""
        return self.yolo_result[filename]
    def get_edet_bboxes(self, filename):
        """Read EfficientDet predictions for *filename* as [[class, ...box], ...]."""
        bboxes = []
        pred = read_json_file(filename[:-4] + "_efficientdet_d{}.json".format(self.coef))
        nobjs = len(pred["rois"])
        for j in range(nobjs):
            class_id = pred['class_ids'][j]
            if class_id not in DRIVENET_CLASS_IDS:
                continue
            box = [class_id]
            for item in pred['rois'][j]:
                box.append(int(item + 0.5))  # round to nearest pixel
            bboxes.append(box)
        return bboxes
    def get_gt_bboxes(self, image_filename):
        """Parse the YOLO-format ground-truth .txt next to *image_filename*."""
        txt_filename = image_filename[:-4] + ".txt"
        img_width, img_height = get_image_size(image_filename)
        bboxes = []
        with io.open(txt_filename, encoding="utf-8") as _fp:
            contents = _fp.read()
        for line in contents.splitlines():
            fields = line.strip().split()
            class_id = int(fields[0])
            _cx = float(fields[1])
            _cy = float(fields[2])
            width = float(fields[3])
            height = float(fields[4])
            left_x, top_y, right_x, bottom_y = yolo_format_in_bbox(
                _cx, _cy, width, height, img_width, img_height)
            bboxes.append([class_id, left_x, top_y, right_x, bottom_y])
        return bboxes
    def within_roi(self, image_filename, x, y):
        """Return True if pixel (x, y) lies inside the lane-based ROI polygon."""
        lane_instance_fn = image_filename[:-4] + "_lane_instance.png"
        if not os.path.isfile(lane_instance_fn):
            return False
        roi = find_roi_by_lane_instance(lane_instance_fn)
        # Need at least three vertices to form a polygon.
        if len(roi) <= 2:
            return False
        img_width, img_height = get_image_size(image_filename)
        # The ROI was computed on a 512x256 image; scale it to full size.
        scale_x = img_width / 512
        scale_y = img_height / 256
        scaled_roi = []
        for p in roi:
            q = (p[0] * scale_x, p[1] * scale_y)
            scaled_roi.append(q)
        point = Point(x, y)
        polygon = Polygon(scaled_roi)
        return polygon.contains(point)
    def bbox_within_roi(self, image_filename, gt_box):
        """Return True if all four corners of *gt_box* are inside the ROI.

        Pedestrian boxes are always accepted, regardless of the ROI.
        """
        if gt_box[0] == YoloLabel.PERSON:
            return True
        left_x, top_y, right_x, bottom_y = gt_box[1:]
        return (self.within_roi(image_filename, left_x, top_y) and
                self.within_roi(image_filename, right_x, top_y) and
                self.within_roi(image_filename, left_x, bottom_y) and
                self.within_roi(image_filename, right_x, bottom_y))
    def run(self):
        """Process every weak image, log totals and write per-image records.log."""
        all_tp = 0
        all_fp = 0
        all_fn = 0
        records = []
        for filename in self.weak_images:
            logging.info("Analyze %s", filename)
            if self.with_deeplab:
                true_positive, false_positive, false_negative = self.calc_tp_fp_fn(filename)
            else:
                true_positive, false_positive, false_negative = self.calc_tp_fp_fn_only_edet(filename)
            # Optionally copy images with any interesting outcome to /tmp.
            if self.save_files and true_positive + false_positive + false_negative > 0:
                dest = "/tmp"
                logging.warn("cp %s", filename)
                shutil.copy(filename, dest)
                shutil.copy(filename[:-4] + ".txt", dest)
            all_tp += true_positive
            all_fp += false_positive
            all_fn += false_negative
            records.append("{},{},{},{}".format(true_positive, false_positive, false_negative, filename))
        logging.info("TP: %d, FP: %d, FN: %d", all_tp, all_fp, all_fn)
        if all_tp + all_fp > 0:
            precision = float(all_tp) / (all_tp + all_fp)
            recall = float(all_tp) / (all_tp + all_fn)
        else:
            precision = 0
            recall = 0
        logging.info("precision: %f, recall: %f", precision, recall)
        with io.open("records.log", "w") as _fp:
            _fp.write("\n".join(records))
            _fp.write("\n")
        logging.warning("Write records.log")
    def calc_tp_fp_fn_only_edet(self, filename):
        """Score one image using EfficientDet as the only reference detector.

        Returns (true_positive, false_positive, false_negative) counts over
        the ground-truth boxes of *filename*.
        """
        yolo_bboxes = self.get_yolo_bboxes(filename)
        edet_bboxes = self.get_edet_bboxes(filename)
        gt_bboxes = self.get_gt_bboxes(filename)
        true_positive = 0
        true_negative = 0
        false_negative = 0
        false_positive = 0
        img_width, img_height = get_image_size(filename)
        for gt_bbox in gt_bboxes:
            if self.with_roi and not self.bbox_within_roi(filename, gt_bbox):
                logging.info("Skip gtbox (type: %d), pos: %s", gt_bbox[0], gt_bbox[1:])
                continue
            if self.with_roi:
                logging.info("gtbox (type: %d) in roi: %s", gt_bbox[0], gt_bbox[1:])
            yolo_match = False
            edet_match = False
            for yolo_bbox in yolo_bboxes:
                if calc_iou5(yolo_bbox, gt_bbox) >= self.iou_threshold:
                    logging.debug("Yolo match: %s with %s", yolo_bbox, gt_bbox)
                    yolo_match = True
                    break
            for edet_bbox in edet_bboxes:
                if calc_iou5(edet_bbox, gt_bbox) >= self.iou_threshold:
                    logging.debug("Edet match: %s with %s", edet_bbox, gt_bbox)
                    edet_match = True
                    break
            # A ground-truth object found by EfficientDet but missed by YOLO
            # counts as a correctly detected weakness (true positive).
            if yolo_match and not edet_match:
                false_positive += 1
            elif not yolo_match and edet_match:
                true_positive += 1
            elif not yolo_match and not edet_match:
                false_negative += 1
            else:
                true_negative += 1
        logging.info("%s (%dx%d): true_positive: %d, false_positive:%d, false_negative: %d, true_negative: %d, groundtruth: %d",
                     filename, img_width, img_height, true_positive, false_positive, false_negative, true_negative, len(gt_bboxes))
        return true_positive, false_positive, false_negative
    def calc_tp_fp_fn(self, filename):
        """Score one image using both EfficientDet and DeepLab as references.

        Returns (true_positive, false_positive, false_negative) counts.
        """
        yolo_bboxes = self.get_yolo_bboxes(filename)
        edet_bboxes = self.get_edet_bboxes(filename)
        gt_bboxes = self.get_gt_bboxes(filename)
        deeplab_mgr = self.get_deeplab_results(filename)
        true_positive = 0
        false_negative = 0
        false_positive = 0
        img_width, img_height = get_image_size(filename)
        for gt_bbox in gt_bboxes:
            if self.with_roi and not self.bbox_within_roi(filename, gt_bbox):
                logging.info("Skip gtbox (type: %d), pos: %s", gt_bbox[0], gt_bbox[1:])
                continue
            if self.with_roi:
                logging.info("gtbox (type: %d) in roi: %s", gt_bbox[0], gt_bbox[1:])
            yolo_match = False
            edet_match = False
            deeplab_match = False
            for yolo_bbox in yolo_bboxes:
                if calc_iou5(yolo_bbox, gt_bbox) >= self.iou_threshold:
                    logging.debug("Yolo match: %s with %s", yolo_bbox, gt_bbox)
                    yolo_match = True
                    break
            for edet_bbox in edet_bboxes:
                if calc_iou5(edet_bbox, gt_bbox) >= self.iou_threshold:
                    logging.debug("Edet match: %s with %s", edet_bbox, gt_bbox)
                    edet_match = True
                    break
            # DeepLab agrees when any pixel inside the ground-truth box maps
            # to the same YOLO class as the box label.
            for y in range(gt_bbox[2], gt_bbox[4]):
                if deeplab_match:
                    break
                for x in range(gt_bbox[1], gt_bbox[3]):
                    deeplab_x, deeplab_y = raw_image_pos_to_deeplab_pos(x, y, img_width, img_height)
                    class_id = deeplab_mgr.get_label_by_xy(deeplab_x, deeplab_y)
                    if class_id not in DEEPLAB_CLASS_ID_TO_YOLO_CLASS_ID:
                        continue
                    if DEEPLAB_CLASS_ID_TO_YOLO_CLASS_ID[class_id] == gt_bbox[0]:
                        logging.debug("Deeplab match at (%d, %d) for gt_box %s", x, y, gt_bbox)
                        deeplab_match = True
                        break
            if not deeplab_match:
                logging.warn("Deeplab not match gt_box %s", gt_bbox)
            if yolo_match and (not edet_match) and (not deeplab_match):
                false_positive += 1
            if not yolo_match:
                if edet_match and deeplab_match:
                    true_positive += 1
                else:
                    false_negative += 1
        logging.info("%s (%dx%d): true_positive: %d, false_positive:%d, false_negative: %d, groundtruth: %d",
                     filename, img_width, img_height, true_positive, false_positive, false_negative, len(gt_bboxes))
        return true_positive, false_positive, false_negative
def main():
    """CLI entry point: parse arguments, build a Validator and run it."""
    logging.basicConfig(format='%(asctime)-15s %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("--yolo-result-json", required=True)
    parser.add_argument("--coef", type=int, default=4)
    parser.add_argument("--iou-threshold", type=float, default=0.25)
    parser.add_argument("--weak-image-list", required=True)
    parser.add_argument("--with-deeplab", action="store_true")
    parser.add_argument("--save-files", action="store_true")
    parser.add_argument("--with-roi", action="store_true")
    args = parser.parse_args()
    obj = Validator(args.yolo_result_json, args.coef, args.weak_image_list,
                    args.iou_threshold, args.with_deeplab, args.save_files, args.with_roi)
    obj.run()
if __name__ == "__main__":
main()
| wasn-lab/Taillight_Recognition_with_VGG16-WaveNet | src/utilities/weakness_detection/calc_validation.py | calc_validation.py | py | 11,849 | python | en | code | 2 | github-code | 13 |
8585850765 | import json
from urllib.request import urlopen
import tweepy
import re
import time
# DATA ---------------------------------------------------------------------------------------------------------------
# Musix Match API data
musixmatchAPI_KEY = ""
# Twitter account API data
twitterConsumerKey = ""
twitterConsumerSecret = ""
twitterAccessTokenKey = ""
twitterAccessTokenSecret = ""
twitterAPI = None
# Target
target = {"userID" : "", # Something like 4852873365
"userName" : "", # Something like do_the_bender
"tag" : ""} # Something like #spacerocks.12
# PROGRAM ------------------------------------------------------------------------------------------------------------
# Gets a song list given its lyrics. Uses Musix Match API
def getSongListByLyrics(lyrics):
    """Query the Musixmatch API for tracks matching *lyrics*.

    Returns the (possibly empty) track list, sorted by track rating.
    Responses with a non-200 status code or an unexpected shape (caught as
    KeyError) yield an empty list.
    """
    queryString = f"http://api.musixmatch.com/ws/1.1/track.search?apikey={musixmatchAPI_KEY}&q_lyrics={lyrics.replace(' ','%20')}&format=json&f_has_lyrics=1&s_track_rating=desc&page_size=15"
    response = json.loads(urlopen(queryString).read().decode(encoding='UTF-8'))
    try:
        if response['message']['header']['status_code'] == 200:
            return response['message']['body']['track_list']
        else:
            return []
    except KeyError:
        return []
# Removes everything that comes after a (, [ or - from a string
def removeExtras(string):
    """Return *string* truncated at the first '(', '[' or '-', then stripped."""
    for separator in ('(', '[', '-'):
        string = string.split(separator)[0]
    return string.strip()
# Removes emojis from a string
def removeEmoji(string):
    """Strip common emoji codepoints from *string*.

    Tries a UCS-4 pattern first; narrow (UCS-2) Python builds reject it with
    re.error, in which case a surrogate-pair pattern is used instead.
    """
    try:
        emojiPattern = re.compile(u'([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF])') # UCS-4
    except re.error:
        emojiPattern = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])') # UCS-2
    return emojiPattern.sub('', string)
# Twitter stream listener
# Twitter stream listener
class StreamListener(tweepy.StreamListener):
    """Tweepy listener that answers lyric tweets with a song guess."""
    def on_status(self, status):
        """If the target tag appears, look up the lyrics and reply with 'Artist - Track'."""
        print(f"Got new status {status}")
        if target['tag'] in status.text.lower():
            # Strip emojis and the tag itself before searching the lyrics.
            lyrics = removeEmoji(status.text).replace(target['tag'], '')
            songList = getSongListByLyrics(lyrics)
            if songList:
                artistName = removeExtras(songList[0]['track']['artist_name'])
                trackName = removeExtras(songList[0]['track']['track_name'])
                tweet = '@' + target['userName'] + ' ' + artistName + ' - ' + trackName + ' ' + target['tag']
                time.sleep(0.5)  # small delay before replying
                twitterAPI.update_status(tweet, in_reply_to_status_id = status.id)
                print("Posted the following tweet:\n" + tweet)
            else:
                print("Didn't find any matching songs!")
    def on_error(self, status_code):
        """Back off for a while on stream errors, then disconnect (return False)."""
        print('Encountered error with status code:', status_code)
        time.sleep(3000)
        return False
    def on_timeout(self):
        """Back off on timeouts, then disconnect (return False)."""
        print('Timeout...')
        time.sleep(3000)
        return False
# Main program
if __name__ == "__main__":
auth = tweepy.OAuthHandler(twitterConsumerKey, twitterConsumerSecret)
auth.secure = True
auth.set_access_token(twitterAccessTokenKey, twitterAccessTokenSecret)
twitterAPI = tweepy.API(auth)
listener = StreamListener()
print(f"Started tracking @{target['userName']}...")
stream = tweepy.Stream(auth = twitterAPI.auth, listener = listener)
stream.filter(follow = [target['userID']])
| dvilelaf/SpaceRocksBot | SpaceRocksBot.py | SpaceRocksBot.py | py | 3,408 | python | en | code | 2 | github-code | 13 |
19150462524 | import plotly.graph_objects as go
from game_model import BALL_RADIUS, TABLE_DIMENSIONS, POCKET_COORDINATES
def draw_circle(fig, coordinates, color, r=BALL_RADIUS):
    """Add a filled circle of radius *r* centred on *coordinates* to *fig*."""
    cx, cy = coordinates[0], coordinates[1]
    fig.add_shape(
        type="circle",
        xref="x", yref="y",
        x0=(cx - r), y0=(cy - r), x1=(cx + r), y1=(cy + r),
        line_color=color, fillcolor=color,
    )
def draw_board(fig, target_balls, opponent_balls, cue_ball, eight_ball, dpm=500):
    """Render the pool table on *fig*: balls, pockets and axes.

    *dpm* scales the figure size (pixels per metre of table).
    """
    # Set axes properties
    fig.update_xaxes(range=[0, TABLE_DIMENSIONS[0]], zeroline=False, constrain='domain')
    fig.update_yaxes(range=[0, TABLE_DIMENSIONS[1]], constrain='domain')
    # Add circles
    for ball in target_balls:
        draw_circle(fig, ball, 'red')
    for ball in opponent_balls:
        draw_circle(fig, ball, 'blue')
    draw_circle(fig, cue_ball, 'white')
    draw_circle(fig, eight_ball, 'black')
    for pocket in POCKET_COORDINATES:
        draw_circle(fig, pocket, 'black', r=0.03)
    # Set figure size
    fig.update_layout(
        width=TABLE_DIMENSIONS[0] * dpm,
        height=TABLE_DIMENSIONS[1] * dpm,
    )
    # Keep the x and y scales equal so circles stay circular.
    fig.update_yaxes(
        scaleanchor="x",
        scaleratio=1,
    )
def draw_trajectory(fig, v1, v2, color='black'):
    """Draw a dashed line from *v1* to *v2* on *fig* and mark both endpoints.

    *v1* and *v2* are (x, y) pairs.  (A dead, commented-out second segment
    to a ``target`` point was removed; it was a no-op string literal.)
    """
    fig.add_shape(type="line",
                  x0=v1[0], y0=v1[1], x1=v2[0], y1=v2[1],
                  line=dict(
                      color=color,
                      width=1,
                      dash="dash",
                  ),
                  )
    fig.add_trace(go.Scatter(x=[v1[0], v2[0]], y=[v1[1], v2[1]], mode="markers"))
def draw_all_actions(fig, actions):
    """Draw every predicted trajectory of every action onto *fig*."""
    for action in actions:
        for trajectory in action.predicted_trajectories:
            draw_trajectory(fig, trajectory.start_coordinate.to_array(), trajectory.end_coordinate.to_array())
| Enrico-Call/virtual-pool-coach | strategy/draw.py | draw.py | py | 2,230 | python | en | code | 0 | github-code | 13 |
71817579218 | import os
import json
# Parses metadata from a file and returns it in a dictionary
def parse_metadata(filename):
    """Parse the metadata JSON file *filename* and return it as a dict.

    Raises FileNotFoundError with a helpful message if the file is missing,
    and whatever ``json`` raises if the contents are not valid JSON.
    (Previous version read the whole file manually and redundantly closed
    the handle inside the ``with`` block.)
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError("Metadata JSON not found.")
    with open(filename, "r") as file:
        return json.load(file)
# Prints genotype representation to a file (pytest code)
def write_to_file(metadata, test_suite):
outfile = metadata["location"] + "test_" + metadata["file"] + ".py"
f = open(outfile, "w+") # Overwrites the old file with this name
f.write("import " + metadata["file"] + "\nimport pytest\n")
for test in range(len(test_suite)):
test_case = test_suite[test]
f.write("\n\ndef test_%d():\n" % test)
# Initialize the constructor
parameters = test_case[0][1]
init_string = "\tcut = " + metadata["file"] + "." + metadata[
"class"] + "(" + str(parameters[0])
for parameter in range(1, len(parameters)):
init_string = init_string + "," + str(parameters[parameter])
init_string += ")\n"
f.write(init_string)
# Print each test step
for action in range(1, len(test_case)):
name = metadata["actions"][test_case[action][0]]["name"]
parameters = test_case[action][1]
action_type = metadata["actions"][test_case[action][0]]["type"]
out_string = ""
if action_type == "assign":
out_string = "\tcut." + name + " = " + str(parameters[0]) + "\n"
elif action_type == "method":
if parameters:
out_string = "\tcut." + name + "(" + str(parameters[0])
for parameter in range(1, len(parameters)):
out_string = out_string + "," + str(parameters[parameter])
out_string += ")\n"
else:
out_string = "\tcut." + name + "()\n"
f.write(out_string)
f.close()
| Greg4cr/PythonUnitTestGeneration | src/file_utilities.py | file_utilities.py | py | 2,073 | python | en | code | 1 | github-code | 13 |
9868561034 | from django.shortcuts import render,get_object_or_404,redirect
from catalog.models import *
from django.http import HttpResponse
from django.conf import settings
from product.models import *
from felixuser.models import Profile
from django.db.models import F,Avg
from .forms import *
# NOTE(review): the first four names bind Django *field objects*
# (e.g. ``City.slug``), not string values, and several are shadowed by
# view parameters/locals below — confirm they are actually needed.
city_slug = City.slug
cat_slug = Category.slug
subcat_slug = Subcategory.slug
tovar_slug = Tovar.slug
# Lazily-evaluated querysets reused as template context by the views below.
cat_list = Category.objects.order_by('name')
subcat_list = Subcategory.objects.order_by('name')
tovar_list = Tovar.objects.order_by('name')
import datetime
def set_cookie(response, key, value, days_expire = 365):
    """Attach cookie *key*=*value* to *response*, expiring after *days_expire*
    days (defaults to one year; ``None`` also means one year)."""
    if days_expire is None:
        max_age = 365 * 24 * 60 * 60 #one year
    else:
        max_age = days_expire * 24 * 60 * 60
    expires = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
    response.set_cookie(key, value, max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN, secure=settings.SESSION_COOKIE_SECURE or None)
def index(request):
    """Home page: products plus two random banners for the cookie-chosen city."""
    # NOTE(review): this local shadows the module-level ``subcat_list``
    # queryset and actually holds Products, not Subcategories.
    subcat_list = Product.objects.select_related('name__category__category')
    sales = Banner.objects.filter(user__regions__slug=request.COOKIES.get('City_choise')).order_by('?')[:2]
    return render(request, "catalog/index.html", {"cat_list":cat_list,"subcat_list":subcat_list,"sales":sales})
def company(request, pk):
    """Company profile page; POST creates an order addressed to this company.

    Also maintains a per-company visit cookie: first-time visitors get the
    cookie set and the company's visit counter incremented.
    """
    company = Profile.objects.get(id=pk)
    orders = Review.objects.filter(order__vendor=company.user)
    avg = orders.aggregate(average_range=Avg('range'))
    tar = Tariff.objects.get(user=company)
    photo = Gallery.objects.filter(company=company)
    sales = Banner.objects.filter(user__regions__slug=request.COOKIES.get('City_choise')).order_by('?')[:2]
    # NOTE(review): ``filter`` never raises DoesNotExist (it returns an empty
    # queryset), so this except branch looks unreachable — confirm intent.
    try:
        count = Count.objects.filter(profile=company.user)
    except Count.DoesNotExist:
        count = None
    # Per-company cookie marks whether this visitor was already counted.
    cookie = request.COOKIES.get("%s"%(pk))
    if request.method == 'POST':
        orderform = CreateOrder(request.POST)
        if orderform.is_valid():
            fs = orderform.save()
            fs.refresh_from_db()
            # Resolve the visitor's city from the cookie and stamp the order.
            city = request.COOKIES.get('City_choise')
            city_form = City.objects.get(slug=city)
            fs.city = city_form.name
            fs.vendor = company.user
            fs.save()
            return redirect('home')
        # reviewform = CreateReview(request.Post)
        # if reviewform.is_valid():
        #     fs=reviewform.save()
        #     fs.refresh_from_db()
        #     order = Order.objects.filter(vendor=company.user)
        #     if fs.order in order.id:
        #         fs.order = reviewform.cleaned_data.get('order')
        #     else:
        #         reviewform.add_error('order', 'Заказ не найден')
        #     fs.save()
    else:
        orderform = CreateOrder()
    response = render(request, 'catalog/company.html', {'company':company, 'review':avg, 'rev':orders, 'count':count,'cookie':cookie,'orderform':orderform,'tar':tar,'photo':photo,"sales":sales})
    if cookie is None:
        set_cookie(response, "%s"%(pk),value="visited")
        count.update(name=F("name") + 1)
    return response
def city(request, city_slug):
    """Remember the chosen city in the 'City_choise' cookie and redirect home."""
    cts = get_object_or_404(City, slug=city_slug)
    response = redirect('home')
    # NOTE(review): ``sales`` is computed but never used in this view.
    sales = Banner.objects.filter(user__regions__slug=request.COOKIES.get('City_choise')).order_by('?')[:2]
    set_cookie(response, 'City_choise', value=cts.slug)
    return response
def category(request,city_slug,cat_slug):
    """Category page: its subcategories, products and city banners."""
    tovar_list = Product.objects.select_related('name__category')
    sales = Banner.objects.filter(user__regions__slug=request.COOKIES.get('City_choise')).order_by('?')[:2]
    cathegory = get_object_or_404(Category, slug=cat_slug)
    subcat_list = Subcategory.objects.filter(category=cathegory.id).order_by('name')
    return render(request, "catalog/category.html",{"cath":cathegory,"cat_list":cat_list,"subcat_list":subcat_list,"cat_slug": cat_slug,"tovar_list":tovar_list,"sales":sales})
def subcategory(request,city_slug,cat_slug,subcat_slug):
    """Subcategory page: products filtered by city; POST creates an order."""
    cathegory = get_object_or_404(Category, slug=cat_slug)
    subcathegory = get_object_or_404(Subcategory, slug=subcat_slug)
    tovar = Tovar.objects.filter(category=subcathegory)
    sales = Banner.objects.filter(user__regions__slug=request.COOKIES.get('City_choise')).order_by('?')[:2]
    product = Product.objects.order_by('price').select_related('company','company__tariff').exclude(company__tariff__status=1).filter(company__regions__slug=city_slug)
    orders = Review.objects.filter()
    avg = orders.aggregate(average_range=Avg('range'))
    if request.method == 'POST':
        orderform = CreateOrderr(request.POST)
        if orderform.is_valid():
            fs = orderform.save()
            fs.refresh_from_db()
            city = request.COOKIES.get('City_choise')
            city_form = City.objects.get(slug=city).name
            fs.city = city_form
            # NOTE(review): ``product`` is a QuerySet, so ``product.company``
            # will raise AttributeError at runtime — confirm which product's
            # company was intended here.
            fs.vendor = product.company
            fs.save()
            return redirect('home')
    else:
        orderform = CreateOrderr()
    return render(request, "catalog/subcategory.html", {"cts":cathegory,"subcathegory":subcathegory,"products":orders, "avg":avg, "tovar":tovar, "product":product,"cat_slug":cat_slug,"subcat_slug":subcat_slug,"orderform":orderform,"sales":sales})
def tovar(request,city_slug,cat_slug,subcat_slug,tovar_slug):
    """Single-product page with per-city offers; POST creates an order."""
    cathegory = get_object_or_404(Category, slug=cat_slug)
    subcathegory = get_object_or_404(Subcategory, slug=subcat_slug)
    tovar = Tovar.objects.filter(category=subcathegory)
    sales = Banner.objects.filter(user__regions__slug=request.COOKIES.get('City_choise')).order_by('?')[:2]
    tovar_item=get_object_or_404(Tovar, slug=tovar_slug)
    product = Product.objects.order_by('price').select_related('company','company__tariff').filter(name=tovar_item).exclude(company__tariff__status=1).filter(company__regions__slug=city_slug)
    if request.method == 'POST':
        orderform = CreateOrderr(request.POST)
        if orderform.is_valid():
            fs = orderform.save()
            fs.refresh_from_db()
            city = request.COOKIES.get('City_choise')
            city_form = City.objects.get(slug=city)
            fs.city = city_form.name
            # NOTE(review): ``product`` is a QuerySet; assigning it to a
            # (presumably FK) ``vendor`` field looks wrong — confirm which
            # single product/vendor was intended.
            fs.vendor = product
            fs.save()
            return redirect('home')
    else:
        orderform = CreateOrderr()
    return render(request, "catalog/subcategory.html",{"cts":cathegory,"subcathegory":subcathegory,"tovar":tovar,"tovar_item":tovar_item, "product":product,"cat_slug":cat_slug,"subcat_slug":subcat_slug,"orderform":orderform,"sales":sales})
def search(request):
    """Resolve the search form (city + product id) to the product's catalog URL."""
    # Fall back to the city stored in the cookie when the form omits it.
    l = request.COOKIES.get('City_choise')
    l_2 = City.objects.get(slug=l)
    city = request.GET.get('city',l_2)
    if request.GET.get('city') =='':
        city =l_2
    tovar = request.GET.get('tovar', 1)
    # NOTE(review): ``city`` may be a City instance (cookie fallback) or a
    # string (form input); ``get(name=city)`` relies on both comparing
    # correctly — confirm.
    city_url = City.objects.get(name=city)
    tovar_url = Tovar.objects.get(id=tovar)
    subcat_url = Subcategory.objects.get(name=tovar_url.category)
    cat_url = Category.objects.get(name=subcat_url.category)
    return redirect('catalog_tovar', city_slug=city_url.slug,cat_slug=cat_url.slug,subcat_slug=subcat_url.slug,tovar_slug=tovar_url.slug)
| LevupCompany/beton | catalog/views.py | views.py | py | 7,200 | python | en | code | 0 | github-code | 13 |
5415744853 | from data.shared import getcwd, p
from time import sleep
def coin(scale_size):
    """Load the coin sprite sheet (scaled to *scale_size*) and return a drawer.

    The returned object exposes ``draw_coin_animation`` (plays all ten frames
    of a row) and ``draw_single_sprite`` (blits one frame).
    """
    hero_sheet_image = p.image.load(getcwd() + '\\sprites\\coin\\coin_sprite_sheet.png').convert()
    # Use the top-left pixel as the transparent colour key.
    hero_sheet_image.set_colorkey(hero_sheet_image.get_at((0, 0)))
    hero_sheet_image = p.transform.scale(hero_sheet_image, scale_size)
    hero_sheet_size = (hero_sheet_image.get_size())
    # The sheet holds ten frames side by side.
    hero_size = (int(hero_sheet_size[0] / 10), hero_sheet_size[1])
    class Gate:
        def __init__(self):
            self.hero_size = hero_size
        def draw_coin_animation(self, option, location, surface):
            # Play row *option* frame by frame (~0.1 s per frame, blocking).
            for ii in range(0, hero_sheet_size[0], hero_size[0]):
                rect = p.Rect(ii, hero_size[1] * option, hero_size[0], hero_size[1])
                image_1 = p.Surface(rect.size)
                image_1.blit(hero_sheet_image, (0, 0), rect)
                sleep(.1)
                surface.blit(image_1, location)
                p.display.flip()
        def draw_single_sprite(self, option, location, surface):
            # Blit frame *option* of the first row without animating.
            rect = p.Rect(option * hero_size[0], 0, hero_size[0], hero_size[1])
            image_1 = p.Surface(rect.size)
            image_1.blit(hero_sheet_image, (0, 0), rect)
            surface.blit(image_1, location)
            p.display.flip()
    hero_sprite = Gate()
    return hero_sprite
| DannyGersh/Maze-of-doooom | sprites/coin.py | coin.py | py | 1,419 | python | en | code | 0 | github-code | 13 |
13328046299 | # Калькулятор
print("Буква q будет закрывать программу")
while True:
s = input("Знак (+,-,*,/,%): ")
if s == "q":
break
if s in ('+', '-', '*', '/', '%'):
if s == '%':
print("x - число от которог берём %")
print("y - процент, который берём")
x = float(input("x= "))
y = float(input("y= "))
if s == '+':
print("%.2f" % (x+y))
elif s == '-':
print("%.2f" % (x-y))
elif s == '*':
print("%.2f" % (x*y))
elif s == '%':
print("%.2f" % (x / 100 * y))
elif s == '/':
if y != 0:
print("%.2f" % (x/y))
else:
print("Деление на 0")
else:
print("Знак операции не распознан")
# Сортировка выбором
from random import randint
n = 10
arr = []
for i in range(n):
arr.append(randint(1,99))
print(arr)
i = 0
while i < n - 1:
m = i
j = i + 1
while j < n:
if arr [j] < arr[m]:
m = j
j += 1
arr[i], arr[m] = arr[m], arr[i]
i += 1
print(arr)
# Сортировка пузырьком
arr = [7, 13, 5, 3, 9]
n = len(arr)
print(n)
print(arr)
for i in range(n-1):
for j in range(n-i-1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
print (arr)
# Сортировка пузырьком
arr = [7, 13, 5, 3, 9]
n = len(arr)
print(arr)
i = 0
while i < n - 1:
j = 0
while j < n - 1 - i:
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
j += 1
i += 1
print(arr)
# Сортировка вставкой
arr = [7, 13, 5, 3, 9]
n = len(arr)
print(arr)
for i in range(n):
print(i)
j = i - 1
val = arr[i]
while arr[j] > val and j >= 0:
arr[j+1] = arr[j]
j -= 1
arr[j+1] = val
print(arr)
#print(arr)
from datetime import datetime
arr = [[4, 6, 2, 1, 9, 63, -134, 566], [-52, 56, 30, 29, -54, 0, -110], [42, 54, 65, 87, 0], [5]]
def insertion(data):
    """Sort *data* in place with insertion sort and return it."""
    for i in range(len(data)):
        j = i - 1
        current = data[i]
        # Shift larger elements right; test the index bound *before* reading
        # data[j] so the loop never relies on Python's negative indexing
        # (the old condition order read data[-1] on every first comparison).
        while j >= 0 and data[j] > current:
            data[j + 1] = data[j]
            j -= 1
        data[j + 1] = current
    return data
def bubble(data):
    """Sort *data* in place using bubble sort and return it."""
    length = len(data)
    for done in range(length - 1):
        # After each outer pass the largest remaining value has bubbled to
        # position length - done - 1, so the inner sweep shrinks by one.
        for pos in range(length - done - 1):
            if data[pos] > data[pos + 1]:
                data[pos], data[pos + 1] = data[pos + 1], data[pos]
    return data
def vibor(data):
    """Sort *data* in place using selection sort and return it."""
    length = len(data)
    for start in range(length - 1):
        # Find the smallest element in the unsorted tail...
        smallest = start
        for candidate in range(start + 1, length):
            if data[candidate] < data[smallest]:
                smallest = candidate
        # ...and swap it into the current position.
        data[start], data[smallest] = data[smallest], data[start]
    return data
def default(data):
    """Return a sorted copy of *data* using the built-in ``sorted``.

    The previous version ignored its argument entirely: it looped over the
    global ``arr`` (shadowing ``data``) and returned a sorted copy of
    whichever sub-list happened to come last.
    """
    return sorted(data)
def minimum(arr):
    """Print the minimum of every sub-list in *arr*, timing four sort methods.

    NOTE: insertion/bubble/vibor sort the sub-lists in place, so every
    timing after the first operates on already-sorted data.
    """
    print("МИНИМАЛЬНЫЕ ЗНАЧЕНИЯ")
    print("Метод сортировки встроенный")
    start_time = datetime.now()
    for data in arr:
        data = sorted(data)
        print("Минимальное значение из массива:", data, min(data))
    end_time = datetime.now()
    print('Продолжительность: {}'.format(end_time - start_time))
    print("Метод сортировки вставкой")
    start_time = datetime.now()
    for data in arr:
        print("Минимальное значение из массива:", data, insertion(data)[0])
    end_time = datetime.now()
    print('Продолжительность: {}'.format(end_time - start_time))
    print("Метод сортировки пузырьком")
    start_time = datetime.now()
    for data in arr:
        print("Минимальное значение из массива:", data, bubble(data)[0])
    end_time = datetime.now()
    print('Продолжительность: {}'.format(end_time - start_time))
    print("Метод сортировки выбором")
    start_time = datetime.now()
    for data in arr:
        print("Минимальное значение из массива:", data, vibor(data)[0])
    end_time = datetime.now()
    print('Продолжительность: {}'.format(end_time - start_time))
def maximum(arr):
    """Print the maximum of every sub-list of ``arr`` four times — once per
    sorting strategy — timing each pass.  Output format is unchanged; note
    that the custom sorters mutate the sub-lists in place."""
    print("МАКСИМАЛЬНЫЕ ЗНАЧЕНИЯ")
    print("Метод сортировки встроенный")
    started = datetime.now()
    for sample in arr:
        ordered = sorted(sample)
        print("Максимальное значение из массива:", ordered, max(ordered))
    print('Продолжительность: {}'.format(datetime.now() - started))
    # Same table-driven shape as minimum(); [-1] is the last element of the
    # freshly sorted list, i.e. the maximum.
    for suffix, sorter in (("вставкой", insertion),
                           ("пузырьком", bubble),
                           ("выбором", vibor)):
        print("Метод сортировки " + suffix)
        started = datetime.now()
        for sample in arr:
            print("Максимальное значение из массива:", sample, sorter(sample)[-1])
        print('Продолжительность: {}'.format(datetime.now() - started))
def main():
    """Run both benchmark reports.  Each returns None, so a literal 'None'
    is printed after each report (original behavior preserved)."""
    for report in (minimum, maximum):
        print(report(arr))


print(main())
# Task 1: given a monthly salary and living expenses that grow 10% per month,
# print a month-by-month report and the employee's accumulated debt.
salary = float(input("Введи сумму зарплаты в месяц: "))
expenses = float(input("Введи расходы на проживание: "))
month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
# Running total of expenses, seeded with the first month's base amount.
exptemp = expenses
# NOTE(review): this rejects the case salary >= expenses, i.e. the program
# only handles a worker who spends more than they earn — confirm the check
# is not inverted.
if salary >= expenses:
    print("Входные данные не корректны")
else:
    for i in month:
        print("Сейчас месяц", i)
        if i != 1:
            # Cumulative salary after i months.
            all_salary = salary * i
            print("Зарплата: ", all_salary)
            # Expenses grow 10% each month after the first.
            expenses = expenses * 1.1
            exptemp = exptemp + expenses
            print("Расходы: ", exptemp)
        else:
            # Month 1 uses the base amounts with no growth applied.
            print("1 месяц у нас без %")
            print("Зарплата: ", salary)
            print("Расходы: ", expenses)
    # Debt = |total earned - total spent| after 12 months.
    live = abs(all_salary - exptemp)
    print("Сотрудник будет должен: ", round(live, 2), " рублей")
# Бинарный поиск /// ошибка
def binary_search(arr, low, high, x):
    """Recursive binary search for ``x`` in the sorted list ``arr``.

    :param arr: ascending sorted list
    :param low: left index of the search window (inclusive)
    :param high: right index of the search window (inclusive)
    :param x: value to find
    :return: index of ``x`` or -1 when absent
    """
    # Empty window: the value is not present.
    if high < low:
        return -1
    mid = (low + high) // 2
    if arr[mid] == x:
        return mid
    # Recurse into whichever half can still contain x.
    if x < arr[mid]:
        return binary_search(arr, low, mid - 1, x)
    return binary_search(arr, mid + 1, high, x)
# Test array (must be sorted ascending for binary search).
arr = [3, 6, 10, 15, 19, 21, 22, 25, 27]
x = input("Введи число искомое: ")
x = int(x)
# Call the recursive search over the whole array.
resultat = binary_search(arr, 0, len(arr) - 1, x)
# Interpret the result: -1 means "not found", anything else is the index.
if resultat != -1:
    print("Элемент присутствует в массиве под индексом ", str(resultat))
else:
    print("Элемент НЕ присутствует в массиве")
# Версия №2
def binar(arr, x):
    """Iterative binary search for ``x`` in the sorted list ``arr``.

    :return: index of ``x``, or -1 when absent (also for an empty list)
    """
    lo = 0
    hi = len(arr) - 1
    found = -1
    # Keep halving the window until x is located or the window empties.
    while (lo <= hi) and (found == -1):
        middle = (lo + hi) // 2
        if arr[middle] == x:
            found = middle
        elif x < arr[middle]:
            hi = middle - 1
        else:
            lo = middle + 1
    return found
# Test array (must be sorted ascending for binary search).
arr = [3, 6, 10, 15, 16, 17, 19]
x = input("Введи число искомое: ")
x = int(x)
# Call the iterative search.
resultat = binar(arr, x)
# Interpret the result: -1 means "not found", anything else is the index.
if resultat != -1:
    print("Элемент присутствует в массиве под индексом ", str(resultat))
else:
print("Элемент НЕ присутствует в массиве") | elenashestakova/project_01 | sort.py | sort.py | py | 8,213 | python | ru | code | 0 | github-code | 13 |
# *********************************************************
# Program: TL15_G01.py
# Course: PSP0101 PROBLEM SOLVING AND PROGRAM DESIGN
# Tutorial Section: TL15 Group: G1
# Trimester: 2215
# Year: 2022/23 Trimester 1
# Member_1: 1221101160 | MUHAMMAD NABIL NAUFAL BIN MD ZAID
# Member_2: 1211112042 | GOH ROU LOU
# Member_3: 1221101048 | HABEBA NADER DEYAAEDDEIN EISA
# Member_4: 1221101167 | SIN YI WEI
# *********************************************************
# Task Distribution
# Member_1: GROUP LEADER, WROTE THE CODING, MADE FLOWCHARTS, WROTE THE DOCUMENTATION, CODE TESTER
# Member_2: WROTE THE CODING, CODE TESTER
# Member_3: MADE FLOWCHARTS, CODE TESTER
# Member_4: WROTE THE DOCUMENTATION, CODE TESTER
# *********************************************************
# Entry menu: print the options, validate the numeric choice, then dispatch
# by importing the matching module (each import runs that page's code).
print("Welcome to Uber-Like")
print("1. Sign Up")
print("2. Sign In")
print("3. Exit")
# Requests user to enter a choice
# NOTE(review): int(input(...)) raises ValueError on non-numeric input;
# only out-of-range *numbers* are re-prompted here.
choice = int(input("Enter your choice: "))
while choice < 1 or choice > 3:
    print("Invalid choice")
    choice = int(input("Enter your choice: "))
if choice == 1:
    # Moves to the signup.py file to display the signup page
    import auth.signup
if choice == 2:
    # Moves to the signin.py file to display the signin page
    import auth.session
    # After sign-in, route by the role recorded on the session module.
    if auth.session.loggedInUser["role"] == "admin":
        # Displays the admin menu if the user is an admin
        import adminmenu
    if auth.session.loggedInUser["role"] == "user":
        # Displays the user menu if the user is a user
        import usermenu
if choice == 3:
    exit()
| mnanwarmz/python-uni-uber-like | TL15_G01.py | TL15_G01.py | py | 1,541 | python | en | code | 0 | github-code | 13 |
def infinity_gen(input_list):
    """Endlessly yield elements of ``input_list``, alternating between the
    two ends and moving inward (front, back, front+1, back-1, ...),
    restarting from the outside once the pointers cross.

    Fix: the ``def`` line was corrupted by fused dataset metadata; the logic
    is unchanged.

    :raises ValueError: if ``input_list`` is empty.
    """
    len_list = len(input_list)
    if len_list == 0:
        raise ValueError(
            'В функцию infinity_gen не может быть передан пустой список!'
        )
    left = 0
    right = len_list - 1
    while True:
        yield input_list[left]
        # Avoid yielding the middle element twice when the pointers meet.
        if right > left:
            yield input_list[right]
        left += 1
        right -= 1
        if left > right:
            left, right = 0, len_list - 1
if __name__ == '__main__':
    # Collect only the digit characters from the user's input; they feed the
    # infinite generator below.
    st = input('Введите последовательность символов содержащую цифры: ')
    num_list = [num for num in st if num.isdecimal()]
    if len(num_list) == 0:
        run = False
        print('Вы не ввели ни одной цифры.')
    else:
        run = True
        gen = infinity_gen(num_list)
    # Interactive loop: 'y' prints the next generated digit, 'n' exits,
    # anything else re-prompts.
    while run:
        answer = input('Продолжить выполнение программы?(y/n) ')
        if answer.lower() in ('yes', 'y'):
            print(next(gen))
        elif answer.lower() in ('n', 'no'):
            run = False
        else:
            print(
                '\nНеверный ввод. Введите "y" или "n".'
            )
    print('Заверщение работы!')
| palmage/Homework | Solution1.py | Solution1.py | py | 1,287 | python | ru | code | 0 | github-code | 13 |
70865462417 | import pandas as pd
import numpy as np
class Information:
    """Loader and exploratory-analysis helpers for the MovieLens ml-100k
    data set (files are expected under ./ml-100k/)."""

    def __init__(self):
        # Known totals for the ml-100k release: users, movies, ratings.
        self.user_number = 943
        self.movie_number = 1682
        self.rating_number = 100000
        # Age-bucket edges consumed by pd.cut (bins are left-open/right-closed).
        self.age_bin = [0,18,25,35,60,100]

    def load_info(self):
        """Read the u.user / u.data / u.item / u.genre / u.occupation tables
        into DataFrames stored on self."""
        # Read user records.
        user_names = ['user id', 'age', 'gender', 'occupation', 'zip code']
        self.user_info = pd.read_table('./ml-100k/u.user', sep = '\|', names = user_names, engine = 'python')
        # Read the ratings.
        rating_names = ['user id', 'movie id', 'rating', 'timestamp']
        self.rating_info = pd.read_table('./ml-100k/u.data', sep = '\t', names = rating_names, engine = 'python')
        # Read movie records (one 0/1 indicator column per genre).
        movie_names = ['movie id', 'movie title', 'release date', 'video release date',
                    'IMDb URL', 'unknown', 'Action', 'Adventure', 'Animation',
                    'Children\'s', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
                    'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
                    'Thriller', 'War', 'Western']
        self.movie_info = pd.read_table('./ml-100k/u.item', sep = '\|', names = movie_names, engine = 'python')
        # Read the genre-id table.
        genre_names = ['genre', 'index']
        self.genre_info = pd.read_table('./ml-100k/u.genre', sep = '\|', names = genre_names, engine = 'python')
        # Read the occupation list.
        occu_names = ['occupation']
        self.occu_info = pd.read_table('./ml-100k/u.occupation', names = occu_names, engine = 'python')

    def question1(self):
        '''
        Count how many movies fall in each genre.

        Returns
        -------
        genre_df : Series
            number of movies per genre (despite the name, DataFrame.sum()
            yields a Series here); also printed as a side effect
        '''
        genre_df = pd.DataFrame()
        # Slice just the genre indicator columns for all movies.
        genre_df = self.movie_info.loc[0:self.movie_number-1, 'unknown':'Western']
        print(genre_df.sum())
        return genre_df.sum()

    def question1_2(self):
        '''
        Find the words that occur most often in movie titles.

        Returns
        -------
        word_dct_ten : list
            the 10 most frequent (word, count) pairs
        '''
        movie_df = pd.DataFrame()
        movie_df = self.movie_info['movie title']
        movie_list = movie_df.tolist()
        # Basic clean-up of the titles.
        word_list=[]
        for i in movie_list:
            i = i.split()[:-1] # drop the trailing "(year)" token
            for j in i:
                j = j.lower()
                j = (j.replace(',','').replace('.','').replace(':','').replace('*','').
                     replace('&','').replace('(','').replace(')',''))
                word_list.append(j)
        # Count word frequencies with a plain dict.
        word_dct = {}
        for k in word_list:
            if k not in word_dct:
                word_dct[k] = 1
            else:
                word_dct[k] += 1
        # Remove function words (pronouns, numerals, articles, auxiliaries,
        # some adverbs, prepositions, conjunctions and interjections).
        # NOTE(review): del raises KeyError if one of these words never
        # appears in the titles — confirm the list matches the data.
        meaningless_word = {'the','of','a','in','and','to','for','my','','on',
                            'la','with','2','de','i','it','ii'}
        for i in meaningless_word:
            del(word_dct[i])
        # Keep the 10 highest-frequency words.
        newdct = sorted(word_dct.items(), key = lambda d:d[1], reverse = True)
        word_dct_ten=[]
        for i in range(10):
            word_dct_ten.append(newdct[i])
        return(word_dct_ten)

    def question2(self):
        '''
        Compare rating levels across occupations
        (exercises table joins and groupby).

        Returns
        -------
        occu_rating_mean_df : DataFrame
            mean rating given by each occupation (descending)
        occu_rating_var_df : DataFrame
            rating variance per occupation (ascending)
        '''
        user_df = pd.DataFrame(self.user_info,
                               columns=['user id', 'occupation'])
        rating_df = pd.DataFrame(self.rating_info,
                                 columns=['user id', 'rating'])
        # Join users to their ratings on user id.
        uid_occu_rating_df = pd.merge(user_df, rating_df, on='user id')
        uid_occu_rating_df = uid_occu_rating_df[['occupation','rating']]
        # Group by occupation.
        occu_rating_df = uid_occu_rating_df.groupby('occupation')
        # # quick visual check of the result
        # occu_rating_df.agg(['mean','var']).plot(kind='bar')
        # Mean rating per occupation.
        occu_rating_mean_df = occu_rating_df.agg('mean').sort_values(by='rating',ascending = False )
        # Rating variance per occupation.
        occu_rating_var_df = occu_rating_df.agg('var').sort_values(by='rating')
        return occu_rating_mean_df, occu_rating_var_df

    def question3(self):
        '''
        Compare ratings across gender and age band
        (exercises multi-key groupby and visualization).

        Returns
        -------
        age_gender_rating_mean_df : DataFrame
            mean rating per (age band, gender)
        age_gender_rating_var_df : DataFrame
            rating variance per (age band, gender)
        '''
        user_df = pd.DataFrame(self.user_info,
                               columns=['user id', 'age', 'gender'])
        rating_df = pd.DataFrame(self.rating_info,
                                 columns=['user id', 'rating'])
        # Join users to their ratings on user id.
        uid_age_rating_df = pd.merge(user_df, rating_df, on='user id')
        uid_age_rating_df = uid_age_rating_df[['age','gender','rating']]
        # Bucket the ages.
        age_group = pd.cut(uid_age_rating_df['age'],bins=self.age_bin)# cut bins are left-open/right-closed; nobody in the data is >100
        age_gender_rating_df = uid_age_rating_df.groupby([age_group,'gender'])
        # # quick visual check of the result
        # age_gender_rating_df.agg(['mean','var']).plot(kind='bar')
        # Mean rating per (age band, gender).
        age_gender_rating_mean_df = age_gender_rating_df.agg('mean').sort_values(by='rating', ascending=False)['rating']
        # Rating variance per (age band, gender).
        age_gender_rating_var_df = age_gender_rating_df.agg('var').sort_values(by='rating')['rating']
        return age_gender_rating_mean_df, age_gender_rating_var_df

    def question4(self):
        '''
        Which movie genres score well for each gender/age band
        (exercises a three-table join, multi-index groupby and plotting).

        Returns
        -------
        genre_gender_age_mean_rating_df : DataFrame
            mean rating per genre across (age band, gender, is-genre) groups
        genre_gender_age_var_rating_df : DataFrame
            rating variance per genre across the same groups
        '''
        user_df = pd.DataFrame(self.user_info, columns=['user id', 'age', 'gender'])
        rating_df = pd.DataFrame(self.rating_info, columns=['user id', 'movie id', 'rating'])
        genre_list = self.genre_info['genre'].tolist() # all movie genres
        columns_need = ['movie id']
        columns_need.extend(genre_list)
        movie_df = pd.DataFrame(self.movie_info, columns = columns_need)
        user_rating_df = pd.merge(user_df, rating_df, on='user id')
        user_rating_movie_df = pd.merge(user_rating_df,
                                        movie_df,
                                        how='left',
                                        on='movie id')
        # Group and emit per-genre results.
        age_group = pd.cut(user_rating_movie_df['age'],bins=self.age_bin)
        genre_gender_age_mean_rating_df = pd.DataFrame()
        genre_gender_age_var_rating_df = pd.DataFrame()
        for genre in genre_list:
            # Slice down to the columns needed for this genre (speeds things up).
            cur_user_rating_movie_df = user_rating_movie_df[['age','gender',genre,'rating']]
            # Rename the genre column so the groupby key (and the resulting
            # column semantics) are uniform across genres.
            # NOTE(review): inplace rename on a slice may trigger pandas'
            # SettingWithCopyWarning — confirm it behaves as intended here.
            cur_user_rating_movie_df.rename(columns={genre:'is current genre'},inplace=True)
            # Group by the triple (age band, gender, is-this-genre).
            cur_genre_gender_age_rating_df = cur_user_rating_movie_df.groupby([age_group, 'gender', 'is current genre'])
            # Collect the mean and variance columns for this genre.
            genre_gender_age_mean_rating_df[genre] = cur_genre_gender_age_rating_df.agg('mean')['rating']
            genre_gender_age_var_rating_df[genre] = cur_genre_gender_age_rating_df.agg('var')['rating']
        return genre_gender_age_mean_rating_df, genre_gender_age_var_rating_df
if __name__ == '__main__':
    # Load the data once, then run the per-question analyses; the unpacked
    # results are kept as locals for interactive inspection.
    info = Information()
    info.load_info()
    # genre_df = info.question1()
    occu_rating_mean_df, occu_rating_var_df = info.question2()
    age_rating_mean_df, age_rating_var_df = info.question3()
    genre_gender_age_mean_rating_df, genre_gender_age_var_rating_df = info.question4()
| haolin-nju/ExploreML100k | main.py | main.py | py | 8,892 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/4/11 11:11
# @Author : guoyankai
# @Email : 392759421@qq.com
# @File : detect.py.py
# @software: PyCharm
import cv2
import time
import numpy as np
import torch
from torch.autograd.variable import Variable
from models.models import PNet, RNet, ONet
from core.image_reader import convert_image_to_tensor
from core.image_tools import convert_chwTensor_to_hwcNumpy
def _load_best_model(net, model_dir, use_cuda):
    """Load ``model_dir``/best_model.pth into ``net``, mapped to GPU or CPU."""
    device = torch.device("cuda:0") if use_cuda else torch.device("cpu")
    # Forward slashes work on both POSIX and Windows; the original mixed
    # "/" and "\\" separators, which broke the R/O-net paths on Linux.
    ckpt = torch.load("{}/best_model.pth".format(model_dir), map_location=device)
    net.load_state_dict(ckpt["net"])


def creat_mtcnn_net(p_model_path=None, r_model_path=None,
                    o_model_path=None, use_cuda=True):
    """Build and restore the P/R/O networks from their checkpoint directories.

    Any stage whose path is None is skipped and returned as None.

    NOTE(review): the return order is (pnet, onet, rnet) — R and O are
    swapped relative to the parameter order; callers must unpack in this
    order.  TODO confirm this is intentional.

    :param p_model_path: directory holding PNet's best_model.pth, or None
    :param r_model_path: directory holding RNet's best_model.pth, or None
    :param o_model_path: directory holding ONet's best_model.pth, or None
    :param use_cuda: load weights onto cuda:0 when True, CPU otherwise
    :return: (pnet, onet, rnet)
    """
    pnet, rnet, onet = None, None, None

    if p_model_path is not None:
        pnet = PNet(use_cuda=use_cuda)
        _load_best_model(pnet, p_model_path, use_cuda)
        print("pnet Model loaded!")

    if r_model_path is not None:
        rnet = RNet(use_cuda=use_cuda)
        _load_best_model(rnet, r_model_path, use_cuda)
        print("rnet Model loaded!")

    if o_model_path is not None:
        onet = ONet(use_cuda=use_cuda)
        _load_best_model(onet, o_model_path, use_cuda)
        print("onet Model loaded!")

    return pnet, onet, rnet
class MtcnnDetector(object):
"""P,R,O net face detection(人脸检测) and landmark align(关键点排列)"""
def __init__(self, pnet=None, rnet=None, onet=None,
min_face_size=12,
stride=2,
threshold=[0.6, 0.7, 0.7],
scale_factor=0.709):
self.pnet_detector = pnet
self.rnet_detector = rnet
self.onet_detector = onet
self.min_face_size = min_face_size
self.stride = stride
self.thresh = threshold
self.scale_factor = scale_factor
def unique_image_fromat(self, im):
# 统一图片格式
if not isinstance(im, np.ndarray):
if im.mode == "I":
im = np.array(im, np.int32, copy=False)
elif im.mode == "I;16":
im = np.array(im, np.int16, copy=False)
else:
im = np.asarray(im)
return im
def square_bbox(self, bbox):
"""
转换为方形的bbox
:param bbox: np.array,shape:n*m 个inputbbox
:return: 方形bbox
"""
square_bbox = bbox.copy() # 返回数组的副本
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
l = np.maximum(h, w)
# x1 = x1 + w*0.5- l*0.5
# y1 = y1 + w*0.5 -l*0.5
square_bbox[:, 2] = square_bbox[:, 0] + l - 1
square_bbox[:, 3] = square_bbox[:, 1] + l - 1
return square_bbox
    def generate_bounding_box(self, map, reg, scale, threshold):
        """
        Generate candidate bboxes from a PNet feature map.

        WORK IN PROGRESS (original marker "未完成" = unfinished): the method
        currently only locates the above-threshold cells and falls off the
        end, returning None.

        :param map: numpy.array, shape [n, m, 1] — face classification scores
        :param reg: numpy.array, shape [n, m, 4] — bbox regression offsets
        :param scale: float, pyramid scale this map was produced at
        :param threshold: float, minimum face score to keep a cell
        :return: None (bbox assembly not implemented yet)
        """
        stride = 2
        callsize = 12  # receptive field of the 12x12 PNet window
        t_index = np.where(map > threshold)  # NOTE: currently unused
def resize_image(self, img, scale):
"""
resize image and transform dimention to [batchsize, channel, height, width]
Parameters:
----------
img: numpy array , height x width x channel
input image, channels in BGR order here
scale: float number
scale factor of resize operation
Returns:
-------
transformed image tensor , 1 x channel x height x width
"""
height, width, channels = img.shape
new_height = int(height * scale) # resized new height
new_width = int(width * scale) # resized new width
new_dim = (new_width, new_height)
img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR) # resized image
return img_resized
    def detect_pnet(self, im):
        """
        Get face candidate boxes from the image via PNet.

        WORK IN PROGRESS: only the first pyramid level is processed (note the
        unconditional ``break``), and the surviving code path ends without a
        usable result; the debug prints are deliberate development aids.

        :param im: numpy.array, input image (one image, H x W x C)
        :return: intended to become (bboxes before calibration,
                 bboxes after calibration); not produced yet
        """
        h, w, c = im.shape
        net_size = 12
        # First pyramid scale maps min_face_size onto PNet's 12px input.
        current_scale = float(net_size) / self.min_face_size
        print('img shape:{0}, current_scale:{1}'.format(im.shape, current_scale))
        im_resized = self.resize_image(im, current_scale)
        current_height, current_width, _ = im_resized.shape
        all_boxes= list()
        i = 0
        # Image pyramid: shrink until the image no longer covers PNet's window.
        while min(current_height, current_width) > net_size:
            feed_imgs = []
            print("im_resized:", im_resized.shape, type(im_resized))
            image_tensor = convert_image_to_tensor(im_resized)
            print("image_tensor:", image_tensor.shape, type(image_tensor))
            feed_imgs.append(image_tensor)
            feed_imgs = torch.vstack(feed_imgs)
            feed_imgs = Variable(feed_imgs)
            # Add the batch dimension expected by the network.
            feed_imgs = torch.unsqueeze(feed_imgs, 0)
            print("feed_imgs:", feed_imgs.shape, type(feed_imgs))
            # device = torch.device("cuda:0")
            # feed_imgs = feed_imgs.to(device, dtype=torch.float32)
            cls_map, reg = self.pnet_detector(feed_imgs)
            print("cls_map:", cls_map.shape, "reg:", reg.shape)
            cls_map_np = convert_chwTensor_to_hwcNumpy(cls_map.cpu())
            print("cls_map_np:", cls_map_np.shape)
            reg_np = convert_chwTensor_to_hwcNumpy(reg.cpu())
            print("reg_np:", reg_np.shape)
            # --------------------- TODO (gyk): review ----------------------
            boxes = self.generate_bounding_box(cls_map_np[0, :, :], reg_np, current_scale, self.thresh[0])
            break
return 1, 2 | Guo-YanKai/mtcnn | core/detect.py | detect.py | py | 6,838 | python | en | code | 1 | github-code | 13 |
#!/usr/bin/env python3.5
import random
import time
from itertools import chain
import matplotlib.pyplot as plt
from collections import deque
from copy import deepcopy
class Queue(object):
    """
    FIFO queue wrapper around collections.deque.

    enqueue() pushes on the left and dequeue() pops from the right, so
    elements leave in the order they arrived.
    """
    def __init__(self, arg=()):
        # Bug fix: the default used to be the mutable ``list()``; deque()
        # copies its argument, so an immutable tuple default is equivalent
        # while avoiding the shared-mutable-default pitfall.
        self._queue = deque(arg)

    def __iter__(self):  # TODO: find a way to do it without using sorted
        # NOTE: iteration yields the values in ascending sorted order, NOT
        # queue order (original behavior preserved).
        for value in sorted(self._queue, reverse=False):
            yield value

    def __len__(self):
        return len(self._queue)

    def __str__(self):
        return str(self._queue)

    def enqueue(self, value):
        """
        Add ``value`` at the tail of the queue.
        """
        self._queue.appendleft(value)

    def dequeue(self):
        """
        Remove and return the oldest value (head of the queue).
        """
        return self._queue.pop()

    def is_empty(self):
        """
        Return True when the queue is empty,
        False otherwise.
        """
        return not self._queue

    def clear(self):
        """
        Clear out the queue.
        """
        self._queue.clear()
class UPATrial:
    """
    Encapsulates the optimized trials for the UPA algorithm.

    Keeps a flat list of node numbers in which each node appears as many
    times as its (degree-proportional) selection weight, so random.choice
    over the list samples nodes with the desired probabilities.
    """
    def __init__(self, num_nodes):
        """
        Initialize a UPATrial for a complete graph on ``num_nodes`` nodes:
        every node number starts with ``num_nodes`` copies in the pool.
        """
        self._num_nodes = num_nodes
        self._node_numbers = []
        for node in range(num_nodes):
            self._node_numbers.extend([node] * num_nodes)

    def run_trial(self, num_nodes):
        """
        Draw ``num_nodes`` neighbors for the newly created node by applying
        random.choice to the weighted pool, then update the pool so each
        node keeps appearing in the correct ratio.

        Returns:
        Set of neighbor nodes for the new node.
        """
        # Sample the neighbors of the node being added.
        chosen = {random.choice(self._node_numbers) for _ in range(num_nodes)}
        # The new node gets one copy plus one per neighbor it acquired;
        # each chosen neighbor gains one copy for its new edge.
        self._node_numbers.append(self._num_nodes)
        self._node_numbers.extend([self._num_nodes] * len(chosen))
        self._node_numbers.extend(list(chosen))
        self._num_nodes += 1
        return chosen
def timeit(func, *args, **kwargs):
    """Call ``func(*args, **kwargs)`` and return the elapsed wall-clock
    seconds (the function's own return value is discarded)."""
    began = time.time()
    func(*args, **kwargs)
    return time.time() - began
def make_graph(nodes, edges):
    """Build an adjacency-set graph from ``nodes`` and directed edge tuples
    ``(src, dst)``; nodes without edges map to empty sets."""
    graph = {node: set() for node in nodes}
    for src, dst in edges:
        graph[src].add(dst)
    return graph
def remove_node(graph, node):
    """Delete ``node`` and every edge touching it from the (symmetric)
    adjacency-set ``graph``, in place."""
    for adjacent in graph[node]:
        graph[adjacent].remove(node)
    graph.pop(node)
def make_complete_graph(num_nodes):
    """Return the complete undirected graph on ``num_nodes`` nodes as an
    adjacency-set dict (every node adjacent to every other node)."""
    return {node: set(range(num_nodes)) - {node} for node in range(num_nodes)}
def make_er(num_nodes, probability):
    """Generate an Erdős–Rényi G(n, p) graph as an adjacency-set dict.

    Bug fix: the original iterated over *ordered* pairs, giving every
    unordered pair {i, j} two independent coin flips, so the effective edge
    probability was 1 - (1 - p)^2 ≈ 2p.  Each unordered pair now gets
    exactly one flip with probability ``probability``.
    """
    graph = {node: set() for node in range(num_nodes)}
    for i in range(num_nodes):
        for j in range(i + 1, num_nodes):
            if random.random() < probability:
                graph[i].add(j)
                graph[j].add(i)
    return graph
def make_upa(num_edges, num_nodes):
    """Grow a UPA (preferential-attachment) graph: start from a complete
    graph on ``num_edges`` nodes, then attach each new node to ``num_edges``
    preferentially chosen neighbors (edges added in both directions)."""
    graph = make_complete_graph(num_edges)
    trials = UPATrial(num_edges)
    for newcomer in range(num_edges, num_nodes):
        chosen = trials.run_trial(num_edges)
        graph[newcomer] = chosen
        for neighbor in chosen:
            graph[neighbor].add(newcomer)
    return graph
def load_graph_data(file_name):
    """
    Load an undirected graph from a text file.

    Each line looks like "<node> <nbr1> <nbr2> ... " (space separated).
    Returns a (nodes, edges) pair where edges holds both directions of
    every adjacency.
    """
    with open(file_name) as graph_file:
        graph_lines = graph_file.read().split('\n')
    # Drop the empty string produced by the file's trailing newline.
    graph_lines = graph_lines[:-1]
    print("Loaded graph with", len(graph_lines), "nodes")
    nodes = []
    edges = []
    for line in graph_lines:
        fields = line.split(' ')
        node = int(fields[0])
        nodes.append(node)
        # NOTE(review): fields[1:-1] assumes each line ends with a trailing
        # space, so the final split item is empty — verify the data format.
        for neighbor in fields[1:-1]:
            edges.append((node, int(neighbor)))
            edges.append((int(neighbor), node))
    return nodes, edges
def bfs_visited(ugraph, start_node):
    """
    Breadth-first search over the undirected graph ``ugraph``.

    Returns the set of all nodes reachable from ``start_node`` (including
    ``start_node`` itself).
    """
    frontier = Queue()
    visited = {start_node}
    frontier.enqueue(start_node)
    while not frontier.is_empty():
        current = frontier.dequeue()
        for neighbor in ugraph[current]:
            if neighbor not in visited:
                visited.add(neighbor)
                frontier.enqueue(neighbor)
    return visited
def cc_visited(ugraph):
    """
    Compute the connected components of the undirected graph ``ugraph``.

    Returns a list of sets, one set of nodes per connected component.
    """
    unexplored = set(ugraph.keys())
    components = []
    while unexplored:
        # Pick any not-yet-assigned node and flood-fill its component.
        seed = random.choice(list(unexplored))
        component = bfs_visited(ugraph, seed)
        components.append(component)
        unexplored -= component
    return components
def largest_cc_size(ugraph):
    """Return the size of the largest connected component of ``ugraph``
    (0 for an empty graph).

    Improvement: the original sorted the whole component list just to read
    its first element; taking max() over the sizes is O(n) instead of
    O(n log n) and returns the identical value.
    """
    connected_components = cc_visited(ugraph)
    if not connected_components:
        return 0
    return max(len(component) for component in connected_components)
def compute_resilience(ugraph, attack_order):
    """
    Measure how the largest connected component shrinks under attack.

    For each node in ``attack_order`` the node (and its edges) is removed
    and the size of the largest remaining component is recorded.  The
    returned list's entry k is that size after removing the first k nodes;
    entry 0 is the size for the untouched graph.  ``ugraph`` itself is not
    modified (a deep copy is attacked).
    """
    working_graph = deepcopy(ugraph)
    resilience = []
    for target in attack_order:
        resilience.append(largest_cc_size(working_graph))
        remove_node(working_graph, target)
    resilience.append(largest_cc_size(working_graph))
    return resilience
def random_order(graph):
    """Return the graph's nodes as a list in uniformly shuffled order."""
    shuffled = list(graph.keys())
    random.shuffle(shuffled)
    return shuffled
def fast_targeted_order(graph):
    """
    Compute an attack order of maximal-degree nodes in O(n + m) by keeping
    one bucket of nodes per current degree.  Works on a deep copy, so the
    caller's graph is untouched.
    """
    graph = deepcopy(graph)
    num_nodes = len(graph)
    # degree_sets[d] holds the nodes whose *current* degree is exactly d.
    degree_sets = [set() for _ in range(num_nodes)]
    for node in graph:
        degree_sets[len(graph[node])].add(node)
    order = []
    for degree in range(num_nodes - 1, -1, -1):
        while degree_sets[degree]:
            node = degree_sets[degree].pop()
            # Removing ``node`` lowers every neighbor's degree by one, so
            # each neighbor migrates down one bucket.
            for neighbor in graph[node]:
                nbr_degree = len(graph[neighbor])
                degree_sets[nbr_degree].remove(neighbor)
                degree_sets[nbr_degree - 1].add(neighbor)
            order.append(node)
            remove_node(graph, node)
    return order
def targeted_order(graph):
    """
    Compute a targeted attack order consisting of nodes of maximal degree.

    Quadratic reference implementation: rescans the remaining graph for the
    highest-degree node on every step (ties go to the first node seen).
    Works on a deep copy; the caller's graph is untouched.

    Returns:
    A list of nodes
    """
    remaining = deepcopy(graph)
    order = []
    while remaining:
        victim = None
        best_degree = -1
        for node in remaining:
            if len(remaining[node]) > best_degree:
                best_degree = len(remaining[node])
                victim = node
        neighbors = remaining.pop(victim)
        for neighbor in neighbors:
            remaining[neighbor].remove(victim)
        order.append(victim)
    return order
##################################################
# Application 2 questions
def Q1():
    """Application 2, Q1: plot the resilience of the computer network, an ER
    graph and a UPA graph under a *random* attack order; saves 'Q1.png'."""
    # Generating graphs
    nodes, edges = load_graph_data('alg_rf7.txt')
    num_nodes = len(nodes)
    # Computer Network graph
    comp_net_graph = make_graph(nodes, edges)
    # Erdos and Renyi graph
    # NOTE(review): built with p=.002 but the legend below says "P = .02" —
    # one of the two is wrong; confirm which.
    er_graph = make_er(num_nodes, .002)
    # Preferential Attachment graph
    pa_graph = make_upa(3, num_nodes)
    comp_attack_order = random_order(comp_net_graph)
    er_attack_order = random_order(er_graph)
    pa_attack_order = random_order(pa_graph)
    comp_resilience = compute_resilience(comp_net_graph, comp_attack_order)
    er_resilience = compute_resilience(er_graph, er_attack_order)
    pa_resilience = compute_resilience(pa_graph, pa_attack_order)
    plt.figure(figsize=(7, 7), dpi=300)
    plt.plot(comp_resilience, color='blue', label='Computer Network')
    plt.plot(er_resilience, color='green', label='ER random graph')
    plt.plot(pa_resilience, color='red', label='UPA graph')
    plt.title('Resilience of different graphs',
              fontsize=18,
              color='#ff8800')
    plt.xlabel('Number of nodes removed',
               fontsize=14,
               color='#ff8800')
    plt.ylabel('Size of the largest connected component',
               fontsize=14,
               color='#ff8800')
    plt.legend(loc='best', labels=['Computer Network',
                                   'ER random graph, P = .02',
                                   'UPA graph, M = 3'])
    # plt.show()
    plt.savefig('Q1', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
    # print(len(comp_net_graph), sum([len(x) for x in comp_net_graph.values()]) // 2, largest_cc_size(comp_net_graph))
    # print(len(er_graph), sum([len(x) for x in er_graph.values()]) // 2, largest_cc_size(er_graph))
    # print(len(pa_graph), sum([len(x) for x in pa_graph.values()]) // 2, largest_cc_size(pa_graph))
def Q3():
    """
    Application 2, Q3: time fast_targeted_order against targeted_order on
    UPA graphs of growing size (m = 5) and display the comparison plot.

    fast_targeted_order: fast (bucket-per-degree, linear time)
    targeted_order: slow (full rescan per removal, quadratic)
    """
    graph_lengths = range(10, 1000, 10)
    graphs = [make_upa(5, x) for x in graph_lengths]
    fast_times = [timeit(fast_targeted_order, graph) for graph in graphs]
    slow_times = [timeit(targeted_order, graph) for graph in graphs]
    # Plotting
    plt.plot(graph_lengths, fast_times, color='b', label='fast_targeted_order')
    plt.plot(graph_lengths, slow_times, color='g', label='targeted_order')
    plt.title('Regular and fast targeted order - Desktop',
              fontsize=18,
              color='#ff8800')
    plt.xlabel('Size of graph, with M = 5',
               fontsize=14,
               color='#ff8800')
    plt.ylabel('Time in seconds',
               fontsize=14,
               color='#ff8800')
    plt.legend(loc='best', labels=['fast_targeted_order',
                                   'targeted_order'])
    plt.show()
    # plt.savefig('Q3', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
def Q4():
    """Application 2, Q4: plot the resilience of the three graphs under a
    *targeted* (max-degree-first) attack order; saves 'Q4.png'.

    NOTE(review): the plot title string contains the typo 'tageted'
    (runtime output — left unchanged here).
    """
    # Generating graphs
    nodes, edges = load_graph_data('alg_rf7.txt')
    num_nodes = len(nodes)
    # Computer Network graph
    comp_net_graph = make_graph(nodes, edges)
    # Erdos and Renyi graph
    # NOTE(review): p=.002 here vs "P = .02" in the legend — confirm which.
    er_graph = make_er(num_nodes, .002)
    # Preferential Attachment graph
    pa_graph = make_upa(3, num_nodes)
    comp_attack_order = fast_targeted_order(comp_net_graph)
    er_attack_order = fast_targeted_order(er_graph)
    pa_attack_order = fast_targeted_order(pa_graph)
    comp_resilience = compute_resilience(comp_net_graph, comp_attack_order)
    er_resilience = compute_resilience(er_graph, er_attack_order)
    pa_resilience = compute_resilience(pa_graph, pa_attack_order)
    # Plotting
    plt.plot(comp_resilience, color='blue', label='Computer Network')
    plt.plot(er_resilience, color='green', label='ER random graph')
    plt.plot(pa_resilience, color='red', label='UPA graph')
    plt.title('Resilience of different graphs under tageted attacks\nusing fast_targeted_order',
              fontsize=18,
              color='#ff8800')
    plt.xlabel('Number of nodes removed',
               fontsize=14,
               color='#ff8800')
    plt.ylabel('Size of the largest connected component',
               fontsize=14,
               color='#ff8800')
    plt.legend(loc='best', labels=['Computer Network',
                                   'ER random graph, P = .02',
                                   'UPA graph, M = 3'])
    # plt.show()
    plt.savefig('Q4', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
# print(timeit(Q1))
| MohamedAbdultawab/FOC_RiceUniv | algorithmic-thinking-1/module-2-project-and-application/02_application-2-analysis-of-a-computer-network/main.py | main.py | py | 13,373 | python | en | code | 0 | github-code | 13 |
41118952771 | import pytest
from _pytest.config import PytestPluginManager, hookimpl
from pytest_richtrace.plugin import PytestRichTrace
def pytest_addoption(parser: pytest.Parser, pluginmanager: PytestPluginManager) -> None:
    """
    Add options to the pytest command line for the richtrace plugin

    :param parser: The pytest command line parser
    """
    group = parser.getgroup("richtrace")
    group.addoption(
        "--rich-trace",
        dest="rich_trace",
        action="store_true",
        help="Enable the richtrace plugin",
    )
    # The four output options only differ in flag name, dest and help text,
    # so they are registered from a table.
    for flag, dest, help_text in (
        ("--output-svg", "output_svg", "Output the trace as an SVG file"),
        ("--output-html", "output_html", "Output the trace as an HTML file"),
        ("--output-text", "output_text", "Output the trace as a text file"),
        ("--output-json", "output_json", "Output the results as JSON as a text file."),
    ):
        group.addoption(flag, dest=dest, help=help_text)
@hookimpl(trylast=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Configure the richtrace plugin

    :param config: The pytest config object
    """
    if not config.option.rich_trace:
        return
    # Swap pytest's stock terminal reporter for the rich tracer.
    terminal = config.pluginmanager.get_plugin("terminalreporter")
    config.pluginmanager.unregister(plugin=terminal)
    PytestRichTrace(config)
| sffjunkie/pytest-richtrace | src/pytest_richtrace/__init__.py | __init__.py | py | 1,431 | python | en | code | 0 | github-code | 13 |
import tarfile
import os


def _is_safely_contained(directory, target):
    """True when ``target`` resolves inside ``directory`` (no '..' or
    absolute-path escape after symlink/realpath resolution)."""
    base = os.path.realpath(directory)
    resolved = os.path.realpath(target)
    return resolved == base or resolved.startswith(base + os.sep)


# Define the path to the TAR archive
tar_file_path = 'example.tar.gz'

# Specify the target directory where you want to extract the files
target_directory = 'extracted_files/'

with tarfile.open(tar_file_path, 'r:gz') as tar:
    # List the contents of the TAR archive
    tar.list()

    # SECURITY FIX (CWE-22): a crafted archive can contain member names like
    # "../../etc/passwd"; reject any member that would land outside the
    # target directory before extracting anything.
    for member in tar.getmembers():
        destination = os.path.join(target_directory, member.name)
        if not _is_safely_contained(target_directory, destination):
            raise ValueError("Blocked path traversal attempt: " + member.name)
    tar.extractall(target_directory)

# The archive is closed automatically when the 'with' block exits
print("Files extracted to:", target_directory)
36334421836 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 14:27:45 2019
@author: isaacscarrott
"""
#Import the correct variables
from numpy import linalg, array, dot, identity, asarray, asmatrix
from pandas import read_csv
from matplotlib.pyplot import plot, figure, xlim, ylim, title, legend, savefig
#Where all of the weights are calculated using matrix multiplication and inversion and addition
def ridge_regression(features_train, y_train, regularisationFactor):
#Identity Matrix in the shape of the features
I = identity(features_train.shape[1])
print(y_train)
#This is the equasion to work out the weights of each of the twelve features using the equasion discussed in lecture 3 slide 63 and return the value of this as a numpy array
parameters = linalg.solve(dot(features_train.T, features_train) + dot(regularisationFactor, I), dot(features_train.T, y_train))
return asarray(parameters)
#Reads the 2 CSV files into two variables in a panada dataframe
featuresTrainDF = read_csv("Data/train.csv")
plottingDF = read_csv("Data/plotting.csv")
#Selects the respective columns for the variable form the respective dataframe and converts it to a numpy array
xTrain = array(featuresTrainDF['x'])
yTrain = array(featuresTrainDF['y'])
featuresTrain = array(featuresTrainDF[["features0","features1","features2","features3","features4","features5","features6","features7","features8","features9","features10","features11"]])
xPlotting = array(plottingDF['x'])
featuresPlotting = array(plottingDF[["features0","features1","features2","features3","features4","features5","features6","features7","features8","features9","features10","features11"]]).T
#Used to store the weights of each feature and the features multiplied by the weight
weights = []
featuresPlottingMultiplied = []
#Regularisation factors that we will loop through
values = [10**-6, 10**-4, 10**-2, 10**-1]
#Loops through the regularisation factors
for x in values:
#Puts the returned array of weights for the given regularisation factor into the weight array
weights.append(ridge_regression(asmatrix(featuresTrain), asmatrix(yTrain).T, x))
#Calculates the learned function using the weight just calulcated and store it in an array
featuresPlottingMultiplied.append(dot(featuresPlotting.T,weights[-1]))
#Loops through each of the learned functions
for index,y in enumerate(featuresPlottingMultiplied):
#Sets the limits of the x and y axis
xlim([-5, 5])
ylim([-1000, 1000])
titleTemp = 'Graph for λ = ' + str(values[index])
#Plots the learned function and the training points on the same graph and labels it
title(titleTemp)
plot(xTrain, yTrain, 'o', label="Training Points")
plot(xPlotting, y, label="Learned Function")
legend()
savefig(titleTemp + ".png")
figure()
| isaac-scarrott/Ridge-Regression-K-Means | Ridge Regression/RidgeRegression.py | RidgeRegression.py | py | 2,858 | python | en | code | 0 | github-code | 13 |
11411327047 | import pandas as pd
import numpy as np
from collections import Counter
from sklearn.manifold import TSNE
def _uni_counts_embedder(data, **kwargs):
    """Vectorize each trajectory as raw per-event counts (unigrams only).

    Builds an index x event matrix where cell (u, e) is how many times
    trajectory u contains event e; column names are the event values.

    Kwargs:
        index_col / event_col: column names; when absent they are read from
            the retention config exposed by the ``trajectory`` accessor
            (registered elsewhere in the package -- TODO confirm).
        last_k: if given, only the last k events of every trajectory count.
    """
    if 'index_col' not in kwargs:
        index_col = data.trajectory.retention_config['index_col']
    else:
        index_col = kwargs['index_col']
    if 'event_col' not in kwargs:
        event_col = data.trajectory.retention_config['event_col']
    else:
        event_col = kwargs['event_col']
    last_k = kwargs.get('last_k')
    if last_k is not None:
        data = data.groupby(index_col).tail(last_k)
    # Count (index, event) pairs, then pivot events into columns; users that
    # never performed an event get 0 via fillna.
    cv = data.groupby([index_col, event_col]).size().rename('event_count').reset_index()
    cv = cv.pivot(index=index_col, columns=event_col).fillna(0)
    # Collapse the ('event_count', <event>) MultiIndex and drop axis labels.
    cv.columns = cv.columns.levels[1]
    cv.columns.name = None
    cv.index.name = None
    # Tag the frame so downstream code recognizes it as a feature matrix.
    setattr(cv.retention, 'datatype', 'features')
    return cv
def _ngram_agg(x, ngram_range):
res = []
shifts = []
for i in range(ngram_range[0] - 1, ngram_range[1]):
shifts.append(x.shift(i))
res.extend(zip(*shifts))
return Counter(res)
def counts_embedder(data, ngram_range=(1, 1), **kwargs):
    """
    Calculate session embedding (continuous vector form) by counting of events appearance for user.

    For ngram_range == (1, 1) this delegates to the simpler unigram pivot in
    `_uni_counts_embedder`; otherwise event n-grams of every length in the
    range are counted per trajectory via `_ngram_agg`.

    :param data: clickstream dataset
    :param ngram_range: (min_n, max_n) range of ngrams to use in feature extraction
    :param kwargs: index_col, event_col params; optionally
        last_k (keep only each trajectory's last k events) and
        wo_last_k (drop each trajectory's last k events instead)
    :return: pd.DataFrame with sessions vectorized by counts of events
    """
    if max(ngram_range) == 1:
        return _uni_counts_embedder(data, **kwargs)
    if 'index_col' not in kwargs:
        index_col = data.trajectory.retention_config['index_col']
    else:
        index_col = kwargs['index_col']
    if 'event_col' not in kwargs:
        event_col = data.trajectory.retention_config['event_col']
    else:
        event_col = kwargs['event_col']
    last_k = kwargs.get('last_k')
    if last_k is not None:
        data = data.groupby(index_col).tail(last_k)
    wo_last = kwargs.get('wo_last_k')
    if wo_last is not None:
        bad_ids = data.groupby(index_col).tail(wo_last).index.values
        data = data[~data.index.isin(bad_ids)]
    cv = data.groupby(index_col)[event_col].apply(_ngram_agg, ngram_range=ngram_range).reset_index()
    cv = cv.pivot(index=index_col, columns='level_1', values=event_col).fillna(0)
    # NaN != NaN: keeps only n-gram columns whose last element is a real
    # event, dropping tuples padded with NaN by the shift() in _ngram_agg.
    cv = cv.loc[:, [i for i in cv.columns if i[-1] == i[-1]]]
    cv.columns.name = None
    cv.index.name = None
    # NOTE(review): unlike _uni_counts_embedder, this path does not set
    # cv.retention.datatype = 'features' -- confirm whether that is intended.
    return cv
def frequency_embedder(data, ngram_range=(1, 1), **kwargs):
    """
    Similar to `counts_embedder`, but each row is normalized by its total,
    turning raw counts into per-trajectory event frequencies.

    :param data: clickstream dataset
    :param ngram_range: range of ngrams to use in feature extraction
    :param kwargs: index_col, event_col params
    :return: pd.DataFrame with sessions vectorized by frequencies of events
    """
    counts = counts_embedder(data, ngram_range, **kwargs)
    row_totals = counts.values.sum(1).reshape(-1, 1)
    freq = pd.DataFrame(
        counts.values / row_totals,
        index=counts.index.values,
        columns=counts.columns.values,
    )
    # Tag the frame so downstream code recognizes it as a feature matrix.
    setattr(freq.retention, 'datatype', 'features')
    return freq
def tfidf_embedder(data, ngram_range=(1, 1), **kwargs):
    """
    Similar to `frequency_embedder`, but term frequencies are weighted by
    the inverse document frequency log(N / df), down-weighting events that
    appear in almost every trajectory.

    :param data: clickstream dataset
    :param ngram_range: range of ngrams to use in feature extraction
    :param kwargs: index_col, event_col params
    :return: pd.DataFrame with sessions vectorized by Tf-Idf of events
    """
    tf = frequency_embedder(data, ngram_range, **kwargs)
    # 1e-20 guards the (theoretical) zero-document-frequency division.
    doc_freq = (tf > 0).sum(0) + 1e-20
    idf = np.log(tf.shape[0] / doc_freq).values
    tfidf = tf * idf
    # Tag the frame so downstream code recognizes it as a feature matrix.
    setattr(tfidf.retention, 'datatype', 'features')
    return tfidf
def learn_tsne(data, **kwargs):
    """
    Project a feature matrix to 2-D with t-SNE.

    Only kwargs that are valid TSNE constructor parameters are forwarded;
    everything else is silently dropped. random_state is pinned to 0 so
    repeated calls produce the same embedding.

    :param data: array of features
    :param kwargs: arguments for sklearn.manifold.TSNE
    :return: pd.DataFrame with the calculated 2-D TSNE embedding
    """
    valid_params = set(TSNE.get_params(TSNE))
    tsne_kwargs = {name: value for name, value in kwargs.items() if name in valid_params}
    embedding = TSNE(random_state=0, **tsne_kwargs).fit_transform(data.values)
    return pd.DataFrame(embedding, index=data.index.values)
| Demiurgy/retentioneering-tools | retentioneering/core/feature_extraction.py | feature_extraction.py | py | 4,227 | python | en | code | 0 | github-code | 13 |
26581079014 | import numpy as np
import math
import copy
def check_precision(a, eps):
    """Snap *a* to exactly 0 when its magnitude falls below |eps|."""
    return 0 if abs(a) < abs(eps) else a
def is_diagonal_matrix(x):
    """Return True when every off-diagonal entry of *x* is exactly zero."""
    off_diagonal = x - np.diag(np.diagonal(x))
    return not np.any(off_diagonal)
def get_p_q_indexes(A):
    """Locate the strictly upper-triangular entry of A with the largest
    absolute value.

    Returns (0, 0) when no off-diagonal entry is nonzero; ties keep the
    first (row-major) position, matching the strict '>' comparison.
    """
    best = (0, 0)
    best_abs = 0
    n = len(A)
    for row in range(n):
        for col in range(row + 1, n):
            magnitude = abs(A[row][col])
            if magnitude > best_abs:
                best_abs = magnitude
                best = (row, col)
    return best
def get_theta_c_s_t(A, p, q):
    """Jacobi rotation parameters (theta, cos, sin, tan) that zero A[p][q].

    alpha is the standard pivot ratio; t is chosen as the smaller-magnitude
    root of t^2 + 2*alpha*t - 1 = 0 (sign matched to alpha) for stability.
    """
    alpha = (A[p][p] - A[q][q]) / (2 * A[p][q])
    root = (alpha ** 2 + 1) ** (1 / 2)
    t = -alpha + root if alpha >= 0 else -alpha - root
    denom = (1 + t ** 2) ** (1 / 2)
    c = 1 / denom
    s = t / denom
    return math.atan(t), c, s, t
### Deprecated
def construct_R_p_q(size, p, q, c, s):
    """Build the size x size Givens rotation with the (c, s) pair at (p, q).

    Marked deprecated upstream, but `algorithm` still constructs it on
    every iteration, so it is kept.
    """
    rotation = np.identity(size)
    rotation[p, p] = rotation[q, q] = c
    rotation[p, q] = s
    rotation[q, p] = -s
    return rotation
def construct_new_A(A, p, q, c, s, t, eps):
    """Apply one Jacobi rotation to A in place, zeroing A[p][q]/A[q][p].

    The statement order is load-bearing:
      * loop 1 updates only row p (A[p][j]); the mirror A[j][p] is left
        untouched so loop 2 can still read the pre-rotation value;
      * loop 2 computes the new q row/column from the old A[j][p] and the
        (not yet modified) A[q][j];
      * loop 3 restores symmetry by copying row p into column p;
      * the diagonal updates use A[p][q], which is only cleared afterwards.
    Assumes A is symmetric on entry -- TODO confirm for all callers.
    Tiny results are snapped to 0 via check_precision. Returns A (mutated).
    """
    for j in range(0, len(A)):
        if j != p and j != q:
            A[p][j] = check_precision(c * A[p][j] + s * A[q][j], eps)
    for j in range(0, len(A)):
        if j != p and j != q:
            A[q][j] = A[j][q] = check_precision(-s * A[j][p] + c * A[q][j], eps)
    for j in range(0, len(A)):
        if j != p and j != q:
            A[j][p] = A[p][j]
    A[p][p] = check_precision(A[p][p] + t * A[p][q], eps)
    A[q][q] = check_precision(A[q][q] - t * A[p][q], eps)
    # The pivot pair is annihilated exactly by construction.
    A[p][q] = 0
    A[q][p] = 0
    return A
def construct_new_U(U, p, q, c, s, eps):
    """Return a copy of U with columns p and q rotated by the (c, s) pair.

    Both new entries are computed from the ORIGINAL column values, so the
    rotation is read-before-write safe; tiny results snap to 0 via
    check_precision.
    """
    rotated = copy.deepcopy(U)
    for row in range(len(U)):
        u_p, u_q = U[row][p], U[row][q]
        rotated[row][p] = check_precision(c * u_p + s * u_q, eps)
        rotated[row][q] = check_precision(c * u_q - s * u_p, eps)
    return rotated
def custom_sum(lambda_J, lambda_b):
    """Print (and return) the total distance between two eigenvalue sets.

    For every eigenvalue computed with Jacobi (*lambda_J*) find the closest
    reference eigenvalue (*lambda_b*, e.g. from numpy.linalg.eigh) and sum
    those minimal absolute differences. A small total means the two spectra
    agree up to ordering.

    Fixes vs. the original: the locals no longer shadow the builtins
    ``sum``/``min``, and the total is returned (previously only printed)
    so callers can assert on it -- a backward-compatible addition.

    Args:
        lambda_J: iterable of eigenvalues from the Jacobi algorithm.
        lambda_b: non-empty iterable of reference eigenvalues.

    Returns:
        The summed minimal differences (also printed).
    """
    total = sum(min(abs(ej - ei) for ei in lambda_b) for ej in lambda_J)
    print("Sum of min differences:", total)
    return total
def algorithm(A, eps):
    """Run the cyclic Jacobi eigenvalue iteration on symmetric matrix A,
    then compare against numpy and compute SVD-based diagnostics.

    Prints (in order): iteration progress, the diagonalized A (eigenvalues
    on its diagonal), the accumulated rotation matrix U (eigenvector
    approximations), the residual ||A0*U - U*Lambda||, numpy's eigh
    results, singular values / rank / condition number, and two
    Moore-Penrose pseudoinverse computations with their difference norm.
    Note: theta from get_theta_c_s_t is computed but unused here.
    """
    A_initial = copy.deepcopy(A)
    k = 0
    k_max = 100  # iteration cap in case convergence stalls
    U = np.identity(len(A), dtype=float)
    p, q = get_p_q_indexes(A)
    theta, c, s, t = get_theta_c_s_t(A, p, q)
    # print(p, q)
    # print(theta, c, s, t)
    # print(c ** 2 + s ** 2)
    print(A)
    while not is_diagonal_matrix(A) and k <= k_max:
        print("K:", k)
        # R_p_q is built but unused by the fast path below (kept from the
        # deprecated explicit-rotation variant, still commented out).
        R_p_q = construct_R_p_q(len(A), p, q, c, s)
        # print("R_p_q", R_p_q)
        A = construct_new_A(A, p, q, c, s, t, eps)
        U = construct_new_U(U, p, q, c, s, eps)
        # print('CUSTOM_U:\n',U)
        # A = np.matmul((np.matmul(R_p_q, A)), np.transpose(R_p_q))
        # for i in range(len(A)):
        #     for j in range(len(A[0])):
        #         A[i][j]=check_precision(A[i][j],eps)
        # U = np.matmul(U, np.transpose(R_p_q))
        # print("U:\n",U)
        # for i in range(len(U)):
        #     for j in range(len(U[0])):
        #         U[i][j]=check_precision(U[i][j],eps)
        # Pick the next largest off-diagonal pivot and its rotation.
        p, q = get_p_q_indexes(A)
        theta, c, s, t = get_theta_c_s_t(A, p, q)
        k += 1
    ### Eigenvalues ("Valori proprii")
    print('A:', A)
    print('Valori proprii A:', np.diagonal(A))
    ### Eigenvector approximations ("Aproximarea vectorilor proprii")
    print('U:', U)
    print("Steps:", k)
    np.set_printoptions(suppress=True)
    # print('A_init*U', np.matmul(A_initial, U))
    # print('U*Lambda', np.matmul(U, A))
    # Residual of the eigendecomposition: A0*U should equal U*Lambda.
    print("Norm:", np.linalg.norm(np.matmul(A_initial, U) - np.matmul(U, A), np.inf))
    eigenvalues_np, eigenvectors_np = np.linalg.eigh(A_initial)
    print("Eigenvalues:", eigenvalues_np)
    print("Eigenvectors:\n", eigenvectors_np)
    custom_sum(np.diagonal(A), eigenvalues_np)
    # SVD-based diagnostics: singular values, rank, condition number.
    u, s, v_transpose = np.linalg.svd(A_initial)
    print("u:", u)
    print('s:', s)
    print('v_transpose:', v_transpose)
    print('Valorile singule ale matricei:', s)
    print('Rangul matricii:', np.count_nonzero(s[abs(s) >= eps]))
    print('Numarul de conditionare:', np.max(s) / np.min(s[abs(s) >= eps]))
    print('Pseudoinversa Moore-Penrose NP:\n', np.linalg.pinv(A_initial))
    # Pseudoinverse built from the SVD: A+ = V * S+ * U^T, where S+ inverts
    # only the singular values that survive the eps threshold.
    S_i = np.zeros([len(u), len(v_transpose)])
    np.fill_diagonal(S_i, [0 if check_precision(e, eps) == 0 else 1 / e for e in s])
    A_i = np.matmul(np.matmul(np.transpose(v_transpose), S_i), np.transpose(u))
    print('Pseudoinversa Moore-Penrose calculata:\n', A_i)
    # Alternative pseudoinverse via the normal equations: (A^T A)^+ A^T.
    A_transpose = np.transpose(A_initial)
    A_t_A = np.matmul(A_transpose, A_initial)
    A_t_A_inverse = np.linalg.pinv(A_t_A)
    A_j = np.matmul(A_t_A_inverse, A_transpose)
    print("A_j:", A_j)
    print("Norm:", np.linalg.norm(A_i - A_j, 1))
    # A_j = np.matmult(np.linalg.inv(np.matmul(np.transpose(A_initial), A_initial)), np.transpose(A_initial))
    # print('Matricea pseudo inversa:', A_j)
# Demo entry point: run the Jacobi eigenvalue algorithm on a sample
# symmetric 3x3 matrix; alternative test matrices are kept commented below.
if __name__ == '__main__':
    print("Tema 5")
    A = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]]) # works OK ("Merge OK")
    # A = np.array([[1.0, 1.0, 2.0], [1.0, 1.0, 2.0], [2.0, 2.0, 2.0]])
    # A = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
    # A = np.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
    #A = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 3.0, 4.0, 5.0], [3.0, 4.0, 5.0, 6.0], [4.0, 5.0, 6.0, 7.0]])
    algorithm(A, eps=10 ** -7)
| robertadriang/CalculNumeric2022 | Tema5/main.py | main.py | py | 5,455 | python | en | code | 0 | github-code | 13 |
71071810259 | # resnets + fpn
import math
import torch
import torch.nn as nn
from torchsummary import summary
from nets.resnets import resnet18, resnet34, resnet50, resnet101, resnet152
from nets.efficientnet import EfficientNet as EffNet
from nets.darknet import darknet53
from nets.deform_conv import DeformConv2d
from torchstat import stat # calc flops
class Resnet(nn.Module):
    """Wrap a ResNet as a 3-scale feature extractor for the FPN.

    The classifier head (avgpool + fc) is deleted; forward() returns the
    outputs of layer2, layer3 and layer4.
    """

    def __init__(self, model_index, load_weights=False):
        super().__init__()
        # model_index 0..4 selects resnet18/34/50/101/152.
        self.model_edition = [resnet18, resnet34, resnet50, resnet101, resnet152]
        backbone = self.model_edition[model_index](load_weights)
        # The classification head is never used; free its parameters.
        del backbone.avgpool
        del backbone.fc
        self.model = backbone

    def forward(self, x):
        out = self.model.conv1(x)
        out = self.model.bn1(out)
        out = self.model.relu(out)
        out = self.model.maxpool(out)
        out = self.model.layer1(out)
        feat1 = self.model.layer2(out)
        feat2 = self.model.layer3(feat1)
        feat3 = self.model.layer4(feat2)
        # For a 512x512 input these are 64/32/16 spatially; channel counts
        # are 512/1024/2048 for the bottleneck variants -- TODO confirm for
        # resnet18/34 (they use 128/256/512).
        return feat1, feat2, feat3
class EfficientNet(nn.Module):
    """EfficientNet backbone stripped of its classification head.

    forward() captures the feature map finishing each stage (detected by
    the next block having stride 2) plus the final block's output, and
    returns all but the first two -- the three deepest scales for the FPN.
    """
    def __init__(self, phi, load_weights=False):
        super(EfficientNet, self).__init__()
        # phi selects the compound-scaling variant (efficientnet-b{phi}).
        model = EffNet.from_pretrained(f'efficientnet-b{phi}', load_weights)
        # Drop the head; only the convolutional trunk is needed.
        del model._conv_head
        del model._bn1
        del model._avg_pooling
        del model._dropout
        del model._fc
        self.model = model

    def forward(self, x):
        # Stem: conv -> batch norm -> swish activation.
        x = self.model._conv_stem(x)
        x = self.model._bn0(x)
        x = self.model._swish(x)
        feature_maps = []
        last_x = None
        for idx, block in enumerate(self.model._blocks):
            # Drop-connect rate scales linearly with block depth.
            drop_connect_rate = self.model._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self.model._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            # A stride-2 block starts a new stage, so the *previous* output
            # is the finished feature map of the stage that just ended.
            if block._depthwise_conv.stride == [2, 2]:
                feature_maps.append(last_x)
            elif idx == len(self.model._blocks) - 1:
                feature_maps.append(x)
            last_x = x
        del last_x
        # Keep only the three deepest scales (C3/C4/C5 equivalents).
        return feature_maps[2:]
class FPN(nn.Module):
    '''
    Feature Pyramid Network head.

    input: the 3 backbone feature maps (C3, C4, C5)
    output: 5 pyramid levels (P3-P7), each lifted by ConvTranspose2d stacks
    to a common 128x128 map with 64 channels, then merged by a fixed
    weighted sum into 1 feature map for the detection head.
    '''
    def __init__(self, C3_channels, C4_channels, C5_channels, out_channels=256, deform=False):
        super(FPN, self).__init__()
        self.final_out_channels = 64 # channel count of the merged output map
        self.fpchannels = [64, 32, 16, 8, 4] # spatial sizes of P3..P7 (despite the name) -- fed as in_sizes below
        if deform:
            self.C3_conv1 = DeformConv2d(C3_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False, modulation=False) # 1*1 conv, keep feature map size
            self.C3_conv2 = DeformConv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False, modulation=False) # 3*3 conv, keep feature map size
            self.C4_conv1 = DeformConv2d(C4_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False, modulation=False) # 1*1 conv, keep feature map size
            self.C4_conv2 = DeformConv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False, modulation=False) # 3*3 conv, keep feature map size
            self.C5_conv1 = DeformConv2d(C5_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False, modulation=False) # 1*1 conv, keep feature map size
            self.C5_conv2 = DeformConv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False, modulation=False) # 3*3 conv, keep feature map size
            self.P6_conv = DeformConv2d(C5_channels, out_channels, kernel_size=3, padding=1, stride=2, bias=False, modulation=False) # 3*3 conv, stride=2, reduce feature map size
            self.P7_conv = DeformConv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=2, bias=False, modulation=False) # 3*3 conv, stride=2, reduce feature map size
        else:
            self.C3_conv1 = nn.Conv2d(C3_channels, out_channels, kernel_size=1, stride=1, padding=0) # 1*1 conv, keep feature map size
            self.C3_conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) # 3*3 conv, keep feature map size
            self.C4_conv1 = nn.Conv2d(C4_channels, out_channels, kernel_size=1, stride=1, padding=0) # 1*1 conv, keep feature map size
            self.C4_conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) # 3*3 conv, keep feature map size
            self.C5_conv1 = nn.Conv2d(C5_channels, out_channels, kernel_size=1, stride=1, padding=0) # 1*1 conv, keep feature map size
            self.C5_conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) # 3*3 conv, keep feature map size
            self.P6_conv = nn.Conv2d(C5_channels, out_channels, kernel_size=3, stride=2, padding=1) # 3*3 conv, stride=2, reduce feature map size
            self.P7_conv = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1) # 3*3 conv, stride=2, reduce feature map size
        # inplace=True save memory, but backward exists problem of calculating grad, (>=pytorch 0.4)
        self.P7_relu = nn.ReLU()
        self.unsample = nn.Upsample(scale_factor=2) # upsample by a factor of 2 ("unsample" kept: attribute name)
        # ConvTranspose2d stacks for P3-P7; layers must be registered here
        # in __init__ (not created in forward) so their weights are tracked.
        # They bring every level up to the 128x128x64 merged size.
        self.P3_convtrans = self._make_convtrans_sequence(out_channels, self.final_out_channels, self.fpchannels[0])
        self.P4_convtrans = self._make_convtrans_sequence(out_channels, self.final_out_channels, self.fpchannels[1])
        self.P5_convtrans = self._make_convtrans_sequence(out_channels, self.final_out_channels, self.fpchannels[2])
        self.P6_convtrans = self._make_convtrans_sequence(out_channels, self.final_out_channels, self.fpchannels[3])
        self.P7_convtrans = self._make_convtrans_sequence(out_channels, self.final_out_channels, self.fpchannels[4])
        # self.no_merge_conv = nn.Conv2d(C3_channels//2, out_channels//4, kernel_size=3, stride=1, padding=1)
    def _make_convtrans_sequence(self, in_channels, final_out_channels, in_sizes, out_sizes = 128):
        """Stack of ConvTranspose2d+BN+ReLU doubling the map from in_sizes to out_sizes.

        NOTE(review): inside the loop `in_channels` is re-divided by 2**i on
        every iteration (the division compounds), which looks unintended for
        stacks deeper than 2 -- confirm the intended channel schedule.
        """
        sequences = []
        length = math.log2(out_sizes//in_sizes) # number of x2 upsampling steps (float)
        for i in range(int(length)):
            if length == 1:
                out_channels = final_out_channels
            else:
                in_channels = in_channels//pow(2, i) if in_channels//pow(2, i) > self.final_out_channels else self.final_out_channels
                out_channels = in_channels//pow(2, i+1) if in_channels//pow(2, i+1) > self.final_out_channels else self.final_out_channels
            sequences.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels,kernel_size=4,stride=2, padding=1))
            sequences.append(nn.BatchNorm2d(out_channels))
            sequences.append(nn.ReLU())
        return nn.Sequential(*sequences)
    def forward(self, input_1, input_2, input_3):
        """Top-down pathway with lateral 1x1 connections, then weighted merge."""
        C3, C4, C5 = input_1, input_2, input_3
        # Lateral 1x1 convs bring all levels to a common channel count.
        C3_x1 = self.C3_conv1(C3)
        C4_x1 = self.C4_conv1(C4)
        C5_x1 = self.C5_conv1(C5)
        P5 = self.C5_conv2(C5_x1)
        # Top-down: upsample the coarser level and add it to the lateral map.
        C5_conv_unsample = self.unsample(C5_x1)
        C4_x2 = C4_x1 + C5_conv_unsample
        P4 = self.C4_conv2(C4_x2)
        C4_unsample = self.unsample(C4_x2)
        C3_x2 = C3_x1 + C4_unsample
        P3 = self.C3_conv2(C3_x2)
        # P6/P7 come from extra stride-2 convs on C5 (RetinaNet-style).
        P6 = self.P6_conv(C5)
        P7_x = self.P7_relu(P6)
        P7 = self.P7_conv(P7_x)
        # ---------------------feature fusion of different scales----------------------#
        # ConvTranspose2d to lift every feature map to the same 128x128x64 size
        P3 = self.P3_convtrans(P3)
        P4 = self.P4_convtrans(P4)
        P5 = self.P5_convtrans(P5)
        P6 = self.P6_convtrans(P6)
        P7 = self.P7_convtrans(P7)
        # [64, 128, 128]
        # Fixed fusion weights favouring the finest level.
        P_merge = 0.5 * P3 + 0.2 * P4 + 0.1 * P5 + 0.1 * P6 + 0.1 * P7
        # P_merge = 0.5 * P3 + 0.3 * P4 + 0.2 * P5
        return P_merge
        # ---------------------feature fusion of different scales----------------------#
        # remove feature fusion of different scales
        # P3: ConvTranspose2d, conv, 64*64*256 -> 128*128*64
        # P3_out = self.P3_convtrans(P3)
        #
        # return P3_out
class DetectionHead(nn.Module):
    '''
    Prediction head.

    input: 1 merged feature map
    output: 4 tensors -- per-class heatmap (sigmoid), center offset (2ch),
    vertex offsets (16ch) and physical size (3ch).
    '''
    def __init__(self, in_channels, out_channels=64, num_classes=3, center=2, num_vertex=16, phy_size=3):
        super(DetectionHead, self).__init__()
        self.sigmoid = nn.Sigmoid()
        # Total prediction channels; kept for reference, branches stay separate.
        self.pred_dimension = num_classes + center + num_vertex + phy_size
        # type (num_classes)
        self.type_sequence = self._make_sequence(in_channels, out_channels, num_classes)
        # regression (center_offset, vertex, size)
        self.center_sequence = self._make_sequence(in_channels, out_channels, center)
        self.vertex_sequence = self._make_sequence(in_channels, out_channels, num_vertex)
        self.phy_size_sequence = self._make_sequence(in_channels, out_channels, phy_size)
        self.attention_avgpool, self.attention_fc = self._se_attention(out_channels)
    def _make_sequence(self, in_channels, out_channels, final_out_channels):
        '''conv -> BN -> ReLU feature stage followed by a 3x3 output conv'''
        return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
                             nn.BatchNorm2d(out_channels),
                             nn.ReLU(),
                             nn.Conv2d(out_channels, final_out_channels, kernel_size=3, stride=1, padding=1))
    def _se_attention(self, out_channels, reduction=16):
        '''squeeze-and-excitation: global pool + bottleneck FC gate (sigmoid)'''
        return nn.AdaptiveAvgPool2d(1), nn.Sequential(nn.Linear(out_channels, out_channels // reduction, bias=False),
                                                      nn.ReLU(),
                                                      nn.Linear(out_channels // reduction, out_channels, bias=False),
                                                      nn.Sigmoid())
    def forward(self, x):
        # cls predict with se attention
        # NOTE(review): every pass of this loop starts again from the
        # original x and overwrites temp_out_type, so only the final
        # iteration's SE gate survives; it only runs at all because
        # in_channels == out_channels here. Confirm whether the layers were
        # meant to be chained (temp = layer(temp)).
        for i in range(len(self.type_sequence)-1):
            temp_out_type = self.type_sequence[i](x)
            temp_out_type = self.attention_avgpool(temp_out_type).view(x.shape[0], x.shape[1])
            temp_out_type = self.attention_fc(temp_out_type).view(x.shape[0], x.shape[1], 1, 1)
        # Rescale x by the channel gate, then final conv + sigmoid heatmap.
        out_type = x * temp_out_type.expand_as(x)
        out_type = self.sigmoid(self.type_sequence[len(self.type_sequence)-1](out_type))
        out_center = self.center_sequence(x)
        out_vertex = self.vertex_sequence(x)
        out_phy_size = self.phy_size_sequence(x)
        # output = torch.cat([out_type, out_center, out_vertex, out_phy_size], dim = 1) # concat(type + center + vertex + size)
        return out_type, out_center, out_vertex, out_phy_size
class KeyPointDetection(nn.Module):
    '''
    Full detector: backbone (resnet / efficientnet / darknet) + FPN + head.

    model_index selects the backbone variant; the channel triples below are
    the C3/C4/C5 channel counts that variant feeds into the FPN.
    '''
    def __init__(self, model_name, model_index, num_classes, pretrained_weights=False, deform=False):
        super(KeyPointDetection, self).__init__()
        self.pretrained_weights = pretrained_weights
        self.deform = deform
        if model_name == "resnet":
            self.backbone = Resnet(model_index, pretrained_weights)
            # Indices 0/1 are the basic-block nets (resnet18/34), 2+ bottleneck.
            fpn_size_dict = {
                0: [128, 256, 512],
                1: [128, 256, 512],
                2: [512, 1024, 2048],
                3: [512, 1024, 2048],
                4: [512, 1024, 2048],
            }[model_index]
        if model_name == "efficientnet":
            # Maps model_index to the efficientnet-b{phi} variant.
            self.backbone_phi = [0, 1, 2, 3, 4, 5, 6, 6]
            fpn_size_dict = {
                0: [40, 112, 320],
                1: [40, 112, 320],
                2: [48, 120, 352],
                3: [48, 136, 384],
                4: [56, 160, 448],
                5: [64, 176, 512],
                6: [72, 200, 576],
                7: [72, 200, 576],
            }[model_index]
            # C3 64, 64, 40
            # C4 32, 32, 112
            # C5 16, 16, 320
            self.backbone = EfficientNet(self.backbone_phi[model_index], pretrained_weights)
        if model_name == "darknet":
            self.backbone = darknet53(pretrained_weights, deform)
            fpn_size_dict = {0: [256, 512, 1024]}[model_index]
        self.fpn = FPN(fpn_size_dict[0], fpn_size_dict[1], fpn_size_dict[2], 256, self.deform)
        self.detection_head = DetectionHead(in_channels=64, num_classes=num_classes)
    def freeze_backbone(self):
        """Stop gradient flow into the backbone (e.g. for warm-up training)."""
        for param in self.backbone.parameters():
            param.requires_grad = False
    def unfreeze_backbone(self):
        """Re-enable backbone training after a frozen warm-up phase."""
        for param in self.backbone.parameters():
            param.requires_grad = True
    def forward(self, x):
        # backbone -> 3 scales -> FPN merge -> 4 head outputs.
        resnet_features_1, resnet_features_2, resnet_features_3 = self.backbone(x)
        fpn_features = self.fpn(resnet_features_1, resnet_features_2, resnet_features_3)
        bt_hm, bt_center, bt_vertex, bt_size = self.detection_head(fpn_features)
        return bt_hm, bt_center, bt_vertex, bt_size
if __name__ == "__main__":
feature = torch.randn((1, 3, 512, 512))
# resnet-50
model = KeyPointDetection("resnet", 2, num_classes=3)
# efficientnet-b5
# model = KeyPointDetection("efficientnet", 5, num_classes=3)
# darknet-53(input:416*416)
# feature = torch.randn((1, 3, 416, 416))
# model = KeyPointDetection("darknet", 0, num_classes=3, pretrained_weights=True, deform=True)
bt_hm, bt_center, bt_vertex, bt_size = model(feature)
print(bt_center.shape)
print(bt_size.shape)
# model return feature map can not be packed as list like ([bt_hm, bt_center, bt_vertex, bt_size])
print(summary(model, (3, 512, 512), batch_size=1, device='cpu'))
# calculate model flops and print
stat(model, (3, 512, 512))
| stjuliet/CenterLoc3D | nets/fpn.py | fpn.py | py | 14,237 | python | en | code | 10 | github-code | 13 |
70551296338 | import pygame
from time import time
from fonctions import *
from constantes import *
class Ballon(pygame.sprite.Sprite): # ball sprite; launch speed is set by the shot power
    """Projectile ball sprite following simple ballistic motion."""

    def __init__(self, x, y, angle, vitesse):
        """Spawn the ball at (x, y) with the given launch angle and speed."""
        super().__init__()
        self.image = pygame.image.load("ressources/ballon.png").convert_alpha()
        # Launch parameters are frozen at creation time.
        self.angle_initiale = angle
        self.rect = self.image.get_rect()
        # Fixed offset from the spawn point -- presumably the launcher's
        # muzzle position; verify against the caller.
        self.rect.x = x + 25
        self.rect.y = y - 20
        # Launch timestamp; elapsed flight time drives the vertical velocity.
        self.time = time()
        self.vitesse_initiale = vitesse
        # Horizontal velocity is constant for the whole flight.
        self.change_x = vitesse_x(self.vitesse_initiale, self.angle_initiale)

    def update(self, spritegroup1, spritegroup2): # per-frame ball update
        # Recompute vertical velocity from elapsed time (physics live in
        # vitesse_y -- presumably includes gravity; verify in fonctions.py).
        self.change_y = vitesse_y(time() - self.time, self.vitesse_initiale, self.angle_initiale) # refresh velocity
        self.rect.x += self.change_x / fps
        self.rect.y += self.change_y / fps
        # Kill the ball when it leaves the screen at either side or the bottom.
        if self.rect.x + self.rect.width >= SCREEN_WIDTH or self.rect.x <= 0 or self.rect.y + self.rect.height >= SCREEN_HEIGHT:
            self.kill()
        # Hitting the first group (basket support rectangles) bounces the
        # ball horizontally, simulating the backboard reaction.
        if pygame.sprite.spritecollide(self, spritegroup1, False):
            self.change_x = - self.change_x
            self.rect.x += self.change_x / fps
        # Hitting the second group flips the launch speed's sign.
        if pygame.sprite.spritecollide(self, spritegroup2, False):
            self.vitesse_initiale = - self.vitesse_initiale
3860935511 |
x = [2]
for i in range(3,105000,2):
is_prime=True
for j in x[1:]:
if i%j==0:
is_prime=False
if is_prime==True:
x.append(i)
print(x)
tmp = []
for i in x:
tmp.append(str(i))
print(tmp[10000])
| lemonwisard/project-euler | problem6.py | problem6.py | py | 207 | python | en | code | 0 | github-code | 13 |
70102270739 | import pygame
import cv2
import os
import random
from .music import Music
import smrc.utils
# Initialise pygame subsystems at import time so the module can query
# joysticks as soon as it is loaded.
pygame.init()
# Initialize the joysticks
pygame.joystick.init()
# root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'media')
# # /home/kai/optimize_tool/smrc/media
# # print(f'root_dir = {root_dir}')
# print(f'Loading music playlist from {root_dir} ..')
# playlist = smrc.line.get_file_list_recursively(root_dir, ext_str='.mp3')
# print(f'self.music_playlist = {playlist}')
# def init_music_playlist():
# init_music_playlist()
class GameController(Music):
    def __init__(self, music_on=False):
        """Detect the first attached joystick and build its name/state maps.

        If a joystick is present, device-specific button/axis/hat name
        tables are chosen from the OS-reported device name, per-control
        state dictionaries are initialised, and
        self.game_controller_available is set True. With no joystick the
        object still constructs but stays unavailable.
        """
        super().__init__(music_on)
        self.game_controller_available = False
        self.game_controller_on = True
        self.joystick = None
        # We'll store the states here (keyed by the human-readable names).
        self.axis_states = {}
        self.button_states = {}
        self.hat_states = {}
        # These constants were borrowed from linux/input.h
        self.axis_names = {}
        self.button_names = {}
        self.hat_names = {}
        # Ordered id -> name lists, filled below once the device is known.
        self.axis_map = []
        self.button_map = []
        self.hat_map = []
        self.game_controller_axis_moving_on = False
        self.game_controller_axis1_moving_on = False
        # self.music_playlist = []
        # # code for pygame joystick from https://www.pygame.org/docs/ref/joystick.html
        # pygame.init()
        # # Used to manage how fast the screen updates
        # # self.clock = pygame.time.Clock()
        #
        # # Initialize the joysticks
        # pygame.joystick.init()
        joystick_count = pygame.joystick.get_count()
        print("Number of joysticks: {}".format(joystick_count))
        # self.init_music_playlist()
        if joystick_count > 0:
            # we always use first game controller even if more than one are detected.
            self.joystick = pygame.joystick.Joystick(0)
            self.joystick.init()
            # Get the name from the OS for the controller/joystick
            self.js_name = self.joystick.get_name()
            print('Device name: %s' % self.js_name)
            # IFYOO Game for Windows
            self.hat_names = {
                0: 'hat0' # value format (0, 1)
                #1: 'hat1'
            }
            # use test_button_axis_hat() function to assist the definition of the names of the buttons,
            # axis, and hat.
            # 'Elecom JC-U3613M' in linux, and 'Controller (JC-U3613M - Xinput Mode)' in Windows
            if self.js_name.find('JC-U3613M') > 0:
                # note that axises 2 and 5 are in pair, 3 and 4 are in pair.
                self.axis_names = {
                    0: 'x',
                    1: 'y',
                    2: 'tx', #once pressed, always -1.00
                    3: 'ry',
                    4: 'rx',
                    5: 'ty' # once pressed, always -1.00,
                }
                self.button_names = {
                    0: 'A',
                    1: 'B',
                    2: 'X',
                    3: 'Y',
                    4: 'L1',
                    5: 'R1',
                    6: 'back',
                    7: 'start',
                    8: 'mode'
                    #9, 10 not defined in the game controller (can not those buttons)
                }
            else: #self.js_name == 'SHANWAN IFYOO Gamepad': #SHANWAN IFYOO Gamepad
                self.axis_names = {
                    0: 'x',
                    1: 'y',
                    2: 'rx',
                    3: 'ry'
                }
                #try not to use buttons {'L2', 'R2','select'}, as they maybe not defined for different game controller
                self.button_names = {
                    0: 'Y',
                    1: 'B',
                    2: 'A',
                    3: 'X',
                    4: 'L1',
                    5: 'R1',
                    6: 'L2', # try not to use this button
                    7: 'R2', # try not to use this button
                    8: 'select', # try not to use this button
                    9: 'start',
                    12: 'mode' #SHANWAN IFYOO Gamepad
                    #button 10 and button 12 will not exist at the same time, so do not worry the name issue
                }
            # Register every control the hardware reports, falling back to
            # a synthetic 'unknown(...)' name when the table has no entry.
            num_buttons = self.joystick.get_numbuttons()
            print("Number of buttons: {}".format(num_buttons))
            for i in range(num_buttons):
                btn_name = self.button_names.get(i, 'unknown(0x%01x)' % i)
                self.button_map.append(btn_name)
                self.button_states[btn_name] = 0
                print(' button {}, name {} '.format(i, btn_name) )
            # Usually axis run in pairs, up/down for one, and left/right for
            # the other.
            num_axes = self.joystick.get_numaxes()
            print("Number of axes: {}".format(num_axes))
            for i in range(num_axes):
                axis_name = self.axis_names.get(i, 'unknown(0x%02x)' % i)
                self.axis_map.append(axis_name)
                self.axis_states[axis_name] = 0.0
                print(' axis {}, name {} '.format(i, axis_name) )
            num_hats = self.joystick.get_numhats()
            print("Number of hats: {}".format(num_hats))
            for i in range(num_hats):
                hat_name = self.hat_names.get(i, 'unknown(0x%03x)' % i)
                self.hat_map.append(hat_name)
                self.hat_states[hat_name] = (0, 0) #0.0
                print(' hat {}, name {} '.format(i, hat_name) )
            print('You can use smrc/show_joystick_map.py to display the button, axis and hat ID.')
            print('Also check which button, axis, hat is pressed to understand the reference.')
            # if we find any joystick
            self.game_controller_available = True
            print('Game controller is ready for use.')
# def init_music_playlist(self):
# root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'media')
# # /home/kai/optimize_tool/smrc/media
# # print(f'root_dir = {root_dir}')
# print(f'Loading music playlist from {root_dir} ..')
# self.music_playlist = smrc.line.get_file_list_recursively(root_dir, ext_str='.mp3')
# print(f'self.music_playlist = {self.music_playlist}')
#
# pygame.mixer.music.load(self.music_playlist.pop())
# # Get the first object_tracking from the playlist
# # pygame.mixer.music.queue(self.music_playlist.pop()) # Queue the 2nd song
def is_axis_triggered(self, axis_name, axis_value):
assert axis_name in self.axis_states
if abs(axis_value) > abs(self.axis_states[axis_name]) and \
abs(axis_value) > 0.2:
return True
else:
return False
    def test_button_axis_hat(self):
        '''
        Interactive utility: echo the name of every button/axis/hat event so
        the name tables in __init__ can be verified for a new device.
        Press Esc (in the OpenCV window -- requires one to be focused,
        TODO confirm) to exit.

        usage:
            from smrc.game_controller import GameController
            game_controller = GameController()
            game_controller.test_button_axis_hat()
        '''
        if not self.game_controller_available:
            print('No game controller is available.')
        else:
            print('Testing game controller ...')
            print('Press a button, axis, or hat to see its name ...')
            while True:
                # EVENT PROCESSING STEP
                for event in pygame.event.get(): # User did something
                    if event.type == pygame.QUIT: # If user clicked close
                        # NOTE(review): this break only exits the inner for
                        # loop, not the while -- QUIT does not stop the test.
                        break
                    # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
                    if event.type == pygame.JOYBUTTONDOWN or event.type == pygame.JOYBUTTONUP:
                        print('event =', event)
                        # ('event =', < Event(10-JoyButtonDown {'joy': 0, 'button': 2}) >)
                        # print('event["button"] =', event['button']) # this is wrong
                        # print('event.button =', event.button) #'event.key =', event.key,
                        # get_button ID and button name
                        btn_id = event.button # get the id of the button
                        btn_name = self.button_names[btn_id] # get the name of the button
                        if event.type == pygame.JOYBUTTONDOWN:
                            #print("Joystick button pressed.")
                            print("%s pressed" % (btn_name))
                        elif event.type == pygame.JOYBUTTONUP:
                            #print("Joystick button released.")
                            print("%s released" % (btn_name))
                    # JOYHATMOTION parameters: joy, hat, value
                    if event.type == pygame.JOYHATMOTION:
                        #print("Joystick hat pressed.")
                        #print('event =', event)
                        hat_id, hat_value = event.hat, event.value # get the id of the hat
                        hat_name = self.hat_names[hat_id] # get the name of the hat
                        print("%s pressed, " % (hat_name))
                        print("hat value : {}".format(hat_value))
                    # JOYAXISMOTION parameters: joy, axis, value
                    if event.type == pygame.JOYAXISMOTION:
                        #print("Joystick axis pressed.")
                        print('event =', event)
                        # ('event =', < Event(7-JoyAxisMotion {'joy': 0, 'value': 0.0, 'axis': 3}) >)
                        # get_axis
                        axis_id = event.axis # get the id of the axis
                        axis_name = self.axis_names[axis_id] # get the name of the axis
                        print("%s axis pressed" % (axis_name))
                        axis_value = event.value
                        print("axis value : {}".format(axis_value))
                # Poll the keyboard via OpenCV; Esc leaves the test loop.
                pressed_key = cv2.waitKey(20)
                if pressed_key & 0xFF == 27: # Esc key is pressed
                    # cv2.destroyWindow(self.IMAGE_WINDOW_NAME)
                    break
def Event_MoveToNextMusic(self):
# print(f'self.music_playlist = {self.music_playlist}')
if self.play_music_on and len(self.music_playlist) > 0:
next_song = random.choice(self.music_playlist)
print(f'Move to next music {next_song} ...')
pygame.mixer.music.load(next_song)
if self.game_controller_available:
pygame.mixer.music.play() # Play the music
else:
pygame.mixer.music.play(-1) # Play the music
# if len(playlist) > 0:
# pygame.mixer.music.queue(playlist.pop()) # Queue the next one in the list
def init_and_play_music(self):
if self.play_music_on:
# print(f'self.play_music_on = {self.play_music_on}')
print('Music mode is turned on, to load and play music now ...')
self.init_music_playlist()
if len(self.music_playlist) > 0:
pygame.mixer.music.load(self.music_playlist[0])
print(f'Playing music {self.music_playlist[0]}')
# Get the first object_tracking from the playlist
# pygame.mixer.music.queue(self.music_playlist.pop()) # Queue the 2nd song
if self.game_controller_available:
# pygame.mixer.music.set_endevent(pygame.USEREVENT) # Setup the end object_tracking event
pygame.mixer.music.play() # Play the music
else:
pygame.mixer.music.play(-1) # Play the music
| cyoukaikai/ahc_ete | smrc/utils/annotate/game_controller.py | game_controller.py | py | 11,736 | python | en | code | 2 | github-code | 13 |
70494407379 | import requests
from bs4 import BeautifulSoup
import json
# User agent so the request looks like a regular desktop browser
# (some sites block the default python-requests agent).
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"

# Fetch the article page with the custom user agent.
url = "https://www.proflowers.com/blog/types-of-flowers"
headers = {'User-Agent': user_agent}
response = requests.get(url, headers=headers)
response.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page

# Parse the HTML.
soup = BeautifulSoup(response.content, 'html.parser')

# Each flower is described in a <td> with this (framework-generated) class name.
td_elements = soup.find_all('td', class_='article-sections_tableCell__2HBAw')

# Field labels to capture; the value text follows the <b> label after a <br>.
FIELD_LABELS = ("Sun Needs", "Soil Needs", "Zones", "Height", "Blooms in", "Features")

# Extract one dict of label -> value per table cell.
flowers = []
for td in td_elements:
    flower_data = {}
    for b in td.find_all('b'):
        label = b.text
        if label not in FIELD_LABELS:
            continue
        br = b.find_next_sibling('br')
        # Guard against malformed cells: the original code dereferenced
        # .next_sibling unconditionally, which raised AttributeError when a
        # label had no <br> or no text after it (the old multi-line
        # "Blooms in" else-branch could never succeed for the same reason).
        if br is None or not br.next_sibling:
            continue
        # NOTE(review): assumes the node after <br> is a text node exposing
        # .strip() -- same assumption the original code made; verify against
        # the live page markup.
        flower_data[label] = br.next_sibling.strip()
    # Keep only cells that yielded at least one field.
    if flower_data:
        flowers.append(flower_data)

# Write the collected data as pretty-printed JSON.
with open('flower_data.json', 'w') as json_file:
    json.dump(flowers, json_file, indent=4)
| wasemalwisy/Task-2-Proflowers-Scraper | final flowers .py | final flowers .py | py | 2,370 | python | en | code | 0 | github-code | 13 |
7041944010 | import eqsig
from eqsig import sdof
import numpy as np
from o3seespy import opy
from o3seespy import cc as opc
def get_inelastic_response(mass: float, k_spring: float, f_yield: float, motion: np.ndarray, dt: float, xi: float = 0.05, r_post: float = 0.0) -> dict:
    """
    Run a seismic time-history analysis of a nonlinear SDOF system in OpenSees.

    The model is a zero-length bilinear (Steel01) spring between two coincident
    nodes, with the mass lumped at the top node and a uniform base-acceleration
    excitation. Rayleigh damping is set from the first eigenmode.

    :param mass: SDOF mass (attached to the top node)
    :param k_spring: spring (initial) stiffness
    :param f_yield: yield strength of the bilinear spring
    :param motion: acceleration values; must be a numpy array, NOT a plain
        list -- the code negates it with ``-1 * motion``, which on a Python
        list would silently produce an empty list
    :param dt: float, time step of acceleration values
    :param xi: damping ratio (used for the Rayleigh stiffness coefficient)
    :param r_post: post-yield stiffness ratio
    :return: dict of numpy arrays keyed by "time", "rel_disp", "rel_vel",
        "rel_accel" and "force" (base reaction), sampled at the analysis step
    """
    # OpenSees holds a single global model, so start from a clean slate.
    opy.wipe()
    opy.model('basic', '-ndm', 2, '-ndf', 3)  # 2 dimensions, 3 dof per node
    # Establish nodes (coincident -- connected by a zero-length element)
    bot_node = 1
    top_node = 2
    opy.node(bot_node, 0., 0.)
    opy.node(top_node, 0., 0.)
    # Fix bottom node; top node is free only in the horizontal (X) direction
    opy.fix(top_node, opc.FREE, opc.FIXED, opc.FIXED)
    opy.fix(bot_node, opc.FIXED, opc.FIXED, opc.FIXED)
    # Set out-of-plane DOFs to be slaved
    opy.equalDOF(1, 2, *[2, 3])
    # nodal mass (weight / g):
    opy.mass(top_node, mass, 0., 0.)
    # Define material: bilinear hysteretic (Steel01) with post-yield ratio r_post
    bilinear_mat_tag = 1
    mat_type = "Steel01"
    mat_props = [f_yield, k_spring, r_post]
    opy.uniaxialMaterial(mat_type, bilinear_mat_tag, *mat_props)
    # Assign zero length element acting in the horizontal direction (dir 1)
    beam_tag = 1
    opy.element('zeroLength', beam_tag, bot_node, top_node, "-mat", bilinear_mat_tag, "-dir", 1, '-doRayleigh', 1)
    # Define the dynamic analysis: base excitation from the input record
    load_tag_dynamic = 1
    pattern_tag_dynamic = 1
    values = list(-1 * motion)  # should be negative
    opy.timeSeries('Path', load_tag_dynamic, '-dt', dt, '-values', *values)
    opy.pattern('UniformExcitation', pattern_tag_dynamic, opc.DOF_X, '-accel', load_tag_dynamic)
    # set damping based on first eigen mode
    angular_freq2 = opy.eigen('-fullGenLapack', 1)
    # eigen() may return a list or a scalar depending on the OpenSees build
    if hasattr(angular_freq2, '__len__'):
        angular_freq2 = angular_freq2[0]
    angular_freq = angular_freq2 ** 0.5
    # Stiffness-proportional Rayleigh damping only (mass term set to zero)
    alpha_m = 0.0
    beta_k = 2 * xi / angular_freq
    beta_k_comm = 0.0
    beta_k_init = 0.0
    opy.rayleigh(alpha_m, beta_k, beta_k_init, beta_k_comm)
    # Run the dynamic analysis (Newmark average-acceleration, Newton iterations)
    opy.wipeAnalysis()
    opy.algorithm('Newton')
    opy.system('SparseGeneral')
    opy.numberer('RCM')
    opy.constraints('Transformation')
    opy.integrator('Newmark', 0.5, 0.25)
    opy.analysis('Transient')
    tol = 1.0e-10
    iterations = 10
    opy.test('EnergyIncr', tol, iterations, 0, 2)
    # Total duration of the record; analysis substeps are finer than the record dt
    analysis_time = (len(values) - 1) * dt
    analysis_dt = 0.001
    outputs = {
        "time": [],
        "rel_disp": [],
        "rel_accel": [],
        "rel_vel": [],
        "force": []
    }
    # Step through the record, sampling the response at every analysis step
    while opy.getTime() < analysis_time:
        curr_time = opy.getTime()
        opy.analyze(1, analysis_dt)
        outputs["time"].append(curr_time)
        outputs["rel_disp"].append(opy.nodeDisp(top_node, 1))
        outputs["rel_vel"].append(opy.nodeVel(top_node, 1))
        outputs["rel_accel"].append(opy.nodeAccel(top_node, 1))
        opy.reactions()
        outputs["force"].append(-opy.nodeReaction(bot_node, 1))  # Negative since diff node
    opy.wipe()
    # Convert the accumulated lists to numpy arrays for the caller
    for item in outputs:
        outputs[item] = np.array(outputs[item])
    return outputs
def test_sdof():
    """Check the OpenSees SDOF model against the closed-form elastic response.

    A near-elastic run (yield force scaled by 100) is compared with eqsig's
    elastic response series; the nonlinear run's final displacement is
    compared against a stored reference value.
    """
    from tests.conftest import TEST_DATA_DIR

    dt = 0.01
    rec = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    asig = eqsig.AccSignal(rec, dt)

    period = 1.0
    xi = 0.05
    mass = 1.0
    f_yield = 1.5  # Reduce this to make it nonlinear
    r_post = 0.0

    # Closed-form elastic response for the same period and damping ratio
    resp_u, resp_v, resp_a = sdof.response_series(motion=rec, dt=dt, periods=np.array([period]), xi=xi)

    k_spring = 4 * np.pi ** 2 * mass / period ** 2
    outputs = get_inelastic_response(mass, k_spring, f_yield, rec, dt, xi=xi, r_post=r_post)
    outputs_elastic = get_inelastic_response(mass, k_spring, f_yield * 100, rec, dt, xi=xi, r_post=r_post)
    disp_inelastic_final = outputs["rel_disp"][-1]

    # Resample the OpenSees elastic results onto the record's time base
    time = asig.time
    acc_opensees_elastic = np.interp(time, outputs_elastic["time"], outputs_elastic["rel_accel"]) - rec
    ux_opensees_elastic = np.interp(time, outputs_elastic["time"], outputs_elastic["rel_disp"])

    assert abs(np.sum(ux_opensees_elastic - resp_u[0])) < 1.0e-4
    assert abs(np.sum(acc_opensees_elastic - resp_a[0])) < 5.0e-4
    assert np.isclose(disp_inelastic_final, 0.0186556)
if __name__ == '__main__':
    # Allow running this test module directly as a script, without pytest.
    test_sdof()
| o3seespy/o3seespy | tests/binary/test_sdof.py | test_sdof.py | py | 4,628 | python | en | code | 16 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.