seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
|---|---|---|---|---|---|---|---|---|---|---|---|---|
9001276762 | from time import time
from gurobipy import Model
class MDSP:
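    """Base wrapper around a Gurobi model for a multiset of distances D (B = sum of D, P = candidate points)."""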
def __init__(self, d: list, filename: str, optimize=False, time_limit=3600):
self.D = d
self.B = sum(d)
self.k = len(self.D)
self.D_ = self.get_unique_distances()
self.M = self.get_mult()
self.P = list(range(self.B + 1))
self.filename = filename
if optimize:
self.P = self.valid_points()
self.model = Model()
self.model.setParam('LogFile', f'{filename}.log')
self.model.setParam('LogToConsole', 0)
self.model.setParam('TimeLimit', time_limit)
def solve(self):
t1 = time()
self.model.update()
self.model.optimize()
t2 = time()
self.write_time_file(t1, t2)
self.model.write(f'{self.filename}.lp')
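        # Gurobi status code 3 means the model is infeasible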
if self.model.status == 3:
print('Infeasible')
else:
self.model.write(f'{self.filename}.sol')
print('Obj: %s' % self.model.ObjVal)
def get_unique_distances(self):
d = list(set(self.D))
d.sort()
return d
def get_mult(self):
m = dict()
for i in self.D_:
m[i] = self.D.count(i)
return m
def valid_points(self):
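        """Candidate positions reachable from 0 by adding/subtracting distances; only non-negative ones are kept."""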
c = {0}
for d in self.D:
t = set()
for p in c:
t = t.union({p-d, p+d})
c = c.union(t)
p = [x for x in c if x >= 0]
p.sort()
return p
def write_time_file(self, t1, t2):
with open(f'{self.filename}.time', 'w') as file:
file.write(f'{self.model.Status}\n')
file.write(f'{t2 - t1}\n')
| cleberoli/mdsp | model/mdsp.py | mdsp.py | py | 1,706 | python | en | code | 0 | github-code | 6 |
23565353773 | # -*- coding: utf-8 -*-
'''Polynomial basis linear model data generator'''
import numpy as np
import hw3_1a
def polynomial(basis, var, weights, n=1):
    """Sample y = sum_k w_k * x**k + noise for n points x ~ U(-1, 1)."""
    # One Gaussian noise draw, shared by all n samples
    noise = hw3_1a.normal_generating(0, var)
    x = np.random.uniform(-1, 1, n)
    X = []
    for power in range(basis):
        X.append(x ** power)
    X = np.array(X)
    weights = np.array(weights)
    y = weights.dot(X) + noise
    return y
def main():
    basis = int(input("n="))  # number of basis functions
    var = float(input("a="))  # noise variance
    weights = [float(x) for x in input("w=").split()]  # weight list
    # n=1  # number of data points to generate
    values = polynomial(basis, var, weights)
    print(*values, sep=' ')
if __name__ == '__main__':
main() | n860404/Machine_learning_2019 | HW3/hw3_1b.py | hw3_1b.py | py | 736 | python | en | code | 0 | github-code | 6 |
41211514297 | import rabacus as ra
import pylab as plt
import numpy as np
z = 3.0
Nnu = 100
q_min = 1.0e-2
q_max = 1.0e6
uvb = ra.BackgroundSource( q_min, q_max, 'hm12', z=z, Nnu=Nnu )
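# Optically thin 'hm12' UV background source at z = 3, sampled at Nnu points spanning q_min to q_max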
NT=100
T = np.logspace( 4.0, 5.0, NT ) * ra.u.K
nH = np.ones( NT ) * 1.0e-2 / ra.u.cm**3
nHe = nH * 10**(-1.0701)
H1i = np.ones(T.size) * uvb.thin.H1i
He1i = np.ones(T.size) * uvb.thin.He1i
He2i = np.ones(T.size) * uvb.thin.He2i
H1h = np.ones(T.size) * uvb.thin.H1h
He1h = np.ones(T.size) * uvb.thin.He1h
He2h = np.ones(T.size) * uvb.thin.He2h
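# fcA values set to 1.0 (presumably the case A recombination fraction in rabacus)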
fcA_H2 = 1.0
fcA_He2 = 1.0
fcA_He3 = 1.0
kchem = ra.ChemistryRates( T, fcA_H2, fcA_He2, fcA_He3,
H1i=H1i, He1i=He1i, He2i=He2i )
kcool = ra.CoolingRates( T, fcA_H2, fcA_He2, fcA_He3,
H1h=H1h, He1h=He1h, He2h=He2h )
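# Solve for photo-collisional equilibrium (PCE) ionization fractions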
x_pce = ra.Solve_PCE( nH, nHe, kchem )
heat_H1 = H1h * nH * x_pce.H1
heat_He1 = He1h * nHe * x_pce.He1
heat_He2 = He2h * nHe * x_pce.He2
heat = heat_H1 + heat_He1 + heat_He2
plt.loglog( T, heat, color='black', lw=2.0 )
plt.loglog( T, heat_H1 )
plt.loglog( T, heat_He1 )
plt.loglog( T, heat_He2 )
| galtay/rabacus | cloudy/cooling/rabacus_confirm.py | rabacus_confirm.py | py | 1,097 | python | en | code | 4 | github-code | 6 |
13879303932 | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020-06-20 16:15
# @Author : 小凌
# @Email : 296054210@qq.com
# @File : test_06_audit.py
# @Software: PyCharm
import json
import unittest
import ddt
from common.excel_handler import ExcelHandler
from common.http_handler import visit
from middlerware.handler import Handler
logger = Handler.logger
excel = Handler.excel
yaml = Handler.yaml
sheet_name = yaml['excel']['auditsheet']
test_data = excel.get_data(sheet_name)
@ddt.ddt
class TestAudit(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
logger.info('------------------------------TestAuditBegin------------------------------')
cls.admin_token = Handler().admin_token
cls.token = Handler().token
def setUp(self) -> None:
        # Create a loan project with the test account and extract its loan_id
self.loan_id = Handler().loan_id
self.db = Handler.database_cls()
self.excel = ExcelHandler(Handler.excel_path)
@ddt.data(*test_data)
def test01_audit(self, case_data):
"""审核项目接口测试"""
global case_result
        logger.info('********** Fetching case #%d <%s> **********' % (case_data['case_id'], case_data['title']))
headers = case_data['headers']
        # Add a failure case in which an ordinary user's token is used for the audit
if ("#admin_token#" in headers) or ("#token#" in headers):
headers = headers.replace("#admin_token#", self.admin_token)
headers = headers.replace("#token#", self.token)
data = case_data['data']
if "#loan_id#" in data:
data = data.replace("#loan_id#", str(self.loan_id))
        # Use a loan id that does not exist
if "#fail_loan_id#" in data:
data = data.replace("#fail_loan_id#", str(self.loan_id + 1000))
        # For the already-approved case, pull a loan with status != 2 from the database
if "#approve_loan_id#" in data:
self.loan_id = self.db.query("select * from futureloan.loan where `status` !=2 limit 1;")['id']
data = data.replace("#approve_loan_id#", str(self.loan_id))
response = visit(
url=yaml['host'] + case_data['url'],
method=case_data['method'],
json=json.loads(data),
headers=json.loads(headers)
)
expected = json.loads(case_data["expected"])
try:
self.assertEqual(response.json()["code"], expected['code'])
if response.json()['code'] == 0:
sql = "select `status` from futureloan.loan where id = {};".format(self.loan_id)
after_state = self.db.query(sql)['status']
self.assertEqual(expected['status'], after_state)
            logger.info('********** Case #%d <%s> finished **********' % (case_data['case_id'], case_data['title']))
case_result = "pass"
except AssertionError as error:
logger.error("第{}用例出现异常,异常为{}".format(case_data['case_id'], error))
case_result = "fail"
raise error
finally:
            # Write the test result back to the Excel sheet
row = case_data['case_id'] + 1
self.excel.excel_write(name=sheet_name, row=row, column=len(case_data), value=case_result)
self.excel.excel_write(name=sheet_name, row=row, column=len(case_data) - 1, value=str(response.json()))
logger.info("Write the response and result: %s " % case_result)
def tearDown(self) -> None:
self.db.close()
@classmethod
def tearDownClass(cls) -> None:
logger.info('------------------------------TestAuditOver------------------------------')
if __name__ == '__main__':
unittest.main()
| galaxyling/api-framework | testcases/test_06_audit.py | test_06_audit.py | py | 3,807 | python | en | code | 1 | github-code | 6 |
n, k = [int(x) for x in input().split()]
s = set(range(n + 1))  # every number in [0, n]; covered ones get discarded
for i in range(k):
    a_i, b_i = [int(x) for x in input().split()]
    j = 0
    while a_i + j * b_i <= n:
        # discard() (unlike remove()) is safe when progressions overlap
        s.discard(a_i + j * b_i)
        j += 1
print(len(s)) | Nayassyl/22B050835 | pt/sets/100.py | 100.py | py | 301 | python | en | code | 0 | github-code | 6 |
73025036669 | from enum import Enum
from typing import List
import sqlalchemy as sa
from sqlalchemy import orm as so
from .base import BaseMixin, db, IdentityMixin, TimestampMixin
__all__ = ['Chat', 'ChatEntry']
class Chat(BaseMixin, IdentityMixin, TimestampMixin, db.Model):
"""Chat Model.
Represents a chat conversation in the application.
"""
__tablename__ = 'chats'
title: so.Mapped[str] = so.mapped_column(
sa.String(64),
unique=False,
index=True,
nullable=False,
)
entry: so.Mapped[List['ChatEntry']] = so.relationship(
back_populates='chat',
)
def teaser(self, length=None) -> str:
"""Return a teaser for the chat."""
if not self.entry:
return ''
first_entry = min(self.entry, key=lambda x: x.created_at)
length = length or 150
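        # Truncate long content, reserving three characters for the ellipsis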
if len(first_entry.content) > length:
return first_entry.content[:length - 3] + '...'
return first_entry.content[:length]
@classmethod
def create_new_chat(cls, title=None):
"""Create a new chat and return the new chat."""
title = title or 'New chat'
new_chat = cls.create(title=title)
new_chat.save()
return new_chat
class ChatEntry(BaseMixin, IdentityMixin, TimestampMixin, db.Model):
"""ChatEntry Model.
Represents a single message in a Chat.
"""
class Role(Enum):
"""Represents the role of a :class:`.ChatEntry`."""
USER = 'user'
ASSISTANT = 'assistant'
def __str__(self) -> str:
"""Get a string representation of this role.
:return: The name of this role.
:rtype: str
"""
return self.name.strip().lower()
__tablename__ = 'chat_entries'
content: so.Mapped[str] = so.mapped_column(
sa.Text,
nullable=False,
)
chat_id: so.Mapped[int] = so.mapped_column(
sa.ForeignKey('chats.id'),
nullable=False
)
role: so.Mapped[Role] = so.mapped_column(
sa.Enum(
Role,
name='role_types',
values_callable=lambda obj: [str(item.value) for item in obj]
),
nullable=False,
)
chat: so.Mapped['Chat'] = so.relationship(
back_populates='entry',
)
| sergeyklay/promptly | backend/promptly/models/chat.py | chat.py | py | 2,313 | python | en | code | 1 | github-code | 6 |
70506428347 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
count = [0]
ret = [0]
self.travel(root, k, count, ret)
return ret[0]
def travel(self, root, k, count, ret):
if not root:
return None
if root:
self.travel(root.left, k, count, ret)
count[0] += 1
if count[0] == k:
ret[0] = root.val
self.travel(root.right, k, count, ret)
# Alternative: collect the full in-order traversal into a list, then take the (k-1)-th element
class Solution(object):
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
count = []
self.helper(root, count)
return count[k - 1]
def helper(self, node, count):
if not node:
return
self.helper(node.left, count)
count.append(node.val)
self.helper(node.right, count) | yangh9596/Algo-Leetcode | Leetcode/230_Kth Smallest Element in a BST.py | 230_Kth Smallest Element in a BST.py | py | 1,178 | python | en | code | 0 | github-code | 6 |
22019313936 | # -*- coding: utf-8 -*-
import numpy as np
from progbar import progress
import sys
def findBchange(initialPDB, multiDoseList, Bmetric, relative=True):
# function to determine the Bfactor/Bdamage (specified by Bmetric)
# change between the initial and later datasets --> becomes an
# object attribute for the later datasets
# check that valid metric specified
if Bmetric not in ('Bfactor', 'Bdamage'):
print('Unrecognised metric (choose between Bfactor and Bdamage)')
print('---> terminating script...')
sys.exit()
print('------------------------------------------------------------')
print('Finding {} change between first and later datasets'.format(Bmetric))
num_atoms = len(multiDoseList)
# ensure atom list ordered by number of atom in structure (atomnum)
multiDoseList.sort(key=lambda x: x.atomnum)
initialPDB.sort(key=lambda x: x.atomnum)
BmetDic = {}
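    # Map atom ID -> baseline metric from the first dataset for constant-time lookups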
initBfacDic = {a.getAtomID(): getattr(a, Bmetric) for a in initialPDB}
for c, atom in enumerate(multiDoseList):
# unessential loading bar add-in
progress(c+1, num_atoms, suffix='')
atmID = atom.getAtomID()
try:
initB = initBfacDic[atmID]
except KeyError:
print('Error!! Atom "{}" not present in dataset 1'.format(atmID))
initB = np.nan
        # list() is required in Python 3: np.array over a bare map object
        # would produce a 0-d object array and break the arithmetic below
        laterBs = np.array(
            list(map(float, atom.densMetric[Bmetric]['Standard']['values'])))
if not relative:
metric = list(laterBs - initB)
else:
metric = list((laterBs - initB)/initB)
BmetDic[atom.getAtomID()] = metric
print('\n---> success...')
return BmetDic
| GarmanGroup/RIDL | lib/findMetricChange.py | findMetricChange.py | py | 1,686 | python | en | code | 3 | github-code | 6 |
40759032093 | from utilities.Constants import Constants
from indicators.Indicator import Indicator
import pandas as pd
class MACD(Indicator):
# price is DataFrame, = adj_close
def __init__(self, df=None, fast_period=12, slow_period=26, signal_period=9):
super().__init__()
self.fast_period = fast_period
self.slow_period = slow_period
self.signal_period = signal_period
# Set dataframe keys
self.indicator_key = None
self.signal_key = None
if df is not None:
self.set_input_data(df)
def set_input_data(self, df):
super().set_input_data(df)
self.indicator_key = Constants.get_key("MACD")
self.signal_key = Constants.get_key("Signal")
prices_temp = pd.DataFrame()
df_list = []
for ticker in self.tickers:
if ticker in df:
df_list.append(
pd.concat(
[df[ticker].loc[:, [self.prices_key]], prices_temp],
axis=1,
keys=[ticker]
)
)
df_indicator = pd.concat(
df_list,
axis=1
)
self.df = df_indicator.copy()
def calculate(self):
"""function to calculate MACD
typical values a = 12; b =26, c =9"""
super().calculate()
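        # MACD line = EMA(fast) - EMA(slow); signal line = EMA of the MACD line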
# Set temp dataframe keys
fast_key = Constants.get_key("MA_Fast")
slow_key = Constants.get_key("MA_Slow")
df_result = []
for ticker in self.tickers:
df_data = self.df[ticker].copy()
df_data[fast_key] = \
df_data[self.prices_key].ewm(
span=self.fast_period,
min_periods=self.fast_period
).mean()
df_data[slow_key] = \
df_data[self.prices_key].ewm(
span=self.slow_period,
min_periods=self.slow_period
).mean()
df_data[self.indicator_key] = \
df_data[fast_key] - df_data[slow_key]
df_data[self.signal_key] = \
df_data[self.indicator_key].ewm(
span=self.signal_period,
min_periods=self.signal_period
).mean()
df_data.drop(columns=[fast_key, slow_key], inplace=True)
df_data.dropna(inplace=True)
df_result.append(df_data.loc[:, [self.indicator_key, self.signal_key]])
self.df = pd.concat(df_result, axis=1, keys=self.tickers)
return self.df
| alejandropriv/stocksAnalysis | indicators/MACD.py | MACD.py | py | 2,625 | python | en | code | 0 | github-code | 6 |
41584638888 | """Celery를 사용하는 예제"""
import random
import time
from os import path
from urllib import parse
import requests
from celery import Celery
from pydub import AudioSegment
from my_logging import get_my_logger
logger = get_my_logger(__name__)
# Possible intervals (in seconds) between crawl requests
RANDOM_SLEEP_TIMES = [x * 0.1 for x in range(10, 40, 5)]
# Artist name
ARTIST_NAME = "Maurice RAVEL"
# Album title
ALBUM_NAME = "The Piano Music of Maurice Ravel from archive.org"
# List of target URLs to crawl
MUSIC_URLS = [
'https://archive.org/download/ThePianoMusicOfMauriceRavel/01PavanePourUneInfanteDfuntePourPianoMr19.mp3',
'https://archive.org/download/ThePianoMusicOfMauriceRavel/02JeuxDeauPourPianoMr30.mp3',
'https://archive.org/download/ThePianoMusicOfMauriceRavel/03SonatinePourPianoMr40-Modr.mp3',
'https://archive.org/download/ThePianoMusicOfMauriceRavel/04MouvementDeMenuet.mp3',
'https://archive.org/download/ThePianoMusicOfMauriceRavel/05Anim.mp3',
]
# Example using database 0 of Redis as the broker
app = Celery('crawler_with_celery_sample', broker='redis://localhost:6379/0')
app.conf.update(
    # Store tasks and results in Redis serialized as JSON
    task_serializer='json',
    accept_content=['json'],
    result_serializer='json',
    timezone='Asia/Seoul',
    enable_utc=True,  # handle times inside Celery tasks as UTC
    # Let each worker child process run only one task before being replaced
    worker_max_tasks_per_child=1,
    # Task results stored in Redis are discarded after 60 seconds
    result_expires=60,
    # Do not redirect the workers' standard output to the launching terminal
    worker_redirect_stdouts=False,
    # Automatically stop tasks that run longer than 180 seconds
    task_soft_time_limit=180,
    # Configure which queue and worker each task type is routed to
task_routes={
'crawler_with_celery_sample.download': {
'queue': 'download',
'routing_key': 'download',
},
'crawler_with_celery_sample.cut_mp3': {
'queue': 'media',
'routing_key': 'media',
},
},
)
# Retry at most 2 times, with a 10-second delay between retries
@app.task(bind=True, max_retries=2, default_retry_delay=10)
def download(self, url, timeout=180):
    """Download a file."""
    try:
        # Derive the mp3 file name from the URL
        parsed_url = parse.urlparse(url)
        file_name = path.basename(parsed_url.path)
        # Pick a random request interval
        sleep_time = random.choice(RANDOM_SLEEP_TIMES)
        # Log the start of the download
        logger.info("[download start] sleep: {time} {file_name}".format(time=sleep_time, file_name=file_name))
        # Wait before issuing the request
        time.sleep(sleep_time)
        # Download the music file
        r = requests.get(url, timeout=timeout)
        with open(file_name, 'wb') as fw:
            fw.write(r.content)
        # Log that the download finished
        logger.info("[download finished] {file_name}".format(file_name=file_name))
        cut_mp3.delay(file_name)  # enqueue cut_mp3 as a follow-up task
    except requests.exceptions.RequestException as e:
        # On error, log it and retry; retry() reuses the original task arguments
        logger.error("[download error - retry] file: {file_name}, e: {e}".format(file_name=file_name, e=e))
        raise self.retry(exc=e)
@app.task
def cut_mp3(file_name):
"""앞의 2초를 추출해서 저장하기"""
logger.info("[cut_mp3 start] {file_name}".format(file_name=file_name))
# 내려받은 파일을 pydub 데이터 형식으로 변환해서 읽어 들임
music = AudioSegment.from_mp3(file_name)
# mp3 파일의 앞 2초만 잘라내기
head_time = 2 * 1000 # milliseconds
head_part = music[:head_time] # 잘라냄
root_name, ext = path.splitext(file_name) # 파일 이름을 확장자와 이외의 부분으로 분할
# 저장
# 원래 파일과 구별할 수 있게 확장자 이름 앞에 _head를 붙임
file_handler = head_part.export(
root_name + "_head" + ext,
format="mp3",
tags={
'title': root_name,
'artist': ARTIST_NAME,
'album': ALBUM_NAME,
}
)
    # Note: do not forget to close the file handler.
file_handler.close()
logger.info("[cut_mp3 finished] {file_name}".format(file_name=file_name))
if __name__ == '__main__':
logger.info("[main start]")
    # Enqueue a download() task for every target URL.
    # Queued tasks are executed automatically by the workers.
for music_url in MUSIC_URLS:
download.delay(music_url)
logger.info("[main finished]")
| JSJeong-me/2021-K-Digital-Training | Web_Crawling/python-crawler/chapter_5/crawler_with_celery_sample.py | crawler_with_celery_sample.py | py | 5,058 | python | ko | code | 7 | github-code | 6 |
72492708988 | import pytest
from pytest_persistence import plugin
plg = plugin.Plugin()
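# Exercise the plugin's store/load round-trip directly, without a full pytest session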
@pytest.mark.parametrize("scope", ["session", "package", "module", "class", "function"])
@pytest.mark.parametrize("result", ["result", 42])
def test_store_fixture(result, scope):
fixture_id = ('fixture1', scope, 'tests/test_mock.py')
plg.store_fixture(result, fixture_id, 'tests/test_mock.py', None)
if scope == "session":
assert plg.output[scope] == {"('fixture1', 'session', 'tests/test_mock.py', None)": result}
else:
assert plg.output[scope]["tests/test_mock.py"] == {
f"('fixture1', '{scope}', 'tests/test_mock.py', None)": result}
@pytest.fixture(params=[(x, y)
for x in ["session", "package", "module", "class", "function"]
for y in ["result", 42]])
def store_fixtures(request):
scope = request.param[0]
result = request.param[1]
fixture_id = ('fixture1', scope, 'tests/test_mock.py')
plg.store_fixture(result, fixture_id, 'tests/test_mock.py', None)
plg.input = plg.output
return scope, result
def test_load_fixture(store_fixtures):
scope = store_fixtures[0]
result = store_fixtures[1]
fixture_id = ('fixture1', scope, 'tests/test_mock.py')
fixture_result = plg.load_fixture(fixture_id, 'tests/test_mock.py')
assert fixture_result == result
| JaurbanRH/pytest-persistence | tests/test_unit.py | test_unit.py | py | 1,367 | python | en | code | 0 | github-code | 6 |
2246643792 | testname = 'TestCase apwds_1.2.1'
avoiderror(testname)
printTimer(testname,'Start','Check Ac basic wds configuration in open mode')
###############################################################################
#Step 1
#Operation
# Run "show wireless network 2" on the AC
#Expected
# Output shows: WDS Mode....................................... Disable
################################################################################
printStep(testname,'Step 1','WDS mode is disable by default')
res1=1
#operate
data = SetCmd(switch1,'show wireless network 2')
res1 = CheckLine(data,'WDS Mode','Disable',IC=True)
#result
printCheckStep(testname, 'Step 1',res1)
################################################################################
#Step 2
#Operation
# Configure on the AC:
# Network 2
# wds-mode rootap
#Expected
# Output shows: WDS Mode is enabled and the other network configuration is cleared
################################################################################
printStep(testname,'Step 2',\
'Config network 2 as wds mode')
res1=1
#operate
EnterNetworkMode(switch1,2)
data = SetCmd(switch1,'wds-mode rootap')
res1 = CheckLine(data,'WDS Mode is enabled and the other network configuration is cleared',IC=True)
#result
printCheckStep(testname, 'Step 2',res1)
################################################################################
#Step 3
#Operation
# Configure vlan 200 under network 2 on the AC
#Expected
# Output shows: The Network WDS mode has been enabled, so this configuration is invalid.
################################################################################
printStep(testname,'Step 3',\
'Config vlan 200 in network 2')
res1=1
# operate
data = SetCmd(switch1,'vlan 200')
res1 = CheckLine(data,'The Network WDS mode has been enabled, so this configuration is invalid')
#result
printCheckStep(testname, 'Step 3',res1)
################################################################################
#Step 4
#Operation
# Configure on the AC:
# network 2
# wds-mode rootap
# security mode none
# wds-remote-vap ap2mac
# ssid ${wds_ssid}
#Expected
# "show wireless network 2" on the AC shows:
# SSID........................................... ${wds_ssid}
# WDS Mode....................................... RootAP
# WDS Remote VAP MAC............................. ap2mac
# Security Mode.................................. None
################################################################################
printStep(testname,'Step 4',\
'Config network 2 as wds and open mode')
res1=1
#operate
# Configure network 2 as WDS with AP mode rootap on the AC
Ac_wds_config(switch1,2,
ssid=wds_ssid,
remotemac=ap2vapmac)
data = SetCmd(switch1,'show wireless network 2')
res1 = CheckLineList(data,[('SSID',wds_ssid),
('WDS Mode','RootAP'),
('WDS Remote VAP MAC',ap2vapmac),
('Security Mode','None')],
IC=True)
#result
printCheckStep(testname, 'Step 4',res1)
################################################################################
#Step 5
#Operation
# Configure on the AC:
# network 2
# no wds-mode
#Expected
# "show wireless network 2" on the AC shows:
# WDS Mode....................................... Disable
################################################################################
printStep(testname,'Step 5',\
'No wds mode in network 2')
res1=1
#operate
EnterNetworkMode(switch1,2)
data = SetCmd(switch1,'no wds-mode')
IdleAfter(5)
data = SetCmd(switch1,'show wireless network 2')
res1 = CheckLine(data,'WDS Mode','Disable',IC=True)
#result
printCheckStep(testname, 'Step 5',res1)
################################################################################
#Step 6
#Operation
# Configure on the AC:
# network 2
# wds-mode rootap
# security mode none
# wds-remote-vap ap2mac
# ssid ${wds_ssid}
# Push the configuration with "apply ap profile 1"
#Expected
# Run "get wds wds1" on AP1
# Output shows:
# radio wlan0
# wds-status up
# wds-mode rootap
# wds-ssid ${wds_ssid}
# wds-security-policy plain-text
# remote-mac ap2mac
################################################################################
printStep(testname,'Step 6',\
'Config network 2 as wds and open mode',\
'Apply ap profile 1')
res1=res2=1
#operate
# Configure network 2 as WDS with AP mode rootap on the AC, then push profile 1
Ac_wds_config(switch1,2,
ssid=wds_ssid,
remotemac=ap2vapmac)
res1=WirelessApplyProfileWithCheck(switch1,['1'],[ap1mac])
res2 = Check_ap_wdsconfig(ap1,Ap1cmdtype,wds1num,
check=[('radio',wlan),
('wds-status','up'),
('wds-mode','rootap'),
('wds-ssid',wds_ssid),
('wds-security-policy','plain-text'),
('remote-mac',ap2mac_type1)],
IC=True)
#result
printCheckStep(testname, 'Step 6',res1,res2)
################################################################################
#Step 7
#Operation
# Restore the default configuration
################################################################################
printStep(testname,'Step 7',\
'Recover initial config for switches.')
#operate
# Clear the network 2 configuration
ClearNetworkConfig(switch1,2)
WirelessApplyProfileWithCheck(switch1,['1'],[ap1mac])
#end
printTimer(testname, 'End') | guotaosun/waffirm | autoTests/module/apwds/apwds_1.2.1.py | apwds_1.2.1.py | py | 5,416 | python | de | code | 0 | github-code | 6 |
12702052399 | """
To render HTML web pages
"""
import random
from django.http import HttpResponse
from django.template.loader import render_to_string
from articles.models import Article
def home_view(request, id=None, *args, **kwargs):
"""
Take in a request (Django send request)
return HTML as a response
(We pick to return the response)
"""
name = "Artem" # hard coded
number = random.randint(1, 2) # pseudo random
article_obj = Article.objects.get(id=number)
article_queryset = Article.objects.all()
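    # One pseudo-randomly chosen article supplies the headline; the full queryset feeds the list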
context = {
"object_list": article_queryset,
"title": article_obj.title,
"content": article_obj.content,
"id": article_obj.id
}
# Django templates
HTML_STRING = render_to_string("home-view.html", context=context)
return HttpResponse(HTML_STRING) | L1verly/djproject-private | djproject/views.py | views.py | py | 832 | python | en | code | 0 | github-code | 6 |
34313307894 | import os
import subprocess
import time
import sys
import tracemalloc
import concurrent.futures
import threading
stopProcessing = False
file_lock = threading.Lock()  # shared lock guarding concurrent writes to power_output2
def get_all_pids():
    ps_cmd = ['ps', '-e', '-o', 'pid']
    out = subprocess.Popen(ps_cmd, stdout=subprocess.PIPE).communicate()[0]
    out = out.decode()  # bytes -> str
    out = out.splitlines()
    out.pop(0)  # drop the "PID" header line
    return out
def perf_run(pid):
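    """Profile the pid with perf in back-to-back windows, handing each window to power_calc."""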
    while True:
        op_file = 'cpu_output_%s' % (pid)
        cmd = 'perf stat -x, -o %s -p %s sleep %s' % (op_file, pid, sys.argv[1])
start_time = time.time()
        p = os.system(cmd)
        end_time = time.time()
        power_thread = threading.Thread(target=power_calc, args=(pid, start_time, end_time,))
        power_thread.start()
        power_thread.join()  # ensure power_calc reads the perf output before it is deleted
        rm(pid)
def power_calc(pid,start,end):
cpu_output = 'cpu_output_%s' %(pid)
with open(cpu_output) as reader:
lines = [line.rstrip('\n') for line in reader]
task_clock=0.0 if (lines[2].split(',')[4]=="<not defined>" or lines[2].split(',')[4]=="") else float(lines[2].split(',')[4])
context_switch = 0.0 if (lines[3].split(',')[4]=="<not defined>" or lines[3].split(',')[4]=="") else float(lines[3].split(',')[4])
cpu_migration = 0.0 if (lines[4].split(',')[4]=="<not defined>" or lines[4].split(',')[4]=="") else float(lines[4].split(',')[4])
page_faults = 0.0 if (lines[5].split(',')[4]=="<not defined>" or lines[5].split(',')[4]=="") else float(lines[5].split(',')[4])
cycles = 0.0 if (lines[6].split(',')[4]=="<not defined>" or lines[6].split(',')[4]=="") else float(lines[6].split(',')[4])
instructions =0.0 if (lines[7].split(',')[4]=="<not defined>" or lines[7].split(',')[4]=="") else float( lines[7].split(',')[4])
branches = 0.0 if (lines[8].split(',')[4]=="<not defined>" or lines[8].split(',')[4]=="") else float(lines[8].split(',')[4])
branch_misses = 0.0 if (lines[9].split(',')[4]=="<not defined>" or lines[9].split(',')[4]=="") else float(lines[9].split(',')[4])
    # A per-call Lock() would protect nothing; use the shared module-level lock
    file_lock.acquire()
    cmd2 = ["python3", "predict.py", str(task_clock)+","+str(context_switch)+","+str(cpu_migration)+","+str(page_faults)+","+str(cycles)+","+str(instructions)+","+str(branches)+","+str(branch_misses)]
    p1 = str(subprocess.Popen(cmd2, stdout=subprocess.PIPE).communicate()[0])
    with open("power_output2", "a") as fp:
        fp.write(str(start)+","+str(end)+","+pid+","+p1[2:-3]+" \n")
    file_lock.release()
print(str(start)+"-"+str(end)+" for pid:"+pid+" carbon output:" + p1[2:-3])
def rm(pid):
cmd='sudo rm cpu_output_%s' %(pid)
os.system(cmd)
if __name__=='__main__':
pids = get_all_pids()
with open("power_output2","w") as fp:
fp.write("start_time,end_time,pid,power \n")
with concurrent.futures.ThreadPoolExecutor(max_workers=len(pids)) as executor:
for i in range(0,len(pids)):
executor.submit(perf_run,str(pids[i]).strip())
executor.shutdown()
#Task Clock, Context-Switches, CPU-migrations, page-faults, cycles, instructions, branches, branch-misses
| noman-bashir/CarbonTop | code/power_model/powerTrial.py | powerTrial.py | py | 3,096 | python | en | code | 0 | github-code | 6 |
# tree! Tree Country Tour Guide (BOJ 15805)
# Assign a parent to any city that does not have one yet
K = int(input())
A = list(map(int, input().split()))
N = max(A)
parent = [-2] * (N+1)  # the root's parent is -1, so fill with -2 (a value that never occurs)
parent[A[0]] = -1  # the root city is not necessarily city 0!
for i in range(K-1):  # if the next city has no parent yet, make the previous city its parent
if parent[A[i+1]] == -2:
parent[A[i+1]] = A[i]
print(N+1)
print(*parent) | sdh98429/dj2_alg_study | BAEKJOON/tree/b15805.py | b15805.py | py | 508 | python | ko | code | 0 | github-code | 6 |
34097968081 | #!/usr/bin/python
import curses
import sys
import RPi.GPIO as GPIO
def main(stdscr):
# do not wait for input when calling getch
stdscr.nodelay(1)
initGPIO()
while True:
# get keyboard input, returns -1 if none available
c = stdscr.getch()
if c != -1:
# print numeric value
stdscr.addstr(str(c))
stdscr.refresh()
# return curser to start position
stdscr.move(0, 0)
            if c == 97:     # 'a' -> clean up and quit
                GPIO.cleanup()
                sys.exit('Bye bye')
            elif c == 117:  # 'u' -> toggle the LED
                lightToggle()
            elif c == 114:  # 'r' -> turn the LED on
                light()
def initGPIO():
GPIO.setmode(GPIO.BCM)
GPIO.setup(8, GPIO.OUT)
def lightToggle():
GPIO.output(8, not GPIO.input(8))
def light():
GPIO.output(8, GPIO.HIGH)
def dark():
GPIO.output(8, GPIO.LOW)
if __name__ == '__main__':
curses.wrapper(main)
| tophsic/gpio | one_led_controled_by_s.py | one_led_controled_by_s.py | py | 914 | python | en | code | 0 | github-code | 6 |
36733301943 | import re
import json
from collections import defaultdict
def file_paths(file_path='logs_2/postcts.log1'):
with open(file_path, 'r') as file:
file_data = file.read()
return file_data
def parse_log_file():
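    """Pull timing (setup/hold), DRVs, density, congestion, power, VT distribution, instance count, and runtime from the final opt_design summary."""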
file_contents = file_paths()
# Compile regex patterns for improved performance
regex_pattern_summary = re.compile(
r'\s+opt_design Final Summary(.*?)Routing Overflow:\s*(.*?)\n', re.DOTALL)
regex_pattern_setupHold = re.compile(
r'\|\s+(?:WNS \(ns\):\|\s*(-?\d+\.?\d+)|TNS \(ns\):\|\s*(-?\d+\.?\d+)|Violating Paths:\|\s*(\d+\.?\d+)\s*)')
pattern_DRV = re.compile(
r'\|\s+(max_cap|max_tran|max_fanout)\s*\|\s*(\d+\s*\(\d+\))\s*\|\s*(-?\d+\.?\d+)\s*')
regex_pattern_density = re.compile(r'Density:\s*(\d+\.?\d+)\s*')
# regex_pattern_routing = re.compile(
# r'Routing Overflow:\s*(-?\d+\.?\d+)\%\s*H and (-?\d+\.?\d+)\%\s*V')
# or
regex_pattern_routing = re.compile(
r'(-?\d+\.?\d+)\%\s*H and (-?\d+\.?\d+)\%\s*V')
regex_pattern_total_power = re.compile(
r'^(Total Power\s*\n)[-]*\n(Total Internal Power:\s*(.*?)\s*(\d+\.?\d+)%\n)(Total Switching Power:\s*(.*?)\s*(\d+\.?\d+)%\n)(Total Leakage Power:\s*(.*?)\s*(\d+\.?\d+)%\n)', re.MULTILINE)
regex_pattern_instance_design = re.compile(
r'Instances in design\s*=\s*(\d+)')
regex_pattern_vt = re.compile(
r'(LVT|SVT|HVT) : inst = (\d+) \((\d+\.?\d+)%\)')
regex_pattern_run_time = re.compile(
r'totcpu=(.*?),\s*real=(.*?),\s*mem=(.*?)$')
setUpMode = defaultdict(list)
holdMode = defaultdict(list)
DRV = {}
density = []
congestion_overflow = {"H": [], "V": []}
VT_dist = defaultdict(list)
insts_count = []
power = {"Dynamic": [], "Leakage": []}
runTime = []
summary_match = regex_pattern_summary.findall(file_contents)
# Finding Total Summary (WNS, TNS, FEP), DRV's, Density, Routing_Overflow
if summary_match:
summary_data = summary_match[-1][0]
# print(summary_data)
wns_tns_match = re.findall(regex_pattern_setupHold, summary_data)
        if len(wns_tns_match) >= 6:  # need all six setup+hold rows before indexing [5]
setUpMode["WNS"].append(wns_tns_match[0][0])
setUpMode["TNS"].append(wns_tns_match[1][1])
setUpMode["FEP"].append(wns_tns_match[2][2])
holdMode["WNS"].append(wns_tns_match[3][0])
holdMode["TNS"].append(wns_tns_match[4][1])
holdMode["FEP"].append(wns_tns_match[5][2])
else:
setUpMode["WNS"].append(wns_tns_match[0][0])
setUpMode["TNS"].append(wns_tns_match[1][1])
setUpMode["FEP"].append(wns_tns_match[2][2])
holdMode["WNS"].append("-")
holdMode["TNS"].append("-")
holdMode["FEP"].append("-")
matches = re.findall(pattern_DRV, summary_data)
DRV = {key: {"terms": value, "slack": slack}
for key, value, slack in matches}
density_match = re.search(regex_pattern_density, summary_data)
density_val = density_match.group(1) if density_match else "-"
density.append(density_val)
# routing_overflow_match = re.search(regex_pattern_routing, summary_match[-1])
routing_overflow_match = re.search(
regex_pattern_routing, summary_match[-1][1])
routing_overflow_h = routing_overflow_match.group(
1) if routing_overflow_match else "-"
routing_overflow_v = routing_overflow_match.group(
2) if routing_overflow_match else "-"
congestion_overflow["H"].append(routing_overflow_h)
congestion_overflow['V'].append(routing_overflow_v)
else:
print("Setup value pattern not found")
# Finding the Total Power (Switching, Leakage)
total_power_match = regex_pattern_total_power.search(file_contents)
if total_power_match:
power['Dynamic'].append(total_power_match.group(6))
power['Leakage'].append(total_power_match.group(9))
else:
print("Pattern not found for Total Power.")
# Find Instance count
matches = regex_pattern_instance_design.findall(file_contents)
if matches:
instances_in_design = matches[-1].strip()
insts_count.append(instances_in_design)
else:
print("Pattern not found for Instances in Design.")
# Find LVT, SVT, HVT and %
matches = regex_pattern_vt.findall(file_contents)
for design_type, inst_value, percentage in matches[-3:]:
VT_dist[design_type].append(inst_value)
VT_dist[f"{design_type} %"].append(percentage)
# Find Run_Time
run_time_match = regex_pattern_run_time.search(file_contents)
if run_time_match:
runTime.append(run_time_match.group(2))
else:
print("Pattern not found for Run Time.")
return {
"setUpMode": dict(setUpMode),
"holdMode": dict(holdMode),
"DRV": DRV,
"density": density,
"congestion_overflow": congestion_overflow,
"VT_dist": dict(VT_dist),
"insts_count": insts_count,
"power": power,
"runTime": runTime
}
# parsed_data = parse_log_file()
# output_file_path = 'parsed_data.json'
# # Write the data to the JSON file
# with open(output_file_path, 'w') as json_file:
# json.dump(parsed_data, json_file, indent=4)
# def main():
# file_contents = file_paths()
# try:
# pattern_text_between = r'\s+flow\.cputime\s+flow.realtime\s+timing\.setup\.tns\s+timing\.setup\.wns\s+snapshot\nUM:\s*\d+\.?\d+\s+\d+\.?\d+\s+report_finish'
# # Use re.search() to find the pattern in the string
# match = re.search(pattern_text_between, file_contents, re.DOTALL | re.MULTILINE)
# if match:
# print(match)
# else:
# print("Pattern not found.")
# except Exception as e
# print(e)
# file_path = 'logs_2/postcts.log1'
# main(file_path)
import os
def get_max_numbered_log(logfiles, base_name):
    pattern = re.compile(fr"{base_name}\.log(\d*)")
    numbered_logs = [file for file in logfiles if pattern.fullmatch(file)]
    if numbered_logs:
        # pick the log with the largest numeric suffix (a bare ".log" counts as 0)
        return max(numbered_logs,
                   key=lambda f: int(pattern.fullmatch(f).group(1) or 0))
    return None
folder_path = 'D:/COE_07/COE-PRO/ZLogParse/logs_1'
log_files = os.listdir(folder_path)
selected_logs = []
base_names_to_check = ['floorplan', 'cts', 'prects', 'postcts']
for base_name in base_names_to_check:
    selected_log = get_max_numbered_log(log_files, base_name)
    if selected_log:
        selected_logs.append(selected_log)
print(selected_logs)
| DavidJose2000/Log_parse | Zpharse.py | Zpharse.py | py | 6,755 | python | en | code | 0 | github-code | 6 |
5153764381 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import xlsxwriter
#Reading the file into the system
file1 = pd.read_csv("/home/user/Downloads/portugese bank/bank-full-encoded.csv", sep=";" ,parse_dates= True)
print(file1.shape)
#Splitting into x,y and train and test data
y = file1["y"].values
x = file1.drop("y", axis = 1).values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size= 0.3, random_state= 25)
#Running the Random Forest classifier
rf_classifier = RandomForestClassifier()
rf_classifier.fit(x_train, y_train)
rf_prediction = rf_classifier.predict(x_test)
print("\nThe Confusion Matrix is as follows:\n", confusion_matrix(y_test,rf_prediction))
print("\nThe Classification Report for the random forrest classifier is as follows:\n", classification_report(y_test, rf_prediction))
# Writing output to Excel
writer = pd.ExcelWriter(path = "/home/user/Downloads/portugese bank/Random Forrest.xlsx", engine = 'xlsxwriter')
workbook = writer.book
rf_output = []
for i in rf_prediction:
rf_output.append(i)
df_rfoutput = pd.DataFrame(rf_output)
df_rfoutput.to_excel(writer, sheet_name="RandomForrest", startrow=0, startcol=0)
print(len(rf_prediction))
features = rf_classifier.feature_importances_
print(features)
print(len(features))
feature_list = file1.columns.values.tolist()
print(feature_list)
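# Sanity check: the feature importances should sum to 1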
x = 0
for (i,j) in np.ndenumerate(features):
x = x + j
print(x)
print("\nThe feature list and its corresponding importance is as follows:")
feature_output = []
for i in range(len(features)):  # one line per feature, instead of the hard-coded 26
print(feature_list[i], "=", features[i]*100, "%")
feature_output.append(features[i]*100)
df_feature_names = pd.DataFrame(feature_list)
df_feature_values = pd.DataFrame(feature_output)
df_feature_values.to_excel(writer, sheet_name="RandomForrest", startrow= 0, startcol=5)
df_feature_names.to_excel(writer, sheet_name="RandomForrest", startrow= 0, startcol=4)
writer.save()  # save() also closes the underlying xlsxwriter workbook
13446768071 | """ Отсортируйте по убыванию методом пузырька одномерный целочисленный массив,
заданный случайными числами на промежутке [-100; 100). Выведите на экран исходный
и отсортированный массивы.
"""
import random, math
def bubble_sort(array):
n = 1
while n < len(array):
change = 0
for i in range(len(array) - n):
if array[i] < array[i + 1]: # чуть не забыл, блин..!
array[i], array[i + 1] = array[i + 1], array[i]
change += 1
if not change:
break
n += 1
package = [math.floor(random.uniform(-1, 1) * 100) for i in range(11)]
print(package)
print('*' * 100)
bubble_sort(package)
print(package)
| byTariel/Algoritms | dz_7_task_1.py | dz_7_task_1.py | py | 860 | python | ru | code | 0 | github-code | 6 |
36651552794 | #!/usr/bin/python3
# Codeforces - Educational Round #90
# Author: frostD
# Problem B - 01 Game
def read_int():
n = int(input())
return n
def read_ints():
ints = [int(x) for x in input().split(" ")]
return ints
#---
def solve(s):
moves = 0
ms1 = s.split('10') # move set 1
ms2 = s.split('01') # move set 2
while len(ms1) > 1 or len(ms2) > 1:
if len(ms1) > len(ms2):
moves += len(ms1) - 1
s = ''.join(ms1)
else:
moves += len(ms2) - 1
s = ''.join(ms2)
ms1 = s.split('10') # move set 1
ms2 = s.split('01') # move set 2
if moves % 2 == 1:
return "DA"
else:
return "NET"
# Main
t = read_int()
for case in range(t):
s = input()
sol = solve(s)
print (sol)
| thaReal/MasterChef | codeforces/ed_round_90/game.py | game.py | py | 709 | python | en | code | 1 | github-code | 6 |
19570224957 |
def read_cook_book(file, cook_book_):
list_temp = []
line1 = str(file.readline().strip())
num2 = int(file.readline())
i = 0
while i < num2:
line = file.readline()
list_line = line.split(' | ')
dict_ingr = {'ingredient_name': list_line[0],
'quantity': int(list_line[1]),
'measure': list_line[2].strip()}
list_temp.append(dict_ingr)
i += 1
cook_book_[line1] = list_temp
return cook_book_
def get_shop_list_by_dishes(dishes, person_count):
ingredient_order = {}
for dish in dish_order:
ing_ord = cook_book[dish]
for ingr in ing_ord:
measure_quantity = {}
measure = ingr.get('measure')
measure_quantity['measure'] = measure
quantity = ingr.get('quantity')*pers_count
measure_quantity['quantity'] = quantity
ing = list(ingredient_order.keys())
ingr_key = ingr.get('ingredient_name')
if ing.count(ingr_key) > 0:
ingredient_order.get(ingr_key)
del(ingredient_order[ingr_key])
quantity_rep = ingr.get('quantity') * pers_count
measure_quantity['quantity'] = quantity + quantity_rep
ingredient_order[ingr_key] = measure_quantity
return ingredient_order
file_ = 'recipes.txt'
cook_book = {}
with open(file_, encoding="utf-8") as file_:
read_cook_book(file_, cook_book)
for line in file_:
read_cook_book(file_, cook_book)
dish_order = ['Омлет', 'Фахитос', 'Утка по-пекински']
pers_count = 3
order = get_shop_list_by_dishes(dish_order, pers_count)
print(order) | IlAnSi/DZ_2_8 | Cook_Book.py | Cook_Book.py | py | 1,706 | python | en | code | 0 | github-code | 6 |
32756126137 | # !/usr/bin/python
import os
import sys
# Logging configuration
import logging
class logger(logging.Logger):
def __init__(self):
"""Initializer."""
super().__init__()
logging.basicConfig(filename="errlog.log",
filemode="a",
format="(%(asctime)s) | %(name)s | %(levelname)s:%(message)s",
datefmt="%d %B %Y , %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO"))
| MohdFarag/Musical-Instruments-Equalizer | src/logger.py | logger.py | py | 478 | python | en | code | 0 | github-code | 6 |
26023698980 | import matplotlib.pyplot as plt
import numpy as np
#plot 1
x=np.arange(-8,8,0.1)
y=x**3
plt.subplot(2,2,1)
plt.plot(x,y)
plt.title("plot 1")
#plot 2
x=np.linspace(0,3*np.pi,400)
y=x/(1+(x**4)*(np.sin(x))**2)
plt.subplot(2,2,2)
plt.plot(x,y)
plt.title("plot 2")
#plot 3
x=np.linspace(1,10,400)
y=np.sin(1/(x**(1/2)))
plt.subplot(2,2,3)
plt.plot(x,y)
plt.title("plot 3")
#plot 4
x=np.linspace(0,2*np.pi,400)
y=x**(np.sin(x))
plt.subplot(2,2,4)
plt.plot(x,y)
plt.title("plot 4")
plt.show()
| suanhaitech/pythonstudy2023 | Wangwenbin/Matplotlib4.py | Matplotlib4.py | py | 492 | python | uk | code | 2 | github-code | 6 |
6794457250 | from __future__ import annotations
import typing
from dataclasses import dataclass
from anchorpy.borsh_extension import EnumForCodegen
import borsh_construct as borsh
class UninitializedJSON(typing.TypedDict):
kind: typing.Literal["Uninitialized"]
class ActiveJSON(typing.TypedDict):
kind: typing.Literal["Active"]
class PostOnlyJSON(typing.TypedDict):
kind: typing.Literal["PostOnly"]
class PausedJSON(typing.TypedDict):
kind: typing.Literal["Paused"]
class ClosedJSON(typing.TypedDict):
kind: typing.Literal["Closed"]
class TombstonedJSON(typing.TypedDict):
kind: typing.Literal["Tombstoned"]
@dataclass
class Uninitialized:
discriminator: typing.ClassVar = 0
kind: typing.ClassVar = "Uninitialized"
@classmethod
def to_json(cls) -> UninitializedJSON:
return UninitializedJSON(
kind="Uninitialized",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Uninitialized": {},
}
@dataclass
class Active:
discriminator: typing.ClassVar = 1
kind: typing.ClassVar = "Active"
@classmethod
def to_json(cls) -> ActiveJSON:
return ActiveJSON(
kind="Active",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Active": {},
}
@dataclass
class PostOnly:
discriminator: typing.ClassVar = 2
kind: typing.ClassVar = "PostOnly"
@classmethod
def to_json(cls) -> PostOnlyJSON:
return PostOnlyJSON(
kind="PostOnly",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"PostOnly": {},
}
@dataclass
class Paused:
discriminator: typing.ClassVar = 3
kind: typing.ClassVar = "Paused"
@classmethod
def to_json(cls) -> PausedJSON:
return PausedJSON(
kind="Paused",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Paused": {},
}
@dataclass
class Closed:
discriminator: typing.ClassVar = 4
kind: typing.ClassVar = "Closed"
@classmethod
def to_json(cls) -> ClosedJSON:
return ClosedJSON(
kind="Closed",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Closed": {},
}
@dataclass
class Tombstoned:
discriminator: typing.ClassVar = 5
kind: typing.ClassVar = "Tombstoned"
@classmethod
def to_json(cls) -> TombstonedJSON:
return TombstonedJSON(
kind="Tombstoned",
)
@classmethod
def to_encodable(cls) -> dict:
return {
"Tombstoned": {},
}
MarketStatusKind = typing.Union[
Uninitialized, Active, PostOnly, Paused, Closed, Tombstoned
]
MarketStatusJSON = typing.Union[
UninitializedJSON, ActiveJSON, PostOnlyJSON, PausedJSON, ClosedJSON, TombstonedJSON
]
def from_decoded(obj: dict) -> MarketStatusKind:
if not isinstance(obj, dict):
raise ValueError("Invalid enum object")
if "Uninitialized" in obj:
return Uninitialized()
if "Active" in obj:
return Active()
if "PostOnly" in obj:
return PostOnly()
if "Paused" in obj:
return Paused()
if "Closed" in obj:
return Closed()
if "Tombstoned" in obj:
return Tombstoned()
raise ValueError("Invalid enum object")
def from_json(obj: MarketStatusJSON) -> MarketStatusKind:
if obj["kind"] == "Uninitialized":
return Uninitialized()
if obj["kind"] == "Active":
return Active()
if obj["kind"] == "PostOnly":
return PostOnly()
if obj["kind"] == "Paused":
return Paused()
if obj["kind"] == "Closed":
return Closed()
if obj["kind"] == "Tombstoned":
return Tombstoned()
kind = obj["kind"]
raise ValueError(f"Unrecognized enum kind: {kind}")
layout = EnumForCodegen(
"Uninitialized" / borsh.CStruct(),
"Active" / borsh.CStruct(),
"PostOnly" / borsh.CStruct(),
"Paused" / borsh.CStruct(),
"Closed" / borsh.CStruct(),
"Tombstoned" / borsh.CStruct(),
)
| Ellipsis-Labs/phoenixpy | phoenix/types/market_status.py | market_status.py | py | 4,121 | python | en | code | 5 | github-code | 6 |
24883752413 | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'informes.views.home', name='i_home'),
url(r'^pendientes/$', 'informes.views.informes_pendientes', name='i_pend'),
url(r'^arreglados/$', 'informes.views.informes_arreglados', name='i_fixed'),
url(r'^noarreglados/$', 'informes.views.informes_wontfix', name='i_wontfix'),
url(r'^equipo/(?P<equipo_id>\d+)/levantar/$', 'informes.views.levantar_informe_equipo', name='informe_le'),
url(r'^(?P<informe_id>\d+)/resolver/$', 'informes.views.resolver_informe', name='informe_r'),
)
| efylan/ccreservas | informes/urls.py | urls.py | py | 592 | python | es | code | 0 | github-code | 6 |
31559622204 | num = input()
#First Method for python
print(num[::-1])
#Second Method for c
num,a = int(num),0
while num > 0:
a = a*10 + num%10
num = num//10
print(a) | Shobhit0109/programing | EveryOther/python/Codes/New codes/Rev num in 2 ays.py | Rev num in 2 ays.py | py | 166 | python | en | code | 0 | github-code | 6 |
38336203002 | #!/usr/bin/python
from websocket import create_connection
import unittest
from common import read_info
from common import read_message
from common import check_action as c
import time
import json
class websocket_request(unittest.TestCase):
"""32. 安装脚本"""
def setUp(self):
rt=read_info.ReadInfo()
web=rt.get_device_ip()
port=rt.get_port()
url=web+":"+port
try:
self.ws=create_connection(url,timeout=5) #建立设备连接
if self.ws.connected:
print("服务:%s连接成功!"%url)
except Exception as e:
print("websocket连接失败:%s"%e)
pass
def test_install_script(self):
"""32. 安装脚本/32.1. 发送数据 """
rm=read_message.ReadMessage()
data_c=rm.get_data("5","control")
url=self.ws
print("step 1、控制设备:")
c.checkAction(url,data_c)
time.sleep(1)
data_initialize=rm.get_data("3","initialize")
print("step 2、初始化:")
c.checkAction(url,data_initialize)
time.sleep(8)
data_install_script=rm.get_data("32","install_script")
"""重新设置安装文件名"""
data_dict=json.loads(data_install_script)
data_dict["data"]["index"]=1
data_dict["data"]["name"]="test.lua"
print("安装脚本:"+data_dict["data"]["name"])
data_install_script=json.dumps(data_dict)
print(data_install_script)
print("step 3、安装test.lua文件")
c.checkAction(url,data_install_script)
time.sleep(2)
data_script_start=rm.get_data("1","run_script_start_test")
print("step 4、运行step 3安装的脚本:test.lua:")
c.checkAction(url,data_script_start)
time.sleep(6)
data_script_stop=rm.get_data("1","run_script_stop")
print("step 5、停止脚本运行:")
c.checkAction(url,data_script_stop)
data_r=rm.get_data("6","release")
print("step 6、释放设备:")
c.checkAction(url,data_r)
def tearDown(self):
self.ws.close()
if __name__ == "__main__":
unittest.main() | leen0910/websocket_api | websocket_api/test_case/test10_InstallScript.py | test10_InstallScript.py | py | 2,199 | python | en | code | 0 | github-code | 6 |
5897258860 | import pickle
import numpy as np
from flask import Flask, request, jsonify
# Load the pickled model
with open('model.pkl', 'rb') as file:
model = pickle.load(file)
app = Flask(__name__)
# Endpoint for making predictions
@app.route('/predict', methods=['POST'])
def predict():
try:
data = request.get_json(force=True)
# Assuming your input data is in the format of a list of dictionaries
# where each dictionary represents a row in the CSV
predictions = []
for row in data:
# Preprocess the input data (You may need to adjust this based on your actual data)
input_data = [
int(row['AGE']), int(row['PackHistory']),
int(row['MWT1']), int(row['MWT2']), float(row['FEV1']), float(row['FVC']),
int(row['CAT']), int(row['HAD']), float(row['SGRQ']),
int(row['copd']), int(row['gender']), int(row['smoking'])
]
# Make prediction using the loaded model
prediction = model.predict([input_data])
predictions.append(int(prediction[0]))
return jsonify({'predictions': predictions})
except Exception as e:
return jsonify({'error': str(e)}), 400
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| mdalamin706688/copd-ml-model | app.py | app.py | py | 1,327 | python | en | code | 0 | github-code | 6 |
75114039226 | from timeit import default_timer as timer
import re
start = timer()
file = open('input.txt')
# exponential growth, every 7 days, after 0
# unsynchronized
# +2 day before first cycle
memo = {} # global const
def solve_babies(days, initial_clock, spawn_clock, cycle):
if initial_clock > days:
return 0
key = (days, initial_clock)
if key in memo:
return memo[key]
# sync to first cycle and spawn
days -= initial_clock
count = 1 + solve_babies(days, spawn_clock, spawn_clock, cycle)
# if still enough for more cycles, recurse further
while days >= cycle:
days -= cycle
count += 1 + solve_babies(days, spawn_clock, spawn_clock, cycle)
memo[key] = count
return count
spawn = 9
cycle = 7
days = 80
result = 0
for initial_clock in file.readlines()[0].split(","):
initial_clock = int(initial_clock) + 1 # converting so that spawn happens on 0th day instead of after
result += 1 + solve_babies(days, initial_clock, spawn, cycle)
print("Completed in %fms" % ((timer() - start) * 1000))
print("%d is the result" % result) | kmckenna525/advent-of-code | 2021/day06/part2.py | part2.py | py | 1,044 | python | en | code | 2 | github-code | 6 |
10691788495 | import logging
from sentry.client.handlers import SentryHandler
logger = logging.getLogger()
# ensure we havent already registered the handler
if SentryHandler not in map(lambda x: x.__class__, logger.handlers):
logger.addHandler(SentryHandler(logging.WARNING))
# Add StreamHandler to sentry's default so you can catch missed exceptions
logger = logging.getLogger('sentry.errors')
logger.propagate = False
logger.addHandler(logging.StreamHandler())
| 8planes/langolab | django/web/sentry_logger.py | sentry_logger.py | py | 475 | python | en | code | 3 | github-code | 6 |
73080806907 | from NaiveTruthReader import NaiveTruthReader
from headbytes import HeadBytes
import numpy as np
feature_maker = HeadBytes(10)
reader = NaiveTruthReader(feature_maker, "test.csv")
reader.run()
data = [line for line in reader.data]
split_index = int(0.5 * len(data))
train_data = data[:split_index] # split% of data.
test_data = data[split_index:] # 100% - split% of data.
# np.zeros: create empty 2D X numpy array (and 1D Y numpy array) for features.
X_train = np.zeros((len(train_data), int(reader.feature.nfeatures)))
Y_train = np.zeros(len(train_data))
X_test = np.zeros((len(test_data), int(reader.feature.nfeatures)))
Y_test = np.zeros(len(test_data))
groups = [[train_data, X_train, Y_train],
[test_data, X_test, Y_test]]
for group in groups:
raw_data, X, Y = group
print("new group")
for i in range(len(raw_data)):
print(raw_data[i][2])
x, y = reader.feature.translate(raw_data[i])
X[i] = x
Y[i] = y
print("X:", X)
print("Y:", Y) | xtracthub/XtractPredictor | features/reader_test.py | reader_test.py | py | 967 | python | en | code | 0 | github-code | 6 |
72743745468 | from app.shared.common.recaptcha import CaptchaValidation
from app.shared.database.dynamodb_client import DynamodbClient
from app.shared.models import CustomerReviewModel
def main(object_id: str) -> dict:
dynamodb = DynamodbClient()
try:
dynamodb.contact_us.delete(object_id)
except Exception as error:
return({'error': error}, 400)
return ({'success': 'Message deleted successfully'}, 200) | ishwar2303/graphidot-serverless-backend | app/functions/contact_us/delete_customer_message.py | delete_customer_message.py | py | 428 | python | en | code | 0 | github-code | 6 |
12483191239 | # reference: J. P. Tignol
# "Galois Thoery of Algebraic Equations" chapter 12
import numpy as np
from sympy import factorint,root,expand
class Period:# Gaussian periods
@classmethod
def init(cls,p):# p must be prime
n = p-1
g = 2 # generator mod p
f = factorint(n)
while True:
for q in f:
if pow(g, n//q, p)==1: break
else:
break
g+=1
i = np.empty(p, dtype=np.int)
x = np.empty(n, dtype=np.int)
a = 1
for j in range(n):
x[j] = a
i[a] = j
a *= g
a %= p
cls.p = p
cls.x = x
cls.index = i
cls.factor = f
@classmethod
def SetDim(cls,e):# e must divide p-1
p = cls.p
x = cls.x
i = cls.index
n = p-1
f = n//e
# multiplication table
w = np.zeros((e,e), dtype=np.int)
for j in range(e):
for k in range(j,n,e):
l = (1 + x[k])%p
if l: w[j,i[l]%e] += 1
else: w[j] -= f
w = [np.roll(w,j,(0,1)) for j in range(e)]
cls.w = np.asarray(w)
@classmethod
def save_context(cls):
if not hasattr(cls, 'p'): return
context = {'p':cls.p, 'x':cls.x,
'index':cls.index,
'factor':cls.factor}
if hasattr(cls, 'context'):
cls.context.append(context)
else:
cls.context = [context]
@classmethod
def restore_context(cls):
if(not hasattr(cls, 'context') or
len(cls.context)==0): return
context = cls.context.pop()
cls.p = context['p']
cls.x = context['x']
cls.index = context['index']
cls.factor = context['factor']
def __init__(self, c):
self.c = c
def __mul__(self, p):
c = np.dot(self.c, np.dot(p.c, self.w))
return Period(c)
def __pow__(self, n):
m = (1<<(n.bit_length() - 1))>>1
p = self
while m:
p *= p
if n&m: p *= self
m >>= 1
return p
class Ring:# ring of cyclotomic polynomial
def __init__(self, c):# symbolic representation
self.c = c # by array of coefficients
def __add__(self, a):
if isinstance(a, Ring):
return Ring(self.c + a.c)
elif a==0:
return self
else:# never occurs
c = self.c.copy()
c[0] += a
return Ring(c)
def __mul__(self, a):
if isinstance(a, Ring):
n = len(self.c)
t = np.convolve(self.c, a.c)
c = t[:n]
c[:-1] += t[n:] - c[-1]
c[-1] = 0 # normalize
return Ring(c)
else:
return Ring(a*self.c)
def __rmul__(self, a):
return Ring(a*self.c)
def cyclo_(p, recur=True):
""" solve cyclotomic equation by radicals
return p-th roots of unity (except 1)
[exp(2pi ij/p) for j=1,2,...,p-1]
p must be prime
if recur is True, q-th roots of unity (q<p) are
recursively replaced by radical expressions
"""
if p==2: return -1
if recur: Period.save_context()
Period.init(p)
n = 1
y = np.zeros(p-1, dtype='object')
y[0] = -1
for p in list(Period.factor)[::-1]:
r = np.eye(p, dtype=np.int64)
r = [Ring(x) for x in r]
if recur: w = cyclo_(p)
else: w = [root(1,p,i) for i in range(1,p)]
w = np.insert(w,0,1)
i = np.outer(np.r_[:p], np.r_[:p])%p
w,z = w[i],w[-i]
for _ in range(Period.factor[p]):
m = n*p
Period.SetDim(m)
v = np.zeros(m, dtype='object')
v[::n] = r
u = (Period(v)**p).c
u = [x.c for x in np.r_[u[:n], u[-n:]]]
# DFT (a.k.a. Lagrange resolvent)
u = np.dot(u, w[:,1:])
for k in range(n):
t = np.dot(y[:n], u[:n])
y[k+n:m:n] = [root(x,p,0) for x in t]
# inverse DFT
v[k::n] = np.dot(z, y[k:m:n])/p
# cyclic permutation of periods
u = np.roll(u, 1, axis=0)
n = m
y[:n] = [expand(x) for x in v]
y = y[Period.index[1:]]
if recur: Period.restore_context()
return y
def cyclo(n, recur=True):
""" solve cyclotomic equation by radicals
return n-th roots of unity (except 1)
[exp(2pi ij/n) for j=1,2,...,n-1]
if recur is True, q-th roots of unity (q<n) are
recursively replaced by radical expressions
"""
if n<2:
raise RuntimeError("n<2 in cyclo")
f = factorint(n)
z = np.empty(n, dtype='object')
j = n
for p in f:
k,m = n//p,n
z[k::k] = cyclo_(p, recur)
for _ in range(1,f[p]):
l = k//p
a = np.r_[k:n:k][:,np.newaxis]
b = np.r_[l:k:l]
z[b] = [root(x,p,0) for x in z[k:m:k]]
z[a+b] = z[a]*z[b]
k,m = l,k
if j<n:
a = np.r_[j:n:j][:,np.newaxis]
b = np.r_[k:n:k]
z[(a+b)%n] = z[a]*z[b]
j = j*k//n
return z[1:]
| tt-nakamura/cyclo | cyclo.py | cyclo.py | py | 5,447 | python | en | code | 0 | github-code | 6 |
20216419382 | from model.flyweight import Flyweight
from model.static.database import database
class Operation(Flyweight):
def __init__(self,activity_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.activity_id = activity_id
cursor = database.get_cursor(
"select * from staOperations where activityID={};".format(
self.activity_id))
row = cursor.fetchone()
self.operation_id = row["operationID"]
self.operation_name = row["operationName"]
self.description = row["description"]
self.fringe = row["fringe"]
self.corridor = row["corridor"]
self.hub = row["hub"]
self.border = row["border"]
self.ratio = row["ratio"]
self.caldari_station_type_id = row["caldariStationTypeID"]
self.minmatar_station_type_id = row["minmatarStationTypeID"]
self.amarr_station_type_id = row["amarrStationTypeID"]
self.gallente_station_type_id = row["gallenteStationTypeID"]
self.jove_station_type_id = row["joveStationTypeID"]
cursor.close()
| Iconik/eve-suite | src/model/static/sta/operation.py | operation.py | py | 1,189 | python | en | code | 0 | github-code | 6 |
18002323535 | from hydra import compose, initialize
import logging
import torch
from torch.utils.tensorboard import SummaryWriter
from data.dataset import get_dex_dataloader
from trainer import Trainer
from utils.global_utils import log_loss_summary, add_dict
from omegaconf import OmegaConf
from omegaconf.omegaconf import open_dict
import os
from os.path import join as pjoin
from tqdm import tqdm
import argparse
from utils.interrupt_handler import InterruptHandler
def process_config(cfg, save=True):
root_dir = cfg["exp_dir"]
os.makedirs(root_dir, exist_ok=True)
with open_dict(cfg):
cfg["device"] = f'cuda:{cfg["cuda_id"]}' if torch.cuda.is_available() else "cpu"
if save:
yaml_path = pjoin(root_dir, "config.yaml")
print(f"Saving config to {yaml_path}")
with open(yaml_path, 'w') as f:
print(OmegaConf.to_yaml(cfg), file=f)
return cfg
def log_tensorboard(writer, mode, loss_dict, cnt, epoch):
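    # losses were accumulated over cnt batches, so the running mean (value / cnt) is logged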
for key, value in loss_dict.items():
writer.add_scalar(mode + "/" + key, value / cnt, epoch)
writer.flush()
def main(cfg):
cfg = process_config(cfg)
""" Logging """
log_dir = cfg["exp_dir"]
os.makedirs(log_dir, exist_ok=True)
logger = logging.getLogger("TrainModel")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(f'{log_dir}/log.txt')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
""" Tensorboard """
writer = SummaryWriter(pjoin(log_dir, "tensorboard"))
""" DataLoaders """
train_loader = get_dex_dataloader(cfg, "train")
test_loader = get_dex_dataloader(cfg, "test")
""" Trainer """
trainer = Trainer(cfg, logger)
start_epoch = trainer.resume()
""" Test """
def test_all(dataloader, mode, iteration):
test_loss = {}
for _, data in enumerate(tqdm(dataloader)):
_, loss_dict = trainer.test(data)
loss_dict["cnt"] = 1
add_dict(test_loss, loss_dict)
cnt = test_loss.pop("cnt")
log_loss_summary(test_loss, cnt,
lambda x, y: logger.info(f'{mode} {x} is {y}'))
log_tensorboard(writer, mode, test_loss, cnt, iteration)
""" Train """
# Upon SIGINT, it will save the current model before exiting
with InterruptHandler() as h:
train_loss = {}
for epoch in range(start_epoch, cfg["total_epoch"]):
for _, data in enumerate(tqdm(train_loader)):
loss_dict = trainer.update(data)
loss_dict["cnt"] = 1
add_dict(train_loss, loss_dict)
if trainer.iteration % cfg["freq"]["plot"] == 0:
cnt = train_loss.pop("cnt")
log_loss_summary(train_loss, cnt,
lambda x, y: logger.info(f"Train {x} is {y}"))
log_tensorboard(writer, "train", train_loss, cnt, trainer.iteration)
train_loss = {}
if trainer.iteration % cfg["freq"]["step_epoch"] == 0:
trainer.step_epoch()
if trainer.iteration % cfg["freq"]["test"] == 0:
test_all(test_loader, "test", trainer.iteration)
if trainer.iteration % cfg["freq"]["save"] == 0:
trainer.save()
if h.interrupted:
break
if h.interrupted:
break
trainer.save()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config-name", type=str, default="ipdf_config")
parser.add_argument("--exp-dir", type=str, help="E.g., './ipdf_train'.")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
initialize(version_base=None, config_path="../configs", job_name="train")
if args.exp_dir is None:
cfg = compose(config_name=args.config_name)
else:
cfg = compose(config_name=args.config_name, overrides=[f"exp_dir={args.exp_dir}"])
main(cfg)
| PKU-EPIC/UniDexGrasp | dexgrasp_generation/network/train.py | train.py | py | 4,171 | python | en | code | 63 | github-code | 6 |
1999311786 | import os
from enum import Enum, auto
from random import randint
import pygame
class Main:
@staticmethod
def start():
pygame.font.init()
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (400, 100)
surface = pygame.display.set_mode((1200, 900))
pygame.display.set_caption('Minesweeper')
state = States.running
player = Player()
grid = Grid(player)
running = True
clock = pygame.time.Clock()
while running:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN and state == States.running:
if pygame.mouse.get_pressed()[0]:
pos = pygame.mouse.get_pos()
grid.click(pos[0], pos[1])
elif pygame.mouse.get_pressed()[2]:
pos = pygame.mouse.get_pos()
grid.mark_mine(pos[0] // 30, pos[1] // 30)
if grid.check_if_win():
state = States.win
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and (state == States.game_over or state == States.win):
grid.reload()
state = States.running
if event.key == pygame.K_b:
grid.show_mines()
surface.fill((0, 0, 0))
if player.get_health() == 0:
state = States.game_over
if state == States.game_over:
Stats.draw(surface, 'Game over!', (970, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
elif state == States.win:
Stats.draw(surface, 'You win!', (1000, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
grid.draw(surface)
Stats.draw(surface, 'Lives remaining', (950, 100))
Stats.draw(surface, str(player.get_health()), (1020, 200))
pygame.display.flip()
class States(Enum):
running = auto()
game_over = auto()
win = auto()
class Player:
def __init__(self):
self.health = 5
def sub_health(self):
self.health -= 1
def get_health(self):
return self.health
class Stats:
@staticmethod
def draw(surface, label, pos):
textsurface = pygame.font.SysFont('Comic Sans MS', 24).render(label, False, (255, 255, 255))
surface.blit(textsurface, (pos[0], pos[1]))
class Cell:
def __init__(self, pos, random_mine):
self.visible = False
self.mine = random_mine
self.show_mine = False
self.size = 30
self.color = (200, 200, 200)
self.pos = pos
self.label = False
self.mine_counter = 0
self.font_color = (0, 0, 0)
self.marked = False
self.explosion = False
self.img_flag = pygame.image.load('../resources/minesweeper/cell-flagged.png')
self.img_flag = pygame.transform.scale(self.img_flag, (self.size, self.size))
self.img_explode = pygame.image.load('../resources/minesweeper/mine-exploded.png')
self.img_explode = pygame.transform.scale(self.img_explode, (self.size, self.size))
self.img_mine = pygame.image.load('../resources/minesweeper/mine.png')
self.img_mine = pygame.transform.scale(self.img_mine, (self.size, self.size))
self.img_cell = []
for i in range(9):
_img = pygame.image.load(f'../resources/minesweeper/cell-{i}.png')
_img = pygame.transform.scale(_img, (self.size, self.size))
self.img_cell.append(_img)
def draw(self, surface):
if self.visible and not self.label and not (self.show_mine and self.mine):
surface.blit(self.img_cell[0], (self.pos[0], self.pos[1]))
elif self.label:
self.show_label(surface, self.mine_counter, self.pos)
elif self.marked:
surface.blit(self.img_flag, (self.pos[0], self.pos[1]))
elif self.show_mine and self.mine:
surface.blit(self.img_mine, (self.pos[0], self.pos[1]))
elif self.explosion:
surface.blit(self.img_explode, (self.pos[0], self.pos[1]))
else:
pygame.draw.rect(surface, (50, 50, 50), (self.pos[0], self.pos[1], self.size, self.size))
def show_label(self, surface, label, pos):
# textsurface = pygame.font.SysFont('Comic Sans MS', 18).render(label, False, self.font_color)
# surface.blit(textsurface, (pos[0] + 10, pos[1] + 4))
surface.blit(self.img_cell[int(label)], (pos[0], pos[1]))
class Grid:
def __init__(self, player):
self.player = player
self.cells = []
self.search_dirs = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]
for y in range(30):
self.cells.append([])
for x in range(30):
self.cells[y].append(Cell((x * 30, y * 30), self.random_mines()))
self.lines = []
for y in range(1, 31, 1):
temp = []
temp.append((0, y * 30))
temp.append((900, y * 30))
self.lines.append(temp)
for x in range(1, 31, 1):
temp = []
temp.append((x * 30, 0))
temp.append((x * 30, 900))
self.lines.append(temp)
def random_mines(self):
r = randint(0, 10)
if r > 9:
return True
else:
return False
def draw(self, surface):
for row in self.cells:
for cell in row:
cell.draw(surface)
for line in self.lines:
pygame.draw.line(surface, (0, 125, 0), line[0], line[1])
def is_within_bounds(self, x, y):
return x >= 0 and x < 30 and y >= 0 and y < 30
def search(self, x, y):
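        # recursive flood fill: reveal connected empty cells, stopping at mines
        # and at cells that carry a neighbouring-mine count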
if not self.is_within_bounds(x, y):
return
cell = self.cells[y][x]
if cell.visible:
return
if cell.mine:
cell.explosion = True
self.player.sub_health()
return
cell.visible = True
num_mines = self.num_of_mines(x, y)
if num_mines > 0:
cell.label = True
cell.mine_counter = str(num_mines)
return
for xx, yy in self.search_dirs:
self.search(x + xx, y + yy)
def num_of_mines(self, x, y):
counter = 0
for xx, yy in self.search_dirs:
if self.is_within_bounds(x + xx, y + yy) and self.cells[y + yy][x + xx].mine:
counter += 1
return counter
def click(self, x, y):
grid_x, grid_y = x // 30, y // 30
self.search(grid_x, grid_y)
def reload(self):
self.player.health = 5
for row in self.cells:
for cell in row:
cell.visible = False
cell.label = False
cell.marked = False
cell.show_mine = False
cell.explosion = False
cell.mine = self.random_mines()
def check_if_win(self):
if self.player.health < 1:
return False
for row in self.cells:
for cell in row:
if not cell.visible and not cell.mine:
return False
return True
def show_mines(self):
for row in self.cells:
for cell in row:
if not cell.show_mine:
cell.show_mine = True
else:
cell.show_mine = False
    def mark_mine(self, x, y):
        # ignore right-clicks outside the 30x30 board (the window is wider than the grid)
        if not self.is_within_bounds(x, y):
            return
        self.cells[y][x].marked = True
if __name__ == "__main__":
Main.start()
| MaximCosta/messy-pypi | messy_pypi/done/main_minesweeper.py | main_minesweeper.py | py | 7,807 | python | en | code | 2 | github-code | 6 |
3885504768 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.html import mark_safe
from rooms.models import Room
from .models import User
# Case where admin.ModelAdmin is inherited directly
# @admin.register(User)
# class CustomUserAdmin(admin.ModelAdmin):
# """ Custom User Admin """
# list_display = ("username", "email", "gender", "language", "currency", "superhost")
# list_filter = (
# "language",
# "currency",
# "superhost",
# )
class RoomInline(admin.TabularInline):
model = Room
# Method 1
@admin.register(User)  # the decorator means the CustomUserAdmin class is used for models.User
class CustomUserAdmin(UserAdmin):
"""Custom User Admin"""
inlines = (RoomInline,)
fieldsets = UserAdmin.fieldsets + (
(
"Custom Profile",
{
"fields": (
"avatar",
"gender",
"bio",
"birthdate",
"language",
"currency",
"superhost",
"login_method",
)
},
),
)
list_display = (
"username",
# "get_thumbnail",
"first_name",
"last_name",
"email",
"is_active",
"language",
"currency",
"superhost",
"is_staff",
"is_superuser",
"email_verified",
"email_secret",
"login_method",
)
list_filter = UserAdmin.list_filter + ("superhost",)
# def get_thumbnail(self, obj):
# return mark_safe(f"<img width='50px' src='{obj.avatar}' />")
# get_thumbnail.short_description = "avatar"
# admin.site.register(models.User, CustomUserAdmin)  # Method 2
| Odreystella/Pinkbnb | users/admin.py | admin.py | py | 1,838 | python | en | code | 0 | github-code | 6 |
42660213870 | # read the sequence file to python
n = 0
for line in open("ampR.fastq"):
line = line.strip()
if not line:
continue
n += 1
    # header line starts with '@' (the "n != 4" guard skips quality strings that begin with '@')
if line.startswith("@") and n != 4:
name = line[1:].split(" ", maxsplit=1)[0]
seq = score = ""
n = 1
elif n == 2:
seq = line
elif n == 3:
assert line.startswith("+"), "error fastq record"
elif n == 4:
score = line
else:
pass
print("name: %s\nseq: %s" % (name, seq))
# length
print("length: %s" % len(seq))
# score
phred33 = [ord(i)-33 for i in score]
print("score: %s\nPhred33: %s" % (score, phred33))
| FlyPythons/Python-and-Biology | data/1/read_fastq.py | read_fastq.py | py | 647 | python | en | code | 2 | github-code | 6 |
11844211331 | from flask import Flask, render_template, request
from mbta_helper import find_stop_near
app = Flask(__name__, template_folder="templates")
@app.route("/")
def index():
"""
This function asks for the user's location
"""
return render_template("index.html")
@app.route("/POST/nearest", methods=["POST","GET"])
def find():
"""
This function returns whether the location entered has a station nearby and if there is wheelchair accessibility
"""
if request.method == "POST":
place = request.form["location"]
place = str(place)
result = find_stop_near(place)
if result == "MBTA Not Available":
return render_template("notavailable.html")
else:
result = result.split(",")
return render_template("available.html", location = result[0], wheelchair = result[1])
if __name__ == "__main__":
app.run(debug=True) | nandini363/Assignment-3 | app.py | app.py | py | 917 | python | en | code | 0 | github-code | 6 |
17372597106 | # LinearlyVariableInfill
"""
Linearly Variable Infill for 3D prints.
Author: Barnabas Nemeth
Version: 1.5
"""
from ..Script import Script
from UM.Logger import Logger
from UM.Application import Application
import re #To perform the search
from cura.Settings.ExtruderManager import ExtruderManager
from collections import namedtuple
from enum import Enum
from typing import List, Tuple
from UM.Message import Message
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
__version__ = '1.5'
##-----------------------------------------------------------------------------------------------------------------------------------------------------------------
Point2D = namedtuple('Point2D', 'x y')
Segment = namedtuple('Segment', 'point1 point2')
class Infill(Enum):
"""Enum for infill type."""
LINEAR = 1 # Linear infill like rectilinear or triangles
class Section(Enum):
"""Enum for section type."""
NOTHING = 0
INNER_WALL = 1
OUTER_WALL = 2
INFILL = 3
def dist(segment: Segment, point: Point2D) -> float:
"""Calculate the distance from a point to a line with finite length.
Args:
segment (Segment): line used for distance calculation
point (Point2D): point used for distance calculation
Returns:
float: distance between ``segment`` and ``point``
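
    Example (illustrative values):
        >>> dist(Segment(Point2D(0.0, 0.0), Point2D(10.0, 0.0)), Point2D(5.0, 3.0))
        3.0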
"""
px = segment.point2.x - segment.point1.x
py = segment.point2.y - segment.point1.y
norm = px * px + py * py
u = ((point.x - segment.point1.x) * px + (point.y - segment.point1.y) * py) / float(norm)
if u > 1:
u = 1
elif u < 0:
u = 0
x = segment.point1.x + u * px
y = segment.point1.y + u * py
dx = x - point.x
dy = y - point.y
return (dx * dx + dy * dy) ** 0.5
def two_points_distance(point1: Point2D, point2: Point2D) -> float:
"""Calculate the euclidean distance between two points.
Args:
point1 (Point2D): first point
point2 (Point2D): second point
Returns:
float: euclidean distance between the points
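
    Example (illustrative values):
        >>> two_points_distance(Point2D(0.0, 0.0), Point2D(3.0, 4.0))
        5.0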
"""
return ((point1.x - point2.x) ** 2 + (point1.y - point2.y) ** 2) ** 0.5
def min_distance_to_segment(segment: Segment, segments: List[Segment]) -> float:
"""Calculate the minimum distance from the midpoint of ``segment`` to the nearest segment in ``segments``.
Args:
segment (Segment): segment to use for midpoint calculation
segments (List[Segment]): segments list
Returns:
float: the smallest distance from the midpoint of ``segment`` to the nearest segment in the list
"""
middlePoint = Point2D((segment.point1.x + segment.point2.x) / 2, (segment.point1.y + segment.point2.y) / 2)
return min(dist(s, middlePoint) for s in segments)
def getXY(currentLineINcode: str) -> Point2D:
"""Create a ``Point2D`` object from a gcode line.
Args:
currentLineINcode (str): gcode line
Raises:
SyntaxError: when the regular expressions cannot find the relevant coordinates in the gcode
Returns:
Point2D: the parsed coordinates
"""
searchX = re.search(r"X(\d*\.?\d*)", currentLineINcode)
searchY = re.search(r"Y(\d*\.?\d*)", currentLineINcode)
if searchX and searchY:
elementX = searchX.group(1)
elementY = searchY.group(1)
else:
raise SyntaxError('Gcode file parsing error for line {currentLineINcode}')
return Point2D(float(elementX), float(elementY))
def mapRange(a: Tuple[float, float], b: Tuple[float, float], s: float) -> float:
"""Calculate a multiplier for the extrusion value from the distance to the perimeter.
Args:
a (Tuple[float, float]): a tuple containing:
- a1 (float): the minimum distance to the perimeter (always zero at the moment)
- a2 (float): the maximum distance to the perimeter where the interpolation is performed
b (Tuple[float, float]): a tuple containing:
- b1 (float): the maximum flow as a fraction
- b2 (float): the minimum flow as a fraction
s (float): the euclidean distance from the middle of a segment to the nearest perimeter
Returns:
float: a multiplier for the modified extrusion value
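
    Example (illustrative values):
        >>> round(mapRange((0.0, 6.0), (1.2, 0.6), 3.0), 3)
        0.9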
"""
(a1, a2), (b1, b2) = a, b
return b1 + ((s - a1) * (b2 - b1) / (a2 - a1))
def gcode_template(x: float, y: float, extrusion: float) -> str:
"""Format a gcode string from the X, Y coordinates and extrusion value.
Args:
x (float): X coordinate
y (float): Y coordinate
extrusion (float): Extrusion value
Returns:
str: Gcode line
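
    Example (illustrative values):
        >>> gcode_template(1.23456, 2.0, 0.123456)
        'G1 X1.235 Y2.0 E0.12346'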
"""
return "G1 X{} Y{} E{}".format(round(x, 3), round(y, 3), round(extrusion, 5))
def is_layer(line: str) -> bool:
"""Check if current line is the start of a layer section.
Args:
line (str): Gcode line
Returns:
bool: True if the line is the start of a layer section
"""
return line.startswith(";LAYER:")
def is_innerwall(line: str) -> bool:
"""Check if current line is the start of an inner wall section.
Args:
line (str): Gcode line
Returns:
bool: True if the line is the start of an inner wall section
"""
return line.startswith(";TYPE:WALL-INNER")
def is_outerwall(line: str) -> bool:
"""Check if current line is the start of an outer wall section.
Args:
line (str): Gcode line
Returns:
bool: True if the line is the start of an outer wall section
"""
return line.startswith(";TYPE:WALL-OUTER")
def ez_nyomtatasi_vonal(line: str) -> bool:
"""Check if current line is a standard printing segment.
Args:
line (str): Gcode line
Returns:
bool: True if the line is a standard printing segment
"""
return "G1" in line and " X" in line and "Y" in line and "E" in line
def is_infill(line: str) -> bool:
"""Check if current line is the start of an infill.
Args:
line (str): Gcode line
Returns:
bool: True if the line is the start of an infill section
"""
return line.startswith(";TYPE:FILL")
def fill_type(Mode):
"""Definie the type of Infill pattern
Linearly Variable Infill like lineas or triangles = 1
Args:
line (Mode): Infill Pattern
Returns:
Int: the Type of infill pattern
"""
iMode=0
if Mode == 'grid':
iMode=1
if Mode == 'lines':
iMode=1
if Mode == 'triangles':
iMode=1
if Mode == 'trihexagon':
iMode=1
if Mode == 'cubic':
iMode=1
if Mode == 'cubicsubdiv':
iMode=0
if Mode == 'tetrahedral':
iMode=1
if Mode == 'quarter_cubic':
iMode=1
if Mode == 'concentric':
iMode=0
if Mode == 'zigzag':
iMode=0
if Mode == 'cross':
iMode=0
if Mode == 'cross_3d':
iMode=0
if Mode == 'gyroid':
iMode=0
return iMode
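
# The if-chain above amounts to a membership test; an equivalent table-driven
# sketch (hypothetical constant name, same mapping) would be:
#     LINEAR_PATTERNS = {'grid', 'lines', 'triangles', 'trihexagon',
#                        'cubic', 'tetrahedral', 'quarter_cubic'}
#     def fill_type(Mode):
#         return 1 if Mode in LINEAR_PATTERNS else 0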
class LinearlyVariableInfill(Script):
def getSettingDataString(self):
return """{
"name": "Linearly Variable Infill",
"key": "LinearlyVariableInfill",
"metadata": {},
"version": 2,
"settings":
{
"variableSegmentLength":
{
"label": "Valtoztatott szakasz hossza",
"description": "Distance of the gradient (max to min) in mm",
"unit": "mm",
"type": "float",
"default_value": 6.0,
"minimum_value": 1.0,
"minimum_value_warning": 2.0
},
"divisionNR":
{
"label": "Szakasz felosztasanak szama",
"description": "Only applicable for Linearly Variable Infills; number of segments within the gradient(fullSegmentLength=variableSegmentLength / divisionNR); use sensible values to not overload",
"type": "int",
"default_value": 4,
"minimum_value": 1,
"minimum_value_warning": 2
},
"variableSpeed":
{
"label": "Valtozo sebesseg",
"description": "Activate also Valtozo sebesseg linked to the gradual flow",
"type": "bool",
"default_value": false
},
"maxSpeedFactor":
{
"label": "Max sebesseg szorzo",
"description": "Maximum over speed factor",
"unit": "%",
"type": "int",
"default_value": 200,
"minimum_value": 100,
"maximum_value": 400,
"minimum_value_warning": 110,
"maximum_value_warning": 370,
"enabled": "variableSpeed"
},
"minSpeedFactor":
{
"label": "Min sebesseg szorzo",
"description": "Minimum over speed factor",
"unit": "%",
"type": "int",
"default_value": 60,
"minimum_value": 10,
"maximum_value": 100,
"minimum_value_warning": 40,
"maximum_value_warning": 90,
"enabled": "variableSpeed"
},
"extruderNR":
{
"label": "Extruder sorszam",
"description": "Define Extruder szam in case of multi extruders",
"unit": "",
"type": "int",
"default_value": 1
}
}
}"""
## -----------------------------------------------------------------------------
#
# Main Prog
#
## -----------------------------------------------------------------------------
def execute(self, data):
Logger.log('w', 'Plugin is starting ' )
division_nr = float(self.getSettingValueByKey("divisionNR"))
variable_segment_lengh = float(self.getSettingValueByKey("variableSegmentLength"))
extruder_nr = self.getSettingValueByKey("extruderNR")
extruder_nr = extruder_nr -1
variable_speed= bool(self.getSettingValueByKey("variableSpeed"))
max_speed_factor = float(self.getSettingValueByKey("maxSpeedFactor"))
max_speed_factor = max_speed_factor /100
min_speed_factor = float(self.getSettingValueByKey("minSpeedFactor"))
min_speed_factor = min_speed_factor /100
# machine_extruder_count
# extruder_count=Application.getInstance().getGlobalContainerStack().getProperty("machine_extruder_count", "value")
# extruder_count = extruder_count-1
# if extruder_nr>extruder_count :
# extruder_nr=extruder_count
# Deprecation function
extrud = list(Application.getInstance().getGlobalContainerStack().extruders.values())
#extrud = Application.getInstance().getGlobalContainerStack().extruderList
Message('Extrud:{}'.format(extrud), title = catalog.i18nc("@info:title", "Post Processing")).show()
infillpattern = extrud[extruder_nr].getProperty("infill_pattern", "value")
connectinfill = extrud[extruder_nr].getProperty("zig_zaggify_infill", "value")
"""Parse Gcode and modify infill portions with an extrusion width gradient."""
currentSection = Section.NOTHING
lastPosition = Point2D(-10000, -10000)
littleSegmentLength = variable_segment_lengh / division_nr
infill_type=fill_type(infillpattern)
if infill_type == 0:
#
Logger.log('d', 'Infill Pattern not supported : ' + infillpattern)
Message('Infill Pattern not supported : ' + infillpattern , title = catalog.i18nc("@info:title", "Post Processing")).show()
return None
if connectinfill == True:
#
Logger.log('d', 'Connect Infill Lines no supported')
Message('Gcode must be generate without Connect Infill Lines mode activated' , title = catalog.i18nc("@info:title", "Post Processing")).show()
return None
Logger.log('d', "GradientFill Param : " + str(littleSegmentLength) + "/" + str(division_nr)+ "/" + str(variable_segment_lengh) ) #str(max_flow) + "/" + str(min_flow) + "/" +
Logger.log('d', "Pattern Param : " + infillpattern + "/" + str(infill_type) )
for layer in data:
layer_index = data.index(layer)
lines = layer.split("\n")
for currentLineINcode in lines:
new_Line=""
stringFeed = ""
line_index = lines.index(currentLineINcode)
if is_layer(currentLineINcode):
perimeterSegments = []
if is_innerwall(currentLineINcode):
currentSection = Section.INNER_WALL
# Logger.log('d', 'is_innerwall' )
if is_outerwall(currentLineINcode):
currentSection = Section.OUTER_WALL
# Logger.log('d', 'is_outerwall' )
if currentSection == Section.INNER_WALL:
if ez_nyomtatasi_vonal(currentLineINcode):
                        Logger.log('d', 'Bad line ' + currentLineINcode)
perimeterSegments.append(Segment(getXY(currentLineINcode), lastPosition))
if is_infill(currentLineINcode):
# Log Size of perimeterSegments for debuging
Logger.log('d', 'PerimeterSegments seg : {}'.format(len(perimeterSegments)))
currentSection = Section.INFILL
# ! Important
continue
if currentSection == Section.INFILL:
if "F" in currentLineINcode and "G1" in currentLineINcode:
searchSpeed = re.search(r"F(\d*\.?\d*)", currentLineINcode)
if searchSpeed:
current_speed=float(searchSpeed.group(1))
new_Line="G1 F{}\n".format(current_speed)
else:
Logger.log('d', 'Gcode file parsing error for line : ' + currentLineINcode )
if "E" in currentLineINcode and "G1" in currentLineINcode and "X" in currentLineINcode and "Y" in currentLineINcode:
currentPosition = getXY(currentLineINcode)
splitLine = currentLineINcode.split(" ")
                        # if linear infill pattern
if infill_type == 1:
for element in splitLine:
if "E" in element:
E_inCode = float(element[1:])
fullSegmentLength = two_points_distance(lastPosition, currentPosition)
segmentSteps = fullSegmentLength / littleSegmentLength
extrudeLengthPERsegment = (0.006584 * fullSegmentLength) / segmentSteps
E_inCode_last = E_inCode - (extrudeLengthPERsegment * segmentSteps)
littlesegmentDirectionandLength = Point2D((currentPosition.x - lastPosition.x) / fullSegmentLength * littleSegmentLength,(currentPosition.y - lastPosition.y) / fullSegmentLength * littleSegmentLength)
speed_deficit = ((current_speed * max_speed_factor + current_speed * min_speed_factor) / division_nr)
step_number = 0
last_step_number = 0
if segmentSteps >= 2:
# new_Line=new_Line+"; LinearlyVariableInfill segmentSteps >= 2\n"
for step in range(int(segmentSteps)):
segmentEnd = Point2D(lastPosition.x + littlesegmentDirectionandLength.x, lastPosition.y + littlesegmentDirectionandLength.y)
extrudeLength=E_inCode_last+extrudeLengthPERsegment
if perimeterSegments==[] :
                                        Logger.log('d', 'Error here ' + currentLineINcode)
shortestDistance = min_distance_to_segment(Segment(lastPosition, segmentEnd), perimeterSegments)
if shortestDistance < variable_segment_lengh:
segmentSpeed = current_speed
                                        if variable_speed:
                                            if step_number < division_nr:
                                                segmentSpeed = current_speed * min_speed_factor + (speed_deficit * step_number)
                                            if step_number >= division_nr:
                                                segmentSpeed = current_speed * max_speed_factor
                                            if step_number >= segmentSteps - division_nr:
                                                segmentSpeed = current_speed * max_speed_factor - (speed_deficit * last_step_number)
                                                last_step_number = last_step_number + 1
                                            stringFeed = " F{}".format(int(segmentSpeed))
else:
segmentSpeed = current_speed * min_speed_factor
if variable_speed:
if step_number < division_nr:
segmentSpeed = current_speed * min_speed_factor + (speed_deficit * step_number)
if step_number >= division_nr:
segmentSpeed = current_speed * max_speed_factor
if step_number >= segmentSteps - division_nr:
segmentSpeed = current_speed * max_speed_factor - (speed_deficit * last_step_number)
last_step_number=last_step_number + 1
stringFeed = " F{}".format(int(segmentSpeed))
                                    new_Line = new_Line + gcode_template(segmentEnd.x, segmentEnd.y, extrudeLength) + stringFeed + "\n"  # segment extrusion
lastPosition = segmentEnd
E_inCode_last = extrudeLength
step_number = step_number + 1
segmentSpeed = current_speed * min_speed_factor
lastSpeed = " F{}".format(int(segmentSpeed))
new_Line=new_Line + gcode_template(currentPosition.x, currentPosition.y, E_inCode, ) + lastSpeed + "\n" #Original line for finish
lines[line_index] = new_Line
else :
outPutLine = ""
# outPutLine = "; LinearlyVariableInfill segmentSteps < 2\n"
for element in splitLine:
if "E" in element:
outPutLine = outPutLine + "E" + str(round(E_inCode, 5))
else:
outPutLine = outPutLine + element + " "
outPutLine = outPutLine # + "\n"
lines[line_index] = outPutLine
# writtenToFile = 1
#
# comment like ;MESH:NONMESH
#
if ";" in currentLineINcode:
currentSection = Section.NOTHING
lines[line_index] = currentLineINcode # other Comment
#
# line with move
#
if "X" in currentLineINcode and "Y" in currentLineINcode and ("G1" in currentLineINcode or "G0" in currentLineINcode):
lastPosition = getXY(currentLineINcode)
final_lines = "\n".join(lines)
data[layer_index] = final_lines
return data
| vaxbarn/LinearlyVariableInfill | LinearlyVariableInfill.py | LinearlyVariableInfill.py | py | 21,725 | python | en | code | 0 | github-code | 6 |
20040130347 | from random import choice
def get_binary():
output = []
for i in range(8):
output.append(choice([0,1]))
return output
def get_binary_sum():
output = []
for i in range(8):
output.append(choice([0,1]))
return sum(output)
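
# Back-of-the-envelope check (illustrative, not used by the loop below): each bit
# is 0 with probability 1/2, so P(sum == 0) = (1/2) ** 8 = 1/256, and the number
# of draws until the first all-zero sample is geometric with mean 256.
expected_tries = 2 ** 8  # = 256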
samps = []
counts = 0
while 0 not in samps:
samps.append(get_binary_sum())
counts += 1
print(samps)
print(counts) | mwboiss/DSI-Prep | intro_py/binary_sum.py | binary_sum.py | py | 385 | python | en | code | 0 | github-code | 6 |
35160550288 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from .models import Employee
from .forms import AddEmployeeForm
@login_required(login_url='authapp:login')
def index(request):
context = dict()
context['employees'] = Employee.objects.all().order_by('-joined_date')[:5]
return render(request, 'employee/index.html', context)
@login_required(login_url='authapp:login')
def view_all(request):
context = dict()
    search = request.GET.get('search', '')
    context['employees'] = Employee.objects.all().order_by('-joined_date')
    if search:
context['employees'] = Employee.objects.filter(
Q(name__icontains=search) |
Q(email__icontains=search) |
Q(phone__icontains=search) |
Q(department__name__icontains=search) |
Q(role__name__icontains=search)
).order_by('-joined_date')
return render(request, 'employee/view_all.html', context)
@login_required(login_url='authapp:login')
def view_single_employee(request, id):
context = dict()
try:
employee = Employee.objects.get(pk=id)
    except Employee.DoesNotExist:
        messages.error(request, f'Employee does not exist with id {id}')
        return redirect('employee:view_all')
context['employee'] = employee
return render(request, 'employee/employee.html', context)
@login_required(login_url='authapp:login')
def add(request):
context = dict()
context['form'] = AddEmployeeForm()
if request.method == 'POST':
form = AddEmployeeForm(request.POST)
context['form'] = form
if form.is_valid():
form.save()
messages.info(request, 'Employee added successfully.')
return redirect('index')
else:
return render(request, 'employee/add.html', context)
else:
context['form'] = AddEmployeeForm()
return render(request, 'employee/add.html', context)
@login_required(login_url='authapp:login')
def update(request, id):
context = dict()
try:
employee = Employee.objects.get(pk=id)
except Employee.DoesNotExist:
        messages.error(request, f'Employee does not exist with id {id}')
return render(request, 'employee/update.html', context)
form = AddEmployeeForm(instance=employee)
context['form'] = form
if request.method == 'POST':
form = AddEmployeeForm(request.POST, instance=employee)
context['form'] = form
if form.is_valid():
form.save()
messages.info(request, 'Employee updated successfully.')
return redirect('employee:view_single_employee', employee.id)
return render(request, 'employee/update.html', context)
@login_required(login_url='authapp:login')
def delete(request, id):
try:
employee = Employee.objects.get(pk=id)
except Employee.DoesNotExist:
messages.error(request, 'Employee does not exist.')
return redirect('employee:view_all')
employee.delete()
messages.info(request, 'Employee deleted successfully.')
return redirect('employee:view_all')
| somukhan9/django-employee-management-system | employee/views.py | views.py | py | 3,206 | python | en | code | 0 | github-code | 6 |
7848372415 | import time
import numpy as np
import json
from simplex_algorithm.Interaction import Interaction
class SimplexSolver():
'''
    Class is responsible for solving maximization Linear Programming problems.
@author: Matheus Phelipe
'''
def __init__(self, matrix_a, matrix_b, matrix_c, max_iteractions, has_slack_var = True) -> None:
try:
self.convert_data_2_np_array(matrix_a, matrix_b, matrix_c)
self.has_slack_var = has_slack_var
self.is_done = False
self.iterations = 0
self.max_iteractions = max_iteractions
self.previous_interactions = []
except Exception as ex:
print(f'Failed to create numpy matrices\n {ex}')
def convert_data_2_np_array(self, matrix_a, matrix_b, matrix_c):
self.matrix_a = np.array([item for item in matrix_a])
self.matrix_b = np.reshape(np.array([item for item in matrix_b]), (len(matrix_b), 1))
self.matrix_c = np.reshape(np.array([item for item in matrix_c]), (1, len(matrix_c)))
def start(self, set_B, set_N):
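        # set_B / set_N: column indices of matrix_a forming the initial basic
        # and non-basic variable sets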
if self.has_slack_var is False:
self.add_slack_variable()
start = time.time()
while self.is_done is False and self.iterations <= self.max_iteractions:
print(f'Starting iteraction {self.iterations}')
b_inverted, list_z_index, matrix_XB = self.make_iteraction(set_B, set_N)
print(f'Z score:{self.previous_interactions[-1].z_value}')
self.iterations += 1
if self.is_done is False:
index_entering = self.compare_tuples(list_z_index)[1]
leaving_data = self.calculate_leaving_variable(b_inverted, index_entering)
alpha_data = self.calculate_alpha(leaving_data, matrix_XB, set_B)
index_leaving = set_B[alpha_data[0][1]]
print(f'XB({index_leaving}) is leaving. XB({index_entering}) is entering')
set_B = self.swap_matrices_index(set_B, index_leaving, index_entering)
set_N = self.swap_matrices_index(set_N, index_entering, index_leaving)
end = time.time()
print(f'Simplex Iteractions has finished: {end-start}')
self.generate_report()
return self.previous_interactions
def add_slack_variable(self):
pass
def swap_matrices_index(self, set_list, leaving, entering):
aux = []
for index in set_list:
if index == leaving:
aux.append(entering)
else:
aux.append(index)
return aux
def make_iteraction(self, set_B, set_N):
try:
matrix_B = self.matrix_a[:, set_B]
matrix_N = self.matrix_a[:, set_N]
matrix_CB = self.matrix_c[:, set_B]
return self.calculate(matrix_B=matrix_B, matrix_CB=matrix_CB, set_B=set_B, set_N=set_N, matrix_N=matrix_N)
except Exception as ex:
print(f'Failed on make_iteraction method\n{ex}')
def calculate(self, matrix_B, matrix_CB, set_B, set_N, matrix_N):
b_inverted = np.linalg.inv(matrix_B)
u = np.matmul(matrix_CB, b_inverted)
matrix_XB = np.matmul(b_inverted, self.matrix_b)
z = np.matmul(u, self.matrix_b)[:,0][0]
list_z_index = [(np.matmul(u, self.matrix_a[:, index])[0] - self.matrix_c[:, index][0], index) for index in set_N]
self.previous_interactions.append(Interaction(matrix_B, set_B, matrix_N, set_N, matrix_XB, z))
self.is_done = all(i[0] >= 0 for i in list_z_index)
return b_inverted, list_z_index, matrix_XB
def compare_tuples(self, tuple): #(z_value, index)
smaller = tuple[0]
for index in range(1, len(tuple)):
if tuple[index][0] < smaller[0]:
smaller = tuple[index]
return smaller
def calculate_leaving_variable(self, b_inverted, index_N):
y_index_N = np.matmul(b_inverted, self.matrix_a[:, index_N])
greater_0 = []
for index in range(len(y_index_N)):
if y_index_N[index] > 0:
greater_0.append(index)
return y_index_N, greater_0
def calculate_alpha(self, leaving_data, matrix_XB, set_B):
alpha_list = [(matrix_XB[index][0]/ leaving_data[0][index], index) for index in leaving_data[1]]
        smaller = alpha_list[0]
        index_leaving = set_B[alpha_list[0][1]]
        for index in range(1, len(alpha_list)):
            if alpha_list[index][0] < smaller[0]:
                smaller = alpha_list[index]
                index_leaving = set_B[alpha_list[index][1]]
        return smaller, index_leaving  # returns the minimum-ratio entry and the basis index to remove
def generate_report(self):
#dict_list = [item.__dict__ for item in self.previous_interactions]
#return json.dumps(dict_list)
print(self.previous_interactions) | matheusphalves/simplex-algorithm | simplex_algorithm/SimplexSolver.py | SimplexSolver.py | py | 4,877 | python | en | code | 0 | github-code | 6 |
35560642063 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mayavi import mlab
from scipy.ndimage import map_coordinates
from scipy import signal, interpolate
from PIL import Image, ImageDraw
from matplotlib.colors import ListedColormap
from tqdm import tqdm, trange
def create_block_diagram(strat, prop, facies, dx, ve, xoffset, yoffset, scale, ci, plot_strat, plot_contours, plot_sides, color_mode, bottom, topo_min, topo_max, export, opacity):
"""function for creating a 3D block diagram in Mayavi
strat - input array with stratigraphic surfaces
facies - property or facies array
dx - size of gridcells in the horizontal direction in 'strat'
ve - vertical exaggeration
offset - offset in the y-direction relative to 0
scale - scaling factor
ci - contour interval
strat_switch - 1 if you want to plot stratigraphy on the sides; 0 otherwise
contour_switch - 1 if you want to plot contours on the top surface; 0 otherwise
bottom - elevation value for the bottom of the block"""
r,c,ts = np.shape(strat)
# if z is increasing downward:
if np.max(strat[:, :, -1] - strat[:, :, 0]) < 0:
strat = -1 * strat
z = scale*strat[:,:,ts-1].T
if plot_strat:
z1 = strat[:,:,0].T
else:
z1 = strat[:,:,-1].T
X1 = scale*(xoffset + np.linspace(0,c-1,c)*dx) # x goes with c and y with r
Y1 = scale*(yoffset + np.linspace(0,r-1,r)*dx)
X1_grid , Y1_grid = np.meshgrid(X1, Y1)
if export == 1:
surf = mlab.surf(X1,Y1,z,warp_scale=ve,colormap='gist_earth',vmin=scale*topo_min,vmax=scale*topo_max, opacity = opacity)
# cmapf = matplotlib.cm.get_cmap('Blues_r',256)
BluesBig = matplotlib.cm.get_cmap('Blues_r', 512)
newcmp = ListedColormap(BluesBig(np.linspace(0.0, 1.0, 256)))
normf = matplotlib.colors.Normalize(vmin=scale*topo_min,vmax=scale*topo_max)
z_range = np.linspace(scale*topo_min, scale*topo_max, 256)
surf.module_manager.scalar_lut_manager.lut.table = (np.array(newcmp(normf(z_range)))*255).astype('uint8')
else:
# if color_mode == 'property':
# mlab.mesh(X1_grid, Y1_grid, ve*z, scalars = prop[:, :, -1], colormap='YlOrBr', vmin=0, vmax=1, opacity = opacity)
# if not plot_sides:
# mlab.mesh(X1_grid, Y1_grid, ve*scale*strat[:,:,0].T, scalars = facies[:, :, 0], colormap='YlOrBr', vmin=0, vmax=1, opacity = opacity)
# else:
mlab.surf(X1, Y1, z, warp_scale=ve, colormap='gist_earth', opacity = opacity) #, line_width=5.0, vmin=scale*topo_min,vmax=scale*topo_max, representation='wireframe')
if not plot_sides:
mlab.surf(X1, Y1, scale*strat[:,:,0].T, warp_scale=ve, colormap='gist_earth', opacity=opacity) #colormap='gist_earth',vmin=scale*topo_min,vmax=scale*topo_max, opacity = opacity)
if plot_contours:
vmin = scale * topo_min #np.min(strat[:,:,-1])
vmax = scale * topo_max #np.max(strat[:,:,-1])
contours = list(np.arange(vmin, vmax, ci*scale)) # list of contour values
mlab.contour_surf(X1, Y1, z, contours=contours, warp_scale=ve, color=(0,0,0), line_width=1.0)
if plot_sides:
gray = (0.6,0.6,0.6) # color for plotting sides
# updip side:
vertices, triangles = create_section(z1[:,0],dx,bottom)
x = scale*(xoffset + vertices[:,0])
y = scale*(yoffset + np.zeros(np.shape(vertices[:,0])))
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# downdip side:
vertices, triangles = create_section(z1[:,-1],dx,bottom)
x = scale*(xoffset + vertices[:,0])
y = scale*(yoffset + (r-1)*dx*np.ones(np.shape(vertices[:,0])))
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# left edge (looking downdip):
vertices, triangles = create_section(z1[0,:],dx,bottom)
x = scale*(xoffset + np.zeros(np.shape(vertices[:,0])))
y = scale*(yoffset + vertices[:,0])
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# right edge (looking downdip):
vertices, triangles = create_section(z1[-1,:],dx,bottom)
x = scale*(xoffset + (c-1)*dx*np.ones(np.shape(vertices[:,0])))
y = scale*(yoffset + vertices[:,0])
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# bottom face of block:
vertices = dx*np.array([[0,0],[c-1,0],[c-1,r-1],[0,r-1]])
triangles = [[0,1,3],[1,3,2]]
x = scale*(xoffset + vertices[:,0])
y = scale*(yoffset + vertices[:,1])
z = scale*bottom*np.ones(np.shape(vertices[:,0]))
mlab.triangular_mesh(x, y, ve*z, triangles, color=gray, opacity = opacity)
def add_stratigraphy_to_block_diagram(strat, prop, facies, dx, ve, xoffset, yoffset, scale, plot_surfs, color_mode, colors, colormap, line_thickness, export, opacity):
"""function for adding stratigraphy to the sides of a block diagram
colors layers by relative age
strat - input array with stratigraphic surfaces
facies - 1D array of facies codes for layers
h - channel depth (height of point bar)
thalweg_z - array of thalweg elevations for each layer
dx - size of gridcells in the horizontal direction in 'strat'
ve - vertical exaggeration
offset - offset in the y-direction relative to 0
scale - scaling factor
plot_surfs - if equals 1, stratigraphic boundaries will be plotted on the sides as black lines
color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies'
colors - colors scheme for facies (list of RGB values)
line_thickness - tube radius for plotting layers on the sides
export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)"""
r,c,ts=np.shape(strat)
if color_mode == 'time':
norm = matplotlib.colors.Normalize(vmin=0.0, vmax=ts-1)
cmap = matplotlib.cm.get_cmap(colormap)
if (color_mode == 'property') | (color_mode == 'facies'):
norm = matplotlib.colors.Normalize(vmin=0.0, vmax=0.35)
cmap = matplotlib.cm.get_cmap(colormap)
for layer_n in trange(ts-1): # main loop
vmin = 0.0
vmax = 0.35
top = strat[:,0,layer_n+1] # updip side
base = strat[:,0,layer_n]
if color_mode == "property":
props = prop[:,0,layer_n]
if plot_surfs:
Y1 = scale*(yoffset + dx*np.arange(0,r))
X1 = scale*(xoffset + np.zeros(np.shape(base)))
Z1 = ve*scale*base
mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness)
if np.max(top-base)>0:
Points,Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
triangles, scalars = create_triangles(vertices)
Y1 = scale*(yoffset + vertices[:,0])
X1 = scale*(xoffset + dx*0*np.ones(np.shape(vertices[:,0])))
Z1 = scale*vertices[:,1]
if color_mode == "property":
scalars = props[Inds[i]]
else:
scalars = []
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity)
top = strat[:,-1,layer_n+1] # downdip side
base = strat[:,-1,layer_n]
if color_mode == "property":
props = prop[:,-1,layer_n]
if plot_surfs:
Y1 = scale*(yoffset + dx*np.arange(0,r))
X1 = scale*(xoffset + dx*(c-1)*np.ones(np.shape(base)))
Z1 = ve*scale*base
mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness)
if np.max(top-base)>0:
Points,Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
triangles, scalars = create_triangles(vertices)
Y1 = scale*(yoffset + vertices[:,0])
X1 = scale*(xoffset + dx*(c-1)*np.ones(np.shape(vertices[:,0])))
Z1 = scale*vertices[:,1]
if color_mode == "property":
scalars = props[Inds[i]]
else:
scalars = []
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity)
top = strat[0,:,layer_n+1] # left edge (looking downdip)
base = strat[0,:,layer_n]
if color_mode == "property":
props = prop[0,:,layer_n]
if plot_surfs:
Y1 = scale*(yoffset + np.zeros(np.shape(base)))
X1 = scale*(xoffset + dx*np.arange(0,c))
Z1 = ve*scale*base
mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness)
if np.max(top-base)>0:
Points,Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
triangles, scalars = create_triangles(vertices)
Y1 = scale*(yoffset + dx*0*np.ones(np.shape(vertices[:,0])))
X1 = scale*(xoffset + vertices[:,0])
Z1 = scale*vertices[:,1]
if color_mode == "property":
scalars = props[Inds[i]]
else:
scalars = []
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity)
top = strat[-1,:,layer_n+1] # right edge (looking downdip)
base = strat[-1,:,layer_n]
if color_mode == "property":
props = prop[-1,:,layer_n]
if plot_surfs:
Y1 = scale*(yoffset + dx*(r-1)*np.ones(np.shape(base)))
X1 = scale*(xoffset + dx*np.arange(0,c))
Z1 = ve*scale*base
mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness)
if np.max(top-base)>0:
Points,Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
triangles, scalars = create_triangles(vertices)
Y1 = scale*(yoffset + dx*(r-1)*np.ones(np.shape(vertices[:,0])))
X1 = scale*(xoffset + vertices[:,0])
Z1 = scale*vertices[:,1]
if color_mode == "property":
scalars = props[Inds[i]]
else:
scalars = []
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity)
def create_exploded_view(strat, prop, facies, x0, y0, nx, ny, gap, dx, ve, scale, plot_strat, plot_surfs, plot_contours, plot_sides, color_mode, colors, colormap, line_thickness, bottom,export, topo_min, topo_max, ci, opacity):
"""function for creating an exploded-view block diagram
inputs:
    strat - stack of stratigraphic surfaces
    prop - property array
    facies - facies array
    x0, y0 - offsets in the x- and y-directions relative to 0
    nx - number of blocks in x direction
    ny - number of blocks in y direction
    gap - gap between blocks (number of gridcells)
    dx - gridcell size
    ve - vertical exaggeration
    scale - scaling factor (for whole model)
    plot_strat - if True, the stratigraphy will be plotted on the sides of the blocks
    plot_surfs - if equals 1, the stratigraphic surfaces will be plotted on the sides (adds a lot of triangles - not good for 3D printing)
    plot_contours - if True, contours will be plotted on the top surface
    color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies'
    colors - color scheme for facies (list of RGB values)
    line_thickness - tube radius for plotting layers on the sides
    bottom - elevation value for the bottom of the block
    export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)"""
r,c,ts = np.shape(strat)
count = 0
for i in range(nx):
for j in range(ny):
x1 = i * int(c/nx)
x2 = (i+1) * int(c/nx)
y1 = j * int(r/ny)
y2 = (j+1) * int(r/ny)
xoffset = x0 + (x1+i*gap)*dx
yoffset = y0 + (y1+j*gap)*dx
if color_mode == "property":
create_block_diagram(strat[y1:y2,x1:x2,:], prop[y1:y2,x1:x2,:], facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, ci, plot_strat, plot_contours, plot_sides, color_mode, bottom, topo_min, topo_max, export, opacity)
if plot_strat:
add_stratigraphy_to_block_diagram(strat[y1:y2,x1:x2,:], prop[y1:y2,x1:x2,:], facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, plot_surfs, color_mode, colors, colormap, line_thickness, export, opacity)
else:
create_block_diagram(strat[y1:y2,x1:x2,:], prop, facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, ci, plot_strat, plot_contours, plot_sides, color_mode, bottom, topo_min, topo_max, export, opacity)
if plot_strat:
add_stratigraphy_to_block_diagram(strat[y1:y2,x1:x2,:], prop, facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, plot_surfs, color_mode, colors, colormap, line_thickness, export, opacity)
count = count+1
print("block "+str(count)+" done, out of "+str(nx*ny)+" blocks")
def create_fence_diagram(strat, prop, facies, x0, y0, nx, ny, dx, ve, scale, plot_surfs, plot_sides, color_mode, colors, colormap, line_thickness, bottom, export, opacity):
"""function for creating a fence diagram
inputs:
strat - stack of stratigraphic surfaces
    prop - property array
    facies - facies array
    x0, y0 - offsets in the x- and y-directions relative to 0
nx - number of strike sections
ny - number of dip sections
dx - gridcell size
ve - vertical exaggeration
scale - scaling factor (for whole model)
plot_surfs - if equals 1, the stratigraphic surfaces will be plotted on the sides (adds a lot of triangles - not good for 3D printing)
color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies'
colors - colors scheme for facies (list of RGB values)
    line_thickness - tube radius for plotting layers on the sides
bottom - elevation value for the bottom of the block
export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)"""
r,c,ts=np.shape(strat)
gray = (0.6,0.6,0.6)
norm = matplotlib.colors.Normalize(vmin=0.0, vmax=ts-1)
cmap = matplotlib.cm.get_cmap(colormap)
vmin = np.min(prop)
vmax = np.max(prop)
gray = (0.6,0.6,0.6) # color for plotting sides
z = scale*strat[:,:,ts-1].T
z1 = strat[:,:,0].T
xoffset = 0; yoffset = 0
# updip side:
vertices, triangles = create_section(z1[:,0],dx,bottom)
x = scale*(xoffset + vertices[:,0])
y = scale*(yoffset + np.zeros(np.shape(vertices[:,0])))
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# downdip side:
vertices, triangles = create_section(z1[:,-1],dx,bottom)
x = scale*(xoffset + vertices[:,0])
y = scale*(yoffset + (r-1)*dx*np.ones(np.shape(vertices[:,0])))
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# left edge (looking downdip):
vertices, triangles = create_section(z1[0,:],dx,bottom)
x = scale*(xoffset + np.zeros(np.shape(vertices[:,0])))
y = scale*(yoffset + vertices[:,0])
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# right edge (looking downdip):
vertices, triangles = create_section(z1[-1,:],dx,bottom)
x = scale*(xoffset + (c-1)*dx*np.ones(np.shape(vertices[:,0])))
y = scale*(yoffset + vertices[:,0])
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity)
# bottom face of block:
vertices = dx*np.array([[0,0],[c-1,0],[c-1,r-1],[0,r-1]])
triangles = [[0,1,3],[1,3,2]]
x = scale*(xoffset + vertices[:,0])
y = scale*(yoffset + vertices[:,1])
z = scale*bottom*np.ones(np.shape(vertices[:,0]))
mlab.triangular_mesh(x, y, ve*z, triangles, color=gray, opacity = opacity)
section_inds = np.hstack((0, int(c/(nx+1)) * np.arange(1, nx+1), c-1))
for x1 in tqdm(section_inds): # strike sections
if plot_sides:
vertices, triangles = create_section(strat[:,x1,0],dx,bottom)
y = y0 + scale*(vertices[:,0])
x = x0 + scale*(x1*dx+np.zeros(np.shape(vertices[:,0])))
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x,y,z,triangles,color=gray)
for layer_n in range(ts-1): # main loop
top = strat[:,x1,layer_n+1]
base = strat[:,x1,layer_n]
if color_mode == 'property':
props = prop[:,x1,layer_n]
if plot_surfs:
Y1 = y0 + scale*(dx*np.arange(0,r))
X1 = x0 + scale*(x1*dx+np.zeros(np.shape(base)))
Z1 = ve*scale*base
mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness)
if np.max(top-base)>0:
Points,Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
triangles, scalars = create_triangles(vertices)
Y1 = y0 + scale*(vertices[:,0])
X1 = x0 + scale*(x1*dx+dx*0*np.ones(np.shape(vertices[:,0])))
Z1 = scale*vertices[:,1]
if color_mode == 'property':
scalars = props[Inds[i]]
else:
scalars = []
# plot_layers_on_one_side(layer_n,facies,color_mode,colors,X1,Y1,Z1,ve,triangles,vertices,scale*scalars,cmap,norm,vmin,vmax,export)
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity)
section_inds = np.hstack((0, int(r/(ny+1)) * np.arange(1, ny+1), r-1))
for y1 in tqdm(section_inds): # dip sections
if plot_sides:
vertices, triangles = create_section(strat[y1,:,0],dx,bottom)
y = y0 + scale*(y1*dx+np.zeros(np.shape(vertices[:,0])))
x = x0 + scale*(vertices[:,0])
z = scale*ve*vertices[:,1]
mlab.triangular_mesh(x,y,z,triangles,color=gray)
for layer_n in range(ts-1): # main loop
top = strat[y1,:,layer_n+1]
base = strat[y1,:,layer_n]
if color_mode == 'property':
props = prop[y1,:,layer_n]
if plot_surfs:
Y1 = y0 + scale*(y1*dx+np.zeros(np.shape(base)))
X1 = x0 + scale*(dx*np.arange(0,c))
Z1 = ve*scale*base
mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness)
if np.max(top-base)>0:
Points,Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
triangles, scalars = create_triangles(vertices)
Y1 = y0 + scale*(y1*dx + dx*0*np.ones(np.shape(vertices[:,0])))
X1 = x0 + scale*(vertices[:,0])
Z1 = scale*vertices[:,1]
if color_mode == 'property':
scalars = props[Inds[i]]
else:
scalars = []
# plot_layers_on_one_side(layer_n,facies,color_mode,colors,X1,Y1,Z1,ve,triangles,vertices,scale*scalars,cmap,norm,vmin,vmax,export)
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity)
# print('done with section '+str(nsec)+' of '+str(ny)+' dip sections')
r,c = np.shape(strat[:,:,-1])
Y1 = scale*(np.linspace(0,r-1,r)*dx)
X1 = scale*(np.linspace(0,c-1,c)*dx)
topo_min = np.min(strat[:,:,-1])
topo_max = np.max(strat[:,:,-1])
mlab.surf(X1, Y1, scale*strat[:,:,-1].T, warp_scale=ve, colormap='gist_earth', vmin=scale*topo_min, vmax=scale*topo_max, opacity=0.15)
def triangulate_layers(top,base,dx):
"""function for creating vertices of polygons that describe one layer"""
x = dx * np.arange(0,len(top))
ind1 = np.argwhere(top-base>0).flatten()
ind2 = np.argwhere(np.diff(ind1)>1)
ind2 = np.vstack((np.array([[-1]]),ind2))
ind2 = np.vstack((ind2,np.array([[len(top)]])))
Points = [] # list for points to be triangulated
Inds = []
for i in range(len(ind2)-1):
ind3 = ind1[int(ind2[i])+1:int(ind2[i+1])+1]
if (ind3[0] != 0) & (ind3[-1] != len(top)-1):
ind3 = np.hstack((ind3[0]-1,ind3))
ind3 = np.hstack((ind3,ind3[-1]+1))
top1 = top[ind3][:-1]
base1 = base[ind3][1:]
x1 = np.concatenate((x[ind3][:-1], x[ind3][::-1][:-1]))
inds = np.concatenate((ind3[:-1], ind3[::-1][:-1]))
if (ind3[0] == 0) & (ind3[-1] != len(top)-1):
ind3 = np.hstack((ind3,ind3[-1]+1))
top1 = top[ind3][:-1]
base1 = base[ind3]
x1 = np.concatenate((x[ind3][:-1], x[ind3][::-1]))
inds = np.concatenate((ind3[:-1], ind3[::-1]))
if (ind3[0] != 0) & (ind3[-1] == len(top)-1):
ind3 = np.hstack((ind3[0]-1,ind3))
top1 = top[ind3]
base1 = base[ind3][1:]
x1 = np.concatenate((x[ind3], x[ind3][::-1][:-1]))
inds = np.concatenate((ind3, ind3[::-1][:-1]))
if (ind3[0] == 0) & (ind3[-1] == len(top)-1):
top1 = top[ind3]
base1 = base[ind3]
x1 = np.concatenate((x[ind3], x[ind3][::-1]))
inds = np.concatenate((ind3, ind3[::-1]))
npoints = len(top1)+len(base1)
y = np.hstack((top1,base1[::-1]))
vertices = np.vstack((x1,y)).T
Points.append(vertices)
Inds.append(inds)
return Points,Inds
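
# Usage sketch (illustrative numbers): a layer that pinches out in the middle is
# split into one polygon per region where top > base:
#     top = np.array([1.0, 1.0, 0.0, 0.0, 1.0, 1.0])
#     base = np.zeros(6)
#     Points, Inds = triangulate_layers(top, base, dx=1.0)   # len(Points) == 2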
def create_triangles(vertices):
"""function for creating list of triangles from vertices
inputs:
vertices - 2 x n array with coordinates of polygon
returns:
triangles - indices of the 'vertices' array that from triangles (for triangular mesh)
scalars - 'fake' elevation values for each vertex of the polygon, used for coloring (relies on the base of the polygon)"""
n = len(vertices[:,0])
Z1 = vertices[:,1]
triangles = []
if (np.mod(n,2)==0) & (vertices[int((n-1)/2),0] != vertices[int((n-1)/2+1),0]): # if polygon is in the interior of the block
triangles.append([0,1,n-1])
for i in range(1,int(n/2-1)):
triangles.append([i,i+1,n-i])
triangles.append([i+1,n-i,n-i-1])
triangles.append([int(n/2-1),int(n/2),int(n/2+1)])
scalars = np.hstack((Z1[0],Z1[int(n/2):][::-1],Z1[int(n/2)+1:]))
if (np.mod(n,2)==0) & (vertices[int((n-1)/2),0] == vertices[int((n-1)/2+1),0]): # if polygon touches both sides of the block
for i in range(0,int(n/2-1)):
triangles.append([i,i+1,n-i-1])
triangles.append([i+1,n-i-1,n-i-2])
scalars = np.hstack((Z1[int(n/2):][::-1],Z1[int(n/2):]))
if np.mod(n,2)!=0: # if polygon has one segment on the side of the block
if vertices[int((n-1)/2),0] == vertices[int((n-1)/2+1),0]: # if polygon touches the right side of the block
triangles.append([0,1,n-1])
for i in range(1,int((n-1)/2)):
triangles.append([i,i+1,n-i])
triangles.append([i+1,n-i,n-i-1])
scalars = np.hstack((Z1[0],Z1[int((n+1)/2):][::-1],Z1[int((n+1)/2):]))
else:
for i in range(0,int((n-1)/2)-1): # if polygon touches the left side of the block
triangles.append([i,i+1,n-i-1])
triangles.append([i+1,n-i-1,n-i-2])
triangles.append([int((n-1)/2-1),int((n-1)/2),int((n-1)/2+1)])
scalars = np.hstack((Z1[int((n+1)/2)-1:][::-1],Z1[int((n+1)/2):]))
return triangles, scalars
def create_section(profile,dx,bottom):
"""function for creating a cross section from a top surface
inputs:
profile - elevation data for top surface
dx - gridcell size
bottom - elevation value for the bottom of the block
returns:
vertices - coordinates of vertices
triangles - indices of the 'vertices' array that from triangles (for triangular mesh)
"""
x1 = dx*np.linspace(0, len(profile)-1, len(profile))
x = np.hstack((x1, x1[::-1]))
y = np.hstack((profile, bottom*np.ones(np.shape(x1))))
vertices = np.vstack((x, y)).T
n = len(x)
triangles = []
for i in range(0,int((n-1)/2)):
triangles.append([i,i+1,n-i-1])
triangles.append([i+1,n-i-1,n-i-2])
return vertices, triangles
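
# Usage sketch (illustrative numbers): turn a 1D elevation profile into a closed
# cross-section polygon plus its triangulation:
#     profile = np.array([2.0, 3.0, 2.5, 1.0])
#     vertices, triangles = create_section(profile, dx=10.0, bottom=0.0)
#     vertices.shape == (8, 2); len(triangles) == 6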
def plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity):
"""function for plotting layers on one side of a block
inputs:
layer_n - layer number
facies - 1D array of facies codes for layers
color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies'
colors - list of RGB values used if color_mode is 'facies'
X1,Y1,Z1 - coordinates of mesh vertices
ve - vertical exaggeration
triangles - indices of triangles used in mesh
vertices - coordinates of the vertices
scalars - scalars used for coloring the mesh in 'property' mode (= z-value of the base of current layer)
    colormap - colormap used for layers in 'time' mode
norm - color normalization function used in 'time' mode
export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)
"""
if color_mode == 'time':
cmap = matplotlib.cm.get_cmap(colormap)
mlab.triangular_mesh(X1, Y1, ve*Z1, triangles, color = cmap(norm(layer_n))[:3], opacity = opacity)
if color_mode == 'property': # color based on property map
mlab.triangular_mesh(X1, Y1, ve*Z1, triangles, scalars=scalars, colormap=str(colormap), vmin=vmin, vmax=vmax, opacity = opacity)
if color_mode == 'facies':
mlab.triangular_mesh(X1,Y1,ve*Z1, triangles, color=tuple(colors[int(facies[0, 0, layer_n])]), opacity = opacity)
def create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,s1,dx,bottom,export,opacity):
r, c, ts = np.shape(strat)
dist = dx*((x2-x1)**2 + (y2-y1)**2)**0.5
s2 = s1*dx+dist
num = int(dist/float(dx))
cmap = matplotlib.cm.get_cmap(colormap)
norm = matplotlib.colors.Normalize(vmin=0.0, vmax=ts-1)
Xrand, Yrand, Srand = np.linspace(x1,x2,num), np.linspace(y1,y2,num), np.linspace(s1*dx,s2,num)
base = map_coordinates(strat[:,:,0], np.vstack((Yrand,Xrand)))
vertices, triangles = create_section(base,dx,bottom)
gray = (0.6,0.6,0.6) # color for plotting basal part of panel
mlab.triangular_mesh(scale*np.hstack((dx*Xrand,dx*Xrand[::-1])),scale*np.hstack((dx*Yrand,dx*Yrand[::-1])),scale*ve*vertices[:,1],triangles,color=gray)
for layer_n in trange(0,ts-1):
top = map_coordinates(strat[:,:,layer_n+1], np.vstack((Yrand,Xrand)))
base = map_coordinates(strat[:,:,layer_n], np.vstack((Yrand,Xrand)))
if np.max(top-base)>1e-6:
Points, Inds = triangulate_layers(top,base,dx)
for i in range(len(Points)):
vertices = Points[i]
inds = Inds[i]
triangles, scalars = create_triangles(vertices)
X1 = scale*dx*Xrand[inds]
Y1 = scale*dx*Yrand[inds]
Z1 = scale*vertices[:,1]
mlab.plot3d(X1,Y1,Z1*ve,color=(0,0,0),tube_radius=0.5)
vmin = 0; vmax = 1
plot_layers_on_one_side(layer_n,facies,color_mode,colors,X1,Y1,Z1,ve,triangles,vertices,scalars,colormap,norm,vmin,vmax,export,opacity)
def create_random_section_n_points(strat,facies,topo,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,dx,bottom,export,opacity):
r, c, ts = np.shape(strat)
if len(x1)==1:
create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,0,dx,bottom,export,opacity)
else:
count = 0
dx1,dy1,ds1,s1 = compute_derivatives(x1,y1)
for i in range(len(x1)):
create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1[i],x2[i],y1[i],y2[i],s1[i],dx,bottom,export,opacity)
count = count+1
# print("panel "+str(count)+" done, out of "+str(len(x1))+" panels")
def create_random_cookie(strat,facies,topo,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,dx,bottom,export,opacity):
r, c, ts = np.shape(strat)
count = 0
dx1,dy1,ds1,s1 = compute_derivatives(x1,y1)
for i in range(len(x1)):
create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1[i],x2[i],y1[i],y2[i],s1[i],dx,bottom,export,opacity)
count = count+1
# print("panel "+str(count)+" done, out of "+str(len(x1)+1)+" panels")
create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x2[-1],x1[0],y2[-1],y1[0],s1[-1]+np.sqrt((x1[0]-x2[-1])**2+(y1[0]-y2[-1])**2),dx,bottom,export,opacity)
polygon = []
for i in range(len(x1)):
polygon.append((x1[i]+0.5, y1[i]+0.5))
polygon.append((x2[-1]+0.5, y2[-1]+0.5))
img = Image.fromarray(np.zeros(np.shape(strat[:,:,-1])))
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
img = np.array(img)
mask = np.ones_like(strat[:,:,-1]).astype(bool)
mask[img == 1] = False
r,c = np.shape(strat[:,:,-1])
Y1 = scale*(np.linspace(0,r-1,r)*dx)
X1 = scale*(np.linspace(0,c-1,c)*dx)
topo_min = np.min(strat[:,:,-1])
topo_max = np.max(strat[:,:,-1])
mlab.surf(X1, Y1, scale*strat[:,:,-1].T, mask=mask.T, warp_scale=ve, colormap='gist_earth', vmin=scale*topo_min, vmax=scale*topo_max)
def compute_derivatives(x,y):
dx = np.diff(x) # first derivatives
dy = np.diff(y)
ds = np.sqrt(dx**2+dy**2)
s = np.hstack((0,np.cumsum(ds)))
return dx, dy, ds, s
class LineBuilder:
def __init__(self, line):
self.line = line
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.cid = line.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
if event.inaxes!=self.line.axes: return
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
def select_random_section(strat):
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.imshow(strat[:,:,-1],cmap='viridis')
plt.tight_layout()
ax.set_title('click to build line segments')
line, = ax.plot([], []) # empty line
linebuilder = LineBuilder(line)
xcoords = linebuilder.xs
ycoords = linebuilder.ys
return xcoords, ycoords
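# Note: the lists returned above are the same objects LineBuilder appends to,
# so they fill in as the user clicks (the function itself does not block).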
def plot_strat_diagram(time, elevation, time_units, elev_units, end_time, max_elevation):
fig = plt.figure(figsize=(9,6))
ax1 = fig.add_axes([0.07, 0.08, 0.85, 0.76]) # [left, bottom, width, height]
ax1.set_xlabel('time (' + time_units + ')', fontsize = 12)
ax1.set_ylabel('elevation (' + elev_units + ')', fontsize = 12)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(10)
ax2 = fig.add_axes([0.92, 0.08, 0.05, 0.76])
ax2.set_xticks([])
ax2.set_yticks([])
ax3 = fig.add_axes([0.07, 0.84, 0.85, 0.08])
ax3.set_yticks([])
ax3.set_xticks([])
ax1.set_xlim(0, end_time)
elev_range = max_elevation - np.min(elevation)
ylim1 = np.min(elevation)# - 0.02 * elev_range
ylim2 = max_elevation + 0.02 * elev_range
ax1.set_ylim(ylim1, ylim2)
ax2.set_xlim(0, 1)
ax2.set_ylim(ylim1, ylim2)
ax3.set_ylim(0, 1)
ax3.set_xlim(0, end_time)
ax4 = fig.add_axes([0.07, 0.92, 0.6, 0.08])
ax4.set_xlim(0, 10)
ax4.set_ylim(0, 1)
ax1.plot(time, elevation, 'xkcd:medium blue', linewidth = 3)
strat = np.minimum.accumulate(elevation[::-1])[::-1] # stratigraphic 'elevation'
unconf_inds = np.where(strat != elevation)[0] # indices where 'strat' curve is different from elevation
inds = np.where(np.diff(unconf_inds)>1)[0] # indices where deposition starts again, after erosion
inds = np.hstack((inds, len(unconf_inds)-1)) # add last index
if strat[-1] - strat[-2] == 0:
inds = np.hstack((inds, len(unconf_inds)-1))
if len(unconf_inds) > 0:
strat_tops = strat[unconf_inds[inds]+1] # stratigraphic tops
else:
strat_tops = []
strat_top_ages = [] # ages of the stratigraphic tops
for i in range(len(strat_tops)): # generate list of ages of stratigraphic tops
strat_top_ages.append(np.min(time[strat >= strat_tops[i]]))
loc_max_elev = signal.find_peaks(elevation)[0]
loc_min_elev = signal.find_peaks(-elevation)[0]
if elevation[-1] < elevation[-2]:
loc_min_elev = np.hstack((loc_min_elev, len(elevation)-1))
if (len(loc_min_elev) > 0) & (len(loc_max_elev) > 0):
if elevation[1] < elevation[0]: # add first point as a local maximum elevation if the series starts out erosionally
loc_max_elev = np.hstack((0, loc_max_elev))
if loc_min_elev[0] < loc_max_elev[0]:
ind = np.argmax(elevation[0 : loc_min_elev[0]])
loc_max_elev = np.sort(np.hstack((loc_max_elev, ind)))
for i in range(len(loc_min_elev)-1):
if len(loc_max_elev[loc_max_elev > loc_min_elev[i]]) > 0:
if np.min(loc_max_elev[loc_max_elev > loc_min_elev[i]]) > loc_min_elev[i+1]:
ind = np.argmax(elevation[loc_min_elev[i] : loc_min_elev[i+1]])
ind = loc_min_elev[i] + ind
loc_max_elev = np.sort(np.hstack((loc_max_elev, ind)))
else:
ind = np.argmax(elevation[loc_min_elev[i] : loc_min_elev[i+1]])
ind = loc_min_elev[i] + ind
loc_max_elev = np.sort(np.hstack((loc_max_elev, ind)))
for i in range(len(loc_max_elev)-1):
if len(loc_min_elev[loc_min_elev > loc_max_elev[i]]) > 0:
if np.min(loc_min_elev[loc_min_elev > loc_max_elev[i]]) > loc_max_elev[i+1]:
ind = np.argmin(elevation[loc_max_elev[i] : loc_max_elev[i+1]])
ind = loc_max_elev[i] + ind
loc_min_elev = np.sort(np.hstack((loc_min_elev, ind)))
else:
ind = np.argmin(elevation[loc_max_elev[i] : loc_max_elev[i+1]])
ind = loc_max_elev[i] + ind
loc_min_elev = np.sort(np.hstack((loc_min_elev, ind)))
erosion_start_times = time[loc_max_elev] # times when erosion starts
erosion_end_times = time[loc_min_elev] # times when erosion ends
erosion_start_elevations = elevation[loc_max_elev] # elevations when erosion starts
erosion_end_elevations = elevation[loc_min_elev] # elevations when erosion ends
if (len(loc_min_elev) > 0) & (len(loc_max_elev) > 0):
for i in range(len(erosion_end_times)): # plot erosional segments
ax1.plot(time[loc_max_elev[i]:loc_min_elev[i]+1],
elevation[loc_max_elev[i]:loc_min_elev[i]+1], 'xkcd:red', linewidth=3)
if len(erosion_start_times) > len(erosion_end_times): # plot last erosional segment (if needed)
ax1.plot(time[loc_max_elev[-1]:],
elevation[loc_max_elev[-1]:], 'xkcd:red', linewidth=3)
strat_top_labels = ['s' for strat_top in strat_tops] # labels for stratigraphic tops
erosion_start_labels = ['es' for erosion_start_time in erosion_start_times] # labels for start of erosion
erosion_end_labels = ['ee' for erosion_end_time in erosion_end_times] # labels for end of erosion
time_bounds = np.hstack((strat_top_ages, erosion_start_times, erosion_end_times)) # all time boundaries
sort_inds = np.argsort(time_bounds) # indices for sorting
time_bounds = time_bounds[sort_inds] # sort time boundaries
elevation_bounds = np.hstack((strat_tops, erosion_start_elevations, erosion_end_elevations)) # all elevation boundaries
elevation_bounds = elevation_bounds[sort_inds] # sort elevation boundaries
bound_labels = np.hstack((strat_top_labels, erosion_start_labels, erosion_end_labels)) # all boundary labels
bound_labels = bound_labels[sort_inds] # sort boundary labels
time_bounds = np.hstack((time[0], time_bounds, time[-1])) # add first and last time step to time boundaries
elevation_bounds = np.hstack((elevation[0], elevation_bounds, elevation[-1])) # add first and last elevation values
if elevation[-1] - elevation[-2] < 0: # add first and last boundary labels
bound_labels = np.hstack(('s', bound_labels, 'ee'))
else:
bound_labels = np.hstack(('s', bound_labels, 's'))
inds = []
for i in range(len(bound_labels)-1):
if (bound_labels[i] == 'es') & (bound_labels[i+1] == 's'):
inds.append(i)
if len(inds)>0:
for i in range(len(inds)):
bound_labels[inds[i]] = 's'
bound_labels[inds[i]+1] = 'es'
time_labels = []
for i in range(len(time_bounds)-1): # plot chronostratigraphic units
x = [time_bounds[i], time_bounds[i+1], time_bounds[i+1], time_bounds[i]]
y = [0, 0, 1, 1]
if (bound_labels[i] == 's') and (bound_labels[i+1] == 'es'): # vacuity
ax3.fill(x, y, facecolor='xkcd:light grey', edgecolor='k')
time_labels.append('v')
elif (bound_labels[i] == 'ee') and (bound_labels[i+1] == 'es'): # vacuity
ax3.fill(x, y, facecolor='xkcd:light grey', edgecolor='k')
time_labels.append('v')
elif (bound_labels[i] == 'es') and (bound_labels[i+1] == 'ee'): # erosion
ax3.fill(x, y, facecolor='xkcd:red', edgecolor='k')
time_labels.append('e')
elif (bound_labels[i] == 's') and (bound_labels[i+1] == 'ee'): # erosion
time_labels.append('e')
elif (bound_labels[i] == 'ee') and (bound_labels[i+1] == 's'): # deposition
ax3.fill(x, y, facecolor='xkcd:medium blue', edgecolor='k')
time_labels.append('d')
elif (bound_labels[i] == 's') and (bound_labels[i+1] == 's'): # deposition
ax3.fill(x, y, facecolor='xkcd:medium blue', edgecolor='k')
time_labels.append('d')
ax1.plot([time_bounds[i], time_bounds[i]], [elevation_bounds[i], max_elevation + 0.02 * elev_range], 'k--', linewidth=0.5)
for i in range(len(strat_tops)):
ax2.plot([0, 1], [strat_tops[i], strat_tops[i]], color = 'xkcd:red', linewidth = 3)
if len(strat_tops) > 0:
if elevation[0] < np.min(strat_tops):
strat_tops = np.hstack((elevation[0], strat_tops, elevation[-1]))
strat_top_ages = np.hstack((0, strat_top_ages, time[-1]))
else:
strat_tops = np.hstack((strat_tops, elevation[-1]))
strat_top_ages = np.hstack((strat_top_ages, time[-1]))
else:
strat_tops = np.hstack((elevation[0], strat_tops, elevation[-1]))
strat_top_ages = np.hstack((0, strat_top_ages, time[-1]))
for i in range(len(strat_tops)-1): # plot stratigraphic units
x = [0, 1, 1, 0]
y = [strat_tops[i], strat_tops[i], strat_tops[i+1], strat_tops[i+1]]
ax2.fill(x, y, facecolor='xkcd:medium blue', edgecolor='k')
if i > 0:
ax1.plot([strat_top_ages[i], end_time], [strat_tops[i], strat_tops[i]], 'k--', linewidth=0.5)
times = np.diff(time_bounds)
thicknesses = np.diff(elevation_bounds)
deposition_time = np.sum([item[0] for item in zip(times, time_labels) if item[1] == 'd' ])
vacuity_time = np.sum([item[0] for item in zip(times, time_labels) if item[1] == 'v' ])
erosion_time = np.sum([item[0] for item in zip(times, time_labels) if item[1] == 'e' ])
deposition_thickness = np.sum([item[0] for item in zip(thicknesses, time_labels) if item[1] == 'd' ])
vacuity_thickness = np.sum([item[0] for item in zip(thicknesses, time_labels) if item[1] == 'v' ])
eroded_thickness = np.sum([item[0] for item in zip(thicknesses, time_labels) if item[1] == 'e' ])
dve_data = [deposition_time, vacuity_time, erosion_time, deposition_thickness, vacuity_thickness, eroded_thickness]
y1 = 0.55
y2 = 0.15
y = [y1, y1, y2, y2]
x1 = 0
x2 = 3 * deposition_time/time[-1]
x = [x1, x2, x2, x1]
ax4.fill(x, y, facecolor='xkcd:medium blue', edgecolor = 'k', zorder = 1000)
ax4.axis('off')
ax4.text(x1, y1 + 0.07, 'deposition', fontsize = 12)
ax4.text(x1 + 0.05, 0.27, str(np.round(deposition_time/time[-1], 3)), fontsize = 10, color = 'w',zorder=2000)
x1 = 3
x2 = x1 + 3 * erosion_time/time[-1]
x = [x1, x2, x2, x1]
ax4.fill(x, y, facecolor='xkcd:red', edgecolor = 'k', zorder = 1001)
ax4.text(x1, y1 + 0.07, 'erosion', fontsize = 12)
ax4.text(x1 + 0.05, 0.27, str(np.round(erosion_time/time[-1], 3)), fontsize = 10, color = 'w',zorder=2000)
x1 = 6
x2 = x1 + 3 * vacuity_time/time[-1]
x = [x1, x2, x2, x1]
ax4.fill(x, y, facecolor='xkcd:light grey', edgecolor = 'k', zorder = 1002)
ax4.text(x1, y1 + 0.07, 'vacuity', fontsize = 12)
ax4.text(x1 + 0.05, 0.27, str(np.round(vacuity_time/time[-1], 3)), fontsize = 10, color = 'w',zorder=2000)
return fig
def topostrat(topo):
# convert topography to stratigraphy
if len(np.shape(topo)) == 2:
strat = np.minimum.accumulate(topo[::-1, :], axis=0)[::-1, :]
if len(np.shape(topo)) == 3:
strat = np.minimum.accumulate(topo[:, :, ::-1], axis=2)[:, :, ::-1]
return strat
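# topostrat works via a reverse cumulative minimum: each surface is clipped to
# the lowest of all later surfaces, so erosion truncates older topography.
# Hedged 2D example (rows are time steps; the values are made up):
# topo = np.array([[0.], [2.], [1.], [3.]])
# topostrat(topo) -> [[0.], [1.], [1.], [3.]]  (the 2.0 peak is eroded to 1.0)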
def create_wheeler_diagram(topo):
"""create Wheeler (chronostratigraphic) diagram from a set of topographic surfaces
"""
strat = topostrat(topo) # convert topography to stratigraphy
wheeler = np.diff(topo, axis=2) # 'normal' Wheeler diagram
    wheeler_strat = np.diff(strat, axis=2) # array for Wheeler diagram with vacuity blanked out; this array will be a positive number if there is preserved deposition, zero otherwise
vacuity = np.zeros(np.shape(wheeler)) # array for vacuity
vacuity[(wheeler>0) & (wheeler_strat==0)] = 1 # make the 'vacuity' array 1 where there was deposition (wheeler > 0) but stratigraphy is not preserved (wheeler_strat = 0)
wheeler_strat[wheeler<0] = wheeler[wheeler<0] # add erosion to 'wheeler_strat' (otherwise it would only show deposition)
return strat, wheeler, wheeler_strat, vacuity
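# Hedged usage sketch (assumes 'topo' is an r x c x t array of topographic
# surfaces through time, as in topostrat above):
# strat, wheeler, wheeler_strat, vacuity = create_wheeler_diagram(topo)
# wheeler[..., k] > 0 marks deposition and < 0 erosion during time step k;
# vacuity flags deposition that was later eroded away and is therefore
# absent from the preserved record (wheeler_strat).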
def plot_model_cross_section_EW(strat, prop, facies, dx, xsec, color_mode, line_freq = 1, ve = False, map_aspect = 1, flattening_ind = False, units = 'm', list_of_colors = ['lemonchiffon', 'peru', 'sienna']):
"""Plots an E-W oriented cross section through a stratigraphic model
    :param line_freq: draw a layer boundary line for every 'line_freq'-th layer
:param strat: stratigraphic grid
:param prop: property array
:param facies: facies array
:param dx: gridcell size in the x- and y directions
:param xsec: index of cross section to be displayed
:param color_mode: determines what kind of plot is created; can be 'property' or 'facies'
:param flattening_ind: index of stratigraphic top that should be used for flattening; default is 'False' (= no flattening)
:param ve: vertical exaggeration; default is 'False'
:param units: units used in the model
:param map_aspect: the aspect ratio of the inset map that shows the location of the cross section
:param list_of_colors: list of named matplotlib colors that will be used when 'color_mode' is set to 'facies'
:return fig: figure handle
"""
fig = plt.figure(figsize = (10, 6))
ax = fig.add_subplot(111)
axin = ax.inset_axes([0.03, 0.03, 0.3, 0.3])
r,c,ts = np.shape(strat)
for i in trange(0, ts-1):
if flattening_ind:
top = (strat[xsec, :, i] - strat[xsec, :, flattening_ind])
base = (strat[xsec, :, i+1] - strat[xsec, :, flattening_ind])
else:
top = strat[xsec, :, i]
base = strat[xsec, :, i+1]
props = prop[xsec, :, i]
faciess = facies[xsec, :, i]
if np.max(base - top)>0:
Points, Inds = triangulate_layers(base,top,dx)
for j in range(len(Points)):
vertices = Points[j]
triangles, scalars = create_triangles(vertices)
x = vertices[:,0]
y = vertices[:,1]
if color_mode == 'property':
colors = props[Inds[j]]
colors = np.mean(colors[np.array(triangles)], axis = 1)
ax.tripcolor(x, y, triangles=triangles, facecolors = colors, cmap = 'YlOrBr_r',
edgecolors = 'none', vmin = 0, vmax = 0.35)
if color_mode == 'facies':
colors = faciess[Inds[j]]
colors = np.median(colors[np.array(triangles)], axis = 1)
cmap = ListedColormap(list_of_colors)
ax.tripcolor(x, y, triangles=triangles, facecolors = colors, edgecolors = 'none', cmap = cmap, vmin = 0, vmax = len(list_of_colors))
if np.mod(i, line_freq) == 0:
ax.plot(np.arange(0, dx*c, dx), top, 'k', linewidth = 0.25)
if i == ts-2:
ax.plot(np.arange(0, dx*c, dx), base, 'k', linewidth = 0.5)
ax.set_xlim(0, dx*(c-1))
if flattening_ind:
ax.set_ylim(np.nanmin(strat[:,:,0] - strat[:, :, flattening_ind]),
np.nanmax(strat[:,:,-1] - strat[:, :, flattening_ind]))
else:
ax.set_ylim(np.nanmin(strat), np.nanmax(strat))
ax.set_xlabel('distance (' + units + ')')
ax.set_ylabel('depth (' + units + ')')
axin.imshow(strat[:, :, -1], cmap='viridis', aspect = map_aspect)
axin.set_xticks([])
axin.set_yticks([])
axin.plot([0, c-1], [xsec, xsec], 'k')
# axin.set_aspect('equal')
if ve:
ax.set_aspect(ve, adjustable='datalim')
# plt.tight_layout()
return fig
def plot_model_cross_section_NS(strat, prop, facies, dx, xsec, color_mode, line_freq = 1, ve = False, flattening_ind = False, units = 'm', map_aspect = 1, list_of_colors = ['lemonchiffon', 'peru', 'sienna']):
"""Plots an E-W oriented cross section through a stratigraphic model
:param WG: well graph
:param strat: stratigraphic grid
:param prop: property array
:param facies: facies array
:param dx: gridcell size in the x- and y directions
:param xsec: index of cross section to be displayed
:param color_mode: determines what kind of plot is created; can be 'property' or 'facies'
:param flattening_ind: index of stratigraphic top that should be used for flattening; default is 'False' (= no flattening)
:param units: units used in the model
:param map_aspect: the aspect ratio of the inset map that shows the location of the cross section
:param list_of_colors: list of named matplotlib colors that will be used when 'color_mode' is set to 'facies'
:return fig: figure handle
"""
fig = plt.figure(figsize = (10, 6))
ax = fig.add_subplot(111)
axin = ax.inset_axes([0.03, 0.03, 0.3, 0.3])
r,c,ts = np.shape(strat)
for i in trange(0, ts-1):
if flattening_ind:
top = (strat[:, xsec, i] - strat[:, xsec, flattening_ind])
base = (strat[:, xsec, i+1] - strat[:, xsec, flattening_ind])
else:
top = strat[:, xsec, i]
base = strat[:, xsec, i+1]
props = prop[:, xsec, i]
faciess = facies[:, xsec, i]
if np.max(base - top)>0:
Points, Inds = triangulate_layers(base,top,dx)
for j in range(len(Points)):
vertices = Points[j]
triangles, scalars = create_triangles(vertices)
x = vertices[:,0]
y = vertices[:,1]
if color_mode == 'property':
colors = props[Inds[j]]
colors = np.mean(colors[np.array(triangles)], axis = 1)
ax.tripcolor(x, y, triangles=triangles, facecolors = colors, cmap = 'YlOrBr_r',
edgecolors = 'none', vmin = 0, vmax = 0.35)
if color_mode == 'facies':
colors = faciess[Inds[j]]
colors = np.median(colors[np.array(triangles)], axis = 1)
cmap = ListedColormap(list_of_colors)
ax.tripcolor(x, y, triangles=triangles, facecolors = colors, edgecolors = 'none', cmap = cmap, vmin = 0, vmax = len(list_of_colors))
if np.mod(i, line_freq) == 0:
ax.plot(np.arange(0, dx*r, dx), top, 'k', linewidth = 0.5)
if i == ts-2:
ax.plot(np.arange(0, dx*r, dx), base, 'k', linewidth = 0.5)
ax.set_xlim(0, dx*(r-1))
if flattening_ind:
ax.set_ylim(np.nanmin(strat[:,:,0] - strat[:, :, flattening_ind]),
np.nanmax(strat[:,:,-1] - strat[:, :, flattening_ind]))
else:
ax.set_ylim(np.nanmin(strat), np.nanmax(strat))
ax.set_xlabel('distance (' + units + ')')
ax.set_ylabel('depth (' + units + ')')
axin.imshow(strat[:, :, -1], cmap='viridis', aspect = map_aspect)
axin.set_xticks([])
axin.set_yticks([])
axin.plot([xsec, xsec], [0, r-1], 'k')
# plt.tight_layout()
if ve:
ax.set_aspect(ve, adjustable='datalim')
return fig
def resample_elevation_spl(time, elevation, sampling_rate):
spl = interpolate.splrep(time, elevation, s=0.5)
time_new = np.arange(time[0], time[-1]+1, sampling_rate)
elevation_new = interpolate.splev(time_new, spl)
return time_new, elevation_new
def resample_elevation_int1d(time, elevation, sampling_rate):
f = interpolate.interp1d(time, elevation)
time_new = np.arange(time[0], time[-1]+1, sampling_rate)
elevation_new = f(time_new)
return time_new, elevation_new | zsylvester/stratigraph | stratigraph/stratigraph.py | stratigraph.py | py | 50,938 | python | en | code | 8 | github-code | 6 |
582921826 | import numpy as np
import argparse
# parser = argparse.ArgumentParser(description='Keypoints distance computing script')
# parser.add_argument(
# '--origin_image_file', type=str, required=False,
# help='path to a file containing the keypoints and descriptors of the first image'
# )
# parser.add_argument(
# '--destination_image_file', type=str, required=False,
# help='path to a file containing the keypoints and descriptors of the second image'
# )
# parser.add_argument(
# '--matrix_file', type=str, required=False,
# help='path to a file containing the transform matrix from the origin image to the destination image'
# )
# parser.add_argument(
# '--threshold', type=int, default=1,
# help='the maximum distance between two points to be considered as a pair'
# )
# args = parser.parse_args()
def keypoints_distance(file1, file2, matrix_file, threshold):
origin_keypoints = np.load(file1)['keypoints']
destination_keypoints = np.load(file2)['keypoints']
transform_matrix = np.loadtxt(matrix_file)
origin_keypoints_transform = np.hstack(np.array([0,0]))
keypoints_distances = np.hstack(np.array([0 for _ in range(destination_keypoints.shape[0])]))
# print("----")
# print("Counting keypoints pairs between", file1, "and", file2)
# print("Threshold is set to", threshold)
# print(f"{origin_keypoints.shape[0]} keypoints in {file1}")
# print(f"{destination_keypoints.shape[0]} keypoints in {file2}")
# print("----")
for og_keypoint in origin_keypoints:
og_keypoint = np.append(og_keypoint, 1)
keypoint_transform = np.matmul(og_keypoint, transform_matrix)
keypoint_transform = keypoint_transform[:-1]
keypoint_distance_vector = np.empty(0)
for dst_keypoint in destination_keypoints:
keypoint_distance_vector = np.append(keypoint_distance_vector, np.linalg.norm(keypoint_transform-dst_keypoint))
keypoints_distances = np.vstack((keypoints_distances, keypoint_distance_vector))
origin_keypoints_transform = np.vstack((origin_keypoints_transform, keypoint_transform))
keypoints_distances = np.delete(keypoints_distances, 0, 0)
valid_pairs_count = 0
for distances_list in keypoints_distances:
for distance in distances_list:
if distance <= threshold:
valid_pairs_count += 1
break
return(round((valid_pairs_count / origin_keypoints.shape[0])*100, 2))
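# Hedged usage sketch (file names are placeholders): each .npz file is assumed
# to hold a 'keypoints' array of (x, y) rows, and the matrix file a 3x3
# homogeneous transform readable with np.loadtxt.
# pct = keypoints_distance("img1.npz", "img2.npz", "transform.txt", threshold=2)
# The result is the percentage of origin keypoints that land within
# 'threshold' pixels of some destination keypoint after the transform.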
# print(keypoints_distance(args.origin_image_file, args.destination_image_file, args.matrix_file, args.threshold))
| vqlion/PTIR-Image-Processing | test_keypoints_distance.py | test_keypoints_distance.py | py | 2,595 | python | en | code | 0 | github-code | 6 |
5308746110 | from copy import deepcopy
arr = [[None]*4 for _ in range(4)]
for i in range(4):
row = list(map(int, input().split()))
for j in range(4):
        # (fish number, direction)
arr[i][j] = [row[j*2], row[j*2+1]-1]
dirs = [(-1, 0), (-1, -1), (0, -1),
(1, -1), (1, 0), (1, 1), (0, 1), (-1, 1)]
# return the direction after rotating once to the left (counterclockwise)
def turn_left(direction):
return (direction+1) % 8
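# Hedged note: directions are indexed 0-7 counterclockwise starting at "up",
# so turn_left(0) == 1 maps up -> up-left, matching the dirs table above.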
# find the fish with the given number
def find_fish(arr, idx):
for i in range(4):
for j in range(4):
if arr[i][j][0] == idx:
return (i,j)
return None
# rotate and move all fish - (arr, shark position)
def move_all_fishes(arr, now_r, now_c):
for i in range(1, 17):
position = find_fish(arr, i)
        if position is not None:
r, c = position[0], position[1]
direction = arr[r][c][1]
            # rotate the fish until it finds a cell it can move to
for j in range(8):
dr, dc = dirs[direction]
nr, nc = dr + r, dc + c
                # move if the target cell is available
if 0 <= nr < 4 and 0 <= nc < 4:
if not (nr == now_r and nc == now_c):
arr[r][c][1] = direction
arr[r][c], arr[nr][nc] = arr[nr][nc], arr[r][c]
break
direction = turn_left(direction)
# return the positions of the fish the shark can eat
def can_eat_fishes(arr, now_r, now_c):
fishes = []
direction = arr[now_r][now_c][1]
    # keep moving in the current direction
for i in range(4):
dr, dc = dirs[direction]
now_r, now_c = dr+now_r, dc+now_c
        # check that the position is within bounds
if 0 <= now_r < 4 and 0 <= now_c <4:
            # if a fish is present
if arr[now_r][now_c][0] != -1:
fishes.append((now_r, now_c))
return fishes
# explore every case with DFS
def dfs(arr, now_r, now_c, total):
global result
arr = deepcopy(arr)
    total += arr[now_r][now_c][0] # eat the fish at the current position
    arr[now_r][now_c][0] = -1 # mark the cell as empty after eating
move_all_fishes(arr, now_r, now_c)
    # find the positions the shark can move to
fishes = can_eat_fishes(arr, now_r, now_c)
if not fishes:
result = max(result, total)
return
for next_r, next_c in fishes:
dfs(arr, next_r, next_c, total)
result = 0
dfs(arr, 0, 0, 0)
print(result)
| louisuss/Algorithms-Code-Upload | Python/DongbinBook/simulation/kid_shark_solution.py | kid_shark_solution.py | py | 2,490 | python | ko | code | 0 | github-code | 6 |
37076357644 | """
Find the LCA of Binary Tree.
https://www.youtube.com/watch?v=13m9ZCB8gjw
"""
def lca(root, n1, n2):
if root is None:
return None
if root.data == n1 or root.data == n2:
return root
node_left = lca(root.left, n1, n2)
node_right = lca(root.right, n1, n2)
if node_left is not None and node_right is not None:
return root
    if node_left is None and node_right is None:
return None
return node_left if node_left is not None else node_right
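# Hedged usage sketch (assumes a Node class with .data/.left/.right attributes,
# which is not defined in this file):
# root = Node(1); root.left = Node(2); root.right = Node(3)
# lca(root, 2, 3).data -> 1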
| piyush9194/data_structures_with_python | data_structures/trees/lowest_common_ancestor_bt.py | lowest_common_ancestor_bt.py | py | 502 | python | en | code | 0 | github-code | 6 |
41039585752 | import logging
import random
import string
import time
import sys
from decimal import Decimal
from typing import Any, Callable, Optional, TypeVar, Union
import requests
from vega_sim.grpc.client import VegaCoreClient, VegaTradingDataClientV2
from vega_sim.proto.data_node.api.v2.trading_data_pb2 import GetVegaTimeRequest
from vega_sim.proto.vega.api.v1.core_pb2 import StatisticsRequest
from vega_sim.proto.vega.markets_pb2 import Market
from vega_sim.tools.retry import retry
T = TypeVar("T")
TIME_FORWARD_URL = "{base_url}/api/v1/forwardtime"
logger = logging.getLogger(__name__)
class DataNodeBehindError(Exception):
pass
class ProposalNotAcceptedError(Exception):
pass
def generate_id(n: int) -> str:
return "".join(random.choices(string.ascii_lowercase + (2 * string.digits), k=n))
def get_enum(value: Union[str, T, int], enum_class: Any) -> T:
return (
value
if isinstance(value, (type(enum_class), int))
else getattr(enum_class, value)
)
def enum_to_str(e: Any, val: int) -> str:
return e.keys()[e.values().index(val)]
def num_to_padded_int(to_convert: float, decimals: int) -> int:
return int(Decimal(str(to_convert)) * Decimal(10**decimals))
def num_from_padded_int(to_convert: Union[str, int], decimals: int) -> float:
if not to_convert:
return 0
to_convert = int(to_convert) if isinstance(to_convert, str) else to_convert
return float(to_convert) / 10**decimals
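# Hedged round-trip example (values are illustrative):
# num_to_padded_int(1.23, 5) -> 123000
# num_from_padded_int(123000, 5) -> 1.23
# Decimal is used on the way in so float artefacts cannot shift the result
# (e.g. int(0.29 * 100) == 28, while num_to_padded_int(0.29, 2) == 29).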
def wait_for_datanode_sync(
trading_data_client: VegaTradingDataClientV2,
core_data_client: VegaCoreClient,
max_retries: int = 650,
) -> None:
"""Waits for Datanode to catch up to vega core client.
Note: Will wait for datanode 'latest' time to catch up to core time
when function is called. This avoids the case where a datanode
consistently slightly behind the core client never returns.
As such, this ensures that the data node has data from the core
*at the time of call* not necessarily the latest data when the function returns.
Wait time is exponential with increasing retries
(each attempt waits 0.05 * 1.03^attempt_num seconds).
"""
attempts = 1
core_time = retry(
10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp
)
trading_time = retry(
10, 0.5, lambda: trading_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp
)
while core_time > trading_time:
logging.debug(f"Sleeping in wait_for_datanode_sync for {0.005 * 1.1**attempts}")
time.sleep(0.0005 * 1.1**attempts)
try:
trading_time = retry(
10,
2.0,
lambda: trading_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp,
)
except Exception as e:
            logging.warning(e)
trading_time = sys.maxsize
attempts += 1
if attempts >= max_retries:
raise DataNodeBehindError(
f"Data Node is behind and not catching up after {attempts} retries"
)
def wait_for_core_catchup(
core_data_client: VegaCoreClient,
max_retries: int = 200,
) -> None:
"""Waits for core node to fully execute everything in it's backlog.
Note that this operates by a rough cut of requesting time twice and checking for it
being unchanged, so only works on nullchain where we control time. May wait forever
in a standard tendermint chain
"""
attempts = 1
core_time = retry(
10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp
)
time.sleep(0.0001)
core_time_two = retry(
10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp
)
while core_time != core_time_two:
logging.debug(f"Sleeping in wait_for_core_catchup for {0.05 * 1.03**attempts}")
core_time = retry(
10,
0.5,
lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp,
)
time.sleep(0.0001 * 1.03**attempts)
core_time_two = retry(
10,
0.5,
lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp,
)
attempts += 1
if attempts >= max_retries:
raise DataNodeBehindError(
f"Core Node is behind and not catching up after {attempts} retries"
)
def wait_for_acceptance(
submission_ref: str,
submission_load_func: Callable[[str], T],
) -> T:
logger.debug("Waiting for proposal acceptance")
submission_accepted = False
for i in range(50):
try:
proposal = submission_load_func(submission_ref)
except:
time.sleep(0.001 * 1.1**i)
continue
if proposal:
logger.debug("Your proposal has been accepted by the network")
submission_accepted = True
break
time.sleep(0.001 * 1.1**i)
if not submission_accepted:
raise ProposalNotAcceptedError(
"The network did not accept the proposal within the specified time"
)
return proposal
def forward(time: str, vega_node_url: str) -> None:
"""Steps chain forward a given amount of time, either with an amount of time or
until a specified time.
Args:
time:
str, time argument to use when stepping forwards. Either an increment
(e.g. 1s, 10hr etc) or an ISO datetime (e.g. 2021-11-25T14:14:00Z)
vega_node_url:
str, url for a Vega nullchain node
"""
payload = {"forward": time}
req = requests.post(TIME_FORWARD_URL.format(base_url=vega_node_url), json=payload)
req.raise_for_status()
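# Hedged usage sketch (the URL is a placeholder for a local nullchain node):
# forward("10s", "http://localhost:3101")                  # step 10 seconds
# forward("2021-11-25T14:14:00Z", "http://localhost:3101") # step to a datetime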
def statistics(core_data_client: VegaCoreClient):
return retry(
10, 0.5, lambda: core_data_client.Statistics(StatisticsRequest()).statistics
)
def get_settlement_asset(market: Market) -> str:
return get_product(market).settlement_asset
def get_product(market: Market) -> Any:
product = market.tradable_instrument.instrument.WhichOneof("product")
if product is None:
raise Exception(f"product not set for market '{market.id}'")
return getattr(market.tradable_instrument.instrument, product)
| vegaprotocol/vega-market-sim | vega_sim/api/helpers.py | helpers.py | py | 6,261 | python | en | code | 19 | github-code | 6 |
34465917082 | import torch
from torch.utils.data import DataLoader
from .coco_dataset import build_dataset
def batch_collator(batch):
images, boxmgrs = list(zip(*batch))
images = torch.stack(images, dim=0)
return images, boxmgrs
def build_dataloader(cfg, is_train=True):
dataset = build_dataset(cfg, is_train=is_train)
batch_size = (
cfg["DATA"]["BATCH_SIZE_TRAIN"] if is_train else cfg["DATA"]["BATCH_SIZE_VAL"]
)
return DataLoader(
dataset, batch_size, shuffle=is_train, num_workers=10, collate_fn=batch_collator
)
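# Hedged usage sketch (cfg keys mirror the lookups above; the values are made
# up, and build_dataset must be able to construct the underlying COCO dataset):
# cfg = {"DATA": {"BATCH_SIZE_TRAIN": 16, "BATCH_SIZE_VAL": 8}}
# loader = build_dataloader(cfg, is_train=True)
# for images, boxmgrs in loader:  # images: stacked tensor, boxmgrs: tuple
#     ...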
| lmyybh/computer-vision | yolo/yolo/data/dataloader.py | dataloader.py | py | 558 | python | en | code | 0 | github-code | 6 |
15018597005 | from utils.utils import OS
import sys
if OS.Linux:
import matplotlib
matplotlib.use("agg")
import json
import math
import multiprocessing
import random
from multiprocessing import Pool
from threading import Thread
from typing import Union, Callable
from uuid import UUID
import networkx
from Model.Computation import Computation
from Model.GraphLayoutAlgorithm import GraphLayoutAlgorithm
from Model.Network.Edge import Edge
from Model.Network.Flow import Flow
from Model.Network.Network import Network
from Model.Network.Node import Node
from Model.Result import Result
from utils import utils
class ModelFacade:
def __init__(self, network: Network = None):
self.computation_pools = {}
self.pending_results = {}
if network is None:
network = Network(model=self)
self.network = network
def add_node(self, x: int = 0, y: int = 0, name: str = None, is_selected=False, uid=None) -> Node:
"""
Adds a node to the model.
:param x: x coordinate
:param y: y coordinate
:param name: node name
:param is_selected: selection state
:param uid: UID
:return: node object
"""
if name is None:
name = f"S{self.network.node_counter}"
node = Node(name=str(name), is_selected=is_selected, x=x, y=y, uid=uid)
self.network.node_counter += 1
self.network.add_node(node)
self.network.notify((node, "add_node"))
return node
def add_edge(self, start_node: Node, end_node: Node, name=None, is_selected=False, uid=None) -> Edge:
"""
Adds an edge to the model.
:param start_node: start node
:param end_node: end node
:param name: edge name
:param is_selected: selection state
:param uid: UID
:return: edge object
"""
try:
if start_node is end_node:
raise ValueError("start node equals end node")
if end_node in start_node.edges or start_node in end_node.edges:
raise ValueError("edge already exists")
except ValueError as e:
self.notify_error(e)
raise e
if name is None:
name = f"E{self.network.edge_counter}"
edge = Edge(name=name, is_selected=is_selected, start_node=start_node, end_node=end_node, uid=uid)
self.network.edge_counter += 1
self.network.add_edge(edge)
self.network.notify((edge, "add_edge"))
start_node.notify((edge, "add_edge"))
end_node.notify((edge, "add_edge"))
return edge
def add_flow(self, path: list[Node], name=None, is_selected=False, uid=None) -> Flow:
"""
Adds a flow to the model.
:param path: path of the flow
:param name: flow name
:param is_selected: selection state
:param uid: UID
:return: flow object
"""
if name is None:
name = f"F{self.network.flow_counter}"
try:
if len(path) < 2:
raise ValueError("Path too short")
except ValueError as e:
self.notify_error(e)
raise e
self.add_missing_edges_to_path(path, is_selected=is_selected)
flow = Flow(name=name, is_selected=is_selected, path=path, uid=uid)
self.network.flow_counter += 1
self.network.add_flow(flow)
self.network.notify((flow, "add_flow"))
for node in path:
node.notify((flow, "add_flow"))
return flow
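    # Hedged usage sketch (names and coordinates are illustrative):
    # model = ModelFacade()
    # a = model.add_node(0, 0); b = model.add_node(100, 0); c = model.add_node(200, 0)
    # flow = model.add_flow([a, b, c])  # missing edges a-b and b-c are created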
def add_missing_edges_to_path(self, path: list[Node], is_selected=False):
"""
Adds edges to the model based on the given path.
:param path: flow path
:param is_selected: selection state of the edges
"""
for index, start_node in enumerate(path):
if index + 1 == len(path):
return
end_node = path[index + 1]
if end_node not in start_node.edges:
self.add_edge(start_node, end_node, is_selected=is_selected)
def select_component(self, component: Union[Node, Edge, Flow], activity):
"""
Sets the selection state of a component.
:param component: component object
:param activity: selection state
"""
if component.is_selected != activity:
if activity:
self.network.selected_components.append(component)
else:
self.network.selected_components.remove(component)
component.is_selected = activity
component.notify((component, "set_selection"))
def unselect_all_components(self):
"""
Deselects every component of the model.
"""
for component in self.network.selected_components:
component.is_selected = False
component.notify((component, "set_selection"))
self.network.selected_components.clear()
def delete_component(self, component: Union[Node, Edge, Flow]):
"""
Deletes a component from the model.
:param component: component object
"""
if isinstance(component, Node):
self.network.delete_node(component)
elif isinstance(component, Edge):
self.network.delete_edge(component)
elif isinstance(component, Flow):
self.network.delete_flow(component)
def add_configuration(self, network_configuration_class, name=None) -> type['AbstractNetworkConfiguration']:
"""
Adds a configuration to the model.
:param network_configuration_class: a network configuration class
:param name: configuration name
:return: instance of a network configuration
"""
if name is None:
name = f"{network_configuration_class.get_configuration_name()}-{self.network.configs_counter}"
self.network.configs_counter += 1
flow_ids_of_interest = list(self.network.flows.keys())
network_configuration = network_configuration_class(name=name, flow_ids_of_interest=flow_ids_of_interest,
model=self, associated_component=self.network)
group_id = network_configuration.group_id
self.network.configurations[group_id] = network_configuration
self.network.notify((network_configuration, "add_configuration"))
node_configuration_class = network_configuration_class.get_node_configuration_class()
for node in self.network.nodes.values():
node.configurations[group_id] = node_configuration_class(group_id=group_id, model=self,
associated_component=node)
node.notify((node.configurations[group_id], "add_configuration"))
edge_configuration_class = network_configuration_class.get_edge_configuration_class()
for edge in self.network.edges.values():
edge.configurations[group_id] = edge_configuration_class(group_id=group_id, model=self,
associated_component=edge)
edge.notify((edge.configurations[group_id], "add_configuration"))
flow_configuration_class = network_configuration_class.get_flow_configuration_class()
for flow in self.network.flows.values():
flow.configurations[group_id] = flow_configuration_class(group_id=group_id, model=self,
associated_component=flow)
flow.notify((flow.configurations[group_id], "add_configuration"))
return network_configuration
def delete_configuration(self, configuration):
"""
Deletes a network configuration from the model.
:param configuration: configuration object
"""
group_id = configuration.group_id
if group_id in self.network.configurations:
for flow in self.network.flows.values():
del flow.configurations[group_id]
flow.notify((group_id, "delete_configuration"))
for edge in self.network.edges.values():
del edge.configurations[group_id]
edge.notify((group_id, "delete_configuration"))
for node in self.network.nodes.values():
del node.configurations[group_id]
node.notify((group_id, "delete_configuration"))
del self.network.configurations[group_id]
self.network.notify((group_id, "delete_configuration"))
def update_network_name(self, name: str):
"""
Updates the network name.
:param name: new name
"""
self.network.update_name(name)
def update_network_information(self, new_text: str):
"""
Updates the network information.
:param new_text: new information text
"""
self.network.information_text = new_text
self.network.notify((self.network, "information_text"))
@staticmethod
def update_node_name(node: Node, name: str):
"""
Updates the name of a node.
:param node: node object
:param name: new name
"""
node.name = name
node.notify((node, "set_parameter"))
@staticmethod
def update_node_x_y(node: Node, x: int, y: int):
"""
Updates the x and y coordinates of a node.
:param node: node object
:param x: x coordinate
:param y: y coordinate
"""
node.x = x
node.y = y
node.notify((node, "set_parameter"))
@staticmethod
def update_edge_name(edge: Edge, name: str):
"""
Updates the name of an edge.
:param edge: edge object
:param name: new name
"""
edge.name = name
edge.notify((edge, "set_parameter"))
@staticmethod
def update_flow_name(flow: Flow, name: str):
"""
Updates the name of a flow.
:param flow: flow object
:param name: new name
"""
flow.name = name
flow.notify((flow, "set_parameter"))
def insert_node_in_flow_path(self, flow: Flow, node: Node, index: int = None, is_edge_selected=False):
"""
Inserts a node to an existing path at a specific index.
:param flow: flow object
:param node: node object
:param index: insertion index, use 'None' to append
:param is_edge_selected: selection state of the edge
"""
if index is None:
self.append_node_to_flow_path(flow, node, is_edge_selected=is_edge_selected)
else:
flow.path.insert(index, node)
try:
self.add_missing_edges_to_path(flow.path, is_selected=is_edge_selected)
except Exception as e:
flow.path.remove(node)
raise e
flow.notify((flow, "update_path"))
def append_node_to_flow_path(self, flow: Flow, node: Node, is_edge_selected=False):
"""
Appends a node to an existing flow path.
:param flow: flow object
:param node: node object
:param is_edge_selected: selection state of the edge
"""
if node not in flow.path[-1].edges:
self.add_edge(flow.path[-1], node, is_selected=is_edge_selected)
flow.path.append(node)
flow.notify((flow, "update_path"))
def update_flow_color(self, flow: Flow, color: str):
"""
Update the color of a flow.
:param flow: flow object
:param color: color as RGB string (#RRGGBB)
"""
self.check_valid_color(color)
flow.color = color
flow.notify((flow, "set_parameter"))
def update_flow_highlight_color(self, flow: Flow, highlight_color: str):
"""
Update the highlight color of a flow.
:param flow: flow object
:param highlight_color: color as RGB string (#RRGGBB)
"""
self.check_valid_color(highlight_color)
flow.highlight_color = highlight_color
flow.notify((flow, "set_parameter"))
def run_computation(self, number_of_workers=None) -> Computation:
"""
Runs the computation with the current configurations.
:param number_of_workers: number of processes
:return: instance of a computation
"""
        sys.setrecursionlimit(900000)
        active_configurations = list(filter(lambda c: c.is_active, self.network.configurations.values()))
if len(active_configurations) == 0 or not self.network.flows:
raise ValueError("No active configuration or no flows")
def on_error(error, error_result):
error_result.error()
self.notify_error(error)
def make_error_callback(error_result):
return lambda error: on_error(error, error_result)
if number_of_workers is None:
process_count = min(multiprocessing.cpu_count(), len(active_configurations))
else:
process_count = number_of_workers
computation = Computation()
if OS.Linux:
try:
multiprocessing.set_start_method('spawn')
except RuntimeError:
pass
self.computation_pools[computation] = pool = Pool(processes=process_count)
for configuration in active_configurations:
result = Result(configuration_name=configuration.name, configuration_id=configuration.group_id)
self.pending_results[result.id] = result
computation.add_result(result)
pool.apply_async(self._compute_configuration, (configuration, result),
callback=self._notify_result,
error_callback=make_error_callback(result))
computation.start()
self.network.notify((computation, "computation_started"))
Thread(target=lambda: (pool.close(), pool.join(), self._end_computation(computation))).start()
return computation
def _compute_configuration(self, configuration, result: Result):
"""
Computes a configuration.
:param configuration: configuration object
:param result: result object to store results
:return: result object
"""
result.start()
configuration.compute(self.network, result)
result.finish()
return result
def _end_computation(self, computation: Computation):
"""
Ends the computation.
:param computation: computation object
"""
computation.finish()
if computation in self.computation_pools:
del self.computation_pools[computation]
def cancel_computation(self, computation: Computation):
"""
Cancels a running computation.
:param computation: computation object
"""
computation.cancel()
if computation in self.computation_pools:
self.computation_pools[computation].terminate()
def _notify_result(self, pickled_result: Result):
"""
Notify the subscribers of the result object about finishing the computation.
:param pickled_result: includes the computation results
"""
original_result = self.pending_results[pickled_result.id]
original_result.__dict__ = original_result.__dict__ | pickled_result.__dict__
original_result.notify((original_result, "finished_result"))
del self.pending_results[pickled_result.id]
def update_configuration_parameters(self, configuration, dictionary: dict):
"""
Updates parameters of a configuration.
:param configuration: configuration object
:param dictionary: parameter dictionary
"""
try:
configuration.update_parameter_dict(dictionary)
except Exception as error:
self.notify_error(error)
raise error
finally:
configuration.notify((configuration, "set_parameter"))
def update_configuration_name(self, group_id: UUID, new_name: str):
"""
Updates the name of a configuration.
:param group_id: group ID of the configuration
:param new_name: new name
"""
configuration = self.network.configurations[group_id]
configuration.name = new_name
configuration.notify((configuration, "set_parameter"))
for flow in self.network.flows.values():
configuration = flow.configurations[group_id]
configuration.notify((configuration, "set_parameter"))
for edge in self.network.edges.values():
configuration = edge.configurations[group_id]
configuration.notify((configuration, "set_parameter"))
for node in self.network.nodes.values():
configuration = node.configurations[group_id]
configuration.notify((configuration, "set_parameter"))
def add_flow_of_interest(self, group_id: UUID, flow: Flow):
"""
Adds a flow of interest to a list of flow of interests.
:param group_id: group ID of a configuration
:param flow: flow object
"""
configuration = self.network.configurations[group_id]
if flow.id not in configuration.flow_ids_of_interest:
configuration.flow_ids_of_interest.append(flow.id)
configuration.notify((configuration, "set_parameter"))
flow_configuration = flow.configurations[group_id]
flow_configuration.is_flow_of_interest = True
flow_configuration.notify((flow_configuration, "set_parameter"))
def remove_flow_of_interest(self, group_id: UUID, flow: Flow):
"""
Removes a flow of interest from a list of flow of interests.
:param group_id: group ID of a configuration
:param flow: flow object
"""
configuration = self.network.configurations[group_id]
if flow.id in configuration.flow_ids_of_interest:
configuration.flow_ids_of_interest.remove(flow.id)
configuration.notify((configuration, "set_parameter"))
flow_configuration = flow.configurations[group_id]
flow_configuration.is_flow_of_interest = False
flow_configuration.notify((flow_configuration, "set_parameter"))
def notify_error(self, error: Exception):
"""
Notify the subscribers of the network about an error.
:param error: error
"""
utils.print_exception_traceback(error)
self.network.notify((error, "error"))
def import_network(self, json_file: str):
"""
Imports a network from a JSON file.
:param json_file: JSON file
"""
try:
new_model = ModelFacade.load_network(json_file)
except Exception as e:
self.notify_error(e)
return
self.network.notify((new_model, "new_model"))
def generate_random_network_in_new_model(self, seed=None, min_num_nodes=100, max_num_nodes=150, min_num_edges=200,
max_num_edges=200, delete_not_connected_nodes=True) -> 'ModelFacade':
"""
Generates a network and returns a new model.
:param seed: network seed
:param min_num_nodes: minimum number of nodes
:param max_num_nodes: maximum number of nodes
:param min_num_edges: minimum number of edges
:param max_num_edges: maximum number of edges
:param delete_not_connected_nodes: whether unconnected node should be removed
:return: model
"""
rand = random.Random(seed)
model = ModelFacade(network=Network(network_id=UUID(bytes=rand.randbytes(16), version=4)))
used_nodes = set()
num_nodes = rand.randint(min_num_nodes, max_num_nodes)
column_num = int(math.sqrt(num_nodes))
y = -1
for i in range(num_nodes):
x = i % column_num
if x == 0:
y += 1
model.add_node(x * 200, y * 200, uid=UUID(bytes=rand.randbytes(16), version=4))
all_nodes = list(model.network.nodes.values())
num_edges = rand.randint(min_num_edges, max_num_edges)
for i in range(num_edges):
node_i = rand.choice(all_nodes)
node_j = rand.choice(all_nodes)
while node_j == node_i:
node_j = rand.choice(all_nodes)
try:
if node_j not in node_i.edges and node_i not in node_j.edges:
model.add_edge(node_i, node_j, uid=UUID(bytes=rand.randbytes(16), version=4))
used_nodes.add(node_i)
used_nodes.add(node_j)
except ValueError:
continue
if delete_not_connected_nodes:
for node in used_nodes.symmetric_difference(model.network.nodes.values()):
model.delete_component(node)
self.network.notify((model, "new_model"))
return model
def generate_networkx_network_in_new_model(self, generator: Callable[..., networkx.Graph], uid_seed=None,
**kwargs) -> 'ModelFacade':
"""
Generates a network using NetworkX and returns a new model.
:param generator: NetworkX generation function
:param uid_seed: seed to recreate UIDs of network components
:param kwargs: arguments for the NetworkX generation function
:return: model
"""
uid_rand = random.Random(uid_seed)
try:
graph: networkx.Graph = generator(**kwargs)
scale = ModelFacade._get_layout_scale(graph)
pos = networkx.kamada_kawai_layout(graph, scale=scale)
model = ModelFacade()
mapping = {}
for nx_node in graph.nodes:
x, y = pos[nx_node]
node = model.add_node(x=x, y=y, name=nx_node, uid=UUID(bytes=uid_rand.randbytes(16), version=4))
mapping[nx_node] = node
for (i, j) in graph.edges:
node_i, node_j = mapping[i], mapping[j]
model.add_edge(node_i, node_j, uid=UUID(bytes=uid_rand.randbytes(16), version=4))
except Exception as e:
self.notify_error(e)
raise e
self.network.notify((model, "new_model"))
return model
def add_random_flows(self, min_num_flows=5, max_num_flows=10, min_num_nodes_in_flow_path=2,
max_num_nodes_in_flow_path=10,
flow_seed=None):
"""
Adds random flows to a network.
:param min_num_flows: minimum number of flows
:param max_num_flows: maximum number of flows
:param min_num_nodes_in_flow_path: minimum number of nodes in a flow path
:param max_num_nodes_in_flow_path: maximum number of nodes in a flow path
:param flow_seed: flow seed
"""
if min_num_nodes_in_flow_path < 2 or max_num_nodes_in_flow_path < 2:
raise ValueError("Minimal 2 nodes needed for flow")
graph = self.make_networkx_graph()
nodes = list(self.network.nodes.values())
rand = random.Random(flow_seed)
target_num_flows = rand.randint(min_num_flows, max_num_flows)
failed_combinations = set()
max_combinations = len(nodes) ** 2 * (max_num_nodes_in_flow_path - min_num_nodes_in_flow_path + 1)
added_flows = 0
while added_flows < target_num_flows:
start_node = rand.choice(nodes)
end_node = rand.choice(nodes)
path_length = rand.randint(min_num_nodes_in_flow_path, max_num_nodes_in_flow_path)
if start_node == end_node:
failed_combinations.add((start_node, end_node, path_length))
if len(failed_combinations) == max_combinations:
raise ValueError(
f"Cannot find a simple path with length " +
f"[{min_num_nodes_in_flow_path}, {max_num_nodes_in_flow_path}]")
while (start_node, end_node, path_length) in failed_combinations:
start_node = rand.choice(nodes)
end_node = rand.choice(nodes)
path_length = rand.randint(min_num_nodes_in_flow_path, max_num_nodes_in_flow_path)
found_path = None
for path in networkx.all_simple_paths(graph, start_node, end_node, cutoff=path_length):
if len(path) == path_length:
found_path = path
break
if found_path is None:
failed_combinations.add((start_node, end_node, path_length))
continue
self.add_flow(path=found_path, uid=UUID(bytes=rand.randbytes(16), version=4))
added_flows += 1
def make_networkx_graph(self) -> networkx.Graph:
"""
Translates a network into a NetworkX graph.
:return: NetworkX graph
"""
graph = networkx.Graph()
graph.add_nodes_from(self.network.nodes.values())
edges = self.network.edges.values()
starts = map(lambda e: e.start, edges)
ends = map(lambda e: e.end, edges)
graph.add_edges_from(zip(starts, ends))
return graph
def change_graph_layout(self, layout: GraphLayoutAlgorithm = GraphLayoutAlgorithm.fruchterman_reingold):
"""
Changes the layout of the network using NetworkX.
:param layout: NetworkX layout type
"""
G = networkx.Graph()
initial_pos = {}
for node_id, node in self.network.nodes.items():
G.add_node(node_id)
initial_pos[node_id] = node.x, node.y
for edge in self.network.edges.values():
G.add_edge(edge.start.id, edge.end.id)
scale = ModelFacade._get_layout_scale(G)
try:
if layout == GraphLayoutAlgorithm.fruchterman_reingold:
pos = networkx.fruchterman_reingold_layout(G, k=scale, pos=initial_pos, scale=scale)
elif layout == GraphLayoutAlgorithm.planar:
pos = networkx.planar_layout(G, scale=scale * 1.5)
elif layout == GraphLayoutAlgorithm.shell:
pos = networkx.shell_layout(G, scale=scale)
elif layout == GraphLayoutAlgorithm.kamada_kawai:
pos = networkx.kamada_kawai_layout(G, pos=initial_pos, scale=scale)
elif layout == GraphLayoutAlgorithm.spectral:
pos = networkx.spectral_layout(G, scale=scale)
elif layout == GraphLayoutAlgorithm.spiral:
pos = networkx.spiral_layout(G, scale=scale)
elif layout == GraphLayoutAlgorithm.circular:
pos = networkx.circular_layout(G, scale=scale)
elif layout == GraphLayoutAlgorithm.random:
pos = networkx.random_layout(G)
for node_id, coord in pos.items():
pos[node_id] = scale * coord
else:
raise ValueError("Unknown Algorithm")
except Exception as e:
self.notify_error(e)
raise e
for node_id, (x, y) in pos.items():
node = self.network.nodes[node_id]
self.update_node_x_y(node, int(x), int(y))
@staticmethod
def _get_layout_scale(G: networkx.Graph):
"""
Returns the scale of the NetworkX graph.
:param G: NetworkX graph
:return: scale
"""
num_components = G.number_of_nodes() + G.number_of_edges()
base = 1.07
if num_components <= 20:
x = math.log(num_components, base)
elif num_components <= 50:
x = math.log(num_components, base - 0.03)
elif num_components <= 100:
x = math.log(num_components, base - 0.04)
elif num_components <= 500:
x = math.log(num_components, base - 0.05)
else:
x = math.log(num_components, base - 0.06)
return x * 10
def _get_flows_as_networkx_digraph(self):
"""
Creates a digraph containing flows.
:return: NetworkX digraph
"""
g = networkx.DiGraph()
flow: Flow
for flow in self.network.flows.values():
for index, node_i in enumerate(flow.path):
if index + 1 == len(flow.path):
break
g.add_edge(node_i, flow.path[index + 1])
return g
def are_flows_cyclic(self):
"""
Checks whether the network contains cyclic flows.
:return: whether flows are cyclic
"""
g = self._get_flows_as_networkx_digraph()
return not networkx.is_directed_acyclic_graph(g)
def get_flow_cycles(self):
"""
Returns cyclic flows.
:return: simple cycles as defined by NetworkX
"""
g = self._get_flows_as_networkx_digraph()
return networkx.simple_cycles(g)
def export_network(self, path: str):
"""
Exports the network as JSON file.
:param path: path to the JSON file
"""
with open(path, 'w') as f:
json_string = self.network.to_json()
f.write(json_string)
@staticmethod
def load_network(path: str):
"""
Loads a network from a JSON file.
:param path: path to the JSON file
:return: the network model
"""
with open(path) as json_file:
json_dict = json.load(json_file)
model = ModelFacade()
network = Network.from_json(json_dict, model)
model.network = network
return model
def check_valid_color(self, color: str):
"""
Checks whether a color is in a valid RGB format.
:param color: color string
        :raises ValueError: if the string is not in "#rrggbb" format
"""
try:
if len(color) != 7:
raise ValueError()
int(color[1:7], 16)
except ValueError:
e = ValueError(f"Illegal Color: \"{color}\". Expected \"#rrggbb\" format.")
self.notify_error(e)
raise e
def __getstate__(self):
"""
Used to pickle the object for process communication
:return: network dictionary
"""
return {"network": self.network}
| Moni5656/npba | Model/ModelFacade.py | ModelFacade.py | py | 30,489 | python | en | code | 0 | github-code | 6 |
33040837881 | import io
import struct
from typing import Any, BinaryIO
class StructStream(int):
PACK = ""
"""
Create a class that can parse and stream itself based on a struct.pack template string.
"""
def __new__(cls: Any, value: int):
value = int(value)
try:
v1 = struct.unpack(cls.PACK, struct.pack(cls.PACK, value))[0]
if value != v1:
raise ValueError(f"Value {value} does not fit into {cls.__name__}")
except Exception:
bits = struct.calcsize(cls.PACK) * 8
raise ValueError(
f"Value {value} of size {value.bit_length()} does not fit into " f"{cls.__name__} of size {bits}"
)
return int.__new__(cls, value) # type: ignore
@classmethod
def parse(cls: Any, f: BinaryIO) -> Any:
bytes_to_read = struct.calcsize(cls.PACK)
read_bytes = f.read(bytes_to_read)
assert read_bytes is not None and len(read_bytes) == bytes_to_read
return cls(*struct.unpack(cls.PACK, read_bytes))
def stream(self, f):
f.write(struct.pack(self.PACK, self))
@classmethod
def from_bytes(cls: Any, blob: bytes) -> Any: # type: ignore
f = io.BytesIO(blob)
result = cls.parse(f)
assert f.read() == b""
return result
def __bytes__(self: Any) -> bytes:
f = io.BytesIO()
self.stream(f)
return bytes(f.getvalue())
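# Usage sketch (hypothetical subclass, in the style of chia's util/ints.py):
# class uint16(StructStream):
#     PACK = ">H"
# uint16.from_bytes(b"\x02\x01") == 513 and bytes(uint16(513)) == b"\x02\x01"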
| snight1983/chia-rosechain | chia/util/struct_stream.py | struct_stream.py | py | 1,440 | python | en | code | 369 | github-code | 36 |
34098196022 | import uvicorn
from pyroute2 import IPRoute
from fastapi import FastAPI
ipr = IPRoute()
ipr.bind()
app = FastAPI()
@app.get("/iface/{iface_name}")
async def iface_id(iface_name):
with IPRoute() as ipr:
iface = ipr.link_lookup(ifname=iface_name)
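        # link_lookup returns a list of matching interface indexes; this assumes
        # the given name resolves to at least one interface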
return {"iface": iface[0]}
| andreagarbugli/iaac-tc-quic | tc-daemon/router.py | router.py | py | 293 | python | en | code | 0 | github-code | 36 |
23442724105 | inp = input("Enter your input here ")
length = len(inp)  # renamed from "len" so the built-in is not shadowed
decoding = True
if(decoding):
    if(length<3):
        print(inp[::-1])
    else:
        core=inp[3:-3]
        #move the last character of the trimmed middle to the front
        newStr=core[-1]+core[:-1]
        print(newStr) | somya143/python_learning | decoding.py | decoding.py | py | 240 | python | en | code | 1 | github-code | 36 |
39908117264 | from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import logging
import time
import argparse
import json
import ast
AllowedActions = ['both', 'publish', 'subscribe']
file_path = "../History.log"
faults = []
fault_type = ""
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="a28yobe9j1e4my-ats.iot.us-east-2.amazonaws.com")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="x509root.crt")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="f9f5eadeff-certificate.pem.crt")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="f9f5eadeff-private.pem.key")
parser.add_argument("-p", "--port", action="store", dest="port", type=int, help="Port number override")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
help="Use MQTT over WebSocket")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub",
help="Targeted client id")
parser.add_argument("-t", "--topic", action="store", dest="topic", default="topic/getData", help="Targeted topic")
parser.add_argument("-m", "--mode", action="store", dest="mode", default="both",
help="Operation modes: %s"%str(AllowedActions))
parser.add_argument("-M", "--message", action="store", dest="message", default="Hello World!",
help="Message to publish")
args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
port = args.port
useWebsocket = args.useWebsocket
clientId = args.clientId
topic = args.topic
if args.mode not in AllowedActions:
parser.error("Unknown --mode option %s. Must be one of %s" % (args.mode, str(AllowedActions)))
exit(2)
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
exit(2)
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
parser.error("Missing credentials for authentication.")
exit(2)
# Port defaults
if args.useWebsocket and not args.port: # When no port override for WebSocket, default to 443
port = 443
if not args.useWebsocket and not args.port: # When no port override for non-WebSocket, default to 8883
port = 8883
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = None
if useWebsocket:
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True)
myAWSIoTMQTTClient.configureEndpoint(host, port)
myAWSIoTMQTTClient.configureCredentials(rootCAPath)
else:
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
myAWSIoTMQTTClient.configureEndpoint(host, port)
myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTClient connection configuration
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect and subscribe to AWS IoT
myAWSIoTMQTTClient.connect()
time.sleep(5)
try:
with open(file_path, 'r') as file:
idx = 0
while True:
file.seek(0)
for i in range(idx):
file.readline()
new_lines = file.readlines()
if new_lines:
for l in new_lines:
idx += 1
l = l.strip()
if l.startswith("-"):
if len(faults) > 0:
if args.mode == 'both' or args.mode == 'publish':
data = json.dumps({'deviceID': "OBD-II_Dongle", "data": { 'fault': fault_type, "codes": faults}})
try:
myAWSIoTMQTTClient.publish(topic, data, 1)
except Exception as error:
print('Error while sending data to DB: {}'.format(error))
myAWSIoTMQTTClient.connect()
time.sleep(5)
myAWSIoTMQTTClient.publish(topic, data, 1)
if args.mode == 'publish':
print('Published topic %s: %s\n' % (topic, data))
fault_type, faults = "", []
else:
sidx = l.find('>') + 1
eidx = l.find(';')
if l.startswith('DTC'):
dtc = l[sidx:eidx].strip()
faults.append(dtc)
elif l.startswith('20'):
fault_type = l[sidx:eidx].strip()
else:
time.sleep(0.5)
continue
except FileNotFoundError:
print(f"File '{file_path}' not found.")
except IOError as e:
print(f"Error reading file: {e}")
| ngonza27/ctp-ngv-23 | src/py/send_data.py | send_data.py | py | 5,270 | python | en | code | 0 | github-code | 36 |
11503756221 | def fizz_buzz(max_val=100):
    '''fizz_buzz is an implementation of a popular programming question.
    It is an illustration of the futility of a language without switch/case
    statements.
    Arguments:
    -- max_val: fizz_buzz runs over [0, max_val) (default: 100)
    '''
    for num in range(0, max_val):
        if num%3 == 0:
            if num%5 == 0:
                # In this case, num is evenly divisible by both 3 and 5.
                print('fizzbuzz!')
            else:
                print('fizz')
        elif num%5 == 0:
            print('buzz')
        else:
            print("num: {}".format(num))
if __name__ == '__main__':
fizz_buzz()
| alextoombs/learning-python | fizzbuzz/fizzbuzz.py | fizzbuzz.py | py | 680 | python | en | code | 0 | github-code | 36 |
14007737341 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from torchvision.utils import save_image
import matplotlib.pyplot as plt
import numpy as np
import random
class AutoEncoderNet(torch.nn.Module):
def __init__(self, n_channels, dim_last_layer, latent_features):
super(AutoEncoderNet, self).__init__()
        n_flatten = int(torch.prod(torch.tensor(dim_last_layer)))  # nn.Linear expects a plain int
self.encoder = nn.Sequential(
nn.Conv2d(n_channels, 16, 5),
nn.ReLU(),
nn.Conv2d(16, 32, 5,stride=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, 64, 5,stride=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(64, 64, 5,stride=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Flatten(start_dim=1),
nn.Linear(n_flatten, 512),
nn.ReLU(),
nn.Linear(512, latent_features)
)
self.decoder = nn.Sequential(
nn.Linear(latent_features, n_flatten),
nn.ReLU(),
nn.Unflatten(1,dim_last_layer),
nn.ConvTranspose2d(64, 64, 5,stride=2,output_padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.ConvTranspose2d(64, 32, 5,stride=2,output_padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.ConvTranspose2d(32, 16, 5,stride=2,output_padding=1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.ConvTranspose2d(16, n_channels, 5),
nn.Sigmoid()
)
def forward(self, x):
latent_space = self.encoder(x)
x_reconstruction = self.decoder(latent_space)
return latent_space, x_reconstruction
| s183920/02582_Computational_Data_Analysis_Case2 | autoencoder/ae.py | ae.py | py | 1,937 | python | en | code | 0 | github-code | 36 |
6198323090 | '''
1. Build a condition for each input length and each character?
2. It's brute force, so just write the logic and count up one at a time? <- This is fine:
   the largest case is only about 5^5 entries, so there is no time-limit risk.
Going with approach 2; the real question is how to structure the logic.
'''
from itertools import product
def solution(word):
answer = []
for i in range(1,6):
for v in product(["A","E","I","O","U"],repeat = i):
answer.append("".join(v))
answer.sort()
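    # answer holds every vowel word of length 1..5: 5 + 25 + 125 + 625 + 3125 = 3905 entries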
return answer.index(word)+1 | byeong-chang/Baekjoon-programmers | 프로그래머스/lv2/84512. 모음 사전/모음 사전.py | 모음 사전.py | py | 518 | python | ko | code | 2 | github-code | 36 |
34212427030 | lista_idades = []
lista_pessoas = []
nome = input("Enter a name: ")
while (nome.lower() != 'fim'):
    idade = int(input("Enter an age: "))
    lista_idades.append(idade)
    lista_pessoas.append(nome)
    nome = input("Enter a name: ")
print("\n\nNames entered\n===========")
print(lista_pessoas)
print("\nAges\n================")
print(lista_idades) | robsondejesus1996/Pos-Graduacao-Python | EstruturaRepeticao/ComandoWhile.py | ComandoWhile.py | py | 366 | python | pt | code | 0 | github-code | 36 |
6620739257 | import random
def avalia(sequencia,matriz):
distancia_atual = 0
print("o caminho atual é:"+str(sequencia))
for linha in range(len(sequencia)):
for posicao in range(len(sequencia)):
if linha+1<len(sequencia) and posicao==sequencia[linha+1]:
print("A proxima posicao do caminho é:"+str(posicao))
print("O valor até a posicao é:"+str(matriz[sequencia[linha]][posicao]))
distancia_atual+=matriz[sequencia[linha]][posicao]
distancia_atual += matriz[sequencia[-1]][sequencia[0]]
return distancia_atual
def Solucao_inicial(tamanhomattriz):
sequencia_atual = random.sample(range(tamanhomattriz),tamanhomattriz)
return sequencia_atual
matriz = [[0,10,20,30],[10,0,40,50],[20,40, 0,60],[30,50,60,0]]
sol = Solucao_inicial(len(matriz))
avaliAa = avalia(sol,matriz)
print(sol)
print(avaliAa) | Igao2/CodigosRandom | exemplojorge.py | exemplojorge.py | py | 899 | python | pt | code | 0 | github-code | 36 |
22579941668 | import sys
import heapq
INF = sys.maxsize
V, E = map(int, input().split())
K = int(input())
node = [[] for _ in range(V+1)]
for _ in range(E):
start, end, value = map(int, input().split())
node[start].append((value, end))
def dijkstra():
hq = []
distLi = [INF for _ in range(V+1)]
heapq.heappush(hq, (0, K))
distLi[K] = 0
while hq:
d, s = heapq.heappop(hq)
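        # standard optimization: skip stale heap entries that were already superseded
        if d > distLi[s]:
            continue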
        for dist, nxt in node[s]:  # "nxt" avoids shadowing the built-in next
            newDist = dist + d
            if newDist < distLi[nxt]:
                distLi[nxt] = newDist
                heapq.heappush(hq, (newDist, nxt))
return distLi
answer = dijkstra()[1:]
for a in answer:
if a == INF:
print('INF')
continue
print(a) | heisje/Algorithm | baekjoon/1753_최단경로.py | 1753_최단경로.py | py | 721 | python | en | code | 0 | github-code | 36 |
22382283158 | import shutil
import tempfile
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django import forms
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.cache import cache
from ..models import Group, Post, Follow
User = get_user_model()
TEST_OF_POST = 13
FIRST_OF_POSTS = 10
TEMP_MEDIA_ROOT = tempfile.mktemp(dir=settings.BASE_DIR)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class PostViewsTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create(username='tes')
def setUp(self):
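        # minimal valid GIF byte string below, used to exercise ImageField uploads in the tests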
self.small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
self.uploaded = SimpleUploadedFile(
name='small.gif',
content=self.small_gif,
content_type='image/gif'
)
self.unauthorized_client = Client()
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
self.group = Group.objects.create(
id=1,
title='Тестовая группа',
slug='slug',
description='Тестовое описание',
)
self.post = Post.objects.create(
author=self.user,
text='Тестовая пост какойто',
group=self.group,
image='posts/small.gif',
)
@classmethod
def tearDownClass(cls):
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
super().tearDownClass()
def test_pages_uses_correct_template(self):
"""URL-адрес использует соответствующий шаблон."""
templates_page_names = {
'posts/index.html': reverse('posts:index'),
'posts/group_list.html': (
reverse('posts:group_list', kwargs={'slug': 'slug'})
),
'posts/profile.html': (
reverse('posts:profile',
kwargs={'username': self.user.username})
),
'posts/post_detail.html': (
reverse('posts:post_detail',
kwargs={'post_id': self.post.pk})
),
'posts/create_post.html': reverse('posts:post_create'),
}
for template, reverse_name in templates_page_names.items():
with self.subTest(template=template):
response = self.authorized_client.get(reverse_name)
self.assertTemplateUsed(response, template)
def test_index_show_correct_context(self):
"""Шаблон index сформирован с правильным контекстом."""
response = self.authorized_client.get(reverse('posts:index'))
first_object = response.context['posts'][0]
post_text_0 = first_object.text
post_group_0 = first_object.group
post_author_0 = first_object.author
post_image_0 = first_object.image
self.assertEqual(post_text_0, 'Тестовая пост какойто')
self.assertEqual(post_author_0, self.user)
self.assertEqual(post_group_0, self.group)
self.assertEqual(post_image_0, 'posts/small.gif')
def test_group_list_show_correct_context(self):
"""Шаблон group_list сформирован с правильным контекстом."""
response = self.authorized_client.get(reverse(
'posts:group_list', kwargs={'slug': 'slug'})
)
first_object = response.context['posts'][0]
post_group_0 = first_object.group
self.assertEqual(post_group_0, self.group)
post_image_0 = first_object.image
self.assertEqual(post_image_0, 'posts/small.gif')
    def test_profile_show_correct_context(self):
        """The profile template is built with the correct context."""
response = self.authorized_client.get(reverse(
'posts:profile', kwargs={'username': self.user.username})
)
first_object = response.context['post_list'][0]
post_author_0 = first_object.author
post_image_0 = first_object.image
self.assertEqual(post_author_0, self.user)
self.assertEqual(post_image_0, 'posts/small.gif')
def test_post_detail_show_correct_context(self):
"""Шаблон post_detail сформирован с правильным контекстом."""
response = self.authorized_client.get(reverse(
'posts:post_detail', kwargs={'post_id': self.post.pk})
)
first_object = response.context.get('post')
post_text_0 = first_object.text
post_image_0 = first_object.image
self.assertEqual(post_text_0, 'Тестовая пост какойто')
self.assertEqual(post_image_0, 'posts/small.gif')
def test_post_detail_page_list_is_1(self):
"""На post_detail передаётся ожидаемое количество объектов"""
response = self.authorized_client.get(reverse(
'posts:post_detail', kwargs={'post_id': self.post.pk})
)
self.assertEqual(response.context['post_count'], 1)
def test_create_correct_context(self):
"""Шаблон create сформирован с правильным контекстом."""
response = self.authorized_client.get(reverse('posts:post_create'))
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
'image': forms.ImageField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context.get('form').fields.get(value)
self.assertIsInstance(form_field, expected)
def test_post_added_correctly_user(self):
"""Пост при создании виден на странице выбранной группы,
в профайле пользовател и на главной странице"""
group2 = Group.objects.create(title='Тестовая группа 2',
slug='test_group2')
posts_count = Post.objects.filter(group=self.group).count()
post = Post.objects.create(
text='Тестовый пост от другого автора',
author=self.user,
group=group2)
response_profile = self.authorized_client.get(
reverse('posts:profile',
kwargs={'username': f'{self.user.username}'}))
response_home = self.authorized_client.get(
reverse('posts:index'))
group = Post.objects.filter(group=self.group).count()
profile = response_profile.context['post_list']
home = response_home.context['posts']
self.assertEqual(group, posts_count)
self.assertIn(post, profile)
self.assertIn(post, home)
def test_index_cache_context(self):
"""Проверка кэширования страницы index"""
before_create_post = self.authorized_client.get(
reverse('posts:index'))
first_item_before = before_create_post.content
Post.objects.create(
author=self.user,
text='Проверка кэша',
group=self.group,
image=self.uploaded
)
after_create_post = self.authorized_client.get(reverse('posts:index'))
first_item_after = after_create_post.content
self.assertEqual(first_item_after, first_item_before)
cache.clear()
after_clear = self.authorized_client.get(reverse('posts:index'))
self.assertNotEqual(first_item_after, after_clear)
class PaginatorViewsTest(TestCase):
def setUp(self):
self.client = Client()
self.guest_client = Client()
self.user = User.objects.create_user(username='auth')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
self.group = Group.objects.create(
id=1,
title='Тестовая группа',
slug='slug',
description='Тестовое описание',
)
bilk_post: list = []
for i in range(TEST_OF_POST):
bilk_post.append(Post(text=f'Тестовая, пост какойто {i}',
group=self.group,
author=self.user))
Post.objects.bulk_create(bilk_post)
def test_correct_page_context_guest_client(self):
"""Проверка количества постов на первой и второй страницах."""
pages: tuple = (reverse('posts:index'),
reverse('posts:profile',
kwargs={'username': f'{self.user.username}'}),
reverse('posts:group_list',
kwargs={'slug': f'{self.group.slug}'}))
for page in pages:
response1 = self.client.get(page)
response2 = self.client.get(page + '?page=2')
count_posts1 = len(response1.context['page_obj'])
count_posts2 = len(response2.context['page_obj'])
self.assertEqual(count_posts1, FIRST_OF_POSTS)
self.assertEqual(count_posts2, TEST_OF_POST - FIRST_OF_POSTS)
class FollowViewsTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='auth1')
cls.user2 = User.objects.create_user(username='auth2')
cls.author = User.objects.create_user(username='someauthor')
def setUp(self):
self.guest_client = Client()
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
self.authorized_client2 = Client()
self.authorized_client2.force_login(self.user2)
def test_user_follower_authors(self):
"""Посты доступны пользователю, который подписался на автора.
Увеличение подписок автора"""
count_follow = Follow.objects.filter(user=FollowViewsTest.user).count()
data_follow = {'user': FollowViewsTest.user,
'author': FollowViewsTest.author}
url_redirect = reverse(
'posts:profile',
kwargs={'username': FollowViewsTest.author.username})
response = self.authorized_client.post(
reverse('posts:profile_follow', kwargs={
'username': FollowViewsTest.author.username}),
data=data_follow, follow=True)
new_count_follow = Follow.objects.filter(
user=FollowViewsTest.user).count()
self.assertTrue(Follow.objects.filter(
user=FollowViewsTest.user,
author=FollowViewsTest.author).exists())
self.assertRedirects(response, url_redirect)
self.assertEqual(count_follow + 1, new_count_follow)
def test_unfollower_no_see_new_post(self):
"""У не подписчика поста нет"""
new_post_follower = Post.objects.create(
author=FollowViewsTest.author,
text='Текстовый текст')
Follow.objects.create(user=FollowViewsTest.user,
author=FollowViewsTest.author)
response_unfollower = self.authorized_client2.get(
reverse('posts:follow_index'))
new_post_unfollower = response_unfollower.context['page_obj']
self.assertNotIn(new_post_follower, new_post_unfollower)
def test_follower_see_new_post(self):
"""У подписчика появляется новый пост избранного автора."""
new_post_follower = Post.objects.create(
author=FollowViewsTest.author,
text='Текстовый текст')
Follow.objects.create(user=FollowViewsTest.user,
author=FollowViewsTest.author)
response_follower = self.authorized_client.get(
reverse('posts:follow_index'))
new_posts = response_follower.context['page_obj']
self.assertIn(new_post_follower, new_posts)
| krankir/Social-network | yatube/posts/tests/test_views.py | test_views.py | py | 12,656 | python | en | code | 0 | github-code | 36 |
73256078825 | # CBAM attention mechanism module (Convolutional Block Attention Module)
import torch
from torch import nn
class ChannelAttention(nn.Module):
def __init__(self, channel, ratio=16):
super(ChannelAttention, self).__init__()
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel,channel//ratio,False),
nn.ReLU(),
nn.Linear(channel//ratio,channel,False)
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b,c,h,w = x.size()
max_pool_out = self.max_pool(x).view([b,c])
avg_pool_out = self.avg_pool(x).view([b,c])
max_fc_out = self.fc(max_pool_out)
avg_fc_out = self.fc(avg_pool_out)
out = max_fc_out + avg_fc_out
out = self.sigmoid(out).view([b,c,1,1])
return out*x
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, h, w = x.size()
        max_pool_out, _ = torch.max(x, dim=1, keepdim=True)  # torch.max over a dim returns (values, indices)
mean_pool_out = torch.mean(x, dim=1, keepdim=True)
pool_out =torch.cat([max_pool_out, mean_pool_out], dim=1)
out = self.conv(pool_out)
out = self.sigmoid(out)
return out*x
class cbam(nn.Module):
def __init__(self, channel, ratio=16,kernel_size=7):
super(cbam, self).__init__()
self.channel_attention = ChannelAttention(channel,ratio)
self.spatial_attention = SpatialAttention(kernel_size)
def forward(self,x):
x = self.channel_attention(x)
x = self.spatial_attention(x)
return x
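# Usage sketch: CBAM is typically inserted after a conv block and preserves shape.
# attn = cbam(channel=512)
# y = attn(torch.randn(2, 512, 14, 14))  # y has the same shape as the input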
# model = cbam(512)
# print(model)
| DickensKP/Yolov3-vehicle-pedestrian-trafficsign-detection-system | CBAM.py | CBAM.py | py | 2,002 | python | en | code | 4 | github-code | 36 |
17887863275 | """
A window that integrates a flow layout and arranged buttons.
"""
from PySide2.QtWidgets import QScrollArea, QWidget, QToolButton, QVBoxLayout, QSpacerItem, QSizePolicy
from PySide2.QtCore import Qt, QSize
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from PySide2.QtGui import QResizeEvent
from widgets import PMFlowLayout
class PMFlowAreaWidget(QWidget):
def __init__(self):
super().__init__()
from widgets import PMFlowLayout
self.outer_layout = QVBoxLayout()
self.flow_layout = PMFlowLayout()
self.setMinimumWidth(100)
self.outer_layout.addLayout(self.flow_layout)
spacer_v = QSpacerItem(20, 20, QSizePolicy.Minimum,
QSizePolicy.Expanding)
self.outer_layout.addItem(spacer_v)
self.setLayout(self.outer_layout)
def add_widget(self, w: 'QWidget'):
self.flow_layout.add_widget(w)
def setup_ui(self):
if hasattr(self.widget(), 'setup_ui'):
self.widget().setup_ui()
def resizeEvent(self, a0: 'QResizeEvent') -> None:
super().resizeEvent(a0)
layout: 'PMFlowLayout' = self.flow_layout
layout.on_resize()
class PMFlowArea(QScrollArea):
def __init__(self, parent=None):
super().__init__(parent)
self.flow_widget = PMFlowAreaWidget()
self.widgets_list = self.flow_widget.flow_layout.widgets_list
self.setWidget(self.flow_widget)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setWidgetResizable(True)
    def set_layout_content_margins(
            self, left: int, top: int, right: int, bottom: int):
        # Qt's setContentsMargins takes (left, top, right, bottom)
        self.flow_widget.flow_layout.setContentsMargins(left, top, right, bottom)
def add_tool_button(self, name: str, text: str, icon_path: str = ''):
from widgets import create_icon
b = QToolButton()
b.setText(text)
icon = create_icon(icon_path)
b.setIcon(icon)
b.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
b.setIconSize(QSize(40, 40))
b.setMaximumWidth(80)
b.setMinimumWidth(80)
b.setMinimumHeight(60)
b.setMaximumHeight(60)
self.add_widget(b)
return b
def add_widget(self, w: 'QWidget'):
self.widget().add_widget(w)
return w
def setup_ui(self):
if hasattr(self.widget(), 'setup_ui'):
self.widget().setup_ui()
if __name__ == '__main__':
from PySide2.QtWidgets import QApplication, QPushButton
import sys
app = QApplication(sys.argv)
sa = PMFlowArea()
for i in range(10):
w = sa.add_widget(QPushButton('ad%d' % i))
w.setMaximumHeight(60)
w.setMinimumHeight(60)
w.setMinimumWidth(100)
w.setMaximumWidth(100)
sa.show()
sys.exit(app.exec_())
| pyminer/pyminer | pyminer/widgets/widgets/basic/containers/flowarea.py | flowarea.py | py | 2,813 | python | en | code | 77 | github-code | 36 |
1416947614 | #Duc Nguyen
#15/02/2023
#This code draws 3 shapes, a square, a triangle and a hexagon, with a variety of
#colour fills, outline colours and pen sizes
import turtle # Allows us to use turtles
alex = turtle.Turtle() # Create a turtle, assign to alex
wn = turtle.Screen() # create a window for our design
#begins drawing the square with colour fill
alex.begin_fill() #starts colour fill
alex.fillcolor("SeaGreen1") #selects colour
for side in range(4):
alex.forward(100)
alex.left(90)
alex.end_fill() #ends fill
#begins drawing the triangle with colour fill
alex.begin_fill() #starts colour fill
alex.fillcolor("SeaGreen2") #selects colour
for side in range(3):
alex.forward(100)
alex.left(120)
alex.end_fill() #ends fill
#begins drawing the Hexagon with thick lines & coloured lines
alex.pencolor("SeaGreen3") #pen colour change
alex.pensize(5) #pen thickness change
for side in range(6):
alex.forward(100)
alex.left(60)
wn.exitonclick() # get rid of the screen. Last step
| Kaizuu08/PythonShowcase2023Semester1 | Week 3/colours.py | colours.py | py | 1,041 | python | en | code | 0 | github-code | 36 |
38072028175 | from migen import *
from migen.genlib.io import *
from migen.genlib.misc import BitSlip, WaitTimer
from litex.soc.interconnect import stream
from litex.soc.cores.code_8b10b import Encoder, Decoder
from liteiclink.serwb.datapath import TXDatapath, RXDatapath
class _KUSerdesClocking(Module):
def __init__(self, pads, mode="master"):
self.refclk = Signal()
# # #
# In Master mode, generate the linerate/10 clock. Slave will re-multiply it.
if mode == "master":
converter = stream.Converter(40, 8)
self.submodules += converter
self.comb += [
converter.sink.valid.eq(1),
converter.source.ready.eq(1),
converter.sink.data.eq(Replicate(Signal(10, reset=0b1111100000), 4)),
]
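            # 0b1111100000 is five ones then five zeros: serialized at the line rate this
            # yields a clock at linerate/10, and Replicate(..., 4) fills the 40-bit word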
self.specials += [
Instance("OSERDESE3",
p_DATA_WIDTH=8, p_INIT=0,
p_IS_CLK_INVERTED=0, p_IS_CLKDIV_INVERTED=0,
p_IS_RST_INVERTED=0,
o_OQ=self.refclk,
i_RST=ResetSignal("sys"),
i_CLK=ClockSignal("sys4x"), i_CLKDIV=ClockSignal("sys"),
i_D=converter.source.data
),
DifferentialOutput(self.refclk, pads.clk_p, pads.clk_n)
]
# In Slave mode, multiply the clock provided by Master with a PLL/MMCM
elif mode == "slave":
self.specials += DifferentialInput(pads.clk_p, pads.clk_n, self.refclk)
class _KUSerdesTX(Module):
def __init__(self, pads):
# Control
self.idle = idle = Signal()
self.comma = comma = Signal()
# Datapath
self.sink = sink = stream.Endpoint([("data", 32)])
# # #
# Datapath
self.submodules.datapath = datapath = TXDatapath(8)
self.comb += [
sink.connect(datapath.sink),
datapath.source.ready.eq(1),
datapath.idle.eq(idle),
datapath.comma.eq(comma)
]
# Output Data(DDR with sys4x)
data = Signal()
self.specials += [
Instance("OSERDESE3",
p_DATA_WIDTH=8, p_INIT=0,
p_IS_CLK_INVERTED=0, p_IS_CLKDIV_INVERTED=0, p_IS_RST_INVERTED=0,
o_OQ=data,
i_RST=ResetSignal("sys"),
i_CLK=ClockSignal("sys4x"), i_CLKDIV=ClockSignal("sys"),
i_D=datapath.source.data
),
DifferentialOutput(data, pads.tx_p, pads.tx_n)
]
class _KUSerdesRX(Module):
def __init__(self, pads):
# Control
self.delay_rst = Signal()
self.delay_inc = Signal()
self.bitslip_value = bitslip_value = Signal(6)
# Status
self.idle = idle = Signal()
self.comma = comma = Signal()
# Datapath
self.source = source = stream.Endpoint([("data", 32)])
# # #
# Data input (DDR with sys4x)
data_nodelay = Signal()
data_delayed = Signal()
data_deserialized = Signal(8)
self.specials += [
DifferentialInput(pads.rx_p, pads.rx_n, data_nodelay),
Instance("IDELAYE3",
p_CASCADE="NONE", p_UPDATE_MODE="ASYNC", p_REFCLK_FREQUENCY=200.0,
p_IS_CLK_INVERTED=0, p_IS_RST_INVERTED=0,
p_DELAY_FORMAT="COUNT", p_DELAY_SRC="IDATAIN",
p_DELAY_TYPE="VARIABLE", p_DELAY_VALUE=0,
i_CLK=ClockSignal("sys"),
i_RST=self.delay_rst, i_LOAD=0,
i_INC=1, i_EN_VTC=0,
i_CE=self.delay_inc,
i_IDATAIN=data_nodelay, o_DATAOUT=data_delayed
),
Instance("ISERDESE3",
p_IS_CLK_INVERTED=0,
p_IS_CLK_B_INVERTED=1,
p_DATA_WIDTH=8,
i_D=data_delayed,
i_RST=ResetSignal("sys"),
i_FIFO_RD_CLK=0, i_FIFO_RD_EN=0,
i_CLK=ClockSignal("sys4x"),
i_CLK_B=ClockSignal("sys4x"), # locally inverted
i_CLKDIV=ClockSignal("sys"),
o_Q=data_deserialized
)
]
# Datapath
self.submodules.datapath = datapath = RXDatapath(8)
self.comb += [
datapath.sink.valid.eq(1),
datapath.sink.data.eq(data_deserialized),
datapath.bitslip_value.eq(bitslip_value),
datapath.source.connect(source),
idle.eq(datapath.idle),
comma.eq(datapath.comma)
]
@ResetInserter()
class KUSerdes(Module):
def __init__(self, pads, mode="master"):
self.submodules.clocking = _KUSerdesClocking(pads, mode)
self.submodules.tx = _KUSerdesTX(pads)
self.submodules.rx = _KUSerdesRX(pads)
| kamejoko80/linux-on-litex-vexriscv-legacy | liteiclink/liteiclink/serwb/kuserdes.py | kuserdes.py | py | 4,841 | python | en | code | 0 | github-code | 36 |
38105669317 | #!/usr/bin/env python3
from manim import *
import numpy as np
FONT_COLOR= "#282828"
NO_TEX_FONT = "Bookerly"
# NO_TEX_FONT = "JetBrains Mono"
# NO_TEX_FONT = "JuliaMono"
TEX_TEMPLATE = TexTemplate()
TEX_TEMPLATE.add_to_preamble(r"\usepackage{amsbsy}")
TEX_TEMPLATE.add_to_preamble(r"\usepackage{amsmath}")
TEX_TEMPLATE.add_to_preamble(r"\usepackage{mathtools}")
DM_CHANGE_COLOR = "#8F3F71"
VAL_COLOR_RIGHT = "#427B58"
VAL_COLOR_WRONG = "#B16286"
LEFT_BRACKET = '('
RIGHT_BRACKET = ')'
"""
slices of DM
1. 2:9
"""
EPSILON = r"\epsilon = 1"
class lernmatrix_1(Scene):
"""Main scene for lermatrix didactic resource."""
def construct(self):
"""scene constructor."""
self.camera.background_color = WHITE
COLOR_SLICES_DM = np.array([[2, 9],
[7, 14],
[12, 19]])
#################
# introduction
#################
title = Text("La Lernmatrix", font=NO_TEX_FONT, slant=ITALIC,
color=FONT_COLOR)
title.font_size *= 1.7
subtitle = Text("Ejemplo 1", font=NO_TEX_FONT, color=FONT_COLOR)
subtitle.next_to(title, DOWN)
self.play(Write(title, scale=2), run_time=3.5)
self.play(title.animate.shift(1.5*UP))
self.play(Write(subtitle))
self.wait(2)
self.remove(title, subtitle)
#################
# training set
#################
title = Text("Conjunto de Entrenamiento", font=NO_TEX_FONT,
color=FONT_COLOR)
# title.font_size *= 0.9
training_set = MathTex(
r"""
\boldsymbol{x^1} = \begin{pmatrix}
1 \\ 0 \\ 1 \\ 0 \\ 1
\end{pmatrix}
\boldsymbol{y^1} = \begin{pmatrix}
1 \\ 0 \\ 0
\end{pmatrix} &;
\boldsymbol{x^2} = \begin{pmatrix}
1 \\ 1 \\ 0 \\ 0 \\ 1
\end{pmatrix}
\boldsymbol{y^2} = \begin{pmatrix}
0 \\ 1 \\ 0
\end{pmatrix};
\\
\boldsymbol{x^3} = \begin{pmatrix}
1 \\ 0 \\ 1 \\ 1 \\ 0
\end{pmatrix}
& \mbox{ }
\boldsymbol{y^3} = \begin{pmatrix}
0 \\ 0 \\ 1
\end{pmatrix}
""",
tex_template=TEX_TEMPLATE, color=FONT_COLOR)
self.play(FadeIn(title, scale=2), run_time=1.2)
self.play(title.animate.to_edge(UP), run_time=2)
training_set.next_to(title, DOWN)
self.play(FadeIn(training_set, scale=1.1), run_time=2)
self.wait(1.5)
self.remove(title, training_set)
        # training phase
title = Text("Fase de Entrenamiento", font=NO_TEX_FONT,
color=FONT_COLOR)
title.font_size *= 1.25
self.play(Write(title), run_time=1.5)
self.play(title.animate.to_edge(UP), run_time=1.2)
M = np.zeros((3, 5), dtype=int)
X = np.array([[1, 0, 1, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 1, 1, 0]])
Y = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
M_str = np_to_pmatrix(M)
M_manim = IntegerMatrix(M, left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
M_manim.color = FONT_COLOR
M_tex = MathTex('M = ', color=FONT_COLOR)
M_manim.next_to(M_tex, RIGHT)
M_group = Group(M_tex, M_manim)
M_group.width *= 0.75
M_group.to_edge(LEFT)
training_set.scale(0.7)
training_set.to_edge(RIGHT)
self.play(FadeIn(M_group),
FadeIn(training_set),
run_time=2)
self.wait(4)
self.play(FadeOut(title),
FadeOut(M_group),
FadeOut(training_set),
run_time=1)
self.remove(title, M_group, training_set)
self.wait(0.8)
# training loop
epsilon_tex = MathTex(EPSILON, color=FONT_COLOR)
epsilon_tex.next_to(title, DOWN)
epsilon_tex.to_edge(RIGHT)
epsilon_tex.shift(DOWN * 0.15)
epsilon_tex.shift(LEFT * 0.25)
epsilon_tex.scale(1.5)
epsilon_tex.set_color(BLUE_E)
for i in range(X.shape[0]):
title = Text(f"Par asociado {i + 1}", font=NO_TEX_FONT,
color=FONT_COLOR)
title.to_edge(UP)
x_i = X[i, :]
y_i = Y[i, :]
x_i_tex = MathTex(r"\boldsymbol{" + f"x^{i + 1}" + "} = ",
color=FONT_COLOR)
x_i_manim = IntegerMatrix(x_i.reshape((1, X.shape[1])),
left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
x_i_manim.color = FONT_COLOR
x_i_manim.next_to(x_i_tex, RIGHT)
x_i_group = Group(x_i_tex, x_i_manim)
y_i_tex = MathTex(r"\boldsymbol{" + f"y^{i + 1}" + "} = ",
color=FONT_COLOR)
y_i_manim = IntegerMatrix(y_i.reshape((Y.shape[1], 1)),
left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
y_i_manim.color = FONT_COLOR
y_i_manim.next_to(y_i_tex, RIGHT)
y_i_group = Group(y_i_tex, y_i_manim)
# actual Lernmatrix training
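            # Steinbuch learning rule for this pair: in the class row where y_i == 1,
            # add +epsilon where x_j == 1 and -epsilon where x_j == 0 (epsilon = 1 here)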
DM = np.zeros((Y.shape[1], X.shape[1]), dtype=int)
rows = y_i == 1
row_idx = np.where(rows)[0][0]
cols_plus = x_i == 1
cols_minus = x_i == 0
DM[rows, cols_plus] = 1
DM[rows, cols_minus] = -1
# location of elements on screen
DM_manim = IntegerMatrix(DM, left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
DM_manim.color = FONT_COLOR
DM_manim.shift(DOWN)
x_i_group.width *= 1.03
x_i_group.next_to(DM_manim, UP)
x_i_group.shift(0.62 * LEFT)
y_i_group.next_to(DM_manim, LEFT)
y_i_group.shift(0.4 * LEFT)
y_i_group.shift(0.04 * DOWN)
# color elements that will change during training
DM_manim.get_rows()[row_idx].color = DM_CHANGE_COLOR
y_i_manim.get_rows()[row_idx].color = DM_CHANGE_COLOR
self.play(FadeIn(title),
FadeIn(epsilon_tex),
FadeIn(DM_manim, scale=1),
FadeIn(x_i_group),
FadeIn(y_i_group),
run_time=2)
self.wait(2)
self.remove(DM_manim, x_i_group, y_i_group)
NM = M + DM
NM_tex = MathTex(r"M &= M + \Delta M_{" + f"{i+1}" + r"} \\ &= "
+ np_to_pmatrix(M)
+ r" + "
+ np_to_pmatrix(DM)
+ r"\\ &= "
+ np_to_pmatrix(NM),
tex_template=TEX_TEMPLATE, color=FONT_COLOR)
NM_tex.shift(DOWN)
self.play(FadeIn(NM_tex), run_time=3)
self.wait(4)
M = NM
self.remove(title, NM_tex, epsilon_tex)
title = Text("Final del entrenamiento", font=NO_TEX_FONT,
color=FONT_COLOR)
title.to_edge(UP)
M_tex = MathTex(r"M &= "
+ np_to_pmatrix(M),
tex_template=TEX_TEMPLATE, color=FONT_COLOR)
M_tex.scale(1.4)
self.play(FadeIn(title),
FadeIn(M_tex),
run_time=2)
self.wait(4)
self.remove(title, M_tex)
#######################################
        # Recall phase. Resubstitution Error
#######################################
title = Text("Fase de Recuperación", font=NO_TEX_FONT,
color=FONT_COLOR)
subtitle = Text("Resubstitution Error", font=NO_TEX_FONT,
color=FONT_COLOR)
subtitle.font_size *= 0.85
subtitle.next_to(title, DOWN)
self.play(Write(title),
Write(subtitle), run_time=2)
self.play(FadeOut(subtitle, shift=DOWN),
FadeOut(title, shift=UP), run_time=1.5)
self.remove(subtitle, title)
self.wait(2)
        # recall loop
total_test = 0
right = 0
times = MathTex(r" \times ", color=FONT_COLOR)
equals = MathTex(" = ", color=FONT_COLOR)
y_omega = MathTex(r"y^{\omega} = ", color=FONT_COLOR)
M_manim = IntegerMatrix(M, left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
M_manim.color = FONT_COLOR
M_manim.width *= 0.85
cross = Cross(stroke_color=RED_C)
patrones_probados = Variable(total_test,
Text('P. probados', font=NO_TEX_FONT),
var_type=Integer)
patrones_correctos = Variable(total_test, Text('P. correctos',
font=NO_TEX_FONT),
var_type=Integer)
for i in range(X.shape[0]):
            # titles and positioning
subtitle = Text(f"Patrón de entrada {i + 1}", font=NO_TEX_FONT,
color=FONT_COLOR)
subtitle.to_edge(UP)
if i == 0:
patrones_probados.width *= 0.65
patrones_correctos.width *= 0.65
patrones_probados.color = FONT_COLOR
patrones_correctos.color = VAL_COLOR_RIGHT
patrones_probados.next_to(subtitle, DOWN)
patrones_probados.to_edge(RIGHT)
patrones_correctos.next_to(patrones_probados, DOWN)
self.play(Write(subtitle),
Write(patrones_probados),
Write(patrones_correctos), run_time=2)
else:
self.play(Write(subtitle), run_time=2)
            # test patterns
x_i = X[i].reshape(X.shape[1], 1)
y_i = Y[i].reshape(Y.shape[1], 1)
y_hat = M @ x_i
y_hat_final = np.copy(y_hat)
max_y_hat = np.max(y_hat)
mask_max = y_hat == max_y_hat
mask_not_max = ~mask_max
y_hat_final[mask_max] = 1
y_hat_final[mask_not_max] = 0
are_equal = np.all(y_hat_final == y_i)
            # operation M * x_i
operation = MathTex(r"M \times " + "x^{" + str(i + 1) + "} = ",
color=FONT_COLOR)
x_i_manim = IntegerMatrix(x_i, left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
x_i_manim.color = FONT_COLOR
x_i_manim.width *= 0.85
y_hat_manim = IntegerMatrix(y_hat, left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
y_hat_manim.color = FONT_COLOR
y_hat_manim.width *= 0.85
y_final_manim = IntegerMatrix(y_hat_final,
left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
y_final_manim.color = FONT_COLOR
y_final_manim.width *= 0.85
operation.shift(DOWN * 1.5)
operation.to_edge(LEFT)
M_manim.next_to(operation, RIGHT)
times.next_to(M_manim, RIGHT)
x_i_manim.next_to(times, RIGHT)
equals.next_to(x_i_manim, RIGHT)
y_hat_manim.next_to(equals, RIGHT)
self.play(FadeIn(operation), FadeIn(M_manim), FadeIn(times),
FadeIn(x_i_manim), FadeIn(equals), FadeIn(y_hat_manim),
run_time=2.5)
self.wait(3)
self.play(FadeOut(operation), FadeOut(M_manim), FadeOut(times),
FadeOut(x_i_manim), FadeOut(equals),
run_time=1.5)
self.play(y_hat_manim.animate.to_edge(LEFT), run_time=1.5)
self.remove(operation, x_i_manim)
            # finding the maximum of the vector resulting from M * x_i
max_operation = MathTex(r" ; \vee_{h=1}^{p}\left[\sum_{j=1}^{n}"
+ r" m_{hj} \cdot x_j^{" + str(i+1)
+ r"}\right] = " + f"{max_y_hat}"
+ r"\mbox{ } \therefore",
color=FONT_COLOR,
tex_template=TEX_TEMPLATE)
max_operation.next_to(y_hat_manim, RIGHT)
max_operation.shift(RIGHT * 0.25)
y_omega.next_to(max_operation)
y_omega.shift(RIGHT * 0.25)
y_final_manim.next_to(y_omega)
group_omega = Group(y_omega, y_final_manim)
self.play(FadeIn(max_operation), FadeIn(y_omega),
FadeIn(y_final_manim), run_time=1.5)
self.wait(3)
self.play(FadeOut(y_hat_manim), FadeOut(max_operation),
run_time=1.5)
self.remove(y_hat_manim, max_operation)
            # comparing y_omega vs y_i
y_i_manim = IntegerMatrix(y_i, left_bracket=LEFT_BRACKET,
right_bracket=RIGHT_BRACKET)
y_i_manim.width *= 0.85
self.play(group_omega.animate.shift(LEFT * 5))
y_i_tex = MathTex(r"y^{" + f"{i + 1}" + "} = ", color=FONT_COLOR)
y_i_tex.next_to(group_omega, RIGHT)
y_i_tex.shift(RIGHT)
y_i_manim.next_to(y_i_tex, RIGHT)
y_i_manim.color = FONT_COLOR
self.play(FadeIn(y_i_tex), FadeIn(y_i_manim), run_time=1.5)
# check if recall was right
total_test += 1
if are_equal:
right += 1
group_i = Group(y_i_tex, y_i_manim)
self.play(y_omega.animate.set_fill(VAL_COLOR_RIGHT),
y_final_manim.animate.set_fill(VAL_COLOR_RIGHT),
y_i_tex.animate.set_fill(VAL_COLOR_RIGHT),
y_i_manim.animate.set_fill(VAL_COLOR_RIGHT),
run_time=0.8)
self.play(group_omega.animate.scale(1.3),
group_i.animate.scale(1.3), run_time=0.8)
self.play(group_omega.animate.scale(0.77),
group_i.animate.scale(0.77),
patrones_probados.tracker.animate.set_value(
total_test),
patrones_correctos.tracker.animate.set_value(right),
run_time=0.8)
else:
cross.next_to(y_i_manim, RIGHT)
cross.shift(RIGHT)
self.play(FadeIn(cross), run_time=0.8)
patrones_probados_tracker = patrones_probados.tracker
self.play(patrones_probados_tracker.animate.set_value(
total_test), run_time=0.8)
self.wait(0.8)
self.wait(2.5)
self.play(FadeOut(y_i_tex), FadeOut(y_i_manim),
FadeOut(y_omega), FadeOut(y_final_manim),
FadeOut(subtitle), run_time=1.5)
y_omega.set_fill(FONT_COLOR)
self.remove(subtitle, y_i_tex, y_i_manim, y_omega, y_final_manim,
patrones_probados, patrones_correctos)
self.wait(1)
# resubstitution error computation
title = Text("Desempeño de la Lernmatrix", font=NO_TEX_FONT,
color=FONT_COLOR)
frac = MathTex(r"Resubstitution \mbox{ } error = "
+ r"\frac{P. \mbox{ }correctos}{P. \mbox{ }probados}="
+ r"\frac{" + f"{right}" + "}{"
+ f"{total_test}" + "}="
+ f"{right / total_test * 100:.2f}" + r"\%",
color=FONT_COLOR)
frac.shift(DOWN * 0.75)
self.play(Write(title), run_time=1.5)
self.play(title.animate.to_edge(UP), run_time=1)
self.play(Write(patrones_probados), Write(patrones_correctos),
run_time=1.5)
self.play(FadeIn(frac), run_time=1.5)
self.wait(10)
def np_to_pmatrix(arr):
"""
    Return the LaTeX pmatrix string representation of an np.array.
Parameter
--------
arr : np.array
array to be parsed to latex
Return
------
tex : string
string pmatrix representation of an array
"""
if len(arr.shape) > 2:
raise ValueError('Matrix should be at most 2D')
lines = str(arr).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{pmatrix*}[r]']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
# rv[-1] = rv[-1].replace(r"\\", "")
rv += [r'\end{pmatrix*}']
return '\n'.join(rv)
# return rv
| Cardoso1994/manim_videos | associative_memories/lernmatrix/src/lernmatrix.py | lernmatrix.py | py | 17,164 | python | en | code | 0 | github-code | 36 |
73507098983 | from django import forms
from .Config import EffectType
class ChooseEffectRadioForm(forms.Form):
def __init__(self, effect_type, effect_label, *args, **kwargs):
super(ChooseEffectRadioForm, self).__init__(*args, **kwargs)
self.fields["pref-effect"] = forms.BooleanField(label=effect_label,
required=True,
widget=forms.CheckboxInput(attrs={
"type": "radio",
"id": effect_type,
"value": effect_type
}))
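        # overriding the widget type to "radio" makes each BooleanField render as
        # a single radio button, one per effect in the list built below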
radio_list = list()
for key, value in EffectType.__members__.items():
radio_list.append(ChooseEffectRadioForm(key, value.value[0]))
| gwolan/pic_convolving_website | upload_pic/src/ChooseEffectRadioForm.py | ChooseEffectRadioForm.py | py | 932 | python | en | code | 0 | github-code | 36 |
43712557365 | nu1,nu2=map(int,input().split())
if nu1<=nu2:
u=nu1
else:
u=nu2
m=[]
for i in range(0,u):
m.append(sorted(list(map(int,input().split()))))
m=sorted(m)
for i in range(0,len(m[0])):
for j in range(0,len(m)-1):
if m[j][i]>m[j+1][i]:
m[j][i],m[j+1][i]=m[j+1][i],m[j][i]
for i in m:
print(*i)
| sriramkiddo/guvi-programs | pro4_2.py | pro4_2.py | py | 308 | python | en | code | 0 | github-code | 36 |
72045569703 | import re
puzzle = open('puzzle', 'r').read().splitlines()
puzzle = [tuple(map(int, re.findall(r'\d+', i))) for i in puzzle]
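# AoC 2018 day 6 part 2: count the grid points whose summed Manhattan distance
# to all input coordinates is below 10000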
def is_close_enough(x, y):
distances = sum(abs(i[0]-x) + abs(i[1]-y) for i in puzzle)
if distances < 10000:
return 1
return 0
x1, x2, y1, y2 = puzzle[0][0], puzzle[0][0], puzzle[0][1], puzzle[0][1]
for i in puzzle:
if i[0] < x1:
x1 = i[0]
if i[0] > x1:
x2 = i[0]
if i[1] < x1:
y1 = i[1]
if i[1] > x1:
y2 = i[1]
x1 -= 10000 // len(puzzle) - 1
y1 -= 10000 // len(puzzle) - 1
x2 += 10000 // len(puzzle) + 1
y2 += 10000 // len(puzzle) + 1
grid = {}
close_enough = 0
for x in range(x1, x2):
for y in range(y1, y2):
close_enough += is_close_enough(x, y)
print(close_enough) | filipmlynarski/Advent-of-Code-2018 | day_06/day_6_part_2.py | day_6_part_2.py | py | 720 | python | en | code | 0 | github-code | 36 |
907918850 | # Write a program to check if two strings are rotations of each other
def checkRotation(str1, str2):
temp = ''
# Check if lengths of two strings are equal or not
if len(str1) != len(str2):
return False
# storing concatenated string
temp = str1 + str1
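    # e.g. str1 = 'abcde' gives temp = 'abcdeabcde', which contains every rotation, such as 'cdeab'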
if str2 in temp:
return True #returning true if 2nd string is present in concatenated string
else:
return False
str1 = input("enter the string : ")
str2 = input("enter the second string : ")
if checkRotation(str1, str2):
print("Given Strings are rotations of each other.")
else:
print("Given Strings are not rotations of each other.") | Atulj01/DSA-assignment | 3_problem.py | 3_problem.py | py | 700 | python | en | code | 0 | github-code | 36 |
14373956629 | import random
def merge_list():
first_list = []
second_list = []
seed_list(first_list)
seed_list(second_list)
print(first_list)
print(second_list)
output_list = []
for x in range(6):
if x % 2 == 0:
output_list.insert(x, first_list[x])
elif x % 2 == 1:
output_list.insert(x, second_list[x])
print(output_list)
def seed_list(lst):
    # parameter renamed from "list" to avoid shadowing the built-in
    for x in range(6):
        lst.append(random.randint(1, 20))
merge_list()
| GGerginov/Python-Eexercises-TU-Sofia | Exercise/05|11|2021/MergeLists.py | MergeLists.py | py | 507 | python | en | code | 0 | github-code | 36 |
32975944321 | #Scripts for the search
import mysql.connector
import json
from django.http import HttpResponse
class carrier():
Name = ""
Email = ""
Location = []
class seller():
Firstname = ""
Surname = ""
Email = ""
Graduate = False
Location = ""
Products = []
class product():
menteeName = ""
menteeGraduate = False
menteeLocation = ""
menteeEmail = ""
Quantity = ""
MenteeEmail = ""
Matchs = False
#Returns the quantity as a volume in liters
def convertToLiters(quantity, units):
if units == "liter":
return quantity * 1.0
elif units == "milliliter":
return quantity/1000.0
elif units == "gallons":
return quantity*4.54609
elif units == "pints":
return quantity/1.7598
def convertToKilograms(quantity, units):
if units == "kilograms":
return quantity*1.0
elif units == "grams":
return quantity/1000.0
elif units == "tonnes":
return quantity * 1000.0
elif units == "stone":
return quantity*6.35029
elif units == "pounds":
return quantity/2.20462
elif units == "ounces":
return quantity/35.274
def search(quantity, units, keywords, endRegion, usertype):
unitFamily = ""
#Determine if liquid, solid or loose
#Convert to corresponding standard measure(liters, kilograms, pieces)
if units in ["liters", "milliliters", "gallons", "pints"]:
unitFamily="liters"
convertToLiters(quantity, units)
elif units in ["kilograms", "grams", "tonnes", "stone", "pounds", "ounces"]:
unitFamily="kilograms"
convertToKilograms(quantity, units)
else:
unitFamily = "pieces"
#function convertToLiters
#function convertToKilograms
#Access database
cnx = mysql.connector.connect(user='root', password='cfg2014!', host='127.0.0.1', database='c4g', port='3306')
cursor = cnx.cursor()
#carrier query
carriers = []
query = ("SELECT Name, Email, Location FROM carrier")
cursor.execute(query)
for (Name, Email, Location) in cursor:
individual = carrier()
individual.Name = Name
individual.Email = Email
        individual.Location = [loc.strip() for loc in Location.split(",")]
carriers.append(individual)
#Find carriers with end region in range
viableCarriers = []
for i in range(len(carriers)):
for j in range(len(carriers[i].Location)):
if carriers[i].Location[j].lower() == endRegion:
                viableCarriers.append(carriers[i])
#Find sellers within the ranges of carriers
#Get sellers
sellers = []
query = ("SELECT Firstname, Surname, Email, Graduate, Location FROM mentee")
cursor.execute(query)
for Firstname, Surname, Email, Graduate, Location in cursor:
individual = seller()
individual.Firstname = Firstname
individual.Surname = Surname
individual.Email = Email
individual.Graduate = Graduate
individual.Location = Location
#Prevents non graduated accounts from being shown
if individual.Graduate == True or (usertype == "mentee" or usertype == "mentor"):
#Get products of user
products = []
query = ("SELECT Name, Quantity, Email FROM products WHERE MenteeEmail = '" + str(individual.Email) + "'")
cursor.execute(query)
for Name, Quantity, Email in cursor:
individualProduct = product()
individualProduct.Name = Name
individualProduct.MenteeEmail = Email
individualProduct.Quantity = Quantity
                individualProduct.menteeName = individual.Firstname + " " + individual.Surname
individualProduct.menteeGraduate = individual.Graduate
individualProduct.menteeLocation = individual.Location
products.append(individualProduct)
            individual.Products = products
            sellers.append(individual)
#Check for matching with keywords and their products
    keywordList = keywords.split(" ")
    for i in range(len(sellers)):
        for j in range(len(sellers[i].Products)):
            for k in range(len(keywordList)):
                if keywordList[k] in sellers[i].Products[j].Name:
                    sellers[i].Products[j].Matchs = True
    #Check for region matching that of viable carriers
    finalListProducts = []
    for i in range(len(sellers)):
        for j in range(len(sellers[i].Products)):
            for k in range(len(viableCarriers)):
                if sellers[i].Products[j].Matchs == True:
                    if sellers[i].Location in viableCarriers[k].Location:
                        finalListProducts.append(sellers[i].Products[j])
    #Organise by quantity, largest first (product objects need an explicit sort key)
    finalListProducts.sort(key=lambda p: p.Quantity, reverse=True)
    #find closest to required quantity by greedily taking the largest lots that still fit
    targetSum = quantity
    suggestList = []
    for i in range(len(finalListProducts)):
        if finalListProducts[i].Quantity < targetSum:
            targetSum = targetSum - finalListProducts[i].Quantity
            suggestList.append(finalListProducts[i])
    #Return as JSON: suggested products first, then every matching product
    suggestDicts = []
    for i in range(len(suggestList)):
        suggestDicts.append({'unitvalue': suggestList[i].Quantity, 'unit': unitFamily, 'keyword': suggestList[i].Name, 'meName': suggestList[i].menteeName, 'region': suggestList[i].menteeLocation, 'carrier': viableCarriers[0].Name, 'qualitymatch': 0})
    finalDicts = []
    for i in range(len(finalListProducts)):
        finalDicts.append({'unitvalue': finalListProducts[i].Quantity, 'unit': unitFamily, 'keyword': finalListProducts[i].Name, 'meName': finalListProducts[i].menteeName, 'region': finalListProducts[i].menteeLocation, 'carrier': viableCarriers[0].Name, 'qualitymatch': 1})
    JSONDICT = {'success': 1, 'data': suggestDicts + finalDicts}
    return HttpResponse(json.dumps(JSONDICT))
| Team-14-CodeForGood2014/Cherie-Blair-Foundation-Marketplace | Django/cbfm/searchEngine/scripts.py | scripts.py | py | 6,321 | python | en | code | 0 | github-code | 36 |
11352228515 | import os
clear = lambda : os.system('cls')
import datetime
from time import process_time_ns
x = datetime.datetime.now()
ulang = "y"
while ulang=="y" or ulang=="Y":
kodeGolongan = [1,2,3]
gajiPokok = [2500000, 4500000, 6500000]
tunjanganIstri = [0.01, 0.03, 0.05]
kodeJK =[1,2]
JK = ['Laki - Laki','Perempuan']
kodeStsKwn =[1,2]
StsKwn = ['Kawin','Belum Kawin']
kodeStsAnk =[1,2]
StsAnk = ['Punya','Belum Punya']
iuranPensiun = 15500
iuranOrganisasi = 3500
    clear()
print ("==============================================")
print("{:^44}".format("SELAMAT DATANG"))
print("{:^44}".format("PERHITUNGAN GAJI KARYAWAN CV.LOGOS"))
print("{:^44}".format("TANGGAL = " + x.strftime("%x")))
print ("==============================================")
namaKaryawan = input("Masukan Nama = ")
inp = 1
while inp < 4:
clear()
print("==============================================")
print("{:^44}".format("PILIHAN GOLONGAN"))
print("==============================================")
nmr = 1
a = 0
for kodeGol in kodeGolongan :
print(str(nmr) + ". Golongan " + str(kodeGol))
a = a + 1
nmr = nmr + 1
print("==============================================")
golongan = int(input("Masukan Kode Golongan = "))
clear()
inp = golongan
if inp <= len(kodeGolongan) :
i = 0
while i<len(kodeGolongan):
if kodeGolongan[i] == inp:
ambilGaji = gajiPokok[i]
i+=1
else :
break
clear()
print("==============================================")
print("{:^44}".format("PILIHAN JENIS KELAMIN"))
print("==============================================")
a = 0
for jenisKel in JK :
kodeKel = kodeJK[a]
print(str(kodeKel) + ". " + str(jenisKel))
a = a + 1
print("==============================================")
jenisKelamin = int(input("Masukan Kode Jenis Kelamin = "))
clear()
inpJK = jenisKelamin
if inpJK <= len(kodeJK) :
i = 0
while i<len(kodeJK):
if kodeJK[i] == inpJK:
ambilJK = JK[i]
i+=1
else :
break
clear()
print("==============================================")
print("{:^44}".format("PILIHAN STATUS KAWIN"))
print("==============================================")
a = 0
for jenisSK in StsKwn :
kodeSK = kodeStsKwn[a]
print(str(kodeSK) + ". " + str(jenisSK))
a = a + 1
print("==============================================")
StatusKawin = int(input("Masukan Kode Status Kawin = "))
clear()
inpSK = StatusKawin
if inpSK <= len(kodeStsKwn) :
i = 0
while i<len(kodeStsKwn):
if kodeStsKwn[i] == inpSK:
ambilSK = StsKwn[i]
i+=1
else :
break
        ambilSA = 'Belum Punya'  # default so the child-allowance check below is safe when unmarried
        if ambilSK == 'Kawin' :
clear()
print("==============================================")
print("{:^44}".format("PILIHAN STATUS ANAK"))
print("==============================================")
a = 0
for jenisSA in StsAnk :
kodeSA = kodeStsAnk[a]
print(str(kodeSA) + ". " + str(jenisSA))
a = a + 1
print("==============================================")
StatusAnak = int(input("Masukan Kode Status Anak = "))
clear()
inpSA = StatusAnak
if inpSA <= len(kodeStsAnk) :
i = 0
while i<len(kodeStsAnk):
if kodeStsAnk[i] == inpSA:
ambilSA = StsAnk[i]
i+=1
else :
break
        #calculate the wife allowance
if ambilJK == 'Laki - Laki' and ambilSK == 'Kawin' :
i = 0
while i<len(kodeGolongan):
if kodeGolongan[i] == inp:
ambilTunjanganIstri = tunjanganIstri[i]
totalTunjanganIstri = ambilGaji * ambilTunjanganIstri
i+=1
else :
totalTunjanganIstri = 0
        #calculate the child allowance
if ambilSK == 'Kawin' and ambilSA == 'Punya' :
totalTunjanganAnak = ambilGaji * 0.02
else :
totalTunjanganAnak = 0
        #calculate the gross salary
gajiBruto = ambilGaji + totalTunjanganAnak + totalTunjanganIstri
        #calculate the position-expense deduction
biayaJabatan = gajiBruto * 0.0005
        #calculate the net salary
gajiNetto = gajiBruto - biayaJabatan - iuranPensiun - iuranOrganisasi
clear()
print("==============================================")
print("{:^44}".format("SLIP GAJI"))
print("{:^44}".format("KARYAWAN CV.LOGOS"))
print("{:^44}".format("TANGGAL = " + x.strftime("%x")))
print("==============================================")
print("Nama " + namaKaryawan)
print("Golongan " + str(golongan))
print("jenis kelamin " + ambilJK)
print("Staus Kawin " + ambilSK)
print("Gaji Pokok Rp " + format(ambilGaji,',.2f'))
print("Tunjangan istri Rp " + format(totalTunjanganIstri,',.2f'))
print("Tunjangan Anak Rp " + format(totalTunjanganAnak,',.2f'))
print(">> Gaji bruto Rp " + format(gajiBruto,',.2f'))
print("==============================================")
print("Biaya Jabatan Rp " + format(biayaJabatan,',.2f'))
print("Iuran Pensiun Rp " + format(iuranPensiun,',.2f'))
print("Iuran Organisasi Rp " + format(iuranOrganisasi,',.2f'))
print(">> Gaji Netto Rp " + format(gajiNetto,',.2f'))
print("")
        #write the pay slip to a file (extension: .txt)
f=open("SLIPGAJI"+ namaKaryawan.upper() +".txt","w+")
f.write("==============================================\r")
f.write("{:^44}".format("SLIP GAJI") + "\r")
f.write("{:^44}".format("KARYAWAN CV.LOGOS") + "\r")
f.write("{:^44}".format("TANGGAL = " + x.strftime("%x")) + "\r")
f.write("==============================================\r")
f.write("Nama " + namaKaryawan + "\r")
f.write("Golongan " + str(golongan) + "\r")
f.write("jenis kelamin " + ambilJK + "\r")
f.write("Staus Kawin " + ambilSK + "\r")
f.write("Gaji Pokok Rp " + format(ambilGaji,',.2f') + "\r")
f.write("Tunjangan istri Rp " + format(totalTunjanganIstri,',.2f') + "\r")
f.write("Tunjangan Anak Rp " + format(totalTunjanganAnak,',.2f') + "\r")
f.write(">> Gaji bruto Rp " + format(gajiBruto,',.2f') + "\r")
f.write("==============================================\r")
f.write("Biaya Jabatan Rp " + format(biayaJabatan,',.2f') + "\r")
f.write("Iuran Pensiun Rp " + format(iuranPensiun,',.2f') + "\r")
f.write("Iuran Organisasi Rp " + format(iuranOrganisasi,',.2f') + "\r")
f.write(">> Gaji Netto Rp " + format(gajiNetto,',.2f') + "\r")
f.write("\r")
f.write("{:^44}".format("- TETAP SEMANGAT & SEHAT SELALU -") + "\r")
f.write("{:^44}".format("- TERIMA KASIH -") + "\r")
ulang = input('Ulangi Cek Gaji? (y/t) : ')
clear()
break
| 20083000169RianHudaMaulana/Uas- | UAS_20083000169_Rian Huda Maulana_2G.py | UAS_20083000169_Rian Huda Maulana_2G.py | py | 8,062 | python | en | code | 0 | github-code | 36 |
6798077511 | import logging
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from embed_video.backends import detect_backend
from ...clients import VimeoClient
from ...models import Resource
from ...conf import settings
logger = logging.getLogger('vimeo')
class Command(BaseCommand):
def exists_dev_tag(self, video_link):
client = VimeoClient()
is_dev = False
code = detect_backend(video_link).code
try:
tags = client.get_video_tags(code)
for tag in tags:
if tag.get('name') == settings.RESOURCE_DEVELOPMENT_TAG_NAME:
is_dev = True
except Exception as e:
logger.error('commands.import_vimeo_resources.get_video_tags.ValueError: {}'.format(e))
is_dev = True
return is_dev
def create_internal_messages(self):
Resource.objects.create_internal_messages(
resources=self.resources_updated,
level=settings.RESOURCE_CH_MESSAGE_SUCCESS)
Resource.objects.create_internal_messages(
resources=self.resources_error,
level=settings.RESOURCE_CH_MESSAGE_ERROR)
def get_resources_links_deleted(self):
return Resource.objects.removed().values_list('link', flat=True)
def get_resources_links_dev(self):
return Resource.objects.get_development_resources().values_list('link', flat=True)
def get_video_status(self, video_data):
return video_data.get('status')
def update_or_create_resource(self, resource, video_data):
try:
resource, created = Resource.objects.update_or_create(**video_data)
try:
self.resources_updated[resource.created_by.pk] += 1
except KeyError:
self.resources_updated[resource.created_by.pk] = 1
except AttributeError:
pass
self.stdout.write('Resource with pk {} updated'.format(resource.pk))
except Exception as e:
logger.error('commands.import_vimeo_resources.update_or_create.Exception: {}'.format(e))
def set_resource_status_error(self, resource):
self.stdout.write('Resource with pk {} error'.format(resource.pk))
resource.set_as_error()
try:
self.resources_error[resource.created_by.pk] += 1
except KeyError:
self.resources_error[resource.created_by.pk] = 1
except AttributeError:
pass
def set_video_thumbnail(self, video_data):
video_pictures = video_data.get('pictures')
if video_pictures and len(video_pictures):
positions = [0, 1, 2, 3]
for position in positions:
try:
thumbnail = video_pictures.get('sizes')[position]
video_data['thumbnail'] = thumbnail.get('link')
except IndexError:
pass
return video_data
def is_video_available(self, video_status):
return video_status == settings.RESOURCE_PROVIDER_STATUS_AVAILABLE
def is_video_error(self, video_status):
return video_status == settings.RESOURCE_PROVIDER_STATUS_UPLOADING_ERROR
def is_resource_deleted(self, video_link):
return video_link in self.resources_deleted
def is_resource_dev_local(self, video_link):
return video_link in self.resources_dev_local
def is_resource_dev_vimeo(self, video_link):
return video_link in self.resources_dev_vimeo
def handle(self, *args, **kwargs):
self.stdout.write('Script init: Import vimeo videos')
logger.info('Script init: Import vimeo videos')
client = VimeoClient()
num_pages = client.get_video_num_pages()
self.resources_updated = {}
self.resources_error = {}
self.resources_dev_vimeo = []
self.resources_dev_local = self.get_resources_links_dev()
self.resources_deleted = self.get_resources_links_deleted()
for page in range(0, num_pages):
videos = client.get_videos_paginated(page + 1)
for video_data in videos:
video_link = video_data.get('link')
try:
resource, _ = Resource.objects.get_or_create(link=video_link)
if resource.is_draft:
status = self.get_video_status(video_data)
video_data = self.set_video_thumbnail(video_data)
if self.exists_dev_tag(video_link):
self.resources_dev_vimeo.append(video_link)
if self.is_resource_deleted(video_link):
continue
elif (self.is_resource_dev_local(video_link) or self.is_resource_dev_vimeo(video_link)) \
and settings.UPLOAD_REAL:
continue
if self.is_video_available(status):
self.update_or_create_resource(resource, video_data)
elif self.is_video_error(status):
self.set_resource_status_error(resource)
except ObjectDoesNotExist:
logger.error('commands.import_vimeo_resources.ObjectDoesNotExist: {}'.format(video_link))
continue
self.create_internal_messages()
self.stdout.write('Script finished: Import vimeo videos')
logger.info('Script finished: Import vimeo videos')
| tomasgarzon/exo-services | service-exo-medialibrary/resource/management/commands/import_vimeo_resources.py | import_vimeo_resources.py | py | 5,577 | python | en | code | 0 | github-code | 36 |
21993890104 | from typing import List
from unittest import TestCase
import torch
from torch import Tensor
from attnganw import config
from attnganw.randomutils import get_vector_interpolation
class TestInterpolation(TestCase):
def test_get_noise_interpolation(self):
batch_size = 1
noise_vector_size = 3
noise_vector_start: Tensor = torch.randn(batch_size, noise_vector_size, dtype=torch.float)
noise_vector_end: Tensor = torch.randn(batch_size, noise_vector_size, dtype=torch.float)
config.generation['noise_interpolation_steps'] = 4
initial_interpolation: List[Tensor] = get_vector_interpolation(batch_size=batch_size,
noise_vector_size=noise_vector_size,
noise_vector_start=noise_vector_start,
noise_vector_end=noise_vector_end,
gpu_id=-1)
self.assertEqual(len(initial_interpolation), config.generation['noise_interpolation_steps'] + 1)
self.assertTrue(torch.equal(initial_interpolation[0], noise_vector_start))
self.assertTrue(torch.equal(initial_interpolation[-1], noise_vector_end))
config.generation['noise_interpolation_steps'] = config.generation['noise_interpolation_steps'] * 2
second_interpolation: List[Tensor] = get_vector_interpolation(batch_size=batch_size,
noise_vector_size=noise_vector_size,
noise_vector_start=noise_vector_start,
noise_vector_end=noise_vector_end,
gpu_id=-1)
self.assertEqual(len(second_interpolation), config.generation['noise_interpolation_steps'] + 1)
self.assertTrue(torch.equal(second_interpolation[0], noise_vector_start))
self.assertTrue(torch.equal(second_interpolation[-1], noise_vector_end))
self.assertFalse(torch.equal(second_interpolation[1], initial_interpolation[1]))
| cptanalatriste/birds-of-british-empire | tests/test_train.py | test_train.py | py | 2,304 | python | en | code | null | github-code | 36 |
33245288481 | #!/usr/bin/env python
import rospy
import matplotlib.pyplot as plt
import matplotlib.animation
import numpy as np
from ti_mmwave_rospkg.msg import RadarScan
class MySimpleClass(object):
def __init__(self):
self.sub = rospy.Subscriber('/ti_mmwave/radar_scan',RadarScan,self.sub_callback)
self.tmp_x = []
self.tmp_y = []
self.showflag = 0
#self.fig, ax = plt.subplots()
#self.sc = ax.scatter(self.tmp_x,self.tmp_y)
def sub_callback(self,msg):
if msg.point_id ==0:
'''if not self.showflag:
self.showflag = 1
plt.show() '''
#plt.scatter(self.tmp_x,self.tmp_y)
#self.sc.set_offsets(np.c_[self.tmp_x,self.tmp_y])
#self.fig.canvas.draw_idle()
#plt.pause(0.1)
self.tmp_x=[]
self.tmp_y=[]
self.tmp_x.append(msg.x)
self.tmp_y.append(msg.y)
def animate(i):
sc.set_offsets(np.c_[my_simple_class.tmp_x,my_simple_class.tmp_y])
if __name__ =="__main__":
rospy.init_node('hello')
my_simple_class = MySimpleClass()
#plt.show()
fig, ax = plt.subplots()
x, y = [],[]
sc = ax.scatter(x,y)
plt.xlim(0,10)
plt.ylim(-10,10)
ani = matplotlib.animation.FuncAnimation(fig, animate,
frames=30, interval=100, repeat=True)
plt.show()
rospy.spin()
| YiShan8787/mm-2sensor | src/micro_doppler_pkg/scripts/test3.py | test3.py | py | 1,420 | python | en | code | 0 | github-code | 36 |
69833302503 | list = [1,2,3,4,5,6,7,8,23,435,545]
string = 'hailo'
int = 123
def adarsh_loop(object):
try:
iter_list = iter(object)
while True:
try:
print(next(iter_list))
except:
break
except:
print("object not iterable, f*ck off")
adarsh_loop(list)
adarsh_loop(string)
adarsh_loop(int)
def my_generator():
for i in range(40+1):
yield i #it will generate value on the fly when needed
gen=my_generator()
# print(next(gen))
# print(next(gen))
# print(next(gen))
# for i in gen:
# print(i) | Adarsh1o1/python-initials | iter_and_generators.py | iter_and_generators.py | py | 582 | python | en | code | 1 | github-code | 36 |
42390357735 | from opspec import Spec
import sys
from braindead import log, info, error, die
from syntax import *
log.enable()
s = Spec()
s.parse(sys.argv[1])
info('loaded %s rules', len(s.rules))
lut = ['"\\x01invalid"']*256
for pattern, asm, action in s.rules:
b = int(pattern[0], 16)
asm = asm.replace('xxyy', '\\x02""')
asm = asm.replace('xxrel', '\\x01""')
asm = asm.replace('xx', '\\x01""')
asm = asm.replace('yy', '\\x03""')
fmt = '"'
fmt += f'\\x{len(pattern):02x}""'
fmt += asm
lut[b] = fmt+'"'
for op in range(256):
print(lut[op], ",")
| braindead/ctf-writeups | 2019/X-MAS/CHIP9/gen_disasm.py | gen_disasm.py | py | 545 | python | en | code | 11 | github-code | 36 |
74605387945 | from petalo_calib.tdc_corrections import correct_tfine_wrap_around
from petalo_calib.qdc_corrections import correct_efine_wrap_around
from petalo_calib.tdc_corrections import apply_tdc_correction_tot
from petalo_calib.tdc_corrections import compute_integration_window_size
from petalo_calib.tdc_corrections import add_tcoarse_extended_to_df
from petalo_calib.clustering import compute_evt_number_combined_with_cluster_id
from petalo_calib.io import compute_file_chunks_indices
from petalo_calib.io import write_corrected_df_daq
from sklearn.cluster import DBSCAN
import pandas as pd
import numpy as np
import sys
def compute_tcoarse_wrap_arounds(df):
limits = df[df.tcoarse_diff < -20000].index
first = df.index[0]
last = df.index[-1]
limits = np.concatenate([np.array([first]), limits.values, np.array([last])])
return limits
def compute_tcoarse_nloops(df):
limits = compute_tcoarse_wrap_arounds(df)
nloops = np.zeros(df.shape[0], dtype='int32')
for i in range(limits.shape[0]-1):
start = limits[i]
end = limits[i+1]
nloops[start:end+1] = i
return nloops
def compute_extended_tcoarse(df):
return df['tcoarse'] + df['nloops'] * 2**16
def add_tcoarse_extended_to_df(df):
df['tcoarse'] = df.tcoarse.astype(np.int32)
df['tcoarse_diff'] = df.tcoarse.diff()
df['nloops'] = compute_tcoarse_nloops(df)
df['tcoarse_extended'] = compute_extended_tcoarse(df)
def local_sort_tcoarse(df, indices):
start = -1
end = -1
window_size = 120
for index in indices:
if (index >= start) and (index <= end):
#print("Done! ", index)
continue
start = index - window_size
end = index + window_size
#print(start, end)
df.iloc[start:end] = df.iloc[start:end].sort_values('tcoarse', ascending=False)
def local_sort_tcoarse_to_fix_wrap_arounds(df):
add_tcoarse_extended_to_df(df)
indices = df[df.tcoarse_diff < -20000].index.values
local_sort_tcoarse(df, indices)
add_tcoarse_extended_to_df(df)
#df.drop(columns=['tcoarse_diff', 'nloops'], inplace=True)
def compute_tcoarse_extended_with_local_sort(df):
df_0 = df[df.tofpet_id == 0].reset_index()
df_2 = df[df.tofpet_id == 2].reset_index()
local_sort_tcoarse_to_fix_wrap_arounds(df_0)
local_sort_tcoarse_to_fix_wrap_arounds(df_2)
df_all = pd.concat([df_0, df_2])
df_all_sorted = df_all.sort_values(['evt_number', 'tcoarse_extended']).reset_index(drop=True)
return df_all_sorted
def compute_clusters(df):
values = df.tcoarse_extended.values
values = values.reshape(values.shape[0],1)
clusters = DBSCAN(eps=10, min_samples=2).fit(values)
return clusters.labels_
def process_daq_df_tot(df, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2):
compute_integration_window_size(df)
correct_tfine_wrap_around(df)
correct_efine_wrap_around(df)
df = compute_tcoarse_extended_with_local_sort(df)
df_0 = df[df.tofpet_id == 0]
df_2 = df[df.tofpet_id == 2]
df_0 = apply_tdc_correction_tot(df_0, df_tdc1_asic0, 'tfine')
df_0 = apply_tdc_correction_tot(df_0, df_tdc2_asic0, 'efine')
df_2 = apply_tdc_correction_tot(df_2, df_tdc1_asic2, 'tfine')
df_2 = apply_tdc_correction_tot(df_2, df_tdc2_asic2, 'efine')
df = pd.concat([df_0, df_2]).sort_index()
df.drop(columns=['card_id', 'wordtype_id'], inplace=True)
df['cluster'] = compute_clusters(df)
return df
def process_daq_file(filein, fileout, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2):
chunks = compute_file_chunks_indices(filein)
nchunks = chunks.shape[0]
for i in range(nchunks-1):
print("{}/{}".format(i, nchunks-2))
start = chunks[i]
end = chunks[i+1]
df = pd.read_hdf(filein, 'data', start=start, stop=end+1)
df_corrected = process_daq_df_tot(df, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2)
write_corrected_df_daq(fileout, df_corrected, i, i>0)
tdc1_asic0 = '/home/vherrero/CALIBRATION_FILES/tfine_cal_asic0_run11291.h5'
tdc2_asic0 = '/home/vherrero/CALIBRATION_FILES/tfine2_cal_asic0_run11291.h5'
tdc1_asic2 = '/home/vherrero/CALIBRATION_FILES/tfine_cal_asic2_run11292.h5'
tdc2_asic2 = '/home/vherrero/CALIBRATION_FILES/tfine2_cal_asic2_run11292.h5'
df_tdc1_asic0 = pd.read_hdf(tdc1_asic0, key='tfine_cal')
df_tdc2_asic0 = pd.read_hdf(tdc2_asic0, key='tfine_cal')
df_tdc1_asic2 = pd.read_hdf(tdc1_asic2, key='tfine_cal')
df_tdc2_asic2 = pd.read_hdf(tdc2_asic2, key='tfine_cal')
filein = sys.argv[1]
fileout = sys.argv[2]
process_daq_file(filein, fileout, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2)
| jmbenlloch/petalo_calib | petalo_calib/scripts/process_files_tot_new_clusters.py | process_files_tot_new_clusters.py | py | 4,761 | python | en | code | 0 | github-code | 36 |
42887542436 | from collections import OrderedDict
class Cache:
"""
A python class which used ordered dictionary (OrderedDict) to implement the LRU cache.
Each entry of the dictinoary will be a key/value pair. The search would be
by key. LRU Cache will have its maximum size defined at initiation. When adding
new keys that cause the capacity to be exceed the size defined at initialtion,
the oldest items will be removed to make room. The newly added items/last accessed
items will be moved to the back of the dictonary (most recently used) and the elements
at the begining will correspond to lest recently used. The element at the begining of the
dictionary will be the one to discard when the cache if full (least recently used)
Methods:
get(key) - Get the value corresponding to key
put(key,value) - Insert key/value into the cache
delKey(key) - Delete the key
reset() - Clear the cache
dumpCache() - Print the cache contents
"""
def __init__(self, size, verbose=False):
"""
Initialize new cache object with the size passed.
Args:
size (int): The max size of the cache.
"""
self.printDebug = verbose
self.sizeOfCache = size
self.cacheLRU = OrderedDict()
def get(self, key):
"""
Returns the value corresponding to the key provided.
If the key does not exit, it returns None else it moves
the key to the end of the dictionary using move_to_end
(method of OrderedDict)
Args:
key (int): The key to lookup the value
Returns:
value (int): The value corresponding to the key if found else None
"""
if self.sizeOfCache == 0:
if self.printDebug:
print("Zero capacity cache and get request. Raise exception.")
raise Exception('Cache is not defined')
else:
if key not in self.cacheLRU:
return None
else:
self.cacheLRU.move_to_end(key)
return self.cacheLRU[key]
def put(self, key, value):
"""
Inserts the key/value pair to the cache. If the cache is
already full (max capacity), it will remove the element at
the head of the dict (least recently used) using popitem and
insert the key/value pair at the end of the dict (most recently used).
If the key already exists, it will not fail but mark the item
as recently used (move it to the back of the dict)
Returns:
Nothing or exception if cache capacity is 0
"""
if key in self.cacheLRU:
if self.printDebug:
print("Key {} already exists in cache. Update this and mark as recently used".format(key))
self.cacheLRU[key] = value
self.cacheLRU.move_to_end(key)
else:
if self.sizeOfCache == 0:
if self.printDebug:
print("Zero capacity cache and put request. Raise exception.")
raise Exception('Cache is not defined')
else:
if len(self.cacheLRU) >= self.sizeOfCache:
outKey, outVal = self.cacheLRU.popitem(last = False)
if self.printDebug:
print("Cache at capacity of {}. Removing LRU key {}".format(self.sizeOfCache,outKey))
self.cacheLRU[key] = value
self.cacheLRU.move_to_end(key)
def delKey(self, key):
"""
Delete the key from the cache if it exists. If the key does not exist
nothing happens. The delete is treated as a cache hit and the key is moved
to the end (most recently used) and then removed.
Returns:
Nothing or exception if cache capacity is 0
"""
if self.sizeOfCache == 0:
if self.printDebug:
print("Zero capacity cache and del request. Raise exception.")
raise Exception('Cache is not defined')
else:
if key in self.cacheLRU:
self.cacheLRU.move_to_end(key)
self.cacheLRU.popitem(last = True)
else:
if self.printDebug:
print("Key {} to delete does not exist in the cache. Doing nothing.\n".format(self.sizeOfCache))
def reset(self):
"""
Reset the cache
Returns:
Nothing or exception if cache capacity is 0
"""
if self.sizeOfCache == 0:
if self.printDebug:
print("Zero capacity cache and reset request. Raise exception.")
raise Exception('Cache is not defined')
else:
self.cacheLRU.clear()
def dumpCache(self):
"""
Print the contents of the cache
Returns:
Nothing
"""
print(self.cacheLRU)
| harsimrit/task1 | cacheLRU.py | cacheLRU.py | py | 4,971 | python | en | code | 0 | github-code | 36 |
21928404743 | # css selector 활용 크롤링
'''
# css란?
Cascading Style Sheets
html로 잡힌 골격에 스타일링(색, 크기 등)을 하는 것
스타일의 이름으로 구조가 특정지어질 수 있음(css selector)
CSS selector
- 웹 구성 시 CSS Selector을 직접 활용해 이름을 붙혀 만들기 때문에 CSS Selector로 찾아질 가능성이 높다
- Element Type 방식
태그 값들이 selector의 기준이 된다
- ID 방식
태그 내 id 값이 존재하면 id값이 selector의 기준이 된다
- Class 방식
태그 내 class 값이 존재하는 경우 class값이 selector의 기준이 된다
- 고급 한정자 방식
id, class가 없는 경우 기준이 되는 것
ex> nth-child
'''
from bs4 import BeautifulSoup as BS #HTML을 편하게 다룰 수 있게 해줌
import requests as req # HTTP 통신을 위해 사용
# module 'collections' has no attribute 'Callable' 에러 대응
# collections.Callable 참조가 파이썬 3.10부터 collections.abc.Callable로 이동하여, 제거된 Attribute라서 발생하는 오류
import collections
if not hasattr(collections, 'Callable'):
collections.Callable = collections.abc.Callable
#------------------------------------------------------------------
url = "https://finance.naver.com/marketindex/exchangeList.naver"
res = req.get(url)
#print(res.text)
soup = BS(res.text, "html.parser")
# 출력 테스트
# print(soup.title)
# print(soup.title.string)
# 원하는 영역 찾기
tds = soup.find_all("td")
names = []
for td in soup.select("td.tit") :
names.append(td.get_text(strip=True))
prices = []
for td in soup.select("td.sale") :
prices.append(td.get_text(strip=True))
print(names)
print(prices) | sh95fit/Python_study | Python_Crawling/Crawling_Static/Static_Study06.py | Static_Study06.py | py | 1,717 | python | ko | code | 1 | github-code | 36 |
73642772264 | # -----------------------------------------------------------
# --------- Assignment 4 - PCA analysis with Python ------------------
# -----------------------------------------------------------
# Author: Tomas Milla-Koch
# Purpose: The following script is script for clipping a scene to vector boundary and performing a PCA analysis.
# Course: REMS 6023
# Date: 30/01/2022
# Disclaimer: This script is for educational purposes only.
# ----------------------------------------------------------
# -----------------------------------------------------------
# --------- Importing of required python libraries ---------
# -----------------------------------------------------------
import os
import shutil
import fnmatch
# import exceptions module
from pci.exceptions import *
# import pci modules for project
from pci.clip import clip
from pci.pcimod import *
from pci.nspio import Report, enableDefaultReport
from pci.pca import pca
from pci.nspio import Report, enableDefaultReport
from pci.fexport import *
# initializing script time
from datetime import datetime as dt, time
# start time of script
start = dt.now()
# -----------------------------------------------------------
# --------- File Management --------------------------------
# -----------------------------------------------------------
print('Obtaining necessary files.')
# get root directory
root = os.getcwd()
# list containing paths where files of different outputs will go
files = ['pca', 'reports']
# iterate through files to remove existing data and create new empty folders
for i in files:
if os.path.exists(root + '\\' + i):
shutil.rmtree(root + '\\' + i)
os.mkdir(root + '\\' + i) # make new folders
# initialize metadata file list
input_files = []
# populate list with availible MTL.txt files, in this case just one
for r, d, f in os.walk(os.getcwd()):
for inFile in fnmatch.filter(f, '*_MTL.txt'):
input_files.append(os.path.join(r, inFile))
# create a list of 1 for the vector file to be used as a boundary
vector_files = []
for r, d, f in os.walk(os.getcwd()):
for inFile in fnmatch.filter(f, '*.shp'):
vector_files.append(os.path.join(r, inFile))
print('Finished obtaining necessary files.')
# -----------------------------------------------------------
# --------- Clipping Image and Adding Bands to Image---------
# -----------------------------------------------------------
print('Clipping image.')
try:
clip(fili=input_files[0]+'-MS', # call MTL file from previously populated list of 1 element
dbic=[1, 2, 3, 4, 5, 6, 7], # which bands to clip
sltype='vec', # what kind of boundary file will image clip to
# clip image to vector file provided ... any file name for other purposes is fine
filo=root + '\\pca\\hal_clip.pix',
clipfil=vector_files[0]
)
pcimod(file=root+'\\pca\\hal_clip.pix',
pciop='ADD', # add raster layers
pcival=[0, 0, 3, 0] # add 3 16bit unsigned layers to .pix file
)
except Exception as e:
print(e)
print('Finished clipping image.')
# -----------------------------------------------------------
# --------- PCA Image & Report Writing ----------------------
# -----------------------------------------------------------
print('Starting PCA analysis and report writing.')
try:
# initialize report file and make sure none of it is already in memory
Report.clear()
enableDefaultReport(root+'\\reports\\PCA_report_1.txt')
# pca analysis function
pca(file=root + '\\pca\\hal_clip.pix',
# which spectral bands to be analysed
dbic=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
eign=[1, 2, 3],
# what bands to occupy the newly created principal components
# setting RGB color channels to 8,9,10 will give PC1,PC2,PC3 in focus
dboc=[8, 9, 10],
rtype='long')
finally:
# close the report file
enableDefaultReport('term')
print('Finished PCA analysis and report writing.')
# How long did the script take?
scp_time = dt.now() - start
print('The script took ' + str(scp_time) + ' to complete.')
# -----------------------------------------------------------
# --------- File Exporting ----------------------------------
# -----------------------------------------------------------
fexport(fili=root + '\\pca\\hal_clip.pix',
filo=root + '\\pca\\MillaKoch_PCA.pix',
dbic=[8, 9, 10])
print("File with PCs has been exported.")
# -----------------------------------------------------------
# --------- End of Script -----------------------------------
# -----------------------------------------------------------
| tomasmk/Remote-Sensing-Automation | PCA.py | PCA.py | py | 4,658 | python | en | code | 0 | github-code | 36 |
71116093224 | from common.httphandler import HttpHander
from common.yml_util import YmlUtil
import pytest, json
http = HttpHander()
YmlUt = YmlUtil()
class TestCaseSingle:
def get_case_all(self, case_data, url_map, header):
if case_data.get("method") == 'get':
resp = http.get(url=url_map, headers=header)
print(resp)
if "assert_data" in case_data.keys():
assert case_data.get("assert_data") in resp
elif case_data.get("method") == 'post':
data = case_data.get("param")
resp = http.post(url=url_map, json=data, headers=header)
print(resp)
if "assert_data" in case_data.keys():
assert case_data.get("assert_data") in resp
else:
return
@pytest.mark.parametrize('yml_path,case_id', YmlUt.read_yaml_all_tuple("case_single"))
def test_case_all(self, yml_path, case_id):
yml_value = YmlUt.read_yaml_values(yml_path)
case_data = YmlUt.get_value(yml_value, case_id)
header = YmlUt.read_yaml_values(YmlUt.read_yaml_paths("token_head")[0]).get("headers")
host_map = YmlUt.read_yaml_values(YmlUt.read_yaml_paths("token_head")[0]).get("host_map")
url_map = YmlUt.host_map_url(case_data, host_map) # 服务器映射
# print(header)
self.get_case_all(case_data, url_map, header)
| itol220/testapi | testcases/test_single.py | test_single.py | py | 1,385 | python | en | code | 0 | github-code | 36 |
29719072387 | import numpy as np
def cast(s):
triple = s.split('<')[1][:-1].split(',')
triple = [int(t) for t in triple]
return np.array(triple)
class Point(object):
def __init__(self, split):
"""initalize"""
self.loc = cast(split[0])
self.vel = cast(split[1])
self.acc = cast(split[2])
def move(self):
"""move the point"""
self.vel += self.acc
self.loc += self.vel
return self.loc
raw = open('input').read().splitlines()
points = {}
# build points
for i, r in enumerate(raw):
split = r.split(', ')
points[i] = Point(split)
step = 0
prev_len = 0
while True:
if step % 100 == 0:
print('{}\t{}'.format(step, len(points)))
if prev_len == len(points):
break
prev_len = len(points)
keys = np.zeros(len(points))
vals = np.zeros((len(points), 3))
i = 0
# move everything
for k, v in points.items():
keys[i] = k
vals[i] = v.move()
i += 1
# check for unique indexes
out = np.unique(vals, axis=0, return_index=True, return_counts=True)
# make sure there is only one of the unique value
uniq_indexes = [o for i, o in enumerate(out[1]) if out[2][i] == 1]
# find the inverse of the unique indexes
remove = np.setdiff1d(range(len(points)), uniq_indexes)
# remove the dups but make sure to map back to the keys
for r in remove:
del points[keys[r]]
step += 1
| yknot/adventOfCode | 2017/20_02.py | 20_02.py | py | 1,461 | python | en | code | 0 | github-code | 36 |
25321444319 | import pandas as pd
from glob import glob
from datetime import datetime
import os
def removeDups(file):
df = pd.read_excel(file)
# Keep only FIRST record from set of duplicates
df_first_record = df.drop_duplicates(subset="Date/Time", keep="first")
#creates an excel file with sorted times
if glob("noDupsTime.xlsx"):
pass
else:
df_first_record.to_excel("./downloads/noDupsTime.xlsx", index=False)
# removeDups()
def create_dict():
os.chdir('./downloads')
df=pd.read_excel("noDupsTime.xlsx")
names_list=list(df['Name'])
dates_list=list(df['Date/Time'])
custom_dict={}#dictionary of names as keys and al lthe datetime as values
modified_dict={}
for name,date_time in zip(names_list,dates_list):
if name not in custom_dict.keys():
custom_dict[name]=[date_time]
else:
custom_dict[name].append(date_time)
for name in custom_dict.keys():
#for each name go through each date,then split date_time into date and time,
#create a dictionary with each day as the key and the values an array of times
date_dict={}
for dateTime in custom_dict[name]:
#iterate over datetimes of each person
date = dateTime.split()[0]
time = dateTime.split()[-1]
if date not in date_dict.keys():
date_dict[date]=[time]
else:
date_dict[date].append(time)
modified_dict[name]=date_dict#create new dictionary with the name as the key
print(modified_dict)
return modified_dict
def create_report(create_dict):
data=create_dict()
lst_of_names=[]
length=0
lst_of_dates=[]
lst_of_timein=[]
lst_of_timeout=[]
lst_of_durations=[]
total=0
#iterate over all the names
for name in data.keys():
length+=1
days=data[name]
dates=data[name].keys()#gets dates for each name
print(f'each_day:{type(days)}')
no_of_names=len(dates)
lst_of_names.extend([name for i in range(no_of_names)])#make name array same size as dates array
lst_of_dates.extend(dates)
for day in days.values():
print('day:',day)
total+=1
first_time = datetime.strptime(day[0], '%H:%M:%S')
last_time = datetime.strptime(day[-1], '%H:%M:%S')
time_diff_Hours = (last_time - first_time).seconds//3600
rem_minutes = ((last_time-first_time).seconds% 3600)//60
time_diff = str(time_diff_Hours) + ":" + str(rem_minutes)
lst_of_timein.append(first_time.time())
lst_of_timeout.append(last_time.time())
lst_of_durations.append(time_diff)
#lst_of_durations.extend(data[name].values()[0])
print(f'total:{total}')
print(f"old length:{length}\nnew length:{len(lst_of_names)}")
print(f"no of dates:{len(lst_of_dates)}")
print(f'{len(lst_of_timein)}')
print(f'{len(lst_of_timeout)}')
df=pd.DataFrame({"Names":lst_of_names,"Date":lst_of_dates,"Time_in":lst_of_timein,"Time_out":lst_of_timeout,"Time Spent":lst_of_durations})
#df2=pd.DataFrame({"Time_in":lst_of_timein,"Time_out":lst_of_timeout})
#df=pd.DataFrame({"Names":lst_of_names,"Date":lst_of_dates})
df.to_excel('report.xlsx')
#df2.to_excel('fingers2.xlsx')
# create_report(create_dict)
def report(file):
removeDups(file)
create_report(create_dict)
| OliverSolomon/flaskExcel | reporter.py | reporter.py | py | 3,444 | python | en | code | 0 | github-code | 36 |
43157370954 | #!/usr/bin/python3
import sys, pygame
from pygame.locals import *
black = (0, 0, 0)
white = (255,255,255)
red = (255,0,0)
green = (0, 255, 0)
blue = (0, 0, 255)
pygame.init()
pygame.display.set_caption("drawing") # set the title of the window
surface = pygame.display.set_mode((400, 300)) # return pygame.Surface
surface.fill(white) # <=== white the surface
# draw polygon
pygame.draw.polygon(surface, green, ((123, 0), (234,132), (269, 211), (77, 66)), 0)
# draw line
pygame.draw.line(surface, red, (70, 200), (80,100), 20)
pygame.draw.circle(surface, black, (30, 50), 15, 10)
pygame.draw.ellipse(surface, black, (30, 50, 100, 60), 10)
pygame.draw.rect(surface, blue, (30, 50, 100, 60))
# event loop: handling event, update game state(variables), rendering graphics
while True:
for event in pygame.event.get():
# QUIT etc, defined in pygame.locals
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update() # render surface into screen
# Question:
# what's the different if we move Line 14 ~ 18 into loop before pygame.display.update() ?
"""
Surface: 2D rectangle
Color : (r, g, b, a)
Rect: (x0, y0, width, height)
"""
| minskeyguo/mylib | python-edu/17-pygame-basic/02-geometry.py | 02-geometry.py | py | 1,223 | python | en | code | 0 | github-code | 36 |
34547412730 |
from PIL import Image
import cv2
# 選擇第二隻攝影機
cap = cv2.VideoCapture(0)
while(True):
# 從攝影機擷取一張影像
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(frame, 100 , 200)
# img_fc, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# hierarchy = hierarchy[0]
# found = []
# for i in range(len(contours)):
# k = i
# c = 0
# while hierarchy[k][2] != -1:
# k = hierarchy[k][2]
# c = c + 1
# if c >= 5:
# found.append(i)
# for i in found:
# cv2.drawContours(frame, contours, i, (0, 255, 0), 3)
(_, cnts, _) = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) > 0:
for cnt in cnts:
# compute the (rotated) bounding box around then
# contour and then draw it
rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
cv2.drawContours(self.frame, [rect], -1, (0, 255, 0), 2)
# 顯示圖片
cv2.imshow('frame', edges)
cv2.waitKey(1)
if cv2.getWindowProperty('frame', cv2.WND_PROP_AUTOSIZE) == -1:
break
# 釋放攝影機
cap.release()
# 關閉所有 OpenCV 視窗
cv2.destroyAllWindows() | Amenoimi/Simple_OCR | QR_GET.py | QR_GET.py | py | 1,322 | python | en | code | 0 | github-code | 36 |
73902282025 | from abc import ABC, abstractmethod
from app.schemas.base import BaseModel
class BaseRepository(ABC):
def __init__(self, model: BaseModel, *args, **kwargs):
self.model = model
@abstractmethod
async def get_all(self):
raise NotImplementedError
| kirakulakov/wbmp_redis_stat | app/repositories/base.py | base.py | py | 275 | python | en | code | 0 | github-code | 36 |
17704956797 | import numpy as np
from PyQt5.QtWidgets import QWidget, QApplication, QVBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from SO3 import SO3
from rotation import Ui_Form
class My_window(QWidget, Ui_Form):
def __init__(self, parent=None, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.setupUi(self)
self.retranslateUi(self)
self.current_rotation = SO3()
self.figure = Figure()
self.canvas = FigureCanvasQTAgg(self.figure)
self.layout = QVBoxLayout(self.widget_2)
self.layout.addWidget(self.canvas)
self.display_current_rotation()
def slot_matrix2all(self):
pass
def slot_quat2all(self):
q = np.empty(4)
q[0] = self.quatw.value()
q[1] = self.quatx.value()
q[2] = self.quaty.value()
q[3] = self.quatz.value()
self.current_rotation = SO3.from_quaternion(q)
self.display_current_rotation()
def slot_anglevec2all(self):
vector = np.empty(4)
vector[0] = self.vectorx.value()
vector[1] = self.vectory.value()
vector[2] = self.vectorz.value()
vector[3] = self.angle.value()
self.current_rotation = SO3.from_axis_angle(vector)
self.display_current_rotation()
def display_current_rotation(self):
# matrix
matrix = self.current_rotation.rotation_matrix
print(matrix)
for i in range(3):
for j in range(3):
exec("self.matrix{}{}.setValue({})".format(str(i), str(j), matrix[i][j]))
# quat
quat = self.current_rotation.to_quaternion()
self.quatw.setValue(quat[0])
self.unitw.setValue(quat[0])
self.quatx.setValue(quat[1])
self.unitx.setValue(quat[1])
self.quaty.setValue(quat[2])
self.unity.setValue(quat[2])
self.quatz.setValue(quat[3])
self.unitz.setValue(quat[3])
# angle_vector
vector = self.current_rotation.to_axis_angle()
self.angle.setValue(vector[3])
self.vectorx.setValue(vector[0])
self.vectory.setValue(vector[1])
self.vectorz.setValue(vector[2])
self.textBrowser.setText(str(self.current_rotation))
# plot
self.figure.clear()
self.current_rotation.plot_coordinate_system(self.figure)
self.canvas.draw()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = My_window()
window.show()
sys.exit(app.exec_())
| rollingball-3/Learning-rotation | main.py | main.py | py | 2,586 | python | en | code | 0 | github-code | 36 |
32383353461 | # -*- coding: utf-8 -*-
#performance.py
from __future__ import print_function
import numpy as np
import pandas as pd
def create_sharpe_ratio(returns,periods=252):
"""
计算策略的Sharpe比率,基于基准为0,也就是假设无风险利率为0
"""
return np.sqrt(periods)*(np.mean(returns)/np.std(returns))
def create_drawdowns(pnl):
"""
计算PnL曲线的最大回撤(从最大收益到最小收益之间的距离),以及回撤的时间
这里需要参数pnl_returns是一个pandas的Series
"""
hwm=[0]
idx=pnl.index
drawdown=pd.Series(index=idx)
duration=pd.Series(index=idx)
for t in range(1,len(idx)):
hwm.append(max(hwm[t-1],pnl[t]))
drawdown[t]=(hwm[t]-pnl[t])
duration[t]=(0 if drawdown[t]==0 else duration[t-1]+1)
return drawdown,drawdown.max(),duration.max()
| szy1900/Event_driven_framework_for_backtesting | performance.py | performance.py | py | 882 | python | zh | code | 38 | github-code | 36 |
40243436461 | """
Beautify Images Utils
"""
import random
import operator
import heapq
import math
from scipy.interpolate import UnivariateSpline
import cv2
import pilgram
from PIL import Image, ImageStat
import numpy as np
from src.utils.image_process import (
do_we_need_to_sharpen,
sharpen_my_image,
adjust_contrast_brightness,
)
def get_top_frames(scores, num, fps, dispersed=True):
"""
Returns list of indexes for number frames with the highest scores as
specified by the user.
Users can define the 'dispersed' function if they wish to have num images
taken from different parts of the video. In this instance, we randomly sample
10% of the frames from the video and score these frames.
Otherwise the function just returns the best num images from the frames scored.
"""
if len(scores) <= 1000:
dispersed = False
if dispersed:
tmp = []
while True:
if len(tmp) == int(0.1 * len(scores)):
break
sampled_frame = random.choice(scores)
if len(tmp) == 0:
tmp.append(sampled_frame)
else:
flag = False
for i in tmp:
if i - fps <= sampled_frame <= i + fps:
flag = True
break
if flag == False:
tmp.append(sampled_frame)
idx = sorted(
list(zip(*heapq.nlargest(num, enumerate(tmp), key=operator.itemgetter(1))))[
0
]
)
return sorted([scores.index(j) for j in [tmp[i] for i in idx]])
else:
return sorted(
list(
zip(*heapq.nlargest(num, enumerate(scores), key=operator.itemgetter(1)))
)[0]
)
def get_top_n_idx(filtered_scores, filtered_idx, sampling_size=0.1, n=10):
"""
Random sample from scores and get the indices of the top n scores
from original video
Args:
filtered_scores (np.array): scores filtered from object detection that pass a threshold
filtered_idx (np.array): the indices of scores that pass the threshold, from original video
sampling_size (float): proportion of samples to choose from num_frames of original video
n (int): top n scores to choose from
Return:
top_n_idx (np.array): indices of top n scores from the sample,
corresponding to indices from original video
"""
# sample from filtered_scores & filtered_idx arrays
n_sample = int(np.ceil(len(filtered_scores) * sampling_size))
if n_sample <= n:
n_sample = len(filtered_scores)
rand_sample = np.random.choice(len(filtered_scores), n_sample, replace=False)
rand_sample_scores = filtered_scores[rand_sample]
rand_sample_idx = filtered_idx[rand_sample]
# get the indices of the top n scores from the sample
top_n_idx = rand_sample_idx[rand_sample_scores.argsort()[::-1][: min(n, n_sample)]]
return top_n_idx
def brightness(im_file):
"""
Returns perceived brightness of image
https://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx
"""
stat = ImageStat.Stat(im_file)
r, g, b = stat.mean
return math.sqrt(0.241 * (r**2) + 0.691 * (g**2) + 0.068 * (b**2))
def LookupTable(x, y):
spline = UnivariateSpline(x, y)
return spline(range(256))
def Summer(img):
increaseLookupTable = LookupTable([0, 64, 128, 256], [0, 80, 160, 256])
decreaseLookupTable = LookupTable([0, 64, 128, 256], [0, 50, 100, 256])
blue_channel, green_channel, red_channel = cv2.split(img)
red_channel = cv2.LUT(red_channel, increaseLookupTable).astype(np.uint8)
blue_channel = cv2.LUT(blue_channel, decreaseLookupTable).astype(np.uint8)
sum = cv2.merge((blue_channel, green_channel, red_channel))
return sum
def Winter(img):
increaseLookupTable = LookupTable([0, 64, 128, 256], [0, 80, 160, 256])
decreaseLookupTable = LookupTable([0, 64, 128, 256], [0, 50, 100, 256])
blue_channel, green_channel, red_channel = cv2.split(img)
red_channel = cv2.LUT(red_channel, decreaseLookupTable).astype(np.uint8)
blue_channel = cv2.LUT(blue_channel, increaseLookupTable).astype(np.uint8)
win = cv2.merge((blue_channel, green_channel, red_channel))
return win
def beautify(beauti_img, filter="hudson"):
"""
Beautifies selected images.
Input arguments:
1) beauti_img (np.array) - array of images in cv2/BGR format
2) filter (str) - instagram filter to apply
3) The default filter is Hudson.
List of Instagram filters: https://github.com/akiomik/pilgram/tree/master/pilgram
"""
if filter:
try:
pilgram_filter = getattr(pilgram, filter.lower())
except:
raise ValueError(
"""
That was not a correct filter. The list of correct filters are:
_1977
aden
brannan
brooklyn
clarendon
earlybird
gingham
hudson
inkwell
kelvin
lark
lofi
maven
mayfair
moon
nashville
perpetua
reyes
rise
slumber
stinson
toaster
valencia
walden
willow
xpro2
Here's some showcases of filtered images:
https://github.com/akiomik/pilgram/blob/master/screenshots/screenshot.png
"""
)
for idx, img in enumerate(beauti_img):
## Reduce blue light ##
if filter and filter.lower() == "hudson":
img = Summer(img)
# Adjust brightness and contrast
lux = brightness(Image.fromarray(img))
if lux <= 130:
beta = 137.5 - lux
elif lux > 145:
beta = 137.5 - lux
else:
beta = 0
img = adjust_contrast_brightness(img, contrast=1.2, brightness=beta)
## Check and sharpen ##
if do_we_need_to_sharpen(img):
img = sharpen_my_image(img)
## Apply instagram filter ##
if filter:
img = np.array(pilgram_filter(Image.fromarray(img)))
beauti_img[idx] = img
return beauti_img
def check_filter(filter):
error_msg = """
That was not a correct filter. The list of correct filters are: \n
_1977
aden
brannan
brooklyn
clarendon
earlybird
gingham
hudson
inkwell
kelvin
lark
lofi
maven
mayfair
moon
nashville
perpetua
reyes
rise
slumber
stinson
toaster
valencia
walden
willow
xpro2
\nHere's some showcases of filtered images:
https://github.com/akiomik/pilgram/blob/master/screenshots/screenshot.png
"""
try:
pilgram_filter = getattr(pilgram, filter.lower())
except:
return False, error_msg
return True, error_msg
| teyang-lau/you-only-edit-once | src/utils/beautify.py | beautify.py | py | 6,911 | python | en | code | 6 | github-code | 36 |
27653523771 | import time
import utilities.custom_logger as cl
import logging
from base.basepage import BasePage
from base.selenium_driver import SeleniumDriver
class Register_courses_page(BasePage):
log = cl.customLogger(logging.DEBUG)
#Locators
_search_box_id = "search-courses"
_course_xpath = "/html/body/div/div/div/div[2]/div/div/div[1]/a/div/div[2]"
_search_btn_id = "search-course-button"
_enroll_button_id = "enroll-button-top" # type is id
_cc_id= "payment_method_credit_card" #type is id
_credit_card_num_name = "cardnumber" #type is id
_cc_exp_name = "exp-date"
_cc_cvc_name = "cvc"
_postal_field_name = "postal"
_submit_enroll_id = "confirm-purchase" #id
_enroll_error_message_class = "cc__error alert-danger" # class
def __init__(self, driver):
super(Register_courses_page, self).__init__(driver)
self.driver = driver
def enterCourseToEnroll(self, Name):
self.sendKeys(Name, self._search_box_id, "id")
self.elementClick(self._search_btn_id, "id")
#put a wait command here,becoz it will load
def selectCourseToEnroll(self):
# not sure about the link text
self.elementClick(self._course_xpath, "xpath")
#put wait statements
self.elementClick(self._enroll_button_id, "id")
def enterCardNumber(self, cardNumber):
#__privateStripeFrame6
self.webScroll("down", self._postal_field_name, "name")
#self.webScroll("down")
#switch to frame using id
time.sleep(1)
self.driver.switch_to_frame(self.getElement("__privateStripeFrame3", "name"))
time.sleep(2)
self.sendKeys(cardNumber, self._credit_card_num_name, "name")
self.driver.switch_to_default_content()
def enterCardExp(self,exp):
#time.sleep(1)
self.driver.switch_to_frame(self.getElement("__privateStripeFrame4", "name"))
time.sleep(2)
self.sendKeys(exp, self._cc_exp_name,"name")
self.driver.switch_to_default_content()
def enterCardCvc(self,cvc):
#time.sleep(1)
self.driver.switch_to_frame(self.getElement("__privateStripeFrame5", "name"))
time.sleep(2)
self.sendKeys(cvc, self._cc_cvc_name,"name")
self.driver.switch_to_default_content()
def enterpostalcode(self,postalcode):
self.driver.switch_to_frame(self.getElement("__privateStripeFrame6", "name"))
time.sleep(2)
self.sendKeys(postalcode, self._postal_field_name,"name")
self.driver.switch_to_default_content()
def enrollInCourse(self):
self.elementClick(self._submit_enroll_id, "id")
def enterCreditCardinformation(self, cardNumber, exp, cvc, postal_code):
self.enterCardNumber(cardNumber)
self.enterCardExp(exp)
self.enterCardCvc(cvc)
self.enterpostalcode(postal_code)
def captureErrorMsg(self):
error_msg = "Hint : check for exception"
try:
error_msg = self.getElement(self._enroll_error_message_class, "class")
except:
raise
return error_msg
| akanksha2306/selenium_python_practice | pages/courses/register_courses_page.py | register_courses_page.py | py | 3,129 | python | en | code | 0 | github-code | 36 |
7668440361 | import sys
def ft_filter(check_function, _list):
"""filter(function or None, iterable) --> filter object
Return an iterator yielding those items of iterable for which function(item)
is true. If function is None, return the items that are true."""
filtered_list = []
for elem in _list:
if check_function(elem) == True:
filtered_list.append(elem)
return filtered_list
def check_even(number):
if number % 2 == 0:
return True
return False
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def main():
print(ft_filter(check_even, numbers))
# print(filter.__doc__)
# print(ft_filter.__doc__)
if __name__ == "__main__":
main()
| GusFiveO/python_for_data_science | module00/ex06/ft_filter.py | ft_filter.py | py | 651 | python | en | code | 0 | github-code | 36 |
75091410342 | import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
import wikipediaapi
# Set up Wikipedia API
wiki = wikipediaapi.Wikipedia('en')
# Set up tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2LMHeadModel.from_pretrained('gpt2', pad_token_id=tokenizer.eos_token_id)
# Define training parameters
batch_size = 4
epochs = 2
learning_rate = 1e-4
# Define function to preprocess input text
def preprocess(text):
text = text.strip().replace('\n', ' ')
tokens = tokenizer.encode(text, add_special_tokens=True, max_length=512)
input_ids = tf.convert_to_tensor(tokens[:-1], dtype=tf.int32)
target_ids = tf.convert_to_tensor(tokens[1:], dtype=tf.int32)
return input_ids, target_ids
# Define function to fetch training data from Wikipedia
def fetch_training_data():
titles = [
'Artificial intelligence',
'Machine learning',
'Natural language processing',
'Recurrent neural network',
'Transformer (machine learning)',
'Generative Pre-trained Transformer 2'
]
text = ''
for title in titles:
page = wiki.page(title)
if page.exists():
text += page.text
return text
# Fetch training data from Wikipedia
training_data = fetch_training_data()
# Preprocess the training data and convert it to a TensorFlow dataset
dataset = tf.data.Dataset.from_tensor_slices(training_data).map(preprocess).shuffle(10000).batch(batch_size)
# Set up optimizer and loss function
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Train the model
for epoch in range(epochs):
epoch_loss = 0.0
for batch in dataset:
with tf.GradientTape() as tape:
logits = model(batch[0], training=True)[0]
loss = loss_fn(batch[1], logits)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
epoch_loss += loss.numpy()
print('Epoch {} Loss: {:.4f}'.format(epoch+1, epoch_loss/len(dataset)))
# Save the trained model and tokenizer to files
model.save_pretrained('my_chatgpt_model')
tokenizer.save_pretrained('my_chatgpt_model') | ethan-haynes/test | train.py | train.py | py | 2,291 | python | en | code | 0 | github-code | 36 |
23551852124 | #coding: UTF-8
class Dog:
name = ""
def brak(self):
m = self.name + " : Bow-wow!"
print(m)
pochi = Dog()
pochi.name = "Pochi"
pochi.brak()
hachi = Dog()
hachi.name = "Hachi"
hachi.brak() | kato-takashi/AI_python | python_training/class.py | class.py | py | 212 | python | en | code | 0 | github-code | 36 |
8640307873 | """Extract hourly real-time EIA data from the bulk-download zip file."""
import pandas as pd
import json
from os.path import join
import os
import zipfile
import requests
import logging
from electricitylci.globals import data_dir
def download_EBA():
"""Add docstring."""
url = 'http://api.eia.gov/bulk/EBA.zip'
print(f"Downloading eia bulk data from {url}...", end="")
r = requests.get(url)
os.makedirs(join(data_dir, 'bulk_data'), exist_ok=True)
output = open(join(data_dir, 'bulk_data', 'EBA.zip'), 'wb')
output.write(r.content)
output.close()
print(f"complete.")
path = join(data_dir, 'bulk_data', 'EBA.zip')
if __name__=="__main__":
try:
z = zipfile.ZipFile(path, 'r')
with z.open('EBA.txt') as f:
raw_txt = f.readlines()
except FileNotFoundError:
download_EBA()
z = zipfile.ZipFile(path, 'r')
with z.open('EBA.txt') as f:
raw_txt = f.readlines()
# REGION_NAMES = [
# 'California', 'Carolinas', 'Central',
# 'Electric Reliability Council of Texas, Inc.', 'Florida',
# 'Mid-Atlantic', 'Midwest', 'New England ISO',
# 'New York Independent System Operator', 'Northwest', 'Southeast',
# 'Southwest', 'Tennessee Valley Authority'
# ]
#
# REGION_ACRONYMS = [
# 'TVA', 'MIDA', 'CAL', 'CAR', 'CENT', 'ERCO', 'FLA',
# 'MIDW', 'ISNE', 'NYIS', 'NW', 'SE', 'SW',
# ]
#
# TOTAL_INTERCHANGE_ROWS = [
# json.loads(row) for row in raw_txt if b'EBA.TI.H' in row
# ]
#
# NET_GEN_ROWS = [
# json.loads(row) for row in raw_txt if b'EBA.NG.H' in row
# ]
#
# DEMAND_ROWS = [
# json.loads(row) for row in raw_txt if b'EBA.D.H' in row
# ]
#
# EXCHANGE_ROWS = [
# json.loads(row) for row in raw_txt if b'EBA.ID.H' in row
# ]
#
# BA_TO_BA_ROWS = [
# row for row in EXCHANGE_ROWS
# if row['series_id'].split('-')[0][4:] not in REGION_ACRONYMS
# ]
def row_to_df(rows, data_type):
"""
Turn rows of a single type from the bulk data text file into a dataframe
with the region, datetime, and data as columns
Parameters
----------
rows : list
rows from the EBA.txt file
data_type : str
name to use for the data column (e.g. demand or total_interchange)
Returns
-------
dataframe
Data for all regions in a single df with datatimes converted and UTC
"""
tuple_list = []
for row in rows:
# "data" is of form:
# [['20190214T04Z', -102],
# ['20190214T03Z', -107],
# ['20190214T02Z', -108],
# ['20190214T01Z', -103]]
try:
datetime = pd.to_datetime([x[0] for x in row['data']],
utc=True, format='%Y%m%dT%HZ')
except ValueError:
try:
datetime = pd.to_datetime([x[0]+":00" for x in row['data']],
format='%Y%m%dT%H%z')
except ValueError:
continue
data = [x[1] for x in row['data']]
region = row['series_id'].split('-')[0][4:]
# df_data = {
# 'region': region,
# 'datetime': datetime,
# data_type: data,
# }
# region_list=[region for x in datetime]
# _df = pd.DataFrame(df_data)
# tuple_list.append(_df)
tuple_data=[x for x in zip([region]*len(datetime), list(datetime), data)]
tuple_list.extend(tuple_data)
df=pd.DataFrame(tuple_list, columns=["region", "datetime", data_type])
# df = pd.concat(df_list).reset_index(drop=True)
return df
def ba_exchange_to_df(rows, data_type='ba_to_ba'):
"""
Turn rows of a single type from the bulk data text file into a dataframe
with the region, datetime, and data as columns
Parameters
----------
rows : list
rows from the EBA.txt file
data_type : str
name to use for the data column (e.g. demand or total_interchange)
Returns
-------
dataframe
Data for all regions in a single df with datatimes converted and UTC
"""
tuple_list = []
for row in rows:
# "data" is of form:
# [['20190214T04Z', -102],
# ['20190214T03Z', -107],
# ['20190214T02Z', -108],
# ['20190214T01Z', -103]]
try:
datetime = pd.to_datetime([x[0] for x in row['data']],
utc=True, format='%Y%m%dT%HZ')
except ValueError:
try:
datetime = pd.to_datetime([x[0]+"00" for x in row['data']],
format='%Y%m%dT%H%z')
except ValueError:
continue
data = [x[1] for x in row['data']]
from_region = row['series_id'].split('-')[0][4:]
to_region = row['series_id'].split('-')[1][:-5]
# df_data = {
# 'from_region': from_region,
# 'to_region': to_region,
# 'datetime': datetime,
# data_type: data,
# }
tuple_data = [x for x in zip([from_region]*len(datetime), [to_region]*len(datetime), datetime, data)]
tuple_list.extend(tuple_data)
# _df = pd.DataFrame(df_data)
# df_list.append(_df)
# df = pd.concat(df_list).reset_index(drop=True)
df=pd.DataFrame(tuple_list, columns=["from_region", "to_region", "datetime", data_type])
return df
| USEPA/ElectricityLCI | electricitylci/bulk_eia_data.py | bulk_eia_data.py | py | 5,340 | python | en | code | 23 | github-code | 36 |
70168571944 | import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0,10,100)
y = []
up_limit = 0.8
for i in x:
if i < 6:
y.append(0)
elif i < 9:
y.append((i-6)/3 * up_limit)
else:
y.append(up_limit)
plt.plot(x,y)
plt.show() | CryptoGamer8/INFO6205-FINAL | Model/main/draw.py | draw.py | py | 264 | python | en | code | 2 | github-code | 36 |
7630680769 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('inicio', '0006_auto_20160820_2050'),
]
operations = [
migrations.CreateModel(
name='cargo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre_cargo', models.CharField(unique=True, max_length=100)),
('fecha', models.DateTimeField(auto_now=True)),
('estado', models.IntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='adminstrador',
name='carrera',
),
migrations.AddField(
model_name='adminstrador',
name='cargo',
field=models.ForeignKey(default='1', to='inicio.cargo'),
preserve_default=False,
),
]
| juanjavierlimachi/sistema-de-Informacion | mipagina/mipagina/apps/inicio/migrations/0007_auto_20160820_2141.py | 0007_auto_20160820_2141.py | py | 1,065 | python | en | code | 0 | github-code | 36 |
74950126185 | #!/usr/bin/env python
# coding: utf-8
# In[45]:
# Choquet adaptive thresholding: two step algorithm
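# Step 1: build an integral (summed-area) image so any rectangular window
#         sum is available in O(1).
# Step 2: threshold each pixel against a fuzzy-integral aggregation of its
#         local window (Choquet / Sugeno) instead of the plain local mean.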
import progressbar
from time import sleep
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from skimage import measure
from pynverse import inversefunc
import time
import scipy.misc
get_ipython().run_line_magic('matplotlib', 'inline')
import sys
import warnings
#Otsu trhesholding
from skimage import data
from skimage import filters
from skimage import exposure
#format the output in a readable format
float_formatter = lambda x: "%.2f" % x
np.set_printoptions(precision=0,formatter={'float_kind':float_formatter})
if not sys.warnoptions:
warnings.simplefilter("ignore")
# In[46]:
#function section
### import img
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def plot_it(img):
plt.figure(figsize = [8,8])
arr = np.asarray(img)
plt.imshow(arr, cmap='gray', vmin=0, vmax=arr.max())
plt.title(namestr(img, globals()))
plt.show()
def import_img(img_path):
    img = cv2.imread(img_path, 0)          # read as 8-bit grayscale
    img_reverted = cv2.bitwise_not(img)    # invert so the foreground is bright
    norm_img = img_reverted / 255.0        # normalize intensities to [0, 1]
    #plot_it(norm_img)
    print(norm_img)
    print(norm_img.shape)
    print(norm_img.size)
    return norm_img
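# Minimal usage sketch for the loaders above (the path is hypothetical):
# norm_img = import_img('images/sample.png')
# plot_it(norm_img)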
### cumulative G function (sum-table algorithm)
def compute_summed_area_table(image):
# image is a 2-dimensional array containing ints or floats, with at least 1 element.
height = len(image)
width = len(image[0])
new_image = [[0.0] * width for _ in range(height)] # Create an empty summed area table
for row in range(0, height):
for col in range(0, width):
if (row > 0) and (col > 0):
new_image[row][col] = image[row][col] + new_image[row][col - 1] + new_image[row - 1][col] - new_image[row - 1][col - 1]
elif row > 0:
new_image[row][col] = image[row][col] + new_image[row - 1][col]
elif col > 0:
new_image[row][col] = image[row][col] + new_image[row][col - 1]
else:
new_image[row][col] = image[row][col]
return new_image
def get_int_img_m1(input_img):
    h, w = input_img.shape
    # integral image (naive O(h*w) window sum per pixel); use a float
    # accumulator: with dtype=np.uint32 the normalized [0, 1] intensities
    # would be truncated to zero on assignment
    int_img = np.zeros_like(input_img, dtype=np.float64)
    for col in range(w):
        for row in range(h):
            int_img[row, col] = input_img[0:row+1, 0:col+1].sum()
    return int_img
def cdf_image(input_img):
    nh, binn = np.histogram(input_img)   # histogram counts and bin edges
    cdf = np.cumsum(nh)                  # cumulative intensity distribution
    return [cdf, nh, binn]
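# Tiny worked example: summed-area table of a 2x3 patch. Entry (i, j) holds
# the sum over the rectangle [0..i, 0..j], so the bottom-right entry is the
# total sum (here 0+1+2+3+4+5 = 15).
_demo_patch = [[0., 1., 2.], [3., 4., 5.]]
print(np.asarray(compute_summed_area_table(_demo_patch)))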
# In[ ]:
# In[47]:
# Adaptive Choquet
# opt = 0  Hamacher
# opt = 1  discrete Choquet
# opt = 2  discrete Choquet with F1, F2 on the distributive property
def compute_choquet(choquet_order, fuzzy_mu, opt=0):
    # choquet_order: sorted window values with a leading 0 (x_(0) = 0);
    # fuzzy_mu: measure of the tail sets, one entry per window value
    C = 0
    if opt == 0:  # Choquet-like sum of Hamacher t-norms T(a,b) = ab/(a+b-ab)
        for i in range(len(choquet_order)-1):
            j = i + 1
            C = C + (choquet_order[j] * fuzzy_mu[i])/(choquet_order[j] + fuzzy_mu[i] - (choquet_order[j] * fuzzy_mu[i]))
    if opt == 1:  # standard discrete Choquet: sum_j (x_(j) - x_(j-1)) * mu
        for i in range(len(choquet_order)-1):
            j = i + 1
            C = C + ((choquet_order[j] - choquet_order[j-1]) * fuzzy_mu[i])
    if opt == 2:  # Choquet with F1(a,b) = sqrt(ab), F2(a,b) = max(a+b-1, 0)
        for i in range(len(choquet_order)-1):
            j = i + 1
            C = C + (np.sqrt(choquet_order[j]*fuzzy_mu[i]) - max((choquet_order[j] + fuzzy_mu[i] - 1), 0))
    return C
def compute_sugeno(sugeno_order, fuzzy_mu):
    # Sugeno integral: max over i of min(x_(i), mu_i). Start from an empty
    # array: np.empty((1)) leaves one uninitialized value that could leak
    # into the max.
    S = np.empty(0, float)
    for i in range(len(sugeno_order)):
        S = np.append(S, min(sugeno_order[i], fuzzy_mu[i]))
    return max(S)
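# Minimal sanity check with the fixed fuzzy measure used below
# (mu = [1, 0.75, 0.5, 0.25]) on an already sorted window. For opt=1:
# (1-0)*1 + (2-1)*0.75 + (3-2)*0.5 + (4-3)*0.25 = 2.5
_order = np.asarray([0., 1., 2., 3., 4.])    # leading 0 as in the callers
_mu = np.asarray([1., 0.75, 0.5, 0.25])
print(compute_choquet(_order, _mu, opt=1))   # -> 2.5
print(compute_choquet(_order, _mu, opt=0))   # Hamacher variant on the same data
print(compute_sugeno(_order[1:], _mu))       # -> 1.0, max of elementwise minima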
## Integral Choquet and Sugeno image.
# For every pixel, the four corner values of its local 2x2 integral-image
# window are aggregated with the Choquet and Sugeno integrals (instead of
# the plain inclusion-exclusion sum); degenerate border windows with
# count == 0 are handled case by case.
def adaptive_choquet_itegral(input_img, int_img, opt, log=False):
h, w = input_img.shape
th_mat = np.zeros(input_img.shape)
choquet_mat = np.zeros(input_img.shape)
sugeno_mat = np.zeros(input_img.shape)
count_matrix = np.zeros(input_img.shape)
for col in range(w): #i
for row in range(h): #j
            # 2x2 neighbourhood around the pixel, clamped at the image border
            y0 = int(max(row-1, 0))
            y1 = int(min(row, h-1))
            x0 = int(max(col-1, 0))
            x1 = int(min(col, w-1))
            count = (y1-y0)*(x1-x0)   # 0 on degenerate border windows
            count_matrix[row, col] = count
choquet_order = -1
sum_ = -1
fuzzy_mu = -1
if count == 0:
if x0 == x1 and y0 == y1:
sum_ = int_img[y0, x0]
C_ = sum_
S_ = sum_
if x1 == x0 and y0 != y1:
sum_ = (int_img[y1, x1] + int_img[y0, x1])/2
choquet_order = np.asarray([0,int_img[y0, x1], int_img[y1, x1]])
sugeno_order = np.asarray([int_img[y0, x1], int_img[y1, x1]])
fuzzy_mu = np.asarray([1, 0.5])
C_ = compute_choquet(choquet_order, fuzzy_mu,opt)
S_ = compute_sugeno(sugeno_order, fuzzy_mu)
if y1 == y0 and x1 != x0:
sum_ = (int_img[y1, x1] + int_img[y1, x0])/2
choquet_order = np.asarray([0,int_img[y1, x0], int_img[y1, x1]])
sugeno_order = np.asarray([int_img[y1, x0], int_img[y1, x1]])
fuzzy_mu = np.asarray([1, 0.5])
C_ = compute_choquet(choquet_order, fuzzy_mu,opt)
S_ = compute_sugeno(sugeno_order, fuzzy_mu)
else:
sum_ = int_img[y1, x1] - int_img[y0, x1] - int_img[y1, x0] + int_img[y0, x0]
                # order the two middle corners ascending: for a non-negative
                # image the integral image is monotone, so the top-left corner
                # is the smallest value and the bottom-right the largest
                if int_img[y0, x1] > int_img[y1, x0]:
choquet_order = np.asarray([0,int_img[y0, x0], int_img[y1, x0], int_img[y0, x1], int_img[y1, x1]])
sugeno_order = np.asarray([int_img[y0, x0], int_img[y1, x0], int_img[y0, x1], int_img[y1, x1]])
else:
choquet_order = np.asarray([0,int_img[y0, x0], int_img[y0, x1], int_img[y1, x0], int_img[y1, x1]])
sugeno_order = np.asarray([int_img[y0, x0], int_img[y0, x1], int_img[y1, x0], int_img[y1, x1]])
fuzzy_mu = np.asarray([1, 0.75, 0.50, 0.25])
C_ = compute_choquet(choquet_order, fuzzy_mu,opt)
S_ = compute_sugeno(sugeno_order, fuzzy_mu)
th_mat[row,col] = sum_
choquet_mat[row,col] = C_
sugeno_mat[row,col] = S_
if(log):
coords_window = np.zeros_like(input_img)
#coords_window[x0:x1,y0:y1] = 1.0
coords_window[y0, x0] = 0.2
coords_window[y1, x0] = 0.4
coords_window[y0, x1] = 0.6
coords_window[y1, x1] = 0.8
plot_it(coords_window)
print("Search_region")
print("x0:" + str(x0) + " x1:"+ str(x1) + " y0:" + str(y0) + " y1:" + str(y1) )
print("Row:" + str(row) + " Col:" + str(col))
print("Count: " + str(count))
print("choquet fixed ordered and fuzzy mu")
print(choquet_order)
print(fuzzy_mu)
print("choquet calculus")
print(C_)
print("sugeno calculus")
print(S_)
print("Input mat")
print(input_img)
print("Int img")
print(int_img)
print("I integral mat: ")
print(th_mat)
print("C_ choquet")
print(choquet_mat)
print("S_ sugeno")
print(sugeno_mat)
print("Count matrix")
print(count_matrix)
print("-------")
return choquet_mat, sugeno_mat, count_matrix
# In[ ]:
# In[48]:
## Classic Bradley approach (Bradley & Roth, 2007)
def adaptive_thresh(input_img, int_img, a1=8, a2=2, T=0.15):
out_img = np.zeros_like(input_img)
h, w = input_img.shape
S = w/a1
s2 = S/a2
th_mat = np.zeros(input_img.shape)
for col in range(w):
for row in range(h):
#SxS region
y0 = int(max(row-s2, 0))
y1 = int(min(row+s2, h-1))
x0 = int(max(col-s2, 0))
x1 = int(min(col+s2, w-1))
count = (y1-y0)*(x1-x0)
sum_ = int_img[y1, x1] - int_img[y0, x1] - int_img[y1, x0] + int_img[y0, x0]
th_mat[row,col] = sum_/count
            if input_img[row, col] * count < sum_ * (1. - T):
out_img[row,col] = 0
else:
out_img[row,col] = 1
return np.asarray(out_img), th_mat
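# Usage sketch (added; file path hypothetical): classic Bradley binarisation
# with an S = w/8 window; a pixel becomes background when
# pixel * count < window_sum * (1 - T).
# img = 1.0 - import_img('./original/00.bmp')
# out, _ = adaptive_thresh(np.asarray(img), get_int_img_m1(img))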
# Novel Choquet adaptive approach
def adaptive_thresh2(input_img, int_img, a1=4, a2=1, T=0, log=False):
    if T == 0:
        # T == 0 selects an automatic Otsu threshold.
        T = filters.threshold_otsu(input_img)
out_img_choquet = np.zeros_like(input_img)
out_img_sugeno = np.zeros_like(input_img)
choquet_mat = np.zeros_like(input_img)
sugeno_mat = np.zeros_like(input_img)
h, w = input_img.shape
S = w/a1
s2 = S/a2
for col in range(w):
for row in range(h):
y0 = int(max(row-s2, 0))
y1 = int(min(row+s2, h-1))
x0 = int(max(col-s2, 0))
x1 = int(min(col+s2, w-1))
count = (y1-y0)*(x1-x0)
sum_ = -1
fuzzy_mu = -1
            if count == 0:
                # Degenerate border windows: fall back to point/line samples.
                if x0 == x1 and y0 == y1:
                    sum_ = int_img[y0, x0]
                    S_ = sum_
                elif x1 == x0 and y0 != y1:
                    sum_ = int_img[y1, x1] - int_img[y0, x1]
                    sugeno_order = np.asarray([int_img[y0, x1], int_img[y1, x1]])
                    fuzzy_mu = np.asarray([1, 0.5])
                    S_ = compute_sugeno(sugeno_order, fuzzy_mu)
                elif y1 == y0 and x1 != x0:
                    sum_ = int_img[y1, x1] - int_img[y1, x0]
                    sugeno_order = np.asarray([int_img[y1, x0], int_img[y1, x1]])
                    fuzzy_mu = np.asarray([1, 0.5])
                    S_ = compute_sugeno(sugeno_order, fuzzy_mu)
else:
sum_ = int_img[y1, x1] - int_img[y0, x1] - int_img[y1, x0] + int_img[y0, x0]
if(int_img[y0, x1] > int_img[y1, x0] ):
sugeno_order = np.asarray([int_img[y0, x0], int_img[y1, x0], int_img[y0, x1], int_img[y1, x1]])
else:
sugeno_order = np.asarray([int_img[y0, x0], int_img[y0, x1], int_img[y1, x0], int_img[y1, x1]])
fuzzy_mu = np.asarray([1, 0.75, 0.50, 0.25])
S_ = compute_sugeno(sugeno_order, fuzzy_mu)
            # Guard: count is 0 for the degenerate border windows handled
            # above; fall back to a unit denominator there.
            denom = count if count > 0 else 1
            choquet_mat[row, col] = sum_ / denom
            if input_img[row, col] * denom < sum_ * (1. - T):
                out_img_choquet[row, col] = 0
            else:
                out_img_choquet[row, col] = 1
            sugeno_mat[row, col] = S_ / denom
            # The Sugeno branch reuses the same relative threshold T.
            if input_img[row, col] * denom < S_ * (1. - T):
                out_img_sugeno[row, col] = 0
            else:
                out_img_sugeno[row, col] = 1
return out_img_choquet, out_img_sugeno, choquet_mat, sugeno_mat, T
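# Usage sketch (added; mirrors the single-image experiment further below):
# int_img receives the Choquet integral image from adaptive_choquet_itegral,
# and T=0 triggers the automatic Otsu threshold.
# c_img, s_img, c_m, s_m, T = adaptive_thresh2(img, choquet_mat, a1=16, a2=2)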
# In[ ]:
#Qualitative comparisons
# Compute the mean squared error and structural similarity
# index for the images
def compare_images(img1, img2):
m = mse(img1, img2)
s = measure.compare_ssim(img1, img2, data_range=img2.max() - img2.min(), multichannel=False)
ret = np.array([m,s])
#the higher the ssim, the more "similar"
return(ret)
def mse(img1, img2):
    err = np.sum((img1.astype("float") - img2.astype("float")) ** 2)
    # Normalise by the pixel count of one image (both inputs must share a shape).
    err /= float(img1.shape[0] * img1.shape[1])
    # The lower the error, the more "similar" the images.
    return err
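# Quick sanity check (added): identical images have zero MSE.
_probe = np.eye(4, dtype="float32")
assert mse(_probe, _probe) == 0.0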
# Simple record class collecting the results of one (image, configuration) run.
class results_collector(object):
    def __init__(self, name, original_img, choquet_mat, sugeno_mat, count_matrix,
                 out_img_adapt_choquet, out_img_sugeno, out_img_bradley, c_m, s_m, T, elapsed_time,
                 mse_choquet, mse_sugeno, mse_bradley, ssim_choquet, ssim_sugeno, ssim_bradley,
                 th, a1, a2):
        # Plain attribute assignments: no trailing commas, which would wrap
        # every value in a one-element tuple.
        self.name = name
        self.img = original_img
        self.choquet_mat = choquet_mat
        self.sugeno_mat = sugeno_mat
        self.count_matrix = count_matrix
        self.out_img_adapt_choquet = out_img_adapt_choquet
        self.out_img_sugeno = out_img_sugeno
        self.out_img_bradley = out_img_bradley
        self.c_m = c_m
        self.s_m = s_m
        self.T = T
        self.elapsed_time = elapsed_time
        self.mse_choquet = mse_choquet
        self.mse_sugeno = mse_sugeno
        self.mse_bradley = mse_bradley
        self.ssim_choquet = ssim_choquet
        self.ssim_sugeno = ssim_sugeno
        self.ssim_bradley = ssim_bradley
        self.th = th
        self.a1 = a1
        self.a2 = a2
# Embedded method comparing the ground truth against the thresholded images
def compute_multi_thresh(test_images, gt_images, opt=0, T=0, a1=2, a2=2):
    count = 0
    elapsed_time = 0  # per-run timing is measured by the callers
    resc = []
for i in test_images:
test_image = i
#plot_it(test_image)
S1 = np.asarray(compute_summed_area_table(test_image))
#S1 = get_int_img_m1(test_image)
choquet_mat, sugeno_mat, count_matrix = adaptive_choquet_itegral(np.asarray(test_image),
S1,
opt,
log=False )
#Choquet Adaptive Thresh
out_img_adapt_choquet, out_img_sugeno, c_m, s_m, T = adaptive_thresh2(np.asarray(test_image),
np.asarray(choquet_mat),
a1 = a1,
a2 = a2,
T= T,
                                                                           log=False)  # with compute_summed_area_table as input it doesn't work
#Bradley Adaptive Thresh
S1 = get_int_img_m1(test_image)
out_img_bradley, bradley_int_mat = adaptive_thresh(np.asarray(test_image),
S1 ,
a1=a1,
a2=a2,
T=T)
#compare it
mse_choquet, ssim_choquet = compare_images(gt_images[count], out_img_adapt_choquet)
mse_sugeno, ssim_sugeno = compare_images(gt_images[count], out_img_sugeno)
mse_bradley, ssim_bradley = compare_images(gt_images[count], out_img_bradley)
#
resc.append(results_collector("Comparisons", i, choquet_mat,
sugeno_mat,count_matrix,
out_img_adapt_choquet,
out_img_sugeno,
out_img_bradley,
c_m,
s_m,
T,
elapsed_time,
mse_choquet,
mse_sugeno,
mse_bradley,
ssim_choquet,
ssim_sugeno,
ssim_bradley,
T,
a1,
a2))
count += 1
return(resc)
def add_random_noise(small_image, perc=1):
    np.random.seed(1)
    mu, sigma = 0, 1  # mean and standard deviation of the Gaussian source
    s = np.random.normal(mu, sigma, small_image.shape)
    # Rescale to [0, 1] and attenuate, so perc bounds the peak noise amplitude.
    img_n = np.abs(s / s.max()) * perc
    return small_image + img_n
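# E.g. add_random_noise(gt, 0.2) perturbs an image with up to roughly 20% of
# full scale; this powers the noise_gt / noise_test modes of test_exp below.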
# In[82]:
### Testing Grad/Glaze images vs Groundtruth / GT noise vs GT / Test+noise vs GT
def test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.01, noise_gt = -1, noise_test=-1):
resc_a = []
elapsed_time=0
    # Add noise to the ground truth and use it as the test set
    # (the "GT+noise vs GT" mode from the header comment).
    if noise_gt > 0:
noise_img = []
for i in range(len(gt_images)):
noise_img.append(add_random_noise(gt_images[i], noise_gt))
test_images = noise_img
#Add noise on the test images
if noise_test > 0:
noise_img = []
for i in range(len(test_images)):
noise_img.append(add_random_noise(test_images[i], noise_test))
test_images = noise_img
# Test test_images or noised ones with respect the GT.
for i in range(a1):
for j in range(a2):
x = scale
if(i >= j ):
print("Testing image conf ( i: " + str(i) + " j: " + str(j) + ")")
t1 = time.process_time()
while(x <= 1.01):
resc = compute_multi_thresh(test_images,gt_images,
opt = opt,
T=x,
a1=i+1,
a2=j+1)
x = x + scale
resc_a.append(resc)
elapsed_time = time.process_time() - t1
print('Out: {} images processed in {} seconds'.format(str(len(resc)), round(elapsed_time ,3)))
return(resc_a)
## Simple testing prints: list every configuration where the Choquet result
## beats Bradley on SSIM, and report the coverage percentage at the end.
def search_results(resc_b, ssim_th=0.5, attention_image=-1):
    count = 0
    for i in range(len(resc_b)):
        for j in range(len(resc_b[-1])):
            if (resc_b[i][j].ssim_choquet > resc_b[i][j].ssim_bradley
                    and resc_b[i][j].ssim_choquet > ssim_th
                    and resc_b[i][j].a1 != resc_b[i][j].a2):
                count = count + 1
                print('{}-th image -------------------\n mse: C {} S {} B {}, \nssim: C {} S {} B {} \na1: {}, a2: {}, th: {}'.format(
                    str(j),
                    round(resc_b[i][j].mse_choquet, 3),
                    round(resc_b[i][j].mse_sugeno, 3),
                    round(resc_b[i][j].mse_bradley, 3),
                    round(resc_b[i][j].ssim_choquet, 3),
                    round(resc_b[i][j].ssim_sugeno, 3),
                    round(resc_b[i][j].ssim_bradley, 3),
                    str(resc_b[i][j].a1),
                    str(resc_b[i][j].a2),
                    round(resc_b[i][j].th, 4)))
                if attention_image >= 0:
                    if j == attention_image:
                        print("**********************************************************************************")
    print("Percentage of coverage around all the possible configurations: " + str(count / (len(resc_b) * len(resc_b[-1]))))
# In[83]:
################################################################################
#### Test on a single image:
################################################################################
small_image = 1.0 - import_img('./original/00.bmp')
plot_it(small_image)
S1 = np.asarray(compute_summed_area_table(small_image))
cdf_img = cdf_image(small_image)
int_img = get_int_img_m1(small_image) # common
#int_img2 = get_int_img_m2(small_image, cum_distr) #choquet int img
print("Image")
print(np.asarray(small_image))
print("summed area table")
print(np.asarray(S1))
print("integral image")
print(int_img)
plt.plot(np.asarray(cdf_img[0]), np.asarray( cdf_img[2][0:len(cdf_img[2])-1]), 'r--')
print("cumulative distribution of the image")
print(np.asarray(cdf_img[0]))
print("histogram")
print(np.asarray(cdf_img[1]))
print("range values")
print(np.asarray(cdf_img[2]))
choquet_mat, sugeno_mat, count_matrix = adaptive_choquet_itegral(np.asarray(small_image),
S1,
1,
log=False )
print("C mat")
plot_it(choquet_mat)
print("S mat")
plot_it(sugeno_mat)
print("-----------------------------------------------------------------------------------")
#Otsu T parameter
print("Image thresholded with the choquet integral image and an automatic Otsu threshold")
out_img_adapt_choquet, out_img_sugeno, c_m, s_m, T = adaptive_thresh2(np.asarray(small_image),
np.asarray(choquet_mat),
a1 = 16,
                                                               a2 = 2,  # leave T = 0 for the automatic Otsu threshold
                                                               log=False)  # with compute_summed_area_table as input it doesn't work
print("Threshold " + str(T))
plot_it(out_img_adapt_choquet)
plot_it(out_img_sugeno)
plot_it(c_m)
plot_it(s_m)
print("-----------------------------------------------------------------------------------")
#Manual Parameter
print("Image thresholded with the choquet integral image and a fixed manual threshold.")
out_img_adapt_choquet, out_img_sugeno, c_m, s_m, T = adaptive_thresh2(np.asarray(small_image),
np.asarray(choquet_mat),
a1 = 16,
a2 = 2,
T = 0.2,
                                                               log=False)  # with compute_summed_area_table as input it doesn't work
print("Threshold " + str(T))
plot_it(out_img_adapt_choquet)
plot_it(out_img_sugeno)
plot_it(c_m)
plot_it(s_m)
# In[84]:
################################################################################
#### Toy dataset # Testing complex gradients, glazes, additive noise, smoothness
################################################################################
#Prepare the list data structures
#Groundtruth images
gt_images = []
# Smoothed, glazed GT images
test_images = []
# In[85]:
###
# Definition of the toy dataset
###
small_image1 = [[0, 0, 0, 0, 0, 0,0,0,0],
[0, 1, 0, 1, 0, 0,0,0,0],
[1, 1, 1, 1, 1, 0,0,0,0],
[0, 1, 0, 1, 0, 0,0,0,0],
[0, 0, 1, 1, 1, 0,0,0,0],
[0, 0, 0, 1, 0, 0,0,0,0],
[0, 0, 1, 1, 1, 0,0,0,0],
[0, 0, 0, 1, 0, 0,0,0,0],
[0, 0, 1, 1, 1, 0,0,0,0]]
small_image1 = np.asarray(small_image1, dtype="float32")
gt_images.append(small_image1)
plot_it(small_image1)
small_image1 = [[0.2, 0.2, 0.1, 0.2, 0.15, 0.14,0.13,0.12,0.11],
[0.16, 0.6, 0.2, 0.3, 0.15, 0.14,0.13,0.12,0.11],
[0.6, 0.5, 0.6, 0.7, 0.8, 0.14,0.13,0.12,0.11],
[0.14, 0.5, 0.2, 0.3, 0.15, 0.14,0.13,0.12,0.11],
[0.15, 0.12, 0.3, 0.4, 0.3, 0.14,0.13,0.12,0.11],
[0.14, 0.13, 0.2, 0.4, 0.15, 0.14,0.13,0.12,0.11],
[0.15, 0.12, 0.3, 0.3, 0.3, 0.14,0.13,0.12,0.11],
[0.14, 0.13, 0.2, 0.26, 0.1, 0.14,0.13,0.12,0.11],
[0.15, 0.12, 0.25, 0.25, 0.25, 0.14,0.13,0.12,0.11]]
small_image1 = np.asarray(small_image1, dtype="float32")
test_images.append(small_image1)
plot_it(small_image1)
small_image2 = [[0, 0, 0, 0, 1,0,0,0,0],
[0, 0, 0, 1, 0,1,0,0,0],
[0, 0, 1, 0, 0,0,1,0,0],
[0, 1, 0, 0, 0,0,0,1,0],
[0, 1, 0, 0, 0,0,0,1,0],
[0, 0, 1, 0, 0,0,1,0,0],
[0, 0, 0, 1, 0,1,0,0,0],
[0, 0, 0, 0, 1,0,0,0,0]]
small_image2 = np.asarray(small_image2, dtype="float32")
gt_images.append(small_image2)
plot_it(small_image2)
small_image2 = [[0.22, 0.19, 0.19, 0.18, 0.5, 0.11, 0.08, 0.06,0.02],
[0.22, 0.19, 0.19, 0.5, 0.15,0.6, 0.08, 0.06,0.02],
[0.22, 0.19, 0.40, 0.18, 0.15,0.11, 0.7, 0.06,0.02],
[0.22, 0.30, 0.19, 0.18, 0.15,0.11, 0.08, 0.8,0.02],
[0.22, 0.30, 0.19, 0.18, 0.15,0.11, 0.08, 0.8,0.02],
[0.22, 0.19, 0.40, 0.18, 0.15,0.11, 0.7, 0.06,0.02],
[0.22, 0.19, 0.19, 0.5, 0.15,0.6, 0.08, 0.06,0.02],
[0.22, 0.19, 0.19, 0.18, 1 ,0.11, 0.08, 0.06,0.02]]
small_image2 = np.asarray(small_image2, dtype="float32")
plot_it(small_image2)
test_images.append(small_image2)
small_image3 = [[0,0,0, 0, 0, 0, 0,0],
[0,0,0, 0, 0, 0, 0,0],
[0,0,0, 1, 0, 0, 0,0],
[0,0,1, 1, 1, 0, 0,0],
[0,0,0, 1, 0, 1, 0,0],
[0,0,0, 1, 1, 1, 1,0],
[0,0,0, 1, 0, 1, 0,0],
[0,0,0, 1, 0, 0, 0,0]]
small_image3 = np.asarray(small_image3, dtype="float32")
plot_it(small_image3)
gt_images.append(small_image3)
small_image3 = [[0.18,0.22, 0.15, 0.22, 0.20, 0.17, 0.15,0.14],
[0.18,0.22, 0.15, 0.22, 0.20, 0.15, 0.17,0.1],
[0.18,0.22, 0.15, 0.45, 0.20, 0.17, 0.15,0.14],
[0.17,0.21, 0.35, 0.45, 0.55, 0.15, 0.17,0.1],
[0.17,0.20, 0.15, 0.45, 0.20, 0.65, 0.15,0.14],
[0.18,0.21, 0.15, 0.45, 0.55, 0.65, 0.75,0.1],
[0.19,0.22, 0.15, 0.45, 0.20, 0.65, 0.15,0.14],
[0.18,0.22, 0.15, 0.35, 0.20, 0.15, 0.17,0.1]]
small_image3 = np.asarray(small_image3, dtype="float32")
plot_it(small_image3)
test_images.append(small_image3)
small_image4 = [[0, 0, 0, 0, 0,0,0,0],
[0, 0, 0, 0, 1,1,1,0],
[0, 0, 0, 1, 1,1,1,0],
[0, 0, 1, 1, 1,1,1,0],
[0, 1, 1, 1, 1,1,1,0],
[0, 0, 1, 1, 1,1,1,0],
[0, 0, 0, 1, 1,1,1,0],
[0, 0, 0, 0, 1,1,1,0]
]
small_image4 = np.asarray(small_image4, dtype="float32")
small_image6 = np.asarray(np.transpose(small_image4), dtype="float32")
plot_it(small_image4)
gt_images.append(small_image4)
gt_images.append(small_image6)
small_image4 = [[0.1, 0.1, 0.3, 0.2, 0.2,0.1,0, 0],
[0.1, 0.15, 0.3, 0.2, 0.4,0.6,0.6,0],
[0.1, 0.15, 0.3, 0.5, 0.5,0.5,0.6,0],
[0.2, 0.15, 0.6, 0.5, 0.55,0.5,0.6,0],
[0.2, 0.8, 0.7, 0.5, 0.55,0.5,0.6,0],
[0.2, 0.15, 0.6, 0.5, 0.55,0.5,0.6,0],
[0.1, 0.1, 0.3, 0.5, 0.5,0.5,0.6,0],
[0.1, 0.1, 0.3, 0.2, 0.5,0.5,0.6,0]
]
small_image4 = np.asarray(small_image4, dtype="float32")
small_image6 = np.asarray(np.transpose(small_image4), dtype="float32")
plot_it(small_image4)
test_images.append(small_image4)
test_images.append(small_image6)
small_image5 = [[1., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0, 0., 0.],
[0., 0., 0., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]
]
small_image5 = np.asarray(small_image5, dtype="float32")
plot_it(small_image5)
gt_images.append(small_image5)
small_image5 = [[0.4, 0. , 0., 0., 0., 0., 0., 0.],
[0. , 0.5, 0., 0., 0., 0., 0., 0.],
[0. , 0. , 0.6, 0.1, 0.1, 0.1, 0.1, 0.],
[0. , 0. , 0.1, 0.1, 1.0, 0.1, 0.1, 0.],
[0. , 0. , 0.1, 0.7, 0.8, 0.9, 0.1, 0.],
[0. , 0. , 0.1, 0.1, 0.9, 0.1, 0.1, 0.],
[0. , 0. , 0.1, 0.2, 0.1, 0.1, 0.1, 0.],
[0. , 0. , 0., 0., 0., 0., 0., 0.]
]
small_image5 = np.asarray(small_image5, dtype="float32")
plot_it(small_image5)
test_images.append(small_image5)
small_image7 = [[0,0, 0, 0, 0, 0,0,0],
[0,0, 0, 0, 0, 0,0,0],
[0,0, 0, 1, 0, 0,0,0],
[0,0, 1, 0 , 1, 0,0,0],
[0,0, 0, 1, 0, 0,0,0],
[0,0, 1, 0, 1, 0,0,0],
[0,0, 0, 1, 0, 0,0,0],
[0,0, 0, 0, 0, 0,0,0], ]
small_image7 = np.asarray(small_image7, dtype="float32")
plot_it(small_image7)
gt_images.append(small_image7)
small_image7 = [[0,0.1, 0.2, 0.2, 0.2, 0.1,0,0],
[0,0.1, 0.2, 0.2, 0.2, 0.1,0,0],
[0,0.1, 0.2, 0.7, 0.2, 0.1,0,0],
[0,0.3, 0.6, 0.2 , 0.6, 0.1,0,0],
[0,0.3, 0.2, 0.8, 0.2, 0.25,0,0],
[0,0.1, 0.7, 0.2, 0.6, 0.1,0,0],
[0,0.1, 0.1, 0.6, 0.2, 0.2,0,0],
[0,0.1, 0.1, 0.1, 0.2, 0.2,0,0]
]
small_image7 = np.asarray(small_image7, dtype="float32")
plot_it(small_image7)
test_images.append(small_image7)
small_image8 = [
[0, 0, 0, 0, 0,0,1,0],
[0, 0, 0, 0, 0,1,0,0],
[0, 0, 0, 0, 1,0,0,0],
[ 0, 0, 0, 1, 0,0,0,0],
[ 0, 0, 1, 0, 0,0,0,0],
[ 0, 1, 0, 0, 0,0,0,0],
[ 1, 0, 0, 0, 0,0,0,0],
[0, 0, 0, 0, 0, 0, 0, 0]]
small_image8 = np.asarray(small_image8, dtype="float32")
plot_it(small_image8)
gt_images.append(small_image8)
small_image8 = [[0, 0, 0, 0, 0, 0.4, 1, 0.5],
[0, 0, 0, 0, 0.3, 0.95, 0.4, 0.5],
[0, 0, 0, 0.3, 0.9, 0.4, 0, 0],
[0, 0, 0.3, 0.8, 0.3, 0, 0, 0],
[0, 0.2, 0.8, 0.3, 0, 0, 0, 0],
[0.2, 0.8, 0.3, 0, 0, 0, 0, 0],
[0.8, 0.2, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0, 0]]
small_image8 = np.asarray(small_image8, dtype="float32")
plot_it(small_image8)
test_images.append(small_image8)
# In[86]:
### OPT 0,1,2 on Testing imgs vs GT
test_0a = test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.2)
test_0b = test_exp(test_images, gt_images, a1=7, a2=7, opt=1, scale = 0.2)
test_0c = test_exp(test_images, gt_images, a1=7, a2=7, opt=2, scale = 0.2)
search_results(test_0a, ssim_th = 0.3, attention_image = 2)
search_results(test_0b, ssim_th = 0.3, attention_image = 2)
search_results(test_0c, ssim_th = 0.3, attention_image = 2)
### OPT 0,1,2 on GT noised vs GT +20%
test_1a = test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.2, noise_gt = 0.2)
test_1b = test_exp(test_images, gt_images, a1=7, a2=7, opt=1, scale = 0.2, noise_gt = 0.2)
test_1c = test_exp(test_images, gt_images, a1=7, a2=7, opt=2, scale = 0.2, noise_gt = 0.2)
search_results(test_1a, ssim_th = 0.3, attention_image = 2)
search_results(test_1b, ssim_th = 0.3, attention_image = 2)
search_results(test_1c, ssim_th = 0.3, attention_image = 2)
### OPT 0,1,2 on Testing imgs noised vs GT + 20%
test_2a = test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.2, noise_test = 0.2)
test_2b = test_exp(test_images, gt_images, a1=7, a2=7, opt=1, scale = 0.2, noise_test = 0.2)
test_2c = test_exp(test_images, gt_images, a1=7, a2=7, opt=2, scale = 0.2, noise_test = 0.2)
search_results(test_2a, ssim_th = 0.3, attention_image = 2)
search_results(test_2b, ssim_th = 0.3, attention_image = 2)
search_results(test_2c, ssim_th = 0.3, attention_image = 2)
# In[23]:
#####################
### Berkeley Dataset
imgs = ['./original/00.bmp', './original/01.bmp','./original/02.bmp',
'./original/03.bmp', './original/04.bmp','./original/05.bmp',
'./original/06.bmp', './original/07.bmp','./original/08.bmp',
'./original/09.bmp' ]
imgs_gt = [ './gtruth/00.bmp', './gtruth/01.bmp', './gtruth/02.bmp',
'./gtruth/03.bmp', './gtruth/04.bmp', './gtruth/05.bmp',
'./gtruth/06.bmp', './gtruth/07.bmp', './gtruth/08.bmp',
'./gtruth/09.bmp' ]
# Invert and load the test images and their ground truths in one pass.
test_images2 = [1.0 - import_img(p) for p in imgs]
test_images_gt_2 = [1.0 - import_img(p) for p in imgs_gt]
# In[ ]:
###Berkeley
### Testing the gradient/glaze images against the ground truth - t-norm Choquet.
### Sweeping T from 0 to 1 takes about 2963.407 seconds, so the window is
### fixed at a1=16, a2=2 instead of trying all possible combinations.
brk_resc = []
t1 = time.process_time()
x=0
while(x <= 1.00):
resc = compute_multi_thresh(test_images2, test_images_gt_2,
opt = 0,
T=x,
a1=16,
a2=2)
x = x + 0.01
brk_resc.append(resc)
elapsed_time = time.process_time() - t1
print(x)
print('Images processed in {} seconds'.format(round(elapsed_time ,3)))
# In[110]:
print(len(brk_resc))
# In[114]:
count = 0
for i in range(len(brk_resc)):
    for j in range(len(brk_resc[-1])):
        if (brk_resc[i][j].ssim_choquet > brk_resc[i][j].ssim_bradley
                and brk_resc[i][j].ssim_choquet > 0.3
                and brk_resc[i][j].a1 != brk_resc[i][j].a2):
            count = count + 1
            print('{}-th image -------------------\n mse: C {} S {} B {}, \nssim: C {} S {} B {} \na1: {}, a2: {}, th: {}'.format(
                str(j),
                round(brk_resc[i][j].mse_choquet, 3),
                round(brk_resc[i][j].mse_sugeno, 3),
                round(brk_resc[i][j].mse_bradley, 3),
                round(brk_resc[i][j].ssim_choquet, 3),
                round(brk_resc[i][j].ssim_sugeno, 3),
                round(brk_resc[i][j].ssim_bradley, 3),
                str(brk_resc[i][j].a1),
                str(brk_resc[i][j].a2),
                round(brk_resc[i][j].th, 4)))
# In[176]:
#### Example of the chessboard
test_image = 1.0 - import_img('./original/chessboard.png')
# Recompute the summed-area table for this image: the existing S1 still
# held the table of the previous example image (a different shape).
S1 = np.asarray(compute_summed_area_table(test_image))
#Choquet Adaptive Thresh
choquet_mat, _, _ = adaptive_choquet_itegral(np.asarray(test_image),
                                             S1,
                                             0,  # t-norm version
                                             log=False)
out_img_adapt_choquet, _, _, _, T = adaptive_thresh2(np.asarray(test_image),
np.asarray(choquet_mat),
a1 = 16,
a2 = 2,
T= 0.095,
                                                     log=False)  # with compute_summed_area_table as input it doesn't work
#Choquet Adaptive Thresh
choquet_mat, _, _ = adaptive_choquet_itegral(np.asarray(test_image),
S1,
1, #choquet int version
log=False )
out_img_adapt_choquet2, _, _, _, T = adaptive_thresh2(np.asarray(test_image),
np.asarray(choquet_mat),
a1 = 16,
a2 = 2,
T= 0.095,
                                                     log=False)  # with compute_summed_area_table as input it doesn't work
#Bradley Adaptive Thresh
S1 = get_int_img_m1(test_image)
out_img_bradley, bradley_int_mat = adaptive_thresh(np.asarray(test_image),
S1 ,
a1=16,
a2=2,
T=T)
# In[177]:
#Choquet Adaptive Thresh
plot_it(test_image)
plot_it(out_img_adapt_choquet2)
plot_it(out_img_adapt_choquet)
plot_it(out_img_bradley)
print(compare_images(out_img_adapt_choquet, out_img_bradley))
print(compare_images(out_img_adapt_choquet2, out_img_bradley))
# In[ ]:
# In[ ]:
| lodeguns/FuzzyAdaptiveBinarization | fuzzy_adaptive_bin.py | fuzzy_adaptive_bin.py | py | 36,527 | python | en | code | 3 | github-code | 36 |
13790256283 | #!/usr/bin/env python
import rbd
import rados
import json
import subprocess
from itertools import chain
from texttable import Texttable, get_color_string, bcolors
def f(x):
if x=="quota_max_bytes":
return str(pool[x]/1024/1024)
else:
return str(pool[x])
p = subprocess.check_output('ceph osd dump -f json-pretty', shell=True)
pools = json.loads(p)['pools']
pools_table = Texttable()
header = [ "Id", "Pool", "Size", "Min_size", "Pg_num", "Pgp_num", "Crush","Quota (MB)", "Quota (obj)" ]
keys = [ "pool", "pool_name", "size", "min_size", "pg_num", "pg_placement_num", "crush_ruleset","quota_max_bytes","quota_max_objects" ]
pools_table.header(map(lambda x: get_color_string(bcolors.YELLOW, x), header))
for pool in pools:
pools_table.add_row(map(f, keys))
table = Texttable()
table.set_deco(Texttable.BORDER | Texttable.HEADER | Texttable.VLINES)
table.set_cols_align( [ "l", "l", "l", "l", "l", "l", "l" ])
table.set_cols_valign([ "m", "m", "m", "m", "m", "m", "m" ])
table.set_cols_width([ "20", "20", "8","8","20","8","8"])
header = [ "Pool", "Image", "Size(Mb)", "Features", "Lockers", "Str_size", "Str_cnt" ]
keys = [ "features", "list_lockers", "stripe_unit", "stripe_count" ]
table.header(map(lambda x: get_color_string(bcolors.YELLOW, x), header))
with rados.Rados(conffile='/etc/ceph/ceph.conf') as cluster:
pool_list = cluster.list_pools()
for pool in pool_list:
table.add_row([ get_color_string(bcolors.GREEN, pool) , "", "", "", "", "", "" ])
with cluster.open_ioctx(pool) as ioctx:
rbd_inst = rbd.RBD()
image_list = rbd_inst.list(ioctx)
for image_name in image_list:
with rbd.Image(ioctx, image_name) as image:
image_size = str(image.size()/1024**2)
table.add_row(["", image_name, image_size] + map(lambda x: str(getattr(image,x)()), keys))
if pool != pool_list[-1]:
table.add_row([ "-"*20, "-"*20,"-"*8,"-"*8,"-"*20,"-"*8,"-"*8 ])
print(pools_table.draw())
print
print(table.draw())
| angapov/ceph-scripts | ceph.py | ceph.py | py | 2,086 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.