seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
24481282090 | import logging
class Singleton(type):
    """Metaclass that caches at most one instance per class.

    The first call to the class constructs the instance; every later
    call returns the cached one.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class Logger(metaclass=Singleton):
    """Singleton application logger for WikiTopK.

    Bug fix: the original set ``__metaclass__ = Singleton`` inside the
    class body, which is Python-2-only syntax and is silently ignored on
    Python 3 — every ``Logger()`` call created a fresh instance and
    attached duplicate handlers. Python 3 requires the ``metaclass=``
    keyword in the class statement.
    """

    def __init__(self):
        import os  # local import so module-level dependencies are unchanged
        # FileHandler raises if the target directory does not exist.
        os.makedirs('log', exist_ok=True)
        self._logger = logging.getLogger('[WikiTopK]')
        self._logger.setLevel(logging.INFO)
        formatter1 = logging.Formatter('%(name)s - %(message)s')
        fh = logging.FileHandler('log/debug.log', mode='w')
        fh.setLevel(logging.ERROR)
        fh.setFormatter(formatter1)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter1)
        self._logger.addHandler(fh)
        self._logger.addHandler(ch)
| shalseban/wikiepedia-top-pageviews | src/main/python/logger.py | logger.py | py | 809 | python | en | code | 0 | github-code | 36 |
32137328654 | import datetime
import json
import os
from datetime import timezone
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold, ParameterGrid, train_test_split
from prism_kondo.experiment_utils import (
add_random_noise_arnaiz,
calc_model_errors,
run_selector,
save_hyperopt_result,
train_lr_model,
)
from prism_kondo.instance_selection._params_dict import (
NOISE_DEPENDENT_PARAMS,
PARAMS_DICTS_NOISE,
)
class NoiseExperimenter:
def generate_gaussian_linear_data(
self,
nr_samples: int,
nr_features: int,
mean: float,
std: float,
random_state=None,
):
rs = np.random.RandomState(random_state)
X = rs.normal(mean, std, size=(nr_samples, nr_features))
y = np.zeros(nr_samples)
coefs = np.round(rs.uniform(-10, 10, nr_features), 2)
for i in range(nr_features):
y += coefs[i] * X[:, i]
y += rs.normal(0, 1, size=nr_samples)
return X, y
def run_experiments_arnaiz(
self,
selectors,
nr_datasets,
nr_samples,
nr_features,
mean,
std,
noise_frac: float,
output_dir="arnaiz_synthetic",
):
for i in range(nr_datasets):
if i % 20 == 0:
print(f"generated {i}/{nr_datasets} datasets")
X, y = self.generate_gaussian_linear_data(
nr_samples, nr_features, mean, std
)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
X_val, X_test = X_test[:200], X_test[200:]
y_val, y_test = y_test[:200], y_test[200:]
y_train_noisy, noisy_idx = add_random_noise_arnaiz(
y_train, noise_frac=noise_frac
)
for selector_name in selectors:
if selector_name in ["selcon", "shapley", "fish1"]:
params = NOISE_DEPENDENT_PARAMS[noise_frac][selector_name]
elif selector_name == "ground_truth":
params = {}
else:
params = PARAMS_DICTS_NOISE[selector_name]
model_clean = train_lr_model(X_train, y_train)
errors_clean = calc_model_errors(model_clean, X_test, y_test)
model_noisy = train_lr_model(X_train, y_train_noisy)
errors_noisy = calc_model_errors(model_noisy, X_test, y_test)
if selector_name == "ground_truth":
labels_clean = np.ones(len(X_train), dtype="bool")
labels_noisy = np.ones(len(X_train), dtype="bool")
labels_noisy[noisy_idx] = False
elif selector_name in ["selcon"]:
X_train_and_val = np.vstack([X_train, X_val])
y_train_and_val = np.concatenate([y_train, y_val])
y_train_noisy_and_val = np.concatenate([y_train_noisy, y_val])
labels_clean, _ = run_selector(
X_train_and_val, y_train_and_val, selector_name, params
)
labels_clean = labels_clean[:600]
labels_noisy, _ = run_selector(
X_train_and_val, y_train_noisy_and_val, selector_name, params
)
labels_noisy = labels_noisy[:600]
elif selector_name in ["reg_enn_time", "fixed_window", "fish1"]:
base_time = datetime.datetime(2000, 1, 1)
time_train = np.array(
[
base_time + datetime.timedelta(days=i)
for i in range(X_train.shape[0])
],
dtype="datetime64[ns]",
).astype(np.float32)
X_time = np.hstack([X_train, time_train.reshape(-1, 1)])
if selector_name in ["reg_enn_time", "fixed_window"]:
labels_clean, _ = run_selector(
X_time, y_train, selector_name, params
)
labels_noisy, _ = run_selector(
X_time, y_train_noisy, selector_name, params
)
elif selector_name in ["fish1"]:
x_target = np.hstack(
[X_test[0, :], time_train[-1].astype(np.float32)]
)
X_fish = np.vstack([X_time, x_target])
labels_clean, _ = run_selector(
X_fish, y_train, selector_name, params
)
labels_noisy, _ = run_selector(
X_fish, y_train_noisy, selector_name, params
)
else:
labels_clean, _ = run_selector(
X_train, y_train, selector_name, params
)
labels_noisy, _ = run_selector(
X_train, y_train_noisy, selector_name, params
)
model_labels_clean = train_lr_model(
X_train[labels_clean, :], y_train[labels_clean]
)
model_labels_noisy = train_lr_model(
X_train[labels_noisy, :], y_train_noisy[labels_noisy]
)
errors_clean_selector = calc_model_errors(
model_labels_clean, X_test, y_test
)
errors_noisy_selector = calc_model_errors(
model_labels_noisy, X_test, y_test
)
errors_clean.update(
{f"clean_selector_{k}": v for k, v in errors_clean_selector.items()}
)
errors_clean.update({f"noisy_{k}": v for k, v in errors_noisy.items()})
errors_clean.update(
{f"noisy_selector_{k}": v for k, v in errors_noisy_selector.items()}
)
errors_clean["selector"] = selector_name
if selector_name == "ground_truth":
errors_clean["params"] = {}
else:
errors_clean["params"] = PARAMS_DICT[selector_name]
errors_clean["noise_frac"] = noise_frac
errors_clean["mean"] = mean
errors_clean["std"] = std
errors_clean["nr_samples"] = nr_samples
errors_clean["nr_features"] = nr_features
errors_clean["std_y"] = float(np.std(y_test))
(
correctly_kicked_out,
falsely_kicked_out,
) = self.calc_correctly_identified_noise_samples(
noisy_idx, labels_noisy
)
errors_clean["clean_frac_kicked_out"] = len(
np.argwhere(labels_clean == False).flatten()
) / len(labels_clean)
errors_clean["noisy_frac_kicked_out"] = len(
np.argwhere(labels_noisy == False).flatten()
) / len(labels_noisy)
errors_clean["frac_correctly_kicked_out"] = correctly_kicked_out / len(
noisy_idx
)
errors_clean["frac_falsely_kicked_out"] = falsely_kicked_out / len(
y_train
)
self.save_json_file(
errors_clean,
output_dir,
selector_name,
)
def save_json_file(self, info_dict, output_dir, selector_name):
directory_path = os.path.join(output_dir, selector_name)
if not os.path.exists(directory_path):
os.makedirs(directory_path)
timestamp = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d_%H-%M-%S.%f")
filename = f"{timestamp}"
filepath = os.path.join(directory_path, filename)
with open(filepath, "w") as outfile:
json.dump(info_dict, outfile)
    def run_hyperopt(
        self,
        selector_name,
        param_dict_ranges,
        nr_samples,
        nr_features,
        mean,
        std,
        noise_frac,
        n_splits=10,
    ):
        """Grid-search selector hyperparameters on a noised synthetic dataset.

        For every parameter combination in ``param_dict_ranges``, an
        ``n_splits``-fold CV is run: the selector picks training rows, a
        linear model is fit on the selected rows, and the validation R^2 is
        recorded. Each combination's CV summary is persisted via
        ``save_hyperopt_result``.
        """
        X, y = self.generate_gaussian_linear_data(nr_samples, nr_features, mean, std)
        # Hold out (and discard) 25% so the tuning set matches the experiment
        # setup's training-set size.
        X, _, y, _ = train_test_split(X, y, test_size=0.25)
        y_train_noisy, noisy_idx = add_random_noise_arnaiz(y, noise_frac=noise_frac)
        # Synthetic timestamps (one day per sample) for time-aware selectors,
        # stored as float32 so they can be stacked with the features.
        base_time = datetime.datetime(2000, 1, 1)
        time_train = np.array(
            [base_time + datetime.timedelta(days=i) for i in range(X.shape[0])],
            dtype="datetime64[ns]",
        ).astype(np.float32)
        X_time = np.hstack([X, time_train.reshape(-1, 1)])
        all_param_combinations = list(ParameterGrid(param_dict_ranges))
        iteration = 1
        for param_dict in all_param_combinations:
            print("trying combination", iteration, "/", len(all_param_combinations))
            iteration += 1
            kf = KFold(n_splits=n_splits, shuffle=False)
            cv_val_scores = []
            cv_dict = {}
            for i, (train_index, val_index) in enumerate(kf.split(X)):
                X_train = X[train_index, :]
                # Selection and fitting use the NOISY targets on purpose: the
                # selector is being tuned to cope with noise.
                y_train = y_train_noisy[train_index]
                X_val = X[val_index, :]
                y_val = y_train_noisy[val_index]
                if selector_name == "reg_enn_time":
                    boolean_labels, scores = run_selector(
                        X_time[train_index, :], y_train, selector_name, param_dict
                    )
                elif selector_name == "fish1":
                    # fish1 needs a target row: first validation row stamped
                    # with the latest training time.
                    x_target = np.hstack(
                        [X_val[0, :], time_train[-1].astype(np.float32)]
                    )
                    X_fish = np.vstack([X_time[train_index, :], x_target])
                    # NOTE(review): X_fish has one extra row (the target);
                    # this assumes run_selector returns labels only for the
                    # training rows -- confirm against its implementation.
                    boolean_labels, scores = run_selector(
                        X_fish, y_train, selector_name, param_dict
                    )
                else:
                    boolean_labels, scores = run_selector(
                        X_train, y_train, selector_name, param_dict
                    )
                model = train_lr_model(
                    X_train[boolean_labels, :], y_train[boolean_labels]
                )
                error_dict = calc_model_errors(model, X_val, y_val)
                cv_val_scores.append(error_dict["r2"])
            cv_dict["raw_scores"] = cv_val_scores
            cv_dict["mean_score"] = np.mean(cv_val_scores)
            cv_dict["std_scores"] = np.std(cv_val_scores)
            cv_dict["n_splits"] = n_splits
            cv_dict["noise_frac"] = noise_frac
            cv_dict["mean"] = mean
            cv_dict["std"] = std
            cv_dict["nr_samples"] = nr_samples
            cv_dict["nr_features"] = nr_features
            save_hyperopt_result(
                selector_name,
                param_dict,
                cv_dict,
                f"noise{noise_frac}",
                "",
                "hyperopt_synthetic",
            )
def calc_correctly_identified_noise_samples(self, noisy_idx, labels):
idx_by_selector = np.argwhere(labels == False).flatten()
correctly_kicked_out = len(
set(noisy_idx).intersection(set(np.argwhere(labels == False).flatten()))
)
falsely_kicked_out = len(idx_by_selector) - correctly_kicked_out
return correctly_kicked_out, falsely_kicked_out
def create_pca_plot(self, noise_frac=0.3, filename=None, random_state=None):
X, y = self.generate_gaussian_linear_data(
1000, 5, 0, 1, random_state=random_state
)
y_noisy, noisy_idx = add_random_noise_arnaiz(y, noise_frac=noise_frac)
pca = PCA(2)
X = pca.fit_transform(X)
# fig, axes = plt.subplots(1, 2, figsize=(15, 10), sharey=True)
gridspec = {"width_ratios": [1, 1, 0.05]}
fig, axes = plt.subplots(1, 3, figsize=(15, 10), gridspec_kw=gridspec)
axes[0].scatter(X[:, 0], X[:, 1], c=y, cmap="seismic")
axes[0].set_title("a) Clean Data Set")
size = np.ones(len(y)) * 20
size[noisy_idx] = 60
axes[1].scatter(X[:, 0], X[:, 1], c=y_noisy, s=size, cmap="seismic")
axes[1].set_title(f"b) Noisy Data Set ( {int(noise_frac*100)}% )")
fig.supxlabel("Principal Component 1", fontsize=16, y=0.0)
fig.supylabel("Principal Component 2", fontsize=16, x=0.08)
cmap = mpl.cm.get_cmap("seismic")
norm = mpl.colors.Normalize(vmin=y.min(), vmax=y.max())
cbar = plt.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
cax=axes[2],
)
cbar.set_label(label="y Value", size="large", weight="bold")
if filename:
fig.savefig(filename)
| lurue101/Ruecker_MA | prism_kondo/noise_experiments.py | noise_experiments.py | py | 13,013 | python | en | code | 0 | github-code | 36 |
42893635287 | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/4/13.
"""
from app import create_app
from tests.utils import get_authorization
__author__ = 'Allen7D'
app = create_app()
def test_create_auth_list():
    """Attach auth ids 1-3 to group 5 via the CMS append endpoint."""
    with app.test_client() as client:
        response = client.post(
            '/cms/auth/append',
            headers={'Authorization': get_authorization()},
            json={'group_id': 5, 'auth_ids': [1, 2, 3]},
        )
        print(response.get_json())
def test_delete_auth_list():
    """Detach auth ids 1-3 from group 5 via the CMS remove endpoint."""
    with app.test_client() as client:
        response = client.post(
            '/cms/auth/remove',
            headers={'Authorization': get_authorization()},
            json={'group_id': 5, 'auth_ids': [1, 2, 3]},
        )
        print(response.get_json())
if __name__ == '__main__':
    # Only fire the requests when executed as a script; previously they ran
    # on import, so pytest collection executed them an extra time.
    test_create_auth_list()
    test_delete_auth_list()
| Allen7D/mini-shop-server | tests/test_cms_auth.py | test_cms_auth.py | py | 874 | python | en | code | 663 | github-code | 36 |
73974471145 | import logging
from datetime import datetime
from time import sleep
from typing import Union, Optional, Tuple, List, Sequence
import mariadb
from mariadb import Cursor, Connection
from accounting_bot import utils
from accounting_bot.exceptions import DatabaseException
logger = logging.getLogger("ext.accounting.db")
class AccountingDB:
    """Thin wrapper around a MariaDB connection holding accounting-bot state.

    Schema (created on first connect):
      messages(msgID PK, userID, verified BIT, t_state TINYINT, ocr_verified BIT)
      shortcuts(msgID PK, channelID)

    Every public method lazily reconnects if the connection was closed.
    """

    def __init__(self, username: str, password: str, host: str, port: str, database: str) -> None:
        """Connect to the database, retrying up to 5 times with backoff.

        Raises:
            DatabaseException: if no connection could be established.
        """
        self.cursor = None  # type: Cursor | None
        self.con = None  # type: Connection | None
        self.username = username
        self.password = password
        self.host = host
        self.port = port
        self.database = database
        connected = False
        counter = 0
        while not connected and counter < 5:
            # Retrying the connection in case the database is not yet ready
            try:
                self.try_connect()
                connected = True
            except mariadb.Error:
                counter += 1
                logger.warning(f"Retrying connection in {counter * 2} seconds")
                sleep(counter * 2)
        if not connected:
            raise DatabaseException(f"Couldn't connect to MariaDB database on {self.host}:{self.port}")

    def try_connect(self) -> None:
        """Open a new connection/cursor and ensure the schema exists.

        Raises:
            mariadb.Error: if connecting fails.
        """
        logger.info("Connecting to database...")
        try:
            self.con = mariadb.connect(
                user=self.username,
                password=self.password,
                host=self.host,
                port=self.port,
                database=self.database,
                connect_timeout=8
            )
            logger.info("Connected to database!")
            self.cursor = self.con.cursor()
            self.cursor.execute("CREATE TABLE IF NOT EXISTS messages ("
                                "msgID BIGINT NOT NULL, "
                                "userID BIGINT NOT NULL, "
                                "verified BIT NOT NULL DEFAULT b'0', "
                                "t_state TINYINT, "
                                "ocr_verified BIT NOT NULL DEFAULT b'0', "
                                "PRIMARY KEY (msgID)"
                                ") ENGINE = InnoDB; ")
            self.cursor.execute("CREATE TABLE IF NOT EXISTS shortcuts ("
                                "msgID BIGINT NOT NULL, "
                                "channelID BIGINT NOT NULL, "
                                "PRIMARY KEY (msgID)"
                                ") ENGINE = InnoDB; ")
        except mariadb.Error as e:
            logger.error(f"Error connecting to MariaDB Platform: {e}")
            raise e

    def ping(self) -> Optional[int]:
        """Return the round-trip ping time in microseconds, or None on error."""
        try:
            if self.con is None or not self.con.open:
                self.try_connect()
            start = datetime.now()
            self.con.ping()
            return (datetime.now() - start).microseconds
        except mariadb.Error as e:
            utils.log_error(logger, e)
            return None

    def execute_statement(self, statement: str, data: Sequence = ()) -> Cursor:
        """Execute an arbitrary parameterized statement, commit, return the cursor."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(statement, data)
            self.con.commit()
            return self.cursor
        except mariadb.Error as e:
            logger.error("Error while trying to execute statement %s: %s", statement, e)
            raise e

    def add_transaction(self, message: int, user: int) -> None:
        """Insert a new (unverified) transaction message for *user*."""
        logger.debug(f"Saving transaction to database with msg {str(message)} and user {str(user)}")
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "INSERT INTO messages (msgID, userID) VALUES (?, ?);",
                (message, user))
            self.con.commit()
        except mariadb.Error as e:
            logger.error(f"Error while trying to insert a new transaction: {e}")
            raise e

    # Fixed return annotation: the method returns the affected row count.
    def set_state(self, message: int, state: int) -> int:
        """Set the t_state column of a transaction; return affected row count."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "UPDATE messages SET t_state = ? WHERE messages.msgID=?;",
                (state, message))
            self.con.commit()
            return self.cursor.rowcount
        except mariadb.Error as e:
            logger.error(f"Error while trying to update the transaction {message} to state {state}: {e}")
            raise e

    # Fixed return annotation: t_state is a TINYINT, not a bool.
    def get_state(self, message: int) -> Optional[int]:
        """Return the t_state of a transaction, or None if it does not exist."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "SELECT msgID, t_state FROM messages WHERE messages.msgID=?;",
                (message,))
            self.con.commit()
            res = self.cursor.fetchone()
            if res is None:
                return None
            (msgID, state) = res
            return state
        except mariadb.Error as e:
            logger.error(f"Error while trying to get state of a transaction: {e}")
            raise e

    def get_owner(self, message: int) -> Optional[Tuple[int, bool]]:
        """Return (userID, verified) for a transaction, or None if unknown."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "SELECT userID, verified FROM messages WHERE msgID=?;",
                (message,))
            res = self.cursor.fetchone()
            if res is None:
                return None
            (user, verified) = res
            # BIT columns come back as ints/bytes; normalize to bool.
            verified = verified == 1
            return user, verified
        except mariadb.Error as e:
            logger.error(f"Error while trying to get a transaction: {e}")
            raise e

    def set_verification(self, message: int, verified: Union[bool, int]) -> int:
        """Set the verified flag of a transaction; return affected row count."""
        # Normalize bools to 0/1 for the BIT column, consistent with
        # set_ocr_verification below.
        if isinstance(verified, bool):
            verified = 1 if verified else 0
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "UPDATE messages SET verified = ? WHERE messages.msgID=?;",
                (verified, message))
            self.con.commit()
            return self.cursor.rowcount
        except mariadb.Error as e:
            logger.error(f"Error while trying to update the transaction {message} to {verified}: {e}")
            raise e

    def is_unverified_transaction(self, message: int) -> Optional[bool]:
        """Return True if the transaction exists and is unverified, None if unknown."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "SELECT msgID, verified FROM messages WHERE messages.msgID=?;",
                (message,))
            self.con.commit()
            res = self.cursor.fetchone()
            if res is None:
                return None
            (msgID, verified) = res
            # The BIT column is returned as bytes by the driver.
            return verified == b'\x00'
        except mariadb.Error as e:
            logger.error(f"Error while trying to check a transaction: {e}")
            raise e

    def get_unverified(self, include_user: bool = False) -> Union[List[int], List[Tuple[int, int]]]:
        """Return all unverified message ids, optionally paired with user ids."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            res = []
            if include_user:
                self.cursor.execute(
                    "SELECT msgID, userID FROM messages WHERE verified=b'0';")
                for (msg, user) in self.cursor:
                    res.append((msg, user))
            else:
                self.cursor.execute(
                    "SELECT msgID FROM messages WHERE verified=b'0';")
                for (msg,) in self.cursor:
                    res.append(msg)
            return res
        except mariadb.Error as e:
            logger.error(f"Error while trying to get all unverified transactions: {e}")
            raise e

    def set_ocr_verification(self, message: int, verified: Union[bool, int]) -> int:
        """Set the ocr_verified flag of a transaction; return affected row count."""
        # isinstance instead of the original `type(verified) == bool`.
        if isinstance(verified, bool):
            verified = 1 if verified else 0
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "UPDATE messages SET ocr_verified = ? WHERE messages.msgID=?;",
                (verified, message))
            self.con.commit()
            return self.cursor.rowcount
        except mariadb.Error as e:
            logger.error(f"Error while trying to update the transaction {message} to ocr_verified {verified}: {e}")
            raise e

    def get_ocr_verification(self, message: int) -> Optional[bool]:
        """Return the ocr_verified flag of a transaction, or None if unknown."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "SELECT msgID, ocr_verified FROM messages WHERE messages.msgID=?;",
                (message,))
            self.con.commit()
            res = self.cursor.fetchone()
            if res is None:
                return None
            (msgID, verified) = res
            return verified == b'\x01'
        except mariadb.Error as e:
            logger.error(f"Error while trying to check a transaction: {e}")
            raise e

    def delete(self, message: int) -> None:
        """Delete a transaction message; warn if not exactly one row was removed."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "DELETE FROM messages WHERE messages.msgID=?",
                (message,))
            self.con.commit()
            affected = self.cursor.rowcount
            if affected != 1:
                logger.warning(f"Deletion of message {message} affected {affected} rows, expected was 1 row")
        except mariadb.Error as e:
            logger.error(f"Error while trying to delete a transaction: {e}")
            raise e

    def add_shortcut(self, msg_id: int, channel_id: int) -> None:
        """Insert a shortcut message bound to a channel."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "INSERT INTO shortcuts (msgID, channelID) VALUES (?, ?);",
                (msg_id, channel_id))
            self.con.commit()
            affected = self.cursor.rowcount
            if affected != 1:
                logger.warning(f"Insertion of shortcut message {msg_id} affected {affected} rows, expected was 1 row")
            else:
                logger.info(f"Inserted shortcut message {msg_id}, affected {affected} rows")
        except mariadb.Error as e:
            logger.error(f"Error while trying to insert a shortcut message {msg_id}: {e}")
            raise e

    def get_shortcuts(self) -> List[Tuple[int, int]]:
        """Return all (msgID, channelID) shortcut pairs."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            res = []
            self.cursor.execute(
                "SELECT msgID, channelID FROM shortcuts;")
            for (msg, channel) in self.cursor:
                res.append((msg, channel))
            return res
        except mariadb.Error as e:
            logger.error(f"Error while trying to get all shortcut messages: {e}")
            raise e

    def delete_shortcut(self, message: int) -> None:
        """Delete a shortcut message; log whether exactly one row was removed."""
        if self.con is None or not self.con.open:
            self.try_connect()
        try:
            self.cursor.execute(
                "DELETE FROM shortcuts WHERE shortcuts.msgID=?",
                (message,))
            self.con.commit()
            affected = self.cursor.rowcount
            if affected != 1:
                logger.warning(f"Deletion of shortcut message {message} affected {affected} rows, expected was 1 row")
            else:
                logger.info(f"Deleted shortcut message {message}, affected {affected} rows")
        except mariadb.Error as e:
            logger.error(f"Error while trying to delete a shortcut message: {e}")
            raise e
| Blaumeise03/AccountingBot | accounting_bot/ext/accounting_db.py | accounting_db.py | py | 11,837 | python | en | code | 6 | github-code | 36 |
13231395273 | class Creation(object):
"""The creation module for inheritance.
Functions:
create:
assign a chain.
_creation:
creation a chain.
_create_text:
creation a text.
"""
    def create(self):
        """Assign a chain to the self._chain.

        Builds the Markov chain from ``self._data`` and then discards the
        consumed source data.

        Note: this overwrites the self._chain.
        """
        if self._chain:
            self._chain = {}
        self._creation()
        # The source data has been consumed; replace it with an empty tuple.
        # (The original comment called this an "empty generator", which it
        # is not -- it is simply an empty tuple.)
        self._data = ()
def _creation(self):
"""Creation of the chain.
Creating the Markov chain based on self._data.
Note: (
"*START* text_1 *END* *TEXT_END*",
"*START* text_2 *END* *TEXT_END*"
)
text_2 is another text.
In this way, a words from text_1
WILL NOT BE UNITED with a words from text_2.
Raises:
ValueError:
the self._data is empty.
"""
if not self._data:
raise ValueError("data is empty")
self._chain[self.start] = []
[self._create_text(text) for text in self._data]
def _create_text(self, text):
"""Creation a text.
Fills up the self._chain.
Warning: a text should not be a pure str type.
At least it should be (str,) or [str].
Args:
text (any iterable type):
a data for the self._chain.
Examples:
self._data: (
"*START* One fish two fish red fish blue fish. *END* *TEXT_END*",
)
self._chain: {
'blue': ['fish.'],
'fish.': ['*END*'],
'*START*': ['One'],
'*END*': ['*TEXT_END*'],
'fish': ['two', 'red', 'blue'],
'two': ['fish'],
'One': ['fish'],
'red': ['fish']
}
"""
create_elements = lambda text, count, text_i=0, count_i=0: [
text[i + text_i] for i in range(count + count_i)
]
for i in range(len(text) - self.window):
elements = create_elements(text, self.window, i, 1)
word = elements[-1]
# adding a words for the self.start key.
if elements[0] == self.start:
key = " ".join(create_elements(elements, self.window, 1))
self._chain[self.start].append(key)
key = " ".join(create_elements(elements, self.window))
if (key != self.start) and (key in self._chain):
self._chain[key].append(word)
elif key != self.start:
self._chain[key] = [word]
| Amaimersion/markov-chain | chain/creation.py | creation.py | py | 2,793 | python | en | code | 6 | github-code | 36 |
38687624011 | import random
def setWinningNumber():
    """Spin the wheel and return the winning pocket as a string.

    Pockets are 0-37, where 37 stands for the double zero. Single-digit
    pockets 1-9 are zero-padded ("07") to match the two-character lookup
    tables used elsewhere; plain "0" stays one character.

    Bug fix: the original converted the number to str and then compared it
    against the ints 1 and 9 (`winningNumber <= 9`), which raises TypeError
    on Python 3. Compare numerically before formatting instead.
    """
    winningNumber = random.randint(0, 37)
    if 1 <= winningNumber <= 9:
        return "0" + str(winningNumber)
    return str(winningNumber)
"""def isBlack(number, BlackNumList):
if number in BlackNumList:
return True
else:
return False
"""
def getColor(number):
    """Map a wheel-number string to its color name.

    Expects the zero-padded two-character form for 1-9 ("09"), plus "0",
    "00" and "37" (double zero) for the green pockets. Unknown values yield
    "Unexpected color value".
    """
    black_numbers = {"28", "26", "11", "20", "17", "22", "15", "24", "13", "27", "25", "12", "19", "18", "21", "16", "23", "14"}
    red_numbers = {"09", "30", "07", "32", "05", "34", "03", "36", "01", "10", "29", "08", "31", "06", "33", "04", "35", "02"}
    green_numbers = {"0", "00", "37"}
    if number in black_numbers:
        return "Black"
    if number in red_numbers:
        return "Red"
    if number in green_numbers:
        return "Green"
    return "Unexpected color value"
def getOddOrEven(number):
    """Classify a wheel number (string or int) as "Even" or "Odd"."""
    return "Even" if int(number) % 2 == 0 else "Odd"
def getQuadrant(number):
    """Return which physical wheel quadrant the number string belongs to.

    Bug fix: the first comparison referenced the misspelled name ``numer``,
    which raised NameError on every call.
    """
    # Quadrants of the American wheel layout.
    quad1 = ["28", "26", "11", "20", "17", "22", "15", "24"]
    quad2 = ["09", "30", "07", "32", "05", "34", "03", "36"]
    quad3 = ["01", "10", "29", "08", "31", "06", "33", "04", "35", "02"]
    quad4 = ["13", "27", "25", "12", "19", "18", "21", "16", "23", "14"]
    if number in quad1:
        return "Quad 1"
    elif number in quad2:
        return "Quad 2"
    elif number in quad3:
        return "Quad 3"
    elif number in quad4:
        return "Quad 4"
    else:
        return "Unexpected error for " + number
def getThirds(number):
    """Return which third of the wheel layout the number string falls in.

    Returns None (implicitly, as in the original) for the green pockets or
    any unrecognized value.
    """
    first_third = {"28", "26", "11", "20", "17", "22", "15", "24", "09", "30", "07", "32"}
    second_third = {"05", "34", "03", "36", "01", "10", "29", "08", "31", "06", "33", "04"}
    third_third = {"35", "02", "13", "27", "25", "12", "19", "18", "21", "16", "23", "14"}
    if number in first_third:
        return "First Third"
    if number in second_third:
        return "Second Third"
    if number in third_third:
        return "Third Third"
    return None
def getColorBetWinLose(color, colorBet):
    """Return "Win" if the spun color equals the player's bet, else "Lose".

    Bug fix: the original compared strings with ``is`` / ``is not``
    (identity), which only works by accident for interned literals and
    fails for equal strings built at runtime; use equality instead. The
    original's unreachable "Unexpected Error" branch is dropped.
    """
    return "Win" if color == colorBet else "Lose"
def get24Plus8WinLose(number):
    """Score one spin for the "24 + 8" system.

    Loses on 0, 00, 16, 19 and 37 (double zero); wins on 13-24 otherwise;
    pushes on everything else. Accepts an int or the string form.

    Bug fix: the original compared the range with string ordering
    (`number >= "13"`), which is lexicographic -- an unpadded "2" counted
    as a Win. Compare numerically instead.
    """
    number = str(number)
    if number in ("0", "00", "16", "19", "37"):
        return "Lose"
    if 13 <= int(number) <= 24:
        return "Win"
    return "Push"
def main():
    """Interactive simulator: spin N times, tracking a simple color bet and
    the "24 + 8" betting system.

    Fixes relative to the original:
    - normalized the Python-2 ``input``/``raw_input`` mix to Python 3
      (``input`` + explicit ``int`` conversion);
    - the player's color is now also set for a Red bet (previously a red
      bet left playerColor at "N/A", so it always lost);
    - string comparisons use ``==`` instead of ``is``;
    - removed unused bookkeeping variables (dollars, streaks, win/push/lose).
    """
    spins = int(input("How many spins do you want to play: "))
    colorBet = input("Black or Red: ")
    if colorBet in ("B", "Black"):
        playerColor = "Black"
    elif colorBet in ("R", "Red"):
        playerColor = "Red"
    else:
        playerColor = "N/A"
    colorWins = 0
    colorLoses = 0
    twentyFourPlusEightWin = 0
    twentyFourPlusEightLose = 0
    twentyFourPlusEightPush = 0
    twentyFourPlusEightLoseStreak = 0
    twentyFourPlusEightLoseHighStreak = 0
    for i in range(spins):
        winningNumber = setWinningNumber()
        oddOrEven = getOddOrEven(winningNumber)
        color = getColor(winningNumber)
        twentyFourPlusEightWinLose = get24Plus8WinLose(winningNumber)
        colorWinLoss = getColorBetWinLose(color, playerColor)
        if colorWinLoss == "Win":
            colorWins += 1
        elif colorWinLoss == "Lose":
            colorLoses += 1
        if twentyFourPlusEightWinLose == "Win":
            twentyFourPlusEightWin += 1
            twentyFourPlusEightLoseStreak = 0
        elif twentyFourPlusEightWinLose == "Lose":
            twentyFourPlusEightLose += 1
            twentyFourPlusEightLoseStreak += 1
        elif twentyFourPlusEightWinLose == "Push":
            twentyFourPlusEightPush += 1
        twentyFourPlusEightLoseHighStreak = max(
            twentyFourPlusEightLoseHighStreak, twentyFourPlusEightLoseStreak
        )
        print("Spin " + str(i + 1))
        print("------------")
        print(winningNumber)
        print("Winning Number is: " + str(winningNumber))
        print("Color: " + color)
        print("Odd or Even: " + oddOrEven)
        print("")
        print("Color Bet: " + colorWinLoss)
        print("Color Wins:" + str(colorWins))
        print("Color Loses:" + str(colorLoses))
        print("24 + 8 Win/Lose/Push: " + twentyFourPlusEightWinLose)
        print("24 + 8 Wins: " + str(twentyFourPlusEightWin))
        print("24 + 8 Loses: " + str(twentyFourPlusEightLose))
        print("24 + 8 Push: " + str(twentyFourPlusEightPush))
        print("24 + 8 High Streak:" + str(twentyFourPlusEightLoseHighStreak))
        print("")


if __name__ == '__main__':
    main()
| DanielMeeker/RouletteSimulator | rouletteStrats.py | rouletteStrats.py | py | 5,466 | python | en | code | 0 | github-code | 36 |
4898894473 | import os
import sys
from PyQt5 import QtGui, QtWidgets
"""
from datetime import datetime,timedelta
from threading import Timer
"""
print('poggo')
class SystemTrayIcon(QtWidgets.QSystemTrayIcon):
    """System-tray icon for TimeUp.

    Provides a right-click context menu with an Exit action and opens the
    main window when the tray icon is double-clicked.
    """
    def __init__(self,icon,parent=None):
        QtWidgets.QSystemTrayIcon.__init__(self,icon,parent)
        self.setToolTip('TimeUp')
        menu = QtWidgets.QMenu(parent)
        exitApp = menu.addAction('Exit')
        #t.cancel()
        # NOTE(review): exits the whole application immediately, without any
        # cleanup or confirmation.
        exitApp.triggered.connect(lambda:sys.exit())
        self.setContextMenu(menu)
        self.activated.connect(self.trayActivate)
    def trayActivate(self, reason):
        # Only react to double-clicks on the tray icon.
        if reason == self.DoubleClick:
            # startWindow stores global references, so the window is not
            # garbage-collected when this local goes out of scope.
            window = startWindow()
def startWindow():
    """Create, populate and show the main window.

    Keeps the window and its UI object in module-level globals so the Qt
    objects outlive this call (otherwise the window would be destroyed as
    soon as the locals are collected). Returns the window.
    """
    from mainmenu4 import Ui_MainWindow
    import sys  # NOTE(review): shadows the top-level import; appears unused here
    global daWindow
    global ui
    daWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(daWindow)
    ui.refreshList()
    daWindow.show()
    return daWindow
def trayMain():
    """Application entry point.

    Builds the Qt application and the tray icon (expects ``icon.png`` next
    to this file), then runs the event loop until Exit is chosen from the
    tray menu.
    """
    from mainmenu4 import Ui_MainWindow  # NOTE(review): unused since the window setup below was commented out
    trayApp = QtWidgets.QApplication(sys.argv)
    w = QtWidgets.QWidget()  # invisible parent widget for the tray icon/menu
    trayApp.setQuitOnLastWindowClosed(False)  # keep running when all windows close
    dirName=os.path.dirname(os.path.abspath(__file__))
    iconPath = os.path.join(dirName, 'icon.png')
    trayIcon = SystemTrayIcon(QtGui.QIcon(iconPath),w)
    trayIcon.show()
    """
    print ('setting up main window and hiding it.')
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    ui.refreshList()
    MainWindow.hide()
    """
    #mainmenu4.runMain()
    sys.exit(trayApp.exec_())


if __name__ == '__main__':
    trayMain()
"""
xdate=datetime.today()
ydate=timedelta(0,10)
print (xdate)
print (ydate)
delta_t=xdate + ydate
print (delta_t)
secs= delta_t - xdate
def hello_world():
print ('hello world')
#print (secs)
t = Timer(5,hello_world())
""" | verentino/PU_PDT_TimeUp | tray_old.py | tray_old.py | py | 1,936 | python | en | code | 0 | github-code | 36 |
25852170492 | """Returns probes where each 'N' base is replaced by real bases.
The 'N' base in a probe indicates an unknown -- i.e., the base
can either 'A', 'T', 'C', or 'G'.
This acts as a filter on the probes by returning, for each probe p:
- if p does not contain an 'N' base, then p itself.
- if p does contain one or more 'N' bases, then 4 or more probes
in which the sequence is maintained except 'N' is expanded to
be 'A', 'T', 'C', and 'G'.
For example, if a probe is 'ANA', then the following probes are
returned in place of 'ANA': 'AAA', 'ATA', 'ACA', 'AGA'. If a
probe is 'ANN', then the following probes are returned in place
of 'ANN': 'AAA', 'AAT', 'AAC', 'AAG', 'ATA', 'ATT', 'ATC', 'ATG',
'ACA', 'ACT', 'ACC', 'ACG', 'AGA', 'AGT', 'AGC', 'AGG'. If a
probe contains n 'N' bases, then 4^n probes are returned in place
of the probe.
The number of output probes is dependent on the number of 'N'
bases within each probe. The order of the input probes is
conserved -- i.e., the new, "expanded" probes are returned among
the other probes.
Since the number of output probes grows exponentially in the
number of 'N' bases within a probe, we can limit this with
the 'limit_n_expansion_randomly' parameter. When set to a nonnegative
integer, only limit_n_expansion_randomly 'N' bases are expanded;
these ones are chosen randomly. The other 'N' bases, if there
exist others, are randomly replaced with an unambiguous base.
When set to None, all 'N' bases are expanded.
"""
import random
from catch.filter.base_filter import BaseFilter
from catch import probe
__author__ = 'Hayden Metsky <hayden@mit.edu>'
class NExpansionFilter(BaseFilter):
    """Filter that expands 'N' bases within probes.

    Each probe with k expandable 'N' positions is replaced by 4^k probes
    covering every unambiguous assignment; any 'N' positions beyond
    ``limit_n_expansion_randomly`` are first replaced by random bases so
    the output does not blow up exponentially.
    """

    def __init__(self, limit_n_expansion_randomly=3):
        """
        Args:
            limit_n_expansion_randomly: when set to a nonnegative integer,
                only this number of 'N' bases are expanded, and they
                are randomly chosen; the rest are replaced with
                random unambiguous bases. When None, all 'N' bases
                are expanded
        """
        self.limit_n_expansion_randomly = limit_n_expansion_randomly

    def _filter(self, input):
        """Return input probes where 'N' bases are replaced with real bases.

        Probe order is conserved: expanded probes appear in place of the
        probe they came from.
        """
        real_bases = ['A', 'T', 'C', 'G']
        output = []
        for p in input:
            num_n = p.seq_str.count('N')
            if num_n == 0:
                # p has no 'N' bases, so there is nothing to expand
                output += [p]
                continue
            p_seq_init = p.seq_str
            if (self.limit_n_expansion_randomly is not None and
                    num_n > self.limit_n_expansion_randomly):
                # Randomly replace (num_n - limit_n_expansion_randomly) 'N'
                # bases with random unambiguous bases, so that at most
                # limit_n_expansion_randomly of them remain to be expanded.
                occurrences = [i for i, base in enumerate(p_seq_init)
                               if base == 'N']
                p_seq_init_list = list(p_seq_init)
                while len(occurrences) > self.limit_n_expansion_randomly:
                    occ_to_replace = random.choice(occurrences)
                    replacement = random.choice(real_bases)
                    p_seq_init_list[occ_to_replace] = replacement
                    occurrences.remove(occ_to_replace)
                p_seq_init = ''.join(p_seq_init_list)
            expanded_probe_seqs = [p_seq_init]
            # Keep iterating (expanding) while there are still 'N' bases left
            while [s for s in expanded_probe_seqs if 'N' in s]:
                expanded_probe_seqs_updated = []
                for s in expanded_probe_seqs:
                    # Bug fix: the original used s.index('N'), which raises
                    # ValueError when 'N' is absent and can never return -1,
                    # making the guard below dead code; str.find() returns -1.
                    N_pos = s.find('N')
                    if N_pos == -1:
                        # There is no need to expand s because there is no 'N'
                        expanded_probe_seqs_updated += [s]
                        continue
                    # Expand the first 'N' in s (at position N_pos)
                    s_list = list(s)
                    for b in real_bases:
                        s_list[N_pos] = b
                        expanded_probe_seqs_updated += [''.join(s_list)]
                expanded_probe_seqs = expanded_probe_seqs_updated
            for seq in expanded_probe_seqs:
                output += [probe.Probe.from_str(seq)]
        return output
| broadinstitute/catch | catch/filter/n_expansion_filter.py | n_expansion_filter.py | py | 4,410 | python | en | code | 63 | github-code | 36 |
42982956826 | import redis
#import hazelcast
import logging
import random
import azlog
log = azlog.getLogger(__name__)
def SetupCacheConn(type, ip, port, key, ssl):
    """Create a cache connection handle.

    Only the "redis" cache type is currently implemented; pass
    ssl="yes" for a TLS connection.

    Raises:
        ValueError: for unsupported cache types. (The original code
            printed "working on it..." and then crashed with a
            NameError because ``r`` was never assigned.)
    """
    if type == "redis":
        if ssl == "yes":
            r = SetupRedisSSLConn(ip, port, key)
        else:
            r = SetupRedisConn(ip, port, key)
    else:
        # Fail explicitly instead of falling through to an unbound name.
        raise ValueError("unsupported cache type: %s" % type)
    return r
def SetupRedisConn(ip, port, key):
    """Return a plain (non-TLS) Redis client for the given host/port/password."""
    r = redis.Redis(
        host=ip,
        port=port,
        password=key)
    return r
def SetupRedisSSLConn(ip, port, key):
    """Return a TLS Redis client.

    NOTE(review): ssl_cert_reqs='none' disables certificate
    verification — specify cert locations for production use.
    """
    r = redis.StrictRedis(
        host=ip,
        port=port,
        password=key,
        ssl_cert_reqs=u'none', #-- or specify location of certs
        ssl=True)
    return r
def GetTrade(r, keyname):
    """Fetch the raw trade XML stored under ``keyname`` from cache handle ``r``."""
    return r.get(keyname)
''' cachetype = redis, nfs etc.
io = "input" or "output"
r = redis handle
format = eyxml or varxml
tradenum = trade number
xmlstring = the trade xml data
'''
def PutTrade(cache_type,io,r,format,tradenum,xmlstring):
    """Store a trade XML blob in the cache under a conventional key name.

    The key is e.g. "ey0001234.xml" (input) or "ey0001234_result.xml"
    (output); "%007d" zero-pads the trade number to 7 digits.

    Returns the cache handle ``r`` on success, or the int 1 for a bad
    ``format``/``io`` value.

    NOTE(review): mixed return types (handle vs int error code); for
    any ``cache_type`` other than "redis" the write is silently skipped
    and ``r`` is still returned.
    """
    if (format == "eyxml"):
        prefix = "ey"
    elif (format == "varxml"):
        prefix = "var"
    else:
        log.error("invalid format: %s" % format)
        return(1)
    if (io == "input"):
        keyname = "%s%007d.xml" % (prefix, tradenum)
    elif (io == "output"):
        keyname = "%s%007d_result.xml" % (prefix, tradenum)
    else:
        # NOTE(review): this error message says "File format" but it is
        # the io argument that was invalid.
        log.error("File format: %s; input/output only supported. " % format)
        return(1)
    if (cache_type=="redis"):
        r.set(keyname,xmlstring)
        log.debug("Trade %d: written as: %s:\n%s" % (tradenum,keyname,xmlstring))
    return r
def InjectRandomFail(failure):
    """With probability ``failure``, log an injected error and return 1.

    Returns None otherwise, matching the original implicit return.
    """
    draw = random.uniform(0.0, 1.0)
    if draw >= failure:
        return None
    logging.error("RANDOM ERROR INJECTION: TASK EXIT WITH ERROR")
    return 1
def DoFakeCompute(xmlstring, delay_time, task_duration, mem_usage):
    """Simulate a compute task: allocate memory, wait, then spin the CPU.

    Args:
        xmlstring: trade payload (unused; kept for interface parity).
        delay_time: startup sleep in seconds.
        task_duration: busy-loop duration in milliseconds.
        mem_usage: memory to hold, in MiB (each row is 131072 float64s
            = 1 MiB).
    """
    import numpy as np
    import time
    import random
    # allocate the memory
    array_size = (mem_usage, 131072)
    data = np.ones(array_size, dtype=np.float64)
    # do startup delay
    time.sleep(delay_time)
    # now do fake computation: repeatedly scale and reset the array
    # until the wall-clock budget is used up
    task_duration_s = task_duration / 1000.0 #- convert from ms to s
    end_time = time.time() + task_duration_s
    while time.time() < end_time:
        data *= 12345.67890
        data[:] = 1.0
| Azure/HPC-Accelerator | scenarios/batch/code/src/utils.py | utils.py | py | 2,295 | python | en | code | 9 | github-code | 36 |
29673229409 | #!/usr/bin/env python
import rospy
import tf2_ros
import gazebo_msgs.msg
import geometry_msgs.msg
import time
import pdb
# Use the simulator's odometry frame as the fixed parent frame when in
# simulation; a real robot publishes an "origin" frame instead.
IS_SIM = True
if IS_SIM:
    ORIGIN_FRAME = 'odom'
else:
    ORIGIN_FRAME = 'origin'

if __name__ == '__main__':
    rospy.init_node('gazebo_tf_broadcaster')
    broadcaster = tf2_ros.StaticTransformBroadcaster()
    publish_frequency = rospy.get_param("publish_frequency", 10)
    last_published = None

    def callback(data):
        """Re-publish every Gazebo link pose as a TF transform.

        Throttled to ``publish_frequency`` Hz; each transform is
        parented to ORIGIN_FRAME and named after the Gazebo link name.
        """
        global last_published
        # Rate limit: skip if the last publish was too recent.
        if last_published and publish_frequency > 0.0 and time.time() - last_published <= 1 / publish_frequency:
            return
        transforms = []
        for i in range(len(data.name)):
            # if data.name[i] == "robot::base_link":
            #     child_frame = 'base_link'
            # else:
            #     continue
            transform = geometry_msgs.msg.TransformStamped()
            transform.header.stamp = rospy.Time.now()
            transform.header.frame_id = ORIGIN_FRAME
            transform.child_frame_id = data.name[i]
            # transform.child_frame_id = child_frame
            transform.transform.translation.x = data.pose[i].position.x
            transform.transform.translation.y = data.pose[i].position.y
            transform.transform.translation.z = data.pose[i].position.z
            transform.transform.rotation.w = data.pose[i].orientation.w
            transform.transform.rotation.x = data.pose[i].orientation.x
            transform.transform.rotation.y = data.pose[i].orientation.y
            transform.transform.rotation.z = data.pose[i].orientation.z
            transforms.append(transform)
            # rospy.loginfo("Publishing: {} as {}".format(data.name[i], child_frame))
        broadcaster.sendTransform(transforms)
        last_published = time.time()

    rospy.Subscriber("/gazebo/link_states", gazebo_msgs.msg.LinkStates, callback)
    rospy.spin()
| apacheck/stretch_skill_repair | nodes/gazebo_tf_publisher.py | gazebo_tf_publisher.py | py | 1,914 | python | en | code | 1 | github-code | 36 |
72136189865 | import os
from flask import jsonify, current_app
from flask_mail import Message
from werkzeug.utils import secure_filename
from PIL import Image
from api import mail
QUESTIONS_PER_PAGE = 5


def paginator(request, data):
    """Return one page of ``data`` as formatted items.

    The 1-based page number comes from the request's ``page`` query
    argument (default 1). Only the items on the requested page are
    formatted — the original formatted the entire list and then sliced,
    doing O(len(data)) work per request.
    """
    page = request.args.get("page", 1, type=int)
    start = (page - 1) * QUESTIONS_PER_PAGE
    end = start + QUESTIONS_PER_PAGE
    return [item.format() for item in data[start:end]]
ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg", "gif"}


def allowed_file(filename):
    """Return True when ``filename`` ends in an allowed image extension."""
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
def save_image(image_file):
    """Shrink an uploaded image to fit 200x200 and save it under static/profile_pics.

    Returns the sanitised file name on success.

    NOTE(review): on any failure this returns str(e), which callers
    cannot distinguish from a valid file name — consider re-raising or
    returning None instead.
    """
    try:
        new_image_name = secure_filename(image_file.filename)
        output_folder_path = os.path.join(
            current_app.root_path, "static/profile_pics", new_image_name
        )
        # thumbnail() resizes in place, preserving aspect ratio.
        output_size = (200, 200)
        i = Image.open(image_file)
        i.thumbnail(output_size)
        i.save(output_folder_path)
        return new_image_name
    except Exception as e:
        return str(e)
def send_email(user, subject, sender, body):
    """Send a plain-text email to ``user.email`` via Flask-Mail."""
    msg = Message(subject, sender=sender, recipients=[user.email])
    msg.body = body
    mail.send(msg)
def json_failure(fields=None):
    """JSON body marking failure, merged with any extra fields."""
    extra = {} if fields is None else fields
    return jsonify({"success": False, **extra})
def json_success(fields=None):
    """200 JSON response marking success, merged with any extra fields."""
    extra = {} if fields is None else fields
    return jsonify({"success": True, **extra}), 200
def json_404(fields=None):
    """404 JSON response.

    BUGFIX: this previously reported ``"success": True`` alongside the
    404 status (copy-paste from json_success); a not-found response is
    not a success.
    """
    if fields is None:
        fields = {}
    return jsonify({"success": False, **fields}), 404
| dennisappiah/pong-game-api | api/utils.py | utils.py | py | 1,591 | python | en | code | 4 | github-code | 36 |
def read_data(filename):
    """Read "surname birth_year" lines from ``filename``.

    Returns a list of (surname, birth_year) tuples, skipping lines that
    do not have exactly two whitespace-separated fields. Returns None
    (after printing a message) when the file does not exist, matching
    the original behaviour relied on by the caller's ``if data:`` check.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            data = []
            for line in file:
                # split() handles any run of whitespace (tabs, double
                # spaces); split(' ') only handled single spaces.
                parts = line.split()
                if len(parts) == 2:
                    surname, birth_year = parts
                    data.append((surname, int(birth_year)))
            return data
    except FileNotFoundError:
        print("File not found!")
def sort_data(data):
    """Sort in place by (birth_year, surname) and return a reversed copy.

    The original performed two stable sorts (surname, then year), which
    is equivalent to one sort keyed on (year, surname). The caller's
    list is mutated exactly as before; the returned list is the
    descending order.
    """
    data.sort(key=lambda person: (person[1], person[0]))
    return data[::-1]
def print_data(data):
    """Print each (surname, birth_year) pair on its own line."""
    for surname, birth_year in data:
        print(surname, birth_year)
# Script entry: read the people list, sort it, and print the result.
filename = "people.txt"
data = read_data(filename)
if data:
    sorted_data = sort_data(data)
print_data(sorted_data) | Merlin0108/rep2 | lab10/3.py | 3.py | py | 824 | python | en | code | 0 | github-code | 36 |
11916959214 | from django import forms
from .models import Comment ,Blog,Category
class BlogForm(forms.ModelForm):
    """Create/edit form for Blog posts with an alphabetised category select."""
    # Declared explicitly so the dropdown is ordered by category name.
    category = forms.ModelChoiceField(
        queryset=Category.objects.all().order_by('name'))
    class Meta:
        model = Blog
        fields = ['title', 'featured_image', 'content','category']
    def __init__(self, *args, **kwargs):
        super(BlogForm, self).__init__(*args, **kwargs)
        # Apply Bootstrap styling to every widget.
        # NOTE(review): the same HTML id is assigned to all fields,
        # which produces duplicate ids in the rendered page.
        for name, field in self.fields.items():
            field.widget.attrs.update({'class': 'form-control'})
            field.widget.attrs.update({'id': 'form3Example1c'})
class NewCommentForm(forms.ModelForm):
    """Single-field form for posting a new comment."""
    class Meta:
        model = Comment
        fields = ["content"]
        # Render the content field as a Bootstrap-styled text input.
        widgets = {
            "content":forms.TextInput(attrs={"class":"form-control"},)
        }
| minarefaat1002/blog_website | blogs project/blog/forms.py | forms.py | py | 804 | python | en | code | 0 | github-code | 36 |
17978578215 | # 导入操作系统库
import os
# Change the working directory to the script's source folder
os.chdir(r"D:\softwares\applied statistics\pythoncodelearning\chap3\sourcecode")
# Import the plotting library
import matplotlib.pyplot as plt
# Import the support vector machine model
from sklearn import svm
# Import the decision-boundary visualisation helper
from sklearn.inspection import DecisionBoundaryDisplay
# Import the synthetic dataset generator
from sklearn.datasets import make_blobs
# Import matplotlib's font manager
from matplotlib import font_manager
# Load a CJK font so Chinese characters render correctly
font = font_manager.FontProperties(fname=r"C:\Windows\Fonts\SimKai.ttf")
# Use the seaborn plotting style
plt.style.use("seaborn-v0_8")
# Generate imbalanced samples: a large and a small cluster
n_samples_1 = 1000
n_samples_2 = 100
centers = [[0.0, 0.0], [2.0, 2.0]]
clusters_std = [1.5, 0.5]
# Classification data
X, y = make_blobs(
    n_samples=[n_samples_1, n_samples_2], # per-cluster sample counts
    centers=centers, # cluster centres
    cluster_std=clusters_std, # cluster standard deviations
    random_state=0,
    shuffle=False,
)
# Plain linear SVM model
clf = svm.SVC(kernel="linear", C=1.0)
# Fit the model
clf.fit(X, y)
# Class-weighted SVM model (minority class weighted 10x)
wclf = svm.SVC(kernel="linear", class_weight={1: 10})
# Fit the model
wclf.fit(X, y)
# Start plotting
fig, ax = plt.subplots(figsize=(6,6), tight_layout=True)
# Draw the scatter points
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors="k")
# Draw the plain SVM's decision boundary (dashed black)
disp = DecisionBoundaryDisplay.from_estimator(
    clf,
    X,
    plot_method="contour",
    colors="k",
    levels=[0],
    alpha=0.5,
    linestyles=["--"],
    ax=ax
)
# Draw the weighted SVM's decision boundary (solid red)
wdisp = DecisionBoundaryDisplay.from_estimator(
    wclf,
    X,
    plot_method="contour",
    colors="r",
    levels=[0],
    alpha=0.5,
    linestyles=["-"],
    ax=ax
)
# Add the legend
ax.legend(
    [disp.surface_.collections[0], wdisp.surface_.collections[0]],
    ["non weighted", "weighted"],
    loc="upper right",
)
plt.show()
fig.savefig("../codeimage/code4.pdf")
| AndyLiu-art/MLPythonCode | chap3/sourcecode/Python4.py | Python4.py | py | 1,925 | python | zh | code | 0 | github-code | 36 |
9190017437 | # imports
# scipy/anaconda imports
import pandas
from scipy import stats
import numpy
# python standard library imports
import math
import statistics
import copy
import collections
import time
nan = float("nan")
def fit_line(x_data, y_data):
    """Least-squares linear fit of y on x.

    Returns (slope, y_intercept, r_squared, p_value, std_err) — i.e.
    the scipy.stats.linregress result with the correlation coefficient
    squared into R^2.

    Example:
        slope, y_intercept, r_squared, p_value, std_err = fit_line(x, y)
        print(str.format("Fitted formula: Y = {a}X + {b}", a=slope, b=y_intercept))
    """
    fit = stats.linregress(x_data, y_data)
    return (fit[0], fit[1], fit[2] ** 2, fit[3], fit[4])
def chi_squared_of_fit(y_data, fitted_y_data):
    """
    This function calculates and returns the Chi-squared value of a
    generated set of Y values (fitted_y_data) against the observed
    Y values (y_data).

    Returns the (statistic, p-value) pair from scipy.stats.chisquare.
    """
    # note that in the typical definition of Chi-squared, it is assumed that
    # nature is wrong and our formula is theoretically perfect, but here we
    # are testing a model against empirical data, so the "expected" values
    # are the data we measured and the "observed" values are the values
    # generated by a fitting formula, and therefore we swap these variables
    # in the stats.chisquare(...) invocation
    return stats.chisquare(fitted_y_data,y_data)
def r_squared(y_data, test_y_data):
    """Coefficient of determination (R^2) of simulated vs observed Y values.

    R^2 = 1 - SS_residual / SS_total, where the residuals compare
    ``test_y_data`` to ``y_data`` and the total variation is measured
    around the mean of ``y_data``.
    """
    size = len(y_data)
    average = sum(y_data) / size
    sum_residual = 0
    sum_total = 0
    for idx in range(size):
        observed = y_data[idx]
        simulated = test_y_data[idx]
        sum_residual += (observed - simulated) ** 2
        sum_total += (observed - average) ** 2
    return 1 - (sum_residual / sum_total)
def _fit_err(x_data, y_data, formula_function, coefficient_vector):
"""
quickly calculates a simple fitting error value (this is NOT standard error!)
"""
sum = 0;
m = 1.0 / len(y_data)
for n in range(0,len(x_data)):
t = x_data[n]
obs = y_data[n]
sim = formula_function(*( t, coefficient_vector ) )
er = sim - obs
sum += er * er * m
return sum
def fit_function_bruteforce(x_data, y_data, formula_function, coefficient_vector, max_iterations ):
    """
    This function uses a brute-force guess-and-check search.
    x_data: the x values of the data set
    y_data: the y values of the data set
    formula_function: a function whose input parameters are (x, coefficient_vector)
                      and returns a single number by applying a formula to x with
                      coefficients defined in coefficient_vector. For example,
                      here's the function definition for an exponential decay fit
                      where the coefficients are [A, tau, C] in the formula
                      Y=A*e^(-x/tau)+C:

                      def exponential_decay_function(x, cv_list):
                          return cv_list[0] * math.exp( (-1 / cv_list[1]) * x ) + cv_list[2]

                      coefficients, precisions, iterations = fit_function_bruteforce(x, y, exponential_decay_function, [1,10,0], 1000000)
                      print(str.format("fit: Y={A}*e^(-x/{tau})+{C}",A=coefficients[0], tau=coefficients[1], C=coefficients[2]))
    coefficient_vector: list of numbers corresponding to coefficients in the
                        formula which fit_function_bruteforce(...) will manipulate
                        to try to fit the formula to the data. The starting values
                        should be a best guess close to the actual value. If these
                        values are too far off, the formula may get stuck in a
                        local maxima or edge-case
    max_iterations: the maximum number of iterations through the fitting formula

    returns coefficient_vector, precision, iteration_count:
        returns the coefficient_vector after adjusting it to achieve the best
        possible fit within the allowed number of iterations, the +/- precision
        of the fit for each coefficient (also as a list), and the actual number
        of iterations used to perform the fit
    """
    iterations = 0
    # initialize deltas to the coefficients (starting step = 25% of each value)
    delta_vector = scalar_multiply(copy.deepcopy(coefficient_vector), 0.25)
    while(iterations < max_iterations):
        # scale-down the deltas by a factor of 2 each time
        delta_vector = scalar_multiply(delta_vector, 0.5)
        new_cv, jiggles = _improveFit(x_data, y_data, formula_function, coefficient_vector, delta_vector, max_iterations - iterations)
        coefficient_vector = new_cv
        iterations += jiggles
    # done
    return coefficient_vector, delta_vector, iterations
def _improveFit(x,y,formula,cvec,delta, maxIterations):
    """
    jiggles the coefficients to improve the formula fit a little bit
    x: x data
    y: y data
    formula: the fitting formula (see description for fit_function_bruteforce(...) )
    cvec: coefficient vector (see description for fit_function_bruteforce(...) )
    delta: list of jiggle sizes corresponding to cvec
    maxIterations: maximum number of jiggles allowed before returning

    Returns (cvec, iterations). Performs coordinate-descent passes:
    each pass tries +delta/-delta on every coefficient, keeping the
    move that lowers _fit_err; stops when a full pass no longer lowers
    the error or the iteration budget runs out. Note cvec is mutated
    in place.
    """
    # adjust the variables by the delta amount in decrease the error value
    iterations = 0
    while True: # python does not support do-while loops
        lastErr = _fit_err(x,y,formula,cvec)
        for i in range(len(cvec)):
            oldC = cvec[i]
            upC = cvec[i]+delta[i]
            downC = cvec[i]-delta[i]
            # current fit error
            currentErr = _fit_err(x,y,formula,cvec)
            # increase the coefficient a little and check again
            cvec[i] = upC
            errPlus = _fit_err(x,y,formula,cvec)
            # decrease the coefficient a little and check again
            cvec[i] = downC
            errMinus = _fit_err(x,y,formula,cvec)
            if(errPlus < currentErr and errPlus < errMinus):
                # increase the variable
                cvec[i] = upC;
            elif(errMinus < currentErr):
                # decrease the variable
                cvec[i] = downC
            else:
                # no change
                cvec[i] = oldC
        iterations += 1
        if(lastErr <= _fit_err(x,y,formula,cvec) or iterations >= maxIterations):
            break
    return cvec, iterations
def scalar_multiply(vector_list, scalar):
    """Return a new vector: each element of ``vector_list`` times ``scalar``.

    The input list is left unmodified. (The original deep-copied the
    list and multiplied element-by-element in place; a comprehension
    builds the same result without the copy overhead.)
    """
    return [value * scalar for value in vector_list]
def p_value(set1, set2):
    """Two-sided P-value of an independent-samples T-test on the two sets."""
    result = stats.ttest_ind(set1, set2)
    return result[1]
def mean(data_set):
    """Arithmetic mean of ``data_set``, or NaN when it cannot be computed.

    The bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception`` (covers empty input and
    non-numeric values, as before).
    """
    try:
        return statistics.mean(data_set)
    except Exception:
        return float("nan")
def stdev(data_set):
    """Sample standard deviation of ``data_set``, or NaN when undefined.

    Fewer than two points has no sample stdev; any computation failure
    also yields NaN. The bare ``except:`` was narrowed to
    ``except Exception`` so SystemExit/KeyboardInterrupt propagate.
    """
    if len(data_set) < 2:
        return float("nan")
    try:
        return statistics.stdev(data_set)
    except Exception:
        return float("nan")
20638189382 | from tkinter import *
# Main window, sized to fill the screen.
root=Tk()
h,w=root.winfo_screenheight(),root.winfo_screenwidth()
root.geometry('%dx%d+0+0'%(w,h))

def seat():
    # Close this window and open the seat-booking screen.
    root.destroy()
    import journey_details

def check():
    # Close this window and open the booking-lookup screen.
    root.destroy()
    import checkbooking

def add():
    # Close this window and open the add-bus screen (admin only).
    root.destroy()
    import addbus

# NOTE(review): navigation by importing a module runs its top-level Tk
# code only on the FIRST import of that module.
img=PhotoImage(file=".\\Bus_for_project.png")
Label(root,image=img).grid(row=0,column=0,columnspan=3,padx=w//3)
Label(root,text='Online Bus Booking System',font='Arial 18 bold', bg='cadetblue1',fg='red').grid(row=1,column=0,columnspan=3,padx=w//3)
Label(root,text='').grid(row=2,column=1)
Label(root,text='').grid(row=3,column=1)
Button(root,text='Seat Booking',bg='green2',command=seat).grid(row=4,column=0)
Button(root,text='Check Booked Seat', bg='green3',command=check).grid(row=4,column=1)
Button(root,text='Add Bus Details', bg='green4',command=add).grid(row=4,column=2)
Label(root,text='').grid(row=5,column=1)
Label(root,text='For admin only',fg='red').grid(row=6,column=2)
root.mainloop()
| aviraljain19/Python-Bus-Booking-Project | home.py | home.py | py | 1,006 | python | en | code | 0 | github-code | 36 |
1158088849 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from utils import INPUT_SHAPE, batch_generator
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, Dropout, Dense, Flatten
from keras.models import load_model
def load_data():
    """Load the driving log CSV and split into train/validation sets.

    X holds the three camera image paths (center, left, right) per
    sample; y is the steering angle. Returns X_train, X_valid,
    y_train, y_valid with an 80/20 split.
    """
    data_df = pd.read_csv('driving_log.csv', names=['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed'])
    X = data_df[['center', 'left', 'right']].values
    y = data_df['steering'].values
    X_train, X_valid, y_train, y_valid = train_test_split(X,y,test_size = 0.2)
    return X_train, X_valid, y_train, y_valid
def build_model():
    """Build the steering-regression CNN.

    The Lambda layer normalises pixel values to [-1, 1]; five ELU conv
    layers, dropout, then dense layers regress a single steering value.
    (Presumably modelled on the NVIDIA end-to-end driving architecture
    — not confirmed here.)
    """
    model = Sequential()
    model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))
    model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))
    model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))
    model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='elu'))
    model.add(Dense(50, activation='elu'))
    model.add(Dense(10, activation='elu'))
    model.add(Dense(1))
    model.summary()
    return model
def train_model(model, X_train, X_valid, y_train, y_valid,
                data_dir = "IMG", batch_size = 1000, nb_epoch = 20, samples_per_epoch = 100000, learning_rate = 1.0e-4):
    """Compile and fit the model with generator-fed batches.

    Uses MSE loss with Adam; the best model by validation loss is
    checkpointed to 'model2-NNN.h5' each epoch.
    """
    #Saves the best model so far.
    checkpoint = ModelCheckpoint('model2-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=learning_rate))
    # keras 2 expects steps per epoch rather than sample counts
    steps_per_epoch = samples_per_epoch/batch_size
    v_steps = int(np.floor(len(X_valid)/batch_size))
    model.fit_generator(batch_generator(data_dir, X_train, y_train, batch_size, True),
                        steps_per_epoch, nb_epoch, max_queue_size=1,
                        validation_data=batch_generator(data_dir, X_valid, y_valid, batch_size, False),
                        validation_steps=v_steps, callbacks=[checkpoint])
# Train from scratch; uncomment load_model below to resume a prior run.
data = load_data()
model = build_model()
#continue from previously trained model
#model = load_model("model-010.h5")
train_model(model, *data)
| thomashiemstra/self_driving_car_simulation | train.py | train.py | py | 2,585 | python | en | code | 1 | github-code | 36 |
7004225568 | # *args
def func(*args):
    """Demonstrate that ``*args`` collects positional arguments into a tuple."""
    packed = args
    print(packed)
    print(type(packed))
def add_all(*args):
    """Return the sum of all positional arguments (0 for no arguments)."""
    return sum(args)
print(add_all(1,2,3,4,6,8,11,23)) | salmansaifi04/python | chapter9(functions)--(args_and_kwargs)/01_args_intro.py | 01_args_intro.py | py | 229 | python | en | code | 0 | github-code | 36 |
28713561153 | import sys
# print(sys.argv)
# Usage: file_replace.py <old_str> <new_str> <filename>
old_str = sys.argv[1]
new_str = sys.argv[2]
filename = sys.argv[3]
# 1. Read the whole file into memory
f = open(filename, "r+")
data = f.read()
new_data = data.replace(old_str, new_str)
old_count = data.count(old_str)
# 2. Rewind and truncate (empty) the file
f.seek(0)
f.truncate()
# 3. Write the replaced content back
f.write(new_data)
print(new_data)
f.close()
print(f"{old_str}被替换为{new_str},共被替换{old_count}次")
| codefreshstudent/day8 | day4/file_replace.py | file_replace.py | py | 412 | python | en | code | 0 | github-code | 36 |
24267348983 | import pandas as pd
import geocoder
import math
from RR import *
class TreeOp:
    """Build an R-Tree of shop bounding boxes loaded from a CSV file."""
    def __init__(self, path=None):
        # The path is the path of the csv file. Call this function to create the R-Trees
        # Will Create R-Entries and then those entries will be search
        # X is the longitude, Y is the Latitude
        self.tree = RTree()
        self.Area = dict()
        self.Results = list()
        df = pd.read_csv(path, header=0)
        for i, row in df.iterrows():
            Lat = float(row['Latitude'])
            Lon = float(row['Longitude'])
            # Reverse-geocode the shop position to get its bounding box.
            g = geocoder.google([Lat, Lon], method='reverse')
            #g = geocoder.google(row['Address']+',Karachi,Sindh,Pakistan')
            a = g.bbox
            x1, y1 = self.convertSphericalToCartesian(
                a["northeast"][0], a["northeast"][1])
            x2, y2 = self.convertSphericalToCartesian(
                a["southwest"][0], a["southwest"][1])
            entry = TreeEntry(([x1, x2], [y1, y2]))
            # Geocode the named area and cache its bounding box once.
            g = geocoder.google(row['Area']+',Karachi,Sindh,Pakistan')
            a = g.bbox
            x1, y1 = self.convertSphericalToCartesian(
                a["northeast"][0], a["northeast"][1])
            x2, y2 = self.convertSphericalToCartesian(
                a["southwest"][0], a["southwest"][1])
            if row['Area'] not in self.Area:
                self.Area[row['Area']] = ([x1, x2], [y1, y2])
            # ShopID,Name,Address,City,Province,Area,Cell,Landline,Longitude,Latitude,StoreType
            Name = row['Name']
            Address = row['Address']
            Province = row['Province']
            Area = row['Area']
            Cell = row['Cell']
            Landline = row['Landline']
            Latitude = row['Latitude']
            Longitude = row['Longitude']
            StoreType = row['StoreType'].split(";")
            entry.setData(Name, Address, Province, Area, Cell,
                          Landline, Latitude, Longitude, StoreType)
            self.tree.insert(entry)
            self.Results.append(entry)
            # NOTE(review): debug leftover? Only the first 6 CSV rows
            # are ever loaded because of this early break.
            if i == 5:
                break
        print(self.tree.Root)

    def getAreas(self):
        # This function will return disctinct dictionary of areas of the file
        # For each Area in the key I will store its bounding box
        return list(self.Area.keys())

    def Search(self, Entity, AreaK, flag):
        # Entity will tell what key what type of store
        # AreaChose will be the key to the dictionary which will then select the file the bounds for searching
        # NearMe is a boolean flag that will tell key Near Me search karni hai
        # NOTE(review): when flag is False, AreaK is unused and all
        # loaded entries are filtered by store type instead.
        filter = []
        if flag == False:
            for i in self.Results:
                if Entity in i.StoreType:
                    filter.append(i)
        else:
            serch = self.tree.Search(self.tree.Root, self.Area[AreaK])
            for i in serch:
                if Entity in i.StoreType:
                    filter.append(i)
        return filter

    def convertSphericalToCartesian(self, latitude, longitude):
        """Project (lat, lon) in degrees to approximate XY km coordinates."""
        # Convert from Degrees to Radians
        latRad = latitude * (math.pi)/180
        lonRad = longitude * (math.pi)/180
        earthRadius = 6367 # Radius in km
        posX = earthRadius * math.cos(latRad) * math.cos(lonRad)
        posY = earthRadius * math.cos(latRad) * math.sin(lonRad)
        return(round(posX, 3), round(posY, 3))
| munawwar22HU/Ehsas | Source/RTreeOperations.py | RTreeOperations.py | py | 3,426 | python | en | code | 1 | github-code | 36 |
17585730612 | from __future__ import annotations
from collections import defaultdict
from starwhale import Job, handler, evaluation
from starwhale.utils.debug import console
PROJECT_URI = "https://cloud.starwhale.cn/project/349"
JOB_URI_TEMPLATE = "%s/job/{job_id}" % PROJECT_URI
JOB_IDS = [
"845",
"844",
"843",
"842",
"830",
"828",
"827",
"818",
"817",
"816",
"814",
"813",
"810",
"796",
"759",
]
SHOT_GROUPS = ("zero_shot", "one_shot", "five_shot")
CATEGORY_GROUPS = ("first-level", "second-level", "third-level")
@handler(replicas=1)
def analysis_leaderboard() -> None:
    """Aggregate per-question scores across the configured leaderboard jobs.

    For every benchmark question, count score buckets per shot group
    across all JOB_IDS, log per-question metrics and the distribution
    of right-answer counts, then write a summary record.
    """
    # question id -> shot group -> "score-N" bucket -> count
    r_score = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    # question id -> shot group -> "score-N" bucket -> model names
    r_score_models = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    r_benchmark = defaultdict(dict)
    r_jobs = {}
    job_cnt = len(JOB_IDS)
    for idx, job_id in enumerate(JOB_IDS):
        uri = JOB_URI_TEMPLATE.format(job_id=job_id)
        job = Job.get(uri)
        console.print(f"{idx+1}/{job_cnt} processing [{job_id}] {job.model} ...")
        if job.model:
            r_jobs[job_id] = job.model.name
        for row in job.get_table_rows("results"):
            benchmark_id = row["id"]
            # Record the question metadata the first time we see it.
            if benchmark_id not in r_benchmark:
                r_benchmark[benchmark_id] = {
                    "question": row["input/question"],
                    "answer": row["input/answer"],
                    "choices": row["input/choices"],
                }
                for category in CATEGORY_GROUPS:
                    r_benchmark[benchmark_id][f"category/{category}"] = row[
                        f"input/category/{category}"
                    ]
            for shot in SHOT_GROUPS:
                score = row[f"output/{shot}/score"]
                score = f"score-{score}"
                # "score-1" means a correct answer.
                if score == "score-1":
                    r_score[benchmark_id][shot]["right_count"] += 1
                r_score[benchmark_id][shot][score] += 1
                if job.model:
                    r_score_models[benchmark_id][shot][score].append(job.model.name)
    r_right_distribution = defaultdict(lambda: defaultdict(int))
    for benchmark_id, scores in r_score.items():
        evaluation.log(
            category="leaderboard-analysis",
            id=benchmark_id,
            metrics={
                "benchmark": r_benchmark[benchmark_id],
                "scores": scores,
                "models": r_score_models[benchmark_id],
            },
        )
        for shot, score_values in scores.items():
            score_one_cnt = score_values.get("right_count") or 0
            r_right_distribution[shot][score_one_cnt] += 1
    for shot, score_values in r_right_distribution.items():
        for count_name, count_value in score_values.items():
            metrics = {
                f"{shot}/count": count_value,
                f"{shot}/percentage": f"{count_value/len(r_benchmark):.2%}",
            }
            evaluation.log(
                category="right-answer-distribution",
                id=count_name,
                metrics=metrics,
            )
            console.log(f"{count_name} - {shot}: {metrics}")
    evaluation.log_summary(
        {
            "project": PROJECT_URI,
            "benchmark/name": "cmmlu",
            "benchmark/questions_count": len(r_benchmark),
            "analysis/job_models": list(r_jobs.values()),
            "analysis/job_ids": JOB_IDS,
        }
    )
    console.print(":clap: finished!")
| star-whale/starwhale | example/llm-leaderboard/src/analysis.py | analysis.py | py | 3,516 | python | en | code | 171 | github-code | 36 |
28518778247 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
class percent_development_type_DDD_within_walking_distance(Variable):
    """There is exactly one variable corresponding to each defined development type dynamic_land_use_variables,
    where "?" is the development type group's NAME (e.g. residential, commercial).
    100 * [sum over c in cell.walking_radius of (if c.development_type.dynamic_land_use_variables == N then 1 else 0)] /
    (number of cells within walking distance)"""
    _return_type="float32"

    def __init__(self, type_id):
        # Name of the precomputed per-cell count variable this
        # percentage is based on.
        self.type_id = type_id
        self.number_of_development_type_wwd = \
            "number_of_development_type_"+str(self.type_id)+"_within_walking_distance"
        Variable.__init__(self)

    def dependencies(self):
        # Depends only on the count-of-cells variable named above.
        return [my_attribute_label(self.number_of_development_type_wwd)]

    def compute(self, dataset_pool):
        # Percentage = 100 * count / size of the walking-distance footprint.
        urbansim_constant = dataset_pool.get_dataset('urbansim_constant')
        return 100.0*self.get_dataset().get_attribute(self.number_of_development_type_wwd)/ \
            float(urbansim_constant["walking_distance_footprint"].sum())
from numpy import array
from numpy import ma
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
    """Checks percentage = count within walking distance / footprint size * 100."""
    variable_name = "randstad.gridcell.percent_development_type_12_within_walking_distance"
    def test_my_inputs( self ):
        storage = StorageFactory().get_storage('dict_storage')
        # Four cells on a 2x2 grid with precomputed type-12 counts.
        storage.write_table(
            table_name='gridcells',
            table_data={
                'grid_id': array([1,2,3,4]),
                'relative_x': array([1,2,1,2]),
                'relative_y': array([1,1,2,2]),
                'number_of_development_type_12_within_walking_distance': array([3, 5, 1, 0])
            }
        )
        storage.write_table(
            table_name='urbansim_constants',
            table_data={
                "walking_distance_circle_radius": array([150]),
                'cell_size': array([150]),
                "acres": array([105.0]),
            }
        )
        dataset_pool = DatasetPool(package_order=['urbansim'],
                                   storage=storage)
        gridcell = dataset_pool.get_dataset('gridcell')
        gridcell.compute_variables(self.variable_name,
                                   dataset_pool=dataset_pool)
        values = gridcell.get_attribute(self.variable_name)
        # Expected denominator is 5 cells in the walking footprint
        # (per the /5.0 terms below).
        should_be = array( [3/5.0*100.0,
                            5/5.0*100.0,
                            1/5.0*100.0,
                            0/5.0*100.0] )
        self.assert_(ma.allclose( values, should_be, rtol=1e-7),
                     msg = "Error in " + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | psrc/urbansim | randstad/gridcell/percent_development_type_DDD_within_walking_distance.py | percent_development_type_DDD_within_walking_distance.py | py | 3,241 | python | en | code | 4 | github-code | 36 |
25947442218 | import os
import sqlite3
from datetime import datetime, timedelta
import telebot
bot = telebot.TeleBot(os.getenv("BOT_TOKEN"))
memes_chat_id = int(os.getenv("MEMES_CHAT_ID"))
flood_thread_id = int(os.getenv("FLOOD_THREAD_ID", 1))
memes_thread_id = int(os.getenv("MEMES_THREAD_ID", 1))
conn = sqlite3.connect("memes.db", check_same_thread=False)
def main():
    """Post a Markdown list of chat members inactive for 14 days.

    Selects active users with no messages newer than the cutoff,
    skips administrators, and sends the list to the flood thread.
    """
    # Renamed from the misleading ``seven_days_ago``: the window is 14 days.
    cutoff = datetime.now() - timedelta(days=14)
    query = "SELECT u.user_id, u.username FROM users u LEFT JOIN user_messages um ON um.user_id=u.user_id AND um.created_at > ? WHERE um.message_id is NULL AND u.active=1"
    rows = conn.execute(query, (cutoff,)).fetchall()
    msg = ["Список вуаеристов\n"]
    for row in rows:
        user_id, username = row
        user_data = bot.get_chat_member(memes_chat_id, user_id)
        # Administrators are exempt from the list.
        if user_data.status == "administrator":
            continue
        msg.append(
            "[{username}](tg://user?id={user_id}) {user_id}".format(
                username=username,
                user_id=user_id,
            )
        )
    bot.send_message(
        memes_chat_id,
        "\n".join(msg),
        message_thread_id=flood_thread_id,
        parse_mode="Markdown",
    )
if __name__ == "__main__":
main()
| dzaytsev91/tachanbot | cron_job_message_count.py | cron_job_message_count.py | py | 1,258 | python | en | code | 2 | github-code | 36 |
38326596416 | import pandas as pd
from matplotlib import pyplot as plt
from oemof.tools import logger
import logging
import q100opt.plots as plots
from q100opt.buildings import BuildingInvestModel, SolarThermalCollector
from q100opt.scenario_tools import ParetoFront
from q100opt.setup_model import load_csv_data
logger.define_logging(screen_level=logging.INFO)

# read data
timeseries = pd.read_csv("data/test-building-timeseries.csv")
weather = pd.read_csv("data/weather.csv")
tech_data = pd.read_csv("data/techdata.csv", index_col=0, skiprows=1)
commodity_data = load_csv_data("data/commodities")

# define data, that could/should be in the Kataster (building register)
kataster = {
    'heat_load_space_heating': 10, # heat load space heating [kW]
    'heat_load_dhw': 4, # heat load hot water [kW]
    'heat_load_total': 12, # total heat load [kW]
    'pv_1_max': 5, # maximum kWp of PV area 1
    'pv_2_max': 3, # maximum kWp of PV area 2
    'pv_3_max': 0, # maximum kWp of PV area 3
    # roof areas
    # roof 1, e.g. west orientation
    'roof_1_azimuth': 90, # azimuth / orientation [deg]
    'roof_1_pitch': 40, # roof pitch [deg]
    'roof_1_area_usable': 20, # [m²]
    # roof 2, e.g. south orientation
    'roof_2_azimuth': 180, # azimuth / orientation [deg]
    'roof_2_pitch': 40, # roof pitch [deg]
    'roof_2_area_usable': 20, # [m²]
    # roof 3, e.g. east orientation
    'roof_3_azimuth': 270, # azimuth / orientation [deg]
    'roof_3_pitch': 40, # roof pitch [deg]
    'roof_3_area_usable': 20, # [m²]
    # solar thermal options
    # maximum share of roof area considered for solar thermal
    'st_1_max': 0.8,
    'st_2_max': 0.8,
    'st_3_max': 0.8,
    # maximum values of units (for investment model)
    "gas-boiler.maximum": 100,
    "pellet-boiler.maximum": 0,
    "wood-boiler.maximum": 0,
    "heatpump-geo.maximum": 10,
    "heatpump-air.maximum": 10,
    "thermal-storage.maximum": 100,
    "battery-storage.maximum": 100,
    "substation.maximum": 100,
    # installed capacities for operation model
    "gas-boiler.installed": 10,
    "pellet-boiler.installed": 0,
    "wood-boiler.installed": 0,
    "heatpump-geo.installed": 0,
    "heatpump-air.installed": 10,
    "thermal-storage.installed": 0,
    "battery-storage.installed": 0,
}

# Flat-plate collector efficiency parameters.
my_collector = SolarThermalCollector(
    eta_0=0.825,
    a_1=3.41,
    a_2=0.0161,
)

house = BuildingInvestModel(
    space_heating_demand=timeseries["E_th_RH"],
    electricity_demand=timeseries["E_el"],
    hot_water_demand=timeseries["E_th_TWE"],
    pv_1_profile=timeseries["E_el_PV_1"],
    pv_2_profile=timeseries["E_el_PV_2"],
    commodity_data=commodity_data,
    tech_data=tech_data,
    weather=weather,
    timesteps=8760,
    start_date="2015-01-01 01:00",
    location=(52.516254, 13.377535),
    solar_thermal_collector=my_collector,
    exclusive_roof_constraint=True, # for each roof a constraint with limited area is created
    pv_system={"space_demand": 5}, # [m²/kWp],
    **kataster,
)

table_collection = house.create_table_collection()

# from here on, this builds on the existing q100opt functions
house.pareto_front = ParetoFront(
    table_collection=house.table_collection,
    number_of_points=5,
    number_of_time_steps=700,
)

house.pareto_front.calc_pareto_front(solver='gurobi', tee=True)

# some plots
house.pareto_front.results["pareto_front"].plot(
    x='emissions', y='costs', kind='scatter'
)
plt.xlabel('emissions')
plt.ylabel('costs')
plt.show()

for emission_limit, scenario in house.pareto_front.district_scenarios.items():
    plots.plot_investments(
        scenario.results['main'], title="Emissionscenario: " + emission_limit
    )
| quarree100/q100opt | examples/single_building/example_house_with_solarthermal.py | example_house_with_solarthermal.py | py | 3,764 | python | en | code | 1 | github-code | 36 |
5302886583 | import natasha
from external_analizer.morph_dictionaries.pymorphy_morph_dictionary import PymorphyMorphDictionary
from external_analizer.syntax_analizer.syntax_analyzer import SyntaxAnalizer
class NLPAnalyzer():
    """Facade bundling the sentence-analysis components (segmenter,
    morphology dictionary, syntax analyzer) shared by the whole project."""

    # Analysis components are created once at class-definition time and
    # shared project-wide as class attributes.
    segmenter = natasha.Segmenter()
    morph_dict = PymorphyMorphDictionary()
    # morph_dict = MaruMorphDictionary()
    syntax_analizer = SyntaxAnalizer()

    @classmethod
    def change_morph_dictionary_to_maru(cls):
        """Switch the shared morphology dictionary to the Maru backend.

        Bug fix: the previous implementation declared ``global morph_dict``
        inside a staticmethod, which only created a new module-level name and
        never replaced the class attribute ``NLPAnalyzer.morph_dict`` that the
        rest of the code actually reads. Assigning through ``cls`` fixes that;
        the call site ``NLPAnalyzer.change_morph_dictionary_to_maru()`` is
        unchanged.
        """
        from external_analizer.morph_dictionaries.maru_morph_parsing import MaruMorphDictionary
        cls.morph_dict = MaruMorphDictionary()

    @classmethod
    def divide_text_to_natasha_sents(cls, text):
        """Segment *text* into natasha sentences annotated with morphology and
        syntax, normalizing token ids and character offsets per sentence.

        :param text: raw input text
        :return: list of natasha sentence objects with integer token ids and
                 sentence-local start/stop offsets
        """

        def correct_token_id(sent_tokens):
            """Decode the string ids used in the syntax tree ("<sent>_<n>")
            into plain integers for both ``id`` and ``head_id``."""
            for token in sent_tokens:
                head_id = token.head_id
                head_id = head_id.split("_")[1]
                token.head_id = int(head_id)
                id = token.id
                id = id.split("_")[1]
                token.id = int(id)
            return sent_tokens

        def correct_token_position_conserning_sentence(sent_tokens, sentence_start):
            # Rebase absolute character offsets so they become relative to
            # the start of the containing sentence.
            for num, token in enumerate(sent_tokens):
                token.start = token.start - sentence_start
                token.stop = token.stop - sentence_start
            return sent_tokens

        doc = natasha.Doc(text)
        doc.segment(cls.segmenter)
        doc.tag_morph(cls.syntax_analizer.morph_tagger)
        doc.parse_syntax(cls.syntax_analizer.syntax_parser)
        sents = list(doc.sents)
        for sent in sents:
            sent.tokens = correct_token_id(sent.tokens)
            sent.tokens = correct_token_position_conserning_sentence(sent.tokens, sentence_start=sent.start)
        return sents
| NenausnikovKV/NLP_library | source/external_analizer/nlp_analizer.py | nlp_analizer.py | py | 2,131 | python | en | code | 0 | github-code | 36 |
34182723957 | # coding=utf-8
from __future__ import print_function
"""负责从主网址中爬取出需要的网址"""
import datetime
import logging
import bs4
import requests
import re
import tools.newspublish
from bs4 import BeautifulSoup
from models import *
from .tools.bloomfilter import BloomFilter
from Spider.autonews.tools.svmutil import *
from .object import URL
from ..autorecog.recognize import *
from ..autorecog.keywords import analyse_keywords
import sys
reload(sys)
sys.setdefaultencoding('utf8')
logger = logging.getLogger(__name__)
dirname = path.dirname(path.abspath(__file__))
# 适配不同平台加载模型内容
if sys.platform == 'win32':
content_model = svm_load_model(path.join(dirname, ".\content.model"))
else:
content_model = svm_load_model(path.join(dirname, './content.model'))
def crawl(task):
    """Main crawl entry point for one Task.

    Expands the task's URL template (date/loop placeholders), collects the
    article links (with or without a stored UrlModel template), fetches each
    page, extracts title/content, persists a News record and pushes it to the
    configured publishers. Written for Python 2 (``unicode``, print_function).
    """
    assert isinstance(task, Task)
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
    }
    cookie = ""
    if task.cookie is not None and task.cookie != "":
        cookie = dict(task.cookie)
    # Analyze and expand complex URL templates.
    # "(date,<pattern>,<lead>)" -> date <lead> days ago formatted by <pattern>.
    site_url = task.site_url
    site_urls = []
    matchObj1 = re.match(r'.*\(date,(.*?),(.*?)\).*', site_url, re.M | re.I)
    if matchObj1:
        patten = matchObj1.group(1)
        lead = matchObj1.group(2)
        # Translate Java-style date tokens into strftime directives.
        patten = patten.replace("yyyy", "%Y")
        patten = patten.replace("MM", "%m")
        patten = patten.replace("dd", "%d")
        patten = patten.replace("HH", "%H")
        patten = patten.replace("mm", "%M")
        patten = patten.replace("ss", "%S")
        delta = datetime.timedelta(days=int(lead))
        now = datetime.datetime.now() - delta
        patterned_time = now.strftime(patten)  # date with the offset applied
        site_url = re.sub(r"\(date,(.*?)\)", patterned_time, site_url)
    # "(loop,<first>,<last>,<step>)" -> one URL per value in the range.
    matchObj = re.match(r'.*\(loop,(.*?),(.*?),(.*?)\).*', site_url, re.M | re.I)
    if matchObj:
        first_num = int(matchObj.group(1))
        last_num = int(matchObj.group(2))
        number_of_phases = int(matchObj.group(3))
        for i in range(first_num, last_num, number_of_phases):
            u = re.sub(r"\(loop,(.*?),(.*?),(.*?)\)", str(i), site_url)
            site_urls.append(u)
    # NOTE(review): ``is 0`` compares identity with an int literal; it works
    # in CPython for small ints but should be ``== 0``.
    if len(site_urls) is 0:
        site_urls.append(site_url)
    hrefs = []
    url_model = UrlModel.objects.filter(task=task)
    if len(url_model) is 0: # does this task have a URL template?
        for u in site_urls:
            href = __crawl_urls(u, cookie) # collect all URLs to be crawled
            hrefs.extend(href)
    else:
        for u in site_urls:
            href = __crawl_urls_by_model(url_model[0], u, cookie)
            hrefs.extend(href)
    for url in hrefs:
        try:
            r = requests.get(url, headers=header, cookies=cookie)
            logger.info('开始请求%s,返回状态码为%d,当前时间为%s' % (url, r.status_code, datetime.datetime.now()))
            # Retry up to three times on a non-200 response.
            if r.status_code != 200:
                i = 0
                while i < 3 and r.status_code != 200:
                    logger.info('正在重试第%d次' % (i + 1))
                    r = requests.get(url, headers=header, cookies=cookie)
                    i += 1
                if r.status_code != 200:
                    raise requests.ConnectionError('网址连接失败'+url)
            html = r.text
            code = "utf8" # codec label used by the extraction step below
            # Encoding detection (crude, to be improved).
            try:
                html = html.encode(r.encoding).decode("utf8")
            except UnicodeDecodeError:
                html = html.encode(r.encoding).decode("GB18030")
                code = "utf8"
            except UnicodeEncodeError:
                html = html.encode("GB18030").decode("GB18030")
                code = "GB18030"
            logger.debug("网址%s \n"
                         "编码%s \n"
                         "返回内容%s \n"
                         % (url, r.encoding, html))
            # Extract title/content via the stored model, then persist & publish.
            ret = __recognize_by_model(html, task, code)
            title = ret.get("title")
            content_html = ret.get("content_html")
            content = ret.get("content")
            if title is None or content_html is None or content_html is '' or title is '':
                # NOTE(review): ``traversal`` is commented out later in this
                # module; this fallback will raise NameError when the
                # model-based extraction comes back empty — confirm.
                ret = traversal(html)
                t = ret.get("title")
                c = ret.get("content_html")
                p = ret.get("content")
                if title is None or title is '':
                    title = t
                if content_html is None or content_html is '':
                    content_html = c
                    content = p
            content_html = __convert_img(content_html, str(url)) # make image paths absolute
            print (title)
            print (type(content))
            news = News()
            news.task = task
            news.url = url
            news.title = title
            news.content = content_html
            news.keywords = analyse_keywords(content, 5)
            news.save()
            publishers = task.publisher.all()
            print (type(publishers))
            if title is not None and content_html is not None and content_html is not '' and title is not '':
                for publisher in publishers:
                    publisher_type = publisher.type
                    publisher_url = publisher.url
                    # NOTE(review): eval() on a DB-stored publisher type is a
                    # code-injection risk; prefer an explicit dispatch table.
                    r = eval("tools.newspublish."+publisher_type+"(publisher_url, title, content_html, task.site_name, task.site_column, news.keywords)")
                    print (r)
            # Remember this URL so it is not crawled again.
            bf = BloomFilter()
            bf.insert(url)
        except Exception as e:
            print (e)
def __crawl_urls(url, cookie):
    """Collect all candidate article URLs found on the listing page *url*.

    :param url: listing page to scan for links
    :param cookie: cookie dict passed through to the HTTP request
    :return: set of absolute, normalized, not-yet-crawled content URLs
    """
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        # NOTE(review): "KHT ML" looks like a typo for "KHTML"; kept byte-for-byte
        # to avoid changing the request fingerprint.
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHT ML, like Gecko) Chrome/43.0.235'
    }
    r = requests.get(url, headers=header, cookies=cookie)
    content = r.text
    # print r.encoding
    # TODO(qi): encoding detection is crude but usually good enough; replace
    # this block when a better solution is found.
    try:
        content = content.encode(r.encoding).decode("utf8")
    except UnicodeDecodeError:
        content = content.encode(r.encoding).decode("GB18030")
    except UnicodeEncodeError:
        content = content.encode("GB18030").decode("GB18030")
    soup = BeautifulSoup(content, "html.parser")
    t = soup.find_all("a")
    hrefs = set()
    bf = BloomFilter()
    for tag in t:
        if tag.get("href") is not None:
            newurl = str(tag.get("href")).strip()
            # Resolve relative links ("/x", "./x", "../x", "x") against the
            # current directory of *url* to obtain absolute URLs.
            # (Identity comparisons ``is -1`` replaced with ``== -1``.)
            if not newurl.startswith("http") and newurl.lower().find("javascript") == -1:
                domain = re.match(r'http(s)?://(.*/)', url, re.M | re.I).group()  # current directory
                if newurl.startswith("/"):
                    newurl = domain + newurl
                elif newurl.startswith("./"):
                    # Bug fix: str.replace returns a new string; the previous
                    # code discarded the result, leaving "./" in the URL.
                    newurl = domain + newurl.replace("./", "")
                elif newurl.startswith("../"):
                    # Walk up one directory level per "../" occurrence.
                    count = newurl.count("../")
                    while count > 0:
                        domain = domain[:len(domain) - 1]
                        domain = re.match(r'http(s)?://(.*/)', domain, re.M | re.I).group()
                        count -= 1
                    newurl = domain + newurl.replace("../", "")
                else:  # plain relative names like "content_234.html"
                    newurl = domain + newurl
            # Strip fragments and collapse duplicate slashes, protecting "://".
            newurl = newurl.partition("#")[0]
            newurl = newurl.replace("://", "!!!")
            while newurl.find("//") != -1:
                newurl = newurl.replace("//", "/")
            newurl = newurl.replace("!!!", "://")
            # TODO: temporary workaround for PDF links being misidentified as
            # content pages (e.g. newspaper e-paper PDFs); revisit later.
            if newurl.find(".pdf") != -1:
                continue
            if "http" in newurl:
                url_o = URL.URL(newurl, unicode(tag.string))
                if url_o.is_contenturl():
                    if not bf.isContains(newurl):
                        # Hand over to the next stage for content analysis.
                        hrefs.add(newurl)
                        print ("已采集新网址"+url_o.url_name)
                    else:
                        print("该网址已采集过")
    log_hrefs = "已分析网址"+str(url)
    for h in hrefs:
        log_hrefs += "\r\n"
        log_hrefs += h
    logger.info(log_hrefs)
    return hrefs
def __crawl_urls_by_model(url_model, url, cookie):
    """Collect candidate URLs from *url* using a stored UrlModel template.

    The template restricts the scanned HTML region (start/end markers) and
    filters links by required / forbidden substrings.

    :param url_model: UrlModel row holding the template for this task
    :param url: listing page to scan for links
    :param cookie: cookie dict passed through to the HTTP request
    :return: set of absolute, normalized, not-yet-crawled URLs
    """
    assert isinstance(url_model, UrlModel)
    start_location = url_model.start_location
    end_location = url_model.end_location
    include_word = url_model.include_words
    include_words = None
    # Bug fix: compare string values with ``!=`` instead of identity
    # (``is not u""``), which silently depends on CPython string interning.
    if include_word != u"":
        include_words = include_word.split(";")
    exclude_word = url_model.exclude_words
    exclude_words = None
    if exclude_word != u"":
        exclude_words = exclude_word.split(";")
    hrefs = set()
    bf = BloomFilter()
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
    }
    r = requests.get(url, headers=header, cookies=cookie)
    content = r.text
    # Encoding normalization (crude, matches __crawl_urls).
    try:
        content = content.encode(r.encoding).decode("utf8")
    except UnicodeDecodeError:
        content = content.encode(r.encoding).decode("GB18030")
    except UnicodeEncodeError:
        content = content.encode("GB18030").decode("GB18030")
    # Cut the HTML down to the region between the template's markers.
    start_num = 0
    end_num = len(content)
    if start_location is not None and start_location != "":
        start_num = content.find(start_location)
        if start_num == -1:
            start_num = 0
    if end_location is not None and end_location != "":
        end_num = content.find(end_location, start_num)
        if end_num == -1:
            end_num = len(content)
    content = content[start_num:end_num]
    soup = BeautifulSoup(content, "html.parser")
    a_tags = soup.find_all("a")
    for tag in a_tags:
        if tag.get("href") is not None:
            newurl = str(tag.get("href")).strip()
            newurl = newurl.replace("\\", "/")
            # Resolve relative links against the current directory of *url*.
            # (Identity comparisons ``is -1`` replaced with ``== -1``.)
            if not newurl.startswith("http") and newurl.lower().find("javascript") == -1:
                domain = re.match(r'http(s)?://(.*/)', url, re.M | re.I).group()  # current directory
                if newurl.startswith("/"):
                    newurl = domain + newurl
                elif newurl.startswith("./"):
                    # Bug fix: str.replace returns a new string; the previous
                    # code discarded the result, leaving "./" in the URL.
                    newurl = domain + newurl.replace("./", "")
                elif newurl.startswith("../"):
                    count = newurl.count("../")
                    while count > 0:
                        domain = domain[:len(domain) - 1]
                        domain = re.match(r'http(s)?://(.*/)', domain, re.M | re.I).group()
                        count -= 1
                    newurl = domain + newurl.replace("../", "")
                else:  # plain relative names like "content_234.html"
                    newurl = domain + newurl
            # Strip fragments and collapse duplicate slashes, protecting "://".
            newurl = newurl.partition("#")[0]
            newurl = newurl.replace("://", "!!!")
            while newurl.find("//") != -1:
                newurl = newurl.replace("//", "/")
            newurl = newurl.replace("!!!", "://")
            # URL filter conditions from the template.
            continue_flag = False
            if include_words is not None and len(include_words) != 0:
                for word in include_words:
                    if newurl.find(word) == -1:
                        continue_flag = True
                        break
            if exclude_words is not None and len(exclude_words) != 0:
                for word in exclude_words:
                    if newurl.find(word) != -1:
                        continue_flag = True
                        break
            if continue_flag:
                continue
            # TODO: temporary workaround for PDF links being misidentified as
            # content pages; revisit later.
            if newurl.find(".pdf") != -1:
                continue
            if "http" in newurl:
                if not bf.isContains(newurl):
                    # Hand over to the next stage for content analysis.
                    hrefs.add(newurl)
                    print("已采集新网址" + newurl)
                else:
                    print("该网址已采集过")
    log_hrefs = "已分析网址"+str(url)
    for h in hrefs:
        log_hrefs += "\r\n"
        log_hrefs += h
    logger.info(log_hrefs)
    return hrefs
# def __recognize_content(soup):
# """识别网页标题和内容"""
#
# assert isinstance(soup, BeautifulSoup)
# soup = __clean(soup)
# title = ""
# content = ""
# return title, content
# def __clean(soup):
# """清理网页噪声"""
# assert isinstance(soup, BeautifulSoup)
#
# try:
# for script in soup.find_all('script'):
# script.decompose()
# except TypeError:
# pass
# try:
# for style in soup.find_all('style'):
# style.decompose()
# except TypeError:
# pass
# try:
# for meta in soup.find_all('meta'):
# meta.decompose()
# except TypeError:
# pass
# try:
# for form in soup.find_all('soup'):
# form.decompose()
# except TypeError:
# pass
# try:
# for inputs in soup.find_all('input'):
# inputs.decompose()
# except TypeError:
# pass
# try:
# for select in soup.find_all('select'):
# select.decompse()
# except TypeError:
# pass
# try:
# for link in soup.find_all('link'):
# link.decompse()
# except TypeError:
# pass
#
# return soup
def __recognize_by_model(html, task, code):
    """Extract title and content from *html* using the Model rows of *task*.

    Each Model row selects either the title or the content via tag name,
    id and attributes, optionally restricted to a start/end text region.
    Returns a dict with "title", "content" (plain text) and "content_html".

    NOTE(review): ``html`` is truncated in place on every model iteration, so
    later models search only inside the region kept by earlier ones — confirm
    that is intended.
    """
    assert isinstance(task, Task)
    title = ""
    content = ""
    content_html = ""
    models = Model.objects.filter(task_id=task.id)
    for model in models:
        tag_name = model.tag_name
        tag_id = model.tag_id
        tag_attrs = model.tag_attrs
        attrs_dict = {}
        if tag_attrs is not None and tag_attrs != "":
            # NOTE(review): eval() of DB-stored attribute dicts is a
            # code-injection risk; ast.literal_eval would be safer.
            attrs_dict = eval(tag_attrs)
        is_title = model.is_title
        # Restrict to the text between the model's start/end markers.
        # html = html.encode("utf-8")
        assert isinstance(html, unicode)
        start = model.start_location
        end = model.end_location
        start_num = 0
        end_num = len(html)
        if start is not None and start != "":
            start_num = html.find(start.decode(code))
            if start_num == -1:
                start_num = 0
        if end is not None and end != "":
            end_num = html.find(end.decode(code), start_num)
            if end_num == -1:
                end_num = len(html)
        html = html[start_num:end_num]
        try:
            html = html.encode("utf8").decode("utf8")
        except UnicodeDecodeError:
            html = html.encode("utf8").decode("GB18030")
        except UnicodeEncodeError:
            html = html.encode("GB18030").decode("GB18030")
        try:
            soup = BeautifulSoup(html, "html.parser")
        except Exception as e:
            # NOTE(review): if parsing fails, ``soup`` stays unbound (or keeps
            # the previous iteration's value) and the lookups below misbehave.
            print (e)
            print (html)
        if is_title:
            try:
                title = soup.find(name=tag_name, id=tag_id, attrs=attrs_dict).string
            except AttributeError:
                print ("找不到标题")
        else:
            # TODO(qi): images still need to be provided
            try:
                # NOTE(review): ``attrs_dict is not {}`` compares identity with
                # a fresh literal and is therefore always True; an IndexError on
                # an empty find_all() result is also uncaught below.
                if tag_name is not u'' and attrs_dict is not {}:
                    content_soups = soup.find_all(name=tag_name, attrs=attrs_dict)
                    # for s in content_soups:
                    #     if str(s.string) is not None and "None" not in str(s.string):
                    #         content += s.get_text()
                    content_html += str(content_soups[0])
                    content += content_soups[0].get_text()
                else:
                    content_html += str(soup)
                    content += soup.get_text()
            except AttributeError:
                print("找不到内容")
            except TypeError:
                print("找不到内容")
    result = {"title": title, "content": content, "content_html": content_html}
    return result
# def __recognize(lines, line_max):
# """该私有方法为处理数据并调用libsvm识别标题和内容"""
#
# title = '' # 存放标题
# content = '' # 存放内容
# content_html = '' # 存放原生html
#
# content_flag = False # 上一条是否为正文,是的话为True,否的话为False
# tags = [] # 存放所有Tag
# for line in lines:
# # print line.get('content')
# sequence = line.get('sequence')
# tag = line.get('tag')
# tag_name = line.get('tag_name')
# tag_id = line.get('tag_id')
# tag_class = line.get('tag_class')
# content_len = line.get('content_len')
#
# # 如果是紧跟正文的图片则判断为需要的图片
# if content_flag is True and tag_name == 'img':
# content_html += line.get('content_html')
#
# content_flag = False
# if not tag_name == 'img':
# f1 = sequence / line_max # 在队列中的顺序
#
# f2 = 0.5
# try:
# if tag_name.lower() == "h1":
# f2 = 1
# if tag_name.lower() == "h2" or tag_name.lower() == "h3":
# f2 = 0.90
# if tag_name.lower() == "title":
# f2 = 0.80
# if tag_name.lower() == "div":
# f2 = 0.70
# if tag_name.lower() == "span":
# f2 = 0.30
# if tag_name.lower() == "td" or tag_name.lower() == "th":
# f2 = 0.20
# if tag_name.lower() == "strong":
# f2 = 0.15
# if tag_name.lower() == "article":
# f2 = 0.10
# if tag_name.lower() == "p":
# f2 = 0
# except AttributeError:
# pass
#
# f3 = 0.5
# try:
# if tag_id.lower().find("title") is not -1 or tag_class.lower().find("title") is not -1:
# f3 = 1
# if tag_id.lower().find("headline") is not -1 or tag_class.lower().find("headline") is not -1:
# f3 = 0.90
# if tag_id.lower().find("pic") is not -1 or tag_class.lower().find("pic") is not -1:
# f3 = 0.40
# if tag_id.lower().find("content") is not -1 or tag_class.lower().find("content") is not -1:
# f3 = 0.30
# if tag_id.lower().find("text") is not -1 or tag_class.lower().find("text") is not -1:
# f3 = 0.20
# if tag_id.lower().find("author") is not -1 or tag_class.lower().find("author") is not -1:
# f3 = 0.10
# if tag_id.lower().find("editor") is not -1 or tag_class.lower().find("editor") is not -1:
# f3 = 0
# except AttributeError:
# pass
#
# f4 = content_len / 100
# if f4 > 1:
# f4 = 1
#
# data_list = []
# row = "0 1:%f 2:%f 3:%f 4:%f" % (f1, f2, f3, f4)
# # print row
# data_list.append(row)
# y, x = svm_read_problem(data_list)
# # print (os.path.abspath('..'))
# # m = svm_load_model('./Spider/autonews/content.model')
# p_labs, p_acc, p_vals = svm_predict(y, x, content_model)
# if p_labs[0] == 1.0:
# title += line.get('content')
# if p_labs[0] == 2.0:
# content_flag = True
# content += line.get('content')
# content_html += line.get('content_html')
# tags.append(tag)
#
# result = {"title": title, "content": content, "content_html": content_html, "tags": tags}
# return result
# def traversal(html):
# soup = BeautifulSoup(html, "lxml")
# lines = []
# # 遍历所有节点
# i = 0
# for tag in soup.descendants:
# line = {'sequence': i}
# i += 1
# line['tag'] = tag
# if type(tag) == bs4.element.Tag:
# try:
# # 标签有内容或者是p标签,并且标签的父节点没有p(因为只需要判断到p就可以了,里面的东西都要的)
# if (tag.string is not None or tag.name == 'p') and tag.find_parent('p') is None:
# line['content_html'] = str(tag)
# try:
# line['content_len'] = len(tag.string.strip())
# except TypeError and AttributeError:
# line['content_len'] = 0
# content = ''
# for string in tag.stripped_strings:
# content += string
# line['content'] = content
# # content = tag.string
# line['tag_name'] = tag.name
# line['tag_id'] = tag.get("id")
# line['tag_class'] = tag.get("class")
#
# # p提取其下所有标签的文字
# if tag.name == 'p':
# content = ''
# for string in tag.stripped_strings:
# content += string
# line['content_len'] = len(content.strip())
# line['content'] = content
# elif tag.name == 'img':
# line['tag_name'] = tag.name
# line['content_html'] = str(tag)
#
# except StopIteration:
# pass
#
# if type(tag) == bs4.element.NavigableString and tag.string.strip() != '':
# if tag.next_sibling is not None and tag.previous_sibling is not None:
# line['content_html'] = str(tag)+"</br>"
# line['tag_name'] = 'p'
# line['content_len'] = len(unicode(tag).strip())
# content = tag.string
# line['content'] = content
#
# # 判断该节点是否为需要的节点
# if line.get('tag_name') is not None:
# lines.append(line) # 在队列尾部插入新数据
#
# result = __recognize(lines, i)
# tags = result['tags']
# if len(tags) > 0:
# count = 0
# last_parent = tags[0].parent
# for t in tags:
# if t not in last_parent.descendants and t is not None:
# last_parent = last_parent.parent
# count += 1
# if count is 3:
# last_parent = None
# break
# if last_parent is not None:
# result['content_html'] = str(last_parent)
# print ("success: "+str(last_parent))
#
# return result
def __convert_img(content_html, url):
    """Convert relative <img src> paths inside an article body into absolute URLs.

    :param content_html: article body as an HTML string
    :param url: absolute URL of the article the body came from
    :return: the HTML with every relative image src rewritten; on parse failure
             the input is returned unchanged
    """
    assert isinstance(content_html, str)
    assert isinstance(url, str)
    try:
        soup = BeautifulSoup(content_html, "html.parser")
    except Exception:
        print ("处理图片地址转换失败")
        return content_html
    imgs = soup.find_all(name="img")
    for img in imgs:
        if img.get("src") is not None:
            src = str(img.get("src")).strip()
            src = src.replace("\\", "/")
            # Resolve relative paths against the current directory of *url*.
            # (Identity comparison ``is -1`` replaced with ``== -1``.)
            if not src.startswith("http") and src.lower().find("javascript") == -1:
                domain = re.match(r'http(s)?://(.*/)', url, re.M | re.I).group()  # current directory
                if src.startswith("/"):
                    src = domain + src
                elif src.startswith("./"):
                    # Bug fix: str.replace returns a new string; the previous
                    # code discarded the result, leaving "./" in the path.
                    src = domain + src.replace("./", "")
                elif src.startswith("../"):
                    # Walk up one directory level per "../" occurrence.
                    count = src.count("../")
                    while count > 0:
                        domain = domain[:len(domain) - 1]
                        domain = re.match(r'http(s)?://(.*/)', domain, re.M | re.I).group()
                        count -= 1
                    src = domain + src.replace("../", "")
                else:  # plain relative names like "content_234.html"
                    src = domain + src
                img['src'] = src
    return str(soup)
if __name__ == '__main__':
    # NOTE(review): crawl() asserts isinstance(task, Task), so crawl(1) will
    # raise AssertionError — this looks like leftover debug code; confirm.
    crawl(1)
    # __crawl_urls("http://www.sina.com.cn")
    # crawl_urls("http://www.163.com")
    # crawl_urls("http://www.qq.com")
    # crawl_urls("http://www.sohu.com")
    # crawl_urls("http://www.kankanews.com")
    #
    # crawl_urls("http://www.people.com.cn")
    # crawl_urls("http://www.gmw.cn")
    # crawl_urls("http://chinese.yonhapnews.co.kr")
    # crawl_urls("https://www.washingtonpost.com")
    # crawl_urls("http://www.thepaper.cn")
26944213809 | # -*- coding: utf-8 -*-
'''
@author: davandev
'''
import logging
import os
import traceback
import sys
import davan.config.config_creator as configuration
import davan.util.constants as constants
from davan.util import cmd_executor as cmd_executor
from davan.http.service.base_service import BaseService
class PictureService(BaseService):
    '''
    Motion detected on sensors: take a photo from the camera named in the
    request and send it to all configured Telegram receivers.
    Written for Python 2 (uses dict.has_key).
    '''
    def __init__(self, service_provider, config):
        '''
        Constructor. Registers the service under the name "TakePicture".

        @param service_provider: provider passed through to BaseService
        @param config: dict with TOKEN, CHATID, CAMERAS, CAMERA_USER,
                       CAMERA_PASSWORD entries used below
        '''
        BaseService.__init__(self, "TakePicture", service_provider, config)
        self.logger = logging.getLogger(os.path.basename(__file__))

    def handle_request(self, msg):
        '''
        Handle a received request:
        - Take a picture from the requested camera,
        - Send the picture to all configured receivers,
        - Delete the temporary picture file.
        Returns a (response code, mime type, body) tuple.
        '''
        try:
            self.increment_invoked()
            camera = self.parse_request(msg)
            self.take_picture(camera)
            self.send_picture(camera)
            self.delete_picture()
        # NOTE(review): bare except swallows everything incl. KeyboardInterrupt;
        # prefer "except Exception". Kept as-is in this doc-only pass.
        except:
            self.logger.error(traceback.format_exc())
            self.increment_errors()
            self.logger.error("Failed to handle picture request")
            return constants.RESPONSE_NOT_OK, constants.MIME_TYPE_HTML, constants.RESPONSE_FAILED_TO_TAKE_PICTURE
        return constants.RESPONSE_OK, constants.MIME_TYPE_HTML, constants.RESPONSE_EMPTY_MSG

    def parse_request(self, msg):
        '''
        Return the camera name extracted from the request path
        ("/TakePicture?text=<camera>").
        '''
        self.logger.debug("Parsing: " + msg )
        msg = msg.replace("/TakePicture?text=", "")
        return msg

    def delete_picture(self):
        '''
        Delete the temporarily stored photo (hard-coded path, see take_picture).
        '''
        self.logger.debug("Deleting picture")
        os.remove("/var/tmp/snapshot.jpg")

    def send_picture(self, camera):
        '''
        Send the stored picture to all configured Telegram receivers via curl.
        @param camera: camera name, used in the photo caption
        '''
        self.logger.info("Sending picture to telegram accounts")
        for chatid in self.config['CHATID']:
            self.logger.debug("Sending picture to chatid[" + chatid + "]")
            telegram_url = ('curl -X POST "https://api.telegram.org/bot' +
                            self.config['TOKEN'] +
                            '/sendPhoto" -F chat_id=' +
                            chatid +
                            ' -F photo="@/var/tmp/snapshot.jpg" -F caption="Rörelse upptäckt från ' +
                            camera +'"' )
            cmd_executor.execute_block(telegram_url,"curl")

    def take_picture(self, camera):
        '''
        Fetch a snapshot from the camera via wget and store it at
        /var/tmp/snapshot.jpg. Raises an exception when the camera has no
        configured URL (ip address, user and password).
        @param camera: camera name (key into config["CAMERAS"])
        '''
        self.logger.info("Take picture from camera [" + camera + "]")
        if self.config["CAMERAS"].has_key(camera):
            cam_picture_url = self.config["CAMERAS"][camera]
            cmd_executor.execute("wget " + cam_picture_url + " --user=" + self.config["CAMERA_USER"] +
                                 " --password=" + self.config["CAMERA_PASSWORD"] + " --auth-no-challenge")
            # wget saves under the URL's basename; move it to the fixed path.
            pos = cam_picture_url.rfind('/')
            file_name = cam_picture_url[pos+1:]
            cmd_executor.execute("sudo mv "+file_name+" /var/tmp/snapshot.jpg")
        else:
            raise Exception("No camera url for [" + camera + "] configured")

    def has_html_gui(self):
        """
        Override: this service provides a small HTML GUI fragment.
        """
        return True

    def get_html_gui(self, column_id):
        """
        Override: return the HTML column listing the configured cameras.
        """
        if not self.is_enabled():
            return BaseService.get_html_gui(self, column_id)
        column = constants.COLUMN_TAG.replace("<COLUMN_ID>", str(column_id))
        column = column.replace("<SERVICE_NAME>", self.service_name)
        column = column.replace("<SERVICE_VALUE>", "<li>Cameras: " + str(self.config["CAMERAS"].keys()) + " </li>\n")
        return column
if __name__ == '__main__':
    from davan.util import application_logger as log_config
    config = configuration.create()
    log_config.start_logging(config['LOGFILE_PATH'],loglevel=4)
    camerapath = "/TakePicture?text=Framsidan"
    # NOTE(review): PictureService.__init__ requires (service_provider, config),
    # so this no-arg call raises TypeError; also no ``start`` method is defined
    # in this class (presumably BaseService) — this manual test looks stale.
    test = PictureService()
    test.start(camerapath)
| davandev/davanserver | davan/http/service/picture/PictureService.py | PictureService.py | py | 4,701 | python | en | code | 0 | github-code | 36 |
2262298883 | import tensorflow as tf
#how to see tensorflow operation
def seeTF():
    """Print a tiny conv2d example — a random input, a random kernel, and the
    results for VALID and SAME padding — so the operation can be inspected by
    hand. Locals renamed so they no longer shadow the builtins ``input`` and
    ``filter``."""
    # one 3x3 image with 2 channels
    input_tensor = tf.Variable(tf.random_normal([1, 3, 3, 2]))
    # one 3x3 filter with 2 channels
    kernel = tf.Variable(tf.random_normal([3, 3, 2, 1]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='VALID')
    op2 = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='SAME')
    # NOTE: tf.initialize_all_variables() is deprecated in favour of
    # tf.global_variables_initializer(); kept for the old TF version this
    # tutorial targets.
    init = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init)
        print("input")
        print(input_tensor.eval())
        print("filter")
        print(kernel.eval())
        print("result")
        result = sess.run(op)
        result2 = sess.run(op2)
        print(result)
        print(result2)
#if we have following matrix image:
# i1 i2 i3
# I = i4 i5 i6
# i7 i8 i9
#with the following filters:
# w1 w2 w3 f1 f2 f3
# W = w4 w5 w6 and F = f4 f5 f6
# w7 w8 w9 f7 f8 f9
#with padding VALID the center of the filter is set to the center
# . . . . . . . .
# . x . with 5x5 image: . x x x .
# . . . . x x x .
# . x x x .
# . . . . .
#so the result will be equal to r where
# r = np.sum( I*W + I*F ) here it's r = np.sum( I[0,:,:,0]*W[:,:,0,0] + I[0,:,:,1]*W[:,:,1,0])
#with padding SAME we keep the same dim as output so we have:
# 0 0 0 0 0
# 0 x x x 0
# 0 x x x 0
# 0 x x x 0
# 0 0 0 0 0
#so the result will be equal to
# r1 r2 r3
# R = r4 r5 r6
# r7 r8 r9
#where r1 = np.sum( I[0,0:2,0:2,0]*W[1:,1:,0,0] + I[0,0:2,0:2,1]*W[1:,1:,1,0])
def tuto_ts_conv2d():
    """Walk through tf.nn.conv2d shapes step by step (tutorial code; results
    are discarded on purpose). Locals renamed so they no longer shadow the
    builtins ``input`` and ``filter``."""
    # input image WxH in RGB -> 3 input channels
    # nb_filters = nb_output_channels
    # padding VALID - perform valid convolution
    # padding SAME - keep the same output dimension as the input
    input_tensor = tf.Variable(tf.random_normal([1, 3, 3, 5]))
    kernel = tf.Variable(tf.random_normal([1, 1, 5, 1]))
    # strides -> [1, stride, stride, 1]
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='VALID')
    # 3x3 image and 1x1 filter each have 5 channels
    # input  -> [nb_image, image_width, image_height, nb_channels]
    # filter -> [filter_width, filter_height, nb_input_channels, nb_filters]
    # output = nb_images x width x height x nb_filters (nb_filters = nb_output_channels)
    # -> here 1x3x3x1
    input_tensor = tf.Variable(tf.random_normal([1, 3, 3, 5]))
    kernel = tf.Variable(tf.random_normal([3, 3, 5, 1]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='VALID')
    # image 3x3, 3x3 filter, 5 channels
    # output = 1x1x1x1, value is the sum of the 9,5-element dot product, 45-element dot product
    input_tensor = tf.Variable(tf.random_normal([1, 5, 5, 5]))
    kernel = tf.Variable(tf.random_normal([3, 3, 5, 1]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='VALID')
    # output = 1x3x3x1
    input_tensor = tf.Variable(tf.random_normal([1, 5, 5, 5]))
    kernel = tf.Variable(tf.random_normal([3, 3, 5, 1]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='SAME')
    # output = 1x5x5x1
    # with multiple filters, here 7
    input_tensor = tf.Variable(tf.random_normal([1, 5, 5, 5]))
    kernel = tf.Variable(tf.random_normal([3, 3, 5, 7]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='SAME')
    # output = 1x5x5x7
    # strides 2,2
    input_tensor = tf.Variable(tf.random_normal([1, 5, 5, 5]))
    kernel = tf.Variable(tf.random_normal([3, 3, 5, 7]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 2, 2, 1], padding='SAME')
    # output = 1x3x3x7
    #  x . x . x
    #  . . . . .
    #  x . x . x
    #  . . . . .
    #  x . x . x
    # now with 10 images
    input_tensor = tf.Variable(tf.random_normal([10, 5, 5, 5]))
    kernel = tf.Variable(tf.random_normal([3, 3, 5, 7]))
    op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 2, 2, 1], padding='SAME')
    # output = 10x3x3x7
| thbeucher/DQN | help/tuto_tf.py | tuto_tf.py | py | 3,957 | python | en | code | 1 | github-code | 36 |
12006624886 | #! /usr/bin/env python3
import sys
sys.path.insert(0, '/home/pi/soco') # Add soco location to system path
import time
from soco import SoCo
from soco.snapshot import Snapshot
print("Starting Doorbell Player...")
### Setup
# Define doorbell MP3 file as bellsound and set doorbell volume
bellsound = "http://www.orangefreesounds.com/wp-content/uploads/2016/06/Westminster-chimes.mp3"
bellvolume = 50
# Assign all zone player IPs to their names
study = SoCo('192.168.1.225')
masterbedroom = SoCo('192.168.1.83') # Left Play:1 (master)
invmasterbedroom = SoCo('192.168.1.245') # Right Play:1 (invisible slave)
library = SoCo('192.168.1.166')
guestroom = SoCo('192.168.1.184')
kitchen = SoCo('192.168.1.212')
diningroom = SoCo('192.168.1.162')
livingroom = SoCo('192.168.1.238') # Playbar (master)
invlivingroom = SoCo('192.168.1.208') # SUB IP (invisible slave)
# Identify doorbell and non-doorbell players
doorbell1 = study
doorbell2 = masterbedroom
doorbell3 = diningroom
doorbell4 = kitchen
nondoorbell1 = library
nondoorbell2 = guestroom
nondoorbell3 = livingroom
# Create group lists of doorbell, non-doorbell and invisible players
doorbellgroup = [doorbell1, doorbell2, doorbell3, doorbell4]
nondoorbellgroup = [nondoorbell1, nondoorbell2, nondoorbell3]
invisiblezones = [invmasterbedroom, invlivingroom]
### Store pre-doorbell state
# Take a snapshot of the states of doorbellgroup players (restored at the end)
for zp in doorbellgroup:
    print("\nSnapshotting current state of " + zp.player_name + "\n")
    zp.snap = Snapshot(zp)
    zp.snap.snapshot()
# Build descriptor list for each doorbell group player for later processing & restoration
for zp in doorbellgroup:
    print("\nGetting current group state of " + zp.player_name + "\n")
    zp.groupstatus = [zp, # 0 player object
                      bool(len(set(zp.group.members) - set(invisiblezones)) != 1), # 1 in a group? (can't rely on boolean return from Snapshot, because invisible players are included in group/non-group status)
                      zp.is_coordinator, # 2 is coordinator?
                      zp.group.coordinator, # 3 current coordinator object
                      bool(set(zp.group.members) & set(nondoorbellgroup)), # 4 heterogeneous group? (made up of both doorbell and non-doorbell players)
                      (list(set(nondoorbellgroup) & set(zp.group.members)) + [False])[0] # 5 First non-doorbell group member from list; False if only doorbellgroup members
                      ]
### Doorbell player routine
# Pause and unjoin doorbell zone players from any current groups
print("Unjoining doorbell group players from current groups...\n")
for zp in doorbellgroup :
    zp.unjoin()
# Join doorbell zone players into a group with doorbell1 as master
print("Joining doorbell group players with " + doorbell1.player_name + " as master...\n")
for i in range(1,len(doorbellgroup)):
    zp = doorbellgroup[i]
    zp.join(doorbell1)
# Wait for zone players to be ready
while not doorbell1.is_coordinator:
    print("Waiting for " + doorbell1.player_name + " to be coordinator...\n")
    time.sleep(0.1)
# Set volume for doorbell sound
for zp in doorbellgroup:
    zp.volume = bellvolume
    print("Setting " + zp.player_name + " volume.\n")
# Play doorbell sound
doorbell1.play_uri(uri=bellsound)
track = doorbell1.get_current_track_info()
print(track['title'])
# Show state while the doorbell chime is playing
while str(doorbell1.get_current_transport_info()[u'current_transport_state']) != "PLAYING":
    print("Waiting to start playing...")
    time.sleep(0.1)
while str(doorbell1.get_current_transport_info()[u'current_transport_state']) == "PLAYING":
    print("Ringing...")
    time.sleep(0.1)
# Unjoin doorbell zone players from the doorbell group
print("\nUnjoining doorbell group players from doorbell group...")
for zp in doorbellgroup:
    zp.unjoin()
# Wait for zone players to be ungrouped
for zp in doorbellgroup:
    while not zp.is_coordinator:
        print("\nWaiting for " + zp.player_name + " to be ungrouped...")
        time.sleep(0.1)
### Restore and regroup doorbell players
# Restore original state of doorbell players
print("\nRestoring doorbell group players to former states...")
for zp in doorbellgroup:
    zp.snap.restore(fade=0)
time.sleep(1)
# Restore groups based on zp.groupstatus descriptor list of original group state
print("\nRestoring groups...")
for zp in doorbellgroup:
    if zp.groupstatus[1] == False: # Loner
        pass #### Do nothing; was not in a group
    elif zp.groupstatus[2] == False and zp.groupstatus[4] == False: # Homog group slave
        zp.join(zp.groupstatus[3]) ##### Rejoin to original coord
    elif zp.groupstatus[2] == True and zp.groupstatus[4] == False: # Homog group coord
        pass #### Do nothing; slaves are rejoined above
    elif zp.groupstatus[2] == True and zp.groupstatus[4] == True: # Former coord of heterog group
        zp.join(zp.groupstatus[5].group.coordinator) ##### Query new coord of non-doorbell group member & rejoin to it
    elif zp.groupstatus[2] == False and zp.groupstatus[3] not in doorbellgroup: # Slave in heterog group with non-doorbell coord
        zp.join(zp.groupstatus[3]) #### Rejoin to original coord
    else: # Slave in heterog group with doorbell coord
        zp.join(zp.groupstatus[5].group.coordinator) #### Query new coord of non-doorbell group member & rejoin to it
# Finish
print("\nDoorbell Player finished.\n")
| ronschaeffer/sonosdoorbell | SonosDoorbellPlayer.py | SonosDoorbellPlayer.py | py | 5,389 | python | en | code | 2 | github-code | 36 |
34636922480 | #!/usr/bin/env python
# coding: utf-8
# In[6]:
#Program to find minimum flips to convert message P to message Q
def flipped_bits(num1, num2):
    """Count the bit positions in which num1 and num2 differ.

    Equivalent to the population count of ``num1 ^ num2``.
    Assumes non-negative integers (same domain as the original loop).
    """
    # The bits that must be flipped to turn num1 into num2 are exactly
    # the set bits of the XOR of the two numbers.
    diff = num1 ^ num2
    flips = 0
    while diff > 0:
        flips += diff & 1
        diff >>= 1
    return flips
def main():
    """Read two integers from stdin and print how many bits differ."""
    # input for num1
    num1 = int(input('Input the first integer:'))
    # input for num2 (bug fix: prompt previously read "seconde")
    num2 = int(input('Input the second integer:'))
    result = flipped_bits(num1, num2)
    print('The minimum number of flips is:',result)

if __name__ == "__main__":
    main()
| atta1987/HSBC | Bits flip.py | Bits flip.py | py | 796 | python | en | code | 0 | github-code | 36 |
37849815349 | #!/usr/bin/env python3
# Run a BiCoN biclustering search on an expression/network pair and write
# the selected genes (one per line) to an output file.
# Usage: run_bicon.py <expr_file> <network_file> <out_file> <L_g_min> <L_g_max>
from bicon import data_preprocessing
from bicon import BiCoN
from bicon import results_analysis
import sys
# Positional command-line arguments (no validation is performed here).
path_expr = sys.argv[1]  # expression data file (format defined by bicon)
path_net = sys.argv[2]   # interaction network file
path_out = sys.argv[3]   # destination file for the selected gene list
# Preprocess inputs into expression matrix GE and network graph G;
# the fourth return value is unused here.
GE, G, labels, _ = data_preprocessing(path_expr, path_net)
# Lower/upper bounds on the number of genes per bicluster.
L_g_min = int(sys.argv[4])
L_g_max = int(sys.argv[5])
model = BiCoN(GE, G, L_g_min, L_g_max)
solution, scores = model.run_search()
results = results_analysis(solution, labels)
# Concatenate the gene sets of both biclusters and write one gene per line.
genes = []
genes.extend(results.genes1)
genes.extend(results.genes2)
with open(path_out, 'w') as fh:
    for gene in genes:
        fh.write(gene + "\n")
| repotrial/NeDRex-Web | web/backend/scripts/run_bicon.py | run_bicon.py | py | 596 | python | en | code | 2 | github-code | 36 |
71249270184 | """
Core client functionality, common across requests.
"""
import collections
import random
import requests
import time
from datetime import datetime
from datetime import timedelta
# HTTP status codes considered transient; requests are retried with backoff.
RETRIABLE_STATUSES = {500, 503, 504}


class AbstractRestClient:
    """Performs requests to APIs services."""

    def __init__(self, base_url,
                 timeout=None, connect_timeout=None, read_timeout=None, retry_timeout=60,
                 queries_per_second=10,
                 requests_kwargs=None):
        """
        :param base_url: base url to perform requests
        :type base_url: string
        :param timeout: Combined connect and read timeout for HTTP requests, in
            seconds. Specify "None" for no timeout.
        :type timeout: int
        :param connect_timeout: Connection timeout for HTTP requests, in
            seconds. You should specify read_timeout in addition to this option.
            Note that this requires requests >= 2.4.0.
        :type connect_timeout: int
        :param read_timeout: Read timeout for HTTP requests, in
            seconds. You should specify connect_timeout in addition to this
            option. Note that this requires requests >= 2.4.0.
        :type read_timeout: int
        :param retry_timeout: Timeout across multiple retriable requests, in
            seconds.
        :type retry_timeout: int
        :param queries_per_second: Number of queries per second permitted.
            If the rate limit is reached, the client will sleep for the
            appropriate amount of time before it runs the current query.
        :type queries_per_second: int
        :param requests_kwargs: Extra keyword arguments for the requests
            library, which among other things allow for proxy auth to be
            implemented. See the official requests docs for more info:
            http://docs.python-requests.org/en/latest/api/#main-interface
        :type requests_kwargs: dict
        """
        self.session = requests.Session()

        if timeout and (connect_timeout or read_timeout):
            raise ValueError("Specify either timeout, or connect_timeout "
                             "and read_timeout")

        if connect_timeout and read_timeout:
            # requests >= 2.4.0 accepts a (connect, read) timeout tuple.
            self.timeout = (connect_timeout, read_timeout)
        else:
            self.timeout = timeout

        self.retry_timeout = timedelta(seconds=retry_timeout)
        self.requests_kwargs = requests_kwargs or {}
        self.requests_kwargs.update({
            "timeout": self.timeout,
        })

        self.queries_per_second = queries_per_second
        # Bounded deque of the last N request timestamps, used for rate
        # limiting in _request().
        self.sent_times = collections.deque("", queries_per_second)
        self.base_url = base_url

    def _request(self, url, method="get", first_request_time=None, retry_counter=0, requests_kwargs=None):
        """Performs HTTP GET/POST with retry/backoff and rate limiting.

        :param url: URL path for the request. Should begin with a slash.
        :type url: string
        :param method: HTTP method name, support get and post.
        :type method: string
        :param first_request_time: The time of the first request (None if no
            retries have occurred).
        :type first_request_time: datetime.datetime
        :param retry_counter: The number of this retry, or zero for first attempt.
        :type retry_counter: int
        :raises ValueError: when the retry window or a single request times out.
        """
        if not first_request_time:
            first_request_time = datetime.now()

        elapsed = datetime.now() - first_request_time
        if elapsed > self.retry_timeout:
            raise ValueError("timeout")

        if retry_counter > 0:
            # 0.5 * (1.5 ^ i) is an increased sleep time of 1.5x per iteration,
            # starting at 0.5s when retry_counter=0. The first retry will occur
            # at 1, so subtract that first.
            delay_seconds = 0.5 * 1.5 ** (retry_counter - 1)
            # Jitter this value by 50% and pause.
            time.sleep(delay_seconds * (random.random() + 0.5))

        requests_kwargs = requests_kwargs or {}
        final_requests_kwargs = dict(self.requests_kwargs, **requests_kwargs)

        try:
            response = self.session.request(method, url, **final_requests_kwargs)
        except requests.exceptions.Timeout:
            raise ValueError("timeout")

        if response.status_code in RETRIABLE_STATUSES:
            # Bug fix: `method` was previously omitted from this call, so the
            # retry passed first_request_time as the HTTP method and shifted
            # every following argument by one position.
            return self._request(url, method, first_request_time,
                                 retry_counter + 1, requests_kwargs)

        # Check if the time of the nth previous query (where n is
        # queries_per_second) is under a second ago - if so, sleep for
        # the difference.
        if self.sent_times and len(self.sent_times) == self.queries_per_second:
            elapsed_since_earliest = time.time() - self.sent_times[0]
            if elapsed_since_earliest < 1:
                time.sleep(1 - elapsed_since_earliest)
        self.sent_times.append(time.time())

        return response

    def _get_request_uri(self, partial_uri=""):
        """Resolve *partial_uri* against base_url; absolute URLs pass through."""
        if partial_uri.startswith("http"):
            return partial_uri
        return "{}{}".format(self.base_url, partial_uri)

    def get(self, uri, params=None, headers=None):
        """Perform an HTTP GET with optional query params and headers."""
        requests_kwargs = {"params": params or {}, "headers": headers or {}}
        url = self._get_request_uri(uri)
        return self._request(url, "get", requests_kwargs=requests_kwargs)

    def post(self, uri, data=None, headers=None):
        """Perform an HTTP POST with a JSON body and optional headers."""
        requests_kwargs = {"json": data or {}, "headers": headers or {}}
        url = self._get_request_uri(uri)
        return self._request(url, "post", requests_kwargs=requests_kwargs)
| ifreddyrondon/address-resolver | addressresolver/core/client.py | client.py | py | 5,786 | python | en | code | 0 | github-code | 36 |
74541806822 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
import logging
import time
import gzip
import random
from six.moves.urllib.error import URLError
from six.moves.urllib.request import Request, build_opener, HTTPCookieProcessor
from six.moves.urllib.parse import urlencode
from six.moves.http_cookiejar import CookieJar
from six.moves import StringIO
# pysocks
import socks
from sockshandler import SocksiPyHandler
logger = logging.getLogger(__name__)
def get_user_agent(idx=-1):
    """Return a User-Agent string; a negative *idx* picks one at random."""
    agents = [
        'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130619 Firefox/17.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36',
    ]
    if idx >= 0:
        return agents[idx]
    return random.choice(agents)
def url_downloader(url, data=None, path=None, cookie=None,
                   timeout=5, retry=1, retry_ivl=5,
                   referer=None, agent=None, proxy=None):
    """Download URL link

    url: url to download
    data: post data (a dict is form-encoded and triggers a POST)
    path: download to local file (the returned 'data' is then None)
    cookie: optional cookie jar; a fresh CookieJar is created when None
    timeout: socket timeout (doubled on every retry)
    retry: retry times to download url
    retry_ivl: interval time when retry (doubled on every retry)
    referer: optional Referer header value
    agent: http user agent
    proxy: socks5://127.0.0.1:1080

    Returns a dict with keys 'mime', 'path', 'data', 'url', 'cookie',
    'error'; on final failure the first four are None and 'error' holds
    the message.
    """
    # Bug fix: initialize before the try block so the except clause can
    # always reference it (it was previously assigned mid-try, so an early
    # failure raised NameError inside the handler).
    response = None
    while True:
        try:
            if isinstance(data, dict):
                data = urlencode(data).encode('ascii')
            request = Request(url, data=data)
            request.add_header('User-Agent', agent or get_user_agent())
            if referer:
                request.add_header('Referer', referer)
            if data:
                request.add_header(
                    'Content-Type',
                    'application/x-www-form-urlencoded;charset=utf-8')
            handlers = []
            if proxy:
                # e.g. "socks5://127.0.0.1:1080" -> scheme, host, port
                scheme, host, port = proxy.split(':')
                host = host.strip('/')
                proxy_handler = SocksiPyHandler(
                    socks.PROXY_TYPES[scheme.upper()], host, int(port)
                )
                handlers.append(proxy_handler)
            if cookie is None:
                cookie = CookieJar()
            cookie_handler = HTTPCookieProcessor(cookie)
            handlers.append(cookie_handler)
            opener = build_opener(*handlers)
            response = opener.open(request, timeout=timeout)
            content_encoding = response.info().get('content-encoding')
            if content_encoding:
                # Transparently decompress gzip-encoded bodies.
                with gzip.GzipFile(fileobj=StringIO(response.read())) as f:
                    r_data = f.read()
            else:
                r_data = response.read()
            if path:
                with open(path, 'wb') as f:
                    f.write(r_data)
                r_data = None
            response.close()
            mime = response.info().get('content-type')
            real_url = response.geturl()
            err_msg = 'Ok'
            break
        except (URLError, IOError, OSError) as err:
            if response is not None:
                response.close()
            response = None
            retry -= 1
            err_msg = str(err)
            if retry > 0:
                # Bug fix: the error message and the retry interval were
                # previously swapped in this format string.
                logger.debug('Error: %s... Try again after %s seconds' % (
                    err_msg, retry_ivl))
                time.sleep(retry_ivl)
                # Exponential backoff for both the wait and the timeout.
                retry_ivl += retry_ivl
                timeout += timeout
            else:
                path = mime = r_data = real_url = None
                break
    return {
        'mime': mime,
        'path': path,
        'data': r_data,
        'url': real_url,
        'cookie': cookie,
        'error': err_msg,
    }
| liuyug/utils | network.py | network.py | py | 3,623 | python | en | code | 0 | github-code | 36 |
19738801169 | from vigilo.models.session import DBSession, MigrationDDL
from vigilo.models.tables import HighLevelService
def upgrade(migrate_engine, actions):
    """
    Migrate the model.

    @param migrate_engine: Database connection that may be used
        during the migration.
    @type migrate_engine: C{Engine}
    @param actions: Container listing the actions to perform
        once this migration has been applied.
    @type actions: C{MigrationActions}
    """
    # vigilo-updatedb automatically creates the new link table that
    # stores the priorities.
    # So the remaining tasks are only:
    # - dropping the old "priority" column from the HLS table;
    # - prompting the administrator to re-run VigiConf.
    MigrationDDL(
        [
            # Drop the obsolete column (priorities now live in the new table).
            "ALTER TABLE %(fullname)s DROP COLUMN priority",
        ],
        context={}
    ).execute(DBSession, HighLevelService.__table__)

    # Ask the user to force a resynchronisation of their configuration.
    actions.sync_force = True
| vigilo/models | src/vigilo/models/migration/028_Different_HLS_priorities.py | 028_Different_HLS_priorities.py | py | 1,101 | python | fr | code | 4 | github-code | 36 |
25141616 | from sys import stdin
input = stdin.readline


def compute_ranks(table):
    """Return 1-based ranks for (weight, height) pairs (BOJ 7568).

    A person's rank is one plus the number of people who are strictly
    bigger in BOTH weight and height — equivalent to the original
    double-condition loop, just stated directly.
    """
    return [
        1 + sum(1 for w2, h2 in table if w2 > w and h2 > h)
        for w, h in table
    ]


if __name__ == "__main__":
    n = int(input())
    table = [list(map(int, input().split())) for _ in range(n)]
    print(*compute_ranks(table))
28721865338 | # usage: python dropprofiles.py
# looks through mongo argo:argo and lists ids
from pymongo import MongoClient

client = MongoClient('mongodb://database/argo')
db = client.argo

# Fetch only the _id field of every profile document.
mongoids = [x['_id'] for x in db.argo.find({}, {'_id': 1})]

# Write one id per line; the context manager guarantees the file is
# flushed and closed (the original left the handle open).
with open("mongoprofiles", "w") as mongoprofiles:
    for x in mongoids:
        mongoprofiles.write(x)
        mongoprofiles.write('\n')
| argovis/ifremer-sync | audit/mongoids.py | mongoids.py | py | 363 | python | en | code | 0 | github-code | 36 |
16393471442 | #This is just an expansion on what the Matrix class was having a hard time fitting in
#It mainly deals with getting larger inverses, but also could have allowed
#transposing and determinants
def transposeMatrix(m):
    """Return the transpose of square matrix *m* as a new list of rows.

    Note: like the original, this assumes *m* is square (it indexes
    m[c][r] for every r, c pair). The original's special case for the
    diagonal (c == r) was redundant — m[r][c] == m[c][r] there — so it
    is removed.
    """
    return [[m[c][r] for c in range(len(m))] for r in range(len(m))]
def getMatrixMinor(m, i, j):
    """Return a copy of *m* with row *i* and column *j* removed."""
    minor = []
    for r, row in enumerate(m):
        if r == i:
            continue
        minor.append(row[:j] + row[j + 1:])
    return minor
def getMatrixDeternminant(m):
    """Return the determinant of square matrix *m* by cofactor expansion.

    (The misspelled name is kept for backward compatibility with callers.)
    """
    # Base cases. 1x1 was previously unhandled: the expansion loop
    # recursed into an empty minor and wrongly returned 0.
    if len(m) == 1:
        return m[0][0]
    # base case for 2x2 matrix
    if len(m) == 2:
        return m[0][0]*m[1][1]-m[0][1]*m[1][0]

    # Laplace expansion along the first row.
    determinant = 0
    for c in range(len(m)):
        determinant += ((-1)**c)*m[0][c]*getMatrixDeternminant(getMatrixMinor(m,0,c))
    return determinant
def getMatrixInverse(m):
    """Return the inverse of square matrix *m* via the adjugate method."""
    determinant = getMatrixDeternminant(m)
    # 2x2 matrices have a closed-form inverse; handle them directly.
    if len(m) == 2:
        return [[m[1][1]/determinant, -1*m[0][1]/determinant],
                [-1*m[1][0]/determinant, m[0][0]/determinant]]

    # Build the matrix of cofactors, one signed minor determinant per entry.
    cofactors = []
    for r in range(len(m)):
        cofactor_row = []
        for c in range(len(m)):
            sign = (-1) ** (r + c)
            cofactor_row.append(sign * getMatrixDeternminant(getMatrixMinor(m, r, c)))
        cofactors.append(cofactor_row)

    # Transpose to get the adjugate, then scale by 1/det.
    adjugate = transposeMatrix(cofactors)
    return [[entry / determinant for entry in row] for row in adjugate]
def plan_holiday(budget, season):
    """Return (destination, accommodation, cost) for a budget and season.

    Tiered rules (unchanged from the original script):
      <= 100  -> Bulgaria (summer: Camp 30%, winter: Hotel 70%)
      <= 1000 -> Balkans  (summer: Camp 40%, winter: Hotel 80%)
      > 1000  -> Europe   (Hotel at 90%, regardless of season)
    An unknown season in the first two tiers yields ("", 0) accommodation
    and cost, matching the original fall-through behaviour.
    """
    if budget > 1000:
        return "Europe", "Hotel", budget * 0.9
    if budget <= 100:
        destination = "Bulgaria"
        rates = {"summer": ("Camp", 0.3), "winter": ("Hotel", 0.7)}
    else:
        destination = "Balkans"
        rates = {"summer": ("Camp", 0.4), "winter": ("Hotel", 0.8)}
    type_holiday, share = rates.get(season, ("", 0))
    return destination, type_holiday, budget * share


if __name__ == "__main__":
    budget = float(input())
    season = input()
    destination, type_holiday, total = plan_holiday(budget, season)
    print(f"Somewhere in {destination}")
    print(f"{type_holiday} - {total:.2f}")
| ivoivanov0830006/1.1.Python_BASIC | 3.Nested_conditional_statements/*05.Journey.py | *05.Journey.py | py | 709 | python | en | code | 1 | github-code | 36 |
15062774171 | import socket
import tkinter as tk  # NOTE(review): imported but never used in this file — confirm before removing

# Loopback address/port the chat server listens on (local clients only).
HOST = '127.0.0.1'
PORT = 12345

def run_server():
    """Run a console chat server that serves one client at a time.

    For each accepted connection, alternate between receiving a client
    message and sending an operator-typed reply until the client sends
    'stop' or the connection drops. Loops forever accepting new clients.

    NOTE(review): input() blocks the loop, so the server cannot receive
    while waiting for operator input — confirm this is acceptable.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind((HOST, PORT))
    server_socket.listen()
    print(f"Server started at {HOST}:{PORT}")

    while True:
        print("Waiting for a connection...")
        client_socket, client_address = server_socket.accept()
        print(f"Connection from {client_address} has been established.")

        while True:
            try:
                # Blocking read; empty bytes means the peer closed the socket.
                data = client_socket.recv(1024).decode()
                if not data:
                    break

                print(f"Received message from client: {data}")

                # 'stop' is the client's request to end the session.
                if data == 'stop':
                    client_socket.send("Exiting chat...".encode())
                    client_socket.close()
                    break

                # Prompt the operator for the reply to send back.
                server_input = input("Server: ")
                client_socket.send(server_input.encode())
            except (ConnectionResetError, BrokenPipeError):
                # Client vanished mid-session; clean up and wait for the next one.
                print(f"Connection with {client_address} closed.")
                client_socket.close()
                break

if __name__ == "__main__":
    run_server()
| damianslavenburg/leren-programmeren | python/module 4/deel 2/chatbot/server.py | server.py | py | 1,186 | python | en | code | 0 | github-code | 36 |
32578660748 | import cv2 as cv
import numpy as np

# Load the input image and run Canny edge detection (thresholds 100/150).
image = cv.imread('img.jpg')
imageGray = cv.cvtColor(image,cv.COLOR_BGR2GRAY)  # NOTE(review): computed but Canny below runs on the color image — confirm intended
imageGrayCanny = cv.Canny(image,100,150)
cv.imshow('canny',imageGrayCanny)

# Extract only the outer contours of the edge map.
contours, hierarchy = cv.findContours(imageGrayCanny,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_NONE)
contours_poly = [None] * len(contours)
boundRect = [None] * len(contours)

# Approximate each contour with a polygon and take its bounding box.
for local, contour in enumerate(contours):
    contours_poly[local] = cv.approxPolyDP(contour, 3, True)
    boundRect[local] = cv.boundingRect(contours_poly[local])

# Draw every polygon and its bounding rectangle on a copy of the image.
draw = np.copy(image)
for i in range(len(contours)):
    color = ((0, 0, 255))  # red in BGR
    cv.drawContours(draw, contours_poly, i, color)
    cv.rectangle(draw, (int(boundRect[i][0]), int(boundRect[i][1])),
                 (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)

# Report the area of every detected contour ("a.u." = arbitrary units).
for index ,contour in enumerate(contours):
    print(f"object {index + 1}: {cv.contourArea(contour)} a.u.")

cv.imshow('draw',draw)
cv.waitKey(0)
cv.imwrite("objects.jpg", draw)
| mycatdoitbetter/projects-opencv2-python | t31 - t45/t-32/t-32.py | t-32.py | py | 1,040 | python | en | code | 1 | github-code | 36 |
def suffixes_sorted(s):
    """Return every suffix of *s* in lexicographic order (BOJ 11656)."""
    return sorted(s[i:] for i in range(len(s)))


if __name__ == "__main__":
    s = input().rstrip()
    # Print one suffix per line, smallest first.
    for suffix in suffixes_sorted(s):
        print(suffix)
21107263411 | """Utility functions for building models."""
from __future__ import print_function
import collections
import time
import os
import numpy as np
import tensorflow as tf
from .utils import iterator_utils
from .utils import misc_utils as utils
from .utils import data_utils
__all__ = [
"get_initializer", "get_device_str",
"create_train_model", "create_eval_model", "create_infer_model",
"create_rnn_cell", "gradient_clip", "create_or_load_model", "load_model"
]
def get_initializer(init_op, seed=None, init_weight=None):
  """Create an initializer. init_weight is only for uniform.

  Raises:
    ValueError: if init_op is unknown, or "uniform" is requested without
      a non-zero init_weight.
  """
  if init_op == "uniform":
    # Previously an `assert`, which is silently stripped under `python -O`;
    # an explicit exception keeps the validation in optimized runs.
    if not init_weight:
      raise ValueError("init_weight must be provided for uniform init_op")
    return tf.random_uniform_initializer(
        -init_weight, init_weight, seed=seed)
  elif init_op == "glorot_normal":
    return tf.keras.initializers.glorot_normal(seed=seed)
  elif init_op == "glorot_uniform":
    return tf.keras.initializers.glorot_uniform(seed=seed)
  else:
    raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
  """Return a device string for a multi-GPU setup (CPU when no GPUs)."""
  if num_gpus == 0:
    return "/cpu:0"
  # Round-robin assignment of devices over the available GPUs.
  return "/gpu:%d" % (device_id % num_gpus)
# Train Model
class TrainModel(
    collections.namedtuple("TrainModel", ("graph", "model", "iterator",
                                          "data_placeholder"))):
  """Immutable bundle of the training graph, model, iterator, and feed placeholder."""
def create_train_model(model_creator, hparams, scope=None):
  """Create train graph, model, and iterator.

  Args:
    model_creator: callable building the model; called with
      (hparams, iterator=..., mode=TRAIN, scope=...).
    hparams: hyperparameters; must provide out_dir, data_file, batch_size,
      random_seed, src_max_len, tgt_max_len.
    scope: container name for the graph (defaults to "train").

  Returns:
    A TrainModel namedtuple (graph, model, iterator, data_placeholder).
  """
  out_dir = hparams.out_dir
  data_file = hparams.data_file
  # Validate the data file and recover its dtype/shape for the placeholder.
  data_dtype, data_shape = data_utils.check_data(data_file, out_dir)
  graph = tf.Graph()
  with graph.as_default(), tf.container(scope or "train"):
    # Define dataset from placeholder, will be fed in during training
    data_placeholder = tf.placeholder(data_dtype, data_shape)
    dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)
    iterator = iterator_utils.get_iterator(
        dataset,
        batch_size=hparams.batch_size,
        random_seed=hparams.random_seed,
        src_max_len=hparams.src_max_len,
        tgt_max_len=hparams.tgt_max_len,
        output_buffer_size=None)
    model_device_fn = None # if we have a special device name or function
    with tf.device(model_device_fn):
      # NOTE(review): the function's `scope` argument is not forwarded here
      # (scope=None) — confirm whether that is intentional.
      model = model_creator(
          hparams,
          iterator=iterator,
          mode=tf.contrib.learn.ModeKeys.TRAIN,
          scope=None)
  return TrainModel(
      graph=graph,
      model=model,
      iterator=iterator,
      data_placeholder=data_placeholder)
# Eval Model
class EvalModel(
    collections.namedtuple("EvalModel", ("graph", "model", "iterator",
                                         "data_placeholder"))):
  """Immutable bundle of the evaluation graph, model, iterator, and feed placeholder."""
def create_eval_model(model_creator, hparams, scope=None):
  """Create eval graph, model, and iterator.

  Args:
    model_creator: callable building the model; called with
      (hparams, iterator=..., mode=EVAL, scope=...).
    hparams: hyperparameters; must provide out_dir, dev_data_file,
      batch_size, random_seed, src_max_len, tgt_max_len.
    scope: container name for the graph (defaults to "eval").

  Returns:
    An EvalModel namedtuple (graph, model, iterator, data_placeholder).
  """
  out_dir = hparams.out_dir
  dev_data_file = hparams.dev_data_file
  # Validate the dev-data file and recover its dtype/shape for the placeholder.
  data_dtype, data_shape = data_utils.check_data(dev_data_file, out_dir)
  graph = tf.Graph()
  with graph.as_default(), tf.container(scope or "eval"):
    # Define dataset from placeholder, will be fed in during evaluation
    data_placeholder = tf.placeholder(data_dtype, data_shape)
    dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)
    iterator = iterator_utils.get_iterator(
        dataset,
        batch_size=hparams.batch_size,
        random_seed=hparams.random_seed,
        src_max_len=hparams.src_max_len,
        tgt_max_len=hparams.tgt_max_len,
        output_buffer_size=None)
    # NOTE(review): the function's `scope` argument is not forwarded here
    # (scope=None) — confirm whether that is intentional.
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.EVAL,
        scope=None)
  return EvalModel(
      graph=graph,
      model=model,
      iterator=iterator,
      data_placeholder=data_placeholder)
# Infer Model
class InferModel(
    collections.namedtuple("InferModel", ("graph", "model", "iterator",
                                          "data_placeholder"))):
  """Immutable bundle of the inference graph, model, iterator, and feed placeholder."""
def create_infer_model(model_creator, hparams, scope=None):
  """Create infer graph, model, and iterator.

  Args:
    model_creator: callable building the model; called with
      (hparams, iterator=..., mode=INFER, scope=...).
    hparams: hyperparameters; must provide out_dir, infer_batch_size,
      num_infer_steps, and infer_data_file or sample_infer_data_file.
    scope: container name for the graph (defaults to "infer").

  Returns:
    An InferModel namedtuple (graph, model, iterator, data_placeholder).
  """
  out_dir = hparams.out_dir
  # Fall back to the bundled sample data when no explicit file is given.
  if hparams.infer_data_file:
    infer_data_file = hparams.infer_data_file
  else:
    infer_data_file = hparams.sample_infer_data_file
  data_dtype, data_shape = data_utils.check_data(infer_data_file, out_dir)
  graph = tf.Graph()
  with graph.as_default(), tf.container(scope or "infer"):
    # Define dataset from placeholder, will be fed in during inference
    data_placeholder = tf.placeholder(data_dtype, data_shape)
    dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        dataset,
        batch_size=hparams.infer_batch_size,
        num_infer_steps=hparams.num_infer_steps)
    # NOTE(review): the function's `scope` argument is not forwarded here
    # (scope=None) — confirm whether that is intentional.
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        scope=None)
  return InferModel(
      graph=graph,
      model=model,
      iterator=iterator,
      data_placeholder=data_placeholder)
def create_rnn_cell(unit_type, num_units, forget_bias, dropout, mode,
                    num_proj=None, use_peepholes=True, device_str=None):
  """Creates an instance of a single RNN cell.

  Args:
    unit_type: "lstm" or "layer_norm_lstm".
    num_units: hidden state size of the cell.
    forget_bias: bias added to the forget gate.
    dropout: currently unused (the dropout wrapper below is commented out).
    mode: currently unused (only relevant to the disabled dropout wrapper).
    num_proj: optional linear output projection size (LSTM only).
    use_peepholes: enable diagonal peephole connections (LSTM only).
    device_str: currently unused (see TODO below).

  Raises:
    ValueError: if unit_type is not recognized.
  """
  # Cell Type
  if unit_type == "lstm":
    utils.print_out("  LSTM, forget_bias=%g" % forget_bias)
    single_cell = tf.contrib.rnn.LSTMCell(
        num_units=num_units,
        use_peepholes=use_peepholes,  # diagonal peephole connections to learn timing
        num_proj=num_proj,  # linear output projection
        forget_bias=forget_bias)
  elif unit_type == "layer_norm_lstm":
    utils.print_out("  Layer Normalized LSTM, forget_bias=%g" % forget_bias,
                    new_line=False)
    # Bug fix: this class lives in tf.contrib.rnn, not tf.contrib —
    # the old reference raised AttributeError for this unit type.
    single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
        num_units,
        forget_bias=forget_bias,
        layer_norm=True)
  else:
    raise ValueError("Unknown unit type %s!" % unit_type)

  # # Dropout (= 1-keep_prob) is set to 0 for eval and infer modes
  # dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
  # if dropout > 0.0:
  #   single_cell = tf.contrib.rnn.DropoutWrapper(
  #       cell=single_cell, input_keep_prob=(1.0-dropout))
  #   utils.print_out("  %s, dropout=%g " % (type(single_cell).__name__, dropout))

  # TODO: Residual
  # TODO: DeviceWrapper
  return single_cell
def gradient_clip(gradients, max_gradient_norm):
  """Clip *gradients* to max_gradient_norm; return (clipped, global_norm)."""
  clipped, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
  return clipped, norm
def load_model(model, ckpt, session, name):
  """Restore *model*'s parameters in *session* from checkpoint *ckpt*."""
  t0 = time.time()
  model.saver.restore(session, ckpt)
  elapsed = time.time() - t0
  utils.print_out("  loaded %s model parameters from %s, time %.2fs" %
                  (name, ckpt, elapsed))
  return model
def create_or_load_model(model, ckpt_dir, session, name):
  """Create nMOR model and initialize or load parameters in session.

  Restores from the newest checkpoint in *ckpt_dir* when one exists,
  otherwise initializes all variables fresh.

  Returns:
    A (model, global_step) tuple; global_step reflects the restored
    (or freshly initialized) training step.
  """
  latest_ckpt = tf.train.latest_checkpoint(ckpt_dir)
  if latest_ckpt:
    # Resume training/eval from the most recent checkpoint.
    model = load_model(model, latest_ckpt, session, name)
  else:
    start_time = time.time()
    session.run(tf.global_variables_initializer())
    utils.print_out("  created %s model with fresh parameters, time %.2fs" %
                    (name, time.time()-start_time))

  global_step = model.global_step.eval(session=session)
  return model, global_step
| panchgonzalez/nmor | nmor/model_helper.py | model_helper.py | py | 7,400 | python | en | code | 22 | github-code | 36 |
# ISO country codes mapped to their localized (Korean) names.
dict_a = {
    'kr': '한국',
    'au': '호주',
    'jp': '일본',
    'us': '미국'
}

# Sort the (code, name) pairs alphabetically by the localized name.
tuple_list = sorted(dict_a.items(), key=lambda pair: pair[1])

# Split the sorted pairs into parallel code/name lists.
keys = [code for code, _ in tuple_list]
values = [name for _, name in tuple_list]
17904371974 | import pandas as pd
import tensorflow as tf
import psycopg2
import configparser as cf
import numpy as np
import key_driver_analysis as kda
SQL_COLUMN_NAMES = ['nct_id',
'start_date',
'study_type',
'enrollment_type',
'phase',
'overall_status']
STATUS = ['Completed',
'Terminated']
def get_db_connection_str(db_props = 'aact.properties'):
    """Returns a psycopg2 DB database connection string.

    Reads the [aact.database] section of *db_props* and joins its
    key=value pairs with spaces (a trailing space is kept, matching the
    original behaviour).
    """
    config = cf.ConfigParser()
    with open(db_props) as f:
        # read_file() replaces readfp(), which was deprecated since
        # Python 3.2 and removed in Python 3.12.
        config.read_file(f, source=db_props)
    dbargs = ""
    for k, v in config['aact.database'].items():
        dbargs = dbargs + k + "=" + v + " "
    return dbargs
def train_validate_test_split(df, train_percent=.6, validate_percent=.2, seed=None):
    """Randomly split *df* rows into (train, validate, test) DataFrames."""
    np.random.seed(seed)
    # Shuffle the index, then cut it at the train/validate boundaries;
    # whatever remains becomes the test set.
    shuffled = np.random.permutation(df.index)
    n_rows = len(df.index)
    train_end = int(train_percent * n_rows)
    validate_end = train_end + int(validate_percent * n_rows)
    train_idx, validate_idx, test_idx = np.split(shuffled, [train_end, validate_end])
    return df.loc[train_idx], df.loc[validate_idx], df.loc[test_idx]
def kda_metadata(df):
    """Run a key-driver (relative importance) analysis against 'status'."""
    target = 'status'
    # Every column except the target acts as a candidate driver feature.
    features = list(set(df.columns.tolist()) - {target})
    print(f'target --> {target}')
    print(f'features --> {features}')
    rw_df = kda.relative_importance(
        df, target=target, features=features, verbose=True)
    return rw_df
def load_data(y_name='status', db_props='aact.properties'):
    """Returns the CT dataset as (train_x, train_y), (test_x, test_y), (validate_x, validate_y).

    Pulls oncology clinical trials (started after 2019-01-01, status
    Completed or Terminated) from an AACT PostgreSQL database, derives
    integer-coded categorical features, logs key-driver metadata, and
    splits the rows into train/test/validate frames with *y_name* popped
    out as the label.
    """
    dbargs=get_db_connection_str(db_props)
    conn = psycopg2.connect(dbargs)
    # Joined query across studies / calculated_values / eligibilities /
    # interventions / sponsors / designs / brief_summaries, restricted to
    # oncology trials with known enrollment, at least one facility, and a
    # lead sponsor.
    sqlstr= \
    "SELECT s." + ",s.".join(SQL_COLUMN_NAMES) + ", sp.agency_class as sponsor_type, cv.number_of_facilities, e.gender, " + \
    " cv.has_us_facility, cv.average_condition_completion_ratio, " + \
    " CASE WHEN s.brief_title LIKE '%age III%' THEN '1' WHEN s.brief_title LIKE '%age IV%' THEN '2' ELSE 0 END as condition_stage, " + \
    " CASE WHEN s.number_of_arms IS NULL THEN 0 ELSE s.number_of_arms END as number_of_arms_clean, " + \
    " d.allocation, d.intervention_model, d.primary_purpose, 0 as drug_recency, bs.description, " + \
    " count(dgi.id) as design_group_intervention_count, count(distinct(i.intervention_type)) as intervention_type_count, " + \
    " count(distinct(sp2.name)) as sponsor_count " + \
    "FROM studies as s, calculated_values as cv, eligibilities as e, interventions as i, " + \
    " sponsors as sp, sponsors as sp2, design_group_interventions as dgi, designs as d, brief_summaries as bs " + \
    "WHERE s.nct_id=cv.nct_id AND s.nct_id=sp.nct_id AND s.nct_id=i.nct_id AND s.nct_id=sp2.nct_id AND s.nct_id=e.nct_id " + \
    "AND s.nct_id=dgi.nct_id AND s.nct_id=d.nct_id AND s.nct_id=bs.nct_id " + \
    "AND s.start_date > '2019-01-01' " + \
    "AND cv.is_oncology = true " + \
    "AND s.overall_status in ('Completed', 'Terminated') " + \
    "AND s.enrollment IS NOT NULL AND cv.number_of_facilities > 0 " + \
    "AND sp.lead_or_collaborator = 'lead' " + \
    "GROUP BY s." + ",s.".join(SQL_COLUMN_NAMES) + ", sponsor_type, cv.number_of_facilities, cv.average_condition_completion_ratio, " + \
    " e.gender, cv.has_us_facility, s.brief_title, s.number_of_arms, e.criteria, " + \
    " d.allocation, d.intervention_model, d.primary_purpose, bs.description "
    print(sqlstr)
    # Load the result set indexed by trial id, parsing start_date as a date.
    df = pd.read_sql_query(sql=sqlstr,
                           con=conn,
                           index_col='nct_id',
                           parse_dates={'start_date': '%Y-%m-%d'})
    conn.close()
    # df_sponsors = df1['source'].value_counts()
    # df=df1.join(df_sponsors,
    #             on='source',
    #             rsuffix='_local')
    # print(df.groupby('phase').count())
    df['start_epoch'] = df.start_date.dt.year
    # Initialize the integer-coded categorical columns (0 = default/unknown).
    df['study_type_category'] = 0
    df['agency_type_category'] = 0
    df['gender_category'] = 0
    df['allocation_type'] = 0
    df['enrollment_type_category'] = 0
    # df['intervention_model_type'] = 0
    df['primary_purpose_type'] = 0
    df['status'] = 0
    # Encode each categorical string column as a small integer.
    df.loc[df.study_type == 'Expanded Access', 'study_type_category'] = 1
    df.loc[df.study_type == 'Interventional', 'study_type_category'] = 2
    df.loc[df.study_type == 'Observational', 'study_type_category'] = 3
    df.loc[df.study_type == 'Observational [Patient Registry]', 'study_type_category'] = 4
    # Label: 0 = Completed, 1 = Terminated.
    df.loc[df.overall_status == 'Completed', 'status'] = 0
    df.loc[df.overall_status == 'Terminated', 'status'] = 1
    df.loc[df.sponsor_type == 'U.S. Fed', 'agency_type_category'] = 0
    df.loc[df.sponsor_type == 'NIH', 'agency_type_category'] = 1
    df.loc[df.sponsor_type == 'Industry', 'agency_type_category'] = 2
    df.loc[df.sponsor_type == 'Other', 'agency_type_category'] = 3
    df.loc[df.gender == 'Male', 'gender_category'] = 1
    df.loc[df.gender == 'Female', 'gender_category'] = 2
    # Allocation is inferred from the design row, the free-text description,
    # and the arm count (single-arm trials are treated as non-randomized).
    df.loc[df.allocation == 'Randomized', 'allocation_type'] = 1
    df.loc[df.description.str.contains('randomized'), 'allocation_type'] = 1
    df.loc[df.allocation == 'Non-Randomized', 'allocation_type'] = 2
    df.loc[df.description.str.contains('non-randomized'), 'allocation_type'] = 2
    df.loc[df.number_of_arms_clean == 1, 'allocation_type'] = 2
    # df.loc[df.intervention_model == 'Crossover Assignment', 'intervention_model_type'] = 1
    # df.loc[df.intervention_model == 'Factorial Assignment', 'intervention_model_type'] = 2
    # df.loc[df.intervention_model == 'Parallel Assignment', 'intervention_model_type'] = 3
    # df.loc[df.intervention_model == 'Sequential Assignment', 'intervention_model_type'] = 4
    # df.loc[df.intervention_model == 'Single Group Assignment', 'intervention_model_type'] = 5
    df.loc[df.enrollment_type == 'Anticipated', 'enrollment_type_category'] = 1
    df.loc[df.primary_purpose == 'Basic Science', 'primary_purpose_type'] = 1
    df.loc[df.primary_purpose == 'Device Feasibility', 'primary_purpose_type'] = 2
    df.loc[df.primary_purpose == 'Diagnostic', 'primary_purpose_type'] = 3
    df.loc[df.primary_purpose == 'Educational/Counseling/Training', 'primary_purpose_type'] = 4
    df.loc[df.primary_purpose == 'Health Services Research', 'primary_purpose_type'] = 5
    df.loc[df.primary_purpose == 'Prevention', 'primary_purpose_type'] = 6
    df.loc[df.primary_purpose == 'Screening', 'primary_purpose_type'] = 7
    df.loc[df.primary_purpose == 'Supportive Care', 'primary_purpose_type'] = 8
    df.loc[df.primary_purpose == 'Treatment', 'primary_purpose_type'] = 9
    # Snapshot the full feature frame for offline inspection.
    df.to_csv('/tmp/ct.csv')
    # Drop the raw string columns now that encoded equivalents exist.
    df.drop(columns=['start_date','overall_status','average_condition_completion_ratio','sponsor_type', 'gender', 'phase', 'study_type',
                     'has_us_facility', 'allocation', 'intervention_model', 'primary_purpose', 'enrollment_type', 'description'], inplace=True)
    # NOTE(review): validate fraction is 0.005 (very small) — confirm intended.
    train, validate, test = train_validate_test_split(df, 0.7, 0.005)
    print("Rows", len(df))
    print("************")
    rw_df = kda_metadata(df)
    print(rw_df)
    print("************")
    # pop() removes the label column in place and returns it as the y series.
    train_x, train_y = train, train.pop(y_name)
    test_x, test_y = test, test.pop(y_name)
    validate_x, validate_y = validate, validate.pop(y_name)
    return (train_x, train_y), (test_x, test_y), (validate_x, validate_y)
def train_input_fn(features, labels, batch_size):
    """An input function for training: shuffle, repeat, and batch."""
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(1000).repeat().batch(batch_size)
def eval_input_fn(features, labels, batch_size):
    """An input function for evaluation or prediction."""
    features = dict(features)
    # Prediction mode has no labels; evaluation pairs features with labels.
    inputs = features if labels is None else (features, labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    assert batch_size is not None, "batch_size must not be None"
    return dataset.batch(batch_size)
| nastacio/clinical-bi | src/main/py/ct_data.py | ct_data.py | py | 8,474 | python | en | code | 0 | github-code | 36 |
2124363907 | import torch
import torch.nn as nn
class RNN_Classifier(torch.nn.Module):
    """Plain RNN followed by a linear read-out on the last time step.

    Expects input of shape (batch, seq_len, input_size) when
    batch_first=True and returns raw logits of shape (batch, output_size).
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers = 1, batch_first = True, use_gpu = True):
        super(RNN_Classifier, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(self.input_size, self.hidden_size, num_layers, batch_first = batch_first)
        self.fc = nn.Linear(self.hidden_size, self.output_size)
        # Kept for callers that apply it externally; forward() returns logits.
        self.softmax = torch.nn.Softmax(dim=1)
        # Bug fix: was hard-coded to True, ignoring the constructor argument
        # and crashing on .cuda() for CPU-only runs.
        self.use_gpu = use_gpu

    def forward(self, x):
        # Fresh zero initial hidden state per forward pass.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        if self.use_gpu:
            h0 = h0.cuda()
        out, hidden = self.rnn(x, h0)
        # Classify from the hidden state of the final time step only.
        out = self.fc(out[:, -1, :])
        return out
| nhatleminh1997/ASL_detection | RNN_classifer.py | RNN_classifer.py | py | 1,041 | python | en | code | 0 | github-code | 36 |
3238085621 | from pandas import DataFrame
import csv
import xlwt
import pandas as pd
import numpy as np

# Load the cleaned Douban viewing-history export.
data = pd.read_csv("D:\\experiment\\第三次豆瓣\\测试3\\train\\实验数据_clear.csv")
#print(data)
# 2-D matrix storing, for each user, the count of watched movies per genre.
# namedic maps a genre name to its column index; unknown names fall back to 0.
namedic={"其他": 0, "剧情": 1,"喜剧": 2,"动作": 3,"爱情": 4,"科幻": 5,"动画": 6,"悬疑": 7,"惊悚": 8,"恐怖": 9,"犯罪": 10,"传记": 11,"历史": 12,"战争": 13,"西部": 14,"奇幻": 15,"冒险": 16,"纪录片": 17,"武侠": 18,"#": 19}
# 182 users x 20 genre columns, zero-initialized.
user_movie_list = np.zeros((182,20))
print(user_movie_list)
i=0
j=0  # NOTE(review): j is never used — confirm before removing
# Assumes rows are sorted by 1-based user id in column 0, and that
# columns 3..5 hold up to three genre labels per movie — TODO confirm.
for index, row in data.iterrows():
    #print(index) # each row's index value
    #print(row[0])
    if row[0] == i+1:
        user_movie_list[i][namedic.get(row[3], 0)] += 1
        user_movie_list[i][namedic.get(row[4], 0)] += 1
        user_movie_list[i][namedic.get(row[5], 0)] += 1
    else:
        # Advance to the next user; NOTE(review): a user with zero rows
        # would desynchronize this counter — confirm data guarantees.
        i+=1
        if row[0] == i + 1:
            user_movie_list[i][namedic.get(row[3], 0)] += 1
            user_movie_list[i][namedic.get(row[4], 0)] += 1
            user_movie_list[i][namedic.get(row[5], 0)] += 1
print(user_movie_list)
# Column headers for the per-user genre-count matrix.
lieming=["其他","剧情","喜剧","动作","爱情","科幻","动画","悬疑","惊悚","恐怖","犯罪","传记","历史","战争","西部","奇幻","冒险" ,"纪录片","武侠","#"]
user_category = pd.DataFrame(columns=lieming,data=user_movie_list)
user_category.to_csv('D:\\experiment\\第三次豆瓣\\测试3\\train\\douban_user_category.csv', index=False,encoding='utf-8')  # write the result file
14537963306 | # 주식 비교 및 분석
# 1. Stock comparison
# Uses Yahoo Finance
# Required libraries: yfinance, pandas-datareader
# The function that fetches stock quotes is get_data_yahoo()
# get_data_yahoo(ticker to query [, start=first day of the period] [, end=last day of the period])
from pandas_datareader import data as pdr
import yfinance as yf
yf.pdr_override()  # route pandas-datareader requests through yfinance
# Fetch daily quotes for ticker 063160.KS and for Microsoft.
sec = pdr.get_data_yahoo('063160.KS', start='2020-08-17')
msft = pdr.get_data_yahoo('MSFT', start='2018-05-04')
print(sec)
tmp_msft = msft.drop(columns='Volume') # drop the trading-volume column
print(tmp_msft.tail()) # tail() prints the 5 most recent rows
print(sec.index) | drafighter/dra_investar | stock_basic.py | stock_basic.py | py | 655 | python | ko | code | 0 | github-code | 36 |
30835513911 | from voxy.wordcount.wordcount import count_words
def test_word_count():
    """count_words() should count whitespace-separated words, ignoring
    punctuation, newlines and repeated spaces."""
    sample = ("This is a text with some punctuations. And some new lines\n"
              "    and plenty of spaces.")
    expected = 15
    assert count_words(sample) == expected
| itissid/vxy_coding_challenge | voxy/test/test_word_count.py | test_word_count.py | py | 305 | python | en | code | 0 | github-code | 36 |
18074097517 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
#
# URL routing table for the Users app; each entry maps a regex to a
# dotted-path view name in Users.views.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 — migrate to a plain list of url()/path() entries with view
# callables when upgrading.
urlpatterns = patterns('',
    url(r'^index$', 'Users.views.start'),
    url(r'^setNewUser$', 'Users.views.setNewUser'),
    url(r'^getRegisterForm$', 'Users.views.getRegisterForm'),
    url(r'^login$', 'Users.views.login'),
    url(r'^logout$', 'Users.views.logout'),
)
| ggarri/photoDiary | Users/urls.py | urls.py | py | 512 | python | en | code | 0 | github-code | 36 |
40895132002 | from flask import Flask, render_template, request, Response, url_for,jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_weasyprint import HTML, render_pdf
import ast
import json
import dicttoxml
from datetime import datetime
from model import Reports
# Application and database setup.
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in source; move them to
# configuration/environment before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://interview:LetMeIn@candidate.suade.org/suade'
db = SQLAlchemy(app)
db.init_app(app)
@app.route("/")
def hello():
return "Welcome to Suade Reporting API!"
@app.route('/api/reports/', methods=['GET'])
def get_all_reports():
    """Return a JSON array with the stored payload of every report."""
    all_rows = Reports.query.all()
    return jsonify([row.type for row in all_rows])
@app.route('/api/report/<int:id>/', methods=['GET'])
def get_report(id):
    """Return the raw stored payload of a single report."""
    return Reports.query.get(id).type
# XML endpoint
@app.route('/api/report/<int:id>.xml/')
def get_xml_report(id):
    """Serve report *id* with its JSON payload converted to XML."""
    report = Reports.query.get(id)
    # The stored payload is parsed as JSON so dicttoxml can walk it.
    obj = json.loads(report.type)
    xml = dicttoxml.dicttoxml(obj)
    return Response(xml, mimetype='text/xml')
# PDF endpoint
@app.route('/api/report/<int:id>.pdf/')
def get_pdf_report(id):
    """Render report *id* through the HTML template and return it as PDF."""
    # converting string to dictionary
    # NOTE(review): the payload is parsed with ast.literal_eval here but with
    # json.loads in the XML endpoint — confirm which format is stored.
    report = ast.literal_eval(Reports.query.get(id).type)
    # Stamp the report with today's date before rendering.
    report['created'] = datetime.now().strftime('%Y-%m-%d') # timestamp
    html = render_template('report.html',report= report)
    return render_pdf(HTML(string=html))
@app.errorhandler(404)
def page_not_found(error):
    """Custom 404 handler returning a plain-text body and status code."""
    body = 'This page does not exist'
    return body, 404
if __name__ == "__main__":
app.run() | webbyfox/suade | app.py | app.py | py | 1,494 | python | en | code | 0 | github-code | 36 |
9376474634 | import numpy as np
import cv2
import sys
from math import sqrt
sys.setrecursionlimit(10000)  # headroom for the recursive escape-time computation
class MandelbrotSet:
    """Escape-time rendering of the Mandelbrot set over [-2, 2] x [-2j, 2j].

    Each pixel is coloured by the number of iterations its point needs to
    escape (|z| > 2); that count indexes into a generated RGB palette.
    """

    def __init__(self, size, numColors):
        """Render a size x size image using roughly numColors palette colors.

        numColors should be a perfect cube (n*n*n) so each RGB channel gets
        an even number of steps.
        """
        self.size = size
        self.c = 0  # complex-plane constant of the point being evaluated
        self.img = np.zeros((size, size, 3), dtype='uint8')
        self.pallet = []
        self.generatePallet(numColors)
        # Highest usable palette index doubles as the iteration cap.
        self.MAX_STACK = len(self.pallet) - 1
        self.generateImage()

    def generatePallet(self, numColors):
        """Fill self.pallet with evenly spaced RGB triples (uint8)."""
        # Approximately numColors ** (1/3) steps per channel.
        step = int(255 / (numColors**0.33333333 - 1))
        for b in range(0, 256, step):
            for g in range(0, 256, step):
                for r in range(0, 256, step):
                    self.pallet.append([g, b, r])
        self.pallet = np.array(self.pallet, dtype='uint8')

    def f(self, z):
        """One Mandelbrot iteration step for the current constant c."""
        return z**2 + self.c

    def mag(self, x):
        """Magnitude of the complex number x."""
        return sqrt((x.real**2) + (x.imag**2))

    def num(self, z, iterations):
        """Return the escape iteration count of z (capped at MAX_STACK).

        Bug fix: this used to recurse once per iteration (requiring a raised
        recursion limit and evaluating f(z) twice per level); the iterative
        version returns exactly the same counts.
        """
        count = 0
        while iterations < self.MAX_STACK:
            z = self.f(z)
            count += 1
            if self.mag(z) > 2:
                break
            iterations += 1
        return count

    def generateImage(self):
        """Evaluate every pixel of the complex grid and colour it."""
        r = np.linspace(-2, 2, self.size)     # real axis
        im = np.linspace(2j, -2j, self.size)  # imaginary axis
        for i in range(0, self.size):
            for j in range(0, self.size):
                self.c = r[j] + im[i]
                self.img[i, j] = self.pallet[self.num(0, 0)]

    def getImage(self):
        """Return the rendered image as a (size, size, 3) uint8 array."""
        return self.img
mbs = MandelbrotSet(600, 500) # 600x600 image with at most 500 palette colors
# Write the rendered fractal to disk.
cv2.imwrite('mandel.png', mbs.getImage())
| cleiston/Fractals | MandelbrotSet.py | MandelbrotSet.py | py | 1,684 | python | en | code | 0 | github-code | 36 |
73894898984 | __author__ = "Sebastian Heinlein <devel@glatzor.de>"
import datetime
import glob
import gzip
import locale
import logging
import os
import re
import subprocess
import tempfile
import time
import traceback
import uuid
import apt
import apt_pkg
from defer import inline_callbacks, return_value
from defer.utils import dbus_deferred_method
import dbus
from gi.repository import GObject
import lsb_release
import packagekit.enums as pk_enums
# for optional plugin support
try:
import pkg_resources
except ImportError:
pkg_resources = None
from aptdaemon import policykit1
import aptdaemon.core
from aptdaemon.core import APTDAEMON_TRANSACTION_DBUS_INTERFACE
import aptdaemon.enums as aptd_enums
from aptdaemon.errors import TransactionFailed, TransactionCancelled
from aptdaemon.progress import DaemonAcquireProgress
import aptdaemon.worker
import aptdaemon.networking
# Initialise GObject thread support before any GLib main-loop usage.
GObject.threads_init()
# Module-wide logger.
pklog = logging.getLogger("AptDaemon.PackageKit")
# Check if update-manager-core is installed to get aware of the
# latest distro releases
try:
    from UpdateManager.Core.MetaRelease import MetaReleaseCore
except ImportError:
    META_RELEASE_SUPPORT = False
else:
    META_RELEASE_SUPPORT = True
# Xapian database is optionally used to speed up package description search
XAPIAN_DB_PATH = os.environ.get("AXI_DB_PATH", "/var/lib/apt-xapian-index")
XAPIAN_DB = XAPIAN_DB_PATH + "/index"
XAPIAN_DB_VALUES = XAPIAN_DB_PATH + "/values"
# Enabled below only if the xapian bindings import and the index is readable.
XAPIAN_SUPPORT = False
try:
    import xapian
except ImportError:
    pass
else:
    if os.access(XAPIAN_DB, os.R_OK):
        pklog.debug("Use XAPIAN for the search")
        XAPIAN_SUPPORT = True
# Regular expressions to detect bug numbers in changelogs according to the
# Debian Policy Chapter 4.4. For details see the footnote 16:
# http://www.debian.org/doc/debian-policy/footnotes.html#f16
MATCH_BUG_CLOSES_DEBIAN=r"closes:\s*(?:bug)?\#?\s?\d+(?:,\s*(?:bug)?\#?\s?\d+)*"
MATCH_BUG_NUMBERS=r"\#?\s?(\d+)"
# URL pointing to a bug in the Debian bug tracker
HREF_BUG_DEBIAN="http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=%s"
MATCH_BUG_CLOSES_UBUNTU = r"lp:\s+\#\d+(?:,\s*\#\d+)*"
HREF_BUG_UBUNTU = "https://bugs.launchpad.net/bugs/%s"
# Regular expression to find cve references
# NOTE(review): CVE identifiers may have more than four digits in the final
# component — confirm whether \d{4} should be \d{4,}.
MATCH_CVE="CVE-\d{4}-\d{4}"
HREF_CVE="http://web.nvd.nist.gov/view/vuln/detail?vulnId=%s"
# Map Debian sections to the PackageKit group name space
SECTION_GROUP_MAP = {
    "admin" : pk_enums.GROUP_ADMIN_TOOLS,
    "base" : pk_enums.GROUP_SYSTEM,
    "comm" : pk_enums.GROUP_COMMUNICATION,
    "devel" : pk_enums.GROUP_PROGRAMMING,
    "doc" : pk_enums.GROUP_DOCUMENTATION,
    "editors" : pk_enums.GROUP_PUBLISHING,
    "electronics" : pk_enums.GROUP_ELECTRONICS,
    "embedded" : pk_enums.GROUP_SYSTEM,
    "games" : pk_enums.GROUP_GAMES,
    "gnome" : pk_enums.GROUP_DESKTOP_GNOME,
    "graphics" : pk_enums.GROUP_GRAPHICS,
    "hamradio" : pk_enums.GROUP_COMMUNICATION,
    "interpreters" : pk_enums.GROUP_PROGRAMMING,
    "kde" : pk_enums.GROUP_DESKTOP_KDE,
    "libdevel" : pk_enums.GROUP_PROGRAMMING,
    "libs" : pk_enums.GROUP_SYSTEM,
    "mail" : pk_enums.GROUP_INTERNET,
    "math" : pk_enums.GROUP_SCIENCE,
    "misc" : pk_enums.GROUP_OTHER,
    "net" : pk_enums.GROUP_NETWORK,
    "news" : pk_enums.GROUP_INTERNET,
    "oldlibs" : pk_enums.GROUP_LEGACY,
    "otherosfs" : pk_enums.GROUP_SYSTEM,
    "perl" : pk_enums.GROUP_PROGRAMMING,
    "python" : pk_enums.GROUP_PROGRAMMING,
    "science" : pk_enums.GROUP_SCIENCE,
    "shells" : pk_enums.GROUP_SYSTEM,
    "sound" : pk_enums.GROUP_MULTIMEDIA,
    "tex" : pk_enums.GROUP_PUBLISHING,
    "text" : pk_enums.GROUP_PUBLISHING,
    "utils" : pk_enums.GROUP_ACCESSORIES,
    "web" : pk_enums.GROUP_INTERNET,
    "x11" : pk_enums.GROUP_DESKTOP_OTHER,
    "unknown" : pk_enums.GROUP_UNKNOWN,
    "alien" : pk_enums.GROUP_UNKNOWN,
    "translations" : pk_enums.GROUP_LOCALIZATION,
    "metapackages" : pk_enums.GROUP_COLLECTIONS }
# Well-known D-Bus names of the PackageKit daemon and its transactions.
PACKAGEKIT_DBUS_INTERFACE = "org.freedesktop.PackageKit"
PACKAGEKIT_DBUS_SERVICE = "org.freedesktop.PackageKit"
PACKAGEKIT_DBUS_PATH = "/org/freedesktop/PackageKit"
PACKAGEKIT_TRANS_DBUS_INTERFACE = "org.freedesktop.PackageKit.Transaction"
PACKAGEKIT_TRANS_DBUS_SERVICE = "org.freedesktop.PackageKit.Transaction"
# Map aptdaemon exit status enums to their PackageKit counterparts.
# Fix: the EXIT_FAILED key was listed twice in the literal.
MAP_EXIT_ENUM = {
    aptd_enums.EXIT_SUCCESS: pk_enums.EXIT_SUCCESS,
    aptd_enums.EXIT_CANCELLED: pk_enums.EXIT_CANCELLED,
    aptd_enums.EXIT_FAILED: pk_enums.EXIT_FAILED,
    # A transaction skipped because a previous one failed is reported to
    # PackageKit as a plain failure.
    aptd_enums.EXIT_PREVIOUS_FAILED:
        pk_enums.EXIT_FAILED,
    }
# Map aptdaemon transaction status enums to PackageKit status enums.
# Fix: the STATUS_RUNNING key was listed twice in the literal.
MAP_STATUS_ENUM = {
    aptd_enums.STATUS_WAITING: pk_enums.STATUS_WAIT,
    aptd_enums.STATUS_RUNNING: pk_enums.STATUS_RUNNING,
    aptd_enums.STATUS_CANCELLING: pk_enums.STATUS_CANCEL,
    aptd_enums.STATUS_CLEANING_UP: pk_enums.STATUS_CLEANUP,
    aptd_enums.STATUS_COMMITTING: pk_enums.STATUS_COMMIT,
    aptd_enums.STATUS_DOWNLOADING: pk_enums.STATUS_DOWNLOAD,
    aptd_enums.STATUS_DOWNLOADING_REPO: pk_enums.STATUS_DOWNLOAD_REPOSITORY,
    aptd_enums.STATUS_FINISHED: pk_enums.STATUS_FINISHED,
    aptd_enums.STATUS_LOADING_CACHE: pk_enums.STATUS_LOADING_CACHE,
    aptd_enums.STATUS_RESOLVING_DEP: pk_enums.STATUS_DEP_RESOLVE,
    aptd_enums.STATUS_WAITING_LOCK:
        pk_enums.STATUS_WAITING_FOR_LOCK,
    # No PackageKit equivalents for medium changes or config file prompts.
    aptd_enums.STATUS_WAITING_MEDIUM: pk_enums.STATUS_UNKNOWN,
    aptd_enums.STATUS_WAITING_CONFIG_FILE_PROMPT:
        pk_enums.STATUS_UNKNOWN,
    }
# Map aptdaemon error enums to the closest PackageKit error enum.
MAP_ERROR_ENUM = {
    aptd_enums.ERROR_CACHE_BROKEN: pk_enums.ERROR_NO_CACHE,
    aptd_enums.ERROR_DEP_RESOLUTION_FAILED:
        pk_enums.ERROR_DEP_RESOLUTION_FAILED,
    aptd_enums.ERROR_INCOMPLETE_INSTALL: pk_enums.ERROR_NO_CACHE,
    aptd_enums.ERROR_INVALID_PACKAGE_FILE:
        pk_enums.ERROR_PACKAGE_CORRUPT,
    aptd_enums.ERROR_KEY_NOT_INSTALLED: pk_enums.ERROR_GPG_FAILURE,
    aptd_enums.ERROR_KEY_NOT_REMOVED: pk_enums.ERROR_GPG_FAILURE,
    aptd_enums.ERROR_NOT_REMOVE_ESSENTIAL_PACKAGE:
        pk_enums.ERROR_PACKAGE_FAILED_TO_REMOVE,
    aptd_enums.ERROR_NO_CACHE: pk_enums.ERROR_NO_CACHE,
    aptd_enums.ERROR_NO_LOCK: pk_enums.ERROR_CANNOT_GET_LOCK,
    aptd_enums.ERROR_NO_PACKAGE: pk_enums.ERROR_PACKAGE_NOT_FOUND,
    aptd_enums.ERROR_PACKAGE_ALREADY_INSTALLED:
        pk_enums.ERROR_PACKAGE_ALREADY_INSTALLED,
    aptd_enums.ERROR_PACKAGE_DOWNLOAD_FAILED:
        pk_enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
    aptd_enums.ERROR_PACKAGE_MANAGER_FAILED:
        pk_enums.ERROR_TRANSACTION_ERROR,
    aptd_enums.ERROR_PACKAGE_NOT_INSTALLED:
        pk_enums.ERROR_PACKAGE_NOT_INSTALLED,
    aptd_enums.ERROR_PACKAGE_UNAUTHENTICATED:
        pk_enums.ERROR_BAD_GPG_SIGNATURE,
    aptd_enums.ERROR_PACKAGE_UPTODATE:
        pk_enums.ERROR_NO_PACKAGES_TO_UPDATE,
    aptd_enums.ERROR_REPO_DOWNLOAD_FAILED:
        pk_enums.ERROR_REPO_NOT_AVAILABLE,
    aptd_enums.ERROR_UNREADABLE_PACKAGE_FILE:
        pk_enums.ERROR_INVALID_PACKAGE_FILE,
    aptd_enums.ERROR_SYSTEM_ALREADY_UPTODATE:
        pk_enums.ERROR_NO_PACKAGES_TO_UPDATE,
    }
# Map aptdaemon per-package progress states to PackageKit info enums.
MAP_PACKAGE_ENUM = {
    aptd_enums.PKG_CONFIGURING:
        pk_enums.INFO_INSTALLING,
    aptd_enums.PKG_DISAPPEARING:
        pk_enums.INFO_UNKNOWN,
    aptd_enums.PKG_INSTALLED:
        pk_enums.INFO_FINISHED,
    aptd_enums.PKG_INSTALLING:
        pk_enums.INFO_INSTALLING,
    aptd_enums.PKG_PREPARING_INSTALL:
        pk_enums.INFO_PREPARING,
    aptd_enums.PKG_PREPARING_PURGE:
        pk_enums.INFO_PREPARING,
    aptd_enums.PKG_PREPARING_REMOVE:
        pk_enums.INFO_PREPARING,
    aptd_enums.PKG_PURGED:
        pk_enums.INFO_FINISHED,
    aptd_enums.PKG_PURGING:
        pk_enums.INFO_REMOVING,
    aptd_enums.PKG_REMOVED:
        pk_enums.INFO_FINISHED,
    aptd_enums.PKG_REMOVING:
        pk_enums.INFO_REMOVING,
    aptd_enums.PKG_RUNNING_TRIGGER:
        pk_enums.INFO_CLEANUP,
    aptd_enums.PKG_UNKNOWN:
        pk_enums.INFO_UNKNOWN,
    aptd_enums.PKG_UNPACKING:
        pk_enums.INFO_DECOMPRESSING,
    aptd_enums.PKG_UPGRADING:
        pk_enums.INFO_UPDATING,
    }
class PackageKit(aptdaemon.core.DBusObject):
    """Provides a limited set of the PackageKit system D-Bus API.

    Incoming PackageKit calls are translated into aptdaemon transactions;
    queue and network state changes are forwarded as PackageKit signals.
    """

    # Transaction roles advertised to clients.
    # Fix: ROLE_INSTALL_PACKAGES was listed twice, duplicating the role in
    # the advertised "Roles" property string.
    # NOTE(review): ROLE_REMOVE_PACKAGES is absent although its simulate
    # counterpart is listed — confirm whether removal is intentionally
    # unsupported.
    SUPPORTED_ROLES = [pk_enums.ROLE_REFRESH_CACHE,
                       pk_enums.ROLE_UPDATE_SYSTEM,
                       pk_enums.ROLE_SIMULATE_UPDATE_PACKAGES,
                       pk_enums.ROLE_UPDATE_PACKAGES,
                       pk_enums.ROLE_SIMULATE_REMOVE_PACKAGES,
                       pk_enums.ROLE_INSTALL_PACKAGES,
                       pk_enums.ROLE_SIMULATE_INSTALL_PACKAGES,
                       pk_enums.ROLE_GET_DISTRO_UPGRADES,
                       pk_enums.ROLE_GET_UPDATES,
                       pk_enums.ROLE_GET_UPDATE_DETAIL,
                       pk_enums.ROLE_GET_PACKAGES,
                       pk_enums.ROLE_GET_DETAILS,
                       pk_enums.ROLE_GET_DEPENDS,
                       pk_enums.ROLE_GET_REQUIRES,
                       pk_enums.ROLE_SEARCH_NAME,
                       pk_enums.ROLE_SEARCH_DETAILS,
                       pk_enums.ROLE_SEARCH_GROUP,
                       pk_enums.ROLE_SEARCH_FILE,
                       pk_enums.ROLE_WHAT_PROVIDES,
                       pk_enums.ROLE_DOWNLOAD_PACKAGES]

    # Package filters advertised to clients.
    SUPPORTED_FILTERS = [pk_enums.FILTER_INSTALLED,
                         pk_enums.FILTER_NOT_INSTALLED,
                         pk_enums.FILTER_FREE,
                         pk_enums.FILTER_NOT_FREE,
                         pk_enums.FILTER_GUI,
                         pk_enums.FILTER_NOT_GUI,
                         pk_enums.FILTER_COLLECTIONS,
                         pk_enums.FILTER_NOT_COLLECTIONS,
                         pk_enums.FILTER_SUPPORTED,
                         pk_enums.FILTER_NOT_SUPPORTED,
                         pk_enums.FILTER_NEWEST]

    def __init__(self, queue, connect=True, bus=None):
        """Initialize a new PackageKit compatibility layer.

        Keyword arguments:
        queue -- the aptdaemon transaction queue
        connect -- if the daemon should connect to the D-Bus (default is True)
        bus -- the D-Bus to connect to (defaults to the system bus)
        """
        pklog.info("Initializing PackageKit compat layer")
        bus_name = None
        bus_path = None
        if connect == True:
            if bus is None:
                bus = dbus.SystemBus()
            self.bus = bus
            bus_path = PACKAGEKIT_DBUS_PATH
            bus_name = dbus.service.BusName(PACKAGEKIT_DBUS_SERVICE, self.bus)
        aptdaemon.core.DBusObject.__init__(self, bus_name, bus_path)
        # GLib source id of a pending, delayed UpdatesChanged emission.
        self._updates_changed_timeout_id = None
        self._updates_changed = False
        self.queue = queue
        self.queue.worker.connect("transaction-done", self._on_transaction_done)
        self.queue.connect("queue-changed", self._on_queue_changed)
        self._distro_id = None  # lazily computed, see _get_distro_id()
        self.netmon = aptdaemon.networking.get_network_monitor()
        self.netmon.connect("network-state-changed",
                            self._on_network_state_changed)
        self.netmon.get_network_state()

    # SIGNALS

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def RestartSchedule(self):
        """A system restart has been scheduled."""
        pass

    # Fix: Changed() was defined twice on this class; the second definition
    # silently replaced the first. Only one definition is kept.
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def Changed(self):
        """This signal is emitted when a property on the interface changes."""
        pklog.debug("Emitting PackageKit Changed()")

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="as")
    def TransactionListChanged(self, transactions):
        """The transaction list has changed, because either a transaction
        has finished or a new transaction created.

        :param transactions: A list of transaction ID's.
        :type transactions: as
        """
        pklog.debug("Emitting TransactionListChanged signal: %s", transactions)

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def UpdatesChanged(self):
        """This signal is emitted when the number of updates has changed."""
        pklog.debug("Emitting UpdatesChanged signal")

    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_DBUS_INTERFACE,
                         signature="")
    def RepoListChanged(self):
        """This signal is emitted when the repository list has changed."""
        pass

    # METHODS

    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_DBUS_INTERFACE,
                         in_signature="s", out_signature="s")
    def CanAuthorize(self, action_id):
        """Allows a client to find out if it would be allowed to authorize
        an action.

        :param action_id: The action ID, e.g.
            org.freedesktop.packagekit.system-network-proxy-configure
        :returns: The result, either yes, no or interactive.
        """
        #FIXME: We need to map packagekit and aptdaemon polices
        return "interactive"

    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_DBUS_INTERFACE,
                         in_signature="s", out_signature="")
    def StateHasChanged(self, reason):
        """This method suggests to PackageKit that the package backend state
        may have changed. This allows plugins to the native package manager
        to suggest that PackageKit drops it's caches.

        :param reason:
            The reason of the state change. Valid reasons are resume or
            posttrans. Resume is given a lower priority than posttrans.
        """
        pklog.debug("StateHasChanged() was called: %s", reason)
        self._updates_changed = True
        if reason == "cache-update":
            self._check_updates_changed(timeout=30)
        elif reason == "resume":
            self._check_updates_changed(timeout=180)

    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_DBUS_INTERFACE,
                          in_signature="", out_signature="s",
                          sender_keyword="sender")
    def GetTid(self, sender):
        """Gets a new transaction ID from the daemon.

        :returns: The tid, e.g. 45_dafeca_checkpoint32
        """
        return self._get_tid(sender)

    @inline_callbacks
    def _get_tid(self, sender):
        """Create a PackageKitTransaction for the calling process and
        return its transaction id."""
        pid, uid, cmdline = \
            yield policykit1.get_proc_info_from_dbus_name(sender, self.bus)
        pktrans = PackageKitTransaction(pid, uid, cmdline, self.queue, sender)
        return_value(pktrans.tid)

    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_DBUS_INTERFACE,
                         in_signature="", out_signature="as")
    def GetTransactionList(self):
        """Gets the transaction list of any transactions that are in
        progress.

        :returns: A list of transaction ID's
        """
        pklog.debug("GetTransactionList() was called")
        return self._get_transaction_list()

    # HELPERS

    def _get_properties(self, iface):
        """Helper to get the properties of a D-Bus interface."""
        if iface == PACKAGEKIT_DBUS_INTERFACE:
            return {# Claim that we are a stable version
                    "VersionMajor": dbus.UInt32(6),
                    "VersionMinor": dbus.UInt32(18),
                    "VersionMicro": dbus.UInt32(0),
                    "BackendName": dbus.String("aptdaemon"),
                    "BackendDescription": dbus.String("Compatibility layer"),
                    "BackendAuthor": dbus.String(__author__),
                    "Filters": dbus.String(";".join(self.SUPPORTED_FILTERS)),
                    "Groups": dbus.String(";".join(SECTION_GROUP_MAP.values())),
                    "Roles": dbus.String(";".join(self.SUPPORTED_ROLES)),
                    "Locked": dbus.Boolean(False),
                    "NetworkState": dbus.String(self.netmon.state),
                    "DistroId": dbus.String(self._get_distro_id()),
                    }
        else:
            return {}

    def _get_distro_id(self):
        """Return the cached distribution id triple "ID;CODENAME;ARCH"."""
        if self._distro_id is None:
            info = lsb_release.get_distro_information()
            # Fix: the dpkg output used to be embedded unstripped, leaving a
            # trailing newline in the id string; universal_newlines also
            # yields text instead of bytes on Python 3.
            arch = subprocess.Popen(["dpkg", "--print-architecture"],
                                    stdout=subprocess.PIPE,
                                    universal_newlines=True
                                    ).communicate()[0].strip()
            try:
                self._distro_id = "%s;%s;%s" % (info["ID"], info["CODENAME"],
                                                arch)
            except KeyError:
                self._distro_id = "unknown;unknown;%s" % arch
        return self._distro_id

    def _on_network_state_changed(self, mon, state):
        """Forward network monitor changes as property change signals."""
        # NOTE(review): the property is announced here as "Network" but
        # exported by _get_properties() as "NetworkState" — confirm which
        # name PackageKit clients expect.
        self.Changed()
        self.PropertiesChanged(PACKAGEKIT_DBUS_INTERFACE,
                               {"Network": state}, [])

    def _on_queue_changed(self, queue):
        """Announce the new transaction list and re-check for updates."""
        self.TransactionListChanged(self._get_transaction_list())
        self._check_updates_changed()

    def _get_transaction_list(self):
        """Return the tids of all queued and running PackageKit
        transactions."""
        pk_transactions = []
        for trans in self.queue.items:
            # We currently only emit PackageKit transaction
            #FIXME: Should we use MergedTransaction for all transactions and
            #       ROLE_UNKOWN for aptdaemon only transactions?
            try:
                pk_transactions.append(trans.pktrans.tid)
            except AttributeError:
                pass
        try:
            pk_transactions.append(self.queue.worker.trans.pktrans.tid)
        except AttributeError:
            pass
        return pk_transactions

    def _on_transaction_done(self, worker, trans):
        """If a cache modifying transaction is completed schedule an
        UpdatesChanged signal."""
        if trans.role in (aptd_enums.ROLE_INSTALL_FILE,
                          aptd_enums.ROLE_INSTALL_PACKAGES,
                          aptd_enums.ROLE_REMOVE_PACKAGES,
                          aptd_enums.ROLE_UPGRADE_PACKAGES,
                          aptd_enums.ROLE_COMMIT_PACKAGES,
                          aptd_enums.ROLE_UPGRADE_SYSTEM,
                          aptd_enums.ROLE_FIX_BROKEN_DEPENDS):
            self._updates_changed = True
            self._check_updates_changed()
        elif trans.role == aptd_enums.ROLE_UPDATE_CACHE:
            self._updates_changed = True
            self._check_updates_changed(timeout=30)

    def _check_updates_changed(self, timeout=60):
        """After the queue was processed schedule a delayed UpdatesChanged
        signal if required.
        """
        if not self.queue.items and self._updates_changed:
            if self._updates_changed_timeout_id:
                # A signal is already pending: cancel it and re-schedule
                # below so the emission is delayed even further.
                # Fix: previously the old source was removed but no new one
                # was added, and the stale source id blocked any further
                # UpdatesChanged emissions.
                pklog.debug("UpdatesChanged signal re-scheduled")
                GObject.source_remove(self._updates_changed_timeout_id)
            else:
                pklog.debug("UpdatesChanged signal scheduled")
            self._updates_changed_timeout_id = \
                GObject.timeout_add_seconds(timeout,
                                            self._delayed_updates_changed)

    def _delayed_updates_changed(self):
        """Emit the UpdatesChanged signal and clear the timeout."""
        self.UpdatesChanged()
        self._updates_changed_timeout_id = None
        self._updates_changed = False
        return False
class MergedTransaction(aptdaemon.core.Transaction):
    """Overlay of an Aptdaemon transaction which also provides the
    PackageKit object and its interfaces.

    The property setters are overridden so that every state change of the
    aptdaemon transaction is mirrored to the attached PackageKitTransaction
    (self.pktrans), which re-emits it through PackageKit properties and
    signals.  The get_pk_*_enum/get_pk_package_id helpers are defined
    elsewhere in this module.
    """
    def __init__(self, pktrans, role, queue, connect=True,
                 bus=None, packages=None, kwargs=None):
        # The PackageKit tid begins with "/" (it doubles as the D-Bus object
        # path); strip it for the aptdaemon transaction id.
        aptdaemon.core.Transaction.__init__(self, pktrans.tid[1:], role, queue,
                                            pktrans.pid, pktrans.uid,
                                            pktrans.cmdline, pktrans.sender,
                                            connect, bus, packages, kwargs)
        self.pktrans = pktrans
        self.run_time = 0
    def _set_status(self, enum):
        # Mirror the aptdaemon status to the PackageKit transaction.
        aptdaemon.core.Transaction._set_status(self, enum)
        self.pktrans.status = get_pk_status_enum(enum)
    status = property(aptdaemon.core.Transaction._get_status, _set_status)
    def _set_progress(self, percent):
        # Mirror the overall progress percentage.
        aptdaemon.core.Transaction._set_progress(self, percent)
        self.pktrans.percentage = self._progress
    progress = property(aptdaemon.core.Transaction._get_progress, _set_progress)
    def _set_progress_details(self, details):
        # details[4]/details[5] carry download speed and the estimated
        # remaining time — presumably in bytes/s and seconds; TODO confirm
        # against the base class.
        aptdaemon.core.Transaction._set_progress_details(self, details)
        self.pktrans.speed = int(details[4])
        self.pktrans.remaining_time = int(details[5])
        self.pktrans.elapsed_time = int(time.time() - self.pktrans.start_time)
    progress_details = property(aptdaemon.core.Transaction._get_progress_details,
                                _set_progress_details)
    def _set_progress_package(self, progress):
        # Translate the (package name, aptdaemon state) pair into a
        # PackageKit Package signal.
        aptdaemon.core.Transaction._set_progress_package(self, progress)
        pkg_name, enum = progress
        self.emit_package(get_pk_package_enum(enum),
                          get_pk_package_id(pkg_name),
                          "")
    progress_package = property(aptdaemon.core.Transaction._get_progress_package,
                                _set_progress_package)
    def _set_exit(self, enum):
        # Mirror the exit status; the pktrans setter also emits Finished.
        aptdaemon.core.Transaction._set_exit(self, enum)
        self.pktrans.exit = get_pk_exit_enum(enum)
    exit = property(aptdaemon.core.Transaction._get_exit, _set_exit)
    def _set_error(self, excep):
        # Forward errors as a PackageKit ErrorCode signal.
        # _error_property[1] presumably holds the error details string set
        # by the base class setter — TODO confirm.
        aptdaemon.core.Transaction._set_error(self, excep)
        self.pktrans.ErrorCode(get_pk_error_enum(excep.code),
                               self._error_property[1])
    error = property(aptdaemon.core.Transaction._get_error, _set_error)
    def _remove_from_connection_no_raise(self):
        # Tear down both D-Bus objects and break the reference cycle between
        # the aptdaemon transaction and its PackageKit counterpart.
        aptdaemon.core.Transaction._remove_from_connection_no_raise(self)
        self.pktrans.Destroy()
        try:
            self.pktrans.remove_from_connection()
        except LookupError as error:
            # Already removed from the bus; only worth a debug message.
            pklog.debug("remove_from_connection() raised LookupError: %s",
                        error)
        finally:
            self.pktrans.trans = None
            self.pktrans = None
        return False
    def emit_details(self, package_id, license, group, detail, url, size):
        # Forward package details to the PackageKit Details signal.
        self.pktrans.Details(package_id, license, group, detail, url, size)
    def emit_files(self, id, file_list):
        # Forward a package file list to the PackageKit Files signal.
        self.pktrans.Files(id, file_list)
    def emit_package(self, info, id, summary):
        # NOTE(review): pktrans.Package is not among the signals visible in
        # this part of the file — confirm it is defined on
        # PackageKitTransaction.
        self.pktrans.Package(info, id, summary)
        self.pktrans.last_package = id
    def emit_update_detail(self, package_id, updates, obsoletes, vendor_url,
                           bugzilla_url, cve_url, restart, update_text,
                           changelog, state, issued, updated):
        # Forward update details to the PackageKit UpdateDetail signal.
        self.pktrans.UpdateDetail(package_id, updates, obsoletes, vendor_url,
                                  bugzilla_url, cve_url, restart, update_text,
                                  changelog, state, issued, updated)
class PackageKitTransaction(aptdaemon.core.DBusObject):
"""Provides a PackageKit transaction object."""
def __init__(self, pid, uid, cmdline, queue, sender,
connect=True, bus=None):
pklog.info("Initializing PackageKit transaction")
bus_name = None
bus_path = None
self.tid = "/%s" % uuid.uuid4().get_hex()
if connect == True:
if bus is None:
bus = dbus.SystemBus()
self.bus = bus
bus_path = self.tid
bus_name = dbus.service.BusName(PACKAGEKIT_DBUS_SERVICE, bus)
aptdaemon.core.DBusObject.__init__(self, bus_name, bus_path)
self.queue = queue
self.hints = {}
self.start_time = time.time()
self._elapsed_time = 0
self._remaining_time = 0
self._speed = 0
self._caller_active = True
self._allow_cancel = False
self._percentage = 0
self._subpercentage = 0
self._status = pk_enums.STATUS_SETUP
self._last_package = ""
self.uid = uid
self.pid = pid
self.cmdline = cmdline
self.role = pk_enums.ROLE_UNKNOWN
self.sender = sender
self.trans = None
@property
def allow_cancel(self):
return self._allow_cancel
@allow_cancel.setter
def allow_cancel(self, value):
self._allow_cancel = dbus.Boolean(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"AllowCancel": self._allow_cancel}, [])
self.Changed()
@property
def last_package(self):
return self._last_package
@last_package.setter
def last_package(self, value):
self._last_package = dbus.String(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"LastPackage": self._last_package}, [])
self.Changed()
@property
def caller_active(self):
return self._caller_active
@caller_active.setter
def caller_active(self, value):
self._caller_active = dbus.Boolean(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"CallerActive": self._caller_active}, [])
self.Changed()
@property
def percentage(self):
return self._percentage
@percentage.setter
def percentage(self, progress):
self._percentage = dbus.UInt32(progress)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"Percentage": self._percentage}, [])
self.Changed()
@property
def subpercentage(self):
return self._subpercentage
@subpercentage.setter
def subpercentage(self, progress):
self._subpercentage = dbus.UInt32(progress)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"SubPercentage": self._subpercentage}, [])
self.Changed()
@property
def status(self):
return self._status
@status.setter
def status(self, enum):
self._status = dbus.String(enum)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"Status": self._status}, [])
self.Changed()
@property
def elapsed_time(self):
return self._elapsed_time
@elapsed_time.setter
def elapsed_time(self, ela):
self._elpased_time = dbus.UInt32(ela)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"ElapsedTime": self._elapsed_time}, [])
self.Changed()
@property
def remaining_time(self):
return self._remaining_time
@remaining_time.setter
def remaining_time(self, value):
self._elpased_time = dbus.UInt32(value)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"RemainingTime": self._remaining_time}, [])
self.Changed()
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, speed):
self._speed = dbus.UInt32(speed)
self.PropertiesChanged(PACKAGEKIT_TRANS_DBUS_INTERFACE,
{"AllowCancel": self._speed}, [])
self.Changed()
@property
def exit(self):
return self._exit
@exit.setter
def exit(self, enum):
self._exit = exit
self.run_time = int((time.time() - self.start_time) * 1000)
# The time could go backwards ...
if self.run_time < 0:
self.run_time = 0
if enum == pk_enums.EXIT_CANCELLED:
self.ErrorCode(pk_enums.ERROR_TRANSACTION_CANCELLED, "")
self.status = pk_enums.STATUS_FINISHED
self.Finished(enum, self.run_time)
# SIGNALS
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="ssbsusus")
def Transaction(self, old_tid, timespec, succeeded, role, duration, data,
uid, cmdline):
"""This signal is sent when more details are required about a
specific transaction.
:param old_tid: The transaction ID of the old transaction.
:param timespec: The timespec of the old transaction in ISO8601 format.
:param succeeded: If the transaction succeeded.
:param role: The role enumerated type.
:param duration: The duration of the transaction in milliseconds.
:param data: Any data associated
:param uid: The user ID of the user that scheduled the action.
:param cmdline: The command line of the tool that scheduled the action,
e.g. /usr/bin/gpk-application.
"""
pass
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="ss")
def ErrorCode(self, code, details):
"""This signal is used to report errors back to the session program.
Errors should only be send on fatal abort.
:param code: Enumerated type, e.g. no-network.
:param details: Long description or error, e.g. "failed to connect"
:type code: s
:type details: s
"""
pass
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="")
def Changed(self):
"""This signal is emitted when a property on the interface changes."""
pklog.debug("Emitting PackageKitTransaction Changed()")
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="")
def Destroy(self):
"""This signal is sent when the transaction has been destroyed
and is no longer available for use."""
pklog.debug("Emmitting Destroy()")
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="su")
def Finished(self, exit, runtime):
"""This signal is used to signal that the transaction has finished.
:param exit: The PkExitEnum describing the exit status of the
transaction.
:param runtime: The amount of time in milliseconds that the
transaction ran for.
:type exit: s
:type runtime: u
"""
pklog.debug("Emitting Finished: %s, %s", exit, runtime)
pass
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="ssssst")
def Details(self, package_id, license, group, detail, url, size):
"""This signal allows the backend to convey more details about the
package.
:param package_id: The package ID
:param license:
The license string, e.g. GPLv2+ or BSD and (MPLv1.1 or GPLv2+).
Moredetails about the correct way to format licensing strings can
be found on the Fedora packaging wiki.
:param group:
The enumerated package group description
:param detail:
The multi-line package description. If formatting is required,
then markdown syntax should be used, e.g. This is **critically**
important
:param url:
The upstream project homepage
:param size:
The size of the package in bytes. This should be the size of the
entire package file, not the size of the files installed on the
system. If the package is not installed, and already downloaded
and present in the package manager cache, then this value should
be set to zero.
"""
pklog.debug("Emmitting Details signal for %s", package_id)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ss")
    def Files(self, package_id, file_list):
        """This signal is used to push file lists from the backend to the
        session.

        :param package_id:
            The Package ID that called the method.
        :param file_list:
            The file list, with each file separated with ;.
        """
        pklog.debug("Emitting Files signal: %s, %s", package_id, file_list)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ssssssssssss")
    def UpdateDetail(self, package_id, updates, obsoletes, vendor_url,
                     bugzilla_url, cve_url, restart, update_text, changelog,
                     state, issued, updated):
        """This signal is sent when more details are required about a
        specific update.

        :param package_id: The package ID
        :param updates:
            A list of package_id's that are to be updated, separated by
            &. This odd delimiter was chosen as \\t is already being used
            in the spawned backends, and & is a banned character in a
            package_id.
        :param obsoletes:
            A list of package_id's that are to be obsoleted, separated by &
        :param vendor_url:
            A URL with more details on the update, e.g. a page with more
            information on the update. The format of this command should
            be http://www.foo.org/page.html?4567;Update to SELinux
        :param bugzilla_url:
            A bugzilla URL with more details on the update. If no URL is
            available then this field should be left empty.
        :param cve_url:
            A CVE URL with more details on the security advisory.
        :param restart:
            A valid restart type, e.g. system.
        :param update_text:
            The update text describing the update. If formatting is required,
            then markdown syntax should be used, e.g. This is **critically**
            important.
        :param changelog:
            The ChangeLog text describing the changes since the last version.
        :param state:
            The state of the update, e.g. stable or testing.
        :param issued:
            The ISO8601 encoded date that the update was issued.
        :param updated:
            The ISO8601 encoded date that the update was updated.
        """
        pklog.debug("Emmitting UpdateDetail signal for %s", package_id)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="sss")
    def Package(self, info, package_id, summary):
        """This signal allows the backend to communicate packages to the
        session.

        If updating, as packages are updated then emit them to the screen.
        This allows a summary to be presented after the transaction.
        When returning results from a search always return installed
        before available for the same package name.

        :param info: A valid info string enumerated type
        :param package_id: This identifier is of the form
            name;version;arch;data in a single string and is meant to
            represent a single package unique across all local and remote
            data stores. For a remote, not-installed package the data
            field should be set as the repository identifier or repository
            name. The data field for an installed package must be prefixed
            with installed as this is used to identify which packages are
            installable or installed in the client tools. As a special
            extension, if the package manager is able to track which
            repository a package was originally installed from, then the data
            field can be set to installed:REPO-NAME which allows the frontend
            client to advise the user of the package origin. The data field
            for a non-installed local package must be local as this signifies
            a repository name is not available and that package resides
            locally on the client system rather than in any specific
            repository.
        :param summary: The one line package summary, e.g. Clipart for
            OpenOffice
        """
        # Only the first 10 chars of the summary are logged to keep the
        # debug output short.
        pklog.debug("Emmitting Package signal: info=%s id=%s summary='%s'",
                    info, package_id, summary[:10])
# pylint: disable-msg=C0103,C0322
@dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
signature="sss")
def DistroUpgrade(self, type, name, summary):
"""This signal allows the backend to communicate distribution upgrades
to the session.
:param type: A valid upgrade string enumerated type, e.g. stable
or unstable
:param name: The short name of the distribution, e.g. Fedora Core
10 RC1
:param summary: The multi-line description of the release
"""
pass
    # pylint: disable-msg=C0103,C0322
    @dbus.service.signal(dbus_interface=PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         signature="ss")
    def RequireRestart(self, restart_type, package_id):
        """This signal is sent when the session client should notify the user
        that a restart is required to get all changes into effect.

        :param restart_type:
            One of system, application or session
        :param package_id:
            The Package ID of the package triggering the restart
        """
        pklog.debug("Emitting RequireRestart signal: %s, %s",
                    restart_type, package_id)
# METHODS
# pylint: disable-msg=C0103,C0322
@dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="")
def SetHints(self, hints):
"""This method allows the calling session to set transaction hints
for the package manager which can change as the transaction runs.
This method can be sent before the transaction has been run or
whilst it is running. There is no limit to the number of times
this method can be sent, although some backends may only use the
values that were set before the transaction was started.
Each parameter value is optional.
:param hints: The values as an array of strings, for example
['locale=en_GB.utf8','interactive=false','cache-age=3600']
"""
for hint in hints:
key, value = hint.split("=", 1)
if key not in ["locale", "idle", "background", "interactive",
"cache-age", "frontend-socket"]:
raise Exception("Invalid option %s" % key)
self.hints[key] = value
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="", out_signature="",
                          sender_keyword="sender")
    def Cancel(self, sender):
        """This method cancels a transaction that is already running.

        If no underlying aptdaemon transaction exists yet this is a
        silent no-op (returns None instead of a deferred).
        """
        if self.trans:
            return self.trans._cancel(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="asbb", out_signature="",
                          sender_keyword="sender")
    def RemovePackages(self, package_ids, allow_deps, autoremove, sender):
        """This method removes packages from the local system.

        This method typically emits Progress, Status and Error and Package.
        Package enumerated types should be downloading, updating,
        installing or removing.

        :param package_ids: An array of package IDs.
        :param allow_deps:
            Either true or false. If true allow other packages to be removed
            with the package, but false should cause the script to abort if
            other packages are dependent on the package.
        :param autoremove:
            Either true or false. This option is only really interesting on
            embedded devices with a limited amount of flash storage. It
            suggests to the packagekit backend that dependencies installed at
            the same time as the package should also be removed if they are not
            required by anything else. For instance, if you install OpenOffice,
            it might download libneon as a dependency. When auto_remove is set
            to true, and you remove OpenOffice then libneon will also get
            removed automatically.
        """
        pklog.debug("RemovePackages() was called")
        self.role = pk_enums.ROLE_REMOVE_PACKAGES
        # The actual work happens in the deferred helper below.
        return self._remove_packages(package_ids, allow_deps, autoremove,
                                     sender)
@inline_callbacks
def _remove_packages(self, package_ids, allow_deps, autoremove, sender):
self.trans = self._get_merged_trans(aptd_enums.ROLE_REMOVE_PACKAGES,
pkg_ids=package_ids,
pkg_type=aptd_enums.PKGS_REMOVE)
yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
"RemoveObsoletedDepends", autoremove,
sender)
try:
yield self.trans._simulate(sender)
except aptdameon.errors.TransactionFailed:
raise StopIteration
for pkgs in self.trans.depends:
if pkgs:
error_code = packagekit.errors.ERROR_DEP_RESOLUTION_FAILED
self.trans.pktrans.ErrorCode(error_code,
"Would change additional packages")
self.trans.pktrans.exit = pk_enums.EXIT_FAILED
yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="asb", out_signature="",
                         sender_keyword="sender")
    def SimulateRemovePackages(self, package_ids, autoremove, sender):
        """This method simulates a package removal emitting packages
        required to be installed, removed, updated, reinstalled,
        downgraded, obsoleted or untrusted. The latter is used to present
        the user untrusted packages that are about to be installed.

        This method typically emits Error and Package.

        :param package_ids: An array of package IDs.
        :param autoremove:
            Either true or false. This option is only really interesting on
            embedded devices with a limited amount of flash storage. It
            suggests to the packagekit backend that dependencies installed at
            the same time as the package should also be removed if they are not
            required by anything else. For instance, if you install OpenOffice,
            it might download libneon as a dependency. When auto_remove is set
            to true, and you remove OpenOffice then libneon will also get
            removed automatically.
        """
        pklog.debug("SimulateRemovePackages() was called")
        # Run the deferred helper from the main loop when idle.
        GObject.idle_add(defer_idle, self._simulate_remove_packages,
                         package_ids, autoremove, sender)
    @inline_callbacks
    def _simulate_remove_packages(self, package_ids, autoremove, sender):
        """Deferred worker for SimulateRemovePackages().

        Builds the removal transaction, applies the autoremove setting and
        emits one Package signal per simulated change.
        """
        self.role = pk_enums.ROLE_SIMULATE_REMOVE_PACKAGES
        self.status = pk_enums.STATUS_DEP_RESOLVE
        self.trans = self._get_merged_trans(aptd_enums.ROLE_REMOVE_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_REMOVE)
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "RemoveObsoletedDepends", autoremove,
                                       sender)
        yield self._simulate_and_emit_packages(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="bas", out_signature="",
                          sender_keyword="sender")
    def UpdatePackages(self, only_trusted, package_ids, sender):
        """This method updates existing packages on the local system.

        The installer should always update extra packages automatically
        to fulfil dependencies.

        This method typically emits Progress, Status and Error and Package.

        :param only_trusted:
            If the transaction is only allowed to install trusted packages.
            Unsigned packages should not be installed if this parameter is
            TRUE. If this method can only install trusted packages, and
            the packages are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after
            gaining further authentication.
        :param package_ids: An array of package IDs.
        """
        pklog.debug("UpdatePackages() was called")
        return self._update_packages(only_trusted, package_ids, sender)
    @inline_callbacks
    def _update_packages(self, only_trusted, package_ids, sender):
        """Deferred worker for UpdatePackages().

        Maps the PackageKit "only_trusted" flag to aptdaemon's inverse
        AllowUnauthenticated property before running the transaction.
        """
        self.role = pk_enums.ROLE_UPDATE_PACKAGES
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPGRADE_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_UPGRADE)
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", not only_trusted,
                                       sender)
        yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="as", out_signature="",
                         sender_keyword="sender")
    def SimulateUpdatePackages(self, package_ids, sender):
        """This method simulates a package update emitting packages
        required to be installed, removed, updated, reinstalled,
        downgraded, obsoleted or untrusted. The latter is used to present
        the user untrusted packages that are about to be installed.

        This method typically emits Error and Package.

        :param package_ids: An array of package IDs.
        """
        pklog.debug("SimulateUpdatePackages() was called")
        self.role = pk_enums.ROLE_SIMULATE_UPDATE_PACKAGES
        # Run the deferred helper from the main loop when idle.
        GObject.idle_add(defer_idle, self._simulate_update_packages,
                         package_ids, sender)
    @inline_callbacks
    def _simulate_update_packages(self, package_ids, sender):
        """Deferred worker for SimulateUpdatePackages().

        Allows unauthenticated packages since a simulation never commits
        anything, then emits one Package signal per simulated change.
        """
        self.status = pk_enums.STATUS_RUNNING
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPGRADE_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_UPGRADE)
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", True, sender)
        yield self._simulate_and_emit_packages(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="bas", out_signature="",
                          sender_keyword="sender")
    def InstallPackages(self, only_trusted, package_ids, sender):
        """This method installs new packages on the local system.

        The installer should always install extra packages automatically
        as the user could call GetDepends prior to the install if a
        confirmation is required in the UI.

        This method typically emits Progress, Status and Error and Package.
        Package enumerated types should be downloading, updating,
        installing or removing.

        :param only_trusted:
            If the transaction is only allowed to install trusted packages.
            Unsigned packages should not be installed if this parameter is
            TRUE. If this method can only install trusted packages, and
            the packages are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after
            gaining further authentication.
        :param package_ids: An array of package IDs.
        """
        pklog.debug("InstallPackages() was called")
        self.role = pk_enums.ROLE_INSTALL_PACKAGES
        return self._install_packages(only_trusted, package_ids, sender)
    @inline_callbacks
    def _install_packages(self, only_trusted, package_ids, sender):
        """Deferred worker for InstallPackages().

        Maps the PackageKit "only_trusted" flag to aptdaemon's inverse
        AllowUnauthenticated property before running the transaction.
        """
        self.trans = self._get_merged_trans(aptd_enums.ROLE_INSTALL_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_INSTALL)
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", not only_trusted,
                                       sender)
        yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="as", out_signature="",
                         sender_keyword="sender")
    def SimulateInstallPackages(self, package_ids, sender):
        """This method simulates a package installation emitting packages
        required to be installed, removed, updated, reinstalled, downgraded,
        obsoleted or untrusted. The latter is used to present the user
        untrusted packages that are about to be installed.

        This method typically emits Error and Package.

        :param package_ids: An array of package IDs.
        """
        pklog.debug("SimulateInstallPackages() was called")
        self.role = pk_enums.ROLE_SIMULATE_INSTALL_PACKAGES
        # Run the deferred helper from the main loop when idle.
        GObject.idle_add(defer_idle, self._simulate_install_packages,
                         package_ids, sender)
    @inline_callbacks
    def _simulate_install_packages(self, package_ids, sender):
        """Deferred worker for SimulateInstallPackages().

        Allows unauthenticated packages since a simulation never commits
        anything, then emits one Package signal per simulated change.
        """
        self.status = pk_enums.STATUS_RUNNING
        self.trans = self._get_merged_trans(aptd_enums.ROLE_INSTALL_PACKAGES,
                                            pkg_ids=package_ids,
                                            pkg_type=aptd_enums.PKGS_INSTALL)
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", True, sender)
        yield self._simulate_and_emit_packages(sender)
@inline_callbacks
def _simulate_and_emit_packages(self, sender, update_info=None):
try:
yield self.trans._simulate(sender)
except:
raise StopIteration
for pkg in self.trans.depends[aptd_enums.PKGS_INSTALL]:
self.Package(pk_enums.INFO_INSTALLING,
get_pk_package_id(pkg), "")
for pkg in self.trans.depends[aptd_enums.PKGS_REINSTALL]:
self.Package(pk_enums.INFO_REINSTALLING,
get_pk_package_id(pkg, "installed"), "")
for pkg in self.trans.depends[aptd_enums.PKGS_REMOVE]:
self.Package(pk_enums.INFO_REMOVING,
get_pk_package_id(pkg, "installed"), "")
for pkg in self.trans.depends[aptd_enums.PKGS_PURGE]:
self.Package(pk_enums.INFO_REMOVING,
get_pk_package_id(pkg, "installed"), "")
for pkg in self.trans.depends[aptd_enums.PKGS_UPGRADE]:
self.Package(update_info or pk_enums.INFO_UPDATING,
get_pk_package_id(pkg, None), "")
for pkg in self.trans.depends[aptd_enums.PKGS_DOWNGRADE]:
self.Package(pk_enums.INFO_DOWNGRADING,
get_pk_package_id(pkg), "")
for pkg in self.trans.depends[aptd_enums.PKGS_KEEP]:
self.Package(pk_enums.INFO_BLOCKED,
get_pk_package_id(pkg), "")
self.status = pk_enums.STATUS_FINISHED
self.Finished(pk_enums.EXIT_SUCCESS, 0)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="b", out_signature="",
                          sender_keyword="sender")
    def RefreshCache(self, force, sender):
        """This method should fetch updated meta-data for all enabled
        repositories.

        When fetching each software source, ensure to emit RepoDetail for
        the current source to give the user interface some extra details.
        Be sure to have the "enabled" field set to true, otherwise you
        wouldn't be fetching that source.

        This method typically emits Progress, Error and RepoDetail.

        :param force: If the caches should be cleaned and reloaded even if
            there is valid, up to date data.
            NOTE(review): "force" is not forwarded to the aptdaemon
            transaction -- confirm the cache is always refreshed.
        """
        pklog.debug("RefreshCache() was called")
        self.role = pk_enums.ROLE_REFRESH_CACHE
        return self._refresh_cache(force, sender)
    @inline_callbacks
    def _refresh_cache(self, force, sender):
        """Deferred worker for RefreshCache().

        NOTE(review): the "force" argument is accepted but unused here --
        confirm aptdaemon's UPDATE_CACHE role makes it redundant.
        """
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPDATE_CACHE,
                                            kwargs={"sources_list": None})
        yield self.trans._run(sender)
    # pylint: disable-msg=C0103,C0322
    @dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                          in_signature="b", out_signature="",
                          sender_keyword="sender")
    def UpdateSystem(self, only_trusted, sender):
        """This method updates all packages on the system to their newest
        versions.

        The installer should update all the updateable packages on the
        system, including automatically installing any new packages that
        are needed for dependencies.

        :param only_trusted:
            If the transaction is only allowed to install trusted packages.
            Unsigned packages should not be installed if this parameter is
            TRUE. If this method can only install trusted packages, and
            the packages are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after
            gaining further authentication.
        """
        pklog.debug("UpdateSystem() was called")
        return self._update_system(only_trusted, sender)
    @inline_callbacks
    def _update_system(self, only_trusted, sender):
        """Deferred worker for UpdateSystem().

        Runs a full (non-safe-mode) system upgrade, mapping the PackageKit
        "only_trusted" flag to aptdaemon's inverse AllowUnauthenticated.
        """
        self.role = pk_enums.ROLE_UPDATE_SYSTEM
        self.trans = self._get_merged_trans(aptd_enums.ROLE_UPGRADE_SYSTEM,
                                            kwargs={"safe_mode": False})
        yield self.trans._set_property(APTDAEMON_TRANSACTION_DBUS_INTERFACE,
                                       "AllowUnauthenticated", not only_trusted,
                                       sender)
        yield self.trans._run(sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="",
sender_keyword="sender")
def GetUpdateDetail(self, package_ids, sender):
"""This method returns details about a specific update.
This method typically emits UpdateDetail and Error
:param package_ids: An array of package IDs.
"""
pklog.debug("GetUpdateDetail() was called")
self.role = pk_enums.ROLE_GET_UPDATE_DETAIL
kwargs = {"package_ids": package_ids}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="s", out_signature="",
sender_keyword="sender")
def GetUpdates(self, filter, sender):
"""This method should return a list of packages that are installed
and are upgradable. It should only return the newest update for
each installed package.
This method typically emits Progress, Error and Package.
:param filter: A correct filter, e.g. none or installed;~devel
"""
pklog.debug("GetUpdates() was called")
self.role = pk_enums.ROLE_GET_UPDATES
kwargs = {"filters": filter.split(";")}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="", out_signature="",
                         sender_keyword="sender")
    def GetDistroUpgrades(self, sender):
        """This method should return a list of distribution upgrades that are
        available. It should not return updates, only major upgrades.

        This method typically emits DistroUpgrade, Error
        """
        pklog.debug("GetDistroUpgrades() was called")
        self.role = pk_enums.ROLE_GET_DISTRO_UPGRADES
        self.status = pk_enums.STATUS_RUNNING
        # Run the meta-release check from the main loop when idle.
        GObject.idle_add(defer_idle, self._get_distro_upgrades)
def _get_distro_upgrades(self):
#FIXME: Should go into the worker after the threading branch is merged
# It allows to run a nested loop until the download is finished
self.allow_cancel = False
self.percentage = 101
self.status = pk_enums.STATUS_DOWNLOAD_UPDATEINFO
if META_RELEASE_SUPPORT == False:
self.ErrorCode(pk_enums.ERROR_INTERNAL_ERROR,
"Please make sure that update-manager-core is"
"correctly installed.")
self.exit = pk_enums.EXIT_FAILED
return
#FIXME Evil to start the download during init
meta_release = GMetaRelease()
meta_release.connect("download-done",
self._on_distro_upgrade_download_done)
def _on_distro_upgrade_download_done(self, meta_release):
#FIXME: Add support for description
if meta_release.new_dist != None:
self.DistroUpgrade("stable",
"%s %s" % (meta_release.new_dist.name,
meta_release.new_dist.version),
"The latest stable release")
self.exit = pk_enums.EXIT_SUCCESS
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def Resolve(self, filter, packages, sender):
"""This method turns a single package name into a package_id suitable
for the other methods.
If the package is a fully formed package_id, then this should be
treated as an exact package match. This is useful to find the summary
or installed status of a package_id returned from other methods.
This method typically emits Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param packages:
An array of package names, e.g. scribus-clipart. The package
names are case sensitive, so for instance: Resolve('Packagekit')
would not match PackageKit. As a special case, if Resolve() is
called with a name prefixed with @ then this should be treated as
a category, for example: @web-development. In this instance, a
meta-package should be emitted, for example:
web-development;;;meta with the correct installed status and
summary for the category.
"""
pklog.debug("Resolve() was called")
self.role = pk_enums.ROLE_RESOLVE
kwargs = {"filters": filter.split(";"), "packages": packages}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="s", out_signature="",
sender_keyword="sender")
def GetPackages(self, filter, sender):
"""This method returns all the packages without a search term.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
"""
pklog.debug("GetPackages() was called")
self.role = pk_enums.ROLE_GET_PACKAGES
kwargs = {"filters": filter.split(";")}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="",
sender_keyword="sender")
def GetDetails(self, package_ids, sender):
"""This method should return all the details about a specific
package_id.
This method typically emits Progress, Status and Error and Details.
:param package_ids: An array of package IDs.
"""
pklog.debug("GetDetails() was called")
self.role = pk_enums.ROLE_GET_DETAILS
kwargs = {"package_ids": package_ids}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="as", out_signature="",
sender_keyword="sender")
def GetFiles(self, package_ids, sender):
"""This method should return the file list of the package_id.
This method typically emits Progress, Status and Error and Files.
:param package_ids: An array of package IDs.
"""
pklog.debug("GetFiles() was called")
self.role = pk_enums.ROLE_GET_FILES
kwargs = {"package_ids": package_ids}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchFiles(self, filter, values, sender):
"""This method searches for files on the local system and files in
available packages.
This should search for files. This should allow an application to
find out what package owns a file on the system.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
A filename or fully qualified path and filename on the system.
If the search term begins with a / it will be assumed the entire
path has been given and only packages that contain this exact
path and filename will be returned. If the search term does not
start with / then it should be treated as a single filename,
which can be in any directory. The search is case sensitive,
and should not be escaped or surrounded in quotes.
"""
pklog.debug("SearchFiles() was called")
self.role = pk_enums.ROLE_SEARCH_FILE
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchDetails(self, filter, values, sender):
"""This method allows deeper searching than SearchName().
Do not refresh the package cache. This should be fast. This is very
similar to search-name. This should search as much data as possible,
including, if possible repo names, package summaries, descriptions,
licenses and URLs.
Try to emit installed before available packages first, as it allows
the client program to perform the GUI filtering and matching whilst
the daemon is running the transaction.
If the backend includes installed and available versions of the same
package when searching then the available version will have to be
filtered in the backend.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
A single word search term with no wildcard chars. The search term
can contain many words separated by spaces. In this case, the
search operator is AND. For example, search of gnome power should
returns gnome-power-manager but not gnomesword or powertop.
The search should not be treated as case sensitive.
"""
pklog.debug("SearchDetails() was called")
self.role = pk_enums.ROLE_SEARCH_DETAILS
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchGroups(self, filter, values, sender):
"""This method returns packages from a given group enumerated type.
Do not refresh the package cache. This should be fast.
Try to emit installed before available packages first, as it
allows the client program to perform the GUI filtering and matching
whilst the daemon is running the transaction.
If the backend includes installed and available versions of the same
package when searching then the available version will have to be
filtered in the backend.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
An enumerated group type, or unknown. The search cannot contain
spaces. The following recommendations are made below: If the values
strings are prefixed with category: then the request is treated
as a 'category search', for example: category:web-development.
Note: the old nomenclature for a 'category search' suggested using
a @ prefix for the values options. This is still supported, and
backends should continue to support category searches like
@web-development. If the values strings are prefixed with
repo: then the request is treated as a 'repository search', for
example: repo:fedora-debuginfo. In this instance all packages that
were either installed from, or can be installed from the
fedora-debuginfo source would be returned.
"""
pklog.debug("SearchGroups() was called")
self.role = pk_enums.ROLE_SEARCH_GROUP
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sas", out_signature="",
sender_keyword="sender")
def SearchNames(self, filter, values, sender):
"""This method searches the package database by package name.
Try to emit installed before available packages first, as it
allows the client program to perform the GUI filtering and matching
whilst the daemon is running the transaction.
If the backend includes installed and available versions of the same
package when searching then the available version will have to be
filtered in the backend.
The search methods should return all results in all repositories.
This may mean that multiple versions of package are returned. If this
is not what is wanted by the client program, then the newest filter
should be used.
This method typically emits Progress, Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param values:
A single word search term with no wildcard chars. The search term
can contain many words separated by spaces. In this case, the
search operator is AND. For example, search of gnome power should
returns gnome-power-manager but not gnomesword or powertop.
The search should not be treated as case sensitive.
"""
pklog.debug("SearchNames() was called")
self.role = pk_enums.ROLE_SEARCH_NAME
kwargs = {"filters": filter.split(";"),
"values": values}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="s", out_signature="",
                         sender_keyword="sender")
    def AcceptEula(self, eula_id, sender):
        """This method allows the user to accept a end user licence agreement.

        Not implemented by this backend; the transaction fails with
        not-implemented.

        :param eula_id: A valid EULA ID
        """
        self.role = pk_enums.ROLE_ACCEPT_EULA
        GObject.idle_add(self._fail_not_implemented)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="bas", out_signature="",
sender_keyword="sender")
def DownloadPackages(self, store_in_cache, package_ids, sender):
"""This method downloads packages into a temporary directory.
This method should emit one Files signal for each package that
is downloaded, with the file list set as the name of the complete
downloaded file and directory, so for example:
DownloadPackages('hal;0.1.2;i386;fedora',
'hal-info;2009-09-07;no-arch;updates') should send two signals,
e.g. Files('hal;0.1.2;i386;fedora', '/tmp/hal-0.1.2.i386.rpm')
and Files('hal-info;2009-09-07;no-arch;updates',
'/tmp/hal-info-2009-09-07.noarch.rpm').
:param store_in_cache:
If the downloaded files should be stored in the system package
cache rather than copied into a newly created directory. See the
developer docs for more details on how this is supposed to work.
:param package_ids: An array of package IDs.
"""
self.role = pk_enums.ROLE_DOWNLOAD_PACKAGES
kwargs = {"store_in_cache": store_in_cache,
"package_ids": package_ids}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="u", out_signature="",
                         sender_keyword="sender")
    def GetOldTransactions(self, number, sender):
        """This method allows a client to view details for old transactions.

        Not implemented by this backend; the transaction fails with
        not-implemented.

        :param number:
            The number of past transactions, or 0 for all known transactions.
        """
        self.role = pk_enums.ROLE_GET_OLD_TRANSACTIONS
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="s", out_signature="",
                         sender_keyword="sender")
    def GetRepoList(self, filter, sender):
        """This method returns the list of repositories used in the system.

        This method should emit RepoDetail. Not implemented by this
        backend; the transaction fails with not-implemented.

        :param filter: A correct filter, e.g. none or installed;~devel
        """
        self.role = pk_enums.ROLE_GET_REPO_LIST
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="as", out_signature="",
                         sender_keyword="sender")
    def SimulateInstallFiles(self, full_paths, sender):
        """This method simulates a package file installation emitting packages
        required to be installed, removed, updated, reinstalled, downgraded,
        obsoleted or untrusted. The latter is used to present the user
        untrusted packages that are about to be installed.

        This method typically emits Error and Package. Not implemented by
        this backend; the transaction fails with not-implemented.

        :param full_paths:
            An array of full path and filenames to packages.
        """
        self.role = pk_enums.ROLE_SIMULATE_INSTALL_FILES
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="bas", out_signature="",
                         sender_keyword="sender")
    def InstallFiles(self, only_trusted, full_paths, sender):
        """This method installs local package files onto the local system.

        The installer should always install extra dependent packages
        automatically.
        This method typically emits Progress, Status and Error and Package.
        Package enumerated types should be downloading, updating, installing
        or removing.

        Not implemented by this backend: _fail_not_implemented() emits
        ErrorCode(not-supported).

        :param only_trusted:
            If the transaction is only allowed to install trusted files.
            Unsigned files should not be installed if this parameter is TRUE.
            If this method can only install trusted files, and the files
            are unsigned, then the backend will send a
            ErrorCode(missing-gpg-signature). On receiving this error, the
            client may choose to retry with only_trusted FALSE after gaining
            further authentication.
        :param full_paths: An array of full path and filenames to packages.
        """
        self.role = pk_enums.ROLE_INSTALL_FILES
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="sss", out_signature="",
                         sender_keyword="sender")
    def InstallSignature(self, sig_type, key_id, package_id, sender):
        """This method allows us to install new security keys.

        Not implemented by this backend: _fail_not_implemented() emits
        ErrorCode(not-supported).

        :param sig_type: A key type, e.g. gpg
        :param key_id: A key ID, e.g. BB7576AC
        :param package_id:
            A PackageID for the package that the user is trying to install
        """
        self.role = pk_enums.ROLE_INSTALL_SIGNATURE
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="sss", out_signature="",
                         sender_keyword="sender")
    def RepoSetData(self, repo_id, parameter, value, sender):
        """This method allows arbitrary data to be passed to the repository
        handler.

        Not implemented by this backend: _fail_not_implemented() emits
        ErrorCode(not-supported).

        :param repo_id:
            A repository identifier, e.g. fedora-development-debuginfo
        :param parameter:
            The backend specific value, e.g. set-download-url.
        :param value:
            The backend specific value, e.g. http://foo.bar.org/baz.
        """
        self.role = pk_enums.ROLE_REPO_SET_DATA
        GObject.idle_add(self._fail_not_implemented)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="sb", out_signature="",
                         sender_keyword="sender")
    def RepoEnable(self, repo_id, enabled, sender):
        """This method enables the repository specified.

        Not implemented by this backend: _fail_not_implemented() emits
        ErrorCode(not-supported).

        :param repo_id:
            A repository identifier, e.g. fedora-development-debuginfo
        :param enabled: true if enabled, false if disabled.
        """
        self.role = pk_enums.ROLE_REPO_ENABLE
        GObject.idle_add(self._fail_not_implemented)
@dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="s", out_signature="",
sender_keyword="sender")
def Rollback(self, transaction_id, sender):
"""This method rolls back the package database to a previous transaction.
:param transaction_id: A valid transaction ID.
"""
self.role = pk_enums.ROLE_GET_CATEGORIES
GObject.idle_add(self._fail_not_implemented)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="ssas", out_signature="",
sender_keyword="sender")
def WhatProvides(self, filter, type, values, sender):
"""This method returns packages that provide the supplied attributes.
This method is useful for finding out what package(s) provide a
modalias or GStreamer codec string.
This method typically emits Progress, Status and Error and Package.
Package enumerated types should be available or installed.
:param filter:
A correct filter, e.g. none or installed;~devel
:param type:
A PkProvideType, e.g. PK_PROVIDES_ENUM_CODEC.
:param values:
The data to send to the backend to get the packages. Note: This
is backend specific.
"""
self.role = pk_enums.ROLE_WHAT_PROVIDES
kwargs = {"filters": filter.split(";"),
"type": type,
"values": values}
return self._run_query(kwargs, sender)
    @dbus.service.method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
                         in_signature="", out_signature="",
                         sender_keyword="sender")
    def GetCategories(self, sender):
        """This method returns the collection categories.

        Not implemented by this backend: _fail_not_implemented() emits
        ErrorCode(not-supported).
        """
        self.role = pk_enums.ROLE_GET_CATEGORIES
        GObject.idle_add(self._fail_not_implemented)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sasb", out_signature="",
sender_keyword="sender")
def GetRequires(self, filter, package_ids, recursive, sender):
"""This method returns packages that depend on this package. This is
useful to know, as if package_id is being removed, we can warn the
user what else would be removed.
This method typically emits Progress, Status and Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param package_ids: An array of package IDs.
:param recursive:
Either true or false. If yes then the requirements should be
returned for all packages returned. This means if
gnome-power-manager depends on NetworkManager and NetworkManager
depends on HAL, then GetRequires on HAL should return both
gnome-power-manager and NetworkManager.
"""
self.role = pk_enums.ROLE_GET_REQUIRES
kwargs = {"filters": filter.split(";"),
"package_ids": package_ids,
"recursive": recursive}
return self._run_query(kwargs, sender)
@dbus_deferred_method(PACKAGEKIT_TRANS_DBUS_INTERFACE,
in_signature="sasb", out_signature="",
sender_keyword="sender")
def GetDepends(self, filter, package_ids, recursive, sender):
"""This method returns packages that this package depends on.
This method typically emits Progress, Status and Error and Package.
Package enumerated types should be available or installed.
:param filter: A correct filter, e.g. none or installed;~devel
:param package_ids: An array of package IDs.
:param recursive:
Either true or false. If yes then the requirements should be
returned for all packages returned. This means if
gnome-power-manager depends on NetworkManager and NetworkManager
depends on HAL, then GetDepends on gnome-power-manager should
return both HAL and NetworkManager.
"""
self.role = pk_enums.ROLE_GET_DEPENDS
kwargs = {"filters": filter.split(";"),
"package_ids": package_ids,
"recursive": recursive}
return self._run_query(kwargs, sender)
# HELPERS
    def _fail_not_implemented(self):
        """Abort the transaction with ErrorCode(not-supported).

        Scheduled via GObject.idle_add() by the unimplemented method stubs
        above; returns False so the idle callback is run only once.
        """
        self.ErrorCode(pk_enums.ERROR_NOT_SUPPORTED, "")
        self.exit = pk_enums.EXIT_FAILED
        return False
def _get_properties(self, iface):
"""Helper to get the properties of a D-Bus interface."""
if iface == PACKAGEKIT_TRANS_DBUS_INTERFACE:
return {"Role": dbus.String(self.role),
"Status": dbus.String(self.status),
"LastPackage": dbus.String(self.last_package),
"Uid": dbus.UInt32(self.uid),
"Percentage": dbus.UInt32(self.percentage),
"Subpercentage": dbus.UInt32(self.subpercentage),
"AllowCancel": dbus.Boolean(self.allow_cancel),
"CallerActive": dbus.Boolean(self.caller_active),
"ElapsedTime": dbus.UInt32(self.elapsed_time),
"RemainingTime": dbus.UInt32(self.remaining_time),
"Speed": dbus.UInt32(self.speed)
}
else:
return {}
    @inline_callbacks
    def _run_query(self, kwargs, sender):
        """Create and run a merged PK query transaction (coroutine).

        Stores the transaction on self.trans so it can only run once; the
        keyword arguments are forwarded to the worker's query() dispatcher.
        """
        self.trans = self._get_merged_trans(aptd_enums.ROLE_PK_QUERY,
                                            kwargs=kwargs)
        yield self.trans._run(sender)
def _get_merged_trans(self, role, pkg_ids=None, pkg_type=None, kwargs=None):
if pkg_ids:
packages = [[], [], [], [], [], []]
packages[pkg_type] = [get_aptd_package_id(pkg) for pkg in pkg_ids]
else:
packages = None
if self.trans:
raise Exception("%s: Transaction may only run once." % \
pk_enums.ERROR_TRANSACTION_FAILED)
trans = MergedTransaction(self, role, self.queue,
packages=packages, kwargs=kwargs)
try:
trans._set_locale(self.hints["locale"])
except (KeyError, ValueError):
# If the locale isn't vaild or supported a ValueError will be raised
pass
try:
trans._set_debconf(self.hints["frontend-socket"])
except KeyError:
pass
self.queue.limbo[trans.tid] = trans
return trans
class PackageKitWorker(aptdaemon.worker.AptWorker):
    """Process PackageKit Query transactions."""
    # NOTE: the string above was previously placed after the assignment
    # below, where it was a no-op statement instead of the class docstring.
    # Lazily populated "what_provides" plugin registry, see what_provides().
    _plugins = None
def query(self, trans):
"""Run the worker"""
if trans.role != aptd_enums.ROLE_PK_QUERY:
raise TransactionFailed(aptd_enums.ERROR_UNKNOWN,
"The transaction doesn't seem to be "
"a query")
if trans.pktrans.role == pk_enums.ROLE_RESOLVE:
self.resolve(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_UPDATES:
self.get_updates(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_UPDATE_DETAIL:
self.get_update_detail(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_PACKAGES:
self.get_packages(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_FILES:
self.get_files(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_SEARCH_NAME:
self.search_names(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_SEARCH_GROUP:
self.search_groups(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_SEARCH_DETAILS:
self.search_details(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_SEARCH_FILE:
self.search_files(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_DEPENDS:
self.get_depends(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_REQUIRES:
self.get_requires(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_GET_DETAILS:
self.get_details(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_DOWNLOAD_PACKAGES:
self.download_packages(trans, **trans.kwargs)
elif trans.pktrans.role == pk_enums.ROLE_WHAT_PROVIDES:
self.what_provides(trans, **trans.kwargs)
else:
raise TransactionFailed(aptd_enums.ERROR_UNKNOWN,
"Role %s isn't supported",
trans.pktrans.role)
def search_files(self, trans, filters, values):
"""Implement org.freedesktop.PackageKit.Transaction.SearchFiles()
Works only for installed file if apt-file isn't installed.
"""
trans.progress = 101
result_names = set()
# Optionally make use of apt-file's Contents cache to search for not
# installed files. But still search for installed files additionally
# to make sure that we provide up-to-date results
if os.path.exists("/usr/bin/apt-file") and \
pk_enums.FILTER_INSTALLED not in filters:
#FIXME: Make use of rapt-file on Debian if the network is available
#FIXME: Show a warning to the user if the apt-file cache is several
# weeks old
pklog.debug("Using apt-file")
filenames_regex = []
for filename in values:
if filename.startswith("/"):
pattern = "^%s$" % filename[1:].replace("/", "\/")
else:
pattern = "\/%s$" % filename
filenames_regex.append(pattern)
cmd = ["/usr/bin/apt-file", "--regexp", "--non-interactive",
"--package-only", "find", "|".join(filenames_regex)]
pklog.debug("Calling: %s" % cmd)
apt_file = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = apt_file.communicate()
if apt_file.returncode == 0:
#FIXME: Actually we should check if the file is part of the
# candidate, e.g. if unstable and experimental are
# enabled and a file would only be part of the
# experimental version
result_names.update(stdout.split())
self._emit_visible_packages_by_name(trans, filters,
result_names)
else:
raise TransactionFailed(ERROR_INTERNAL_ERROR,
"%s %s" % (stdout, stderr))
# Search for installed files
filenames_regex = []
for filename in values:
if filename.startswith("/"):
pattern = "^%s$" % filename.replace("/", "\/")
else:
pattern = ".*\/%s$" % filename
filenames_regex.append(pattern)
files_pattern = re.compile("|".join(filenames_regex))
for pkg in self._iterate_packages():
if pkg.name in result_names:
continue
for installed_file in self._get_installed_files(pkg):
if files_pattern.match(installed_file):
self._emit_visible_package(trans, filters, pkg)
break
def search_groups(self, trans, filters, values):
"""Implement org.freedesktop.PackageKit.Transaction.SearchGroups()"""
#FIXME: Handle repo and category search
trans.progress = 101
for pkg in self._iterate_packages():
if self._get_package_group(pkg) in values:
self._emit_visible_package(trans, filters, pkg)
def search_names(self, trans, filters, values):
"""Implement org.freedesktop.PackageKit.Transaction.SearchNames()"""
def matches(searches, text):
for search in searches:
if not search in text:
return False
return True
pklog.info("Searching for package name: %s" % values)
trans.progress = 101
for pkg_name in self._cache.keys():
if matches(values, pkg_name):
self._emit_all_visible_pkg_versions(trans, filters,
self._cache[pkg_name])
def search_details(self, trans, filters, values):
"""Implement org.freedesktop.PackageKit.Transaction.SearchDetails()"""
trans.progress = 101
results = []
if XAPIAN_SUPPORT == True:
search_flags = (xapian.QueryParser.FLAG_BOOLEAN |
xapian.QueryParser.FLAG_PHRASE |
xapian.QueryParser.FLAG_LOVEHATE |
xapian.QueryParser.FLAG_BOOLEAN_ANY_CASE)
pklog.debug("Performing xapian db based search")
db = xapian.Database(XAPIAN_DB)
parser = xapian.QueryParser()
parser.set_default_op(xapian.Query.OP_AND)
query = parser.parse_query(u" ".join(values), search_flags)
enquire = xapian.Enquire(db)
enquire.set_query(query)
matches = enquire.get_mset(0, 1000)
for pkg_name in (match.document.get_data()
for match in enquire.get_mset(0,1000)):
if pkg_name in self._cache:
self._emit_visible_package(trans, filters,
self._cache[pkg_name])
else:
def matches(searches, text):
for search in searches:
if not search in text:
return False
return True
pklog.debug("Performing apt cache based search")
values = [val.lower() for val in values]
for pkg in self._iterate_packages():
txt = pkg.name
try:
txt += pkg.candidate.raw_description.lower()
txt += pkg.candidate._translated_records.long_desc.lower()
except AttributeError:
pass
if matches(values, txt):
self._emit_visible_package(trans, filters, pkg)
    def get_updates(self, trans, filters):
        """Only report updates which can be installed safely: Which can depend
        on the installation of additional packages but which don't require
        the removal of already installed packages or block any other update.
        """
        def succeeds_security_update(pkg):
            """
            Return True if an update succeeds a previous security update

            An example would be a package with version 1.1 in the security
            archive and 1.1.1 in the archive of proposed updates or the
            same version in both archives.
            """
            for version in pkg.versions:
                # Only check versions between the installed and the candidate
                if (pkg.installed and
                    apt_pkg.version_compare(version.version,
                                            pkg.installed.version) <= 0 and
                    apt_pkg.version_compare(version.version,
                                            pkg.candidate.version) > 0):
                    continue
                for origin in version.origins:
                    if origin.origin in ["Debian", "Ubuntu"] and \
                       (origin.archive.endswith("-security") or \
                        origin.label == "Debian-Security") and \
                       origin.trusted:
                        return True
            return False
        #FIXME: Implment the basename filter
        pklog.info("Get updates()")
        # NOTE(review): neighbouring methods set these on trans, not self --
        # confirm which object is intended before changing.
        self.cancellable = False
        self.progress = 101
        # Start with a safe upgrade (marks upgrades in the in-memory cache
        # only; nothing is installed here)
        self._cache.upgrade(dist_upgrade=True)
        for pkg in self._iterate_packages():
            if not pkg.is_upgradable:
                continue
            # This may occur on pinned packages which have been updated to
            # later version than the pinned one
            if not pkg.candidate.origins:
                continue
            if not pkg.marked_upgrade:
                #FIXME: Would be nice to all show why
                self._emit_package(trans, pkg, pk_enums.INFO_BLOCKED,
                                   force_candidate=True)
                continue
            # The update can be safely installed
            info = pk_enums.INFO_NORMAL
            # Detect the nature of the upgrade (e.g. security, enhancement)
            candidate_origin = pkg.candidate.origins[0]
            archive = candidate_origin.archive
            origin = candidate_origin.origin
            trusted = candidate_origin.trusted
            label = candidate_origin.label
            if origin in ["Debian", "Ubuntu"] and trusted == True:
                if archive.endswith("-security") or label == "Debian-Security":
                    info = pk_enums.INFO_SECURITY
                elif succeeds_security_update(pkg):
                    pklog.debug("Update of %s succeeds a security update. "
                                "Raising its priority." % pkg.name)
                    info = pk_enums.INFO_SECURITY
                elif archive.endswith("-backports"):
                    info = pk_enums.INFO_ENHANCEMENT
                elif archive.endswith("-updates"):
                    info = pk_enums.INFO_BUGFIX
            if origin in ["Backports.org archive"] and trusted == True:
                info = pk_enums.INFO_ENHANCEMENT
            self._emit_package(trans, pkg, info, force_candidate=True)
        self._emit_require_restart(trans)
    def _emit_require_restart(self, trans):
        """Emit RequireRestart(system) if a reboot is pending."""
        # Check for a system restart; is_reboot_required() is presumably
        # inherited from AptWorker -- not defined in this class.
        if self.is_reboot_required():
            trans.pktrans.RequireRestart(pk_enums.RESTART_SYSTEM, "")
def get_update_detail(self, trans, package_ids):
"""
Implement the {backend}-get-update-details functionality
"""
def get_bug_urls(changelog):
"""
Create a list of urls pointing to closed bugs in the changelog
"""
urls = []
for r in re.findall(MATCH_BUG_CLOSES_DEBIAN, changelog,
re.IGNORECASE | re.MULTILINE):
urls.extend([HREF_BUG_DEBIAN % bug for bug in \
re.findall(MATCH_BUG_NUMBERS, r)])
for r in re.findall(MATCH_BUG_CLOSES_UBUNTU, changelog,
re.IGNORECASE | re.MULTILINE):
urls.extend([HREF_BUG_UBUNTU % bug for bug in \
re.findall(MATCH_BUG_NUMBERS, r)])
return urls
def get_cve_urls(changelog):
"""
Create a list of urls pointing to cves referred in the changelog
"""
return map(lambda c: HREF_CVE % c,
re.findall(MATCH_CVE, changelog, re.MULTILINE))
pklog.info("Get update details of %s" % package_ids)
trans.progress = 0
trans.cancellable = False
trans.pktrans.status = pk_enums.STATUS_DOWNLOAD_CHANGELOG
total = len(package_ids)
count = 1
old_locale = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, "C")
for pkg_id in package_ids:
self._iterate_mainloop()
trans.progress = count * 100 / total
count += 1
pkg = self._get_package_by_id(pkg_id)
# FIXME add some real data
if pkg.installed.origins:
installed_origin = pkg.installed.origins[0].label
else:
installed_origin = ""
updates = "%s;%s;%s;%s" % (pkg.name, pkg.installed.version,
pkg.installed.architecture,
installed_origin)
obsoletes = ""
vendor_url = ""
restart = "none"
update_text = u""
state = ""
issued = ""
updated = ""
#FIXME: make this more configurable. E.g. a dbus update requires
# a reboot on Ubuntu but not on Debian
if pkg.name.startswith("linux-image-") or \
pkg.name in ["libc6", "dbus"]:
restart == pk_enums.RESTART_SYSTEM
changelog_dir = apt_pkg.config.find_dir("Dir::Cache::Changelogs")
if changelog_dir == "/":
changelog_dir = os.path.join(apt_pkg.config.find_dir("Dir::"
"Cache"),
"Changelogs")
filename = os.path.join(changelog_dir,
"%s_%s.gz" % (pkg.name,
pkg.candidate.version))
changelog_raw = ""
if os.path.exists(filename):
pklog.debug("Reading changelog from cache")
changelog_file = gzip.open(filename, "rb")
try:
changelog_raw = changelog_file.read().decode("UTF-8")
except:
pass
finally:
changelog_file.close()
if not changelog_raw:
pklog.debug("Downloading changelog")
changelog_raw = pkg.get_changelog()
# The internal download error string of python-apt ist not
# provided as unicode object
if not isinstance(changelog_raw, unicode):
changelog_raw = changelog_raw.decode("UTF-8")
# Cache the fetched changelog
if not os.path.exists(changelog_dir):
os.makedirs(changelog_dir)
# Remove old cached changelogs
pattern = os.path.join(changelog_dir, "%s_*" % pkg.name)
for old_changelog in glob.glob(pattern):
os.remove(os.path.join(changelog_dir, old_changelog))
changelog_file = gzip.open(filename, mode="wb")
try:
changelog_file.write(changelog_raw.encode("UTF-8"))
finally:
changelog_file.close()
# Convert the changelog to markdown syntax
changelog = u""
for line in changelog_raw.split("\n"):
if line == "":
changelog += " \n"
else:
changelog += u" %s \n" % line
if line.startswith(pkg.candidate.source_name):
match = re.match(r"(?P<source>.+) \((?P<version>.*)\) "
"(?P<dist>.+); urgency=(?P<urgency>.+)",
line)
update_text += u"%s\n%s\n\n" % (match.group("version"),
"=" * \
len(match.group("version")))
elif line.startswith(" "):
update_text += u" %s \n" % line
elif line.startswith(" --"):
#FIXME: Add %z for the time zone - requires Python 2.6
update_text += u" \n"
match = re.match("^ -- (?P<maintainer>.+) (?P<mail><.+>) "
"(?P<date>.+) (?P<offset>[-\+][0-9]+)$",
line)
if not match:
continue
try:
date = datetime.datetime.strptime(match.group("date"),
"%a, %d %b %Y "
"%H:%M:%S")
except ValueError:
continue
issued = date.isoformat()
if not updated:
updated = date.isoformat()
if issued == updated:
updated = ""
bugzilla_url = ";;".join(get_bug_urls(changelog))
cve_url = ";;".join(get_cve_urls(changelog))
trans.emit_update_detail(pkg_id, updates, obsoletes, vendor_url,
bugzilla_url, cve_url, restart,
update_text, changelog,
state, issued, updated)
locale.setlocale(locale.LC_TIME, old_locale)
def get_details(self, trans, package_ids):
"""Implement org.freedesktop.PackageKit.Transaction.GetDetails()"""
trans.progress = 101
for pkg_id in package_ids:
version = self._get_version_by_id(pkg_id)
#FIXME: We need more fine grained license information!
origins = version.origins
if (origins and
origins[0].component in ["main", "universe"] and
origins[0].origin in ["Debian", "Ubuntu"]):
license = "free"
else:
license = "unknown"
group = self._get_package_group(version.package)
trans.emit_details(pkg_id, license, group, version.description,
version.homepage, version.size)
def get_packages(self, trans, filters):
"""Implement org.freedesktop.PackageKit.Transaction.GetPackages()"""
pklog.info("Get all packages")
self.progress = 101
for pkg in self._iterate_packages():
if self._is_package_visible(pkg, filters):
self._emit_package(trans, pkg)
def resolve(self, trans, filters, packages):
"""Implement org.freedesktop.PackageKit.Transaction.Resolve()"""
pklog.info("Resolve()")
trans.status = aptd_enums.STATUS_QUERY
trans.progress = 101
self.cancellable = False
for name_raw in packages:
#FIXME: Python-apt doesn't allow unicode as key. See #542965
name = str(name_raw)
try:
# Check if the name is a valid package id
version = self._get_version_by_id(name)
except ValueError:
pass
else:
if self._package_is_visible(version.package, filters):
self._emit_pkg_version(trans, version)
continue
# The name seems to be a normal name
try:
self._emit_visible_package(trans, filters, self._cache[name])
except KeyError:
raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
"Package name %s could not be "
"resolved.", name)
def get_depends(self, trans, filters, package_ids, recursive):
"""Emit all dependencies of the given package ids.
Doesn't support recursive dependency resolution.
"""
def emit_blocked_dependency(base_dependency, pkg=None,
filters=""):
"""Send a blocked package signal for the given
apt.package.BaseDependency.
"""
if FILTER_INSTALLED in filters:
return
if pkg:
summary = pkg.candidate.summary
try:
filters.remove(FILTER_NOT_INSTALLED)
except ValueError:
pass
if not self._is_package_visible(pkg, filters):
return
else:
summary = u""
if base_dependency.relation:
version = "%s%s" % (base_dependency.relation,
base_dependency.version)
else:
version = base_dependency.version
trans.emit_package("%s;%s;;" % (base_dependency.name, version),
pk_enums.INFO_BLOCKED, summary)
def check_dependency(pkg, base_dep):
"""Check if the given apt.package.Package can satisfy the
BaseDepenendcy and emit the corresponding package signals.
"""
if not self._is_package_visible(pkg, filters):
return
if base_dep.version:
satisfied = False
# Sort the version list to check the installed
# and candidate before the other ones
ver_list = list(pkg.versions)
if pkg.installed:
ver_list.remove(pkg.installed)
ver_list.insert(0, pkg.installed)
if pkg.candidate:
ver_list.remove(pkg.candidate)
ver_list.insert(0, pkg.candidate)
for dep_ver in ver_list:
if apt_pkg.check_dep(dep_ver.version,
base_dep.relation,
base_dep.version):
self._emit_pkg_version(trans, dep_ver)
satisfied = True
break
if not satisfied:
emit_blocked_dependency(base_dep, pkg, filters)
else:
self._emit_package(trans, pkg)
# Setup the transaction
pklog.info("Get depends (%s,%s,%s)" % (filter, package_ids, recursive))
self.status = aptd_enums.STATUS_RESOLVING_DEP
trans.progress = 101
self.cancellable = True
dependency_types = ["PreDepends", "Depends"]
if apt_pkg.config["APT::Install-Recommends"]:
dependency_types.append("Recommends")
for id in package_ids:
version = self._get_version_by_id(id)
for dependency in version.get_dependencies(*dependency_types):
# Walk through all or_dependencies
for base_dep in dependency.or_dependencies:
if self._cache.is_virtual_package(base_dep.name):
# Check each proivider of a virtual package
for provider in \
self._cache.get_providing_packages(base_dep.name):
check_dependency(provider, base_dep)
elif base_dep.name in self._cache:
check_dependency(self._cache[base_dep.name], base_dep)
else:
# The dependency does not exist
emit_blocked_dependency(trans, base_dep, filters=filters)
def get_requires(self, trans, filters, package_ids, recursive):
"""Emit all packages which depend on the given ids.
Recursive searching is not supported.
"""
pklog.info("Get requires (%s,%s,%s)" % (filter, package_ids, recursive))
self.status = aptd_enums.STATUS_RESOLVING_DEP
self.progress = 101
self.cancellable = True
for id in package_ids:
version = self._get_version_by_id(id)
for pkg in self._iterate_packages():
if not self._is_package_visible(pkg, filters):
continue
if pkg.is_installed:
pkg_ver = pkg.installed
elif pkg.candidate:
pkg_ver = pkg.candidate
for dependency in pkg_ver.dependencies:
satisfied = False
for base_dep in dependency.or_dependencies:
if version.package.name == base_dep.name or \
base_dep.name in version.provides:
satisfied = True
break
if satisfied:
self._emit_package(trans, pkg)
break
    def download_packages(self, trans, store_in_cache, package_ids):
        """Implement the DownloadPackages functionality.

        Fetches each package's .deb and emits a Files signal with its
        destination path.  If store_in_cache is true the system package
        cache directory is used, otherwise a fresh temporary directory.
        """
        def get_download_details(ids):
            """Calculate the start and end point of a package download
            progress.

            Yields (id, version, start_percent, end_percent) tuples; the
            percentages partition 0-100 proportionally to package sizes.
            """
            total = 0
            downloaded = 0
            versions = []
            # Check if all ids are vaild and calculate the total download size
            for id in ids:
                pkg_ver = self._get_version_by_id(id)
                if not pkg_ver.downloadable:
                    raise TransactionFailed(aptd_enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
                                            "package %s isn't downloadable" % id)
                total += pkg_ver.size
                versions.append((id, pkg_ver))
            for id, ver in versions:
                start = downloaded * 100 / total
                end = start + ver.size * 100 / total
                yield id, ver, start, end
                downloaded += ver.size
        pklog.info("Downloading packages: %s" % package_ids)
        trans.status = aptd_enums.STATUS_DOWNLOADING
        trans.cancellable = True
        trans.progress = 10
        # Check the destination directory
        if store_in_cache:
            dest = apt_pkg.config.find_dir("Dir::Cache::archives")
        else:
            dest = tempfile.mkdtemp(prefix="aptdaemon-download")
        if not os.path.isdir(dest) or not os.access(dest, os.W_OK):
            raise TransactionFailed(aptd_enums.ERROR_INTERNAL_ERROR,
                                    "The directory '%s' is not writable" % dest)
        # Start the download
        for id, ver, start, end in get_download_details(package_ids):
            # Progress for this package is mapped onto [start, end]
            progress = DaemonAcquireProgress(trans, start, end)
            self._emit_pkg_version(trans, ver, pk_enums.INFO_DOWNLOADING)
            try:
                ver.fetch_binary(dest, progress)
            except Exception as error:
                raise TransactionFailed(aptd_enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
                                        str(error))
            else:
                trans.emit_files(id,
                                 os.path.join(dest,
                                              os.path.basename(ver.filename)))
                self._emit_pkg_version(trans, ver, pk_enums.INFO_FINISHED)
def get_files(self, trans, package_ids):
"""Emit the Files signal which includes the files included in a package
Apt only supports this for installed packages
"""
for id in package_ids:
pkg = self._get_package_by_id(id)
files = ";".join(self._get_installed_files(pkg))
trans.emit_files(id, files)
def what_provides(self, trans, filters, type, values):
"""Emit all dependencies of the given package ids.
Doesn't support recursive dependency resolution.
"""
self._init_plugins()
supported_type = False
# run plugins
for plugin in self._plugins.get("what_provides", []):
pklog.debug("calling what_provides plugin %s %s" % (str(plugin), str(filters)))
for search_item in values:
try:
for package in plugin(self._cache, type, search_item):
self._emit_visible_package(trans, filters, package)
supported_type = True
except NotImplementedError:
pass # keep supported_type as False
if not supported_type and type != pk_enums.PROVIDES_ANY:
# none of the plugins felt responsible for this type
raise TransactionFailed(aptd_enums.ERROR_NOT_SUPPORTED,
"Query type '%s' is not supported" % type)
# Helpers
def _get_id_from_version(self, version):
"""Return the package id of an apt.package.Version instance."""
if version.origins:
origin = version.origins[0].label
else:
origin = ""
if version.architecture == apt_pkg.config.find("APT::Architecture") or \
version.architecture == "all":
name = version.package.name
else:
name = version.package.name.split(":")[0]
id = "%s;%s;%s;%s" % (name, version.version,
version.architecture, origin)
return id
def _emit_package(self, trans, pkg, info=None, force_candidate=False):
"""
Send the Package signal for a given apt package
"""
if (not pkg.is_installed or force_candidate) and pkg.candidate:
self._emit_pkg_version(trans, pkg.candidate, info)
elif pkg.is_installed:
self._emit_pkg_version(trans, pkg.installed, info)
else:
pklog.debug("Package %s hasn't got any version." % pkg.name)
def _emit_pkg_version(self, trans, version, info=None):
"""Emit the Package signal of the given apt.package.Version."""
id = self._get_id_from_version(version)
section = version.section.split("/")[-1]
if not info:
if version == version.package.installed:
if section == "metapackages":
info = pk_enums.INFO_COLLECTION_INSTALLED
else:
info = pk_enums.INFO_INSTALLED
else:
if section == "metapackages":
info = pk_enums.INFO_COLLECTION_AVAILABLE
else:
info = pk_enums.INFO_AVAILABLE
trans.emit_package(info, id, version.summary)
def _emit_all_visible_pkg_versions(self, trans, filters, pkg):
"""Emit all available versions of a package."""
if self._is_package_visible(pkg, filters):
if pk_enums.FILTER_NEWEST in filters:
if pkg.candidate:
self._emit_pkg_version(trans, pkg.candidate)
elif pkg.installed:
self._emit_pkg_version(trans, pkg.installed)
else:
for version in pkg.versions:
self._emit_pkg_version(trans, version)
def _emit_visible_package(self, trans, filters, pkg, info=None):
"""
Filter and emit a package
"""
if self._is_package_visible(pkg, filters):
self._emit_package(trans, pkg, info)
def _emit_visible_packages(self, trans, filters, pkgs, info=None):
"""
Filter and emit packages
"""
for pkg in pkgs:
if self._is_package_visible(pkg, filters):
self._emit_package(trans, pkg, info)
def _emit_visible_packages_by_name(self, trans, filters, pkgs, info=None):
"""
Find the packages with the given namens. Afterwards filter and emit
them
"""
for name_raw in pkgs:
#FIXME: Python-apt doesn't allow unicode as key. See #542965
name = str(name_raw)
if self._cache.has_key(name) and \
self._is_package_visible(self._cache[name], filters):
self._emit_package(trans, self._cache[name], info)
def _is_package_visible(self, pkg, filters):
"""
Return True if the package should be shown in the user interface
"""
if filters == [pk_enums.FILTER_NONE]:
return True
for filter in filters:
if (filter == pk_enums.FILTER_INSTALLED and not pkg.is_installed) or \
(filter == pk_enums.FILTER_NOT_INSTALLED and pkg.is_installed) or \
(filter == pk_enums.FILTER_SUPPORTED and not \
self._is_package_supported(pkg)) or \
(filter == pk_enums.FILTER_NOT_SUPPORTED and \
self._is_package_supported(pkg)) or \
(filter == pk_enums.FILTER_FREE and not self._is_package_free(pkg)) or \
(filter == pk_enums.FILTER_NOT_FREE and \
not self._is_package_not_free(pkg)) or \
(filter == pk_enums.FILTER_GUI and not self._has_package_gui(pkg)) or \
(filter == pk_enums.FILTER_NOT_GUI and self._has_package_gui(pkg)) or \
(filter == pk_enums.FILTER_COLLECTIONS and not \
self._is_package_collection(pkg)) or \
(filter == pk_enums.FILTER_NOT_COLLECTIONS and \
self._is_package_collection(pkg)) or\
(filter == pk_enums.FILTER_DEVELOPMENT and not \
self._is_package_devel(pkg)) or \
(filter == pk_enums.FILTER_NOT_DEVELOPMENT and \
self._is_package_devel(pkg)):
return False
return True
def _is_package_not_free(self, pkg):
"""
Return True if we can be sure that the package's license isn't any
free one
"""
if not pkg.candidate:
return False
origins = pkg.candidate.origins
return (origins and
((origins[0].origin == "Ubuntu" and
candidate[0].component in ["multiverse", "restricted"]) or
(origins[0].origin == "Debian" and
origins[0].component in ["contrib", "non-free"])) and
origins[0].trusted)
def _is_package_collection(self, pkg):
"""
Return True if the package is a metapackge
"""
section = pkg.section.split("/")[-1]
return section == "metapackages"
def _is_package_free(self, pkg):
"""
Return True if we can be sure that the package has got a free license
"""
if not pkg.candidate:
return False
origins = pkg.candidate.origins
return (origins and
((origins[0].origin == "Ubuntu" and
candidate[0].component in ["main", "universe"]) or
(origins[0].origin == "Debian" and
origins[0].component == "main")) and
origins[0].trusted)
def _has_package_gui(self, pkg):
#FIXME: should go to a modified Package class
#FIXME: take application data into account. perhaps checking for
# property in the xapian database
return pkg.section.split('/')[-1].lower() in ['x11', 'gnome', 'kde']
def _is_package_devel(self, pkg):
#FIXME: should go to a modified Package class
return pkg.name.endswith("-dev") or pkg.name.endswith("-dbg") or \
pkg.section.split('/')[-1].lower() in ['devel', 'libdevel']
def _is_package_supported(self, pkg):
if not pkg.candidate:
return False
origins = pkg.candidate.origins
return (origins and
((origins[0].origin == "Ubuntu" and
candidate[0].component in ["main", "restricted"]) or
(origins[0].origin == "Debian" and
origins[0].component == "main")) and
origins[0].trusted)
    def _get_package_by_id(self, id):
        """Return the apt.package.Package corresponding to the given
        package id.

        If the package isn't available error out.

        Delegates the id parsing/validation to _get_version_by_id and just
        returns the owning package of the resolved version.
        """
        version = self._get_version_by_id(id)
        return version.package
def _get_version_by_id(self, id):
"""Return the apt.package.Version corresponding to the given
package id.
If the version isn't available error out.
"""
name, version_string, arch, data = id.split(";", 4)
if arch and arch != apt_pkg.config.find("APT::Architecture") and \
arch != "all":
name += ":%s" % arch
try:
pkg = self._cache[name]
except KeyError:
raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
"There isn't any package named %s",
name)
#FIXME:This requires a not yet released fix in python-apt
try:
version = pkg.versions[version_string]
except:
raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
"Verion %s doesn't exist",
version_string)
if version.architecture != arch:
raise TransactionFailed(aptd_enums.ERROR_NO_PACKAGE,
"Version %s of %s isn't available "
"for architecture %s",
pkg.name, version.version, arch)
return version
def _get_installed_files(self, pkg):
"""
Return the list of unicode names of the files which have
been installed by the package
This method should be obsolete by the apt.package.Package.installedFiles
attribute as soon as the consolidate branch of python-apt gets merged
"""
path = os.path.join(apt_pkg.config["Dir"],
"var/lib/dpkg/info/%s.list" % pkg.name)
try:
list = open(path)
files = list.read().decode().split("\n")
list.close()
except:
return []
return files
def _get_package_group(self, pkg):
"""
Return the packagekit group corresponding to the package's section
"""
section = pkg.section.split("/")[-1]
if SECTION_GROUP_MAP.has_key(section):
return SECTION_GROUP_MAP[section]
else:
pklog.debug("Unkown package section %s of %s" % (pkg.section,
pkg.name))
return pk_enums.GROUP_UNKNOWN
    def _init_plugins(self):
        """Initialize PackageKit apt backend plugins.
        Do nothing if plugins are already initialized.

        Discovers setuptools entry points in group "packagekit.apt.plugins"
        and fills self._plugins, a dict mapping plugin name to the list of
        loaded callables. Load failures are logged and skipped.
        """
        if self._plugins is not None:
            return
        if not pkg_resources:
            return
        self._plugins = {} # plugin_name -> [plugin_fn1, ...]
        # just look in standard Python paths for now
        # NOTE: distribution scan errors returned by find_plugins are ignored
        dists, errors = pkg_resources.working_set.find_plugins(pkg_resources.Environment())
        for dist in dists:
            pkg_resources.working_set.add(dist)
        for plugin_name in ["what_provides"]:
            for entry_point in pkg_resources.iter_entry_points(
                "packagekit.apt.plugins", plugin_name):
                try:
                    plugin = entry_point.load()
                except Exception as e:
                    pklog.warning("Failed to load %s from plugin %s: %s" % (
                        plugin_name, str(entry_point.dist), str(e)))
                    continue
                pklog.debug("Loaded %s from plugin %s" % (
                    plugin_name, str(entry_point.dist)))
                self._plugins.setdefault(plugin_name, []).append(plugin)
    def _apply_changes(self, trans, fetch_range=(15, 50),
                       install_range=(50, 90)):
        """Apply changes and emit RequireRestart accordingly.

        Delegates the real work to the base AptWorker and then, for
        PackageKit transactions that performed upgrades, signals that a
        restart may be required.
        """
        aptdaemon.worker.AptWorker._apply_changes(self, trans,
                                                  fetch_range,
                                                  install_range)
        # Only upgrade-style transactions can require a restart.
        if (hasattr(trans, "pktrans") and
            (trans.role == aptd_enums.ROLE_UPGRADE_SYSTEM or
             trans.packages[aptd_enums.PKGS_UPGRADE] or
             trans.depends[aptd_enums.PKGS_UPGRADE])):
            self._emit_require_restart(trans)
if META_RELEASE_SUPPORT:
    class GMetaRelease(GObject.GObject, MetaReleaseCore):
        """MetaReleaseCore wrapper that emits a GObject signal when the
        meta-release information download has completed."""
        __gsignals__ = {"download-done": (GObject.SignalFlags.RUN_FIRST,
                                          None,
                                          ())}
        def __init__(self):
            GObject.GObject.__init__(self)
            MetaReleaseCore.__init__(self, False, False)
        def download(self):
            # Fetch the meta-release file, then notify listeners.
            MetaReleaseCore.download(self)
            self.emit("download-done")
def get_pk_exit_enum(enum):
    """Map an aptdaemon exit status to its PackageKit equivalent."""
    return MAP_EXIT_ENUM.get(enum, pk_enums.EXIT_UNKNOWN)
def get_pk_status_enum(enum):
    """Map an aptdaemon status to its PackageKit equivalent."""
    return MAP_STATUS_ENUM.get(enum, pk_enums.STATUS_UNKNOWN)
def get_pk_package_enum(enum):
    """Map an aptdaemon package info value to its PackageKit equivalent."""
    return MAP_PACKAGE_ENUM.get(enum, pk_enums.INFO_UNKNOWN)
def get_pk_error_enum(enum):
    """Map an aptdaemon error code to its PackageKit equivalent."""
    return MAP_ERROR_ENUM.get(enum, pk_enums.ERROR_UNKNOWN)
def get_aptd_package_id(pk_id):
    """Convert a PackageKit Package ID to the apt syntax.
    e.g. xterm;235;i386;installed to xterm:i386=235
    """
    name, version, arch, data = pk_id.split(";")
    apt_id = name
    # Foreign architectures (everything but native and "all") are explicit.
    if arch not in (apt_pkg.config.find("APT::Architecture"), "all"):
        apt_id += ":%s" % arch
    if version:
        apt_id += "=%s" % version
    return apt_id
def get_pk_package_id(pk_id, data=""):
    """Convert an AptDaemon package ID to the PackageKit syntax.
    e.g. xterm:i368=235; to xterm;235;i386;installed

    Missing fields become empty strings; an explicit *data* argument takes
    precedence over the release part of the id.
    """
    #FIXME add arch support
    name, version, release = \
        aptdaemon.worker.AptWorker._split_package_id(pk_id)
    # A ":" separates the architecture from the name, if present at all.
    try:
        name, arch = name.split(":", 1)
    except ValueError:
        arch = ""
    if version is None:
        version = ""
    if release is None:
        release = ""
    return "%s;%s;%s;%s" % (name, version, arch, data or release)
def defer_idle(func, *args):
    """Run *func(*args)* once and return False so a GLib idle/timeout
    source that scheduled this callback is removed after one execution."""
    func(*args)
    return False
if __name__ == '__main__':
    # Run the backend's entry point when executed as a script.
    main()
# vim: ts=4 et sts=4
| thnguyn2/ECE_527_MP | mp4/SD_card/partition1/usr/share/pyshared/aptdaemon/pkcompat.py | pkcompat.py | py | 126,545 | python | en | code | 0 | github-code | 36 |
38197374041 | import matplotlib.pylab as plt
import matplotlib.patches as mpatch
import numpy as np
import pandas as pd
# Get alcohol consumption level and GSP
# NOTE(review): input CSV schemas (column names such as "Population
# (million)", "Beer", "Wine", "Spirits") are assumed from usage here —
# confirm against the cached files.
YEAR = 2009
df = pd.read_csv("cache/niaaa-report.csv")
df = df[df.Year == YEAR]
df2 = pd.read_csv("cache/usgs_state_2009.csv", dtype={"Gross State Product": np.float64},
                  skiprows=5, nrows=52, thousands=",")
df3 = df.merge(df2, on='State', how='left')
# Compute measures per capita
df3["GSP per capita"] = df3["Gross State Product"].div(df3["Population (million)"])
df3["Alcohol"] = df3["Beer"] + df3["Wine"] + df3["Spirits"]
# Construct cross-table
# Boolean indicators: above-mean GSP and above-mean alcohol consumption.
gsp = df3["GSP per capita"] > df3["GSP per capita"].mean()
alcohol = df3["Alcohol"] > df3["Alcohol"].mean()
table = pd.crosstab(gsp, alcohol)
print(table)
# Compute correlation between ALCOHOL CONSUMPTION/capita and GSP/capita
df4 = pd.DataFrame({"GSP": df3["GSP per capita"], "Alcohol": df3["Alcohol"]})
# NOTE(review): `.GSP[0]` relies on positional/label access into the
# correlation matrix; `.loc["Alcohol", "GSP"]` would be more explicit.
print("\ncorr: ", df4.corr().GSP[0])
# Generate scatter plot, each alcohol is plotted separately
plt.scatter(df3["Beer"], df3["GSP per capita"], color="Blue")
plt.scatter(df3["Spirits"], df3["GSP per capita"], color="Green")
plt.scatter(df3["Wine"], df3["GSP per capita"], color="Red")
# Manual legend patches, one per beverage series.
red = mpatch.Patch(color='red', label='Wine')
blue = mpatch.Patch(color='blue', label='Beer')
green = mpatch.Patch(color='green', label='Spirits')
plt.legend(handles=[red, green, blue], loc="upper left")
plt.title("GSP/Capita vs Alcohol Consumption/Capita")
plt.xlabel("Alcohol Consumption/Capita")
plt.ylabel("GSP/Capita")
plt.grid()
plt.savefig("results/gsp-alcohol.png")
| hmly/data-science | demo-analysis/demo-analysis.py | demo-analysis.py | py | 1,586 | python | en | code | 0 | github-code | 36 |
class TreeNode:
    """Binary-tree node: a value plus left/right child links."""
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None
class Solution:
    """LeetCode 113: collect all root-to-leaf paths summing to a target."""

    def pathSum(self, root, total):
        """Return every root-to-leaf path of `root` whose values add up
        to `total`, as a list of value lists (left subtree first)."""
        collected = []
        if root:
            self.dfs(root, total, [], collected)
        return collected

    def dfs(self, root, total, path, res):
        """Depth-first walk; appends each qualifying path (copied) to res."""
        path.append(root.val)
        is_leaf = root.left is None and root.right is None
        if is_leaf and sum(path) == total:
            res.append(list(path))
        for child in (root.left, root.right):
            if child is not None:
                self.dfs(child, total, path, res)
        # Undo this node's contribution before returning to the parent.
        path.pop()
######16 叉树
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import random
class MyTreeNode:
    """16-ary tree node: a value plus a fixed array of 16 child slots."""
    def __init__(self, val):
        self.val = val
        self.chlidren = [None] * 16
class TreeNode:
    """Binary-tree node (re-declared here; shadows the earlier TreeNode)."""
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None
# traverse
class Solution:
def binaryTreePaths(self, root: TreeNode) -> List[str]:
if not root:
return []
path = []
res = []
new_root = self.copy_tree(root)
self.dfs(new_root, path, res)
return res
def dfs(self, root, path, res):
path.append(str(root.val))
# base case
if not any(root.chlidren) and path:
res.append('->'.join(path))
# general case
for child in root.chlidren:
if child:
self.dfs(child, path, res)
path.pop()
def copy_tree(self, root):
if not root:
return None
my_root = MyTreeNode(root.val)
my_root.chlidren[random.randint(0, 7)] = self.copy_tree(root.left)
my_root.chlidren[random.randint(0, 7) + 8] = self.copy_tree(root.right)
return my_root
# Build the LeetCode 113 sample tree and print the paths that sum to 22.
p = TreeNode(5)
p.left = TreeNode(4)
p.left.left = TreeNode(11)
p.left.left.right = TreeNode(2)
p.left.left.left = TreeNode(7)
# p.left.right = TreeNode(3)
p.right = TreeNode(8)
p.right.left = TreeNode(13)
p.right.right = TreeNode(4)
p.right.right.left = TreeNode(5)
p.right.right.right = TreeNode(1)
s = Solution()
print(s.pathSum(p, 22))
| dundunmao/LeetCode2019 | 113. binary tree path sum.py | 113. binary tree path sum.py | py | 2,279 | python | en | code | 0 | github-code | 36 |
73974487785 | import random
import unittest
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from accounting_bot.ext.checklist import CheckList, Task, RepeatDelay
class ChecklistTest(unittest.TestCase):
    """Unit tests for CheckList.cleanup_tasks: expiry, refresh scheduling
    and sort order of the task list."""

    def test_expire(self):
        """Tasks more than about two days overdue are dropped; the rest stay."""
        # noinspection PyTypeChecker
        checklist = CheckList(plugin=None)
        now = datetime.now()
        task_valid = Task(name="valid", time=now - timedelta(days=1, hours=12), repeat=RepeatDelay.never)
        task_expired = Task(name="expired", time=now - timedelta(days=2, hours=12), repeat=RepeatDelay.never)
        task_other1 = Task(name="other3", time=now, repeat=RepeatDelay.never)
        task_other2 = Task(name="other1", time=now + timedelta(days=2, hours=12), repeat=RepeatDelay.never)
        task_other3 = Task(name="other2", time=now + timedelta(days=1, hours=12), repeat=RepeatDelay.never)
        checklist.tasks = [task_valid, task_expired, task_other1, task_other2, task_other3]
        checklist.cleanup_tasks()
        self.assertCountEqual([task_valid, task_other1, task_other2, task_other3], checklist.tasks)

    def test_refresh(self):
        """Overdue repeating tasks are rescheduled by their repeat delay;
        non-repeating and not-yet-refreshable tasks keep their time."""
        # noinspection PyTypeChecker
        checklist = CheckList(plugin=None)
        now = datetime.now()
        task_never = Task(name="never", time=now - timedelta(days=1), repeat=RepeatDelay.never)
        task_daily = Task(name="daily", time=now - timedelta(days=3, hours=1), repeat=RepeatDelay.daily)
        task_weekly = Task(name="weekly", time=now - timedelta(days=3, hours=1), repeat=RepeatDelay.weekly)
        task_monthly = Task(name="monthly", time=now - timedelta(days=3, hours=1), repeat=RepeatDelay.monthly)
        # This task is only about one day expired and is not marked as finished, it should not get refreshed yet
        task_daily_pending = Task(name="daily2", time=now - timedelta(days=1, hours=1), repeat=RepeatDelay.daily)
        # This task is the same but marked as finished, it should get refreshed
        # (was `= Task`, which only worked because the class object is truthy)
        task_daily_completed = Task(name="daily3", time=now - timedelta(days=1, hours=1), repeat=RepeatDelay.daily)
        task_daily_completed.finished = True
        checklist.tasks = [task_never, task_daily, task_weekly, task_monthly, task_daily_pending, task_daily_completed]
        checklist.cleanup_tasks()
        self.assertEqual(now - timedelta(days=1), task_never.time)
        self.assertEqual(now + timedelta(days=1, hours=-1), task_daily.time)
        self.assertEqual(now + timedelta(days=4, hours=-1), task_weekly.time)
        self.assertEqual(now + relativedelta(months=1) - timedelta(days=3, hours=1), task_monthly.time)
        self.assertEqual(now - timedelta(days=1, hours=1), task_daily_pending.time)
        self.assertEqual(now + timedelta(days=1, hours=-1), task_daily_completed.time)

    def test_sorting(self):
        """cleanup_tasks returns tasks sorted by due time regardless of
        the (shuffled) insertion order."""
        # noinspection PyTypeChecker
        checklist = CheckList(plugin=None)
        now = datetime.now()
        task_a = Task(name="a", time=now + timedelta(days=1, hours=0), repeat=RepeatDelay.never)
        task_b = Task(name="b", time=now + timedelta(days=3, hours=1), repeat=RepeatDelay.daily)
        task_c = Task(name="c", time=now + timedelta(days=9, hours=11), repeat=RepeatDelay.weekly)
        task_d = Task(name="d", time=now + timedelta(days=20, hours=17), repeat=RepeatDelay.monthly)
        checklist.tasks = random.sample([task_a, task_b, task_c, task_d], 4)
        checklist.cleanup_tasks()
        self.assertListEqual([task_a, task_b, task_c, task_d], checklist.tasks)
if __name__ == '__main__':
    # Run this test module directly via the unittest runner.
    unittest.main()
| Blaumeise03/AccountingBot | tests/test_checklist.py | test_checklist.py | py | 3,556 | python | en | code | 6 | github-code | 36 |
6771193553 | # 10026
# 적록 색약은 빨강 === 초록으로 인식, 적록 색약인 사람과 아닌 사람이 보는 구역의 수를 출력
import sys
input = sys.stdin.readline
# 로직에 문제 없는데도 계속 뜨는 RecursionError
# 알고보니 파이썬 자체에서 최대 재귀 한도가 적용되어 무한 루프를 발생하지 않도록 막아뒀기 때문에, 재귀 제한이 있다.
# 아래 셋팅을 추가하여 제한을 푼다.
sys.setrecursionlimit(100000)
n = int(input())
matrix = [list(map(str, input())) for _ in range(n)]# 입력받은 그림 2차원 배열로 초기화
visited_1 = [[0] * n for _ in range(n)] # 방문 여부 배열
visited_2 = [[0] * n for _ in range(n)] # 방문 여부 배열
# dfs 선정 이유 =>
# 연결된 그래프에서 연관있는 색을 연속적으로 탐색하는 것이므로
# 깊이 있는 노드까지 탐색하는 dfs, 가까이 있는 노드를 우선 탐색하는 bfs 모두 적용 가능하다고 생각한다.
dx = [0, 0, -1, 1]
dy = [1, -1, 0, 0]
# Case: viewer WITHOUT red-green colorblindness (R, G, B all distinct)
def dfs_1(x, y):
    """Flood-fill the same-colour region containing (x, y)."""
    visited_1[x][y] = 1
    for step_x, step_y in zip(dx, dy):
        nx, ny = x + step_x, y + step_y
        # Stay on the board, match the current colour, skip visited cells.
        if 0 <= nx < n and 0 <= ny < n and matrix[x][y] == matrix[nx][ny] and visited_1[nx][ny] == 0:
            dfs_1(nx, ny)
# Case: viewer WITH red-green colorblindness (R and G look identical)
def dfs_2(x, y):
    """Flood-fill the region containing (x, y), merging R with G."""
    visited_2[x][y] = 1
    for step_x, step_y in zip(dx, dy):
        nx, ny = x + step_x, y + step_y
        # Stay on the board and skip visited cells first.
        if 0 <= nx < n and 0 <= ny < n and visited_2[nx][ny] == 0:
            # Same colour, or an R/G pair (indistinguishable here).
            same_colour = matrix[x][y] == matrix[nx][ny]
            red_green = {matrix[x][y], matrix[nx][ny]} == {"R", "G"}
            if same_colour or red_green:
                dfs_2(nx, ny)
# Count of regions (one flood fill per unvisited starting cell)
cnt_1 = 0
cnt_2 = 0
for i in range(n):
    for j in range(n):
        if visited_1[i][j] == 0:
            cnt_1 += 1
            dfs_1(i, j)
        if visited_2[i][j] == 0:
            cnt_2 += 1
            dfs_2(i, j)

print(cnt_1, cnt_2)
# 5
# RRRBB
# GGBBB
# BBBRR
# BBRRR
# RRRRR | chajuhui123/algorithm-solving | BOJ/그래프/230104_적록색약.py | 230104_적록색약.py | py | 2,255 | python | ko | code | 0 | github-code | 36 |
6319889570 | import json
import socket as s
import selectors
import threading
import types
import logging
logger = logging.getLogger("Main." + __name__)
class SocketHandler:
    """Threaded TCP server: accepts clients via a selector loop, decodes
    length-prefixed JSON messages to registered handler callbacks, and can
    broadcast newline-terminated strings to all connected clients.

    NOTE(review): socket, sel, doShutdown, connected_sockets and
    handler_list are CLASS attributes, so all SocketHandler instances share
    one selector, one socket list and one handler list — confirm only a
    single instance is ever created, or move these into __init__.
    """
    socket = None
    sel = selectors.DefaultSelector()
    # Seconds the selector blocks per loop iteration before rechecking shutdown.
    selector_timeout = 4
    doShutdown = threading.Event()
    connected_sockets = []
    handler_list = []
    def __init__(self, host_addr="localhost", socket_port=5000) -> None:
        """Bind a non-blocking listening socket and start the accept/read
        loop in a background thread."""
        self.host_addr = host_addr
        self.port = socket_port
        # NOTE(review): this local `socket` shadows the class attribute of
        # the same name; the class attribute stays None.
        socket = s.socket(s.AF_INET, s.SOCK_STREAM)
        socket.bind((self.host_addr, self.port))
        socket.listen()
        logger.info(f"Socket listening on: {self.host_addr}:{self.port}")
        socket.setblocking(False)
        # data=None marks the listening socket in the event loop.
        self.sel.register(socket, selectors.EVENT_READ, data=None)
        self.listener_thread = threading.Thread(target=self._listener_loop)
        self.listener_thread.start()
    def _accept_and_register(self, sock):
        """Accept a pending connection and register it for read events."""
        conn, addr = sock.accept()
        logger.debug(f"Accepted connection from {addr}")
        conn.setblocking(False)
        data = types.SimpleNamespace(addr=addr, inb=b'', outb=b'')
        events = selectors.EVENT_READ # | selectors.EVENT_WRITE
        self.sel.register(conn, events, data=data)
        self.connected_sockets.append(conn)
    def _service_connection(self, key, mask):
        """Read one message from a ready client socket.

        Wire format: a single length byte followed by that many payload
        bytes. An empty read means the peer closed the connection.
        """
        sock = key.fileobj
        data = key.data
        if mask & selectors.EVENT_READ:
            # read magic happens here
            recv_data = sock.recv(1)
            recv_data = sock.recv(int.from_bytes(recv_data, "big"))
            logger.debug(f"Received from {data.addr}: {recv_data}")
            if recv_data:
                self._handle_incoming(sock, recv_data)
            else:
                logger.debug(f"closing connection to {data.addr}")
                self._close_socket(sock)
    def _listener_loop(self):
        """Background thread body: dispatch selector events until shutdown."""
        while not self.doShutdown.is_set():
            events = self.sel.select(timeout=self.selector_timeout)
            if events is None:
                continue
            for key, mask in events:
                if key.data is None:
                    # No attached data -> this is the listening socket.
                    self._accept_and_register(key.fileobj)
                else:
                    try:
                        self._service_connection(key, mask)
                    except Exception as e:
                        # WinError 10054: connection reset by peer (Windows).
                        if str(e).startswith("[WinError 10054]"):
                            self._close_socket(key.fileobj)
                            logger.debug("Socket Closed")
                        else:
                            logger.warning(f"Socket error! {key.data.addr}:\n{e}")
                            raise e
    def _handle_incoming(self, sock, data: bytes):
        """Decode a received payload as JSON and invoke every registered
        handler with the parsed object.

        NOTE(review): on a decode failure the first byte is dropped and the
        decode retried once — presumably to skip a stray length byte;
        confirm against the client protocol.
        """
        try:
            str_version = data.decode("utf-8")
        except UnicodeDecodeError:
            data = data[1:]
            str_version = data.decode("utf-8")
        # Normalize quoted booleans before JSON parsing.
        str_version = str_version.replace('"true"', 'true').replace('"false"', 'false')
        usable_json = json.loads(str_version)
        for i in self.handler_list:
            i(usable_json)
    def _prune_sockets(self):
        """Drop entries whose file descriptor is -1 (already closed).

        NOTE(review): `index` is only reset inside the `if`; when the
        current socket is still open the loop never advances and spins
        forever. An `else: index += 1` appears to be missing.
        """
        index = 0
        while index < len(self.connected_sockets):
            if self.connected_sockets[index].fileno() == -1:
                del self.connected_sockets[index]
                index = 0
    def _close_socket(self, sock):
        """Remove *sock* from bookkeeping, unregister it and close it.

        NOTE(review): `remove()` is given the *index* returned by
        _find_same_addr_index rather than the socket object itself, so it
        removes by value of an int — this almost certainly raises
        ValueError (silently swallowed below) instead of removing the
        socket.
        """
        self._prune_sockets()
        try:
            self.connected_sockets.remove(self._find_same_addr_index(sock))
        except ValueError:
            pass
        except Exception as e:
            logger.warning(f"Error removing socket from list: {repr(e)}")
        try:
            self.sel.unregister(sock)
            sock.close()
        except Exception as e:
            logger.warning(f"Error unregistering or closing socket: {repr(e)}")
        self._prune_sockets()
    def _find_same_addr_index(self, sock):
        """Return the index of the tracked socket with the same remote
        address, or None implicitly when no match is found.

        NOTE(review): `raddr` is not a standard socket attribute
        (getpeername() returns the remote address) — verify; also
        range(len-1) skips the last element.
        """
        for i in range(len(self.connected_sockets) - 1):
            if self.connected_sockets[i].raddr == sock.raddr:
                return i
    # Latch so the "no clients connected" warning is logged only once.
    nothing = False
    def send_all(self, message: str):
        """Broadcast *message* (UTF-8, newline terminated) to every
        connected client; reset sockets are closed and dropped."""
        if len(self.connected_sockets) == 0:
            if not self.nothing:
                logger.warning("TRY TO SEND MESSAGE TO NOTHING! regards, SocketHandler.send_all()")
                self.nothing = True
            return
        logger.debug("Sending to all sockets: " + message)
        for sock in self.connected_sockets:
            self.nothing = False
            try:
                sock.sendall(message.encode("utf-8") + b"\n")
            except BlockingIOError as e:
                logger.critical(f"Sending IO Error! {repr(e)}")
            except ConnectionResetError:
                logger.warning("Socket Forcibly close by host")
                self._close_socket(sock)
    def register_message_handler(self, function):
        """register function to be called when a message is received from the socket

        Args:
            function: callable invoked with the parsed JSON object of each
                incoming message.
        """
        self.handler_list.append(function)
    def unregister_handler(self, function):
        # NOTE(review): this passes the handler's INDEX to remove(), which
        # removes by value — `self.handler_list.remove(function)` is almost
        # certainly what was intended.
        self.handler_list.remove(self.handler_list.index(function))
    def close(self):
        """Signal the listener loop to stop and close all client sockets.

        NOTE(review): iterating by index while _close_socket mutates the
        list can skip sockets or raise IndexError — verify shutdown path.
        """
        self.doShutdown.set()
        if len(self.connected_sockets) == 0:
            return
        for i in range(len(self.connected_sockets)):
            self._close_socket(self.connected_sockets[i])
| Nickiel12/Church-Programs | Android/android_server/Classes/SocketHandler.py | SocketHandler.py | py | 5,396 | python | en | code | 0 | github-code | 36 |
2848517125 | #############################################################################################################################################
__filename__ = "main.py"
__description__ = """Represents main program.
"""
__author__ = "Anand Iyer"
__copyright__ = "Copyright 2016-17, Anand Iyer"
__credits__ = ["Anand Iyer"]
__version__ = "2.6"
__maintainer__ = "Anand Iyer"
__email__ = "anand.iyer@moolya.com"
__status__ = "Testing" #Upgrade to Production once tested to function.
#############################################################################################################################################
import re
import random
import sys
import os
import argparse
import support
from importlib import import_module
from excelfunctions import *
default_module = __import__("functions") #functions.py
def store (line_counter, variable, fp, function_args):
    """Evaluate *fp* for the given row and stash the result as attribute
    *variable* on the default functions module, so later template lines
    can reference it as $variable. Returns a human-readable status line."""
    return_value = call_function (line_counter, fp, function_args)
    setattr (default_module, variable, return_value)
    return "Stored return value in $%s" %(variable)
def handle_reference (pattern, each, each_row, replace=False):
    """Resolve a column reference (e.g. "C3") in *each* against row
    *each_row* of the global excel_map.

    When *replace* is true the resolved value is substituted back into the
    original string (quoting non-numeric strings); otherwise the raw cell
    value is returned. Strings without a reference pass through unchanged.
    """
    m = re.search (pattern, each)
    if m is not None:
        try:
            return_value = readfromexcelmap (excel_map, each_row, int (m.groups(1)[0]))
            if replace:
                if type (return_value) == str and not support.isdigit (return_value):
                    return_value = "'" + return_value + "'"
                return_value = re.sub (pattern, return_value, each)
        except:
            # NOTE(review): bare except — if the lookup itself raises,
            # return_value is never assigned and the final return raises
            # NameError; verify the intended fallback.
            pass
    else:
        return_value = each
    return return_value
def call_function (each_row, fp, function_args):
    """Evaluate one template cell for row *each_row*.

    Three modes, selected by *fp*:
    - fp is None: *function_args* is a plain string; column references are
      resolved and the pieces concatenated.
    - fp is the EVAL_IN_PYTHON helper: rebuild a Python call expression
      with resolved, quoted arguments and hand it to the helper.
    - fp is any other callable: call it with the resolved argument list.
    """
    global excel_map
    all_args = []
    if fp == None:
        function_args = list(support.lexer_setup(function_args)) #function_args is now a list of arguments, with commas ignored between two forward slashes
        for each in function_args:
            all_args.append (handle_reference ("C(\d+)$", each, each_row))
        return_value = ''.join (all_args) #populate with plain strings. No function calls.
    elif "EVAL_IN_PYTHON" in str (fp):
        #Need to first split the arguments, so we can use the references. Now, join appropriately to form the function call syntax.
        function, function_args = extract_function_with_args (pattern_eval, function_args) #function_args[0]
        function_args = list(support.lexer_setup(function_args)) #function_args is now a list of arguments, with commas ignored between two forward slashes
        for each in function_args:
            arg = handle_reference ("C(\d+)$", each, each_row)
            # Quote non-numeric strings so they survive Python evaluation.
            if type(arg) == str and not support.isdigit(arg):
                arg = "'" + arg + "'"
            all_args.append (arg)
        all_args = function + "(" + ','.join(str(x) for x in all_args) + ")"
        all_args = support.remove_slashes (all_args)
        return_value = fp (all_args) #internal calls the function with appropriate arguments
    elif callable (fp):
        function_args = list(support.lexer_setup(function_args)) #function_args is now a list of arguments, with commas ignored between two forward slashes
        for each in function_args:
            all_args.append (handle_reference ("C(\d+)$", each, each_row))
        return_value = fp (all_args) #internal calls the function with appropriate arguments
    return return_value
#Call function once for each row to fill
#If there are column references, use it along with current row, and fill in actual value
#Same function is used with fp and function_args passed as None and empty string respectively, to populate plain strings.
def construct_result_dict (start_row, col, end_row, fp, function_args):
    """Evaluate the template cell once per row in [start_row, end_row)
    and return a {(row, col): value} dict ready to merge into excel_map.

    Passing fp=None with a plain string populates literal values."""
    return {(row, col): call_function(row, fp, function_args)
            for row in range(start_row, end_row)}
def extract_function_with_args (pattern, function_to_call):
    """Split a "name(args)" call string into (name, stripped_args) using
    *pattern*; returns ("", "") when the pattern does not match."""
    match = pattern.search (function_to_call)
    if match is None:
        return "", ""
    name = match.groups(1)[0]
    args = str (match.groups(1)[1]).strip ()
    return name, args
def get_function (line):
    """Parse one template config line.

    Expected shape: "$var|RnCm|...|function(args)" (pipe separated).
    Returns (start_row, col, end_row, mod, variable, function,
    function_args); mod is always None here (resolved by the caller),
    and defaults survive any parse failure thanks to the broad except.
    """
    function = ""
    function_args = ""
    mod = None
    #some defaults
    variable = ""
    start_row = 1
    col = 0
    try:
        #Identify start_row and col
        pattern_row_col = re.compile ("R(\d+)C(\d+)")
        row_col_object = pattern_row_col.search (line)
        if row_col_object:
            start_row = int (row_col_object.groups(1)[0])
            col = int (row_col_object.groups(0)[1])
        #Identify variable
        pattern_variable = re.compile ("(.*?)\|(.*?)\|(.*?)\|(.*)")
        variable_object = pattern_variable.search (line)
        if variable_object:
            if variable_object.groups(0)[0][0] == "$":
                variable = variable_object.groups(0)[0][1:] #remove the $
            function_to_call = variable_object.groups(0)[3]
            function, function_args = extract_function_with_args (pattern, function_to_call)
    except:
        # NOTE(review): bare except silently keeps the defaults on any
        # malformed line.
        pass
    end_row = start_row + rows_to_fill
    return start_row, col, end_row, mod, variable, function, function_args
#main program
support.log ("####ExcelWriter version %s. %s####" %(__version__, __copyright__))
support.log ()
# pattern matches "(name:args)" directives; pattern_eval matches "name(args)".
pattern = re.compile ("\((.*?):(.*)\)")
pattern_eval = re.compile ("(.*)\((.*)\)")
#"parser" is an "argparse" object that defaults to certain values for each of the command line arguments
#Following command line arguments are supported = config, rowcount, colcount, startrow.
#Use with appropriate switches, when calling from command line.
parser = argparse.ArgumentParser(prog='python main.py', conflict_handler='resolve')
parser.add_argument("--config", help="Configuration file", default="..\\Template.txt")
parser.add_argument("--output", help="Output excel", default="..\\Template_latest.xls")
parser.add_argument("--rowcount", help="Number of rows to fill in Excel template", default=5)
parser.add_argument("--colcount", help="Number of columns in Excel template", default=10)
parser.add_argument("--startrow", help="Starting row number (to generate excel map)", default=1)
args = parser.parse_args()
config_file = args.config
f = open (config_file) #input config file
book, sheet = openexcel (os.path.splitext(config_file)[0] + ".xls", 0) #0 is the first sheet
rows_to_fill = int (args.rowcount)
start_row = int (args.startrow)
end_row = start_row + rows_to_fill
colcount = int (args.colcount)
# excel_map holds {(row, col): value} for the whole template.
excel_map = {}
excel_map = map_excel (sheet, start_row, end_row, colcount)
line_counter = 0
for line in f:
    line_counter += 1
    if len (line) == 1 or line[0] == '#': #only newline or line has been commented
        continue
    start_row, col, end_row, mod, variable, function, function_args = get_function (line) #in case of plain string, function and function_args are empty.
    if mod is None:
        mod = default_module
    if function == "":
        # No function name: treat the 4th pipe-field as a literal string.
        plain_string = line.split ('|')[3].strip('\n')
        return_dict = construct_result_dict (start_row, col, end_row, None, plain_string)
        excel_map.update (return_dict)
        support.log (str.format ("%d. ," %(line_counter))) # the comma at the end ensures newline is NOT printed.
        support.log (str.format ("========>%s printed %d times" %(plain_string, rows_to_fill)))
        support.log ()
        continue
    try:
        fp = getattr (mod, function) #getting function pointer
        support.log (str.format ("%d. ," %(line_counter))) # the comma at the end ensures newline is NOT printed.
        if variable != "":
            # "$var|..." lines only store the value for later reference.
            support.log (store (line_counter, variable, fp, function_args))
            support.log ()
            continue
        return_dict = construct_result_dict (start_row, col, end_row, fp, function_args)
        excel_map.update (return_dict)
        #support.log (return_dict)
        support.log (str.format ("========>%s called %d times" %(function, rows_to_fill)))
        support.log ()
    except:
        # NOTE(review): bare except hides all failures behind one message.
        support.log (str.format ("%s may not exist." %(function)), True)
closeexcel (book)
#write excel_map to excel
book, copybook, sheet = openexcel (os.path.splitext(config_file)[0] + ".xls", 0,"w") #0 is the first sheet
for each in excel_map:
    writetoexcel (sheet, each[0], each[1], excel_map[each])
copybook.save(args.output)
support.log (str.format ("Saved to " + args.output + ". Please rename the file to avoid overwriting during the next iteration."))
closeexcel (book) | ananddotiyer/DDE-Lite | ExcelWriter/main.py | main.py | py | 9,200 | python | en | code | 1 | github-code | 36 |
def find_lis(x):  # Sequence[Tuple]) -> List[Tuple]:
    """Return the longest increasing subsequence of *x*, where tuples are
    ordered by their third element (index 2).

    Patience-sorting variant, O(n log n). Description and pseudo-code:
    https://en.wikipedia.org/wiki/Longest_increasing_subsequence#Efficient_algorithms
    """
    count = len(x)
    predecessor = [0] * count
    tail_index = [0] * (count + 1)
    length = 0
    for idx in range(count):
        # Binary search for the longest prefix whose tail key is below x[idx][2].
        lo, hi = 1, length
        while lo <= hi:
            mid = (lo + hi) // 2
            if x[tail_index[mid]][2] < x[idx][2]:
                lo = mid + 1
            else:
                hi = mid - 1
        predecessor[idx] = tail_index[lo - 1]
        tail_index[lo] = idx
        length = max(length, lo)
    # Walk the predecessor chain backwards to materialise the subsequence.
    result = [None] * length
    walker = tail_index[length]
    for pos in range(length - 1, -1, -1):
        result[pos] = x[walker]
        walker = predecessor[walker]
    return result
def find_aligning_minimizers(first_minimizers, second_minimizers):
    # Sequence[Tuple], second_minimizers: Sequence[Tuple]) -> List[Tuple]:
    """Find the list of minimizers usable for aligning: pairs that share a
    minimizer key carry (key, index_in_seq1, index_in_seq2), and the LIS
    over the second-sequence positions keeps only a consistent ordering."""
    shared = [(key1, pos1, pos2)
              for key1, pos1 in first_minimizers
              for key2, pos2 in second_minimizers
              if key1 == key2]
    return find_lis(shared)
def smith_waterman_algorithm(sequence1, sequence2):
    """Smith-Waterman algorithm for local alignment of two sequences.

    Returns (alignment, best_score) where alignment is a list of
    (char_from_seq1, char_from_seq2) pairs and '-' marks a gap.
    Scoring: match +2, mismatch -1, gap -2.
    """
    MATCH = 2
    MISMATCH = -1
    GAP = -2

    n_rows = len(sequence1) + 1
    n_cols = len(sequence2) + 1
    H = [[0] * n_cols for _ in range(n_rows)]

    best_score = 0
    best_cell = (0, 0)

    # Fill the scoring matrix, tracking the first cell with the best score.
    for i in range(1, n_rows):
        for j in range(1, n_cols):
            if sequence1[i - 1] == sequence2[j - 1]:
                diagonal = H[i - 1][j - 1] + MATCH
            else:
                diagonal = H[i - 1][j - 1] + MISMATCH
            from_above = H[i - 1][j] + GAP
            from_left = H[i][j - 1] + GAP
            cell = max(0, diagonal, from_above, from_left)
            H[i][j] = cell
            if cell > best_score:
                best_score = cell
                best_cell = (i, j)

    # Traceback from the best cell until the score drops to zero.
    aligned = []
    i, j = best_cell
    while i > 0 and j > 0 and H[i][j] > 0:
        pair_score = MATCH if sequence1[i - 1] == sequence2[j - 1] else MISMATCH
        if H[i][j] == H[i - 1][j - 1] + pair_score:
            aligned.append((sequence1[i - 1], sequence2[j - 1]))
            i -= 1
            j -= 1
        elif H[i][j] == H[i - 1][j] + GAP:
            aligned.append((sequence1[i - 1], '-'))
            i -= 1
        else:
            aligned.append(('-', sequence2[j - 1]))
            j -= 1

    # The pairs were collected end-to-start; restore the forward order.
    aligned.reverse()
    return aligned, best_score
def decide_indels(subsequence1: str, subsequence2: str):
    """Merge the gap segments between two alignment anchors, producing a
    per-position result list (mostly from subsequence2) and a match score.

    NOTE(review): `.index` on a slice returns a SLICE-RELATIVE index, but
    the code then uses/compares it as if it were absolute; verify the
    intended semantics before trusting the score or positions.
    """
    score = 0
    if len(subsequence1) > len(subsequence2):
        # seq1 longer: start from all-gaps and place seq2 letters into it.
        result = ["-"] * len(subsequence1)
        i = -1
        for j, letter in enumerate(subsequence2):
            try:
                i = subsequence1[i + 1:].index(letter)
                if len(subsequence1) - i <= len(subsequence2) - j:
                    result[i] = letter
                    score += 1
                else:
                    result[j] = letter
                    i = j
            except ValueError:
                # Letter absent from the remainder of seq1: keep seq2's letter.
                result[j] = letter
                i = j
    elif len(subsequence1) < len(subsequence2):
        result = []
        i = -1
        for j, letter in enumerate(subsequence1):
            try:
                i2 = subsequence2[i + 1:].index(letter)
                if len(subsequence2) - i2 <= len(subsequence1) - j:
                    # NOTE(review): appends a multi-character slice as a
                    # single list element (insertion relative to seq1).
                    result.append(subsequence2[i + 1:i2 + 1])
                    i = i2
                else:
                    result.append(letter)
                    i = j
                    score += 1
            except ValueError:
                result.append(letter)
                i = j
                score += 1
    else:
        # Equal lengths: position-wise substitution, counting exact matches.
        result = []
        for i in range(len(subsequence1)):
            letter1 = subsequence1[i]
            letter2 = subsequence2[i]
            result.append(letter2)
            if letter1 == letter2:
                score += 1
    return result, score
def align_sequences(sequence1: str, sequence2: str, aligning_minimizers):
    """Align two sequences using aligning minimizers as anchors.

    Each (key, pos1, pos2) anchor pins a position of sequence2 onto
    sequence1; the gaps between consecutive anchors (plus a head and tail
    segment) are merged via decide_indels. Returns
    (start_position_in_seq1, merged_result_list, total_score), or
    (-1, [], 0) when there are no anchors.
    """
    if len(aligning_minimizers) == 0:
        return -1, [], 0
    final_result = []
    final_score = 0
    # Head segment: from the projected start of seq2 up to the first anchor.
    i_prev, position1_prev, position2_prev = aligning_minimizers[0]
    start_position = position1_prev - position2_prev
    if start_position < 0:
        start_position = 0
    subsequence1 = sequence1[start_position:position1_prev]
    subsequence2 = sequence2[:position2_prev]
    result, score = decide_indels(subsequence1, subsequence2)
    final_result.extend(result)
    final_score += score
    for index in range(1, len(aligning_minimizers)):
        # Segment between the previous anchor and this one.
        i, position1, position2 = aligning_minimizers[index]
        subsequence1 = sequence1[position1_prev:position1]
        subsequence2 = sequence2[position2_prev:position2]
        result, score = decide_indels(subsequence1, subsequence2)
        final_result.extend(result)
        final_score += score
        if index == len(aligning_minimizers) - 1:
            # Tail segment: from the last anchor to the projected end of seq2.
            end_position = position1 + len(sequence2) - position2
            if end_position > len(sequence1):
                end_position = len(sequence1)
            subsequence1 = sequence1[position1:end_position]
            subsequence2 = sequence2[position2:]
            result, score = decide_indels(subsequence1, subsequence2)
            final_result.extend(result)
            final_score += score
        i_prev, position1_prev, position2_prev = i, position1, position2
    return start_position, final_result, final_score
| applepie-heidi/biomut-finder | mapper.py | mapper.py | py | 6,475 | python | en | code | 0 | github-code | 36 |
20672750408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from graph_tool.all import *
import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool
import tqdm
import pickle
def swir(n, z, rho0, kappa, mu, eta, num_ensamble):
    """Run one realization of the SWIR epidemic model on an Erdos-Renyi graph.

    Node states: 0 = susceptible (S), 1 = infected (I), 2 = weakened (W),
    3 = recovered (R).

    Args:
        n: number of nodes.
        z: mean degree (edge probability is z/n).
        rho0: initial fraction of infected nodes.
        kappa: S -> I infection probability per contact.
        mu: S -> W weakening probability per contact.
        eta: W -> I infection probability per contact.
        num_ensamble: RNG seed identifying this ensemble member.

    Returns:
        Final fraction of recovered nodes (the outbreak size / "magnetisation").
    """
    pER = z/n
    ss = 1 - mu - kappa
    ww = 1 - eta
    np.random.seed(num_ensamble)
    seed_rng(num_ensamble)
    # initialize the Erdos-Renyi random graph
    g = random_graph(n, lambda: np.random.poisson((n-1) * pER), directed=False, model="erdos", random=True)
    # declare a vertex property named "estado" that stores each node's state
    estado = g.new_vertex_property("short")
    # assign the susceptible state to every node
    estado.get_array()[:] = 0
    # draw rho0*n distinct random node indices
    infected_index = np.random.choice(np.arange(0, n), size=int(n*rho0), replace=False)
    # flip those rho0*n nodes to the infected state
    estado.get_array()[infected_index] = 1
    # list holding the currently infected vertices
    I = [g.vertex(i) for i in infected_index]
    def reaction(vertex):
        """
        Perform one time step for a single infected node:
        vertex is a node in the infected state.
        """
        # move the current infected node to the recovered state
        estado[vertex] = 3
        # get the neighbours of the infected node
        vecinos = vertex.out_neighbors()
        # indices of the susceptible neighbours
        S = np.array([g.vertex_index[v] for v in vecinos if estado[v]==0]).astype(int)
        # get the neighbours again (the previous iterator was consumed)
        vecinos = vertex.out_neighbors()
        # indices of the weakened neighbours
        W = np.array([g.vertex_index[v] for v in vecinos if estado[v]==2]).astype(int)
        # draw new states for the susceptible neighbours (stay S / become I / become W)
        new_states = np.random.choice([0, 1, 2], size=S.size, p=[ss, kappa, mu])
        # update the susceptible neighbours in place
        estado.get_array()[S] = new_states
        # start the list of newly infected nodes with the susceptibles that got infected
        new_infected = [g.vertex(i) for i in S[new_states==1]]
        # draw new states for the weakened neighbours (stay W / become I)
        new_states = np.random.choice([2, 1], size=W.size, p=[ww, eta])
        # update the weakened neighbours in place
        estado.get_array()[W] = new_states
        # extend with the weakened nodes that got infected
        new_infected += [g.vertex(i) for i in W[new_states==1]]
        # return the neighbours that became infected this step
        return new_infected
    while I:
        # collects the nodes that become infected during time step n
        new_infected_n = []
        # iterate over the currently infected nodes
        for i in I:
            # let the infected node react with its neighbours
            ni = reaction(i)
            # append the freshly infected neighbours
            new_infected_n.append(ni)
        # flatten into the infected list for time step n+1
        I = [s for sublist in new_infected_n for s in sublist]
        np.random.shuffle(I)
    # outbreak size: fraction of nodes that ended up recovered
    magnetisation = np.count_nonzero(estado.get_array()==3)/estado.get_array().size
    return magnetisation
# N = 50000
# def worker_function(num_ensamble, N):
# kappa_range = (np.linspace(-1, 1, 20))**3
# max_new = 0.108021+0.003
# min_new = 0.108021-0.003
# kappa_range = (kappa_range-kappa_range.min()) * (max_new - min_new) / 2 + min_new
# results = []
# for kappa in kappa_range:
# results.append(swir(N, 8, 0.00747762, kappa, kappa, 0.5, num_ensamble))
# with open('./results/{0}_{1}_rho_critical.p'.format(num_ensamble, N), "wb") as f:
# pickle.dump(results, f)
# ensambles = 20000
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function, range(ensambles), (N for n in range(ensambles))),
# total=ensambles))
# def worker_function_2(num_ensamble):
# kappa_sub = 0.115023
# rho_0_sub = 2e-3
# kappa_c = 0.108021
# rho_0_c = 0.00747762
# #N_range = [int(n) for n in np.geomspace(1e5, 3e6, 10)]
# N_range = [ 100000, 145923, 212936, 310723]
# np.random.shuffle(N_range)
# results = []
# for N in N_range:
# #results.append(swir(N, 8, rho_0_sub, kappa_sub, kappa_sub, 0.5, num_ensamble))
# results.append(swir(N, 8, rho_0_c, kappa_c, kappa_c, 0.5, num_ensamble))
# with open('./results/{0}_fig7_rho_critical.p'.format(num_ensamble), "wb") as f:
# pickle.dump(results, f)
# ensambles = 32000
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_2, range(ensambles)), total=ensambles))
# def worker_function_3(num_ensamble, N):
# kappa_sub = 0.115023
# rho_0_sub = 2e-3
# kappa_c = 0.108021
# rho_0_c = 0.00747762
# r = swir(N, 8, rho_0_c, kappa_c, kappa_c, 0.5, num_ensamble)
# with open('./results/{0}_{1}_fig10_rho_critical.p'.format(num_ensamble, N), "wb") as f:
# pickle.dump(r, f)
# N_range = [ 100000, 145923, 212936, 310723]
# ensambles = 10000
# for N in N_range:
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_3, range(ensambles), [N]*ensambles), total=ensambles))
# def worker_function_4(num_ensamble, N):
# kappa_sub = 0.115023
# rho_0_sub = 2e-3
# kappa_c = 0.108021
# rho_0_c = 0.00747762
# r = swir(N, 8, rho_0_sub, kappa_sub, kappa_sub, 0.5, num_ensamble)
# with open('./results/{0}_{1}_fig7_rho_sub_critical.p'.format(num_ensamble, N), "wb") as f:
# pickle.dump(r, f)
# N_range = [ 100000, 145923, 212936, 310723]
# ensambles = 10000
# for N in N_range:
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_4, range(ensambles), [N]*ensambles), total=ensambles))
# def worker_function_5(num_ensamble, kappa):
# rho_0_sub = 2e-3
# rho_0_c = 0.00747762
# r = swir(1000000, 8, rho_0_sub, kappa, kappa, 0.5, num_ensamble)
# with open('./results/{0}_{1}_fig6_rho_sub_critical.p'.format(num_ensamble, kappa), "wb") as f:
# pickle.dump(r, f)
# kappa_sub = 0.115023
# kappa_range = np.linspace(kappa_sub-8e-5, kappa_sub-1e-2, 10)
# ensambles = 3000
# for kappa in kappa_range:
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_5, range(ensambles), [kappa]*ensambles), total=ensambles))
# sweep parameters: 20 kappa values packed around the critical point
# kappa_c = 0.108021 within a window of width 2*10/Nnu
Nnu = 656
# cubing a symmetric linspace concentrates sample points near the centre
kappa_range = (np.linspace(-1, 1, 20))**3
max_new = 0.108021+10/Nnu
min_new = 0.108021-10/Nnu
# rescale the cubed points from [-1, 1] onto [min_new, max_new]
kappa_range = (kappa_range-kappa_range.min()) * (max_new - min_new) / 2 + min_new
# number of independent realizations per kappa value
ensambles = 3000
def worker_function_6(num_ensamble, kappa):
    """Run one SWIR realization of 10^6 nodes at the critical initial density.

    Args:
        num_ensamble: RNG seed / ensemble member index.
        kappa: infection probability used for both the kappa and mu channels.

    Returns:
        Final outbreak size returned by swir().
    """
    # critical initial infected density (rho_0_c); the unused sub-critical
    # constant rho_0_sub = 2e-3 from the commented-out workers was dropped
    rho_0_c = 0.00747762
    return swir(1000000, 8, rho_0_c, kappa, kappa, 0.5, num_ensamble)
for idx, kappa in enumerate(kappa_range):
with Pool(64) as pool:
results = list(tqdm.tqdm(pool.imap(worker_function_6, range(ensambles), [kappa]*ensambles), total=ensambles))
with open('./results/{0}_{1}_fig14y15_rho_critical.p'.format(1000000, idx), "wb") as f:
pickle.dump(results, f) | VolodyaCO/erSWIR | implementation.py | implementation.py | py | 7,234 | python | es | code | 1 | github-code | 36 |
27705271301 | import yfinance as yf
import requests
from datetime import datetime
def calculate_dma(ticker, days, period='1mo'):
    """Download recent price history and add a *days*-day moving average column.

    Args:
        ticker: Yahoo Finance symbol, e.g. 'BTC-USD'.
        days: rolling-window length for the moving average.
        period: history span passed to yfinance (default '1mo', the previous
            hard-coded value; must cover at least *days* trading days for the
            last DMA value to be non-NaN).

    Returns:
        pandas.DataFrame of the downloaded history with an added 'DMA' column
        (NaN for the first days-1 rows).
    """
    data = yf.download(ticker, period=period)
    data['DMA'] = data['Close'].rolling(window=days).mean()
    return data
def generate_signal(live_price, dma):
    """Return 'buy' when the live price sits above the DMA, otherwise 'sell'."""
    return "buy" if live_price > dma else "sell"
def get_live_price(timeout=10):
    """Fetch the current BTC price in USD from the CoinGecko public API.

    Args:
        timeout: seconds to wait for the HTTP response; without it
            requests.get can block forever on a stalled connection.

    Returns:
        float: latest BTC/USD price.

    Raises:
        requests.HTTPError: if the API answers with an error status.
        requests.Timeout: if no response arrives within *timeout* seconds.
    """
    response = requests.get(
        'https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=usd',
        timeout=timeout,
    )
    # fail loudly on a 4xx/5xx instead of raising a confusing KeyError below
    response.raise_for_status()
    return response.json()['bitcoin']['usd']
# compare the 23-day moving average against the live spot price
data = calculate_dma('BTC-USD', 23)
dma = data['DMA'].iloc[-1]
dma_date = data.index[-1].strftime('%Y-%m-%d')  # Date of the last DMA calculation
live_price = get_live_price()
live_price_date = datetime.now().strftime('%Y-%m-%d')  # Current date
print(f"23-DMA at {dma_date:16} : {dma:.2f}")
print(f"Live Price at {live_price_date:12} : {live_price:.2f}")
signal = generate_signal(live_price, dma)
print(f"Signal : {signal}") | vajjhala/dma-btc | dma-btc.py | dma-btc.py | py | 946 | python | en | code | 0 | github-code | 36 |
19258098333 | import time
import Adafruit_ADS1x15
import csv
# two ADS1115 16-bit ADCs on I2C bus 1; 0x48/0x49 give 8 channels total
adc1 = Adafruit_ADS1x15.ADS1115(address=0x48, busnum=1)
adc2 = Adafruit_ADS1x15.ADS1115(address=0x49, busnum=1)
# gain 1 -> +/-4.096 V full-scale range, hence the 4.096/32767 volts-per-count factor below
GAIN = 1
# channel labels, adc1 ch0-3 then adc2 ch0-3
# NOTE(review): 'MQ4' appears twice and 'MQ5' is absent — confirm against the sensor wiring
sensors = ['MQ135', 'MQ3', 'MQ4', 'MQ2', 'MQ4', 'MQ6', 'MQ7', 'MQ8']
print('Reading ADS1x15 values, press Ctrl-C to quit...')
print("Training Data Name:", end=" ")
name = input()
print('| {0:>7} | {1:>7} | {2:>7} | {3:>7} | {4:>7} | {5:>7} | {6:>7} | {7:>7} |'.format(*sensors))
print('-' * 74)
# record 1000 samples (~5 Hz) into <name>.csv, one row per sweep of all 8 channels
with open(name + '.csv', mode='w') as csv_file:
    csv_file = csv.writer(csv_file, delimiter=',')
    iteration = 0
    while iteration != 1000:
        iteration += 1
        values = [0]*8
        # channels 0-3 come from the first ADC, 4-7 from the second
        for i in range(4):
            values[i] = ('{0:.5f}').format(adc1.read_adc(i, gain=GAIN) * (4.096/32767))
        for i in range(4,8):
            values[i] = ('{0:.5f}').format(adc2.read_adc(i-4, gain=GAIN) * (4.096/32767))
        csv_file.writerow(values)
        print('| {0:>7} | {1:>7} | {2:>7} | {3:>7} | {4:>7} | {5:>7} | {6:>7} | {7:>7} |'.format(*values), end="\r\n")
        print(" Iteration: " + str(iteration), end="\r")
        time.sleep(0.2)
| macoycorpuz/pca-knn-rpi-azotemia | ot/gather.py | gather.py | py | 1,157 | python | en | code | 0 | github-code | 36 |
29595058013 | from fontTools.ttLib import TTFont
import random, copy, os, time, base64
# Measure creation time
start = time.time()
# Read original TTF/OTF font file
f = TTFont('font.ttf')
# Find font's longest CMAP table
cmap = f['cmap']
longestCMAPtable = None
for t in cmap.tables:
if not longestCMAPtable or len(t.cmap) > len(longestCMAPtable.cmap):
longestCMAPtable = t
# Read it into a normal list for shuffling
# This is not excatly elegant, but it works. Improve it.
originalCMAP = []
for u in longestCMAPtable.cmap:
originalCMAP.append((u, longestCMAPtable.cmap[u]))
# Make copy and shuffle that copy
newCMAP = copy.copy(originalCMAP)
random.shuffle(newCMAP)
# These funtions are ugly, but work well. Improve them.
def newNameToUnicode(unicode):
    """Return the shuffled glyph name paired with the original codepoint, or None."""
    for (orig_code, _), (_, shuffled_name) in zip(originalCMAP, newCMAP):
        if orig_code == unicode:
            return shuffled_name
def newUnicodeToUnicode(unicode):
    """Map a codepoint through the shuffle: find it in newCMAP, return the
    original codepoint at the same index (None when absent)."""
    for (orig_code, _), (shuffled_code, _) in zip(originalCMAP, newCMAP):
        if shuffled_code == unicode:
            return orig_code
def newUnicodeToName(name):
    """Return the original codepoint whose index pairs with *name* in the
    shuffled table (None when absent). Despite its name this maps
    glyph name -> unicode."""
    for (orig_code, _), (_, shuffled_name) in zip(originalCMAP, newCMAP):
        if shuffled_name == name:
            return orig_code
def translateText(text):
    """Translate *text* into the string that renders as *text* under the
    scrambled font, by mapping each character through the shuffled cmap.

    Characters with no mapping (or a falsy one) are silently dropped.
    NOTE: this module is Python 2 (`unichr`, u'' literals).
    """
    new = u''
    for g in text:
        # look the character up once instead of twice per iteration
        mapped = newUnicodeToUnicode(ord(g))
        if mapped:
            new += unichr(mapped)
    return new
# Go through all entries in all cmap tables and assign the new randomized glyph names to the unicodes
# NOTE(review): newNameToUnicode is an O(n) scan called twice per entry — cache
# the first call's result (or precompute a dict) if this gets slow on big fonts
for t in cmap.tables:
    for u in t.cmap.keys():
        if newNameToUnicode(u):
            t.cmap[u] = newNameToUnicode(u)
# Save new font file to disk
# Maybe it's a good idea to use unique file names here
f.save('new.ttf')
# Stop measuring time
end = time.time()
# Read font file into base64 string for delivery within CSS
# NOTE(review): open().read() without 'rb' only works because this is
# Python 2 (str == bytes) — confirm the target runtime
fontBase64 = base64.b64encode(open('new.ttf').read())
# Delete the temporary file
os.remove('new.ttf')
# Time it took to create the web font
duration = end - start
# Output this text alongside the new throw-away web font
securetext = translateText('Putin is a wussy.') | yanone/geheimsprache | geheimsprache.py | geheimsprache.py | py | 1,928 | python | en | code | 12 | github-code | 36 |
26699383365 | # https://www.codewars.com/kata/5254ca2719453dcc0b00027d/train/python
# sample inputs for trying the kata solution below
s1 = 'ab'
s2 = 'aabb'
def permutations(string):
    """Return all distinct permutations of *string* (codewars 5254ca2719453dcc0b00027d).

    The previous draft mutated a list of characters with `s[i] = [s+i]`,
    which raises TypeError (list + int) — it never produced permutations.

    Args:
        string: input string; repeated characters yield no duplicate results.

    Returns:
        Sorted list of the unique permutations of *string*.
    """
    from itertools import permutations as iter_perms
    # the set removes duplicates caused by repeated characters
    return sorted({''.join(p) for p in iter_perms(string)})
# smoke test: print the distinct permutations of the longer sample
print(permutations(s2))
| nicorl/codewars | sin terminar/permutations.py | permutations.py | py | 235 | python | en | code | 0 | github-code | 36 |
27338086141 | # inspired from: https://codehandbook.org/how-to-read-email-from-gmail-using-python/
# https://github.com/jay3dec/pythonReadEmail
# Python 3.8^ standard libraries
from traceback import print_exc
from imaplib import IMAP4_SSL
from email import message_from_bytes
from base64 import b64decode
from uuid import uuid4
from json import load, dump
from os import walk, linesep
from email.policy import default as default_policy
import configparser
import subprocess
# get environment variables
# module-level config shared by every function below
config = configparser.ConfigParser()
# NOTE: ConfigParser.read silently ignores a missing env.ini — the later
# config['DEFAULT'][...] lookups would then raise KeyError
config.read('./env/env.ini')
# -------------------------------------------------
#
# Read email from gmail using python
#
# -------------------------------------------------
def read_email_from_gmail():
    """Poll the configured Gmail box for Formspree 'on-the-spot' submissions.

    For each new submission mail: parse the form fields out of the plain-text
    body, skip duplicates (by coordinates and by image file name), decode the
    optional base64 image to disk, append a GeoJSON feature to spots.geojson,
    and finally commit/push the data repository.

    Side effects: network (IMAP), filesystem (geojson + images), subprocess (git).
    Returns None in every path; errors during the IMAP phase are printed.
    """
    # note: values from env.ini don't need quotes and are all strings
    # except for FROM_BOX as explained below
    FROM_EMAIL = config['DEFAULT']['FROM_USER'] + \
        config['DEFAULT']['ORG_EMAIL']
    FROM_PWD = config['DEFAULT']['FROM_PWD']
    IMAP_SERVER = config['DEFAULT']['IMAP_SERVER']
    # collect new spot data in geojson-friendly 'features' list
    new_spots = []
    # json data to read, update and write back to file
    spots_db = False
    # append each feature to a JSON structure: [{feature}, {feature}]
    with open(f"{config['DEFAULT']['DATA_PATH']}/spots.geojson", 'r') as db_file:
        spots_db = load(db_file)
    try:
        print('\nconnecting to gmail..')
        # SSL to SMTP server via imaplib using credentials
        mail = IMAP4_SSL(IMAP_SERVER)
        mail.login(FROM_EMAIL, FROM_PWD)
        # avoid selecting entire inbox if possible
        # be careful to transmit double quotes to mail.select()
        # env.ini FROM_BOX string includes the double quotes
        mail.select(config['DEFAULT']['FROM_BOX'])
        # can avoid selecting 'ALL'
        # try 'UNSEEN' once its up and running
        mail_data = mail.search(None, 'ALL')
        print('reading mail..')
        # all this is just to list ints [1,...24] to decrement over
        # what about a less variably and extra implementation?
        # range only uses it once: mail.fetch(str(i), '<PROTOCOL>')
        mail_ids = mail_data[1]
        id_list = mail_ids[0].split()
        if not len(id_list):
            print('<error> no email')
            return
        first_id = int(id_list[0])
        last_id = int(id_list[-1])
        # original implementation was not printing final list value
        # range(start, stop, step)
        # ranges stop when they hit their stop arg value
        # ∴ the stop value itself is not used
        # ∴ i need a range of (1, 25)
        # iterate newest message id down to oldest
        for i in range(last_id, first_id - 1, -1):
            # use RFC822 protocol (investigate security/options)
            response = mail.fetch(str(i), '(RFC822)')
            # response is a tuple of length 2
            # for response_part in response:
            # get first item of tuple part
            # arr = response_part[0]
            # if not isinstance(arr, tuple):
            #    print('<continue> response_part[0] is not a tuple')
            #    continue
            # print('part:', type(arr))
            # bytes and a default policy avoids environment
            # string encoding preferences
            # ∴ is more consistent, predictable and robust
            # msg = email.message_from_bytes(
            #    arr[1], policy=default_policy)
            # condensed into list comprehension
            msgs = [message_from_bytes(res_part[0][1], policy=default_policy)
                    for res_part in response if isinstance(res_part[0], tuple)]
            # list for identifying emails
            subject_strings = ['New submission', 'on-the-spot']
            for msg in msgs:
                # filter for formspree new submissions on on-the-spot only
                if not (msg['from'] == 'Formspree <noreply@formspree.io>' and all(x in msg['subject'] for x in subject_strings)):
                    # print('<continue> wrong mail')
                    continue
                body = msg.get_body(('plain'))
                content = body.get_content()
                # line 26 is base64 value (if it exists)
                # is this split reading all lines up front?
                # could try just grab first 25 ie by yielding
                lines = content.split(linesep)
                # can access features and spots_db here
                # but wastes computation making a full spot
                # just to check the lat/lng
                # shift range props add up to here
                # leave img handling to its own function
                # or idk global or something else
                # theres light repeatable work to do up front
                # and img stuff to do conditionally
                # test if last form field line number has changed
                if lines[21] != 'base64:':
                    # likely some change in form fields number/name
                    # eventually change away from hardcoded 'base64' detection
                    print('<continue> cannot find form fields in message')
                    continue
                # returns the msg spot data in geojson dict structure
                # is this call expensive? use <caching thingy> to check
                # test each functions, img will likely be biggest
                spot_data = get_spot_data(lines)
                # if spot data is wrong and function empty returns
                # todo: improve this cos function is unlikely to falsy
                # q: is a nested empty object falsy? use any()?
                # still not a great test cos line values could be trivially non-empty
                if not any(spot_data.values()):
                    print('<continue> all form fields are empty')
                    print(spot_data)
                    continue
                # quick-test if new spot already exists
                # assume if lat_lng are identical
                # functionalise this test for lat/lng/id's?
                # this would help break the loop when a match is found via return
                # lines[14] = lat value
                # lines[18] = lng value
                # create 2D list of coords
                db_coords = [v['geometry']['coordinates']
                             for v in spots_db['features']]
                # create msg_coords from lines data
                msg_coords = [float(lines[18].strip()),
                              float(lines[14].strip())]
                if msg_coords in db_coords:
                    print('<continue> spot already exists')
                    continue
                # enforce no whitespace and use hypens?
                # or let people say what they wanna say?
                # or save both?
                # make spot name up front to ref into functions
                spot_name = lines[6].strip().lower()
                # make spot id up front to ref into functions
                spot_id = str(uuid4())[:8].lower()
                # add id
                spot_data['id'] = spot_id
                # check if new spot image already exists
                if match_file_name(f"{config['DEFAULT']['DATA_PATH']}/img", spot_name):
                    print('<continue> spot name already exists in images')
                    continue
                # img_file is changed to the path if the image exists
                # i dont think i need these if False's
                img_file = False
                # handle img if it exists in msg but not on disk
                if lines[22]:
                    img_file = save_base64_img(
                        lines[22].strip(), spot_name, spot_id)
                # add path to image
                if img_file:
                    spot_data['picture'] = img_file
                # organise spot data into geojson feature dict
                # couldnt properties dict be filled out with a loop?
                # coordinates in GeoJSON: [longitude, latitude] --> use this one
                # coordinates in Leaflet: [latitude, longitude]
                # [float(spot_data['longitude']), float(spot_data['latitude'])]
                feature = {
                    "type": "Feature",
                    "geometry": {
                        "type": "Point",
                        "coordinates": msg_coords
                    },
                    "properties": {
                        "id": spot_data['id'],
                        "name": spot_data['name'],
                        "city": spot_data['city'],
                    }
                }
                # check for spot picture
                if 'picture' in spot_data:
                    # add picture filename to spot feature
                    feature['properties']['picture'] = spot_data['picture']
                # add feature to spots list
                new_spots.append(feature)
                # print(f"found {feature['properties']['name']}..")
            # else:
            #    print('some other response part')
    except Exception as e:
        print_exc()
        print(str(e))
        return
    if not new_spots:
        print('..no new spots found\n')
        return
    print(f"found {len(new_spots)} new spot{'s' if len(new_spots) != 1 else ''}")
    # this is far better approach
    # communicates to devs declaratively
    # uses built ins and avoids branching
    # print(f"{len(spots_db['features'])}")
    # print(spots_db['features'])
    # get a list of spots from db
    db_spots = spots_db.get('features', [])
    print(f"found {len(db_spots)} old spot{'s' if len(db_spots) != 1 else ''}")
    # todo: add some id checker thingy here
    print('updating spots..')
    # add new spots to list
    db_spots.extend(new_spots)
    # add all spots back to db
    spots_db['features'] = db_spots
    print(
        f"total spot{'s' if len(spots_db['features']) != 1 else ''}: {len(spots_db['features'])}")
    # write updated spots back to file
    with open(f"{config['DEFAULT']['DATA_PATH']}/spots.geojson", 'w') as json_file:
        dump(spots_db, json_file, indent=2)
    print('updated spots database')
    print('pushing changes to github..')
    result_git_add = subprocess.run(
        ["git", "add", "-A"], cwd=config['DEFAULT']['DATA_PATH'])
    result_git_commit = subprocess.run(
        ["git", "commit", "-m", "updated spots from python"], cwd=config['DEFAULT']['DATA_PATH'])
    result_git_push = subprocess.run(
        ["git", "push", "origin", "main"], cwd=config['DEFAULT']['DATA_PATH'])
    # if needed can check results e.g:
    # print(result_git_push.stderr)
    print('..done\n')
# -------------------------------------------------
#
# Parses message content into spot data dict
#
# -------------------------------------------------
# expect a change: removing email form field
# to automate parameterise some values like:
# or use +4 system from starting_point = 5
# <assume> msg content is predictable
# because if lines[n] == <last_form_field> check has been done
# prior to calling this function
def get_spot_data(lines):
    """Pluck the submission form's key/value pairs out of the body lines.

    Field labels sit on lines 5, 9, 13, 17 and 21 (with their trailing ':'
    stripped) and each value sits on the following line; both sides are
    whitespace-trimmed and lowercased.
    """
    spot_data = {}
    for line_no in range(5, 22, 4):
        field = lines[line_no][:-1].strip().lower()
        spot_data[field] = lines[line_no + 1].strip().lower()
    return spot_data
# -------------------------------------------------
#
# Checks if file names match a given substring
# Returns a Boolean
#
# -------------------------------------------------
def match_file_name(file_dir, match_name):
    """Check whether any file in *file_dir* starts with the hyphenated spot name.

    Args:
        file_dir: directory to scan (top level only; missing dirs yield False).
        match_name: spot name; spaces become hyphens and a trailing hyphen is
            appended before matching, since image files are named
            '<spot-name>-<id>.<ext>'.

    Returns:
        True on the first matching file name (also printed), False otherwise.
    """
    file_names = next(walk(file_dir), (None, None, []))[2]
    # build the search prefix once instead of on every loop iteration
    prefix = match_name.replace(' ', '-') + '-'
    for file_name in file_names:
        if file_name.startswith(prefix):
            print('found a match:', file_name.replace('-', ' '), match_name)
            # end loop after first match
            return True
    return False
# -------------------------------------------------
#
# Creates metadata and file from base64 string
#
# -------------------------------------------------
def save_base64_img(data_url, spot_name, spot_id):
    """Decode a base64 data-URL image and write it into the data repo's img dir.

    Args:
        data_url: 'data:image/<subtype>;base64,<payload>' string. The form
            restricts uploads to jpg/png, but that check is client-side only.
        spot_name: spot name; spaces become hyphens in the file name.
        spot_id: short id appended to the file name.

    Returns:
        The file name written: '<spot-name>-<spot_id>.<jpg|png>'.
    """
    # parse the MIME subtype between '/' and ';' instead of slicing fixed
    # offsets and stripping characters; normalize 'jpeg' to 'jpg'
    ext = data_url[data_url.find('/') + 1:data_url.find(';')].lower()
    if ext == 'jpeg':
        ext = 'jpg'
    # TODO: add a server-side (MIME?) type check for jpg/png only —
    # client-side validation can be bypassed
    # remove data URI scheme prefix to get the raw base64 payload
    b64 = data_url[data_url.find(';base64,') + len(';base64,'):]
    # img file name: <spot-name>-<uuid>.<jpg/png>
    # replace all whitespaces in spot_name with hypens
    file_name = f"{spot_name.replace(' ', '-')}-{spot_id}.{ext}"
    # write out to image file
    with open(f"{config['DEFAULT']['DATA_PATH']}/img/{file_name}", "wb") as fh:
        fh.write(b64decode(b64))
    return file_name
# -------------------------------------------------
#
# Runs module if called directly
#
# -------------------------------------------------
if __name__ == '__main__':
    # entry point: poll the mailbox once and push any new spots
    read_email_from_gmail()
| PAR-iTY/on-the-spot | python/on-the-spot-mail.py | on-the-spot-mail.py | py | 13,406 | python | en | code | 0 | github-code | 36 |
73139030824 | import numpy as np
from scipy.ndimage import maximum_filter
from operator import itemgetter
# implementation with pure functional procedure
# it could be refactored as object-oriented way....
def find_spot(mesh, N):
    """Find the N best view spots in a landscape mesh.

    Validates the mesh, collapses it to a grid, finds local maxima, then
    maps each peak back to its two mesh triangles and keeps the higher one.

    @param mesh
        a well-defined mesh object (see validate_mesh_grid)
    @param N
        number of top spots to return
    @return up to N value entries sorted by descending 'value'
    @raise RuntimeError when the mesh is not a well-defined implicit grid
    """
    try:
        validate_mesh_grid(mesh)
        grid = scale_to_grid(mesh)
        peaks = find_peak(grid)
        elements = []
        for peak in peaks:
            # each grid cell corresponds to exactly two triangles
            idx = lookup_mesh_element(grid=grid, gcoord=peak, mesh=mesh)
            values = list(itemgetter(*idx)(mesh['values']))
            # keep the triangle with the higher value as the spot candidate
            elements.append(max(values, key=lambda v: v['value']))
        elements.sort(key=lambda v: v['value'], reverse=True)
        return elements[0:N]
    except AssertionError:
        raise RuntimeError("Support only well-defined mesh format (i.e. implicit grid)")
def find_peak(grid):
    """
    Locate local maxima of a 2d grid.

    @param grid
        a 2d grid structure
    @return array of [x, y] index pairs, one row per local maximum,
        where x, y index into the grid
    """
    # a cell is a peak when it equals the maximum of its 4x4 neighbourhood
    # (edges handled by 'nearest' padding)
    is_peak = grid == maximum_filter(grid, size=(4, 4), mode="nearest")
    return np.argwhere(is_peak)
def validate_mesh_grid(mesh):
    """
    validate if a mesh object is in a well-defined format.
    A well-defined mesh has implicit a grid structure.
    An AssertionError would be raised on an invalidation.
    @param mesh
        a well-defined mesh object
    """
    elements = mesh['elements']
    nodes = mesh['nodes']
    values = mesh['values']
    # all are sorted w.r.t array index
    for idx, n in enumerate(nodes):
        assert n["id"] == idx
    for idx, e in enumerate(elements):
        assert e["id"] == idx
    for idx, v in enumerate(values):
        assert v["element_id"] == idx
    # triangle elements are well defined: one value per element
    assert len(elements) == len(values)
    # referenced nodes of triangle are well defined: every node is referenced
    ref_n = {n for e in elements for n in e["nodes"]}
    assert len(ref_n) == len(nodes)
    # two consecutive elements share a long edge (i.e. exactly two nodes)
    assert len(elements) % 2 == 0
    for idx in range(0, len(elements), 2):
        e1 = set(elements[idx]['nodes'])
        e2 = set(elements[idx+1]['nodes'])
        edge_n = e1.intersection(e2)
        assert len(edge_n) == 2
    # rigid grid (nodes): derive grid dimensions from the corner nodes,
    # then check every node sits exactly on the implied lattice
    first, second, last = nodes[0], nodes[1], nodes[-1]
    w, h = last['x'] - first['x'], last['y'] - first['y']
    step_y = second['y'] - first['y']
    rows = 1 + int(h / step_y)
    cols = int(len(nodes) / rows)
    step_x = w / (cols - 1)
    assert rows * cols == len(nodes)
    # nodes are stored column-major: index = col * rows + row
    for idx, node in enumerate(nodes):
        assert node['y'] == first['y'] + step_y * (idx % rows)
        assert node['x'] == first['x'] + step_x * (idx // rows)
    # rigid grid (elements): each cell (x, y) owns triangle pair 2i, 2i+1
    # covering the expected four lattice nodes
    for x in range(0, cols-1):
        for y in range(0, rows-1):
            i = x*(rows-1)+y
            e1 = elements[i*2]
            e2 = elements[i*2+1]
            node_id = x*rows+y
            assert sorted(e1['nodes']) == sorted([node_id, node_id+1, node_id+1+rows])
            assert sorted(e2['nodes']) == sorted([node_id, node_id+rows, node_id+1+rows])
def lookup_mesh_element(grid, gcoord, mesh):
    """"
    retrieve elements from mesh according to corresponding
    coordinates in the equivalent grid.
    @param grid
        a 2d grid
    @param gcoord
        [x,y] coordinate of the grid
    @param mesh
        the well-defined mesh
    @return a list of element IDs from the mesh
    """
    col, row = gcoord
    n_rows = grid.shape[1]
    # grid cells are stored column-major; each cell maps to a triangle pair
    cell = col * n_rows + row
    elements = mesh['elements']
    return [elements[cell * 2]['id'], elements[cell * 2 + 1]['id']]
def scale_to_grid(mesh):
    """
    Scale the mesh down to a structural grid.
    Every two consecutive elements (i.e. triangles) of the mesh
    are represented with a single cell of the grid, holding the
    mean of the two triangle values.
    @param mesh
        a well-defined mesh object
    @return numpy 2d array representing the mesh as a grid structure
    """
    nodes = mesh['nodes']
    values = mesh['values']
    # derive grid dimensions from the node lattice (column-major storage);
    # unused locals (elements, width) from the earlier draft were dropped
    first, second, last = nodes[0], nodes[1], nodes[-1]
    h = last['y'] - first['y']
    step_y = second['y'] - first['y']
    rows = 1 + int(h / step_y)
    cols = len(nodes) // rows
    grid = np.zeros((cols - 1, rows - 1))
    for x in range(cols - 1):
        for y in range(rows - 1):
            # cell (x, y) averages the value of its two triangles
            i = x * (rows - 1) + y
            grid[x, y] = (values[i * 2]['value'] + values[i * 2 + 1]['value']) / 2.0
    return grid
6672720656 | import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Function
from model import encoder, predictor
from LoadData import DATASET
import sys
from torch.utils.data import Dataset, DataLoader
from collections import defaultdict
'''
dataset : infograph, quickdraw, real, sketch
'''
# pick GPU when available, otherwise fall back to CPU
cuda = torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
#print('cuda = ', cuda)
BATCH_SIZE = 256  # samples per batch for every dataloader below
EP = 50  # number of training epochs
class ToRGB(object):
    """Torchvision-style transform that forces a PIL image into RGB mode."""

    def __init__(self):
        # stateless transform; nothing to configure
        pass

    def __call__(self, sample):
        # delegate the conversion to PIL
        return sample.convert('RGB')
# normalization constants: maps [0, 1] tensors to [-1, 1] per channel
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.5, 0.5, 0.5])
# shared preprocessing for every domain: force RGB, resize to 32x32,
# convert to tensor, normalize
transform = transforms.Compose([
    ToRGB(),
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])
if __name__ == '__main__':
    # CLI: all arguments but the last are source domains, the last is the target
    argument = sys.argv[1:]
    source_domain = argument[:-1]
    target_domain = argument[-1]
    N = len(source_domain)
    # dataloader
    source_dataloader_list = []
    source_clf = {}
    # shared feature extractor across all domains
    extractor = encoder().to(device)
    extractor_optim = optim.Adam(extractor.parameters(), lr=3e-4)
    # build one dataloader and one classifier pair (c1/c2) per source domain
    for source in source_domain:
        print(source)
        if source == 'svhn':
            dataset = dset.SVHN(root='./dataset/svhn/', download=True, transform=transform)
        elif source == 'mnist':
            dataset = dset.MNIST('./dataset/mnist', train=True, download=True, transform=transform)
        else:
            print(source)
            dataset = DATASET(source, 'train')
        dataset = DataLoader(dataset, batch_size = BATCH_SIZE, shuffle=True)
        source_dataloader_list.append(dataset)
        # c1 : for target
        # c2 : for source
        source_clf[source] = {}
        source_clf[source]['c1'] = predictor().to(device)
        source_clf[source]['c2'] = predictor().to(device)
        source_clf[source]['optim'] = optim.Adam(list(source_clf[source]['c1'].parameters()) + list(source_clf[source]['c2'].parameters()), lr=3e-4, weight_decay=0.0005)
    # target-domain dataloader (unlabeled usage during training)
    if target_domain == 'svhn':
        target_dataset = dset.SVHN(root='./dataset/svhn/', download=True, transform=transform)
    elif target_domain == 'mnist':
        target_dataset = dset.MNIST('./dataset/mnist', train=True, download=True, transform=transform)
    else:
        target_dataset = DATASET(target_domain, 'train')
    target_dataloader = DataLoader(target_dataset, batch_size=BATCH_SIZE, shuffle=True)
    loss_extractor = nn.CrossEntropyLoss()
    for ep in range(EP):
        print(ep+1)
        extractor.train()
        source_ac = {}
        for source in source_domain:
            source_clf[source]['c1'] = source_clf[source]['c1'].train()
            source_clf[source]['c2'] = source_clf[source]['c2'].train()
            source_ac[source] = defaultdict(int)
        for batch_index, (src_batch, tar_batch) in enumerate(zip(zip(*source_dataloader_list), target_dataloader)):
            src_len = len(src_batch)
            loss_cls = 0
            # train extractor and source clssifier
            for index, batch in enumerate(src_batch):
                x, y = batch
                x = x.to(device)
                y = y.to(device)
                y = y.view(-1)
                feature = extractor(x)
                pred1 = source_clf[source_domain[index]]['c1'](feature)
                pred2 = source_clf[source_domain[index]]['c2'](feature)
                # running training accuracy per classifier
                source_ac[source_domain[index]]['c1'] += torch.sum(torch.max(pred1, dim=1)[1] == y).item()
                source_ac[source_domain[index]]['c2'] += torch.sum(torch.max(pred2, dim=1)[1] == y).item()
                loss_cls += loss_extractor(pred1, y) + loss_extractor(pred2, y)
            if batch_index % 5 == 0:
                for source in source_domain:
                    print(source)
                    print('c1 : [%.8f]' % (source_ac[source]['c1']/(batch_index+1)/BATCH_SIZE))
                    print('c2 : [%.8f]' % (source_ac[source]['c2']/(batch_index+1)/BATCH_SIZE))
                    print('\n')
            #extractor_optim.zero_grad()
            #for index, source in enumerate(source_domain):
            #    source_clf[source_domain[index]]['optim'].zero_grad()
            #loss_cls.backward(retain_graph=True)
            #extractor_optim.step()
            #for index, source in enumerate(source_domain):
            #    source_clf[source]['optim'].step()
            #    source_clf[source]['optim'].zero_grad()
            #extractor_optim.zero_grad()
            # moment-matching distances: k=1,2 between each source and the
            # target (m1) and between source pairs (m2)
            m1_loss = 0
            m2_loss = 0
            for k in range(1, 3):
                for i_index, batch in enumerate(src_batch):
                    x, y = batch
                    x = x.to(device)
                    y = y.to(device)
                    y = y.view(-1)
                    tar_x, _ = tar_batch
                    tar_x = tar_x.to(device)
                    src_feature = extractor(x)
                    tar_feature = extractor(tar_x)
                    e_src = torch.mean(src_feature**k, dim=0)
                    e_tar = torch.mean(tar_feature**k, dim=0)
                    m1_dist = e_src.dist(e_tar)
                    m1_loss += m1_dist
                    for j_index, other_batch in enumerate(src_batch[i_index:]):
                        other_x, other_y = other_batch
                        other_x = other_x.to(device)
                        other_y = other_y.to(device)
                        other_y = other_y.view(-1)
                        other_feature = extractor(other_x)
                        e_other = torch.mean(other_feature**k, dim=0)
                        m2_dist = e_src.dist(e_other)
                        m2_loss += m2_dist
            loss_m = 0.5 * (m1_loss/N + m2_loss/N/(N-1)*2)
            # NOTE(review): loss_m is computed and printed but NOT added to the
            # optimized loss — confirm whether the moment-matching term was
            # intentionally disabled here
            loss = loss_cls
            if batch_index % 5 == 0:
                print('[%d]/[%d]' % (batch_index, len(target_dataloader)))
                print('class loss : [%.5f]' % (loss_cls))
                print('msd loss : [%.5f]' % (loss_m))
            extractor_optim.zero_grad()
            for source in source_domain:
                source_clf[source]['optim'].zero_grad()
            loss.backward(retain_graph=True)
            extractor_optim.step()
            for source in source_domain:
                source_clf[source]['optim'].step()
                source_clf[source]['optim'].zero_grad()
            extractor_optim.zero_grad()
            # step B (MCD-style): train classifiers to maximize their
            # disagreement on target features while staying accurate on source
            tar_x , _ = tar_batch
            tar_x = tar_x.to(device)
            tar_feature = extractor(tar_x)
            loss = 0
            for index, batch in enumerate(src_batch):
                x, y = batch
                x = x.to(device)
                y = y.to(device)
                y = y.view(-1)
                feature = extractor(x)
                pred1 = source_clf[source_domain[index]]['c1'](feature)
                pred2 = source_clf[source_domain[index]]['c2'](feature)
                clf_loss = loss_extractor(pred1, y) + loss_extractor(pred2, y)
                pred_c1 = source_clf[source_domain[index]]['c1'](tar_feature)
                pred_c2 = source_clf[source_domain[index]]['c2'](tar_feature)
                # L1 distance between the two classifiers' target softmax outputs
                discrepency_loss = torch.mean(torch.sum(abs(F.softmax(pred_c1, dim=1) - F.softmax(pred_c2, dim=1)), dim=1))
                loss += clf_loss - discrepency_loss
            loss.backward(retain_graph=True)
            # NOTE(review): zero_grad() is called BEFORE step() here, which
            # discards the just-computed gradients — confirm the intended
            # zero_grad / step ordering
            for source in source_domain:
                source_clf[source]['optim'].zero_grad()
                source_clf[source]['optim'].step()
                source_clf[source]['optim'].zero_grad()
            extractor_optim.zero_grad()
            # step C: train the extractor to minimize the classifier discrepancy
            discrepency_loss = 0
            for index, _ in enumerate(src_batch):
                pred_c1 = source_clf[source_domain[index]]['c1'](tar_feature)
                pred_c2 = source_clf[source_domain[index]]['c2'](tar_feature)
                discrepency_loss += torch.mean(torch.sum(abs(F.softmax(pred_c1, dim=1) - F.softmax(pred_c2, dim=1)), dim=1))
            extractor_optim.zero_grad()
            discrepency_loss.backward(retain_graph=True)
            extractor_optim.step()
            extractor_optim.zero_grad()
            for source in source_domain:
                source_clf[source]['optim'].zero_grad()
            if batch_index % 5 == 0:
                print('Discrepency Loss : [%.4f]' % (discrepency_loss))
        # ---- per-epoch evaluation on the target domain ----
        extractor.eval()
        for source in source_domain:
            source_clf[source]['c1'] = source_clf[source]['c1'].eval()
            source_clf[source]['c2'] = source_clf[source]['c2'].eval()
        source_ac = {}
        if target_domain == 'svhn':
            eval_loader = dset.SVHN(root='./dataset/svhn/', download=True, transform=transform)
        elif target_domain == 'mnist':
            eval_loader = dset.MNIST('./dataset/mnist', train=True, download=True, transform=transform)
        else:
            eval_loader = DATASET(target_domain, 'train')
        eval_loader = DataLoader(eval_loader, batch_size=BATCH_SIZE, shuffle=True)
        for source in source_domain:
            source_ac[source] = defaultdict(int)
        fianl_ac = 0
        with torch.no_grad():
            for index, batch in enumerate(eval_loader):
                x, y = batch
                x = x.to(device)
                y = y.to(device)
                y = y.view(-1)
                feature = extractor(x)
                final_pred = 1
                # ensemble: sum the softmax outputs of every classifier pair
                for source in source_domain:
                    pred1 = source_clf[source]['c1'](feature)
                    pred2 = source_clf[source]['c2'](feature)
                    if isinstance(final_pred, int):
                        final_pred = F.softmax(pred1, dim=1) + F.softmax(pred2, dim=1)
                    else:
                        final_pred += F.softmax(pred1, dim=1) + F.softmax(pred2, dim=1)
                    source_ac[source]['c1'] += np.sum(np.argmax(pred1.cpu().detach().numpy(), axis=1) == y.cpu().detach().numpy())
                    source_ac[source]['c2'] += np.sum(np.argmax(pred2.cpu().detach().numpy(), axis=1) == y.cpu().detach().numpy())
                fianl_ac += np.sum(np.argmax(final_pred.cpu().detach().numpy(), axis=1) == y.cpu().detach().numpy())
        for source in source_domain:
            print('Current Source : ', source)
            print('Accuray for c1 : [%.4f]' % (source_ac[source]['c1']/BATCH_SIZE/len(eval_loader)))
            print('Accuray for c2 : [%.4f]' % (source_ac[source]['c2']/BATCH_SIZE/len(eval_loader)))
        print('Combine Ac : [%.4f]' % (fianl_ac/BATCH_SIZE/len(eval_loader)))
        # checkpoint the extractor and every classifier pair once per epoch
        torch.save(extractor.state_dict(), './model/extractor'+'_'+str(ep)+'.pth')
        for source in source_domain:
            torch.save(source_clf[source]['c1'].state_dict(), './model/'+source+'_c1_'+str(ep)+'.pth')
            torch.save(source_clf[source]['c2'].state_dict(), './model/'+source+'_c2_'+str(ep)+'.pth')
| PRCinguhou/domain-adaptation | train.py | train.py | py | 9,439 | python | en | code | 2 | github-code | 36 |
42236411796 | from pick import pick
import re
import sys
result = ""  # NOTE(review): assigned here but never read anywhere in this module
selected = []  # condition strings chosen by the user in main()
all_feature = []  # distinct condition strings; rebuilt as a side effect of loadDB()
def loadDB(filename):
    """Load the production-rule database from ``filename``.

    Each non-comment line has the form
    ``<id>:IF <cond1> [& <cond2>...] -> <conclusion>``.

    Returns a dict mapping each conclusion to its list of conditions.
    As a side effect, rebuilds the module-level ``all_feature`` list with
    every distinct condition seen.

    Improvements over the previous version: blank and malformed lines are
    skipped instead of raising IndexError, the unused rule id is no longer
    parsed, and the builtin name ``all`` is no longer shadowed.
    """
    dictionary = {}
    all_conditions = []
    global all_feature
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            # Skip blank lines and '#' comment lines.
            if not line or line[0] == "#":
                continue
            matches = re.findall(r'IF(.*?)->', line.replace(" ", ""))
            if not matches:
                continue  # malformed line: no "IF ... ->" segment
            conditions = matches[0].split("&")
            all_conditions.extend(conditions)
            conclusion = line.split("->")[1].strip()
            # NOTE: a later rule with the same conclusion overwrites an earlier one.
            dictionary[conclusion] = conditions
    all_feature = list(set(all_conditions))
    return dictionary
def IS(text, current_dict):
    """Filter the rule dict, keeping only rules whose conditions include ``text``."""
    return {conclusion: conditions
            for conclusion, conditions in current_dict.items()
            if text in conditions}
def main(debug=False):
    """Interactive inference loop: ask the user to pick conditions, then
    repeatedly narrow the rule database and print the conclusion.

    Uses the third-party ``pick`` TUI for the multi-select prompt, so this
    requires an interactive terminal.
    """
    dictionary = loadDB("db.txt")
    title = "请选择全部条件。"
    # pick() returns (option, index) tuples; keep only the option text.
    temp = pick(all_feature, title, multiselect=True, min_selection_count=1)
    selected.extend(i[0] for i in temp)
    if debug:
        print(f"选择的所有条件与规则:{selected}")
    for i in selected:
        # Apply each selected condition in turn, narrowing the rule set.
        if debug:
            print(f"处理规则 '{i}' 后的结论区:{list(dictionary.keys())}")
        dictionary = IS(i, dictionary)
    # Report: none, exactly one, or several surviving conclusions.
    if len(dictionary) == 0:
        print("没有通过条件找到您的结论")
    elif len(dictionary) == 1:
        print(f"您输入的条件找到的结论是:{list(dictionary.keys())[0]}!")
    elif len(dictionary) > 1:
        print(f"您提供的条件对应数条结论,第一条是:{list(dictionary.keys())[0]}!")
if __name__ == "__main__":
    # Fix: previously, passing any first argument other than "--debug"
    # made the program silently do nothing (the else was attached to the
    # argc check). Now any invocation runs main(), with debug mode enabled
    # only by an explicit "--debug" flag.
    main(debug=(len(sys.argv) > 1 and sys.argv[1] == "--debug"))
| littlebear0729/Production-system | identify_system.py | identify_system.py | py | 2,137 | python | en | code | 0 | github-code | 36 |
73137151144 | import pytest
from registry_schemas import validate
# Properties can be anything, using show* for testing.
REGISTRATIONS_TABLE = {
'showColumn1': True,
'showColumn2': False,
'showColumn3': True,
'showColumn4': False
}
# Properties can be anything, using misc* for testing.
MISC_PREFERENCES = {
'preference1': 'A',
'preference2': False,
'preference3': 3
}
TEST_REG_TABLE_JSON = {
'paymentConfirmationDialog': True,
'registrationsTable': REGISTRATIONS_TABLE
}
TEST_MISC_PREF_JSON = {
'paymentConfirmationDialog': True,
'miscellaneousPreferences': MISC_PREFERENCES
}
TEST_ALL_JSON = {
'paymentConfirmationDialog': True,
'selectConfirmationDialog': False,
'defaultDropDowns': True,
'defaultTableFilters': False,
'registrationsTable': REGISTRATIONS_TABLE,
'miscellaneousPreferences': MISC_PREFERENCES
}
TEST_COMBO_JSON = {
'paymentConfirmationDialog': True,
'selectConfirmationDialog': False
}
TEST_PAYMENT_JSON = {
'paymentConfirmationDialog': True
}
TEST_SELECT_JSON = {
'selectConfirmationDialog': False
}
TEST_DROPDOWN_JSON = {
'defaultDropDowns': True
}
TEST_FILTER_JSON = {
'defaultTableFilters': False
}
TEST_EMPTY_JSON = {
}
TEST_UNKNOWN_JSON = {
'unknown': 'xxxx'
}
TEST_INVALID_TYPE_JSON = {
'selectConfirmationDialog': 'wrong'
}
# testdata pattern is ({description}, {is valid}, {data})
TEST_DATA = [
('All settings', True, TEST_ALL_JSON),
('2 settings', True, TEST_COMBO_JSON),
('Just payment', True, TEST_PAYMENT_JSON),
('Just search select', True, TEST_SELECT_JSON),
('Just dropdown', True, TEST_DROPDOWN_JSON),
('Just table filter', True, TEST_FILTER_JSON),
('Just registrations table', True, TEST_REG_TABLE_JSON),
('Just miscellaneous preferences', True, TEST_MISC_PREF_JSON),
('No settings', False, TEST_EMPTY_JSON),
('Unknown setting', False, TEST_UNKNOWN_JSON),
('Invalid type setting', False, TEST_INVALID_TYPE_JSON)
]
@pytest.mark.parametrize('desc,valid,data', TEST_DATA)
def test_user_profile(desc, valid, data):
    """Assert that the schema is performing as expected for a user profile."""
    # validate() comes from the project's registry_schemas package.
    is_valid, errors = validate(data, 'userProfile', 'common')
    if errors:
        # Print individual validation messages to aid debugging a failure.
        for err in errors:
            print(err.message)
    assert is_valid == valid
| bcgov/registry-schemas | tests/unit/common/test_user_profile.py | test_user_profile.py | py | 2,351 | python | en | code | 0 | github-code | 36 |
14128044898 | #!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
# Definition for singly-linked list.
class ListNode(object):
    """A node of a singly linked list."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # next node in the list (None marks the tail)
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        Return the node at which two singly linked lists intersect,
        or None if they do not share a tail.

        :type head1, head1: ListNode
        :rtype: ListNode

        Two-pointer technique: each pointer walks its own list and then
        switches to the other list's head. After at most m+n steps the
        pointers either meet at the intersection node or both become
        None simultaneously. O(m+n) time, O(1) space — the previous
        stack-based version needed O(m+n) extra space.
        """
        if not headA or not headB:
            return None
        pa, pb = headA, headB
        # Identity comparison ('is') — we want the same node object,
        # not nodes that merely compare equal.
        while pa is not pb:
            pa = pa.next if pa else headB
            pb = pb.next if pb else headA
        return pa
| zenmeder/leetcode | 160.py | 160.py | py | 619 | python | en | code | 0 | github-code | 36 |
6911896789 | import json
from pydantic import BaseModel
from pdf_token_type_labels.TokenType import TokenType
from pdf_features.Rectangle import Rectangle
SCALE_RATIO = 0.75
class SegmentBox(BaseModel):
    """Axis-aligned bounding box of a labelled segment on a PDF page."""
    left: float
    top: float
    width: float
    height: float
    page_number: int
    segment_type: TokenType = TokenType.TEXT
    def to_dict(self):
        """Serialise via pydantic's JSON dump so nested values (e.g. the
        TokenType enum) come back as plain JSON-compatible types."""
        return json.loads(self.model_dump_json())
    def get_bounding_box(self) -> Rectangle:
        """Convert to the project's Rectangle type (coordinates truncated to int)."""
        return Rectangle.from_width_height(
            left=int(self.left), top=int(self.top), width=int(self.width), height=int(self.height)
        )
    def scale_down(self):
        # Shrink in place by SCALE_RATIO, rounding to whole units.
        # NOTE(review): scale_down() followed by scale_up() does not restore
        # the original values exactly because of the round() calls.
        self.left = round(self.left * SCALE_RATIO, 0)
        self.top = round(self.top * SCALE_RATIO, 0)
        self.width = round(self.width * SCALE_RATIO, 0)
        self.height = round(self.height * SCALE_RATIO, 0)
    def scale_up(self):
        # Inverse of scale_down (same rounding caveat applies).
        self.left = round(self.left / SCALE_RATIO, 0)
        self.top = round(self.top / SCALE_RATIO, 0)
        self.width = round(self.width / SCALE_RATIO, 0)
        self.height = round(self.height / SCALE_RATIO, 0)
| huridocs/pdf_metadata_extraction | src/data/SegmentBox.py | SegmentBox.py | py | 1,091 | python | en | code | 2 | github-code | 36 |
8649507511 | """
============================
Author:柠檬班-木森
Time:2020/5/12 20:40
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import time
import unittest
from selenium import webdriver
from ddt import ddt, data
from web_08day.page.page_login import LoginPage
from web_08day.page.page_index import IndexPage
"""
'18684720553,python'
"""
# Negative login cases: mobile/password inputs and the expected on-page error text.
error_case_data = [
    {'mobile': "", "pwd": "python1", "expected": "请输入手机号"},
    {'mobile': "1868472055a", "pwd": "python1", "expected": "请输入正确的手机号"},
    {'mobile': "18684720553", "pwd": "", "expected": "请输入密码"}
]
@ddt
class TestLogin(unittest.TestCase):
    """Login test cases (selenium page-object driven)."""
    def setUp(self):
        # Fresh browser and page objects for every test.
        self.driver = webdriver.Chrome()
        self.login_page = LoginPage(self.driver)
        self.index_page = IndexPage(self.driver)
    def test_login_pass(self):
        """Happy-path login case."""
        # Perform the login action.
        self.login_page.login('18684720553', 'python')
        # Read the post-login user info from the index page.
        res = self.index_page.get_my_user_info()
        # Assert the case passed (expected banner text).
        self.assertEqual('登录成功', res)
    @data(*error_case_data)
    def test_login_error_case(self, case):
        # Perform the login attempt with invalid data.
        self.login_page.login(case['mobile'], case['pwd'])
        # Read the actual on-page error message.
        result = self.login_page.get_error_info()
        # Assert it matches the expected error text.
        self.assertEqual(case['expected'], result)
    def tearDown(self):
        self.driver.quit()
self.driver.quit()
if __name__ == '__main__':
unittest.main()
| huchaoyang1991/py27_web | web_08day(web自动化用例编写和PO模式)/testcase/test_login_02.py | test_login_02.py | py | 1,630 | python | en | code | 0 | github-code | 36 |
37558477516 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.hashers import make_password, check_password
from superadmin.models import User, UserGroup, Account, AccountChangeLog
from .forms import RawLoginForm
# Create your views here.
def user_login(request):
    """Render the login page and authenticate a posted username/password.

    On success the username is stored in the session and the user is
    redirected to the home view. ``response`` is a flag passed to the
    template: True until the credentials fail (misleading name —
    NOTE(review): consider renaming to something like ``auth_ok``).
    """
    # Already logged in: go straight home.
    if request.session.get('user'):
        return redirect('user:user-home')
    login = RawLoginForm()
    response = True
    if request.method=='POST':
        login = RawLoginForm(request.POST)
        if login.is_valid():
            username = login.cleaned_data['username']
            password = login.cleaned_data['password']
            try:
                user = User.objects.get(username=username)
                # Compare the posted password against the stored hash.
                if check_password(password, user.password):
                    print("Success")  # NOTE(review): debug print; use logging instead
                    login = RawLoginForm()
                    request.session['user'] = username
                    return redirect('user:user-home')
                else:
                    response = False
                    login = RawLoginForm()
            except User.DoesNotExist:
                # Unknown username is reported identically to a wrong password.
                response = False
                login = RawLoginForm()
    context = {
        'login_form': login,
        'response': response
    }
    return render(request, "user_login.html", context)
def user_home(request):
    """Home page: handles logout posts and account-change-log searches.

    A posted ``search_form`` looks up accounts named ``searchkey`` that are
    in any of the logged-in user's groups, and shows the matching change log.
    """
    changelog = ""
    username = request.session.get('user')
    if not username:
        return redirect('user:user-login')
    if request.method=='POST' and 'logout' in request.POST:
        print("Logout")
        return user_logout(request)
    if request.method=='POST' and 'search_form' in request.POST:
        print(request.POST.get("searchkey"))
        searchkey = request.POST.get("searchkey")
        try:
            userdata = User.objects.get(username=username)
            # NOTE(review): flat='True' passes the *string* 'True', which
            # works only because Django treats any truthy value as True —
            # should be the boolean True.
            grouplist = userdata.group.all().values_list('id', flat='True')
            try:
                accountid = Account.objects.filter(username=searchkey, group__id__in=grouplist).values_list('id', flat='True').distinct()
                # NOTE(review): each iteration overwrites ``changelog``, so
                # only the LAST matching account's log reaches the template.
                for id in accountid.iterator():
                    try:
                        accountdata = Account.objects.get(id=id)
                        try:
                            # NOTE(review): filter() returns a (possibly empty)
                            # queryset and does not raise DoesNotExist, so this
                            # inner except looks unreachable — confirm.
                            changelog = AccountChangeLog.objects.filter(username=accountdata).order_by('-modified')
                        except AccountChangeLog.DoesNotExist:
                            changelog = ""
                    except Account.DoesNotExist:
                        changelog = ""
            except Account.DoesNotExist:
                changelog = ""
        except User.DoesNotExist:
            changelog = ""
    context = {
        'username': username,
        'changelog': changelog
    }
    return render(request, "user_home.html", context)
def user_logout(request):
    """Clear the session user on an explicit logout POST.

    Anything else (not logged in, wrong method, no 'logout' key) falls
    through to the home redirect unchanged.
    """
    username = request.session.get('user')
    is_logout_post = request.method == 'POST' and 'logout' in request.POST
    if username and is_logout_post:
        print("Logout Button Clicked")
        del request.session['user']
        return redirect('user:user-login')
    print("Logout Button Unclicked")
    return redirect('user:user-home')
3224619644 | import random
from os import path
from world_map import *
from game_data import *
from output import *
from shop import Shop
from entity import Entity
from color import Color
class Var():
    """Central mutable game state plus menu/stat display helpers.

    A single module-level instance ``V`` is created at the bottom of this
    file. NOTE(review): several methods reference the global ``V`` and
    ``self`` interchangeably (e.g. randomize_mob, inventory_selection,
    display_stats) — this only works because there is exactly one instance.
    """
    def __init__(self):
        # Engine/session flags and save-file locations.
        self.running = True
        self.ai_turn = True
        self.data_slot = 0
        self.data_file = "main-PythonTextRPG-ihave13digits.json"
        self.data_path = "data/data"
        self.quest_path = "data/quest"
        self.game_path = "data/game_data"
        self.state = "intro"
        self.selected_quest = ""
        self.location = "Fairlanding"
        self.item_type = ""
        # Player, current enemy, and shop objects.
        self.player = Entity("Player", "human", "m", False)
        self.mob = Entity("human", "human", "m")
        self.shop = Shop()
        # UI colour palette.
        self.c_text1 = Color(255, 255, 255)
        self.c_text2 = Color(100, 100, 100)
        self.c_count = Color(80, 80, 80)
        self.c_attack = Color(225, 80, 0)
        self.c_defense = Color(80, 225, 0)
        self.c_magic = Color(0, 80, 225)
        self.c_gold = Color(225, 225, 0)
        self.c_edit = Color(255, 255, 255)
        # Substitutions available to quest text.
        self.quest_hash = {
            'player_name' : self.player.name,
        }
    ##
    ### Engine Tools
    ##
    def get_console_size(self):
        """Return [columns, rows] parsed from os.get_terminal_size()'s repr."""
        from os import get_terminal_size
        line = str(get_terminal_size())
        x = ""
        y = ""
        z = 0
        # Collect digit runs: before the first space -> columns, after -> rows.
        for c in line:
            if c == " ":
                z += 1
            if c.isdigit() == True:
                if z == 0:
                    x += c
                if z == 1:
                    y += c
        return [int(x), int(y)]
    def randomize_mob(self):
        """Spawn a random mob for the current location, scaled to the player."""
        race = world[V.location]['mobs'][random.randint(0, len(world[self.location]['mobs'])-1)]
        sex = random.choice(("m", "f"))
        name = race
        if race in playable_mobs:
            first_name = random.choice(names[race][sex])
            last_name = random.choice(names[race]['l'])
            name = "{} {}".format(first_name, last_name)
        self.mob = Entity(name, race, sex)
        # NOTE(review): the three lines below recompute first/last/name after
        # the Entity was already constructed, and the result is never used —
        # looks like dead code left from a refactor.
        first_name = random.choice(names[race]['m'])
        last_name = random.choice(names[race]['l'])
        name = "{} {}".format(first_name, last_name)
        self.mob.randomize()
        # Give the mob between half and all of the player's experience.
        self.mob.gain_experience(random.randint(int(self.player.experience/2), int(self.player.experience)))
    def inventory_selection(self, inventory, state, gold_txt='', mrk=1.0):
        """Interactive item picker.

        Shows items of the current ``item_type`` with value (scaled by the
        markup ``mrk``) and stat bonuses; digits 1-6 switch category, 0
        backs out to ``state``, an item key returns that key.
        Returns the chosen item key or "nothing".
        """
        selecting = True
        selection = "nothing"
        while selecting:
            T.clear_text()
            T.print("Select an item to use", "\n", self.c_text1)
            T.print("Item Type: {}\n{}\n".format(self.item_type, gold_txt), "\n", self.c_text1)
            for i in inventory:
                if items[i]['type'] == self.item_type:
                    # Build the optional stat columns for this item.
                    s_value = str(items[i]['value'])
                    s_hp = ''
                    s_mp = ''
                    s_HP = ''
                    s_MP = ''
                    s_a = ''
                    s_d = ''
                    s_m = ''
                    s_atk = ''
                    s_def = ''
                    s_mgc = ''
                    if "hp" in items[i]:
                        s_hp = " [{}]".format(items[i]['hp'])
                    if "mp" in items[i]:
                        s_mp = " [{}]".format(items[i]['mp'])
                    if "HP" in items[i]:
                        s_hp = " [{}]".format(items[i]['HP'])
                    if "MP" in items[i]:
                        s_mp = " [{}]".format(items[i]['MP'])
                    if "atk" in items[i]:
                        s_a = " [{}]".format(items[i]['atk'])
                    if "def" in items[i]:
                        s_d = " [{}]".format(items[i]['def'])
                    if "mag" in items[i]:
                        s_m = " [{}]".format(items[i]['mag'])
                    if "attack" in items[i]:
                        s_atk = " [{}]".format(items[i]['attack'])
                    if "defense" in items[i]:
                        s_def = " [{}]".format(items[i]['defense'])
                    if "magic" in items[i]:
                        s_mgc = " [{}]".format(items[i]['magic'])
                    # Padding so the value/stat columns right-align to menu_width.
                    margin = T.menu_width-(len(i)+len(str(inventory[i]))+(
                        len(s_value)+len(s_hp)+len(s_mp)+len(s_HP)+len(s_MP)+len(s_a)+len(s_d)+len(s_m)+len(s_atk)+len(s_def)+len(s_mgc)))
                    print("[{}] {}{}{}{}{}{}{}{}{}{}{}{}{}".format(
                        T.get_colored_text(inventory[i], self.c_count),
                        T.get_colored_text(i, self.c_text1),
                        " "*margin,
                        T.get_colored_text(int((int(s_value)*mrk)), self.c_gold),
                        T.get_colored_text(s_hp, self.c_attack),
                        T.get_colored_text(s_mp, self.c_magic),
                        T.get_colored_text(s_HP, self.c_attack),
                        T.get_colored_text(s_MP, self.c_magic),
                        T.get_colored_text(s_a, self.c_attack),
                        T.get_colored_text(s_d, self.c_defense),
                        T.get_colored_text(s_m, self.c_magic),
                        T.get_colored_text(s_atk, self.c_attack),
                        T.get_colored_text(s_def, self.c_defense),
                        T.get_colored_text(s_mgc, self.c_magic)
                    ))
            T.print("\n(1) Material\n(2) Food\n(3) Potion\n(4) Scroll\n(5) Arms\n(6) Armor\n(0) Back\n", "\n", self.c_text2)
            sel = T.input(": ")
            if sel == "0":
                self.state = state
                selecting = False
            elif sel == "1": V.item_type = "material"
            elif sel == "2": V.item_type = "food"
            elif sel == "3": V.item_type = "potion"
            elif sel == "4": V.item_type = "scroll"
            elif sel == "5": V.item_type = "arms"
            elif sel == "6": V.item_type = "armor"
            elif sel in inventory:
                selection = sel
                selecting = False
        return selection
    def roll_skill(self, entity, skill, rate=100):
        """Skill check: True when a 0..rate roll lands under the skill value."""
        return bool(random.randint(0, rate) < entity.get_skill(skill))
    ##
    ### Entity Tools
    ##
    def display_entity(self, entity, menu):
        """Show stats and skills; jump to level_up if points remain.

        NOTE(review): the ``menu`` parameter and the ``sel`` input value are
        never used here — confirm whether that is intentional.
        """
        self.display_stats(entity)
        self.display_skills(entity)
        sel = T.input("\n: ")
        spending_points = bool(entity.points > 0 or entity.skill_points > 0)
        if spending_points:
            self.state = "level_up"
    def entity_stats(self, entity, menu):
        """Stat screen; lets the entity spend attribute points, then returns to ``menu``."""
        self.display_stats(entity)
        sel = T.input("\n: ")
        spending_points = bool(entity.points > 0)
        while spending_points > 0:
            if entity.points <= 0:
                spending_points = False
                self.state = menu
                return
            self.display_stats(entity)
            T.print("(1) Strength\n(2) Constitution\n(3) Dexterity\n(4) Awareness\n(5) Intelligence\n(6) Charisma\n(0) Back", "\n", self.c_text2)
            sel = T.input(": ")
            if sel == "0":
                spending_points = False
            elif sel == "1":
                entity.strength += 1
                entity.points -= 1
            elif sel == "2":
                entity.constitution += 1
                entity.points -= 1
            elif sel == "3":
                entity.dexterity += 1
                entity.points -= 1
            elif sel == "4":
                entity.awareness += 1
                entity.points -= 1
            elif sel == "5":
                # NOTE(review): the menu labels option 5 "Intelligence" but this
                # increments ``wisdom``, while display_stats shows
                # ``entity.intelligence`` — one of these looks stale; confirm
                # against the Entity class.
                entity.wisdom += 1
                entity.points -= 1
            elif sel == "6":
                entity.charisma += 1
                entity.points -= 1
            entity.calculate_derived()
        self.state = menu
    def entity_skills(self, entity, menu):
        """Skill screen; lets the entity spend skill points, then returns to ``menu``."""
        T.clear_text()
        for s in entity.skill_mod:
            T.expanded_text("({})".format(s), entity.get_skill(s), " ", self.c_text2)
        while entity.skill_points > 0:
            T.clear_text()
            for s in entity.skill_mod:
                T.expanded_text("({})".format(s), entity.get_skill(s), " ", self.c_text2)
            T.print("(0) Done")
            T.print("Remaining Points: {}".format(entity.skill_points))
            sel = T.input(": ")
            if sel == "0":
                self.state = menu
                break
            else:
                if sel in entity.skill_mod and entity.skill_points > 0:
                    entity.skill_mod[sel] += 1
                    entity.skill_points -= 1
    def display_stats(self, entity):
        """Print the full right-aligned stat sheet for ``entity``."""
        T.clear_text()
        exp = "{}/{}".format(entity.exp, entity.level_up)
        ehp = "{}/{}".format(entity.hp, entity.HP)
        emp = "{}/{}".format(entity.mp, entity.MP)
        emg = "{} [{}]".format(entity.magic, entity.get_magic_bonus())
        eat = "{} [{}]".format(entity.attack, entity.get_attack_bonus())
        edf = "{} [{}]".format(entity.defense, entity.get_defense_bonus())
        # Each line pads with spaces so the value is flush with menu_width.
        T.print("Location:{}{}".format(" "*(T.menu_width-(len("Location:")+len(V.location))), V.location), "\n", self.c_text1)
        T.print("\nName:{}{}".format(" "*(T.menu_width-(len("Name:")+len(entity.name))), entity.name), "\n", self.c_text1)
        T.print("Race:{}{}".format(" "*(T.menu_width-(len("Race:")+len(entity.race))), entity.race), "\n", self.c_text1)
        T.print("Sex:{}{}".format(" "*(T.menu_width-(len("Sex:")+len(entity.sex))), entity.sex), "\n", self.c_text1)
        T.print("Job:{}{}".format(" "*(T.menu_width-(len("Job:")+len(entity.job))), entity.job), "\n", self.c_text1)
        T.print("\nGold:{}{}".format(" "*(T.menu_width-(len("Gold:")+len(str(entity.gold)))), entity.gold), "\n", self.c_text1)
        T.print("Level:{}{}".format(" "*(T.menu_width-(len("Level:")+len(str(entity.level)))), entity.level), "\n", self.c_text1)
        T.print("Points:{}{}".format(" "*(T.menu_width-(len("Points:")+len(str(entity.points)))), entity.points), "\n", self.c_text1)
        T.print("Skill Points:{}{}".format(" "*(T.menu_width-(len("Skill Points:")+len(str(entity.skill_points)))), entity.skill_points), "\n", self.c_text1)
        T.print("Experience:{}{}".format(" "*(T.menu_width-(len("Experience:")+len(exp))), exp), "\n", self.c_text1)
        T.print("\nHealth:{}{}".format(" "*(T.menu_width-(len("Health:")+len(ehp))), ehp), "\n", self.c_text1)
        T.print("Mana:{}{}".format(" "*(T.menu_width-(len("Mana:")+len(emp))), emp), "\n", self.c_text1)
        T.print("Magic:{}{}".format(" "*(T.menu_width-(len("Magic:")+len(emg))), emg), "\n", self.c_text1)
        T.print("Attack:{}{}".format(" "*(T.menu_width-(len("Attack:")+len(eat))), eat), "\n", self.c_text1)
        T.print("Defense:{}{}".format(" "*(T.menu_width-(len("Defense:")+len(edf))), edf), "\n", self.c_text1)
        T.print("\nStrength:{}{}".format(" "*(T.menu_width-(len("Strength:")+len(str(entity.strength)))), entity.strength), "\n", self.c_text1)
        T.print("Constitution:{}{}".format(" "*(T.menu_width-(len("Constitution:")+len(str(entity.constitution)))), entity.constitution), "\n", self.c_text1)
        # NOTE(review): the padding below measures "dexterity:" (lowercase)
        # while the label prints "Dexterity:" — same length, so harmless.
        T.print("Dexterity:{}{}".format(" "*(T.menu_width-(len("dexterity:")+len(str(entity.dexterity)))), entity.dexterity), "\n", self.c_text1)
        T.print("Awareness:{}{}".format(" "*(T.menu_width-(len("Awareness:")+len(str(entity.awareness)))), entity.awareness), "\n", self.c_text1)
        T.print("Intelligence:{}{}".format(" "*(T.menu_width-(len("Intelligence:")+len(str(entity.intelligence)))), entity.intelligence), "\n", self.c_text1)
        T.print("Charisma:{}{}".format(" "*(T.menu_width-(len("Charisma:")+len(str(entity.charisma)))), entity.charisma), "\n", self.c_text1)
    def display_skills(self, entity):
        """Print the entity's skills in columns sized from the menu width."""
        T.print()
        count = 1
        for s in entity.skills:
            skl = "{}{}".format(T.expand_text("| {}".format(s), 12, ' ', 'l'), T.expand_text(entity.get_skill(s), 4, ' ', 'r'))
            T.print(skl, "", V.c_text1)
            # Wrap to a new row every menu_width/16 entries.
            if count % int(T.menu_width/16) == 0:
                T.print()
            count += 1
V = Var()  # module-level game-state singleton (presumably imported by the rest of the game — confirm)
| ihave13digits/PythonTextRPG | var.py | var.py | py | 12,139 | python | en | code | 4 | github-code | 36 |
8546055493 | import random
import pygame as pg
from creature import Creature
def draw_creatures(list):
    """Call ``draw()`` on every creature in the given sequence.

    Fixes: removed the dead ``i = 0`` assignment and the index-based
    ``range(len(...))`` loop. The parameter name (which shadows the
    builtin ``list``) is kept for interface compatibility.
    """
    for creature in list:
        creature.draw()
def replication(creatures, display, border_rect, start_speed,\
        start_sense, start_energy, speed_mutation, sense_mutation, nutrition, color):
    """Breed offspring: every creature that ate >= 2 and has energy left
    spawns one mutated child inheriting its speed, radius and sense.

    NOTE(review): this appends to ``creatures`` while iterating it, so the
    newly added offspring are themselves visited in the same pass. That is
    only safe if a fresh Creature starts with food_count < 2 — TODO confirm
    Creature's defaults.
    """
    for creature in creatures:
        if creature.food_count >= 2 and creature.energy_used < creature.energy:
            new_creature = Creature(display, border_rect, start_speed, start_sense,\
                start_energy, speed_mutation, sense_mutation, nutrition, color)
            # Child inherits the parent's traits, then mutates them.
            new_creature.speed = creature.speed
            new_creature.radius = creature.radius
            new_creature.sense = creature.sense
            new_creature.mutate()
            creatures.append(new_creature)
    return creatures
def check_survival(creatures, display, border_rect, start_speed, start_sense,\
        start_energy, speed_mutation, sense_mutation, nutrition, color):
    """Drop the dead, breed the survivors, then reset everyone for the next round."""
    # A creature survives only if it ate something AND has energy to spare.
    survivors = [c for c in creatures if c.food_count > 0 and c.energy_used < c.energy]
    survivors = replication(survivors, display, border_rect, start_speed,
                            start_sense, start_energy, speed_mutation,
                            sense_mutation, nutrition, color)
    for creature in survivors:
        creature.food_count = 0
        creature.energy_used = 0
        creature.spawn()
    return survivors
def spawn_food(display, num, WIDTH, HEIGHT):
    """Draw up to ``num`` green WIDTHxHEIGHT food rects at random positions
    and return the list of non-overlapping ones.

    NOTE(review): the rect is drawn *before* the collision check, so a
    colliding rect still appears on screen even though it is excluded from
    the returned list.
    """
    color = (0, 255, 0)
    rectangles = []
    for i in range(num):
        # Random position inside a hard-coded 100..900 area — TODO confirm
        # this matches the actual display bounds.
        x = random.randint(100, 900)
        y = random.randint(100, 900)
        rect = pg.draw.rect(display, color, (x, y, WIDTH, HEIGHT))
        collides = False
        for r in rectangles:
            if rect.colliderect(r):
                collides = True
        if not collides:
            rectangles.append(rect)
    return rectangles
def avg_of_list(list):
    """Return the arithmetic mean of ``list`` rounded to 2 decimal places.

    Raises ZeroDivisionError for an empty sequence (unchanged behaviour).
    The parameter name shadows the builtin ``list`` but is kept for
    interface compatibility.
    """
    # round() replaces the old float("%.2f" % x) string round-trip; both
    # round half-to-even at two decimal places.
    return round(sum(list) / len(list), 2)
25050666323 | import numpy as np
from teilab.utils import dict2str, subplots_create
from teilab.plot.plotly import boxplot
# Demo: draw the same data as vertical and horizontal boxplots side by side.
n_samples, n_features = (4, 1000)
# Seeded normal samples; each row's mean equals its row index.
data = np.random.RandomState(0).normal(loc=np.expand_dims(np.arange(n_samples), axis=1), size=(n_samples, n_features))
kwargses = [{"vert":True},{"vert":False}]
title = ", ".join([dict2str(kwargs) for kwargs in kwargses])
nfigs = len(kwargses)
fig = subplots_create(ncols=nfigs, style="plotly")
# One subplot column per kwargs variant (plotly columns are 1-based).
for col,kwargs in enumerate(kwargses, start=1):
    _ = boxplot(data, title=title, fig=fig, col=col, width=1000, height=400, **kwargs)
fig.show()
| iwasakishuto/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments | docs/teilab-plot-plotly-2.py | teilab-plot-plotly-2.py | py | 584 | python | en | code | 0 | github-code | 36 |
6270963777 | import unittest
import os
import pandas as pd
from src.chant import Chant
from src.chant import get_chant_by_id
# Load demo chants shipped with the repo (../cantus-data/chants-demo.csv),
# indexed by the 'id' column. Loaded once at import time for all tests.
CUR_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(CUR_DIR, os.path.pardir))
_demo_chants_fn = os.path.join(ROOT_DIR, 'cantus-data', 'chants-demo.csv')
CHANTS = pd.read_csv(_demo_chants_fn, index_col='id')
class TestChant(unittest.TestCase):
    """Unit tests for the project's Chant wrapper."""
    def test_dummy(self):
        """A hand-built dict exposes id and volpiano through Chant."""
        data = {
            'id': 'id1',
            'volpiano': 'abc--de-f'
        }
        chant = Chant(data)
        self.assertEqual(chant.id, 'id1')
        self.assertTrue(chant.has_volpiano)
    def test_init(self):
        """Looking up the first demo chant by id round-trips the id."""
        chant = get_chant_by_id(CHANTS, CHANTS.index[0])
        self.assertEqual(chant.id, CHANTS.index[0])
        self.assertEqual(chant.get('id'), CHANTS.index[0])
| bacor/ISMIR2020 | tests/test_chant.py | test_chant.py | py | 828 | python | en | code | 4 | github-code | 36 |
43661627525 |
def ordenar(stack):
    # NOTE(review): despite the name ("ordenar" = "to sort"), this only
    # reports whether the container is empty.
    return len(stack)== 0
def double(x):
    """Return ``x`` doubled (works for any type that supports ``* 2``)."""
    return x * 2
a=[5, 6, 9, 8, 7]
# Print each element.
for i in range(0, len(a)):
    print(a[i])
# Bubble sort (ascending): the +1-2 bounds are equivalent to len(a)-1.
for i in range (0, len(a)+1 -2):
    for j in range (0, len(a)+1 - i-2):
        x= a[j]
        y= a[j+1]
        if x > y:
            # Swap adjacent out-of-order elements.
            a[j] = y
            a[j+1] = x
print (a)
print("What's your name?")
nombre= input()
print("What's your last name?")
apellido= input()
print("What's your age?")
edad= input()
nomCom=nombre+" "+ apellido # concatenate first and last name
print ("Hola ", nomCom, " de ", edad, " años")
#time.sleep(2)
| up210612/UP210612_DSA | Unit 1/section5.py | section5.py | py | 589 | python | en | code | 0 | github-code | 36 |
9661100905 | from __future__ import print_function
import sys
import time
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from useFunc.detectAndTrack import *
from useFunc.utils import *
from useFunc.featMatch import *
if __name__ == '__main__':
    # ---- Parameters ----
    intv_EM = 4  # run ego-motion estimation every 4th frame
    # - focal length, Camera height
    foc_len, H_cam = 1200, 0.8
    thresh = 3  # threshold angle to avoid outliers
    # settings for read & write video (Windows paths, hard-coded)
    prePath = r'C:\ProgamData\global_dataset\img_vid'
    vidName = r'\vid1_4'
    fmt = '.mp4'
    cap = cv.VideoCapture(prePath + vidName + fmt)
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    fps_vid = cap.get(cv.CAP_PROP_FPS)
    sizeW, sizeH = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
    # size = (sizeW*2, int(sizeH*crop))
    size = (sizeW, sizeH)
    c_pnt = (int(sizeW / 2), int(sizeH / 2))
    # Camera intrinsics; the /1.6 on fy looks like an aspect-ratio fudge —
    # TODO confirm against the actual calibration.
    camMat = np.array([[foc_len, 0, c_pnt[0]],
                       [0, foc_len / 1.6, c_pnt[1]],
                       [0, 0, 1]])
    # init video writer
    write_name = 'output\\' + vidName + '_EM.avi'
    vidWrite = cv.VideoWriter(write_name, fourcc, fps_vid, size)
    # Read first frame, quit if unable to read the video file
    success, _ = cap.read()
    if not success:
        print('Failed to read video')
        sys.exit(1)
    # ---- Main loop ----
    # NOTE(review): cap.release(), vidWrite.release() and
    # cv.destroyAllWindows() are never called on exit.
    numFr = 0
    while cap.isOpened():
        # see if it's the end
        t1 = time.time()
        success, frame = cap.read()
        if not success:
            print("Done")
            break
        frameCopy = np.copy(frame)
        # Ego-motion estimation, independent of detection
        if numFr == 0:  # first frame: initialise reference frame and pose
            frame0 = np.copy(frameCopy)
            angs = np.array([0, 0, 0])
            pitch = 0  # orig pose
        elif numFr % intv_EM == 0:  # estimate rotation angles vs. reference
            angs = feat_match(frame0, frameCopy, numFr, size, camMat=camMat, crop=1,
                              foc_len=foc_len, match_pnts=20, thresh=thresh)
            frame0 = np.copy(frameCopy)  # stored for next round
            pitch += angs[0]  # accumulate pitch over time
        # counter update
        numFr += 1
        t = time.time() - t1
        # overlay timing/pose info on the frame
        if t > 0:
            print_info(frameCopy, t, numFr, pitch, angs[0])
        cv.imshow("Ego-motion", frameCopy)
        vidWrite.write(frameCopy)
        if cv.waitKey(1) & 0xFF == 27:  # ESC quits
            break
| dexter2406/MonoVision_MotionEstimation | MoVis_EM.py | MoVis_EM.py | py | 2,416 | python | en | code | 0 | github-code | 36 |
import os
import re

# Each student's submission lives in its own folder under ./homeworks.
# (os.path.join replaces the old Windows-only '\\' concatenation.)
root_dir = os.path.join(os.getcwd(), 'homeworks')
all_dir = os.listdir(root_dir)

# Open the CSV once instead of re-opening it in append mode per student.
with open("file_lists.csv", "a") as file_list:
    file_list.write('学生,检索报告,综述论文,Endnote截图,Endnote压缩库,其他\n')
    for stu_dir in all_dir:
        stu = os.listdir(os.path.join(root_dir, stu_dir))
        file_list.write(stu_dir)
        report = ''
        review = ''
        pic = ''
        enlx = ''
        other = []
        for name in stu:
            # CNKI exports always go into the "other" column.
            if re.search('cnki|CNKI', name):
                other.append(name)
            # Classify by filename keyword / extension.
            # Fixed typo in the image pattern: "JPGE" -> "JPEG".
            if re.search('检索报告', name):
                report = name
            elif re.search('综述论文', name):
                review = name
            elif re.search('jpg|jpeg|png|PNG|JPG|JPEG', name):
                pic = name
            elif re.search('enlx', name):
                enlx = name
            else:
                other.append(name)
        file_list.write(",{0},{1},{2},{3}".format(report, review, pic, enlx))
        for extra in other:
            file_list.write(",{0}".format(extra))
        file_list.write("\n")
file_list.write("\n") | tianyaxin/compare_docx | get_all_file_lists.py | get_all_file_lists.py | py | 1,269 | python | en | code | 0 | github-code | 36 |
29719010517 | from functools import reduce
def flip_data(arr, curr, l):
    """Reverse, in place, the length-``l`` segment of ``arr`` that starts
    at index ``curr``, wrapping around the end of the list."""
    if l == 1:
        # A single element: nothing to reverse.
        return
    size = len(arr)
    positions = [(curr + offset) % size for offset in range(l)]
    values = [arr[p] for p in positions]
    for p, v in zip(positions, reversed(values)):
        arr[p] = v
def chunks(l, n):
    """Yield successive slices of ``l`` of size ``n`` (last may be shorter)."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def sparse_hash(arr):
    """Condense a 256-number sparse hash into the 32-char hex *dense* hash.

    (The original docstring said "calculate sparse hash", but per the Knot
    Hash algorithm this function takes the sparse hash and produces the
    dense hash: XOR each block of 16 numbers, then hex-encode each result.)

    Also replaces the hex(res)[2:].zfill(2) string hack with format(v, '02x')
    and inlines the 16-element chunking so this function no longer depends
    on the sibling ``chunks`` helper.
    """
    digest = ''
    for start in range(0, len(arr), 16):
        block = arr[start:start + 16]
        assert len(block) == 16  # input length must be a multiple of 16
        value = reduce(lambda a, b: a ^ b, block)
        digest += format(value, '02x')
    return digest
def hash_data(numbers, lengths):
    """Run 64 rounds of knot-tying over ``numbers`` and return the dense hash."""
    position = 0
    skip = 0
    for _round in range(64):
        for length in lengths:
            # Reverse the wrapped segment, then advance by length + skip.
            flip_data(numbers, position, length)
            position = (position + length + skip) % len(numbers)
            skip += 1
    return sparse_hash(numbers)
def convert_length(lengths_raw):
    """Convert the input string to ASCII codes plus the standard suffix."""
    return [ord(char) for char in lengths_raw] + [17, 31, 73, 47, 23]
# Self-checks with the known example hashes (these run at import time).
numbers = list(range(256))
lengths = convert_length('')
assert hash_data(numbers, lengths) == 'a2582a3a0e66e6e86e3812dcb672a272'
numbers = list(range(256))
lengths = convert_length('AoC 2017')
assert hash_data(numbers, lengths) == '33efeb34ea91902bb2f59c9920caa6cd'
numbers = list(range(256))
lengths = convert_length('1,2,3')
assert hash_data(numbers, lengths) == '3efbe78a8d82f29979031a4aa0b16a9d'
numbers = list(range(256))
lengths = convert_length('1,2,4')
assert hash_data(numbers, lengths) == '63960835bcdc130f0b66d7ff4f6a5a8e'
# Real puzzle input: print its dense hash.
numbers = list(range(256))
lengths = convert_length(
    "76,1,88,148,166,217,130,0,128,254,16,2,130,71,255,229")
print(hash_data(numbers, lengths))
| yknot/adventOfCode | 2017/10_02.py | 10_02.py | py | 2,192 | python | en | code | 0 | github-code | 36 |
16245179877 | # allows for use of bash scripts
import subprocess
#protects password imputs so they are not palin text
import getpass
# import os module
import os
gitHub_address = 'https://github.com/alexboyd92/TestLabScripts.git'
localFileLocation = '/etc/default/isc-dhcp-server'
localGit = '/home'
replacefile = '/home/testlab/TestLabScripts/config/isc-dhcp-server'
homeDir = '/home/testlab/'
install = 'sudo apt-get install'
# runs a bash command
def runCommand(bashCommand):
    """Run a command by naively whitespace-splitting it; always returns 0.

    NOTE(review): Popen with an argument list and no shell means shell
    syntax is NOT interpreted — commands containing pipes or quoted
    arguments (used elsewhere in this file) will not behave as intended.
    NOTE(review): stderr is not captured, so ``error`` is always None, and
    the exit status is discarded — failures are silently ignored.
    """
    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    return 0
def update():
    """Refresh apt package lists, then upgrade installed packages."""
    runCommand("sudo apt-get update -y")
    print ("update complete")
    runCommand("sudo apt-get upgrade -y")
    print ("upgrade complete")
    return 0
#installs virtualbox
def installVBox():
    """Add Oracle's VirtualBox apt repo and install the package."""
    # NOTE(review): these two commands contain a shell pipe, but
    # runCommand() executes without a shell, so the "| sudo apt-key add -"
    # part is passed as literal arguments to wget — the keys are never
    # actually added.
    getKey = "wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -"
    getKey2 = "wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -"
    # NOTE(review): runCommand's split() also breaks the quoted "deb ..."
    # argument here into separate words.
    getRepo = 'sudo add-apt-repository "deb http://download.virtualbox.org/virtualbox/debian bionic contrib"'
    getVbox = 'sudo apt-get install virtualbox -y'
    runCommand(getKey)
    runCommand(getKey2)
    runCommand(getRepo)
    print ("vbox repo setup complete")
    update()
    runCommand(getVbox)
    print ("Vbox Installed")
    return 0
def installGit():
    """Install git via apt."""
    print('installing git')
    runCommand('sudo apt-get install git -y')
    print('git Installed')
    return 0
# grabs files from github and places them into the correct spots
def configDHCP(gitUser, gitPass):
    """Clone the config repo and copy the DHCP server config files into place.

    NOTE(review): unlike the other helpers in this file, this one has no
    ``return 0``.
    """
    localFileLocation = '/etc/default/isc-dhcp-server'
    localGit = '/home'
    replacefile = '/home/testlab/TestLabScripts/config/isc-dhcp-server'
    # auto inputs username/ password
    # SECURITY NOTE(review): embedding credentials in the command line makes
    # them visible in the process list and shell history. Also, since
    # gitHub_address already starts with "https://", this builds
    # "user:pass@https://github.com/..." — the credentials land *before*
    # the scheme, which is not a valid clone URL; confirm this ever worked.
    getConfigFiles = 'sudo git clone '+gitUser+':'+gitPass+'@'+ gitHub_address
    #testing
    #getConfigFiles = 'sudo git clone '+ gitHub_address
    print('configuring your DHCP Server')
    installGit()
    os.chdir(localGit)
    runCommand('git init')
    print("git initilaized")
    runCommand(getConfigFiles)
    os.system('cp '+replacefile+' '+localFileLocation)
    # NOTE(review): '/etc/dhcps.conf' looks like a typo — isc-dhcp-server
    # normally reads /etc/dhcp/dhcpd.conf; confirm.
    localFileLocation = '/etc/dhcps.conf'
    localGit = '/home'
    replacefile = '/home/testlab/TestLabScripts/config/dhcpd.conf'
    os.system('cp '+replacefile+' '+localFileLocation)
    print('DHCP Server configured')
    os.chdir(homeDir)
def installDHCP():
    """Install the ISC DHCP server package via apt."""
    print('installing a DHCP Server')
    runCommand('sudo apt-get install isc-dhcp-server -y')
    print('DHCP Server installed')
    return 0
def installOpenssh():
    """Install the OpenSSH server package via apt."""
    print('installing openssh')
    runCommand('sudo apt-get install openssh-server -y')
    print('openssh installed')
    return 0
def startDHCP():
    """Start the isc-dhcp-server service and enable it at boot."""
    runCommand('sudo systemctl start isc-dhcp-server.service')
    runCommand('sudo systemctl enable isc-dhcp-server.service')
    print('DHCP is started and set up to restart on boot')
    return 0
def configIP():
    """Copy the netplan config into /etc/netplan/ and apply it."""
    #'sudo nano /etc/network/interfaces'
    setIP = 'sudo netplan apply'
    localFileLocation = '/etc/netplan/'
    replacefile = '/home/testlab/TestLabScripts/config/host_ip_setup.yaml'
    print('configuring Ip settings')
    # NOTE(review): os.system with string concatenation; would fail on
    # paths containing spaces and ignores the cp exit status.
    os.system('cp '+replacefile+' '+localFileLocation)
    runCommand(setIP)
    print('Ip settings configured')
    return 0
def main():
    """Interactive driver: prompt for GitHub credentials, then run the
    full host setup (update, VirtualBox, OpenSSH, DHCP, config, IP)."""
    gitUser = input('github Username:')
    # getpass keeps the password out of the terminal echo.
    gitPass = getpass.getpass(prompt='github password:', stream=None)
    update()
    installVBox()
    installOpenssh()
    installDHCP()
    configDHCP(gitUser, gitPass)
    configIP()
    startDHCP()
    update()
    return 0
if __name__ == '__main__':
main()
| alexboyd92/TestLabScripts | python_scripts/host_install.py | host_install.py | py | 3,612 | python | en | code | 1 | github-code | 36 |
27668133159 | import os
from PIL import Image
def resize_image(path, new_path, width, height, crop_center=True):
    '''Resize the image at *path* and save it under *new_path* as icon-<width>.png.

    When crop_center is true the image is first square-cropped around its
    centre so the resize does not distort the aspect ratio.
    '''
    source = Image.open(path)
    if crop_center:
        source = crop_center_image(source)
    resized = source.resize((width, height))
    target = os.path.join(new_path, 'icon')
    resized.save("{}-{}.{}".format(target, str(width), 'png'))
def crop_center_image(image, new_width=None, new_height=None):
    '''Return the centre crop of *image*.

    With no explicit dimensions the crop is the largest centred square
    (sized by the shorter edge). Box coordinates are floats, matching the
    original implementation and PIL's tolerant crop() signature.
    '''
    width, height = image.size
    if new_width is None or new_height is None:
        # Default to a square bounded by the shorter side.
        side = min(width, height)
        new_width, new_height = side, side
    left = (width - new_width) / 2
    top = (height - new_height) / 2
    right = (width + new_width) / 2
    bottom = (height + new_height) / 2
    return image.crop((left, top, right, bottom))
def generate_icons(image, path, sizes=(32, 57, 76, 96, 128, 228)):
    '''Emit one square icon per entry in *sizes* for the given source image.'''
    for edge in sizes:
        resize_image(image, path, edge, edge)
| jonathanrodriguezs/image-resizer | image_resizer.py | image_resizer.py | py | 1,177 | python | en | code | 0 | github-code | 36 |
class Node:
    """Binary-tree node carrying a key plus the horizontal distance (hd)
    used by the top-view traversal."""

    def __init__(self, key):
        self.data = key    # node key/value
        self.left = None   # left child
        self.right = None  # right child
        self.hd = 0        # horizontal distance from the root
def topview(root):
    """Print the top view of the binary tree rooted at *root*.

    BFS the tree, remembering the first node seen at each horizontal
    distance, then print those values left-to-right with no separator.
    """
    if root is None:
        return
    first_at = dict()
    root.hd = 0
    queue = [root]
    while queue:
        node = queue.pop(0)
        if node.hd not in first_at:
            first_at[node.hd] = node.data
        if node.left:
            node.left.hd = node.hd - 1
            queue.append(node.left)
        if node.right:
            node.right.hd = node.hd + 1
            queue.append(node.right)
    for hd in sorted(first_at):
        print(first_at[hd], end="")
if __name__ == "__main__":
"""
from timeit import timeit
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.right = Node(4)
root.left.right.right = Node(5)
root.left.right.right.right = Node(6)
print(timeit(lambda: topview(root), number=10000)) # 0.043794581994006876
"""
| thisisshub/DSA | O_binary_search_tree/problems/I_top_view_of_binary_tree.py | I_top_view_of_binary_tree.py | py | 964 | python | en | code | 71 | github-code | 36 |
# Read five grades, drop the best and the worst, and average the rest.
# (Prompts and output are kept in Spanish for the end user.)
n1 = int(input("primera nota:"))
n2 = int(input("segunda nota:"))
n3 = int(input("tercera nota:"))
n4 = int(input("cuarta nota:"))
n5 = int(input("quinta nota:"))

notas = (n1, n2, n3, n4, n5)
# BUG FIX: the original if/elif chains used strict comparisons, so when the
# highest (or lowest) grade appeared more than once every branch failed and
# n5 was reported by default. max()/min() handle ties correctly.
nma = max(notas)
nme = min(notas)
# Average of the three middle grades: total minus one max and one min.
prom = (sum(notas) - nma - nme) / 3
print(f"Promedio:{prom:.1f}\nNota Mayor: {nma}\nNota Menor: {nme}")
| PBMGC/EJERCICIOS-CONDICIONALES-PYTHON | 10.py | 10.py | py | 744 | python | en | code | 1 | github-code | 36 |
25168937459 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Find the least value of n for which p(n) is divisible by one million.
from __future__ import print_function
import timeit
import sys
try:
range = xrange
except NameError:
pass
start = timeit.default_timer()
sys.setrecursionlimit(25000)
def pent(n):
    """Return the pentagonal number P(n) = n*(3n - 1)/2.

    Works for negative n too (needed by gen_pent). Uses pure integer
    arithmetic instead of the original 0.5 * n * (3n - 1) float product,
    which loses precision once the product exceeds 2**53.
    n*(3n - 1) is always even, so the floor division is exact.
    """
    return n * (3 * n - 1) // 2
def gen_pent(n):
    """Return the n-th generalized pentagonal number (1, 2, 5, 7, 12, 15, ...).

    Maps n = 1, 2, 3, 4, ... onto k = 1, -1, 2, -2, ... and returns
    k*(3k - 1)/2.

    BUG FIX: the original used round((n + 1) / 2), which relied on Python 2
    integer division; under Python 3, true division plus banker's rounding
    (round(1.5) == 2) produced the wrong k for every even n. Integer floor
    division restores the intended sequence.
    """
    k = ((-1) ** (n + 1)) * ((n + 1) // 2)
    # k*(3k - 1) is always even, so this floor division is exact.
    return k * (3 * k - 1) // 2
partitions = {0:1, 1:1, 2:2, 3:3, 4:5, 5:7, 6:11, 7:15, 8:22, 9:30, 10:42}
def partition(n):
    """Return p(n), the number of integer partitions of n.

    Uses Euler's pentagonal number theorem:
    p(n) = sum over generalized pentagonal numbers gp(i) <= n of
    (+/-1) * p(n - gp(i)), with signs following the + + - - pattern.
    Results are memoized in the module-level `partitions` dict; recursion
    depth can be large, hence the raised recursion limit at script start.
    """
    if n in partitions:
        return partitions[n]
    total, sign, i = 0, 1, 1
    while n - gen_pent(i) >= 0:
        # (i-1)//2 parity yields the + + - - + + ... sign pattern.
        sign = (-1)**int((i - 1) / 2)
        total += sign * partition(n - gen_pent(i))
        i += 1
    partitions[n] = total
    return total
def euler_78():
    """Return the least n for which p(n) is divisible by one million.

    NOTE(review): the search starts at n = 15001, which assumes the answer
    lies above that point; starting from 1 gives the same result, slower.
    """
    n = 15001
    while partition(n) % 1000000 != 0:
        n += 1
    return n
print("Answer: %s" % euler_78())
stop = timeit.default_timer()
print("Time: %f" % (stop - start))
| tijko/Project-Euler | py_solutions_71-80/Euler_78.py | Euler_78.py | py | 990 | python | en | code | 0 | github-code | 36 |
30988421589 | import json
import os
from flask import Flask, request, jsonify
app = Flask(__name__)
here = os.path.dirname(__file__)
state_path = os.path.join(here, "state.json")
@app.route("/")
def home():
    """Return the entire persisted state file as JSON."""
    with open(state_path) as state_file:
        state = json.load(state_file)
    return jsonify(state)
@app.route("/<page>", methods=["GET", "POST"])
def route(page):
    """Read (GET) or update (POST) one top-level section of the state file.

    POST form fields must be existing attributes of the section; values that
    parse as integers are stored as ints, everything else as strings.
    Returns the (possibly updated) section as JSON.
    """
    with open(state_path) as f:
        data = json.load(f)
    if request.method == "POST":
        for k, v in request.form.items():
            if k not in data[page]:
                return f"Invalid attribute {k} for {page}", 400
            try:
                v = int(v)
            except ValueError:
                pass
            data[page][k] = v
        # BUG FIX: the state file used to be rewritten on every request,
        # including plain GETs; only persist when something was modified.
        with open(state_path, "w") as f:
            json.dump(data, f, indent=2)
    return jsonify(data[page])
@app.route("/tank_stats")
def tank_stats():
    """Expose a read-only subset of the "tank" section of the state."""
    exported = [
        "volume_in", "volume_out_tank", "volume_out_urban_network",
        "pump_in_running_duration", "pump_out_running_duration", "urban_network_running_duration", "is_tank_full",
        "is_tank_empty"
    ]
    with open(state_path) as f:
        tank = json.load(f)["tank"]
    return jsonify({name: tank[name] for name in exported})
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0", port=5001)
| tartopum/atelier | fake_arduino/server.py | server.py | py | 1,307 | python | en | code | 2 | github-code | 36 |
def solution(code):
    """Decode *code*: '1' toggles the mode; in mode 0 keep even-indexed
    characters, in mode 1 keep odd-indexed ones ('1' itself is never kept).
    Returns "EMPTY" when nothing survives.
    """
    mode = 0
    kept = []
    for idx, ch in enumerate(code):
        if ch == '1':
            mode = 1 - mode
        elif idx % 2 == mode:
            kept.append(ch)
    return ''.join(kept) or 'EMPTY'
20052535936 | from __future__ import print_function
import matplotlib.pylab as plt
import Layer1.NLSVC as svm
import Layer1.learnerV2a as l
import Layer1.RLLSVM as rlvm
#import Layer1.SVMLearner as svm
#import Layer1.RecurrentSVM as rsvm
#import Layer1.Poly_Learner as pl
#import Layer1.MLP_Learner as mlp
import numpy as np
from Layer2.AccountV2 import Account as acc
if __name__ == "__main__":
    # Backtest driver: builds a noisy sine price series, trains a learner on
    # it, then trades a simulated account and reports final-value statistics.
    # NOTE(review): the loop variable `i` is reused/shadowed by several
    # nested loops below — confirm that is intentional before refactoring.
    #setup
    #receive info
    final_out = list()
    for j in range(1):
        for i in range(10):
            #print("Now in iteration: "+str(i),end='\r')
            #make up default values
            num_learner = 1
            learners = list()
            learner = l.Learner(0.1, 0.1, 0.001, 1, 23)
            learner.threshold = 2
            #learner = svm.Learner()
            #learner = rsvm.Learner(adaption=0.32,transactionCost=1.5)
            #learner = nlsvm.Learner()
            #learner = pl.Learner(j,200)
            #learner = mlp.Learner(layers=2,mode='returns',n_itr=3)
            #learner = rlvm.Learner()
            account = acc("EURUSD",1000,0,0,0,2000)
            val_watch = list()
            val_watch.append(account.total_account_value())
            #learner = l.Learner(0.02,0.3,1,0.95,10)
            #numbers = (np.sin(np.linspace(0, np.pi*8, 201))/3)+np.linspace(0,1,201)*0.5
            # Synthetic price: 4 sine periods scaled into ~[0.17, 0.83],
            # plus per-sample Gaussian noise (sigma=0.03) added below.
            numbers = (np.sin(np.linspace(0, np.pi*8, 20001))/3)+0.5
            #numbers = (np.linspace(0,np.pi*8, 201))
            #numbers = np.multiply(np.sin(np.linspace(0,np.pi*8,201))+1,np.linspace(0,np.pi*8,201)*1.01)
            #numbers = np.sin(np.linspace(0, np.pi*4, 201))+1
            for i in range(len(numbers)):
                numbers[i]+= np.random.normal(0,0.03,1)
            pnumbers = numbers[:20000]
            preds = list()
            #execution loop
            # Warm-up: one full pass over the series so the learner sees the
            # data before any trades are scored.
            for i in range(1):
                for i in range(len(numbers)-1):
                    learner.predict(numbers[i],numbers[i+1])
            #learner.stopLearning()
            for i in range(len(numbers)-1):
                account.update(1/numbers[i], numbers[i],i)
                prediction = learner.predict(numbers[i],numbers[i+1])
                val_watch.append(account.total_account_value())
                # Negative prediction => go long, otherwise short.
                if(prediction < 0):
                    account.execute('long')
                else:
                    account.execute('short')
                # assert isinstance(prediction, float)
                preds.append(prediction)
            final_out.append(account.total_account_value())
        print("------------------------------------------------------------------")
        print("Iteration:"+str(j))
        print("Maximum was: "+str(np.amax(final_out))+" with recurrence: "+str(np.argmax(final_out)))
        print("Mean final profit: "+ str(np.mean(final_out)))
        print("With variance: " + str(np.std(final_out)**2))
    # Plots use the data from the LAST inner iteration only.
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
    ax1.plot(np.linspace(0, np.pi*8,20000), pnumbers)
    ax1.plot(np.linspace(0, np.pi*8,20000), preds)
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    ax2.plot(range(len(val_watch)),val_watch)
    plt.xlabel('time in price-updates')
    plt.ylabel('total account value')
    plt.axis('tight')
    plt.show()
| MLRichter/AutoBuffett | layer1_testScript.py | layer1_testScript.py | py | 3,264 | python | en | code | 8 | github-code | 36 |
11486559115 | import tkinter as tk
# Simple tkinter form: six labelled Entry fields on a 2-column grid plus a
# save button. Widget references are kept so a future handler can read them.
janela = tk.Tk()
janela.title("Formulário")
janela.geometry("500x400")


def press():
    # Placeholder callback for the "Gravar" (save) button.
    print("Press")


#Nome
Nome = tk.Label(text="Nome:", font=("arial", 16))
Nome.grid(column=0, row=0)
input_nome = tk.Entry()
input_nome.grid(column=0, row=1)

#Idade
idade = tk.Label(text="Idade:", font=("arial", 16))
idade.grid(column=1, row=0)
input_idade = tk.Entry()
input_idade.grid(column=1, row=1)

#Genero
genero = tk.Label(text="Genero:", font=("arial", 16))
genero.grid(column=0, row=2)
input_genero = tk.Entry()
input_genero.grid(column=0, row=3)

#Número
numero = tk.Label(text="Número:", font=("arial", 16))
numero.grid(column=1, row=2)
input_numero = tk.Entry()
input_numero.grid(column=1, row=3)

#Classe
classe = tk.Label(text="Classe:", font=("arial", 16))
classe.grid(column=0, row=4)
input_classe = tk.Entry()
input_classe.grid(column=0, row=5)

#Turma
turma = tk.Label(text="Turma:", font=("arial", 16))
turma.grid(column=1, row=4)
# BUG FIX: this Entry used to be assigned to `input_classe`, silently
# overwriting the "Classe" field's widget reference.
input_turma = tk.Entry()
input_turma.grid(column=1, row=5)

#Gravar
btn_gravar = tk.Button(text="Gravar", font="arial", padx=30, pady=15, foreground="blue")
btn_gravar["command"] = press
btn_gravar.grid(column=0, row=7)

janela.mainloop()
| LeynilsonThe1st/python | scratches/my_app.py | my_app.py | py | 1,297 | python | pt | code | 0 | github-code | 36 |
36028935609 | import jwt
from .models.models import *
# get_permissions
def get_permissions(user_id):
    """Build and sign a JWT describing everything *user_id* may do.

    Admins (truthy ``role``) get blanket read/create access plus write
    access to the lists/cards/comments/replies they own directly or via
    their own lists; members only get rights inside the lists assigned to
    them. Entity ids are embedded in the token as lists of id strings, or
    the literal 'All' / False for blanket grant / denial.

    NOTE(review): PyJWT >= 2 returns a str here while 1.x returns bytes —
    confirm the installed version matches what consumers expect.
    """
    # check role
    user = User.query.get(user_id)
    role = user.role
    # If role is true then user is admin
    if role:
        # get all user created lists
        user_owned_lists_query = List.query.filter(List.creator_id == user_id).all()
        user_owned_lists = [ str(lst.id) for lst in user_owned_lists_query ]
        # get all cards on user lists or cards he created
        user_lists_cards_query = Cards.query.filter(Cards.list_id.in_(user_owned_lists)).all()
        user_lists_cards = [ str(crd.id) for crd in user_lists_cards_query ]
        all_user_cards_query = Cards.query.filter(Cards.creator_id == user_id).all()
        all_user_cards = [ str(crd.id) for crd in all_user_cards_query ]
        all_user_cards += user_lists_cards
        # get all user created comments or comments in his cards or cards in own lists
        user_own_comments_query = Comments.query.filter(Comments.creator_id == user_id).all()
        user_own_comments = [ str(cmnt.id) for cmnt in user_own_comments_query ]
        user_cards_comments_query = Comments.query.filter(Comments.card_id.in_(user_lists_cards)).all()
        user_cards_comments = [ str(cmnt.id) for cmnt in user_cards_comments_query ]
        all_user_comments = user_own_comments + user_cards_comments
        # get all user created replies or replies in his cards or cards in own lists
        all_user_replies_query = Replies.query.filter(Replies.comment_id.in_(all_user_comments)).all()
        all_user_replies = [ str(rply.id) for rply in all_user_replies_query ]
        # create payload
        payload = {
            'user_id': user_id,
            'role': 'Admin',
            'permissions': {
                'get_all_lists': 'All',
                'create_list': 'All',
                'update_list': user_owned_lists,
                'delete_list': user_owned_lists,
                'get_list': 'All',
                'assign_member_list': user_owned_lists,
                'revoke_member_list': user_owned_lists,
                'get_all_users': 'All',
                'create_card': 'All',
                'update_card': all_user_cards,
                'delete_card': all_user_cards,
                'get_card': 'All',
                'create_comment': 'All',
                'update_comment': all_user_comments,
                'delete_comment': all_user_comments,
                'get_comment': 'All',
                'create_replies': 'All',
                'update_replies': all_user_replies,
                'delete_replies': all_user_replies,
                'get_replies': 'All',
            }
        }
        # SECURITY(review): hard-coded signing secret (also duplicated in
        # check_permissions below); move it to configuration.
        secret = 'Irithm task is awesome'
        algo = "HS256"
        # encode a jwt
        encoded_jwt = jwt.encode(payload, secret, algorithm=algo)
        return encoded_jwt
    # if role is False the user is a member
    else:
        # get all lists assigned to the user
        user_assigned_lists_query = UserLists.query.filter(UserLists.user_id == user_id).all()
        user_assigned_lists = [str(lst.list_id) for lst in user_assigned_lists_query]
        # get all cards on user lists and cards he created in his assigned lists
        all_user_view_cards_query = Cards.query.filter(Cards.list_id.in_(user_assigned_lists)).all()
        all_user_view_cards = [str(crd.id) for crd in all_user_view_cards_query]
        all_user_created_cards_query = Cards.query.filter(Cards.creator_id == user_id).all()
        all_user_created_cards = [str(crd.id) for crd in all_user_created_cards_query]
        # get all user created comments and comments in his cards in assigned lists
        all_user_view_comments_query = Comments.query.filter(Comments.card_id.in_(all_user_view_cards)).all()
        all_user_view_comments = [str(cmnt.id) for cmnt in all_user_view_comments_query]
        all_user_created_comments_query = Comments.query.filter(Comments.creator_id == user_id).all()
        all_user_created_comments = [str(cmnt.id) for cmnt in all_user_created_comments_query]
        # get all user created replies or replies in his cards or cards in own lists
        all_user_view_replies_query = Replies.query.filter(Replies.comment_id.in_(all_user_view_comments)).all()
        all_user_view_replies = [str(rply.id) for rply in all_user_view_replies_query]
        all_user_created_replies_query = Replies.query.filter(Replies.creator_id == user_id).all()
        all_user_created_replies = [ str(rply.id) for rply in all_user_created_replies_query ]
        # create payload
        payload = {
            'user_id': user_id,
            'role': 'Member',
            'permissions': {
                'get_all_lists': False,
                'create_list': False,
                'update_list': False,
                'delete_list': False,
                'get_list': user_assigned_lists,
                'get_all_users': False,
                'assign_member_list': False,
                'revoke_member_list': False,
                'create_card': user_assigned_lists,
                'update_card': all_user_created_cards,
                'delete_card': all_user_created_cards,
                'get_card': user_assigned_lists,
                'create_comment': all_user_view_cards,
                'update_comment': all_user_created_comments,
                'delete_comment': all_user_created_comments,
                'get_comment': all_user_view_cards,
                'create_replies': all_user_view_comments,
                'update_replies': all_user_created_replies,
                'delete_replies': all_user_created_replies,
                'get_replies': all_user_view_comments,
            }
        }
        # SECURITY(review): same hard-coded secret as the admin branch.
        secret = 'Irithm task is awesome'
        algo = "HS256"
        # encode a jwt
        encoded_jwt = jwt.encode(payload, secret, algorithm=algo)
        return encoded_jwt
# check_permissions
def check_permissions(token, permission, entity_id):
    """Return True when *token* grants *permission* on *entity_id*.

    Raises AuthError(401) when the token carries no permissions claim, the
    permission entry is falsy, or the entity is not in the granted list.
    """
    secret = 'Irithm task is awesome'
    algo = 'HS256'
    payload = jwt.decode(token, secret, algorithms=algo, verify=True)
    if 'permissions' not in payload:
        raise AuthError({
            'code': 'invalid_permission',
            'description': 'Permission not granted.'
        }, 401)
    granted = payload['permissions'][permission]
    if not granted:
        raise AuthError({
            'code': 'permission_access_forbidden',
            'description': 'Access to this entity is forbidden.'
        }, 401)
    if granted == 'All' or str(entity_id) in granted:
        return True
    raise AuthError({
        'code': 'invalid_id',
        'description': 'Authorization to this entity is forbidden.'
    }, 401)
# authorization error class
class AuthError(Exception):
    """Exception carrying a JSON-serializable error payload plus the HTTP
    status code the API should respond with."""

    def __init__(self, error, status_code):
        self.error = error              # dict with 'code' and 'description'
        self.status_code = status_code  # HTTP status to return (e.g. 401)
| mfragab5890/Irithim-python-flask | src/auth.py | auth.py | py | 7,106 | python | en | code | 0 | github-code | 36 |
74649265384 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
from BeautifulReport import BeautifulReport
from utils.my_logger import logger
from utils.get_path import *
from scp import SCPClient
import paramiko
import os
import time
def make_report(name):
    """Discover test cases matching *name*, run them with BeautifulReport,
    write a timestamped HTML report, and upload it to the deploy host.

    NOTE(review): `case_dir` is not defined in this module; it presumably
    comes from the star import of utils.get_path — confirm.
    """
    base_dir = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]
    report_dir = os.path.join(base_dir, 'test_report')
    logger.debug("报告输出模块:获取当前脚本路径")
    s = unittest.TestLoader().discover(start_dir=case_dir, pattern=name)
    logger.debug("testsuit填充用例,%s", s)
    print('*'*25,'测试开始','*'*25)
    br = BeautifulReport(s)
    filename = time.strftime("%Y-%m-%d_%H:%M:%S") + r".html"
    logger.debug("报告输出模块:设置报告格式")
    br.report(filename=filename, description='回归用例自动化测试报告', report_dir=report_dir)
    try:
        file = os.path.join('%s' % report_dir, filename)
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # SECURITY(review): SSH credentials are hard-coded; move them to
        # configuration or environment variables.
        ssh.connect(hostname='192.168.90.162', port=22, username='deploy', password='linkcld123456')
        scpclient = SCPClient(ssh.get_transport(), socket_timeout=15.0)
        scp = SCPClient(ssh.get_transport())
        scp.put(file, recursive=True, remote_path='/linkcld/uploadfile/report.html')
        scp.close()
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # prefer `except Exception`.
    except:
        logger.exception("上传报告失败")
    else:
        logger.info("报告已上传192.168.90.162/linkcld/uploadfile/report.html")
17884436215 | # -*- encoding: utf-8 -*-
"""
说明:由于之前帮助按钮模式做的效果不是很理想,目前计划是做一个新的模块作为临时结局方案
"""
import webbrowser
class helpLinkEngine(object):
    """Maps internal help tags to pyminer wiki pages and opens them in the
    default browser.

    Stop-gap replacement for the old in-app help-button mode, which did not
    work well enough.
    """

    def __init__(self):
        # Wiki page (path + sort id) per help tag, relative to the wiki root.
        self.url_dict = {
            "dataio_sample_showhelp": '导入其他数据分析软件的工作表?sort_id=3265627'
        }

    def openHelp(self, tag=""):
        """Open the wiki page registered for *tag*; warn the user if none exists."""
        if tag not in self.url_dict:
            # Imported lazily so the module works without Qt until needed.
            from PySide2.QtWidgets import QMessageBox
            QMessageBox.warning(None, '警告', '当前模块暂无帮助文档!', QMessageBox.Ok)
            return
        webbrowser.open("https://gitee.com/py2cn/pyminer/wikis/" + self.url_dict[tag])
helpLink = helpLinkEngine()
| pyminer/pyminer | pyminer/packages/pm_helpLinkEngine/helpLinkEngine.py | helpLinkEngine.py | py | 754 | python | zh | code | 77 | github-code | 36 |
def quicksort(collection: list) -> list:
    """Return a sorted copy of *collection* in ascending order.

    BUG FIX: the original popped the pivot off the caller's list, so
    sorting destroyed the input; this version never mutates its argument.
    """
    if len(collection) < 2:
        return collection[:]
    pivot = collection[-1]  # last element as pivot, as before
    rest = collection[:-1]
    lesser = [x for x in rest if x <= pivot]
    greater = [x for x in rest if x > pivot]
    return quicksort(lesser) + [pivot] + quicksort(greater)
if __name__ == "__main__":
    import random

    # Demo: sort a random list of 10-49 integers drawn from [-20, 50).
    # BUG FIX: the list used to be named `list`, shadowing the builtin.
    count = random.randrange(10, 50)
    values = [random.randrange(-20, 50) for _ in range(count)]
    print(values, "\n\n\n\n")
    print(quicksort(values))
| TotallyNotTito/ForFun | quicksort.py | quicksort.py | py | 650 | python | en | code | 0 | github-code | 36 |
27338793613 | from math import sqrt
def prime(n):
    """Return True when *n* is a prime number.

    BUG FIXES vs. the original:
    * n < 2 (including 0 and 1) now correctly returns False.
    * The first loop's `n == 2*p + 1` early-True and the duplicated second
      divisor loop were redundant: for composite n the divisor test always
      fired first, so the function already behaved as a plain primality
      test for n >= 2; that behavior is preserved here.
    NOTE(review): despite the file name, the original never verified the
    "safe prime" property (that (n-1)/2 is also prime).
    """
    if n < 2:
        return False
    for d in range(2, int(sqrt(n)) + 1):
        if n % d == 0:
            return False
    return True
n=int(input())
print(prime(n))
| GeethaBhavani28/python-programming | safe prime.py | safe prime.py | py | 319 | python | en | code | 0 | github-code | 36 |
581853 | # edad = int(input("Escribe tu edad: "))
# if edad >= 18:
# print("Eres apto para entrar")
# else:
# print("No eres apto para entrar")
# numero = int(input("Escribe un numero: "))
# if numero > 100:
# print("Es mayor a 100")
# elif numero == 100:
# print("Es igual a 100")
# else:
# print("Es menor a 100")
# Ask for the rubber type (A or B) and quote the total price for the
# requested amount. All user-facing text stays in Spanish.
caucho = input("Escriba su tipo de caucho (tipo A o B): ")
if caucho == "A":
    print("El costo es de 100 dolares")
    cantidad = int(input("Escriba la cantidad que necesita: "))
    # NB: the type-A message intentionally reproduces the original output,
    # which has no space before "dolares".
    print("El costo total es de " + str(cantidad * 100) + "dolares")
elif caucho == "B":
    print("El costo es de 80 dolares")
    cantidad = int(input("Escriba la cantidad que necesita: "))
    print("El costo total es de " + str(cantidad * 80) + " dolares")
else:
    print("ERROR TIPO NO COMPATIBLE")
7289309011 | import networkx as nx
import json
import matplotlib.pyplot as plt
import sys
from collections import defaultdict
from networkx.algorithms import bipartite
import numpy as np
import mmsbm
import time
import pickle
def parse_reviews(review_file, business_ids):
    """Load reviews (one JSON object per line), keeping only those whose
    business appears in *business_ids*.

    Returns (items, user_ids, reviews, stars): items is a list of
    (user_id, business_id) edges, user_ids lists the reviewer of every kept
    review (with repeats), reviews maps user -> [business, ...] and stars
    maps (user, business) -> rating (last review wins).
    Prints the number of kept reviews, like the original.
    """
    user_ids = []
    reviews = defaultdict(list)
    stars = {}
    with open(review_file, "r") as f:
        for line in f:
            record = json.loads(line)
            business = record["business_id"]
            if business not in business_ids:
                continue
            user = record["user_id"]
            user_ids.append(user)
            reviews[user].append(business)
            stars[(user, business)] = record["stars"]
    items = [(user, business)
             for user, businesses in reviews.items()
             for business in businesses]
    print(len(user_ids))
    return items, user_ids, reviews, stars
def parse_reviews_training(review_file, business_ids):
    """Like parse_reviews, but additionally shuffles the kept
    (user, business) edges into an 80/20 train/test split and returns the
    user/business id sets touched by each split (as lists)."""
    user_ids = []
    reviews = defaultdict(list)
    stars = {}
    i = 0
    with open(review_file, "r") as f:
        for line in f:
            j = json.loads(line)
            business_id = j["business_id"]
            if business_id in business_ids:
                user_id = j["user_id"]
                user_ids.append(user_id)
                reviews[user_id].append(business_id)
                stars[(user_id, business_id)] = j["stars"]
            i += 1
    items = []
    for key in reviews:
        for val in reviews[key]:
            items.append((key, val))
    ## shuffle items
    # NOTE(review): np.random.permutation converts the tuple list into a
    # 2-D string array, so each "edge" below is an array [user, business].
    rand_items = np.random.permutation(items)
    ## make train and test sets
    training_items = rand_items[:int(len(rand_items)*.80)]
    test_items = rand_items[int(len(rand_items)*.80):]
    u_set = set(user_ids)
    training_set = set()
    for edge in training_items:
        training_set.add(edge[0])
        training_set.add(edge[1])
    # Keep only ids that are actually user ids (edges mix users/businesses).
    training_set.intersection_update(u_set)
    test_set = set()
    for edge in test_items:
        test_set.add(edge[0])
        test_set.add(edge[1])
    test_set.intersection_update(u_set)
    b_set = set(business_ids.keys())
    b_training_set = set()
    for edge in training_items:
        b_training_set.add(edge[0])
        b_training_set.add(edge[1])
    # Same trick for businesses: intersect with the known business ids.
    b_training_set.intersection_update(b_set)
    print(len(b_training_set),len(b_set))
    return items, user_ids, reviews, stars, training_items, test_items, list(training_set), list(test_set), list(b_training_set)
def parse_businesses(business_file):
    """Return {business_id: 0} for every Las Vegas business tagged "Food".

    Reads one JSON object per line; the returned dict is used as a set by
    the callers (values are always 0).
    """
    business_ids = {}
    with open(business_file, "r") as f:
        for line in f:
            record = json.loads(line)
            if record["city"] == "Las Vegas" and "Food" in record["categories"]:
                business_ids[record["business_id"]] = 0
    return business_ids
def main():
    """Load Yelp data, initialise MMSBM parameters, and pickle the inputs.

    NOTE(review): the nx.Graph construction is commented out below, so `b`
    is UNDEFINED when the `for node in b.nodes()` loop runs — this function
    raises NameError as written. Restore one of the commented graph blocks
    (or load `b` from a pickle) before running.
    """
    try:
        review_file = sys.argv[1]
        business_file = sys.argv[2]
    except IndexError as e:
        print("Must provide input file.")
        sys.exit(-1)
    business_ids = parse_businesses(business_file)
    #items, user_ids, reviews, stars, training_items, test_items, training_ids, test_ids, training_business_ids= parse_reviews_training(review_file, business_ids)
    items, user_ids, reviews, stars = parse_reviews(review_file, business_ids)
    # Histogram of star ratings (1..5 mapped to indices 0..4).
    rating = np.zeros(5)
    print(len(stars))
    for key in stars:
        rating[stars[key]-1] += 1
    print(rating)
    '''
    b = nx.Graph()
    b.add_nodes_from(user_ids, bipartite=0)
    b.add_nodes_from(business_ids.keys(), bipartite=1)
    b.add_edges_from(items)
    print(len(user_ids), len(business_ids), len(items))
    for node in b.nodes():
        if
    '''
    '''
    b = nx.Graph()
    b.add_nodes_from(training_ids, bipartite=0)
    b.add_nodes_from(training_business_ids, bipartite=1)
    b.add_edges_from(training_items)
    '''
    # Give every node a Dirichlet-distributed membership vector and count
    # nodes on each side of the bipartite graph.
    b0 = 0
    b1 = 0
    for node in b.nodes():
        b.node[node]['eta-theta'] = np.random.dirichlet(np.ones(10),1)[0]
        if b.node[node]['bipartite'] == 0:
            b0 += 1
        if b.node[node]['bipartite'] == 1:
            b1 += 1
    print(b0,b1,b0+b1,len(b.nodes()))
    # p[r,k,l]: probability of rating r between groups k and l; each (k,l)
    # column is initialised from a 5-dim Dirichlet draw.
    p = np.full((5,10,10), 0.2) ## (r,k,l)
    for k in range(10):
        for l in range(10):
            vector = np.random.dirichlet(np.ones(5),1)[0]
            for r in range(5):
                p[r,k,l] = vector[r]
    #for edge in b.edges():
    #    print(stars[mmsbm.order_edge(b,edge)])
    #for r in range(5):
    #    print(p[r])
    # NOTE(review): file handles passed to pickle.dump are never closed.
    pickle.dump(items,open("items.p","wb"))
    pickle.dump(business_ids,open("business_ids.p","wb"))
    '''
    pickle.dump(user_ids.p,open("user_ids.p","wb"))
    pickle.dump(stars,open("reviews.p","wb"))
    pickle.dump(stars,open("stars.p","wb"))
    pickle.dump(stars,open("training_items.p","wb"))
    pickle.dump(stars,open("test_items.p","wb"))
    pickle.dump(stars,open("training_ids.p","wb"))
    pickle.dump(stars,open("test_ids.p","wb"))
    pickle.dump(stars,open("pOG.p","wb"))
    pickle.dump(stars,open("bOG.p","wb"))
    for i in range(1,26):
        t1 = time.time()
        mmsbm.update(b,p,stars)
        t2 = time.time()
        print(t2-t1)
        print(b.node['LDfEWQRx2_Ijv_GyD38Abg']['eta-theta'])
        if i%5 == 0:
            pickle.dump(b,open("b80_"+str(i)+".p","wb"))
            pickle.dump(p,open("p80_"+str(i)+".p","wb"))
    '''
    #print(b.nodes(data=True))
    #for r in range(5):
    #    print(p[r])
    '''
    count = 0
    nodes = nx.get_node_attributes(b,'bipartite')
    print(nodes)
    for att in nodes:
        if nodes[att] == 0:# print(att)# == 1:
            count += 1
    G.node[
    '''
    #print(count)
    #print(stars[('ajxohdcsKhRGFlEvHZDyTw', 'PSMJesRmIDmust2MUw7aQA')])
    # nx.draw(b)
    # plt.show()
    #print(len(user_ids))
| jonnymags/Networks-project | yelp.py | yelp.py | py | 5,362 | python | en | code | 0 | github-code | 36 |
42534845706 | # c_units.py
# V0.5.0 LDO 19/10/2022: initial version
# V0.5.1 LDO 12/11/2022: refactor modules
'''
grafanacode: Grafana unit formats.
See `categories.ts <https://github.com/grafana/grafana/blob/main/packages/grafana-data/src/valueFormats/categories.ts>`_
Use::
import c_units as UNITS
units=UNITS.nG_P_NORMAL_M3
'''
#******************************************************************************
# EXTERNAL MODULE REFERENCES
#******************************************************************************
#******************************************************************************
# UNIT FORMATS
#******************************************************************************
NOFORMAT = 'none'
NUMBER = 'none'
STRING = 'string'
PERCENT = 'percent'
PERCENTUNIT = 'percentunit'
SHORT = 'short'
HEX = 'hex'
HEXOX = 'hex0x' # 0x
SCIENTIFIC = 'sci'
LOCALE = 'locale'
PIXELS = 'pixel'
HUMIDITY = 'humidity' # %H
DECIBEL = 'dB'
# Acceleration
M_P_S2 = 'accMS2' # m/sec²
FT_P_S2 = 'accFS2' # f/sec²
# BUG FIX: this constant was named `G`, which the Mass section later
# rebinds to 'massg', making the Grafana 'accG' format unreachable.
G_ACCEL = 'accG' # g
# Angle
DEG = 'degree' # °
RAD = 'radian' # rad
GRAD = 'grad' # grad
ARCMIN = 'arcmin' # arcmin
ARCSEC = 'arcsec' # arcsec
# Area
M2 = 'areaM2' # m²
FT2 = 'areaF2' # ft²
MI2 = 'areaMI2' # mi²
# Computation
FLOPS_P_S = 'flops' # FLOP/s
MFLOPS_P_S = 'mflops' # MFLOP/s
GFLOPS_P_S = 'gflops' # GFLOP/s
TFLOPS_P_S = 'tflops' # TFLOP/s
PFLOPS_P_S = 'pflops' # PFLOP/s
EFLOPS_P_S = 'eflops' # EFLOP/s
ZFLOPS_P_S = 'zflops' # ZFLOP/s
YFLOPS_P_S = 'yflops' # YFLOP/s
# Concentration
PPM = 'ppm' # ppm
PPB = 'conppb' # ppb
nG_P_M3 = 'conngm3' # ng/m³
nG_P_NORMAL_M3 = 'conngNm3' # ng/Nm³
uG_P_M3 = 'conμgm3' # μg/m³
uG_P_NORMAL_M3 = 'conμgNm3' # μg/Nm³
mG_P_M3 = 'conmgm3' # mg/m³
mG_P_NORMAL_M3 = 'conmgNm3' # mg/Nm³
G_P_M3 = 'congm3' # g/m³
G_P_NORMAL_M3 = 'congNm3' # g/Nm³
mG_P_DL = 'conmgdL' # mg/dL
mMOL_P_L = 'conmmolL' # mmol/L
# Currency
DOLLAR = 'currencyUSD' # $
POUND = 'currencyGBP' # £
EURO = 'currencyEUR' # €
YEN = 'currencyJPY' # ¥
RUBLES = 'currencyRUB' # ₽
HRYVNIAS = 'currencyUAH' # ₴
REAL = 'currencyBRL' # R$
DANISH_KRONE = 'currencyDKK' # kr
ICELANDIC_KRONA = 'currencyISK' # kr
NORWEGIAN_KRONE = 'currencyNOK' # kr
SWEDISH_KORNA = 'currencySEK' # kr
CZECH_KORUNA = 'currencyCZK' # czk
SWISS_FRANC = 'currencyCHF' # CHF
POLISH_ZLOTY = 'currencyPLN' # PLN
BITCOIN = 'currencyBTC' # ฿
MILLI_BITCOIN = 'currencymBTC' # mBTC
MICRO_BITCOIN = 'currencyμBTC' # μBTC
SOUTH_AFRICAN_RAND = 'currencyZAR' # R
INDIAN_RUPEE = 'currencyINR' # ₹
SOUTH_KOREAN_WON = 'currencyKRW' # ₩
INDONESIAN_RUPIAH = 'currencyIDR' # Rp
PHILIPPINE_PESO = 'currencyPHP' # PHP
# Data
BYTES_IEC = 'bytes'
BYTES = 'decbytes' # B
BITS_IEC = 'bits'
BITS = 'decbits'
KIBI_BYTES = 'kbytes' # KiB
KILO_BYTES = 'deckbytes' # kB
MEBI_BYTES = 'mbytes' # MiB
MEGA_BYTES = 'decmbytes' # MB
GIBI_BYTES = 'gbytes' # GiB
GIGA_BYTES = 'decgbytes' # GB
TEBI_BYTES = 'tbytes' # TiB
TERA_BYTES = 'dectbytes' # TB
PEBI_BYTES = 'pbytes' # PiB
PETA_BYTES = 'decpbytes' # PB
# Data Rate
PACKETS_P_S = 'pps' # p/s
BYTES_P_S_IEC = 'binBps' # B/s
KIBI_BYTES_P_S = 'KiBs' # KiB/s
MEBI_BYTES_P_S = 'MiBs' # MiB/s
GIBI_BYTES_P_S = 'GiBs' # GiB/s
TEBI_BYTES_P_S = 'TiBs' # TiB/s
PEBI_BYTES_P_S = 'PiBs' # PB/s
BYTES_P_S = 'Bps' # B/s
KILO_BYTES_P_S = 'KBs' # kB/s
MEGA_BYTES_P_S = 'MBs' # MB/s
GIGA_BYTES_P_S = 'GBs' # GB/s
TERA_BYTES_P_S = 'TBs' # TB/s
PETA_BYTES_P_S = 'PBs' # PB/s
BITS_P_S_IEC = 'binbps' # b/s
KIBI_BITS_P_S = 'Kibits' # Kib/s
MEBI_BITS_P_S = 'Mibits' # Mib/s
GIBI_BITS_P_S = 'Gibits' # Gib/s
TEBI_BITS_P_S = 'Tibits' # Tib/s
PEBI_BITS_P_S = 'Pibits' # Pib/s
BITS_P_S = 'bps' # b/s
KILO_BITS_P_S = 'Kbits' # kb/s
MEGA_BITS_P_S = 'Mbits' # Mb/s
GIGA_BITS_P_S = 'Gbits' # Gb/s
TERA_BITS_P_S = 'Tbits' # Tb/s
PETA_BITS_P_S = 'Pbits' # Pb/s
# Date & Time
DATETIME_ISO = 'dateTimeAsIso'
DATETIME_ISO_TODAY = 'dateTimeAsIsoNoDateIfToday'
DATETIME_US = 'dateTimeAsUS'
DATETIME_US_TODAY = 'dateTimeAsUSNoDateIfToday'
DATETIME_LOCAL = 'dateTimeAsLocal'
DATETIME_LOCAL_TODAY = 'dateTimeAsLocalNoDateIfToday'
DATETIME_DEFAULT = 'dateTimeAsSystem'
DATETIME_FROM_NOW = 'dateTimeFromNow'
# Energy
W = 'watt' # W
KW = 'kwatt' # kW
MW = 'megwatt' # MW
GW = 'gwatt' # GW
# BUG FIX: milliwatt was misnamed `mM` (a name also bound later for
# millimetres and milliseconds, so it was silently overwritten).
mW = 'mwatt' # mW
W_P_M2 = 'Wm2' # W/m²
VA = 'voltamp' # VA
KVA = 'kvoltamp' # kVA
VAR = 'voltampreact' # VAR
KVAR = 'kvoltampreact' # kVAR
W_HOUR = 'watth' # Wh
W_HOUR_KILO = 'watthperkg' # Wh/kg
KWH = 'kwatth' # kWh
KWMIN = 'kwattm' # kWm
AMP_HOUR = 'amph' # Ah
KAMP_HOUR = 'kamph' # kAh
mAMP_HOUR = 'mamph' # mAh
JOULE = 'joule' # J
EV = 'ev' # eV
AMP = 'amp' # A
KAMP = 'kamp' # kA
mAMP = 'mamp' # mA
V = 'volt' # V
KV = 'kvolt' # kV
mV = 'mvolt' # mV
DB_mW = 'dBm' # dBm
OHM = 'ohm' # Ω
KOHM = 'kohm' # kΩ
MOHM = 'Mohm' # MΩ
F = 'farad' # F
uF = 'µfarad' # µF
nF = 'nfarad' # nF
pF = 'pfarad' # pF
fF = 'ffarad' # fF
H = 'henry' # H
mH = 'mhenry' # mH
uH = 'µhenry' # µH
LM = 'lumens' # Lm
# Flow
GALLONS_P_MIN = 'flowgpm' # gpm
M3_P_S = 'flowcms' # cms
FT3_P_S = 'flowcfs' # cfs
FT3_P_MIN = 'flowcfm' # cfm
L_P_HOUR = 'litreh' # L/h
L_P_MIN = 'flowlpm' # L/min
mL_P_MIN = 'flowmlpm' # mL/min
LUX = 'lux' # lx
# Force
NM = 'forceNm' # Nm
KNM = 'forcekNm' # kNm
N = 'forceN' # N
KN = 'forcekN' # kN
# Hash Rate
HASHES_P_S = 'Hs' # H/s
KHASHES_P_S = 'KHs' # kH/s
MHASHES_P_S = 'MHs' # MH/s
GHASHES_P_S = 'GHs' # GH/s
THASHES_P_S = 'THs' # TH/s
PHASHES_P_S = 'PHs' # PH/s
EHASHES_P_S = 'EHs' # EH/s
# Mass
mG = 'massmg' # mg
G = 'massg' # g
LB = 'masslb' # lb
KG = 'masskg' # kg
TON = 'masst' # t
# Length
mM = 'lengthmm' # mm
IN = 'lengthin' # in
M = 'lengthm' # m
KM = 'lengthkm' # km
FT = 'lengthft' # ft
MI = 'lengthmi' # mi
# Pressure
mBAR = 'pressurembar' # mBar,
BAR = 'pressurebar' # Bar,
KBAR = 'pressurekbar' # kBar,
PA = 'pressurepa' # Pa
HPA = 'pressurehpa' # hPa
KPA = 'pressurekpa' # kPa
HG = 'pressurehg' # "Hg
PSI = 'pressurepsi' # psi
# Radiation
BECQUEREL = 'radbq' # Bq
CURIE = 'radci' # Ci
RAD_GRAY = 'radgy' # Gy
RAD_RAD = 'radrad' # rad
uSIEVERT = 'radusv' # µSv
mSIEVERT = 'radmsv' # mSv
SIEVERT = 'radsv' # Sv
REM = 'radrem' # rem
EXPOSURE = 'radexpckg' # C/kg
ROENTGEN = 'radr' # R
uSIEVERT_P_HOUR = 'radusvh' # µSv/h
mSIEVERT_P_HOUR = 'radmsvh' # mSv/h
SIEVERT_P_HOUR = 'radsvh' # Sv/h
# Rotational Speed
RPM = 'rotrpm' # rpm
ROT_HZ = 'rothz' # Hz
RAD_P_S = 'rotrads' # rad/s
DEG_P_S = 'rotdegs' # °/s
# Temperature
CELSIUS = 'celsius' # °C
FAHRENHEIT = 'fahrenheit' # °F
KELVIN = 'kelvin' # K
# Time
HZ = 'hertz' # Hz
nS = 'ns' # ns
uS = 'µs' # µs
# BUG FIX: milliseconds was bound to `mM`, colliding with the millimetre
# constant in the Length section (the last assignment silently won).
# After this fix, `mM` refers only to the Length constant.
mS = 'ms' # ms
S = 's' # s
MIN = 'm' # m
HOUR = 'h' # h
DAY = 'd' # d
DURATION_mS = 'dtdurationms' # ms
DURATION_S = 'dtdurations' # s
HH_MM_SS = 'dthms' # hh:mm:ss
D_HH_MM_SS = 'dtdhms' # d hh:mm:ss
TIMETICKS = 'timeticks' # s/100
CLOCK_mS = 'clockms' # ms
CLOCK_S = 'clocks' # s
# Throughput
COUNTS_P_S = 'cps' # cps
OPS_P_S = 'ops' # ops
REQUESTS_P_S = 'reqps' # rps
READS_P_S = 'rps' # rps
WRITES_P_S = 'wps' # wps
IO_OPS_P_S = 'iops' # iops
COUNTS_P_MIN = 'cpm' # cpm
OPS_P_MIN = 'opm' # opm
READS_P_MIN = 'rpm' # rpm
WRITES_P_MIN = 'wpm' # wpm
# Velocity
M_P_S = 'velocityms' # m/s
KM_P_HOUR = 'velocitykmh' # km/h
MI_P_HOUR = 'velocitymph' # mph
KNOTS = 'velocityknot' # kn
# Volume
mL = 'mlitre' # mL
L = 'litre' # L
M3 = 'm3' # m³
NORMAL_M3 = 'Nm3' # Nm³
dM3 = 'dm3' # dm³
GALLONS = 'gallons' # g
# Boolean
TRUE_FALSE = 'bool' # True/False
YES_NO = 'bool_yes_no' # Yes/No
ON_OFF = 'bool_on_off' # On/Off
| DOSprojects/grafanacode | grafanacode/c_units.py | c_units.py | py | 11,206 | python | en | code | 0 | github-code | 36 |
27635586847 | from codenames.data.codenames_pb2 import ActionOutcome
from codenames.data.codenames_pb2 import Role
from codenames.data.codenames_pb2 import SharedAction
from codenames.data.codenames_pb2 import SharedClue
from codenames.data.types import Codename
from codenames.data.types import EndTurn
from codenames.data.types import NullTeam
from codenames.data.types import Team
from codenames.game.game_state import GameState
from codenames.logging.game_logger import GameLogger
from codenames.players.team_players import TeamPlayers
class Game:
    """Drives a complete game of Codenames.

    Owns the game state, the per-team players, and a logger.  ``play``
    alternates clue-giving and guessing phases until the game is over.
    """

    def __init__(
        self, players: dict[Team, TeamPlayers], game_state: GameState,
        logger: GameLogger
    ) -> None:
        self._logger = logger
        self._game_state = game_state
        self._players = players
        self._logger.log_players(players)
        self._set_up_players()

    @property
    def game_state(self) -> GameState:
        """The (mutable) state of the game being played."""
        return self._game_state

    def play(self) -> None:
        """Run phases until the game finishes, dispatching on the active role."""
        while not self._game_finished():
            role = self.game_state.active_role
            self._logger.log_active_player(self.game_state.active_team, role)
            if role == Role.CLUE_GIVER:
                self._execute_clue_phase()
            elif role == Role.GUESSER:
                self._execute_action_phase()
            else:
                # Unknown role: log it, then fail loudly.
                self._logger.log_invalid_role(role)
                raise TypeError

    def _set_up_players(self):
        """Give every player the common information; clue givers also get secrets."""
        common = self.game_state.get_common_information()
        secret = self.game_state.get_secret_information()
        self._logger.log_secret_information(secret)
        for team, members in self._players.items():
            members.clue_giver.set_up(team, common)
            members.guesser.set_up(team, common)
            # NOTE(review): "reaveal" spelling matches the player interface as-is.
            members.clue_giver.reaveal_secret_information(secret)

    def _game_finished(self) -> bool:
        """The game ends when one team remains or when no team is active."""
        return (
            self.game_state.teams_remaining() == 1
            or self.game_state.active_team == NullTeam
        )

    def _execute_clue_phase(self) -> None:
        """Collect a clue from the active clue giver, apply it, broadcast it."""
        giver = self._players[self.game_state.active_team].clue_giver
        clue = giver.give_clue()
        self._logger.log_clue(clue)
        self.game_state.resolve_clue(clue)
        # active_team is re-read after resolving, matching broadcast-time state.
        shared = SharedClue(team=self.game_state.active_team, clue=clue)
        self._logger.log_shared_clue(shared)
        for members in self._players.values():
            for player in (members.clue_giver, members.guesser):
                player.reveal_clue(shared)

    def _execute_action_phase(self) -> None:
        """Collect a guess from the active guesser, apply it, broadcast the outcome."""
        guesser = self._players[self.game_state.active_team].guesser
        action = guesser.give_action()
        self._logger.log_action(action)
        self.game_state.resolve_action(action)
        if action.guess == EndTurn:
            identity = NullTeam
        else:
            identity = self.game_state.codename_identity(Codename(action.guess))
        outcome = ActionOutcome(identity=identity)
        # active_team is re-read after resolving, matching broadcast-time state.
        shared = SharedAction(
            team=self.game_state.active_team,
            action=action,
            action_outcome=outcome,
        )
        self._logger.log_shared_action(shared)
        for members in self._players.values():
            for player in (members.clue_giver, members.guesser):
                player.reveal_action(shared)
| ealt/Codenames | codenames/game/game.py | game.py | py | 3,650 | python | en | code | 0 | github-code | 36 |
17520933988 | # coding:utf-8
import unittest
import ddt
import os
import requests
from common import base_api
from common import readexcel
from common import writeexcel
from common.readexcel import ExcelUtil
# Resolve workbook paths relative to this file: the input workbook holding the
# API test cases, and the output report workbook one directory up in report/.
curpath = os.path.dirname(os.path.realpath(__file__))
textxlsx = os.path.join(curpath,"demo_api.xlsx")
report_path = os.path.join(os.path.dirname(curpath),"report")
reportxlsx = os.path.join(report_path,"result.xlsx")
# Load every test case from Sheet1 as a list of dicts (one dict per row);
# ddt feeds these rows to the test method below.
testdata = readexcel.ExcelUtil(textxlsx,sheetName="Sheet1").dict_data()
@ddt.ddt
class Test_api(unittest.TestCase):
    """Data-driven API tests: one case per row of the input workbook.

    Each case sends the request described by the row, appends the outcome to
    the report workbook, and asserts the row's checkpoint string appears in
    the response body.
    """

    @classmethod
    def setUpClass(cls):
        # One shared HTTP session for all cases (keeps cookies/connections),
        # and a fresh copy of the template workbook to receive the results.
        cls.s = requests.session()
        writeexcel.copy_excel(textxlsx, reportxlsx)

    @ddt.data(*testdata)
    def test_api(self, data):
        res = base_api.send_requests(self.s, data)
        # NOTE(review): "wirte_result" spelling matches the helper's actual name.
        base_api.wirte_result(res, filename=reportxlsx)
        check = data["checkpoint"]
        print(u"检查点->:%s"%check)
        res_text = res['text']
        print(u"返回实际结果->:%s"%res_text)
        # assertIn gives a diagnostic showing both strings on failure,
        # unlike assertTrue(check in res_text) which only reports False.
        self.assertIn(check, res_text)
if __name__ == "__main__":
    # Allow running this module directly; unittest discovers the ddt-expanded cases.
    unittest.main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.