text stringlengths 38 1.54M |
|---|
import json
from pathlib import Path
from typing import Dict, Tuple, Any
import gym
from gym import spaces
import numpy as np
from gym_d2d.actions import Action, Actions
from gym_d2d.envs.obs_fn import LinearObsFunction
from gym_d2d.envs.reward_fn import SystemCapacityRewardFunction
from gym_d2d.id import Id
from gym_d2d.link_type import LinkType
from gym_d2d.simulator import Simulator, BASE_STATION_ID
# Number of simulator steps per episode (see D2DEnv.step's done flag).
EPISODE_LENGTH = 10
# Default observation/reward strategy classes; callers may override them via
# env_config['obs_fn'] / env_config['reward_fn'].
DEFAULT_OBS_FN = LinearObsFunction
DEFAULT_REWARD_FN = SystemCapacityRewardFunction
class D2DEnv(gym.Env):
    """Gym environment simulating device-to-device (D2D) communication
    underlaying a cellular network.

    Each agent picks a resource block (RB) and a transmit power level per
    link.  Observation and reward strategies are pluggable via ``env_config``.
    Follows the old-style Gym API: ``reset()`` returns only the observation
    and ``step()`` returns a 4-tuple.
    """

    metadata = {'render.modes': ['human']}

    def __init__(self, env_config=None) -> None:
        super().__init__()
        env_config = env_config or {}
        # obs/reward functions are passed as *classes* and instantiated here;
        # pop() removes them so only simulator settings reach Simulator().
        self.obs_fn = env_config.pop('obs_fn', DEFAULT_OBS_FN)()
        self.reward_fn = env_config.pop('reward_fn', DEFAULT_REWARD_FN)()
        self.simulator = Simulator(env_config)
        self.observation_space = self.obs_fn.get_obs_space(self.simulator.config)
        self.num_pwr_actions = {  # +1 because include max value, i.e. from [0, ..., max]
            'due': self.simulator.config.due_max_tx_power_dBm - self.simulator.config.due_min_tx_power_dBm + 1,
            'cue': self.simulator.config.cue_max_tx_power_dBm + 1,
            'mbs': self.simulator.config.mbs_max_tx_power_dBm + 1
        }
        # One Discrete space per device type; each integer action jointly
        # encodes (RB, power level) — decoded in _decode_action().
        self.action_space = spaces.Dict({
            'due': spaces.Discrete(self.simulator.config.num_rbs * self.num_pwr_actions['due']),
            'cue': spaces.Discrete(self.simulator.config.num_rbs * self.num_pwr_actions['cue']),
            'mbs': spaces.Discrete(self.simulator.config.num_rbs * self.num_pwr_actions['mbs']),
        })
        self.actions = None  # most recent Actions applied to the simulator
        self.state = None    # most recent simulator state dict
        self.num_steps = 0   # steps taken in the current episode

    def reset(self):
        """Reset the simulator and return the initial observation.

        One step with random actions is executed first so the observation
        function has initial SINR values to work from.
        """
        self.num_steps = 0
        self.simulator.reset()
        # take a step with random D2D actions to generate initial SINRs
        self.actions = self._reset_random_actions()
        self.state = self.simulator.step(self.actions)
        obs = self.obs_fn.get_state(self.actions, self.state, self.simulator.devices)
        return obs

    def _reset_random_actions(self) -> Actions:
        """Sample one random action per CUE (uplink to the BS) and per DUE pair."""
        cue_actions = {
            (tx_id, BASE_STATION_ID): self._extract_action(tx_id, BASE_STATION_ID, self.action_space['cue'].sample())
            for tx_id in self.simulator.devices.cues.keys()}
        due_actions = {tx_rx_id: self._extract_action(*tx_rx_id, self.action_space['due'].sample())
                       for tx_rx_id in self.simulator.devices.dues.keys()}
        return Actions({**cue_actions, **due_actions})

    def step(self, raw_actions: Dict[str, Any]):
        """Advance the simulation one step.

        :param raw_actions: dict keyed by 'tx_id:rx_id' strings mapping to
            raw (encoded) actions.
        :returns: (obs, rewards, dones, infos); the dones dict uses the
            '__all__' key convention (presumably for RLlib multi-agent use —
            TODO confirm) and ends the episode after EPISODE_LENGTH steps.
        """
        self.actions = self._extract_actions(raw_actions)
        self.state = self.simulator.step(self.actions)
        self.num_steps += 1
        obs = self.obs_fn.get_state(self.actions, self.state, self.simulator.devices)
        rewards = self.reward_fn(self.actions, self.state)
        game_over = {'__all__': self.num_steps >= EPISODE_LENGTH}
        info = self._infos(self.actions, self.state)
        return obs, rewards, game_over, info

    def _extract_actions(self, raw_actions: Dict[str, Any]) -> Actions:
        """Parse 'tx_id:rx_id' keyed raw actions into an Actions container."""
        actions = Actions()
        for id_pair_str, action in raw_actions.items():
            tx_rx_id = tuple([Id(_id) for _id in id_pair_str.split(':')])
            actions[tx_rx_id] = self._extract_action(*tx_rx_id, action)
        return actions

    def _extract_action(self, tx_id: Id, rx_id: Id, action: Any) -> Action:
        """Build an Action, inferring the link type from the transmitter's category."""
        if tx_id in self.simulator.devices.due_pairs:
            link_type = LinkType.SIDELINK
            rb, tx_pwr_dBm = self._decode_action(action, 'due')
        elif tx_id in self.simulator.devices.cues:
            link_type = LinkType.UPLINK
            rb, tx_pwr_dBm = self._decode_action(action, 'cue')
        else:
            # Anything else transmits from the base station (downlink).
            link_type = LinkType.DOWNLINK
            rb, tx_pwr_dBm = self._decode_action(action, 'mbs')
        tx, rx = self.simulator.devices[tx_id], self.simulator.devices[rx_id]
        return Action(tx, rx, link_type, rb, tx_pwr_dBm)

    def _decode_action(self, action: Any, tx_type: str) -> Tuple[int, int]:
        """Decode a raw action into a (resource block, tx power dBm) pair.

        Integer actions are a joint encoding: divmod by the number of power
        levels for the given device type.
        """
        if isinstance(action, (int, np.integer)):
            rb = action // self.num_pwr_actions[tx_type]
            tx_pwr_dBm = action % self.num_pwr_actions[tx_type]
        elif isinstance(action, np.ndarray) and action.ndim == 2:
            # NOTE(review): a (rb, power) pair would normally be a 1-D array
            # of length 2 (ndim == 1); confirm `ndim == 2` is intended here.
            rb, tx_pwr_dBm = action
        else:
            raise ValueError(f'Unable to decode action type "{type(action)}"')
        return int(rb), int(tx_pwr_dBm)

    def _infos(self, actions: Actions, state: dict) -> Dict[str, Any]:
        """Per-link diagnostic info, keyed 'tx_id:rx_id' (Ids must be strings)."""
        return {':'.join(id_pair): self._info(action, state) for id_pair, action in actions.items()}

    def _info(self, action: Action, state: dict) -> Dict[str, Any]:
        """Diagnostics for a single link, taken from the simulator state."""
        id_pair = (action.tx.id, action.rx.id)
        return {
            'rb': action.rb,
            'tx_pwr_dbm': action.tx_pwr_dBm,
            # 'channel_gains_db': state['channel_gains_db'][id_pair],
            'snr_db': state['snrs_db'][id_pair],
            'sinr_db': state['sinrs_db'][id_pair],
            'rate_bps': state['rate_bps'][id_pair],
            'capacity_mbps': state['capacity_mbps'][id_pair],
        }

    def render(self, mode='human'):
        """Print the current observation (the only supported render mode)."""
        assert self.state is not None and self.actions is not None, \
            'Initialise environment with `reset()` before calling `render()`'
        obs = self.obs_fn.get_state(self.actions, self.state, self.simulator.devices)
        print(obs)

    def save_device_config(self, config_file: Path) -> None:
        """Save the environment's device configuration in a JSON file.

        :param config_file: The filepath to save to.
        """
        config = {device.id: {
            'position': device.position.as_tuple(),
            'config': device.config,
        } for device in self.simulator.devices.values()}
        with config_file.open(mode='w') as fid:
            json.dump(config, fid)
|
'''
Protocol lookup interface smoke test ("find protocol").
https://payment.test.bkjk.com/api/protocol/find
'''
# coding:utf-8  (NOTE: a coding declaration only takes effect on line 1 or 2 of the file)
import json
import request  # project-local HTTP helper exposing request(url, data) — not the `requests` package; TODO confirm
transNo = 'TR181212113002713403251'  # transaction number under test
print('####################查询协议开始##################')
# Request payload for the protocol-find endpoint
data2 = {"transNo":transNo}
# Protocol-find request URL
url2 = 'https://payment.test.bkjk.com/api/protocol/find'
t2 = request.request(url2, data2)
sss2 = json.loads(t2)
# The 'data' field is itself a JSON-encoded string, hence the second loads()
ss21 = sss2['data']
ss2 = json.loads(ss21)
print('------')
print('接口状态:'+sss2['msg'])
print('transNo:'+transNo,'amount:'+ss2['amount'],'onpassageAmount:'+ss2['onpassageAmount'],'outAmount:'+ss2['outAmount'],'cashierType:'+ss2['cashierType'])
print('------')
print('####################查询协议结束##################')
print(len(sss2.keys()))
print(sss2.keys())
print(len(ss2.keys()))
print(ss2.keys())
a = 3
b = 5
# Method I: classic swap through a temporary variable
# temp = a
# a = b
# b = temp
# Method II: arithmetic swap without a temporary
# b = a + b  # b = 8
# a = b - a  # a = 5
# b = b - a  # b = 3
# Method III: Pythonic tuple-unpacking swap (the one actually used)
a, b = b, a
# c, d, e = [1, 2, 3]  # tuple unpacking generalises to longer sequences
print("a:",a,"oraz b:",b)
__author__ = 'Justin'
w = "CAB"
def prepend(l, s):
return l+s
def append(l, s):
return s+l
num_tests = int(raw_input())
for x in xrange(1, num_tests+1):
w = raw_input()
first = w[0]
rest = w[1:]
words = list(first)
for letter in rest:
ws = []
for word in words:
ws.append(prepend(letter, word))
ws.append(append(letter, word))
words = ws
last = sorted(words)[-1]
print "Case #{}: {}".format(x, last)
|
import matplotlib.pyplot as plt
from math import exp, log
from numpy.random import uniform, exponential
SAMPLE_SIZE = 10 ** 6  # number of draws per experiment
A = 0.3  # half-width of the flat middle section of the target density
# Normalizing constant: the target density is C on [-A, A] and C*e^{-|x|}
# for |x| > A, so total mass C*(2A + 2e^{-A}) integrates to 1.
C = 1 / (2 * A + 2 * exp(-A))
EPS = 0  # slack for the region boundaries; currently disabled
BINS = 1000  # histogram resolution
def save_plot(filename, ys):
    """Write a line plot of *ys* against their indices to *filename*."""
    plt.clf()
    indices = list(range(len(ys)))
    plt.plot(indices, ys)
    plt.savefig(filename)
def save_hist(filename, xs):
    """Write a BINS-bin histogram of *xs* to *filename*."""
    plt.clf()
    plt.hist(xs, bins=BINS)
    plt.savefig(filename)
def gen1():
    """Draw one sample via the inverse-CDF method.

    The unit interval splits into three regions by probability mass:
    left exponential tail, flat middle, right exponential tail.
    """
    u = uniform(0, 1)
    tail_mass = C * exp(-A)
    if u + EPS < tail_mass:
        # Left tail: invert F(x) = C*e^x for x < -A.
        return log(u / C)
    if u > tail_mass + 2 * C * A + EPS:
        # Right tail: invert 1 - F(x) = C*e^{-x} for x > A.
        return -log((1 - u) / C)
    # Flat middle: linear CDF segment.
    return u / C - exp(-A) - A
def gen2():
    """Draw one sample via composition: pick a region by its probability
    mass, then sample that region's conditional distribution directly."""
    u = uniform(0, 1)
    tail_mass = C * exp(-A)
    if u + EPS < tail_mass:
        # Left tail: shifted exponential, mirrored to the negatives.
        return -(exponential(1) + A)
    if u > tail_mass + 2 * C * A + EPS:
        # Right tail: shifted exponential.
        return exponential(1) + A
    # Flat middle section.
    return uniform(-A, A)
def make_graphs(gen, suffix):
    """Draw SAMPLE_SIZE samples from *gen*; save the sorted-sample curve
    (an empirical quantile plot) and a histogram, suffixed by *suffix*."""
    draws = sorted(gen() for _ in range(SAMPLE_SIZE))
    save_plot("plot" + suffix, draws)
    save_hist("hist" + suffix, draws)
if __name__ == "__main__":
    # The two samplers target the same density; their plots should match.
    make_graphs(gen1, "1")
    make_graphs(gen2, "2")
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from .models import Drone, Command
class UserSerializer(serializers.ModelSerializer):
    """Serializes auth users; the password is accepted on input only."""

    class Meta:
        model = User
        fields = ('id', 'username', 'password')
        # Password is required on create and never echoed back in responses.
        extra_kwargs = {'password': {'write_only': True, 'required': True}}

    def create(self, validated_data):
        """Create the user (create_user hashes the password) and issue an auth token."""
        user = User.objects.create_user(**validated_data)
        Token.objects.create(user=user)
        return user
class DroneSerializer(serializers.ModelSerializer):
    """Serializes the full telemetry/state field set of a Drone."""

    class Meta:
        model = Drone
        fields = ('id', 'name', 'connect', 'bat', 'fix', 'num_sat', 'lat',
                  'lng', 'alt', 'zspeed', 'gspeed', 'heading', 'pitch', 'roll', 'arm', 'armable',
                  'ekf', 'mode', 'mission')
class CommandSerializer(serializers.ModelSerializer):
    """Serializes Command records.

    NOTE: the capitalized 'Command' entry mirrors the model's field name.
    """

    class Meta:
        model = Command
        fields = ('id', 'name', 'Command', 'alt')
|
import logging
from decimal import Decimal
from typing import List
from pyinjective.composer import Composer as InjectiveComposer
from pyinjective.constant import Denom, Network
from pyinjective.orderhash import OrderHashResponse, build_eip712_msg, hash_order
from pyinjective.proto.injective.exchange.v1beta1 import (
exchange_pb2 as injective_dot_exchange_dot_v1beta1_dot_exchange__pb2,
)
from pyinjective.proto.injective.exchange.v1beta1.exchange_pb2 import DerivativeOrder, SpotOrder
from pyinjective.utils.utils import derivative_price_to_backend, derivative_quantity_to_backend
from hummingbot.connector.gateway.clob_spot.data_sources.injective.injective_constants import (
ACC_NONCE_PATH_RATE_LIMIT_ID,
NONCE_PATH,
RATE_LIMITS,
)
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
class OrderHashManager:
    """Tracks the sub-account nonce and derives EIP-712 order hashes locally,
    avoiding a network round-trip per order."""

    def __init__(self, network: Network, sub_account_id: str):
        self._sub_account_id = sub_account_id
        self._network = network
        # Nonce to use for the next order; synced with the chain in start().
        self._sub_account_nonce = 0
        self._web_assistants_factory = WebAssistantsFactory(throttler=AsyncThrottler(rate_limits=RATE_LIMITS))

    @property
    def current_nonce(self) -> int:
        """The nonce that will be used for the next order hash."""
        return self._sub_account_nonce

    async def start(self):
        """Fetch the current on-chain nonce and position the local counter one past it."""
        url = f"{self._network.lcd_endpoint}/{NONCE_PATH}/{self._sub_account_id}"
        rest_assistant = await self._web_assistants_factory.get_rest_assistant()
        res = await rest_assistant.execute_request(url=url, throttler_limit_id=ACC_NONCE_PATH_RATE_LIMIT_ID)
        nonce = res["nonce"]
        self._sub_account_nonce = nonce + 1

    def compute_order_hashes(
        self, spot_orders: List[SpotOrder], derivative_orders: List[DerivativeOrder]
    ) -> OrderHashResponse:
        """Hash each order under a fresh nonce: spot orders first, then derivatives.

        The local nonce advances once per order, so hashes are only valid if
        orders reach the chain in this same sequence.
        """
        order_hashes = OrderHashResponse(spot=[], derivative=[])
        for o in spot_orders:
            order_hash = hash_order(build_eip712_msg(o, self._sub_account_nonce))
            order_hashes.spot.append(order_hash)
            self._sub_account_nonce += 1
        for o in derivative_orders:
            order_hash = hash_order(build_eip712_msg(o, self._sub_account_nonce))
            order_hashes.derivative.append(order_hash)
            self._sub_account_nonce += 1
        return order_hashes
class Composer(InjectiveComposer):
    def DerivativeOrder(
        self,
        market_id: str,
        subaccount_id: str,
        fee_recipient: str,
        price: float,
        quantity: float,
        trigger_price: float = 0,
        **kwargs,
    ):
        """Build a DerivativeOrder protobuf message.

        Changes the way the margin is computed to be synchronous with the
        approach used by Gateway.

        Recognised kwargs: is_reduce_only, leverage, is_buy, is_po,
        stop_buy, stop_sell, take_buy, take_sell.
        """
        # load denom metadata
        denom = Denom.load_market(self.network, market_id)
        logging.info("Loaded market metadata for:{}".format(denom.description))

        # Reduce-only orders carry no margin; a missing `is_reduce_only` key
        # previously followed the same compute path as an explicit False, so
        # collapsing the three branches to a truthiness check is equivalent.
        if kwargs.get("is_reduce_only"):
            margin = 0
        else:
            margin = derivative_margin_to_backend_using_gateway_approach(
                price, quantity, kwargs.get("leverage"), denom
            )

        # prepare values: scale to the backend's fixed-point representation
        price = derivative_price_to_backend(price, denom)
        trigger_price = derivative_price_to_backend(trigger_price, denom)
        quantity = derivative_quantity_to_backend(quantity, denom)

        # NOTE(review): the first four branches exhaust every is_buy/is_po
        # combination, so the stop_*/take_* branches below are unreachable;
        # kept to preserve behavior, but confirm whether stop/take flags
        # should be checked *before* the plain buy/sell ones.
        if kwargs.get("is_buy") and not kwargs.get("is_po"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.BUY
        elif not kwargs.get("is_buy") and not kwargs.get("is_po"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.SELL
        elif kwargs.get("is_buy") and kwargs.get("is_po"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.BUY_PO
        elif not kwargs.get("is_buy") and kwargs.get("is_po"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.SELL_PO
        elif kwargs.get("stop_buy"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.STOP_BUY
        elif kwargs.get("stop_sell"):
            # BUG FIX: was `OrderType.STOP_SEll` (typo), a latent AttributeError.
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.STOP_SELL
        elif kwargs.get("take_buy"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.TAKE_BUY
        elif kwargs.get("take_sell"):
            order_type = injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderType.TAKE_SELL

        return injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.DerivativeOrder(
            market_id=market_id,
            order_info=injective_dot_exchange_dot_v1beta1_dot_exchange__pb2.OrderInfo(
                subaccount_id=subaccount_id,
                fee_recipient=fee_recipient,
                price=str(price),
                quantity=str(quantity),
            ),
            margin=str(margin),
            order_type=order_type,
            trigger_price=str(trigger_price),
        )
def derivative_margin_to_backend_using_gateway_approach(
    price: float, quantity: float, leverage: float, denom: Denom
) -> int:
    """Return price*quantity/leverage as a backend fixed-point integer,
    scaled by 10**(18 + quote decimals), mirroring Gateway's arithmetic."""
    scale_exp = Decimal(18 + denom.quote)
    scale = 10 ** scale_exp
    scaled_price = Decimal(str(price)) * scale
    scaled_quantity = Decimal(str(quantity)) * scale
    scaled_leverage = Decimal(str(leverage)) * scale
    # Multiply numerator and denominator by the same scale factor, exactly
    # as Gateway does, before the single Decimal division.
    numerator = scaled_price * scaled_quantity * scale
    denominator = scaled_leverage * scale
    return int(numerator / denominator)
|
import time
import pandas as pd
import numpy as np
import os
from matplotlib import pyplot as plt

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so Chinese labels render

# Map twitter account -> Chinese title from the metadata sheet.
df = pd.read_csv('Metadata.csv')
accounts = df['account'].values
names = df['中译'].values
account2name = dict(zip(accounts, names))

# Build the follower-status API URL.
# BUG FIX: accounts were previously concatenated as 'account,' and then
# trimmed with token[:-2], which removed the trailing comma AND the last
# character of the final account name.  ','.join() yields the intended list.
token = 'http://api.moemoe.tokyo/anime/v1/twitter/follower/status?accounts=' + ','.join(accounts)
with open('201901accounts', 'w') as f:
    f.write(token)

# Fetch follower counts (shelling out to curl | jq, as before).
res = pd.read_json(os.popen("curl -v " + token + ' | jq . '))
s = res.iloc[0]
s_names = [account2name[account] for account in s.index]
new_s = pd.Series(list(s), index=s_names)
new_s = new_s.sort_values(ascending=False)

ax = new_s.plot(kind='barh', figsize=(10, 7), color="slateblue", fontsize=13)
ax.set_alpha(0.1)
ax.set_title('2019年1月主要新作推特关注数', fontsize=15)
ax.set_xlabel('关注数', fontsize=15)

# Annotate each bar with its value.
for bar in ax.patches:
    # get_width pulls left or right; get_y pushes up or down
    ax.text(bar.get_width() + 100, bar.get_y() + 0.31, str(round((bar.get_width()), 2)), fontsize=15, color='dimgrey')
# NOTE(review): new_s[0] relies on positional lookup on a label-indexed
# Series, which newer pandas deprecates — consider new_s.iloc[0].
ax.text(0.6 * new_s[0], len(new_s) - 1, '统计时间:{}'.format(time.strftime("%Y-%m-%d", time.localtime())), fontsize=20)
ax.invert_yaxis()  # largest at top
fig = ax.get_figure()
fig.savefig('{}.png'.format(time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())))
'''
Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms required.
Example 1:
Input: [[0, 30],[5, 10],[15, 20]]
Output: 2
Example 2:
Input: [[7,10],[2,4]]
Output: 1
'''
class Solution:
    def minMeetingRooms(self, intervals: list[list[int]]) -> int:
        """Return the minimum number of rooms needed to host all meetings.

        Sweep-line over start/end events: +1 room at each start, -1 at each
        end; the answer is the peak concurrent count.  O(n log n) time.
        """
        # (time, delta) tuples sort naturally with (t, -1) before (t, +1),
        # so a meeting ending at t frees its room before one starting at t
        # claims it (valid since si < ei).  This also drops the previous
        # dependence on the un-imported `operator` module.
        events = []
        for start, end in intervals:
            events.append((start, 1))
            events.append((end, -1))
        events.sort()
        max_rooms = current_rooms = 0
        for _, delta in events:
            current_rooms += delta
            max_rooms = max(max_rooms, current_rooms)
        return max_rooms
'''
Very similar to the death question example given by the lady who wrote CTCI. Created an array of times and whether they were starts or ends.
Then I sorted them. When I hit a start time I increment my result. When I hit an end time I decrement.'''
|
import cv2
import numpy as np

# 512x512 black canvas, 3 channels (OpenCV uses BGR order), 8-bit.
img = np.zeros((512, 512, 3), np.uint8)
# Draw a green line of thickness 5 from (10, 30) to (400, 400);
# cv2.line draws in place and returns the same image.
img = cv2.line(img, (10,30), (400,400), (0,255,0), 5)
cv2.imshow('image', img)
cv2.waitKey(0)  # block until any key is pressed
cv2.destroyAllWindows()
|
# Generated by Django 3.0.2 on 2020-04-23 00:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a non-null decimal 'Ratio' column to the statements model."""

    dependencies = [
        ('accounts', '0042_statements_ending_balance'),
    ]

    operations = [
        migrations.AddField(
            model_name='statements',
            # NOTE: capitalized field name is unconventional for Django but
            # must match the model definition.
            name='Ratio',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
            # default=0 only backfills existing rows; it is not kept on the model.
            preserve_default=False,
        ),
    ]
|
# Sample plaintext conversation (hotel check-in / small talk) used below to
# drive the simulated encrypted exchange and the chosen-plaintext attack.
a = '''A: Hi miss, how are you today? Are you checking in?
B: Yes, I had a room reserved under the name Rebecca
A: Oh ok, let me check. Oh great I found your reservation, you are in room 207.
B: Great so here are your keys and we have a complimentary continental breakfast between 7am and 10am in the lobby. Would you like a hand bringing those bags up to your room? Our bellhop can take those for you.
A: Sure, that would be great.
B: Enjoy your stay with us.
A: Hi, what’s your name?
B: I’m Jenny. You?
A: Oh I’m Akiko. It’s great to meet you. So where are you from?
B: I’m from New York. I am in Tokyo for a 10-day work trip.
A: How do you like Japan so far?
B: Oh my gosh, I never imagined the food would be this great and I’m having a blast.
A: Cool! Are you getting a lot of time to explore outside of work?
B: Yeah, I am in the office during the day, we have a Tokyo office but I get out around 5pm every evening so I have been going all around the city on my own.
A: Well some friends and I are having a cherry blossom party this weekend at Yoyogi Park. Would you be interested in joining us?
B: That sounds awesome. Do you want to send me a text later in the week and let me know the time and the address?
A: Will do. See you this weekend!
A: Lindsay, what are you doing?
B: Oh I’m trying to figure out how to make this microphone work better. It sounds kind of strange.
A: What do you mean? I think it sounds fine.
B: Do you know what my friend said? He’s an audio expert and he said that we need to improve it. I don’t know, what do you think?
A: I think it’s OK.'''
from aat import AAT
from os import remove
from os import path
from time import sleep
print()
print('*' * 60)
input('Welcome to Vigenère Cipher: Please press "Enter" to continue')

# Remove stale message files from a previous run.  Raw strings make the
# backslashes explicit ('\m' and '\d' happen not to be escape sequences,
# but relying on that is fragile).
for stale in (r'MsgServer\meandyou.txt', r'MsgServer\davidandracheal.txt'):
    if path.isfile(stale):
        remove(stale)

# Capture the conversation between David and Racheal (in codeword form).
aatt = AAT(a)
print('Capturing Conversation...')
aatt.simulateConversation(a)

# Recover the KEY (in codeword form) using a chosen-plaintext attack.
input('Press Enter to use CPTAttack')
print('Initiating Chosen Plain Text Attack to find the KEY:')  # typo fix: was "Initaiting"
aatt.chosenPTAttack()
aatt.decode_key()

# Decipher the captured conversation.
input('Press Enter to decode the messages!')
print('\n')
print('*' * 20, 'Messages', '*' * 20)
aatt.decode_message()
print('*' * 20, 'Messages', '*' * 20)
|
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a layer of abstraction for the issue tracker API."""
from http import client as http_client
import json
import logging
from apiclient import discovery
from apiclient import errors
from application import utils
# Discovery-document URL template for the Monorail API.
_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
                  '/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
STATUS_DUPLICATE = 'Duplicate'  # issue status marking duplicates
MAX_DISCOVERY_RETRIES = 3  # attempts to build the discovery client
MAX_REQUEST_RETRIES = 5  # per-request retries passed to execute()
class IssueTrackerClient:
  """Class for updating bug issues."""

  def __init__(self):
    """Initializes an object for communicate to the issue tracker.

    This object can be re-used to make multiple requests without calling
    apliclient.discovery.build multiple times.

    This class makes requests to the Monorail API.
    API explorer: https://goo.gl/xWd0dX
    """
    http = utils.ServiceAccountHttp()
    http.timeout = 30
    # Retry building the discovery client up to MAX_DISCOVERY_RETRIES times.
    # BUG FIX: the previous `while attempt != MAX_DISCOVERY_RETRIES` loop
    # (attempt starting at 1) performed only two attempts and, when both
    # failed, fell through silently with self._service never assigned.
    for attempt in range(1, MAX_DISCOVERY_RETRIES + 1):
      try:
        self._service = discovery.build(
            'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI, http=http)
        break
      except http_client.HTTPException as e:
        logging.error('Attempt #%d: %s', attempt, e)
        if attempt == MAX_DISCOVERY_RETRIES:
          raise

  def GetIssuesList(self, project='chromium', **kwargs):
    """Makes a request to the issue tracker to list bugs."""
    # Normalize the project in case it is empty or None.
    project = 'chromium' if project is None or not project.strip() else project
    request = self._service.issues().list(projectId=project, **kwargs)
    response = self._ExecuteRequest(request)
    return response.get('items', []) if response else []

  def GetIssue(self, issue_id, project='chromium'):
    """Makes a request to the issue tracker to get an issue."""
    # Normalize the project in case it is empty or None.
    project = 'chromium' if project is None or not project.strip() else project
    request = self._service.issues().get(projectId=project, issueId=issue_id)
    return self._ExecuteRequest(request)

  def GetIssueComments(self, issue_id, project='chromium'):
    """Gets all the comments for the given bug.

    Args:
      issue_id: Bug ID of the issue to update.
      project: Monorail project name (defaults to 'chromium').

    Returns:
      A list of comments, or None if the request failed.
    """
    request = self._service.issues().comments().list(
        projectId=project, issueId=issue_id, maxResults=1000)
    response = self._ExecuteRequest(request)
    if not response:
      return None
    # Default to [] so a response without an 'items' key yields an empty
    # list instead of raising TypeError.
    return [{
        'id': r['id'],
        'author': r['author'].get('name'),
        'content': r['content'],
        'published': r['published'],
        'updates': r['updates']
    } for r in response.get('items', [])]

  def _ExecuteRequest(self, request):
    """Makes a request to the issue tracker.

    Args:
      request: The request object, which has a execute method.

    Returns:
      The response if there was one, or else None.
    """
    response = request.execute(
        num_retries=MAX_REQUEST_RETRIES, http=utils.ServiceAccountHttp())
    return response
|
# -*-coding:utf-8 -*-
"""
"""
import os
import shutil
from application.model.event import Event
from application.model.payment_method import PaymentMethod
from application.model.permission import Permission
from application.model.role import Role
from application.model.role_permission import RolePermission
from application.model.scholar_payment_account import ScholarPaymentAccount
from application.model.service_template import ServiceTemplate
from application.model.user import User
from application.model.user_role import UserRole
from application.util import authorization
from application.util.database import session_scope
class ProjectInitializationToolkit(object):
    """Seeds a fresh deployment: built-in permissions, roles, the payment
    method, and (optionally) the admin user plus demo content."""

    def create_config_file(self) -> bool:
        """Install local_settings.py from the bundled template.

        Any existing local_settings.py is backed up as local_settings.py.bak.

        :returns: True on success, False on any failure.
        """
        try:
            if os.path.exists('local_settings.py'):
                os.rename('local_settings.py', 'local_settings.py.bak')
            shutil.copyfile('application/config/local_settings', 'local_settings.py')
            if not os.path.exists('local_settings.py'):
                return False
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; failure is reported via the return value.
            return False
        return True

    def execute(self, minimal=False) -> bool:
        """Run every initialization step inside one session scope.

        :param minimal: when True, only permissions, roles and the payment
            method are created (no admin user or demo content).
        :returns: True on success, False if any step raised (rollback is
            handled by session_scope()).
        """
        with session_scope() as session:
            try:
                self._create_built_in_permissions(session)
                self._create_built_in_role(session)
                self._assign_role_permission(session)
                self._create_built_in_payment_method(session)
                if not minimal:
                    self._create_built_in_user(session)
                    self._assign_role_for_built_in_user(session)
                    self._create_scholar_account_for_built_in_user(session)
                    self._create_default_event(session)
                    self._create_default_service_template(session)
                session.commit()
            except Exception:
                # Narrowed from a bare `except:`; see create_config_file.
                return False
        return True

    @staticmethod
    def _create_built_in_permissions(session):
        """Insert one Permission row per Permission.BuiltInPermission member."""
        for built_in_permission_enum in Permission.BuiltInPermission:
            built_in_permission = built_in_permission_enum.value
            session.add(Permission(
                name=built_in_permission.name,
                label=built_in_permission.label,
                description=built_in_permission.description,
            ))
        session.flush()

    @staticmethod
    def _create_built_in_role(session):
        """Insert one Role row per Role.BuiltInRole member."""
        # Loop variable renamed from `built_in_permission_enum` (copy-paste
        # leftover) for clarity; behavior unchanged.
        for built_in_role_enum in Role.BuiltInRole:
            built_in_role = built_in_role_enum.value  # type: Role.BuiltInRole.BuiltInRoleObject
            session.add(Role(
                name=built_in_role.name,
                description=built_in_role.description,
            ))
        session.flush()

    @classmethod
    def _assign_role_permission(cls, session):
        """Grant LOGIN to registration users; LOGIN + MANAGEMENT to administrators."""
        cls._assign_permission_to_role(session, Role.BuiltInRole.REGISTRATION_USER, Permission.BuiltInPermission.LOGIN)
        cls._assign_permission_to_role(session, Role.BuiltInRole.ADMINISTRATOR,
                                       Permission.BuiltInPermission.LOGIN, Permission.BuiltInPermission.MANAGEMENT)
        session.flush()

    @staticmethod
    def _assign_permission_to_role(session, built_in_role: Role.BuiltInRole, *built_in_permissions):
        """Link each of *built_in_permissions* to *built_in_role* via RolePermission rows."""
        role = session.query(Role).filter(
            Role.name == built_in_role.value.name).first()  # type: Role
        for built_in_permission in built_in_permissions:  # type: Permission.BuiltInPermission
            permission = session.query(Permission).filter(
                Permission.label == built_in_permission.value.label).first()  # type: Permission
            session.add(RolePermission(
                role_uuid=role.uuid,
                permission_uuid=permission.uuid,
            ))

    @staticmethod
    def _create_built_in_user(session):
        """Create the active built-in 'admin' account.

        NOTE(review): the default password looks like a typo of '12345678';
        kept as-is since changing it would break existing deployments — it
        should be rotated after first login regardless.
        """
        user = User(
            username='admin',
            email='admin@scholar.tool',
            password=authorization.toolkit.hash_plaintext('12345679'),
        )
        user.status = 1  # mark the account active
        session.add(user)
        session.flush()

    @staticmethod
    def _assign_role_for_built_in_user(session):
        """Give the admin user the ADMINISTRATOR role."""
        user = session.query(User).filter(User.username == 'admin').first()  # type: User
        role = session.query(Role).filter(Role.name == Role.BuiltInRole.ADMINISTRATOR.value.name).first()  # type: Role
        session.add(UserRole(
            user_uuid=user.uuid,
            role_uuid=role.uuid,
        ))
        session.flush()

    @staticmethod
    def _create_scholar_account_for_built_in_user(session):
        """Open a zero-balance scholar payment account for the admin user."""
        user = session.query(User).filter(User.username == 'admin').first()  # type: User
        session.add(ScholarPaymentAccount(
            user_uuid=user.uuid,
            balance=0,
        ))
        session.flush()

    @staticmethod
    def _create_default_event(session):
        """Publish the default welcome announcement, authored by admin."""
        user = session.query(User).filter(User.username == 'admin').first()  # type: User
        event = Event(
            author_uuid=user.uuid,
            title='欢迎使用 ScholarToolManager 项目',
            summary='这是一个默认公告',
            content="""
祝好运
"""
        )
        session.add(event)
        session.flush()

    @staticmethod
    def _create_default_service_template(session):
        """Seed the built-in data packages (2–5 GB) and monthly packages (3/5 GB).

        BUG FIX: the 5 GB monthly package previously described itself as
        '3GB套餐流量…' (copy-paste error); descriptions are now derived from
        the package size.
        """
        gb = 1024 * 1024 * 1024
        subtitle = '适合浏览网页,查找资料'
        # One-off data packages: 12-month validity, price 2 credits/GB (x1024).
        for size_gb in (2, 3, 4, 5):
            session.add(ServiceTemplate(
                service_type=ServiceTemplate.TYPE.DATA,
                title='{}G流量套餐'.format(size_gb),
                subtitle=subtitle,
                description='{}GB套餐流量#12个月有效期#基本技术支持'.format(size_gb),
                package=size_gb * gb,
                price=2 * size_gb * 1024,
                initialization_fee=1024,
            ))
        # Monthly packages: quota resets on the 1st, price 1 credit/GB (x1024).
        for size_gb in (3, 5):
            session.add(ServiceTemplate(
                service_type=ServiceTemplate.TYPE.MONTHLY,
                title='包月{}G套餐'.format(size_gb),
                subtitle=subtitle,
                description='{}GB套餐流量#1个月有效期#每月1号重置流量#基本技术支持'.format(size_gb),
                package=size_gb * gb,
                price=size_gb * 1024,
                initialization_fee=1024,
            ))
        session.flush()

    @staticmethod
    def _create_built_in_payment_method(session):
        """Register the built-in scholar-credit balance payment method."""
        session.add(PaymentMethod(
            name='学术积分账户余额'
        ))
        session.flush()


# Module-level singleton used by the application entry points.
toolkit = ProjectInitializationToolkit()
|
from NeuralNetwork import NeuralNetwork
from random import choice
from pygame.draw import circle
import numpy as np
from colorsys import hsv_to_rgb
# Pool of candidate network topologies, filled by generateSpecies().
species = []
maxSpecies = 10   # default pool size
inputs = 13       # input-layer width
outputs = 2       # output-layer width
layers = range(1, 6)       # hidden-layer count choices
layerSizes = range(2, 15)  # hidden-layer width choices


def generateSpecies(num):
    """Regenerate the global `species` pool with *num* random topologies.

    Each topology is [inputs, h1, ..., hk, outputs] with a random number of
    random-width hidden layers.  BUG FIX: `num` was previously ignored and
    `maxSpecies` used instead; `num` now controls the pool size.  Also
    returns the new pool for convenience (backward compatible).
    """
    global species
    species = []
    for _ in range(num):
        topology = [choice(layerSizes) for _ in range(choice(layers))]
        topology.insert(0, inputs)
        topology.append(outputs)
        species.append(topology)
    return species
class Brain(object):
    """Wraps a NeuralNetwork built from a randomly chosen species topology,
    with fixed-size input/output buffers and pygame rendering of weights."""

    def __init__(self):
        # Pick one of the pre-generated topologies (see generateSpecies()).
        self.species = choice(species)
        # Compact label: "Sh:<layer count>:<hidden sizes concatenated>".
        self.speciesString = "Sh:" + str(len(self.species)) + ":"
        for size in self.species[1:-1]:
            self.speciesString += str(size)
        self.network = NeuralNetwork(self.species)
        self.inputs = [0 for _ in range(inputs)]
        self.outputs = [0 for _ in range(outputs)]

    def __add__(self, otherBrain):
        # Crossover between brains is not implemented yet.
        pass

    def render(self, surface):
        """Draw each weight as a hue-coded dot on *surface*.

        Dot hue encodes |weight|; grid position encodes (row, column) within
        each weight matrix.
        """
        w = surface.get_width()
        h = surface.get_height()
        for layer in self.network.weights:
            rows = len(layer)
            hgap = int(w / (rows + 1))
            for row, neuron in enumerate(layer):
                columns = len(neuron)
                # BUG FIX: the vertical gap was derived from the surface
                # *width*; use the height so dots stay on-screen for
                # non-square surfaces.
                vgap = int(h / (columns + 1))
                for column, weight in enumerate(neuron):
                    rgb = hsv_to_rgb(abs(weight), 1, 1)
                    circle(surface,
                           (int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)),
                           (hgap * (row + 1), vgap * (column + 1)), 2)

    def mutate(self, mr):
        """Mutate the underlying network weights with rate *mr*."""
        self.network.mutate(mr)

    def copy(self):
        """Return a new Brain sharing this topology with copied weights."""
        baby = Brain()
        baby.species = self.species
        baby.speciesString = self.speciesString
        baby.network = self.network.copy()
        return baby

    def setInputs(self, arr):
        """Copy *arr* into the input buffer; length must match exactly."""
        if len(arr) != len(self.inputs):
            # typo fix in message: was "avaliable"
            raise Exception("Input array needs to match available inputs: %d != %d" % (len(self.inputs), len(arr)))
        for position, value in enumerate(arr):
            self.inputs[position] = value

    def run(self):
        """Feed the current inputs through the network into the output buffer."""
        output = self.network.predict(self.inputs)
        for position, value in enumerate(output):
            self.outputs[position] = value

    def getOutputs(self):
        """Return the output buffer from the most recent run()."""
        return self.outputs
if __name__ == "__main__":
generateSpecies(10)
test = Brain()
print(test.speciesString)
print(test.species)
test.setInputs([1,2,3,4,5,6,7,8,9,10,11,12,13])
test.run()
print(test.getOutputs())
test2 = test.copy()
print(test.network.weights[0])
print(test2.network.weights[0])
print(test2.network.copy().weights[0]) |
parta = "smallsdss"
partb = ["12.5", "17.5", "22.5", "27.5"]
neg = ["", "neg"]
end = ".csv"
error = ["", "Calc_Error_", "Error_"]
temp = []
for a in error:
for b in neg:
for c in partb:
temp.append(a+parta+b+c+end)
print(temp)
titles = temp
# Row angles for the two sweep directions; "neg" files skip the first angle.
positive_ang = [10, 31, 50, 70, 94, 110, 130, 150, 178, 187, 203]
negative_ang = positive_ang[1:]

# Rewrite the first CSV column of every file (after the header row) with the
# appropriate angle values.  NOTE(review): assumes each file has at most
# len(angles) data rows; extra rows would raise IndexError.
for filename in titles:
    angles = negative_ang if "neg" in filename else positive_ang
    # `with` ensures the handles are closed even if a line is malformed.
    with open(filename, "r") as f:
        lines = f.readlines()
    rewritten = lines[:1]  # keep the header untouched (safe for empty files)
    for idx, line in enumerate(lines[1:]):
        comma = line.find(",")
        rewritten.append(str(angles[idx]) + line[comma:])
    with open(filename, "w") as writer:
        writer.writelines(rewritten)
|
# Daniel Ogunlana
# 9/9/2014
# Task 6
# 1. Write a program that will ask the user for three integers and display the total.
# 2. Write a program that will ask the user for two integers and display the result of multiplying them together.
# 3. Ask the user for the length, width and depth of a rectangular swimming pool. Calculate the volume of water required.
# python Task 6.2
# NOTE(review): only the question is printed; no answer is read or checked
# (operator precedence makes 3 + 4 * 2 == 11).
print ("What is 3 + 4 * 2?")
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from main.models import Skill
# Create your views here.
class IndexView(ListView):
    """Lists all Skill objects; adds the signed-in user and their profile to the context."""

    template_name = 'skills/index.html'
    context_object_name = 'skills'
    model = Skill

    def get_context_data(self, **kwargs):
        ctx = super(IndexView, self).get_context_data(**kwargs)
        user = self.request.user
        # NOTE(review): is_authenticated is callable only on Django <= 1.9;
        # on 1.10+ it is a property and calling it fails — confirm the
        # project's Django version before upgrading.
        if user.is_authenticated():
            ctx['user'] = user
            ctx['profile'] = user.profile
        return ctx
class SkillDetailView(DetailView):
    """Shows one Skill plus every profile holding it; adds the signed-in
    user and their profile to the context."""

    template_name = 'skills/detail.html'
    context_object_name = 'skill'
    model = Skill

    def get_context_data(self, **kwargs):
        ctx = super(SkillDetailView, self).get_context_data(**kwargs)
        user = self.request.user
        skill = ctx['skill']
        # Reverse relation: all profiles that list this skill.
        ctx['skill_profiles'] = skill.profile_set.all()
        # NOTE(review): same is_authenticated() version caveat as IndexView.
        if user.is_authenticated():
            ctx['user'] = user
            ctx['profile'] = user.profile
        return ctx
|
#
# Bug:36536
# Title: The glite wms purge storage library should rely on LBProxy while logging CLEAR events
# Link: https://savannah.cern.ch/bugs/?36536
#
import logging
from libutils import Job_utils
from libutils.Exceptions import *
def run(utils):
    """Regression test for bug #36536: after job output retrieval, the purge
    storage library must log a CLEAR event via LBProxy so the job's record is
    removed from the LBProxy database.

    Requires WMS_USERNAME/WMS_PASSWORD (WMS host access) and USERNAME/PASSWORD
    (LBProxy MySQL) to be set in the configuration file; raises GeneralError
    when they are missing or when either database check fails.
    """
    bug = '36536'
    logging.info("Start regression test for bug %s" % (bug))
    logging.warning("To verify this bug you need access to LBProxy DATABASE. You have to set USERNAME and PASSWORD attributes at configuration file")
    if utils.WMS_USERNAME == '' or utils.WMS_PASSWORD == '' or utils.USERNAME == '' or utils.PASSWORD == '':
        logging.error("Missing required variables (WMS_USERNAME,WMS_PASSWORD,USERNAME,PASSWORD) from configuration file")
        raise GeneralError("Missing required variables", "To verify this bug it is necessary to set WMS_USERNAME,WMS_PASSWORD,USERNAME and PASSWORD in the configuration file")
    ssh = utils.open_ssh(utils.get_WMS(), utils.get_Username(), utils.get_Password())
    logging.info("Prepare and submit a simple job")
    utils.use_utils_jdl()
    utils.set_jdl(utils.get_jdl_file())
    JOBID = Job_utils.submit_wait_finish(utils, "")
    logging.info("Create SQL script file")
    # %% survives the outer %-interpolation so the LIKE pattern keeps its wildcards.
    utils.execute_remote_cmd(ssh, "echo \"SELECT * FROM jobs WHERE dg_jobid like '%%%s%%';\" > /root/test.sql" % (JOBID))
    logging.info("Retrieve the job record from LBProxy database")
    mysql_cmd = "mysql -u %s --password=%s lbserver20 < /root/test.sql" % (utils.USERNAME, utils.PASSWORD)
    output = utils.execute_remote_cmd(ssh, mysql_cmd)
    if output.find(JOBID) == -1:
        # Typo fix: "datasse" -> "database" in the operator-facing messages.
        logging.error("Unable to retrieve the job record from LBProxy database for job id:%s" % (JOBID))
        raise GeneralError("Retrieve job record from LBProxy database", "Unable to retrieve the job record from LBProxy database for job id:%s" % (JOBID))
    logging.info("Get job output")
    Job_utils.output_normal_job(utils, JOBID)
    # Check LBProxy: after output retrieval the record must be gone.
    logging.info("Check again the job record in LBProxy database")
    output = utils.execute_remote_cmd(ssh, mysql_cmd)
    if output.find(JOBID) != -1:
        logging.error("Job's record in LBProxy database has not been removed.")
        raise GeneralError("", "Job's record in LBProxy database has not been removed.")
    logging.info("End of regression test for bug %s" % (bug))
from pages.allsubjectspage.add_subject_page import AddSubjectPage
from pages.datasenderpage.data_sender_locator import SEND_IN_DATA_LINK, PROJECT_LIST, REGISTER_SUBJECT, SMARTPHONE_NAV
from pages.page import Page
from pages.smartphoneinstructionpage.smart_phone_instruction_page import SmartPhoneInstructionPage
from pages.websubmissionpage.web_submission_page import WebSubmissionPage
class DataSenderPage(Page):
    """Page object for the data-sender landing page."""

    def __init__(self, driver):
        Page.__init__(self, driver)

    def _open(self, locator, page_cls):
        # Shared click-and-navigate helper for the links on this page.
        self.driver.find(locator).click()
        return page_cls(self.driver)

    def send_in_data(self):
        """Open the web submission form."""
        return self._open(SEND_IN_DATA_LINK, WebSubmissionPage)

    def get_project_list(self):
        """Return the project-list element (no navigation)."""
        return self.driver.find(PROJECT_LIST)

    def register_subject(self):
        """Open the add-subject form."""
        return self._open(REGISTER_SUBJECT, AddSubjectPage)

    def navigate_to_smart_phone_instruction(self):
        """Open the smartphone instructions page."""
        return self._open(SMARTPHONE_NAV, SmartPhoneInstructionPage)
# This monkey-patches scons' CacheDir to synchronize the cache
# to an s3 bucket.
#
# To enable it:
#
# - ensure python packages are installed: boto3, humanize
# - create a site_init.py file in site_scons containing 'import s3_cache'
# - setup ~/.aws/credentials with an access key
# - set the SCONS_CACHE_S3_BUCKET environment variable to a bucket name
#
# The --cache-debug=- flag is recommended to see s3 cache operations.
import boto3
import botocore.exceptions
import humanize
import os
import os.path
import stat
import SCons.Action
import SCons.CacheDir
import SCons.Errors
# fail early if SCONS_CACHE_S3_BUCKET is not set
S3_BUCKET = os.environ['SCONS_CACHE_S3_BUCKET']
# Shared module-level client; credentials come from ~/.aws/credentials (see header).
s3_client = boto3.client('s3')
def make_cache_dir(fs, cachedir):
    """Ensure cachedir exists, tolerating a concurrent creator."""
    if fs.isdir(cachedir):
        return
    try:
        fs.makedirs(cachedir)
    except EnvironmentError:
        # Another process may have beaten us to creating the directory;
        # only fail if it still does not exist.
        if not fs.isdir(cachedir):
            raise SCons.Errors.EnvironmentError("Unable to create cache dir")
def CacheRetrieveFunc(target, source, env):
    """Fetch target[0] from the disk cache, downloading from S3 first on a
    disk-cache miss.  Returns 0 on success and 1 on a full miss (neither on
    disk nor in S3), matching SCons' retrieve-action convention."""
    t = target[0]
    fs = t.fs
    cd = env.get_CacheDir()
    cachedir, cachefile = cd.cachepath(t)
    if not fs.exists(cachefile):
        cd.CacheDebug('CacheRetrieve(%s): %s not in disk cache\n', t, cachefile)
        try:
            # Try to download the file from S3 into the disk cache
            sig = os.path.basename(cachefile)
            head = s3_client.head_object(Bucket=S3_BUCKET, Key=sig)
            download_size = humanize.naturalsize(head['ContentLength'], gnu=True)
            # %% escapes survive the size interpolation so CacheDebug can
            # still fill in t and cachefile.
            cd.CacheDebug('CacheRetrieve(%%s): retrieving %%s from s3 (%s)\n' % download_size,
                          t, cachefile)
            make_cache_dir(fs, cachedir)
            # no race here: boto3 downloads to a temp file and then links into place
            s3_client.download_file(S3_BUCKET, sig, cachefile)
        except botocore.exceptions.ClientError as e:
            if int(e.response['Error']['Code']) == 404:
                # Not in S3 either: report a plain cache miss so SCons rebuilds.
                cd.CacheDebug('CacheRetrieve(%s): %s not in s3\n', t, cachefile)
                return 1
            else:
                raise SCons.Errors.EnvironmentError('boto exception %s' % e)
    cd.CacheDebug('CacheRetrieve(%s): retrieving %s from disk cache\n', t, cachefile)
    if SCons.Action.execute_actions:
        if fs.islink(cachefile):
            fs.symlink(fs.readlink(cachefile), t.path)
        else:
            env.copy_from_cache(cachefile, t.path)
        st = fs.stat(cachefile)
        # Restore the cached mode and make the retrieved file owner-writable.
        fs.chmod(t.path, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
    return 0
# Monkey-patch SCons' retrieve actions (normal and silent) with the S3-aware one.
SCons.CacheDir.CacheRetrieve = SCons.Action.Action(CacheRetrieveFunc, None)
SCons.CacheDir.CacheRetrieveSilent = SCons.Action.Action(CacheRetrieveFunc, None)
def CachePushFunc(target, source, env):
    """Push target[0] into the disk cache and (unless marked noshare) to S3.

    Unlike stock SCons, any error while copying or uploading halts the build,
    guaranteeing every layer reaches the shared cache.
    """
    t = target[0]
    if t.nocache:
        return
    fs = t.fs
    cd = env.get_CacheDir()
    cachedir, cachefile = cd.cachepath(t)
    if fs.exists(cachefile):
        # Don't bother copying it if it's already there. Note that
        # usually this "shouldn't happen" because if the file already
        # existed in cache, we'd have retrieved the file from there,
        # not built it. This can happen, though, in a race, if some
        # other person running the same build pushes their copy to
        # the cache after we decide we need to build it but before our
        # build completes.
        cd.CacheDebug('CachePush(%s): %s already exists in disk cache\n', t, cachefile)
        return
    cd.CacheDebug('CachePush(%s): pushing %s to disk cache\n', t, cachefile)
    # Stage into a pid-suffixed temp file, then rename into place below.
    tempfile = cachefile + '.tmp' + str(os.getpid())
    make_cache_dir(fs, cachedir)
    # Unlike the original CachePushFunc, we want any error in the
    # following to halt the build. This is to ensure that every
    # layer is pushed to the shared cache.
    if fs.islink(t.path):
        fs.symlink(fs.readlink(t.path), tempfile)
    else:
        fs.copy2(t.path, tempfile)
    if t.__dict__.get('noshare', False):
        cd.CacheDebug('CachePush(%s): not pushing %s to s3 (noshare)\n', t, cachefile)
    else:
        # Upload the file to S3 before linking it into place
        tempfile_size = humanize.naturalsize(fs.getsize(tempfile), gnu=True)
        cache_key = os.path.basename(cachefile)
        cd.CacheDebug('CachePush(%%s): pushing %%s to s3 (%s)\n' % tempfile_size,
                      t, cachefile)
        try:
            s3_client.upload_file(tempfile, S3_BUCKET, cache_key,
                                  ExtraArgs={'Metadata': {'VM-Layer': str(t)}})
        except botocore.exceptions.ClientError as e:
            # scons doesn't print errors raised here, but it does stop.
            # print(e) is valid on both Python 2 and 3; the original used the
            # Python-2-only statement form "print e".
            print(e)
            raise SCons.Errors.EnvironmentError('boto exception %s' % e)
    fs.rename(tempfile, cachefile)
    st = fs.stat(t.path)
    fs.chmod(cachefile, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
SCons.CacheDir.CachePush = SCons.Action.Action(CachePushFunc, None)
|
n = int(input())
nails = [int(x) for x in input().split()]
"""def count_crossed_lines(i, j, crossed=0):
# base case
if i == n-1:
return crossed
# recursive loop
if nails[i] > nails[j]:
crossed += 1
j += 1
if j == n:
i += 1
j = i + 1
return count_crossed_lines(i, j, crossed)
"""
def count_crossed_lines(y, crossed=0):
    """Count adjacent inversions in the global `nails` from position y to the
    end, echoing each visited y (iterative form of the original recursion)."""
    while True:
        print("y=", y, end=" ")
        if y == n:
            return crossed
        if nails[y - 1] > nails[y]:
            crossed += 1
        y += 1
# Sum adjacent-inversion counts starting from every position 1..n-1.
i = total_count = 0
j = 1  # NOTE(review): unused here -- leftover from the commented-out (i, j) version.
while i < n-1:
    i += 1
    total_count += count_crossed_lines(i)
    print()
# Dead code: the iterative (i, j) version, parked in a string literal.
"""
i = cnt = 0
j = 1
while i < n-1:
    if nails[i] > nails[j]:
        cnt += 1
    j += 1
    if j == n:
        i += 1
        j = i+1
"""
print(total_count)
# Debug: sum of (index - height) over all nails.
cnt = 0
for i in range(n):
    cnt += i - nails[i]
print("cnt=", cnt)
# Debug: dump each nail height with its position.
ix = 0
for nail in nails:
    print(nail, ix)
    ix += 1
# The code below replaces a merge-sort inversion counter written in C (with
# Portuguese comments) that had been pasted into this Python file verbatim,
# making the whole file a syntax error.  It is translated to working Python.
def merge_sort(init, end, arr=None):
    """Sort arr[init..end] (inclusive) in place and return the number of
    inversions that range contained.

    `arr` defaults to the module-level `vetor` list, mirroring the global
    array the original C code used.  When an element from the right half is
    merged before the left half is exhausted, every remaining left element
    forms one inversion with it.
    """
    if arr is None:
        arr = vetor
    if init >= end:
        return 0
    mid = (init + end) // 2
    inversions = merge_sort(init, mid, arr) + merge_sort(mid + 1, end, arr)
    merged = []
    i, j = init, mid + 1
    while i <= mid and j <= end:
        if arr[j] < arr[i]:
            # arr[j] jumps ahead of all remaining left-half elements.
            inversions += mid - i + 1
            merged.append(arr[j])
            j += 1
        else:
            merged.append(arr[i])
            i += 1
    merged.extend(arr[i:mid + 1])
    merged.extend(arr[j:end + 1])
    arr[init:end + 1] = merged
    return inversions


def run_inversion_count():
    """Translation of the pasted C main(): read n and n values (1-indexed),
    print the inversion count.  Not invoked automatically because the script
    above has already consumed stdin."""
    global vetor
    count = int(input())
    vetor = [0] + [int(tok) for tok in input().split()]
    print(merge_sort(1, count))
|
# -*- coding:UTF-8 -*_
import psutil
a = psutil.cpu_count()               # number of logical CPUs
b = psutil.cpu_count(logical=False)  # number of physical cores
# e.g. 2 physical / 4 logical means a dual-core CPU with hyper-threading;
# 4 physical / 4 logical means four cores without hyper-threading.
print(a, b)
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords, wordnet
from unidecode import unidecode
import string
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
import spacy
nlp = spacy.load('fr_core_news_md')
french_stopwords = nltk.corpus.stopwords.words('french')
from snowballstemmer import stemmer
ar_stemmer = stemmer("arabic")
from qalsadi import lemmatizer
ar_lemmer = lemmatizer.Lemmatizer()
"""
ENGLISH
"""
def get_syntactically_preprocessed_sentence(sentence):
    """Lowercase, expand contractions, strip digits and punctuation tokens,
    then scrub residual quote/hyphen characters (English, syntax-level)."""
    text = sentence.lower()
    for old, new in (("/", ""), ("\\", ""), ('"', ""), ("''", ""), ("`", ""),
                     ("'s", " is"), ("'m", " am"), ("'ll", " will"),
                     ("'re", " are"), ("n't", " not")):
        text = text.replace(old, new)
    # Drop accents / non-ascii characters, then all digits.
    text = unidecode(text)
    text = "".join(ch for ch in text if not ch.isdigit())
    # Remove tokens that are pure punctuation.
    stopset = list(string.punctuation)
    text = " ".join(tok for tok in word_tokenize(text) if tok not in stopset)
    for old, new in (('"', ""), ("''", ""), ("`", ""), ("'s", " is"),
                     ("-", ""), (".", ""), ("'", "")):
        text = text.replace(old, new)
    return text
def get_syntactically_preprocessed_paragraph(paragraph):
    """Syntactically preprocess each sentence of the paragraph.

    Returns a single string with a leading space before every sentence
    (including the first), matching the accumulate-with-separator build.
    """
    pieces = [get_syntactically_preprocessed_sentence(s)
              for s in sent_tokenize(paragraph)]
    return "".join(" " + piece for piece in pieces)
def get_wordnet_pos(word):
    """Map POS tag to first character lemmatize() accepts."""
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    mapping = {
        "J": wordnet.ADJ,
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "R": wordnet.ADV,
    }
    # Default to NOUN for any tag outside the four mapped families.
    return mapping.get(first_letter, wordnet.NOUN)
def get_semantically_preprocessed_sentence(sentence):
    """Lowercase, de-accent, strip digits, drop stopwords+punctuation, then
    POS-aware WordNet lemmatization (English, semantics-level)."""
    text = sentence.lower()
    for noise in ("/", "\\", '"', "''", "`"):
        text = text.replace(noise, "")
    text = unidecode(text)
    text = "".join(ch for ch in text if not ch.isdigit())
    # English stopwords plus punctuation form the removal set.
    stopset = stopwords.words('english') + list(string.punctuation)
    text = " ".join(tok for tok in word_tokenize(text) if tok not in stopset)
    # Lemmatize each remaining token with its detected part of speech.
    lemmatizer = WordNetLemmatizer()
    lemmas = [lemmatizer.lemmatize(tok, get_wordnet_pos(tok))
              for tok in word_tokenize(text)]
    text = " ".join(lemmas)
    for noise in ('"', "''", "`", "-", ".", "'"):
        text = text.replace(noise, "")
    return text
def get_semantically_preprocessed_paragraph(paragraph):
    """Split the paragraph into sentences and semantically preprocess each.

    Returns a list with one preprocessed string per sentence.
    """
    preprocessed_paragraph = []
    paragraph = paragraph.replace("'s"," is").replace("'m"," am").replace("'ll"," will").replace("'re"," are").replace("n't"," not")
    # NOTE(review): replacing " " with "" removes ALL spaces as written; this
    # looks like it was meant to collapse runs of spaces -- confirm intent.
    paragraph = paragraph.replace("\n", "").replace(" ", "").replace(" ", "")
    punctuations = list(string.punctuation)
    punctuations.remove('.')
    for punctuation in punctuations:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, making this loop a no-op.
        paragraph = paragraph.replace(punctuation, "")
    for sentence in sent_tokenize(paragraph):
        preprocessed_sentence = get_semantically_preprocessed_sentence(sentence)
        preprocessed_paragraph.append(preprocessed_sentence)
    return preprocessed_paragraph
"""
FRENCH
"""
def get_semantically_preprocessed_french_paragraph(paragraph):
    """Split the paragraph into sentences and semantically preprocess each
    (French).  Returns a list with one preprocessed string per sentence."""
    preprocessed_paragraph = []
    # NOTE(review): replacing " " with "" removes ALL spaces as written; this
    # looks like it was meant to collapse runs of spaces -- confirm intent.
    paragraph = paragraph.replace("\n", "").replace(" ", "").replace(" ", "")
    punctuations = list(string.punctuation)
    punctuations.remove('.')
    for punctuation in punctuations:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, making this loop a no-op.
        paragraph = paragraph.replace(punctuation, "")
    for sentence in sent_tokenize(paragraph):
        preprocessed_sentence = get_semantically_preprocessed_french_sentence(sentence)
        preprocessed_paragraph.append(preprocessed_sentence)
    return preprocessed_paragraph
def get_semantically_preprocessed_french_sentence(sentence):
    """French: lowercase, de-accent, strip digits/stopwords/punctuation, then
    lemmatize with the spaCy French pipeline."""
    # https://newbedev.com/lemmatize-french-text
    text = sentence.lower()
    for old, new in (("/", ""), ("\\", ""), ('"', ""), ("''", ""),
                     ("`", ""), ("-", " ")):
        text = text.replace(old, new)
    text = unidecode(text)
    text = "".join(ch for ch in text if not ch.isdigit())
    stopset = french_stopwords + list(string.punctuation)
    text = " ".join(tok for tok in word_tokenize(text) if tok not in stopset)
    doc = nlp(u"" + text)
    lemmatized = " ".join(token.lemma_ for token in doc)
    for noise in ('"', "''", "`", "-", ".", "'"):
        lemmatized = lemmatized.replace(noise, "")
    return lemmatized
def get_syntactically_preprocessed_french_paragraph(paragraph):
    """Normalise hyphens/newlines, then syntactically preprocess each sentence
    (French).  The result keeps a leading space before every sentence.

    NOTE(review): replace(" ", " ") is a no-op as written -- it may originally
    have collapsed double spaces; confirm intent.
    """
    cleaned = paragraph.replace("-", " ").replace("\n", " ").replace(" ", " ").replace(" ", " ")
    parts = [get_syntactically_preprocessed_french_sentence(s)
             for s in sent_tokenize(cleaned)]
    return "".join(" " + part for part in parts)
def get_syntactically_preprocessed_french_sentence(sentence):
    """French: lowercase, de-accent, strip digits and punctuation tokens
    (syntax-level, no lemmatization)."""
    text = sentence.lower()
    for noise in ("/", "\\", '"', "''", "`"):
        text = text.replace(noise, "")
    text = unidecode(text)
    text = "".join(ch for ch in text if not ch.isdigit())
    punct = list(string.punctuation)
    text = " ".join(tok for tok in word_tokenize(text) if tok not in punct)
    for noise in ('"', "''", "`", "-", ".", "'"):
        text = text.replace(noise, "")
    return text
"""
ARABIC
"""
def get_semantically_preprocessed_arabic_paragraph(paragraph):
    """Split the paragraph into sentences and semantically preprocess each
    (Arabic).  Returns a list with one preprocessed string per sentence."""
    preprocessed_paragraph = []
    # NOTE(review): replacing " " with "" removes ALL spaces as written; this
    # looks like it was meant to collapse runs of spaces -- confirm intent.
    paragraph = paragraph.replace("\n", "").replace(" ", "").replace(" ", "").replace("|", "").replace(".....", ".").replace("....", ".").replace("...", ".").replace("،", " ").replace(",", " ")
    punctuations = list(string.punctuation)
    punctuations.remove('.')
    for punctuation in punctuations:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, making this loop a no-op.
        paragraph = paragraph.replace(punctuation, "")
    for sentence in sent_tokenize(paragraph):
        preprocessed_sentence = get_semantically_preprocessed_arabic_sentence(u"" + sentence)
        preprocessed_paragraph.append(preprocessed_sentence)
    return preprocessed_paragraph
def get_semantically_preprocessed_arabic_sentence(sentence):
    """Arabic: scrub punctuation, digits and Latin letters, then lemmatize
    with qalsadi and stem with the Snowball Arabic stemmer, skipping tokens
    the lemmatizer labels as stopwords."""
    sentence = sentence.replace("/","").replace("|", "").replace("\\","").replace('"',"").replace("''","").replace("`","").replace("'","").replace("-", " ").replace("–", " ").replace("؟", " ").replace(".....", " ").replace("....", " ").replace("...", " ").replace("..", " ").replace(".", " ").replace("~", " ")
    sentence = ''.join([i for i in sentence if not i.isdigit()])
    stopset = list(string.punctuation)
    sentence = " ".join([i for i in word_tokenize(sentence) if i not in stopset])
    # Replace any residual punctuation characters inside tokens with spaces.
    for letter in stopset:
        if sentence.__contains__(letter):
            sentence = sentence.replace(letter, " ")
    # Strip Latin letters (plus common French accented ones) entirely.
    eng_letters = list(string.ascii_letters) + ['é', 'è', 'ê', 'à', 'ù', 'î' , 'ô', 'û', 'ç']
    for letter in eng_letters:
        if sentence.__contains__(letter):
            sentence = sentence.replace(letter, "")
    words = []
    for word in word_tokenize(sentence):
        # NOTE(review): get_wordnet_pos returns an English WordNet POS tag;
        # confirm qalsadi's lemmatize actually accepts these values.  The
        # lemma[0]/lemma[1] indexing assumes a (lemma, pos) pair -- confirm.
        lemma = ar_lemmer.lemmatize(word, get_wordnet_pos(word))
        # print(lemma)
        # print(lemma[1])
        if lemma[1] != "stopword":
            lem = ar_stemmer.stemWord(lemma[0])
            words.append(lem)
    sentence = " ".join(word for word in words)
    sentence = sentence.replace(' '," ").replace('"',"").replace("''","").replace("`","").replace("-","").replace(".","").replace("'","")
    return sentence
def get_syntactically_preprocessed_arabic_paragraph(paragraph):
    """Normalise separators and dot runs, then syntactically preprocess each
    sentence (Arabic).  The result keeps a leading space before every
    sentence, matching the accumulate-with-separator construction.

    NOTE(review): replace(" ", " ") is a no-op as written -- it may originally
    have collapsed double spaces; confirm intent.
    """
    cleaned = paragraph.replace("-", " ").replace("\n", " ").replace("|", "").replace(" ", " ").replace(" ", " ").replace("؟", ".").replace(".....", ".").replace("....", ".").replace("...", ".").replace("..", ".")
    parts = [get_syntactically_preprocessed_arabic_sentence(s)
             for s in sent_tokenize(cleaned)]
    return "".join(" " + part for part in parts)
def get_syntactically_preprocessed_arabic_sentence(sentence):
    """Arabic: normalise separators/dot runs, strip digits, punctuation tokens
    and Latin letters (syntax-level, no lemmatization)."""
    # Ordered replacements: the longer dot runs must be collapsed first.
    for old, new in (("/", ""), ("|", ""), ("\\", ""), ('"', ""), ("''", ""),
                     ("`", ""), ("-", " "), ("–", " "), ("؟", "."),
                     (".....", "."), ("....", "."), ("...", "."), ("..", ".")):
        sentence = sentence.replace(old, new)
    sentence = "".join(ch for ch in sentence if not ch.isdigit())
    punct = list(string.punctuation)
    sentence = " ".join(tok for tok in word_tokenize(sentence) if tok not in punct)
    latin = list(string.ascii_letters) + ['é', 'è', 'ê', 'à', 'ù', 'î', 'ô', 'û', 'ç']
    for ch in latin:
        if ch in sentence:
            sentence = sentence.replace(ch, "")
    for noise in ('"', "''", "`", "-", ".", "'"):
        sentence = sentence.replace(noise, "")
    return sentence
|
import re
from util import hook, http, pystuff, randout
import usertracking
import sys
import time
re_lineends = re.compile(r'[\r\n]*')
@hook.command
def python(inp, prefix="direct call", conn=None, nick=None):
    ".python <prog> -- executes python code <prog>"
    # The original loop was not valid Python ("i not 2", "i++") and read
    # `output` before assignment.  This preserves the apparent intent: retry
    # the sandboxed eval until something comes back, and give up with a
    # failure message on the third attempt.
    output = None
    for attempt in range(3):
        if not output and attempt != 2:
            output = pystuff.eval(inp, nick)
        if not output and attempt == 2:
            output = "no result " + randout.fail()
    return output
@hook.command
def ply(inp, bot=None, input=None, nick=None, db=None, chan=None):
    "execute local python - only admins can use this"
    if not usertracking.query(db, bot.config, nick, chan, "ply"):
        return "nope " + randout.fail()
    try:
        # Build an execution namespace from module globals, the hook's input
        # mapping, and this frame's locals.
        _blah = dict(globals())
        _blah.update(input)
        _blah.update(locals())
        # SECURITY: executes arbitrary code -- acceptable only because access
        # is gated by the admin check above.  The exec() call form works on
        # both Python 2 and 3 (the original used the Python-2-only statement
        # form "exec inp in _blah").
        exec(inp, _blah)
        # Convention: the snippet stores its result in _r.
        return _blah["_r"] if "_r" in _blah else None
    except Exception:
        # Report the last two traceback lines back to the caller via notice.
        import traceback
        s = traceback.format_exc()
        sp = [x for x in s.split("\n") if x]
        if len(sp) > 2:
            sp = sp[-2:]
        for i in sp:
            input.notice(i)
|
from typing import *
import os.path
# types
# types
Clause = Sequence[int]  # signed integer literals (sign = polarity) -- presumed DIMACS style; confirm
Solution = Sequence[int]  # a full assignment as a sequence of signed literals
PartialSolution = Union[Solution, Literal[False]]  # False encodes "no solution"
class KissatError(Exception):
    """Error raised while locating or running the external kissat solver."""
    ...
def replacedict(s: str, d: Mapping[str, str]) -> str:
    """Apply every pattern -> replacement substitution in d to s, in the
    mapping's iteration order, and return the result."""
    result = s
    for old, new in d.items():
        result = result.replace(old, new)
    return result
def clause_negation(clause: Clause) -> Clause:
    """Return a new clause with every literal's sign flipped."""
    negated = []
    for literal in clause:
        negated.append(-literal)
    return negated
# Resolve the kissat binary; the module may be imported from several working
# directories, so probe a few relative locations.
kissat_path = ""
candidates = ["./kissat.exe", "./lib/kissat.exe", "./src/lib/kissat.exe", "../kissat.exe"] # will be called from various places so need to test various possibilities
for i in candidates:
    if os.path.exists(i):
        # NOTE(review): no break here, so the LAST existing candidate wins --
        # confirm that is intended (a first-match search would break).
        kissat_path = os.path.abspath(i)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pykafka import KafkaClient
from pykafka.common import OffsetType
from schemas.MonitorMessage import MonitorMessage
from schemas.DataField import DataField
from schemas.GEMHist import GEMHist
from schemas.GEMTrack import GEMTrack
import pylab as pl
import matplotlib.pyplot as plt
import argparse, codecs, numpy
# Module-level plot buffers shared between the Kafka consumer and Plotit().
xtrack = numpy.zeros((50, 128))  # x-plane ADC values indexed [time bin][strip]
ytrack = numpy.zeros((50, 128))  # y-plane ADC values indexed [time bin][strip]
xhist = numpy.zeros(128)         # x-plane per-strip counts
yhist = numpy.zeros(128)         # y-plane per-strip counts
def Plotit():
    """Redraw both monitor figures from the current global buffers:
    figure 1 shows per-strip count bars, figure 2 the track ADC images."""
    pl.ion()
    pl.clf()
    global xhist, yhist, xtrack, ytrack
    f1 = plt.figure(1)
    f1.clf()
    plt.suptitle("Detector Monitor")
    ax1 = plt.subplot(2,1,1)
    ax2 = plt.subplot(2,1,2)
    ax1.set_title("x-strips - " + str(numpy.sum(xhist)) + " counts")
    ax2.set_title("y-strips - " + str(numpy.sum(yhist)) + " counts")
    ax1.bar(numpy.arange(128), xhist[0:128], 1.0, color='r')
    ax2.bar(numpy.arange(128), yhist[0:128], 1.0, color='r')
    f2 = plt.figure(2)
    f2.clf()
    ax3 = plt.subplot(2,1,1)
    ax3.set_title("X-Tracks")
    plt.imshow(xtrack[0:128], interpolation="none")
    ax4 = plt.subplot(2,1,2)
    ax4.set_title("Y-Tracks")
    plt.imshow(ytrack[0:128], interpolation="none")
    #plt.show()
    # Short pause lets the GUI event loop process the redraw without blocking.
    pl.pause(0.0001)
#
def updateGemTrack(gemtrackdata):
    """Decode a GEMTrack flatbuffer and rebuild the (time x strip) ADC maps."""
    global xtrack, ytrack
    # Clear the previous event; buffers are (50 time bins) x (128 strips).
    xtrack = numpy.zeros((50, 128))
    ytrack = numpy.zeros((50, 128))
    x = GEMTrack()
    x.Init(gemtrackdata.Bytes, gemtrackdata.Pos)
    for i in range(x.XtrackLength()):
        #print("x: strip: %d, time: %d, adc: %d" % (x.Xtrack(i).Strip(), x.Xtrack(i).Time(), x.Xtrack(i).Adc()))
        xtrack[x.Xtrack(i).Time()][x.Xtrack(i).Strip()] = x.Xtrack(i).Adc()
    for i in range(x.YtrackLength()):
        #print("y: strip: %d, time: %d, adc: %d" % (x.Ytrack(i).Strip(), x.Ytrack(i).Time(), x.Ytrack(i).Adc()))
        ytrack[x.Ytrack(i).Time()][x.Ytrack(i).Strip()] = x.Ytrack(i).Adc()
#
def updateGemHist(gemhistdata):
    """Decode a GEMHist flatbuffer and replace the per-strip histograms."""
    global xhist, yhist
    x = GEMHist()
    x.Init(gemhistdata.Bytes, gemhistdata.Pos)
    xhist = x.Xhist_as_numpy_array()
    yhist = x.Yhist_as_numpy_array()
    #print(xhist[80:100])
#print(xhist[80:100])
def main():
    """Consume NMX monitor flatbuffers from Kafka and live-update the plots."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", help="Broker to connect to.", type=str)
    args = parser.parse_args()
    if args.b is None:  # identity check instead of '== None'
        print("Broker must be given as argument.")
        exit(0)
    envtopic = "NMX_monitor"
    client = KafkaClient(hosts=args.b)
    topic = client.topics[codecs.encode(envtopic, "utf-8")]
    # Start from the latest offset: this is a live monitor, not a replayer.
    consumer = topic.get_simple_consumer(
        auto_offset_reset=OffsetType.LATEST,
        reset_offset_on_start=True)
    print("Starting main loop")
    while True:
        try:
            msg = consumer.consume(block=True)
            if msg is not None:
                a = bytearray(msg.value)
                arr = MonitorMessage.GetRootAsMonitorMessage(a, 0)
                if arr.DataType() == DataField.GEMHist:
                    updateGemHist(arr.Data())
                elif arr.DataType() == DataField.GEMTrack:
                    updateGemTrack(arr.Data())
                    # NOTE(review): the plot only refreshes after a track
                    # message -- confirm hist-only streams should not redraw.
                    Plotit()
        except KeyboardInterrupt:
            break

if __name__ == "__main__":
    main()
|
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib import cm
from multires2DRendering import multires2DRendering
def computeCorrectRefl(material, idd, blockWidth):
    """Re-render reflectance for sample idd+1 of `material`, scaling the base
    albedo by the per-block predictions in output/<...>/predict.csv, and
    print the per-downscale reflectance column.
    """
    filename = 'input/' + material + '/' + material + repr(idd + 1) + '.png'
    albedoScale = np.loadtxt('output/' + material + repr(idd + 1) + '_0.95_100_down04/predict.csv', delimiter=',')
    # Rendering configuration.
    scale = 100
    tile = 100
    downScale = [4]
    NoSamples = 10000000
    receiptorSize = 'MAX'
    fftOnly = 'no'
    optimazation = 'no'
    platform = 'Windows_C'
    albedoMax = 0.95
    albedoMin = 0.95
    # albedo = albedoMax * np.ones((idd+1,blockWidth))
    albedo = albedoMax * albedoScale.reshape(idd + 1, blockWidth)
    # Block widths per material -- velvet: 15, gabardine: 20, felt: 15.
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start = time.perf_counter()
    (downscale_list, sigmaT_d_list, logfft_d_list, fftcurve_d_list,
     mean_d_list, std_d_list, reflection_list, reflection_stderr_list,
     reflectionOptimize_list, insideVis_list, albedo_k_list) \
        = multires2DRendering(filename, scale, tile, downScale, albedo, NoSamples,
                              receiptorSize, platform, optimazation, fftOnly)
    print('Time elapse: ' + repr(time.perf_counter() - start) + 's')
    # Reflectance for every downsample scale (plotting code removed; it was
    # fully commented out).
    y = reflection_list[:, 0]
    print(y)
# In[]
# Drive the reflectance computation for each material family:
# (material name, number of samples, albedo blocks per sample).
_JOBS = (('velvet', 9, 15), ('gabardine', 4, 20), ('felt', 11, 15))
for _job_index, (material, _count, blockWidth) in enumerate(_JOBS):
    print(material + ' start')
    for idd in range(_count):
        computeCorrectRefl(material, idd, blockWidth)
    print(material + ' end')
    # Blank-ish separator between materials (none after the last one).
    if _job_index < len(_JOBS) - 1:
        print(' ')
import math
from copy import deepcopy
import random
import pyximport
pyximport.install()
import dfunc
import sys
#stone & stage reading
#data = two stone
#onedata = one stone
#twostage = two stage
#onestage = one stage
param = sys.argv
data = dfunc.getdata(param[1]+'.txt') #stage & stone two data
stage = data[0]
dfunc.addone(stage)
del data[0]
#codes
# Genetic-algorithm population: LEN_N chromosomes, one per individual.
Maincode = []
for i in range(dfunc.LEN_N):
    Maincode.append(dfunc.maincode(len(data)))
go = 1  # NOTE(review): unused in the visible code -- confirm before removing.
ranktable = []
cou = 1
# Evolve until ranktable[0][1] drops to dfunc.TARGET (presumably the score of
# the best-ranked individual after sortalive() -- confirm in dfunc).
while(cou==1 or ranktable[0][1] > dfunc.TARGET):
    dfunc.crossing(Maincode)
    dfunc.mutation(Maincode)
    allzero = dfunc.check_stop(stage,data,Maincode,ranktable)
    dfunc.sortalive(ranktable)
    result = dfunc.work2(stage,data,Maincode[ranktable[0][0]])
    # Persist the current best layout after every generation.
    dfunc.writedata(param[1]+'answer.txt',result)
    Maincode = dfunc.roulette(Maincode,ranktable,allzero)
    cou += 1
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# import the classes in the files of the data folder here.
from .base import TransformerXHDataset
from .hotpotqa import HotpotDataset, batcher_hotpot
from .fever import FEVERDataset,batcher_fever
from .utils import load_data |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# MySQL connection settings.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or an untracked config file before deployment.
DIALECT = 'mysql'
DRIVER = 'pymysql'
USERNAME = 'root'
PASSWORD = 'password'
HOST = '192.168.99.100'
PORT = '3306'
DATABASE = 'react_template'
SQLALCHEMY_DATABASE_URI = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(
    DIALECT, DRIVER, USERNAME, PASSWORD, HOST, PORT, DATABASE
)
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
# NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN is deprecated in recent
# Flask-SQLAlchemy releases; prefer explicit db.session.commit() calls.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Echo the raw SQL of every query (debugging aid).
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
import func_bsearch
def contains(a, q):
    """Return True if the sorted sequence a contains q, via binary search."""
    idx = func_bsearch.bsearch(a, q)
    if idx >= len(a):
        return False
    return a[idx] == q
def main():
    """Exercise contains() on a small sorted list (expected results in the
    trailing comments).  print() call form is valid on both Python 2 and 3;
    the original used the Python-2-only statement form."""
    print(contains([2, 3, 6, 6, 7, 8], 1))  # False
    print(contains([2, 3, 6, 6, 7, 8], 2))  # True
    print(contains([2, 3, 6, 6, 7, 8], 6))  # True
    print(contains([2, 3, 6, 6, 7, 8], 8))  # True
    print(contains([2, 3, 6, 6, 7, 8], 9))  # False
    print(contains([2, 3, 6, 6, 7, 8], 4))  # False

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
try:
# noinspection PyUnresolvedReferences, PyCompatibility
from builtins import * # noqa
except ImportError:
pass
import os
import pandas as pd
from climate.stats import empirical_distributions
from report.sections.common import input_tex
from report.util.template import get_output_name, save_table, get_key
from report.util.driver import extract_data
def output_plot_empirical_pdf(modf, info, output_path):
    """Report section: plot the empirical PDF of one descriptor.

    Computes a histogram-based PDF from the descriptor's data and emits a
    figure element.  Returns (DataFrame with columns path/kind/caption,
    section title).  The ``_`` translation hook is assumed to be installed
    globally (e.g. via gettext) -- confirm in the report driver.
    """
    elements = []
    # Section title
    default_title = _('Plot empirical PDF')
    # Required values
    location = info['location_metocean']
    driver = info['name_driver']
    descriptor = info['name_descriptor']
    block = info['name_block']
    section = info['name_section']
    # Optional values and default values
    title = get_key(info, 'title_section', default_title)
    var_name = get_key(info, 'var_name_descriptor', descriptor)
    var_unit = get_key(info, 'unit_descriptor', '')
    circular = get_key(info, 'circular_descriptor', False)
    label_empirical = get_key(info, 'label_empirical_section', '')
    bins = get_key(info, 'bins_section', 0)
    # Input tex section
    input_tex(elements, info, output_path, section)
    # Computation
    data = extract_data(modf, descriptor_name=descriptor)
    # Remove null values
    values_clean = data.dropna(axis=0, how='all')
    data_empirical = empirical_distributions.epdf_histogram(values_clean, bins)
    # PDF variant: no cumulative curve and no kernel-density overlay.
    cumulative = False
    data_kernel = None
    # Figure
    kind = 'figure'
    default_caption = _('Empirical PDF') + ': {}'.format(info['title_descriptor'])
    caption = get_key(info, 'empirical_pdf_figure_caption_section', default_caption)
    path = get_output_name(location=location, driver=driver, name=descriptor, block=block, title=section, kind=kind)
    empirical_distributions.plot_kde(data_empirical, data_kernel, cumulative, title='', var_name=var_name,
                                     var_unit=var_unit, fig_filename=os.path.join(output_path, path),
                                     circular=circular, label_empirical=label_empirical,
                                     label_kernel='')
    elements.append([path, kind, caption])
    return pd.DataFrame(elements, columns=['path', 'kind', 'caption']), title
def output_plot_empirical_cdf(modf, info, output_path):
    """Report section: plot the empirical CDF of one descriptor with a
    kernel-density (cumulative) overlay.

    Returns (DataFrame with columns path/kind/caption, section title).
    The ``_`` translation hook is assumed to be installed globally.
    """
    elements = []
    # Section title
    default_title = _('Plot empirical CDF')
    # Required values
    location = info['location_metocean']
    driver = info['name_driver']
    descriptor = info['name_descriptor']
    block = info['name_block']
    section = info['name_section']
    # Optional values and default values
    title = get_key(info, 'title_section', default_title)
    var_name = get_key(info, 'var_name_descriptor', descriptor)
    var_unit = get_key(info, 'unit_descriptor', '')
    circular = get_key(info, 'circular_descriptor', False)
    label_empirical = get_key(info, 'label_empirical_section', '')
    label_kernel = get_key(info, 'label_kernel_section', '')
    bins = get_key(info, 'bins_section', 100)
    # Input tex section
    input_tex(elements, info, output_path, section)
    # Computation
    data = extract_data(modf, descriptor_name=descriptor)
    cumulative = True
    data_empirical = empirical_distributions.ecdf_histogram(data)
    data_kernel = empirical_distributions.kde_sm(data, cumulative=cumulative, gridsize=bins)
    # Figure
    kind = 'figure'
    default_caption = _('Empirical CDF') + ': {}'.format(info['title_descriptor'])
    # NOTE(review): this reuses the 'empirical_pdf_...' caption key from the
    # PDF section -- presumably copy-paste; confirm whether a CDF-specific
    # key was intended.
    caption = get_key(info, 'empirical_pdf_figure_caption_section', default_caption)
    path = get_output_name(location=location, driver=driver, name=descriptor, block=block, title=section, kind=kind)
    empirical_distributions.plot_kde(data_empirical, data_kernel, cumulative, title='', var_name=var_name,
                                     var_unit=var_unit, fig_filename=os.path.join(output_path, path),
                                     circular=circular, label_empirical=label_empirical,
                                     label_kernel=label_kernel)
    elements.append([path, kind, caption])
    return pd.DataFrame(elements, columns=['path', 'kind', 'caption']), title
|
from util import *
from ADI_SOLVER import *
import ipywidgets
def start():
    """Build the interactive ADI heat-equation solver UI (ipywidgets).

    Sliders configure the four boundary temperatures, the initial condition,
    lambda, grid size and which time step to plot; the "Solve!" button runs
    the ADI solver and shows the resulting heat map.
    """
    layout = ipywidgets.Layout(width= '100%',height='20px')
    bc1 = ipywidgets.IntSlider(min=0,max=1000,value = 681,step=1,description='Top BC' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    bc2 = ipywidgets.IntSlider(min=0,max=1000,value=205,step=1,description='Right BC' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    bc3 = ipywidgets.IntSlider(min=0,max=1000,value=239,step=1,description='Bottom BC' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    bc4 = ipywidgets.IntSlider(min=0,max=1000,step=1,value=611,description='Left BC' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    ic0 = ipywidgets.IntSlider(min=0,max=1000,step=1,value=71,description='IC' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    # NOTE(review): value=239 exceeds max=0.5, so the widget will clamp the
    # default -- presumably a smaller default was intended; confirm.
    lam = ipywidgets.FloatSlider(min=0,max=0.5,value=239,step=0.001,description='lambda' ,layout=layout,continuous_update=False,readout_format='.5f',style = {'description_width': 'initial'})
    grid = ipywidgets.IntSlider(min=0,max=1000,step=1,value=50,description='grid size (NxN)' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    # NOTE(review): the `iters` slider is created but never wired into the
    # interact call below -- confirm whether it should be passed through.
    iters = ipywidgets.IntSlider(min=0,max=1000,step=1,value=10,description='iterations' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    time_step = ipywidgets.IntSlider(min=0,max=1000,step=1,value=10,description='plot time step' ,layout=layout,continuous_update=False,style = {'description_width': 'initial'})
    interact_calc=ipywidgets.interact.options(manual=True, manual_name="Solve!")
    @interact_calc(grid_size=grid,bc1=bc1,bc2=bc2,bc3=bc3,bc4=bc4,ic0=ic0,Lambda=lam,time_step=time_step)
    def adi_solver_interact(grid_size,bc1,bc2,bc3,bc4,ic0,Lambda,time_step):
        # Build the grid, run the ADI solver keeping intermediate steps, and
        # plot the requested time step.
        grid = generate_grid(n = grid_size , bc=(bc1,bc2,bc3,bc4) , ic=ic0)
        solution = solve(grid.copy(),Lambda=Lambda,iters=time_step,steps = True)
        show_heat_maps((solution[time_step],f'Numerical solution at time step ={time_step}'),figsize=(7,7))
|
from config.base_config import BaseConfig
class DatabaseConfig(BaseConfig):
    """Configuration section backed by the ``database.ini`` file."""
    def __init__(self):
        super().__init__()
        # File name the BaseConfig machinery loads settings from.
        self.file = 'database.ini'
|
import os
import datetime
from etl.scripts import RetrieveProcedureComplicationsDPCA
from barbell2light.utils import Logger, current_time_secs, elapsed_secs, duration
class ScriptRunner:
    """Run a sequence of ETL scripts, writing results into a timestamped directory.

    Construction creates the output directory, a logger and the script list;
    ``execute()`` runs each script in order and drops a ``finished.txt``
    marker file when all scripts are done.
    """

    def __init__(self, output_dir, log_dir):
        self.output_dir = self.update_output_dir(output_dir)
        self.logger = self.init_logger(log_dir)
        self.scripts = self.init_scripts(self.output_dir, self.logger)

    @staticmethod
    def init_logger(log_dir):
        """Create the log directory (if needed) and return a prefixed Logger."""
        os.makedirs(log_dir, exist_ok=True)
        return Logger(prefix='log_etl', to_dir=log_dir)

    @staticmethod
    def init_scripts(output_dir, logger):
        """Return the ordered list of ETL scripts to execute."""
        return [
            RetrieveProcedureComplicationsDPCA(output_dir, logger),
        ]

    @staticmethod
    def update_output_dir(output_dir):
        """Create and return a timestamped subdirectory of ``output_dir``."""
        now = datetime.datetime.now()
        timestamp = now.strftime('%Y%m%d%H%M%S')
        output_dir = os.path.join(output_dir, timestamp)
        # makedirs creates intermediate directories too, so a single call
        # covers both the parent and the timestamped child.
        os.makedirs(output_dir, exist_ok=True)
        return output_dir

    def execute(self):
        """Run every script in order, logging per-script and total durations."""
        runner_started = current_time_secs()
        for s in self.scripts:
            self.logger.print('Starting script {}'.format(s.name))
            script_started = current_time_secs()
            s.execute()
            self.logger.print('Script finished after {}'.format(duration(elapsed_secs(script_started))))
        self.logger.print('Runner finished after {}'.format(duration(elapsed_secs(runner_started))))
        # Portable replacement for `os.system('touch ...')`: create (or
        # refresh) the completion marker without shelling out.
        with open(os.path.join(self.output_dir, 'finished.txt'), 'a'):
            pass
def main():
    """Entry point: read the target directories from the environment and run all scripts."""
    runner = ScriptRunner(os.environ['OUTPUT_DIR'], os.environ['LOG_DIR'])
    runner.execute()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: fm_footer.py
Author: tdoly
"""
class Footer(object):
    """Footer that writes a single centred status line to a curses screen."""
    # Last message written; replayed when a screen is attached.
    msg = None
    curses_screen = None

    def __init__(self):
        self.width = None

    def set_screen(self, curses_screen):
        """Attach a curses window and re-display any pending message."""
        self.curses_screen = curses_screen
        self.width = curses_screen.getmaxyx()[1] - 5
        if self.msg:
            self.write(self.msg)

    def write(self, msg):
        """Flatten ``msg`` to one line, truncate to the footer width and display it."""
        self.msg = msg.strip().replace("\r", "").replace("\n", "")
        content = self.msg
        # Python 2 only: encode unicode text to UTF-8 bytes before slicing.
        # `str is bytes` short-circuits on Python 3, where the bare `unicode`
        # name used by the original code raised NameError.
        if str is bytes and isinstance(self.msg, unicode):
            content = self.msg.encode('utf-8')
        content = content[0: self.width]
        if self.curses_screen:
            self.curses_screen.erase()
            # Floor division keeps the column an int; true division returned
            # a float on Python 3, which curses addstr rejects.
            self.curses_screen.addstr(0, (self.width - len(content)) // 2, content)
            self.curses_screen.refresh()

    def readline(self):
        # Footer is output-only; present to satisfy a file-like interface.
        pass
|
from selenium import webdriver
from time import sleep
import iamarobot
# Open the Google reCAPTCHA demo page in Firefox.
driver = webdriver.Firefox()
driver.get('https://google.com/recaptcha/api2/demo')
# Give the page time to load the captcha iframe before locating it.
sleep(4)
# Locate the reCAPTCHA iframe by its title and run the automated solver.
d = iamarobot.Docaptcha(driver, '//iframe[@title="reCAPTCHA"]')
d.solve()
import os
import sys
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__), '..','dots_and_squares')))
from dots_and_squares_game import dots_and_squares_game
import unittest
import pygame
class test_dots_and_squares_game(unittest.TestCase):
    """Tests for functions in dots_and_squares_game. The unit tests are only for the logic in the
    game, and are not checking the pygame usage."""
    @classmethod
    def setUpClass(self):
        # One shared game instance for every test; pygame must be initialised first.
        pygame.init()
        self.game = dots_and_squares_game()
    @classmethod
    def tearDownClass(self):
        self.game.quit()
        pygame.quit()
    def test_get_end_points(self):
        """Testing get_end_points with multiple valid pos tuples"""
        # Each click position should snap to the nearest grid edge's endpoints.
        self.assertEqual(self.game.get_end_points((397,122)),((400,100),(400,150)))
        self.assertEqual(self.game.get_end_points((323, 102)),((300, 100), (350, 100)))
        self.assertEqual(self.game.get_end_points((248, 130)),((250, 100), (250, 150)))
        self.assertEqual(self.game.get_end_points((215, 102)),((200, 100), (250, 100)))
        self.assertEqual(self.game.get_end_points((148, 135)),((150, 100), (150, 150)))
        self.assertEqual(self.game.get_end_points((165, 154)),((150, 150), (200,150)))
        self.assertEqual(self.game.get_end_points((95,136)),((100, 100), (100, 150)))
    def test_get_end_points_outside(self):
        """Testing get_end_points with multiple invalid pos tuples"""
        # Positions outside the playing board yield no endpoints.
        self.assertIsNone(self.game.get_end_points((562,124)))
        self.assertIsNone(self.game.get_end_points((511,83)))
        self.assertIsNone(self.game.get_end_points((88, 529)))
        self.assertIsNone(self.game.get_end_points((277, 565)))
    def test_check_square1(self):
        '''Given a list_of_lines already played in the game determine if a new line makes
        a square or not'''
        self.game.list_of_lines = [((300, 100), (350, 100)), ((250, 150), (250, 200)), ((200, 150),
        (200, 200)), ((150, 200), (200, 200))]
        # None of these candidate lines closes a square with the lines above.
        self.assertEqual(self.game.check_square(((150, 200), (200, 200))),0)
        self.assertEqual(self.game.check_square(((300, 200), (350, 200))),0)
        self.assertEqual(self.game.check_square(((250, 200), (300, 200))),0)
        self.assertEqual(self.game.check_square(((300, 150), (300, 200))),0)
        self.assertEqual(self.game.check_square(((250, 150), (300, 150))),0)
    def test_check_square2(self):
        '''Given a list_of_lines already played in the game determine if a new line makes
        a square or not. Also add the new line to the list_of_lines'''
        self.game.list_of_lines = [((100, 200), (100, 250)), ((100, 250), (100, 300)), ((100, 300),
        (100, 350)), ((100, 350), (100, 400)), ((100, 400), (100, 450)), ((100, 450), (100, 500)),
        ((100, 500), (150, 500)), ((150, 450), (150, 500)), ((150, 400), (150, 450)), ((150, 350),
        (150, 400)), ((150, 300), (150, 350)), ((150, 250), (150, 300)), ((150, 200), (150, 250)),
        ((100, 200), (150, 200)), ((100, 250), (150, 250))]
        self.assertEqual(self.game.check_square(((100, 250), (150, 250))),1)
        self.game.list_of_lines.append(((100, 250), (150, 250)))
        self.assertEqual(self.game.check_square(((100, 300), (150, 300))),1)
        self.game.list_of_lines.append(((100, 300), (150, 300)))
        # NOTE(review): the next two lines repeat the assertion/append above
        # verbatim — confirm whether a different line was intended here.
        self.assertEqual(self.game.check_square(((100, 300), (150, 300))),1)
        self.game.list_of_lines.append(((100, 300), (150, 300)))
        self.assertEqual(self.game.check_square(((100, 350), (150, 350))),1)
        self.assertEqual(self.game.check_square(((550, 450), (550, 500))),0)
        self.assertEqual(self.game.check_square(((500, 100), (550, 100))),0)
    def test_declare_winner(self):
        '''Check if the winner is determined correctly, given the scores'''
        self.game.score["A"] = 7
        self.game.score["B"] = 10
        self.assertEqual(self.game.declare_winner(),"B")
        self.game.score["A"] = 10
        self.game.score["B"] = 7
        self.assertEqual(self.game.declare_winner(),"A")
        # Equal scores produce a draw, reported as "Both".
        self.game.score["A"] = 7
        self.game.score["B"] = 7
        self.assertEqual(self.game.declare_winner(),"Both")
# Run the suite directly with verbose output when executed as a script.
if __name__ == '__main__':
    unittest.main(verbosity=2)
"""
steps
1. search & locates modules src file
2. execute
3. object for the imported module
"""
from glob import glob
from psmakearchive import make_archive, make_tarball
file_list = glob('*.py')
make_archive('source.zip', *file_list) # content of the list as arguments
make_tarball('source.tar', *file_list)
|
#-*- coding: utf-8 -*-
import copy
from django.contrib import admin
from app_data.forms import multiform_factory
from app_data.admin import AppDataModelAdmin
from .models import MyModel, Tag
MyModelMultiForm = multiform_factory(MyModel)
class MyModelAdmin(AppDataModelAdmin):
    """Admin for MyModel that appends fieldsets contributed by app-data forms."""
    multiform = MyModelMultiForm

    @property
    def declared_fieldsets(self):
        """Base fieldsets plus a deep copy of every app form's admin_fieldsets."""
        fieldsets = [
            (None, {'fields': ['title', 'description', 'is_awesome']}),
        ]
        for app_form in self.multiform().app_forms.values():
            if hasattr(app_form, 'admin_fieldsets'):
                fieldsets.extend(copy.deepcopy(fs) for fs in app_form.admin_fieldsets)
        return fieldsets
# Register the models with the default admin site.
admin.site.register(MyModel, MyModelAdmin)
admin.site.register(Tag)
# Attach the tagging app-data form so its fields appear via admin_fieldsets.
MyModelMultiForm.add_form('tagging', {'fields': ['public_tags']})
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
def sigmoid(x):
    """Logistic function: map a real input into (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
def sigmoid_m1(x):
    """Inverse of the logistic function (logit): recover t from sigmoid(t)."""
    inverse_odds = (1 / x) - 1
    return -np.log(inverse_odds)
class data_preparing:
    """Load the COVID-19 line-list CSV, ordinal-encode the features and fit a
    linear model on the logit of the outcome labels."""
    def __init__(self):
        # LOADING DATA, SELECTING COLUMNS AND FILLING NAN-S
        self.loaded_data = pd.read_csv("data/COVID19_open_line_list.csv")
        self.loaded_data['sex'] = self.loaded_data['sex'].str.lower()
        # NOTE(review): missing outcomes become 0.01, which the filter below
        # then maps to 0 — confirm this is the intended default.
        self.loaded_data['outcome'] = self.loaded_data['outcome'].fillna(0.01)
        self.data = self.loaded_data[['sex', 'city', 'province', 'country', 'age']]
        self.data = self.data.fillna('0')
        # ONE-HOT ENCODING DATA
        # NOTE(review): `sparse=False` is deprecated in newer scikit-learn in
        # favour of `sparse_output=False` — verify the pinned version.
        # argmax over the one-hot rows turns each category into an ordinal index.
        self.x_train = self.data[['sex', 'city', 'province', 'country', 'age']].copy()
        self.ohe_sex = OneHotEncoder(sparse=False)
        sex_temp = np.argmax(self.ohe_sex.fit_transform(self.x_train[['sex']]), axis=1)
        self.ohe_city = OneHotEncoder(sparse=False)
        city_temp = np.argmax(self.ohe_city.fit_transform(self.x_train[['city']]), axis=1)
        self.ohe_prov = OneHotEncoder(sparse=False)
        prov_temp = np.argmax(self.ohe_prov.fit_transform(self.x_train[['province']]), axis=1)
        self.ohe_country = OneHotEncoder(sparse=False)
        country_temp = np.argmax(self.ohe_country.fit_transform(self.x_train[['country']]), axis=1)
        self.ohe_age = OneHotEncoder(sparse=False)
        age_temp = np.argmax(self.ohe_age.fit_transform(self.x_train[['age']]), axis=1)
        # CONNECTING COLUMNS
        self.x_train = np.array([sex_temp, city_temp, prov_temp, country_temp, age_temp]).transpose()
        self.print_top_rows()
        # print(self.ohe_sex.inverse_transform([self.x_train[4]]))
        # print(self.ohe_city.get_feature_names())
        # PREPARING TARGET DATA
        self.y_train = self.loaded_data.pop('outcome').values
        self.y_train[self.y_train == 'death'] = 1
        self.y_train[self.y_train == 'died'] = 1
        self.y_train[self.y_train == 'discharged'] = 0
        self.y_train[self.y_train == 'discharge'] = 0
        self.y_train[self.y_train == 'stable'] = 0.005
        # NOTE(review): the filter keeps 0.05 and 0.1, but 'stable' was mapped
        # to 0.005 above, so stable rows get overwritten to 0 here — likely a
        # 0.005 / 0.05 typo; confirm which value was intended.
        self.y_train[(self.y_train != 1) & (self.y_train != 0) & (self.y_train != 0.05) & (self.y_train != 0.1)] = 0
        # DIVIDING DATA INTO GROUPS
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
            self.x_train, self.y_train, test_size=0.2, random_state=42)
        # LINEAR REGRESSION
        # Labels are squeezed into (0.05, 0.95) so the inverse sigmoid is finite.
        self.lin = LinearRegression()
        self.lin.fit(self.x_train, sigmoid_m1(self.y_train.astype(float) * 0.9 + 0.05))
    def get_columns(self):
        """Return the selected feature column names."""
        return self.data.columns
    def print_top_rows(self):
        """Print the first rows of the raw and encoded feature matrices."""
        print("Normal data\n", self.data.head(5))
        print("Encoded data\n", self.x_train[0:5])
    def encode(self, data):
        """Encode a dict/DataFrame of feature columns into ordinal indices.

        Single-row input takes the list-based branch; multi-row input is
        reshaped column-by-column for the fitted encoders.
        """
        print(len(data['sex']))
        if len(data['sex']) == 1:
            sex_temp = np.argmax(self.ohe_sex.transform([data['sex']]), axis=1)
            city_temp = np.argmax(self.ohe_city.transform([data['city']]), axis=1)
            prov_temp = np.argmax(self.ohe_prov.transform([data['province']]), axis=1)
            country_temp = np.argmax(self.ohe_country.transform([data['country']]), axis=1)
            age_temp = np.argmax(self.ohe_age.transform([data['age']]), axis=1)
            return np.array([sex_temp, city_temp, prov_temp, country_temp, age_temp]).transpose()
        sex_temp = np.argmax(self.ohe_sex.transform(np.array(data['sex']).reshape(-1, 1)), axis=1)
        city_temp = np.argmax(self.ohe_city.transform(np.array(data['city']).reshape(-1, 1)), axis=1)
        prov_temp = np.argmax(self.ohe_prov.transform(np.array(data['province']).reshape(-1, 1)), axis=1)
        country_temp = np.argmax(self.ohe_country.transform(np.array(data['country']).reshape(-1, 1)), axis=1)
        age_temp = np.argmax(self.ohe_age.transform(np.array(data['age']).reshape(-1, 1)), axis=1)
        return np.array([sex_temp, city_temp, prov_temp, country_temp, age_temp]).transpose()
    def decode(self, data):
        """Map one encoded feature row back to the original category labels."""
        print(data.transpose())
        sex_temp = self.ohe_sex.inverse_transform([data[0]])
        city_temp = self.ohe_city.inverse_transform([data[1]])
        prov_temp = self.ohe_prov.inverse_transform([data[2]])
        country_temp = self.ohe_country.inverse_transform([data[3]])
        age_temp = self.ohe_age.inverse_transform([data[4]])
        return np.array([sex_temp, city_temp, prov_temp, country_temp, age_temp]).transpose()
    def predict_probability(self, data):
        """Encode `data`, predict with the linear model and squash through sigmoid."""
        encoded = self.encode(data)
        return sigmoid(self.lin.predict(encoded))
'''
data_dict = {'sex': ['male'], 'city': ['Chaohu City, Hefei City'], 'province': ['Anhui'], 'country': ['China'],
'age': ['80']}
data_frame = pd.DataFrame.from_dict(data_dict)
dat_prep = data_preparing()
enc = dat_prep.encode(data_frame)
print("Encoded test data\n", enc)
for e in dat_prep.lin.predict(dat_prep.x_test):
print(sigmoid(e))
''' |
import sys
class Person:
    """Basic identity record: name, surname and age."""

    def __init__(self, name, surname, age):
        self.name = name
        self.surname = surname
        self.age = age
class Employee(Person):
    """Person employed in a position with a specialization and salary."""

    def __init__(self, name, surname, age, position, specialization, salary):
        super().__init__(name, surname, age)
        self.position = position
        self.specialization = specialization
        self.salary = salary
        # Echo the full record on creation (behaviour kept from the original).
        print(name, surname, age, position, specialization, salary)
class Client(Person):
    """Client person with a default order.

    The original called ``super().__init__("Marta")``, which raised a
    TypeError because Person.__init__ requires name, surname and age.
    Defaulted parameters keep the no-argument construction working while
    passing all three through.
    """

    def __init__(self, name="Marta", surname="", age=None):
        super().__init__(name, surname, age)
        self.orders = "Strona internetowa"
# Demo: constructing an Employee prints the full record via its __init__.
pracownik1=Employee("Emilos","Czersky",28,"Programmer","Python",2000.76)
# klient1 = Client()
# print(klient1.name, klient1.orders)
|
import logging
import elks
logging.getLogger().setLevel(logging.INFO)
def main():
    """Allocate a 46elks number, set its SMS callback, and send a test message."""
    username = '<Your 46elks API username>'
    password = '<Your 46elks API password>'
    elk = elks.API(username, password)
    number = elk.allocateNumber()
    # Fixed: the original line had an unbalanced closing parenthesis (SyntaxError).
    logging.info('Allocated number: %s' % number)
    number.modify(sms_callback_url='http://example.com/sms_cb')
    sms = number.sendMessage(receiver='+46123456789', message='Hello world')
    logging.info('Sent message "%s" to "%s"' % (sms.message, sms.receiver))


if __name__ == "__main__":
    main()
|
import csv
from pygal.maps.world import World
from pygal.style import RotateStyle as RS, LightColorizedStyle as LCS
from country_codes import get_country_code
# Load CO2 emissions data.
filename = 'co-emissions-per-capita.csv'
with open(filename) as f:
    reader = csv.reader(f)
    header_row = next(reader)
    cc_emissions = {}
    country_names, years, co2_emiss = [], [], []
    for row in reader:
        country_name = row[0]
        year = row[2]
        co2_emis = float(row[3])
        country_names.append(country_name)
        years.append(year)
        co2_emiss.append(co2_emis)
        code = get_country_code(country_name)
        if code:
            # NOTE(review): rows are not filtered by year, so later rows for
            # the same country overwrite earlier ones; the title below claims
            # 2017 — confirm the CSV only holds 2017 rows or add a year check.
            cc_emissions[code] = co2_emis
# Group the countries into 3 emissions levels.
cc_emi_1, cc_emi_2, cc_emi_3 = {}, {}, {}
for cc, emis in cc_emissions.items():
    if emis < 5:
        cc_emi_1[cc] = emis
    elif emis < 10:
        cc_emi_2[cc] = emis
    else:
        cc_emi_3[cc] = emis
# See how many countries are in each level.
print(len(cc_emi_1), len(cc_emi_2), len(cc_emi_3))
# Render the three buckets as a pygal world map and write it to SVG.
wm_style = RS('#336699', base_style=LCS)
wm = World(style=wm_style)
wm.title = 'World CO2 Emissions in 2017, by Country (in metric tons)'
wm.add('0-5t', cc_emi_1)
wm.add('5t-10t', cc_emi_2)
wm.add('>10t', cc_emi_3)
wm.render_to_file('world_co2_emissions.svg')
from math import ceil
import numpy as np
import fenics as pde
import matplotlib.pyplot as plt
import mshr
from tqdm import trange
# Aliases for FEniCS names we will use
dx = pde.dx
ds = pde.ds
sym = pde.sym
grad = pde.grad
nabla_grad = pde.nabla_grad
div = pde.div
dot = pde.dot
inner = pde.inner

# Symmetric gradient of a velocity field.
def epsilon(u): return sym(grad(u))

# 2x2 identity tensor, used when assembling the stress sigma = -p*I + mu*epsilon(u).
I = pde.Identity(2)
class IPCS:
    """Base class for incremental pressure-correction (IPCS) Navier-Stokes solvers.

    Subclasses provide `_setup_variational_problems` and `_setup_solvers`;
    this class drives the time loop and records drag/lift and pressure
    differences each step.
    """
    def __init__(self, mesh, velocity_space, pressure_space, dt, viscosity, density, plot_freq=-1):
        self.mesh = mesh
        self.velocity_space = velocity_space
        self.pressure_space = pressure_space
        self.dt = dt
        self.viscosity = viscosity
        self.density = density
        # plot_freq <= 0 disables plotting during the time loop.
        self.plot_freq = plot_freq
    def get_trial_functions(self):
        """Return trial functions on the velocity and pressure spaces."""
        V, Q = self.velocity_space, self.pressure_space
        return pde.TrialFunction(V), pde.TrialFunction(Q)
    def get_test_functions(self):
        """Return test functions on the velocity and pressure spaces."""
        V, Q = self.velocity_space, self.pressure_space
        return pde.TestFunction(V), pde.TestFunction(Q)
    def _setup_traction(self, boundary):
        """Build the traction expression and boundary measure for force integrals."""
        mu = pde.Constant(self.viscosity)
        n = pde.FacetNormal(self.mesh)
        mf = pde.MeshFunction('size_t', self.mesh, 1)
        pde.CompiledSubDomain(boundary).mark(mf, 1)
        ds = pde.ds(subdomain_data=mf)
        # Cauchy stress from the current pressure and velocity solutions.
        sigma = (
            - self.p_*I
            + mu*epsilon(self.u_)
        )
        self.traction = dot(sigma, n)
        self.ds_traction = ds(1)
    def _setup_pressure_differences(self, high_pressure_boundary, low_pressure_boundary):
        """Mark the two boundaries used to measure a pressure difference."""
        mf = pde.MeshFunction('size_t', self.mesh, 1)
        pde.CompiledSubDomain(high_pressure_boundary).mark(mf, 1)
        pde.CompiledSubDomain(low_pressure_boundary).mark(mf, 2)
        ds = pde.ds(subdomain_data=mf)
        self.ds_high_pressure = ds(1)
        self.ds_low_pressure = ds(2)
    def _initialise_solution(self, initial_velocity, initial_pressure):
        # Setup initial conditions
        self.u_star_.vector()[:] = 0
        self.phi_.vector()[:] = 0
        self.u_.assign(pde.project(initial_velocity, self.velocity_space))
        self.p_.assign(pde.project(initial_pressure, self.pressure_space))
    def simulate(self, num_steps, initial_velocity, initial_pressure, bc_velocity, bc_pressure, traction_boundary, pressure_points):
        """Set up forms, solvers and initial state, then run `num_steps` steps."""
        self._setup_variational_problems()
        self._setup_traction(traction_boundary)
        self._initialise_solution(initial_velocity, initial_pressure)
        self._setup_solvers(bc_velocity, bc_pressure)
        self.high_pressure_point, self.low_pressure_point = pressure_points
        self.t = 0
        self.time_points = []
        self.drag_forces = []
        self.lift_forces = []
        self.pressure_differences = []
        self.continue_simulation(num_steps)
    def continue_simulation(self, num_steps):
        """Advance the simulation by `num_steps` steps, recording diagnostics."""
        fig, (velocity_ax, pressure_ax) = plt.subplots(2)
        for it in trange(num_steps):
            # Update current time
            self.t += self.dt
            self.update_solution()
            # Compute drag and lift
            self.time_points.append(self.t)
            # Forces
            ds = self.ds_traction
            self.drag_forces.append(pde.assemble(self.traction[0]*ds))
            self.lift_forces.append(pde.assemble(self.traction[1]*ds))
            # Pressure differences
            high_pressure = self.p_(*self.high_pressure_point)
            low_pressure = self.p_(*self.low_pressure_point)
            self.pressure_differences.append(high_pressure - low_pressure)
            # Plot solution
            if self.plot_freq > 0 and it % self.plot_freq == 0:
                velocity_ax.clear()
                pressure_ax.clear()
                plt.sca(velocity_ax)
                pde.plot(self.u_)
                plt.sca(pressure_ax)
                pde.plot(self.p_)
                plt.pause(0.1)
class ImplicitSolver(IPCS):
    """IPCS scheme with an implicit (re-assembled each step) tentative-velocity solve."""
    def _setup_variational_problems(self):
        """Define the four variational problems of the IPCS splitting."""
        V, Q = self.velocity_space, self.pressure_space
        # Get trial and test functions
        u, p = self.get_trial_functions()
        # Second call gives independent trials for the tentative velocity and
        # the pressure correction phi.
        u_star, phi = self.get_trial_functions()
        v, q = self.get_test_functions()
        # Define expressions used in variational forms
        n = pde.FacetNormal(self.mesh)
        f = pde.Constant((0, 0))
        k = pde.Constant(self.dt)
        mu = pde.Constant(self.viscosity)
        rho = pde.Constant(self.density)
        nu = mu / rho
        # Define functions for solutions at previous and current time steps
        self.u_ = pde.Function(V)
        self.u_star_ = pde.Function(V)
        self.phi_ = pde.Function(Q)
        self.p_ = pde.Function(Q)
        u_, u_star_, phi_, p_ = self.u_, self.u_star_, self.phi_, self.p_
        # Define variational problem for step 1
        # Tentative velocity: implicit in u_star, advected by the previous u_.
        self.a1 = (
            dot(u_star, v) * dx
            + k * dot( dot(u_, nabla_grad(u_star)), v ) * dx
            + k * mu * inner(epsilon(u_star), epsilon(v)) * dx
        )
        self.L1 = (
            dot(u_, v) * dx
            - (k/rho) * dot(grad(p_), v) * dx
            - k * dot(f, v) * dx
        )
        # Define variational problem for step 2
        self.a2 = dot(grad(p), grad(q))*dx
        self.L2 = -(rho/k) * div(u_star_) * q * dx
        # Define variational problem for step 3
        self.a3 = dot(u, v)*dx
        self.L3 = dot(u_star_, v)*dx - (k/rho)*dot(grad(phi_), v)*dx
        # Define variational problem for step 4
        self.a4 = dot(p, q)*dx
        self.L4 = dot(p_ + phi_, q)*dx
    def _setup_solvers(self, bc_velocity, bc_pressure):
        """Pre-factorise the time-invariant systems and bind `update_solution`."""
        A2 = pde.assemble(self.a2)
        A3 = pde.assemble(self.a3)
        A4 = pde.assemble(self.a4)
        for bc in bc_pressure:
            bc.apply(A2)
        solver2 = pde.LUSolver(A2)
        solver3 = pde.LUSolver(A3)
        solver4 = pde.LUSolver(A4)
        def update_solution():
            # Step 1: Tentative velocity step
            # a1 depends on u_, so it is re-assembled by pde.solve each step.
            pde.solve(self.a1 == self.L1, self.u_star_, bcs=bc_velocity)
            # Step 2: Pressure correction step
            b2 = pde.assemble(self.L2)
            for bc in bc_pressure:
                bc.apply(b2)
            solver2.solve(self.phi_.vector(), b2)
            # Step 3: Velocity correction step
            b3 = pde.assemble(self.L3)
            solver3.solve(self.u_.vector(), b3)
            # Step 4: Pressure update
            b4 = pde.assemble(self.L4)
            solver4.solve(self.p_.vector(), b4)
        self.update_solution = update_solution
class ExplicitSolver(IPCS):
    """IPCS scheme with a fully explicit tentative-velocity step (all four
    system matrices are constant, so every solver is pre-factorised)."""
    def _setup_variational_problems(self):
        """Define the four variational problems of the IPCS splitting."""
        V, Q = self.velocity_space, self.pressure_space
        # Get trial and test functions
        u, p = self.get_trial_functions()
        u_star, phi = self.get_trial_functions()
        v, q = self.get_test_functions()
        # Define expressions used in variational forms
        n = pde.FacetNormal(self.mesh)
        f = pde.Constant((0, 0))
        k = pde.Constant(self.dt)
        mu = pde.Constant(self.viscosity)
        rho = pde.Constant(self.density)
        nu = mu / rho
        # Define functions for solutions at previous and current time steps
        self.u_ = pde.Function(V)
        self.u_star_ = pde.Function(V)
        self.phi_ = pde.Function(Q)
        self.p_ = pde.Function(Q)
        u_, u_star_, phi_, p_ = self.u_, self.u_star_, self.phi_, self.p_
        # Define variational problem for step 1
        # Mass matrix only on the left; convection and diffusion are taken
        # explicitly from the previous velocity u_ on the right-hand side.
        self.a1 = (
            dot(u, v) * dx
        )
        self.L1 = (
            dot(u_, v) * dx
            - k * dot( dot(u_, nabla_grad(u_)), v) * dx
            - (k/rho) * dot(grad(p_), v) * dx
            - k * mu * inner(sym(grad(u_)), sym(grad(v))) * dx
            + k * dot(f, v) * dx
        )
        # Define variational problem for step 2
        self.a2 = dot(grad(p), grad(q))*dx
        self.L2 = -(rho/k) * div(u_star_) * q * dx
        # Define variational problem for step 3
        self.a3 = dot(u, v)*dx
        self.L3 = dot(u_star_, v)*dx - (k/rho)*dot(grad(phi_), v)*dx
        # Define variational problem for step 4
        self.a4 = dot(p, q)*dx
        self.L4 = dot(p_ + phi_, q)*dx
    def _setup_solvers(self, bc_velocity, bc_pressure):
        """Assemble and pre-factorise all four systems, then bind `update_solution`."""
        A1 = pde.assemble(self.a1)
        A2 = pde.assemble(self.a2)
        A3 = pde.assemble(self.a3)
        A4 = pde.assemble(self.a4)
        for bc in bc_velocity:
            bc.apply(A1)
        for bc in bc_pressure:
            bc.apply(A2)
        solver1 = pde.LUSolver(A1)
        solver2 = pde.LUSolver(A2)
        solver3 = pde.LUSolver(A3)
        solver4 = pde.LUSolver(A4)
        def update_solution():
            # Step 1: Tentative velocity step
            b1 = pde.assemble(self.L1)
            for bc in bc_velocity:
                bc.apply(b1)
            solver1.solve(self.u_star_.vector(), b1)
            # Step 2: Pressure correction step
            b2 = pde.assemble(self.L2)
            for bc in bc_pressure:
                bc.apply(b2)
            solver2.solve(self.phi_.vector(), b2)
            # Step 3: Velocity correction step
            b3 = pde.assemble(self.L3)
            solver3.solve(self.u_.vector(), b3)
            # Step 4: Pressure update
            b4 = pde.assemble(self.L4)
            solver4.solve(self.p_.vector(), b4)
        self.update_solution = update_solution
def simulate_stokes(function_space, boundary_conditions):
    """Solve the steady Stokes problem on the given mixed space and return (u, p)."""
    u, p = pde.TrialFunctions(function_space)
    v, q = pde.TestFunctions(function_space)
    zero_vector = pde.Constant((0, 0))
    zero_scalar = pde.Constant(0)
    # Mixed bilinear form: viscous term plus the pressure/incompressibility coupling.
    a = (
        inner(nabla_grad(u), nabla_grad(v)) * dx
        + p * div(v) * dx
        + div(u) * q * dx
    )
    # Zero body force and zero mass source.
    L = dot(zero_vector, v) * dx + zero_scalar * q * dx
    solution = pde.Function(function_space)
    pde.solve(a == L, solution, bcs=boundary_conditions)
    return solution.split()
|
from django.contrib.auth.models import User,Group
from django.test import TestCase
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase,APIClient
from .factory import populate_test_db_docs,populate_test_db_users
from .models import Document
class TestDocumentRulesGet(APITestCase):
    """Permission tests for listing and creating documents per user role."""
    def setUp(self):
        self.client = APIClient()
        self.url = reverse('documents-list')
        # create user and group
        populate_test_db_users(User,Group)
        # create docs for users
        populate_test_db_docs(Document)
    def test_sergeant_permissions(self):
        # A sergeant can see private documents in the list.
        self.client.login(username='sergeant',password='123456')
        self.response = self.client.get(self.url)
        print(self.response.json())
        self.assertContains(self.response, text='private doc', status_code=200)
    def test_document_create(self):
        # A general may create a public document.
        self.client.login(username='general', password='123456')
        data = {
            "title":'gogo',
            'status':'active',
            'text':'1223',
            'data_expired':'2020-06-06',
            'document_root':'public'
        }
        self.response = self.client.post(self.url,data)
        print(self.response.json())
        self.assertEqual(self.response.status_code,status.HTTP_201_CREATED)
    def test_document_no_create_sergeant(self):
        # A sergeant must NOT be able to create a secret document.
        self.client.login(username='sergeant', password='123456')
        data = {
            'title':'ogogo',
            'status':'active',
            'text':'1223',
            'data_expired':'2020-06-06',
            'document_root':'secret'
        }
        self.response = self.client.post(self.url,data)
        self.assertNotEqual(self.response.status_code,status.HTTP_201_CREATED)
    def test_document_create_president(self):
        # The president may create a top-secret document.
        self.client.login(username='president', password='123456')
        data = {
            "title":'ogogo',
            'status':'active',
            'text':'1223',
            'data_expired':'2020-06-06',
            'document_root':'top-secret'
        }
        self.response = self.client.post(self.url,data)
        print(self.response.json())
        self.assertEqual(self.response.status_code,status.HTTP_201_CREATED)
    def test_document_createe_president(self):
        # The president may create a private document.
        self.client.login(username='president', password='123456')
        data = {
            'title':'ogogo',
            'status':'active',
            'text':'1223',
            'data_expired':'2020-06-06',
            'document_root':'private'
        }
        self.response = self.client.post(self.url,data)
        print(self.response.json())
        self.assertEqual(self.response.status_code,status.HTTP_201_CREATED)
    def test_president_create_error(self):
        # NOTE(review): the name suggests a failure is expected, but the
        # assertion checks for 201 CREATED — confirm the intent.
        self.client.login(username='president', password='123456')
        data = {
            'title':'presidentahaha',
            'status':'active',
            'text':'ogogokg',
            'data_expired':'2021-05-14',
            'document_root':'top-secret'
        }
        self.response = self.client.post(self.url,data)
        self.assertEqual(self.response.status_code,status.HTTP_201_CREATED)
class TestDataExpiredDocument(APITestCase):
    """Checks that expired documents become inaccessible while live ones stay visible."""
    def setUp(self):
        self.client = APIClient()
        # One document with a future expiry, one already expired (status 'dead').
        self.doc1 = Document.objects.create(title='not expired doc',
        data_expired="2021-05-22",document_root='private')
        self.doc2 = Document.objects.create(title='expired doc',
        data_expired="2021-05-09",document_root='private',status='dead')
        populate_test_db_users(User, Group)
    def test_get_not_expired(self):
        # The live document is retrievable and reported as active.
        self.url = reverse('documents-detail',kwargs={'pk':self.doc1.id})
        self.client.login(username='general',password='123456')
        self.response = self.client.get(self.url)
        print(self.response.json())
        self.assertContains(self.response,'active',status_code=200)
    def test_get_expired(self):
        # The expired document yields a 404 page.
        self.url = reverse('documents-detail', kwargs={'pk': self.doc2.id})
        self.client.login(username='general',password='123456')
        self.response = self.client.get(self.url)
        print(self.response.json())
        self.assertContains(self.response,'Страница не найдена',status_code=404)
|
import serial
from . import make_command as cmd
from .response import Response
tf = cmd.TelegramFactory()
IDN = cmd.IDN.from_string
class IndraDrive:
    """Serial client for an IndraDrive: reads/writes drive parameters by IDN
    string and exposes convenience accessors for the oscilloscope (P-0-00xx)
    parameter block."""

    def __init__(self):
        # Serial port is attached via `with_serial`; None until then.
        self.serial = None
        self.known_parameter_sizes = {}

    @classmethod
    def with_serial(cls, *args, **kwargs):
        """Construct an instance with an opened serial port.

        Defaults: 115200 baud, 4 s timeout, no parity — each only applied
        when the caller did not supply the keyword.
        """
        kwargs.setdefault('baudrate', 115200)
        kwargs.setdefault('timeout', 4)
        kwargs.setdefault('parity', serial.PARITY_NONE)
        inst = cls()
        inst.serial = serial.Serial(*args, **kwargs)
        return inst

    def _receive(self, wordsize=2):
        """Read one response telegram and wrap it in a Response.

        Raises TimeoutError when the 4-byte header does not arrive in full.
        """
        self.serial.flush()
        result = self.serial.read(4)
        if len(result) < 4:
            raise TimeoutError('Drive is probably off')
        length = result[2]
        # Bytes 2 and 3 both carry the payload length and must agree.
        assert result[2] == result[3]
        # Payload plus 4 trailing bytes.
        result += self.serial.read(length+4)
        return Response(result, wordsize=wordsize)

    def read(self, idnstr):
        """Read the value of the parameter identified by `idnstr`."""
        self.serial.write(tf.read(IDN(idnstr)))
        return self._receive()

    def attribute(self, idnstr):
        """Read a parameter's attribute word (4-byte words in the response)."""
        self.serial.write(tf.attribute(IDN(idnstr)))
        return self._receive(wordsize=4)

    def cancel_tranfer(self):
        # Original (misspelled) name, kept so existing callers keep working.
        self.serial.write(tf.cancel_tranfer())
        return self._receive()

    def cancel_transfer(self):
        """Correctly-spelled alias for `cancel_tranfer`."""
        return self.cancel_tranfer()

    def read_list(self, idnstr, length, offset=0):
        """Read `length` elements of a list parameter starting at `offset`."""
        self.serial.write(tf.read_list(IDN(idnstr), offset, length))
        return self._receive()

    def write(self, idnstr, value, size=2):
        """Write `value` (of `size` bytes) to the parameter `idnstr`."""
        self.serial.write(tf.write(IDN(idnstr), value, size))
        return self._receive()

    def rw(self, idnstr, value=None, size=2):
        """Read the parameter when `value` is None, otherwise write it."""
        if value is None:
            return self.read(idnstr)
        return self.write(idnstr, value, size)

    # --- Oscilloscope parameter accessors (read when v is None, else write) ---
    def osci_trg_mask(self, v=None):
        return self.rw('P-0-0025', v, 4)
    def osci_trg_signal_choice(self, v=None):
        return self.rw('P-0-0026', v, 4)
    def osci_trg_threshold(self, v=None):
        return self.rw('P-0-0027', v, 4)
    def osci_ctrl(self, v=None):
        return self.rw('P-0-0028', v, 2)
    def osci_trg_slope(self, v=None):
        return self.rw('P-0-0030', v, 2)
    def osci_time_resolution(self, v=None):
        return self.rw('P-0-0031', v, 4)
    def osci_mem_depth(self, v=None):
        return self.rw('P-0-0032', v, 2)
    def osci_external_trigger(self, v=None):
        return self.rw('P-0-0036', v, 2)
    def osci_internal_trigger(self, v=None):
        return self.rw('P-0-0037', v, 2)

    # --- Read-only oscilloscope status parameters ---
    def osci_status(self):
        return self.rw('P-0-0029')
    def osci_num_values_after_trg(self):
        return self.rw('P-0-0033')
    def osci_trg_ctrl_offset(self):
        return self.rw('P-0-0035')
    def osci_signal_choice_list(self):
        return self.rw('P-0-0149')
    def osci_num_valid_values(self):
        return self.rw('P-0-0150')

    def get_osci(self):
        """Print a human-readable dump of the oscilloscope configuration."""
        print('ctrl', self.osci_ctrl().value)
        print('status', self.osci_status().value)
        print('time_resolution', self.osci_time_resolution().value)
        print('mem_depth', self.osci_mem_depth().value)
        print('num_valid_values', self.osci_num_valid_values().value)
        print('trigger')
        print('  mask', self.osci_trg_mask().value)
        print('  signal_choice', self.osci_trg_signal_choice().value)
        print('  threshold', self.osci_trg_threshold().value)
        print('  slope', self.osci_trg_slope().value)
        print('  num_values_after_trg', self.osci_num_values_after_trg().value)
        print('  ctrl_offset', self.osci_trg_ctrl_offset().value)
        print('  ext trg', self.osci_external_trigger().value)
        print('  int trg', self.osci_internal_trigger().value)
|
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import wandb
from torchvision import transforms
from MODELS.model_resnet import *
from custom_dataset import DatasetISIC2018
from gradcam import GradCAM, GradCAMpp
from gradcam.utils import visualize_cam
# Command-line options for the Grad-CAM / SAM visualization run.
parser = argparse.ArgumentParser(description='PyTorch ResNet+CBAM ISIC2018 Visualization')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--vis-prefix', type=str, default=None,
                    help='prefix to save plots e.g. "baseline" or "SAM-1"')
parser.add_argument('--run-name', type=str, default='noname run', help='run name on the W&B service')
parser.add_argument('--is-server', type=int, choices=[0, 1], default=1)
parser.add_argument("--tags", nargs='+', default=['default-tag'])
parser.add_argument('--cuda-device', type=int, default=0)
parser.add_argument('--batch-size', default=1, type=int,
                    metavar='N', help='mini-batch size (default: 1)')
args = parser.parse_args()
# True when running on the server: enables CUDA and W&B logging.
is_server = args.is_server == 1
# make and save Grad-CAM plot (original image, mask, Grad-CAM, Grad-CAM++)
def make_plot_and_save(input_img, img_name, no_norm_image, segm, model, train_or_val, epoch=None, vis_prefix=None):
    """Render a 2x6 figure of Grad-CAM/Grad-CAM++ overlays and four SAM
    attention maps for one image, optionally saving it and logging to W&B."""
    global is_server
    # get Grad-CAM results and prepare them to show on the plot
    target_layer = model.layer4
    gradcam = GradCAM(model, target_layer=target_layer)
    gradcam_pp = GradCAMpp(model, target_layer=target_layer)
    # sam_output shapes:
    # [1, 1, 56, 56]x3 , [1, 1, 28, 28]x4 [1, 1, 14, 14]x6 , [1, 1, 7, 7]x3
    mask, no_norm_mask, logit, sam_output = gradcam(input_img)
    # Pick one SAM map from each resolution group for display.
    sam1_show = torch.squeeze(sam_output[0].cpu()).detach().numpy()
    sam4_show = torch.squeeze(sam_output[3].cpu()).detach().numpy()
    sam8_show = torch.squeeze(sam_output[7].cpu()).detach().numpy()
    sam14_show = torch.squeeze(sam_output[13].cpu()).detach().numpy()
    heatmap, result = visualize_cam(mask, no_norm_image)
    result_show = np.moveaxis(torch.squeeze(result).detach().numpy(), 0, -1)
    mask_pp, no_norm_mask_pp, logit_pp, sam_output_pp = gradcam_pp(input_img)
    heatmap_pp, result_pp = visualize_cam(mask_pp, no_norm_image)
    result_pp_show = np.moveaxis(torch.squeeze(result_pp).detach().numpy(), 0, -1)
    # prepare mask and original image to show on the plot
    segm_show = torch.squeeze(segm.cpu()).detach().numpy()
    segm_show = np.moveaxis(segm_show, 0, 2)
    input_show = np.moveaxis(torch.squeeze(no_norm_image).detach().numpy(), 0, -1)
    # draw and save the plot
    plt.close('all')
    fig, axs = plt.subplots(nrows=2, ncols=6, figsize=(24, 9))
    plt.suptitle(f'{train_or_val}-Image: {img_name}')
    axs[1][0].imshow(segm_show)
    axs[1][0].set_title('Mask')
    axs[0][0].imshow(input_show)
    axs[0][0].set_title('Original Image')
    axs[0][1].imshow(result_show)
    axs[0][1].set_title('Grad-CAM')
    axs[1][1].imshow(result_pp_show)
    axs[1][1].set_title('Grad-CAM++')
    # Row 0: SAM maps on a fixed [0, 1] scale; row 1: auto-scaled ("relative").
    axs[1][2].imshow(sam1_show, cmap='gray')
    axs[1][2].set_title('SAM-1 relative')
    axs[0][2].imshow(sam1_show, vmin=0., vmax=1., cmap='gray')
    axs[0][2].set_title('SAM-1 absolute')
    axs[1][3].imshow(sam4_show, cmap='gray')
    axs[1][3].set_title('SAM-4 relative')
    axs[0][3].imshow(sam4_show, vmin=0., vmax=1., cmap='gray')
    axs[0][3].set_title('SAM-4 absolute')
    axs[1][4].imshow(sam8_show, cmap='gray')
    axs[1][4].set_title('SAM-8 relative')
    axs[0][4].imshow(sam8_show, vmin=0., vmax=1., cmap='gray')
    axs[0][4].set_title('SAM-8 absolute')
    axs[1][5].imshow(sam14_show, cmap='gray')
    axs[1][5].set_title('SAM-14 relative')
    axs[0][5].imshow(sam14_show, vmin=0., vmax=1., cmap='gray')
    axs[0][5].set_title('SAM-14 absolute')
    # NOTE(review): plt.show() is called before savefig; with an interactive
    # backend the figure may be consumed before saving — confirm the saved
    # PNGs are not blank.
    plt.show()
    if vis_prefix is not None:
        plt.savefig(f'vis/{vis_prefix}/{train_or_val}/{img_name}.png', bbox_inches='tight')
    if is_server:
        if epoch is not None:
            wandb.log({f'{train_or_val}/{img_name}': fig}, step=epoch)
        else:
            wandb.log({f'{train_or_val}/{img_name}': fig})
def main():
    """Render Grad-CAM / Grad-CAM++ / SAM visualizations for every train and
    val image using a CBAM-ResNet checkpoint; optionally log plots to wandb."""
    global args, is_server
    if is_server:
        wandb.login()
    config = dict(
        vis_prefix=args.vis_prefix,
        resume=args.resume,
    )
    if is_server:
        wandb.init(config=config, project="vol.4", name=args.run_name, tags=args.tags)
    # define constants
    CLASS_AMOUNT = 5
    DEPTH = 50
    root_dir = 'data/'
    traindir = os.path.join(root_dir, 'train')
    train_labels = os.path.join(root_dir, 'train', 'images_onehot_train.txt')
    valdir = os.path.join(root_dir, 'val')
    val_labels = os.path.join(root_dir, 'val', 'images_onehot_val.txt')
    # define the model
    model = ResidualNet('ImageNet', DEPTH, CLASS_AMOUNT, 'CBAM')
    if is_server:
        model = model.cuda(args.cuda_device)
    # load the checkpoint; visualizing random weights is pointless, so abort otherwise
    if os.path.isfile(args.resume):
        print(f"=> loading checkpoint '{args.resume}'")
        checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        print(f"=> loaded checkpoint '{args.resume}'")
        print(f"epoch = {checkpoint['epoch']}")
    else:
        print(f"=> no checkpoint found at '{args.resume}'")
        return -1
    # define datasets and data loaders (no augmentation: deterministic center crops)
    size0 = 224
    segm_dir = "images/256ISIC2018_Task1_Training_GroundTruth/"
    train_dataset = DatasetISIC2018(
        train_labels,
        traindir,
        segm_dir,
        size0,
        False,  # perform flips
        False,  # perform random resized crop with size = 224
        transforms.CenterCrop(size0)
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=False,
        pin_memory=True
    )
    val_dataset = DatasetISIC2018(
        val_labels,
        valdir,
        segm_dir,
        size0,
        False,
        False,
        transforms.CenterCrop(size0)
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        pin_memory=True
    )
    # create directories to save plots (makedirs replaces the nested mkdir checks)
    if args.vis_prefix is not None:
        os.makedirs(f'vis/{args.vis_prefix}/train', exist_ok=True)
        os.makedirs(f'vis/{args.vis_prefix}/val', exist_ok=True)
    for i, dictionary in enumerate(train_loader):
        input_img = dictionary['image']
        img_name = dictionary['name'][0]
        no_norm_image = dictionary['no_norm_image']
        segm = dictionary['segm']
        if is_server:
            input_img = input_img.cuda(args.cuda_device)
        make_plot_and_save(input_img, img_name, no_norm_image, segm, model, 'train', vis_prefix=args.vis_prefix)
    # BUG fix: a stray `return` used to sit here, making the val loop below
    # unreachable dead code even though val_loader and vis/<prefix>/val were
    # fully prepared above. Both splits are now visualized.
    for i, dictionary in enumerate(val_loader):
        input_img = dictionary['image']
        img_name = dictionary['name'][0]
        no_norm_image = dictionary['no_norm_image']
        segm = dictionary['segm']
        if is_server:
            input_img = input_img.cuda(args.cuda_device)
        make_plot_and_save(input_img, img_name, no_norm_image, segm, model, 'val', vis_prefix=args.vis_prefix)
# Script entry point.
if __name__ == '__main__':
    main()
|
import time
import ApplicationPerformance.applicationperformance.launchTime as launchTime
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException
# Appium desired capabilities for the Android emulator hosting the app under test.
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.4.2'
desired_caps['deviceName'] = 'emulator-5556'
desired_caps['udid'] = 'emulator-5556'
desired_caps['appPackage'] = 'com.ushaqi.zhuishushenqi'
desired_caps['appActivity'] = 'com.ushaqi.zhuishushenqi.ui.SplashActivity'
# desired_caps['appPackage'] = 'sogou.mobile.explorer.speed'
# desired_caps['appActivity'] = 'sogou.mobile.explorer.NoDisplayActivity'
#driver = webdriver.Remote('http://localhost:4726/wd/hub', desired_caps)
# x = driver.get_window_size()['width']
# y = driver.get_window_size()['height']
# print(x, y)
#driver.find_element_by_id("com.ushaqi.zhuishushenqi:id/btnEntryApp").click()
# driver.find_element_by_xpath("//android.widget.ImageView[@resource-id='com.ushaqi.zhuishushenqi:id/home_action_menu_more']").click()
# try:
# driver.find_element_by_id("com.ushaqi.zhuishushenqi:id/btnEntryApp").click()
# driver.find_element_by_id("com.ushaqi.zhuishushenqi:id/home_action_menu_search").click()
# #driver.find_elements_by_class_name("android.widget.EditText")[0].send_keys("123")
# #print(driver.find_elements_by_class_name("android.widget.TextView"))
# driver.find_elements_by_name("书名、作者、分类")[0].send_keys("12312")
# driver.quit()
# except NoSuchElementException:
# print("sss")
# driver.quit()
#driver.find_element()
# driver.find_element_by_id("com.ushaqi.zhuishushenqi:id/home_action_menu_search").click()
# Load the test-case rows from the 'funcase' Excel sheet, then pre-scan them:
# record the row numbers of every "if" opener and every "end" marker so the
# main replay loop below can jump from a failed "if" to its matching "end".
casedata = launchTime.ReadExcel().readeExcelData('funcase')
endnumber = []
number =[]
for x in range(1, casedata.get('caserows')): # walk every test-case row of the Excel sheet and record control-flow marker rows
    excelcasedata = casedata.get('excledata_sheel').row_values(
        x)
    operatetype = excelcasedata[1]
    if operatetype == "if":
        number.append(x)
    if operatetype == "end":
        endnumber.append(x)
i = 1
ifnumber = 0
while i < casedata.get('caserows'): # replay every Excel test-case row, dispatching on its operation type
    #print(i)
    excelcasedata = casedata.get('excledata_sheel').row_values(
        i)
    caseid = int(excelcasedata[0]) # test-case id
    operatetype = excelcasedata[1] # operation type
    element = excelcasedata[2] # element attribute / locator
    parameter = excelcasedata[3] # parameter (e.g. text to type)
    checkpoint = excelcasedata[4] # expected checkpoint value to compare
    # if operatetype == "if_":
    # number = i
    # if operatetype == "end":
    # endnumber = i
    i = i + 1
    # default wait time when column 5 is empty
    if excelcasedata[5] == "":
        waittime = 2
    # "if包含_" = conditional "contains" step: when the locator check fails,
    # jump straight to the matching "end" row recorded in the pre-scan.
    if "if包含_" in operatetype:
        if "com" in element:
            print(i, excelcasedata)
        else:
            print("执行失败")
            try:
                i = endnumber[ifnumber]
            except IndexError:
                pass
        ifnumber = ifnumber + 1
    elif operatetype == "end":
        print(i,"end")
    elif operatetype == "点击_textname":
        print(i,excelcasedata)
elif operatetype == "物理按钮":
print(i,excelcasedata) |
from typing import List
from ex3.src import GraphInterface
from ex3.src.DiGraph import DiGraph
from ex3.src.GraphAlgoInterface import GraphAlgoInterface
import json
import heapq
import queue
import math
import numpy as np
import matplotlib.pyplot as plt
import random
class GraphAlgo(GraphAlgoInterface):
    """Graph algorithms over a DiGraph: JSON (de)serialisation, Dijkstra
    shortest paths, strongly connected components (Kosaraju) and plotting."""

    def __init__(self, graph: DiGraph = None):
        """Wrap an existing graph, or a brand-new empty one when none is given.

        Bug fix: the old signature used `graph: DiGraph = DiGraph()` — a
        default evaluated once at import time, so every no-arg GraphAlgo()
        shared (and mutated) the very same graph instance.
        """
        self.graph = graph if graph is not None else DiGraph()
        self.string = "Graph"  # plot title; replaced by the file name on load

    def get_graph(self) -> GraphInterface:
        """Return the underlying directed graph."""
        return self.graph

    def load_from_json(self, file_name: str) -> bool:
        """Replace the current graph with one read from a JSON file.

        Expected format: {"Nodes": [{"id":.., "pos":..}, ...],
                          "Edges": [{"src":.., "dest":.., "w":..}, ...]}.
        Returns True on success, False on an I/O error.
        """
        self.string = file_name
        try:
            with open(file_name, "r") as file:
                new_graph_dict = json.load(file)
                vertex = new_graph_dict["Nodes"]
                edges = new_graph_dict["Edges"]
                self.graph = DiGraph()
                for i in vertex:
                    if "pos" not in i:
                        i["pos"] = None  # "pos" is optional in the format
                    self.graph.add_node(i["id"], i["pos"])
                for e in edges:
                    self.graph.add_edge(e["src"], e["dest"], e["w"])
        except IOError as e:
            print(e)
            return False
        return True

    def save_to_json(self, file_name: str) -> bool:
        """Dump the graph to *file_name* in the same format load_from_json reads.

        Returns True on success, False on an I/O error.
        """
        vertex = []
        edges = []
        graph = {}
        for i in self.graph.get_all_v():
            n = {"pos": i.pos, "id": i.key}
            vertex.append(n)
            for j in self.graph.all_out_edges_of_node(i.key).values():
                e = {"src": i.key, "w": j.weight, "dest": j.dst}
                edges.append(e)
        graph["Edges"] = edges
        graph["Nodes"] = vertex
        try:
            with open(file_name, "w") as file:
                json.dump(graph, indent=4, fp=file)
        except IOError as e:
            print(e)
            return False
        return True

    def shortest_path(self, id1: int, id2: int) -> (float, list):
        """Return (distance, node-id path) from id1 to id2.

        Returns (inf, []) when either node is missing or id2 is unreachable.
        """
        if id1 not in self.graph.Nodes.keys() or id2 not in self.graph.Nodes.keys():
            return float('infinity'), []
        self.Dijkstra(id1)
        path = []
        if self.graph.Nodes.get(id2).tag == float('infinity'):
            return float('infinity'), path
        current = id2
        path.append(current)
        # Follow the r_from back-pointers from the target up to the source.
        while id1 not in path:
            path.append(self.graph.Nodes.get(current).r_from.key)
            current = self.graph.Nodes.get(current).r_from.key
        path.reverse()
        return self.graph.Nodes.get(id2).tag, path

    def connected_component(self, id1: int):
        """Return the strongly connected component containing id1 ([] if absent)."""
        if id1 not in self.graph.Nodes.keys() or self.graph is None:
            return []
        return self.Kosaraju(id1)

    def connected_components(self) -> List[list]:
        """Return all strongly connected components, each as a list of node ids."""
        if self.graph is None:
            return []
        components = []
        already_in_scc = []
        for i in self.graph.Nodes.values():
            if i.key not in already_in_scc:
                scc = self.Kosaraju(i.key)
                for k in scc:
                    already_in_scc.append(k)
                components.append(scc)
        return components

    def plot_graph(self) -> None:
        """Draw the graph with matplotlib; nodes without a position get a random one.

        NOTE(review): string positions are parsed and written back into
        node.pos as tuples of *strings*; the drawing loop re-floats them.
        """
        plt.title(self.string)
        x_vals = []
        y_vals = []
        for xy in self.graph.Nodes.values():
            if xy.pos is None:
                xy.pos = (random.randrange(0, 100), random.randrange(0, 100), 0)
                x_vals.append(xy.pos[0])
                y_vals.append(xy.pos[1])
            else:
                string = xy.pos.split(',')
                xy.pos = tuple(string)
                x_vals.append(float(string[0]))
                y_vals.append(float(string[1]))
        plt.plot(x_vals, y_vals, 'o')
        for v in self.graph.Nodes.values():
            v_x = float(v.pos[0])
            v_y = float(v.pos[1])
            plt.annotate(v.key, (v_x - 0.00015, v_y + 0.00015), color='red')
            for e in self.graph.EdgesSrc[v.key].values():
                x_e = float(self.graph.Nodes[e.dst].pos[0])
                y_e = float(self.graph.Nodes[e.dst].pos[1])
                plt.arrow(v_x, v_y, x_e - v_x, y_e - v_y, length_includes_head=True, head_width=0.0001991599,
                          width=0.0000005, color='blue')
        plt.show()

    def Dijkstra(self, src):
        """
        Dijkstra's algorithm: tag every node reachable from *src* with the
        minimum summed weight and store the predecessor in node.r_from.

        Bug fix: the previous version used a FIFO list and never re-queued a
        node after its first visit, so nodes relaxed to a smaller distance
        later kept stale distances downstream. A binary heap with lazy
        deletion (the standard heapq pattern) now yields correct distances.
        """
        for vertex in self.graph.Nodes.values():
            vertex.tag = float('infinity')
        self.graph.Nodes.get(src).tag = 0
        heap = [(0, src)]
        while heap:
            dist, key = heapq.heappop(heap)
            node = self.graph.Nodes.get(key)
            if dist > node.tag:
                continue  # stale heap entry; a shorter path was already found
            for edge in self.graph.EdgesSrc[key].values():
                candidate = dist + edge.weight
                neighbor = self.graph.Nodes.get(edge.dst)
                if candidate < neighbor.tag:
                    neighbor.tag = candidate
                    neighbor.r_from = node
                    heapq.heappush(heap, (candidate, edge.dst))
        return

    def Kosaraju(self, src):
        """
        Return the strongly connected component of *src*: the intersection of
        the nodes reachable in the graph with those reachable in its transpose.

        NOTE(review): the reverse pass iterates EdgesDst[v] but follows
        edge.dst; that is only correct if DiGraph stores those edges already
        reversed — verify against DiGraph, otherwise edge.src is the neighbour.
        """
        s = [src]
        visited = {}
        while len(s) > 0:  # DFS on the graph itself
            v = s.pop()
            if v not in visited.keys():
                visited[v] = self.graph.Nodes[v]
                for edge in self.graph.EdgesSrc[v].values():
                    s.append(edge.dst)
        visited_2 = {}
        s_2 = [src]
        while len(s_2) > 0:  # DFS on the transposed graph
            v = s_2.pop()
            if v not in visited_2.keys():
                visited_2[v] = self.graph.Nodes[v]
                for edge in self.graph.EdgesDst[v].values():
                    s_2.append(edge.dst)
        x = set(visited).intersection(visited_2)
        return list(x)
|
from flask import Blueprint, session, render_template, flash, request, redirect, url_for
from werkzeug.exceptions import abort
from my_app.auth.model.user import User, LoginForm, RegisterForm
from my_app import db
from flask_login import login_user, logout_user, current_user, login_required
from my_app import login_manager
fauth = Blueprint('fauth', __name__)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve the session's stored user id to a User row."""
    return User.query.get(user_id)
@fauth.route('/register', methods=('GET', 'POST'))
def register():
    """Show the registration form and create the user on a valid POST."""
    form = RegisterForm(meta={'csrf': False})
    if form.validate_on_submit():
        if User.query.filter_by(username=form.username.data).first():
            flash('El usuario ya existe', 'danger')
        else:
            # Create the user.
            p = User(form.username.data, form.password.data)
            db.session.add(p)
            db.session.commit()
            flash("Usuario creado con éxito")
            # Bug fix: this blueprint is registered as 'fauth', so the
            # endpoint is 'fauth.register'; url_for('auth.register') would
            # raise a BuildError at runtime.
            return redirect(url_for('fauth.register'))
    return render_template('auth/register.html', form=form)
@fauth.route('/login', methods=('GET', 'POST'))
def login():
    """Authenticate the user and redirect to `next` (or the product index)."""
    if current_user.is_authenticated:
        flash("Ya estás autenticado")
        return redirect(url_for('product.index'))
    form = LoginForm(meta={'csrf': False})
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user and User.check_password(user, form.password.data):
            # Register the session.
            login_user(user)
            flash("Bienvenido nuevamente " + user.username)
            # Bug fix: .get() instead of ['next'] — the hidden field may be
            # missing from the form, and indexing would abort with a 400.
            # (renamed from `next`, which shadowed the builtin)
            next_url = request.form.get('next')
            # is_safe_url should check if the url is safe for redirects.
            # See http://flask.pocoo.org/snippets/62/ for an example.
            # if not is_safe_url(next_url):
            #     return abort(400)
            return redirect(next_url or url_for('product.index'))
        else:
            # Wrong username or password.
            flash('Usuario o Contraseña Incorrectos', 'danger')
    return render_template('auth/login.html', form=form)
@fauth.route('/logout')
def logout():
    # Flask-Login removes each of the session entries it created
    # for the current user.
    logout_user()
    return redirect(url_for('fauth.login'))
@fauth.route('/protegido')
@login_required
def protegido():
    """Sample view that requires an authenticated session."""
    return "<h1>Vista protegida</h1>"
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
#TODO
##from infrastructure.sumoapi import (api_get_position, api_get_destination, api_get_speed, api_set_color, api_get_road, api_change_destination, api_get_origin)
##from .api import (api_get_position, api_get_destination, api_get_speed, api_set_color, api_get_road, api_change_destination, api_get_origin)
#TODO END
class Entity(object):
    """Common base type for simulation participants (Person, Vehicle)."""
    pass
class Person(Entity):
    """A pedestrian entity. The simulator-API-dependent behaviour is commented
    out pending the import rework (see the TODO at the top of the file)."""
    def __init__(self, id):
        self._id = id
        self._type = "pedestrian"
        self._discarded = False
        self._color = False
        # Free-form per-person data (Vehicle uses a VehicleInformation record instead).
        self.information = {}
    def id(self):
        """Return this person's simulator id."""
        return self._id
    def type(self):
        """Return the entity-type string, always "pedestrian"."""
        return self._type
    # def change_destination(self, destination):
    # ## check destination edge in this sim
    # api_change_destination(self.id(), destination)
    def discarded(self):
        """Return True if this person was discarded (never set in visible code)."""
        return self._discarded
    # def change_color(self, color):
    # api_set_color(self.id(), color)
class VehicleInformation(object):
    """Routing record for one vehicle: where it started, where it is headed,
    and the route collected so far."""
    def __init__(self):
        # Nothing is known until the vehicle reports in.
        self.origin = self.destination = None
        self.route = []
    def params(self):
        """Return the live attribute dict (mutations are visible to callers)."""
        return self.__dict__
class Vehicle(Entity):
    """A vehicle entity; routing data lives in a VehicleInformation record.
    The simulator-API-dependent behaviour is commented out pending the import
    rework (see the TODO at the top of the file)."""
    def __init__(self, id):
        self._id = id
        self._type = "vehicle"
        self._discarded = False
        self._color = False
        self._information = VehicleInformation()
    def id(self):
        """Return this vehicle's simulator id."""
        return self._id
    def type(self):
        """Return the entity-type string, always "vehicle"."""
        return self._type
    # def update_information(self):
    # self._information.origin = api_get_origin(self.id())
    # self._information.position = api_get_position(self.id())
    # self._information.road = api_get_road(self.id())
    # self._information.destination = api_get_destination(self.id())
    # self._information.speed = api_get_speed(self.id())
    # return self._information
    #
    #
    # def change_destination(self, destination):
    # ## check destination edge in this sim
    # api_change_destination(self.id(), destination)
    def discarded(self):
        """Return True if this vehicle was discarded (never set in visible code)."""
        return self._discarded
    # def change_color(self, color):
    # api_set_color(self.id(), color)
class POI(object):
    """A point of interest with limited capacity which entities can enter,
    stay at for a fixed number of steps, and leave again."""
    def __init__(self, name, entrance, exit, capacity):
        self._name = name
        self._entrance = entrance
        self._exit = exit
        self._capacity = capacity
        self._number = 0  # occupants currently inside
        ## temp code
        self.repos = []  # StayInfo records for the current occupants
    def name(self):
        """Return the POI's name."""
        return self._name
    def entrance(self):
        """Return the entrance location."""
        return self._entrance
    def exit(self):
        """Return the exit location."""
        return self._exit
    def enter(self, step, duration, object, number = 1):
        """Let *object* enter at *step* for *duration* steps.

        Returns False when there is not enough room, True otherwise (also
        when the object is already inside, in which case nothing is added).
        """
        if self.repos:
            # Python 3 fix: map() returns a one-shot iterator; build a real
            # list so the membership test is reliable and reusable.
            vehs = [x.object for x in self.repos]
            if object not in vehs:
                if number <= self.allowed_number():
                    self._number += number
                    self.repos.append(StayInfo(step, duration, object))
                else:
                    return False
        else:
            self._number += number
            self.repos.append(StayInfo(step, duration, object))
        return True
    def leave(self, step):
        """Remove every occupant whose departure step equals *step*."""
        # Python 3 fix: filter() returns an iterator, which has no len();
        # materialize the matches as a list first.
        vehs = [x for x in self.repos if x.depart_step == step]
        self._number -= len(vehs)
        for v in vehs:
            self.repos.remove(v)
    def check_leave(self, step):
        """Return True/False whether any occupant departs at *step*
        (implicitly None when the POI is empty — kept for compatibility)."""
        if self.repos:
            # Python 3 fix: map() objects have no .count(); use a list.
            departures = [x.depart_step for x in self.repos]
            if departures.count(step):
                return True
            else:
                return False
    def allowed_number(self):
        """Return how many more occupants currently fit."""
        return self._capacity - self._number
    def information(self):
        """Return a one-line occupancy summary string."""
        proportion = float(self._number)/self._capacity * 100
        return "name:%s max_capacity:%d number:%d proportion:%.2f"%( self._name, self._capacity, self._number, proportion)
class StayInfo(object):
    """Book-keeping for one object staying at a POI: when it arrived and
    when it is due to depart."""
    def __init__(self, step, duration, object):
        # Departure is scheduled `duration` steps after arrival.
        self.arrive_step, self.depart_step, self.object = step, step + duration, object
|
from django.shortcuts import redirect
from django.http.response import HttpResponse
def index(request):
    """Root URL: send visitors straight to the registration page."""
    return redirect("/register")
def register(request):
    """Placeholder view: user registration will live here."""
    return HttpResponse("marcador de posición para que los usuarios creen un nuevo registro de usuario")
def login(request):
    """Placeholder view: user login will live here."""
    return HttpResponse("marcador de posición para que los usuarios inicien sesión")
def usuarios(request):
    """Placeholder view: the full user listing will live here."""
    return HttpResponse("marcador de posición para luego mostrar toda la lista de usuarios")
def new(request):
    """Placeholder view: adding a new survey will live here."""
    # PEP 8 fix: no space between the function name and its parameter list.
    return HttpResponse("marcador de posición para que los usuarios agreguen una nueva encuesta")
|
"""
Attempting to support yTube Music in Home Assistant
"""
import asyncio
import logging
import time
import random
import pickle
import os.path
import random
import datetime
from urllib.request import urlopen
from urllib.parse import unquote
from .const import *
import voluptuous as vol
from homeassistant.helpers import config_validation as cv, entity_platform, service
from homeassistant.helpers.condition import state
from homeassistant.helpers.event import track_state_change
from homeassistant.helpers.event import call_later
from homeassistant.helpers.storage import STORAGE_DIR
from homeassistant.helpers import device_registry
from homeassistant.const import ATTR_ENTITY_ID
import homeassistant.components.input_select as input_select
import homeassistant.components.input_boolean as input_boolean
import homeassistant.components.media_player as media_player
from .browse_media import build_item_response, library_payload
from pytube import YouTube
from pytube import request
from pytube import extract
from pytube.cipher import Cipher
import ytmusicapi
#from .ytmusicapi.ytmusic import *
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
	"""Run setup via YAML."""
	_LOGGER.debug("Config via YAML")
	# Only create the entity when a YAML config block actually exists.
	if(config is not None):
		async_add_entities([yTubeMusicComponent(hass, config,"_yaml")], update_before_add=False)
async def async_setup_entry(hass, config, async_add_devices):
	"""Run setup via Storage."""
	_LOGGER.debug("Config via Storage/UI currently not supported due to me not understanding asyncio")
	# Only create the entity when the config entry carries data.
	if(len(config.data) > 0):
		async_add_devices([yTubeMusicComponent(hass, config.data,"")], update_before_add=False)
class yTubeMusicComponent(MediaPlayerEntity):
	def __init__(self, hass, config, name_add):
		"""Read the YAML/UI config, resolve the helper entity ids and
		initialise all playback state; API login and select population are
		deferred to startup()/update()."""
		self.hass = hass
		self._debug_as_error = False
		self._name = config.get(CONF_NAME,DOMAIN+name_add)
		# configurations can be either the full entity_id or just the name
		self._select_playlist = input_select.DOMAIN+"."+config.get(CONF_SELECT_PLAYLIST, DEFAULT_SELECT_PLAYLIST).replace(input_select.DOMAIN+".","")
		self._select_playMode = input_select.DOMAIN+"."+config.get(CONF_SELECT_PLAYMODE, DEFAULT_SELECT_PLAYMODE).replace(input_select.DOMAIN+".","")
		self._select_playContinuous = input_boolean.DOMAIN+"."+config.get(CONF_SELECT_PLAYCONTINUOUS, DEFAULT_SELECT_PLAYCONTINUOUS).replace(input_boolean.DOMAIN+".","")
		self._select_mediaPlayer = input_select.DOMAIN+"."+config.get(CONF_SELECT_SPEAKERS, DEFAULT_SELECT_SPEAKERS).replace(input_select.DOMAIN+".","")
		self._select_source = input_select.DOMAIN+"."+config.get(CONF_SELECT_SOURCE, DEFAULT_SELECT_SOURCE).replace(input_select.DOMAIN+".","")
		default_header_file = os.path.join(hass.config.path(STORAGE_DIR),DEFAULT_HEADER_FILENAME)
		self._header_file = config.get(CONF_HEADER_PATH, default_header_file)
		self._speakersList = config.get(CONF_RECEIVERS)
		# proxy settings
		self._proxy_url = config.get(CONF_PROXY_URL,"")
		self._proxy_path = config.get(CONF_PROXY_PATH,"")
		self.log_me('debug',"YtubeMediaPlayer config: ")
		self.log_me('debug',"- Header path: " + self._header_file)
		self.log_me('debug',"- playlist: " + self._select_playlist)
		self.log_me('debug',"- mediaplayer: " + self._select_mediaPlayer)
		self.log_me('debug',"- source: " + self._select_source)
		self.log_me('debug',"- speakerlist: " + str(self._speakersList))
		self.log_me('debug',"- playModes: " + str(self._select_playMode))
		self.log_me('debug',"- playContinuous: " + str(self._select_playContinuous))
		self._brand_id = str(config.get(CONF_BRAND_ID,""))
		# API / playback bookkeeping, filled in later by startup() and play calls.
		self._api = None
		self._js = ""
		self._update_needed = False
		self._remote_player = ""
		self._untrack_remote_player = None
		self._playlists = []
		self._playlist_to_index = {}
		self._tracks = []
		self._attributes = {}
		self._next_track_no = 0
		self._allow_next = False
		self._last_auto_advance = datetime.datetime.now()
		self._started_by = None
		self._interrupt_data = None
		self._attributes['_media_type'] = None
		self._attributes['_media_id'] = None
		self._attributes['_player_state'] = STATE_OFF
		self._playing = False
		self._state = STATE_OFF
		self._volume = 0.0
		self._is_mute = False
		self._track_name = None
		self._track_artist = None
		self._track_album_name = None
		self._track_album_cover = None
		self._track_artist_cover = None
		self._media_duration = None
		self._media_position = None
		self._media_position_updated = None
		self._shuffle = config.get(CONF_SHUFFLE, DEFAULT_SHUFFLE)
		self._shuffle_mode = config.get(CONF_SHUFFLE_MODE, DEFAULT_SHUFFLE_MODE)
		self._playContinuous = True
		self._x_to_idle = None # Some Mediaplayer don't transition to 'idle' but to 'off' on track end. This re-routes off to idle
		# register "call_method"
		if(name_add==""):
			platform = entity_platform.current_platform.get()
			platform.async_register_entity_service(
				SERVICE_CALL_METHOD,
				{
					vol.Required(ATTR_COMMAND): cv.string,
					vol.Optional(ATTR_PARAMETERS): vol.All(
						cv.ensure_list, vol.Length(min=1), [cv.string]
					),
				},
				"async_call_method",
			)
		# run the api / get_cipher / update select as soon as possible
		if hass.is_running:
			self._update_needed = True
		else:
			hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, self.startup)
# user had difficulties during the debug message on, so we'll provide a workaroud to post debug as errors
def log_me(self,type,msg):
if(self._debug_as_error):
_LOGGER.error(msg)
else:
if(type=='debug'):
_LOGGER.debug(msg)
else:
_LOGGER.error(msg)
	# update will be called eventually BEFORE homeassistant is started completely,
	# therefore we should not use this method for the init
	def update(self):
		"""Deferred init: run startup() once it was flagged as needed (see __init__)."""
		if(self._update_needed):
			self.startup(self.hass)
	# either called once homeassistant started (component was configured before startup)
	# or called from update(), if the component was configured AFTER homeassistant was started
	def startup(self,hass):
		"""One-time init: fetch the stream cipher, verify the API login and
		populate the helper input_selects."""
		self._get_cipher('BB2mjBuAtiQ')
		self.check_api()
		self._update_selects()
		self._update_playmode()
def check_api(self):
self.log_me('debug',"check_api")
if(self._api == None):
self.log_me('debug',"- no valid API, try to login")
if(os.path.exists(self._header_file)):
[ret, msg, self._api] = try_login(self._header_file,self._brand_id)
if(msg!=""):
self._api = None
out = "Issue during login: "+msg
data = {"title": "yTubeMediaPlayer error", "message": out}
self.hass.services.call("persistent_notification","create", data)
return False
else:
self.log_me('debug',"- YouTube Api initialized ok, version: "+str(ytmusicapi.__version__))
else:
out = "can't find header file at "+self._header_file
_LOGGER.error(out)
data = {"title": "yTubeMediaPlayer error", "message": out}
self.hass.services.call("persistent_notification","create", data)
return False
return True
	@property
	def name(self):
		""" Return the name of the player. """
		return self._name
	@property
	def icon(self):
		""" Material Design icon shown for this entity. """
		return 'mdi:music-circle'
	@property
	def supported_features(self):
		""" Flag media player features that are supported. """
		return SUPPORT_YTUBEMUSIC_PLAYER
	@property
	def should_poll(self):
		""" No polling needed. """
		return False
	@property
	def state(self):
		""" Return the state of the device. """
		return self._state
	@property
	def device_state_attributes(self):
		""" Return the device state attributes. """
		return self._attributes
	@property
	def is_volume_muted(self):
		""" Return True if device is muted. """
		return self._is_mute
	@property
	def is_on(self):
		""" Return True if device is on (i.e. currently playing). """
		return self._playing
	@property
	def media_content_type(self):
		""" Content type of current playing media. """
		return MEDIA_TYPE_MUSIC
	@property
	def media_title(self):
		""" Title of current playing media. """
		return self._track_name
	@property
	def media_artist(self):
		""" Artist of current playing media. """
		return self._track_artist
	@property
	def media_album_name(self):
		""" Album name of current playing media. """
		return self._track_album_name
	@property
	def media_image_url(self):
		""" Image url of current playing media. """
		return self._track_album_cover
	@property
	def media_image_remotely_accessible(self):
		# True returns: entity_picture: http://lh3.googleusercontent.com/Ndilu...
		# False returns: entity_picture: /api/media_player_proxy/media_player.gmusic_player?token=4454...
		# Covers come straight from Google's CDN, so they are remotely reachable.
		return True
	@property
	def media_position(self):
		"""Position of current playing media in seconds."""
		return self._media_position
	@property
	def media_position_updated_at(self):
		"""When the position of the current playing media was last valid.
		Returns value from homeassistant.util.dt.utcnow().
		"""
		return self._media_position_updated
	@property
	def media_duration(self):
		"""Duration of current playing media in seconds."""
		return self._media_duration
	@property
	def shuffle(self):
		""" Boolean if shuffling is enabled. """
		return self._shuffle
	@property
	def repeat(self):
		"""Return current repeat mode (mirrors the play-continuous flag)."""
		if(self._playContinuous):
			return REPEAT_MODE_ALL
		return REPEAT_MODE_OFF
def set_repeat(self, repeat: str):
self.log_me('debug',"set_repleat: "+repeat)
"""Set repeat mode."""
data = {ATTR_ENTITY_ID: self._select_playContinuous}
if repeat != REPEAT_MODE_OFF:
self._playContinuous = True
if(self._select_playContinuous!=""):
self.hass.services.call(DOMAIN_IB, IB_ON, data)
else:
self._playContinuous = False
if(self._select_playContinuous!=""):
self.hass.services.call(DOMAIN_IB, IB_OFF, data)
	@property
	def volume_level(self):
		""" Volume level of the media player in the range 0..1 (from self._volume). """
		return self._volume
def turn_on(self, *args, **kwargs):
self.log_me('debug',"TURNON")
""" Turn on the selected media_player from input_select """
self._started_by = "UI"
# exit if we don't konw what to play (the select_playlist will be set to "" if the config provided a value but the entity_id is not in homeassistant)
if(self._select_playlist==""):
self.log_me('debug',"no or wrong playlist select field in the config, exiting")
msg= "You have no playlist entity_id in your config, or that entity_id is not in homeassistant. I don't know what to play and will exit. Either use the media_browser or add the playlist dropdown"
data = {"title": "yTubeMediaPlayer error", "message": msg}
self.hass.services.call("persistent_notification","create", data)
self._turn_off_media_player()
return
# set UI to correct playlist, or grab playlist if none was submitted
playlist = self.hass.states.get(self._select_playlist).state
# exit if we don't have any playlists from the account
if(len(self._playlists)==0):
_LOGGER.error("playlists empty")
self._turn_off_media_player()
return
# load ID for playlist name
idx = self._playlist_to_index.get(playlist)
if idx is None:
_LOGGER.error("playlist to index is none!")
self._turn_off_media_player()
return
# playlist or playlist_radio?
if(self._select_source!=""):
_source = self.hass.states.get(self._select_source)
if _source is None:
_LOGGER.error("- (%s) is not a valid input_select entity.", self._select_source)
return
if(_source.state == "Playlist"):
self._attributes['_media_type'] = MEDIA_TYPE_PLAYLIST
else:
self._attributes['_media_type'] = CHANNEL
else:
self._attributes['_media_type'] = MEDIA_TYPE_PLAYLIST
# store id and start play_media
self._attributes['_media_id'] = self._playlists[idx]['playlistId']
return self.play_media(media_type=self._attributes['_media_type'], media_id=self._attributes['_media_id'])
def prepare_play(self):
self.log_me('debug',"prepare_play")
if(not self.check_api()):
return
# get _remote_player
if not self._update_remote_player():
return
_player = self.hass.states.get(self._remote_player)
# subscribe to changes
if(self._select_playMode!=""):
track_state_change(self.hass, self._select_playMode, self._update_playmode)
if(self._select_playContinuous!=""):
track_state_change(self.hass, self._select_playContinuous, self._update_playmode)
if(self._select_mediaPlayer!=""):
track_state_change(self.hass, self._select_mediaPlayer, self.select_source_helper)
# make sure that the player, is on and idle
try:
if self._playing == True:
self.media_stop()
elif self._playing == False and self._state == STATE_OFF:
if _player.state == STATE_OFF:
self._turn_on_media_player()
else:
self.log_me('debug',"self._state is: (%s).", self._state)
if(self._state == STATE_PLAYING):
self.media_stop()
except:
_LOGGER.error("We hit an error during prepare play, likely related to issue 52")
_LOGGER.error("Player: "+str(_player)+".")
_LOGGER.error("remote_player: "+str(self._remote_player)+".")
self.exc()
# update cipher
self._get_cipher('BB2mjBuAtiQ')
# display imidiatly a loading state to provide feedback to the user
self._allow_next = False
self._track_album_name = ""
self._track_artist = ""
self._track_artist_cover = None
self._track_album_cover = None
self._track_name = "loading..."
self._state = STATE_PLAYING # a bit early otherwise no info will be shown
self.schedule_update_ha_state()
return True
def _turn_on_media_player(self, data=None):
self.log_me('debug',"_turn_on_media_player")
"""Fire the on action."""
if data is None:
data = {ATTR_ENTITY_ID: self._remote_player}
self._state = STATE_IDLE
self.schedule_update_ha_state()
self.hass.services.call(DOMAIN_MP, 'turn_on', data)
	def turn_off(self, entity_id=None, old_state=None, new_state=None, **kwargs):
		""" Turn off the selected media_player """
		self.log_me('debug',"turn_off")
		# Clear all track state so the UI shows an empty player.
		self._playing = False
		self._track_name = None
		self._track_artist = None
		self._track_album_name = None
		self._track_album_cover = None
		self._media_duration = None
		self._media_position = None
		self._media_position_updated = None
		self._turn_off_media_player()
def _turn_off_media_player(self, data=None):
self.log_me('debug',"_turn_off_media_player")
"""Fire the off action."""
self._playing = False
self._state = STATE_OFF
self._attributes['_player_state'] = STATE_OFF
self._attributes['likeStatus'] = ""
self._attributes['videoId'] = ""
self._attributes['lyrics'] = ""
self._attributes['_media_type'] = ""
self._attributes['_media_id'] = ""
self._attributes['current_track'] = 0
self.schedule_update_ha_state()
if(self._remote_player == ""):
if(not(self._update_remote_player())):
return
if(data != 'skip_remote_player'):
data = {ATTR_ENTITY_ID: self._remote_player}
self.hass.services.call(DOMAIN_MP, 'media_stop', data)
self.hass.services.call(DOMAIN_MP, 'turn_off', data)
	def _update_remote_player(self, remote_player=""):
		"""Resolve which media_player entity we control.

		Priority: the explicit *remote_player* argument, then the previously
		stored player (when no drop-down helper is configured), then the
		state of the configured input_select. Re-subscribes the state-change
		listener when the resolved player changed. Returns True on success.
		"""
		self.log_me('debug',"_update_remote_player")
		old_remote_player = self._remote_player
		if(remote_player != ""):
			# make sure that the entity ID is complete
			if(not(remote_player.startswith(DOMAIN_MP+"."))):
				remote_player = DOMAIN_MP+"."+remote_player
			self._remote_player = remote_player
		# sets the current media_player from input_select
		elif(self._select_mediaPlayer == ""): # drop down for player does not exist
			if(self._remote_player == ""): # no preselected entity ID
				self._track_name = "Please select player first"
				self.schedule_update_ha_state()
				msg= "Please select a player before start playing, e.g. via the 'media_player.select_source' method"
				data = {"title": "yTubeMediaPlayer error", "message": msg}
				self.hass.services.call("persistent_notification","create", data)
				return False
			else:
				if self.hass.states.get(self._remote_player) is None:
					_LOGGER.error("(%s) is not a valid media player.", self._remote_player)
					return False
				else:
					return True
		else:
			media_player = self.hass.states.get(self._select_mediaPlayer) # Example: self.hass.states.get(input_select.gmusic_player_speakers)
			if media_player is None:
				_LOGGER.error("(%s) is not a valid input_select entity.", self._select_mediaPlayer)
				return False
			_remote_player = "media_player." + media_player.state
			if self.hass.states.get(_remote_player) is None:
				_LOGGER.error("(%s) is not a valid media player.", media_player.state)
				return False
			# Example: self._remote_player = media_player.bedroom_stereo
			self._remote_player = _remote_player
		# unsubscribe / resubscribe when the resolved player changed
		if self._remote_player != old_remote_player:
			if(self._untrack_remote_player is not None):
				try:
					self._untrack_remote_player()
				except:
					pass
			self._untrack_remote_player = track_state_change(self.hass, self._remote_player, self._sync_player)
		return True
def _get_cipher(self, videoId):
self.log_me('debug',"_get_cipher")
embed_url = "https://www.youtube.com/embed/"+videoId
embed_html = request.get(url=embed_url)
js_url = extract.js_url(embed_html)
self._js = request.get(js_url)
self._cipher = Cipher(js=self._js)
#2do some sort of check if tis worked
	def _sync_player(self, entity_id=None, old_state=None, new_state=None):
		"""Mirror the remote player's state onto this entity and auto-advance.

		Registered as a state-change callback on the remote player (then all
		three args are set) and also scheduled periodically without args.
		Handles end-of-track detection, player-specific quirks (Chromecast,
		MPD, Sonos, browser_mod) and remote shutdown.
		"""
		self.log_me('debug',"_sync_player")
		if(entity_id!=None and old_state!=None) and new_state!=None:
			self.log_me('debug',entity_id+": "+old_state.state+" -> "+new_state.state)
			# a stale subscription may still fire for a previously used player
			if(entity_id!=self._remote_player):
				self.log_me('debug',"- ignoring old player")
				return
		else:
			self.log_me('debug',self._remote_player)
		""" Perform actions based on the state of the selected (Speakers) media_player """
		if not self._playing:
			return
		""" _player = The selected speakers """
		_player = self.hass.states.get(self._remote_player)
		# mirror position/duration so our own entity reports playback progress
		if('media_duration' in _player.attributes):
			self._media_duration = _player.attributes['media_duration']
		if('media_position' in _player.attributes):
			self._media_position = _player.attributes['media_position']
			self._media_position_updated = datetime.datetime.now(datetime.timezone.utc)
		""" entity_id of selected speakers. """
		self._attributes['_player_id'] = _player.entity_id
		""" _player state - Example [playing -or- idle]. """
		self._attributes['_player_state'] = _player.state
		""" unlock allow next, some player fail because their media_position is 'strange' catch """
		found_position = False
		try:
			if 'media_position' in _player.attributes:
				found_position = True
				if(isinstance(_player.attributes['media_position'],int)):
					if _player.state == 'playing' and _player.attributes['media_position']>0:
						self._allow_next = True
		except:
			found_position = False
			pass
		if not(found_position) and _player.state == 'playing': # fix for browser mod media_player not providing the 'media_position'
			self._allow_next = True
		""" auto next .. best cast: we have an old and a new state """
		if(old_state!=None and new_state!=None):
			# chromecast quite frequently change from playing to idle twice, so we need some kind of time guard
			if(old_state.state == STATE_PLAYING and new_state.state == STATE_IDLE and (datetime.datetime.now()-self._last_auto_advance).total_seconds() > 10 ):
				self._allow_next = False
				self._get_track()
			# turn this player of when the remote_player was shut down
			elif((old_state.state == STATE_PLAYING or old_state.state == STATE_IDLE) and new_state.state == STATE_OFF):
				if(self._x_to_idle == STATE_OFF): # workaround for MPD (changes to OFF at the end of a track)
					self._allow_next = False
					self._get_track()
				else:
					self._state = STATE_OFF
					self.log_me('debug',"media player got turned off")
					self.turn_off()
			elif(old_state.state == STATE_PLAYING and new_state.state == STATE_PAUSED and # workaround for SONOS (changes to PAUSED at the end of a track)
				(datetime.datetime.now()-self._last_auto_advance).total_seconds() > 10 and self._x_to_idle == STATE_PAUSED):
				self._allow_next = False
				self._get_track()
			elif(old_state.state == STATE_PAUSED and new_state.state == STATE_IDLE and self._state == STATE_PAUSED):
				self.log_me('debug',"Remote Player changed from PAUSED to IDLE withouth our interaction, so likely another source is using the player now. I'll step back and swich myself off")
				self._turn_off_media_player('skip_remote_player')
				return
		# no states, lets rely on stuff like _allow_next
		elif _player.state == 'idle':
			if self._allow_next:
				if (datetime.datetime.now()-self._last_auto_advance).total_seconds() > 10:
					self._allow_next = False
					self._get_track()
		""" Set new volume if it has been changed on the _player """
		if 'volume_level' in _player.attributes:
			self._volume = round(_player.attributes['volume_level'],2)
		self.schedule_update_ha_state()
def _ytubemusic_play_media(self, event):
self.log_me('debug',"_ytubemusic_play_media")
_speak = event.data.get('speakers')
_source = event.data.get('source')
_media = event.data.get('name')
if event.data['shuffle_mode']:
self._shuffle_mode = event.data.get('shuffle_mode')
_LOGGER.info("SHUFFLE_MODE: %s", self._shuffle_mode)
if event.data['shuffle']:
self.set_shuffle(event.data.get('shuffle'))
_LOGGER.info("- SHUFFLE: %s", self._shuffle)
self.log_me('debug',"- Speakers: (%s) | Source: (%s) | Name: (%s)", _speak, _source, _media)
self.play_media(_source, _media, _speak)
def extract_info(self, _track):
#self.log_me('debug',"extract_info")
""" If available, get track information. """
info = dict()
info['track_album_name'] = ""
info['track_artist_cover'] = ""
info['track_name'] = ""
info['track_artist'] = ""
info['track_album_cover'] = ""
try:
if 'title' in _track:
info['track_name'] = _track['title']
except:
pass
try:
if 'byline' in _track:
info['track_artist'] = _track['byline']
elif 'artists' in _track:
info['track_artist'] = ""
if(isinstance(_track["artists"],str)):
info['track_artist'] = _track["artists"]
elif(isinstance(_track["artists"],list)):
if 'name' in _track['artists'][0]:
info['track_artist'] = _track['artists'][0]['name']
else:
info['track_artist'] = _track['artists'][0]
except:
pass
try:
if 'thumbnail' in _track:
_album_art_ref = _track['thumbnail'] ## returns a list,
if 'thumbnails' in _album_art_ref:
_album_art_ref = _album_art_ref['thumbnails']
# thumbnail [0] is super tiny 32x32? / thumbnail [1] is ok-ish / thumbnail [2] is quite nice quality
if isinstance(_album_art_ref,list):
info['track_album_cover'] = _album_art_ref[len(_album_art_ref)-1]['url']
elif 'thumbnails' in _track:
_album_art_ref = _track['thumbnails'] ## returns a list
if isinstance(_album_art_ref,list):
info['track_album_cover'] = _album_art_ref[len(_album_art_ref)-1]['url']
except:
pass
return info
def select_source_helper(self, entity_id=None, old_state=None, new_state=None):
# redirect call, obviously we got called by status change, so we can call it without argument and let it pick
return self.select_source()
def select_source(self, source=None):
self.log_me('debug',"select_source("+str(source)+")")
# source should just be the NAME without DOMAIN, to select it in the dropdown
if(isinstance(source,str)):
source = source.replace(DOMAIN_MP+".","")
# shutdown old player if we're currently playimg
was_playing = self._playing
if(self._playing):
self.log_me('debug',"- was playing")
old_player = self.hass.states.get(self._remote_player)
self.media_stop(player=self._remote_player) # important to mention the player here explictly. We're going to change it and stuff runs async
## get track position of old player TODO
## set player
if(source is not None):
self._update_remote_player(remote_player=DOMAIN_MP+"."+source)
self.log_me('debug',"- Choosing "+self._remote_player+" as player")
## try to set drop down
if(self._select_mediaPlayer != ""):
if(not self.check_entity_exists(self._select_mediaPlayer)):
self.log_me('debug',"- Drop down for media player: "+str(self._select_mediaPlayer)+" not found")
else:
data = {input_select.ATTR_OPTION: source, ATTR_ENTITY_ID: self._select_mediaPlayer}
self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SELECT_OPTION, data)
else:
# load from dropdown, if that fails, exit
if(not self._update_remote_player()):
_LOGGER.error("- _update_remote_player failed")
return
## if playing, switch player
if(was_playing):
# don't call "_play" here, as that resets the playlist position
self._next_track_no = max(self._next_track_no-1,-1) # get track will increase the counter
self._get_track()
# seek, if possible
new_player = self.hass.states.get(self._remote_player)
if (all(a in old_player.attributes for a in ('media_position','media_position_updated_at','media_duration')) and 'supported_features' in new_player.attributes):
if(new_player.attributes['supported_features'] | SUPPORT_SEEK):
now = datetime.datetime.now(datetime.timezone.utc)
delay = now - old_player.attributes['media_position_updated_at']
pos = delay.total_seconds() + old_player.attributes['media_position']
if pos < old_player.attributes['media_duration']:
data = {'seek_position': pos, ATTR_ENTITY_ID: self._remote_player}
self.hass.services.call(DOMAIN_MP, media_player.SERVICE_MEDIA_SEEK, data)
	def _update_selects(self, now=None):
		"""Validate all configured input helpers and (re)fill the speaker dropdown.

		Any helper entity that doesn't exist is disabled by setting its
		config field to "". Ends by refreshing the playlist dropdown.
		"""
		self.log_me('debug',"_update_selects")
		# -- all others -- #
		if(not self.check_entity_exists(self._select_playlist)):
			self.log_me('debug',"- playlist: "+str(self._select_playlist)+" not found")
			self._select_playlist = ""
		if(not self.check_entity_exists(self._select_playMode)):
			self.log_me('debug',"- playmode: "+str(self._select_playMode)+" not found")
			self._select_playMode = ""
		if(not self.check_entity_exists(self._select_playContinuous)):
			self.log_me('debug',"- playContinuous: "+str(self._select_playContinuous)+" not found")
			self._select_playContinuous = ""
		if(not self.check_entity_exists(self._select_mediaPlayer)):
			self.log_me('debug',"- mediaPlayer: "+str(self._select_mediaPlayer)+" not found")
			self._select_mediaPlayer = ""
		if(not self.check_entity_exists(self._select_source)):
			self.log_me('debug',"- Source: "+str(self._select_source)+" not found")
			self._select_source = ""
		# ----------- speaker -----#
		# normalize the configured speakers (str or iterable) into bare entity names
		try:
			if(isinstance(self._speakersList,str)):
				speakersList = [self._speakersList]
			else:
				speakersList = list(self._speakersList)
			for i in range(0,len(speakersList)):
				speakersList[i] = speakersList[i].replace(DOMAIN_MP+".","")
		except:
			speakersList = list()
		# check if the drop down exists
		if(self._select_mediaPlayer == ""):
			self.log_me('debug',"- Drop down for media player not found")
			self._select_mediaPlayer = ""
			# if exactly one unit is provided, stick with it, if it existst
			if(len(speakersList) == 1):
				self._update_remote_player(remote_player=speakersList[0])
				self.log_me('debug',"- Choosing "+self._remote_player+" as player")
		else: #dropdown exists
			defaultPlayer = ''
			if(len(speakersList)<=1):
				if(len(speakersList) == 1):
					defaultPlayer = speakersList[0]
				# none (or one) configured: offer every media_player HA knows, except ourselves
				all_entities = self.hass.states.all()
				for e in all_entities:
					if(e.entity_id.startswith(DOMAIN_MP) and not(e.entity_id.startswith(DOMAIN_MP+"."+DOMAIN))):
						speakersList.append(e.entity_id.replace(DOMAIN_MP+".",""))
			# de-duplicate while preserving order
			speakersList = list(dict.fromkeys(speakersList))
			self.log_me('debug',"- Adding "+str(len(speakersList))+" player to the dropdown")
			data = {input_select.ATTR_OPTIONS: list(speakersList), ATTR_ENTITY_ID: self._select_mediaPlayer}
			self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SET_OPTIONS, data)
			if(defaultPlayer!=''):
				if(defaultPlayer in speakersList):
					data = {input_select.ATTR_OPTION: defaultPlayer, ATTR_ENTITY_ID: self._select_mediaPlayer}
					self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SELECT_OPTION, data)
		# finally call update playlist to fill the list .. if it exists
		self._update_playlists()
def check_entity_exists(self, e):
try:
r = self.hass.states.get(e)
if(r is None):
return False
if(r.state == "unavailable"):
return False
return True
except:
return False
	def _update_playlists(self, now=None):
		self.log_me('debug',"_update_playlists")
		""" Sync playlists from Google Music library """
		# requires a working api and a configured playlist dropdown
		if(self._api == None):
			self.log_me('debug',"- no api, exit")
			return
		if(self._select_playlist == ""):
			self.log_me('debug',"- no playlist select field, exit")
			return
		self._playlist_to_index = {}
		try:
			try:
				self._playlists = self._api.get_library_playlists(limit = 99)
				self.log_me('debug'," - "+str(len(self._playlists))+" Playlists loaded")
			except:
				# api failure: drop the handle so it gets rebuilt on next use
				self._api = None
				self.exc(resp="ytmusicapi")
				return
			idx = -1
			for playlist in self._playlists:
				idx = idx + 1
				name = playlist.get('title','')
				if len(name) < 1:
					continue
				# map the visible title to its position in self._playlists
				self._playlist_to_index[name] = idx
				# the "your likes" playlist won't return a count of tracks
				if not('count' in playlist):
					try:
						extra_info = self._api.get_playlist(playlistId=playlist['playlistId'])
						if('trackCount' in extra_info):
							self._playlists[idx]['count'] = int(extra_info['trackCount'])
						else:
							self._playlists[idx]['count'] = 25
					except:
						# best effort: fall back to a default count of 25
						if('playlistId' in playlist):
							self.log_me('debug',"- Failed to get_playlist count for playlist ID '"+str(playlist['playlistId'])+"' setting it to 25")
						else:
							self.log_me('debug',"- Failed to get_playlist, no playlist ID")
						self.exc(resp="ytmusicapi")
						self._playlists[idx]['count'] = 25
			if(len(self._playlists)==0):
				self._playlist_to_index["No playlists found"] = 0
			# publish the titles on the entity and fill the dropdown
			playlists = list(self._playlist_to_index.keys())
			self._attributes['playlists'] = playlists
			data = {"options": list(playlists), "entity_id": self._select_playlist}
			self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SET_OPTIONS, data)
		except:
			self.exc()
			msg= "Caught error while loading playlist. please log for details"
			data = {"title": "yTubeMediaPlayer error", "message": msg}
			self.hass.services.call("persistent_notification","create", data)
def _tracks_to_attribute(self):
self.log_me('debug',"_tracks_to_attribute")
self._attributes['total_tracks'] = len(self._tracks)
self._attributes['tracks'] = []
for track in self._tracks:
info = self.extract_info(track)
self._attributes['tracks'].append(info['track_artist']+" - "+info['track_name'])
# called from HA when th user changes the input entry, will read selection to membervar
def _update_playmode(self, entity_id=None, old_state=None, new_state=None):
self.log_me('debug',"_update_playmode")
try:
if(self._select_playContinuous!=""):
if(self.hass.states.get(self._select_playContinuous).state=="on"):
self._playContinuous = True
else:
self._playContinuous = False
except:
self.log_me('debug',"- Selection field "+self._select_playContinuous+" not found, skipping")
try:
if(self._select_playMode!=""):
_playmode = self.hass.states.get(self._select_playMode)
if _playmode != None:
if(_playmode.state == PLAYMODE_SHUFFLE):
self._shuffle = True
self._shuffle_mode = 1
elif(_playmode.state == PLAYMODE_RANDOM):
self._shuffle = True
self._shuffle_mode = 2
if(_playmode.state == PLAYMODE_SHUFFLE_RANDOM):
self._shuffle = True
self._shuffle_mode = 3
if(_playmode.state == PLAYMODE_DIRECT):
self._shuffle = False
self.set_shuffle(self._shuffle)
except:
self.log_me('debug',"- Selection field "+self._select_playMode+" not found, skipping")
# if we've change the dropdown, reload the playlist and start playing
# else only change the mode
if(entity_id == self._select_playMode and old_state != None and new_state != None and self.state == STATE_PLAYING):
self._allow_next = False # player will change to idle, avoid auto_advance
return self.play_media(media_type=self._attributes['_media_type'], media_id=self._attributes['_media_id'])
def _play(self):
self.log_me('debug',"_play")
self._next_track_no = -1
self._get_track()
def _get_track(self, entity_id=None, old_state=None, new_state=None, retry=3):
self.log_me('debug',"_get_track")
""" Get a track and play it from the track_queue. """
""" grab next track from prefetched list """
_track = None
# get next track nr (randomly or by increasing).
if self._shuffle and self._shuffle_mode != 1 and len(self._tracks)>1: #1 will use the list as is (shuffled). 2 and 3 will also take songs randomized
self._next_track_no = random.randrange(len(self._tracks)) - 1
else:
self._next_track_no = self._next_track_no + 1
self.log_me('debug',"- Playing track nr "+str(self._next_track_no)+" / "+str(len(self._tracks)))
if self._next_track_no >= len(self._tracks):
# we've reached the end of the playlist
if(self._playContinuous):
# call PLAY_MEDIA with the same arguments
return self.play_media(media_type=self._attributes['_media_type'], media_id=self._attributes['_media_id'])
else:
_LOGGER.info("- End of playlist and playcontinuous is off")
self._turn_off_media_player()
return
try:
_track = self._tracks[self._next_track_no]
except IndexError:
_LOGGER.error("- Out of range! Number of tracks in track_queue == (%s)", len(self._tracks))
self._api = None
self._turn_off_media_player()
return
if _track is None:
_LOGGER.error("- _track is None!")
self._turn_off_media_player()
return
self._attributes['current_track'] = self._next_track_no
self._attributes['videoId'] = _track['videoId']
if('likeStatus' in _track):
self._attributes['likeStatus'] = _track['likeStatus']
else:
self._attributes['likeStatus'] = ""
""" Find the unique track id. """
if not('videoId' in _track):
_LOGGER.error("- Failed to get ID for track: (%s)", _track)
_LOGGER.error(_track)
if retry < 1:
self._turn_off_media_player()
return
return self._get_track(retry=retry-1)
info = self.extract_info(_track)
self._track_album_name = info['track_album_name']
self._track_artist_cover = info['track_artist_cover']
self._track_name = info['track_name']
self._track_artist = info['track_artist']
self._track_album_cover = info['track_album_cover']
self.schedule_update_ha_state()
"""@@@ Get the stream URL and play on media_player @@@"""
_url = self.get_url(_track['videoId'])
if(_url == ""):
if retry < 1:
self.log_me('debug',"- get track failed to return URL, turning off")
self._turn_off_media_player()
return
else:
_LOGGER.error("- Retry with: (%i)", retry)
return self._get_track(retry=retry-1)
# proxy playback, needed e.g. for sonos
try:
if(self._proxy_url!="" and self._proxy_path!=""):
p1 = datetime.datetime.now()
open(os.path.join(self._proxy_path,PROXY_FILENAME), 'wb').write(urlopen(_url).read())
if(self._proxy_url.endswith('/')):
self._proxy_url = self._proxy_url[:-1]
_url = self._proxy_url+"/"+PROXY_FILENAME
t = (datetime.datetime.now() - p1).total_seconds()
self.log_me('debug',"- proxy loading time: "+str(t)+" sec")
except:
_LOGGER.error("The proxy method hit an error, turning off")
self.exc()
self._turn_off_media_player()
return
### start playback ###
self._state = STATE_PLAYING
self._playing = True
self.schedule_update_ha_state()
data = {
ATTR_MEDIA_CONTENT_ID: _url,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_ENTITY_ID: self._remote_player
}
self.hass.services.call(DOMAIN_MP, SERVICE_PLAY_MEDIA, data)
self._last_auto_advance = datetime.datetime.now() # avoid auto_advance
### get lyrics after playback started ###
self._attributes['lyrics'] = 'No lyrics available'
try:
l_id = self._api.get_watch_playlist(videoId=_track['videoId'])
if 'lyrics' in l_id:
if(l_id['lyrics'] != None):
lyrics = self._api.get_lyrics(browseId=l_id['lyrics'])
self._attributes['lyrics'] = lyrics['lyrics']
except:
pass
call_later(self.hass, 15, self._sync_player)
def get_url(self, videoId=None, retry=False):
self.log_me('debug',"get_url")
if(videoId==None):
self.log_me('debug',"videoId was None")
return ""
_url = ""
self.check_api()
try:
self.log_me('debug',"- try to find URL on our own")
try:
streamingData=self._api.get_streaming_data(videoId)
except:
self._api = None
self.exc(resp="ytmusicapi")
return
if('adaptiveFormats' in streamingData):
streamingData = streamingData['adaptiveFormats']
elif('formats' in streamingData): #backup, not sure if that is ever needed, or if adaptiveFormats are always present
streamingData = streamingData['formats']
streamId = 0
# try to find audio only stream
for i in range(0,len(streamingData)):
if(streamingData[i]['mimeType'].startswith('audio/mp4')):
streamId = i
break
elif(streamingData[i]['mimeType'].startswith('audio')):
streamId = i
if(streamingData[streamId].get('url') is None):
sigCipher_ch = streamingData[streamId]['signatureCipher']
sigCipher_ex = sigCipher_ch.split('&')
res = dict({'s': '', 'url': ''})
for sig in sigCipher_ex:
for key in res:
if(sig.find(key+"=")>=0):
res[key]=unquote(sig[len(key+"="):])
# I'm just not sure if the original video from the init will stay online forever
# in case it's down the player might not load and thus we won't have a javascript loaded
# so if that happens: we try with this url, might work better (at least the file should be online)
# the only trouble i could see is that this video is private and thus also won't load the player ..
if(self._js == ""):
self._get_cipher(videoId)
signature = self._cipher.get_signature(ciphered_signature=res['s'])
_url = res['url'] + "&sig=" + signature
self.log_me('debug',"- self decoded URL via cipher")
else:
_url = streamingData[streamId]['url']
self.log_me('debug',"- found URL in api data")
except Exception as err:
_LOGGER.error("- Failed to get own(!) URL for track, further details below. Will not try YouTube method")
_LOGGER.error(traceback.format_exc())
_LOGGER.error(videoId)
try:
_LOGGER.error(self._api.get_song(videoId))
except:
self._api = None
self.exc(resp="ytmusicapi")
return
# backup: run youtube stack, only if we failed
if(_url == ""):
try:
streams = YouTube('https://www.youtube.com/watch?v='+videoId).streams
streams_audio = streams.filter(only_audio=True)
if(len(streams_audio)):
_url = streams_audio.order_by('abr').last().url
else:
_url = streams.order_by('abr').last().url
_LOGGER.error("ultimatly")
_LOGGER.error(_url)
except Exception as err:
_LOGGER.error(traceback.format_exc())
_LOGGER.error("- Failed to get URL with YouTube methode")
_LOGGER.error(err)
return ""
return _url
def play_media(self, media_type, media_id, _player=None, **kwargs):
self.log_me('debug',"play_media, media_type: "+str(media_type)+", media_id: "+str(media_id))
self._started_by = "Browser"
self._attributes['_media_type'] = media_type
self._attributes['_media_id'] = media_id
self.prepare_play()
# Update player if we got an input
if _player is not None:
self._update_remote_player(remote_player=_player)
_option = {"option": _player, "entity_id": self._select_mediaPlayer}
self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SELECT_OPTION, _option)
# load Tracks depending on input
try:
if(media_type == MEDIA_TYPE_PLAYLIST):
self._tracks = self._api.get_playlist(playlistId=media_id)['tracks']
elif(media_type == MEDIA_TYPE_ALBUM):
self._tracks = self._api.get_album(browseId=media_id)['tracks']
elif(media_type == MEDIA_TYPE_TRACK):
self._tracks = [self._api.get_song(videoId=media_id)]
elif(media_id == HISTORY):
self._tracks = self._api.get_history()
elif(media_id == USER_TRACKS):
self._tracks = self._api.get_library_upload_songs(limit=999)
elif(media_type == CHANNEL):
# get original playlist from the media_id
self._tracks = self._api.get_playlist(playlistId=media_id,limit=999)['tracks']
# select on track randomly
if(isinstance(self._tracks, list)):
if(len(self._tracks)>0):
if(len(self._tracks)>1):
r_track = self._tracks[random.randrange(0,len(self._tracks)-1)]
else:
r_track = self._tracks[0]
# get a 'channel' based on that random track
self._tracks = self._api.get_watch_playlist(videoId=r_track['videoId'])['tracks']
self._started_by = "UI" # technically wrong, but this will enable auto-reload playlist once all tracks are played
elif(media_type == USER_ALBUM):
self._tracks = self._api.get_library_upload_album(browseId=media_id)['tracks']
elif(media_type == USER_ARTIST or media_type == USER_ARTIST_2): # Artist -> Track or Artist [-> Album ->] Track
self._tracks = self._api.get_library_upload_artist(browseId=media_id, limit=BROWSER_LIMIT)
else:
self.log_me('debug',"- error during fetching play_media, turning off")
self.turn_off()
except:
self._api = None
self.exc(resp="ytmusicapi")
self.turn_off()
return
# mode 1 and 3 shuffle the playlist after generation
if(isinstance(self._tracks,list)):
if self._shuffle and self._shuffle_mode != 2 and len(self._tracks)>1:
random.shuffle(self._tracks)
self.log_me('debug',"- shuffle new tracklist")
if(len(self._tracks)==0):
_LOGGER.error("racklist with 0 tracks loaded, existing")
self.turn_off()
return
else:
self.turn_off()
return
self._tracks_to_attribute()
# grab track from tracks[] and forward to remote player
self._next_track_no = -1
self._play()
def media_play(self, entity_id=None, old_state=None, new_state=None, **kwargs):
self.log_me('debug',"media_play")
"""Send play command."""
if self._state == STATE_PAUSED:
self._state = STATE_PLAYING
self.schedule_update_ha_state()
data = {ATTR_ENTITY_ID: self._remote_player}
self.hass.services.call(DOMAIN_MP, 'media_play', data)
else:
self._play()
def media_pause(self, **kwargs):
self.log_me('debug',"media_pause")
""" Send media pause command to media player """
self._state = STATE_PAUSED
#_LOGGER.error(" PAUSE ")
self.schedule_update_ha_state()
data = {ATTR_ENTITY_ID: self._remote_player}
self.hass.services.call(DOMAIN_MP, 'media_pause', data)
def media_play_pause(self, **kwargs):
self.log_me('debug',"media_play_pause")
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
self._allow_next = False
self.media_pause()
elif(self._state == STATE_PAUSED):
self._allow_next = False
self.media_play()
def media_previous_track(self, **kwargs):
"""Send the previous track command."""
if self._playing:
self._next_track_no = max(self._next_track_no - 2, -1)
self._allow_next = False
self._get_track()
def media_next_track(self, **kwargs):
"""Send next track command."""
if self._playing:
self._allow_next = False
self._get_track()
def media_stop(self, **kwargs):
"""Send stop command."""
self.log_me('debug',"media_stop")
self._state = STATE_IDLE
self._playing = False
self._track_artist = None
self._track_album_name = None
self._track_name = None
self._track_album_cover = None
self.schedule_update_ha_state()
if('player' in kwargs):
self.log_me('debug',"- player found")
data = {ATTR_ENTITY_ID: kwargs.get('player')}
else:
data = {ATTR_ENTITY_ID: self._remote_player}
self.hass.services.call(DOMAIN_MP, 'media_stop', data)
self.log_me('debug',"- media_stop -> "+self._remote_player)
def media_seek(self, position):
"""Seek the media to a specific location."""
self.log_me('debug',"seek: "+str(position))
data = {ATTR_ENTITY_ID: self._remote_player, 'seek_position': position}
self.hass.services.call(DOMAIN_MP, 'media_seek', data)
def set_shuffle(self, shuffle):
self.log_me('debug',"set_shuffle: "+str(shuffle))
self._shuffle = shuffle # True / False
# mode 1 and 3 will shuffle the playlist after generation
if(isinstance(self._tracks,list)):
if(self._shuffle and self._shuffle_mode != 2 and len(self._tracks)>1):
random.shuffle(self._tracks)
self._tracks_to_attribute()
if self._shuffle_mode == 1:
self._attributes['shuffle_mode'] = PLAYMODE_SHUFFLE
elif self._shuffle_mode == 2:
self._attributes['shuffle_mode'] = PLAYMODE_RANDOM
elif self._shuffle_mode == 3:
self._attributes['shuffle_mode'] = PLAYMODE_SHUFFLE_RANDOM
else:
self._attributes['shuffle_mode'] = self._shuffle_mode
# setting the input will call the "input has changed" - callback .. but that should be alright
if(self._select_playMode!=""):
if(self._shuffle):
data = {input_select.ATTR_OPTION: self._attributes['shuffle_mode'], ATTR_ENTITY_ID: self._select_playMode}
self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SELECT_OPTION, data)
else:
data = {input_select.ATTR_OPTION: PLAYMODE_DIRECT, ATTR_ENTITY_ID: self._select_playMode}
self.hass.services.call(input_select.DOMAIN, input_select.SERVICE_SELECT_OPTION, data)
return self.schedule_update_ha_state()
def set_volume_level(self, volume):
"""Set volume level."""
self._volume = round(volume,2)
data = {ATTR_ENTITY_ID: self._remote_player, 'volume_level': self._volume}
self.hass.services.call(DOMAIN_MP, 'volume_set', data)
self.schedule_update_ha_state()
def volume_up(self, **kwargs):
"""Volume up the media player."""
newvolume = min(self._volume + 0.05, 1)
self.set_volume_level(newvolume)
def volume_down(self, **kwargs):
"""Volume down media player."""
newvolume = max(self._volume - 0.05, 0.01)
self.set_volume_level(newvolume)
def mute_volume(self, mute):
"""Send mute command."""
if self._is_mute == False:
self._is_mute = True
else:
self._is_mute = False
self.schedule_update_ha_state()
data = {ATTR_ENTITY_ID: self._remote_player, "is_volume_muted": self._is_mute}
self.hass.services.call(DOMAIN_MP, 'volume_mute', data)
def async_call_method(self, command=None, parameters=None):
self.log_me('debug','async_call_method')
all_params = []
if parameters:
for parameter in parameters:
all_params.append(parameter)
self.log_me('debug',command)
self.log_me('debug',parameters)
if(command == SERVICE_CALL_RATE_TRACK):
if(len(all_params)>=1):
try:
arg = 'LIKE'
if(all_params[0]==SERVICE_CALL_THUMB_UP):
self.log_me('debug',"rate thumb up")
arg = 'LIKE'
elif(all_params[0]==SERVICE_CALL_THUMB_DOWN):
self.log_me('debug',"rate thumb down")
arg = 'DISLIKE'
elif(all_params[0]==SERVICE_CALL_THUMB_MIDDLE):
self.log_me('debug',"rate thumb middle")
arg = 'INDIFFERENT'
elif(all_params[0]==SERVICE_CALL_TOGGLE_THUMB_UP_MIDDLE):
if('likeStatus' in self._attributes):
if(self._attributes['likeStatus']=='LIKE'):
self.log_me('debug',"rate thumb middle")
arg = 'INDIFFERENT'
else:
self.log_me('debug',"rate thumb up")
arg = 'LIKE'
self._api.rate_song(videoId=self._attributes['videoId'],rating=arg)
self._attributes['likeStatus'] = arg
self.schedule_update_ha_state()
self._tracks[self._next_track_no]['likeStatus'] = arg
except:
self.exc()
elif(command == SERVICE_CALL_INTERRUPT_START):
self._update_remote_player()
#_LOGGER.error(self._remote_player)
t = self.hass.states.get(self._remote_player)
#_LOGGER.error(t)
self._interrupt_data = dict()
if(all(a in t.attributes for a in ('media_position','media_position_updated_at','media_duration'))):
now = datetime.datetime.now(datetime.timezone.utc)
delay = now - t.attributes['media_position_updated_at']
pos = delay.total_seconds() + t.attributes['media_position']
if pos < t.attributes['media_duration']:
self._interrupt_data['pos'] = pos
#_LOGGER.error(self._interrupt_data)
#_LOGGER.error(self._remote_player)
self._interrupt_data['player'] = self._remote_player
#_LOGGER.error(self._interrupt_data)
self.media_stop(player=self._remote_player)
if(self._untrack_remote_player is not None):
try:
#_LOGGER.error("calling untrack")
self._untrack_remote_player()
except:
#_LOGGER.error("untrack failed!!")
pass
elif(command == SERVICE_CALL_INTERRUPT_RESUME):
if(self._interrupt_data['player']):
self._update_remote_player(remote_player=self._interrupt_data['player'])
self._untrack_remote_player = track_state_change(self.hass, self._remote_player, self._sync_player)
self._interrupt_data['player'] = None
self._next_track_no = max(self._next_track_no-1,-1)
self._get_track()
if(self._interrupt_data['pos']):
player = self.hass.states.get(self._remote_player)
if(player.attributes['supported_features'] | SUPPORT_SEEK):
data = {'seek_position': self._interrupt_data['pos'], ATTR_ENTITY_ID: self._remote_player}
self.hass.services.call(DOMAIN_MP, media_player.SERVICE_MEDIA_SEEK, data)
self._interrupt_data['pos'] = None
elif(command == SERVICE_CALL_RELOAD_DROPDOWNS):
self._update_selects()
elif(command == SERVICE_CALL_OFF_IS_IDLE): #needed for the MPD but for nobody else
self._x_to_idle = STATE_OFF
self.log_me('debug',"Setting x_is_idle to State Off")
elif(command == SERVICE_CALL_PAUSED_IS_IDLE): #needed for the Sonos but for nobody else
self._x_to_idle = STATE_PAUSED
self.log_me('debug',"Setting x_is_idle to State Paused")
elif(command == SERIVCE_CALL_DEBUG_AS_ERROR):
self._debug_as_error = True
self.log_me('debug',"Posting debug messages as error until restart")
def exc(self, resp="self"):
"""Print nicely formated exception."""
_LOGGER.error("\n\n============= ytube_music_player Integration Error ================")
if(resp=="self"):
_LOGGER.error("unfortunately we hit an error, please open a ticket at")
_LOGGER.error("https://github.com/KoljaWindeler/ytube_music_player/issues")
else:
_LOGGER.error("unfortunately we hit an error in the sub api, please open a ticket at")
_LOGGER.error("https://github.com/sigma67/ytmusicapi/issues")
_LOGGER.error("and paste the following output:\n")
_LOGGER.error(traceback.format_exc())
_LOGGER.error("\nthanks, Kolja")
_LOGGER.error("============= ytube_music_player Integration Error ================\n\n")
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
self.log_me('debug',"async_browse_media")
self.check_api()
if media_content_type in [None, "library"]:
return await self.hass.async_add_executor_job(library_payload, self._api)
payload = {
"search_type": media_content_type,
"search_id": media_content_id,
}
response = await build_item_response(self.hass, self._api, payload)
if response is None:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
return response
|
def pow2(n):
    """Return n squared."""
    return n ** 2


# PEP 8 (E731) discourages assigning a lambda to a name; a def gives the
# function a proper __name__ and traceback while behaving identically
# (type(...) still prints <class 'function'>).
def pow2_new(n):
    """Return n squared."""
    return n ** 2


def mysum(a, b):
    """Return the sum of a and b."""
    return a + b


val = pow2(10)
val_new = pow2_new(10)
sum_val = mysum(10, 20)
print(val)
print(val_new)
print(sum_val)
print(type(pow2))
print(type(pow2_new))
from base import *
from user import User
from city import City
class Place(BaseModel):
    """Peewee model for a rentable place listing.

    Linked to the User who owns it and the City it is in;
    related_name="places" exposes the reverse accessors user.places
    and city.places.
    NOTE(review): `related_name` is the peewee 2.x keyword (renamed to
    `backref` in peewee 3.x) — confirm the pinned peewee version.
    """
    owner = peewee.ForeignKeyField(User, related_name="places")
    city = peewee.ForeignKeyField(City, related_name="places")
    name = peewee.CharField(128, null=False)  # required, max 128 chars
    description = peewee.TextField()
    number_rooms = peewee.IntegerField(default=0)
    number_bathrooms = peewee.IntegerField(default=0)
    max_guest = peewee.IntegerField(default=0)
    price_by_night = peewee.IntegerField(default=0)
    latitude = peewee.FloatField()
    longitude = peewee.FloatField()
|
from flask import Flask, request, render_template, jsonify
import os
from Model import multiple_prediction_model
app = Flask(__name__)
# not actually a secret since no need for authentication
# NOTE(review): if authentication is ever added, move this to an
# environment variable instead of source control.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
# standard route
# standard route
@app.route('/')
def loadViz():
    """Serve the main visualization page."""
    # NOTE(review): a leading '/' in a Jinja template name is unusual;
    # 'index.html' is the conventional form — confirm this resolves with
    # the project's template loader.
    return render_template('/index.html')
# returns data that specifies what to place into the spec editor
# returns data that specifies what to place into the spec editor
@app.route('/getPrediction', methods=['GET'])
def getPrediction():
    """Run the grade-prediction model on the `scores` query parameters and return JSON."""
    scores = request.args.getlist('scores')
    return jsonify(multiple_prediction_model.predictGrades(scores, 'Model/params_all'))
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    # BUG FIX: the computed `debug` value was ignored and debug=True was
    # hardcoded (enabling the interactive debugger in production). Also,
    # bool(os.environ.get('DEBUG', False)) is True for ANY non-empty
    # string (including "False"), so parse the value explicitly.
    debug = os.environ.get('DEBUG', '').lower() in ('1', 'true', 'yes')
    app.run(host='0.0.0.0', port=port, debug=debug)
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
from testing_utils import testing
from model.suspected_cl_confidence import ConfidenceInformation
from model.suspected_cl_confidence import SuspectedCLConfidence
class VersionedConfigTest(testing.AppengineTestCase):
  """Tests for ConfidenceInformation and SuspectedCLConfidence."""

  def testConfidenceInformationToDict(self):
    # A None score is omitted from the dict form.
    confidence_info = ConfidenceInformation(
        correct=90, total=100, confidence=0.9, score=None)
    expected_dict = {'correct': 90, 'total': 100, 'confidence': 0.9}
    self.assertEqual(expected_dict, confidence_info.ToDict())

  def testCreateNewSuspectedCLConfidenceIfNone(self):
    # Get() lazily creates a default record when none exists.
    self.assertIsNotNone(SuspectedCLConfidence.Get())

  def testUpdateSuspectedCLConfidence(self):
    cl_confidence = SuspectedCLConfidence.Get()
    # BUG FIX: the dates were written as (2016, 10, 06) / (2016, 10, 07);
    # leading-zero decimal literals are a SyntaxError on Python 3 (and
    # octal-valued on Python 2). 6 and 7 are the identical values.
    start_date = datetime.datetime(2016, 10, 6, 0, 0, 0)
    end_date = datetime.datetime(2016, 10, 7, 0, 0, 0)
    compile_heuristic = [
        ConfidenceInformation(correct=100, total=100, confidence=1.0, score=5)
    ]
    compile_try_job = ConfidenceInformation(
        correct=99, total=100, confidence=0.99, score=None)
    compile_heuristic_try_job = ConfidenceInformation(
        correct=98, total=100, confidence=0.98, score=None)
    test_heuristic = [
        ConfidenceInformation(correct=97, total=100, confidence=0.97, score=5)
    ]
    test_try_job = ConfidenceInformation(
        correct=96, total=100, confidence=0.96, score=None)
    test_heuristic_try_job = ConfidenceInformation(
        correct=95, total=100, confidence=0.95, score=None)
    cl_confidence.Update(start_date, end_date, compile_heuristic,
                         compile_try_job, compile_heuristic_try_job,
                         test_heuristic, test_try_job, test_heuristic_try_job)
    # Re-fetch to confirm the update was persisted.
    cl_confidence = SuspectedCLConfidence.Get()
    self.assertEqual(compile_heuristic, cl_confidence.compile_heuristic)
    self.assertEqual(compile_try_job, cl_confidence.compile_try_job)
    self.assertEqual(compile_heuristic_try_job,
                     cl_confidence.compile_heuristic_try_job)
    self.assertEqual(test_heuristic, cl_confidence.test_heuristic)
    self.assertEqual(test_try_job, cl_confidence.test_try_job)
    self.assertEqual(test_heuristic_try_job,
                     cl_confidence.test_heuristic_try_job)
|
# -*- coding: utf-8 -*-
'''
transforms使用
'''
#%%
import os
import numpy as np
import torch
import random
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from my_dataset import RMBDataset
from PIL import Image
from matplotlib import pyplot as plt
# Raw string: '\p' in a normal string is an invalid escape sequence
# (SyntaxWarning on Python 3.12+); the resulting path is byte-identical.
os.chdir(r'E:\pytorch_learning')
def set_seed(seed=1):
    """Seed every RNG source (python, numpy, torch CPU and CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
# set the random seed
set_seed(1)
# hyper-parameter settings
MAX_EPOCH = 10
BATCH_SIZE = 1
LR = 0.01           # learning rate (unused in this visualization demo)
log_interval = 10
val_interval = 1
rmb_label = {'1': 0, '100': 1}  # bill denomination -> class index
def transform_invert(img_, transform_train):
    '''
    Invert the train transform on a tensor so it can be displayed:
    undo Normalize (if present), move channels last, rescale to 0-255
    and convert back to a PIL Image.
    NOTE(review): assumes img_ is a C x H x W float tensor (as produced
    by ToTensor) — confirm at the call site. The mul_/add_ below mutate
    img_ in place.
    '''
    if 'Normalize' in str(transform_train):
        # recover mean/std from the Normalize step inside the Compose
        norm_transform = list(filter(lambda x: isinstance(x, transforms.Normalize), transform_train.transforms))
        mean = torch.tensor(norm_transform[0].mean, dtype = img_.dtype, device = img_.device)
        std = torch.tensor(norm_transform[0].std, dtype = img_.dtype, device = img_.device)
        img_.mul_(std[:, None, None]).add_(mean[:, None, None])
    # C,H,W -> H,W,C for PIL/matplotlib
    img_ = img_.transpose(0, 2).transpose(0, 1)
    img_ = np.array(img_) * 255
    if img_.shape[2] == 3:
        img_ = Image.fromarray(img_.astype('uint8')).convert('RGB')
    elif img_.shape[2] == 1:
        img_ = Image.fromarray(img_.astype('uint8').squeeze())
    else:
        raise Exception('Invalid img shape, expected 1 or 3 in axis 2, but got {0}' .format(img_.shape[2]))
    return img_
# '第二章' ("chapter 2") is a real directory name on disk — do not rename.
split_dir = os.path.join('第二章', 'data', 'rmb_split')
train_dir = os.path.join(split_dir, 'train')
valid_dir = os.path.join(split_dir, 'valid')
# ImageNet channel statistics used for normalization
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    # 1. CenterCrop: crop an image of the given size from the image center
    # transforms.CenterCrop(512),
    # 2. RandomCrop: randomly crop an image of the given size
    #    padding sets the pad amount: a single value a pads a pixels on every side
    #    (a, b) pads a pixels left/right and b pixels top/bottom
    #    (a, b, c, d) pads a, b, c, d pixels on left/top/right/bottom
    #    pad_if_needed pads only when the image is smaller than size
    #    padding_mode is one of: constant, edge, reflect, symmetric
    #    constant takes the pixel value from fill; edge repeats the border pixel
    #    reflect mirrors without repeating the edge pixel: eg [1, 2, 3, 4] -> [3, 2, 1, 2, 3, 4, 3, 2]
    #    symmetric mirrors including the edge pixel: eg [1, 2, 3, 4] -> [2, 1, 1, 2, 3, 4, 4, 3]
    #    fill is the constant pad value (needs padding_mode='constant'), eg (255, 0, 0) is red
    # transforms.RandomCrop(224, padding = 16),
    # transforms.RandomCrop(224, padding = (16, 64)),
    # transforms.RandomCrop(224, padding=16, fill=(255, 0, 0), padding_mode = 'constant'),
    # transforms.RandomCrop(512, pad_if_needed=True), # pad_if_needed=True
    # transforms.RandomCrop(224, padding=64, padding_mode='edge'),
    # transforms.RandomCrop(224, padding=64, padding_mode='reflect'),
    # transforms.RandomCrop(224, padding=64, padding_mode='symmetric'),
    # 3. RandomResizedCrop: crop with random area and aspect ratio, size is the output size
    #    scale is the random crop area ratio, default (0.08, 1)
    #    ratio is the random aspect ratio, default (3/4, 4/3)
    #    interpolation is the resampling method:
    #    PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC
    # transforms.RandomResizedCrop(size = 224, scale = (0.5, 0.5)),
    # 4. FiveCrop / TenCrop: crop 5 images of the given size from the four
    #    corners and the center; TenCrop mirrors those 5 (horizontally or
    #    vertically) for 10 images. vertical_flip selects vertical mirroring.
    # transforms.FiveCrop(112),
    # transforms.Lambda(lambda crops: torch.stack([(transforms.ToTensor()(crop)) for crop in crops])),
    # transforms.TenCrop(112, vertical_flip = False),
    # transforms.Lambda(lambda crops: torch.stack([(transforms.ToTensor()(crop)) for crop in crops])),
    # 5. RandomHorizontalFlip / RandomVerticalFlip: flip with probability p
    # transforms.RandomHorizontalFlip(p = 1),
    # transforms.RandomVerticalFlip(p = 1),
    # 6. RandomRotation: rotate the image randomly; degrees is the rotation angle,
    #    resample the resampling method, expand whether to enlarge the image
    # transforms.RandomRotation(90),
    # transforms.RandomRotation((90), expand = True),
    # transforms.RandomRotation(30, center = (0, 0)),
    #transforms.RandomRotation(30, center = (0, 0), expand = True),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])
valid_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std)
])
# build the custom Dataset instances
train_data = RMBDataset(data_dir = train_dir, transform = train_transform)
valid_data = RMBDataset(data_dir = valid_dir, transform = valid_transform)
# build the DataLoaders
train_loader = DataLoader(dataset = train_data, batch_size = BATCH_SIZE, shuffle = True)
valid_loader = DataLoader(dataset = valid_data, batch_size = BATCH_SIZE)
# "training" loop: only visualizes each augmented sample, no model is trained
for epoch in range(MAX_EPOCH):
    for i, data in enumerate(train_loader):
        inputs, labels = data
        img_tensor = inputs[0, ...]  # first sample of the batch (C x H x W)
        img = transform_invert(img_tensor, train_transform)  # undo ToTensor/Normalize for display
        plt.imshow(img)
        plt.show()
        plt.pause(0.5)
        plt.close()
import sqlite3 as sq
DB_NAME = "ipl.db"

# Rebuild POINTS_TABLE from the TEAM and MATCH tables: 2 points per win,
# 1 point each for a tie or abandoned match, plus a crude net-run-rate.
#
# BUG FIX: `conn` is predefined so the `finally` clause cannot raise
# NameError when sq.connect() itself fails (previously `if (conn)` would
# reference an unbound name in that case).
conn = None
try:
    conn = sq.connect(DB_NAME)
    cur = conn.cursor()
    DELETE = "DROP TABLE IF EXISTS POINTS_TABLE;"
    cur.execute(DELETE)
    conn.commit()
    CREATE = """
    CREATE TABLE IF NOT EXISTS POINTS_TABLE
    (
        team_id INTEGER PRIMARY KEY,
        team_name TEXT,
        points INTEGER DEFAULT 0,
        nrr DECIMAL DEFAULT 0
    );
    """
    cur.execute(CREATE)
    conn.commit()
    GET_TEAMS = "SELECT team_id, team_name FROM TEAM"
    cur.execute(GET_TEAMS)
    teams = cur.fetchall()
    TEAM_POINTS = {team[0]: 0 for team in teams}
    TEAM_NRR = {team[0]: 0.0 for team in teams}
    GET_DATA = "SELECT * FROM MATCH"
    cur.execute(GET_DATA)
    rows = cur.fetchall()
    for row in rows:
        team1 = int(row[2])
        team2 = int(row[3])
        winner = row[9]     # team_id as text, or the literal string "NULL"
        margin = row[-1]
        win_type = row[12]
        if win_type.lower() != "tie":
            if winner != "NULL":
                winner = int(winner)
                TEAM_POINTS[winner] += 2
            else:
                # Match abandoned: one point each
                TEAM_POINTS[team1] += 1
                TEAM_POINTS[team2] += 1
        else:
            TEAM_POINTS[team1] += 1
            TEAM_POINTS[team2] += 1
        nrr = 0
        if margin != "NULL":
            margin = int(margin)
            # crude NRR estimate: runs margin over 20 overs, or wickets over 10
            if win_type == "runs":
                nrr = margin / 20
            elif win_type == "wickets":
                nrr = margin / 10
            else:
                nrr = 0
        # NRR changes only when there is a winner
        if team1 == winner:
            TEAM_NRR[team1] += nrr
            TEAM_NRR[team2] -= nrr
        elif team2 == winner:
            TEAM_NRR[team1] -= nrr
            TEAM_NRR[team2] += nrr
    DATA = []
    for team in teams:
        DATA.append((team[0], team[1], TEAM_POINTS[team[0]], TEAM_NRR[team[0]]))
    # order by points, then NRR, descending
    DATA = sorted(DATA, key=lambda val: (val[2], val[3]), reverse=True)
    ADD_DATA = "INSERT INTO POINTS_TABLE (team_id, team_name, points, nrr) VALUES (?, ?, ?, ?)"
    cur.executemany(ADD_DATA, DATA)
    conn.commit()
    # print the final standings as CSV lines (unpack instead of index loops)
    for team_id, team_name, points, nrr_val in DATA:
        print("{},{},{},{}".format(team_id, team_name, points, nrr_val))
    cur.close()
# Error handling
except sq.Error as error:
    print("Errors: ", error)
# Close the connection
finally:
    if conn is not None:
        conn.close()
|
# APPENDING OPERATION ON LISTS
# demo 1: appending a single string element to a list
print("\nAppending using list as a element:\n")
list_A = ['DIGITAL', 'SIGNAL']
print("list_A before appendig", list_A)
list_A.append('PROCESSING')
print("list_A after appendig", list_A)

# demo 2: appending one list to another — the whole second list becomes
# a single nested element of the first
print("\nAppending using two lists\n")
list1 = ['digital', 'signal', 'processing']
print("list1 before appendig:", list1)
list2 = ['-four periods per week']
print("list2 before appendig:", list2)
list1.append(list2)
print("list after appendig:")
print(list1)
|
# -*- coding: utf-8 -*-
import time, sys, cPickle, os, socket
from pylearn2.utils import serial
from itertools import izip
from pylearn2.utils import safe_zip
from collections import OrderedDict
from pylearn2.utils import safe_union
import numpy as np
import scipy.sparse as spp
import theano.sparse as S
from theano.gof.op import get_debug_values
from theano.printing import Print
from theano import function
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano import tensor as T
import theano
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.models.model import Model
from pylearn2.training_algorithms.sgd import SGD, MomentumAdjustor
from pylearn2.termination_criteria import MonitorBased, And, EpochCounter
from pylearn2.train import Train
from pylearn2.costs.cost import SumOfCosts, Cost, MethodCost
from pylearn2.costs.mlp import WeightDecay, L1WeightDecay
from pylearn2.models.mlp import MLP, ConvRectifiedLinear, \
RectifiedLinear, Softmax, Sigmoid, Linear, Tanh, max_pool_c01b, \
max_pool, Layer
from pylearn2.models.maxout import Maxout, MaxoutConvC01B
from pylearn2.monitor import Monitor
from pylearn2.space import VectorSpace, Conv2DSpace, CompositeSpace, Space
from pylearn2.train_extensions.best_params import MonitorBasedSaveBest
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets import preprocessing as pp
from layers import NoisyRELU, GaussianRELU, My_Softmax, My_MLP, My_Tanh
from dataset import My_CIFAR10
from faceEmo import FaceEmo
from pylearn2_objects import *
#from load_model import compute_nll
from jobman import DD, expand
from pylearn2.datasets.mnist import MNIST
from pylearn2.datasets.svhn import SVHN
class HPS:
def __init__(self,
state,
base_channel_names = ['train_objective'],
save_prefix = "model_",
cache_dataset = True):
self.cache_dataset = cache_dataset
self.dataset_cache = {}
self.state = state
self.mbsb_channel_name = self.state.term_array.early_stopping.save_best_channel
self.base_channel_names = base_channel_names
self.save_prefix = save_prefix
# TODO store this in data for each experiment or dataset
def run(self):
(model, learner, algorithm) \
= self.get_config()
# try:
print 'learning'
learner.main_loop()
# except Exception, e:
# print e
print 'End of model training'
def get_config(self):
# dataset
self.load_dataset()
# model
self.load_model()
# monitor:
self.setup_monitor()
# training algorithm
algorithm = self.get_train()
# extensions
extensions = self.get_extensions()
# channels
#self.setup_channels()
# learner
learner = Train(dataset=self.train_ddm,
model=self.model,
algorithm=algorithm,
extensions=extensions)
return (self.model, learner, algorithm)
def load_dataset(self):
# TODO: we might need other variables for identifying what kind of
# extra preprocessing was done such as features product and number
# of features kept based on MI.
#base_path = get_data_path(self.state)
#self.base_path = base_path
#import pdb
#pdb.set_trace()
if self.state.dataset == 'mnist':
self.test_ddm = MNIST(which_set='test', one_hot=True)
dataset = MNIST(which_set='train', shuffle=True, one_hot=True)
train_X, valid_X = np.split(dataset.X, [50000])
train_y, valid_y = np.split(dataset.y, [50000])
self.train_ddm = DenseDesignMatrix(X=train_X, y=train_y)
self.valid_ddm = DenseDesignMatrix(X=valid_X, y=valid_y)
elif self.state.dataset == 'svhn':
self.train_ddm = SVHN(which_set='splitted_train')
self.test_ddm = SVHN(which_set='test')
self.valid_ddm = SVHN(which_set='valid')
elif self.state.dataset == 'cifar10':
self.train_ddm = My_CIFAR10(which_set='train', one_hot=True)
self.test_ddm = None
self.valid_ddm = My_CIFAR10(which_set='test', one_hot=True)
elif self.state.dataset == 'faceEmo':
self.train_ddm = FaceEmo(which_set='train', one_hot=True)
self.test_ddm = None
self.valid_ddm = FaceEmo(which_set='test', one_hot=True)
if self.train_ddm is not None:
self.nvis = self.train_ddm.X.shape[1]
self.nout = self.train_ddm.y.shape[1]
print "nvis, nout :", self.nvis, self.nout
self.ntrain = self.train_ddm.X.shape[0]
print "ntrain :", self.ntrain
if self.valid_ddm is not None:
self.nvalid = self.valid_ddm.X.shape[0]
print "nvalid :", self.nvalid
if self.test_ddm is not None:
self.ntest = self.test_ddm.X.shape[0]
print "ntest :", self.ntest
def load_model(self):
model_class = self.state.model_class
fn = getattr(self, 'get_model_'+model_class)
self.model = fn()
return self.model
def get_model_mlp(self):
self.dropout = False
self.input_include_probs = {}
self.input_scales = {}
self.weight_decay = False
self.weight_decays = {}
self.l1_weight_decay = False
self.l1_weight_decays = {}
nnet_layers = self.state.layers
input_space_id = self.state.input_space_id
nvis = self.nvis
self.batch_size = self.state.batch_size
# TODO: add input_space as a config option.
input_space = None
# TODO: top_view always False for the moment.
self.topo_view = False
assert nvis is not None
layers = []
for i,layer in enumerate(nnet_layers.values()):
layer = expand(layer)
layer = self.get_layer(layer, i)
layers.append(layer)
# create MLP:
print layers
model = MLP(layers=layers,input_space=input_space,nvis=nvis,
batch_size=self.batch_size)
self.mlp = model
return model
def get_layer(self, layer, layer_id):
layer_class = layer.layer_class
layer_name = layer.layer_name
dropout_scale = layer.dropout_scale
dropout_prob = layer.dropout_probability
weight_decay = layer.weight_decay
l1_weight_decay = layer.l1_weight_decay
fn = getattr(self, 'get_layer_'+layer_class)
if layer_name is None:
layer_name = layer_class+str(layer_id)
layer.layer_name = layer_name
layer = fn(layer)
# per-layer cost function parameters:
if (dropout_scale is not None):
self.dropout = True
self.input_scales[layer_name] = dropout_scale
if (dropout_prob is not None):
self.dropout = True
self.input_include_probs[layer_name] = (1. - dropout_prob)
if (weight_decay is not None):
self.weight_decay = False
self.weight_decays[layer_name] = weight_decay
if (l1_weight_decay is not None):
self.l1_weight_decay = False
self.l1_weight_decays[layer_name] = l1_weight_decay
return layer
def get_layer_sigmoid(self, layer):
return Sigmoid(layer_name=layer.layer_name,dim=layer.dim,irange=layer.irange,
istdev=layer.istdev,sparse_init=layer.sparse_init,
sparse_stdev=layer.sparse_stdev, include_prob=layer.include_prob,
init_bias=layer.init_bias,W_lr_scale=layer.W_lr_scale,
b_lr_scale=layer.b_lr_scale,max_col_norm=layer.max_col_norm,
max_row_norm=layer.max_row_norm)
def get_layer_tanh(self, layer):
return My_Tanh(layer_name=layer.layer_name,dim=layer.dim,irange=layer.irange,
istdev=layer.istdev,sparse_init=layer.sparse_init,
sparse_stdev=layer.sparse_stdev, include_prob=layer.include_prob,
init_bias=layer.init_bias,W_lr_scale=layer.W_lr_scale,
b_lr_scale=layer.b_lr_scale,max_col_norm=layer.max_col_norm,
max_row_norm=layer.max_row_norm)
def get_layer_rectifiedlinear(self, layer):
# TODO: left_slope is set to 0.0 It should be set by the user!
layer.left_slope = 0.0
return RectifiedLinear(layer_name=layer.layer_name,dim=layer.dim,irange=layer.irange,
istdev=layer.istdev,sparse_init=layer.sparse_init,
sparse_stdev=layer.sparse_stdev, include_prob=layer.include_prob,
init_bias=layer.init_bias,W_lr_scale=layer.W_lr_scale,
b_lr_scale=layer.b_lr_scale,max_col_norm=layer.max_col_norm,
max_row_norm=layer.max_row_norm,
left_slope=layer.left_slope,use_bias=layer.use_bias)
def get_layer_softmax(self, layer):
return My_Softmax(layer_name=layer.layer_name,n_classes=layer.dim,irange=layer.irange,
istdev=layer.istdev,sparse_init=layer.sparse_init,
init_bias_target_marginals=layer.init_bias, W_lr_scale=layer.W_lr_scale,
b_lr_scale=layer.b_lr_scale, max_col_norm=layer.max_col_norm,
max_row_norm=layer.max_row_norm)
def get_layer_noisyRELU(self, layer):
return NoisyRELU(
dim=layer.dim,
layer_name=layer.layer_name,
irange=layer.irange,
sparse_init=layer.sparse_init,
W_lr_scale=layer.W_lr_scale,
b_lr_scale=layer.b_lr_scale,
mask_weights = None,
max_row_norm=layer.max_row_norm,
max_col_norm=layer.max_col_norm,
use_bias=True,
noise_factor=layer.noise_factor,
desired_active_rate=layer.desired_active_rate,
adjust_threshold_factor=layer.adjust_threshold_factor
)
def get_layer_convRectifiedLinear(self, layer):
return ConvRectifiedLinear(
layer_name=layer.layer_name,
output_channels=layer.output_channels,
irange=layer.irange,
kernel_shape=layer.kernel_shape,
pool_shape=layer.pool_shape,
pool_stride=layer.pool_stride,
max_kernel_norm=layer.max_kernel_norm)
def get_layer_gaussianRELU(self, layer):
return GaussianRELU(
dim=layer.dim,
layer_name=layer.layer_name,
irange=layer.irange,
sparse_init=layer.sparse_init,
W_lr_scale=layer.W_lr_scale,
b_lr_scale=layer.b_lr_scale,
mask_weights = None,
max_row_norm=layer.max_row_norm,
max_col_norm=layer.max_col_norm,
use_bias=True,
desired_active_rate=layer.desired_active_rate,
adjust_threshold_factor=layer.adjust_threshold_factor,
noise_std=layer.noise_std
)
def setup_monitor(self):
if self.topo_view:
print "topo view"
self.minibatch = T.as_tensor_variable(
self.valid_ddm.get_batch_topo(self.batch_size),
name='minibatch'
)
else:
print "design view"
batch = self.valid_ddm.get_batch_design(self.batch_size)
if isinstance(batch, spp.csr_matrix):
print "sparse2"
self.minibatch = self.model.get_input_space().make_batch_theano()
print type(self.minibatch)
else:
self.minibatch = T.as_tensor_variable(
self.valid_ddm.get_batch_design(self.batch_size),
name='minibatch'
)
self.target = T.matrix('target')
self.monitor = Monitor.get_monitor(self.model)
self.log_channel_names = []
self.log_channel_names.extend(self.base_channel_names)
# self.monitor.add_dataset(self.valid_ddm, self.state.train_iteration_mode,
# self.batch_size)
# if self.test_ddm is not None:
# self.monitor.add_dataset(self.test_ddm, self.state.train_iteration_mode,
# self.batch_size)
def get_train(self):
train_class = self.state.train_class
fn = getattr(self, 'get_train_'+train_class)
return fn()
def get_train_sgd(self):
cost = MethodCost('cost_from_X')
#cost = self.get_costs()
num_train_batch = (self.ntrain/self.batch_size)
print "num training batches:", num_train_batch
termination_criterion = self.get_terminations()
monitoring_dataset = {}
for dataset_id in self.state.monitoring_dataset:
if dataset_id == 'test' and self.test_ddm is not None:
monitoring_dataset['test'] = self.test_ddm
elif dataset_id == 'valid' and self.valid_ddm is not None:
monitoring_dataset['valid'] = self.valid_ddm
else:
monitoring_dataset = None
return SGD( learning_rate=self.state.learning_rate,
batch_size=self.state.batch_size,
cost=cost,
batches_per_iter=num_train_batch,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
init_momentum=self.state.init_momentum,
train_iteration_mode=self.state.train_iteration_mode)
def get_terminations(self):
if 'term_array' not in self.state:
return None
terminations = []
for term_obj in self.state.term_array.values():
fn = getattr(self, 'get_term_' + term_obj.term_class)
terminations.append(fn(term_obj))
if len(terminations) > 1:
return And(terminations)
return terminations[0]
def get_term_epochcounter(self, term_obj):
return EpochCounter(term_obj.max_epochs)
def get_term_monitorbased(self, term_obj):
print 'monitor_based'
return MonitorBased(
prop_decrease=term_obj.proportional_decrease,
N=term_obj.max_epochs,
channel_name=term_obj.channel_name
)
def get_extensions(self):
if 'ext_array' not in self.state:
return []
extensions = []
for ext_obj in self.state.ext_array.values():
fn = getattr(self, 'get_ext_' + ext_obj.ext_class)
extensions.append(fn(ext_obj))
# monitor based save best
print 'save best channel', self.mbsb_channel_name
if self.mbsb_channel_name is not None:
self.save_path = self.save_prefix + str(self.state.config_id) + "_optimum.pkl"
extensions.append(MonitorBasedSaveBest(
channel_name = self.mbsb_channel_name,
save_path = self.save_path
)
)
return extensions
def get_ext_exponentialdecayoverepoch(self, ext_obj):
return ExponentialDecayOverEpoch(
decay_factor=ext_obj.decay_factor,
min_lr_scale=ext_obj.min_lr_scale
)
def get_ext_momentumadjustor(self, ext_obj):
return MomentumAdjustor(
final_momentum=ext_obj.final_momentum,
start=ext_obj.start_epoch,
saturate=ext_obj.saturate_epoch
)
'''
def get_data_path(state):
# TODO: we might need other variables for identifying what kind of
# extra preprocessing was done such as features product and number
# of features kept based on MI.
task = state.task
pack = state.pack
dataset = state.dataset
import os
os.environ['BASE_MQ_DATA_PATH'] = '/data/lisa/data/'
# Get mq cached data path from environment variable.
base_mq_data_path = os.environ.get('BASE_MQ_DATA_PATH')
if base_mq_data_path is None:
raise NotImplementedError('The environment variable BASE_MQ_DATA_PATH was not found.')
if task == 'svhn':
base_path = os.path.join(base_mq_data_path, '%s/' % SVHN)
if task == 'fun':
base_path = os.path.join(base_mq_data_path, "%s/cached/%s/%s"
%(pack, task, dataset))
elif task == 'diff':
base_path = os.path.join(base_mq_data_path, "%s/cached/%s"
%(pack, dataset))
else:
raise NotImplementedError('task=%s not supported yet!'%task)
return base_path
'''
|
import sys
# NOTE(review): `task`, `core`, `session` and `Repository` are not imported
# in this file; presumably they are injected by the build framework that
# executes these task definitions — confirm.
@task
def source():
    """Generates source (development) version of test runner"""
    core.test_source()
@task
def build():
    """Generates build (deployment) version of test runner"""
    core.test_build()
@task
def clean():
    """Cleans up project environment"""
    session.clean()
    Repository.clean()
@task
def distclean():
    """Cleans up project environment with removing all non-repository files"""
    session.clean()
    Repository.distclean()
@task
def test(target="source", tool="phantom", browsers=None):
    """Automatically executes tests in either PhantomJS, NodeJS or Testem CI"""
    retval = core.test(target, tool, browsers)
    # propagate a failing test run as the process exit code
    if retval > 0:
        sys.exit(retval)
|
#! /usr/bin/env python
import rospy
import numpy as np
import tf
import tf2_ros
import sys
import math
from geometry_msgs.msg import Pose, TransformStamped, PoseStamped, Vector3, Quaternion, Transform
from visualization_msgs.msg import Marker, MarkerArray
from nav_msgs.msg import Odometry
from std_msgs.msg import Header
MAX = 200  # number of pose samples to average before broadcasting the transform
class apriltagsglobalframe():
    """Average MAX `global_pose` samples, derive the `global` -> `map`
    transform via the camera frame, then broadcast it periodically."""
    def __init__(self):
        self.listener = tf.TransformListener()
        self.broadcaster = tf.TransformBroadcaster()
        self.target = rospy.get_param("~target", 1)
        rospy.loginfo("target %d" % self.target)
        self.trans = np.zeros((MAX, 3)) # translation placeholder
        self.quat = np.zeros((MAX, 4)) # orientation placeholder
        self.count = 0 # time counter
        self.collected = False
        self.trans_mean = None
        self.quat_mean = None
        self.publishing = False
        self.translation = None
        self.quaternion = None
        self.timer = None
        self.sub_pose = rospy.Subscriber("global_pose",PoseStamped,self.cb_pose,queue_size=1)
    def pub_tf(self,event):
        # broadcast transformation
        self.broadcaster.sendTransform(self.translation, self.quaternion, rospy.Time.now(), "global", "map")
        rospy.loginfo("BROADCASTING")
    def cb_pose(self, msg):
        """Accumulate pose samples; once MAX are collected, average them and
        compute the final transform, then switch to periodic broadcasting."""
        if self.count < MAX:
            # print "fetching"
            trans_g = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]
            rot_g = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w]
            self.trans[self.count] = np.array(trans_g)
            self.quat[self.count] = np.array(rot_g)
            self.count = self.count + 1
            rospy.loginfo(self.count*100.0/MAX)  # progress in percent
        else:
            if not self.collected:
                # take mean values
                # NOTE(review): component-wise quaternion averaging is only a
                # valid approximation when all samples are close together.
                self.trans_mean = np.mean(self.trans, axis=0)
                self.quat_mean = np.mean(self.quat, axis=0)
                self.quat_mean = self.quat_mean/np.linalg.norm(self.quat_mean) # normalization
                self.collected = True
            # build homogeneous transformation from `slam_map` to `GATE(num)`
            homo_mat = tf.transformations.concatenate_matrices(tf.transformations.translation_matrix(self.trans_mean), tf.transformations.quaternion_matrix(self.quat_mean))
            # rot_mat = tf.transformations.euler_matrix(-np.pi/2, np.pi/2, 0) # make X-axis into the turnel and Z-axis upward
            global2camera = tf.transformations.inverse_matrix(homo_mat)
            try:
                # self.listener.waitForTransform('slam_map', 'GATE'+str(x), rospy.Time(0), rospy.Duration(1.0))
                (trans_c, rot_c) = self.listener.lookupTransform('map', 'camera_middle_link', rospy.Time(0))
                camera2map = tf.transformations.concatenate_matrices(tf.transformations.translation_matrix(trans_c), tf.transformations.quaternion_matrix(rot_c))
                # chain: global -> camera -> map
                global2map = np.matmul(camera2map,global2camera)
                self.translation = (global2map[0, 3], global2map[1, 3], global2map[2, 3]) # build translation tuple
                euler_ = tf.transformations.euler_from_matrix(global2map) # convert quaternion to euler
                self.quaternion = tf.transformations.quaternion_from_euler(0, 0, euler_[2]) # manual overwrite roll, pitch to 0
                self.sub_pose.unregister()  # stop collecting once the transform is fixed
                # start publish tf
                self.timer = rospy.Timer(rospy.Duration(0.05),self.pub_tf)
            except (tf.Exception, tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.logerr("faile to catch tf camera 2 map")
    def on_shutdown(self):
        # nothing to clean up; rospy tears down timers and subscribers
        pass
if __name__ == "__main__":
    # standard ROS node entry point: init, construct, register shutdown hook, spin
    rospy.init_node('global_frame')
    node = apriltagsglobalframe()
    rospy.on_shutdown(node.on_shutdown)
    rospy.spin()
|
import argparse
import pickle
import spacy
from dataset import process_20newsgroup_dataset
def main(args):
    """Process the 20-newsgroups corpus with spaCy and pickle the result.

    NOTE(review): the "en" shortcut is spaCy 2.x-era; newer spaCy requires
    an explicit model name such as "en_core_web_sm" — confirm the pinned
    spaCy version.
    """
    nlp = spacy.load("en")
    train_dataset, test_dataset = process_20newsgroup_dataset(nlp)
    print("-- Saving processed dataset to: {}".format(args.dataset_file))
    with open(args.dataset_file, mode="wb") as out_file:
        pickle.dump({
            "vocab": nlp.vocab.strings,
            "train": train_dataset,
            "test": test_dataset
        }, out_file)
if __name__ == "__main__":
    # CLI: single positional argument naming the output pickle file
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("dataset_file", help="Destination file name for the processed dataset")
    main(arg_parser.parse_args())
|
import numpy as np
import lttb
def test_downsampling():
    """Downsampling the sample time series to 100 points yields a (100, 2) array."""
    raw = np.genfromtxt('tests/timeseries.csv', delimiter=',', names=True)
    series = np.column_stack((raw['X'], raw['Y']))
    reduced = lttb.downsample(series, 100)
    assert reduced.shape == (100, 2)
|
## vamos agora, por fim, vermos como funciona o while.
## While significa 'enquanto', e trabalha com valores
## booleanos, assim como o if. Vejamos sua estrutura:
a = 5
while (a > 0):
    print(a)
    a -= 1
## --------- RUN THE CODE --------------
## the structure is the following: WHILE ('a' is greater
## than 0), print() 'a', and perform the operation: 'a'
## equals 'a - 1'.
## remember that 'a > 0' is a logical operation whose
## result is True or False. When it becomes False,
## the loop ends. So if, for example, I want to build an
## options menu that repeats until the user decides to
## quit, I can simply do this:
opcao = 3
while (opcao != '0'):
    opcao = input('Qual sua escolha? \n1 - Opção 1 \n2 - Opção 2 \n0 - Sair\n\n')
    if (opcao == '1'):
        print('Você escolheu a opção 1')
    elif (opcao == '2'):
        print('Você escolheu a opção 2')
    elif (opcao != '0'):
        print('Opção inválida')
## Now, a small challenge to practice your understanding:
## write a program that works like a calculator. Build a menu with the
## following options: 1 - addition, 2 - subtraction, 3 - division,
## 4 - multiplication, 5 - exponentiation and 0 - quit. When the user picks
## an option, ask for the two numbers of the operation, and at the end of
## each operation print() the result.
## --------------- WRITE YOUR CODE BELOW AND RUN -----------------
|
from sqlalchemy import Column, Integer, String, Boolean
from api.models import Base
class User(Base):
    """SQLAlchemy model for an application user account."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(255), unique=True)
    first_name = Column(String(255))
    last_name = Column(String(255))
    password = Column(String(255))  # NOTE(review): appears to store the raw value — confirm it is hashed upstream
    is_admin = Column(Boolean, default=False)
    def __repr__(self):
        # BUG FIX: the closing ')' was misplaced before `is_admin`, yielding
        # "<User(... last_name=X), is_admin=Y>" instead of a well-formed repr.
        return "<User(username={0}, first_name={1}, last_name={2}, is_admin={3})>".format(
            self.username, self.first_name, self.last_name, self.is_admin
        )
class UserRepository:
    """Data-access helper around a SQLAlchemy session for User rows."""
    def __init__(self, session):
        # session: an active SQLAlchemy Session used for all queries
        self.session = session
    def find_by_username(self, username: str) -> User:
        """Return the User with the given username, or None if the lookup fails.

        NOTE(review): the broad `except Exception` also hides programming
        errors; consider catching sqlalchemy.orm.exc.NoResultFound only.
        The annotation says User but None is also returned.
        """
        try:
            return self.session.query(User).filter(User.username == username).one()
        except Exception as e:
            print(e)
            return None
    def save(self, user: User):
        """Add the user to the session and commit immediately."""
        self.session.add(user)
        self.session.commit()
# -*- coding: utf-8 -*-
import pygame
import json
from classes import *
from loaders import *
TILE_W, TILE_H = 40, 26  # tile size in pixels (width, height)
class Game(object):
    def __init__(self, screen, movekeys):
        """Set up rendering surfaces, audio, input helpers and the tile map.

        screen   -- the pygame display surface to draw on
        movekeys -- key bindings consumed by the input handler
        """
        self.movekeys = movekeys
        self.screen = screen
        self.width, self.height = screen.get_width(), screen.get_height()
        self.background = pygame.Surface((self.width, self.height))
        self.background.fill((0, 0, 0))  # solid black backdrop
        self.emptytile = load_image("white.png", tp=True)  # fallback tile image
        self.emptytile.convert_alpha()
        self.musicplayer = MusicPlayer()
        self.mouse = Mouse(pygame.Rect(0, 0, 1, 1))
        self.player = pygame.sprite.GroupSingle()
        with open('maps/test.json') as data_file:
            self.data = json.load(data_file)  # tile map definition
        self.animated = [0, 0, 1, 0, 0]  # per-texture flags: 1 = animated sprite
        self.centerx, self.centery = 0, 0  # player map position (screen center)
        self.texture = load_sliced_sprites(
            TILE_W+12, # width
            TILE_H, # height
            self.data["texture"], # texture file
            self.animated # list of animated sprites
        ) # load textures
""" main function """
def main(self):
pygame.mixer.music.play() # Start playing music
self.draw_map2((9, 9))
while True:
self.musicplayer.next() # See if playback is finished
self.input() # Get user input.
try:
self.ground.update(pygame.time.get_ticks())
except:
pass
self.draw() # Draw sprites
pygame.time.Clock().tick(60) # Sets the fps of the game
def draw_map2(self, move):
self.centerx += move[0] # x-position of player (center of the screen)
self.centery += move[1] # y-position of player (center of the screen)
rows = self.data["rows"]
texture = self.texture
textmap = []
tilemap = []
horizontal_tiles = self.width/TILE_W # amount of x tiles to draw
vertical_tiles = (self.width/TILE_H+2)/2 # amount of y tiles to draw
xy = [self.centerx, self.centery]
self.ground = Ground(horizontal_tiles, vertical_tiles, rows, TILE_W, TILE_H, xy)
x, y = 0, vertical_tiles*2 # iterators for dicts
for i in xrange(
self.centery+vertical_tiles, # bottom border
self.centery-vertical_tiles, # top border
-1 # reversed direction
):
line = ""
row = []
tileID = []
for j in xrange(
self.centerx - horizontal_tiles, # left border
self.centerx + horizontal_tiles # right border
):
vert = (vertical_tiles*2-y)*13 # vertical offset
hori = horizontal_tiles/4 * TILE_W # horizontal offset
x_pos = x * TILE_W - vert - hori
y_pos = self.width-(y - 1)*TILE_H
image = self.emptytile # empty tile if no tile found
item = "."
im = -1
iddd = None
if 0 <= i and i < len(rows):
if 0 <= j and j < len(rows[i]["tiles"]):
if "texture" in rows[i]["tiles"][j]:
item = rows[i]["tiles"][j]["texture"]
image = texture[item]
im = item
else:
item = rows[i]["default_texture"]
im = item
image = texture[item]
iddd = (i, j)
tileID.append(iddd)
if i == self.centery and j == self.centerx:
image = texture[0]
im = 0
item = "P"
line += str(item)
if im >= 0:
if self.animated[im]:
self.ground.add(AnimatedTile(
x_pos,
y_pos,
i, j,
image
))
else:
self.ground.add(Tile(x_pos, y_pos, i, j, image))
else:
self.ground.add(Tile(x_pos, y_pos, i, j, image))
x += 1
row.append((x_pos, y_pos))
textmap.append(str(line))
tilemap.append(tileID)
x = 0
y -= 1
def draw_map(self, move):
self.centerx += move[0] # x-position of player (center of the screen)
self.centery += move[1] # y-position of player (center of the screen)
rows = self.data["rows"]
texture = self.texture
textmap = []
tilemap = []
horizontal_tiles = self.width/TILE_W # amount of x tiles to draw
vertical_tiles = (self.width/TILE_H+2)/2 # amount of y tiles to draw
self.ground = Ground(horizontal_tiles, vertical_tiles, self.data, TILE_W, TILE_H)
x, y = 0, vertical_tiles*2 # iterators for dicts
for i in xrange(
self.centery+vertical_tiles, # bottom border
self.centery-vertical_tiles, # top border
-1 # reversed direction
):
line = ""
row = []
tileID = []
for j in xrange(
self.centerx - horizontal_tiles, # left border
self.centerx + horizontal_tiles # right border
):
vert = (vertical_tiles*2-y)*13 # vertical offset
hori = horizontal_tiles/4 * TILE_W # horizontal offset
x_pos = x * TILE_W - vert - hori
y_pos = self.width-(y - 1)*TILE_H
image = self.emptytile # empty tile if no tile found
item = "."
im = -1
iddd = None
if 0 <= i and i < len(rows):
if 0 <= j and j < len(rows[i]["tiles"]):
if "texture" in rows[i]["tiles"][j]:
item = rows[i]["tiles"][j]["texture"]
image = texture[item]
im = item
else:
item = rows[i]["default_texture"]
im = item
image = texture[item]
iddd = (i, j)
tileID.append(iddd)
if i == self.centery and j == self.centerx:
image = texture[0]
im = 0
item = "P"
line += str(item)
if im >= 0:
if self.animated[im]:
self.ground.add(AnimatedTile(
x_pos,
y_pos,
i, j,
image
))
else:
self.ground.add(Tile(x_pos, y_pos, i, j, image))
else:
self.ground.add(Tile(x_pos, y_pos, i, j, image))
x += 1
row.append((x_pos, y_pos))
textmap.append(str(line))
tilemap.append(tileID)
x = 0
y -= 1
def input(self):
self.mouse.rect.center = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == QUIT:
exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
# get keyboard presses, better for multiple keys at once
pressed_keys = pygame.key.get_pressed()
x, y = 0, 0
for key, value in self.movekeys.items():
if pressed_keys[key]:
x += value[0]
y += value[1]
if x != 0 or y != 0:
self.ground.move(x, y)
if event.type == USEREVENT+1: # use this to catch timers
pass
def draw(self):
self.screen.blit(self.background, (0, 0))
self.ground.draw(self.screen)
pygame.display.update()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 11:28:15 2015
@author: Kapil Chandra
"""
import xlrd
from selenium.webdriver import Firefox
import time
from selenium import webdriver

# SECURITY NOTE(review): a real phone number and password are hard-coded below —
# move them to environment variables or a config file before sharing this script.
# NOTE(review): the absolute XPaths below break whenever the site layout changes.

# opening the xl sheet which is having all the numbers
workbook = xlrd.open_workbook('sms_sender.xlsx')
# going into the particular sheet of the xl file
worksheet = workbook.sheet_by_name('Sheet1')
# number of data rows in the sheet (excluding the header row)
num_rows = worksheet.nrows - 1
# opening firefox browser
driver = webdriver.Firefox()
# going into way2sms portal
driver.get("http://www.way2sms.com")
# the website shows an advertisement for a few seconds, so wait it out
time.sleep(20)
# filling my mobile number and password to login
inputElement = driver.find_element_by_name("username")
inputElement.send_keys("8500409855")
inputElement = driver.find_element_by_name("password")
inputElement.send_keys("28091997")
inputElement.submit()
# the site is slow, so wait a while for the dashboard to load
time.sleep(5)
# the below line presses the "Send Free SMS" button
driver.find_element_by_xpath("//input[@class='button br3'][@value='Send Free SMS']").click()
# this path locates "send sms" and presses the button
driver.find_element_by_xpath("/html/body/div[7]/div[1]/ul/li[2]/a").click()
# the compose form lives inside an iframe, so switch into it
driver.switch_to.frame(driver.find_element_by_id('frame'))
# filling number and message for an initial test send to myself
inputElement = driver.find_element_by_xpath("/html/body/div[3]/div[1]/form/div[2]/div[1]/input")
inputElement.send_keys(str(8500409855))
inputElement = driver.find_element_by_xpath("/html/body/div[3]/div[1]/form/div[2]/textarea")
inputElement.send_keys("sucessful")
driver.find_element_by_name("Send").click()
# the above was a test message; the loop below takes each number from the
# spreadsheet and sends it a message
for i in range(0, num_rows+1):
    time.sleep(2)
    driver.find_element_by_xpath("/html/body/form/div[1]/div[1]/p[1]").click()
    inputElement = driver.find_element_by_xpath("/html/body/div[3]/div[1]/form/div[2]/div[1]/input")
    inputElement.send_keys(str(worksheet.cell_value(i, 0)))
    go = driver.find_element_by_xpath("/html/body/div[3]/div[1]/form/div[2]/textarea")
    # NOTE(review): "susessful" is the SMS text actually sent (typo preserved).
    go.send_keys("susessful")
    driver.find_element_by_name("Send").click()
|
#!/usr/bin/python
import sys, os
sys.path.append ("../scripts")
import vlaunch
import ihex2mem
def run_sim(ihx_name):
    """Load an Intel HEX image into the simulated 32 KiB ROM and run it.

    Python 2 script (print statements). ``vlaunch``/``ihex2mem`` are project
    modules driving the simulator.
    """
    # open the test ihx file and populate ROM
    memim = ihex2mem.mem_image()
    memim.load_ihex (ihx_name)
    print "Loaded",memim.bcount,"bytes"
    # Fill every ROM address up to 32K: image bytes where present, zero elsewhere.
    for addr in range(memim.min, 32768):
        # NOTE(review): the ``addr < 32768`` test is redundant — range() already
        # enforces the upper bound.
        if (addr in memim.map) and (addr < 32768):
            vlaunch.load_byte (addr, int(memim.map[addr]))
        else:
            vlaunch.load_byte (addr, 0)
    # start simulation
    #random.seed (1)
    #vlaunch.set_decode(1)
    vlaunch.launch()
    vlaunch.continueSim(200000)  # run for a bounded number of simulation steps
    print "Sim complete, checking results"
    vlaunch.shutdown()
# Script entry: enable tracing, echo argv for the log, then run the image
# whose path is the first command-line argument.
vlaunch.setTrace (1)
print repr(sys.argv)
run_sim (sys.argv[1])
|
# Generated by Django 2.2.3 on 2019-07-03 13:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Region and Municipio tables.

    Auto-generated by Django 2.2.3. Do not edit once applied to a database.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo_r', models.IntegerField()),
                ('nombre_r', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Municipio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo_m', models.IntegerField()),
                ('nombre_m', models.CharField(max_length=200)),
                ('estado', models.CharField(choices=[('activo', 'Activo'), ('inactivo', 'Inactivo')], default='activo', max_length=50)),
                # PROTECT: a Region referenced by any Municipio cannot be deleted.
                ('region', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='municipio.Region')),
            ],
        ),
    ]
|
"""
Reads pickled results from multivaraite TE or MI outputs and creates
a file with the adjaceny matrix of the information bits. This is a light
modification of the original pickle reader file script.
Created: July 25, 2019
Updated: November 12, 2019
Seth Campbell
"""
#Import classes
from idtxl.multivariate_te import MultivariateTE
from idtxl.data import Data
from idtxl.visualise_graph import plot_network
import numpy as np
import re, os, pickle, datetime, time
#Init global variables
# pickle_path = "C:/Users/sethc/Documents/Summer Research 2019/misc/April Analysis/last mMI pickles/" #path for where to extract pickles
pickle_path = "C:/Users/sethc/Documents/Summer Research 2019/misc/April Analysis/last max lag 1 mTE pickles/" #path for where to extract pickles
output_path = "C:/Users/sethc/Documents/Summer Research 2019/misc/April Analysis/" #path for where to dump result file
all_adj_matrix = []
results = []
#takes a list of (coordinate, value) triplets and the side length of the matrix, and creates an array by flattening a matrix of the information values
def make_array(coordinates_triplets, matrix_length):
    """Build a flattened square matrix from sparse (row, col, value) triplets.

    Parameters
    ----------
    coordinates_triplets : iterable of (int, int, float)
        (x, y, value) entries; positions not listed remain 0.
    matrix_length : int
        Side length of the square matrix.

    Returns
    -------
    numpy.ndarray
        1-D array of length ``matrix_length ** 2`` (row-major flatten), so each
        subject occupies a single row in the downstream analysis file.
    """
    # init an appropriately sized matrix with zeroes
    matrix = np.zeros((matrix_length, matrix_length))
    for x, y, value in coordinates_triplets:
        matrix[x, y] = value  # assign the value to its position in the matrix
    # FIX: removed the stray debug ``print(matrix)`` that spammed stdout per call.
    # flatten to 1 dimension for easier handling in analysis (each subject is 1 row)
    return matrix.flatten()
##############################################################################################################################
# Iterate over files in pickle_path and unpickle the matching result files into a list.
with os.scandir(pickle_path) as files:
    for file in files:
        if re.search(r"filtcleants.*\.p", file.name):  # only use files that start with "filtcleants..." and end in ".p"
            # if re.search(r"filtcleants_2011_dtd_c01 mTE.*\.p", file.name): #TESTING
            # note: results is a 2d array with each entry containing a 2 item list: the unpickled result, and the file name
            results.append([pickle.load(open(pickle_path + file.name, 'rb')),file.name])  # unpickle the file and add to a list

# Extract raw adjacency matrices from the unpickled Results objects (from the IDTxl package).
# NOTE(review): assumes at least one matching pickle was found, otherwise results[0] raises IndexError.
ROI_range = len((results[0][0].get_single_target(0,False))["sources_tested"])+1  # number of ROIs to look at based on the read data
subjects_matrix = np.zeros((len(results),(ROI_range**2)+1))  # init matrix of appropriate size with zeros, plus a column for participant type
for number,result in enumerate(results):
    coordinates = []
    adj_matrix = result[0].get_adjacency_matrix(weights='max_te_lag', fdr=False)  # get adj matrix instance (element 0 is the result object, element 1 its file name)
    adj_matrix = adj_matrix._weight_matrix  # get actual adj matrix of numbers
    print(adj_matrix)
    for target_num in range(0 , ROI_range):  # iterate through all target nodes (i.e. all ROIs)
        # print(target_num)
        target_info = result[0].get_single_target(target=target_num, fdr=False)  # info about sources for the current target; returns a dictionary
        sig_sources = [i[0] for i in target_info["selected_vars_sources"]]  # keep only the source index; each tuple also carries a time lag we don't need
        sig_sources = [(target_num,i) for i in sig_sources]  # append the x axis component of the coordinate
        # print(target_info)
        te_or_mi_values = target_info["selected_sources_te"]  # get TE values (same order as "selected_var_sources")
        # te_or_mi_values = target_info["selected_sources_mi"] #get MI values (which are in the same order as the "selected_var_sources")
        for i,(j,k) in enumerate(sig_sources):  # combine the coordinates (j,k) with the matching TE/MI value into a list of triplets
            coordinates += [(j,k,te_or_mi_values[i])]  # triplet format: (x position, y position, TE/MI value)
    # group labels for SPSS (or to fill in later): control patients are 1, DTD patients are 2
    if re.search(r"dtd_d",result[1]):
        group_type = 2
    else:  # controls either have "dtd_c" in the name or something else for the few exceptions from the newer data
        group_type = 1
    subjects_matrix[number] = np.append(group_type,(make_array(coordinates, ROI_range)))  # use coordinates to make array of information values for analysis
    # subjects_matrix[number][0] = results[1]

np.savetxt(output_path + "foo FINAL all subjects mTE info bits.csv", subjects_matrix, delimiter=",", fmt='%.8f')  # save the combined matrix to a csv file
# np.savetxt(output_path + "FINAL all subjects mMI info bits.csv", subjects_matrix, delimiter=",", fmt='%.8f') #save each matrix to a csv file
# print(subjects_matrix)
print("\n~Completed combination of " + str(len(results)) + " adjacency matrix files\n")
|
"""
recursive result
TC: O(n) --->n in no of nodes in tree
SC: O(h) --->h is height of tree
iterative using queue level order traversal --> if perfect bin tree is there it will take more space
if it is skewed tree then it will take O(1) space
TC: O(n) --->n in no of nodes in tree
SC: O(w) --->w is width of tree
"""
import sys
curr_max = - sys.maxsize
class Node:
    """Binary-tree node holding a key and two child links."""

    def __init__(self, key):
        self.data = key    # node key/value
        self.left = None   # left child (Node or None)
        self.right = None  # right child (Node or None)
def get_max(root):
    """Fold the tree's maximum key into the module-global ``curr_max``.

    Pre-order traversal with an external accumulator; the caller must reset
    ``curr_max`` before use.
    """
    global curr_max
    if root is None:
        return
    curr_max = max(curr_max, root.data)
    get_max(root.left)
    get_max(root.right)
def get_max_2(root):
    """Return the maximum key in the tree, or ``-sys.maxsize`` for an empty tree.

    Pure recursive variant: the maximum of the node's key and the maxima of
    both subtrees.
    """
    if root is None:
        return -sys.maxsize
    best_child = max(get_max_2(root.left), get_max_2(root.right))
    return max(root.data, best_child)
if __name__ == '__main__':
    # Build the sample tree:
    #         10
    #        /  \
    #      20    30
    #     /  \     \
    #   40    50    60
    root = Node(10)
    root.left = Node(20)
    root.right = Node(30)
    root.left.left = Node(40)
    root.left.right = Node(50)
    root.right.right = Node(60)
    get_max(root)  # fills the module-global curr_max
    print(f"{curr_max}")
    print(f"{get_max_2(root)}")
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import os
import platform
import re
import sys
import traceback
from collections import OrderedDict
try:
    # Python 2: ``basestring`` exists natively.
    basestring = basestring
except NameError:
    # Python 3: unify on ``str``.
    basestring = str

try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
StringIO = StringIO  # re-export at module level regardless of origin
def is_windows(platform=sys.platform, win_platforms={"win32", "cygwin", "msys"}):
    """Return True when *platform* (default: the running interpreter's) is Windows-like.

    Defaults are bound at import time; pass arguments to test other values.
    """
    return platform in win_platforms
PY2 = sys.version_info[0] < 3  # True when running under Python 2
IS_PYPY = '__pypy__' in sys.builtin_module_names  # True when running under PyPy
IS_WIN = is_windows()  # True on a Windows-family platform
def _py2_makedirs(name, mode=0o777, exist_ok=False):
return os.makedirs(name, mode)
def _py2_which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.

    (Backport of Python 3's ``shutil.which`` for Python 2.)
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if IS_WIN:
        # The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)
        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()  # avoid re-scanning duplicate PATH entries
    for dir in path:
        normdir = os.path.normcase(dir)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
if PY2:  # if major is less than 3
    from .excp_util_2 import raise_exception, is_string

    # Python 2 exceptions carry no ``__traceback__``; stash it explicitly.
    def save_tb(ex):
        tb = sys.exc_info()[2]
        setattr(ex, "__traceback__", tb)

    is_string = is_string
    makedirs = _py2_makedirs
    which = _py2_which
else:
    from .excp_util_3 import raise_exception, is_string
    from shutil import which

    # Py3 exceptions already hold their traceback; nothing to do.
    def save_tb(ex):
        pass

    is_string = is_string
    makedirs = os.makedirs
    which = which

# Alias kept for callers that expect an ordered mapping type.
odict = OrderedDict
def _mp_get_context_win32_py2(context_name):
if context_name != "spawn":
raise RuntimeError("only spawn is supported")
import multiprocessing
return multiprocessing
# Multiprocessing hooks; all start as placeholders and are patched at runtime
# by patch_mp() (or immediately below for the platforms that need no patching).
_mp_get_context = None  # This will be patched at runtime
mp_ForkingPickler = None  # This will be patched at runtime
mp_log_to_stderr = None  # This will be patched at runtime
_mp_billiard_pyb_env = None  # This will be patched at runtime
_old_billiard_spawn_passfds = None  # This will be patched at runtime
_installed_tblib = False

# Billiard doesn't work on Win32
if PY2:
    if IS_WIN:
        # Python 2.7 on Windows already only works with spawn
        from multiprocessing import log_to_stderr as mp_log_to_stderr
        from multiprocessing.reduction import ForkingPickler as mp_ForkingPickler
        _mp_get_context = _mp_get_context_win32_py2
    # Python 2 on *nix uses Billiard to be patched later
else:
    # On all of Python 3s use multiprocessing
    from multiprocessing import log_to_stderr as mp_log_to_stderr, get_context as _mp_get_context
    from multiprocessing.reduction import ForkingPickler as mp_ForkingPickler
def patch_mp_pyb_env(pyb_env):
    """Record the PythonEnv used by multiprocessing patching — first caller wins."""
    global _mp_billiard_pyb_env
    if _mp_billiard_pyb_env:
        return
    _mp_billiard_pyb_env = pyb_env
def install_tblib():
    """Install tblib's pickling support once, so tracebacks can cross process boundaries."""
    global _installed_tblib
    if not _installed_tblib:
        # Vendored tblib; imported lazily to keep module import cheap.
        from pybuilder._vendor.tblib import pickling_support
        pickling_support.install()
        _installed_tblib = True
def _patched_billiard_spawnv_passfds(path, args, passfds):
    """Wrap Billiard's ``spawnv_passfds`` so spawned children inherit PyBuilder's sys.path.

    Injects a ``sys.path.extend(...)`` prologue into the ``-c`` script Billiard
    hands to the child interpreter, then delegates to the original function.
    """
    global _mp_billiard_plugin_dir, _old_billiard_spawn_passfds
    try:
        script_index = args.index("-c") + 1
        script = args[script_index]
        additional_path = []
        add_env_to_path(_mp_billiard_pyb_env, additional_path)
        args[script_index] = ";".join(("import sys", "sys.path.extend(%r)" % additional_path, script))
    except ValueError:
        # We were unable to find the "-c", which means we likely don't care
        pass
    return _old_billiard_spawn_passfds(path, args, passfds)
def patch_mp():
    """Lazily select and patch the multiprocessing implementation.

    On Python 2 *nix this swaps in Billiard and reroutes its spawn helpers
    through ``_patched_billiard_spawnv_passfds``; elsewhere the hooks were
    already bound at import time. Safe to call repeatedly.
    """
    install_tblib()
    global _mp_get_context
    if not _mp_get_context:
        if PY2 and not IS_WIN:
            from billiard import get_context, log_to_stderr, compat, popen_spawn_posix as popen_spawn
            from billiard.reduction import ForkingPickler
            global mp_ForkingPickler, mp_log_to_stderr, _old_billiard_spawn_passfds
            _mp_get_context = get_context
            mp_ForkingPickler = ForkingPickler
            mp_log_to_stderr = log_to_stderr
            # Reroute Billiard's spawn helpers through our sys.path-injecting wrapper.
            _old_billiard_spawn_passfds = compat.spawnv_passfds
            compat.spawnv_passfds = _patched_billiard_spawnv_passfds
            popen_spawn.spawnv_passfds = _patched_billiard_spawnv_passfds
def mp_get_context(context):
    """Return a multiprocessing context via the currently patched factory."""
    global _mp_get_context
    return _mp_get_context(context)

# Re-export the (possibly patched) multiprocessing hooks at module level.
mp_ForkingPickler = mp_ForkingPickler
mp_log_to_stderr = mp_log_to_stderr
_mp_get_context = _mp_get_context
def _instrumented_target(q, target, *args, **kwargs):
    """Child-process entry point: run *target* and ship (result, exc, tb) back via *q*."""
    patch_mp()

    ex = tb = None
    try:
        send_value = (target(*args, **kwargs), None, None)
    except Exception:
        _, ex, tb = sys.exc_info()
        send_value = (None, ex, tb)

    try:
        q.put(send_value)
    except Exception:
        # The payload itself failed to pickle/send — report both errors so the
        # parent can reconstruct what happened (see spawn_process()).
        _, send_ex, send_tb = sys.exc_info()
        e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
        q.put(e_out)
def spawn_process(target=None, args=(), kwargs=None, group=None, name=None):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent

    If a target raises an exception, the exception is re-raised in the parent process

    @return tuple consisting of process exit code and target's return value

    FIX: ``kwargs`` previously defaulted to a shared mutable ``{}``; use the
    None-sentinel idiom instead (behavior for all callers is unchanged).
    """
    if kwargs is None:
        kwargs = {}
    ctx = mp_get_context("spawn")

    q = ctx.SimpleQueue()
    p = ctx.Process(group=group, target=_instrumented_target, name=name, args=[q, target] + list(args), kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        # Normal path: (return_value, exception, traceback) from the child.
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        # The child could not even send its result; reconstruct both errors.
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2], "".join(traceback.format_tb(result.args[3])))
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
def prepend_env_to_path(python_env, sys_path):
    """type: (PythonEnv, List(str)) -> None

    Prepend venv directories to sys.path-like collection
    """
    # Walk site paths in reverse so the first site path ends up first overall.
    for site_dir in reversed(python_env.site_paths):
        if site_dir in sys_path:
            continue
        sys_path.insert(0, site_dir)
def add_env_to_path(python_env, sys_path):
    """type: (PythonEnv, List(str)) -> None

    Adds venv directories to sys.path-like collection
    """
    # Append in declaration order, skipping entries already present.
    for site_dir in python_env.site_paths:
        if site_dir in sys_path:
            continue
        sys_path.append(site_dir)
if PY2:
    # Backport of Python 3's recursive glob ('**') for Python 2, adapted from
    # the CPython glob module.
    def _py2_glob(pathname, recursive=False):
        """Return a list of paths matching a pathname pattern.

        The pattern may contain simple shell-style wildcards a la
        fnmatch. However, unlike fnmatch, filenames starting with a
        dot are special cases that are not matched by '*' and '?'
        patterns.

        If recursive is true, the pattern '**' will match any files and
        zero or more directories and subdirectories.
        """
        return list(_py2_iglob(pathname, recursive=recursive))

    def _py2_iglob(pathname, recursive=False):
        """Return an iterator which yields the paths matching a pathname pattern.

        The pattern may contain simple shell-style wildcards a la
        fnmatch. However, unlike fnmatch, filenames starting with a
        dot are special cases that are not matched by '*' and '?'
        patterns.

        If recursive is true, the pattern '**' will match any files and
        zero or more directories and subdirectories.
        """
        it = _iglob(pathname, recursive, False)
        if recursive and _isrecursive(pathname):
            s = next(it)  # skip empty string
            assert not s
        return it

    def _iglob(pathname, recursive, dironly):
        # Core generator: split the pattern and dispatch to the helpers below.
        dirname, basename = os.path.split(pathname)
        if not has_magic(pathname):
            assert not dironly
            if basename:
                if os.path.lexists(pathname):
                    yield pathname
            else:
                # Patterns ending with a slash should match only directories
                if os.path.isdir(dirname):
                    yield pathname
            return
        if not dirname:
            if recursive and _isrecursive(basename):
                for v in _glob2(dirname, basename, dironly):
                    yield v
            else:
                for v in _glob1(dirname, basename, dironly):
                    yield v
            return
        # `os.path.split()` returns the argument itself as a dirname if it is a
        # drive or UNC path. Prevent an infinite recursion if a drive or UNC path
        # contains magic characters (i.e. r'\\?\C:').
        if dirname != pathname and has_magic(dirname):
            dirs = _iglob(dirname, recursive, True)
        else:
            dirs = [dirname]
        if has_magic(basename):
            if recursive and _isrecursive(basename):
                glob_in_dir = _glob2
            else:
                glob_in_dir = _glob1
        else:
            glob_in_dir = _glob0
        for dirname in dirs:
            for name in glob_in_dir(dirname, basename, dironly):
                yield os.path.join(dirname, name)

    def _glob1(dirname, pattern, dironly):
        # Match *pattern* against the entries of a single directory.
        names = list(_iterdir(dirname, dironly))
        if not _ishidden(pattern):
            names = (x for x in names if not _ishidden(x))
        return fnmatch.filter(names, pattern)

    def _glob0(dirname, basename, dironly):
        # Literal (magic-free) basename: test for existence directly.
        if not basename:
            # `os.path.split()` returns an empty basename for paths ending with a
            # directory separator. 'q*x/' should match only directories.
            if os.path.isdir(dirname):
                return [basename]
        else:
            if os.path.lexists(os.path.join(dirname, basename)):
                return [basename]
        return []

    def glob0(dirname, pattern):
        # Public single-name variant (mirrors glob.glob0 in CPython).
        return _glob0(dirname, pattern, False)

    def glob1(dirname, pattern):
        # Public one-directory variant (mirrors glob.glob1 in CPython).
        return _glob1(dirname, pattern, False)

    def _glob2(dirname, pattern, dironly):
        # Recursive '**' pattern: yield the empty prefix, then walk the tree.
        assert _isrecursive(pattern)
        yield pattern[:0]
        for v in _rlistdir(dirname, dironly):
            yield v

    def _iterdir(dirname, dironly):
        # Yield the entries of *dirname*, optionally only subdirectories.
        if not dirname:
            if isinstance(dirname, bytes):
                dirname = os.curdir.decode('ASCII')
            else:
                dirname = os.curdir
        try:
            for entry in os.listdir(dirname):
                try:
                    if not dironly or os.path.isdir(os.path.join(dirname, entry)):
                        yield entry
                except OSError:
                    pass
        except OSError:
            return

    def _rlistdir(dirname, dironly):
        # Recursively list *dirname*, skipping hidden entries.
        names = list(_iterdir(dirname, dironly))
        for x in names:
            if not _ishidden(x):
                yield x
                path = os.path.join(dirname, x) if dirname else x
                for y in _rlistdir(path, dironly):
                    yield os.path.join(x, y)

    # Detect glob metacharacters (*, ? or [) in str/bytes patterns.
    magic_check = re.compile('([*?[])')
    magic_check_bytes = re.compile(b'([*?[])')

    def has_magic(s):
        if isinstance(s, bytes):
            match = magic_check_bytes.search(s)
        else:
            match = magic_check.search(s)
        return match is not None

    def _ishidden(path):
        # Hidden entries are those whose name starts with a dot.
        return path[0] in ('.', b'.'[0])

    def _isrecursive(pattern):
        # True only for the special '**' recursive pattern.
        if isinstance(pattern, bytes):
            return pattern == b'**'
        else:
            return pattern == '**'

    def _py2_escape(pathname):
        """Escape all special characters.
        """
        # Escaping is done by wrapping any of "*?[" between square brackets.
        # Metacharacters do not work in the drive part and shouldn't be escaped.
        drive, pathname = os.path.splitdrive(pathname)
        if isinstance(pathname, bytes):
            pathname = magic_check_bytes.sub(br'[\1]', pathname)
        else:
            pathname = magic_check.sub(r'[\1]', pathname)
        return drive + pathname

    glob = _py2_glob
    iglob = _py2_iglob
    escape = _py2_escape
else:
    # Python 3: the stdlib glob already supports '**' and escape().
    from glob import glob, iglob, escape
try:
    from os import symlink
except ImportError:
    # Platform without os.symlink (Windows Python 2): emulate it via the
    # Win32 API through ctypes.
    import ctypes

    csl = ctypes.windll.kernel32.CreateSymbolicLinkW
    csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
    csl.restype = ctypes.c_ubyte

    def symlink(source, link_name, target_is_directory=False):
        """ctypes-based replacement for ``os.symlink``; raises WinError on failure."""
        flags = 1 if target_is_directory else 0
        # presumably 0x2 == SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE — TODO confirm
        flags += 2
        if csl(link_name, source, flags) == 0:
            raise ctypes.WinError()
# Interpreter path relative to its prefix (e.g. "bin/python3").
sys_executable_suffix = sys.executable[len(sys.exec_prefix) + 1:]

# Per-implementation/per-version directory name, e.g. "cpython-3.8.1.final.0".
python_specific_dir_name = "%s-%s" % (platform.python_implementation().lower(),
                                      ".".join(str(f) for f in sys.version_info))

# Executable file name of the (base) interpreter backing a venv.
_, _venv_python_exename = os.path.split(os.path.abspath(getattr(sys, "_base_executable", sys.executable)))

# Public API of this compatibility module.
__all__ = ["glob", "iglob", "escape"]
|
from bot import bot
import config
import send_movie
from models import Movie, Video
from telebot import types
import upload_video
def register_video_info(message, video_id: int, type: str, old_message_id: int, tapped_message_id: int):
    """Apply the user's reply (episode number or title) to a stored Video,
    clean up the prompt messages and re-render the video card.

    Replying '/cancel' aborts the flow; unknown *type* or missing video is a no-op.
    NOTE(review): ``int(message.text)`` raises ValueError on non-numeric input —
    confirm the caller guards against that.
    """
    if message.text == '/cancel':
        return
    video = Video.select().where(Video.video_id == video_id).first()
    if video and type == 'number':
        video.number = int(message.text)
        video.save()
    elif video and type == 'title':
        video.title = message.text
        video.save()
    else:
        return
    try:
        bot.delete_message(message.chat.id, message.message_id)
        bot.delete_message(message.chat.id, old_message_id)
    except Exception:
        # Best effort: the messages may already be gone or be too old to delete.
        # FIX: was a bare ``except:``, which also traps SystemExit/KeyboardInterrupt.
        pass
    send_movie.send_video(message.chat.id, video.video_id, tapped_message_id)
def register_video(message, movie_id: int, number=None, title=None):
    """Handle the reply in the "add video" flow: either a Telegram video upload
    or a text link queued for download.

    Replying '/cancel' aborts the flow. Returns the created Video on success.
    """
    if message.text == '/cancel':
        return
    movie = Movie.select().where(Movie.movie_id == movie_id).first()
    if message.video:
        # NOTE(review): inside this branch message.video is truthy, so the
        # ``elif movie:`` arm below is unreachable — likely a refactoring leftover.
        if movie and message.video:
            video = Video.from_movie(movie_id, message.video.file_id, number, title)
            video.save()
            send_movie.send_video(message.chat.id, video.video_id)
            return video
        elif movie:
            bot.send_message(message.chat.id, 'Пришли мне видео')
            bot.register_next_step_handler_by_chat_id(message.chat.id, register_video, movie_id)
        else:
            bot.send_message(chat_id=message.chat.id, text='Фильм/сериал не найден!')
    else:
        # No video attached: treat the text as a link and queue it for upload.
        upload_video.add_to_que(chat_id=message.chat.id, movie_id=movie_id, link=message.text)
def register_are_you_sure(chat_id: int, type: str, video_id=None):
    """Send a yes/no confirmation keyboard; only type == 'videodelete' is handled.

    Returns the sent Message for 'videodelete'; other types are a no-op.
    NOTE(review): indentation of the return was ambiguous in the source paste —
    confirm it belongs inside the ``if`` as restored here.
    """
    keyboard = types.InlineKeyboardMarkup()
    if type == 'videodelete':
        keyboard.row(types.InlineKeyboardButton(text='Да!', callback_data=f'{video_id}_videodelete'), types.InlineKeyboardButton(text='Нет!', callback_data=f'delete_message'))
        return bot.send_message(chat_id, 'Ты уверен, что хочешь удалить данное видео?', reply_markup=keyboard)
def register_movie_info(message, movie_id: int, type: str, old_message_id: int, tapped_message_id: int):
    """Apply the user's reply to a stored Movie (only type == 'season' is handled),
    clean up the prompt messages and re-render the movie card.

    Replying '/cancel' aborts the flow; unknown *type* or missing movie is a no-op.
    NOTE(review): ``int(message.text)`` raises ValueError on non-numeric input —
    confirm the caller guards against that.
    """
    if message.text == '/cancel':
        return
    movie = Movie.select().where(Movie.movie_id == movie_id).first()
    if movie and type == 'season':
        movie.season = int(message.text)
        movie.save()
    else:
        return
    try:
        bot.delete_message(message.chat.id, message.message_id)
        bot.delete_message(message.chat.id, old_message_id)
    except Exception:
        # Best effort: the messages may already be gone or be too old to delete.
        # FIX: was a bare ``except:``, which also traps SystemExit/KeyboardInterrupt.
        pass
    send_movie.send_movie(message.chat.id, movie.movie_id, tapped_message_id)
def register_movie(message):
    """Create a Movie from a Kinopoisk link sent by the user and show its card.

    On any failure, re-prompts the user and re-registers itself as the
    next-step handler. Replying '/cancel' aborts the flow.
    """
    if message.text == '/cancel':
        return
    try:
        movie = Movie.from_kinopoisk(message.text)
        movie.save()
        send_movie.send_movie(message.chat.id, movie.movie_id)
    except Exception:
        # FIX: was a bare ``except:``, which also traps SystemExit/KeyboardInterrupt.
        bot.send_message(message.chat.id, 'Неверная ссылка. Пришли мне новую ссылку на кинопоиск')
        bot.register_next_step_handler_by_chat_id(message.chat.id, register_movie)
|
# Put '' [empty string] if you dont want any cell type
from tqdm import tqdm
from functions import AVAILABLE_BEHAVIORS
import logging
class CellTransConfig:
    """
    Configuration for one behavioural-transition event within a sample:
    sample_id, cell_type, event_time, an optional filter_pattern (for
    behavioural transitions) and the pair of transition labels.
    """

    def __init__(
        self,
        sample_id,
        cell_type,
        event_time,
        filter_pattern=None,
        first_event=None,
        second_event=None,
    ):
        self.sample_id = sample_id          # which recording this event belongs to
        self.cell_type = cell_type          # cell-type prefix, or None for "any"
        self.event_time = event_time        # event onset time (same units as 'time' column)
        self.filter_pattern = filter_pattern  # optional substring to further filter columns
        self.first_event = first_event      # label of the behaviour before the transition
        self.second_event = second_event    # label of the behaviour after the transition

    def get_filter_regex(self):
        """Build the column-filter regex: '^<cell>_' plus optional '.*<pattern>.*'."""
        cell_part = self.cell_type if self.cell_type is not None else r"[a-zA-Z0-9]+"
        regex = "^{}_".format(cell_part)
        if self.filter_pattern:
            regex += ".*{}.*".format(self.filter_pattern)
        return regex
def extract_windows(
    sample_data,
    cell_transition_configs,
    left_half_window_size=18.4,
    right_half_window_size=42.4,
    cell_pattern_filter=True,
):
    """Extract a window of cell activity around each configured transition event.

    Parameters
    ----------
    sample_data : dict
        Maps sample_id -> per-sample DataFrame (must expose a ``time`` column).
    cell_transition_configs : iterable of CellTransConfig
        One entry per event window to extract.
    left_half_window_size, right_half_window_size : float
        Time taken before/after each event (both window edges exclusive).
    cell_pattern_filter : bool
        When True, keep only the cell columns matching the config's filter regex.

    Returns
    -------
    list of DataFrame
        One window per config: time re-zeroed on the event, columns renamed to
        encode sample, event index and the transition labels.

    Raises
    ------
    ValueError
        If a config references a sample_id missing from *sample_data*.

    FIX: removed the unreachable ``continue`` that followed the ``raise`` —
    dead code left over from a skip-vs-fail decision.
    """
    all_Ptrans_events = []
    n_behavior_per_sample = {}
    # TODO: Split filter data and extract windows
    for ctc in tqdm(cell_transition_configs, "Extracting windows"):
        sample_df = sample_data.get(ctc.sample_id)
        n_behavior = n_behavior_per_sample.get(ctc.sample_id, 1)
        n_behavior_per_sample.setdefault(ctc.sample_id, 1)
        if sample_df is None:
            raise ValueError("{}: could not find sample data".format(ctc.sample_id))
        # TODO: make extracting behavior / cell columns easy and accessable anywhere
        all_columns = set(sample_df.columns)
        non_cell_columns = set(
            column
            for column in all_columns
            if any(
                behavior in column
                for behavior in AVAILABLE_BEHAVIORS + ("quiet", "time")
            )
        )
        # NOTE(review): set difference has no stable order — column order of
        # cell_df may vary between runs; confirm downstream code doesn't rely on it.
        cell_df = sample_df.filter(items=all_columns - non_cell_columns)
        if cell_pattern_filter:
            cell_subset_df = cell_df.filter(regex=ctc.get_filter_regex())
        else:
            cell_subset_df = cell_df
        cell_subset_df.set_index(sample_df.time, inplace=True)
        cell_subset_df.reset_index(inplace=True)
        n_behavior_per_sample[ctc.sample_id] += 1
        window_start = ctc.event_time - left_half_window_size
        window_end = ctc.event_time + right_half_window_size
        # Get subset of rows between window_start and window_end
        # Including event_start
        # trans = cell_subset_df[(cell_subset_df.time >= window_start) & (cell_subset_df.time <= window_end)]
        # Excluding event start
        trans = cell_subset_df[
            (cell_subset_df.time > window_start) & (cell_subset_df.time < window_end)
        ]
        # Normalizing the data to align on beginning of selected
        # behavior (event_df = Zero) by subtracting the event time from the
        # timestamps of every row in the window.
        # Note: using ":" in .loc[] selects "all rows" in our window.
        # trans.loc[:, 'time'] = trans['time'] - row['time']
        trans.loc[:, "time"] = trans["time"] - ctc.event_time
        # Add sample_id to each column as prefix and n_behavior as suffix to distinguish events within a sample
        trans.rename(
            lambda x: "{}_{}_{}_{}_{}".format(
                ctc.sample_id, x, n_behavior, ctc.first_event, ctc.second_event
            ),
            axis="columns",
            inplace=True,
        )
        # Rename the first column back to plain "time"
        trans.rename(columns={trans.columns[0]: "time"}, inplace=True)
        all_Ptrans_events.append(trans)  # Append a list with all events
    return all_Ptrans_events
|
from django.db import models
class Trainer(models.Model):
    """A trainer employed to teach a course.

    ``__str__`` returns only the first name, which is what Django admin
    listings and template renderings display for a record.
    """

    first_name = models.CharField(max_length=50)
    second_name = models.CharField(max_length=50)
    gender = models.CharField(max_length=20)
    id_number = models.CharField(max_length=50, null=True)
    email = models.EmailField(max_length=70)
    phone_number = models.CharField(max_length=70)
    profession = models.CharField(max_length=70)
    date_employed = models.DateField()
    course_teaching = models.CharField(max_length=100)
    working_status = models.CharField(max_length=100)
    others = models.CharField(max_length=100)
    pic = models.ImageField(upload_to="profile_pic", blank=True)

    def __str__(self):
        return self.first_name
|
#!/usr/bin/env python3
# Decode the obfuscated flag: each byte was encoded as ((c ^ 0x32) + 1) ^ 0x32,
# so decoding reverses it: ((t ^ 0x32) - 1) ^ 0x32.
fake = 'fL492_r_h4rd3r_th4n_th1s'  # decoy string, never used by the decoder
target = [58, 49, 82, 48, 52, 54, 82, 48, 51, 92, 58, 81, 115, 48, 53, 69, 92, 49, 90, 52]
# str.join over a generator avoids quadratic string concatenation; the
# Python 2 'print flag' statement is replaced with the print() function.
flag = ''.join(chr(((value ^ 0x32) - 1) ^ 0x32) for value in target)
print(flag)
|
# --- --- IMPORTS --- ---
import ConfigParser
import json
import markdown
import logging
from logging.handlers import RotatingFileHandler
from math import ceil
from urllib import urlencode
from os import listdir
from os.path import basename
from flask import Flask, render_template, flash, redirect, url_for, request
# --- --- GLOBAL VARS --- ---
app = Flask(__name__)
# SECURITY NOTE(review): the session secret key is hard-coded in source; it
# should be loaded from configuration or the environment, not committed.
app.secret_key="Ic&Ts3IuNS*uAQbc#nur2UUAAme$8xD|"
# app_config: Will be loaded in 'init()'
app_config = { 'graphic': {}, 'data': {}, 'code': {}, 'logging': {} }
# Static navigation entries rendered by every template.
app_nav = [
    {'name': "Home", 'path': "/"},
    {'name': "About", 'path': "/about"},
    {'name': "Universes", 'path': "/universes"},
    {'name': "Characters", 'path': "/characters"}
]
# In-memory cache, filled by loadUniverseList()/loadCharacterList().
# NOTE(review): the *_tags entries start as lists here but the loaders
# rebuild them as dicts (tag -> [ids]); the initial lists are never used.
data_cache = {
    'universes': {}, 'universe_tags': [],
    'characters': {}, 'character_tags': [],
}
class NotFoundEx(Exception):
    """Raised when a requested universe/character resource cannot be found.

    str() of the exception yields the message, so handlers can log it
    directly. Fix: the default message had a typo ("Ressource").
    """

    # Class-level default, used when no specific message is supplied.
    msg = "Resource not found."

    def __init__(self, msg=None):
        Exception.__init__(self)
        if msg is not None:
            self.msg = msg

    def __str__(self):
        return self.msg
# --- --- ROUTES --- ---
@app.route('/')
def route_root():
    """Render the landing page with cached universe/character counts."""
    logRequest()
    flash("Welcome !")
    infos = {
        'nb_universes': len(data_cache['universes']),
        'nb_characters': len(data_cache['characters']),
    }
    data = {'config': app_config, 'nav': app_nav, 'active': "/", 'infos': infos}
    return render_template('index.html', data=data)
@app.route('/about')
def route_about():
    """Render the about page from the markdown sources document.

    Fix: use 'with' so the file handle is closed even if reading or the
    markdown conversion raises (the original leaked the handle then).
    """
    logRequest()
    with open("../doc/sources.md", "r") as sourcesFile:
        sourcesMD = markdown.markdown(sourcesFile.read())
    data = {'config': app_config, 'nav': app_nav, 'active': "/about", 'sources': sourcesMD, 'code': app_config['code']}
    return render_template('about.html', data=data)
@app.route('/reload')
def route_reload():
    """Reload universes and characters from disk, then redirect home."""
    logRequest()
    loadUniverseList()
    nb_univ = len(data_cache['universes'])
    flash("Reloaded "+str(nb_univ)+" universes.")
    loadCharacterList()
    nb_char = len(data_cache['characters'])
    flash("Reloaded "+str(nb_char)+" characters.")
    return redirect(url_for('route_root'))
# Universes
@app.route('/universes')
def route_universes():
    """List universes, optionally filtered by tags and/or a text keyword."""
    logRequest()
    data = {'config': app_config, 'nav': app_nav, 'active': "/universes", 'list': getUniverseList()}
    data['search'] = {}
    # Tag filter: comma separated list, every tag must match.
    tags_arg = request.args.get('tags', "")
    if tags_arg != "":
        tag_filter = tags_arg.split(",")
        app.logger.info("Filtered by tags: "+str(tag_filter))
        data['list'] = filterListByTags(data['list'], tag_filter)
        data['search']['tags'] = set(tag_filter)
    # Free-text filter on name / short description.
    text_filter = request.args.get('text', "")
    if text_filter != "":
        app.logger.info("Filtered by keyword: "+text_filter)
        data['search']['text'] = text_filter
        data['list'] = filterListByKeyword(data['list'], text_filter.lower())
    data = splitListIntoPages(data, request.args)
    data = getLinks(data, request.args)
    return render_template('universes.html', data=data)
@app.route('/universes/<univID>')
def route_universe(univID):
    """Render the detail page of one universe loaded from its JSON file.

    Re-raises NotFoundEx (after logging) when the file cannot be read.
    Fix: the original had an unreachable close() after 'raise' and leaked
    the file handle when json.load raised; 'with' closes it in all cases.
    """
    logRequest()
    fileName = app_config['data']['data_folder']+"/universes/"+univID+".json"
    app.logger.info("LOAD - Loading universe #"+univID+" (from: "+fileName+")")
    try:
        try:
            with open(fileName, "r") as info_file:  # TODO: escape univID
                univ_info = json.load(info_file)
            data_cache['universes'][univID] = minUniverseData(univ_info) # Update cache
            univ_info = fillUniverseData(univ_info) # Fill data
            data = {'config': app_config, 'nav': app_nav, 'active': "/universes", 'univ': parseDown(univ_info)}
            return render_template('universe-details.html', data=data)
        except IOError:
            raise NotFoundEx()
    except NotFoundEx as e:
        app.logger.error("ERROR - "+str(e)+" / #"+univID)
        raise e
# Characters
@app.route('/characters')
@app.route('/universes/<univID>/characters')
def route_characters(univID=None):
    """List characters, filtered by tags, keyword and/or owning universe."""
    logRequest()
    data = {'config': app_config, 'nav': app_nav, 'active': "/characters", 'list': getCharacterList()}
    data['search'] = {}
    # Tag filter: comma separated list, every tag must match.
    tags_arg = request.args.get('tags', "")
    if tags_arg != "":
        tag_filter = tags_arg.split(",")
        app.logger.info("Filtered by tags: "+str(tag_filter))
        data['list'] = filterListByTags(data['list'], tag_filter)
        data['search']['tags'] = set(tag_filter)
    # Free-text filter on name / short description.
    text_filter = request.args.get('text', "")
    if text_filter != "":
        app.logger.info("Filtered by keyword: "+text_filter)
        data['search']['text'] = text_filter
        data['list'] = filterListByKeyword(data['list'], text_filter.lower())
    # Universe scoping (only on the /universes/<id>/characters route).
    if univID is not None:
        if univID not in data_cache['universes']:
            raise NotFoundEx("No such universe")
        app.logger.info("Filtered by universe: "+univID)
        data['list'] = filterListByUniverse(data['list'], univID)
        data['search']['univ'] = data_cache['universes'][univID]['name']
    data = splitListIntoPages(data, request.args)
    data = getLinks(data, request.args)
    return render_template('characters.html', data=data)
@app.route('/universes/<univID>/characters/<charID>')
def route_character(univID,charID):
    """Render one character's detail page.

    Re-raises NotFoundEx (after logging) when the universe is unknown, the
    file is missing, or the character belongs to a different universe.
    Fix: the original had an unreachable close() after 'raise' and leaked
    the file handle when json.load raised; 'with' closes it in all cases.
    """
    logRequest()
    fileName = app_config['data']['data_folder']+"/characters/"+charID+".json" # TODO: escape charID
    app.logger.info("LOAD - Loading character #"+charID+" (from: "+fileName+")")
    try:
        try:
            if univID not in data_cache['universes']:
                raise NotFoundEx("No such universe")
            with open(fileName, "r") as info_file:
                char_info = json.load(info_file)
            if char_info['universe'] != univID:
                raise NotFoundEx("Wrong universe")
            data_cache['characters'][charID] = minCharacterData(char_info) # Update cache
            char_info = fillCharacterData(char_info) # Fill data
            data = {'config': app_config, 'nav': app_nav, 'active': "/characters", 'char': parseDown(char_info)}
            return render_template('character-details.html', data=data)
        except IOError:
            raise NotFoundEx()
    except NotFoundEx as e:
        app.logger.error("ERROR - "+str(e)+" / #"+charID)
        raise e
# --- --- ERRORS --- --- #
@app.errorhandler(404)
@app.errorhandler(NotFoundEx)
def error_notFound(error):
    """Render the 404 page for both Flask 404s and NotFoundEx.

    Fix: 'error' is an exception object, so the original string
    concatenation ("ERROR - "+error) raised TypeError; use str(error).
    """
    data = {'config': app_config, 'nav': app_nav, 'error': error, 'active': ""}
    app.logger.error("ERROR - "+str(error))
    return render_template('e404.html', data=data), 404
@app.errorhandler(500)
def error_serverError(error):
    """Log internal server errors and return a minimal 500 response.

    Fixes two defects: the function redefined (shadowed) the 404 handler's
    name 'error_notFound', and it returned None — an error handler must
    return a valid response, otherwise Flask raises again while handling
    the original error.
    """
    app.logger.error("ERROR - "+str(error))
    return "Internal server error", 500
# --- --- Processing funcions --- --- #
def splitListIntoPages(data, urlArgs):
    """Slice data['list'] to the requested page and attach pagination info.

    Reads the 'page' query argument (default 1); fills data['pages'] with
    the current page, prev/next when they exist, up to 7 surrounding page
    numbers, and a URL prefix preserving the other query arguments.
    """
    args = urlArgs.to_dict()
    full_list = data['list']
    page_length = app_config['graphic']['items_per_page']
    nb_pages = int( ceil( len(full_list)/float(page_length) ) )
    page = int(args.pop('page')) if 'page' in args else 1
    first = (page-1)*page_length
    data['list'] = full_list[first:first+page_length]
    data['pages'] = {'cur': page, 'list': []}
    if page > 1:
        data['pages']['prev'] = page-1
    if page < nb_pages:
        data['pages']['next'] = page+1
    data['pages']['list'] = range(max(1, page-3), min(nb_pages, page+3)+1)
    url_prefix = urlencode(args)
    data['pages']['prefix'] = "?"+url_prefix+( "" if url_prefix == "" else "&" )+"page="
    return data
def getLinks(data, urlArgs):
    """Build URL prefixes for tag filtering and universe-scoped links.

    Fills data['links']['prefix_tags'] with a query prefix that keeps the
    other arguments and ends in "tags=" (plus any existing tags and a
    trailing comma, so one more tag can be appended), and — on the
    characters page — data['links']['suffix_univ'] with the current query.

    Fix: dict.has_key() was removed in Python 3; the 'in' operator is the
    exact equivalent and also works on Python 2.
    """
    args = urlArgs.to_dict()
    if 'page' in args:
        args.pop('page')
    data['links'] = {}
    # Prefix tags
    args_tags = args.copy()
    tags_val = ""
    if 'tags' in args_tags:
        tags_val = args_tags.pop('tags')
    tags_prefix = urlencode(args_tags)
    data['links']['prefix_tags'] = "?"+tags_prefix+( "" if tags_prefix == "" else "&" )+"tags="
    if tags_val!="":
        data['links']['prefix_tags'] += tags_val+","
    # Prefix univ
    if data['active']=="/characters":
        data['links']['suffix_univ'] = "?"+urlencode(args)
    return data
def parseDown(item):
    """Convert an item's markdown description fields to HTML, in place.

    'short_desc' is mandatory (KeyError if absent); 'full_desc' is only
    converted when present. Returns the mutated item for call chaining.
    """
    item['short_desc'] = markdown.markdown(item['short_desc'])
    if 'full_desc' in item:
        item['full_desc'] = markdown.markdown(item['full_desc'])
    return item
def filterListByTags(inList, tags):
    """Return the items whose 'tags' contain every tag in *tags*."""
    wanted = set(tags)
    return [item for item in inList if wanted.issubset(set(item['tags']))]
def filterListByUniverse(inList, univ):
    """Return the items belonging to the universe whose id is *univ*."""
    return [item for item in inList if item['universe']['id'] == univ]
def filterListByKeyword(inList, keyword):
    """Return the items whose name or short description contains *keyword*.

    *keyword* is expected lowercase already; the fields are lowercased here.
    """
    return [
        item
        for item in inList
        if keyword in item['name'].lower() or keyword in item['short_desc'].lower()
    ]
# Universes
def getUniverseList():
    """Return the cached universe summaries (dict values of the cache)."""
    universes = data_cache['universes']
    return universes.values()
def minUniverseData(univ):
    """Extract the summary subset of a universe record used in listings."""
    return {key: univ[key] for key in ('id', 'name', 'short_desc', 'pic', 'tags')}
def fillUniverseData(univ):
    """Expand a universe record with related data.

    Currently a pass-through: the related-characters expansion is disabled,
    so the record is returned unchanged.
    """
    return univ
def loadUniverseList():
    """(Re)load every universe JSON file into the module cache.

    Side effects: upserts entries of data_cache['universes'] and rebuilds
    data_cache['universe_tags'] as a dict mapping tag -> [universe ids].
    Fix: use 'with' so a file handle is not leaked when json.load raises.
    """
    folderPath = app_config['data']['data_folder']+"/universes/"
    app.logger.info("LOAD - Loading universes (from: "+folderPath+")")
    # Reset the tag index (rebuilt as a dict, not the initial list)
    data_cache['universe_tags'] = {}
    # Walking on each file
    for info_file in listdir(folderPath):
        app.logger.debug("\tFile: "+info_file)
        # Loading JSON Object from file
        with open(folderPath+info_file) as filePtr:
            jsonObj = parseDown( minUniverseData( json.load(filePtr) ) )
        # Caching data
        data_cache['universes'][jsonObj['id']] = jsonObj
        # Update tag list (tag -> list of universe ids)
        for t in jsonObj['tags']:
            data_cache['universe_tags'].setdefault(t, []).append(jsonObj['id'])
    count = len(data_cache['universes'])
    app.logger.info("DONE! - Loaded "+str(count)+" universes.")
# Characters
def getCharacterList():
    """Return the cached character summaries (dict values of the cache)."""
    characters = data_cache['characters']
    return characters.values()
def minCharacterData(char):
    """Extract the summary subset of a character record used in listings.

    The character's 'universe' id is resolved to an {'id', 'name'} pair via
    the universe cache, so loadUniverseList() must have run first.
    """
    cached_univ = data_cache['universes'][char['universe']]
    summary = {key: char[key] for key in ('id', 'name', 'short_desc', 'pic', 'tags')}
    summary['universe'] = {'id': cached_univ['id'], 'name': cached_univ['name']}
    return summary
def fillCharacterData(char):
    """Resolve a character's universe, allies and enemies ids to records.

    Mutates and returns *char*; every referenced id must already be present
    in the module caches.
    """
    # Universe id -> full cached universe record.
    char['universe'] = data_cache['universes'][char['universe']]
    # Ally / enemy id lists -> lists of cached character summaries.
    for relation in ('allies', 'enemies'):
        related_ids = char[relation]
        char[relation] = [data_cache['characters'][charID] for charID in related_ids]
    return char
def loadCharacterList():
    """(Re)load every character JSON file into the module cache.

    Side effects: upserts entries of data_cache['characters'] and rebuilds
    data_cache['character_tags'] as a dict mapping tag -> [character ids].
    Requires loadUniverseList() to have populated the universe cache first
    (minCharacterData resolves universe ids through it).
    Fix: use 'with' so a file handle is not leaked when json.load raises.
    """
    folderPath = app_config['data']['data_folder']+"/characters/"
    app.logger.info("LOAD - Loading characters (from: "+folderPath+")")
    # Reset the tag index (rebuilt as a dict, not the initial list)
    data_cache['character_tags'] = {}
    # Walking on each file
    for info_file in listdir(folderPath):
        app.logger.debug("\tFile: "+info_file)
        # Loading JSON Object from file
        with open(folderPath+info_file) as filePtr:
            jsonObj = parseDown( minCharacterData( json.load(filePtr) ) )
        # Caching data
        data_cache['characters'][jsonObj['id']] = jsonObj
        # Update tag list (tag -> list of character ids)
        for t in jsonObj['tags']:
            data_cache['character_tags'].setdefault(t, []).append(jsonObj['id'])
    count = len(data_cache['characters'])
    app.logger.info("DONE! - Loaded "+str(count)+" characters.")
# --- --- SETUP --- ---
def logRequest():
    """Log the current request's HTTP method and full URL (one line each)."""
    app.logger.info(request.method+": "+request.url)
def init(app):
    """Load etc/defaults.cfg into app_config / app.config, then warm caches.

    Populates the module-level app_config sections (app, graphic, data,
    code, logging) and a few Flask app.config entries, then loads the
    universe and character caches from disk.

    NOTE(review): ConfigParser.get() returns strings, so app.config['DEBUG']
    is set to a non-empty string and is therefore always truthy — confirm
    that is intended.
    NOTE(review): ConfigParser.read() does not raise IOError for a missing
    file (it silently reads nothing), so the except clause below likely
    never fires for that case — verify.
    """
    app.logger.info("INIT - Initializing application ...")
    config = ConfigParser.ConfigParser ()
    try:
        config_location = "etc/defaults.cfg"
        config.read(config_location)
        # App
        app_config['app_name'] = config.get("app", "name")
        app_config['app_author'] = config.get("app", "author")
        app_config['app_contact'] = config.get("app", "contact")
        # Graphic
        app_config['graphic']['default_universe_pic'] = config.get("graphic", "default_universe_pic")
        app_config['graphic']['default_character_pic'] = config.get("graphic", "default_character_pic")
        app_config['graphic']['items_per_page'] = int(config.get("graphic", "items_per_page"))
        # Data
        app_config['data']['data_folder'] = config.get("data", "ressource_folder")
        # Code
        app_config['code']['repo_url'] = config.get("code", "repo_url")
        app_config['code']['repo_name'] = config.get("code", "repo_name")
        # Main config
        app.config['DEBUG'] = config.get("config", "debug")
        app.config['ip_address'] = config.get("config", "ip_address")
        app.config['port'] = config.get("config", "port")
        app.config['url'] = config.get("config", "url")
        #app_config['locale'] = config.get("config", "locale")
        # Logging
        app_config['logging']['file'] = config.get("logging", "name")
        app_config['logging']['location'] = config.get("logging", "location")
        app_config['logging']['level'] = config.get("logging", "level")
    except IOError as e:
        app.logger.error("ERROR - Could not read configs from: "+config_location)
        app.logger.error("\t>>"+str(e))
    # loading cached data
    loadUniverseList()
    loadCharacterList()
def logs(app):
    """Attach a rotating file handler/level taken from app_config['logging']."""
    log_level = app_config['logging']['level']
    log_pathname = app_config['logging']['location'] + app_config['logging']['file']
    handler = RotatingFileHandler(log_pathname, maxBytes=1024*1024*10, backupCount=1024)
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(
        "%(levelname)s | %(asctime)s | %(module)s | %(funcName)s | %(message)s"
    ))
    app.logger.setLevel(log_level)
    app.logger.addHandler(handler)
if __name__ == '__main__':
    # Configure, attach logging, then serve until interrupted.
    init(app)
    logs(app)
    app.logger.info("START - Application started !")
    host = app.config['ip_address']
    port = int(app.config['port'])
    app.run(host=host, port=port)
    app.logger.info("STOP - Application ended !")
|
#!/usr/bin/env python
from fastcgi import *
from time import sleep
@fastcgi
def hello():
    """FastCGI handler: read the request body and echo it in a greeting.

    NOTE(review): 'sys' and 'os' are not imported here explicitly — this
    relies on the star import from 'fastcgi' above to provide them; confirm.
    The second write dumps the whole process environment to the response,
    which leaks server details — presumably debug-only; verify.
    """
    name = sys.stdin.read()
    sys.stdout.write(f'Content-type: text/html\n\nHello {name}\n')
    sys.stdout.write(f'{os.environ}\n')
|
import os, sys, logging, argparse, pdb, imp, time
import unittest as test
from copy import deepcopy
from nistoar.testing import *
from nistoar.pdr import cli
from nistoar.pdr.publish.cmd import prepupd
from nistoar.pdr.exceptions import PDRException, ConfigurationException
import nistoar.pdr.config as cfgmod
# Directory containing this test module.
testdir = os.path.dirname(os.path.abspath(__file__))
# Root of the nistoar.pdr test tree (two directory levels up).
pdrmoddir = os.path.dirname(os.path.dirname(testdir))
# Canned archive data for the simulated distribution / description services.
distarchdir = os.path.join(pdrmoddir, "distrib", "data")
descarchdir = os.path.join(pdrmoddir, "describe", "data")
# Per-run scratch copies of the archives (tmpdir() from nistoar.testing).
distarchive = os.path.join(tmpdir(), "distarchive")
mdarchive = os.path.join(tmpdir(), "mdarchive")
def _start_uwsgi_service(logname, srvport, wpy, archdir, pidbase):
    """Launch one simulated service as a daemonized uwsgi process."""
    tdir = tmpdir()
    pidfile = os.path.join(tdir, pidbase+str(srvport)+".pid")
    assert os.path.exists(wpy)
    cmd = "uwsgi --daemonize {0} --plugin python --http-socket :{1} " \
          "--wsgi-file {2} --set-ph archive_dir={3} --pidfile {4}"
    cmd = cmd.format(os.path.join(tdir, logname), srvport, wpy, archdir, pidfile)
    os.system(cmd)

def startServices():
    """Copy the canned archives and start the simulated distrib/RMM services.

    Fixes: 'shutil' was used but never imported at module level (it may have
    leaked in via a star import; an explicit import is safer), and the two
    near-identical uwsgi launch sequences are factored into a helper.
    """
    import shutil
    # simulated distribution service on :9091
    shutil.copytree(distarchdir, distarchive)
    _start_uwsgi_service("simdistrib.log", 9091,
                         os.path.join(pdrmoddir, "distrib/sim_distrib_srv.py"),
                         distarchive, "simdistrib")
    # simulated metadata (RMM) service on :9092
    shutil.copytree(descarchdir, mdarchive)
    _start_uwsgi_service("simrmm.log", 9092,
                         os.path.join(pdrmoddir, "describe/sim_describe_svc.py"),
                         mdarchive, "simrmm")
    # give the daemons a moment to come up
    time.sleep(0.5)
def _stop_uwsgi_service(pidbase, srvport):
    """Ask uwsgi to stop a service; fall back to SIGTERM via its pidfile."""
    import signal   # fix: used below but never imported at module level
    pidfile = os.path.join(tmpdir(), pidbase+str(srvport)+".pid")
    os.system("uwsgi --stop {0}".format(pidfile))
    # sometimes stopping with uwsgi doesn't work; best-effort kill.
    # Narrowed from a bare 'except:' — a missing/corrupt pidfile or an
    # already-dead process is fine to ignore, anything else should surface.
    try:
        with open(pidfile) as fd:
            pid = int(fd.read().strip())
        os.kill(pid, signal.SIGTERM)
    except (EnvironmentError, ValueError):
        pass

def stopServices():
    """Stop both simulated services started by startServices().

    Fixes: 'signal' was used but never imported at module level, and the
    duplicated stop sequences are factored into a helper.
    """
    _stop_uwsgi_service("simdistrib", 9091)
    _stop_uwsgi_service("simrmm", 9092)
    # allow the daemons a moment to exit
    time.sleep(1)
def setUpModule():
    """unittest module fixture: prepare the tmp area and start services."""
    ensure_tmpdir()
    startServices()
def tearDownModule():
    """unittest module fixture: stop services and remove the tmp area."""
    stopServices()
    rmtmpdir()
class TestPrepupdCmd(test.TestCase):
    """Tests for the 'prepupd' subcommand of the PDR CLI.

    Relies on the module fixtures that launch the simulated distribution
    and metadata (RMM) services on localhost ports 9091/9092.
    """
    def setUp(self):
        # scratch work area and headbag cache, removed in tearDown()
        self.tf = Tempfiles()
        self.workdir = self.tf.mkdir("work")
        self.headcache = self.tf.mkdir("headcache")
        # repo access config pointing at the simulated local services
        self.config = {
            "repo_access": {
                "headbag_cache": self.headcache,
                "distrib_service": {
                    "service_endpoint": "http://localhost:9091/"
                },
                "metadata_service": {
                    "service_endpoint": "http://localhost:9092/"
                }
            }
        }
        # CLI front end with the prepupd subcommand registered
        self.cmd = cli.PDRCLI()
        self.cmd.load_subcommand(prepupd)
    def tearDown(self):
        self.tf.clean()
    def test_parse(self):
        """Argument parsing: defaults, -C/-u and -C/-r combinations."""
        args = self.cmd.parser.parse_args("-q prepupd pdr2222".split())
        self.assertEqual(args.workdir, "")
        self.assertIsNone(args.cachedir)
        self.assertTrue(args.quiet)
        self.assertFalse(args.verbose)
        self.assertEqual(args.cmd, "prepupd")
        self.assertEqual(args.aipid, ["pdr2222"])
        argline = "-q -w "+self.workdir+" prepupd pdr2210 -C headbags -u https://data.nist.gov/"
        args = self.cmd.parser.parse_args(argline.split())
        self.assertEqual(args.workdir, self.workdir)
        self.assertEqual(args.cachedir, 'headbags')
        self.assertEqual(args.repourl, "https://data.nist.gov/")
        self.assertIsNone(args.replaces)
        self.assertTrue(args.quiet)
        self.assertFalse(args.verbose)
        self.assertEqual(args.cmd, "prepupd")
        self.assertEqual(args.aipid, ["pdr2210"])
        argline = "-q -w "+self.workdir+" prepupd pdr2210 -C headbags -r pdr2001"
        args = self.cmd.parser.parse_args(argline.split())
        self.assertEqual(args.workdir, self.workdir)
        self.assertEqual(args.cachedir, 'headbags')
        self.assertEqual(args.replaces, "pdr2001")
        self.assertIsNone(args.repourl)
        self.assertTrue(args.quiet)
        self.assertFalse(args.verbose)
        self.assertEqual(args.cmd, "prepupd")
        self.assertEqual(args.aipid, ["pdr2210"])
    def test_get_access_config(self):
        """get_access_config: empty without args, filled when -C/-u given."""
        args = self.cmd.parser.parse_args("-q prepupd pdr2222".split())
        cfg = prepupd.get_access_config(args, {})
        self.assertNotIn('headbag_cache', cfg)
        self.assertNotIn('distrib_service', cfg)
        self.assertNotIn('metadata_service', cfg)
        argline = "-q -w "+self.workdir+" prepupd pdr2210 -C headbags -u https://data.nist.gov/"
        args = self.cmd.parser.parse_args(argline.split())
        cfg = prepupd.get_access_config(args, {'working_dir': args.workdir})
        # cache dir is resolved relative to the working dir and created
        self.assertEqual(cfg['headbag_cache'], os.path.join(self.workdir,"headbags"))
        self.assertTrue(os.path.isdir(cfg['headbag_cache']))
        self.assertEqual(cfg['distrib_service']['service_endpoint'], "https://data.nist.gov/od/ds/")
        self.assertEqual(cfg['metadata_service']['service_endpoint'], "https://data.nist.gov/rmm/")
    def test_execute(self):
        """Happy path: executing prepupd lays out the update bag structure."""
        argline = "-q -w "+self.workdir+" prepupd pdr2210"
        cfg = deepcopy(self.config)
        self.cmd.execute(argline.split(), cfg)
        self.assertTrue(os.path.isfile(os.path.join(self.workdir, "pdr.log")))
        self.assertTrue(os.path.isdir(os.path.join(self.workdir, "pdr2210")))
        self.assertTrue(os.path.isdir(os.path.join(self.workdir, "pdr2210", "metadata")))
        self.assertTrue(os.path.isdir(os.path.join(self.workdir, "pdr2210", "multibag")))
        self.assertTrue(os.path.exists(os.path.join(self.workdir, "pdr2210", "data")))
        # the data directory exists but contains no (non-hidden) files yet
        self.assertEqual(len([f for f in os.listdir(os.path.join(self.workdir, "pdr2210", "data"))
                              if not f.startswith('.')]), 0)
    def test_execute_notpub(self):
        """An unknown AIP id must fail with PDRCommandFailure status 13."""
        argline = "-q -w "+self.workdir+" prepupd pdr8888"
        cfg = deepcopy(self.config)
        try:
            self.cmd.execute(argline.split(), cfg)
            self.fail("failed to raise command failure")
        except cli.PDRCommandFailure as ex:
            self.assertEqual(ex.stat, 13)
if __name__ == '__main__':
    # Run this module's tests directly (unittest imported as 'test').
    test.main()
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
from .imagenet import *
|
# -*- coding:utf-8 -*-
# Amortize a 500 000 loan with a fixed yearly repayment of 30 000.
# Years 1-4 use the rates in interest_tuple; every later year uses 5%.
residual = 500000.0
interest_tuple = (0.01, 0.02, 0.03, 0.035)
repay = 30000.0
i = 0
while residual > 0:
    i += 1
    print("第",i,"年还是要还钱")
    interest = interest_tuple[i - 1] if i <= 4 else 0.05
    residual = residual*(1 + interest) - repay
print("第",i+1,"年终于还完了")
__all__ = ()
import os
from json import dumps as to_json, load as from_json_file, loads as from_json
from math import ceil, floor
from zlib import compress, decompress
from hata import BUILTIN_EMOJIS, Color, DiscordException, ERROR_CODES, Embed, Emoji, KOKORO
from hata.ext.slash import Button, ButtonStyle, Row, Timeouter, abort
from scarletio import AsyncIO, CancelledError, Lock, Task, TaskGroup, copy_docs
from sqlalchemy.sql import select
from ..bot_utils.constants import PATH__KOISHI
from ..bot_utils.models import (
DB_ENGINE, DS_V2_RESULT_TABLE, DS_V2_TABLE, USER_COMMON_TABLE, ds_v2_model, ds_v2_result_model,
get_create_common_user_expression, user_common_model
)
from ..bots import SLASH_CLIENT
# Embed accent color of the dungeon sweeper feature.
DUNGEON_SWEEPER_COLOR = Color(0xa000c4)
# Registry of running games, populated at runtime (presumably keyed by
# user/player — confirm at the usage sites).
DUNGEON_SWEEPER_GAMES = {}
COLOR_TUTORIAL = Color(0xa000c4)
# Difficulty index (0..3) -> embed color / display name.
DIFFICULTY_COLORS = dict(enumerate((COLOR_TUTORIAL, Color(0x00cc03), Color(0xffe502), Color(0xe50016))))
DIFFICULTY_NAMES = dict(enumerate(('Tutorial', 'Easy', 'Normal', 'Hard',)))
# Stage/difficulty pair presumably gating the next chapter — verify in the
# unlock logic.
CHAPTER_UNLOCK_DIFFICULTY = 1
CHAPTER_UNLOCK_STAGE = 9
CHAPTER_UNLOCK_DIFFICULTY_NAME = DIFFICULTY_NAMES[CHAPTER_UNLOCK_DIFFICULTY]
# Step size used by the multi-step stage navigation button.
STAGE_STEP_MULTI_STEP_BUTTON = 10
EMOJI_KOISHI_WAVE = Emoji.precreate(648173118392762449)
# Seconds before an idle game gui times out.
GUI_TIMEOUT = 600.0
# Upper bound of emojis rendered into a single message.
MAX_RENDER_EMOJI = 150
# Gui lifecycle states.
GUI_STATE_NONE = 0
GUI_STATE_READY = 1
GUI_STATE_EDITING = 2
GUI_STATE_CANCELLING = 3
GUI_STATE_CANCELLED = 4
GUI_STATE_SWITCHING_CONTEXT = 5
# Human readable names for the gui states (GUI_STATE_NONE has no entry).
GUI_STATE_VALUE_TO_NAME = {
    GUI_STATE_READY: 'ready',
    GUI_STATE_EDITING: 'editing',
    GUI_STATE_CANCELLING: 'cancelling',
    GUI_STATE_CANCELLED: 'cancelled',
    GUI_STATE_SWITCHING_CONTEXT: 'switching context',
}
# Game-runner (session) states and their display names.
RUNNER_STATE_MENU = 1
RUNNER_STATE_PLAYING = 2
RUNNER_STATE_END_SCREEN = 3
RUNNER_STATE_CLOSED = 4
RUNNER_STATE_VALUE_TO_NAME = {
    RUNNER_STATE_MENU: 'menu',
    RUNNER_STATE_PLAYING: 'playing',
    RUNNER_STATE_END_SCREEN: 'end screen',
    RUNNER_STATE_CLOSED: 'closed',
}
# Async lock and on-disk location of the serialized stage-data file.
FILE_LOCK = Lock(KOKORO)
FILE_NAME = 'ds_v2.json'
FILE_PATH = os.path.join(PATH__KOISHI, 'koishi', 'library', FILE_NAME)
# Built-in directional / control emojis used on the game's buttons.
EMOJI_WEST = BUILTIN_EMOJIS['arrow_left']
EMOJI_NORTH = BUILTIN_EMOJIS['arrow_up']
EMOJI_SOUTH = BUILTIN_EMOJIS['arrow_down']
EMOJI_EAST = BUILTIN_EMOJIS['arrow_right']
EMOJI_NORTH_EAST = BUILTIN_EMOJIS['arrow_upper_right']
EMOJI_SOUTH_EAST = BUILTIN_EMOJIS['arrow_lower_right']
EMOJI_SOUTH_WEST = BUILTIN_EMOJIS['arrow_lower_left']
EMOJI_NORTH_WEST = BUILTIN_EMOJIS['arrow_upper_left']
EMOJI_BACK = BUILTIN_EMOJIS['leftwards_arrow_with_hook']
EMOJI_RESET = BUILTIN_EMOJIS['arrows_counterclockwise']
EMOJI_CANCEL = BUILTIN_EMOJIS['x']
EMOJI_UP = BUILTIN_EMOJIS['arrow_up_small']
EMOJI_DOWN = BUILTIN_EMOJIS['arrow_down_small']
EMOJI_UP2 = BUILTIN_EMOJIS['arrow_double_up']
EMOJI_DOWN2 = BUILTIN_EMOJIS['arrow_double_down']
EMOJI_LEFT = BUILTIN_EMOJIS['arrow_backward']
EMOJI_RIGHT = BUILTIN_EMOJIS['arrow_forward']
EMOJI_SELECT = BUILTIN_EMOJIS['ok']
EMOJI_NEXT = BUILTIN_EMOJIS['arrow_right']
EMOJI_CLOSE = BUILTIN_EMOJIS['x']
EMOJI_RESTART = BUILTIN_EMOJIS['arrows_counterclockwise']
# Pre-created custom emojis; EMOJI_NOTHING is the blank tile used by the
# style tables further down in this module.
EMOJI_NOTHING = Emoji.precreate(568838460434284574, name = '0Q')
EMOJI_REIMU = Emoji.precreate(574307645347856384, name = 'REIMU')
EMOJI_FLAN = Emoji.precreate(575387120147890210, name = 'FLAN')
EMOJI_YUKARI = Emoji.precreate(575389643424661505, name = 'YUKARI')
# Component custom_id-s; a shared prefix plus one distinguishing character.
CUSTOM_ID_BASE = 'ds.game.'
CUSTOM_ID_UP = CUSTOM_ID_BASE + '1'
CUSTOM_ID_DOWN = CUSTOM_ID_BASE + '2'
CUSTOM_ID_UP2 = CUSTOM_ID_BASE + '3'
CUSTOM_ID_DOWN2 = CUSTOM_ID_BASE + '4'
CUSTOM_ID_RIGHT = CUSTOM_ID_BASE + '5'
CUSTOM_ID_LEFT = CUSTOM_ID_BASE + '6'
CUSTOM_ID_SELECT = CUSTOM_ID_BASE + '7'
CUSTOM_ID_WEST = CUSTOM_ID_BASE + '8'
CUSTOM_ID_NORTH = CUSTOM_ID_BASE + '9'
CUSTOM_ID_SOUTH = CUSTOM_ID_BASE + 'A'
CUSTOM_ID_EAST = CUSTOM_ID_BASE + 'B'
CUSTOM_ID_BACK = CUSTOM_ID_BASE + 'C'
CUSTOM_ID_RESET = CUSTOM_ID_BASE + 'D'
CUSTOM_ID_CANCEL = CUSTOM_ID_BASE + 'E'
CUSTOM_ID_NEXT = CUSTOM_ID_BASE + 'F'
CUSTOM_ID_CLOSE = CUSTOM_ID_BASE + 'G'
CUSTOM_ID_RESTART = CUSTOM_ID_BASE + 'H'
CUSTOM_ID_EMPTY_1 = CUSTOM_ID_BASE + 'I'
CUSTOM_ID_EMPTY_2 = CUSTOM_ID_BASE + 'J'
CUSTOM_ID_EMPTY_3 = CUSTOM_ID_BASE + 'K'
CUSTOM_ID_EMPTY_4 = CUSTOM_ID_BASE + 'L'
CUSTOM_ID_NORTH_TO_EAST = CUSTOM_ID_BASE + 'M'
CUSTOM_ID_NORTH_TO_WEST = CUSTOM_ID_BASE + 'N'
CUSTOM_ID_SOUTH_TO_EAST = CUSTOM_ID_BASE + 'O'
CUSTOM_ID_SOUTH_TO_WEST = CUSTOM_ID_BASE + 'P'
CUSTOM_ID_EAST_TO_NORTH = CUSTOM_ID_BASE + 'Q'
CUSTOM_ID_EAST_TO_SOUTH = CUSTOM_ID_BASE + 'R'
CUSTOM_ID_WEST_TO_NORTH = CUSTOM_ID_BASE + 'S'
CUSTOM_ID_WEST_TO_SOUTH = CUSTOM_ID_BASE + 'T'
CUSTOM_ID_SKILL = CUSTOM_ID_BASE + '0'
# Tile encoding: each cell of the board is a 16-bit value combining ground
# type (low bits), pushable pieces, "used" specials, the character and its
# facing, and wall segments (high bits). The masks below carve those fields.
BIT_MASK_PASSABLE = 0b0000000000000111
BIT_MASK_FLOOR = 0b0000000000000001
BIT_MASK_TARGET = 0b0000000000000010
BIT_MASK_HOLE_P = 0b0000000000000011
BIT_MASK_OBJECT_P = 0b0000000000000100
# Pushable (box) variants, one per ground type they currently sit on.
BIT_MASK_PUSHABLE = 0b0000000000111000
BIT_MASK_BOX = 0b0000000000001000
BIT_MASK_BOX_TARGET = 0b0000000000010000
BIT_MASK_BOX_HOLE = 0b0000000000011000
BIT_MASK_BOX_OBJECT = 0b0000000000100000
# "Used" specials (filled hole / activated object).
BIT_MASK_SPECIAL = 0b0000000011000000
BIT_MASK_HOLE_U = 0b0000000001000000
BIT_MASK_OBJECT_U = 0b0000000010000000
# Character presence + facing direction.
BIT_MASK_CHAR = 0b0000011100000000
BIT_MASK_CHAR_N = 0b0000010000000000
BIT_MASK_CHAR_E = 0b0000010100000000
BIT_MASK_CHAR_S = 0b0000011000000000
BIT_MASK_CHAR_W = 0b0000011100000000
# BIT_MASK_CN_FLOOR = 0b0000010000000001
# BIT_MASK_CE_FLOOR = 0b0000010100000001
# BIT_MASK_CS_FLOOR = 0b0000011000000001
# BIT_MASK_CW_FLOOR = 0b0000011100000001
#
# BIT_MASK_CN_TARGET = 0b0000010000000010
# BIT_MASK_CE_TARGET = 0b0000010100000010
# BIT_MASK_CS_TARGET = 0b0000011000000010
# BIT_MASK_CW_TARGET = 0b0000011100000010
#
# BIT_MASK_CN_OBJECT_P = 0b0000010000000011
# BIT_MASK_CE_OBJECT_P = 0b0000010100000011
# BIT_MASK_CS_OBJECT_P = 0b0000011000000011
# BIT_MASK_CW_OBJECT_P = 0b0000011100000011
#
# BIT_MASK_CN_HOLE_P = 0b0000010000000100
# BIT_MASK_CE_HOLE_P = 0b0000010100000100
# BIT_MASK_CS_HOLE_P = 0b0000011000000100
# BIT_MASK_CW_HOLE_P = 0b0000011100000100
# Wall segments per side, plus the "nothing" (out-of-board) marker.
BIT_MASK_WALL = 0b1111100000000000
BIT_MASK_NOTHING = 0b0000100000000000
BIT_MASK_WALL_N = 0b0001000000000000
BIT_MASK_WALL_E = 0b0010000000000000
BIT_MASK_WALL_S = 0b0100000000000000
BIT_MASK_WALL_W = 0b1000000000000000
# BIT_MASK_WALL_A = 0b1111000000000000
# BIT_MASK_WALL_SE = 0b0110000000000000
# BIT_MASK_WALL_SW = 0b1100000000000000
# Derived composites: tiles a box can never be pushed onto, and tiles that
# block line of sight.
BIT_MASK_UNPUSHABLE = BIT_MASK_WALL | BIT_MASK_SPECIAL
BIT_MASK_BLOCKS_LOS = BIT_MASK_WALL | BIT_MASK_PUSHABLE | BIT_MASK_OBJECT_U
# Emoji tiles shared by every render style: the empty tile and each wall
# combination (keys are wall-side bit mask unions).
STYLE_DEFAULT_PARTS = {
    BIT_MASK_NOTHING : EMOJI_NOTHING.as_emoji,
    BIT_MASK_WALL_E : Emoji.precreate(568838488464687169, name = '0P').as_emoji,
    BIT_MASK_WALL_S : Emoji.precreate(568838546853462035, name = '0N').as_emoji,
    BIT_MASK_WALL_W : Emoji.precreate(568838580278132746, name = '0K').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_E | BIT_MASK_WALL_S | BIT_MASK_WALL_W
        : Emoji.precreate(578678249518006272, name = '0X').as_emoji,
    BIT_MASK_WALL_E | BIT_MASK_WALL_S : Emoji.precreate(568838557318250499, name = '0M').as_emoji,
    BIT_MASK_WALL_S | BIT_MASK_WALL_W : Emoji.precreate(568838569087598627, name = '0L').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_E : Emoji.precreate(574312331849498624, name = '01').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_W : Emoji.precreate(574312332453216256, name = '00').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_E | BIT_MASK_WALL_S
        : Emoji.precreate(578648597621506048, name = '0R').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_S | BIT_MASK_WALL_W
        : Emoji.precreate(578648597546139652, name = '0S').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_S : Emoji.precreate(578654051848421406, name = '0T').as_emoji,
    BIT_MASK_WALL_E | BIT_MASK_WALL_W : Emoji.precreate(578674409968238613, name = '0U').as_emoji,
    BIT_MASK_WALL_N | BIT_MASK_WALL_E | BIT_MASK_WALL_W
        : Emoji.precreate(578676096829227027, name = '0V').as_emoji,
    BIT_MASK_WALL_E | BIT_MASK_WALL_S | BIT_MASK_WALL_W
        : Emoji.precreate(578676650389274646, name = '0W').as_emoji,
}
# Reimu-themed tile set: shared wall parts plus per-ground / per-character
# tiles (keys combine the bit masks defined above). Combinations this theme
# does not draw map to the blank tile.
STYLE_REIMU = {
    **STYLE_DEFAULT_PARTS,
    BIT_MASK_WALL_N : Emoji.precreate(580141387631165450, name = '0O').as_emoji,
    BIT_MASK_FLOOR : Emoji.precreate(574211101638656010, name = '0H').as_emoji,
    BIT_MASK_TARGET : Emoji.precreate(574234087645249546, name = '0A').as_emoji,
    BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_HOLE_P : Emoji.precreate(574202754134835200, name = '0I').as_emoji,
    BIT_MASK_BOX : Emoji.precreate(574212211434717214, name = '0G').as_emoji,
    BIT_MASK_BOX_TARGET : Emoji.precreate(574213002190913536, name = '0F').as_emoji,
    BIT_MASK_BOX_HOLE : Emoji.precreate(574212211434717214, name = '0G').as_emoji,
    BIT_MASK_BOX_OBJECT : EMOJI_NOTHING.as_emoji,
    BIT_MASK_HOLE_U : Emoji.precreate(574187906642477066, name = '0J').as_emoji,
    BIT_MASK_OBJECT_U : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_FLOOR : Emoji.precreate(574214258871500800, name = '0D').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_FLOOR : Emoji.precreate(574213472347226114, name = '0E').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_FLOOR : Emoji.precreate(574220751662612502, name = '0B').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_FLOOR : Emoji.precreate(574218036156825629, name = '0C').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_TARGET : Emoji.precreate(574249292496371732, name = '04').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_TARGET : Emoji.precreate(574249292026478595, name = '07').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_TARGET : Emoji.precreate(574249292261490690, name = '06').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_TARGET : Emoji.precreate(574249292487720970, name = '05').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_HOLE_P : Emoji.precreate(574249293662388264, name = '02').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_HOLE_P : Emoji.precreate(574249291074240523, name = '09').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_HOLE_P : Emoji.precreate(574249291145543681, name = '08').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_HOLE_P : Emoji.precreate(574249292957614090, name = '03').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
}
# Flandre-themed tile set: shared wall parts plus per-ground / per-character
# tiles (keys combine the bit masks defined above). This theme also draws
# the object tiles that the Reimu theme leaves blank.
STYLE_FLAN = {
    **STYLE_DEFAULT_PARTS,
    BIT_MASK_WALL_N : Emoji.precreate(580143707534262282, name = '0X').as_emoji,
    BIT_MASK_FLOOR : Emoji.precreate(580150656501940245, name = '0Y').as_emoji,
    BIT_MASK_TARGET : Emoji.precreate(580153111545511967, name = '0b').as_emoji,
    BIT_MASK_OBJECT_P : Emoji.precreate(580163014045728818, name = '0e').as_emoji,
    BIT_MASK_HOLE_P : Emoji.precreate(580159124466303001, name = '0d').as_emoji,
    BIT_MASK_BOX : Emoji.precreate(580151963937931277, name = '0a').as_emoji,
    BIT_MASK_BOX_TARGET : Emoji.precreate(580188214086598667, name = '0f').as_emoji,
    BIT_MASK_BOX_HOLE : Emoji.precreate(580151963937931277, name = '0a').as_emoji,
    BIT_MASK_BOX_OBJECT : Emoji.precreate(580151963937931277, name = '0a').as_emoji,
    BIT_MASK_HOLE_U : Emoji.precreate(580156463888990218, name = '0c').as_emoji,
    BIT_MASK_OBJECT_U : Emoji.precreate(580151385258065925, name = '0Z').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_FLOOR : Emoji.precreate(580357693022142485, name = '0g').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_FLOOR : Emoji.precreate(580357693093576714, name = '0h').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_FLOOR : Emoji.precreate(580357693160685578, name = '0i').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_FLOOR : Emoji.precreate(580357693152165900, name = '0j').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_TARGET : Emoji.precreate(580357693018210305, name = '0k').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_TARGET : Emoji.precreate(580357693085188109, name = '0l').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_TARGET : Emoji.precreate(580357693181657089, name = '0m').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_TARGET : Emoji.precreate(580357693361881089, name = '0n').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_HOLE_P : Emoji.precreate(580357693324132352, name = '0o').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_HOLE_P : Emoji.precreate(580357693072736257, name = '0p').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_HOLE_P : Emoji.precreate(580357693131456513, name = '0q').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_HOLE_P : Emoji.precreate(580357693366337536, name = '0r').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_OBJECT_P : Emoji.precreate(580357693143777300, name = '0s').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_OBJECT_P : Emoji.precreate(580357692711763973, name = '0t').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_OBJECT_P : Emoji.precreate(580357693269606410, name = '0u').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_OBJECT_P : Emoji.precreate(580357693387177984, name = '0v').as_emoji,
}
# Emoji tile set for chapter 3 (Yukari): maps tile bit-mask combinations to rendered emoji.
# Starts from ``STYLE_DEFAULT_PARTS``; the `OBJECT` tiles are unused in this chapter, so they
# render as ``EMOJI_NOTHING``.
STYLE_YUKARI = {
    **STYLE_DEFAULT_PARTS,
    # Terrain tiles.
    BIT_MASK_WALL_N : Emoji.precreate(593179300270702593, name = '0w').as_emoji,
    BIT_MASK_FLOOR : Emoji.precreate(593179300426022914, name = '0x').as_emoji,
    BIT_MASK_TARGET : Emoji.precreate(593179300019306556, name = '0y').as_emoji,
    BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_HOLE_P : Emoji.precreate(593179300287479833, name = '0z').as_emoji,
    BIT_MASK_BOX : Emoji.precreate(593179300296130561, name = '10').as_emoji,
    BIT_MASK_BOX_TARGET : Emoji.precreate(593179300136615936, name = '11').as_emoji,
    BIT_MASK_BOX_HOLE : Emoji.precreate(593179300149067790, name = '12').as_emoji,
    BIT_MASK_BOX_OBJECT : EMOJI_NOTHING.as_emoji,
    BIT_MASK_HOLE_U : Emoji.precreate(593179300153262196, name = '13').as_emoji,
    BIT_MASK_OBJECT_U : EMOJI_NOTHING.as_emoji,
    # Character facing north / east / south / west on each terrain type.
    BIT_MASK_CHAR_N | BIT_MASK_FLOOR : Emoji.precreate(593179300161650871, name = '14').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_FLOOR : Emoji.precreate(593179300153262257, name = '15').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_FLOOR : Emoji.precreate(593179300300324887, name = '16').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_FLOOR : Emoji.precreate(593179300237410314, name = '17').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_TARGET : Emoji.precreate(593179300207919125, name = '18').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_TARGET : Emoji.precreate(593179300145135646, name = '19').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_TARGET : Emoji.precreate(593179300170301451, name = '1A').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_TARGET : Emoji.precreate(593179300153262189, name = '1B').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_HOLE_P : Emoji.precreate(593179300199399531, name = '1C').as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_HOLE_P : Emoji.precreate(593179300300193800, name = '1D').as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_HOLE_P : Emoji.precreate(593179300216176760, name = '1E').as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_HOLE_P : Emoji.precreate(593179300153524224, name = '1F').as_emoji,
    BIT_MASK_CHAR_N | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_E | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_S | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
    BIT_MASK_CHAR_W | BIT_MASK_OBJECT_P : EMOJI_NOTHING.as_emoji,
}
# Help embed describing the game's rules, the controls and the three playable chapters.
# The example tile sequences are rendered from the chapters' emoji styles.
#
# Fixes over the previous revision: a missing space produced "finishthe stages" in the
# rendered text, a stray `:` format spec in a chapter 1 example, and several user facing
# typos ("the it's desired place it's color", "want to put", "travel trough", ...).
RULES_HELP = Embed(
    'Rules of Dungeon sweeper',
    (
        f'Your quest is to help our cute Touhou characters to put their stuff to places, where it is supposed '
        f'to be. These places are marked with an {BUILTIN_EMOJIS["x"]} on the floor. Because our characters are '
        f'lazy, the fewer steps required to sort their stuff, the better rating they give you.\n'
        f'\n'
        f'You can move with the buttons under the embed, to activate your characters\' skill, or go back, reset the '
        f'map or cancel the game:\n'
        f'{EMOJI_NORTH_WEST}{EMOJI_NORTH}{EMOJI_NORTH_EAST}{EMOJI_BACK}\n'
        f'{EMOJI_WEST}{EMOJI_REIMU}{EMOJI_EAST}{EMOJI_RESET}\n'
        f'{EMOJI_SOUTH_WEST}{EMOJI_SOUTH}{EMOJI_SOUTH_EAST}{EMOJI_CANCEL}\n'
        f'\n'
        f'You can push boxes by moving towards them, but you cannot push more than one at the same time or push '
        f'them into the wall:\n'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}{STYLE_REIMU[BIT_MASK_FLOOR]}'
        f'{EMOJI_EAST}'
        f'{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}'
        f'\n'
        f'You can push the boxes into the holes to pass them, but be careful, you might lose too many boxes to '
        f'finish the stages!\n'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}{STYLE_REIMU[BIT_MASK_HOLE_U]}'
        f'{EMOJI_EAST}{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}'
        f'{STYLE_REIMU[BIT_MASK_HOLE_P]}{EMOJI_EAST}{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_FLOOR]}'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_HOLE_P]}\n'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}{STYLE_REIMU[BIT_MASK_HOLE_P]}'
        f'{EMOJI_EAST}{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}'
        f'{STYLE_REIMU[BIT_MASK_BOX_HOLE]}\n'
        f'If you get a box to its desired place, its color will change:\n'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}{STYLE_REIMU[BIT_MASK_TARGET]}'
        f'{EMOJI_EAST}{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}'
        f'{STYLE_REIMU[BIT_MASK_BOX_TARGET]}\n'
        f'The game has 3 chapters. *(there will be more maybe.)* Each chapter introduces a different character to '
        f'play with.'
    ),
    color = DUNGEON_SWEEPER_COLOR,
).add_field(
    f'Chapter 1 {EMOJI_REIMU}',
    (
        f'Your character is Hakurei Reimu (博麗 霊夢), who needs some help at her basement to sort her *boxes* out.\n'
        f'Reimu can jump over a box or hole.\n'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}{STYLE_REIMU[BIT_MASK_FLOOR]}'
        f'{EMOJI_EAST}{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_BOX]}'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}\n'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_HOLE_U]}{STYLE_REIMU[BIT_MASK_FLOOR]}'
        f'{EMOJI_EAST}{STYLE_REIMU[BIT_MASK_FLOOR]}{STYLE_REIMU[BIT_MASK_HOLE_U]}'
        f'{STYLE_REIMU[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}'
    ),
).add_field(
    f'Chapter 2 {EMOJI_FLAN}',
    (
        f'Your character is Scarlet Flandre (スカーレット・フランドール Sukaaretto Furandooru), who wants to put her '
        f'*bookshelves* on their desired place.\n'
        f'Flandre can destroy absolutely anything and everything, and she will get rid of the pillars for you.\n'
        f'{STYLE_FLAN[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_FLAN[BIT_MASK_OBJECT_U]}{EMOJI_EAST}'
        f'{STYLE_FLAN[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_FLAN[BIT_MASK_OBJECT_P]}{EMOJI_EAST}'
        f'{STYLE_FLAN[BIT_MASK_FLOOR]}{STYLE_FLAN[BIT_MASK_CHAR_E | BIT_MASK_OBJECT_P]}\n'
        f'{STYLE_FLAN[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_FLAN[BIT_MASK_BOX]}{STYLE_FLAN[BIT_MASK_OBJECT_P]}'
        f'{EMOJI_EAST}{STYLE_FLAN[BIT_MASK_FLOOR]}{STYLE_FLAN[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}'
        f'{STYLE_FLAN[BIT_MASK_BOX_OBJECT]}'
    ),
).add_field(
    f'Chapter 3 {EMOJI_YUKARI}',
    (
        f'Your character is Yakumo Yukari (八雲 紫). Her beddings need some replacing at her home.\n'
        f'Yukari can create gaps and travel through them. She will open a gap to the closest place straight ahead, '
        f'which is separated from her by a bedding or a wall.\n'
        f'{STYLE_YUKARI[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_YUKARI[BIT_MASK_WALL_N]}{STYLE_YUKARI[BIT_MASK_WALL_N]}'
        f'{STYLE_YUKARI[BIT_MASK_FLOOR]}{EMOJI_EAST}{STYLE_YUKARI[BIT_MASK_FLOOR]}{STYLE_YUKARI[BIT_MASK_WALL_N]}'
        f'{STYLE_YUKARI[BIT_MASK_WALL_N]}{STYLE_YUKARI[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}\n'
        f'{STYLE_YUKARI[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}{STYLE_YUKARI[BIT_MASK_BOX]}{STYLE_YUKARI[BIT_MASK_BOX]}'
        f'{STYLE_YUKARI[BIT_MASK_FLOOR]}{EMOJI_EAST}{STYLE_YUKARI[BIT_MASK_FLOOR]}{STYLE_YUKARI[BIT_MASK_BOX]}'
        f'{STYLE_YUKARI[BIT_MASK_BOX]}{STYLE_YUKARI[BIT_MASK_CHAR_E | BIT_MASK_FLOOR]}'
    ),
).add_footer(
    'Game based on Sweeper of Suika.'
)
# --- Menu navigation buttons (stage / chapter selection) ---
BUTTON_UP_ENABLED = Button(
    emoji = EMOJI_UP,
    custom_id = CUSTOM_ID_UP,
    style = ButtonStyle.blue,
)
BUTTON_UP_DISABLED = BUTTON_UP_ENABLED.copy_with(enabled = False)
BUTTON_DOWN_ENABLED = Button(
    emoji = EMOJI_DOWN,
    custom_id = CUSTOM_ID_DOWN,
    style = ButtonStyle.blue,
)
BUTTON_DOWN_DISABLED = BUTTON_DOWN_ENABLED.copy_with(enabled = False)
# "UP2" / "DOWN2" presumably jump multiple entries at once — confirm against the handlers.
BUTTON_UP2_ENABLED = Button(
    emoji = EMOJI_UP2,
    custom_id = CUSTOM_ID_UP2,
    style = ButtonStyle.blue,
)
BUTTON_UP2_DISABLED = BUTTON_UP2_ENABLED.copy_with(enabled = False)
BUTTON_DOWN2_ENABLED = Button(
    emoji = EMOJI_DOWN2,
    custom_id = CUSTOM_ID_DOWN2,
    style = ButtonStyle.blue,
)
BUTTON_DOWN2_DISABLED = BUTTON_DOWN2_ENABLED.copy_with(enabled = False)
BUTTON_LEFT_ENABLED = Button(
    emoji = EMOJI_LEFT,
    custom_id = CUSTOM_ID_LEFT,
    style = ButtonStyle.blue,
)
BUTTON_LEFT_DISABLED = BUTTON_LEFT_ENABLED.copy_with(enabled = False)
BUTTON_RIGHT_ENABLED = Button(
    emoji = EMOJI_RIGHT,
    custom_id = CUSTOM_ID_RIGHT,
    style = ButtonStyle.blue,
)
BUTTON_RIGHT_DISABLED = BUTTON_RIGHT_ENABLED.copy_with(enabled = False)
BUTTON_SELECT_ENABLED = Button(
    emoji = EMOJI_SELECT,
    custom_id = CUSTOM_ID_SELECT,
    style = ButtonStyle.green,
)
BUTTON_SELECT_DISABLED = BUTTON_SELECT_ENABLED.copy_with(enabled = False)
# Permanently disabled placeholder buttons used to pad component rows; each needs its own
# custom id, as ids must be unique within a message.
BUTTON_EMPTY_1 = Button(
    emoji = EMOJI_NOTHING,
    custom_id = CUSTOM_ID_EMPTY_1,
    style = ButtonStyle.gray,
    enabled = False,
)
BUTTON_EMPTY_2 = BUTTON_EMPTY_1.copy_with(custom_id = CUSTOM_ID_EMPTY_2)
BUTTON_EMPTY_3 = BUTTON_EMPTY_1.copy_with(custom_id = CUSTOM_ID_EMPTY_3)
BUTTON_EMPTY_4 = BUTTON_EMPTY_1.copy_with(custom_id = CUSTOM_ID_EMPTY_4)
# --- Per character skill buttons ---
# Each character shares the same skill custom id; only the emoji differs. Four states:
# enabled (usable), disabled (not usable now), used (spent, gray), activated (armed, green).
BUTTON_SKILL_REIMU_ENABLED = Button(
    emoji = EMOJI_REIMU,
    custom_id = CUSTOM_ID_SKILL,
    style = ButtonStyle.blue,
)
BUTTON_SKILL_REIMU_DISABLED = BUTTON_SKILL_REIMU_ENABLED.copy_with(enabled = False)
BUTTON_SKILL_REIMU_USED = BUTTON_SKILL_REIMU_DISABLED.copy_with(button_style = ButtonStyle.gray)
BUTTON_SKILL_REIMU_ACTIVATED = BUTTON_SKILL_REIMU_ENABLED.copy_with(button_style = ButtonStyle.green)
BUTTON_SKILL_FLAN_ENABLED = BUTTON_SKILL_REIMU_ENABLED.copy_with(emoji = EMOJI_FLAN)
BUTTON_SKILL_FLAN_DISABLED = BUTTON_SKILL_FLAN_ENABLED.copy_with(enabled = False)
BUTTON_SKILL_FLAN_USED = BUTTON_SKILL_FLAN_DISABLED.copy_with(button_style = ButtonStyle.gray)
BUTTON_SKILL_FLAN_ACTIVATED = BUTTON_SKILL_FLAN_ENABLED.copy_with(button_style = ButtonStyle.green)
BUTTON_SKILL_YUKARI_ENABLED = BUTTON_SKILL_REIMU_ENABLED.copy_with(emoji = EMOJI_YUKARI)
BUTTON_SKILL_YUKARI_DISABLED = BUTTON_SKILL_YUKARI_ENABLED.copy_with(enabled = False)
BUTTON_SKILL_YUKARI_USED = BUTTON_SKILL_YUKARI_DISABLED.copy_with(button_style = ButtonStyle.gray)
BUTTON_SKILL_YUKARI_ACTIVATED = BUTTON_SKILL_YUKARI_ENABLED.copy_with(button_style = ButtonStyle.green)
# --- In-game movement buttons (cardinal directions) ---
BUTTON_WEST_ENABLED = Button(
    emoji = EMOJI_WEST,
    custom_id = CUSTOM_ID_WEST,
    style = ButtonStyle.blue,
)
BUTTON_WEST_DISABLED = BUTTON_WEST_ENABLED.copy_with(enabled = False)
BUTTON_NORTH_ENABLED = Button(
    emoji = EMOJI_NORTH,
    custom_id = CUSTOM_ID_NORTH,
    style = ButtonStyle.blue,
)
BUTTON_NORTH_DISABLED = BUTTON_NORTH_ENABLED.copy_with(enabled = False)
BUTTON_SOUTH_ENABLED = Button(
    emoji = EMOJI_SOUTH,
    custom_id = CUSTOM_ID_SOUTH,
    style = ButtonStyle.blue,
)
BUTTON_SOUTH_DISABLED = BUTTON_SOUTH_ENABLED.copy_with(enabled = False)
BUTTON_EAST_ENABLED = Button(
    emoji = EMOJI_EAST,
    custom_id = CUSTOM_ID_EAST,
    style = ButtonStyle.blue,
)
BUTTON_EAST_DISABLED = BUTTON_EAST_ENABLED.copy_with(enabled = False)
# --- Diagonal movement buttons ---
# A diagonal is executed as two cardinal steps; `X_TO_Y` picks which axis moves first, so
# each corner has two enabled variants and a single disabled placeholder (reusing an
# `EMPTY` custom id, since a disabled diagonal dispatches nothing).
BUTTON_NORTH_TO_EAST_ENABLED = Button(
    emoji = EMOJI_NORTH_EAST,
    custom_id = CUSTOM_ID_NORTH_TO_EAST,
    style = ButtonStyle.blue,
)
BUTTON_EAST_TO_NORTH_ENABLED = BUTTON_NORTH_TO_EAST_ENABLED.copy_with(custom_id = CUSTOM_ID_EAST_TO_NORTH)
BUTTON_NORTH_EAST_DISABLED = BUTTON_NORTH_TO_EAST_ENABLED.copy_with(
    custom_id = CUSTOM_ID_EMPTY_1,
    enabled = False,
)
BUTTON_NORTH_TO_WEST_ENABLED = Button(
    emoji = EMOJI_NORTH_WEST,
    custom_id = CUSTOM_ID_NORTH_TO_WEST,
    style = ButtonStyle.blue,
)
BUTTON_WEST_TO_NORTH_ENABLED = BUTTON_NORTH_TO_WEST_ENABLED.copy_with(custom_id = CUSTOM_ID_WEST_TO_NORTH)
BUTTON_NORTH_WEST_DISABLED = BUTTON_NORTH_TO_WEST_ENABLED.copy_with(
    custom_id = CUSTOM_ID_EMPTY_2,
    enabled = False,
)
BUTTON_SOUTH_TO_EAST_ENABLED = Button(
    emoji = EMOJI_SOUTH_EAST,
    custom_id = CUSTOM_ID_SOUTH_TO_EAST,
    style = ButtonStyle.blue,
)
BUTTON_EAST_TO_SOUTH_ENABLED = BUTTON_SOUTH_TO_EAST_ENABLED.copy_with(custom_id = CUSTOM_ID_EAST_TO_SOUTH)
BUTTON_SOUTH_EAST_DISABLED = BUTTON_SOUTH_TO_EAST_ENABLED.copy_with(
    custom_id = CUSTOM_ID_EMPTY_3,
    enabled = False,
)
BUTTON_SOUTH_TO_WEST_ENABLED = Button(
    emoji = EMOJI_SOUTH_WEST,
    custom_id = CUSTOM_ID_SOUTH_TO_WEST,
    style = ButtonStyle.blue,
)
BUTTON_WEST_TO_SOUTH_ENABLED = BUTTON_SOUTH_TO_WEST_ENABLED.copy_with(custom_id = CUSTOM_ID_WEST_TO_SOUTH)
BUTTON_SOUTH_WEST_DISABLED = BUTTON_SOUTH_TO_WEST_ENABLED.copy_with(
    custom_id = CUSTOM_ID_EMPTY_4,
    enabled = False,
)
# --- Game control buttons ---
BUTTON_BACK_ENABLED = Button(
    emoji = EMOJI_BACK,
    custom_id = CUSTOM_ID_BACK,
    style = ButtonStyle.blue,
)
BUTTON_BACK_DISABLED = BUTTON_BACK_ENABLED.copy_with(enabled = False)
BUTTON_RESET_ENABLED = Button(
    emoji = EMOJI_RESET,
    custom_id = CUSTOM_ID_RESET,
    style = ButtonStyle.blue,
)
BUTTON_RESET_DISABLED = BUTTON_RESET_ENABLED.copy_with(enabled = False)
BUTTON_CANCEL = Button(
    emoji = EMOJI_CANCEL,
    custom_id = CUSTOM_ID_CANCEL,
    style = ButtonStyle.blue,
)
BUTTON_NEXT = Button(
    emoji = EMOJI_NEXT,
    custom_id = CUSTOM_ID_NEXT,
    style = ButtonStyle.blue,
)
BUTTON_NEXT_DISABLED = BUTTON_NEXT.copy_with(enabled = False)
BUTTON_CLOSE = Button(
    emoji = EMOJI_CLOSE,
    custom_id = CUSTOM_ID_CLOSE,
    style = ButtonStyle.blue,
)
BUTTON_RESTART = Button(
    emoji = EMOJI_RESTART,
    custom_id = CUSTOM_ID_RESTART,
    style = ButtonStyle.blue,
)
# Highest (worst) rating level; ``RATINGS`` and ``RATING_REWARDS`` are indexed by rating level,
# `0` being the best ('S').
RATING_MAX = 5
RATINGS = ('S', 'A', 'B', 'C', 'D', 'E')
# Reward handed out for each rating level, best to worst.
RATING_REWARDS = (750, 500, 400, 300, 200, 100)
# Bonus reward on top of the 'S' reward for beating a stage's stored best.
NEW_RECORD_REWARD = 1000
def stage_source_sort_key(stage_source):
    """
    Sort key function ordering stage sources by their identifier.
    Parameters
    ----------
    stage_source : ``StageSource``
        The stage source to get the sort key of.
    Returns
    -------
    identifier : `int`
        The stage source's identifier, used as sort key.
    """
    identifier = stage_source.id
    return identifier
async def save_stage_sources():
    """
    Saves all registered stage sources to ``FILE_PATH``.
    This function is a coroutine.
    """
    # Serialise under the file lock so concurrent saves cannot interleave their writes.
    async with FILE_LOCK:
        # Dump in a deterministic (identifier sorted) order, so the output file is stable.
        stage_sources = sorted(STAGES_BY_ID.values(), key = stage_source_sort_key)
        data = pretty_dump_stage_sources(stage_sources)
        # NOTE(review): ``AsyncIO`` is awaited to open the file, then used as a synchronous
        # context manager — presumably project specific API; confirm before restructuring.
        with await AsyncIO(FILE_PATH, 'w') as file:
            await file.write(data)
def set_new_best(stage, steps):
    """
    Stores `steps` as the stage's new best solution and schedules the stage sources to be saved.
    Parameters
    ----------
    stage : ``StageSource``
        The stage to update.
    steps : `int`
        The stage's new best step count.
    """
    stage.best = steps
    # Persist in the background; the caller does not wait for the save to finish.
    Task(KOKORO, save_stage_sources())
def get_rating_for(stage, steps):
    """
    Gets the rating for the given stage and step combination.
    Parameters
    ----------
    stage : ``StageSource``
        The stage to get the rating of.
    steps : `int`
        The user's step count.
    Returns
    -------
    rating : `str`
        The step's rating, one of ``RATINGS``.
    """
    stage_best = stage.best
    # Every `rating_factor` extra steps over the stored best lowers the rating by one grade.
    rating_factor = floor(stage_best / 20.0) + 5.0
    rating_level = ceil((steps - stage_best) / rating_factor)
    # Clamp into the valid index range. A run beating the stored best would yield a negative
    # level and silently index ``RATINGS`` from its end ('E' instead of 'S').
    if rating_level < 0:
        rating_level = 0
    elif rating_level > RATING_MAX:
        rating_level = RATING_MAX
    return RATINGS[rating_level]
def get_reward_for_steps(stage_id, steps):
    """
    Gets reward amount for the given amount of steps.
    Parameters
    ----------
    stage_id : `int`
        The stage's identifier.
    steps : `int`
        The amount of steps.
    Returns
    -------
    reward : `int`
        The user's reward.
    """
    stage = STAGES_BY_ID[stage_id]
    best = stage.best
    # New record: store it and hand out the top rating's reward plus the record bonus.
    if steps < best:
        set_new_best(stage, steps)
        return NEW_RECORD_REWARD + RATING_REWARDS[0]
    # Every `factor` steps over the record drops the reward by one tier, floored at the worst.
    factor = floor(best / 20.0) + 5.0
    tier = min(ceil((steps - best) / factor), RATING_MAX)
    return RATING_REWARDS[tier]
def get_reward_difference(stage_id, steps_1, steps_2):
    """
    Gets additional reward if a user received better result.
    Parameters
    ----------
    stage_id : `int`
        The stage's identifier.
    steps_1 : `int`
        The old amount of steps.
    steps_2 : `int`
        The new amount of steps.
    Returns
    -------
    reward : `int`
        Extra hearts, what the respective user should get.
    """
    stage = STAGES_BY_ID[stage_id]
    best = stage.best
    factor = floor(best / 20.0) + 5.0
    # Reward earned by the old solution.
    old_reward = RATING_REWARDS[min(ceil((steps_1 - best) / factor), RATING_MAX)]
    # Reward earned by the new solution; beating the stored best yields the bonus on top.
    if steps_2 < best:
        set_new_best(stage, steps_2)
        new_reward = NEW_RECORD_REWARD + RATING_REWARDS[0]
    else:
        new_reward = RATING_REWARDS[min(ceil((steps_2 - best) / factor), RATING_MAX)]
    # Only extra hearts are handed out; a worse run never subtracts.
    return max(new_reward - old_reward, 0)
# Direction identifiers tracked by ``MoveDirections``.
# Cardinal moves:
MOVE_DIRECTION_NORTH = 1
MOVE_DIRECTION_EAST = 2
MOVE_DIRECTION_SOUTH = 3
MOVE_DIRECTION_WEST = 4
# Diagonal moves, executed as two cardinal steps; `X_TO_Y` means stepping `X` first, then `Y`.
MOVE_DIRECTION_NORTH_TO_EAST = 5
MOVE_DIRECTION_NORTH_TO_WEST = 6
MOVE_DIRECTION_SOUTH_TO_EAST = 7
MOVE_DIRECTION_SOUTH_TO_WEST = 8
MOVE_DIRECTION_EAST_TO_NORTH = 9
MOVE_DIRECTION_EAST_TO_SOUTH = 10
MOVE_DIRECTION_WEST_TO_NORTH = 11
MOVE_DIRECTION_WEST_TO_SOUTH = 12
class MoveDirections:
    """
    Container class to store to which positions a character can move or use skill.
    Attributes
    ----------
    directions : `set` of `int`
        The enabled `MOVE_DIRECTION_*` identifiers.
    """
    __slots__ = ('directions',)
    def __new__(cls):
        """
        Creates a new move direction holder.
        It holds to which directions the player can move.
        """
        self = object.__new__(cls)
        self.directions = set()
        return self
    def _set(self, direction, value):
        """
        Sets the given direction identifier to the given value.
        Parameters
        ----------
        direction : `int`
            The direction to set.
        value : `bool`
            Whether to enable the direction.
        """
        if value:
            self.directions.add(direction)
        else:
            self.directions.discard(direction)
    def _get(self, direction):
        """
        Gets whether the given direction is enabled.
        Parameters
        ----------
        direction : `int`
            The direction to check.
        Returns
        -------
        value : `bool`
        """
        return (direction in self.directions)
    def set_north(self, value):
        """
        Sets the `north` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_NORTH, value)
    def set_east(self, value):
        """
        Sets the `east` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_EAST, value)
    def set_south(self, value):
        """
        Sets the `south` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_SOUTH, value)
    def set_west(self, value):
        """
        Sets the `west` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_WEST, value)
    def set_north_to_east(self, value):
        """
        Sets the `north to east` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_NORTH_TO_EAST, value)
    def set_north_to_west(self, value):
        """
        Sets the `north to west` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_NORTH_TO_WEST, value)
    def set_south_to_east(self, value):
        """
        Sets the `south to east` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_SOUTH_TO_EAST, value)
    def set_south_to_west(self, value):
        """
        Sets the `south to west` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_SOUTH_TO_WEST, value)
    def set_east_to_north(self, value):
        """
        Sets the `east to north` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_EAST_TO_NORTH, value)
    def set_east_to_south(self, value):
        """
        Sets the `east to south` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_EAST_TO_SOUTH, value)
    def set_west_to_north(self, value):
        """
        Sets the `west to north` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_WEST_TO_NORTH, value)
    def set_west_to_south(self, value):
        """
        Sets the `west to south` direction to the given value.
        Parameters
        ----------
        value : `bool`
            Whether to enable the direction.
        """
        self._set(MOVE_DIRECTION_WEST_TO_SOUTH, value)
    def get_button_north(self):
        """
        Gets the `north` button to show depending which directions are allowed.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_NORTH):
            button = BUTTON_NORTH_ENABLED
        else:
            button = BUTTON_NORTH_DISABLED
        return button
    def get_button_east(self):
        """
        Gets the `east` button depending which directions are allowed.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_EAST):
            button = BUTTON_EAST_ENABLED
        else:
            button = BUTTON_EAST_DISABLED
        return button
    def get_button_south(self):
        """
        Gets the `south` button depending which directions are allowed.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_SOUTH):
            button = BUTTON_SOUTH_ENABLED
        else:
            button = BUTTON_SOUTH_DISABLED
        return button
    def get_button_west(self):
        """
        Gets the `west` button depending which directions are allowed.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_WEST):
            button = BUTTON_WEST_ENABLED
        else:
            button = BUTTON_WEST_DISABLED
        return button
    def get_button_north_east(self):
        """
        Gets the `north-east` button depending which directions are allowed.
        A diagonal step may be taken in either order; the first enabled variant wins.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_NORTH_TO_EAST):
            button = BUTTON_NORTH_TO_EAST_ENABLED
        elif self._get(MOVE_DIRECTION_EAST_TO_NORTH):
            button = BUTTON_EAST_TO_NORTH_ENABLED
        else:
            button = BUTTON_NORTH_EAST_DISABLED
        return button
    def get_button_north_west(self):
        """
        Gets the `north-west` button depending which directions are allowed.
        A diagonal step may be taken in either order; the first enabled variant wins.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_NORTH_TO_WEST):
            button = BUTTON_NORTH_TO_WEST_ENABLED
        elif self._get(MOVE_DIRECTION_WEST_TO_NORTH):
            button = BUTTON_WEST_TO_NORTH_ENABLED
        else:
            button = BUTTON_NORTH_WEST_DISABLED
        return button
    def get_button_south_east(self):
        """
        Gets the `south-east` button depending which directions are allowed.
        A diagonal step may be taken in either order; the first enabled variant wins.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_SOUTH_TO_EAST):
            button = BUTTON_SOUTH_TO_EAST_ENABLED
        elif self._get(MOVE_DIRECTION_EAST_TO_SOUTH):
            button = BUTTON_EAST_TO_SOUTH_ENABLED
        else:
            button = BUTTON_SOUTH_EAST_DISABLED
        return button
    def get_button_south_west(self):
        """
        Gets the `south-west` button depending which directions are allowed.
        A diagonal step may be taken in either order; the first enabled variant wins.
        Returns
        -------
        button : ``ComponentButton``
        """
        if self._get(MOVE_DIRECTION_SOUTH_TO_WEST):
            button = BUTTON_SOUTH_TO_WEST_ENABLED
        elif self._get(MOVE_DIRECTION_WEST_TO_SOUTH):
            button = BUTTON_WEST_TO_SOUTH_ENABLED
        else:
            button = BUTTON_SOUTH_WEST_DISABLED
        return button
# Unbound setters for the four cardinal directions, in (north, east, south, west) order —
# matching the `(-x_size, 1, x_size, -1)` step order the skill helpers iterate with.
DIRECTION_SETTERS_MAIN = (
    MoveDirections.set_north,
    MoveDirections.set_east ,
    MoveDirections.set_south,
    MoveDirections.set_west ,
)
# Unbound setter pairs for the diagonal directions: for each corner, the two step orders
# (`first leg then second`, `second leg then first`).
DIRECTION_SETTERS_DIAGONAL = (
    (MoveDirections.set_north_to_east, MoveDirections.set_east_to_north),
    (MoveDirections.set_east_to_south, MoveDirections.set_south_to_east),
    (MoveDirections.set_south_to_west, MoveDirections.set_west_to_south),
    (MoveDirections.set_west_to_north, MoveDirections.set_north_to_west),
)
def REIMU_SKILL_CAN_ACTIVATE(game_state):
    """
    Returns whether Reimu's skill can be activated.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    Returns
    -------
    can_active : `bool`
    """
    x_size = game_state.stage.x_size
    position = game_state.position
    map_ = game_state.map
    # Check the four adjacent tiles: north, east, south, west.
    for offset in (-x_size, 1, x_size, -1):
        # The adjacent tile must hold something jumpable (pushable or special) ...
        if not map_[position + offset] & (BIT_MASK_PUSHABLE | BIT_MASK_SPECIAL):
            continue
        # ... and the tile behind it must be free to land on.
        if map_[position + (offset << 1)] & BIT_MASK_PASSABLE:
            return True
    return False
def REIMU_SKILL_GET_DIRECTIONS(game_state):
    """
    Returns to which directions Reimu's skill could be used.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    Returns
    -------
    move_directions : ``MoveDirections``
    """
    x_size = game_state.stage.x_size
    position = game_state.position
    map_ = game_state.map
    move_directions = MoveDirections()
    for offset, setter in zip((-x_size, 1, x_size, -1), DIRECTION_SETTERS_MAIN):
        # Enabled when the adjacent tile is jumpable and the tile behind it is free to land on.
        enabled = (
            bool(map_[position + offset] & (BIT_MASK_PUSHABLE | BIT_MASK_SPECIAL)) and
            bool(map_[position + (offset << 1)] & BIT_MASK_PASSABLE)
        )
        setter(move_directions, enabled)
    return move_directions
def REIMU_SKILL_USE(game_state, step, align):
    """
    Uses Reimu's skill to the represented directory.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    step : `int`
        Difference between 2 adjacent tile-s translated to 1 dimension based on the map's size.
    align : `int`
        The character's new align if the move is successful.
    Returns
    -------
    success : `bool`
        Whether the move was completed successfully.
    """
    map_ = game_state.map
    position = game_state.position
    # The adjacent tile must hold something jumpable (pushable or special).
    target_tile = map_[position + step]
    if not target_tile & (BIT_MASK_PUSHABLE | BIT_MASK_SPECIAL):
        return False
    # The tile two steps away (`step << 1`) must be free to land on.
    after_tile = map_[position + (step << 1)]
    if not after_tile & BIT_MASK_PASSABLE:
        return False
    actual_tile = map_[position]
    # Record the pre-move tiles so the move can be undone; `True` marks a skill use.
    game_state.history.append(
        HistoryElement(position, True, ((position, actual_tile), (position + (step << 1), after_tile)))
    )
    # Remove the character from its tile, then place it (with its new facing) two tiles away.
    map_[position] = actual_tile & BIT_MASK_PASSABLE
    game_state.position = position = position + (step << 1)
    map_[position] = after_tile | align
    # The skill is single-use per stage.
    game_state.has_skill = False
    return True
def FLAN_SKILL_CAN_ACTIVATE(game_state):
    """
    Returns whether Flandre's skill can be activated.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    Returns
    -------
    can_active : `bool`
    """
    x_size = game_state.stage.x_size
    position = game_state.position
    map_ = game_state.map
    # Usable when any of the four adjacent tiles is an unbroken object (pillar).
    return any(
        map_[position + offset] == BIT_MASK_OBJECT_U
        for offset in (-x_size, 1, x_size, -1)
    )
def FLAN_SKILL_GET_DIRECTIONS(game_state):
    """
    Returns to which directions Flandre's skill could be used.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    Returns
    -------
    move_directions : ``MoveDirections``
    """
    x_size = game_state.stage.x_size
    position = game_state.position
    map_ = game_state.map
    move_directions = MoveDirections()
    # A direction is enabled exactly when its adjacent tile is an unbroken object (pillar).
    for offset, setter in zip((-x_size, 1, x_size, -1), DIRECTION_SETTERS_MAIN):
        setter(move_directions, map_[position + offset] == BIT_MASK_OBJECT_U)
    return move_directions
def FLAN_SKILL_USE(game_state, step, align):
    """
    Uses Flan's skill to the represented directory.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    step : `int`
        Difference between 2 adjacent tile-s translated to 1 dimension based on the map's size.
    align : `int`
        The character's new align if the move is successful.
    Returns
    -------
    success : `bool`
        Whether the move was completed successfully.
    """
    map_ = game_state.map
    position = game_state.position
    # Only an unbroken object (pillar) on the adjacent tile can be destroyed.
    target_tile = map_[position + step]
    if target_tile != BIT_MASK_OBJECT_U:
        return False
    actual_tile = map_[position]
    # Record the pre-move tiles so the move can be undone; `True` marks a skill use.
    game_state.history.append(HistoryElement(position, True, ((position, actual_tile), (position + step, target_tile))))
    # The character stays in place, turning towards the destroyed object.
    map_[position] = actual_tile & BIT_MASK_PASSABLE | align
    # The pillar becomes a broken one.
    map_[position + step] = BIT_MASK_OBJECT_P
    # The skill is single-use per stage.
    game_state.has_skill = False
    return True
def YUKARI_SKILL_CAN_ACTIVATE(game_state):
    """
    Returns whether Yukari's skill can be activated.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    Returns
    -------
    can_active : `bool`
    """
    map_ = game_state.map
    x_size = game_state.stage.x_size
    y_size = len(map_) // x_size
    position = game_state.position
    y_position, x_position = divmod(position, x_size)
    # Per-direction scan limits (one past the last scannable index on the row / column):
    # x_min = x_size*y_position
    # x_max = x_size*(y_position + 1)-1
    # y_min = x_position
    # y_max = x_position + (x_size*(y_size-1))
    for step, limit in (
        (-x_size , -x_size ,),
        (1 , x_size * (y_position + 1) - 1 ,),
        (x_size , x_position + (x_size * (y_size - 1)),),
        ( -1 , x_size * y_position ,),
    ):
        target_position = position + step
        if target_position == limit:
            continue
        # The adjacent tile must block line of sight, else there is nothing to warp through.
        if not map_[target_position]&BIT_MASK_BLOCKS_LOS:
            continue
        # Scan past the contiguous blocking run; the first tile after it must be free.
        while True:
            target_position = target_position + step
            if target_position == limit:
                break
            target_tile = map_[target_position]
            if target_tile & BIT_MASK_BLOCKS_LOS:
                continue
            if target_tile & BIT_MASK_PASSABLE:
                return True
            # Non-blocking but not passable either — no landing spot in this direction.
            break
    return False
def YUKARI_SKILL_GET_DIRECTIONS(game_state):
    """
    Returns to which directions Yukari's skill could be used.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    Returns
    -------
    move_directions : ``MoveDirections``
    """
    map_ = game_state.map
    x_size = game_state.stage.x_size
    y_size = len(map_) // x_size
    move_directions = MoveDirections()
    position = game_state.position
    y_position, x_position = divmod(position, x_size)
    # Per-direction scan limits (one past the last scannable index on the row / column):
    # x_min = x_size*y_position
    # x_max = x_size*(y_position + 1)-1
    # y_min = x_position
    # y_max = x_position + (x_size*(y_size-1))
    for (step, limit), setter in zip(
        (
            ( -x_size , -x_size ,),
            (1 , x_size * (y_position + 1) - 1 ,),
            (x_size , x_position + (x_size * (y_size - 1)),),
            ( -1 , x_size * y_position ,),
        ),
        DIRECTION_SETTERS_MAIN,
    ):
        target_position = position + step
        if target_position == limit:
            can_go_to_directory = False
        else:
            # The adjacent tile must block line of sight to have something to warp through.
            if map_[target_position]&BIT_MASK_BLOCKS_LOS:
                # Scan past the contiguous blocking run; the first tile after it decides.
                while True:
                    target_position = target_position + step
                    if target_position == limit:
                        can_go_to_directory = False
                        break
                    target_tile = map_[target_position]
                    if target_tile & BIT_MASK_BLOCKS_LOS:
                        continue
                    if target_tile & BIT_MASK_PASSABLE:
                        can_go_to_directory = True
                        break
                    # Non-blocking but not passable — no landing spot.
                    can_go_to_directory = False
                    break
            else:
                can_go_to_directory = False
        setter(move_directions, can_go_to_directory)
    return move_directions
def YUKARI_SKILL_USE(game_state, step, align):
    """
    Uses Yukari's skill to the represented directory.
    Parameters
    ----------
    game_state : ``GameState``
        The respective game state.
    step : `int`
        Difference between 2 adjacent tile-s translated to 1 dimension based on the map's size.
    align : `int`
        The character's new align if the move is successful.
    Returns
    -------
    success : `bool`
        Whether the move was completed successfully.
    """
    map_ = game_state.map
    x_size = game_state.stage.x_size
    y_size = len(map_) // x_size
    position = game_state.position
    y_position, x_position = divmod(position, x_size)
    # Recompute the scan limit for the direction implied by `step`
    # (see ``YUKARI_SKILL_CAN_ACTIVATE`` for the limit formulas).
    if step > 0:
        if step == 1:
            limit = x_size * (y_position + 1) - 1
        else:
            limit = x_position + (x_size * (y_size - 1))
    else:
        if step == -1:
            limit = x_size * y_position
        else:
            limit = -x_size
    target_position = position + step
    if target_position == limit:
        return False
    # The adjacent tile must block line of sight, else there is nothing to warp through.
    if not map_[target_position]&BIT_MASK_BLOCKS_LOS:
        return False
    # Scan past the contiguous blocking run to find the landing tile.
    while True:
        target_position = target_position + step
        if target_position == limit:
            return False
        target_tile = map_[target_position]
        if target_tile & BIT_MASK_BLOCKS_LOS:
            continue
        if target_tile & BIT_MASK_PASSABLE:
            break
        # Non-blocking but not passable — no landing spot.
        return False
    actual_tile = map_[position]
    # Record the pre-move tiles so the move can be undone; `True` marks a skill use.
    game_state.history.append(HistoryElement(position, True, ((position, actual_tile), (target_position, target_tile))))
    # Remove the character from its tile and place it (with its new facing) on the landing tile.
    map_[position] = actual_tile & BIT_MASK_PASSABLE
    game_state.position = target_position
    map_[target_position] = target_tile | align
    # The skill is single-use per stage.
    game_state.has_skill = False
    return True
# Move-state identifiers returned by ``can_move_to`` / ``can_move_diagonal``.
DIRECTION_MOVE_STATE_NONE = 0
DIRECTION_MOVE_STATE_CAN = 1
DIRECTION_MOVE_STATE_PUSH = 2
# Which leg order of a diagonal move is viable (first-then-second vs second-then-first).
DIRECTION_MOVE_DIAGONAL_1 = 3
DIRECTION_MOVE_DIAGONAL_2 = 4
def can_move_to(map_, position, step):
    """
    Returns whether the player can move to the given direction.
    Parameters
    ----------
    map_ : `list` of `int`
        The map where the player is.
    position : `int`
        The player's position on the map.
    step : `int`
        The step to do.
    Returns
    -------
    move_state : `int`
        Whether the player can move.
        Can be any of the following values:
        +---------------------------+-------+
        | Respective name           | Value |
        +===========================+=======+
        | DIRECTION_MOVE_STATE_NONE | 0     |
        +---------------------------+-------+
        | DIRECTION_MOVE_STATE_CAN  | 1     |
        +---------------------------+-------+
        | DIRECTION_MOVE_STATE_PUSH | 2     |
        +---------------------------+-------+
    """
    target_tile = map_[position + step]
    # Hard blockers win over everything else.
    if target_tile & BIT_MASK_UNPUSHABLE:
        return DIRECTION_MOVE_STATE_NONE
    # A free tile can simply be stepped onto.
    if target_tile & BIT_MASK_PASSABLE:
        return DIRECTION_MOVE_STATE_CAN
    # Otherwise the tile may hold a pushable, which can be shoved onward if the tile
    # behind it is free or an unfilled hole.
    after_tile = map_[position + (step << 1)]
    if target_tile & BIT_MASK_PUSHABLE and after_tile & (BIT_MASK_PASSABLE | BIT_MASK_HOLE_U):
        return DIRECTION_MOVE_STATE_PUSH
    return DIRECTION_MOVE_STATE_NONE
def can_move_diagonal(map_, position, step_1, step_2):
    """
    Returns whether the player can move diagonally.
    
    Parameters
    ----------
    map_ : `list` of `int`
        The map where the player is.
    position : `int`
        The player's position on the map.
    step_1 : `int`
        The first axis-aligned step composing the diagonal.
    step_2 : `int`
        The second axis-aligned step composing the diagonal.
    
    Returns
    -------
    move_state : `int`
        Whether the player can move diagonally.
        Can be any of the following values:
        +---------------------------+-------+
        | Respective name           | Value |
        +===========================+=======+
        | DIRECTION_MOVE_STATE_NONE | 0     |
        +---------------------------+-------+
        | DIRECTION_MOVE_DIAGONAL_1 | 3     |
        +---------------------------+-------+
        | DIRECTION_MOVE_DIAGONAL_2 | 4     |
        +---------------------------+-------+
    """
    # A diagonal move is two consecutive axis moves. Evaluate both orders:
    # first `step_1` then `step_2`, and first `step_2` then `step_1`.
    step_1_1_state = can_move_to(map_, position, step_1)
    if step_1_1_state == DIRECTION_MOVE_STATE_NONE:
        # First leg blocked; the second leg of this order cannot be taken.
        step_1_2_state = DIRECTION_MOVE_STATE_NONE
    else:
        step_1_2_state = can_move_to(map_, position + step_1, step_2)
    
    step_2_1_state = can_move_to(map_, position, step_2)
    if step_2_1_state == DIRECTION_MOVE_STATE_NONE:
        step_2_2_state = DIRECTION_MOVE_STATE_NONE
    else:
        step_2_2_state = can_move_to(map_, position + step_2, step_1)
    
    # Prefer an order consisting of two plain (non-push) moves.
    if (
        (step_1_1_state == DIRECTION_MOVE_STATE_CAN) and
        (step_1_2_state == DIRECTION_MOVE_STATE_CAN)
    ):
        move_state = DIRECTION_MOVE_DIAGONAL_1
    elif (
        (step_2_1_state == DIRECTION_MOVE_STATE_CAN) and
        (step_2_2_state == DIRECTION_MOVE_STATE_CAN)
    ):
        move_state = DIRECTION_MOVE_DIAGONAL_2
    # Otherwise accept an order whose both legs are at least possible (may push),
    # but only when the other order is blocked.
    elif (
        (
            (step_2_1_state == DIRECTION_MOVE_STATE_NONE) or
            (step_2_2_state == DIRECTION_MOVE_STATE_NONE)
        ) and
        (step_1_1_state != DIRECTION_MOVE_STATE_NONE) and
        (step_1_2_state != DIRECTION_MOVE_STATE_NONE)
    ):
        move_state = DIRECTION_MOVE_DIAGONAL_1
    elif (
        (
            (step_1_1_state == DIRECTION_MOVE_STATE_NONE) or
            (step_1_2_state == DIRECTION_MOVE_STATE_NONE)
        ) and
        (step_2_1_state != DIRECTION_MOVE_STATE_NONE) and
        (step_2_2_state != DIRECTION_MOVE_STATE_NONE)
    ):
        move_state = DIRECTION_MOVE_DIAGONAL_2
    else:
        move_state = DIRECTION_MOVE_STATE_NONE
    
    return move_state
class HistoryElement:
    """
    An element of a ``GameState``'s history.
    
    Attributes
    ----------
    changes : `tuple` of (`tuple` (`int`, `int`), ...)
        A tuple containing each changed tile inside of a `position - tile` value pair.
    position : `int`
        The character's old position.
    was_skill : `bool`
        Whether the step was skill usage.
    """
    __slots__ = ('changes', 'position', 'was_skill')
    
    def __init__(self, position, was_skill, changes):
        """
        Creates a new ``HistoryElement`` from the given parameters.
        
        Parameters
        ----------
        position : `int`
            The character's old position.
        was_skill : `bool`
            Whether the step was skill usage.
        changes : `tuple` of (`tuple` (`int`, `int`), ...)
            A tuple containing each changed tile inside of a `position - tile` value pair.
        """
        self.changes = changes
        self.position = position
        self.was_skill = was_skill
    
    @classmethod
    def from_json(cls, data):
        """
        Creates a new history element from json data.
        
        Parameters
        ----------
        data : `dict` of (`str`, `Any`) items
            Decoded json data.
        
        Returns
        -------
        self : ``HistoryElement``
        """
        self = object.__new__(cls)
        # json decodes the change pairs as lists; convert them back to tuples.
        self.changes = tuple(tuple(change) for change in data[JSON_KEY_HISTORY_ELEMENT_CHANGES])
        self.position = data[JSON_KEY_HISTORY_ELEMENT_POSITION]
        self.was_skill = data[JSON_KEY_HISTORY_ELEMENT_WAS_SKILL]
        return self
    
    def to_json(self):
        """
        Converts the history element to json serializable data.
        
        Returns
        -------
        data : `dict` of (`str`, `Any`) items
        """
        data = {}
        data[JSON_KEY_HISTORY_ELEMENT_POSITION] = self.position
        data[JSON_KEY_HISTORY_ELEMENT_WAS_SKILL] = self.was_skill
        data[JSON_KEY_HISTORY_ELEMENT_CHANGES] = self.changes
        return data
# Maps the tile names used in the stage json file to their bit mask values.
TILE_NAME_TO_VALUE = {
    'FLOOR'     : BIT_MASK_FLOOR,
    'TARGET'    : BIT_MASK_TARGET,
    'BOX'       : BIT_MASK_BOX,
    'BOX_TARGET': BIT_MASK_BOX_TARGET,
    'HOLE_U'    : BIT_MASK_HOLE_U,
    'HOLE_P'    : BIT_MASK_HOLE_P,
    'OBJECT_U'  : BIT_MASK_OBJECT_U,
    'CN_FLOOR'  : BIT_MASK_CHAR_N | BIT_MASK_FLOOR,
    'CE_FLOOR'  : BIT_MASK_CHAR_E | BIT_MASK_FLOOR,
    'CS_FLOOR'  : BIT_MASK_CHAR_S | BIT_MASK_FLOOR,
    'CW_FLOOR'  : BIT_MASK_CHAR_W | BIT_MASK_FLOOR,
    'NOTHING'   : BIT_MASK_NOTHING,
    'WALL_N'    : BIT_MASK_WALL_N,
    'WALL_E'    : BIT_MASK_WALL_E,
    'WALL_S'    : BIT_MASK_WALL_S,
    'WALL_W'    : BIT_MASK_WALL_W,
    'WALL_HV'   : BIT_MASK_WALL_N | BIT_MASK_WALL_E | BIT_MASK_WALL_S | BIT_MASK_WALL_W,
    'WALL_SE'   : BIT_MASK_WALL_E | BIT_MASK_WALL_S,
    'WALL_SW'   : BIT_MASK_WALL_S | BIT_MASK_WALL_W,
    'WALL_NE'   : BIT_MASK_WALL_N | BIT_MASK_WALL_E,
    'WALL_NW'   : BIT_MASK_WALL_N | BIT_MASK_WALL_W,
    'WALL_HE'   : BIT_MASK_WALL_N | BIT_MASK_WALL_E | BIT_MASK_WALL_S,
    'WALL_HW'   : BIT_MASK_WALL_N | BIT_MASK_WALL_S | BIT_MASK_WALL_W,
    'WALL_H'    : BIT_MASK_WALL_N | BIT_MASK_WALL_S,
    'CN_TARGET' : BIT_MASK_CHAR_N | BIT_MASK_TARGET,
    'CE_TARGET' : BIT_MASK_CHAR_E | BIT_MASK_TARGET,
    'CS_TARGET' : BIT_MASK_CHAR_S | BIT_MASK_TARGET,
    'CW_TARGET' : BIT_MASK_CHAR_W | BIT_MASK_TARGET,
    'WALL_V'    : BIT_MASK_WALL_E | BIT_MASK_WALL_W,
    'WALL_NV'   : BIT_MASK_WALL_E | BIT_MASK_WALL_S | BIT_MASK_WALL_W,
    'WALL_SV'   : BIT_MASK_WALL_N | BIT_MASK_WALL_E | BIT_MASK_WALL_W,
}

# Reverse lookup: bit mask value -> tile name. Used when dumping stages back to json.
TILE_VALUE_TO_NAME = {value: key for key, value in TILE_NAME_TO_VALUE.items()}
# Json keys of ``StageSource``'s serialized form.
JSON_KEY_STAGE_SOURCE_BEST = 'b'
JSON_KEY_STAGE_SOURCE_CHAPTER_INDEX = 'c'
JSON_KEY_STAGE_SOURCE_DIFFICULTY_INDEX = 'd'
JSON_KEY_STAGE_SOURCE_STAGE_INDEX = 's'
JSON_KEY_STAGE_SOURCE_ID = 'i'
JSON_KEY_STAGE_SOURCE_START = 'p'
JSON_KEY_STAGE_SOURCE_TARGET_COUNT = 't'
JSON_KEY_STAGE_SOURCE_MAP = 'm'
JSON_KEY_STAGE_SOURCE_X_SIZE = 'x'

# Json keys of ``HistoryElement``'s serialized form.
JSON_KEY_HISTORY_ELEMENT_POSITION = '0'
JSON_KEY_HISTORY_ELEMENT_WAS_SKILL = '1'
JSON_KEY_HISTORY_ELEMENT_CHANGES = '2'

# Json keys of ``GameState``'s (runner state's) serialized form.
JSON_KEY_RUNNER_STATE_STAGE_ID = '0'
JSON_KEY_RUNNER_STATE_MAP = '1'
JSON_KEY_RUNNER_STATE_POSITION = '2'
JSON_KEY_RUNNER_STATE_HAS_SKILL = '3'
JSON_KEY_RUNNER_STATE_NEXT_SKILL = '4'
JSON_KEY_RUNNER_STATE_HISTORY = '5'
JSON_KEY_RUNNER_STATE_STAGE_BEST = '6'

# Global stage registries, populated by ``StageSource.from_json``.
STAGES_BY_ID = {}
STAGES_BY_ACCESS_ROUTE = {}
class StageSource:
    """
    A stage's source.
    
    Attributes
    ----------
    after_stage_source : `None`, ``StageSource``
        The next stage source.
    before_stage_source : `None`, ``StageSource``
        The before stage source.
    best : `int`
        The lowest amount of steps needed to solve the stage.
    chapter_index : `int`
        The index of the stage's chapter.
    difficulty_index : `int`
        The index of the stage's difficulty inside of it's chapter.
    id : `int`
        The identifier of the stage.
    index : `int`
        The local index of the stage.
    map : `list` of `int`
        The stage's map.
    stage_index : `int`
        The stage's index inside of it's difficulty.
    start : `int`
        The position, where the character starts on the stage.
    target_count : `int`
        The amount of targets on the map to fulfill.
    x_size : `int`
        The map's size on the x axis.
    """
    __slots__ = (
        'after_stage_source', 'before_stage_source', 'best', 'chapter_index', 'difficulty_index', 'id', 'index', 'map',
        'stage_index', 'start', 'target_count', 'x_size'
    )
    
    @classmethod
    def from_json(cls, data):
        """
        Creates a new a ``StageSource`` from json data.
        
        Parameters
        ----------
        data : `dict` of (`str`, `Any`) items
            Decoded json data.
        
        Returns
        -------
        self : ``StageSource``
        """
        self = object.__new__(cls)
        self.chapter_index = chapter_index = data[JSON_KEY_STAGE_SOURCE_CHAPTER_INDEX]
        self.difficulty_index = difficulty_index = data[JSON_KEY_STAGE_SOURCE_DIFFICULTY_INDEX]
        self.stage_index = stage_index = data[JSON_KEY_STAGE_SOURCE_STAGE_INDEX]
        self.id = identifier = data[JSON_KEY_STAGE_SOURCE_ID]
        self.best = data[JSON_KEY_STAGE_SOURCE_BEST]
        self.start = data[JSON_KEY_STAGE_SOURCE_START]
        self.target_count = data[JSON_KEY_STAGE_SOURCE_TARGET_COUNT]
        # Stored as tile names; convert them to their bit mask values.
        self.map = [TILE_NAME_TO_VALUE[tile_name] for tile_name in data[JSON_KEY_STAGE_SOURCE_MAP]]
        self.x_size = data[JSON_KEY_STAGE_SOURCE_X_SIZE]
        # Linking fields, set later by ``load_stages``.
        self.index = 0
        self.after_stage_source = None
        self.before_stage_source = None
        
        # Register the stage in the global lookups.
        STAGES_BY_ID[identifier] = self
        STAGES_BY_ACCESS_ROUTE[(chapter_index, difficulty_index, stage_index)] = self
        return self
    
    @property
    def chapter(self):
        """
        Returns the stage source's chapter.
        
        Returns
        -------
        chapter : ``Chapter``
        """
        return CHAPTERS[self.chapter_index]
    
    def __repr__(self):
        """Returns the stage source's representation."""
        return f'<{type(self).__name__} id = {self.id!r}>'
def pretty_dump_stage_sources(stage_sources):
    """
    Dumps the given stages into pretty json format.
    
    The output is built by hand (rather than with ``json.dumps``) so the map
    tiles can be laid out row-by-row with aligned columns.
    
    Parameters
    ----------
    stage_sources : `list` of ``StageSource``
        The stages to save.
    
    Returns
    -------
    json_data : `str`
    """
    json_parts = []
    json_parts.append('[\n')
    is_first = True
    for stage_source in stage_sources:
        # Indent the first object with spaces; later objects follow the previous
        # object's trailing comma.
        if is_first:
            json_parts.append(' ' * 4)
            is_first = False
        else:
            json_parts.append(' ')
        json_parts.append('{')
        json_parts.append(' ' * 8)
        json_parts.append('\n')
        # Scalar fields, one per line.
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_CHAPTER_INDEX)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.chapter_index))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_DIFFICULTY_INDEX)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.difficulty_index))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_STAGE_INDEX)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.stage_index))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_ID)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.id))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_START)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.start))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_TARGET_COUNT)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.target_count))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_X_SIZE)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.x_size))
        json_parts.append(',\n')
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_BEST)
        json_parts.append('": ')
        json_parts.append(repr(stage_source.best))
        json_parts.append(',\n')
        # The map is emitted as rows of `x_size` tile names with padded columns.
        json_parts.append(' ' * 8)
        json_parts.append('"')
        json_parts.append(JSON_KEY_STAGE_SOURCE_MAP)
        json_parts.append('": [\n')
        # Reverse once so `.pop()` yields the tiles in original order.
        map_ = stage_source.map[::-1]
        while map_:
            is_first_in_line = True
            for _ in range(stage_source.x_size):
                tile_value = map_.pop()
                tile_name = TILE_VALUE_TO_NAME[tile_value]
                if is_first_in_line:
                    is_first_in_line = False
                else:
                    json_parts.append(' ')
                json_parts.append('"')
                json_parts.append(tile_name)
                json_parts.append('"')
                # Pad the column so the commas line up (longest tile name is 10 chars).
                json_parts.append(' ' * (10 - len(tile_name)))
                json_parts.append(',')
            json_parts.append('\n')
        # Drop the trailing comma of the last tile row.
        if (json_parts[-1] == '\n') and (json_parts[-2] == ','):
            del json_parts[-2]
        json_parts.append(' ' * 8)
        json_parts.append(']\n')
        json_parts.append(' ' * 4)
        json_parts.append('}')
        json_parts.append(',')
    # Drop the trailing comma of the last stage object.
    if json_parts[-1] == ',':
        del json_parts[-1]
    json_parts.append('\n]\n')
    return ''.join(json_parts)
# Chapter display names.
CHAPTER_REIMU_NAME = 'REIMU'
CHAPTER_FLAN_NAME = 'FLAN'
CHAPTER_YUKARI_NAME = 'YUKARI'

# Chapter indices; these are also the keys of ``CHAPTERS``.
CHAPTER_REIMU_INDEX = 0
CHAPTER_FLAN_INDEX = 1
CHAPTER_YUKARI_INDEX = 2

# Maps chapter name to chapter index.
CHAPTER_NAME_TO_INDEX = {
    CHAPTER_REIMU_NAME: CHAPTER_REIMU_INDEX,
    CHAPTER_FLAN_NAME: CHAPTER_FLAN_INDEX,
    # Fixed: the key was `CHAPTER_YUKARI_INDEX` (the int 2) instead of the name.
    CHAPTER_YUKARI_NAME: CHAPTER_YUKARI_INDEX,
}

# Global chapter registry: chapter index -> ``Chapter``.
CHAPTERS = {}
class Chapter:
    """
    A chapter storing exact data about its stages, skills and buttons.
    
    Attributes
    ----------
    button_skill_activated : ``ComponentButton``
        The skill button when the next move is a skill.
    button_skill_disabled : ``ComponentButton``
        The skill button, when the skill cannot be used.
    button_skill_enabled : ``ComponentButton``
        The skill button, when the skill can be used.
    button_skill_used : ``ComponentButton``
        The skill button, when it was already used.
    difficulties : `dict` of (`int`, `dict` (`int`, ``StageSource``) items) items
        The difficulties of the chapter.
    emoji : ``Emoji``
        The chapter's character's emoji.
    id : `int`
        The chapter's identifier.
    skill_can_activate : `Function`
        Checks whether the chapter's character's skill can be activated.
        Accepts one parameter, `game_state` (``GameState``), and should return `bool`.
    skill_get_move_directions : `Function`
        Returns the directions the character can move using its skill.
        Accepts one parameter, `game_state` (``GameState``), and should return ``MoveDirections``.
    skill_use : `Function`
        Uses the skill of the chapter's character.
        Accepts three parameters: `game_state` (``GameState``), `step` (`int`), `align` (`int`),
        and should return `bool` (whether the usage succeeded).
    stages_sorted : `list` of ``StageSource``
        The stages of the chapter in order.
    style : `dict` of (`int`, `str`) items
        The tiles of the stage based on the tile's value.
    next_stage_unlock_id : `int`
        The stage to complete for the next chapter.
    """
    __slots__ = (
        'button_skill_activated', 'button_skill_disabled', 'button_skill_enabled', 'button_skill_used', 'difficulties',
        'emoji', 'id', 'skill_can_activate', 'skill_get_move_directions', 'skill_use', 'stages_sorted', 'style',
        'next_stage_unlock_id'
    )
    
    def __init__(
        self, identifier, emoji, style, button_skill_enabled, button_skill_disabled, button_skill_used,
        button_skill_activated, skill_can_activate, skill_get_move_directions, skill_use
    ):
        """
        Creates a new chapter from the given parameters.
        
        Parameters
        ----------
        identifier : `int`
            The chapter's identifier.
        emoji : ``Emoji``
            The chapter's character's emoji.
        style : `dict` of (`int`, `str`) items
            The tiles of the stage based on the tile's value.
        button_skill_enabled : ``ComponentButton``
            The skill button, when the skill can be used.
        button_skill_disabled : ``ComponentButton``
            The skill button, when the skill cannot be used.
        button_skill_used : ``ComponentButton``
            The skill button, when it was already used.
        button_skill_activated : ``ComponentButton``
            The skill button when the next move is a skill.
        skill_can_activate : `Function`
            Checks whether the chapter's character's skill can be activated.
        skill_get_move_directions : `Function`
            Returns the directions the character can move using its skill.
        skill_use : `Function`
            Uses the skill of the chapter's character.
        """
        self.id = identifier
        # `difficulties` and `stages_sorted` are filled later by ``load_stages``.
        self.difficulties = {}
        self.stages_sorted = []
        self.emoji = emoji
        self.style = style
        self.button_skill_enabled = button_skill_enabled
        self.button_skill_disabled = button_skill_disabled
        self.button_skill_used = button_skill_used
        self.button_skill_activated = button_skill_activated
        self.skill_can_activate = skill_can_activate
        self.skill_get_move_directions = skill_get_move_directions
        self.skill_use = skill_use
        # Set later by ``load_stages`` to the unlock stage's identifier.
        self.next_stage_unlock_id = 0
# Instantiate the three chapters with their respective emoji, tile style,
# skill buttons and skill behavior functions.
CHAPTERS[CHAPTER_REIMU_INDEX] = Chapter(
    CHAPTER_REIMU_INDEX,
    EMOJI_REIMU,
    STYLE_REIMU,
    BUTTON_SKILL_REIMU_ENABLED,
    BUTTON_SKILL_REIMU_DISABLED,
    BUTTON_SKILL_REIMU_USED,
    BUTTON_SKILL_REIMU_ACTIVATED,
    REIMU_SKILL_CAN_ACTIVATE,
    REIMU_SKILL_GET_DIRECTIONS,
    REIMU_SKILL_USE,
)

CHAPTERS[CHAPTER_FLAN_INDEX] = Chapter(
    CHAPTER_FLAN_INDEX,
    EMOJI_FLAN,
    STYLE_FLAN,
    BUTTON_SKILL_FLAN_ENABLED,
    BUTTON_SKILL_FLAN_DISABLED,
    BUTTON_SKILL_FLAN_USED,
    BUTTON_SKILL_FLAN_ACTIVATED,
    FLAN_SKILL_CAN_ACTIVATE,
    FLAN_SKILL_GET_DIRECTIONS,
    FLAN_SKILL_USE,
)

CHAPTERS[CHAPTER_YUKARI_INDEX] = Chapter(
    CHAPTER_YUKARI_INDEX,
    EMOJI_YUKARI,
    STYLE_YUKARI,
    BUTTON_SKILL_YUKARI_ENABLED,
    BUTTON_SKILL_YUKARI_DISABLED,
    BUTTON_SKILL_YUKARI_USED,
    BUTTON_SKILL_YUKARI_ACTIVATED,
    YUKARI_SKILL_CAN_ACTIVATE,
    YUKARI_SKILL_GET_DIRECTIONS,
    YUKARI_SKILL_USE,
)
def load_stages():
    """
    Loads the stages from disk and fills the chapters with them up.
    
    Raises
    ------
    RuntimeError
        If the stage indices are not continuous from zero at any level
        (chapter, difficulty, stage), or if a chapter index is unknown.
    """
    with open(FILE_PATH, 'r') as file:
        stage_source_datas = from_json_file(file)
    
    stage_sources = []
    for stage_source_data in stage_source_datas:
        stage_source = StageSource.from_json(stage_source_data)
        stage_sources.append(stage_source)
    
    # Group the stages as: chapter_index -> difficulty_index -> stage_index.
    chapter_dictionaries = {}
    for stage_source in stage_sources:
        try:
            chapter_dictionary = chapter_dictionaries[stage_source.chapter_index]
        except KeyError:
            chapter_dictionary = chapter_dictionaries[stage_source.chapter_index] = {}
        try:
            difficulty_dictionary = chapter_dictionary[stage_source.difficulty_index]
        except KeyError:
            difficulty_dictionary = chapter_dictionary[stage_source.difficulty_index] = {}
        difficulty_dictionary[stage_source.stage_index] = stage_source
    
    # Validate that every index level is continuous, starting from zero.
    sorted_chapters = []
    for expected_chapter_index, (chapter_index, chapter_dictionary) in enumerate(sorted(chapter_dictionaries.items())):
        if expected_chapter_index != chapter_index:
            raise RuntimeError(
                f'expected_chapter_index={expected_chapter_index!r} != '
                f'chapter_index={chapter_index!r})'
            )
        if chapter_index not in CHAPTERS:
            raise RuntimeError(
                f'chapter_index={chapter_index} not in '
                f'CHAPTERS={CHAPTERS}'
            )
        sorted_difficulty = []
        sorted_chapters.append(sorted_difficulty)
        for expected_difficulty_index, (difficulty_index, difficulty_dictionary) in \
                enumerate(sorted(chapter_dictionary.items())):
            if expected_difficulty_index != difficulty_index:
                raise RuntimeError(
                    f'expected_difficulty_index={expected_difficulty_index!r} != '
                    f'difficulty_index={difficulty_index!r})'
                )
            sorted_stages = []
            sorted_difficulty.append(sorted_stages)
            for expected_stage_index, (stage_index, stage) in enumerate(sorted(difficulty_dictionary.items())):
                # Fixed: this compared the difficulty indices (copy-paste), so
                # stage index continuity was never actually validated.
                if expected_stage_index != stage_index:
                    raise RuntimeError(
                        f'expected_stage_index={expected_stage_index!r} != '
                        f'stage_index={stage_index!r})'
                    )
                sorted_stages.append(stage)
    
    for chapter_index, sorted_chapter in enumerate(sorted_chapters):
        chapter_dictionary = chapter_dictionaries[chapter_index]
        chapter = CHAPTERS[chapter_index]
        chapter.difficulties.update(chapter_dictionary)
        
        # Mark which stage unlocks the next chapter, if it exists.
        try:
            difficulty_dictionary = chapter_dictionary[CHAPTER_UNLOCK_DIFFICULTY]
        except KeyError:
            pass
        else:
            try:
                stage = difficulty_dictionary[CHAPTER_UNLOCK_STAGE]
            except KeyError:
                pass
            else:
                chapter.next_stage_unlock_id = stage.id
        
        # Flatten the stages and link them into a doubly linked order.
        chapter_stages_sorted = chapter.stages_sorted
        for sorted_difficulty in sorted_chapter:
            chapter_stages_sorted.extend(sorted_difficulty)
        
        chapter_stages_sorted_length = len(chapter_stages_sorted)
        if chapter_stages_sorted_length > 1:
            stage = chapter_stages_sorted[0]
            stage.after_stage_source = chapter_stages_sorted[1]
            stage.index = 0
            
            index = chapter_stages_sorted_length - 1
            stage = chapter_stages_sorted[index]
            stage.before_stage_source = chapter_stages_sorted[chapter_stages_sorted_length - 2]
            
            for index in range(1, chapter_stages_sorted_length - 1):
                stage = chapter_stages_sorted[index]
                stage.after_stage_source = chapter_stages_sorted[index + 1]
                stage.before_stage_source = chapter_stages_sorted[index - 1]
                stage.index = index
# Populate the chapters from the stage file at import time.
load_stages()
class StageResult:
    """
    Represents a user's state of a stage.
    
    Attributes
    ----------
    best : `int`
        The user's best solution of the stage.
    id : `int`
        The entry's identifier in the database.
    stage_id : `int`
        The stage's identifier.
    """
    __slots__ = ('best', 'id', 'stage_id')
    
    def __new__(cls, identifier, stage_id, best):
        """
        Creates a new stage state from the given parameters.
        
        Parameters
        ----------
        identifier : `int`
            The entry's identifier in the database.
        stage_id : `int`
            The stage's identifier.
        best : `int`
            The user's best solution of the stage.
        """
        self = object.__new__(cls)
        self.best = best
        self.id = identifier
        self.stage_id = stage_id
        return self
    
    @classmethod
    def from_entry(cls, entry):
        """
        Creates a new ``StageResult`` from the given database entry.
        
        Parameters
        ----------
        entry : `sqlalchemy.???`
        
        Returns
        -------
        self : ``StageResult``
        """
        return cls(entry.id, entry.stage_id, entry.best)
    
    def __repr__(self):
        """Returns the stage result's representation."""
        return f'<{type(self).__name__} id = {self.id!r}, stage_id = {self.stage_id!r}, best = {self.best!r}>'
async def get_user_state(user_id):
    """
    Requests the user's state from the database.
    
    This function is a coroutine.
    
    Parameters
    ----------
    user_id : `int`
        The user's identifier.
    
    Returns
    -------
    game_state : `None`, ``GameState``
        The state of the actual game.
    stage_results: `dict` of (`int`, ``StageResult``) items
        Result of each completed stage by the user.
    selected_stage_id : `int`
        The selected stage's identifier.
    field_exists : `bool`
        Whether the field is stored in the database.
    entry_id : `int`
        The field identifier in the database.
    """
    async with DB_ENGINE.connect() as connector:
        response = await connector.execute(
            DS_V2_TABLE.select(
                ds_v2_model.user_id == user_id,
            )
        )
        result = await response.fetchone()
        # No main entry -> the user was never stored; hand back defaults.
        if result is None:
            return _return_default_user_state()
        
        game_state_data = result.game_state
        if (game_state_data is None):
            game_state = None
        else:
            # The game state is stored as compressed json.
            game_state_json_data = from_json(decompress(game_state_data))
            game_state = GameState.from_json(game_state_json_data)
        
        selected_stage_id = result.selected_stage_id
        field_exists = True
        entry_id = result.id
        
        response = await connector.execute(
            DS_V2_RESULT_TABLE.select(
                ds_v2_result_model.user_id == user_id,
            )
        )
        results = await response.fetchall()
        stage_results = {}
        if results:
            for result in results:
                stage_result = StageResult.from_entry(result)
                stage_results[stage_result.stage_id] = stage_result
        else:
            # No stage results at all: fall back to the very first stage and
            # clear the stored game state in the database.
            selected_stage_id = CHAPTERS[0].difficulties[0][0].id
            if (entry_id is not None):
                await connector.execute(
                    DS_V2_TABLE.update(
                        ds_v2_model.id == entry_id,
                    ).values(
                        game_state = None,
                    )
                )
        
        return (
            game_state,
            stage_results,
            selected_stage_id,
            field_exists,
            entry_id,
        )
def _return_default_user_state():
    """
    Returns a freshly created default user state.
    
    Returns
    -------
    game_state : `None`, ``GameState``
        The state of the actual game.
    stage_results: `dict` of (`int`, ``StageResult``) items
        Result of each completed stage by the user.
    selected_stage_id : `int`
        The selected stage's identifier.
    field_exists : `bool`
        Whether the field is stored in the database.
    entry_id : `int`
        The field identifier in the database.
    """
    # Default to the very first stage of the very first chapter.
    default_stage_id = CHAPTERS[0].difficulties[0][0].id
    return None, {}, default_stage_id, False, 0
async def game_state_upload_init_failure(entry_id, game_state_data):
    """
    Saves the game's state. This function is called when initialization fails.
    
    This function is a coroutine.
    
    Parameters
    ----------
    entry_id : `int`
        The field identifier in the database.
    game_state_data : `bytes`
        Compressed data storing the current game's state.
    """
    async with DB_ENGINE.connect() as connector:
        to_execute = DS_V2_TABLE.update(
            ds_v2_model.id == entry_id
        ).values(
            game_state = game_state_data,
        )
        await connector.execute(to_execute)
async def game_state_upload_update(entry_id, game_state_data, selected_stage_id):
    """
    Saves the game's state. This function is called when the entry already exists.
    
    This function is a coroutine.
    
    Parameters
    ----------
    entry_id : `int`
        The field identifier in the database.
    game_state_data : `bytes`
        Compressed data storing the current game's state.
    selected_stage_id : `int`
        The currently selected stage's identifier.
    """
    async with DB_ENGINE.connect() as connector:
        to_execute = DS_V2_TABLE.update(
            ds_v2_model.id == entry_id,
        ).values(
            game_state = game_state_data,
            selected_stage_id = selected_stage_id,
        )
        await connector.execute(to_execute)
async def game_state_upload_create(user_id, game_state_data, selected_stage_id):
    """
    Saves the game's state. This function is called when the entry does not yet exist.
    
    This function is a coroutine.
    
    Parameters
    ----------
    user_id : `int`
        The user's identifier who owns the game state.
    game_state_data : `bytes`
        Compressed data storing the current game's state.
    selected_stage_id : `int`
        The currently selected stage's identifier.
    
    Returns
    -------
    entry_id : `int`
        The field identifier in the database.
    """
    async with DB_ENGINE.connect() as connector:
        to_execute = DS_V2_TABLE.insert().values(
            user_id = user_id,
            game_state = game_state_data,
            selected_stage_id = selected_stage_id,
        ).returning(
            ds_v2_model.id,
        )
        response = await connector.execute(to_execute)
        result = await response.fetchone()
        # `returning` yields the freshly created row's identifier.
        entry_id = result[0]
        return entry_id
async def save_stage_result_and_reward(user_id, stage_id, steps, self_best, stage_result):
    """
    Saves stage result and gives reward depending on the delta.
    
    This function is a coroutine.
    
    Parameters
    ----------
    user_id : `int`
        The user's identifier.
    stage_id : `int`
        The played stage's identifier.
    steps : `int`
        The amount of steps the user did.
    self_best : `int`
        The amount of steps player did. Defaults to `-1` if not applicable.
    stage_result : `None`, ``StageResult``
        The actual result of the user for the given stage.
    
    Returns
    -------
    stage_result_entry_id : `int`
        The existing or created database entry's identifier.
    """
    async with DB_ENGINE.connect() as connector:
        if stage_result is None:
            # First completion of the stage -> insert a new result entry.
            response = await connector.execute(
                DS_V2_RESULT_TABLE.insert().values(
                    user_id = user_id,
                    stage_id = stage_id,
                    best = steps,
                ).returning(
                    ds_v2_result_model.id,
                )
            )
            result = await response.fetchone()
            stage_result_entry_id = result[0]
        else:
            # Completed before -> update the stored best in place.
            stage_result_entry_id = stage_result.id
            await connector.execute(
                DS_V2_RESULT_TABLE.update(
                    ds_v2_result_model.id == stage_result_entry_id,
                ).values(
                    best = steps,
                )
            )
        
        # First completion rewards for the steps; improvements reward the difference.
        if stage_result is None:
            reward = get_reward_for_steps(stage_id, steps)
        else:
            reward = get_reward_difference(stage_id, self_best, steps)
        
        if reward:
            # Credit the reward on the user's common entry, creating it if missing.
            response = await connector.execute(
                select(
                    [
                        user_common_model.id,
                    ]
                ).where(
                    user_common_model.user_id == user_id,
                )
            )
            results = await response.fetchall()
            if results:
                entry_id = results[0][0]
                to_execute = USER_COMMON_TABLE.update(
                    user_common_model.id == entry_id
                ).values(
                    total_love = user_common_model.total_love + reward
                )
            else:
                to_execute = get_create_common_user_expression(
                    user_id,
                    total_love = reward,
                )
            await connector.execute(to_execute)
    
    return stage_result_entry_id
# If there is no database support, replace every database accessor above with a
# no-op stub that keeps the same signature and docs.
if DB_ENGINE is None:
    @copy_docs(get_user_state)
    async def get_user_state(user_id):
        # Without a database every user starts from the default state.
        return _return_default_user_state()
    
    @copy_docs(game_state_upload_init_failure)
    async def game_state_upload_init_failure(entry_id, game_state_data):
        pass
    
    @copy_docs(game_state_upload_update)
    async def game_state_upload_update(entry_id, game_state_data, selected_stage_id):
        pass
    
    @copy_docs(game_state_upload_create)
    async def game_state_upload_create(user_id, game_state_data, selected_stage_id):
        # Placeholder entry identifier; nothing is persisted.
        return 0
    
    @copy_docs(save_stage_result_and_reward)
    async def save_stage_result_and_reward(user_id, stage_id, steps, self_best, stage_result):
        return 0
class UserState:
    """
    A user's state in dungeon sweeper.
    
    Attributes
    ----------
    entry_id : `int`
        The field identifier in the database.
    field_exists : `bool`
        Whether the field is stored in the database.
    game_state : `None`, ``GameState``
        The state of the actual game.
    selected_stage_id : `int`
        The selected stage's identifier.
    stage_results: `dict` of (`int`, ``StageResult``) items
        Result of each completed stage by the user.
    user_id : `int`
        The respective user's identifier.
    """
    __slots__ = ('entry_id', 'field_exists', 'game_state', 'selected_stage_id', 'stage_results', 'user_id')
    
    async def __new__(cls, user_id):
        """
        Creates a new ``UserState`` based on the given `user_id`.
        
        This method is a coroutine.
        
        Parameters
        ----------
        user_id : `int`
            The user's respective identifier.
        """
        game_state, stage_results, selected_stage_id, field_exists, entry_id = await get_user_state(user_id)
        self = object.__new__(cls)
        self.game_state = game_state
        self.selected_stage_id = selected_stage_id
        self.field_exists = field_exists
        self.entry_id = entry_id
        self.stage_results = stage_results
        self.user_id = user_id
        return self
    
    def get_game_state_data(self):
        """
        Gets the user state's game state's data in json serializable form.
        
        Returns
        -------
        game_state_data : `None`, `bytes`
        """
        game_state = self.game_state
        # Only persist a game state with actual progress (non-empty history).
        if (game_state is None) or (not game_state.history):
            game_state_data = None
        else:
            game_state_json_data = game_state.to_json()
            game_state_data = compress(to_json(game_state_json_data).encode())
        return game_state_data
    
    async def upload_game_state_on_init_failure(self):
        """
        Uploads only the game's state if applicable. Only called when exception occurs at initialization.
        
        This method is a coroutine.
        """
        # Only update an already existing database entry; never create one here.
        if self.field_exists:
            game_state_data = self.get_game_state_data()
            if (game_state_data is not None):
                await game_state_upload_init_failure(self.entry_id, game_state_data)
    
    async def upload(self):
        """
        Saves the current state of the game state.
        
        This method is a coroutine.
        """
        game_state_data = self.get_game_state_data()
        if self.field_exists:
            await game_state_upload_update(self.entry_id, game_state_data, self.selected_stage_id)
        else:
            # First save for the user -> create the entry and remember its identifier.
            self.entry_id = await game_state_upload_create(self.user_id, game_state_data, self.selected_stage_id)
            self.field_exists = True
    
    async def set_best(self, stage_id, steps):
        """
        Updates the state of the given stage.
        
        This method is a coroutine.
        
        Parameters
        ----------
        stage_id : `int`
            The respective stage's identifier.
        steps : `int`
            The step count of the user.
        """
        # The user's main entry must exist before storing results for it.
        if not self.field_exists:
            game_state_data = self.get_game_state_data()
            self.entry_id = await game_state_upload_create(self.user_id, game_state_data, self.selected_stage_id)
            self.field_exists = True
        
        stage_result = self.stage_results.get(stage_id, None)
        if stage_result is None:
            self_best = -1
        else:
            self_best = stage_result.best
        
        # Only persist the very first completion or an improvement.
        if (self_best == -1) or (steps < self_best):
            entry_id = await save_stage_result_and_reward(self.user_id, stage_id, steps, self_best, stage_result)
            if stage_result is None:
                self.stage_results[stage_id] = StageResult(entry_id, stage_id, steps)
            else:
                stage_result.best = steps
class GameState:
"""
A user's actual game's state.
Attributes
----------
best : `int`
The user's best solution for the stage. Set as `-1` by default.
chapter : ``Chapter``
The stage's chapter.
has_skill : `bool`
Whether the character' skill in the game was not yet used.
history : `list` of ``HistoryElement``
The done steps in the game.
map : `list` of `int`
The game's actual map.
next_skill : `bool`
Whether the next step is a skill usage.
position : `int`
The position of the selected stage.
stage : ``StageSource``
The represented stage.
"""
__slots__ = ('best', 'chapter', 'has_skill', 'history', 'map', 'next_skill', 'position', 'stage',)
def __init__(self, stage, best):
"""
Creates a new game state instance from the given parameters.
Parameters
----------
stage : ``StageSource``
The stage to execute by the game.
best : `int`
The user's best solution for the stage.
"""
self.chapter = stage.chapter
self.stage = stage
self.map = stage.map.copy()
self.position = stage.start
self.history = []
self.has_skill = True
self.next_skill = False
self.best = best
def restart(self):
"""
Restarts the game.
"""
steps = len(self.history)
best = self.best
if (best == -1) or (steps < best):
self.best = steps
stage = self.stage
self.stage = stage
self.map = stage.map.copy()
self.position = stage.start
self.history.clear()
self.has_skill = True
self.next_skill = False
@classmethod
def from_json(cls, data):
"""
Creates stage state from the given json data.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Json data.
Returns
-------
self : ``GameState``
"""
self = object.__new__(cls)
stage_id = data[JSON_KEY_RUNNER_STATE_STAGE_ID]
stage = STAGES_BY_ID[stage_id]
self.chapter = stage.chapter
self.stage = stage
self.best = data.get(JSON_KEY_RUNNER_STATE_STAGE_BEST, -1)
try:
map_ = data[JSON_KEY_RUNNER_STATE_MAP]
except KeyError:
map_ = stage.map.copy()
self.map = map_
try:
position = data[JSON_KEY_RUNNER_STATE_POSITION]
except KeyError:
position = stage.start
self.position = position
self.has_skill = data.get(JSON_KEY_RUNNER_STATE_HAS_SKILL, True)
self.next_skill = data.get(JSON_KEY_RUNNER_STATE_NEXT_SKILL, True)
try:
history_datas = data[JSON_KEY_RUNNER_STATE_HISTORY]
except KeyError:
history = []
else:
history = [HistoryElement.from_json(history_data) for history_data in history_datas]
self.history = history
return self
    def to_json(self):
        """
        Converts the stage state to json serializable data.
        
        Fields holding their default value are omitted to keep the payload small; `.from_json`
        applies the matching defaults when loading.
        
        Returns
        -------
        data : `dict` of (`str`, `Any`) items
        """
        data = {}
        stage = self.stage
        data[JSON_KEY_RUNNER_STATE_STAGE_ID] = stage.id
        # `-1` means "no best recorded" and is not serialized.
        best = self.best
        if best != -1:
            data[JSON_KEY_RUNNER_STATE_STAGE_BEST] = best
        # Boolean fields are only serialized when `False`.
        if not self.has_skill:
            data[JSON_KEY_RUNNER_STATE_HAS_SKILL] = False
        if not self.next_skill:
            data[JSON_KEY_RUNNER_STATE_NEXT_SKILL] = False
        history = self.history
        if history:
            data[JSON_KEY_RUNNER_STATE_HISTORY] = [history_element.to_json() for history_element in history]
        data[JSON_KEY_RUNNER_STATE_POSITION] = self.position
        # Copy so later in-place game moves cannot mutate the serialized data.
        data[JSON_KEY_RUNNER_STATE_MAP] = self.map.copy()
        return data
def done(self):
"""
Returns whether all the targets on the stage are satisfied.
Returns
-------
done : `bool`
"""
target_count = self.stage.target_count
for tile in self.map:
if tile == BIT_MASK_BOX_TARGET:
target_count -= 1
if not target_count:
if (self.best == -1) or (self.best > len(self.history)):
self.best = len(self.history)
return True
return False
    def move_north(self):
        """
        Moves the character north.
        
        Returns
        -------
        moved : `bool`
            Whether the character moved successfully.
        """
        # North is one full row back, so the 1 dimensional step is `-x_size`.
        return self.move(-self.stage.x_size, BIT_MASK_CHAR_N)
    def move_east(self):
        """
        Moves the character east.
        
        Returns
        -------
        moved : `bool`
            Whether the character moved successfully.
        """
        # East is the next tile within the row.
        return self.move(1, BIT_MASK_CHAR_E)
    def move_south(self):
        """
        Moves the character south.
        
        Returns
        -------
        moved : `bool`
            Whether the character moved successfully.
        """
        # South is one full row forward, so the 1 dimensional step is `+x_size`.
        return self.move(self.stage.x_size, BIT_MASK_CHAR_S)
    def move_west(self):
        """
        Moves the character west.
        
        Returns
        -------
        moved : `bool`
            Whether the character moved successfully.
        """
        # West is the previous tile within the row.
        return self.move(-1, BIT_MASK_CHAR_W)
def get_move_directions(self):
"""
Returns to which directions the character can move.
Returns
-------
move_directions : ``MoveDirections``
"""
if self.next_skill:
return self.chapter.skill_get_move_directions(self)
else:
return self.get_own_move_directions()
    def get_own_move_directions(self):
        """
        Returns to which directions can the character move, excluding the skill of the character.
        
        Returns
        -------
        move_directions : ``MoveDirections``
        """
        x_size = self.stage.x_size
        position = self.position
        map_ = self.map
        move_directions = MoveDirections()
        # Check the 4 main directions; the step order (north, east, south, west) matches the
        # `move_*` methods and must match the order of `DIRECTION_SETTERS_MAIN`.
        for step, setter in zip((-x_size, 1, x_size, -1), DIRECTION_SETTERS_MAIN):
            if can_move_to(map_, position, step) != DIRECTION_MOVE_STATE_NONE:
                setter(move_directions, True)
        # Diagonals are 2 step moves; each can be entered through 2 main directions. The result
        # of `can_move_diagonal` selects which of the 2 setters applies — presumably
        # distinguishing the reachable path; confirm against `can_move_diagonal`.
        for steps, setters in zip(
            ((-x_size, 1), (1, x_size), (x_size, -1), (-1, -x_size),), DIRECTION_SETTERS_DIAGONAL
        ):
            move_state = can_move_diagonal(map_, position, *steps)
            if move_state != DIRECTION_MOVE_STATE_NONE:
                setters[move_state == DIRECTION_MOVE_DIAGONAL_2](move_directions, True)
        return move_directions
    def move(self, step, align):
        """
        Moves the character to the given direction.
        
        Parameters
        ----------
        step : `int`
            Difference between 2 adjacent tile-s translated to 1 dimension based on the map's size.
        align : `int`
            The character's new align if the move is successful.
        
        Returns
        -------
        success : `bool`
            Whether the move was completed successfully.
        """
        # An armed skill overrides the normal movement rules; let the chapter resolve it.
        if self.next_skill:
            result = self.chapter.skill_use(self, step, align)
            if result:
                self.next_skill = False
            return result
        map_ = self.map
        position = self.position
        actual_tile = map_[position]
        target_tile = map_[position + step]
        # Blocking tile (wall and the like): no move.
        if target_tile & BIT_MASK_UNPUSHABLE:
            return False
        # Free floor: record the touched tiles for undo, then step onto it.
        if target_tile & BIT_MASK_PASSABLE:
            self.history.append(
                HistoryElement(
                    position,
                    False,
                    (
                        (position, actual_tile),
                        (position + step, target_tile),
                    ),
                )
            )
            # Masking down to the passable bits removes the character from its old tile.
            map_[position] = actual_tile & BIT_MASK_PASSABLE
            self.position = position = position + step
            map_[position] = target_tile | align
            return True
        # Two tiles ahead — assumes the map has a blocking border so this cannot index out of
        # range; TODO confirm.
        after_tile = map_[position + (step << 1)]
        # Pushing: the target holds a pushable (box) and the tile behind it is free floor or an
        # unfilled hole.
        if target_tile & BIT_MASK_PUSHABLE and after_tile & (BIT_MASK_PASSABLE | BIT_MASK_HOLE_U):
            self.history.append(
                HistoryElement(
                    position,
                    False,
                    (
                        (position, actual_tile),
                        (position + step, target_tile),
                        (position + (step << 1), after_tile),
                    ),
                )
            )
            map_[position] = actual_tile & BIT_MASK_PASSABLE
            self.position = position = position + step
            # The box bits appear to be stored 3 bits above the floor bits: `>> 3` removes the
            # box from the vacated tile, `<< 3` drops it onto the next one — TODO confirm the
            # bit layout.
            map_[position] = (target_tile >> 3) | align
            if after_tile & BIT_MASK_PASSABLE:
                map_[position + step] = after_tile << 3
            else:
                # Pushed into an unfilled hole: the hole becomes a filled one.
                map_[position + step] = BIT_MASK_HOLE_P
            return True
        return False
def skill_can_activate(self):
"""
Activates the character's skill if applicable.
Returns
-------
success : `bool`
Whether the skill was activated.
"""
if not self.has_skill:
return False
if self.chapter.skill_can_activate(self):
return True
return False
def skill_activate(self):
"""
Activates (or deactivates) the character's skill.
Returns
-------
success : `bool`
Whether the skill could be (de)activated.
"""
if not self.has_skill:
return False
if self.next_skill:
self.next_skill = False
return True
if self.chapter.skill_can_activate(self):
self.next_skill = True
return True
return False
def button_skill_get(self):
"""
Gets the actual button skill to show up.
Returns
-------
button : `ComponentButton``
"""
chapter = self.chapter
if self.next_skill:
button = chapter.button_skill_activated
elif not self.has_skill:
button = chapter.button_skill_used
elif chapter.skill_can_activate(self):
button = chapter.button_skill_enabled
else:
button = chapter.button_skill_disabled
return button
    def render_description(self):
        """
        Renders the description of the game's embed.
        
        The map is rendered as one emoji per tile. When the map would not fit into
        `MAX_RENDER_EMOJI`, border rows / columns are cropped off first.
        
        Returns
        -------
        description : `str`
        """
        style = self.chapter.style
        result = []
        map_ = self.map
        limit = len(map_)
        step = self.stage.x_size
        if limit <= MAX_RENDER_EMOJI:
            # The whole map fits; render every tile.
            start = 0
            shift = 0
        else:
            # Too many tiles; crop the map's border. `shift` is how many tiles to skip between
            # rendered rows (the cropped edge columns).
            step_count = limit // step
            if step_count < step:
                # Wider than tall -> prefer dropping the left and right edge columns.
                if (step_count * (step - 2)) <= MAX_RENDER_EMOJI:
                    start = 1
                    step -= 2
                    shift = 2
                else:
                    # Still too big -> also drop the first and last row.
                    start = step + 1
                    limit -= step
                    step -= 2
                    shift = 2
            else:
                # Taller than wide -> prefer dropping the first and last row.
                if ((step_count - 2) * step) <= MAX_RENDER_EMOJI:
                    start = step
                    limit -= step
                    shift = 0
                else:
                    # Still too big -> also drop the edge columns.
                    start = step + 1
                    limit -= step
                    step -= 2
                    shift = 2
        while start < limit:
            end = start + step
            result.append(''.join([style[element] for element in map_[start:end]]))
            start = end + shift
        return '\n'.join(result)
    def render_playing(self):
        """
        Renders the game's embeds and components.
        
        Returns
        -------
        embed : ``Embed``
            The game's embed.
        components : `tuple` of ``Row`` of ``ComponentButton``
            The components of the game.
        """
        stage = self.stage
        difficulty_name = DIFFICULTY_NAMES.get(stage.difficulty_index, '???')
        title = (
            f'Chapter {stage.chapter_index + 1} {self.chapter.emoji.as_emoji}, {difficulty_name}: '
            f'{stage.stage_index + 1}'
        )
        description = self.render_description()
        color = DIFFICULTY_COLORS.get(stage.difficulty_index, DUNGEON_SWEEPER_COLOR)
        embed = Embed(title, description, color = color)
        # Footer: step counter, and the recorded best when there is one (`-1` means none).
        steps = len(self.history)
        best = self.best
        if (best == -1):
            footer = f'steps : {steps}'
        else:
            footer = f'steps : {steps}, best : {best}'
        embed.add_footer(footer)
        # Direction buttons are enabled / disabled depending on where the character can move.
        button_skill = self.button_skill_get()
        directions = self.get_move_directions()
        button_north = directions.get_button_north()
        button_north_east = directions.get_button_north_east()
        button_east = directions.get_button_east()
        button_south_east = directions.get_button_south_east()
        button_south = directions.get_button_south()
        button_south_west = directions.get_button_south_west()
        button_west = directions.get_button_west()
        button_north_west = directions.get_button_north_west()
        if self.can_back_or_reset():
            button_back = BUTTON_BACK_ENABLED
            button_reset = BUTTON_RESET_ENABLED
        else:
            button_back = BUTTON_BACK_DISABLED
            button_reset = BUTTON_RESET_DISABLED
        # 3x4 grid: a direction pad on the left, the control column on the right.
        components = (
            Row(button_north_west , button_north , button_north_east , button_back ,),
            Row(button_west       , button_skill , button_east       , button_reset  ,),
            Row(button_south_west , button_south , button_south_east , BUTTON_CANCEL ,),
        )
        return embed, components
    def render_end_screen(self):
        """
        Renders the game's end-game screen and components.
        
        Returns
        -------
        embed : ``Embed``
            The game's embed.
        components : `tuple` of ``Row`` of ``ComponentButton``
            The components of the game.
        """
        stage = self.stage
        steps = len(self.history)
        rating = get_rating_for(self.stage, steps)
        # Display the better of the recorded best and this run (`-1` means no recorded best).
        best = self.best
        if (best == -1) or (best > steps):
            best = steps
        difficulty_name = DIFFICULTY_NAMES.get(stage.difficulty_index, '???')
        title = (
            f'Chapter {stage.chapter_index + 1} {self.chapter.emoji.as_emoji} {difficulty_name} '
            f'{stage.stage_index + 1} finished with {steps} steps with {rating} rating!'
        )
        description = self.render_description()
        color = DIFFICULTY_COLORS.get(stage.difficulty_index, DUNGEON_SWEEPER_COLOR)
        embed = Embed(title, description, color = color).add_footer(f'steps : {steps}, best : {best}')
        # `next` is only enabled when there is a following stage.
        if self.stage.after_stage_source is None:
            button_next = BUTTON_NEXT_DISABLED
        else:
            button_next = BUTTON_NEXT
        components = (
            Row(BUTTON_CLOSE , BUTTON_RESTART , button_next ,),
        )
        return embed, components
def can_back_or_reset(self):
"""
Returns whether the character can go back, or resetting the game is available.
Returns
-------
can_back_or_reset : `bool`
"""
if self.next_skill:
return True
if self.history:
return True
return False
def back(self):
"""
Goes back one step.
Returns
-------
success : `bool`
Whether the character could go back one step.
"""
if self.next_skill:
self.next_skill = False
return True
history = self.history
if not history:
return False
element = history.pop( -1)
map_ = self.map
self.position = element.position
for position, value in element.changes:
map_[position] = value
if element.was_skill:
self.has_skill = True
return True
def reset(self):
"""
Resets the game.
Returns
-------
success : `bool`
Whether the map was reset.
"""
history = self.history
if not history:
return False
history.clear()
self.position = self.stage.start
self.map = self.stage.map.copy()
self.has_skill = True
return True
def can_play_selected_stage(user_state):
    """
    Returns whether the user can play the selected stage.
    
    Parameters
    ----------
    user_state : ``UserState``
        The respective user state.
    
    Returns
    -------
    can_play_selected_stage : `bool`
        Whether the selected stage can be played.
    """
    selected_stage_id = user_state.selected_stage_id
    try:
        stage = STAGES_BY_ID[selected_stage_id]
    except KeyError:
        # Unknown stage id; fall back to the very first stage, which is always playable.
        stage = STAGES_BY_ACCESS_ROUTE[(CHAPTER_REIMU_INDEX, 0, 0)]
        user_state.selected_stage_id = stage.id
        return True
    # The first chapter is always unlocked.
    if stage.chapter_index == CHAPTER_REIMU_INDEX:
        return True
    stage_results = user_state.stage_results
    # Already finished once -> replayable.
    if stage.id in stage_results:
        return True
    # Otherwise the stage is playable only when its predecessor is completed; figure out which
    # stage that is.
    stage_index = stage.stage_index
    difficulty_index = stage.difficulty_index
    chapter_index = stage.chapter_index
    if stage_index:
        # Not the first stage of its difficulty -> check the previous stage.
        stage_index -= 1
    else:
        if difficulty_index:
            # First stage of a difficulty -> step back to the previous difficulty.
            # NOTE(review): this checks index 0 of the previous difficulty below — confirm that
            # is the intended unlock stage (not its last stage).
            difficulty_index -= 1
        else:
            if chapter_index:
                # First stage of the chapter -> the previous chapter's unlock stage must be done.
                if CHAPTERS[chapter_index - 1].next_stage_unlock_id in stage_results:
                    return True
                else:
                    return False
            else:
                return True
    if stage.chapter.difficulties[difficulty_index][stage_index].id in stage_results:
        return True
    return False
def get_selectable_stages(user_state):
    """
    Gets the stages around the selected one to show in the stage-select menu.
    
    Parameters
    ----------
    user_state : ``UserState``
        The respective user state.
    
    Returns
    -------
    selectable_stages : `list` of (``StageSource``, `int`, `bool`)
        The selectable stages in a list of tuples. Contains 3 elements: `stage` , `best`, `is_selected`.
    """
    try:
        selected_stage = STAGES_BY_ID[user_state.selected_stage_id]
    except KeyError:
        # Unknown stage id; fall back to the very first stage.
        selected_stage = STAGES_BY_ACCESS_ROUTE[(CHAPTER_REIMU_INDEX, 0, 0)]
        user_state.selected_stage_id = selected_stage.id
    
    # Collect up to 3 stages before and after the selected one.
    # (Redundant trailing `continue` statements of the old loops were removed.)
    stages = []
    stage_source = selected_stage
    for _ in range(3):
        stage_source = stage_source.before_stage_source
        if stage_source is None:
            break
        stages.append(stage_source)
    stages.reverse()
    stages.append(selected_stage)
    stage_source = selected_stage
    for _ in range(3):
        stage_source = stage_source.after_stage_source
        if stage_source is None:
            break
        stages.append(stage_source)
    
    selectable_stages = []
    stage_results = user_state.stage_results
    for stage in stages:
        is_selected = stage is selected_stage
        try:
            stage_result = stage_results[stage.id]
        except KeyError:
            user_best = -1
        else:
            user_best = stage_result.best
        selectable_stages.append((stage, user_best, is_selected))
        # Stop after the first stage without a recorded result; later ones are not unlocked yet.
        if user_best == -1:
            break
    
    # The menu lists stages top-down, later stages first.
    selectable_stages.reverse()
    return selectable_stages
def render_menu(user_state):
    """
    Renders the user state's menu's embeds and components.
    
    Parameters
    ----------
    user_state : ``UserState``
        The respective user state.
    
    Returns
    -------
    embed : ``Embed``
        The menu's embed.
    components : `tuple` of ``Row`` of ``ComponentButton``
        The components of the menu.
    """
    try:
        stage = STAGES_BY_ID[user_state.selected_stage_id]
    except KeyError:
        # something went wrong
        chapter = CHAPTERS[CHAPTER_REIMU_INDEX]
        user_state.selected_stage_id = STAGES_BY_ACCESS_ROUTE[(CHAPTER_REIMU_INDEX, 0, 0)].id
    else:
        chapter = stage.chapter
    embed = Embed(f'Chapter {chapter.id + 1}').add_thumbnail(chapter.emoji.url)
    if can_play_selected_stage(user_state):
        # One field per nearby stage; the selected one is highlighted with bold text.
        get_selectable = get_selectable_stages(user_state)
        color = DIFFICULTY_COLORS[0]
        for stage, best, is_selected in get_selectable:
            difficulty_name = DIFFICULTY_NAMES.get(stage.difficulty_index, '???')
            field_name = f'{difficulty_name} level {stage.stage_index + 1}'
            if best == -1:
                field_value = 'No results recorded yet!'
            else:
                rating = get_rating_for(stage, best)
                field_value = f'rating {rating}; steps : {best}'
            if is_selected:
                field_name = f'**{field_name} <--**'
                field_value = f'**{field_value}**'
                color = DIFFICULTY_COLORS.get(stage.difficulty_index, DUNGEON_SWEEPER_COLOR)
            embed.add_field(field_name, field_value)
        embed.color = color
        # `get_selectable_stages` returns the stages reversed: element `[0]` is the furthest
        # stage after the selected one, `[-1]` the furthest before; `[2]` is the selected flag.
        if get_selectable[0][2]:
            button_stage_after = BUTTON_UP_DISABLED
            button_stage_after2 = BUTTON_UP2_DISABLED
        else:
            button_stage_after = BUTTON_UP_ENABLED
            button_stage_after2 = BUTTON_UP2_ENABLED
        if get_selectable[-1][2]:
            button_stage_before = BUTTON_DOWN_DISABLED
            button_stage_before2 = BUTTON_DOWN2_DISABLED
        else:
            button_stage_before = BUTTON_DOWN_ENABLED
            button_stage_before2 = BUTTON_DOWN2_ENABLED
        button_select = BUTTON_SELECT_ENABLED
    else:
        # The chapter is locked; show how to unlock it and disable the navigation buttons.
        embed.color = COLOR_TUTORIAL
        embed.description = (
            f'**You must finish chapter {chapter.id} {CHAPTER_UNLOCK_DIFFICULTY_NAME} '
            f'{CHAPTER_UNLOCK_STAGE + 1} first.**'
        )
        button_stage_before = BUTTON_DOWN_DISABLED
        button_stage_before2 = BUTTON_DOWN2_DISABLED
        button_stage_after = BUTTON_UP_DISABLED
        button_stage_after2 = BUTTON_UP2_DISABLED
        button_select = BUTTON_SELECT_DISABLED
    # `CHAPTERS` is keyed by chapter id, so membership tells whether a neighbour chapter exists.
    if chapter.id + 1 in CHAPTERS:
        button_chapter_next = BUTTON_RIGHT_ENABLED
    else:
        button_chapter_next = BUTTON_RIGHT_DISABLED
    if chapter.id == 0:
        button_chapter_before = BUTTON_LEFT_DISABLED
    else:
        button_chapter_before = BUTTON_LEFT_ENABLED
    components = (
        Row(BUTTON_EMPTY_1        , button_stage_after  , button_stage_after2  , BUTTON_EMPTY_2      ,),
        Row(button_chapter_before , button_select       , BUTTON_CLOSE         , button_chapter_next ,),
        Row(BUTTON_EMPTY_3        , button_stage_before , button_stage_before2 , BUTTON_EMPTY_4      ,),
    )
    return embed, components
async def action_processor_up(dungeon_sweeper_runner):
    """
    Processes `up` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `up` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    selected_stage_id = user_state.selected_stage_id
    # Only completed stages allow moving upwards.
    if selected_stage_id not in user_state.stage_results:
        return False
    next_stage = STAGES_BY_ID[selected_stage_id].after_stage_source
    if next_stage is None:
        return False
    user_state.selected_stage_id = next_stage.id
    return True
async def action_processor_up2(dungeon_sweeper_runner):
    """
    Processes `up2` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `up2` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    selected_stage_id = user_state.selected_stage_id
    stage_results = user_state.stage_results
    # Only completed stages allow moving upwards.
    if selected_stage_id not in stage_results:
        return False
    selected_stage = STAGES_BY_ID[selected_stage_id]
    # Try to jump multiple stages ahead; stop early at the end of the chain or at the first
    # stage without a result.
    for step_index in range(STAGE_STEP_MULTI_STEP_BUTTON):
        candidate = selected_stage.after_stage_source
        if candidate is None:
            if step_index:
                break
            return False
        selected_stage = candidate
        if selected_stage.id not in stage_results:
            break
    user_state.selected_stage_id = selected_stage.id
    return True
async def action_processor_down(dungeon_sweeper_runner):
    """
    Processes `down` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `down` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    previous_stage = STAGES_BY_ID[user_state.selected_stage_id].before_stage_source
    if previous_stage is None:
        return False
    user_state.selected_stage_id = previous_stage.id
    return True
async def action_processor_down2(dungeon_sweeper_runner):
    """
    Processes `down2` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `down2` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    selected_stage = STAGES_BY_ID[user_state.selected_stage_id]
    # Try to jump multiple stages back; stop early at the start of the chain.
    for step_index in range(STAGE_STEP_MULTI_STEP_BUTTON):
        candidate = selected_stage.before_stage_source
        if candidate is None:
            if step_index:
                break
            return False
        selected_stage = candidate
    user_state.selected_stage_id = selected_stage.id
    return True
async def action_processor_left(dungeon_sweeper_runner):
    """
    Processes `left` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `left` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    stage_source = STAGES_BY_ID[user_state.selected_stage_id]
    try:
        chapter = CHAPTERS[stage_source.chapter.id - 1]
    except KeyError:
        # Already at the first chapter.
        return False
    # Keep the stage position within the chapter, clamped to its stage count.
    chapter_stages_sorted = chapter.stages_sorted
    index = min(stage_source.index, len(chapter_stages_sorted) - 1)
    user_state.selected_stage_id = chapter_stages_sorted[index].id
    return True
async def action_processor_right(dungeon_sweeper_runner):
    """
    Processes `right` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `right` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    stage_source = STAGES_BY_ID[user_state.selected_stage_id]
    try:
        chapter = CHAPTERS[stage_source.chapter.id + 1]
    except KeyError:
        # Already at the last chapter.
        return False
    # Keep the stage position within the chapter, clamped to its stage count.
    chapter_stages_sorted = chapter.stages_sorted
    index = min(stage_source.index, len(chapter_stages_sorted) - 1)
    stage_source_id = chapter_stages_sorted[index].id
    stage_results = user_state.stage_results
    # When neither the target nor its predecessor was completed, jump back to the chapter's
    # first stage instead.
    if (
        (stage_source_id not in stage_results) and
        (chapter_stages_sorted[index - 1].id not in stage_results)
    ):
        stage_source_id = chapter.difficulties[0][0].id
    user_state.selected_stage_id = stage_source_id
    return True
async def action_processor_select(dungeon_sweeper_runner):
    """
    Processes `select` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `select` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_MENU:
        return False
    user_state = dungeon_sweeper_runner.user_state
    if not can_play_selected_stage(user_state):
        return False
    selected_stage_id = user_state.selected_stage_id
    selected_stage = STAGES_BY_ID[selected_stage_id]
    try:
        stage_result = user_state.stage_results[selected_stage_id]
    except KeyError:
        # No recorded result yet.
        best = -1
    else:
        best = stage_result.best
    user_state.game_state = GameState(selected_stage, best)
    dungeon_sweeper_runner._runner_state = RUNNER_STATE_PLAYING
    return True
async def action_processor_west(dungeon_sweeper_runner):
    """
    Processes `west` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `west` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    user_state = dungeon_sweeper_runner.user_state
    game_state = user_state.game_state
    success = game_state.move_west()
    # A successful move may finish the stage; record the result and switch to the end screen.
    if success and game_state.done():
        await user_state.set_best(game_state.stage.id, len(game_state.history))
        dungeon_sweeper_runner._runner_state = RUNNER_STATE_END_SCREEN
    return success
async def action_processor_north(dungeon_sweeper_runner):
    """
    Processes `north` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `north` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    user_state = dungeon_sweeper_runner.user_state
    game_state = user_state.game_state
    success = game_state.move_north()
    # A successful move may finish the stage; record the result and switch to the end screen.
    if success and game_state.done():
        await user_state.set_best(game_state.stage.id, len(game_state.history))
        dungeon_sweeper_runner._runner_state = RUNNER_STATE_END_SCREEN
    return success
async def action_processor_south(dungeon_sweeper_runner):
    """
    Processes `south` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `south` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    user_state = dungeon_sweeper_runner.user_state
    game_state = user_state.game_state
    success = game_state.move_south()
    # A successful move may finish the stage; record the result and switch to the end screen.
    if success and game_state.done():
        await user_state.set_best(game_state.stage.id, len(game_state.history))
        dungeon_sweeper_runner._runner_state = RUNNER_STATE_END_SCREEN
    return success
async def action_processor_east(dungeon_sweeper_runner):
    """
    Processes `east` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `east` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    user_state = dungeon_sweeper_runner.user_state
    game_state = user_state.game_state
    success = game_state.move_east()
    # A successful move may finish the stage; record the result and switch to the end screen.
    if success and game_state.done():
        await user_state.set_best(game_state.stage.id, len(game_state.history))
        dungeon_sweeper_runner._runner_state = RUNNER_STATE_END_SCREEN
    return success
async def action_processor_back(dungeon_sweeper_runner):
    """
    Processes `back` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `back` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    return dungeon_sweeper_runner.user_state.game_state.back()
async def action_processor_reset(dungeon_sweeper_runner):
    """
    Processes `reset` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `reset` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    return dungeon_sweeper_runner.user_state.game_state.reset()
async def action_processor_cancel(dungeon_sweeper_runner):
    """
    Processes `cancel` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `cancel` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    # Drop the running game and return to the menu.
    dungeon_sweeper_runner._runner_state = RUNNER_STATE_MENU
    dungeon_sweeper_runner.user_state.game_state = None
    return True
async def action_processor_skill(dungeon_sweeper_runner):
    """
    Processes `skill` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `skill` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_PLAYING:
        return False
    return dungeon_sweeper_runner.user_state.game_state.skill_activate()
async def action_processor_close(dungeon_sweeper_runner):
    """
    Processes `close` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `close` button could be pressed.
    """
    runner_state = dungeon_sweeper_runner._runner_state
    # From the menu, closing shuts the whole runner down.
    if runner_state == RUNNER_STATE_MENU:
        dungeon_sweeper_runner._runner_state = RUNNER_STATE_CLOSED
        return True
    # From the end screen, closing drops the finished game and returns to the menu.
    if runner_state == RUNNER_STATE_END_SCREEN:
        dungeon_sweeper_runner.user_state.game_state = None
        dungeon_sweeper_runner._runner_state = RUNNER_STATE_MENU
        return True
    return False
async def action_processor_next(dungeon_sweeper_runner):
    """
    Processes `next` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `next` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_END_SCREEN:
        return False
    user_state = dungeon_sweeper_runner.user_state
    next_stage = user_state.game_state.stage.after_stage_source
    if next_stage is None:
        # No following stage to continue with.
        return False
    dungeon_sweeper_runner._runner_state = RUNNER_STATE_PLAYING
    next_stage_id = next_stage.id
    user_state.selected_stage_id = next_stage_id
    try:
        stage_result = user_state.stage_results[next_stage_id]
    except KeyError:
        best = -1
    else:
        best = stage_result.best
    user_state.game_state = GameState(next_stage, best)
    return True
async def action_processor_restart(dungeon_sweeper_runner):
    """
    Processes `restart` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `restart` button could be pressed.
    """
    if dungeon_sweeper_runner._runner_state != RUNNER_STATE_END_SCREEN:
        return False
    dungeon_sweeper_runner.user_state.game_state.restart()
    dungeon_sweeper_runner._runner_state = RUNNER_STATE_PLAYING
    return True
async def action_processor_north_to_east(dungeon_sweeper_runner):
    """
    Processes `north -> east` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `north -> east` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_north(dungeon_sweeper_runner):
        return False
    await action_processor_east(dungeon_sweeper_runner)
    return True
async def action_processor_north_to_west(dungeon_sweeper_runner):
    """
    Processes `north -> west` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `north -> west` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_north(dungeon_sweeper_runner):
        return False
    await action_processor_west(dungeon_sweeper_runner)
    return True
async def action_processor_south_to_east(dungeon_sweeper_runner):
    """
    Processes `south -> east` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `south -> east` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_south(dungeon_sweeper_runner):
        return False
    await action_processor_east(dungeon_sweeper_runner)
    return True
async def action_processor_south_to_west(dungeon_sweeper_runner):
    """
    Processes `south -> west` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `south -> west` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_south(dungeon_sweeper_runner):
        return False
    await action_processor_west(dungeon_sweeper_runner)
    return True
async def action_processor_east_to_north(dungeon_sweeper_runner):
    """
    Processes `east -> north` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `east -> north` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_east(dungeon_sweeper_runner):
        return False
    await action_processor_north(dungeon_sweeper_runner)
    return True
async def action_processor_east_to_south(dungeon_sweeper_runner):
    """
    Processes `east -> south` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `east -> south` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_east(dungeon_sweeper_runner):
        return False
    await action_processor_south(dungeon_sweeper_runner)
    return True
async def action_processor_west_to_north(dungeon_sweeper_runner):
    """
    Processes `west -> north` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `west -> north` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_west(dungeon_sweeper_runner):
        return False
    await action_processor_north(dungeon_sweeper_runner)
    return True
async def action_processor_west_to_south(dungeon_sweeper_runner):
    """
    Processes `west -> south` button click.
    
    This function is a coroutine.
    
    Parameters
    ----------
    dungeon_sweeper_runner : ``DungeonSweeperRunner``
        The respective dungeon sweeper runner.
    
    Returns
    -------
    success : `bool`
        Whether the `west -> south` button could be pressed.
    """
    # The second step is best effort: its failure does not fail the action.
    if not await action_processor_west(dungeon_sweeper_runner):
        return False
    await action_processor_south(dungeon_sweeper_runner)
    return True
# Dispatch table: maps a component's `custom_id` to the coroutine processing its click.
ACTION_PROCESSORS = {
    CUSTOM_ID_UP : action_processor_up,
    CUSTOM_ID_DOWN : action_processor_down,
    CUSTOM_ID_UP2 : action_processor_up2,
    CUSTOM_ID_DOWN2 : action_processor_down2,
    CUSTOM_ID_RIGHT : action_processor_right,
    CUSTOM_ID_LEFT : action_processor_left,
    CUSTOM_ID_SELECT : action_processor_select,
    CUSTOM_ID_WEST : action_processor_west,
    CUSTOM_ID_NORTH : action_processor_north,
    CUSTOM_ID_SOUTH : action_processor_south,
    CUSTOM_ID_EAST : action_processor_east,
    CUSTOM_ID_BACK : action_processor_back,
    CUSTOM_ID_RESET : action_processor_reset,
    CUSTOM_ID_CANCEL : action_processor_cancel,
    CUSTOM_ID_SKILL : action_processor_skill,
    CUSTOM_ID_CLOSE : action_processor_close,
    CUSTOM_ID_NEXT : action_processor_next,
    CUSTOM_ID_RESTART : action_processor_restart,
    CUSTOM_ID_NORTH_TO_EAST : action_processor_north_to_east,
    CUSTOM_ID_NORTH_TO_WEST : action_processor_north_to_west,
    CUSTOM_ID_SOUTH_TO_EAST : action_processor_south_to_east,
    CUSTOM_ID_SOUTH_TO_WEST : action_processor_south_to_west,
    CUSTOM_ID_EAST_TO_NORTH : action_processor_east_to_north,
    CUSTOM_ID_EAST_TO_SOUTH : action_processor_east_to_south,
    CUSTOM_ID_WEST_TO_NORTH : action_processor_west_to_north,
    CUSTOM_ID_WEST_TO_SOUTH : action_processor_west_to_south,
    }
class DungeonSweeperRunner:
    """
    Dungeon sweeper game runner.
    
    Attributes
    ----------
    _canceller : `None`, `CoroutineFunction`
        Canceller set as ``._canceller_function``, meanwhile the gui is not cancelled.
    _gui_state : `int`
        The gui's state.
        Can be any of the following:
        +-------------------------------+-------+
        | Respective name               | Value |
        +===============================+=======+
        | GUI_STATE_NONE                | 0     |
        +-------------------------------+-------+
        | GUI_STATE_READY               | 1     |
        +-------------------------------+-------+
        | GUI_STATE_EDITING             | 2     |
        +-------------------------------+-------+
        | GUI_STATE_CANCELLING          | 3     |
        +-------------------------------+-------+
        | GUI_STATE_CANCELLED           | 4     |
        +-------------------------------+-------+
        | GUI_STATE_SWITCHING_CONTEXT   | 5     |
        +-------------------------------+-------+
    _runner_state : `int`
        The state of the runner.
        Can be any of the following:
        +-------------------------------+-------+
        | Respective name               | Value |
        +===============================+=======+
        | RUNNER_STATE_MENU             | 1     |
        +-------------------------------+-------+
        | RUNNER_STATE_PLAYING          | 2     |
        +-------------------------------+-------+
        | RUNNER_STATE_END_SCREEN       | 3     |
        +-------------------------------+-------+
    _timeouter : `None`, ``Timeouter``
        Timeouts the gui if no action is performed within the expected time.
    client : ``Client``
        The client, who executes the requests.
    message : ``Message``
        The message edited by the runner.
    user : ``ClientUserBase``
        The user, who requested the game.
    user_state : ``UserState``
        The user's user state.
    """
    __slots__ = ('_canceller', '_gui_state', '_runner_state', '_timeouter', 'client', 'message', 'user', 'user_state')
    
    async def __new__(cls, client, event):
        """
        Creates a new dungeon sweeper runner.
        
        This method is a coroutine.
        
        Parameters
        ----------
        client : ``Client``
            The source client.
        event : ``InteractionEvent``
            The received interaction event.
        """
        # The runner edits / deletes its own message later, so manage messages
        # permission is required up front.
        if not event.channel.cached_permissions_for(client).can_manage_messages:
            await client.interaction_response_message_create(
                event,
                'I need manage messages permission in the channel to execute this command.',
                show_for_invoking_user_only = True,
            )
            return
        
        user_id = event.user.id
        try:
            existing_game = DUNGEON_SWEEPER_GAMES[user_id]
        except KeyError:
            pass
        else:
            # `None` is the "game is being constructed" placeholder set below.
            if (existing_game is None):
                await client.interaction_response_message_create(
                    event,
                    'A game is already starting somewhere else.',
                    show_for_invoking_user_only = True,
                )
            else:
                await existing_game.renew(event)
            return
        
        # Reserve the slot while the (async) setup below runs.
        DUNGEON_SWEEPER_GAMES[user_id] = None
        
        user_state = None
        try:
            # Load the user's saved state and acknowledge the interaction
            # concurrently.
            task_user_state_create = Task(KOKORO, UserState(user_id))
            task_interaction_acknowledge = Task(KOKORO, client.interaction_response_message_create(event))
            await TaskGroup(KOKORO, [task_user_state_create, task_interaction_acknowledge]).wait_all()
            user_state = task_user_state_create.get_result()
            try:
                task_interaction_acknowledge.get_result()
            except BaseException as err:
                if (
                    isinstance(err, ConnectionError) or
                    (
                        isinstance(err, DiscordException) and
                        err.code == ERROR_CODES.unknown_interaction
                    )
                ):
                    await user_state.upload_game_state_on_init_failure()
                    # NOTE(review): this early return leaves the `None`
                    # placeholder in DUNGEON_SWEEPER_GAMES (only the `except`
                    # below removes it), blocking the user from starting a new
                    # game — confirm whether it should be deleted here too.
                    return # Happens, I guess
                else:
                    raise
            
            game_state = user_state.game_state
            if game_state is None:
                embed, components = render_menu(user_state)
                runner_state = RUNNER_STATE_MENU
            else:
                if game_state.done():
                    embed, components = user_state.game_state.render_end_screen()
                    runner_state = RUNNER_STATE_END_SCREEN
                else:
                    embed, components = user_state.game_state.render_playing()
                    runner_state = RUNNER_STATE_PLAYING
            
            user = event.user
            embed.add_author(user.full_name, user.avatar_url_as('png', 32))
            message = await client.interaction_followup_message_create(event, embed = embed, components = components)
        except:
            # Persist whatever we loaded and free the reserved slot.
            if (user_state is not None):
                await user_state.upload_game_state_on_init_failure()
            del DUNGEON_SWEEPER_GAMES[user_id]
            raise
        
        self = object.__new__(cls)
        self._canceller = cls._canceller_function
        self.client = client
        self.user = event.user
        self.message = message
        self.user_state = user_state
        self._timeouter = Timeouter(self, GUI_TIMEOUT)
        self._gui_state = GUI_STATE_READY
        self._runner_state = runner_state
        DUNGEON_SWEEPER_GAMES[user_id] = self
        
        client.slasher.add_component_interaction_waiter(message, self)
        return self
    
    async def renew(self, event):
        """
        Renews the interaction gui creating a new message.
        
        This method is a coroutine.
        
        Parameters
        ----------
        event : ``InteractionEvent``
            The received interaction event.
        """
        if self._gui_state in (GUI_STATE_CANCELLING, GUI_STATE_CANCELLED, GUI_STATE_SWITCHING_CONTEXT):
            return
        
        user_state = self.user_state
        runner_state = self._runner_state
        if runner_state == RUNNER_STATE_MENU:
            embed, components = render_menu(user_state)
        elif runner_state == RUNNER_STATE_PLAYING:
            embed, components = user_state.game_state.render_playing()
        elif runner_state == RUNNER_STATE_END_SCREEN:
            embed, components = user_state.game_state.render_end_screen()
        else:
            # Hacker trying to hack Huyane
            return
        
        self._gui_state = GUI_STATE_SWITCHING_CONTEXT
        client = self.client
        try:
            await client.interaction_response_message_create(event)
            message = await client.interaction_followup_message_create(event, embed = embed, components = components)
        except BaseException as err:
            if self._gui_state == GUI_STATE_SWITCHING_CONTEXT:
                self._gui_state = GUI_STATE_READY
            # Connection hiccups and expired interactions are tolerated.
            if (
                isinstance(err, ConnectionError) or
                (
                    isinstance(err, DiscordException) and
                    err.code == ERROR_CODES.unknown_interaction
                )
            ):
                return
            raise
        
        # Best effort: strip components off the old message.
        try:
            await client.message_edit(self.message, components = None)
        except BaseException as err:
            if not (
                isinstance(err, ConnectionError) or
                (
                    isinstance(err, DiscordException) and
                    err.code in (
                        ERROR_CODES.unknown_message, # message deleted
                        ERROR_CODES.unknown_channel, # channel deleted
                        ERROR_CODES.missing_access, # client removed
                        ERROR_CODES.missing_permissions, # permissions changed meanwhile
                    )
                )
            ):
                await client.events.error(client, f'{self!r}.renew', err)
        
        # Move the component waiter over to the new message.
        client.slasher.remove_component_interaction_waiter(self.message, self)
        client.slasher.add_component_interaction_waiter(message, self)
        self.message = message
        if self._gui_state == GUI_STATE_SWITCHING_CONTEXT:
            self._gui_state = GUI_STATE_READY
        
        timeouter = self._timeouter
        if (timeouter is not None):
            timeouter.set_timeout(GUI_TIMEOUT)
    
    async def __call__(self, event):
        """
        Calls the dungeon sweeper runner, processing a component event.
        
        This method is a coroutine.
        
        Parameters
        ----------
        event : ``InteractionEvent``
            The received interaction event.
        """
        client = self.client
        # Only the game owner may press the buttons.
        if event.user is not self.user:
            await client.interaction_component_acknowledge(event)
            return
        
        gui_state = self._gui_state
        if gui_state != GUI_STATE_READY:
            await client.interaction_component_acknowledge(event)
            return
        
        custom_id = event.interaction.custom_id
        try:
            action_processor = ACTION_PROCESSORS[custom_id]
        except KeyError:
            # Unknown component: silently ignored (not even acknowledged).
            return
        
        user_state = self.user_state
        # Action processors return False when the press had no effect; then
        # there is nothing to re-render.
        if not await action_processor(self):
            return
        
        runner_state = self._runner_state
        if runner_state == RUNNER_STATE_MENU:
            embed, components = render_menu(user_state)
        elif runner_state == RUNNER_STATE_PLAYING:
            embed, components = user_state.game_state.render_playing()
        elif runner_state == RUNNER_STATE_END_SCREEN:
            embed, components = user_state.game_state.render_end_screen()
        elif runner_state == RUNNER_STATE_CLOSED:
            self.cancel(CancelledError())
            return
        else:
            # Hacker trying to hack Huyane
            return
        
        user = self.user
        embed.add_author(user.full_name, user.avatar_url_as('png', 32))
        
        self._gui_state = GUI_STATE_EDITING
        try:
            try:
                await client.interaction_component_message_edit(event, embed = embed, components = components)
            except DiscordException as err:
                # NOTE(review): this chain re-raises exactly when
                # `err.code == unknown_interaction` and falls back to a plain
                # message edit for every other client error — the usual
                # pattern is the inverse (fall back on expired interactions,
                # raise on unexpected codes). Confirm the condition is not
                # inverted.
                if err.status >= 500:
                    pass
                elif err.code != ERROR_CODES.unknown_interaction:
                    pass
                else:
                    raise
                await client.message_edit(self.message, embed = embed, components = components)
        except BaseException as err:
            self.cancel(err)
            raise
        
        if self._gui_state == GUI_STATE_EDITING:
            self._gui_state = GUI_STATE_READY
        
        timeouter = self._timeouter
        if (timeouter is not None):
            timeouter.set_timeout(GUI_TIMEOUT)
    
    def cancel(self, exception = None):
        """
        Cancels the dungeon sweeper gui with the given exception if applicable.
        
        Parameters
        ----------
        exception : `None`, ``BaseException``, Optional
            Exception to cancel the pagination with. Defaults to `None`
        
        Returns
        -------
        canceller_task : `None`, ``Task``
        """
        if self._gui_state in (GUI_STATE_READY, GUI_STATE_EDITING, GUI_STATE_CANCELLING):
            self._gui_state = GUI_STATE_CANCELLED
        
        canceller = self._canceller
        # A `None` canceller means the gui was already cancelled.
        if canceller is None:
            return
        
        self._canceller = None
        
        timeouter = self._timeouter
        if (timeouter is not None):
            timeouter.cancel()
        
        return Task(KOKORO, canceller(self, exception))
    
    async def _canceller_function(self, exception):
        """
        Cancels the gui state, saving the current game if needed.
        
        This method is a coroutine.
        
        Parameters
        ----------
        exception : `None`, ``BaseException``
            The exception the gui was cancelled with.
        """
        # Persist the game before touching anything else.
        await self.user_state.upload()
        
        user_id = self.user.id
        if DUNGEON_SWEEPER_GAMES.get(user_id, None) is self:
            del DUNGEON_SWEEPER_GAMES[user_id]
        
        client = self.client
        message = self.message
        
        client.slasher.remove_component_interaction_waiter(message, self)
        
        if self._gui_state == GUI_STATE_SWITCHING_CONTEXT:
            # the message is not ours, we should not do anything with it.
            return
        
        self._gui_state = GUI_STATE_CANCELLED
        
        if not await self._handle_close_exception(exception):
            await client.events.error(client, f'{self!r}._canceller_function', exception)
    
    async def _handle_close_exception(self, exception):
        """
        Handles close exception if any.
        
        This method is a coroutine.
        
        Parameters
        ----------
        exception : `None`, `BaseException`
            The close exception to handle.
        
        Returns
        -------
        exception_handled : `bool`
            Whether the exception was handled.
        """
        if exception is None:
            return True
        
        client = self.client
        message = self.message
        
        # User-initiated close: delete the gui message entirely.
        if isinstance(exception, CancelledError):
            try:
                await client.message_delete(message)
            except BaseException as err:
                if isinstance(err, ConnectionError):
                    # no internet
                    return True
                if isinstance(err, DiscordException):
                    if err.code in (
                        ERROR_CODES.unknown_channel, # channel deleted
                        ERROR_CODES.unknown_message, # message deleted
                        ERROR_CODES.missing_access, # client removed
                    ):
                        return True
                await client.events.error(client, f'{self!r}._handle_close_exception', err)
            return True
        
        # Timeout: just remove the buttons, keep the board visible.
        if isinstance(exception, TimeoutError):
            try:
                await client.message_edit(message, components = None)
            except BaseException as err:
                if isinstance(err, ConnectionError):
                    # no internet
                    return True
                if isinstance(err, DiscordException):
                    if err.code in (
                        ERROR_CODES.unknown_message, # message deleted
                        ERROR_CODES.unknown_channel, # channel deleted
                        ERROR_CODES.missing_access, # client removed
                        ERROR_CODES.missing_permissions, # permissions changed meanwhile
                    ):
                        return True
                await client.events.error(client, f'{self!r}._handle_close_exception', err)
            return True
        
        # Client shutdown: tell the user their progress was saved.
        if isinstance(exception, SystemExit):
            user = self.user
            embed = Embed(
                f'I am restarting',
                (
                    'Your progress has been saved, please try using the command again later.\n'
                    '\n'
                    'I am sorry for the inconvenience. See ya later qtie!'
                ),
            ).add_author(
                user.full_name,
                user.avatar_url_as('png', 32),
            ).add_thumbnail(
                EMOJI_KOISHI_WAVE.url,
            )
            try:
                await client.message_edit(message, embed = embed, components = None)
            except BaseException as err:
                if isinstance(err, ConnectionError):
                    # no internet
                    return True
                if isinstance(err, DiscordException):
                    if err.code in (
                        ERROR_CODES.unknown_message, # message deleted
                        ERROR_CODES.unknown_channel, # channel deleted
                        ERROR_CODES.missing_access, # client removed
                        ERROR_CODES.missing_permissions, # permissions changed meanwhile
                    ):
                        return True
                await client.events.error(client, f'{self!r}._handle_close_exception', err)
            return True
        
        if isinstance(exception, PermissionError):
            return True
        
        return False
    
    def __repr__(self):
        """Returns the dungeon sweep runner's representation."""
        repr_parts = [
            '<', self.__class__.__name__,
            ' client = ', repr(self.client),
            ', channel = ', repr(self.message.channel),
            ', gui_state = '
        ]
        
        gui_state = self._gui_state
        repr_parts.append(repr(gui_state))
        repr_parts.append(' (')
        gui_state_name = GUI_STATE_VALUE_TO_NAME[gui_state]
        repr_parts.append(gui_state_name)
        repr_parts.append('), ')
        
        # NOTE(review): the runner state is appended without a
        # `runner_state = ` label, and the trailing '), ' before '>' leaves a
        # dangling comma in the output — cosmetic only.
        runner_state = self._runner_state
        repr_parts.append(repr(runner_state))
        repr_parts.append(' (')
        runner_state_name = RUNNER_STATE_VALUE_TO_NAME[runner_state]
        repr_parts.append(runner_state_name)
        repr_parts.append('), ')
        
        repr_parts.append('>')
        return ''.join(repr_parts)
# Parent `/ds` slash command; behavior lives in the sub-commands registered
# below (`rules`, `play`).
DUNGEON_SWEEPER = SLASH_CLIENT.interactions(
    None,
    name = 'ds',
    description = 'Touhou themed puzzle game.',
    is_global = True,
)
@DUNGEON_SWEEPER.interactions
async def rules(client, event):
    """Shows the rules of DS!"""
    # The rules embed uses custom emojis; `abort` raises, so the help is only
    # returned when the client can render them in this channel.
    if not event.channel.cached_permissions_for(client).can_use_external_emojis:
        abort('I have no permissions at this channel to render this message.')
    return RULES_HELP
@DUNGEON_SWEEPER.interactions(is_default = True)
async def play(client, event):
    """Starts the game"""
    # Renew an already-running game in place; otherwise spin up a new runner.
    game = DUNGEON_SWEEPER_GAMES.get(event.user.id, None)
    if game is not None:
        await game.renew(event)
    else:
        await DungeonSweeperRunner(client, event)
@SLASH_CLIENT.events
async def shutdown(client):
    # On client shutdown cancel every running game with ``SystemExit`` so each
    # runner saves its state, then await all cancel tasks together.
    # NOTE(review): DUNGEON_SWEEPER_GAMES values may be the `None` placeholder
    # of a game that is still starting — `game.cancel` would raise
    # AttributeError then; confirm a guard is not needed.
    tasks = []
    exception = SystemExit()
    for game in DUNGEON_SWEEPER_GAMES.values():
        task = game.cancel(exception)
        if (task is not None):
            tasks.append(task)
        # Clear loop references so the last game is not kept alive by this
        # frame while the tasks run.
        task = None
        game = None
    
    if tasks:
        await TaskGroup(KOKORO, tasks).wait_all()
|
from alltrain.Train import *
import torch
from torch.utils.tensorboard import SummaryWriter
import time
# import alltrain.bratsUtils as bratsUtils
import alltrain.atlasUtils as atlasUtils
from multiatlasDataset import *
from tqdm import tqdm
from torch.utils.data import DataLoader
import json
import os
class MATrain(Train):
    """
    Multi-atlas segmentation trainer.
    
    Runs a half-precision (fp16) training/validation loop for the network,
    loss, optimizer and scheduler supplied by `expconfig`, on
    ``MultiAtlasDataset`` splits, logging metrics to tensorboard and JSON.
    """
    def __init__(self, expconfig, split = 0):
        # expconfig: experiment configuration object (net, loss, optimizer,
        #     lr_scheduler, batchsize, epoch count, paths, ...).
        # split: cross-validation split index used for both datasets and the
        #     results file name.
        super(MATrain, self).__init__(expconfig)
        self.expconfig = expconfig
        self.startingTime = time.time()
        # Hard requirement: CUDA device (the loop casts to torch.cuda.HalfTensor).
        self.device = torch.device("cuda")
        self.expconfig.net = expconfig.net.to(self.device)
        self.tb = SummaryWriter(comment=expconfig.experiment_name)
        # Dice bookkeeping; `best*` fields are only updated elsewhere
        # (saveToDisk persists them).
        self.bestMeanDice = 0
        self.bestMeanDiceEpoch = 0
        self.meanDice = 0
        self.smallmeanDice = 0
        trainDataset = MultiAtlasDataset(expconfig, mode="train", randomCrop=None, hasMasks=True, returnOffsets=False, split = split)
        validDataset = MultiAtlasDataset(expconfig, mode="validation", randomCrop=None, hasMasks=True, returnOffsets=False, split = split)
        self.trainDataLoader = DataLoader(dataset=trainDataset, num_workers=1, batch_size=expconfig.batchsize, shuffle=True)
        self.valDataLoader = DataLoader(dataset=validDataset, num_workers=1, batch_size=expconfig.batchsize, shuffle=False)
        # Per-class dice results serialized by save_results().
        self.save_dict = {'original':{} ,'small':{}}
        self.split = split
    
    def step(self, expcf, inputs, labels, total_loss):
        """Run one fp16 forward/backward/optimizer step; return accumulated loss."""
        inputs, labels = inputs.to(self.device).half(), labels.to(self.device)
        # NOTE(review): casting the whole net to half on *every* step is
        # redundant after the first call — confirm it was not meant to happen
        # once in __init__.
        expcf.net.half()
        inputs = inputs.type(torch.cuda.HalfTensor)
        #forward and backward pass
        outputs, _ = expcf.net(inputs)
        loss = expcf.loss(outputs.half(), labels)
        total_loss += loss.item()
        del inputs, outputs, labels
        loss.backward()
        #update params
        expcf.optimizer.step()
        expcf.optimizer.zero_grad()
        del loss
        return total_loss
    
    def train(self):
        """Main training loop: iterate epochs, validate periodically, log to tb."""
        expcf = self.expconfig
        expcf.optimizer.zero_grad()
        print("#### EXPERIMENT : {} | ID : {} ####".format(expcf.experiment_name, expcf.id))
        print("#### TRAIN SET :", len(self.trainDataLoader))
        print("#### VALID SET :", len(self.valDataLoader))
        total_time = 0.0
        # self.validate(0)
        # exit(0)
        for epoch in range(expcf.epoch):
            startTime = time.time()
            expcf.net.train()
            total_loss = 0
            for i, data in tqdm(enumerate(self.trainDataLoader), total = int(len(self.trainDataLoader))) :
                #load data
                # `look_small` datasets yield an extra (small) label tensor.
                if expcf.look_small:
                    inputs, labels, _ = data
                else:
                    inputs, labels = data
                total_loss = self.step(expcf, inputs, labels, total_loss)
                del inputs, labels
            print("epoch: {}, total_loss: {}, mem: {}".format(epoch, total_loss/int(len(self.trainDataLoader)), self.convert_bytes(torch.cuda.max_memory_allocated())))
            epochTime = time.time() - startTime
            total_time += epochTime
            #validation at end of epoch
            if epoch % expcf.validate_every_k_epochs == expcf.validate_every_k_epochs - 1:
                validTime = self.validate(epoch)
                #take lr scheduler step
                if expcf.lr_scheduler != None:
                    expcf.lr_scheduler.step()
                # NOTE(review): `validTime` only exists in validation epochs —
                # assumed this accumulation belongs inside this branch; confirm.
                total_time += validTime
            self.tb.add_scalar("totalTime", total_time, epoch)
            self.tb.add_scalar("train_loss", total_loss/int(len(self.trainDataLoader)), epoch)
            self.tb.add_scalar("meanDice", self.meanDice, epoch)
            self.tb.add_scalar("smallmeanDice", self.smallmeanDice, epoch)
            print("epoch: {}, bestMeanDice: {}, meanDice: {}, smallMeanDice: {}".format(epoch, self.bestMeanDice, self.meanDice, self.smallmeanDice))
        self.tb.close()
    
    def convert_bytes(self, size):
        """Format a byte count as a human-readable string (bytes..TB)."""
        for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
            if size < 1024.0:
                return "%3.2f %s" % (size, x)
            size /= 1024.0
        # NOTE(review): values >= 1024 TB fall through and return a bare
        # number instead of a formatted string.
        return size
    
    def validate(self, epoch):
        """Run validation, fill `save_dict` with per-class dice, return elapsed seconds."""
        expcf = self.expconfig
        startTime = time.time()
        with torch.no_grad():
            expcf.net.eval()
            dice = []
            smalldice = []
            for i, data in tqdm(enumerate(self.valDataLoader), total = int(len(self.valDataLoader))):#enumerate(self.valDataLoader):
                if expcf.look_small:
                    inputs, labels, smalllabels = data
                    inputs, labels, smalllabels = inputs.to(self.device).half(), labels.to(self.device), smalllabels.to(self.device)
                    inputs = inputs.type(torch.cuda.HalfTensor)
                    outputs, smalloutputs = expcf.net(inputs)
                    del inputs
                else:
                    inputs, labels = data
                    inputs, labels = inputs.to(self.device), labels.to(self.device)
                    inputs = inputs.type(torch.cuda.HalfTensor)
                    outputs, _ = expcf.net(inputs)
                    del inputs
                # One-hot -> class index maps for predictions and labels.
                outputs = torch.argmax(outputs, 1)
                if expcf.look_small:
                    smalloutputs = torch.argmax(smalloutputs, 1)
                masks, smallmasks = [], []
                labels = torch.argmax(labels, 1)
                if expcf.look_small:
                    smalllabels = torch.argmax(smalllabels, 1)
                label_masks, smalllabel_masks = [], []
                # NOTE(review): this inner `i` shadows the batch index above
                # (harmless here, but confusing). 12 = number of classes.
                for i in range(12):
                    masks.append(atlasUtils.getMask(outputs, i))
                    label_masks.append(atlasUtils.getMask(labels, i))
                    dice.append(atlasUtils.dice(masks[i], label_masks[i]))
                    if expcf.look_small:
                        smallmasks.append(atlasUtils.getMask(smalloutputs, i))
                        smalllabel_masks.append(atlasUtils.getMask(smalllabels, i))
                        smalldice.append(atlasUtils.dice(smallmasks[i], smalllabel_masks[i]))
                del outputs, labels, label_masks, masks
                if expcf.look_small:
                    del smalloutputs, smalllabels, smallmasks, smalllabel_masks
        meanDices, smallmeanDices = [], []
        for i in range(12):
            # NOTE(review): `dice` grows by 12 entries per *batch*, but only
            # indices 0..11 (the first batch) are read here, and np.mean of a
            # single scalar is that scalar. If a per-class mean over all
            # batches was intended, the slice would be dice[i::12] — confirm.
            meanDices.append(np.mean(dice[i]))
            self.save_dict['original'][self.expconfig.classes_name[i]] = meanDices[i]
            if expcf.look_small:
                smallmeanDices.append(np.mean(smalldice[i]))
                self.save_dict['small'][self.expconfig.classes_name[i]] = smallmeanDices[i]
        self.meanDice = np.mean([j for j in meanDices])
        self.save_dict['meanDice'] = self.meanDice
        if expcf.look_small:
            self.smallmeanDice = np.mean([j for j in smallmeanDices])
            self.save_dict['smallmeanDice'] = self.smallmeanDice
        self.save_dict['epoch'] = epoch
        self.save_dict['memory'] = self.convert_bytes(torch.cuda.max_memory_allocated())
        self.save_dict['training_time'] = time.time() - self.startingTime
        self.save_results()
        return time.time() - startTime
    
    def saveToDisk(self, epoch):
        """Checkpoint net/optimizer (and scheduler) state for `epoch`."""
        #gather things to save
        saveDict = {"net_state_dict": self.expconfig.net.state_dict(),
                    "optimizer_state_dict": self.expconfig.optimizer.state_dict(),
                    "epoch": epoch,
                    "bestMeanDice": self.bestMeanDice,
                    "bestMeanDiceEpoch": self.bestMeanDiceEpoch
                    }
        if self.expconfig.lr_scheduler != None:
            saveDict["lr_scheduler_state_dict"] = self.expconfig.lr_scheduler.state_dict()
        #save dict
        basePath = self.expconfig.checkpointsBasePathSave + "{}".format(self.expconfig.id)
        path = basePath + "/e_{}.pt".format(epoch)
        if not os.path.exists(basePath):
            os.makedirs(basePath)
        torch.save(saveDict, path)
    
    def save_results(self):
        """Dump `save_dict` as JSON next to the checkpoints (per split)."""
        with open(os.path.join(self.expconfig.checkpointsBasePath, self.expconfig.experiment_name+'_split_'+str(self.split)+'.json'), 'w') as f:
            json.dump(self.save_dict, f)
from flask_assets import Bundle

# Flask-Assets bundles: the js/css groups are concatenated into the generated
# files under static/gen/; the img bundle just registers the favicon.
bundles = {
    'js': Bundle(
        'js/loader.js',
        'js/cache_timer.js',
        'js/index.js',
        'js/info.js',
        'js/profile.js',
        output='gen/main.js'),
    'css': Bundle(
        'css/loader.css',
        'css/base.css',
        'css/index.css',
        'css/info.css',
        'css/package.css',
        'css/profile.css',
        output='gen/main.css'),
    'img': Bundle(
        'img/favicon.ico')
}
|
import argparse
import os
from collections import OrderedDict
import random
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from PIL import Image
import pdb
def get_video_list(dir):
    # NOTE(review): shadows the builtin `dir`, and `make_video_name` is not
    # defined anywhere in this module — calling this raises NameError unless
    # it is provided elsewhere. It is also never called from main(); confirm
    # whether this helper is dead code.
    new_list = [name for name in os.listdir(dir) if len(name)>2]
    new_list = make_video_name(new_list)
    return new_list
def make_box_anno(llist):
    """Return the [x1, y1, x2, y2] box from csv fields 2..5 as floats."""
    # Indexing (not slicing) preserves the original IndexError on short rows.
    return [float(llist[index]) for index in (2, 3, 4, 5)]


def read_kinetics_annotations(anno_file):
    """
    Parse a kinetics/AVA style annotation csv.

    Each line is `video,time_stamp,...` and, when boxes are present,
    `video,time_stamp,x1,y1,x2,y2,label,...,numf`.

    Returns
    -------
    dict mapping video_name -> str(int(time_stamp)) -> list of
    [time_stamp, box-or-None, label-or-None, numf] entries.
    """
    with open(anno_file, 'r') as f:
        lines = f.readlines()
    annotations = {}
    # Train files keep only annotated lines; val/test files may carry
    # timestamp-only lines that still have to appear in the output.
    is_train = anno_file.find('train') > -1
    for line in lines:
        line = line.rstrip('\n')
        line_list = line.split(',')
        video_name = line_list[0]
        if video_name not in annotations:
            annotations[video_name] = {}
        time_stamp = float(line_list[1])
        # NOTE(review): for a 2-field line the last field is the timestamp, so
        # this int() raises ValueError before the `elif` branch below can run;
        # confirm the expected format of box-less lines.
        numf = int(line_list[-1])
        ts = str(int(time_stamp))
        if len(line_list) > 2:
            box = make_box_anno(line_list)
            label = int(line_list[6])
            if ts not in annotations[video_name]:
                annotations[video_name][ts] = [[time_stamp, box, label, numf]]
            else:
                annotations[video_name][ts] += [[time_stamp, box, label, numf]]
        elif not is_train:
            # BUG FIX: the original re-tested `video_name not in annotations`
            # here, which is always False (the video was inserted above), so
            # the first box-less entry hit `+=` on a missing `ts` key and
            # raised KeyError. Test the timestamp key instead.
            if ts not in annotations[video_name]:
                annotations[video_name][ts] = [[time_stamp, None, None, numf]]
            else:
                annotations[video_name][ts] += [[time_stamp, None, None, numf]]
    return annotations
def main(frames_dir, input_csv, dataset):
    """Display each annotated frame with its ground-truth boxes drawn on top."""
    annotations = read_kinetics_annotations(input_csv)
    for ii, video_name in enumerate(annotations):
        for ts in annotations[video_name]:
            time_stamp = annotations[video_name][ts][0][0]
            src_frames_dir = os.path.join(frames_dir, video_name)
            # Frame naming differs per dataset: AVA timestamps start at 900s
            # and frames are prefixed with the video name. Both formulas
            # assume frames were extracted at 30 fps.
            if dataset != 'ava':
                image_name = os.path.join(src_frames_dir, '{:06d}.jpg'.format(int(time_stamp*30)+1))
            else:
                image_name = os.path.join(src_frames_dir, '{:s}_{:06d}.jpg'.format(video_name, int((time_stamp-900)*30 + 1)))
            print(video_name, src_frames_dir, image_name)
            img = Image.open(image_name)
            fig, ax = plt.subplots()
            w, h = img.size
            plt.imshow(img)
            print(img.size)
            for anno in annotations[video_name][ts]:
                box = anno[1] #[x1, y1, x2, y2] in relative coordinates
                # NOTE(review): `box` is None for box-less validation entries;
                # the indexing below would raise TypeError then — assumed only
                # fully annotated CSVs are passed in; confirm.
                x1 = int(box[0]*w)
                y1 = int(box[1]*h)
                bw = int((box[2]-box[0])*w)
                bh = int((box[3]-box[1])*h)
                label = anno[2]
                print(x1,y1, bw, bh)
                rect = patches.Rectangle((x1, y1), bw, bh, linewidth=1, edgecolor='r', facecolor='none')
                ax.add_patch(rect)
            # Show non-blocking, wait up to 5s for a key/button, then move on.
            plt.show(block=False)
            plt.waitforbuttonpress(5)
            plt.close()
if __name__ == '__main__':
    # FIX: the description and --frames_dir help were copied from a video
    # download/trim helper; this script actually visualizes ground-truth boxes
    # on already-extracted frames.
    description = 'Visualize ground-truth boxes from a kinetics/AVA annotation CSV on extracted frames.'
    p = argparse.ArgumentParser(description=description)
    p.add_argument('--frames_dir', default='/home/gusingh/ava/frames_x256/', type=str,
                   help='Directory containing extracted frames, one sub-directory per video.')
    p.add_argument('--input_csv', type=str, default='/home/gusingh/ava/annotations/ava_train_v2.2.csv', #'ava_kinetics_updated_csv/kinetics_train_v1.0.csv',
                   help=('CSV file containing the following format: '
                         'YouTube Identifier,Start time,End time,Class label'))
    p.add_argument('--dataset', type=str, default='ava',
                   help=('specify the dataset type '))
    args = p.parse_args()
    main(args.frames_dir, args.input_csv, args.dataset)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-01-26 22:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes `genres.Genre.number` to a 6-digit,
    # zero-decimal DecimalField defaulting to 0.

    dependencies = [
        ('genres', '0003_auto_20200126_2216'),
    ]

    operations = [
        migrations.AlterField(
            model_name='genre',
            name='number',
            field=models.DecimalField(decimal_places=0, default=0, max_digits=6),
        ),
    ]
|
import logging
import os
import sys
from pathlib import Path
from typing import cast
import click
import click_pathlib
from rich.console import Console
from rich.emoji import Emoji
from rich.logging import RichHandler
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from isomodder import (
AutoInstallBuilder,
IsoFile,
IsoModderFatalException,
ProgressReporter,
UbuntuServerIsoFetcher,
)
# Bail out early on interpreters older than the minimum this script needs
# (f-strings etc. require 3.6+).
MIN_PYTHON = (3, 6)
if sys.version_info < MIN_PYTHON:
    sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
def get_rich() -> Progress:
    """Build the rich progress-bar layout used for download/write reporting."""
    columns = (
        TextColumn("[bold blue]{task.description}", justify="right"),
        BarColumn(bar_width=None),
        "[progress.percentage]{task.percentage:>3.1f}%",
        "•",
        DownloadColumn(),
        "•",
        TransferSpeedColumn(),
        "•",
        TimeRemainingColumn(),
    )
    return Progress(*columns)
# Shared rich console; root logger routed through RichHandler for colored output.
console = Console()
logging.basicConfig(level="NOTSET", format="%(message)s", datefmt="[%X]", handlers=[RichHandler()])
class EnumChoice(click.Choice):
    """A click ``Choice`` over an enum: accepts member names (or values, with
    ``use_value=True``) on the command line and converts back to the member."""
    def __init__(self, enum, case_sensitive=False, use_value=False):
        self.enum = enum
        self.use_value = use_value
        choices = [str(e.value) if use_value else e.name for e in self.enum]
        super().__init__(choices, case_sensitive)
    def convert(self, value, param, ctx):
        # Fast path: the raw string is already an exact member name.
        try:
            return self.enum[value]
        except KeyError:
            pass
        # Let click validate against the declared choices (handles
        # case-insensitivity, error messages, etc.).
        result = super().convert(value, param, ctx)
        # Find the original case in the enum
        if not self.case_sensitive and result not in self.choices:
            result = next(c for c in self.choices if result.lower() == c.lower())
        if self.use_value:
            return next(e for e in self.enum if str(e.value) == result)
        return self.enum[result]
def get_default_cache_dir() -> str:
    """Return (creating it if needed) the app cache dir under $XDG_CACHE_HOME.

    Falls back to ``$HOME/.cache`` when XDG_CACHE_HOME is unset.
    """
    cache_root = os.environ.get("XDG_CACHE_HOME", f"{os.environ['HOME']}/.cache")
    app_cache = Path(cache_root) / "ubautoiso"
    app_cache.mkdir(parents=True, exist_ok=True)
    return str(app_cache)
def get_default_output_file() -> str:
    """Default ISO output path: ``autoinstall.iso`` in the current directory."""
    return str(Path.cwd() / "autoinstall.iso")
@click.command()
@click.option(
    "-o",
    "--output",
    type=click_pathlib.Path(),
    default=get_default_output_file,
    show_default="$CWD/autoinstall.iso",
)
@click.option(
    "-c",
    "--cache-dir",
    type=click_pathlib.Path(exists=True, file_okay=False, writable=True),
    default=get_default_cache_dir,
    show_default="$XDG_CACHE_HOME/ubautoiso",
)
@click.option("-r", "--release", default="20.04", show_default=True)
@click.option("--no-prompt", is_flag=True)
@click.option("--no-mbr", is_flag=True)
@click.option("--no-efi", is_flag=True)
@click.argument("autoinstall_file", type=click_pathlib.Path(exists=True, dir_okay=False, readable=True))
def cli(output, cache_dir, release, no_prompt, autoinstall_file, no_efi, no_mbr):
    """Fetch an Ubuntu Server ISO and rebuild it with the given autoinstall config baked in.

    BUG FIX: the original signature took ``prompt`` while click passes
    ``no_prompt`` (from ``--no-prompt``) and ``release``; invoking the command
    raised TypeError. The ``-r/--release`` option was also declared but
    ignored — the fetcher hard-coded "20.04".
    """
    fetcher = UbuntuServerIsoFetcher(working_dir=cache_dir, release=release)
    with get_rich() as progress:
        iso_path = fetcher.fetch(cast(ProgressReporter, progress))
    iso_file = IsoFile(iso_path)
    builder = AutoInstallBuilder(
        source_iso=iso_file,
        autoinstall_yaml=autoinstall_file,
        grub_entry_stamp="paranoidNAS AutoInstall",
        # --no-prompt suppresses the interactive confirmation boot entry.
        autoinstall_prompt=(not no_prompt),
        supports_efi=(not no_efi),
        supports_mbr=(not no_mbr),
    )
    builder.build()
    # Overwrite any previous build output.
    if output.exists():
        output.unlink()
    with get_rich() as progress:
        iso_file.write_iso(output, cast(ProgressReporter, progress))
    logging.info(f"You're ready to burn! {Emoji('fire')}")
# print out info here
def main():
    """Entry point wrapper: run the CLI and pretty-print failures."""
    try:
        cli()
    except SystemExit:
        # click's normal exit path (e.g. --help, usage errors); not a failure.
        pass
    except IsoModderFatalException as exc:
        # Known fatal errors: show the message without a traceback.
        console.print()
        console.print(f":cross_mark: [bold red] {exc} :cross_mark:")
        console.print()
    except BaseException:
        # Last-resort handler: dump the full traceback on the rich console.
        console.print()
        console.print(
            ":pile_of_poo: :whale: [bold red] Something totally unexpected has happened. Let's see..."
        )
        console.print()
        console.print_exception()
        console.print()
if __name__ == "__main__":
    main()
|
import pkg_resources

# Feature-detect RelStorage: the Oracle fallback in get_object_count is only
# usable when the package is installed.
try:
    pkg_resources.get_distribution('RelStorage')
except pkg_resources.DistributionNotFound:
    HAS_RELSTORAGE = False
else:
    HAS_RELSTORAGE = True
    # BUG FIX: these imports were previously unconditional (after the
    # try/except), so the module raised ImportError whenever RelStorage was
    # absent — defeating the HAS_RELSTORAGE guard entirely.
    from relstorage.storage import RelStorage
    from relstorage.adapters.stats import OracleStats
def get_object_count(db):
    """Returns the number of objects in the ZODB
    """
    count = db.objectCount()
    if count != 0:
        return count
    # A zero count may mean RelStorage on Oracle, where the object count is
    # approximate and currently disabled; fall back to Oracle's statistics.
    if not HAS_RELSTORAGE:
        return 0
    if not isinstance(db.storage, RelStorage):
        return 0
    stats = db.storage._adapter.stats
    if not isinstance(stats, OracleStats):
        return 0
    return _get_object_count_oracle(stats.connmanager)
def _get_object_count_oracle(connmanager):
    """Returns the number of objects in the database.
    See relstorage.adapters.stats.OracleStats @2df8f8df
    """
    conn, cursor = connmanager.open(connmanager.isolation_read_only)
    try:
        stmt = """
        SELECT NUM_ROWS
        FROM USER_TABLES
        WHERE TABLE_NAME = 'CURRENT_OBJECT'
        """
        cursor.execute(stmt)
        value = cursor.fetchone()[0]
        # NUM_ROWS is NULL until the table statistics have been gathered.
        return 0 if value is None else int(value)
    finally:
        connmanager.close(conn, cursor)
|
from utils import ModuleHandler
class BaseModuleHandler(ModuleHandler):
    """Handler for the commands every module shares (`settings`, `quit`).

    BUG FIX: the original used Python-2-only `print` statements (a syntax
    error on Python 3); the parenthesised single-argument form used below
    behaves identically on Python 2 and Python 3.
    """
    def __init__(self, settings):
        # `settings` must be a mapping exposing 'hostname', 'port', 'apikey'.
        super(BaseModuleHandler, self).__init__(settings)

    def handle_command(self, command):
        """Handle `command` (a list of words); return True when it was consumed."""
        if command[0] == 'settings':
            self.__print_settings()
            return True
        elif command[0] == 'quit':
            print('Bye')
            exit(0)
        return False

    def list_commands(self):
        """Return (name, description) pairs for the commands handled here."""
        # Renamed the local from `help` to avoid shadowing the builtin.
        commands = [
            ('quit', 'Quit this application.'),
            ('settings', 'Print the current settings used by the application.')
        ]
        return commands

    def __print_settings(self):
        # Dump the connection settings this handler was constructed with.
        print('RichCoin Administrator Settings')
        print(' Hostname : {0}'.format(self.settings['hostname']))
        print(' Port : {0}'.format(self.settings['port']))
        print(' API Key : {0}'.format(self.settings['apikey']))
def get_module_handler(settings):
    """Module entry point: build the base handler for the given settings."""
    return BaseModuleHandler(settings)
|
"""
Tic Tac Toe Player
"""
from copy import deepcopy
from math import inf
# Cell markers: X always moves first; EMPTY marks an unoccupied cell.
X = "X"
O = "O"
EMPTY = None
# ---------------HELPER---------------
def maxMinValue(board):
    """Minimax "max" step: the best utility X can force from `board`."""
    if terminal(board):
        return utility(board)
    # A non-terminal board always has at least one action.
    return max(minimum(result(board, move)) for move in actions(board))
def minimum(board):
    """Minimax "min" step: the best (lowest) utility O can force from `board`."""
    if terminal(board):
        return utility(board)
    # A non-terminal board always has at least one action.
    return min(maxMinValue(result(board, move)) for move in actions(board))
# ---------------HELPER----------------
def initial_state():
    """
    Returns starting state of the board.
    """
    # Three independent rows (no shared sublists).
    return [[EMPTY] * 3 for _ in range(3)]
def player(board):
    """
    Returns player who has the next turn on a board.
    """
    xCount = sum(row.count(X) for row in board)
    oCount = sum(row.count(O) for row in board)
    # X moves first, so X is on turn whenever the counts are level.
    return 'X' if xCount <= oCount else 'O'
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.
    """
    return {
        (i, j)
        for i in range(3)
        for j in range(3)
        if board[i][j] == EMPTY
    }
def result(board, action):
    """
    Returns the board that results from making move (i, j) on the board.

    Raises ValueError when the game is already over.
    """
    if terminal(board):
        raise ValueError("Game over.")
    i, j = action
    nextBoard = deepcopy(board)
    nextBoard[i][j] = player(board)
    return nextBoard
def winner(board):
    """
    Returns the winner of the game, if there is one.
    """
    # Main diagonal, checked first (mirrors the original control flow). When
    # the whole diagonal is empty this returns None — correct, since every
    # winning line crosses the main diagonal, so no other line can be full.
    if board[0][0] == board[1][1] == board[2][2]:
        return board[0][0]
    # Horizontal lines.
    for i in range(3):
        first = board[i][0]
        if first in (X, O) and board[i][1] == first and board[i][2] == first:
            return first
    # Vertical lines.
    for j in range(3):
        first = board[0][j]
        if first in (X, O) and board[1][j] == first and board[2][j] == first:
            return first
    # Anti-diagonal.
    if board[0][2] == board[1][1] == board[2][0] != None:
        return board[0][2]
    return None
def terminal(board):
    """
    Returns True if game is over, False otherwise.
    """
    if winner(board) is not None:
        return True
    # No winner: the game is over only when every cell is filled.
    return all(cell is not None for row in board for cell in row)
def utility(board):
    """
    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.
    """
    champ = winner(board)
    if champ == X:
        return 1
    if champ == O:
        return -1
    return 0
def minimax(board):
    """Return the optimal action (i, j) for the current player on *board*,
    or None when the game is already over.

    Fixes in this revision:
      * The inner loop re-used the name ``action``, shadowing the outer loop
        variable, so ``optimalMove`` recorded whatever action the inner loop
        ended on — not the move actually being evaluated.
      * The inner loop scored actions of the *current* board instead of the
        successor board ``result(board, action)``, and called undefined
        helpers (``maxMinValue`` / ``minimum``).
      * A terminal board now returns None instead of an arbitrary move.
    """

    def _max_value(state):
        # Best score X can force from `state` (X maximises utility).
        if terminal(state):
            return utility(state)
        best = float("-inf")
        for a in actions(state):
            best = max(best, _min_value(result(state, a)))
        return best

    def _min_value(state):
        # Best score O can force from `state` (O minimises utility).
        if terminal(state):
            return utility(state)
        best = float("inf")
        for a in actions(state):
            best = min(best, _max_value(result(state, a)))
        return best

    if terminal(board):
        return None
    # Opening shortcut kept from the original: on an empty board every first
    # move is equivalent under optimal play, so skip the full search.
    if board == initial_state():
        return (0, 0)

    if player(board) == X:
        # X picks the action whose value, assuming O replies optimally,
        # is largest.
        return max(actions(board), key=lambda a: _min_value(result(board, a)))
    # O minimises symmetrically.
    return min(actions(board), key=lambda a: _max_value(result(board, a)))
|
# Demo of the third-party `googleapi` package: run a standard web search and
# print the number of results plus the first hit's description.
import googleapi
results = googleapi.standard_search.search("albert einstein")
# NOTE(review): assumes `results` is a sequence whose items expose a
# `.description` attribute — cannot confirm from this file alone.
print("%s - %s" % (len(results), results[0].description))
|
__all__ = [
'Section', 'StringField', 'IntegerField', 'BooleanField', 'FloatField', 'IniConnector', 'ListField'
]
from .ConfigORM import Section
from .Fields import StringField
from .Fields import IntegerField
from .Fields import BooleanField
from .Fields import FloatField
from .Fields import ListField
from .Connectors import IniConnector
|
# Draws a circle point by point on a Tk canvas.
import math
from tkinter import *
root = Tk()
root.title("PythonicWay Pong")
# Set up the canvas.
c = Canvas(root, width=400, height=400)
c.pack()
# Circle radius.
r = 120
# Flat list of x,y coordinates for the lower half of the circle
# (Tk's y axis grows downward, so +y is the bottom arc on screen).
circl1 = []
# Flat list of x,y coordinates for the upper half of the circle.
circl2 = []
# Offset that moves the circle's centre to (shift, shift); without it the
# circle would be drawn around the canvas origin (0, 0).
shift=200
# Compute and collect the points of the lower half: y = sqrt(r^2 - x^2).
for x in range(-r,r+1):
    y = math.sqrt((r**2)-x**2)
    circl1.append(x+shift)
    circl1.append(y+shift)
# Compute and collect the points of the upper half (mirrored: -y).
for x in range(-r,r+1):
    y = math.sqrt((r**2)-x**2)
    circl2.append(x+shift)
    circl2.append(-y+shift)
# Draw the two half-circles with create_line from the coordinate lists.
colo1 = c.create_line(circl1, width=3)
colo2 = c.create_line(circl2, width=3)
root.mainloop()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import redis
def main():
    """Push a single placeholder value onto the 'taobao' list in local Redis."""
    r = redis.Redis(host='127.0.0.1', port=6379, db=0)
    # Original comment read "extract the urls of pages 1 to 100", but only one
    # placeholder string is pushed — presumably a stub. TODO confirm intent.
    r.lpush("taobao", 'jianjian')
if __name__ == '__main__':
    main()
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
def find_local_min_max(series, x):
    """Find alternating local minima and maxima of *series* with a
    percentage-reversal ("zig-zag") filter.

    A pending extreme is tracked as the series is scanned; a maximum is
    confirmed once the value falls at least the fraction ``x`` below the
    pending high, and a minimum once the value rises at least ``x`` above
    the pending low.  Reversals are measured multiplicatively, so the
    series is assumed positive (e.g. prices).

    Parameters:
        series: pd.Series of ordered values.
        x: reversal threshold as a fraction (e.g. 0.25 for 25 %).

    Returns:
        (local_min_ind, local_min, local_max_ind, local_max): the indices
        and values of confirmed extremes, in chronological order.

    Fixes in this revision:
      * The branch that cancels a pending minimum's reference high wrote
        ``state['pmin_ind'] = ind`` instead of ``state['pmax_ind'] = ind``
        (copy-paste from the symmetric branch), clobbering the pending
        minimum's index and leaving the new pending maximum without one.
      * ``Series.iteritems()`` (removed in pandas 2.0) replaced by
        ``Series.items()``.
    """
    # pmin/pmax: value and index of the pending (unconfirmed) extreme.
    # lmin/lmax: the last *confirmed* extreme, used as the reversal reference.
    state = {
        'pmin': series.iat[0],
        'pmin_ind': series.index[0],
        'pmax': series.iat[0],
        'pmax_ind': series.index[0],
        'lmin': None,
        'lmax': None,
    }
    local_min_ind, local_min = [], []
    local_max_ind, local_max = [], []
    for ind, value in series.items():
        if state['pmin'] is not None and state['pmax'] is not None:
            # Initial phase: trend direction not yet decided.
            if value <= state['pmin']:
                state['pmin'] = value
                state['pmin_ind'] = ind
                if value <= state['pmax'] * (1 - x):
                    # Dropped x below the pending high -> confirm a maximum.
                    state['lmax'] = state['pmax']
                    local_max_ind.append(state['pmax_ind'])
                    local_max.append(state['pmax'])
                    state['pmax'] = None
                    state['pmax_ind'] = None
            elif value >= state['pmax']:
                state['pmax'] = value
                state['pmax_ind'] = ind
                if value > state['pmin'] * (1 + x):
                    # Rose x above the pending low -> confirm a minimum.
                    state['lmin'] = state['pmin']
                    local_min_ind.append(state['pmin_ind'])
                    local_min.append(state['pmin'])
                    state['pmin'] = None
                    state['pmin_ind'] = None
        elif state['pmax'] is not None and state['lmin'] is not None:
            # Up-leg: tracking a pending maximum against the last minimum.
            if value >= state['pmax']:
                state['pmax'] = value
                state['pmax_ind'] = ind
            elif value <= state['lmin']:
                # Fell back through the last confirmed minimum: confirm the
                # pending maximum and start tracking a new minimum.
                state['lmax'] = state['pmax']
                local_max_ind.append(state['pmax_ind'])
                local_max.append(state['pmax'])
                state['pmax'] = None
                state['pmax_ind'] = None
                state['pmin'] = value
                state['pmin_ind'] = ind
            else:
                # Value sits between lmin and pmax: drop the reference and
                # fall back to the undecided phase with a fresh pending low.
                state['lmin'] = None
                state['pmin'] = value
                state['pmin_ind'] = ind
        elif state['pmin'] is not None and state['lmax'] is not None:
            # Down-leg: tracking a pending minimum against the last maximum.
            if value <= state['pmin']:
                state['pmin'] = value
                state['pmin_ind'] = ind
            elif value >= state['lmax']:
                # Broke above the last confirmed maximum: confirm the pending
                # minimum and start tracking a new maximum.
                state['lmin'] = state['pmin']
                local_min_ind.append(state['pmin_ind'])
                local_min.append(state['pmin'])
                state['pmin'] = None
                state['pmin_ind'] = None
                state['pmax'] = value
                state['pmax_ind'] = ind
            else:
                # Value sits between pmin and lmax: drop the reference and
                # fall back to the undecided phase with a fresh pending high.
                state['lmax'] = None
                state['pmax'] = value
                # BUG FIX: the original wrote state['pmin_ind'] here.
                state['pmax_ind'] = ind
        else:
            # Unreachable by construction; kept from the original as a guard.
            print('strange')
    return local_min_ind, local_min, local_max_ind, local_max
if __name__ == '__main__':
    # Load close_price.csv (no header; column 0 is the index, column 1 the
    # price series) and run the reversal filter with a 25 % threshold.
    close_price = pd.read_csv('close_price.csv', header=None, index_col=[0])
    close_price = close_price.iloc[:, 0]
    local_min_ind, local_min, local_max_ind, local_max = find_local_min_max(close_price, .25)
|
"""
Utility methods for development
"""
import json
def dict_to_file(dict_input, filename):
    """Serialize *dict_input* to *filename* as pretty-printed, key-sorted JSON.

    Fix: the file is now opened with an explicit UTF-8 encoding so output is
    identical across platforms (the default encoding is platform-dependent).
    """
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(dict_input, f, indent=4, sort_keys=True)
def file_to_dict(filename):
    """Load and return the JSON content of *filename*.

    Fix: explicit UTF-8 encoding, matching dict_to_file's output and avoiding
    the platform-dependent default.
    """
    with open(filename, 'r', encoding='utf-8') as f:
        return json.load(f)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.