index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,500 | afa63a8ee650c8bf83aae28e7d61d8a46f02108b | # -*- encoding: utf-8
from suplemon.suplemon_module import Module
class SaveAll(Module):
    """Suplemon command that saves every open file after confirmation."""
    def run(self, app, editor, args):
        """Ask the user for confirmation, then save all open files."""
        # Bail out (returning False) unless the user explicitly confirms.
        if not self.app.ui.query_bool("Save all files?", False):
            return False
        for open_file in app.get_files():
            open_file.save()
# Registration table consumed by the suplemon plugin loader.
module = {
    "name": "save_all",
    "class": SaveAll,
}
|
18,501 | 1b219c355dde80796b22688ac8be8fb71801cf20 | from box_plot import mnist_get_correct_and_incorrect_test_images
from keras.datasets import mnist, cifar10
from keras.models import load_model, Model
import argparse
from keras import utils
import numpy as np
from utils import load_file, load_all_files
import matplotlib.pyplot as plt
import numpy as np
from utils import write_file
# Pixel values are rescaled from [0, 255] into [CLIP_MIN, CLIP_MAX].
CLIP_MIN = -0.5
CLIP_MAX = 0.5
def _max_layer_confidences(model_path, layer_names, x, batch_size):
    """Load a Keras model and return the per-sample maximum activation.

    Builds a sub-model exposing the outputs of ``layer_names``, runs
    prediction over ``x`` and returns ``list(np.amax(first_output, axis=1))``
    exactly as the original inline pipelines did.
    """
    model = load_model(model_path)
    model.summary()
    temp_model = Model(
        inputs=model.input,
        outputs=[model.get_layer(layer_name).output for layer_name in layer_names],
    )
    # Keras returns a bare array for a single output; normalize to a list
    # so layer_outputs[0] is always the first layer's activations.
    if len(layer_names) == 1:
        layer_outputs = [
            temp_model.predict(x, batch_size=batch_size, verbose=1)
        ]
    else:
        layer_outputs = temp_model.predict(
            x, batch_size=batch_size, verbose=1
        )
    output = layer_outputs[0]
    return list(np.amax(output, axis=1))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--d", "-d", help="Dataset", type=str, default="mnist")
    # --prob is parsed for CLI compatibility but is not read below.
    parser.add_argument(
        "--prob", "-prob", help="Confidence score", action="store_true"
    )
    parser.add_argument(
        "--adv", "-adv", help="Calculate the confidence score of adversarial examples ", action="store_true"
    )
    args = parser.parse_args()
    assert args.d in ["mnist", "cifar"], "Dataset should be either 'mnist' or 'cifar'"
    batch_size = 64
    layer_names = ['dense_2']
    # Dataset-specific file locations (the only real difference between
    # the four near-identical branches of the original script).
    if args.d == "mnist":
        model_path = './model_tracking/model_improvement-04-0.99_mnist.h5'
        adv_path = './adv/adv_mnist.npy'
    else:
        model_path = './model_tracking/cifar_model_improvement-496-0.87.h5'
        adv_path = './adv/adv_cifar.npy'
    if args.adv:
        # BUG FIX: the original cifar --adv branch normalized x_train/x_test
        # that were never loaded in that branch (NameError at runtime); the
        # adversarial inputs are already preprocessed, so only load them.
        x = np.load(adv_path)
        out_path = './sa/prob_adv_no_normalize_%s.txt' % (args.d)
    else:
        if args.d == "mnist":
            (x_train, y_train), (x_test, y_test) = mnist.load_data()
            x_test = x_test.reshape(-1, 28, 28, 1)
        else:
            (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        # Rescale pixels from [0, 255] into [CLIP_MIN, CLIP_MAX].
        x = x_test.astype("float32")
        x = (x / 255.0) - (1.0 - CLIP_MAX)
        out_path = './sa/prob_no_normalize_%s.txt' % (args.d)
    output = _max_layer_confidences(model_path, layer_names, x, batch_size)
    write_file(out_path, output)
|
18,502 | 275c99d38ce1320849865c654affe4d45e946ca2 | # -*- coding: utf-8 -*-
"""Gather traffic data for nacr.us"""
from __future__ import print_function
from functools import wraps
from .traffic import TrafficService
import requests
import datetime
import pytz
import re
from pprint import pprint
def datetime_to_epoch(indate):
    """Return the seconds between the Unix epoch and *indate*."""
    epoch = datetime.datetime(1970, 1, 1)
    if indate.tzinfo:
        # An aware datetime can only be subtracted from an aware epoch.
        epoch = pytz.timezone('UTC').localize(epoch)
    delta = indate - epoch
    return delta.total_seconds()
class NacrTrafficService(TrafficService):
    """Exposes traffic data for Nacr.us as service."""
    def __init__(self):
        """Initialize self with the fixed vnstat log URL and date format."""
        source_url = "http://lelandbatey.com/netinfo/local.vnstat.log"
        self.source_url = source_url
        self.source_label = "nacr.us"
        # vnstat daily log dates look like "04/20/15".
        self.date_fmt = "%m/%d/%y"
    def get_traffic(self):
        """Returns parsed form of traffic data from 'self.source_url'"""
        req = requests.get(self.source_url)
        text = req.text
        interface, rv = self.parse_traffic(text)
        # Host label is "nacr.us, <interface name>".
        host = ", ".join([self.source_label, interface])
        return host, rv
    def parse_traffic(self, text):
        """Parse the output of vnstat into the data we desire.

        Returns (interface_name, {'data': {'unit': ..., 'measurements': [...]}}).
        """
        # The interface name is on the second line, before the first '/'.
        interface = text.splitlines()[1].split('/')[0].strip()
        # pprint(interface)
        raw_entries = []
        for line in text.splitlines():
            # Only lines containing a rate (e.g. "kbit/s") are data rows.
            if '/s' in line:
                line = line.replace('|', '')
                line = [l.strip() for l in line.split(' ') if len(l) > 0]
                # Keep (date token, last token) per data row.
                raw_entries.append((line[0], line[-1]))
        holder = {}
        entries = []
        for date_str, rate in raw_entries:
            date = datetime.datetime.strptime(date_str, self.date_fmt)
            epoch = datetime_to_epoch(date)
            # NOTE(review): line[-1] above is a single space-free token, so
            # this split(' ') only succeeds if rate and unit reach here joined
            # by non-space whitespace (e.g. a tab) — confirm against the
            # actual vnstat log format.
            rate, unit = rate.split(' ')
            holder['unit'] = unit
            entries.append({'timestamp': epoch, "rate": float(rate)})
        holder['measurements'] = entries
        return interface, {'data': holder}
|
18,503 | b5152397584ee0a87da84ba0f0fdf462a6e3be55 | # Copyright 2021-2022 python-tuf contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""Repository Abstraction for metadata management"""
import logging
from abc import ABC, abstractmethod
from contextlib import contextmanager, suppress
from copy import deepcopy
from typing import Dict, Generator, Optional, Tuple
from tuf.api.metadata import (
Metadata,
MetaFile,
Root,
Signed,
Snapshot,
Targets,
Timestamp,
)
logger = logging.getLogger(__name__)
class AbortEdit(Exception):
    """Raise to exit the edit() contextmanager without saving changes."""
class Repository(ABC):
    """Abstract class for metadata modifying implementations
    NOTE: The repository module is not considered part of the python-tuf
    stable API yet.
    This class is intended to be a base class used in any metadata editing
    application, whether it is a real repository server or a developer tool.
    Implementations must implement open() and close(), and can then use the
    edit() contextmanager to implement actual operations. Note that signing
    an already existing version of metadata (as could be done for threshold
    signing) does not fit into this model of open()+close() or edit().
    A few operations (snapshot and timestamp) are already implemented
    in this base class.
    """
    @abstractmethod
    def open(self, role: str) -> Metadata:
        """Load a roles metadata from storage or cache, return it
        If role has no metadata, create first version from scratch.
        """
        raise NotImplementedError
    @abstractmethod
    def close(self, role: str, md: Metadata) -> None:
        """Write roles metadata into storage
        Update expiry and version and replace signatures with ones from all
        available keys. Keep snapshot_info and targets_infos updated.
        """
        raise NotImplementedError
    @property
    def targets_infos(self) -> Dict[str, MetaFile]:
        """Returns the MetaFiles for current targets metadatas
        This property is used by do_snapshot() to update Snapshot.meta:
        Repository implementations should override this property to enable
        do_snapshot().
        Note that there is a difference between this return value and
        Snapshot.meta: This dictionary reflects the targets metadata that
        currently exists in the repository but Snapshot.meta also includes
        metadata that used to exist, but no longer exists, in the repository.
        """
        raise NotImplementedError
    @property
    def snapshot_info(self) -> MetaFile:
        """Returns the MetaFile for current snapshot metadata
        This property is used by do_timestamp() to update Timestamp.meta:
        Repository implementations should override this property to enable
        do_timestamp().
        """
        raise NotImplementedError
    @contextmanager
    def edit(self, role: str) -> Generator[Signed, None, None]:
        """Context manager for editing a role's metadata
        Context manager takes care of loading the roles metadata (or creating
        new metadata), updating expiry and version. The caller can do
        other changes to the Signed object and when the context manager exits,
        a new version of the roles metadata is stored.
        Context manager user can raise AbortEdit from inside the with-block to
        cancel the edit: in this case none of the changes are stored.
        """
        md = self.open(role)
        with suppress(AbortEdit):
            yield md.signed
            # Only reached when the caller did not raise AbortEdit: store
            # the edited metadata as a new version.
            self.close(role, md)
    @contextmanager
    def edit_root(self) -> Generator[Root, None, None]:
        """Context manager for editing root metadata. See edit()"""
        with self.edit(Root.type) as root:
            if not isinstance(root, Root):
                raise RuntimeError("Unexpected root type")
            yield root
    @contextmanager
    def edit_timestamp(self) -> Generator[Timestamp, None, None]:
        """Context manager for editing timestamp metadata. See edit()"""
        with self.edit(Timestamp.type) as timestamp:
            if not isinstance(timestamp, Timestamp):
                raise RuntimeError("Unexpected timestamp type")
            yield timestamp
    @contextmanager
    def edit_snapshot(self) -> Generator[Snapshot, None, None]:
        """Context manager for editing snapshot metadata. See edit()"""
        with self.edit(Snapshot.type) as snapshot:
            if not isinstance(snapshot, Snapshot):
                raise RuntimeError("Unexpected snapshot type")
            yield snapshot
    @contextmanager
    def edit_targets(
        self, rolename: str = Targets.type
    ) -> Generator[Targets, None, None]:
        """Context manager for editing targets metadata. See edit()"""
        with self.edit(rolename) as targets:
            if not isinstance(targets, Targets):
                raise RuntimeError(f"Unexpected targets ({rolename}) type")
            yield targets
    def root(self) -> Root:
        """Read current root metadata"""
        root = self.open(Root.type).signed
        if not isinstance(root, Root):
            raise RuntimeError("Unexpected root type")
        return root
    def timestamp(self) -> Timestamp:
        """Read current timestamp metadata"""
        timestamp = self.open(Timestamp.type).signed
        if not isinstance(timestamp, Timestamp):
            raise RuntimeError("Unexpected timestamp type")
        return timestamp
    def snapshot(self) -> Snapshot:
        """Read current snapshot metadata"""
        snapshot = self.open(Snapshot.type).signed
        if not isinstance(snapshot, Snapshot):
            raise RuntimeError("Unexpected snapshot type")
        return snapshot
    def targets(self, rolename: str = Targets.type) -> Targets:
        """Read current targets metadata"""
        targets = self.open(rolename).signed
        if not isinstance(targets, Targets):
            raise RuntimeError("Unexpected targets type")
        return targets
    def do_snapshot(
        self, force: bool = False
    ) -> Tuple[bool, Dict[str, MetaFile]]:
        """Update snapshot meta information
        Updates the snapshot meta information according to current targets
        metadata state and the current snapshot meta information.
        Arguments:
            force: should new snapshot version be created even if meta
                information would not change?
        Returns: Tuple of
            - True if snapshot was created, False if not
            - MetaFiles for targets versions removed from snapshot meta
        """
        # Snapshot update is needed if
        # * any targets files are not yet in snapshot or
        # * any targets version is incorrect
        update_version = force
        removed: Dict[str, MetaFile] = {}
        with self.edit_snapshot() as snapshot:
            for keyname, new_meta in self.targets_infos.items():
                if keyname not in snapshot.meta:
                    update_version = True
                    snapshot.meta[keyname] = deepcopy(new_meta)
                    continue
                old_meta = snapshot.meta[keyname]
                # A decreasing targets version indicates tampering or a
                # repository bug: refuse to publish a rollback.
                if new_meta.version < old_meta.version:
                    raise ValueError(f"{keyname} version rollback")
                if new_meta.version > old_meta.version:
                    update_version = True
                    snapshot.meta[keyname] = deepcopy(new_meta)
                    removed[keyname] = old_meta
            if not update_version:
                # prevent edit_snapshot() from storing a new version
                raise AbortEdit("Skip snapshot: No targets version changes")
        if not update_version:
            # this is reachable as edit_snapshot() handles AbortEdit
            logger.debug("Snapshot update not needed")  # type: ignore[unreachable]
        else:
            logger.debug("Snapshot v%d", snapshot.version)
        return update_version, removed
    def do_timestamp(
        self, force: bool = False
    ) -> Tuple[bool, Optional[MetaFile]]:
        """Update timestamp meta information
        Updates timestamp according to current snapshot state
        Returns: Tuple of
            - True if timestamp was created, False if not
            - MetaFile for snapshot version removed from timestamp (if any)
        """
        update_version = force
        removed = None
        with self.edit_timestamp() as timestamp:
            # Refuse to point timestamp at an older snapshot version.
            if self.snapshot_info.version < timestamp.snapshot_meta.version:
                raise ValueError("snapshot version rollback")
            if self.snapshot_info.version > timestamp.snapshot_meta.version:
                update_version = True
                removed = timestamp.snapshot_meta
                timestamp.snapshot_meta = deepcopy(self.snapshot_info)
            if not update_version:
                raise AbortEdit("Skip timestamp: No snapshot version changes")
        if not update_version:
            # this is reachable as edit_timestamp() handles AbortEdit
            logger.debug("Timestamp update not needed")  # type: ignore[unreachable]
        else:
            logger.debug("Timestamp v%d", timestamp.version)
        return update_version, removed
|
18,504 | f116b06ec2be733bfbe63e63f219335241f0ae11 | class Solution:
def isAdditiveNumber(self, num: str) -> bool:
def dfs(a: int, b: int, c: int) -> bool:
# 递归边界
if c == n:
return True
num1 = int(num[a:b])
num2 = int(num[b:c])
# 如果考虑大数问题,换成字符串加
the_sum = str(num1 + num2)
len_sum = len(the_sum)
# 先比长度
if c + len_sum > n:
return False
# 逐位比较
for i in range(len_sum):
if num[c + i] != the_sum[i]:
return False
# 比较下一位数
return dfs(b, c, c + len_sum)
n = len(num)
if n < 3:
return False
for b in range(1, n - 1):
# 第一个数前导零
if b > 1 and num[0] == '0':
break
for c in range(b + 1, n):
# 第二个数前导零
if c > b + 1 and num[b] == '0':
break
if dfs(0, b, c):
return True
return False
# Quick smoke checks for the additive-number solver.
for sample in ("0123", "1023", "112358", "199100199"):
    print(Solution().isAdditiveNumber(sample))
|
18,505 | b33ea5dcd76662933c2a59e4748d5fec76bdb5e7 | import numpy as np
import matplotlib.pyplot as plt
from xgboost import plot_importance
import pandas as pd
import math
import numpy as np
from sklearn.metrics import mean_squared_error, \
explained_variance_score, mean_absolute_error, median_absolute_error, \
r2_score
from toolz.curried import *
def feature_importance(model):
    """Plot the XGBoost feature-importance chart on a 20x20-inch figure."""
    plot_importance(model)
    current_fig = plt.gcf()
    current_fig.set_size_inches(20, 20)
    plt.show()
def get_top_players(eval_df, target, prediction_col, real=True,
                    position='all', num_of_players=100):
    """Return the top *num_of_players* rows ranked by score.

    Args:
        eval_df: DataFrame with player rows (must have a ``Posicao`` column
            when *position* filtering is used).
        target: column with the real score.
        prediction_col: column with the predicted score.
        real: rank by *target* when True, by *prediction_col* otherwise.
        position: keep only rows whose ``Posicao`` equals this value
            ('all' disables the filter).
        num_of_players: number of top rows to return.
    """
    # BUG FIX: DataFrame.sort_values returns a new frame (it is not
    # in-place); the original discarded the result, so head() returned
    # arbitrary (input-order) rows instead of the top-ranked ones.
    sort_col = target if real else prediction_col
    ranked_df = eval_df.sort_values(sort_col, ascending=False)
    if position != 'all':
        ranked_df = ranked_df.loc[ranked_df.Posicao == position]
    return ranked_df.head(num_of_players)
def get_best_team(eval_df, target, prediction_col, rodada, ano, real=True,
                  team_formation=(2, 2, 3, 3)):
    """Assemble the best team for one round of one season.

    *team_formation* gives the number of slots per position, paired in
    order with the position codes (2, 3, 4, 5).
    """
    round_df = eval_df.loc[(eval_df.ano == ano) & (eval_df.Rodada == rodada)]
    # BUG FIX: the original passed the *result* of calling get_top_players
    # (with an undefined placeholder `_` for both position and count) as the
    # callable to map(), which raised at runtime. Pair each position code
    # with its slot count explicitly instead.
    squads = [
        get_top_players(round_df, target, prediction_col, real=real,
                        position=position, num_of_players=count)
        for count, position in zip(team_formation, (2, 3, 4, 5))
    ]
    return pd.concat(squads)
def get_performance_without_outliers(eval_df, target, prediction_col,
                                     outlier_prct=0.1, real=True):
    """Rank rows, trim *outlier_prct* off both ends, return the first 50.

    Args:
        eval_df: DataFrame of player evaluations.
        target: real-score column used for ranking when *real* is True.
        prediction_col: predicted-score column used otherwise.
        outlier_prct: fraction of rows dropped from each end of the ranking.
        real: choose the ranking column.
    """
    # BUG FIX: sort_values is not in-place; the original discarded its
    # return value, so the "outliers" removed by iloc were arbitrary rows.
    sort_col = target if real else prediction_col
    ranked_df = eval_df.sort_values(sort_col, ascending=False)
    n_rows = ranked_df.shape[0]
    trimmed = ranked_df.iloc[
        round(outlier_prct * n_rows):round((1 - outlier_prct) * n_rows)]
    return trimmed.head(50)
|
18,506 | 1e2b9b3a7757a5ff834987f720d396a5b841f429 | from pygame.sprite import Group
from ship import Ship
from game_function import *
from setting import Settings
def run_game():
    """Initialise pygame and run the Alien Invasion main loop."""
    pygame.init()
    game_settings = Settings()  # load game configuration
    screen = pygame.display.set_mode(
        (game_settings.screen_width, game_settings.screen_height))
    pygame.display.set_caption('Alien Invasion')
    player_ship = Ship(screen, game_settings)
    bullets = Group()
    aliens = Group()
    creat_fleet(game_settings, aliens, screen)
    # Enter the main game loop (runs until the process exits).
    start_game()
    while True:
        # Poll keyboard/mouse events, then advance every entity one tick
        # and redraw the frame.
        check_event(game_settings, screen, player_ship, bullets)
        player_ship.update()
        update_bullets(bullets, aliens, game_settings, screen)
        update_aliens(aliens, game_settings)
        update_screen(screen, game_settings.bg_color, player_ship, bullets, aliens)
# Start the game only when executed as a script (not when imported).
if __name__ == '__main__':
    run_game()
|
18,507 | e87ccbf445794d9552716ec2124e61caa4703337 | import time
from items import Inventory
# Function to start the game, which is called at the bottom of the file in main.
def cell(player):
    """Prison-cell scene: print the menu and dispatch on the player's choice.

    Scenes call each other (cell -> armory -> cell ...) so the call stack
    grows with each transition; acceptable for a short text game.
    """
    print('\n')
    print('Current inventory: ', player.totalinventory)
    print('\n')
    print('---PRISON CELL---')
    time.sleep(1)
    print('\n1: Look at cell door.')
    print("2: Look at prison cell walls.")
    print("3: Look at anything going on outside of the cell.")
    cmdlist = ['1','2','3']
    cmd = getcmd(cmdlist)
    time.sleep(1)
    if cmd == '1':
        print("Cell is locked, of course. The keyhole on the other side is reachable, but I need a key.")
        time.sleep(1)
        print("\n1: Attempt to unlock the door.")
        print("2: Return.")
        cmdlist = ['1','2']
        # NOTE(review): cmd is reassigned here, and the top-level menu is a
        # chain of independent ifs — picking submenu option '2' makes the
        # `if cmd == '2':` wall branch below run as well. Probably
        # unintended; elif (or distinct variables) would avoid it.
        cmd = getcmd(cmdlist)
        if cmd == '1':
            if 'Guard Key' in player.totalinventory:
                print("Successfully picked the lock with the Guard Key.")
                time.sleep(2)
                print("You are now out of the cell, and make a dash for the near by armory.")
                time.sleep(1)
                # The key is single-use: consume it before moving on.
                player.remove_item('Guard Key')
                armory(player)
            else:
                time.sleep(1)
                print("You do not have a key, time to try something else.")
                time.sleep(2)
                cell(player)
    if cmd == '2':
        print("You see all of the days you have tallied since arriving at the prison: 95 days.")
        time.sleep(1)
        print("Some of the stone is loose, but you doubt at this point anything can be done here.")
        time.sleep(2)
        print("1: Return.")
        print("2: Try prying the stones apart.")
        cmdlist = ['1','2']
        cmd = getcmd(cmdlist)
        # NOTE(review): only '1' is handled; choosing '2' falls through and
        # the scene (and game) simply ends.
        if cmd == '1':
            cell(player)
    if cmd == '3':
        print("You see a guard with a set of keys jingling from his belt.")
        print("\n1: Call the guard over")
        time.sleep(0.5)
        print("\n2: Return")
        cmdlist = ['1','2']
        cmd = getcmd(cmdlist)
        # NOTE(review): option '2' (Return) is unhandled here as well.
        if cmd == '1':
            print("\nYou yell out: 'Oi, guard, I'm not feeling so good over here!'")
            time.sleep(2)
            print("\nThe guard walks over and places himself right in front of the bars.")
            time.sleep(1)
            print("\n'Bloody hell, what is the matter?'")
            time.sleep(2)
            print("\n1: Wave him off and say, 'Oh nevermind, I felt sick for a moment, I am fine now.'")
            print ("\n2: Quickly reach through the bars, and smash his head against the bars.")
            cmdlist = ['1','2']
            cmd = getcmd(cmdlist)
            if cmd =='1':
                print("The guard sighs, throwing his hands up in disbelief. And mutters under his breath 'Idiot...'")
                cell(player)
            if cmd == '2':
                print("\nYou successfully knock the guard unconscious, and are able to grab his keys.")
                player.add_item('Guard Key')
                time.sleep(1)
                print("You have obtained: Guard Key")
                time.sleep(1)
                cell(player)
def armory(player):
    """Armory scene, reached from the cell after unlocking the door."""
    print('\n')
    print('Current inventory: ', player.totalinventory)
    print('\n')
    print('---PRISON ARMORY---')
    time.sleep(1)
    print('You reach the armory: the door is open and you peak inside.')
    time.sleep(1)
    print('Inside you see four guards: three sitting at a table in a corner of the armory, and one is\n')
    print('equipping himself with a sword. What do you want to do?')
    time.sleep(1)
    print('\n1: It\'s too risky: step away from the armory and go further into the prison.')
    time.sleep(1)
    print('\n2:A weapon is important: attempt to sneak in and grab a weapon without anyone noticing.')
    cmdlist = ['1','2']
    cmd = getcmd(cmdlist)
    if cmd == '1':
        print('You sneak past the door, and continue deeper into the prison.')
        time.sleep(1)
        # NOTE(review): despite "deeper into the prison" this returns to the
        # cell scene — presumably a placeholder destination.
        cell(player)
    if cmd == '2':
        print('You enter the cell, and attempt to grab a near by sword.')
        player.add_item('Sword')
        print(player.totalinventory)
        # NOTE(review): 20-second sleep with no follow-up scene — looks like
        # an unfinished branch.
        time.sleep(20)
def getcmd(cmdlist):
    """Prompt the user until a choice from *cmdlist* is entered.

    Args:
        cmdlist: list of accepted input strings (e.g. ['1', '2']).

    Returns:
        The first entered string that is present in *cmdlist*.

    The original returned None on invalid input, which silently ended the
    calling scene (and the game). Re-prompting is backward compatible:
    every caller only tests for the valid choices.
    """
    while True:
        cmd = input("Enter your choice: ")
        if cmd in cmdlist:
            return cmd
        print("Invalid choice, try again.")
if __name__ == "__main__":
    # Create the player's inventory and open with the intro text,
    # then enter the first scene.
    player = Inventory('Player 1', '', '', '')
    print("The prisoner looks around the cell, scoffing at his present situation. He decides it's time to escape. Find a way out.")
    time.sleep(3)
    cell(player)
|
18,508 | 2a2cbbd41182293e83fb63e83dfd1ba0b71265ec | """Package for holding scenes."""
# Scene identifiers known to the game; each name corresponds to a scene.
SCENE_LIST = [
    "cryo",
    "bridge",
    "mess",
    "engine",
    "machine",
    "crew_quarters",
    "map",
    "manual",
]
# Scene the game starts in.
INITIAL_SCENE = 'cryo'
|
18,509 | e306797e9eabaa35a70c586a7b358173cdc42ecc | import os
import math
import time
import copy
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
import networkx as nx
import matplotlib.pyplot as plt
# Use the non-interactive Agg backend so figures render without a display.
plt.switch_backend('agg')
# Register pandas progress_apply/progress_map helpers backed by tqdm.
tqdm.pandas()
class TempDepNode:
    """Graph node for one discretised attribute value plus its embedding."""
    def __init__(self, value):
        self.value = value  # the discretised attribute value
        self.embedding = None  # vector attached later via set_embedding()
    def set_embedding(self, embedding):
        """Attach an embedding vector to this node."""
        self.embedding = embedding
    def get_embedding(self):
        """Return the embedding vector (None until one has been set)."""
        return self.embedding
class TempDepEdge:
    """Directed edge recording time gaps between two attribute values."""
    def __init__(self, from_node, target_node):
        self.from_node = from_node
        self.target_node = target_node
        self.temp_list = []  # observed time differences between occurrences
        self.embedding = None  # set externally once node embeddings exist
    def add_event(self, diff):
        """Record one observed time difference for this edge."""
        self.temp_list.append(diff)
    def get_embedding(self):
        """Return the edge embedding (None until computed)."""
        return self.embedding
class TempDepGraph:
    """Temporal-dependency graph over discretised values of one attribute.

    Nodes are discretised attribute values; an edge records the time gaps
    between temporally close occurrences of two values.
    """
    def __init__(self, file_name, dataset_name, time_name, attr_name, data, time_diff_threshold=600, intervals = 20):
        self.file_name = file_name
        self.dataset_name = dataset_name
        self.attr_name = attr_name
        self.time = time_name  # name of the timestamp column in `data`
        self.data = data
        self.nodes = dict()  # value -> TempDepNode
        self.edges = dict()  # (value_i, value_j) -> TempDepEdge; stored in symmetric pairs
        # Max time gap for linking two events — presumably seconds, TODO confirm.
        self.time_diff_threshold = time_diff_threshold
        self.intervals = intervals  # number of discretisation bins
        self.edges_freq_weight = None
        self.graph_data = self.discret()
        self.domain = set(self.graph_data[self.attr_name])
        self.build_graph()
        self.set_edge_freq_weight()
    def info(self):
        """Print a one-line summary of the graph."""
        print(
            f'File name: {self.file_name}, Time: {self.time}, Attribute: {self.attr_name}, Nodes: {len(self.nodes)}, Edges: {len(self.edges)}, Events: {self.event_description()}')
    def discret(self):
        """Discretise the attribute into `intervals` bins (right bin edge)."""
        graph_data = copy.deepcopy(self.data)
        graph_data[self.attr_name] = pd.cut(graph_data[self.attr_name], self.intervals)
        # Represent each interval by its right edge.
        graph_data[self.attr_name] = graph_data[self.attr_name].apply(lambda x:x.right)
        return graph_data
    def get_min_max(self):
        """Return the smallest and largest discretised values."""
        domain = list(self.domain)
        return min(domain), max(domain)
    def get_temp_list_domain(self):
        """Return the set of all time differences observed on any edge."""
        temp_list_domain = set()
        for value, edge in self.edges.items():
            temp_list_domain = temp_list_domain.union(set(edge.temp_list))
        return temp_list_domain
    def set_edge_freq_weight(self):
        """Weight each edge by its share of all recorded events."""
        total_weight = sum([len(edge.temp_list) for _, edge in self.edges.items()])
        self.edges_freq_weight = {value:(len(edge.temp_list) / total_weight) for value, edge in self.edges.items()}
        return
    def set_edge_embedding(self):
        # edge embedding: (emb_i+emb_j) where emb_i is the embedding of from_node and emb_j is the embedding of end_node
        for node_pair, edge in self.edges.items():
            value_i, value_j = node_pair
            vi, vj = self.nodes[value_i], self.nodes[value_j]
            emb_i = vi.get_embedding()
            emb_j = vj.get_embedding()
            # Skip pairs until both endpoint embeddings exist.
            if (emb_i is not None) and (emb_j is not None):
                edge.embedding = emb_i+emb_j
    def aggregation(self):
        """Return the frequency-weighted mean of all edge embeddings."""
        embeddigns = np.array([self.edges_freq_weight[value] * edge.embedding for value, edge in self.edges.items()])
        graph_vec = np.mean(embeddigns, axis=0)
        return graph_vec
    def event_description(self):
        """Return count/min/max/mean statistics of events per edge."""
        event_description = {}
        event_stats = [len(edge.temp_list) for _, edge in self.edges.items()]
        event_description['Num'] = sum(event_stats)
        event_description['Min'] = min(event_stats)
        event_description['Max'] = max(event_stats)
        event_description['Mean'] = sum(event_stats) / len(event_stats)
        return event_description
    def build_graph(self):
        """
        Build graph in which edges are attached between two most recent occurrences of different values
        :return:
        """
        start_time = time.time()
        # init temp node
        for value in self.domain:
            node = TempDepNode(value)
            self.nodes[value] = node
        attr_data = self.graph_data[self.attr_name]
        print(f'{len(attr_data)} records in data')
        # init temp edge
        for source_ix, value_i in tqdm(attr_data.items()):
            visited = set()
            for target_ix, value_j in attr_data[source_ix+1:].items():
                # Only the first (most recent) later occurrence of each value
                # is linked to the source event.
                if value_j in visited:
                    continue
                else:
                    visited.add(value_j)
                time_diff = self.graph_data[self.time][target_ix] - \
                    self.graph_data[self.time][source_ix]
                # Data is time-ordered, so once the gap exceeds the threshold
                # no later event can qualify either.
                if time_diff > self.time_diff_threshold:
                    break
                # Edges are always created as a symmetric pair, so both
                # directions exist together (the `or` behaves like `and`).
                if (value_i, value_j) not in self.edges or (value_j, value_i) not in self.edges:
                    self.edges[(value_i, value_j)] = TempDepEdge(value_i, value_j)
                    self.edges[(value_j, value_i)] = TempDepEdge(value_j, value_i)
                self.edges[(value_i, value_j)].add_event(time_diff)
                # Avoid double-counting self-loops.
                if value_i != value_j:
                    self.edges[(value_j, value_i)].add_event(time_diff)
        end_time = time.time()
        print(f'{end_time-start_time} seconds for graph building')
|
18,510 | 03f626e70d004b3d9df94e34c870b23800ace5b5 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
# Register each viewset with the DRF router so list/detail routes are
# generated automatically under the given URL prefixes.
router = DefaultRouter()
router.register(r'persons', views.PersonViewSet)
router.register(r'households', views.HouseholdViewSet)
router.register(r'student_teams', views.StudentTeamViewSet)
router.register(r'dues_plans', views.DuesPlanViewSet)
router.register(r'keyfobs', views.KeyfobViewSet)
# Expose all generated router URLs at the app root.
urlpatterns = [
    path('', include(router.urls)),
]
|
18,511 | ac24fe03f48fbae6328c60faafa29ed202c9c671 | #from my_http_server import MyHTTPRequestHandler, run
import my_http_server
import re
class HTTPRequestHandler(my_http_server.MyHTTPRequestHandler):
    """Handler exposing /api/data (filtered client rows) and
    /api/dictionaries (lookup tables) over MySQL."""
    def register_routes(self):
        """Register URL patterns; query parameter values are constrained
        by per-key regexes so only well-formed queries match the route."""
        d = {
            'category_id':r'\d+',
            'firstname_like':r'[A-Za-z]+',
            'lastname_like':r'[A-Za-z]+',
            'email_like':r'[-A-Za-z.@_]+',
            'gender_id':r'\d+',
            'limit':r'\d+',
            'offset':r'\d+',
            'age':r'\d+',
            'bday':r'\d+',
            'bmonth':r'\d+',
            'byear':r'\d+',
            'min_age':r'\d+',
            'max_age':r'\d+',
        }
        # Build "(?:key=value-pattern)" alternatives for the query string.
        d_str = r'|'.join('(?:%s\=%s)'%(re.escape(k),v)
            for k,v in d.items())
        self.register_route(r'^/api/data/\?((?:(?:%s)\&?)*)/?$'%d_str,self.get_data)
        self.register_route(r'^/api/dictionaries/?$',self.get_dictionaries)
    def get_data(self,match):
        """Return client rows filtered by the query parameters captured
        in *match* group 1 (a "k=v&k2=v2" string)."""
        global sql_select_all
        d=dict(
            tuple(i.split('='))
            for i in match.group(1).split('&')
        )
        cursor = my_http_server.sql_connection.cursor(buffered=True)
        sql = [sql_select_all]
        params=[]
        # Map each supported filter to (SQL fragment, parameter builder).
        # Values are passed as bound parameters, not interpolated into SQL.
        where_dict = {
            'category_id':('category_id = %s',lambda x:[int(x)]),
            'gender_id':('gender_id = %s',lambda x:[int(x)]),
            'age':(
                r'''(birthDate > date_sub(curdate(),interval %s year)) and
        (birthDate <= date_sub(curdate(),interval %s year))''',
                lambda x:[int(x)+1,int(x)]
            ),
            'min_age':(
                'birthDate <= date_sub(curdate(),interval %s year)',
                lambda x:[int(x)]
            ),
            'max_age':(
                'birthDate > date_sub(curdate(),interval %s year)',
                lambda x:[int(x)+1]
            ),
            'bday':(
                'day(birthDate) = %s',
                lambda x:[int(x)]
            ),
            'bmonth':(
                'month(birthDate) = %s',
                lambda x:[int(x)]
            ),
            'byear':(
                'year(birthDate) = %s',
                lambda x:[int(x)]
            ),
        }
        # NOTE(review): the *_like parameters are accepted by the route
        # regex but have no entry here, so they are silently ignored.
        where = [(v,where_dict[k])
            for k,v in d.items() if k in where_dict]
        where_str = [] if not where else ['where %s'%(
            ' and '.join(s for v,(s,f) in where)
        )]
        sql.extend(where_str)
        params.extend([i for v,(s,f) in where for i in f(v)])
        if any(i in d for i in ['age','max_age','min_age']):
            sql.append('order by birthDate asc')
        # Pagination only when both limit and offset are supplied.
        if all(i in d for i in ['limit','offset']):
            sql.append('limit %s offset %s')
            params.extend([int(d['limit']),int(d['offset'])])
        print('\n'.join(sql[1:]),tuple(params),sep='\n')
        cursor.execute(' '.join(sql),tuple(params))
        res = list(cursor)
        cursor.close()
        return self.send_response_headers_json(res,gzip=True)
    def get_dictionaries(self,match):
        """Return lookup dictionaries (gender, category, distinct ages and
        birth year/month/day values) as a single JSON object."""
        d = {
            'gender':'select id,name from test.gender',
            'category':'select id,name from test.category',
            'age':'select distinct cast(%s as signed) from test.client'%(
                sql_select_age('birthDate')
            )
        }
        for i in 'year,month,day'.split(','):
            d['b%s'%i]='select distinct %s(birthDate) from test.client'%i
        cursor = my_http_server.sql_connection.cursor(buffered=True)
        res = {}
        for key,sql in d.items():
            cursor.execute(sql)
            # Two-column rows become {id: name}; single values map to themselves.
            res[key]=dict(sorted(i if len(i)==2 else i*2 for i in cursor))
        cursor.close()
        return self.send_response_headers_json(res,gzip=True)
#https://stackoverflow.com/a/2533913
#https://stackoverflow.com/a/2533913
def sql_select_age(date_field):
    """Return a MySQL expression computing age in years from *date_field*.

    Year difference minus one when the birthday has not happened yet this
    year. Converted from a lambda bound to a name (discouraged by PEP 8);
    the produced SQL string is unchanged.
    """
    return (r'''DATE_FORMAT(NOW(), '%Y') -
DATE_FORMAT('''+date_field+''', '%Y') -
(DATE_FORMAT(NOW(), '00-%m-%d') < DATE_FORMAT('''+date_field+''', '00-%m-%d'))''')
# Base SELECT used by get_data(): joins client rows with their gender and
# category dictionary names and appends the computed age column.
sql_select_all = r'''
SELECT
client.id as id,
category.name as category,
firstname,
lastname,
email,
gender.name as gender,
cast(birthDate as char) as birthDate,
%s as age
FROM test.client
join test.gender on gender.id=client.gender_id
join test.category on category.id=client.category_id
'''%sql_select_age('birthDate')
# Start the HTTP server with the MySQL connection settings.
# SECURITY NOTE(review): database credentials are hard-coded; move them to
# environment variables or a config file before deploying.
my_http_server.run(HTTPRequestHandler,{
    'user':'root',
    'password':'12345678',
    'database':'test',
    'port':33061,
})
|
18,512 | 853fc5e1ad11ac38238b9e62f7ef7a519ce67e00 | #!/usr/bin/env python3
"""A python script to perform audio watermark embedding/detection
on the basis of direct-sequence spread spectrum method."""
# Copyright (C) 2020 by Akira TAMAMORI
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from scipy.io import wavfile
HOST_SIGNAL_FILE = "host.wav"  # host audio file the watermark is embedded into
WATERMARK_SIGNAL_FILE = "wmed_signal.wav"  # output file with embedded watermark
PSEUDO_RAND_FILE = 'pseudo_rand.dat'  # pseudo-random (spreading) sequence file
WATERMARK_ORIGINAL_FILE = 'watermark_ori.dat'  # original watermark bit sequence
WATERMARK_EXTENDED_FILE = 'watermark_extended.dat'  # repetition-extended watermark
REP_CODE = True  # use repetition coding when embedding
FRAME_LENGTH = 1024  # analysis frame length (samples)
CONTROL_STRENGTH = 0.03  # embedding strength
OVERLAP = 0.0  # overlap ratio between analysis frames
NUM_REPS = 3  # number of repetitions per watermark bit
def fix(xs):
    """Round *xs* toward zero, emulating MATLAB's 'fix' function.

    Borrowed from https://ideone.com/YjJwOh
    """
    # floor for non-negative values, ceil for negative ones.
    return np.floor(xs) if xs >= 0 else np.ceil(xs)
def embed():
    """ Embed watermark."""
    # Generate the pseudo random sequence (PRS) used as the spreading code.
    prs = np.random.rand(1, FRAME_LENGTH) - 0.5
    # Save the pseudo random sequence (needed again at detection time).
    with open(PSEUDO_RAND_FILE, 'w') as f:
        for d in np.squeeze(prs):
            f.write("%f\n" % d)
    # Open the host audio file to embed into.
    sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
    signal_len = len(host_signal)
    # Frame hop length.
    frame_shift = int(FRAME_LENGTH * (1 - OVERLAP))
    # Overlap length with the adjacent frame.
    overlap_length = int(FRAME_LENGTH * OVERLAP)
    # Number of bits that can be embedded.
    embed_nbit = fix((signal_len - overlap_length) / frame_shift)
    if REP_CODE:
        # Effective number of embeddable bits after repetition coding.
        effective_nbit = np.floor(embed_nbit / NUM_REPS)
        embed_nbit = effective_nbit * NUM_REPS
    else:
        effective_nbit = embed_nbit
    # Cast counts to plain integers.
    frame_shift = int(frame_shift)
    effective_nbit = int(effective_nbit)
    embed_nbit = int(embed_nbit)
    # Create the original watermark signal (a random 0/1 bit sequence).
    wmark_original = np.random.randint(2, size=int(effective_nbit))
    # Save the original watermark signal.
    with open(WATERMARK_ORIGINAL_FILE, 'w') as f:
        for d in wmark_original:
            f.write("%d\n" % d)
    # Extend the watermark signal by repetition coding.
    if REP_CODE:
        wmark_extended = np.repeat(wmark_original, NUM_REPS)
    else:
        wmark_extended = wmark_original
    # Save the extended watermark signal.
    with open(WATERMARK_EXTENDED_FILE, 'w') as f:
        for d in np.squeeze(wmark_extended):
            f.write("%f\n" % d)
    # Generate the watermarked signal frame by frame.
    pointer = 0
    wmed_signal = np.zeros((frame_shift * embed_nbit))  # watermarked signal
    for i in range(embed_nbit):
        frame = host_signal[pointer: (pointer + FRAME_LENGTH)]
        # Scale the PRS relative to the frame's peak amplitude.
        alpha = CONTROL_STRENGTH * np.max(np.abs(frame))
        # Add or subtract the spreading sequence depending on the bit value.
        if wmark_extended[i] == 1:
            frame = frame + alpha * prs
        else:
            frame = frame - alpha * prs
        wmed_signal[frame_shift * i: frame_shift * (i+1)] = \
            frame[0, 0:frame_shift]
        pointer = pointer + frame_shift
    # Append the untouched tail of the host signal.
    wmed_signal = np.concatenate(
        (wmed_signal, host_signal[len(wmed_signal): signal_len]))
    # Save the watermarked signal as a wav file.
    wmed_signal = wmed_signal.astype(np.int16)  # convert float into integer
    wavfile.write(WATERMARK_SIGNAL_FILE, sr, wmed_signal)
def detect():
    """Detect the embedded watermark and report BER and SNR.

    Subtracts the clean host signal from the watermarked signal and
    cross-correlates each frame's residual with the saved pseudo-random
    sequence; the sign of the peak correlation recovers each embedded
    bit.  Repetition-coded bits are majority-voted back to the original
    payload.  Prints the bit error rate (BER) and the signal-to-noise
    ratio (SNR) of the watermarked file.
    """
    # Open the original (un-watermarked) audio file.
    _, host_signal = wavfile.read(HOST_SIGNAL_FILE)
    # Open the watermarked audio file under evaluation.
    _, eval_signal = wavfile.read(WATERMARK_SIGNAL_FILE)
    signal_len = len(eval_signal)
    frame_shift = FRAME_LENGTH * (1 - OVERLAP)
    embed_nbit = fix((signal_len - FRAME_LENGTH * OVERLAP) / frame_shift)
    if REP_CODE:
        # Effective payload when repetition coding was used.
        effective_nbit = np.floor(embed_nbit / NUM_REPS)
        embed_nbit = effective_nbit * NUM_REPS
    else:
        effective_nbit = embed_nbit
    frame_shift = int(frame_shift)
    effective_nbit = int(effective_nbit)
    embed_nbit = int(embed_nbit)
    # Load the original watermark bits (ground truth for the BER).
    with open(WATERMARK_ORIGINAL_FILE, 'r') as f:
        wmark_original = f.readlines()
    wmark_original = np.array([int(w.rstrip()) for w in wmark_original])
    # Load the pseudo-random sequence used at embedding time.
    with open(PSEUDO_RAND_FILE, 'r') as f:
        prs = f.readlines()
    rr = np.array([float(x.rstrip()) for x in prs])
    pointer = 0
    detected_bit = np.zeros(embed_nbit)
    for i in range(embed_nbit):
        # Residual = watermarked frame minus the clean host frame.
        frame = eval_signal[pointer: pointer + FRAME_LENGTH] - \
            host_signal[pointer: pointer + FRAME_LENGTH]
        # Sign of the peak cross-correlation with the PRS gives the bit.
        comp = np.correlate(frame, rr, "full")
        maxp = np.argmax(np.abs(comp))
        if comp[maxp] >= 0:
            detected_bit[i] = 1
        else:
            detected_bit[i] = 0
        pointer = pointer + frame_shift
    if REP_CODE:
        # Majority vote over each group of NUM_REPS repeated bits.
        count = 0
        wmark_recovered = np.zeros(effective_nbit)
        for i in range(effective_nbit):
            ave = np.sum(detected_bit[count:count+NUM_REPS]) / NUM_REPS
            if ave >= 0.5:
                wmark_recovered[i] = 1
            else:
                wmark_recovered[i] = 0
            count = count + NUM_REPS
    else:
        wmark_recovered = detected_bit
    # Report the bit error rate (percent of payload bits recovered wrong).
    BER = np.sum(np.abs(wmark_recovered - wmark_original)) / \
        effective_nbit * 100
    print(f'bit error rate = {BER} %')
    # Report the SNR of the watermarked signal relative to the host.
    SNR = 10 * np.log10(
        np.sum(np.square(host_signal.astype(np.float32)))
        / np.sum(np.square(host_signal.astype(np.float32)
                           - eval_signal.astype(np.float32))))
    print(f'SNR = {SNR}dB')
def main():
    """Run the demo: embed the watermark, then detect it and report BER/SNR."""
    embed()   # embed the watermark
    detect()  # detect the watermark


# Fixed: the original guard was `if __name__ in '__main__':`, which does a
# substring containment test (e.g. __name__ == '_' would also match)
# instead of the intended equality check.
if __name__ == '__main__':
    main()
|
18,513 | 5d157c954f5bd69c32537d0ced64d086566d50b7 | """seven11 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from apps.service.ajax import *
# URL routes for the seven11 site.  Function views are referenced by
# dotted-path strings, a form Django 1.8 still accepts.
urlpatterns = [
    # Admin site and third-party social authentication.
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('social_auth.urls')),
    # Page views.
    url(r'^$', 'core.views.home'),
    url(r'^login/', 'core.views.login'),
    # AJAX-loaded template fragments (plus the post-login profile page).
    url(r'^ajax/templates/albums/', 'core.views.albums'),
    url(r'^ajax/templates/album/', 'core.views.album'),
    url(r'^ajax/templates/landing/', 'core.views.landing'),
    url(r'^accounts/profile/', 'core.views.logged'),
    url(r'^ajax/templates/blog/', 'core.views.blog'),
    url(r'^ajax/templates/post/', 'core.views.post'),
    url(r'^ajax/templates/vacation/', 'core.views.vacation'),
    url(r'^ajax/templates/category/', 'core.views.category'),
    url(r'^ajax/templates/service_type/', 'core.views.service_type'),
    url(r'^ajax/templates/service/', 'core.views.service'),
    url(r'^ajax/templates/actions/', 'core.views.actions'),
    url(r'^ajax/templates/action/', 'core.views.action'),
    # JSON API v1 (class-based views imported from apps.service.ajax).
    url(r'^api/v1/blog/$', BlogView.as_view()),
    url(r'^api/v1/category/$', CategoriesView.as_view()),
    url(r'^api/v1/category/(\d+)/$', ServiceTypeView.as_view()),
    url(r'^api/v1/albums/$', AlbumsView.as_view()),
    url(r'^api/v1/album/(\d+)/$', AlbumView.as_view()),
    url(r'^api/v1/categories/(\w+)/$', CategoryView.as_view()),
    url(r'^api/v1/posts/(\d+)/$', PostView.as_view()),
    url(r'^api/v1/service_type/(\d+)/services/$', ServicesByTypeView.as_view()),
    url(r'^api/v1/services/(\d+)/$', ServiceView.as_view()),
    url(r'^api/v1/actions/$', ActionsView.as_view()),
    url(r'^api/v1/action/(\d+)/$', ActionView.as_view()),
]
|
18,514 | 4eeefa429ecdb422bcb8f672af03edd85f884a2d | from sys import stdout
def factorial(n):
    """Return n! for a non-negative integer n (0! == 1).

    Iterative product.  For n == 0 or n == 1 the loop body never runs
    and the initial value 1 is returned, so no special case is needed
    (the original's `if n == 0` check and a commented-out `i = i+1`
    line were dead code and have been removed).
    """
    fact = 1
    for i in range(2, n + 1):
        fact *= i
    return fact
# Interactive Pascal's-triangle printer: prompts for a level count, then
# prints each row's binomial coefficients, tab-separated.
print("Developed by Anant and Shreyas\n\n")
print("Enter number of levels")
level = int(input())
# Input validation: warnings only — a zero or negative level simply
# yields an empty triangle below.
if level == 0:
    print("Please enter a number greater than 0")
    print("")
if level < 0:
    print("Enter Positive Numbers")
if level > 11:
    print("Restricting to 10 levels. Remove this 'if' if your screen permits")
    # NOTE(review): indentation was ambiguous in the original source;
    # exit() is assumed to belong to this branch so oversized requests
    # stop here — confirm against the original file.
    exit()
for i in range (0,level):
    # Leading tabs shift the row right so the triangle is centered.
    for k in range(0,level-i):
        print ("\t",end=" ")
    for j in range (0,i+1):
        # Binomial coefficient C(i, j) = i! / ((i-j)! * j!)
        num = int((factorial(i)/(factorial(i - j)*factorial(j))))
        for l in range(0,2):
            print ("\t",end=" ")
        print (num, end =" ")
    print("\n")
|
18,515 | c0810dd56b3de2b6f973bd4818fd22b9239b3635 | # =================================================================================================
# =================================================================================================
# =================================================================================================
# Title: MiniGNI Functions
# Author: Chung Taing
# Date Updated: 13 April 2020
# Description: This script provides functions for generating slide environment files. Slide
# # environment files are necessary for the GNI microscope software in analyzing the sample slides.
# # The microscope software obtains a raw count of the number of SSA on each slide. That is then
# # converted into a number concentration based on information from the slide environment file.
# =================================================================================================
# =================================================================================================
# =================================================================================================
# import packages
import datetime as dt
import math
import numpy as np
import os
import pandas as pd
# Directory layout: miniGNI_dir is the project root on the analysis
# machine; flight_dir holds one sub-folder per sampling day.
miniGNI_dir = 'C:/Users/ntril/Dropbox/mini-GNI'
flight_dir = miniGNI_dir + '/miniGNI_data'
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: clean_GNI
# Parameters:
# # df: the mini-GNI data set
# # begin_time: the time at the start of sampling
# # time_corrector: a correction factor to account for drift in the mini-GNI's real time clock
# Description: This function sets column names for miniGNI data file. It also filters out data
# # prior to the beginning of the sampling so that you only have data from a single sampling day.
# # It also adjusts the numbers so that each variable has the following units: altitude in meters,
# # barometer temperature in degrees Celsius, pressure in Pascals, relative humidity in percent,
# # temperature in degrees Celsius, temperature in Kelvin, and alpha, beta, and gamma in radians.
# =================================================================================================
def clean_GNI(df, begin_time, time_corrector):
    """Clean a raw mini-GNI data frame.

    Names the columns, converts raw hundredths-scaled integer readings
    to physical units (altitude in m, pressure in Pa, temperatures in
    deg C, RH in percent), normalizes orientation angles to radians,
    applies the real-time-clock drift correction, drops pre-sampling
    rows, and indexes the frame by corrected timedate.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw mini-GNI data (10 unnamed columns).
    begin_time : datetime.datetime
        Start of sampling; earlier rows are discarded.
    time_corrector : datetime.timedelta
        Correction for drift in the mini-GNI's real-time clock.

    Returns
    -------
    pandas.DataFrame indexed by timedate, with an added 't_kelvin' column.
    """
    # names the columns in the dataset
    df.columns = ['timedate','altitude','t_barometer','pressure','rh','temperature','door','alpha','beta','gamma']
    # adjusts each variable for the desired unit listed in description
    df.altitude/=100 # meters
    df.pressure/=100 # Pascals
    df.t_barometer/=100 # degrees Celsius (barometer temperature; previous comment wrongly said Pascals)
    df.rh/=100 # percent (raw value is hundredths of a percent; the <=100 filter below confirms this scale)
    df.temperature/=100 # Celsius
    # makes all orientation angles positive
    df.beta = np.where(df.beta < 0, df.beta+360, df.beta)
    df.gamma = np.where(df.gamma < 0, df.gamma+360, df.gamma)
    # converts orientation angles to radians
    df.alpha*=(np.pi/180)
    df.beta*=(np.pi/180)
    df.gamma*=(np.pi/180)
    # removes anomalous data: humidity sensor rarely records strange spikes of >100% RH
    df = df[df.rh <= 100]
    # converts the timedate column into datetime objects
    df.timedate = [dt.datetime.strptime(date, '%Y%m%dT%H%M%S') for date in df.timedate]
    # applies the time correction factor to the timedate column
    df.timedate += time_corrector
    # removes any data from before sampling
    df = df[df.timedate > begin_time]
    # sets the timedate column as the index
    df.set_index('timedate', inplace=True)
    # creates a new column that is the temperature in Kelvin
    df['t_kelvin'] = df.temperature + 273.15
    return df
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: clean_XQ
# Parameters:
# # df: the iMet-XQ2 dataset
# # begin_time: the time at the start of sampling
# Description: This function sets column names for the XQ dataset and removes useless columns. It
# # also filters out data prior to beginning of sampling and data in periods when the iMet-XQ2 was
# # was connected to fewer than 5 satellites. It also adjusts the variables to the correct units:
# # pressure in Pascals, temperature in Celsius and in Kelvin, relative humidity in %, humidity
# # temperature in Celsius, altitude in meters.
# =================================================================================================
def clean_XQ(df, begin_time):
    """Clean an iMet-XQ2 data frame.

    Keeps only the instrument's own 'XQ-iMet-XQ*' columns, renames them,
    drops rows with a weak GPS fix (fewer than 5 satellites), builds a
    UTC-to-HST datetime index, discards pre-sampling rows, converts
    pressure to Pascals, and appends temperature in Kelvin.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw iMet-XQ2 export.
    begin_time : datetime.datetime
        Start of sampling (HST); earlier rows are discarded.

    Returns
    -------
    pandas.DataFrame indexed by timedate.
    """
    # Keep only the instrument's own columns, preserving their order.
    own_columns = [c for c in df.columns if c.startswith('XQ-iMet-XQ')]
    df = df[own_columns]
    df.columns = ['pressure', 'temperature', 'rh', 't_humidity', 'date',
                  'time', 'lon', 'lat', 'altitude', 'sat_count']
    # Drop rows where fewer than 5 satellites were visible.
    df = df[df.sat_count >= 5]
    df['timedate'] = df.date + ' ' + df.time
    # One iMet-XQ2 unit logs dates as mm/dd/yyyy; the rest use yyyy/mm/dd.
    try:
        df.timedate = [dt.datetime.strptime(s, '%m/%d/%Y %H:%M:%S')
                       for s in df.timedate]
    except ValueError:
        df.timedate = [dt.datetime.strptime(s, '%Y/%m/%d %H:%M:%S')
                       for s in df.timedate]
    df.timedate = df.timedate - dt.timedelta(hours=10)  # UTC -> HST
    # Drop anything recorded before sampling began.
    df = df[df.timedate > begin_time]
    df.set_index('timedate', drop=True, append=False, inplace=True)
    df.drop(['date', 'time'], axis=1, inplace=True)
    df.pressure *= 100  # hPa -> Pa
    df['t_kelvin'] = df.temperature + 273.15
    return df
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: fix_XQ
# Parameters:
# # df: the iMet-XQ2 dataset
# Description: Occasionally, an iMet-XQ2 instrument will experience errors in recording data. This
# # functions fixes those errors.
# =================================================================================================
def fix_XQ(df):
    """Undo the fixed-point integer scaling of a misbehaving iMet-XQ2 unit.

    Some iMet-XQ2 instruments occasionally log raw scaled integers
    instead of physical values; this rescales each affected column in
    place and returns the frame.
    """
    divisors = {
        'temperature': 100,
        'rh': 10,
        't_humidity': 100,
        'lon': 10000000,
        'lat': 10000000,
        'altitude': 1000,
    }
    for column, divisor in divisors.items():
        df[column] = df[column] / divisor
    return df
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: add_dewpoint_rho
# Parameters:
# # df: the mini-GNI data set
# Description: This function adds dew point temperature in degrees Celsius and air density (rho)
# # in kg/m3 to the mini-GNI dataset. It calculcates these using the temperature and pressure. This
# # also works for iMet-XQ2 datasets.
# =================================================================================================
def add_dewpoint_rho(df):
    """Append dew-point temperature (deg C) and air density rho (kg/m^3).

    Uses the Magnus approximation for the dew point and an ideal-gas
    mixture of dry air and water vapor for the density.  Expects columns
    'rh' in percent, 'temperature' in deg C, 't_kelvin' in K, and
    'pressure' in Pa.  Works for mini-GNI and iMet-XQ2 frames alike.
    """
    # Magnus dew-point terms (temperature in Celsius here).
    log_rh = np.log(df.rh / 100)
    magnus = 17.625 * df.temperature / (243.04 + df.temperature)
    df['dew_point'] = 243 * (log_rh + magnus) / (17.625 - log_rh - magnus)
    # Saturation vapor pressure (hPa) via Clausius-Clapeyron (t_kelvin),
    # then the actual vapor pressure from relative humidity.
    sat_vapor = 6.113 * np.exp(5423 * ((1 / 273.15) - (1 / df.t_kelvin)))
    vapor = sat_vapor * df.rh / 100
    # Ideal-gas density of the dry-air + water-vapor mixture.
    df['rho'] = (0.028964 * (df.pressure - (vapor * 100))
                 + 0.018016 * vapor * 100) / (8.314 * df.t_kelvin)
    return df
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: correct_altitude
# Parameters
# # df: the mini-GNI data set
# # surface_pressure: the mean surface pressure given by some other instrument
# # surface_altitude: the altitude of the surface you are standing on
# Description: This function corrects the barometer's pressure given a known surface pressure
# and calculates altitude using that surface pressure and the altitude of the surface.
# The equation used to calculate altitude is h = 44330.77*(1-(p/p0)**0.1902632) + offset
# where p is the pressure, p0 is the surface pressure, and offset is the surface altitude
# =================================================================================================
def correct_altitude(df, surface_pressure, surface_altitude):
    """Calibrate pressure against a known surface pressure and rebuild altitude.

    The mean of the instrument's own pressure readings near the ground
    (altimeter altitude within 10 m of the minimum, and climbing less
    than 1 m per sample) is compared with `surface_pressure`; the offset
    is applied to the whole pressure column.  Altitude is then recomputed
    hypsometrically:
        h = 44330.77 * (1 - (p / p0)**0.1902632) + surface_altitude
    """
    # Sample-to-sample altitude change; the first row has no predecessor.
    df['altdiff'] = df.altitude.diff()
    df.iloc[0, df.columns.get_loc('altdiff')] = 0
    # Readings within 10 m of the lowest point count as "near the surface".
    near_surface = df.altitude.min() + 10
    measured_surface = df[(df['altdiff'] < 1)
                          & (df['altitude'] < near_surface)].pressure.mean()
    # Shift every pressure reading by the calibration offset.
    df['pressure'] += surface_pressure - measured_surface
    df.drop('altdiff', axis=1, inplace=True)
    # Recompute altitude from calibrated pressure, honoring ground elevation.
    df['altitude'] = (44330.77
                      * (1 - (df.pressure / surface_pressure) ** 0.1902632)
                      + surface_altitude)
    return df
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: retrieve_samples
# Parameters
# # df: the mini-GNI data set
# Description: This function detects the sampling periods by finding where the door was open.
# Additionally, it separates the different sampling times and returns a list of data frames where
# each data frame is the data frame for the duration of a sampling period.
# =================================================================================================
def retrieve_samples(df):
    """Split the mini-GNI record into one data frame per sampling period.

    A sampling period is a contiguous stretch where the sampler door was
    open (door == 1).  Open-door rows more than 300 s apart are taken to
    belong to different samples — slides are swapped between samples,
    which takes well over five minutes, so the threshold is safe.

    Parameters
    ----------
    df : pandas.DataFrame
        Cleaned mini-GNI data indexed by timedate, with a 'door' column.

    Returns
    -------
    list of pandas.DataFrame, one per sampling period (each still carries
    the helper columns 'myIndex' and 'tdiff').
    """
    # Keep only the rows where the sampler door was open.
    # (Fixed: previously referenced the undefined name `gnidf`.)
    samples = df[df.door == 1].copy()
    samples['myIndex'] = range(1, len(samples) + 1)
    # Seconds elapsed between consecutive open-door rows.
    # NOTE: .dt.seconds wraps at 24 h; fine for single-day flights.
    samples['tdiff'] = samples.index.to_series().diff().dt.seconds
    # First row has no predecessor; treat it as a gap of zero.
    # (Fixed: the removed pandas `.ix` accessor is replaced with `.iloc`.)
    samples.iloc[0, samples.columns.get_loc('tdiff')] = 0.0
    # Rows whose gap to the previous row exceeds five minutes start a
    # new sampling period.
    boundaries = samples.myIndex[samples['tdiff'] > 300]
    # Bracket the boundary list with the first index and one PAST the
    # last index so every row lands in a slice.  (Fixed off-by-one: the
    # original used len(samples) with a strict `<`, silently dropping
    # the final row of the last sample.)
    boundaries = pd.concat(
        [pd.Series([1]), boundaries, pd.Series([len(samples) + 1])],
        ignore_index=True)
    sample_list = [
        samples[(samples.myIndex >= boundaries[i])
                & (samples.myIndex < boundaries[i + 1])]
        for i in range(len(boundaries) - 1)
    ]
    return sample_list
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: retrieve_xqSample
# Parameters
# # xqdf: the iMet-XQ2 data set
# # gnidf: the mini-GNI data set
# Description: This function returns the part of the iMet-XQ2 data frame that matches up to the
# # time of the mini-GNI data frame.
# =================================================================================================
def retrieve_xqSample(xqdf, gnidf):
    """Return the slice of the iMet-XQ2 record overlapping a mini-GNI sample.

    Parameters
    ----------
    xqdf : pandas.DataFrame
        iMet-XQ2 data indexed by timedate.
    gnidf : pandas.DataFrame
        One mini-GNI sampling-period frame (timedate index).

    Returns
    -------
    pandas.DataFrame restricted to the sample's time span (inclusive
    at both ends).
    """
    start, end = gnidf.index[0], gnidf.index[-1]
    in_window = (xqdf.index >= start) & (xqdf.index <= end)
    return xqdf[in_window]
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: rename_environment_files
# Parameters:
# # sample_year: the year when the samples were taken
# # sample_month: the month when the samples were taken
# # sample_day: the day when the samples were taken
# Description: This function renames all the slide environment files in a folder and also corrects
# # the slide number in each file. Recall that the slide environment files are required by the
# # GNI microscope software to calculate number concentration.
# =================================================================================================
def rename_environment_files(sample_year, sample_month, sample_day):
    """Rewrite the slide environment files for one sampling day with a
    day-wide slide numbering.

    The GNI microscope software expects files named
    'sli_env_<yymmdd>a<slide#>' whose fourth line records the slide
    number.  The per-instrument generator numbers slides per instrument,
    so this renumbers them sequentially across every instrument's files
    for the day, writing each renamed copy alongside the original.

    Parameters
    ----------
    sample_year, sample_month, sample_day : str
        Date components, e.g. '2020', '04', '13'.
    """
    date_label = sample_year[2:] + sample_month + sample_day
    environment_dir = (flight_dir + '/' + sample_year + '_' + sample_month
                       + '_' + sample_day + '/slides')
    # Running slide number across every instrument's files for the day.
    counter = 1
    for subdir, dirs, files in os.walk(environment_dir):
        for file in files:
            if file.startswith(date_label + 'gni'):
                filepath = environment_dir + '/' + file
                new_filepath = (environment_dir + '/' + 'sli_env_'
                                + date_label + 'a' + str(counter))
                # Context managers so handles are closed even on error
                # (the original left files open if readlines/writelines raised).
                with open(filepath, 'r') as myfile:
                    filelines = myfile.readlines()
                # Line 4 carries the slide number in the fixed-width
                # layout the microscope software expects.
                # NOTE(review): both branches below are currently
                # identical — the 1-digit branch presumably needs one
                # extra leading space for alignment; confirm against the
                # microscope's expected file layout before changing.
                if counter < 10:
                    filelines[3] = ' {}. slide_number \n'.format(counter)
                else:
                    filelines[3] = ' {}. slide_number \n'.format(counter)
                with open(new_filepath, 'w') as new_file:
                    new_file.writelines(filelines)
                counter += 1
# =================================================================================================
# =================================================================================================
# =================================================================================================
# Function: generate_environment_files
# Parameters:
# # gni_number: the mini-GNI instrument number that took the samples as a string
# # sample_year: the year when the samples were taken as a string
# # sample_month: the month when the samples were taken as a string
# # sample_day: the day when the samples were taken as a string
# # sample_hour: the hour when the samples started being taken as a string
# # sample_minute: the minutes when the samples started being taken as a string
# # windspeed_bar: a list of average surface wind speed (float format) for each sample taken
# # windspeed_min: same as previous except the minimum wind speeds
# # windspeed_max: same as previous except the maximum wind speeds
# # wind_dir: the average wind direction of the sampling day (float format)
# # surface_height: the height where surface wind speed measurements are taken (float format)
# # lon: the longitude of the sampling location (float format)
# # lat: the latitude of the sampling location (float format)
# # time_corrector: the correction factor for the mini-GNI instrument because its real time clock
# # # tends to drift over time (datetime timedelta object)
# Description: This function generates slide environment files for all samples taken with a
# # specific mini-GNI instrument on a particular sample date.
# =================================================================================================
def generate_environment_files(gni_number, sample_year, sample_month, sample_day, sample_hour, sample_minute, windspeed_bar, windspeed_min, windspeed_max, wind_dir, surface_height, lon, lat, time_corrector=dt.timedelta(0)):
# create a date label of the sample date
date_label = sample_year[2:] + sample_month + sample_day
# define the directory where we will pull mini-GNI and iMet-XQ2 data from for the sample day
sample_dir = flight_dir + '/' + sample_year + '_' + sample_month + '_' + sample_day
# convert the time string variables defined above into a datetime
sample_time = dt.datetime.strptime(sample_year + sample_month + sample_day + sample_hour + sample_minute + '00', '%Y%m%d%H%M%S')
# retrieve miniGNI data corresponding to the correct mini-GNI number
gni_dir = sample_dir + '/data/' + date_label + '_gni' + gni_number + '.csv'
gniDF = pd.read_csv(gni_dir, header=None)
gniDF = clean_GNI(gniDF, begin_time=sample_time, time_corrector=time_corrector)
# This walks through the sample_dir directory to search for surface_XQ data. This iMet-XQ2
# # data set is only necessary for when the mini-GNI does not have a humidity sensor.
for subdir, dirs, files in os.walk(sample_dir + '/data'):
for file in files:
if file.startswith(dateLabel + '_surface_xq'):
surfaceXQ = pd.read_csv(sample_dir + '/data/' + file)
surfaceXQ = clean_XQ(surfaceXQ, begin_time=sample_time)
surfaceXQ = add_dewpoint_rho(surfaceXQ)
# add dew point and air density data if the mini-GNI has a humidity sensor
gni_has_RH = bool(gniDF['rh'].mean() > 0) # checks if the RH data exists
if gni_has_RH:
gniDF = add_dewpoint_rho(gniDF)
# calibrate the mini-GNI pressure to mean surface pressure read by then surface iMet-XQ2
# # and also recalculate the altitude from the calibrated pressure
xq_surface_pressure = surfaceXQ['pressure'].mean()
gniDF = correct_altitude(gniDF, surface_pressure=xq_surface_pressure, surface_altitude=surface_height)
# obtain a list of data frames, one data frame for each sampling period
all_samples = retrieve_samples(gniDF)
# This generates slide environment files for each sample period. If windspeed_bar contains
# # a -1 value, then that sample period is considered invalid, and the environment file
# # is not generated for that sample. e.g. if windspeed_bar = [4.5, -1, 3.0], then the second
# # data frame in all_samples is not analyzed for sampling data.
# What this does is pull relevant data from the mini-GNI sampling period data set and then
# # puts them all into strings. These strings will all become lines in a text file.
for i in range(len(windspeed_bar)):
if windspeed_bar[i] > 0:
# pulls the ith sampling data from the list of data frames
gni_sample = all_samples[i]
# creates the year month day string
yymmdd_str = ' {}. yymmdd'.format(date_label)
# get the beginning and end of the sampling period
hhmmss_begin = gni_sample.index[0].strftime('%H%M%S')
hhmmss_end = gni_sample.index[-1].strftime('%H%M%S')
# creates strings marking the begining and ending time of the sampling period
hhmmss_begin_str = ' {}.0 hhmmss_begin'.format(hhmmss_begin)
hhmmss_end_str = ' {}.0 hhmmss_end'.format(hhmmss_end)
# This creates the slide number string. Note that the function rename_environment_files
# # edits these lines later as they will be incorrect right now since we are generating
# # environment files for each mini-GNI instrument individually whereas the slide
# # number depends on how many instruments were used and how many samples were taken
# # per instrument.
slide_number_str = ' {}. slide_number'.format(i+1)
# field project number string, which defaults to 0 and doesn't need to be worried about
project_str = ' 0. field_project'
# =====================================================================================
# This calculates the wind speed average, min, and max aloft. It uses the power law
# # to convert average, min, and max surface wind to aloft wind. The power law is
# # u = u_surface * (z / z_surface)^0.143. This also rounds each to the nearest
# # hundredths place.
wind_aloft_bar = windspeed_bar[i]*((gni_sample['altitude'].mean())/surface_height)**0.143
wind_aloft_bar = round(wind_aloft_bar, 2)
wind_aloft_min = windspeed_min[i]*((gni_sample['altitude'].mean())/surface_height)**0.143
wind_aloft_min = round(wind_aloft_min, 2)
wind_aloft_max = windspeed_max[i]*((gni_sample['altitude'].mean())/surface_height)**0.143
wind_aloft_max = round(wind_aloft_max, 2)
# This creates the strings for wind speed aloft average, min, and max. Note that it
# # creates both tas string and windspeed string. This is because the mini-GNI sampling
# # was done on a kite platform rather than a moving platform (such as an aircraft,
# # drone, or UAS). Thus, the air speed and the wind speed are the same. Note that the
# # string lengths change based on the value of the wind speed due to the number of
# # digits. This does not account for wind speed over 99.99 m/s because this is not
# # possible for the kite platform to observe. If this method were to be adapted for
# # a moving platform, this section of the code would have to be changed.
# wind average
if wind_aloft_bar < 10:
tas_bar_str = ' {:.2f} tas_bar'.format(wind_aloft_bar)
windspeed_bar_str = ' {:.2f} wind_speed_bar'.format(wind_aloft_bar)
else:
tas_bar_str = ' {:.2f} tas_bar'.format(wind_aloft_bar)
windspeed_bar_str = ' {:.2f} wind_speed_bar'.format(wind_aloft_bar)
# wind minimum
if wind_aloft_min < 10:
tas_min_str = ' {:.2f} tas_min'.format(wind_aloft_min)
windspeed_min_str = ' {:.2f} wind_speed_min'.format(wind_aloft_min)
else:
tas_min_str = ' {:.2f} tas_min'.format(wind_aloft_min)
windspeed_min_str = ' {:.2f} wind_speed_min'.format(wind_aloft_min)
# wind maximum
if wind_aloft_max < 10:
tas_max_str = ' {:.2f} tas_max'.format(wind_aloft_max)
windspeed_max_str = ' {:.2f} wind_speed_max'.format(wind_aloft_max)
else:
tas_max_str = ' {:.2f} tas_max'.format(wind_aloft_max)
windspeed_max_str = ' {:.2f} wind_speed_max'.format(wind_aloft_max)
# =====================================================================================
# Check if miniGNI has humidity data. If not, then the iMet-XQ2 data surfaceXQ will
# # be used to help calculate relative humidity, dew point, and air density aloft.
# # Average, min, and max values are required for each variable.
if gni_has_RH: # then the data can be pulled directly from the mini-GNI
# relative humidity
rh_bar = round(gni_sample['rh'].mean()/100, 4)
rh_min = round(gni_sample['rh'].min()/100, 4)
rh_max = round(gni_sample['rh'].max()/100, 4)
# air density
rho_bar = round(gni_sample['rho'].mean(), 3)
rho_min = round(gni_sample['rho'].min(), 3)
rho_max = round(gni_sample['rho'].max(), 3)
# dew point temperature
td_bar = round(gni_sample['dew_point'].mean() + 273.15, 2)
td_min = round(gni_sample['dew_point'].min() + 273.15, 2)
td_max = round(gni_sample['dew_point'].max() + 273.15, 2)
else:
# retreive XQ data for corresponding sampling period
xqSample = retrieve_xqSample(xqdf=surfaceXQ, gnidf=gni_sample)
# get XQ surface data for temperature and relative humidity
xq_temp_bar = xqSample['t_kelvin'].mean()
xq_rh_bar = xqSample['rh'].mean()
xq_rh_min = xqSample['rh'].min()
xq_rh_max = xqSample['rh'].max()
# calculate average saturation vapor pressure at surface
xq_sat_vapor = 6.113*math.exp(5423*((1/273.15) - (1/xq_temp_bar)))
# calculate vapor pressure average, minimum, maximum at surface
vapor_bar = xq_sat_vapor*(xq_rh_bar/100)
vapor_min = xq_sat_vapor*(xq_rh_min/100)
vapor_max = xq_sat_vapor*(xq_rh_max/100)
# get miniGNI temperature aloft
gni_temp_bar = gni_sample['t_kelvin'].mean()
gni_temp_min = gni_sample['t_kelvin'].min()
gni_temp_max = gni_sample['t_kelvin'].max()
# calculate average saturation vapor pressure at miniGNI aloft
gni_sat_vapor = 6.113*math.exp(5423*((1/273.15) - (1/gni_temp_bar)))
# This now calculate the variables at the miniGNI aloft. It is assumed that the
# # atmosphere up to the height of the mini-GNI is well mixed so that the vapor
# # pressure is the same at the surface and at aloft.
# relative humidity (in fraction form)
rh_bar = vapor_bar/gni_sat_vapor
rh_min = vapor_min/gni_sat_vapor
rh_max = vapor_max/gni_sat_vapor
# gets temperature for dew point calculation
# # (the calculation uses Celsius, so need to convert)
gni_temp_bar -= 273.15
gni_temp_min -= 273.15
gni_temp_max -= 273.15
# dew point calculation
td_bar = 243.04*(math.log(rh_bar)+((17.625*gni_temp_bar)/(243.04+gni_temp_bar)))/(17.625-math.log(rh_bar)-((17.625*gni_temp_bar)/(243.04+gni_temp_bar)))
td_min = 243.04*(math.log(rh_bar)+((17.625*gni_temp_min)/(243.04+gni_temp_min)))/(17.625-math.log(rh_bar)-((17.625*gni_temp_min)/(243.04+gni_temp_min)))
td_max = 243.04*(math.log(rh_bar)+((17.625*gni_temp_max)/(243.04+gni_temp_max)))/(17.625-math.log(rh_bar)-((17.625*gni_temp_max)/(243.04+gni_temp_max)))
# convert variables back to form required in sli_env file
# # temperatures in Kelvin
td_bar += 273.15
td_min += 273.15
td_max += 273.15
gni_temp_bar += 273.15
gni_temp_min += 273.15
gni_temp_max += 273.15
# calculate air density aloft
# # uses miniGNI air pressure, vapor pressure, temperature in Kelvin
gni_pres_bar = gni_sample['pressure'].mean()
rho_bar = (0.028964*(gni_pres_bar - 100*vapor_bar) + 0.018016*100*vapor_bar)/(8.314*gni_temp_bar)
rho_min = (0.028964*(gni_pres_bar - 100*vapor_max) + 0.018016*100*vapor_max)/(8.314*gni_temp_bar)
rho_max = (0.028964*(gni_pres_bar - 100*vapor_min) + 0.018016*100*vapor_min)/(8.314*gni_temp_bar)
# round all variables so we can generate string lines
rh_bar = round(rh_bar, 4)
rh_min = round(rh_min, 4)
rh_max = round(rh_max, 4)
td_bar = round(td_bar, 2)
td_min = round(td_min, 2)
td_max = round(td_max, 2)
rho_bar = round(rho_bar, 3)
rho_min = round(rho_min, 3)
rho_max = round(rho_max, 3)
# creates string lines for relative humidity
rh_bar_str = ' {:.4f} rel_hum_bar'.format(rh_bar)
rh_min_str = ' {:.4f} rel_hum_min'.format(rh_min)
rh_max_str = ' {:.4f} rel_hum_max'.format(rh_max)
# creats string lines for air density
rho_bar_str = ' {:.3f} rho_air_bar'.format(rho_bar)
rho_min_str = ' {:.3f} rho_air_min'.format(rho_min)
rho_max_str = ' {:.3f} rho_air_max'.format(rho_max)
# get the mini-GNI temperature data
temp_bar = round(gni_sample['t_kelvin'].mean(), 2)
temp_min = round(gni_sample['t_kelvin'].min(), 2)
temp_max = round(gni_sample['t_kelvin'].max(), 2)
# creates string lines for temperature
temp_bar_str = ' {:.2f} t_bar'.format(temp_bar)
temp_min_str = ' {:.2f} t_min'.format(temp_min)
temp_max_str = ' {:.2f} t_max'.format(temp_max)
# create string lines for dew point temperature
td_bar_str = ' {:.2f} td_bar'.format(td_bar)
td_min_str = ' {:.2f} td_min'.format(td_min)
td_max_str = ' {:.2f} td_max'.format(td_max)
# =====================================================================================
# get the mini-GNI pressure data
pres_bar = int(gni_sample['pressure'].mean())
pres_min = int(gni_sample['pressure'].min())
pres_max = int(gni_sample['pressure'].max())
# This create string lines for pressure. Note that the number of digits for pressure
# # can change, so the string length changes.
# pressure average
if pres_bar < 100000:
pres_bar_str = ' {}. p_bar'.format(pres_bar)
else:
pres_bar_str = ' {}. p_bar'.format(pres_bar)
# pressure minimum
if pres_min < 100000:
pres_min_str = ' {}. p_min'.format(pres_min)
else:
pres_min_str = ' {}. p_min'.format(pres_min)
# pressure maximum
if pres_max < 100000:
pres_max_str = ' {}. p_max'.format(pres_max)
else:
pres_max_str = ' {}. p_max'.format(pres_max)
# =====================================================================================
# get the mini-GNI altitude data
z_bar = int(gni_sample['altitude'].mean())
z_min = int(gni_sample['altitude'].min())
z_max = int(gni_sample['altitude'].max())
z_begin = int(gni_sample['altitude'].iloc[0])
z_end = int(gni_sample['altitude'].iloc[-1])
# This creates string lines for altitude. Note that the number of digits for altitude
# # can change, so the string length changes. Currently, this section accounts for
# # altitudes from 1-9999 meters.
# altitude average
if z_bar >= 1000:
z_bar_str = ' {}. z_bar'.format(z_bar)
elif z_bar >= 100:
z_bar_str = ' {}. z_bar'.format(z_bar)
elif z_bar >= 10:
z_bar_str = ' {}. z_bar'.format(z_bar)
else:
z_bar_str = ' {}. z_bar'.format(z_bar)
# altitude minimum
if z_min >= 1000:
z_min_str = ' {}. z_min'.format(z_min)
elif z_min >= 100:
z_min_str = ' {}. z_min'.format(z_min)
elif z_min >= 10:
z_min_str = ' {}. z_min'.format(z_min)
else:
z_min_str = ' {}. z_min'.format(z_min)
# altitude maximum
if z_max >= 1000:
z_max_str = ' {}. z_max'.format(z_max)
elif z_max >= 100:
z_max_str = ' {}. z_max'.format(z_max)
elif z_max >= 10:
z_max_str = ' {}. z_max'.format(z_max)
else:
z_max_str = ' {}. z_max'.format(z_max)
# altitude at beginning of sample period
if z_begin >= 1000:
z_begin_str = ' {}. z_begin'.format(z_begin)
elif z_begin >= 100:
z_begin_str = ' {}. z_begin'.format(z_begin)
elif z_begin >= 10:
z_begin_str = ' {}. z_begin'.format(z_begin)
else:
z_begin_str = ' {}. z_begin'.format(z_begin)
# altitude at end of sampling period
if z_end >= 1000:
z_end_str = ' {}. z_end'.format(z_end)
elif z_end >= 100:
z_end_str = ' {}. z_end'.format(z_end)
elif z_end >= 10:
z_end_str = ' {}. z_end'.format(z_end)
else:
z_end_str = ' {}. z_end'.format(z_end)
# =====================================================================================
# This creates the wind direction string. Note that the number of digits in wind
# # direction changes as the value changes. The string length therefore needs to be
# # varied. This accounts for all wind directions from 0-360 degrees.
if wind_dir >= 100:
wind_dir_str = ' {:.2f} wind_direction_bar'.format(wind_dir)
elif wind_dir >= 10:
wind_dir_str = ' {:.2f} wind_direction_bar'.format(wind_dir)
else:
wind_dir_str = ' {:.2f} wind_direction_bar'.format(wind_dir)
# =====================================================================================
# This creates the longitude and latitude strings. Note that the average longitude
# # and latitude is currently being used for all of the average, minimum, and maximum
# # longitude and latitude strings. This accounts for all longitudes from -180 to 180
# # degrees and all latitudes from -90 to 90 degrees.
# longitude
if lon >= 100:
lon_bar_str = ' {:.2f} longitude_bar'.format(lon)
lon_min_str = ' {:.2f} longitude_min'.format(lon)
lon_max_str = ' {:.2f} longitude_max'.format(lon)
elif lon >= 10:
lon_bar_str = ' {:.2f} longitude_bar'.format(lon)
lon_min_str = ' {:.2f} longitude_min'.format(lon)
lon_max_str = ' {:.2f} longitude_max'.format(lon)
elif lon >= 0:
lon_bar_str = ' {:.2f} longitude_bar'.format(lon)
lon_min_str = ' {:.2f} longitude_min'.format(lon)
lon_max_str = ' {:.2f} longitude_max'.format(lon)
elif lon > -10:
lon_bar_str = ' {:+.2f} longitude_bar'.format(lon)
lon_min_str = ' {:+.2f} longitude_min'.format(lon)
lon_max_str = ' {:+.2f} longitude_max'.format(lon)
elif lon > -100:
lon_bar_str = ' {:+.2f} longitude_bar'.format(lon)
lon_min_str = ' {:+.2f} longitude_min'.format(lon)
lon_max_str = ' {:+.2f} longitude_max'.format(lon)
else:
lon_bar_str = ' {:+.2f} longitude_bar'.format(lon)
lon_min_str = ' {:+.2f} longitude_min'.format(lon)
lon_max_str = ' {:+.2f} longitude_max'.format(lon)
# latitude
if lat >= 10:
lat_bar_str = ' {:.2f} latitude_bar'.format(lat)
lat_min_str = ' {:.2f} latitude_min'.format(lat)
lat_max_str = ' {:.2f} latitude_max'.format(lat)
elif lat >= 0:
lat_bar_str = ' {:.2f} latitude_bar'.format(lat)
lat_min_str = ' {:.2f} latitude_min'.format(lat)
lat_max_str = ' {:.2f} latitude_max'.format(lat)
elif lat > -10:
lat_bar_str = ' {:+.2f} latitude_bar'.format(lat)
lat_min_str = ' {:+.2f} latitude_min'.format(lat)
lat_max_str = ' {:+.2f} latitude_max'.format(lat)
else:
lat_bar_str = ' {:+.2f} latitude_bar'.format(lat)
lat_min_str = ' {:+.2f} latitude_min'.format(lat)
lat_max_str = ' {:+.2f} latitude_max'.format(lat)
# =====================================================================================
# Now, all the strings created can be written to an environment file. First, the file
# # name is generated using the date_label and the mini-GNI number. The naming system
# # used here is essential for the function rename_environment_files to work. The
# # reason that the files have to later be renamed is that this function only generates
# # environment files for one particular mini-GNI, defined by gni_number. The files
# # to be named taking into account the number of mini-GNIs used, so they have to be
# # renamed at a later time. The naming system used here results in the text files
# # being ordered in the directory like so (with X being used to substitude date
# # values): X_gni1s1, X_gni1s2, X_gni1s3, X_gni2s1, X_gni2s2, etc. This later allows
# # them to be renamed in order to Xa1, Xa2, Xa3, Xa4, Xa5, etc.
environment_file_name = sample_dir + '/slides/' + date_label + 'gni' + gni_number + 's' + str(i+1) + '.txt'
# putting all the strings into lines of a text file in a specific order
environment_file_lines = [yymmdd_str, hhmmss_begin_str, hhmmss_end_str, slide_number_str, project_str, tas_bar_str, tas_min_str, tas_max_str, rh_bar_str, rh_min_str, rh_max_str, rho_bar_str, rho_min_str, rho_max_str, temp_bar_str, temp_min_str, temp_max_str, td_bar_str, td_min_str, td_max_str, pres_bar_str, pres_min_str, pres_max_str, z_bar_str, z_min_str, z_max_str, z_begin_str, z_end_str, windspeed_bar_str, windspeed_min_str, windspeed_max_str, wind_dir_str, lon_bar_str, lon_min_str, lon_max_str, lat_bar_str, lat_min_str, lat_max_str]
environment_file_lines = [s + ' \n' for s in environment_file_lines]
f = open(environment_file_name, 'w')
f.writelines(environment_file_lines)
f.close()
|
18,516 | ce384112e3ed28c1dd0a6f7106cee1ac6d067005 | #!/usr/bin/env python
from palindrome import getInput,formatStr,isPalindrome
def test_format1():
    # Expected: formatStr strips newline characters entirely, so an
    # all-newline input becomes the empty string.
    assert formatStr("\n\n\n\n\n") == ""
def test_format2():
    # Expected: both '\n' and '\r' are removed while the remaining
    # characters keep their original order.
    assert formatStr("123\n\rlol90") == "123lol90"
def test_format3():
    # Expected: date separators ('/') are stripped as well, not only
    # whitespace — presumably formatStr keeps digits only; confirm in palindrome.py.
    assert formatStr("02/02/2020") == "02022020"
def test_palin1():
    # "02022020" reads the same forwards and backwards.
    assert isPalindrome("02022020")
# Fails on purpose: this sentence is only a palindrome after normalizing
# case and removing spaces, which isPalindrome apparently does not do.
def test_palin2():
    # Strict character-for-character comparison fails ('A' != 'a', spaces differ).
    assert isPalindrome("Able was I ere I saw Elba")
def test_palin3():
    # A run of identical characters is trivially a palindrome.
    assert isPalindrome("1111111111")
|
18,517 | 3fdf9b6c8705b565d51e5a9be370000dc7970733 | # Copyright 2023 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MIT License
#
# Copyright (c) 2021-2022 aesara-devs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
from copy import copy
from functools import singledispatch
from typing import Callable, List, Sequence, Tuple
from pytensor.graph.basic import Apply, Variable
from pytensor.graph.op import Op
from pytensor.graph.utils import MetaType
from pytensor.tensor import TensorVariable
from pytensor.tensor.elemwise import Elemwise
from pytensor.tensor.random.op import RandomVariable
@singledispatch
def _logprob(
    op: Op,
    values: Sequence[TensorVariable],
    *inputs: TensorVariable,
    **kwargs,
):
    """Create a graph for the log-density/mass of a ``RandomVariable``.

    This function dispatches on the type of ``op``, which should be a subclass
    of ``RandomVariable``. If you want to implement new density/mass graphs
    for a ``RandomVariable``, register a new function on this dispatcher.

    Raises
    ------
    NotImplementedError
        When no log-probability implementation is registered for ``type(op)``.
    """
    raise NotImplementedError(f"Logprob method not implemented for {op}")
def _logprob_helper(rv, *values, **kwargs):
    """Helper that calls the `_logprob` dispatcher for ``rv``.

    The resulting log-probability graph is named after the first *named*
    value variable, if any.

    Fix: the original loop reused ``rv`` as the loop variable, shadowing the
    ``rv`` parameter; it is renamed to ``value`` (behavior unchanged).
    """
    logprob = _logprob(rv.owner.op, values, *rv.owner.inputs, **kwargs)
    for value in values:
        if value.name:
            logprob.name = f"{value.name}_logprob"
            break
    return logprob
@singledispatch
def _logcdf(
    op: Op,
    value: TensorVariable,
    *inputs: TensorVariable,
    **kwargs,
):
    """Create a graph for the logcdf of a ``RandomVariable``.

    This function dispatches on the type of ``op``, which should be a subclass
    of ``RandomVariable``. If you want to implement new logcdf graphs
    for a ``RandomVariable``, register a new function on this dispatcher.

    Raises
    ------
    NotImplementedError
        When no logcdf implementation is registered for ``type(op)``.
    """
    raise NotImplementedError(f"LogCDF method not implemented for {op}")
def _logcdf_helper(rv, value, **kwargs):
    """Dispatch to `_logcdf` for ``rv`` and label the resulting graph."""
    node = rv.owner
    logcdf = _logcdf(node.op, value, *node.inputs, name=rv.name, **kwargs)
    # Propagate the variable's name onto the logcdf graph for readability.
    if rv.name:
        logcdf.name = f"{rv.name}_logcdf"
    return logcdf
@singledispatch
def _icdf(
    op: Op,
    value: TensorVariable,
    *inputs: TensorVariable,
    **kwargs,
):
    """Create a graph for the inverse CDF of a `RandomVariable`.

    This function dispatches on the type of `op`, which should be a subclass
    of `RandomVariable`.

    Raises
    ------
    NotImplementedError
        When no inverse-CDF implementation is registered for ``type(op)``.
    """
    raise NotImplementedError(f"Inverse CDF method not implemented for {op}")
def _icdf_helper(rv, value, **kwargs):
    """Dispatch to `_icdf` for ``rv`` and label the resulting graph."""
    node = rv.owner
    rv_icdf = _icdf(node.op, value, *node.inputs, **kwargs)
    # Propagate the variable's name onto the icdf graph for readability.
    if rv.name:
        rv_icdf.name = f"{rv.name}_icdf"
    return rv_icdf
class MeasurableVariable(abc.ABC):
    """A variable that can be assigned a measure/log-probability.

    Used as a *virtual* ABC: concrete `Op` types are attached via
    ``MeasurableVariable.register(...)`` rather than by subclassing.
    """
MeasurableVariable.register(RandomVariable)
class UnmeasurableMeta(MetaType):
    """Metaclass for "unmeasurable" clones of measurable `Op` types.

    Classes created through this metaclass carry an ``id_obj`` attribute
    (elsewhere set to a tuple of the original `Op` instance and its
    measurable-outputs function) that defines type identity for
    equality and hashing.

    Fix: the ``__new__`` namespace parameter was named ``dict``, shadowing
    the builtin; it is renamed to ``namespace`` (behavior unchanged —
    the metaclass machinery passes it positionally).
    """

    def __new__(cls, name, bases, namespace):
        # Ensure every class created via this metaclass has an id_obj slot.
        if "id_obj" not in namespace:
            namespace["id_obj"] = None
        return super().__new__(cls, name, bases, namespace)

    def __eq__(self, other):
        # NOTE(review): equality is delegated to hash equality of `id_obj`;
        # a hash collision would make two distinct types compare equal.
        if isinstance(other, UnmeasurableMeta):
            return hash(self.id_obj) == hash(other.id_obj)
        return False

    def __hash__(self):
        return hash(self.id_obj)
class UnmeasurableVariable(metaclass=UnmeasurableMeta):
    """Mixin marking a cloned `Op` type as unmeasurable.

    ``id_obj`` is an attribute, i.e. tuple of length two, of the unmeasurable
    class object, e.g. ``id_obj = (NormalRV, noop_measurable_outputs_fn)``.
    """
def get_measurable_outputs(op: Op, node: Apply) -> List[Variable]:
    """Return only the outputs that are measurable."""
    # Ops that are not registered as MeasurableVariable have no measurable
    # outputs at all.
    if not isinstance(op, MeasurableVariable):
        return []
    return _get_measurable_outputs(op, node)
@singledispatch
def _get_measurable_outputs(op, node):
    # Default: every output of the node is considered measurable.
    return node.outputs
@_get_measurable_outputs.register(RandomVariable)
def _get_measurable_outputs_RandomVariable(op, node):
    # Skip the first output — for a RandomVariable node this is presumably
    # the RNG/state output rather than a draw (confirm against PyTensor's
    # RandomVariable output convention).
    return node.outputs[1:]
def noop_measurable_outputs_fn(*args, **kwargs):
    """Measurable-outputs function that reports no measurable outputs.

    Accepts and ignores any arguments so it can stand in for any
    ``_get_measurable_outputs`` implementation.
    """
    return list()
def assign_custom_measurable_outputs(
    node: Apply,
    measurable_outputs_fn: Callable = noop_measurable_outputs_fn,
    type_prefix: str = "Unmeasurable",
) -> Apply:
    """Assign a custom ``_get_measurable_outputs`` dispatch function to a measurable variable instance.

    The node is cloned and a custom `Op` that's a copy of the original node's
    `Op` is created. That custom `Op` replaces the old `Op` in the cloned
    node, and then a custom dispatch implementation is created for the clone
    `Op` in `_get_measurable_outputs`.

    If `measurable_outputs_fn` isn't specified, a no-op is used; the result is
    a clone of `node` that will effectively be ignored by
    `factorized_joint_logprob`.

    Parameters
    ----------
    node
        The node to recreate with a new cloned `Op`.
    measurable_outputs_fn
        The function that will be assigned to the new cloned `Op` in the
        `_get_measurable_outputs` dispatcher.
        The default is a no-op function (i.e. no measurable outputs)
    type_prefix
        The prefix used for the new type's name.
        The default is ``"Unmeasurable"``, which matches the default
        ``"measurable_outputs_fn"``.

    Returns
    -------
    Apply
        A clone of ``node`` whose `Op` has the new unmeasurable type, or the
        original ``node`` if its `Op` type was already dispatched with the
        same function.
    """
    new_node = node.clone()
    op_type = type(new_node.op)
    # If this Op type has already been made unmeasurable, only accept a
    # repeat call with the *same* outputs function; conflicting functions
    # for one type are an error.
    if op_type in _get_measurable_outputs.registry.keys() and isinstance(op_type, UnmeasurableMeta):
        if _get_measurable_outputs.registry[op_type] != measurable_outputs_fn:
            raise ValueError(
                f"The type {op_type.__name__} with hash value {hash(op_type)} "
                "has already been dispatched a measurable outputs function."
            )
        return node
    # Build a one-off subclass of the original Op type mixed with
    # UnmeasurableVariable; id_obj gives the new type its identity
    # (see UnmeasurableMeta.__eq__/__hash__).
    new_op_dict = op_type.__dict__.copy()
    new_op_dict["id_obj"] = (new_node.op, measurable_outputs_fn)
    # Remember the pre-clone type so it can be recovered later if needed.
    new_op_dict.setdefault("original_op_type", op_type)
    new_op_type = type(
        f"{type_prefix}{op_type.__name__}", (op_type, UnmeasurableVariable), new_op_dict
    )
    # Re-class a shallow copy of the Op in place rather than constructing a
    # new instance (avoids re-running __init__).
    new_node.op = copy(new_node.op)
    new_node.op.__class__ = new_op_type
    _get_measurable_outputs.register(new_op_type)(measurable_outputs_fn)
    return new_node
class MeasurableElemwise(Elemwise):
    """Base class for Measurable Elemwise variables"""

    # Subclasses restrict which scalar `Op`s they may wrap.
    valid_scalar_types: Tuple[MetaType, ...] = ()

    def __init__(self, scalar_op, *args, **kwargs):
        # Reject scalar ops that this measurable wrapper cannot represent.
        if not isinstance(scalar_op, self.valid_scalar_types):
            raise TypeError(
                f"scalar_op {scalar_op} is not valid for class {self.__class__}. "
                f"Acceptable types are {self.valid_scalar_types}"
            )
        super().__init__(scalar_op, *args, **kwargs)
MeasurableVariable.register(MeasurableElemwise)
|
18,518 | adb9e0c7b6b70b87bde3cd5e4c0f58ffe1967055 | import os
import argparse
import soccer
import filter
from os import listdir
from os.path import isfile, join, exists
import pandas
import cv2
import pickle
import numpy as np
import matplotlib
import plotly as py
import utils.draw as draw
import utils.camera as cam_utils
from tqdm import tqdm
import math
import sys
import glog
# Print full (untruncated) matrices when dumping numpy arrays for debugging.
np.set_printoptions(threshold=sys.maxsize)
number_of_frames = 215 # set manually or take the n_frames value from the Soccer class
number_of_keypoints = 18  # keypoints per skeleton (matches init_3d_players)
number_of_cameras = 2     # cameras fused together (see get_actual_z_vector)
################################################################################
# run: python3 demo/fusion.py --path_to_data ~/path/to/data
################################################################################
# Command-line arguments
parser = argparse.ArgumentParser(description='Estimate the poses')
# --path_to_data: root directory containing the per-camera image folders
parser.add_argument('--path_to_data', default='~/path/to/data/', help='path')
opt, _ = parser.parse_known_args()
################################################################################
# initialization of the data
################################################################################
# initialize databases for all cameras and load the data (COCO)
# modify for an other dataset
def init_soccerdata(mylist):
    """Initialize the SoccerVideo databases for the requested cameras.

    Parameters
    ----------
    mylist : collection of int
        Camera indices to load: 0 -> 'K1', 1 -> 'K8', 2 -> 'K9',
        3 -> 'Left', 4 -> 'Right'.

    Returns
    -------
    dict
        Maps each requested camera index to an initialized
        ``soccer.SoccerVideo`` with metadata digested and poses refined.

    The original implementation duplicated the same four-statement setup
    in five copy-paste branches; this version is table-driven.
    """
    # Camera index -> sub-directory name under the data root.
    camera_names = {0: 'K1', 1: 'K8', 2: 'K9', 3: 'Left', 4: 'Right'}
    data_dict = {}
    for index, name in camera_names.items():
        if index not in mylist:
            continue
        db = soccer.SoccerVideo(os.path.join(opt.path_to_data, name))
        db.name = name
        db.digest_metadata()
        # Same refinement thresholds for every camera, as in the original
        # per-camera branches.
        db.refine_poses(keypoint_thresh=7, score_thresh=0.4, neck_thresh=0.4)
        data_dict[index] = db
    return data_dict
# initialize the csv data
def init_csv():
    """Load the per-player GPS tracking CSVs for both teams.

    Returns
    -------
    list of pandas.DataFrame
        22 tracks (indices 0-10: Denmark, 11-21: Switzerland), each with
        columns ['time', 'x', 'y'] and 505 samples. The y column is shifted
        by -33.5 and mirrored so it matches the pitch coordinates used by
        the 3D skeleton placement.

    Notes
    -----
    The original repeated the identical ``read_csv`` call 22 times, and also
    loaded 'Ball.csv' only to compute its extent (W/H) without ever using
    it; the duplication and the dead ball I/O were removed.
    """
    denmark_dir = '/home/bunert/Data/Smash/Switzerland_-_Denmark_Fitness_RAW_data_Denmark/lib/approximation_data/'
    swiss_dir = '/home/bunert/Data/Smash/Switzerland_-_Denmark_Fitness_RAW_data_Switzerland/lib/approximation_data/'
    # Order matters: list position defines the player index used elsewhere.
    denmark_files = [
        'Schmeichel_Kasper.csv',                  # 0  - Dan_1
        'Kjar_Simon.csv',                         # 1  - Dan_4
        'Delaney_Thomas.csv',                     # 2  - Dan_8
        'Jorgensen_Nicolai.csv',                  # 3  - Dan_9
        'Christian_Eriksen.csv',                  # 4  - Dan_10
        'Braithwaite_Christensen_Martin.csv',     # 5  - Dan_11
        'Zanka.csv',                              # 6  - Dan_13
        'Dalsgaard_Henrik.csv',                   # 7  - Dan_14
        'Stryger-Larsen_Jens.csv',                # 8  - Dan_17
        'Schone_Lasse.csv',                       # 9  - Dan_19
        'Poulsen_Yussuf.csv',                     # 10 - Dan_20
    ]
    swiss_files = [
        'Sommer_Yann.csv',                        # 11 - CH_1
        'Elvedi_Nico.csv',                        # 12 - CH_4
        'Akanji_Manuel_Obafemi.csv',              # 13 - CH_5
        'Embolo_Breel-Donald.csv',                # 14 - CH_7
        'Freuler_Remo.csv',                       # 15 - CH_8
        'Ajeti_Albian.csv',                       # 16 - CH_9
        'Xhaka_Granit.csv',                       # 17 - CH_10
        'Rodriguez_Ricardo.csv',                  # 18 - CH_13
        'Zuber_Steven.csv',                       # 19 - CH_14
        'Zakaria_Denis.csv',                      # 20 - CH_17
        'Mbabu_Kevin.csv',                        # 21 - CH_23
    ]
    players = []
    for directory, filenames in ((denmark_dir, denmark_files), (swiss_dir, swiss_files)):
        for filename in filenames:
            players.append(pandas.read_csv(directory + filename,
                sep = ';', decimal=",", skiprows=5, usecols=[0,1,2], nrows=505, names=['time', 'x', 'y']))
    # Shift the pitch origin and mirror the y axis to match the 3D field
    # coordinates used by init_3d_players.
    for player in players:
        player['y'] -= 33.5
        # player['x'] *= 1.03693  # (disabled x rescaling kept from the original)
        player['y'] *= -1
    return players
# Hardcoded uniform skeletons
def init_3d_players(x, z, alpha):
    """Build a hard-coded 18-keypoint uniform skeleton on the pitch.

    The template skeleton stands at the origin facing the +x direction; it
    is rotated by ``alpha`` degrees about the vertical (y) axis and then
    translated to ``(x, z)`` on the ground plane.

    Returns an 18x3 ``numpy.matrix`` of (x, y, z) coordinates in metres.
    (Field reference: W/X = 104.73, H/Y = 67.74 metres.)
    """
    # Keypoint template: index -> (x, y, z) before rotation/translation.
    template = [
        [0.0, 1.6875, 0.0],      # 0:  neck
        [0.0, 1.5075, 0.0],      # 1:  middle shoulder
        [0.0, 1.5075, 0.15],     # 2:  right shoulder
        [0.0, 1.125, 0.325],     # 3:  right elbow
        [0.2, 0.8, 0.3],         # 4:  right hand
        [0.0, 1.5075, -0.15],    # 5:  left shoulder
        [0.0, 1.125, -0.325],    # 6:  left elbow
        [0.2, 0.8, -0.3],        # 7:  left hand
        [0.0, 0.9, 0.15],        # 8:  right hip
        [0.0, 0.45, 0.2],        # 9:  right knee
        [0.0, 0.0, 0.25],        # 10: right foot
        [0.0, 0.9, -0.15],       # 11: left hip
        [0.0, 0.45, -0.2],       # 12: left knee
        [0.0, 0.0, -0.25],       # 13: left foot
        [0.0, 1.7375, 0.075],    # 14: right head
        [0.0, 1.7375, -0.075],   # 15: left head
        [0.0, 1.6875, 0.075],    # 16: right ear
        [0.0, 1.6875, -0.075],   # 17: left ear
    ]
    skeleton = np.asmatrix(template)
    # Rotation about the vertical (y) axis by alpha degrees.
    theta = np.radians(alpha)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rotation = np.matrix([[cos_t, 0., sin_t], [0., 1., 0.], [-sin_t, 0., cos_t]])
    skeleton = rotation.dot(skeleton.T).T
    # Translate onto the requested pitch position.
    skeleton[:, 0] = skeleton[:, 0] + x
    skeleton[:, 2] = skeleton[:, 2] + z
    return skeleton
# project each csv player on the field
def init_all_3d_players(csv_players, frame):
    """Place every CSV-tracked player on the field for the given frame.

    Players 0-10 (Denmark) face 180 degrees, players 11-21 (Switzerland)
    face 0 degrees, according to the two teams' viewing directions.

    Returns a dict mapping player index -> 18x3 skeleton matrix.
    """
    players = {}
    for idx, track in enumerate(csv_players):
        heading = 180 if idx <= 10 else 0
        row = track.iloc[frame]
        players[idx] = init_3d_players(row[1], row[2], heading)
    return players
# Project all players to screen coordinates for the camera db_cam
def project_players_2D(db_cam, players_3d, frame):
    """Project all 3D player skeletons into one camera's image plane.

    Parameters
    ----------
    db_cam : soccer.SoccerVideo
        Camera database providing per-frame calibration.
    players_3d : dict
        Player index -> matrix of 3D keypoints (see init_3d_players).
    frame : int
        Frame index into ``db_cam.frame_basenames``.

    Returns
    -------
    dict
        Player index -> list of projected 2D keypoints (one entry per
        3D keypoint; points behind the camera are sign-flipped).

    Fix: the original also built an unused colormap and decoded the full
    video frame (``db_cam.get_frame``) without ever using it; that dead
    work was removed.
    """
    frame_name = db_cam.frame_basenames[frame]
    camera = cam_utils.Camera("Cam", db_cam.calib[frame_name]['A'], db_cam.calib[frame_name]['R'], db_cam.calib[frame_name]['T'], db_cam.shape[0], db_cam.shape[1])
    players_2d = {}
    for k in players_3d:
        points2d = []
        for i in range(len(players_3d[k])):
            tmp, depth = camera.project(players_3d[k][i])
            # Mirror points that project from behind the camera.
            behind_points = (depth < 0).nonzero()[0]
            tmp[behind_points, :] *= -1
            points2d.append(tmp)
        players_2d[k] = points2d
    return players_2d
# Get all poses from openpose for a specific frame
def get_actual_2D_keypoints(data_dict, frame):
    """Collect the OpenPose detections for ``frame`` from every camera.

    Returns a dict mapping camera index -> that camera's poses for the frame.
    """
    keypoints_by_camera = {}
    for cam_idx, db in data_dict.items():
        frame_name = db.frame_basenames[frame]
        keypoints_by_camera[cam_idx] = db.poses[frame_name]
    return keypoints_by_camera
# Return dictionary for all cameras, with all the poses (players_3d projected on the camera screen)
def project_players_allCameras_2D(data_dict, players_3d, frame):
    """Project the 3D skeletons into every camera's image plane.

    Returns a dict mapping camera index -> {player index: 2D keypoints}.
    """
    return {
        cam_idx: project_players_2D(db, players_3d, frame)
        for cam_idx, db in data_dict.items()
    }
# return nearest player of openpose poses to all the projected players
def nearest_player(keypoints, projected_players_2d):
    """Find the projected player closest to an OpenPose detection.

    Distance is the mean Euclidean distance over all keypoints with a
    non-zero confidence.

    Parameters
    ----------
    keypoints : sequence
        One OpenPose pose: [x, y, confidence] per keypoint.
    projected_players_2d : dict
        Player number -> projected 2D keypoints, where entry [k][0] holds
        the (x, y) of keypoint k.

    Returns
    -------
    (number, keypoints)
        ``number`` is the best-matching player, or ``False`` when nothing
        could be scored (empty dict, or no confident keypoints).

    Fixes vs the original: the running ``distance`` was never reset between
    candidate players, so earlier candidates' (averaged) distances leaked
    into later ones and biased the match toward the first entries; it also
    divided by zero when a pose had no confident keypoints, and left
    ``pose`` unbound for an empty candidate dict.
    """
    minimal_distance = sys.float_info.max
    number = False
    for candidate, projected in projected_players_2d.items():
        confident = 0
        total = 0.
        for k in range(len(keypoints)):
            if keypoints[k][2] != 0:
                confident += 1
                x1, y1 = keypoints[k][0], keypoints[k][1]
                x2, y2 = projected[k][0][0], projected[k][0][1]
                total += math.sqrt(math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2))
        if confident == 0:
            # No scorable keypoints against this candidate.
            continue
        mean_distance = total / confident
        if mean_distance < minimal_distance:
            minimal_distance = mean_distance
            number = candidate
    return number, keypoints
# Return dictionary for all cameras, with all the poses as tuples with corresponding player numbering
def assign_player_to_poses(projected_players_2d_dict, keypoint_dict):
    """Match every OpenPose detection to its nearest projected player, per camera.

    Returns a dict mapping camera index -> {player number: pose}. If two
    detections resolve to the same player, the later detection overwrites
    the earlier one (unchanged from the original behavior).
    """
    players_2d_dict = {}
    for cam_idx, detections in keypoint_dict.items():
        assignments = {}
        for det_idx in range(len(detections)):
            number, pose = nearest_player(detections[det_idx], projected_players_2d_dict[cam_idx])
            assignments[number] = pose
        players_2d_dict[cam_idx] = assignments
    return players_2d_dict
# Dump a video with the poses of the uniform skeletons
def dump_csv_video_poses(data_dict, csv_players, vidtype, fps=25.0, scale=1, mot_tracks=None, one_color=True):
    """Render, per camera, a video with the uniform CSV-derived skeletons drawn on each frame.

    Parameters
    ----------
    data_dict : dict
        Camera index -> soccer.SoccerVideo (see init_soccerdata).
    csv_players : list of pandas.DataFrame
        Per-player tracks (see init_csv).
    vidtype : str
        Must be 'test'; used as the output-file suffix.
    fps, scale : float, int
        Output frame rate and downscale factor.
    mot_tracks, one_color : unused here; kept for interface compatibility.

    Fixes vs the original: the ``vidtype == 'kalman'`` drawing branch was
    unreachable (``vidtype`` is restricted to 'test' above), the unused
    ``font``/``n_tracks`` locals were dropped, and the exception message
    typo ('Uknown') was corrected. NOTE(review): ``one_color`` is ignored —
    the draw call hardcodes ``one_color=True`` as in the original.
    """
    if vidtype not in ['test']:
        raise Exception('Unknown video format')
    glog.info('Dumping {0} video'.format(vidtype))
    for i in data_dict:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MP4V
        out_file = join('/home/bunert/Data/results', data_dict[i].name + '_{0}.mp4'.format(vidtype))
        out = cv2.VideoWriter(out_file, fourcc, fps,
                              (data_dict[i].shape[1] // scale, data_dict[i].shape[0] // scale))
        cmap = matplotlib.cm.get_cmap('hsv')
        for frame, basename in enumerate(tqdm(data_dict[i].frame_basenames)):
            # NOTE(review): this projects the players for *all* cameras on
            # every camera's pass; projecting only camera i would be cheaper.
            players_3d = init_all_3d_players(csv_players, frame)
            projected_players_2d_dict = project_players_allCameras_2D(data_dict, players_3d, frame)
            img = data_dict[i].get_frame(frame, dtype=np.uint8)
            for k in range(len(projected_players_2d_dict[i])):
                draw.draw_skeleton_on_image_2dposes(img, projected_players_2d_dict[i][k], cmap, one_color=True)
            img = cv2.resize(img, (data_dict[i].shape[1] // scale, data_dict[i].shape[0] // scale))
            # OpenCV expects BGR; swap the channel order before writing.
            out.write(np.uint8(img[:, :, (2, 1, 0)]))
        # Release everything once this camera's video is finished.
        out.release()
        cv2.destroyAllWindows()
# Dump a video with the posses from all_players_3d_array for one KF
def dump_video_poses(data_dict, all_players_3d, vidtype, fps=25.0, scale=1, mot_tracks=None, one_color=True):
    """Render, per camera, a video with the Kalman-filtered skeleton states drawn on each frame.

    Parameters
    ----------
    data_dict : dict
        Camera index -> soccer.SoccerVideo.
    all_players_3d : sequence
        One filtered state per frame, converted via state_to_player_3d_dict.
    vidtype : str
        Must be 'kalman'; used as the output-file suffix.
    fps, scale : float, int
        Output frame rate and downscale factor.
    mot_tracks, one_color : unused here; kept for interface compatibility.

    Fixes vs the original: the ``vidtype == 'test'`` drawing branch was
    unreachable (``vidtype`` is restricted to 'kalman' above), the unused
    ``font``/``n_tracks`` locals were dropped, and the exception message
    typo ('Uknown') was corrected. NOTE(review): ``one_color`` is ignored —
    the draw call hardcodes ``one_color=True`` as in the original.
    """
    if vidtype not in ['kalman']:
        raise Exception('Unknown video format')
    glog.info('Dumping {0} video'.format(vidtype))
    for i in data_dict:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MP4V
        out_file = join('/home/bunert/Data/results', data_dict[i].name + '_{0}.mp4'.format(vidtype))
        out = cv2.VideoWriter(out_file, fourcc, fps,
                              (data_dict[i].shape[1] // scale, data_dict[i].shape[0] // scale))
        cmap = matplotlib.cm.get_cmap('hsv')
        for frame, basename in enumerate(tqdm(data_dict[i].frame_basenames)):
            if frame >= len(all_players_3d):
                # Fewer filtered states than video frames; stop here.
                break
            players_3d_dict = state_to_player_3d_dict(all_players_3d[frame])
            projected_players_2d_dict = project_players_allCameras_2D(data_dict, players_3d_dict, frame)
            img = data_dict[i].get_frame(frame, dtype=np.uint8)
            for k in range(len(projected_players_2d_dict[i])):
                draw.draw_skeleton_on_image_2dposes(img, projected_players_2d_dict[i][k], cmap, one_color=True)
            img = cv2.resize(img, (data_dict[i].shape[1] // scale, data_dict[i].shape[0] // scale))
            # OpenCV expects BGR; swap the channel order before writing.
            out.write(np.uint8(img[:, :, (2, 1, 0)]))
        # Release everything once this camera's video is finished.
        out.release()
        cv2.destroyAllWindows()
# Dump a video with the posses from all_players_3d_array for multiple KF
def dump_video_multiple_poses(data_dict, all_players_3d_array, vidtype, rq, fps=25.0, scale=1, mot_tracks=None, one_color=True):
    """Render, per camera, a video comparing multiple Kalman-filter runs.

    Each element of ``all_players_3d_array`` is one filter's per-frame state
    sequence. With a single run the skeletons are drawn with the HSV
    colormap; with several runs each gets a fixed color and decreasing line
    width. ``rq`` holds the R/Q parameters per run for the on-screen legend.

    NOTE(review): ``vidtype`` must be 'kalman', so the inner
    ``if vidtype == 'kalman'`` checks always hold; ``one_color`` and
    ``mot_tracks``/``n_tracks`` are effectively unused; and the legend
    supports at most two runs (listOfColors has two entries).
    """
    if vidtype not in ['kalman']:
        raise Exception('Uknown video format')
    glog.info('Dumping {0} video'.format(vidtype))
    # All runs are assumed to have the same length; use the first as limit.
    length = len(all_players_3d_array[0])
    for i in data_dict:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v') # MP4V
        out_file = join('/home/bunert/Data/results', data_dict[i].name +'_{0}.mp4'.format(vidtype))
        out = cv2.VideoWriter(out_file, fourcc, fps,
                              (data_dict[i].shape[1] // scale, data_dict[i].shape[0] // scale))
        font = cv2.FONT_HERSHEY_SIMPLEX
        cmap = matplotlib.cm.get_cmap('hsv')
        if mot_tracks is not None:
            n_tracks = max(np.unique(mot_tracks[:, 1]))
        # Per-run overlay colors: run 0 -> rgb(30, 30, 250) (blue-ish),
        # run 1 -> rgb(250, 10, 10) (red-ish).
        listOfColors = [(30, 30, 250), (250, 10, 10)]
        for frame, basename in enumerate(tqdm(data_dict[i].frame_basenames)):
            if (frame >= length):
                # Fewer filtered states than video frames; stop here.
                break
            img = data_dict[i].get_frame(frame, dtype=np.uint8)
            for j in range (len(all_players_3d_array)):
                players_3d_dict = state_to_player_3d_dict(all_players_3d_array[j][frame])
                projected_players_2d_dict = project_players_allCameras_2D(data_dict, players_3d_dict, frame)
                if (len(all_players_3d_array)==1):
                    if vidtype == 'kalman':
                        # Single run: colormap-colored skeletons.
                        for k in range (len(projected_players_2d_dict[i])):
                            draw.draw_skeleton_on_image_2dposes(img, projected_players_2d_dict[i][k], cmap, one_color=True)
                        # draw_skeleton_on_image_2dposes(img, poses, cmap_fun, one_color=False, pose_color=None)
                else:
                    if vidtype == 'kalman':
                        # Multiple runs: fixed color per run, thinner lines
                        # for later runs (width 5 - 3*j).
                        for k in range (len(projected_players_2d_dict[i])):
                            draw.draw_skeleton_on_image_2dposes_color(img, projected_players_2d_dict[i][k], listOfColors[j], 5-3*j)
            # Legend overlay: frame counter plus R/Q parameters per run.
            font = cv2.FONT_HERSHEY_SIMPLEX
            fontScale = 1
            fontColor = (255,255,255)
            lineType = 2
            # NOTE(review): `lineType` lands in cv2.putText's *thickness*
            # argument position — confirm this is intentional.
            cv2.putText(img,'frame={0}'.format(frame), (data_dict[i].shape[1] -450 ,50), font, fontScale,fontColor, lineType)
            cv2.putText(img,'Poses1: R={0}, Q={1}'.format(rq[0], rq[1]), (data_dict[i].shape[1] -450 ,100), font, fontScale,listOfColors[0], lineType)
            if (len(rq) > 3):
                cv2.putText(img,'Poses2: R={0}, Q={1}'.format(rq[2], rq[3]), (data_dict[i].shape[1] -450 ,150), font, fontScale,listOfColors[1], lineType)
            img = cv2.resize(img, (data_dict[i].shape[1] // scale, data_dict[i].shape[0] // scale))
            out.write(np.uint8(img[:, :, (2, 1, 0)]))
        # Release everything if job is finished
        out.release()
        cv2.destroyAllWindows()
# Get the actual z vector for the KF -> workaround to get all poses
def get_actual_z_vector(data_dict, players_2d_dict, player_number, projected_new_z, kalman_dict, frame):
    """Assemble the stacked 2D measurement vector z for one player.

    For every camera, each of the `number_of_keypoints` keypoints contributes
    an (x, y) pair at offset index*(2*number_of_cameras) + camera_offset.
    Preference order per keypoint:
      1. the OpenPose detection when its confidence is non-zero;
      2. a value reconstructed from neighbouring joints (head / shoulders);
      3. the measurement carried over from the previous frame (frame > 0);
      4. the projection of the Kalman prediction (`projected_new_z`).

    Fix: the original "both shoulders missing" branch contained only a
    comment, which is a SyntaxError in Python — it now holds an explicit
    `pass`.
    """
    actual_z = np.zeros((number_of_keypoints * (2*number_of_cameras),1))
    for camera in data_dict:
        # Per-camera column offset inside each keypoint's slot group.
        # NOTE(review): camera ids appear to be 1-based when exactly two
        # cameras are used and 0-based otherwise — confirm against the
        # camera initialisation code.
        if (number_of_cameras==2):
            camera_offset = (camera-1)*2
        else:
            camera_offset = (camera)*2
        # openpose keypoint available for this player in this camera
        if player_number in players_2d_dict[camera].keys():
            head_x, head_y = players_2d_dict[camera][player_number][0,0], players_2d_dict[camera][player_number][0,1]
            left_shoulder_x, left_shoulder_y, left_shoulder_conf = players_2d_dict[camera][player_number][5,0], players_2d_dict[camera][player_number][5,1], players_2d_dict[camera][player_number][5,2]
            right_shoulder_x, right_shoulder_y, right_shoulder_conf = players_2d_dict[camera][player_number][2,0], players_2d_dict[camera][player_number][2,1], players_2d_dict[camera][player_number][2,2]
            neck_x, neck_y, neck_conf = players_2d_dict[camera][player_number][1,0], players_2d_dict[camera][player_number][1,1], players_2d_dict[camera][player_number][1,2]
            # set the missing shoulder keypoint by mirroring the visible one
            # around the neck
            if (left_shoulder_conf == 0 and right_shoulder_conf == 0):
                # both shoulders missing: nothing to mirror from
                pass
            elif (left_shoulder_conf == 0):
                if (neck_x >= right_shoulder_x):
                    left_shoulder_x = neck_x + (neck_x -right_shoulder_x)
                else:
                    left_shoulder_x = neck_x - (right_shoulder_x - neck_x)
                if (neck_y >= right_shoulder_y):
                    left_shoulder_y = neck_y + (neck_y - right_shoulder_y)
                else:
                    left_shoulder_y = neck_y - (right_shoulder_y - neck_y)
            elif (right_shoulder_conf == 0):
                if (neck_x >= left_shoulder_x):
                    right_shoulder_x = neck_x + (neck_x - left_shoulder_x)
                else:
                    right_shoulder_x = neck_x - (left_shoulder_x - neck_x)
                if (neck_y >= left_shoulder_y):
                    right_shoulder_y = neck_y + (neck_y - left_shoulder_y)
                else:
                    right_shoulder_y = neck_y - (left_shoulder_y - neck_y)
            for index in range (number_of_keypoints):
                actual_index = index*(2*number_of_cameras) + camera_offset
                if (players_2d_dict[camera][player_number][index,2] != 0): # confidence != 0
                    # trust the OpenPose detection directly
                    actual_z[actual_index] = players_2d_dict[camera][player_number][index, 0]
                    actual_z[actual_index+1] = players_2d_dict[camera][player_number][index, 1]
                else: #confidence == 0
                    # NOTE(review): this branch runs when the *neck*
                    # confidence is zero yet reconstructs positions from the
                    # neck coordinates — looks suspicious; confirm whether
                    # `neck_conf != 0` was intended.
                    if (neck_conf == 0):
                        # Head keypoint: estimate head-to-neck distance from
                        # hips (4/10 of torso) or hands (3/10), fallback 10 px
                        if (index == 0):
                            right_hip_x, right_hip_y, right_hip_conf = players_2d_dict[camera][player_number][8,0], players_2d_dict[camera][player_number][8,1], players_2d_dict[camera][player_number][8,2]
                            left_hip_x, left_hip_y, left_hip_conf = players_2d_dict[camera][player_number][11,0], players_2d_dict[camera][player_number][11,1], players_2d_dict[camera][player_number][11,2]
                            right_hand_x, right_hand_y, right_hand_conf = players_2d_dict[camera][player_number][4,0], players_2d_dict[camera][player_number][4,1], players_2d_dict[camera][player_number][4,2]
                            left_hand_x, left_hand_y, left_hand_conf = players_2d_dict[camera][player_number][7,0], players_2d_dict[camera][player_number][7,1], players_2d_dict[camera][player_number][7,2]
                            if (right_hip_conf != 0):
                                dist = math.sqrt(math.pow(neck_x-right_hip_x,2)+ math.pow(neck_y-right_hip_y,2))
                                dist = 4./10. * dist
                            elif (left_hip_conf != 0):
                                dist = math.sqrt(math.pow(neck_x-left_hip_x,2)+ math.pow(neck_y-left_hip_y,2))
                                dist = 4./10. * dist
                            elif (right_hand_conf != 0):
                                dist = math.sqrt(math.pow(neck_x-right_hand_x,2)+ math.pow(neck_y-right_hand_y,2))
                                dist = 3./10. * dist
                            elif (left_hand_conf != 0):
                                dist = math.sqrt(math.pow(neck_x-left_hand_x,2)+ math.pow(neck_y-left_hand_y,2))
                                dist = 3./10. * dist
                            else:
                                # no reference joint available: fixed offset
                                dist = 10
                            actual_z[actual_index] = neck_x
                            head_x = neck_x
                            actual_z[actual_index+1] = neck_y - dist
                            head_y = neck_y - dist
                        # Shoulders were already reconstructed above
                        elif (index == 2):
                            actual_z[actual_index] = right_shoulder_x
                            actual_z[actual_index+1] = right_shoulder_y
                        elif (index == 5):
                            actual_z[actual_index] = left_shoulder_x
                            actual_z[actual_index+1] = left_shoulder_y
                    # if keypoint 14,15,16,17 (head) take hardcoded values (proportional to shoulder)
                    elif (index in [14,15,16,17] and (left_shoulder_conf != 0 or right_shoulder_conf != 0)):
                        if (index == 14):
                            if (right_shoulder_x <= head_x):
                                actual_z[actual_index] = head_x + ((head_x - right_shoulder_x)/10.)
                            else:
                                actual_z[actual_index] = head_x - ((right_shoulder_x - head_x)/10.)
                            actual_z[actual_index+1] = head_y - (head_y - right_shoulder_y)/10.
                        elif (index == 15):
                            if (left_shoulder_x <= head_x):
                                actual_z[actual_index] = head_x - ((head_x - left_shoulder_x)/10.)
                            else:
                                actual_z[actual_index] = head_x + ((left_shoulder_x - head_x)/10.)
                            actual_z[actual_index+1] = head_y - (head_y - left_shoulder_y)/10.
                        elif (index == 16):
                            actual_z[actual_index] = (head_x + right_shoulder_x)/2.
                            actual_z[actual_index+1] = head_y
                        elif (index == 17):
                            actual_z[actual_index] = (head_x + left_shoulder_x)/2.
                            actual_z[actual_index+1] = head_y
                        else: # should not enter this section
                            print("Head point not updated for actual measurement vector.")
                            exit()
                    else: # confidence == 0 but not a head point or shoulder&head also not found
                        if (frame > 0):
                            # reuse last frame's measurement
                            actual_z[actual_index] = kalman_dict.filter.z[actual_index]
                            actual_z[actual_index+1] = kalman_dict.filter.z[actual_index+1]
                        else:
                            # first frame: fall back to the predicted keypoint
                            actual_z[actual_index] = projected_new_z[actual_index]
                            actual_z[actual_index+1] = projected_new_z[actual_index+1]
        # no openpose detection at all: use the projected prediction
        else:
            for index in range (number_of_keypoints):
                actual_index = index*(2*number_of_cameras) + camera_offset
                actual_z[actual_index] = projected_new_z[actual_index]
                actual_z[actual_index+1] = projected_new_z[actual_index+1]
    return actual_z
# Kalman state vectors -> per-player dict of 3D keypoint matrices
def state_to_player_3d_dict(all_players_3d):
    """Convert a list of per-player state vectors into {player_idx: matrix}.

    Each state vector stores 6 entries per keypoint; only the first three
    (x, y, z position) are extracted here.
    """
    players_3d = {}
    for player_idx, state in enumerate(all_players_3d):
        keypoints = [
            [state[kp * 6, 0], state[kp * 6 + 1, 0], state[kp * 6 + 2, 0]]
            for kp in range(number_of_keypoints)
        ]
        players_3d[player_idx] = np.asmatrix(keypoints)
    return players_3d
# Kalman Filter iterations:
def iterate_kalman(data_dict, csv_players, players_3d_dict, R, Q, number_of_iterations):
    """Run the per-player extended Kalman filter over the frame sequence.

    R, Q -- measurement-noise std and process-noise variance for the filter.
    Returns a list (one entry per processed frame) of lists of per-player
    state vectors, and pickles the result to a hardcoded path.
    """
    all_players_3d = []
    # prepare a kalman filter for every player
    R_std = R
    Q_var = Q
    kalman_dict = {}
    for i in players_3d_dict:
        # 18 keypoints per player; `filter` is the project's Kalman module
        kalman_dict.update({i:filter.Kalman(number_of_cameras, 18, R_std=R_std, Q_var=Q_var)})
        kalman_dict[i].initialize_state(players_3d_dict[i])
    for actual_frame in range (number_of_frames):
        # stop after the requested number of iterations
        if (actual_frame > number_of_iterations):
            break
        print("\n Kalman iteration for frame: ", actual_frame)
        # get dict for every player with the 3D points
        actual_players_3d_dict = init_all_3d_players(csv_players, actual_frame)
        # dictionary for every camera, project the 3D players into camera coordinates
        projected_players_2d_dict = project_players_allCameras_2D(data_dict, actual_players_3d_dict, actual_frame)
        # unordered openpose keypoints
        keypoint_dict = get_actual_2D_keypoints(data_dict, actual_frame)
        # assign the unordered openposes to player numbers (smallest distance)
        players_2d_dict = assign_player_to_poses(projected_players_2d_dict, keypoint_dict)
        players = []
        for player_number in kalman_dict:
            # predict next state
            new_state = kalman_dict[player_number].predict()
            # project the new state vector x into measurement space (2D)
            projected_new_z = kalman_dict[player_number].Hx(new_state, data_dict)
            # get actual measurement vector from openpose or, if unavailable, from the prediction
            actual_z = get_actual_z_vector(data_dict, players_2d_dict, player_number, projected_new_z, kalman_dict[player_number], actual_frame)
            # update the EKF with the assembled measurement
            kalman_dict[player_number].update(actual_z, data_dict)
            # add state vector x to player list
            players.append(kalman_dict[player_number].filter.x)
        # add all state vectors for one iteration to the list
        all_players_3d.append(players)
    # persist the result; NOTE(review): destination path is machine-specific
    store_data = join('/home/bunert/Data/results/', 'R={0}_Q={1}_data.p'.format(R_std,Q_var))
    with open(store_data, 'wb') as f:
        pickle.dump(all_players_3d, f)
    return all_players_3d
################################################################################
# Main Function
################################################################################
def main():
    """Load camera + CSV data, run one EKF pass, optionally dump a video."""
    # Read camera data (cameras 1 and 2 only)
    data_dict = init_soccerdata([1,2]) #([0,1,2])
    # Read data from csv files
    csv_players = init_csv()
    # dict of all players in 3D (0-10 Denmark, 11-21 Switzerland)
    players_3d_dict = init_all_3d_players(csv_players, 0)
    # values for R_std and Q_var for the first EKF
    r1, q1 = .1, .5
    # values for R_std and Q_var for the second EKF (currently unused)
    r2, q2 = .1, .5
    # number of frames to iterate
    number_of_iterations = 50
    test1 = iterate_kalman(data_dict, csv_players, players_3d_dict, r1, q1, number_of_iterations)
    #test2 = iterate_kalman(data_dict, csv_players, players_3d_dict, r2, q2, number_of_iterations)
    all_players_3d_array = [test1]#, test2]
    rq = [r1, q1]#, r2, q2]
    # to load previously pickled data instead of re-running the filter:
    # store_data = join('/home/bunert/Data/results/', 'Two.p')
    # with open(store_data, 'rb') as f:
    # test1 = pickle.load(f)
    # store_data = join('/home/bunert/Data/results/', 'Three.p')
    # with open(store_data, 'rb') as f:
    # test2 = pickle.load(f)
    # all_players_3d_array = [test1,test2]
    # dump video with the state vectors
    # dump_video_poses(data_dict, test1, 'kalman', fps=2.0)
    # dump_video_multiple_poses(data_dict, all_players_3d_array, 'kalman', rq=rq, fps=5.0)
if __name__ == "__main__":
    main()
|
18,519 | 6b8c673159a666effda3791da60c911a0b42fd36 | import pickle,socket,binascii
from pip._vendor.colorama import Fore
from counter.counter import encryption, decryption
msg=[]
m=[]
soc2 = socket.socket()
soc2.bind(('127.0.0.1',5001))
soc2.connect(('127.0.0.1',5000))
def sock():
    """Entry point: run the blocking receive/send chat loop."""
    loop()
def encrypt():
    """Prompt for a message, encrypt it, and send ciphertext + metadata."""
    Input = input('enter message to bob :')
    # NOTE(review): this `msg` is a local that shadows the module-level
    # `msg` list; encryption() presumably returns a 3-tuple
    # (ciphertext, aux1, aux2) — confirm in counter.counter.
    msg =encryption((Input))
    print(Fore.RED,"Ciphertext is : ", binascii.hexlify(msg[0]))
    # Send the two auxiliary values pickled, after the raw ciphertext.
    m.append(msg[1])
    m.append(msg[2])
    data = pickle.dumps(m)
    soc2.send(msg[0])
    soc2.send(data)
def decrypt():
    """Receive ciphertext + pickled metadata from the peer, print plaintext."""
    # Reset the shared buffers before the next receive/send round.
    m.clear()
    msg.clear()
    response1 = soc2.recv(1024)
    response2 = soc2.recv(1024)
    # SECURITY: pickle.loads on data received over a socket can execute
    # arbitrary code — only acceptable if the peer is fully trusted.
    resp2 = pickle.loads(response2)
    message = decryption(response1, resp2[0], resp2[1])
    print(Fore.WHITE+'Message from Bob : ', str(message, 'utf-8') ,"(decrypted text)")
def loop():
    """Alternate forever: wait for Bob's message first, then send a reply."""
    while True:
        decrypt()
        encrypt()
print(Fore.BLUE+ "Welcome to the chat ")
sock() |
18,520 | a429692d6b330bfbdb654bcd1ada86bd30ecd562 | # -*- coding:utf-8 -*-
# 判断某数组是不是二叉搜索树后序遍历的结果
class Solution:
    """Check whether a sequence is a valid post-order traversal of a BST."""

    # In a BST post-order sequence the root is last; everything before the
    # first element greater than the root must be the left subtree (< root)
    # and everything from there on must be the right subtree (> root).
    def VerifySquenceOfBST(self, sequence):
        if not sequence:
            # Empty input is treated as invalid (matches the original).
            return False
        if len(sequence) == 1:
            return True
        root = sequence[-1]
        # Find where the right subtree starts (-1 if there is none).
        split = -1
        for idx, value in enumerate(sequence[:-1]):
            if value > root:
                split = idx
                break
        # Every element of the right-subtree region must exceed the root.
        if split >= 0 and any(v < root for v in sequence[split:-1]):
            return False
        if split <= 0:
            # Only a left subtree (split == -1) or only a right one (split == 0).
            return self.VerifySquenceOfBST(sequence[:-1])
        return (self.VerifySquenceOfBST(sequence[:split])
                and self.VerifySquenceOfBST(sequence[split:]))
if __name__ == '__main__':
s = Solution()
seq = [7,4,6,5]
print(s.VerifySquenceOfBST(seq))
|
18,521 | 544ce7befa0bddf497e49437be1c6f9c0ed053b4 | import cv2
from datetime import datetime, timedelta
import math
import matplotlib.pyplot as plt
from multiprocessing import Process
import starter as start
class recording_device():
    """Capture webcam frames for ~5 s and classify the final frame.

    `model_used` selects the classifier: 'CNN' loads a saved Keras CNN,
    anything else uses an Inception feature extractor plus a pre-trained
    classifier (see the `start` module).
    """
    def __init__(self, model_used):
        self.model_used = model_used

    def counter(self):
        """Busy-wait for ~5 s, printing each elapsed whole second."""
        now = datetime.now()
        then = datetime.now() + timedelta(seconds=5.05)
        fl = 0
        while(datetime.now() < then):
            sec = datetime.now() - now
            if sec.seconds != fl:
                print(fl)
                fl = sec.seconds

    def image_record(self):
        """Show the webcam feed for ~5 s, then keep the last frame.

        Stores the final frame resized to 64x64 and reshaped to
        (1, 3, 64, 64) in self.frame, and returns it.
        NOTE(review): reshape does NOT transpose HWC->CHW, it only relabels
        the buffer — confirm the models were trained with this layout.
        """
        then = datetime.now() + timedelta(seconds=5.05)
        cap = cv2.VideoCapture(0)
        while(datetime.now() < then):
            # Capture frame-by-frame
            ret, frame = cap.read()
            # NOTE(review): the JPEG encoding result is never used.
            ret, jpeg = cv2.imencode('.jpg', frame)
            # Display the resulting frame; 'q' ends recording early.
            cv2.imshow('',frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # When everything is done, release the capture device.
        cap.release()
        cv2.destroyAllWindows()
        frame = cv2.resize(frame, (64,64))
        self.frame = frame.reshape(1,3,64,64)
        return self.frame

    def modeler(self):
        """Load the classifier selected by self.model_used; returns it."""
        if self.model_used == 'CNN':
            self.model = start.open_CNN()
        else:
            self.network = start.incep_network()
            self.model = start.random_19()
            self.labels = start.labeler()
        return self.model

    def scaler(self):
        # NOTE(review): this assignment shadows the `scaler` method on the
        # instance after the first call — the method becomes uncallable.
        self.scaler = start.reload_scaler()

    def predict(self):
        """Record a frame and return the model's prediction for it.

        For the pre-trained path, returns (top-3 labels, probabilities).
        """
        # Bug fix: the original called the module-level `rd` instance here,
        # which breaks for any other instance — use self instead.
        self.image_record()
        if self.model_used == 'CNN':
            return start.cnn_predict(self.model, self.frame)
        else:
            self.frame = self.frame.reshape(1,64,64,3)
            flat_map = self.network.predict(self.frame).reshape(1,2048)
            pred_gen = self.model.predict_proba(flat_map)
            arr_sort = pred_gen.argsort()[0][-3:][::-1]
            return [self.labels[t] for t in arr_sort], pred_gen[0][arr_sort]
if __name__=="__main__":
rd = recording_device(model_used = 'Pre-Trained')
rd.modeler()
|
18,522 | 6e34d98edac3ee7ef1420a466b2d7c8b8ccc12c9 | from prefix_tree import TrieNode
# Word Search
class Solution(object):
    """Word Search: decide whether `word` can be traced on `board` by
    moving between 4-adjacent cells without reusing a cell."""

    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not board:
            return False
        rows, cols = len(board), len(board[0])
        # Try every cell as a starting point.
        return any(
            self.is_match(board, r, c, word, 0)
            for r in range(rows)
            for c in range(cols)
        )

    # Depth-first backtracking with in-place visited marking; the cell is
    # restored on backtrack so sibling paths can reuse it (see test case 2).
    def is_match(self, board, i, j, word, k):
        if board[i][j] != word[k]:
            return False
        if k == len(word) - 1:
            return True
        rows, cols = len(board), len(board[0])
        saved = board[i][j]
        board[i][j] = ''  # mark as visited on the current path
        for x, y in ((i - 1, j), (i, j - 1), (i + 1, j), (i, j + 1)):
            if 0 <= x < rows and 0 <= y < cols and self.is_match(board, x, y, word, k + 1):
                return True
        board[i][j] = saved  # backtrack
        return False
# test case 1:
# [["a"]]
# "a"
# => true
# test case 2:
# [["A","B","C","E"],
# ["S","F","E","S"],
# ["A","D","E","E"]]
# "ABCESEEEFS"
# => true
# Word Search II
class Solution(object):
    """Word Search II: return all words from `words` traceable on `board`."""
    def findWords(self, board, words):
        """
        Build a trie of the target words, then DFS from every cell,
        walking the board and the trie in lockstep.

        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        # NOTE(review): assumes TrieNode initialises a `children` dict and
        # an `is_word` flag — confirm against prefix_tree.TrieNode.
        root = TrieNode()
        for word in words:
            curr = root
            for char in word:
                # get() keeps an existing child; otherwise inserts a new node
                curr.children[char] = curr.children.get(char, TrieNode())
                curr = curr.children[char]
            curr.is_word = True
        self.matched = []
        for i in range(len(board)):
            for j in range(len(board[0])):
                self.dfs(i, j, board, root, '')
        return self.matched
    def dfs(self, i, j, board, node, path):
        # `path` is the prefix traced so far; `node` is its trie node.
        char = board[i][j]
        # stop condition: cell already on the current path, or no trie edge
        if char == '#' or char not in node.children:
            return
        path += char
        child = node.children[char]
        # keep searching down after a valid word is found
        if child.is_word:
            self.matched.append(path)
            child.is_word = False # avoid adding the same word twice
        board[i][j] = '#' # mark as visited
        for x, y in [(i - 1, j), (i, j - 1), (i + 1, j), (i, j + 1)]:
            if 0 <= x < len(board) and 0 <= y < len(board[0]):
                self.dfs(x, y, board, child, path)
        board[i][j] = char # reset as original value
|
18,523 | 8c6e058ac987c52bb6fd06de64d9ee8bd41bd752 | from birdsong_recognition.utils import add_channel_dim, get_sample_labels, get_spectrogram, load_mp3, preprocess_file, wrapper_split_file_by_window_size
import tensorflow as tf
import os
import numpy as np
from colorama import Style, Fore
import logging
logging.basicConfig(filename='error_log', filemode='w')
EBIRDS = ['norcar', 'blujay', 'bkcchi']
### reload trained model
model = tf.keras.models.load_model('./model/model.h5')
### get an audio sample
audio_dir = './dataset/'
# error file: norcar/XC381575.mp3
debug_one = False
if debug_one:
EBIRDS = ['norcar']
debug_file = ['XC381575.mp3']
for sample_label in EBIRDS:
# sample_label = np.random.choice(EBIRDS)
sample_label_all_files = os.listdir(os.path.join(audio_dir, sample_label))
if debug_one:
sample_label_all_files = debug_file
sample_label_all_files = [file_name for file_name in sample_label_all_files if 'mp3' in file_name]
for sample_audio in sample_label_all_files:
# sample_audio = np.random.choice(sample_label_all_files)
sample_audio = os.path.join(audio_dir, sample_label, sample_audio)
print(sample_audio)
### preprocess the audio
sample_audio_label= get_sample_labels([sample_audio], EBIRDS)
decoded_audio, label = load_mp3(sample_audio_label[0])
decoded_audio, label = preprocess_file(decoded_audio, label)
decoded_audio, label = wrapper_split_file_by_window_size(decoded_audio, label)
decoded_audio, label = get_spectrogram(decoded_audio, label)
decoded_audio, label = add_channel_dim(decoded_audio, label)
### Predict
predict = model.predict(decoded_audio)
predict_category = predict.argmax(axis=1)
pred_idx, pred_count = np.unique(predict_category, return_counts=True)
majority_prediction = pred_idx[pred_count.argmax()]
predicted_bird = EBIRDS[majority_prediction]
confidence_level = pred_count.max()/len(predict_category)
correct_prediction = False
if sample_label == predicted_bird:
correct_prediction = True
print('True label: {}, predicted bird: {}.'.format(sample_label, predicted_bird))
print('confidence level: {}'.format(confidence_level))
if not correct_prediction:
print(f'{Fore.RED}wrong prediction{Style.RESET_ALL}')
pred_birds = [EBIRDS[i] for i in pred_idx]
print('predicted labels: {}'.format(pred_birds))
print('prediction count: {}'.format(pred_count))
logging.error(sample_audio)
logging.error('predicted labels:')
logging.error(pred_birds)
logging.error('predicted count:')
logging.error(pred_count)
logging.error('\n')
print('**********')
print('done') |
18,524 | 67f37ff96658e6123eae3f38aaf6a08cfb8b8700 | def errors(err_code):
    """Map a Kiwoom OpenAPI-style error code to (symbol, Korean message)."""
    # Lookup table: numeric API return code -> (error constant name,
    # human-readable Korean description).
    err_dic = {
        0: ("OP_ERR_NONE", "정상처리"),
        -100: ("OP_ERR_LOGIN", "사용자정보교환실패"),
        -101: ("OP_ERR_CONNECT", "서버접속실패"),
        -102: ("OP_ERR_VERSION", "버전처리실패"),
        -106: ("OP_ERR_SOCKET_CLOSED", "통신연결종료"),
    }
    # Unknown codes raise KeyError — callers must pass a known code.
    result = err_dic[err_code]
    return result
|
18,525 | 3563ccc3a0e5bc7a8a1cb47f4aec5dc84c015320 | from model import connection as conex
import pandas as pd
from tabulate import tabulate
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
class Gerente(object):
    '''
    Console reporting helpers for manager (gerente) records.

    Each method opens its own database connection via model.connection,
    prints a formatted result, and closes the connection again.
    '''
    def __init__(self):
        '''
        Constructor — no state; connections are opened per call.
        '''
    def verificaSupervisionados(self, cpfGerente):
        # Call the "verifica_supervisionados" stored function for this
        # manager's CPF and print the (CPF, Nome) pairs it returns.
        conexao = conex.Connection()
        print('')
        data = pd.read_sql('''SELECT "verifica_supervisionados"(%(cpfGerente)s);''', conexao.conn,
                           params={"cpfGerente":cpfGerente})
        data = data.rename({"verifica_supervisionados":"(CPF,Nome)"}, axis='columns')
        print('')
        print(tabulate(data, showindex=False, headers=data.columns, numalign="left"))
        conexao.close()
    def retornaGerentes(self):
        # List every row of the gerente table.
        # NOTE(review): conexao.queryResult() presumably prints the result
        # of the preceding query — confirm in model.connection.
        conexao = conex.Connection()
        print('')
        conexao.query('SELECT * FROM gerente')
        conexao.queryResult()
conexao.close() |
18,526 | b0b55941bf605d825efbd7869291fcd78c8ee47b | from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
    """DRF exception hook that normalises error payloads under an 'error' key."""
    # Delegate to DRF's default handler for the standard response and status.
    response = exception_handler(exc, context)
    if response is None:
        # Unhandled exception type: let DRF's default behaviour apply.
        return response
    payload = response.data
    if 'detail' in payload:
        # Rename DRF's 'detail' key to 'error'.
        payload['error'] = payload.pop('detail')
    else:
        # Wrap field-level validation errors under a single 'error' key.
        response.data = {'error': payload}
    return response
18,527 | 7d92d6c592f30811a7bcb7393f17255c8f3e4410 | import matplotlib.pyplot as plt
import math
def kosi_hitac(v0, theta, x0, y0):
    """Simulate projectile motion ("kosi hitac") with explicit Euler steps.

    v0     -- initial speed [m/s]
    theta  -- launch angle [degrees]
    x0, y0 -- initial position [m]

    Integrates 1000 steps of dt = 0.01 s under gravity (g = 9.81 m/s^2),
    then shows four plots: x(t), y(t), speed(t) and the trajectory y(x).
    Note: the simulation always runs the full 10 s, even after y < 0.
    """
    dt = 0.01
    # Decompose the initial velocity into horizontal/vertical components.
    v0_x = v0*math.cos(math.radians(theta))
    v0_y = v0*math.sin(math.radians(theta))
    v0x = []
    v0y = []
    v = []
    t0 = 0
    t = []
    x = []
    y = []
    for i in range(0,1000):
        t0 = dt*i
        t.append(t0)
        v0x.append(v0_x)
        # Only the vertical component changes (constant gravity, no drag).
        v0_y = v0_y - 9.81*dt
        v0y.append(v0_y)
        brzina = math.sqrt(v0_x**2 + v0_y**2)
        v.append(brzina)
        # Explicit Euler position update.
        x0 = x0 + v0_x*dt
        x.append(x0)
        y0 = y0 + v0_y*dt
        y.append(y0)
    # Axis labels are Croatian: "Vrijeme" = time, "Brzina" = speed.
    plt.plot(t,x)
    plt.xlabel("Vrijeme [s]")
    plt.ylabel("x [m]")
    plt.show()
    plt.plot(t,y)
    plt.xlabel("Vrijeme [s]")
    plt.ylabel("y [m]")
    plt.show()
    plt.plot(t,v)
    plt.xlabel("Vrijeme [s]")
    plt.ylabel("Brzina [m/s]")
    plt.show()
    plt.plot(x,y)
    plt.xlabel("x [m]")
    plt.ylabel("y [m]")
    plt.show()
# Example run: 50 m/s launched at 60 degrees from the origin.
kosi_hitac(50, 60, 0, 0)
|
18,528 | b4e839e334740548c8df380b2c23ff79c3419923 | """
Django settings for Slam project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING(review): real-looking secrets (the Django SECRET_KEY and
# the OAuth app keys below) are hardcoded in source control — move them to
# environment variables / a secrets store and rotate the exposed values.
SECRET_KEY = '*9o6+rdu-7*kq!cjfa)tuzam61miy#b#=&y7d-z#-0p)0^%z)s'
# Post-login redirect and the URL anonymous users are sent to.
LOGIN_REDIRECT_URL = '/auth'
LOGIN_URL = '/profile/select/'
LOGIN_ERROR_URL = '/profile/select/'
# python-social-auth provider credentials (Twitter disabled: empty keys).
SOCIAL_AUTH_TWITTER_KEY = ''
SOCIAL_AUTH_TWITTER_SECRET = ''
SOCIAL_AUTH_FACEBOOK_KEY = '136149443801150'
SOCIAL_AUTH_FACEBOOK_SECRET = '79b6ed60878e579232727fd61e4be9e1'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['public_profile', 'email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {'fields': 'first_name,last_name,gender,picture,link'}
SOCIAL_AUTH_GOOGLE_OAUTH_SCOPE = ['https://www.googleapis.com/auth/userinfo.profile',]
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '583868431865-qtvc1pi8tmrlmodto7cos4oqqp8th5tr.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'Jp57w7YSixPfVynCqiFxy_1_'
GOOGLE_OAUTH2_EXTRA_DATA = [ ('id', 'id'),
('email', 'email'),
('username', 'username'),
('birthday', 'birthday'), ]
# Email Credentials
# SECURITY WARNING(review): a personal Gmail address and its password are
# committed here — load them from the environment (app password) instead.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'kshitijwarungase@gmail.com'
EMAIL_HOST_PASSWORD = 'Kshitij@421'
EMAIL_PORT = 587
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'social.apps.django_app.default',
'social_django',
'django.contrib.staticfiles',
'Slamapp',
'HomeView',
'ProfileView',
]
# TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
# 'django.core.context_processors.csrf',
# 'django.core.context_processors.i18n',
# 'django.core.context_processors.media',
# 'django.core.context_processors.static',
# 'django.core.context_processors.tz',
# 'django.contrib.messages.context_processors.messages',
# 'social.apps.django_app.context_processors.backends',
# 'social.apps.django_app.context_processors.login_redirect',
# )
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'ProfileView.pipeline.socialprofile_extra_values', # This is a path of your pipeline.py
#and get_avatar is the function.
)
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
)
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Slam.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.csrf',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
# 'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
# 'django.core.context_processors.i18n',
# 'django.core.context_processors.media',
# 'django.core.context_processors.static',
# 'django.core.context_processors.tz',
# 'django.contrib.messages.context_processors.messages',
# 'social.apps.django_app.context_processors.backends',
# 'social.apps.django_app.context_processors.login_redirect',
],
'debug' : DEBUG,
},
},
]
WSGI_APPLICATION = 'Slam.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '/etc/mysql/my.cnf',
},
},
'sqlite': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# FEATURES: {
# "AUTH_USE_OPENID_PROVIDER": true,
# "AUTOMATIC_AUTH_FOR_TESTING": false,
# "CERTIFICATES_ENABLED": true,
# "ENABLE_DISCUSSION_SERVICE": true,
# "ENABLE_INSTRUCTOR_ANALYTICS": false,
# "ENABLE_MKTG_SITE": false,
# "ENABLE_S3_GRADE_DOWNLOADS": true,
# "ENABLE_THIRD_PARTY_AUTH": true,
# "PREVIEW_LMS_BASE": "portal.edu4africa.com",
# "SUBDOMAIN_BRANDING": false,
# "SUBDOMAIN_COURSE_LISTINGS": false,
# "USE_CUSTOM_THEME": true
# },
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
|
18,529 | 79beb34fa6864f594c9dd6dee36404612e278ab4 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import greedy_music.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('genre_name', models.CharField(unique=True, max_length=50)),
('user', models.OneToOneField(related_name='user_genre', null=True, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['genre_name'],
'verbose_name_plural': 'Genres',
},
),
migrations.CreateModel(
name='MusicTrack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('music_title', models.CharField(max_length=100)),
('music_track', models.FileField(upload_to=greedy_music.models.get_track_url)),
('artist_name', models.CharField(max_length=100)),
('ratings', models.DecimalField(max_digits=2, decimal_places=1)),
('genre', models.ManyToManyField(related_name='music_track', to='greedy_music.Genre')),
('user', models.OneToOneField(related_name='user_music_track', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['artist_name', 'music_track'],
'verbose_name_plural': 'MusicTracks',
},
),
migrations.AlterUniqueTogether(
name='musictrack',
unique_together=set([('music_track', 'artist_name')]),
),
]
|
18,530 | 949d9caf6541f9741e062cdbff961e8b3cc2f2df | class Solution:
def search(self, nums: List[int], target: int) -> int:
if len(nums) == 0:
return -1
mid = len(nums)//2
if nums[mid] == target:
print(nums, mid)
return mid
elif target < nums[mid]:
return self.search(nums[:mid], target)
else:
a = self.search(nums[mid+1:], target)
print(nums, mid)
return a + (mid+1) if a is not -1 else -1
|
18,531 | b55c2bd23eeb3c63488cc58937f26800d717da68 | import sys
# Advent-of-Code-style floor puzzle: '(' goes up one floor, any other
# character goes down one.
with open(sys.argv[1]) as fh:  # fix: close the file instead of leaking the handle
    chars = fh.read()
# Part 1: final floor = count of '(' minus count of ')'.
print(chars.count('(') - chars.count(')'))
# Part 2: 1-based position of the first character that goes below floor 0.
# Note: like the original, any non-'(' character (including a trailing
# newline) counts as a step down.
curr = 0
for pos, ch in enumerate(chars, start=1):
    curr += 1 if ch == '(' else -1
    if curr < 0:
        print(pos)
        break
|
18,532 | fa6acac4864a3b8be9f3c8251e005485684f1a3e | import allure
from selenium.webdriver.common.by import By
from base.base_action import Action
class NewMmS(Action):
    """Page object for composing a new SMS in the AOSP Mms app."""
    # Locator (By.ID) for the "compose new message" toolbar button.
    new_mms_button = By.ID, "com.android.mms:id/action_compose_new"
    # The Allure step title is Chinese for "tap the new-SMS button".
    @allure.step(title="点击新建短信按钮")
    def click_new_mms(self):
self.click(self.new_mms_button) |
18,533 | 44c443dd0968a99dcc44bc1501e5568c13a8700f | import argparse
# PATHs
id='8'
xp_name = 'strept_pneumo_31'
#xp_name = 'esche_amox_31'
#xp_name='acineto_imip_20'
mode='serv' #can be ['local', 'serv', 'laptop']
dtype='sparse' #can be ['df','sparse']
if mode == 'serv':
pathtoxp = '/mnt/cbib/MAGITICS_Yvan/experiments_kmer_count/'
pathtodata='/scratch/MAGITICS_data/'
# testdir='Pseudomonas_aeruginosa/levofloxacin/test/test'
# testdir='Escherichia_coli/test/test'
# data='Escherichia_coli/traindata/'
# data = 'Pseudomonas_aeruginosa/levofloxacin/traindata/'
data = 'Streptococcus_pneumoniae/datajie/'
testdir = 'Streptococcus_pneumoniae/testjie/'
#data='Acinetobacter_baumanii/traindata/'
#testdir='Acinetobacter_baumanii/test/test/'
elif mode == 'local':
pathtoxp = '/home/ylucas/toydata_pseudomonas_levofloxacin/'
pathtodata='/home/ylucas/toydata_pseudomonas_levofloxacin/'
data='traindatabis'
testdir='test/test'
elif mode == 'laptop':
pathtoxp='/home/ylucas/Bureau/expe_postdoc/xp'
pathtodata='/home/ylucas/Bureau/expe_postdoc/data_postdoc/'
data= ''
testdir='test'
# Kmer extraction parameters
kmer_count = 1 # 1: kmer count, 0: presence/absence
len_kmers = 20
MIC=True
min_abundance = 3 #not used atm
kmer_count=1 #1: kmer count, 0: presence/absence
len_kmers=20
# Learning parameters
model = 'gradient' # can be ['rf','SCM', 'gradient', 'Ada']
rf_grid = {'max_features': ['sqrt', 'log2'],
'max_depth': [4, 8]}
SCM_grid = {'p': [1, 10], 'max_rules': [1, 3 ,10], 'model_type':['conjunction','disjunction']}
gradient_grid = {'max_depth': [1, 2, 4],
'n_estimators': [10, 6,4, 2] }
ada_grid = {'n_estimators': [ 5, 10, 20]}
pruning_tresh=0.9
def get_lenkmers():
    """Parse --len_kmers from the command line (default 31) and return it.

    NOTE(review): the module-level `len_kmers`/`id` values defined earlier
    are overwritten after this definition — the call is currently commented
    out, so the CLI value is unused.
    """
    parser=argparse.ArgumentParser()
    parser.add_argument('--len_kmers', type=int, default=31)
    arg=parser.parse_args()
    return arg.len_kmers
len_kmers= 20 #int(get_lenkmers())
id='MICkmers20penicillin'#+str(int(get_lenkmers()))
|
18,534 | ffc6f199c7024a5d9c1d3d3eec04bfbe72ff2777 | s = input()
t = input()
def isAnagram(s,t):
    """Print "Yes" if s and t are anagrams of each other, else "No".

    Two strings are anagrams exactly when their sorted characters match.
    """
    print("Yes" if sorted(s) == sorted(t) else "No")
isAnagram(s,t) |
18,535 | 6c0587343bdec26863a772ac204092f93b6649a7 | import fnmatch
import os
from shutil import copyfile, copy
from tkinter import filedialog, Tk
if __name__ == '__main__':
    # Copy every 'message*' file from each first-level sub-directory of the
    # current directory into a user-chosen destination, mirroring the
    # sub-directory structure.
    window = Tk()
    targetPath = filedialog.askdirectory(parent=window,
                                         initialdir=os.getcwd(),
                                         title="Choose destination")
    try:
        for _, dirs, _ in os.walk(os.curdir):
            for subdir in dirs:  # renamed from `dir`, which shadows a builtin
                print(subdir)
                # Mirror the sub-directory in the destination if needed.
                if not os.path.isdir(targetPath + '/' + subdir):
                    os.mkdir(targetPath + '/' + subdir)
                print("listdir:", os.listdir(subdir))
                # NOTE(review): os.listdir(subdir) resolves relative to the
                # current directory, so only first-level sub-directories are
                # handled correctly — confirm deeper levels are not expected.
                for filename in os.listdir(subdir):
                    if fnmatch.fnmatch(filename, 'message*'):
                        print(os.curdir + '/' + subdir + '/' + filename)
                        copy(os.curdir + '/' + subdir + '/' + filename,
                             targetPath + '/' + subdir)
    except Exception as e:  # fix: the original used `catch`, a SyntaxError in Python
        print(e)
18,536 | 3fd040c4f0d1a5d2f2387325c3eff8eb5a780b32 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Rebuild a binary tree from its inorder and postorder traversals."""

    def buildTree(self, inorder, postorder):
        """
        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode
        """
        self.inorder = inorder
        self.postorder = postorder
        # Map each value to its inorder index so subtrees split in O(1).
        self.inPos = {value: index for index, value in enumerate(inorder)}
        return self.createTree(0, len(inorder) - 1, 0, len(postorder) - 1)

    def createTree(self, inBeg, inEnd, posBeg, posEnd):
        """Build the subtree covering inorder[inBeg..inEnd] (inclusive)."""
        if inBeg > inEnd:
            return None
        # The last postorder entry in the range is the subtree root.
        value = self.postorder[posEnd]
        node = TreeNode(value)
        split = self.inPos[value]
        left_size = split - inBeg
        node.left = self.createTree(inBeg, split - 1, posBeg, posBeg + left_size - 1)
        node.right = self.createTree(split + 1, inEnd, posBeg + left_size, posEnd - 1)
        return node
|
18,537 | 4f7c4729ae3d9873a85baca1739440bd10d22087 | print ("Hello World")
# Remaining greeting output, kept identical to the original script.
print("Love")
print("wassup")
18,538 | 7bf585843e15d40b29614bfd73a41f4886409820 | from django.db import models
from briefcase.core.models import Document
class Spreadsheet(Document):
    """A Document subclass that stores raw spreadsheet contents."""

    # Serialized spreadsheet contents; the encoding/format is not shown here.
    data = models.TextField(blank=True)

    def __unicode__(self):
        # NOTE(review): __unicode__ and unicode() exist only on Python 2 /
        # old Django; under Python 3 this would need to be __str__ -- confirm
        # the project's Python version before changing.
        return "%s - %s" % (self.file_name, unicode(self.owner))
|
18,539 | d1787007c35876be359eb05307c11e19ae284632 | # Generated by Django 2.2 on 2019-05-20 08:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 2.2): creates the Place and
    # Hotel tables. Do not hand-edit applied migrations.

    initial = True

    dependencies = [
        # cities_light provides the City/Country/Region tables referenced below.
        ('cities_light', '0008_city_timezone'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=1024)),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cities_light.City')),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cities_light.Country')),
                ('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cities_light.Region')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            # NOTE(review): Hotel field names are CamelCase, unlike Place's
            # lowercase fields -- mirrors the model definitions as generated.
            name='Hotel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.CharField(max_length=255)),
                ('Address', models.CharField(max_length=255)),
                ('TelephoneNumber', models.CharField(max_length=12)),
                ('ImagePath', models.CharField(max_length=255)),
                ('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.Place')),
            ],
        ),
    ]
|
def mergesort(arr1, option=None):
    """Sort ``arr1`` ascending with bottom-up (iterative) merge sort.

    Runs of width 1 are merged pairwise, then width 2, 4, ... until a single
    run covers the whole list. Returns a sorted list; the caller's list is
    not modified (a scratch copy is used).

    :param arr1: list of mutually comparable items.
    :param option: unused; now defaulted for backward compatibility -- the
        original signature *required* it, which made the module-level call
        ``mergesort(arrRand)`` raise TypeError.
    """
    length = len(arr1)
    if length < 2:
        return arr1
    # Merge sort needs O(n) scratch space; merge back and forth between
    # arr1 and arr2, swapping roles each pass.
    arr2 = arr1[:]
    step = 1
    while step < length:
        i, curr = 0, 0  # beginning of each pair of runs, current write point
        while i + step < length:
            left, right = i, i + step
            while curr < length and curr < i + 2 * step:
                # Take from the left run when the right one is exhausted,
                # from the right when the left is, otherwise the smaller
                # head (<= keeps the sort stable).
                if right == length or right == i + 2 * step:
                    arr2[curr] = arr1[left]
                    left += 1
                elif left == i + step:
                    arr2[curr] = arr1[right]
                    right += 1
                elif arr1[left] <= arr1[right]:
                    arr2[curr] = arr1[left]
                    left += 1
                else:
                    arr2[curr] = arr1[right]
                    right += 1
                curr += 1
            i += 2 * step
        # Copy the untouched tail (an unpaired final run) into the output.
        while curr < length:
            arr2[curr] = arr1[curr]
            curr += 1
        step *= 2
        # Swap buffers instead of copying back.
        arr1, arr2 = arr2, arr1
    return arr1
import random

# Smoke test: sort 30 small random ints and show the result.
arrRand = [random.randint(0, 10) for i in range(30)]
# BUG FIX: the original line was `print mergesort(arrRand)` -- a Python 2
# print statement (SyntaxError on Python 3) that also omitted the required
# second argument (TypeError even on Python 2). Pass it explicitly.
print(mergesort(arrRand, None))
18,541 | 86c37c33a7ab3bec960ed1625d1ba0bf4106325c | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat May 12 12:25:13 2018
@author: tanvi
"""
import json
import glob
# Merge every Tweets*.json file into one combined JSON array.
glob_data = []
for json_path in glob.glob('/home/tanvi/Documents/files/Tweets*.json'):
    # (renamed loop variable: `file` shadowed the builtin)
    with open(json_path) as json_file:
        print(json_file.name)
        # extend() replaces the original index-by-index while loop.
        glob_data.extend(json.load(json_file))
with open('/home/tanvi/Documents/files/mergedTweetsFile.json', 'w') as f:
    json.dump(glob_data, f, indent=4)
    print('file written')
|
18,542 | 7da3fcc58ad30dbf1413b5bc4aef2bdff7319d3b | import random
import datetime
from main import *
def addpost():
    """Seed the database with 100 example posts owned by user #1.

    Each post gets the current timestamp, a fixed body, and 1-3 randomly
    sampled tags; everything is committed in a single transaction.
    """
    author = User.query.get(1)
    tag_pool = [
        Tag('Python'),
        Tag('Flask'),
        Tag('SQLAlechemy'),
        Tag('Jinja'),
    ]
    body = "Example text"
    for index in range(100):
        post = Post('post' + str(index))
        post.user = author
        post.publish_date = datetime.datetime.now()
        post.text = body
        post.tags = random.sample(tag_pool, random.randint(1, 3))
        db.session.add(post)
    db.session.commit()


if __name__ == '__main__':
    addpost()
|
18,543 | 417e8ab783de01f408ed50230f0a4a828ba78c8c | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from unittest import mock
import numpy as np
import pytest
from helpers.scenario import temp_scenario
from smarts.core.coordinates import Heading, Pose
from smarts.core.plan import Plan
from smarts.core.scenario import Scenario
from smarts.core.sensors import DrivenPathSensor, TripMeterSensor, WaypointsSensor
from smarts.core.utils.math import squared_dist
from smarts.sstudio import gen_scenario
from smarts.sstudio import types as t
AGENT_ID = "Agent-007"
def test_driven_path_sensor():
    """DrivenPathSensor keeps a bounded path and reports distance travelled."""
    vehicle = mock.Mock()
    sim = mock.Mock()
    max_path_length = 5
    sensor = DrivenPathSensor(max_path_length=max_path_length)
    # Vehicle moves 10 units every 5 seconds along the x axis.
    positions = [(x, 0, 0) for x in range(0, 100, 10)]
    sim_times = list(range(0, 50, 5))
    for idx, (position, sim_time) in enumerate(zip(positions, sim_times)):
        sim.elapsed_sim_time = sim_time
        vehicle.pose.position = position
        sensor.track_latest_driven_path(sim.elapsed_sim_time, vehicle)
        if idx >= 3:
            # 3 steps of 10 units each; 10 seconds covers 2 steps of 10 units.
            assert sensor.distance_travelled(sim.elapsed_sim_time, last_n_steps=3) == 30
            assert (
                sensor.distance_travelled(sim.elapsed_sim_time, last_n_seconds=10) == 20
            )
        # The stored path must never exceed the configured cap.
        assert len(sensor()) <= max_path_length
    sensor.teardown()
@pytest.fixture
def scenarios():
    """Yield scenario variations for a single straight-road ego mission.

    Builds a temporary scenario on the 6-lane map with one west-to-east
    route, generates it on disk, and yields the variation iterator keyed
    by AGENT_ID. The temp directory is cleaned up when the context exits.
    """
    with temp_scenario(name="straight", map="maps/6lane.net.xml") as scenario_root:
        ego_missions = [
            t.Mission(
                t.Route(
                    begin=("edge-west-WE", 0, 10),
                    end=("edge-east-WE", 0, "max"),
                )
            ),
        ]
        gen_scenario(
            t.Scenario(ego_missions=ego_missions),
            output_dir=scenario_root,
        )
        yield Scenario.variations_for_all_scenario_roots(
            [str(scenario_root)], [AGENT_ID]
        )
def test_trip_meter_sensor(scenarios):
    """TripMeterSensor's total equals the summed waypoint-to-waypoint distance."""
    scenario: Scenario = next(scenarios)
    sim = mock.Mock()
    vehicle_state = mock.Mock()
    vehicle_state.pose = Pose(
        position=np.array([33, -65, 0]),
        orientation=np.array([0, 0, 0, 0]),
        heading_=Heading(0),
    )
    vehicle_state.length = 3.68
    mission = scenario.missions[AGENT_ID]
    plan = Plan(scenario.road_map, mission)
    sensor = TripMeterSensor()
    waypoints_sensor = WaypointsSensor()
    # Drive the mocked vehicle 10 units along x every 5 sim-seconds.
    positions = [(x, 0, 0) for x in range(0, 100, 10)]
    sim_times = list(range(0, 50, 5))
    for idx, (position, sim_time) in enumerate(zip(positions, sim_times)):
        sim.elapsed_sim_time = sim_time
        vehicle_state.position = position
        # Rebuild the pose each step so the sensor sees the new position.
        vehicle_state.pose = Pose(
            position=vehicle_state.position,
            orientation=np.array([0, 0, 0, 0]),
            heading_=Heading(0),
        )
        waypoint_paths = waypoints_sensor(vehicle_state, plan, scenario.road_map)
        sensor.update_distance_wps_record(
            waypoint_paths, vehicle_state, plan, sim.road_map
        )
    # The odometer must equal the piecewise distance over the recorded waypoints.
    assert sensor() == sum(
        wpf.dist_to(wps.pos)
        for wpf, wps in zip(sensor._wps_for_distance[:-1], sensor._wps_for_distance[1:])
    )
    sensor.teardown()
def test_waypoints_sensor(scenarios):
    """WaypointsSensor returns one waypoint path per reachable lane (3 here)."""
    scenario = next(scenarios)
    vehicle_state = mock.Mock()
    vehicle_state.pose = Pose(
        position=np.array([33, -65, 0]),
        orientation=np.array([0, 0, 0, 0]),
        heading_=Heading(0),
    )
    mission = scenario.missions[AGENT_ID]
    plan = Plan(scenario.road_map, mission)
    sensor = WaypointsSensor()
    waypoints = sensor(vehicle_state, plan, scenario.road_map)
    # NOTE(review): 3 presumably corresponds to the lanes of the 6-lane map
    # reachable from this pose -- confirm against the map definition.
    assert len(waypoints) == 3
    sensor.teardown()
|
18,544 | 73054b6b5eed4c10da466078355aeed39473b64b |
import time
def isPrime(n):
    """Trial-division primality test using the 6k +/- 1 optimization."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # All remaining primes have the form 6k +/- 1; test both residues per step.
    candidate = 5
    while candidate * candidate <= n:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def solution():
    """Project Euler #10: print the sum of all primes below two million,
    then the elapsed wall-clock time in seconds."""
    s = 0
    t = time.time()
    for i in range(0, 2000000):
        if isPrime(i):
            s = s + i
    # BUG FIX: the original used Python 2 print statements (`print s`),
    # which are SyntaxErrors under Python 3; single-argument print(...)
    # calls behave identically on both interpreters.
    print(s)
    print(time.time() - t)


solution()
|
18,545 | 41c6dafbb661ce3562c0a1d5c7fb00ecbae4c997 | if __name__ == "__main__":
from sys import path
from os.path import dirname
path.append(dirname(path[0]))
from Basic.fly import fly
from Basic.job import job
from Basic.sell_eqs import sell_eqs
from Basic.send_application import citizenship_or_mu_application
from Fight.auto_fight import auto_fight
from Fight.motivates import send_motivates
from add_friends import friends
else:
from .add_friends import friends
from login import login, double_click
import time
import requests
from lxml.html import fromstring
from random import choice, randint
def missions(server, missions_to_complete="ALL", action="ALL", session=""):
    """Finish e-sim daily missions for the logged-in citizen.

    :param server: e-sim server subdomain used to build the base URL.
    :param missions_to_complete: how many missions to attempt, or "ALL"
        (up to 20 per day).
    :param action: start / complete / skip / ALL. A specific action is only
        honoured when ``missions_to_complete`` is not "ALL".
    :param session: an already-authenticated requests session; when empty a
        fresh login is performed.
    """
    URL = f"https://{server}.e-sim.org/"
    if action.lower() not in ("start", "complete", "skip", "all"):
        print("action must be `start`/`complete`/`skip`/`ALL`")
        return
    if not session:
        session = login(server)
    # Single-action mode: perform exactly one start/complete/skip and return.
    if missions_to_complete.lower() != "all":
        if action.lower() != "all":
            if action.lower() == "start":
                c = session.post(URL + "betaMissions.html?action=START", data={"submit": "Mission start"})
                # The server redirects to a status URL on success.
                if "MISSION_START_OK" not in str(c.url) and "?action=START" not in str(c.url):
                    print(c.url)
                    return
            if action.lower() == "complete":
                c = session.post(URL + "betaMissions.html?action=COMPLETE", data={"submit": "Receive"})
                if "MISSION_REWARD_OK" not in str(c.url) and "?action=COMPLETE" not in str(c.url):
                    print(c.url)
                    return
            if action.lower() == "skip":
                c = session.post(URL + "betaMissions.html",
                                 data={"action": "SKIP", "submit": "Skip this mission"})
                if "MISSION_SKIPPED" not in str(c.url):
                    print(c.url)
                    return
            print("Done")
            return
    # Batch mode: 20 is the daily mission cap.
    if missions_to_complete.lower() == "all":
        RANGE = 20
    else:
        RANGE = int(missions_to_complete)
    for _ in range(1, RANGE+1):
        try:
            home_page = session.get(URL)
            tree = fromstring(home_page.content)
            # Work button present -> daily work/train not done yet; do it first.
            check = tree.xpath('//*[@id="taskButtonWork"]//@href')
            if check:
                double_click(server, session=session)
            my_id = str(tree.xpath('//*[@id="userName"]/@href')[0]).split("=")[1]
            try:
                # Current mission number, parsed out of "Mission #N: ..." text.
                num = int(str(tree.xpath('//*[@id="inProgressPanel"]/div[1]/strong')[0].text).split("#")[1].split(":")[0])
            except:
                # need to collect reward / no more missions
                c = session.post(URL + "betaMissions.html?action=COMPLETE", data={"submit": "Receive"})
                if "MISSION_REWARD_OK" not in str(c.url) and "?action=COMPLETE" not in str(c.url):
                    # NOTE(review): "tommorrow" typo kept -- user-facing text.
                    print(f"No more missions today. Come back tommorrow!")
                    return
                print(c.url)
                continue
            if not num:
                print("You have completed all your missions for today, come back tomorrow!")
                return
            print(f"Mission number {num}")
            c = session.post(URL + "betaMissions.html?action=START", data={"submit": "Mission start"})
            if "MISSION_START_OK" not in str(c.url):
                # Start failed: the mission may already be running -- try to
                # collect; if that also fails, perform the mission's task.
                c = session.post(URL + "betaMissions.html?action=COMPLETE", data={"submit": "Receive"})
                if "MISSION_REWARD_OK" not in str(c.url) and "?action=COMPLETE" not in str(c.url):
                    # Dispatch on the mission number (grouped roughly by day).
                    if num == 1:
                        session.get(URL + "inboxMessages.html")
                        session.get(f"{URL}profile.html?id={my_id}")
                    elif num in (2, 4, 16, 27, 28, 36, 43, 59):
                        double_click(server, session=session)
                    elif num in (3, 7):
                        job(server, session)
                    elif num in (5, 26, 32, 35, 38, 40, 47, 51, 53, 64):
                        # NOTE(review): the 31/46 checks can never match here --
                        # neither value is in the tuple above, so restores is
                        # always "1".
                        if num == 31:
                            restores = "3"
                            print(f"Hitting {restores} restores, it might take a while")
                        elif num == 46:
                            restores = "2"
                            print(f"Hitting {restores} restores, it might take a while")
                        auto_fight(server, restores="1")
                    elif num == 6:
                        session.post(f"{URL}food.html?quality=1")
                    elif num == 8:
                        session.get(URL + "editCitizen.html")
                    elif num == 9:
                        session.get(URL + "notifications.html")
                    elif num == 10:
                        session.get(URL + "newMap.html")
                    elif num == 11:
                        # Buy one unit of the first product offer on the market.
                        product_market = session.get(f"{URL}productMarket.html")
                        tree = fromstring(product_market.content)
                        productId = tree.xpath('//*[@id="command"]/input[1]')[0].value
                        payload = {'action': "buy", 'id': productId, 'quantity': 1, "submit": "Buy"}
                        session.post(URL + "productMarket.html", data=payload)
                    elif num in (12, 54):
                        # Fly to the citizenship country's capital (region 1 as fallback).
                        Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()
                        apiRegions = requests.get(URL + "apiRegions.html").json()
                        capital = [row['id'] if row['homeCountry'] == Citizen['citizenshipId'] and
                                   row['capital'] else 1 for row in apiRegions][0]
                        fly(server, capital, 3, session=session)
                    elif num in (13, 66):
                        session.get(URL + 'friends.html?action=PROPOSE&id=8')
                        citizenAchievements = session.get(URL + "citizenAchievements.html")
                        tree = fromstring(citizenAchievements.content)
                        ID = str(tree.xpath('//*[@id="userName"]/@href')[0]).split("=")[1]
                        session.post(URL + "citizenAchievements.html",
                                     data={"id": ID, "submit": "Recalculate achievements"})
                    elif num == 14:
                        # Equip the first item found in equipment storage.
                        i = session.get(URL + 'storage.html?storageType=EQUIPMENT')
                        tree = fromstring(i.content)
                        ID = tree.xpath(f'//*[starts-with(@id, "cell")]/a/text()')[0]
                        payload = {'action': "EQUIP", 'itemId': ID.replace("#", "")}
                        session.post(URL + "equipmentAction.html", data=payload)
                    elif num == 15:
                        session.post(f"{URL}vote.html?id=1")
                    # day 2
                    elif num == 18:
                        shout_body = choice(["Mission: Say hello", "Hi", "Hello", "Hi guys :)", "Mission"])
                        payload = {'action': "POST_SHOUT", 'body': shout_body, 'sendToCountry': "on",
                                   "sendToMilitaryUnit": "on", "sendToParty": "on", "sendToFriends": "on"}
                        session.post(f"{URL}shoutActions.html", data=payload)
                    elif num == 19:
                        # Buy 0.5 of the local currency on the monetary market.
                        Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()
                        monetaryMarket = session.get(
                            URL + 'monetaryMarket.html?buyerCurrencyId=0&sellerCurrencyId=' + str(
                                int(Citizen['currentLocationRegionId'] / 6)))
                        tree = fromstring(monetaryMarket.content)
                        ID = tree.xpath("//tr[2]//td[4]//form[1]//input[@value][2]")[0].value
                        payload = {'action': "buy", 'id': ID, 'ammount': 0.5, "submit": "OK"}
                        session.post(URL + "monetaryMarket.html", data=payload)
                    elif num == 21:
                        i = session.get(URL + 'storage.html?storageType=EQUIPMENT')
                        tree = fromstring(i.content)
                        ID = tree.xpath(f'//*[starts-with(@id, "cell")]/a/text()')[0]
                        sell_eqs(server, ID, 0.01, 48, session)
                    elif num == 22:
                        Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()
                        payload = {'product': "GRAIN", 'countryId': Citizen['citizenshipId'], 'storageType': "PRODUCT",
                                   "action": "POST_OFFER", "price": 0.1, "quantity": 100}
                        sell_grain = session.post(URL + "storage.html", data=payload)
                        print(sell_grain.url)
                    elif num == 25:
                        payload = {'setBg': "LIGHT_I", 'action': "CHANGE_BACKGROUND"}
                        session.post(URL + "editCitizen.html", data=payload)
                    # day 3
                    elif num == 29:
                        for article_id in range(2, 7):
                            session.post(f"{URL}vote.html?id={article_id}")
                    elif num == 30:
                        session.post(f"{URL}sub.html?id=1")
                    elif num == 31:
                        citizenship_or_mu_application(server, randint(1, 21), "mu", session)
                    # day 4
                    elif num == 37:
                        shout_body = choice(["Mission: Get to know the community better", "Hi",
                                             "Hello", "Hi guys :)", "Mission", "IRC / Skype / TeamSpeak"])
                        payload = {'action': "POST_SHOUT", 'body': shout_body, 'sendToCountry': "on",
                                   "sendToMilitaryUnit": "on", "sendToParty": "on", "sendToFriends": "on"}
                        session.post(f"{URL}shoutActions.html", data=payload)
                    elif num == 39:
                        session.get(URL + 'friends.html?action=PROPOSE&id=1')
                    elif num == 41:
                        # Retry commenting on random articles until one accepts.
                        for _ in range(10):
                            ID = randint(1, 100)
                            payload = {"action": "NEW", "key": f"Article {ID}", "submit": "Publish",
                                       "body": choice(["Mission", "Hi", "Hello there", "hello", "Discord?"])}
                            comment = session.post(URL + "comment.html", data=payload)
                            if "MESSAGE_POST_OK" in str(comment.url):
                                break
                    elif num == 42:
                        # Join the largest party; already-in-party is fine.
                        try:
                            b = session.get(URL + "partyStatistics.html?statisticType=MEMBERS")
                            tree = fromstring(b.content)
                            ID = str(tree.xpath('//*[@id="esim-layout"]//table//tr[2]//td[3]//@href')[0]).split("=")[1]
                            payload1 = {"action": "JOIN", "id": ID, "submit": "Join"}
                            b = session.post(URL + "partyStatistics.html", data=payload1)
                            if str(b.url) != URL + "?actionStatus=PARTY_JOIN_ALREADY_IN_PARTY":
                                print(b.url)
                        except:
                            pass
                    # day 5
                    elif num == 45:
                        session.post(URL + "replyToShout.html?id=1",
                                     data={"body": choice(["OK", "Whatever", "Thanks", "Discord?"]),
                                           "submit": "Shout!"})
                    elif num == 46:
                        payload = {'itemType': "STEROIDS", 'storageType': "SPECIAL_ITEM", 'action': "BUY", "quantity": 1}
                        session.post(URL + "storage.html", data=payload)
                    elif num == 49:
                        i = session.get(URL + 'storage.html?storageType=EQUIPMENT')
                        tree = fromstring(i.content)
                        ID = tree.xpath(f'//*[starts-with(@id, "cell")]/a/text()')[0]
                        payload = {'action': "EQUIP", 'itemId': ID.replace("#", "")}
                        session.post(URL + "equipmentAction.html", data=payload)
                    elif num == 50:
                        session.post(f"{URL}shoutVote.html?id=1&vote=1")
                    elif num == 52:
                        fly(server, 1, 3, session)
                    elif num == 55:
                        requests.get(URL + f"lan.{my_id}/")
                    # NOTE(review): 55 is unreachable here -- it matched the
                    # branch above, so only 61 ever reaches send_motivates.
                    elif num in (61, 55):
                        send_motivates(server, "ALL", session)
                    elif num == 57:
                        Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()
                        payload = {'receiverName': f"{Citizen['citizenship']} Org", "title": "Hi",
                                   "body": choice(["Hi", "Can you send me some gold?", "Hello there!", "Discord?"]), "action": "REPLY", "submit": "Send"}
                        session.post(URL + "composeMessage.html", data=payload)
                    elif num == 58:
                        session.post(f"{URL}sub.html?id=2")
                    elif num == 60:
                        friends(server, "online", session)
                    elif num == 63:
                        session.post(f"{URL}medkit.html")
                        # if food & gift limits > 10 it won't work.
                    else:
                        print("I don't know how to finish this mission. you have few seconds to stop me before i skip it")
                        time.sleep(randint(1, 7))
                    # Try to collect the reward (twice), then skip as a last resort.
                    c = session.post(URL + "betaMissions.html?action=COMPLETE", data={"submit": "Receive"})
                    if "MISSION_REWARD_OK" not in str(c.url) and "?action=COMPLETE" not in str(c.url):
                        c = session.post(URL + "betaMissions.html?action=COMPLETE", data={"submit": "Receive"})
                        if "MISSION_REWARD_OK" not in str(c.url) and "?action=COMPLETE" not in str(c.url):
                            c = session.post(URL + "betaMissions.html",
                                             data={"action": "SKIP", "submit": "Skip this mission"})
                            if "MISSION_SKIPPED" not in str(c.url) and "?action=SKIP" not in str(c.url):
                                return
                            else:
                                print(f"Skipped mission {num}")
            print(c.url)
        except Exception as error:
            # Keep the batch loop alive on transient network/parse errors.
            print(error)
            time.sleep(5)
if __name__ == "__main__":
    # Interactive entry point: show the usage docstring, then prompt for
    # each parameter, defaulting blanks to "all".
    print(missions.__doc__)
    server = input("Server: ")
    missions_to_complete = input("How many missions you wish to complete? (for all of them, press Enter) ")
    if not missions_to_complete:
        missions_to_complete = "all"
    action = input("Action (start / complete / skip / ALL): ")
    if not action:
        action = "all"
    missions(server, missions_to_complete, action)
    # Keep the console window open until the user acknowledges.
    input("Press any key to continue")
|
18,546 | 51026a7c1af670366e6554fc80b9abd7f6b41048 | #!/usr/bin/env python3
import time
import cv2
import mss
import numpy as np
import keypress as kb
import os
import checkeredflag as cf
from alexnet import alexnet
from pynput.keyboard import Key, Controller
keyboard = Controller()
# Network input dimensions -- they match the 319x20 crop taken from the
# resized frame in the capture loop below.
WIDTH=319
HEIGHT=20
LR = 1e-3   # learning rate baked into the saved model's filename
EPOCHS = 8
MODEL_NAME = 'data/vdrift-{}-{}-{}.model'.format(LR, 'alexnetv2', EPOCHS)
model = alexnet(WIDTH, HEIGHT, LR)
model.load(MODEL_NAME)
# Start paused; pressing 'q' in the preview window toggles this flag.
shutdown = True
with mss.mss() as sct:
    # Part of the screen to capture (the game window region).
    monitor = {'top': 120, 'left': 0, 'width': 800, 'height': 450}
    breaks = 0  # consecutive "brake" predictions seen so far
    while 'Screen capturing':  # non-empty string -> loop forever
        # NOTE(review): last_time is assigned but never read in this loop.
        last_time = time.time()
        # Crop rectangle, applied AFTER the 20% downscale below.
        y = 90
        x = 0
        w = 319
        h = 20
        # Grab the raw pixels and convert to grayscale.
        img = cv2.cvtColor(np.array(sct.grab(monitor)), cv2.COLOR_BGR2GRAY)
        scale_percent = 20  # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        # resize image
        resized = cv2.resize(img, dim)
        # canny = cv2.Canny(resized, 170, 50)
        crop_img = resized[y:y+h, x:x+w]
        # Binarize so the model sees a clean black/white strip.
        (thresh, blackAndWhiteImage) = cv2.threshold(crop_img, 127, 255, cv2.THRESH_BINARY)
        cv2.imshow('robot vision', blackAndWhiteImage)
        # 'q' toggles the pause flag; while paused only the preview updates.
        if cv2.waitKey(25) & 0xFF == ord('q'):
            shutdown = not shutdown
        if shutdown:
            continue
        # Prediction vector ordering (from the branches below):
        # [left, right, forward, brake].
        prediction = model.predict([blackAndWhiteImage.reshape(WIDTH, HEIGHT, 1)])[0]
        # print(prediction)
        moves = list(np.around(prediction))
        if moves == [1, 0, 0, 0]:
            print("left")
            keyboard.press(Key.left)
            keyboard.release(Key.right)
            keyboard.release(Key.up)
            keyboard.release(Key.down)
        elif moves == [0, 1, 0, 0]:
            print("right")
            keyboard.press(Key.right)
            keyboard.release(Key.left)
            keyboard.release(Key.up)
            keyboard.release(Key.down)
        elif moves == [0, 0, 1, 0]:
            print("forward")
            keyboard.press(Key.up)
            keyboard.release(Key.right)
            keyboard.release(Key.left)
            keyboard.release(Key.down)
        elif moves == [0, 0, 0, 1]:
            breaks = breaks + 1
            # After 40 consecutive brake predictions, assume the car is stuck
            # and accelerate instead; the counter resets once it passes 70.
            if(breaks > 40):
                print("Should break but run")
                keyboard.press(Key.up)
                keyboard.release(Key.right)
                keyboard.release(Key.left)
                keyboard.release(Key.down)
            else:
                print("break!")
                keyboard.press(Key.down)
                keyboard.release(Key.up)
                keyboard.release(Key.left)
                keyboard.release(Key.right)
            if(breaks > 70):
                breaks = 0
# while(True):
# screen = np.array(ImageGrab.grab(bbox=(20,220, 420, 450)))
#
# last_time = time.time()
# print('Frame took {} seconds'.format(time.time() - last_time))
# # cv2.imshow('window', cv2.resize(cv2.cvtColor(screen, cv2.COLOR_RGBA2GRAY), (80, 60)))
# screen = cv2.resize(cv2.cvtColor(screen, cv2.COLOR_RGBA2GRAY), (80, 60))
#
# prediction = model.predict([screen.reshape(WIDTH, HEIGHT, 1)])[0]
# moves = list(np.around(prediction))
# print(moves, prediction)
#
# if moves == [1.0, 0, 0]:
# keyboard.press('a')
# keyboard.release('s')
# elif moves == [0, 1.0, 0]:
# keyboard.press('s')
# keyboard.release('a')
# elif moves == [0, 0, 1.0]:
# keyboard.release('a')
# keyboard.release('s')
|
18,547 | 5f9c1c1e31103250ba328b13a38ff627cd3b520e | from django.contrib import admin
from .models import Product
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for the Product model."""

    list_display = ('name', 'dataEntryBy', 'created', 'publish', 'status')
    list_filter = ('status', 'created', 'publish', 'dataEntryBy')
    search_fields = ('name', 'desc')
    # Auto-fill the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug': ('name',)}
    # Raw-id widget avoids loading every user into a dropdown.
    raw_id_fields = ('dataEntryBy',)
    date_hierarchy = 'publish'
    ordering = ['status', 'publish']


admin.site.register(Product, ProductAdmin)
|
18,548 | aaf3c4a712c149539a3f339e6a4bc674136c464b | ###################################################################################################################
# author : ENES ÇAVUŞ
# subject : Getting Real Time trend data from Twitter via Tweepy -
###################################################################################################################
from kafka import KafkaProducer
from datetime import datetime
import sys
import json
import re
import time
from random import randint
import unicodedata
import tweepy
import csv
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import datetime
now = datetime.datetime.now()
print(now.strftime("%A"))
today = now.strftime("%A")
import os
import geocoder
import pandas as pd
from kafka import KafkaConsumer, KafkaProducer
# kafka producer on port 9092 and encoding the data for better data transfer
# Kafka producer on port 9092; values are JSON-encoded then UTF-8 serialized
# for better data transfer to consumers.
producer = KafkaProducer(bootstrap_servers='localhost:9092',
                         value_serializer=lambda v: json.dumps(v).encode('utf-8'))

# REAL TIME ------------------------
# your API keys and tokens
# consumer_key="#"
# consumer_secret="#"
# access_token="#"
# access_token_secret="#"

# tweepy authentication
# NOTE(review): the four credential variables above are commented out, so the
# next line raises NameError as shipped -- real secrets must be supplied first.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# NOTE(review): fetched but never used afterwards.
trends_available = api.trends_available()

for i in range(2):
    # First pass: global trend data, sent via the kafka producer on
    # topic 'trends1'.
    if i == 0:
        print("\n\n GLOBAL \n\n")
        trends = api.trends_place(1)  # WOEID 1 == worldwide
        globalTrends = []
        for trend in trends[0]['trends']:
            # Keep only trends with a reported volume above 10k tweets.
            if trend['tweet_volume'] is not None and trend['tweet_volume'] > 10000:
                globalTrends.append((trend['name'], trend['tweet_volume']))
        globalTrends.sort(key=lambda x:-x[1])  # descending by tweet volume
        # NOTE(review): this inner loop variable shadows the outer `i`.
        for i in range(10):
            producer.send('trends1',[globalTrends[i][0],globalTrends[i][1],])
            print("trend global sent!", i)
            time.sleep(0.5)
    # Second pass: local trend data (closest WOEID to the geocoded location),
    # sent on topic 'trends2'.
    elif i == 1:
        print("\n\n LOCAL \n\n")
        loc = "England"
        g = geocoder.osm(loc)  # geocode the location name to lat/lng
        closest_loc = api.trends_closest(g.lat, g.lng)
        trends = api.trends_place(closest_loc[0]['woeid'])
        localTrends = []
        for trend in trends[0]['trends']:
            if trend['tweet_volume'] is not None and trend['tweet_volume'] > 10000:
                localTrends.append((trend['name'], trend['tweet_volume']))
        # NOTE(review): unlike the global branch, localTrends is NOT sorted
        # before taking the first 10 -- confirm whether that is intended.
        for i in range(10):
            producer.send('trends2',[localTrends[i][0],localTrends[i][1],])
            print("trend local sent!", i)
            time.sleep(0.5)

# NOT real Time - from local - only for test
# trends = [["deneme1",123],["deneme2",1232],["deneme3",1213],["deneme4",1233],["deneme5",4123]]
# trendsLocal = [["local1",123],["local1",1232],["local1",1213],["local1",1233],["local1",4123]]
# for i in range(5):
#     producer.send('trends1',[trends[i][0],trends[i][1]])
#     print("trend global sent!")
#     producer.send('trends2',[trendsLocal[i][0],trendsLocal[i][1]])
#     print("trend local sent!")
#     time.sleep(1)
print("Exiting")
sys.exit()
# END - ENES ÇAVUŞ - Btirme Projesi - SAU - Bahar 2021
|
18,549 | b7f687d8980b65d1fdc82f38a66241954740d990 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 12 19:32:43 2020
@author: duanyiting
"""
def hanoi(n, p1, p2, p3):
    """Print the moves transferring n disks from peg p1 to peg p3,
    using peg p2 as the spare (classic Tower of Hanoi recursion)."""
    if n == 1:
        print("盘子从%d移到%d" % (p1, p3))
        return
    hanoi(n - 1, p1, p3, p2)
    print("盘子从%d移到%d" % (p1, p3))
    hanoi(n - 1, p2, p1, p3)
# Guarding the interactive prompt lets `hanoi` be imported as a library
# without blocking on input(); script behavior is unchanged.
if __name__ == "__main__":
    j = int(input("请输入要移动盘子的数量:"))
    hanoi(j, 1, 2, 3)
18,550 | 306ea115fced897dd2a5969074d4823b451ef403 | import os
import logging
import fnmatch
import ntpath
from pybindx.input.info_helper import CppInfoHelper
from pybindx.input.package_info import PackageInfo
from pybindx.input.free_function_info import CppFreeFunctionInfo
from pybindx.input.class_info import CppClassInfo
from pybindx.input.enum_info import CppEnumInfo
from pybindx.parsers.package_info import PackageInfoParser
from pybindx.parsers.source_parser import CppSourceParser
from pybindx.writers.header_collection_writer import CppHeaderCollectionWriter
from pybindx.writers.module_writer import CppModuleWrapperWriter
import pybindx.templates.pybind11_default as wrapper_templates
class CppWrapperGenerator(object):
    def __init__(self, source_root,
                 source_includes=None,
                 wrapper_root=None,
                 castxml_binary='castxml',
                 package_info_path='package_info.yaml',
                 clang_binary='clang',
                 cflags='--std=c++11 -w',
                 source_header_files=None
                 ):
        """Configure the generator.

        :param source_root: root directory scanned for C++ headers.
        :param source_includes: extra include dirs for parsing
            (defaults to [source_root]).
        :param wrapper_root: output dir for generated wrappers
            (defaults to source_root).
        :param castxml_binary: castxml executable used by pygccxml.
        :param package_info_path: YAML config; a missing default path falls
            back to built-in package info.
        :param clang_binary: clang executable passed to the parser.
        :param cflags: compiler flags for the parse.
        :param source_header_files: explicit header list; scanned in addition
            to the source_root walk.
        """
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        self.source_root = source_root
        self.source_includes = source_includes
        self.wrapper_root = wrapper_root
        self.castxml_binary = castxml_binary
        self.clang_binary = clang_binary
        self.cflags = cflags
        self.package_info_path = package_info_path
        self.source_hpp_files = []
        if source_header_files is not None:
            self.source_hpp_files = source_header_files
        # Populated later by parse_header_collection().
        self.global_ns = None
        self.source_ns = None
        if self.wrapper_root is None:
            self.wrapper_root = self.source_root
        if self.source_includes is None:
            self.source_includes = [self.source_root]
        # collect all location paths used
        self.source_dirs = {self.source_root}
        self.package_info = PackageInfo("up_package", self.source_root)
        # If we suspect that a valid info file has not been supplied
        # fall back to the default behaviour
        path_is_default = (self.package_info_path == 'package_info.yaml')
        file_exists = os.path.exists(self.package_info_path)
        if path_is_default and (not file_exists):
            logger.info('YAML package info file not found. Using default info.')
            self.package_info_path = None
    def collect_source_hpp_files(self):
        """
        Walk through the source root and add any files matching the provided patterns.
        Keep the wrapper root out of the search path to avoid pollution.
        """
        # First pass: explicit headers passed in via `source_header_files`.
        # NOTE(review): this pass stores only the basename while the os.walk
        # pass below stores full paths -- confirm downstream consumers
        # handle both forms.
        for pattern in self.package_info.source_hpp_patterns:
            for filename in fnmatch.filter(self.source_hpp_files, pattern):
                self.package_info.source_hpp_files.append(os.path.basename(filename))
                self.source_dirs.add(os.path.abspath(os.path.dirname(filename)))
        # Second pass: recursively scan the source tree, skipping anything
        # pybindx generated itself.
        for root, _, filenames in os.walk(self.source_root, followlinks=True):
            for pattern in self.package_info.source_hpp_patterns:
                for filename in fnmatch.filter(filenames, pattern):
                    if "pybindx" not in filename:
                        self.package_info.source_hpp_files.append(os.path.join(root, filename))
        # Drop anything living under the wrapper output directory.
        self.package_info.source_hpp_files = [path for path in self.package_info.source_hpp_files
                                              if self.wrapper_root not in path]
def generate_header_collection(self):
"""
Write the header collection to file
"""
header_collection_writer = CppHeaderCollectionWriter(self.package_info,
self.wrapper_root)
header_collection_writer.write()
header_collection_path = self.wrapper_root + "/"
header_collection_path += header_collection_writer.header_file_name
return header_collection_path
    def parse_header_collection(self, header_collection_path):
        """
        Parse the header collection with pygccxml and Castxml
        to populate the global and source namespaces.

        :param header_collection_path: path to the generated all-in-one header.
        """
        source_parser = CppSourceParser(self.source_dirs,
                                        header_collection_path,
                                        self.castxml_binary,
                                        self.source_includes,
                                        self.cflags,
                                        self.clang_binary)
        source_parser.parse()
        # Cache the parsed namespaces for the update_*_info methods.
        self.global_ns = source_parser.global_ns
        self.source_ns = source_parser.source_ns
    @staticmethod
    def get_wrapper_template():
        """
        Return the string templates for the wrappers (pybind11 defaults).
        """
        return wrapper_templates.template_collection
    def update_free_function_info(self):
        """
        Update the free function info from the parsed pygccxml output.

        Modules flagged `use_all_free_functions` get an entry for every free
        function declared under their source path; otherwise only functions
        already listed in the module info are resolved, and only when the
        name matches exactly one declaration.
        """
        for eachModule in self.package_info.module_info:
            if eachModule.use_all_free_functions:
                free_functions = self.source_ns.free_functions(allow_empty=True)
                for eachFunction in free_functions:
                    if eachModule.is_decl_in_source_path(eachFunction):
                        function_info = CppFreeFunctionInfo(eachFunction.name)
                        function_info.module_info = eachModule
                        function_info.decl = eachFunction
                        eachModule.free_function_info.append(function_info)
            else:
                for eachFunction in eachModule.free_function_info:
                    functions = self.source_ns.free_functions(eachFunction.name,
                                                              allow_empty=True)
                    # Ambiguous (overloaded) names are left unresolved.
                    if len(functions) == 1:
                        eachFunction.decl = functions[0]
    def update_enum_info(self):
        """
        Update the enum info from the parsed pygccxml output.

        Mirrors update_free_function_info: `use_all_enums` collects every
        enum under the module source path; otherwise pre-listed enums are
        resolved only on an unambiguous (single) name match.
        """
        for eachModule in self.package_info.module_info:
            if eachModule.use_all_enums:
                enumerations = self.source_ns.enumerations(allow_empty=True)
                for eachEnum in enumerations:
                    if eachModule.is_decl_in_source_path(eachEnum):
                        enum_info = CppEnumInfo(eachEnum.name)
                        enum_info.module_info = eachModule
                        enum_info.decl = eachEnum
                        eachModule.enum_info.append(enum_info)
            else:
                for eachEnum in eachModule.enum_info:
                    enumerations = self.source_ns.enumerations(eachEnum.name,
                                                               allow_empty=True)
                    # Ambiguous names are left unresolved.
                    if len(enumerations) == 1:
                        eachEnum.decl = enumerations[0]
def update_class_info(self):
"""
Update the class info parsed on pygccxml output
"""
for eachModule in self.package_info.module_info:
if eachModule.use_all_classes:
classes = self.source_ns.classes(allow_empty=True)
for eachClass in classes:
if eachModule.is_decl_in_source_path(eachClass):
class_info = CppClassInfo(eachClass.name)
class_info.module_info = eachModule
class_info.decl = eachClass
eachModule.class_info.append(class_info)
else:
for eachClass in eachModule.class_info:
classes = self.source_ns.classes(eachClass.name,
allow_empty=True)
if len(classes) == 1:
eachClass.decl = classes[0]
    def generate_wrapper(self):
        """
        Main method for wrapper generation.

        Orchestrates the full pipeline: parse the optional package info
        file, collect headers, assign source files and template arguments
        to classes, parse the header collection with CastXML, attach the
        parsed declarations, and finally write one wrapper per module.
        """
        # If there is an input file, parse it
        if self.package_info_path is not None:
            info_parser = PackageInfoParser(self.package_info_path,
                                            self.source_root)
            info_parser.parse()
            self.package_info = info_parser.package_info
        else:
            # No package info file: keep whatever package_info was already
            # configured on this generator.
            pass
        # Generate a header collection
        self.collect_source_hpp_files()
        # Attempt to assign source paths to each class, assuming the containing
        # file name is the class name
        for eachModule in self.package_info.module_info:
            for eachClass in eachModule.class_info:
                for eachPath in self.package_info.source_hpp_files:
                    # ntpath handles Windows-style separators too.
                    base = ntpath.basename(eachPath)
                    if eachClass.name == base.split('.')[0]:
                        eachClass.source_file_full_path = eachPath
                        if eachClass.source_file is None:
                            eachClass.source_file = base
        # Attempt to automatically generate template args for each class
        for eachModule in self.package_info.module_info:
            info_generator = CppInfoHelper(eachModule)
            for eachClass in eachModule.class_info:
                info_generator.expand_templates(eachClass, "class")
        # Generate the header collection
        header_collection_path = self.generate_header_collection()
        # Parse the header collection (populates global_ns/source_ns)
        self.parse_header_collection(header_collection_path)
        # Update the Class and Free Function Info from the parsed code
        self.update_class_info()
        self.update_free_function_info()
        self.update_enum_info()
        # Write the modules
        for eachModule in self.package_info.module_info:
            module_writer = CppModuleWrapperWriter(self.global_ns,
                                                   self.source_ns,
                                                   eachModule,
                                                   self.get_wrapper_template(),
                                                   self.wrapper_root)
            module_writer.write()
|
18,551 | 7385f092e66564de91279dbd4ac23f1bcb6d74a0 |
# Brute-force search over two decimal digits for the number puzzle:
# a two-digit number times its reversal equals 3154, and the same
# number times its tens digit equals 114.
for tens in range(10):
    for ones in range(10):
        value = tens * 10 + ones
        if value * (ones * 10 + tens) == 3154:
            print(tens + ones, tens, ones)
        if value * tens == 114:
            print("result:", tens, ones)
|
18,552 | 5d77b4bebba64d6091fd1bc0286f9fee8025d038 | 由于Python是动态语言,根据类创建的实例可以任意绑定属性。
给实例绑定属性的方法是通过实例变量,或者通过self变量:
class Student(object):
def __init__(self, name):
self.name = name
s = Student('Bob')
s.score = 90
但是,如果Student类本身需要绑定一个属性呢?可以直接在class中定义属性,这种属性是类属性,归Student类所有:
class Student(object):
name = 'Student'
当我们定义了一个类属性后,这个属性虽然归类所有,但类的所有实例都可以访问到。
小结
实例属性属于各个实例所有,互不干扰;
类属性属于类所有,所有实例共享一个属性;
不要对实例属性和类属性使用相同的名字,否则将产生难以发现的错误。
|
18,553 | b8cce2452a735ac2dc3a3f72e20550447b58f3ae | import fretboardgtr
from fretboardgtr import chordgtr
import cairosvg
import numpy as np
import svgwrite
from fretboardgtr.fretboardgtr import FretBoardGtr
import imageio
def get_fingering(labels, f, round_labels=False):
    """Decode one frame of one-hot string/fret labels into fret numbers.

    Parameters
    ----------
    labels : np.ndarray
        One-hot fret activations; assumed shape is
        (frames, strings, frets) — TODO confirm against caller.
    f : int
        Frame index to decode.
    round_labels : bool
        If True, round the labels first (useful for soft predictions).

    Returns
    -------
    list
        One fret number per string; 0 when no fret is active on a string.
    """
    if round_labels:
        labels = np.around(labels)
    fingering = []
    for string in labels[f]:
        # Index of the first active fret for this string. The previous
        # bare ``except`` masked all errors; only an empty one-hot row is
        # a legitimate "no fret" case, handled explicitly here.
        hits = np.where(string == 1)[0]
        fingering.append(hits[0] if hits.size else 0)
    return fingering
def draw_fretboard(annot, i):
    """Render frame ``i`` of one-hot annotations as a fretboard image.

    Returns the fretboardgtr drawing object and the rasterized PNG bytes.
    """
    fingering = []
    for string in annot[i]:
        # First active fret per string; unlike get_fingering this assumes
        # every string has an active entry (IndexError otherwise).
        fingering.append(np.where(string == 1)[0][0])
    if fingering == [0, 0, 0, 0, 0, 0]:
        # All strings open: ChordGtr cannot render this fingering directly,
        # so use the manual open-string renderer.
        F = open_fret(fingering=fingering)
    else:
        F = fretboardgtr.ChordGtr(fingering=fingering, root='C')
        F.theme(show_note_name=True, color_chord=False)
        # NOTE(review): draw() belongs to the ChordGtr path only —
        # open_fret() composes its own drawing; confirm original nesting.
        F.draw()
    # Rasterize the SVG drawing to PNG bytes for video writing.
    d = cairosvg.svg2png(F.dwg.tostring())
    return F, d
def open_fret(fingering):
    """Manually render a fretboard where every string is played open."""
    # this function was created to support an input of [0, 0, 0, 0, 0, 0]
    F = fretboardgtr.ChordGtr()
    F.emptybox()
    F.background_fill()
    F.createfretboard()
    F.nut()
    F.show_tuning(fingering)
    fingname = F.tuning
    # Interval of each open string relative to the root, used for coloring.
    inter = FretBoardGtr.find_intervals(fingname, F.root)
    for i in range(0, len(F.tuning), 1):
        # Circle centre: X depends on the string index, Y on the fret.
        X = F.wf * (1 + i) + F._ol
        Y = F.hf * (fingering[i] + 1 / 2) + F._ol
        color = F.dic_color[inter[i]]
        name_text = fingname[i]
        F.dwg.add(F.dwg.circle((X, Y), r=F.R, fill=F.open_circle_color, stroke=color, stroke_width=F.open_circle_stroke_width))
        t = svgwrite.text.Text(name_text, insert=(X, Y), font_size=F.fontsize_text, font_weight="bold", fill=F.open_text_color,
                               style="text-anchor:middle;dominant-baseline:central")
        F.dwg.add(t)
    return F
def label_results(y_pred, y_gt, f, num_frets=10):
    """
    Draw one fretboard frame comparing predicted and ground-truth fingerings.

    :param y_pred: predicted labels
    :param y_gt: ground truth labels
    :param f: frame number
    :param num_frets: number of frets the output canvas is sized for
    :return: F: the fretboard object, d: PNG output readable by imageio to write video files
    """
    # dictionary of defined colours
    colour_dict = {"red": 'rgb(231, 0, 0)',
                   "yellow": 'rgb(249, 229, 0)',
                   "orange": 'rgb(249, 165, 0)',
                   "green": 'rgb(0, 154, 0)',
                   "navy": 'rgb(0, 15, 65)',
                   "blue": 'rgb(0, 73, 151)',
                   "brown": 'rgb(168, 107, 98)',
                   "pink": 'rgb(222, 81, 108)',
                   "purple": 'rgb(120, 37, 134)',
                   "plum": 'rgb(120, 25, 98)'
                   }
    y_pred = np.around(y_pred)
    fingering_pred = get_fingering(y_pred, f)
    fingering_gt = get_fingering(y_gt, f)
    # initialize fretboardgtr object
    F = fretboardgtr.ChordGtr()
    # draw output box and background fill
    F.dwg = svgwrite.Drawing(F.path,
                             size=(850, (F.hf) * (num_frets + 2) + F._ol),
                             profile='tiny')
    F.dwg.add(F.dwg.rect(insert=(F.wf + F._ol, F.hf + F._ol),
                         size=(850, 850),
                         rx=None, ry=None, fill=F.background_color))
    # draw the fretboard
    # NOTE(review): createfretboard defaults to 14 frets while the canvas
    # above is sized for num_frets (10) — confirm the intended overlap.
    createfretboard(F)
    F.nut()
    #F.show_tuning(fingering_gt)
    fingname = F.tuning
    #inter = FretBoardGtr.find_intervals(fingname, F.root)
    fretfing = [0 if v == None else v for v in fingering_gt]
    try:
        # NOTE(review): minfret is computed but never used below; the bare
        # except silently ignores the all-open case — confirm dead code.
        minfret = min(v for v in fretfing if v > 0)
    except:
        pass
    # Draw Legend (three swatches explaining the circle colour coding)
    X_legend = F.wf * (8) + F._ol
    Y_legend = F.hf * (2) + F._ol
    F.dwg.add(F.dwg.circle((X_legend, Y_legend), r=F.R, fill=colour_dict['green'],
                           stroke=colour_dict['green'], stroke_width=F.open_circle_stroke_width))
    F.dwg.add(svgwrite.text.Text('= Ground truth label', insert=(X_legend + F.wf * 0.5, Y_legend),
                                 font_size=F.fontsize_text, font_weight="bold", fill=F.open_text_color,
                                 style="text-anchor:start;dominant-baseline:central"))
    F.dwg.add(F.dwg.circle((X_legend, Y_legend + F.hf), r=F.R, fill=colour_dict['green'],
                           stroke=colour_dict['yellow'], stroke_width=F.open_circle_stroke_width))
    F.dwg.add(svgwrite.text.Text('= Correctly Predicted', insert=(X_legend + F.wf * 0.5, Y_legend + F.hf),
                                 font_size=F.fontsize_text, font_weight="bold", fill=F.open_text_color,
                                 style="text-anchor:start;dominant-baseline:central"))
    F.dwg.add(F.dwg.circle((X_legend, Y_legend + 2 * F.hf), r=F.R, fill=colour_dict['red'],
                           stroke=colour_dict['red'], stroke_width=F.open_circle_stroke_width))
    F.dwg.add(svgwrite.text.Text('= Incorrectly Predicted', insert=(X_legend + F.wf * 0.5, Y_legend + 2 * F.hf),
                                 font_size=F.fontsize_text, font_weight="bold", fill=F.open_text_color,
                                 style="text-anchor:start;dominant-baseline:central"))
    # Draw predictions and labels on fretboard
    for i in range(0, len(F.tuning), 1):
        # X coordinate varies based on the string number the finger is on
        X = F.wf * (1 + i) + F._ol
        # Y coordinate varies based on the fret number
        Y_gt = F.hf * (fingering_gt[i] + 1 / 2) + F._ol
        Y_pred = F.hf * (fingering_pred[i] + 1 / 2) + F._ol
        if fingering_gt[i] == 0:
            # correct prediction, no finger on the fretboard
            if fingering_pred[i] == 0:
                pass
            else:
                # incorrect prediction
                F.dwg.add(F.dwg.circle((X, Y_pred), r=F.R, fill=colour_dict['red'], stroke=colour_dict['red'],
                                       stroke_width=F.open_circle_stroke_width))
        elif fingering_gt[i] > 0:
            # correct prediction
            if fingering_pred[i] == fingering_gt[i]:
                F.dwg.add(F.dwg.circle((X, Y_gt), r=F.R, fill=colour_dict['green'], stroke=colour_dict['yellow'],
                                       stroke_width=F.open_circle_stroke_width))
            # incorrect prediction of 0, only draw the ground truth
            elif fingering_pred[i] == 0:
                F.dwg.add(F.dwg.circle((X, Y_gt), r=F.R, fill=colour_dict['green'], stroke=colour_dict['green'],
                                       stroke_width=F.open_circle_stroke_width))
            else:
                # incorrect prediction, draw prediction and ground truth
                F.dwg.add(F.dwg.circle((X, Y_pred), r=F.R, fill=colour_dict['red'], stroke=colour_dict['red'],
                                       stroke_width=F.open_circle_stroke_width))
                F.dwg.add(F.dwg.circle((X, Y_gt), r=F.R, fill=colour_dict['green'], stroke=colour_dict['green'],
                                       stroke_width=F.open_circle_stroke_width))
    # convert image to readable PNG format
    d = cairosvg.svg2png(F.dwg.tostring())
    return F, d
def createfretboard(F, num_frets=14):
    """Draw the empty fret grid (fret lines and strings) onto ``F.dwg``.

    Parameters
    ----------
    F : fretboardgtr drawing object
        Supplies the SVG drawing (``F.dwg``), tuning, fingering and the
        geometry/colour constants (``wf``, ``hf``, ``_ol``, ...).
    num_frets : int
        Number of frets drawn in the fixed (first-position) layout.
    """
    # Treat unfretted (None) strings as fret 0.
    fretfing = [0 if v is None else v for v in F.fingering]
    # Creation of frets
    if max(fretfing) > 4:
        # High position: draw F.gap + 2 lines (the two extras are the
        # beginning and the end of the fretboard window).
        for i in range(F.gap + 2):
            F.dwg.add(
                F.dwg.line(
                    start=(F.wf + F._ol, (F.hf) * (i + 1) + F._ol),
                    end=((F.wf) * (len(F.tuning)) + F._ol, (F.hf) * (1 + i) + F._ol),
                    stroke=F.fretcolor,
                    stroke_width=F.fretsize
                )
            )
    else:
        # First position: (i + 2) skips the first slot so the nut can be
        # drawn separately.
        for i in range(num_frets):
            F.dwg.add(
                F.dwg.line(
                    start=(F.wf + F._ol, (F.hf) * (i + 2) + F._ol),
                    end=((F.wf) * (len(F.tuning)) + F._ol, (F.hf) * (i + 2) + F._ol),
                    stroke=F.fretcolor,
                    stroke_width=F.fretsize
                )
            )
    # Creation of strings. Lower-index strings are drawn slightly thicker
    # unless a uniform size is requested. (Was `== False` / `== True`
    # chained tests, which left string_size_list undefined for any
    # non-bool flag value.)
    if F.string_same_size:
        string_size_list = [F.string_size for _ in range(len(F.tuning))]
    else:
        string_size_list = [F.string_size - i / 4 for i in range(len(F.tuning))]
    for i in range(len(F.tuning)):
        F.dwg.add(
            F.dwg.line(
                start=((F.wf) * (1 + i) + F._ol, F.hf + F._ol - F.fretsize / 2),
                end=((F.wf) * (1 + i) + F._ol, F.hf + F._ol + (num_frets) * F.hf + F.fretsize / 2 + F.fretsize),
                stroke=F.strings_color,
                stroke_width=string_size_list[i]
            )
        )
def get_video(filename, npzfile=None, y_pred=None, y_gt=None):
    """Write a video of fretboard frames comparing predictions to labels.

    Either pass ``npzfile`` (an archive with ``y_pred``/``y_gt`` arrays) or
    the arrays themselves. One frame is rendered at the first occurrence of
    each unique ground-truth labelling, and the result is saved at 5 fps.
    """
    if npzfile:
        archive = np.load(npzfile)
        y_pred = archive['y_pred']
        y_gt = archive['y_gt']
    _, first_indices = np.unique(y_gt, return_index=True, axis=0)
    rendered = [label_results(y_pred, y_gt, idx)[1] for idx in sorted(first_indices)]
    arr_frames = [imageio.imread(png) for png in rendered]
    imageio.mimsave(filename, arr_frames, fps=5, quality=7)
18,554 | 72f737f26d3e3aed02b976074738a4def21f2992 | # -*- coding: utf-8 -*-
# author:Super.Shen
import pandas as pd
from build.database import date, url2
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 1000)
import warnings
warnings.filterwarnings('ignore')
def run1():
df = date(url2)
df_map = pd.read_excel('C:\\Users\Administrator\Desktop\map.xlsx')
df_map = df_map[['product_id', 'Flag']]
df['time'] = df['pay_time'].apply(lambda x: str(x).split(' ')[0])
# 合并匹配表
df = pd.merge(left=df, right=df_map, on='product_id', how='left')
df_out = pd.DataFrame()
for x, y in df.groupby(['time']):
y = pd.DataFrame(y.groupby('Flag').size())
y.columns = [x]
df_out = pd.concat([df_out, y], axis=1)
df_out.fillna(0, inplace=True)
df_out.sort_values(by=df_out.columns[-1], ascending=0, inplace=True)
df_out.reset_index(inplace=True)
df_out.rename(columns={'index': '类型', 'Flag': '类型'}, inplace=True)
print('\n第一个表运行完毕……')
return df_out
if __name__ == '__main__':
run1()
|
18,555 | 360a087487de3787226f152756f1aaa9aebe3123 | #!C:\Python\python.exe
# Automates logging in to a captive web portal at 172.16.166.10 using Chrome.
from __future__ import print_function
from sys import exit

try:
    from selenium import webdriver
    from pyvirtualdisplay import Display
except ImportError:
    print("\nError importing required modules\nPlease run dependencies.sh\n")
    exit(-1)

import time

#display = Display(visible=0, size=(800, 500))
#display.start()

# NOTE(review): webdriver.Chrome(<path>) and find_element_by_css_selector
# are removed in Selenium 4 — confirm the pinned selenium version.
browser = webdriver.Chrome("C:\Python27\Scripts\chromedriver.exe")
time.sleep(1)
url = "http://172.16.166.10"
browser.get(url)
time.sleep(3)

# Locate the username field, password field and login button on the portal.
uelement = browser.find_element_by_css_selector(".textfield2>input[type='text']")
pelement = browser.find_element_by_css_selector(".textfield2>input[type='password']")
lelement = browser.find_element_by_css_selector(".login2>input[type='image']")

uelement.click()
time.sleep(1)
uelement.send_keys("") # Write your entry number instead of 14bcs027
time.sleep(1)
pelement.click()
time.sleep(1)
pelement.send_keys("") # Write your password here instead of mine
time.sleep(1)
#iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')); choco install -y python python-pip wget unzip; pip install pyvirtualdisplay selenium; wget -nc http://chromedriver.storage.googleapis.com/2.9/chromedriver_win32.zip; unzip ~\Downloads\chromedriver.zip; mv ~\Downloads\chromedriver.exe C:\Python27\Scripts
lelement.click()
time.sleep(3)
browser.close() # Remove this line, if you don't want browser to get closed after login process.
#display.stop()
|
18,556 | 1dc8b463e8898ed1936c385ab524ddb60672204d | import datetime
import time as stdlib_time
from django.conf import settings
def time() -> float:
    """Current wall-clock time in seconds since the epoch."""
    current = stdlib_time.time()
    return current
def now(tz=None) -> datetime.datetime:
    """Current time as a datetime, optionally localized to ``tz``."""
    return datetime.datetime.fromtimestamp(time(), tz)
def now_us() -> int:
    """Current time in whole microseconds since the epoch."""
    micros = time() * 1_000_000
    return int(micros)
def now_with_tz() -> datetime.datetime:
    """Current time as a timezone-aware datetime in UTC."""
    utc = datetime.timezone.utc
    return now(tz=utc)
def build_absolute_url(path: str) -> str:
    """Prefix ``path`` with SITE_BASE_URL, defaulting to ``http://testserver``."""
    base_url = getattr(settings, "SITE_BASE_URL", None) or "http://testserver"
    return f"{base_url}{path}"
18,557 | 00e8e9fb196f7bb0ca28220f608dde8506d2044d | # utility package
import argparse
import logging
import time
import os
from pathlib import Path
# Command-line configuration for the adversarial-evaluation run.
parser = argparse.ArgumentParser(description='Evaluation on model')
parser.add_argument('--cuda', default='0', type=str, help='select gpu on the server. (default: 0)')
parser.add_argument('--description', '--de', default='default', type=str, help='description used to define different model')
parser.add_argument('--prefix', default='Epoch_0001', type=str, help='prefix used to define logs')
parser.add_argument('--seed', default=6869, type=int, help='random seed')
parser.add_argument('--batch-size', '-b', default=80, type=int, help='mini-batch size (default: 160)')
parser.add_argument('--epochs', default=50, type=int, help='number of total epochs to run')
# parser.add_argument('--lr-min', default=0.005, type=float, help='minimum learning rate for optimizer')
parser.add_argument('--lr-max', default=0.001, type=float, help='maximum learning rate for optimizer')
# parser.add_argument('--momentum', '--mm', default=0.9, type=float, help='momentum for optimizer')
# parser.add_argument('--weight-decay', '--wd', default=0.0001, type=float, help='weight decay for model training')
parser.add_argument('--target', '-t', default=None, type=int, help='adversarial attack target label')
# parser.add_argument('--rnd-target', '--rt', action='store_true', help='non-target attack using random label as target')
parser.add_argument('--iteration', '-i', default=20, type=int, help='adversarial attack iterations (default: 20)')
parser.add_argument('--step-size', '--ss', default=0.005, type=float, help='step size for adversarial attacks')
parser.add_argument('--epsilon', default=1, type=float, help='epsilon for adversarial attacks')
parser.add_argument('--kernel-size', '-k', default=13, type=int, help='kernel size for adversarial attacks, must be odd integer')
parser.add_argument('--image-size', '--is', default=224, type=int, help='image size (default: 224 for ImageNet)')
parser.add_argument('--dataset-root', '--ds', default='/tmp2/dataset/Restricted_ImageNet_Hendrycks', \
    type=str, help='input dataset, default: Restricted Imagenet Hendrycks A')
parser.add_argument('--ckpt-root', '--ckpt', default='/tmp2/aislab/adv_ckpt', \
    type=str, help='root directory of checkpoints')
parser.add_argument('--opt-level', '-o', default='O0', type=str, help='Nvidia apex optimation level (default: O1)')
args = parser.parse_args()

# Restrict GPU visibility BEFORE importing torch below — CUDA reads
# CUDA_VISIBLE_DEVICES when it initializes, so the import order here is
# deliberate and must be preserved.
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda

# pytorch related package
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms, models
from torchvision.utils import save_image

# NVIDIA apex
from apex import amp

# math and showcase
import matplotlib.pyplot as plt
import numpy as np

from utils.utils import get_loaders, deforming_medium, normalize, PGD, AET
def main():
    """Evaluate a checkpointed ResNet-50 on clean, AET- and PGD-attacked data.

    Prints clean accuracy, AET adversarial accuracy and PGD adversarial
    accuracy over the test set.
    """
    print('pytorch version: ' + torch.__version__)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Set seeds for reproducible attack iterations
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # load dataset (Imagenet)
    train_loader, test_loader = get_loaders(args.dataset_root, args.batch_size, \
        image_size=args.image_size, augment=False)
    # Load model and optimizer
    model = models.resnet50(pretrained=False, num_classes=10).to(device)
    # Add weight decay into the model
    # NOTE(review): both optimizers are created only so amp.initialize and
    # the checkpoint layout match training; they are not stepped here.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_max,
                                 # momentum=args.momentum,
                                 # weight_decay=args.weight_decay
                                 )
    optimizer2 = torch.optim.Adam(model.parameters(), lr=args.lr_max,
                                  # momentum=args.momentum,
                                  # weight_decay=args.weight_decay
                                  )
    # load the pre-train model
    print('==> Loading pre-trained model..')
    # amp must wrap the model/optimizers before state dicts are restored.
    model, [optimizer, optimizer2] = amp.initialize(model, [optimizer, optimizer2], \
        opt_level=args.opt_level, verbosity=1)
    ckpt_path = Path(args.ckpt_root) / args.description / (args.prefix + '.pth')
    checkpoint = torch.load(ckpt_path)
    prev_acc = checkpoint['acc']
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    amp.load_state_dict(checkpoint['amp_state_dict'])
    epoch_start = checkpoint['epoch'] + 1
    torch.set_rng_state(checkpoint['rng_state'])
    # criterion/prev_acc/epoch_start are unused below — kept for parity
    # with the training script's checkpoint handling.
    criterion = nn.CrossEntropyLoss().to(device)
    warper = deforming_medium(args)
    warper.eval()
    model.eval()
    start_time = time.time()
    correct_normal, correct_adv, correct_adv2, total = 0, 0, 0, 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = data.to(device), target.to(device)
        # Craft adversarial examples: AET (spatial warp) and PGD (L-inf).
        adv = AET(model, warper, data, target, step=args.step_size, iter=args.iteration)
        adv2, delta = PGD(model, data, target, eps=8/255, alpha=1/255, iter=args.iteration)
        with torch.no_grad():
            # Clean accuracy
            y_normal = model(normalize(data))
            preds_normal = F.softmax(y_normal, dim=1)
            preds_top_p, preds_top_class = preds_normal.topk(1, dim=1)
            correct_normal += (preds_top_class.view(target.shape) == target).sum().item()
            # AET adversarial accuracy
            y_adv = model(normalize(adv))
            preds_adv = F.softmax(y_adv, dim=1)
            preds_top_p, preds_top_class = preds_adv.topk(1, dim=1)
            correct_adv += (preds_top_class.view(target.shape) == target).sum().item()
            # PGD adversarial accuracy
            y_adv2 = model(normalize(adv2))
            preds_adv2 = F.softmax(y_adv2, dim=1)
            preds_top_p2, preds_top_class2 = preds_adv2.topk(1, dim=1)
            correct_adv2 += (preds_top_class2.view(target.shape) == target).sum().item()
            total += target.size(0)
        # if batch_idx == 5:
        #     save_image(data[0], './picture/normal.png')
        #     save_image(adv[0], './picture/adv.png')
        #     save_image(delta[0]*8, './picture/delta.png')
        # if batch_idx == 10:
        #     break
    print(correct_normal/total)
    print(correct_adv/total)
    print(correct_adv2/total)
    eval_time = time.time()
    print('Total eval time: {:.4f} minutes'.format((eval_time-start_time)/60))


if __name__ == "__main__":
    main()
18,558 | 2df4f893fe2f1962c96123c571ae3d7034ab9440 | from __future__ import annotations
from collections.abc import Iterable, Iterator, Mapping
from typing import TYPE_CHECKING
from multipledispatch import Dispatcher
import ibis.expr.datatypes as dt
from ibis.common.annotations import attribute
from ibis.common.exceptions import IntegrityError
from ibis.common.grounds import Concrete
from ibis.common.validators import frozendict_of, instance_of, validator
from ibis.util import indent
if TYPE_CHECKING:
import pandas as pd
convert = Dispatcher(
'convert',
doc="""\
Convert `column` to the pandas dtype corresponding to `out_dtype`, where the
dtype of `column` is `in_dtype`.
Parameters
----------
in_dtype : Union[np.dtype, pandas_dtype]
The dtype of `column`, used for dispatching
out_dtype : ibis.expr.datatypes.DataType
The requested ibis type of the output
column : pd.Series
The column to convert
Returns
-------
result : pd.Series
The converted column
""",
)
@validator
def datatype(arg, **kwargs):
    # Coerce any dtype-like value (string, type, DataType) via dt.dtype.
    return dt.dtype(arg)
class Schema(Concrete, Mapping):
    """An object for holding table schema information."""

    fields = frozendict_of(instance_of(str), datatype)
    """A mapping of [`str`][str] to [`DataType`][ibis.expr.datatypes.DataType] objects
    representing the type of each column."""

    def __repr__(self) -> str:
        # Left-pad every name so the type column lines up; default=0 keeps
        # max() safe for an empty schema.
        space = 2 + max(map(len, self.names), default=0)
        return "ibis.Schema {{{}\n}}".format(
            indent(
                ''.join(
                    f'\n{name.ljust(space)}{str(type)}' for name, type in self.items()
                ),
                2,
            )
        )

    def __len__(self) -> int:
        return len(self.fields)

    def __iter__(self) -> Iterator[str]:
        return iter(self.fields)

    def __getitem__(self, name: str) -> dt.DataType:
        return self.fields[name]

    @attribute.default
    def names(self):
        # Cached tuple of column names, in definition order.
        return tuple(self.keys())

    @attribute.default
    def types(self):
        # Cached tuple of column types, parallel to `names`.
        return tuple(self.values())

    @attribute.default
    def _name_locs(self) -> dict[str, int]:
        # Cached name -> position mapping for positional lookups.
        return {v: i for i, v in enumerate(self.names)}

    def equals(self, other: Schema) -> bool:
        """Return whether `other` is equal to `self`.

        Parameters
        ----------
        other
            Schema to compare `self` to.

        Examples
        --------
        >>> import ibis
        >>> first = ibis.schema({"a": "int"})
        >>> second = ibis.schema({"a": "int"})
        >>> assert first.equals(second)
        >>> third = ibis.schema({"a": "array<int>"})
        >>> assert not first.equals(third)
        """
        if not isinstance(other, Schema):
            raise TypeError(
                f"invalid equality comparison between Schema and {type(other)}"
            )
        return self.__cached_equals__(other)

    @classmethod
    def from_tuples(
        cls,
        values: Iterable[tuple[str, str | dt.DataType]],
    ) -> Schema:
        """Construct a `Schema` from an iterable of pairs.

        Parameters
        ----------
        values
            An iterable of pairs of name and type.

        Returns
        -------
        Schema
            A new schema

        Examples
        --------
        >>> import ibis
        >>> ibis.Schema.from_tuples([("a", "int"), ("b", "string")])
        ibis.Schema {
          a  int64
          b  string
        }
        """
        return cls(dict(values))

    def to_pandas(self):
        """Return the equivalent pandas datatypes."""
        # Imported lazily to avoid a hard pandas dependency at import time.
        from ibis.backends.pandas.client import ibis_schema_to_pandas

        return ibis_schema_to_pandas(self)

    def to_pyarrow(self):
        """Return the equivalent pyarrow schema."""
        from ibis.backends.pyarrow.datatypes import ibis_to_pyarrow_schema

        return ibis_to_pyarrow_schema(self)

    def as_struct(self) -> dt.Struct:
        # A schema is structurally identical to a struct datatype.
        return dt.Struct(self)

    def __gt__(self, other: Schema) -> bool:
        """Return whether `self` is a strict superset of `other`."""
        return set(self.items()) > set(other.items())

    def __ge__(self, other: Schema) -> bool:
        """Return whether `self` is a superset of or equal to `other`."""
        return set(self.items()) >= set(other.items())

    def merge(self, other: Schema) -> Schema:
        """Merge `other` to `self`.

        Raise an `IntegrityError` if there are duplicate column names.

        Parameters
        ----------
        other
            Schema instance to append to `self`.

        Returns
        -------
        Schema
            A new schema appended with `schema`.

        Examples
        --------
        >>> import ibis
        >>> first = ibis.Schema.from_dict({"a": "int", "b": "string"})
        >>> second = ibis.Schema.from_dict({"c": "float", "d": "int16"})
        >>> first.merge(second)
        ibis.Schema {
          a  int64
          b  string
          c  float64
          d  int16
        }
        """
        if duplicates := self.keys() & other.keys():
            raise IntegrityError(f'Duplicate column name(s): {duplicates}')
        return self.__class__({**self, **other})

    def name_at_position(self, i: int) -> str:
        """Return the name of a schema column at position `i`.

        Parameters
        ----------
        i
            The position of the column

        Returns
        -------
        str
            The name of the column in the schema at position `i`.

        Examples
        --------
        >>> import ibis
        >>> sch = ibis.Schema.from_dict({"a": "int", "b": "string"})
        >>> sch.name_at_position(0)
        'a'
        >>> sch.name_at_position(1)
        'b'
        """
        upper = len(self.names) - 1
        if not 0 <= i <= upper:
            raise ValueError(f'Column index must be between 0 and {upper:d}, inclusive')
        return self.names[i]

    def apply_to(self, df: pd.DataFrame) -> pd.DataFrame:
        """Apply the schema `self` to a pandas `DataFrame`.

        This method mutates the input `DataFrame`.

        Parameters
        ----------
        df
            Input DataFrame

        Returns
        -------
        DataFrame
            Type-converted DataFrame

        Examples
        --------
        Import the necessary modules

        >>> import numpy as np
        >>> import pandas as pd
        >>> import ibis
        >>> import ibis.expr.datatypes as dt

        Construct a DataFrame with string timestamps and an `int8` column that
        we're going to upcast.

        >>> data = dict(
        ...     times=[
        ...         "2022-01-01 12:00:00",
        ...         "2022-01-01 13:00:01",
        ...         "2022-01-01 14:00:02",
        ...     ],
        ...     x=np.array([-1, 0, 1], dtype="int8")
        ... )
        >>> df = pd.DataFrame(data)
        >>> df
                         times  x
        0  2022-01-01 12:00:00 -1
        1  2022-01-01 13:00:01  0
        2  2022-01-01 14:00:02  1
        >>> df.dtypes
        times    object
        x          int8
        dtype: object

        Construct an ibis Schema that we want to cast to.

        >>> sch = ibis.schema({"times": dt.timestamp, "x": "int16"})
        >>> sch
        ibis.Schema {
          times  timestamp
          x      int16
        }

        Apply the schema

        >>> sch.apply_to(df)
                        times  x
        0 2022-01-01 12:00:00 -1
        1 2022-01-01 13:00:01  0
        2 2022-01-01 14:00:02  1
        >>> df.dtypes  # `df` is mutated by the method
        times    datetime64[ns]
        x                 int16
        dtype: object
        """
        schema_names = self.names
        data_columns = df.columns
        assert len(schema_names) == len(
            data_columns
        ), "schema column count does not match input data column count"
        # Convert columns positionally: the i-th data column is coerced to
        # the i-th schema type via the `convert` dispatcher.
        for column, dtype in zip(data_columns, self.types):
            pandas_dtype = dtype.to_pandas()
            col = df[column]
            col_dtype = col.dtype
            try:
                not_equal = pandas_dtype != col_dtype
            except TypeError:
                # ugh, we can't compare dtypes coming from pandas,
                # assume not equal
                not_equal = True
            if not_equal or not dtype.is_primitive():
                new_col = convert(col_dtype, dtype, col)
            else:
                new_col = col
            df[column] = new_col
        # return data with the schema's columns which may be different than the
        # input columns
        df.columns = schema_names
        return df
# Dispatchers: `schema` coerces arbitrary inputs to a Schema; `infer`
# derives a Schema from data (registrations live elsewhere).
schema = Dispatcher('schema')
infer = Dispatcher('infer')
@schema.register(Schema)
def identity(s):
    # Already a Schema: pass it through unchanged.
    return s
@schema.register(Mapping)
def schema_from_mapping(d):
    # A name -> dtype mapping maps directly onto Schema's constructor.
    return Schema(d)
@schema.register(Iterable)
def schema_from_pairs(lst):
    # An iterable of (name, dtype) pairs.
    return Schema.from_tuples(lst)
@schema.register(type)
def schema_from_class(cls):
    # NOTE(review): assumes dt.dtype(cls) yields a mapping-like (struct)
    # datatype acceptable to Schema's `fields` validator — verify.
    return Schema(dt.dtype(cls))
@schema.register(Iterable, Iterable)
def schema_from_names_types(names, types):
    """Construct a Schema from parallel iterables of names and types.

    Parameters
    ----------
    names
        Iterable of column names.
    types
        Iterable of column types, same length as `names`.

    Raises
    ------
    IntegrityError
        If the lengths differ or any column name is duplicated.
    """
    # Materialize so plain generators can be length-checked and zipped.
    names = list(names)
    types = list(types)

    # validate lengths of names and types are the same
    if len(names) != len(types):
        raise IntegrityError('Schema names and types must have the same length')

    # validate unique field names: collect every occurrence after the
    # first of each name (O(n) seen-set pass instead of the previous
    # list.remove-based O(n^2) scan; same resulting list).
    seen = set()
    duplicate_names = []
    for name in names:
        if name in seen:
            duplicate_names.append(name)
        else:
            seen.add(name)
    if duplicate_names:
        raise IntegrityError(f'Duplicate column name(s): {duplicate_names}')

    # construct the schema
    fields = dict(zip(names, types))
    return Schema(fields)
|
18,559 | c935fdb09a5464385e68b0aa3d3c30a7044f48f3 |
# template_parsetab.py
# This file is automatically generated. Do not edit.
# PLY (python-lex-yacc) LALR parser table cache.  ``yacc.py`` compares
# ``_tabversion`` and ``_lr_signature`` against the current grammar to
# decide whether these cached tables can be reused or must be regenerated.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = b"/\xe0il\xf0W\xb6\xf3C'\x9dp\x03}v\xaf"
# Compressed LALR ACTION table (generated by PLY).  Each terminal token name
# maps to a pair of parallel lists: parser states and the action taken on
# that token in that state (per PLY's convention, a positive action is a
# shift to that state and a negative action is a reduce by that production
# number).  Regenerate via PLY rather than editing by hand.
_lr_action_items = {'ID':([1,3,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,26,28,29,30,31,32,33,34,35,37,38,39,40,],[-90,16,-111,-120,-375,-117,-113,-118,-112,-106,-373,16,-115,-116,-114,16,-374,-121,-91,16,16,16,-110,-119,-121,16,16,16,16,16,-86,16,]),'COMMA':([6,7,9,10,11,12,13,14,15,16,18,19,20,21,22,24,28,30,31,34,35,36,37,39,40,],[23,-111,-375,-117,-113,-118,-112,-106,26,-373,-115,-116,-114,-72,-374,-91,-122,-110,-119,-74,-73,23,-123,-86,-75,]),'ASSIGN':([7,9,10,11,12,13,14,16,18,19,20,21,22,24,30,31,34,39,],[-111,-375,-117,-113,-118,-112,-106,-373,-115,-116,-114,29,-374,-91,-110,-119,38,-86,]),'TGTHAN':([6,7,9,10,11,12,13,14,15,16,18,19,20,21,22,24,28,30,31,34,35,36,37,39,40,],[24,-111,-375,-117,-113,-118,-112,-106,24,-373,-115,-116,-114,-72,-374,-91,-122,-110,-119,-74,-73,24,-123,-86,-75,]),'DOUBLE':([1,3,8,17,23,26,29,32,33,38,],[-90,18,-120,18,-121,18,18,-121,18,18,]),'TYPEOF':([1,3,23,32,],[-90,8,8,8,]),'SHORT':([1,3,8,17,23,26,29,32,33,38,],[-90,11,-120,11,-121,11,11,-121,11,11,]),'SET':([1,3,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,26,28,29,30,31,32,33,34,35,37,38,39,40,],[-90,9,-111,-120,-375,-117,-113,-118,-112,-106,-373,9,-115,-116,-114,9,-374,-121,-91,9,9,9,-110,-119,-121,9,9,9,9,9,-86,9,]),'BYTE':([1,3,8,17,23,26,29,32,33,38,],[-90,10,-120,10,-121,10,10,-121,10,10,]),'TLTHAN':([0,7,9,10,11,12,13,14,16,18,19,20,21,22,24,28,30,31,34,35,37,39,40,],[1,-111,-375,-117,-113,-118,-112,-106,-373,-115,-116,-114,1,-374,-91,1,-110,-119,1,1,1,-86,1,]),'INFERRED':([1,3,8,17,23,26,29,32,33,38,],[-90,12,-120,12,-121,12,12,-121,12,12,]),'CHAR':([1,3,8,17,23,26,29,32,33,38,],[-90,19,-120,19,-121,19,19,-121,19,19,]),'FLOAT':([1,3,8,17,23,26,29,32,33,38,],[-90,20,-120,20,-121,20,20,-121,20,20,]),'INT':([1,3,8,17,23,26,29,32,33,38,],[-90,13,-120,13,-121,13,13,-121,13,13,]),'GET':([1,3,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,26,28,29,30,31,32,33,34,35,37,38,39,40,],[-90,22,-111,-120,-375,-117,-113,-118,-112,-106,-373,22,-115,-116,-114,22,-374,-121,-91,
22,22,22,-110,-119,-121,22,22,22,22,22,-86,22,]),'$end':([2,4,5,24,25,27,],[0,-88,-89,-91,-87,-76,]),}
# Expand the compressed per-token lists into the nested mapping the parser
# consumes at runtime: _lr_action[state][token] = action.  The compressed
# source dict is deleted afterwards to reclaim memory.
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
# Compressed LALR GOTO table (generated by PLY): each nonterminal maps to
# parallel lists of parser states and the state to transition to after that
# nonterminal is reduced in that state.
_lr_goto_items = {'id':([3,17,21,26,28,29,33,34,35,37,38,40,],[14,14,14,14,14,14,14,14,14,14,14,14,]),'simple_templatedeflist':([3,32,],[6,36,]),'template_validate':([0,],[2,]),'templatedeflist':([3,],[15,]),'typeof_opt':([3,23,32,],[17,33,17,]),'id_var_type':([3,17,21,26,28,29,33,34,35,37,38,40,],[7,7,30,7,30,7,7,30,30,30,7,30,]),'var_type':([3,17,26,29,33,38,],[21,28,34,35,37,40,]),'template_ref':([21,28,34,35,37,40,],[31,31,31,31,31,31,]),'lthan_restrict':([0,21,28,34,35,37,40,],[3,32,32,32,32,32,32,]),'template':([0,],[4,]),'template_ref_validate':([0,],[5,]),'gthan_restrict':([6,15,36,],[25,27,39,]),}
_lr_goto = { }
# Expand into the runtime form _lr_goto[state][nonterminal] = next_state,
# then drop the compressed source dict.
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = { }
      _lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> template_validate","S'",1,None,None,None),
('statementlist -> statement','statementlist',1,'p_statementlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',281),
('statementlist -> statement_nonctrl','statementlist',1,'p_statementlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',282),
('statementlist -> statementlist statement','statementlist',2,'p_statementlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',283),
('statementlist -> statementlist statement_nonctrl','statementlist',2,'p_statementlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',284),
('statementlist -> <empty>','statementlist',0,'p_statementlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',285),
('push_scope -> <empty>','push_scope',0,'p_push_scope','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',305),
('pop_scope -> <empty>','pop_scope',0,'p_pop_scope','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',310),
('opt_colon_type -> COLON var_type','opt_colon_type',2,'p_opt_colon_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',315),
('opt_colon_type -> <empty>','opt_colon_type',0,'p_opt_colon_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',316),
('assign_statement -> assign COLON var_type','assign_statement',3,'p_assign_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',322),
('assign_statement -> <empty>','assign_statement',0,'p_assign_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',323),
('statement -> function','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',330),
('statement -> class','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',331),
('statement -> typed_class','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',332),
('statement -> if','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',333),
('statement -> else','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',334),
('statement -> while','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',335),
('statement -> with','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',336),
('statement -> dowhile','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',337),
('statement -> for','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',338),
('statement -> return SEMI','statement',2,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',339),
('statement -> yield SEMI','statement',2,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',340),
('statement -> break SEMI','statement',2,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',341),
('statement -> continue SEMI','statement',2,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',342),
('statement -> throw SEMI','statement',2,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',343),
('statement -> try','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',344),
('statement -> catch','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',345),
('statement -> finally','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',346),
('statement -> switch','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',347),
('statement -> func_native SEMI','statement',2,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',348),
('statement -> import_decl','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',349),
('statement -> export_decl','statement',1,'p_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',350),
('import_decl -> IMPORT import_clause from_clause SEMI','import_decl',4,'p_import_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',363),
('import_decl -> IMPORT module_spec SEMI','import_decl',3,'p_import_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',364),
('import_clause -> import_def_bind','import_clause',1,'p_import_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',377),
('import_clause -> name_space_import','import_clause',1,'p_import_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',378),
('import_clause -> named_imports','import_clause',1,'p_import_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',379),
('import_clause -> import_def_bind COMMA name_space_import','import_clause',3,'p_import_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',380),
('import_clause -> import_def_bind COMMA named_imports','import_clause',3,'p_import_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',381),
('import_def_bind -> import_bind','import_def_bind',1,'p_import_def_bind','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',401),
('name_space_import -> TIMES ID import_bind','name_space_import',3,'p_name_space_import','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',406),
('named_imports -> LBRACKET RBRACKET','named_imports',2,'p_named_imports','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',415),
('named_imports -> LBRACKET import_list RBRACKET','named_imports',3,'p_named_imports','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',416),
('from_clause -> FROM module_spec','from_clause',2,'p_from_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',424),
('import_list -> import_spec','import_list',1,'p_import_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',428),
('import_list -> import_list COMMA import_spec','import_list',3,'p_import_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',429),
('import_spec -> import_bind','import_spec',1,'p_import_spec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',440),
('import_spec -> ID ID import_bind','import_spec',3,'p_import_spec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',441),
('import_bind -> binding_ident','import_bind',1,'p_import_bind','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',456),
('module_spec -> STRINGLIT','module_spec',1,'p_module_spec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',461),
('binding_ident -> ID','binding_ident',1,'p_binding_ident','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',471),
('statement_nonctrl -> expr SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',494),
('statement_nonctrl -> var_decl SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',495),
('statement_nonctrl -> funcref SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',496),
('statement_nonctrl -> SEMI','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',497),
('statement_nonctrl -> if','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',498),
('statement_nonctrl -> else','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',499),
('statement_nonctrl -> for','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',500),
('statement_nonctrl -> dowhile','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',501),
('statement_nonctrl -> while','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',502),
('statement_nonctrl -> return SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',503),
('statement_nonctrl -> yield SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',504),
('statement_nonctrl -> break SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',505),
('statement_nonctrl -> continue SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',506),
('statement_nonctrl -> throw SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',507),
('statement_nonctrl -> try','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',508),
('statement_nonctrl -> catch','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',509),
('statement_nonctrl -> finally','statement_nonctrl',1,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',510),
('statement_nonctrl -> delete SEMI','statement_nonctrl',2,'p_statement_nonctrl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',511),
('var_decl_or_type -> var_decl','var_decl_or_type',1,'p_var_decl_or_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',540),
('var_decl_or_type -> var_type','var_decl_or_type',1,'p_var_decl_or_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',541),
('templatedeflist -> var_type','templatedeflist',1,'p_templatedeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',549),
('templatedeflist -> var_type ASSIGN var_type','templatedeflist',3,'p_templatedeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',550),
('templatedeflist -> templatedeflist COMMA var_type','templatedeflist',3,'p_templatedeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',551),
('templatedeflist -> templatedeflist COMMA var_type ASSIGN var_type','templatedeflist',5,'p_templatedeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',552),
('template -> lthan_restrict templatedeflist gthan_restrict','template',3,'p_template','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',569),
('type_modifiers -> type_modifiers UNSIGNED','type_modifiers',2,'p_type_modifiers','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',576),
('type_modifiers -> type_modifiers SIGNED','type_modifiers',2,'p_type_modifiers','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',577),
('type_modifiers -> type_modifiers CONST','type_modifiers',2,'p_type_modifiers','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',578),
('type_modifiers -> GLOBAL','type_modifiers',1,'p_type_modifiers','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',579),
('type_modifiers -> VAR','type_modifiers',1,'p_type_modifiers','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',580),
('type_modifiers -> STATIC','type_modifiers',1,'p_type_modifiers','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',581),
('left_id -> id','left_id',1,'p_left_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',600),
('id_opt -> id','id_opt',1,'p_id_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',604),
('id_opt -> <empty>','id_opt',0,'p_id_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',605),
('template_ref -> lthan_restrict simple_templatedeflist gthan_restrict','template_ref',3,'p_template_ref','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',611),
('template_ref_validate -> lthan_restrict simple_templatedeflist gthan_restrict','template_ref_validate',3,'p_template_ref_validate','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',616),
('template_validate -> template','template_validate',1,'p_template_validate','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',622),
('template_validate -> template_ref_validate','template_validate',1,'p_template_validate','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',623),
('lthan_restrict -> TLTHAN','lthan_restrict',1,'p_lthan_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',630),
('gthan_restrict -> TGTHAN','gthan_restrict',1,'p_gthan_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',646),
('id_1 -> id','id_1',1,'p_id1','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',654),
('var_decl_no_list -> var_type','var_decl_no_list',1,'p_var_decl_no_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',660),
('var_decl_no_list -> type_modifiers var_decl_no_list','var_decl_no_list',2,'p_var_decl_no_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',661),
('var_decl_no_list -> var_decl_no_list ASSIGN expr','var_decl_no_list',3,'p_var_decl_no_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',662),
('var_decl -> type_modifiers var_type','var_decl',2,'p_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',712),
('var_decl -> var_decl ASSIGN expr','var_decl',3,'p_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',713),
('var_decl -> var_decl COMMA id','var_decl',3,'p_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',714),
('var_decl -> var_decl COMMA id ASSIGN expr','var_decl',5,'p_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',715),
('ident_arr -> id','ident_arr',1,'p_ident_arr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',782),
('ident_arr -> ident_arr LSBRACKET NUMBER RSBRACKET','ident_arr',4,'p_ident_arr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',783),
('var_decl_with_arr -> type_modifiers var_type ident_arr','var_decl_with_arr',3,'p_var_decl_with_arr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',793),
('var_decl_with_arr -> var_decl_with_arr ASSIGN expr','var_decl_with_arr',3,'p_var_decl_with_arr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',794),
('var_decl_with_arr -> var_decl_with_arr COMMA ident_arr','var_decl_with_arr',3,'p_var_decl_with_arr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',795),
('var_decl_with_arr -> var_decl_with_arr COMMA ident_arr ASSIGN expr','var_decl_with_arr',5,'p_var_decl_with_arr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',796),
('id_var_type -> id','id_var_type',1,'p_id_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',853),
('id_var_decl -> id','id_var_decl',1,'p_id_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',859),
('empty -> empty','empty',1,'p_empty','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',865),
('empty -> <empty>','empty',0,'p_empty','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',866),
('var_type -> var_type id_var_type','var_type',2,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',871),
('var_type -> id_var_type','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',872),
('var_type -> INT','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',873),
('var_type -> SHORT','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',874),
('var_type -> FLOAT','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',875),
('var_type -> DOUBLE','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',876),
('var_type -> CHAR','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',877),
('var_type -> BYTE','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',878),
('var_type -> INFERRED','var_type',1,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',879),
('var_type -> var_type template_ref','var_type',2,'p_var_type','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',880),
('typeof_opt -> TYPEOF','typeof_opt',1,'p_typeof_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',973),
('typeof_opt -> <empty>','typeof_opt',0,'p_typeof_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',974),
('simple_templatedeflist -> typeof_opt var_type','simple_templatedeflist',2,'p_simple_templatedeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',982),
('simple_templatedeflist -> simple_templatedeflist COMMA typeof_opt var_type','simple_templatedeflist',4,'p_simple_templatedeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',983),
('simple_var_decl -> VAR id','simple_var_decl',2,'p_simple_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1000),
('simple_var_decl -> id','simple_var_decl',1,'p_simple_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1001),
('cmplx_assign -> ASSIGNPLUS','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1021),
('cmplx_assign -> ASSIGNMINUS','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1022),
('cmplx_assign -> ASSIGNDIVIDE','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1023),
('cmplx_assign -> ASSIGNTIMES','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1024),
('cmplx_assign -> ASSIGNBOR','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1025),
('cmplx_assign -> ASSIGNBAND','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1026),
('cmplx_assign -> ASSIGNBXOR','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1027),
('cmplx_assign -> ASSIGNLSHIFT','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1028),
('cmplx_assign -> ASSIGNRSHIFT','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1029),
('cmplx_assign -> ASSIGNRRSHIFT','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1030),
('cmplx_assign -> ASSIGNLLSHIFT','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1031),
('cmplx_assign -> ASSIGN','cmplx_assign',1,'p_cmplx_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1032),
('throw -> THROW expr','throw',2,'p_throw','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1039),
('assign -> expr cmplx_assign expr','assign',3,'p_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1044),
('assign -> assign cmplx_assign expr','assign',3,'p_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1045),
('assign -> expr','assign',1,'p_assign','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1046),
('exprlist -> expr','exprlist',1,'p_exprlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1063),
('exprlist -> exprlist COMMA expr','exprlist',3,'p_exprlist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1064),
('typed_class -> TYPED CLASS id template_opt typed_class_tail','typed_class',5,'p_typed_class','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1093),
('typed_class_tail -> typed_inherit_opt LBRACKET typed_class_body_opt RBRACKET','typed_class_tail',4,'p_typed_class_tail','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1108),
('typed_class_body_opt -> typed_class_list','typed_class_body_opt',1,'p_typed_class_body_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1114),
('typed_class_body_opt -> <empty>','typed_class_body_opt',0,'p_typed_class_body_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1115),
('typed_class_list -> typed_class_element','typed_class_list',1,'p_typed_class_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1121),
('typed_class_list -> typed_class_list typed_class_element','typed_class_list',2,'p_typed_class_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1122),
('typed_class_element -> class_element','typed_class_element',1,'p_typed_class_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1145),
('typed_class_element -> var_decl_with_arr SEMI','typed_class_element',2,'p_typed_class_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1146),
('typed_inherit_opt -> EXTENDS id','typed_inherit_opt',2,'p_typed_inherit_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1152),
('typed_inherit_opt -> <empty>','typed_inherit_opt',0,'p_typed_inherit_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1153),
('class -> CLASS id template_opt class_tail','class',4,'p_class','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1164),
('exprclass -> CLASS id_opt class_tail','exprclass',3,'p_exprclass','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1179),
('class_tail -> class_heritage_opt LBRACKET class_body_opt RBRACKET','class_tail',4,'p_class_tail','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1197),
('class_parent_id -> var_type','class_parent_id',1,'p_class_parent_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1207),
('class_parent_id -> class_parent_id DOT var_type','class_parent_id',3,'p_class_parent_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1208),
('class_list -> class_parent_id','class_list',1,'p_class_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1217),
('class_list -> class_list COMMA class_parent_id','class_list',3,'p_class_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1218),
('class_heritage -> EXTENDS class_list','class_heritage',2,'p_class_heritage','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1231),
('class_heritage_opt -> class_heritage','class_heritage_opt',1,'p_class_heritage_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1237),
('class_heritage_opt -> <empty>','class_heritage_opt',0,'p_class_heritage_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1238),
('class_body_opt -> class_element_list','class_body_opt',1,'p_class_body_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1246),
('class_body_opt -> <empty>','class_body_opt',0,'p_class_body_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1247),
('class_element_list -> class_element','class_element_list',1,'p_class_element_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1260),
('class_element_list -> class_element_list class_element','class_element_list',2,'p_class_element_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1261),
('class_element -> STATIC method_def','class_element',2,'p_class_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1272),
('class_element -> method_def','class_element',1,'p_class_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1273),
('class_element -> class_property SEMI','class_element',2,'p_class_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1274),
('id_right -> id','id_right',1,'p_id_right','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1287),
('property_id -> ID','property_id',1,'p_property_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1292),
('property_id -> GET','property_id',1,'p_property_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1293),
('property_id -> SET','property_id',1,'p_property_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1294),
('property_id -> LSBRACKET expr RSBRACKET','property_id',3,'p_property_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1295),
('property_id_right -> property_id','property_id_right',1,'p_property_id_right','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1303),
('method -> property_id_right LPAREN funcdeflist RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET','method',8,'p_method','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1308),
('getset_id -> property_id','getset_id',1,'p_getset_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1326),
('getset_id -> NUMBER','getset_id',1,'p_getset_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1327),
('method_def -> method','method_def',1,'p_method_def','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1333),
('method_def -> GET getset_id LPAREN RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET','method_def',8,'p_method_def','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1334),
('method_def -> SET getset_id LPAREN setter_param_list RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET','method_def',9,'p_method_def','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1335),
('var_element -> id','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1369),
('var_element -> INT','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1370),
('var_element -> SHORT','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1371),
('var_element -> FLOAT','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1372),
('var_element -> DOUBLE','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1373),
('var_element -> CHAR','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1374),
('var_element -> BYTE','var_element',1,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1375),
('var_element -> id template_ref','var_element',2,'p_var_element','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1376),
('var_type2 -> var_element','var_type2',1,'p_var_type2','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1389),
('class_property -> var_type2 id','class_property',2,'p_class_property','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1400),
('class_property -> class_property ASSIGN expr','class_property',3,'p_class_property','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1401),
('class_property -> class_property COMMA id','class_property',3,'p_class_property','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1402),
('class_property -> class_property COMMA id ASSIGN expr','class_property',5,'p_class_property','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1403),
('setter_param_list -> var_type_opt id','setter_param_list',2,'p_setter_param_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1451),
('setter_param_list -> var_type','setter_param_list',1,'p_setter_param_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1452),
('template_ref_opt -> template_ref','template_ref_opt',1,'p_template_ref_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1472),
('template_ref_opt -> <empty>','template_ref_opt',0,'p_template_ref_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1473),
('func_call -> template_ref_opt LPAREN exprlist RPAREN','func_call',4,'p_func_call','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1481),
('func_call -> template_ref_opt LPAREN RPAREN','func_call',3,'p_func_call','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1482),
('funcdeflist -> var_decl_no_list','funcdeflist',1,'p_funcdeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1497),
('funcdeflist -> funcdeflist COMMA var_decl_no_list','funcdeflist',3,'p_funcdeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1498),
('funcdeflist -> <empty>','funcdeflist',0,'p_funcdeflist','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1499),
('template_opt -> template','template_opt',1,'p_template_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1517),
('template_opt -> <empty>','template_opt',0,'p_template_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1518),
('func_type_opt -> COLON var_type_opt','func_type_opt',2,'p_func_type_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1526),
('func_type_opt -> <empty>','func_type_opt',0,'p_func_type_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1527),
('star_opt -> TIMES','star_opt',1,'p_star_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1535),
('star_opt -> <empty>','star_opt',0,'p_star_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1536),
('funcref -> FUNCTION star_opt id template_opt push_scope LPAREN funcdeflist RPAREN func_type_opt','funcref',9,'p_funcref','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1542),
('func_native -> NATIVE push_scope FUNCTION id template_opt LPAREN funcdeflist RPAREN func_type_opt','func_native',9,'p_func_native','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1563),
('function -> FUNCTION star_opt id template_opt push_scope LPAREN funcdeflist RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET','function',12,'p_function','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1586),
('lbracket_restrict -> LBRACKET','lbracket_restrict',1,'p_lbracket_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1614),
('rbracket_restrict -> RBRACKET','rbracket_restrict',1,'p_rbracket_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1621),
('var_type_opt -> var_type','var_type_opt',1,'p_var_type_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1627),
('var_type_opt -> <empty>','var_type_opt',0,'p_var_type_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1628),
('colon_opt -> COLON','colon_opt',1,'p_colon_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1634),
('colon_opt -> <empty>','colon_opt',0,'p_colon_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1635),
('func_name_opt -> ID','func_name_opt',1,'p_func_name_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1641),
('func_name_opt -> <empty>','func_name_opt',0,'p_func_name_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1642),
('exprfunction -> FUNCTION star_opt func_name_opt template_opt push_scope LPAREN funcdeflist RPAREN colon_opt var_type_opt lbracket_restrict statementlist_opt rbracket_restrict','exprfunction',13,'p_exprfunction','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1650),
('exprfunction -> FUNCTION star_opt func_name_opt template_opt push_scope LPAREN RPAREN colon_opt var_type_opt lbracket_restrict statementlist_opt rbracket_restrict','exprfunction',12,'p_exprfunction','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1651),
('array_literal -> LSBRACKET exprlist RSBRACKET','array_literal',3,'p_array_literal','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1702),
('array_literal -> LSBRACKET RSBRACKET','array_literal',2,'p_array_literal','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1703),
('id_str_or_num -> id','id_str_or_num',1,'p_id_str_or_num','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1712),
('id_str_or_num -> NUMBER','id_str_or_num',1,'p_id_str_or_num','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1713),
('id_str_or_num -> STRINGLIT','id_str_or_num',1,'p_id_str_or_num','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1714),
('typeof -> TYPEOF expr','typeof',2,'p_typeof','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1727),
('obj_lit_list -> id_str_or_num COLON expr','obj_lit_list',3,'p_obj_lit_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1733),
('obj_lit_list -> obj_lit_list COMMA id_str_or_num COLON expr','obj_lit_list',5,'p_obj_lit_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1734),
('obj_lit_list -> obj_lit_list COMMA','obj_lit_list',2,'p_obj_lit_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1735),
('obj_literal -> lbracket_restrict push_scope obj_lit_list rbracket_restrict','obj_literal',4,'p_obj_literal','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1749),
('obj_literal -> lbracket_restrict rbracket_restrict','obj_literal',2,'p_obj_literal','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1750),
('delete -> DELETE expr','delete',2,'p_delete','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1761),
('new -> NEW expr','new',2,'p_new','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1768),
('inc -> expr INC','inc',2,'p_inc','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1774),
('inc -> INC expr','inc',2,'p_inc','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1775),
('dec -> expr DEC','dec',2,'p_dec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1784),
('dec -> DEC expr','dec',2,'p_dec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1785),
('not -> NOT expr','not',2,'p_not','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1795),
('bitinv -> BITINV expr','bitinv',2,'p_bitinv','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1800),
('strlit -> STRINGLIT','strlit',1,'p_strlit','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1805),
('lparen_restrict -> LPAREN','lparen_restrict',1,'p_lparen_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1810),
('rparen_restrict -> RPAREN','rparen_restrict',1,'p_rparen_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1818),
('lsbracket_restrict -> LSBRACKET','lsbracket_restrict',1,'p_lsbracket_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1826),
('rsbracket_restrict -> RSBRACKET','rsbracket_restrict',1,'p_rsbracket_restrict','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1834),
('expr -> NUMBER','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1843),
('expr -> strlit','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1844),
('expr -> id','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1845),
('expr -> id template_ref','expr',2,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1846),
('expr -> template_ref','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1847),
('expr -> array_literal','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1848),
('expr -> exprfunction','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1849),
('expr -> arrow_function','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1850),
('expr -> obj_literal','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1851),
('expr -> expr cmplx_assign expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1852),
('expr -> expr cmplx_assign expr COLON var_type SEMI','expr',6,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1853),
('expr -> expr RSHIFT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1854),
('expr -> expr LSHIFT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1855),
('expr -> expr LLSHIFT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1856),
('expr -> expr RRSHIFT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1857),
('expr -> expr COND_DOT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1858),
('expr -> expr DOT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1859),
('expr -> expr LAND expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1860),
('expr -> expr LOR expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1861),
('expr -> expr BOR expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1862),
('expr -> expr INSTANCEOF expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1863),
('expr -> expr BXOR expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1864),
('expr -> expr BAND expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1865),
('expr -> expr EQUAL expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1866),
('expr -> expr EQUAL_STRICT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1867),
('expr -> expr NOTEQUAL_STRICT expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1868),
('expr -> expr GTHAN expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1869),
('expr -> expr GTHANEQ expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1870),
('expr -> expr LTHAN expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1871),
('expr -> expr MOD expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1872),
('expr -> expr LTHANEQ expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1873),
('expr -> expr NOTEQUAL expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1874),
('expr -> expr PLUS expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1875),
('expr -> expr MINUS expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1876),
('expr -> expr DIVIDE expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1877),
('expr -> expr TIMES expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1878),
('expr -> expr IN expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1879),
('expr -> lparen_restrict expr rparen_restrict','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1880),
('expr -> expr func_call','expr',2,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1881),
('expr -> expr lsbracket_restrict expr rsbracket_restrict','expr',4,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1882),
('expr -> expr QEST expr COLON expr','expr',5,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1883),
('expr -> expr_uminus','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1884),
('expr -> not','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1885),
('expr -> bitinv','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1886),
('expr -> new','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1887),
('expr -> inc','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1888),
('expr -> dec','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1889),
('expr -> typeof','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1890),
('expr -> re_lit','expr',1,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1891),
('expr -> expr COMMA expr','expr',3,'p_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1892),
('expr_uminus -> MINUS expr','expr_uminus',2,'p_expr_uminus','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1948),
('paren_expr -> LPAREN expr RPAREN','paren_expr',3,'p_paren_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1955),
('paren_expr -> LPAREN RPAREN','paren_expr',2,'p_paren_expr','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1956),
('arrow_param_list -> ID','arrow_param_list',1,'p_arrow_param_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1967),
('arrow_param_list -> arrow_param_list COMMA ID','arrow_param_list',3,'p_arrow_param_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1968),
('arrow_param_list -> <empty>','arrow_param_list',0,'p_arrow_param_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1969),
('arrow_params -> ID','arrow_params',1,'p_arrow_params','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1983),
('arrow_params -> LPAREN arrow_param_list RPAREN','arrow_params',3,'p_arrow_params','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1984),
('concise_body -> expr','concise_body',1,'p_concise_body','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1996),
('concise_body -> LBRACKET statementlist_opt RBRACKET','concise_body',3,'p_concise_body','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',1997),
('arrow_binding -> LPAREN RPAREN','arrow_binding',2,'p_arrow_binding','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2010),
('arrow_binding -> expr','arrow_binding',1,'p_arrow_binding','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2011),
('arrow_function -> expr ARROW concise_body','arrow_function',3,'p_arrow_function','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2020),
('assign_opt -> assign','assign_opt',1,'p_assign_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2045),
('assign_opt -> <empty>','assign_opt',0,'p_assign_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2046),
('expr_opt -> expr','expr_opt',1,'p_expr_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2055),
('expr_opt -> <empty>','expr_opt',0,'p_expr_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2056),
('re_lit -> REGEXPR','re_lit',1,'p_re_lit','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2065),
('for_var_decl -> id','for_var_decl',1,'p_for_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2071),
('for_var_decl -> id ASSIGN expr','for_var_decl',3,'p_for_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2072),
('for_var_decl -> var_decl','for_var_decl',1,'p_for_var_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2073),
('in_or_of -> IN','in_or_of',1,'p_in_or_of','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2102),
('in_or_of -> OF','in_or_of',1,'p_in_or_of','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2103),
('for_decl -> for_var_decl SEMI expr_opt SEMI expr_opt','for_decl',5,'p_for_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2113),
('for_decl -> for_var_decl in_or_of expr','for_decl',3,'p_for_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2114),
('for -> FOR LPAREN for_decl RPAREN statement_nonctrl','for',5,'p_for','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2129),
('for -> FOR LPAREN for_decl RPAREN LBRACKET statementlist_opt RBRACKET','for',7,'p_for','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2130),
('ctrl_statement -> statement_nonctrl','ctrl_statement',1,'p_ctrl_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2142),
('ctrl_statement -> LBRACKET statementlist_opt RBRACKET','ctrl_statement',3,'p_ctrl_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2143),
('ctrl_statement -> SEMI','ctrl_statement',1,'p_ctrl_statement','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2144),
('dowhile -> DO ctrl_statement WHILE paren_expr','dowhile',4,'p_dowhile','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2157),
('while -> WHILE paren_expr statement_nonctrl','while',3,'p_while','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2166),
('while -> WHILE paren_expr LBRACKET statementlist_opt RBRACKET','while',5,'p_while','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2167),
('default_case -> DEFAULT COLON statementlist','default_case',3,'p_default_case','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2179),
('statementlist_opt -> statementlist','statementlist_opt',1,'p_statementlist_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2185),
('statementlist_opt -> <empty>','statementlist_opt',0,'p_statementlist_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2186),
('case_clause -> CASE expr COLON statementlist_opt','case_clause',4,'p_case_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2196),
('case_clauses -> case_clause','case_clauses',1,'p_case_clauses','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2203),
('case_clauses -> case_clauses case_clause','case_clauses',2,'p_case_clauses','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2204),
('case_clauses_opt -> case_clauses','case_clauses_opt',1,'p_case_clauses_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2214),
('case_clauses_opt -> <empty>','case_clauses_opt',0,'p_case_clauses_opt','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2215),
('case_block -> case_clauses','case_block',1,'p_case_block','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2225),
('case_block -> case_clauses_opt default_case case_clauses_opt','case_block',3,'p_case_block','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2226),
('switch -> SWITCH paren_expr LBRACKET case_block RBRACKET','switch',5,'p_switch','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2243),
('with -> WITH paren_expr ctrl_statement','with',3,'p_with','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2251),
('if -> IF paren_expr ctrl_statement','if',3,'p_if','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2259),
('try -> TRY statement_nonctrl','try',2,'p_try','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2267),
('try -> TRY LBRACKET statementlist RBRACKET','try',4,'p_try','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2268),
('try -> TRY LBRACKET RBRACKET','try',3,'p_try','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2269),
('finally -> FINALLY LBRACKET statementlist_opt RBRACKET','finally',4,'p_finally','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2282),
('export_decl -> EXPORT TIMES from_clause SEMI','export_decl',4,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2288),
('export_decl -> EXPORT export_clause from_clause SEMI','export_decl',4,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2289),
('export_decl -> EXPORT export_clause SEMI','export_decl',3,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2290),
('export_decl -> EXPORT var_decl SEMI','export_decl',3,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2291),
('export_decl -> EXPORT function','export_decl',2,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2292),
('export_decl -> EXPORT class','export_decl',2,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2293),
('export_decl -> EXPORT DEFAULT function','export_decl',3,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2294),
('export_decl -> EXPORT DEFAULT class','export_decl',3,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2295),
('export_decl -> EXPORT DEFAULT assign','export_decl',3,'p_export_decl','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2296),
('export_clause -> LBRACKET RBRACKET','export_clause',2,'p_export_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2350),
('export_clause -> LBRACKET exports_list RBRACKET','export_clause',3,'p_export_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2351),
('export_clause -> LBRACKET exports_list COMMA RBRACKET','export_clause',4,'p_export_clause','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2352),
('exports_list -> export_spec','exports_list',1,'p_exports_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2360),
('exports_list -> exports_list COMMA export_spec','exports_list',3,'p_exports_list','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2361),
('export_spec -> ID','export_spec',1,'p_export_spec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2373),
('export_spec -> ID ID ID','export_spec',3,'p_export_spec','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2374),
('catch -> CATCH paren_expr statement_nonctrl','catch',3,'p_catch','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2385),
('catch -> CATCH paren_expr LBRACKET statementlist RBRACKET','catch',5,'p_catch','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2386),
('else -> ELSE ctrl_statement','else',2,'p_else','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2398),
('break -> BREAK','break',1,'p_break','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2406),
('continue -> CONTINUE','continue',1,'p_continue','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2413),
('return -> RETURN expr','return',2,'p_return','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2420),
('return -> RETURN','return',1,'p_return','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2421),
('yield -> YIELD expr','yield',2,'p_yield','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2431),
('yield -> YIELD','yield',1,'p_yield','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2432),
('id -> ID','id',1,'p_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2443),
('id -> GET','id',1,'p_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2444),
('id -> SET','id',1,'p_id','c:\\dev\\fairmotion\\tools\\extjs_cc\\js_parse.py',2445),
]
|
def sqrt(number):
    """
    Calculate the floored square root of a number via binary search.

    Args:
        number(int): Number to find the floored squared root
    Returns:
        int: Floored square root; None for negative input
    """
    if number == 0 or number == 1:
        return number
    if number < 0:
        # Square roots of negative numbers are undefined over the integers.
        return None
    low = 1
    # Bug fix: use integer division. `number / 2` produced a float bound,
    # which made `mid` (and therefore the returned value) a float,
    # e.g. sqrt(3) -> 1.0 instead of 1.
    high = number // 2
    res = 1  # floor(sqrt(n)) is at least 1 for every n >= 2
    while low <= high:
        mid = (low + high) // 2
        square = mid * mid
        if square == number:
            return mid
        if square < number:
            # mid is a valid floor candidate; keep it and search above.
            res = mid
            low = mid + 1
        else:
            high = mid - 1
    return res
# Table-driven smoke tests: each pair is (expected floored sqrt, input).
_SQRT_CASES = [
    (3, 9),
    (0, 0),
    (4, 16),
    (1, 1),
    (5, 27),
    (None, -9),
]
for _expected, _value in _SQRT_CASES:
    print("Pass" if _expected == sqrt(_value) else "Fail")
|
18,561 | 017bd01f1101fd330225582e46007edc8913dbdf | import unittest
from mock import patch
from Calculator import Calculator
from MyExceptions import *
class TestCalculator(unittest.TestCase):
    """Unit tests for the Calculator helper class."""

    def setUp(self):
        # A fresh calculator per test keeps the cases independent.
        self.calc = Calculator()

    # --- Addition --------------------------------------------------------
    def test_check_addition_two_float_give_me_their_sum(self):
        self.assertAlmostEqual(self.calc.Addition(2.13, -4.12), -1.99)

    def test_check_addtion_string_and_int_raises_exception(self):
        self.assertRaises(itsNotNumber, self.calc.Addition, "string", 2)

    def test_check_addtion_string_and_string_raises_exception(self):
        self.assertRaises(itsNotNumber, self.calc.Addition, "string", "second_string")

    # --- Division --------------------------------------------------------
    def test_check_division_two_float_give_me_their_quotient(self):
        self.assertAlmostEqual(self.calc.Divide(4.2, -2.1), -2)

    def test_check_division_string_and_int_raises_exception(self):
        self.assertRaises(itsNotNumber, self.calc.Divide, "string", 2)

    def test_check_division_string_and_string_raises_exception(self):
        self.assertRaises(itsNotNumber, self.calc.Divide, "string", "second_string")

    def test_check_division_per_zero_raises_exception(self):
        self.assertRaises(isZero, self.calc.Divide, 4, 0)

    # --- Logarithm -------------------------------------------------------
    def test_check_logarithm_two_float_give_me_result(self):
        self.assertAlmostEqual(self.calc.Logarithm(2, 2), 1)

    def test_check_if_value1_lover_than_zero_raises_exception(self):
        self.assertRaises(isLoverThanZero, self.calc.Logarithm, -1, 2)

    def test_check_if_value2_lover_than_zero_raises_exception(self):
        self.assertRaises(isLoverThanZero, self.calc.Logarithm, 3, -3)

    def test_check_value1_is_one_raises_exception(self):
        self.assertRaises(isOne, self.calc.Logarithm, 1, 1)

    # --- Derivative (sympy.diff mocked out) ------------------------------
    @patch("sympy.diff", return_value='2*x')
    def test_check_derivative_return_good_result(self, my_mock):
        expr, symbol = 'x**2', 'x'
        self.assertEqual(str(self.calc.Derivative(expr, symbol)), my_mock(expr, symbol))
# Run the suite when executed directly. exit=False keeps unittest from
# calling sys.exit(), which is convenient inside IDEs or an interactive shell.
if __name__=='__main__':
    unittest.main(exit=False)
18,562 | 14023b4ec2fce31224523c432e45eb0f9165c5a9 | #!/usr/bin/python
#-*- coding:utf-8 -*-
import os,sys
import datetime
import time
from jira.client import JIRA
from jira.exceptions import JIRAError
import random
reload(sys)
sys.setdefaultencoding('UTF-8')
import json
sys.path.append('E:\workspace\SCMDB')
sys.path.append('/letv/scripts/SCMDB')
sys.path.append('/letv/scripts/mount_space')
sys.path.append('E:\workspace\central2')
#当使用django作为第三方的脚本调用时必须添加
os.environ['DJANGO_SETTINGS_MODULE'] = 'SCMDB.settings'
import json
import django
import calendar
django.setup()
from servermanage import models
import actions
from django.core.exceptions import ObjectDoesNotExist
from ssh_key import *
issue_link_type="Relates"
InUse_status_id='11'
Unassign_status_id='41'
jira=JIRA(basic_auth=("username","xxxxxxx@"), server="http://jira.letv.cn/")
#request=jira.issue('SEEVM-59')
#print request.fields.customfield_13417.value,request.fields.customfield_13428.value
#resource=jira.issue('SEERESOURCE-155')
#print resource.fields.customfield_13417.value
def generate_random_password():
    """Return a 6-character password of unique lowercase letters/digits.

    Security fix: use the OS-entropy-backed random.SystemRandom instead of
    the default (predictable) Mersenne-Twister generator, since these
    passwords protect real accounts (unix/VNC/samba).  sample() draws
    without replacement, so all six characters are distinct -- same
    semantics as the previous implementation.
    """
    charset = 'zyxwvutsrqponmlkjihgfedcba123456789'
    return ''.join(random.SystemRandom().sample(charset, 6))
random_password=generate_random_password()
CN_user_notice=u'''
1. 虚拟机已经配置了gerrit sshkey以及.gitconfig文件, 无需再次配置
2. 考虑到目前虚拟机数量有限,如发现长时间(30天)没有使用(虚拟机磁盘连续30天没有发生变化),将会回收虚拟机;
3. 请不要修改VM的主机名,否则会造成监控系统的数据错误,从而影响到VM是否被使用的监控项,可能错误的回收您的虚拟机
4. 虚拟机里都创建了公共账号:andbase, 密码:%s
5. Samba 访问用户名:andbase 密码:%s 访问方式: \\\\你的虚机IP\workspace, 你的虚拟机IP是VMIP字段
6. 如果要使用VNC连接,下载vncviewer软件, VNC的连接密码是:%s
7. 可以通过运行命令vmcfg来统一修改4,5,6的密码
8. 虚拟化研发环境使用FAQ:http://wiki.letv.cn/pages/viewpage.action?pageId=52078163
9. 请使用“/letv/workspace”作为你的工作目录,这个目录下的磁盘空间较大
10. 如果遇到VNC问题请看http://wiki.letv.cn/pages/viewpage.action?pageId=63071871
11. 提供一个web工具处理大家日常遇到的虚拟机问题,地址http://ci.le.com/devself
12. 建议编译的时候指定 –j8,首次编译大概80分钟,之后使用ccache缓存后,效果更加明显;
13. 如果碰到不能解决的问题,请联系SEE@le.com
'''%(random_password,random_password,random_password)
US_user_notice=u'''
1. we have configured the gerrit sshkey and .gitconfig, don't need you configure it again
2. we will recycle the your VM, if your VM is not used for 30 days(we monitor the VM usage everyday)
3. Please don’t modify your VM hostname, or it will cause the monitor system data corruption, then impact VM utilization monitor items, may trigger your VM is recycled wrongly
4. default user is "andbase" default password is "%s"
5. VNC password is "%s", you need install VNCviewer software.
6. samba username is "andbase", password is "%s", access method: \\\\your VM IP\workspace, get your VM IP from VMIP fields
7. You can run command "vmcfg" to modify the password in 4,5,6 steps unified
8. Please use "/letv/workspace" as your work directory, it has big disk space
9. if you got VM issues, please read below URL to see if the issue can be fixed
http://wiki.letv.cn/pages/viewpage.action?pageId=52078163
10. a webtool to fix common VM issues,http://ci.le.com/devself
11. if you can't solve VM issues, you can contact SCM-us@le.com
'''%(random_password,random_password,random_password)
IN_user_notice=u'''
1. we have configured the gerrit sshkey and .gitconfig, don't need you configure it again
2. we will recycle the your VM, if your VM is not used for 30 days(we monitor the VM usage everyday)
3. Please don’t modify your VM hostname, or it will cause the monitor system data corruption, then impact VM utilization monitor items, may trigger your VM is recycled wrongly
4. default user is "andbase" default password is "%s"
5. VNC password is "%s", you need install VNCviewer software
6. samba username is "andbase", password is "%s", access method: \\\\your VM IP\workspace, get your VM IP from VMIP fields
7. You can run command "vmcfg" to modify the password in 4,5,6 steps unified
8. Please use "/letv/workspace" as your work directory, it has big disk space
9. if you got VM issues, please read below URL to see if the issue can be fixed
http://wiki.letv.cn/pages/viewpage.action?pageId=52078163
10. a webtool to fix common VM issues,http://ci.le.com/devself
11. if you can't solve VM issues, you can contact SCM-india@le.com
'''%(random_password,random_password,random_password)
def vnc_passwd(ip):
    """Set the VNC password for the andbase user on host *ip*."""
    cmd = "(echo %s;echo %s) |sudo su - andbase -c vncpasswd" % (random_password, random_password)
    host = actions.Server(ip, username='letv')
    host.remote_command = cmd
    host.connect()
    host.exec_remote_command()
def smb_passwd(ip):
    """Set the samba password for the andbase user on host *ip*."""
    cmd = "(echo %s;echo %s) |sudo smbpasswd andbase" % (random_password, random_password)
    host = actions.Server(ip, username='letv')
    host.remote_command = cmd
    host.connect()
    host.exec_remote_command()
def account_passwd(ip):
    """Reset the unix login password of the andbase account on host *ip*."""
    host = actions.Server(ip, username='letv')
    host.remote_command = "echo 'andbase:%s' | sudo chpasswd" % (random_password)
    host.connect()
    host.exec_remote_command()
def write_random_passwd(ip):
    """Persist the generated password into the VM's vm_setup.conf.

    Temporarily loosens permissions on ~andbase/.ssh so the config file can
    be appended to, then restores the restrictive modes.
    """
    configfile = '/home/andbase/.ssh/vmsetup/vm_setup.conf'
    cmd = (
        "sudo chmod 777 /home/andbase/.ssh;sudo chmod 777 %s; echo '[pass]' >> %s; "
        "echo \"pas = %s\" >> %s;sudo chmod 766 %s;sudo chmod 700 /home/andbase/.ssh"
        % (configfile, configfile, random_password, configfile, configfile)
    )
    host = actions.Server(ip, username='letv')
    host.remote_command = cmd
    host.connect()
    host.exec_remote_command()
def init_gitconfig(ip,name,emailaddress):
server=actions.Server(ip,username='letv')
server.remote_command='''sudo su - andbase -c "cd /home/andbase/bin;./env_vm_setup -m %s -n %s"''' %(emailaddress,name)
print ip,server.remote_command
server.connect()
server.exec_remote_command()
def set_random_passwd(ip):
    """Apply the freshly generated password everywhere it is needed on *ip*.

    Order matters: the config file is written first, then VNC, samba and
    the unix account password are updated.
    """
    for apply_pw in (write_random_passwd, vnc_passwd, smb_passwd, account_passwd):
        apply_pw(ip)
# Collector for requests that could not be assigned (currently unused).
No_assigned_request=[]
# Fetch every in-use VM request ticket from the SEEVM project.
requests=jira.search_issues("project=SEEVM and status=InUse",maxResults=700) # TODO: narrow the query scope to reduce the result size
print requests
# Remind every requester not to rename their VM: the usage monitor keys off
# the hostname, so a rename can cause the VM to be recycled wrongly.
for request in requests:
    comments="Please do not modify your VM hostname, or it will cause the monitor system data corruption, then impact VM utilization monitor items, may trigger your VM is recycled wrongly."
    jira.add_comment(request,comments)
|
18,563 | ab1373fe9fda3a47039abaf808478218fe9c1036 | import argparse
import os
import subprocess
import shlex
import requests
from enum import Enum
import sys
from typing import List
from .common_tool_methods import (
get_supported_postgres_release_versions,
get_minor_version,
)
POSTGRES_MATRIX_FILE = "postgres-matrix.yml"
POSTGRES_MATRIX_WEB_ADDRESS = "https://raw.githubusercontent.com/citusdata/packaging/all-citus/postgres-matrix.yml"
def run_command(command: str) -> int:
    """Run *command*, stream its combined stdout/stderr, and return its exit code.

    The command string is tokenized with shlex (no shell is involved);
    stderr is merged into stdout so output appears interleaved in order.
    """
    with subprocess.Popen(
        shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    ) as process:
        for line in iter(process.stdout.readline, b""):  # b'\n'-separated lines
            # Bug fix: each decoded line already ends with '\n'; the previous
            # end=" " injected a stray space at the start of every line.
            print(line.decode("utf-8"), end="")
        exitcode = process.wait()
    return exitcode
class TestPlatform(Enum):
    """Supported OS package-test targets.

    Each member's value carries two keys:
      * ``name``              -- the release string used on the CLI (e.g. "el/7")
      * ``docker_image_name`` -- the matching Dockerfile directory / image tag part
    """

    el_7 = {"name": "el/7", "docker_image_name": "el-7"}
    el_8 = {"name": "el/8", "docker_image_name": "el-8"}
    centos_8 = {"name": "centos/8", "docker_image_name": "centos-8"}
    centos_7 = {"name": "centos/7", "docker_image_name": "centos-7"}
    ol_7 = {"name": "ol/7", "docker_image_name": "ol-7"}
    ol_8 = {"name": "ol/8", "docker_image_name": "ol-8"}
    debian_stretch = {"name": "debian/stretch", "docker_image_name": "debian-stretch"}
    debian_buster = {"name": "debian/buster", "docker_image_name": "debian-buster"}
    debian_bullseye = {
        "name": "debian/bullseye",
        "docker_image_name": "debian-bullseye",
    }
    debian_bookworm = {
        "name": "debian/bookworm",
        "docker_image_name": "debian-bookworm",
    }
    ubuntu_bionic = {"name": "ubuntu/bionic", "docker_image_name": "ubuntu-bionic"}
    ubuntu_focal = {"name": "ubuntu/focal", "docker_image_name": "ubuntu-focal"}
    ubuntu_jammy = {"name": "ubuntu/jammy", "docker_image_name": "ubuntu-jammy"}
    ubuntu_kinetic = {"name": "ubuntu/kinetic", "docker_image_name": "ubuntu-kinetic"}
    # Sentinel returned when an os_release string matches no member.
    undefined = {"name": "undefined", "docker_image_name": "undefined"}
def get_test_platform_for_os_release(os_release: str) -> TestPlatform:
    """Map a release string such as "el/8" to its TestPlatform member."""
    # Platform names are unique, so the first match is the only match;
    # fall back to the `undefined` sentinel when nothing matches.
    return next(
        (tp for tp in TestPlatform if tp.value["name"] == os_release),
        TestPlatform.undefined,
    )
def get_postgres_versions_from_matrix_file(project_version: str) -> List[str]:
    """Download postgres-matrix.yml and return the pg versions supported by *project_version*."""
    response = requests.get(POSTGRES_MATRIX_WEB_ADDRESS, allow_redirects=True, timeout=60)
    with open(POSTGRES_MATRIX_FILE, "wb") as matrix_file:
        matrix_file.write(response.content)
    return get_supported_postgres_release_versions(POSTGRES_MATRIX_FILE, project_version)
if __name__ == "__main__":
    # CLI: --project_version is the Citus release being tested;
    # --pg_major_version optionally restricts the run to one pg version;
    # --os_release selects which platform image to build and run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--project_version", required=True)
    parser.add_argument("--pg_major_version")
    parser.add_argument("--os_release", choices=[t.value["name"] for t in TestPlatform])
    args = parser.parse_args()
    test_platform = get_test_platform_for_os_release(args.os_release)
    minor_project_version = get_minor_version(args.project_version)
    platform = args.os_release
    # Supported pg versions come from the packaging repo's postgres-matrix.yml.
    postgres_versions = get_postgres_versions_from_matrix_file(args.project_version)
    print(f"This version of Citus supports following pg versions: {postgres_versions}")
    # Dockerfiles live under test-images/<platform-dir>/.
    os.chdir("test-images")
    return_codes = {}
    if args.pg_major_version:
        # Keep only the explicitly requested pg major version, if any.
        postgres_versions = [p for p in postgres_versions if p == args.pg_major_version]
    if len(postgres_versions) == 0:
        raise ValueError("At least one supported postgres version is required")
    for postgres_version in postgres_versions:
        print(f"Testing package for following pg version: {postgres_version}")
        docker_image_name = (
            f"test:{test_platform.value['docker_image_name']}-{postgres_version}"
        )
        # Build the test image for this platform/pg combination.
        build_command = (
            f"docker build --pull --no-cache "
            f"-t {docker_image_name} "
            f"-f {test_platform.value['docker_image_name']}/Dockerfile "
            f"--build-arg CITUS_VERSION={args.project_version} "
            f"--build-arg PG_MAJOR={postgres_version} "
            f"--build-arg CITUS_MAJOR_VERSION={minor_project_version} ."
        )
        print(build_command)
        return_build = run_command(build_command)
        # Run the image; its entrypoint exercises the installed package.
        return_run = run_command(
            f"docker run -e POSTGRES_VERSION={postgres_version} {docker_image_name} "
        )
        # Record both phases so the summary can report each independently.
        return_codes[f"{docker_image_name}-build"] = return_build
        return_codes[f"{docker_image_name}-run"] = return_run
    error_exists = False
    print("-----------------Summary Report------------------")
    for key, value in return_codes.items():
        if value > 0:
            error_exists = True
        print(f"{key}: {'Success' if value == 0 else f'Fail. ErrorCode: {value}'}")
    summary_error = "FAILED :(" if error_exists else "SUCCESS :)"
    print(f"------------------------{summary_error}------------------------")
    if error_exists:
        # Non-zero exit so CI marks the job as failed.
        sys.exit("Failed")
|
18,564 | 1febcabe13816a3097b3dc3308e58bd0716f98dd | #!/usr/bin/python
import sys
import os
import re
# Validate CLI arguments up front. Without the exit, the sys.argv[1]
# access below raised IndexError instead of showing the usage line.
if len(sys.argv) != 3:
    print('usage: ' + os.path.basename(sys.argv[0]) + ' <input> <output>')
    sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]

# Maximum length of one generated C string-literal chunk.
max_len = 10000
def split_len(s, length):
    """Break *s* into consecutive chunks of at most *length* characters."""
    chunks = []
    start = 0
    while start < len(s):
        chunks.append(s[start:start + length])
        start += length
    return chunks
def stringize(s):
    """Escape *s* and wrap it as a double-quoted C string literal plus newline."""
    escaped = s
    # Backslash must be escaped first so the later escapes are not doubled.
    for raw, quoted in (('\\', '\\\\'), ('"', '\\"'), ('\n', '\\n')):
        escaped = escaped.replace(raw, quoted)
    return '"{}"\n'.format(escaped)
with open(infile, 'r') as src, open(outfile, 'w') as dst:
    # Emit the input as C string-literal pieces, inserting a ',\n'
    # separator whenever the accumulated literal text exceeds max_len
    # (presumably to stay under a compiler's literal-length limit).
    length = 0
    for line in src:
        for piece in split_len(line, max_len):
            stringized = stringize(piece)
            length += len(stringized)
            if length > max_len:
                dst.write(',\n')
                length = len(stringized)
            dst.write(stringized)
|
18,565 | 2328cebaf8d17361a7557ee6334e60bcde084804 | """Provides helper methods for parsing Bonirob data"""
import os
import numpy as np
from collections import namedtuple
def load_velo_scans(velo_files):
    """Helper method to parse velodyne binary files into a list of scans.

    Each file is raw float32 data reshaped to (-1, 5) columns.
    """
    return [np.fromfile(path, dtype=np.float32).reshape((-1, 5))
            for path in velo_files]
def load_timestamps(ts_file):
    """Helper method to load timestamps.

    Returns a list of whitespace-split token lists, skipping blank lines
    and comment lines whose first token is "#".
    """
    ts = []
    with open(ts_file, 'r') as f:
        for line in f:
            tokens = line.split()
            # Skip blank lines -- the original indexed tokens[0]
            # unconditionally and raised IndexError on them.
            if tokens and tokens[0] != "#":
                ts.append(tokens)
    return ts
def load_velo_timestamps(velo_path):
    """Helper method to parse start and end of each velodyne scan."""
    start_file = os.path.join(velo_path, 'timestamps_start.txt')
    end_file = os.path.join(velo_path, 'timestamps_end.txt')
    return load_timestamps(start_file), load_timestamps(end_file)
def save_velodyne_pointcloud_as_ply(velo_scan, ply_file):
    """Write *velo_scan* (sized iterable of points whose first three fields
    are x, y, z) to *ply_file* as an ASCII PLY point cloud."""
    # 'with' guarantees the handle is closed even if a write fails;
    # the original left the file open on error.
    with open(ply_file, 'w') as f:
        f.write('ply\nformat ascii 1.0\n')
        f.write('element vertex %s\n' % len(velo_scan))
        f.write('property float x\n')
        f.write('property float y\n')
        f.write('property float z\n')
        f.write('end_header\n')
        for p in velo_scan:
            f.write('%s %s %s\n' % (p[0], p[1], p[2]))
def generate_kinect_pointcloud(rgb, depth, calib):
    """Back-project an RGB + depth image pair into colored 3D points.

    *rgb* and *depth* are PIL-style images (assumed pixel-aligned -- TODO
    confirm); *calib* holds a row-major projection matrix under
    calib["projection"]["data"].
    """
    # Calibration parameters (principal point, focal length) pulled from
    # the flattened projection matrix.
    CENTER_X = calib["projection"]["data"][2]
    CENTER_Y = calib["projection"]["data"][6]
    SCALING_FACTOR = 1000.0  # Verify this -- presumably depth is in mm
    FOCAL_LENGTH = calib["projection"]["data"][0]
    KinectPoint = namedtuple('KinectPoint', 'x y z r g b')
    points = []
    for v in range(rgb.size[1]):
        for u in range(rgb.size[0]):
            color = rgb.getpixel((u, v))
            z = depth.getpixel((u, v)) / SCALING_FACTOR
            # Zero depth means no measurement at this pixel.
            if z == 0:
                continue
            # Pinhole back-projection of the pixel at depth z.
            x = (u - CENTER_X) * z / FOCAL_LENGTH
            y = (v - CENTER_Y) * z / FOCAL_LENGTH
            new_point = KinectPoint(x, y, z, color[0], color[1], color[2])
            points.append(new_point)
    return points
def save_kinect_pointcloud_as_ply(points, ply_file):
    """Write colored points (objects with x, y, z, r, g, b fields) to
    *ply_file* as an ASCII PLY file."""
    # 'with' ensures the file is closed even when a write fails;
    # the original left it open on error.
    with open(ply_file, 'w') as f:
        f.write('ply\nformat ascii 1.0\n')
        f.write('element vertex %s\n' % len(points))
        f.write('property float x\n')
        f.write('property float y\n')
        f.write('property float z\n')
        f.write('property uchar red\n')
        f.write('property uchar green\n')
        f.write('property uchar blue\n')
        f.write('end_header\n')
        for p in points:
            f.write('%s %s %s %s %s %s\n' % (p.x, p.y, p.z, p.r, p.g, p.b))
def generate_fx8_pointcloud(range, calib):
    # TODO: FX8 point-cloud generation not implemented yet.
    # NOTE(review): parameter name 'range' shadows the builtin; rename
    # when implementing.
    print('To Do')
def save_fx8_pointcloud_as_ply(points, ply_file):
    # TODO: FX8 PLY export not implemented yet.
    print('To Do')
|
18,566 | 6be58d43bd7e9af26d00aa990f729fb0ca4dff78 | import os
# Required runtime configuration, all read from the environment; a missing
# variable fails fast with KeyError at import time.
APP_NAME = os.environ['APP_NAME']
SQLALCHEMY_MIGRATE_REPO = os.environ['SQLALCHEMY_MIGRATE_REPO']
SQLALCHEMY_USER = os.environ['SQLALCHEMY_USER']
SQLALCHEMY_PASSWORD = os.environ['SQLALCHEMY_PASSWORD']
SQLALCHEMY_HOST = os.environ['SQLALCHEMY_HOST']
SQLALCHEMY_PORT = os.environ['SQLALCHEMY_PORT']
SQLALCHEMY_DB = os.environ['SQLALCHEMY_DB']
# postgresql://user:password@host:port/database
postgresql_string = 'postgresql://{}:{}@{}:{}/{}'
SQLALCHEMY_DATABASE_URI = postgresql_string.format(SQLALCHEMY_USER, SQLALCHEMY_PASSWORD, SQLALCHEMY_HOST, SQLALCHEMY_PORT, SQLALCHEMY_DB)
|
18,567 | c7cf04582457e05103c7cd6c5dc2483a9b0f3fda | # File for first pattern template of squares that is an instance of the user defined mode mode object.
import basic_graphics
import math
import decimal
import json
from tkinter import *
from cmu_112_graphics import *
from patternObjects import *
from allDrawFunctions import *
from userDefinedPattern import *
class patternTemplate1(buildPattern):
    """Square-motif pattern template (template 1).

    A buildPattern mode that tessellates a square pattern over several
    selectable grid layouts; a vertical slider adjusts the square scale.

    NOTE(review): methods take 'mode' where 'self' is conventional
    (cmu_112_graphics style, kept as-is).
    """

    # updated base grid incorporating this specific mode's template
    def drawBaseGrid(mode, canvas):
        cx = mode.width / 2
        cy = mode.height / 2
        rad = min(mode.height, mode.width) / 2
        if mode.circleMode == 1:
            drawCircle8parts(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), 'light grey', True)
        elif mode.circleMode == 2:
            drawCircle12parts(canvas, cx, cy, rad, 'light grey', True)
        if mode.drawSquares:
            drawSquares(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), 'light blue')
        if mode.drawHex:
            drawHexagon(canvas, cx, cy, rad, 'light green')
        if mode.drawHex2:
            drawHexagon2(canvas, cx, cy, rad, 'orange')
        for circ in mode.circList:
            circ.drawCircle(canvas)
        for pol in mode.polygonList:
            pol.drawPolygon(canvas)
        for shape in mode.shapeList:
            shape.drawShapePoints(canvas)
            shape.drawShape(canvas)
        mode.drawAnnotations(canvas, cx, cy, rad)
        drawSquarePattern(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), mode.squareScale, mode.userColor)
        mode.drawSlider(canvas)

    # draws the slider on the canvas
    def drawSlider(mode, canvas):
        canvas.create_line(mode.width - 20, 20, mode.width - 20, mode.height - 20, fill='light grey')
        canvas.create_rectangle(mode.sliderLocation.x - 10, mode.sliderLocation.y - 25,
                                mode.sliderLocation.x + 10, mode.sliderLocation.y + 25, fill='white', outline='grey')

    def mouseDragged(mode, event):
        # Dragging moves the active circle/polygon (bounded so it stays
        # grabbable), adjusts the slider, or extends a freehand shape.
        if not mode.drawUserPattern:
            if mode.createCircle:
                centerPoint = mode.currentCirc.center
                result = LineSegment(centerPoint, point(event.x, event.y))
                if result.getDistance() < mode.currentCirc.rad:
                    mode.currentCirc.center = point(event.x, event.y)
            elif mode.createPolygon:
                centerPoint = mode.currentPol.center
                result = LineSegment(centerPoint, point(event.x, event.y))
                if result.getDistance() < mode.currentPol.size * 2:
                    mode.currentPol.center = point(event.x, event.y)
            elif mode.changeSlider:  # dragging the mouse adjusts the slider for this template in order to adjust and skew the template
                if abs(event.x - mode.sliderLocation.x) <= 10 and abs(event.y - (mode.sliderLocation.y)) <= 25:
                    if 40 <= event.y <= (mode.height - 20):
                        mode.sliderLocation.y = event.y
                        mode.squareScale = mapRange(event.y, 20, mode.height - 20, 1.065, .465)
            elif mode.freeDraw:
                mode.currentShape.points.append(point(event.x, event.y))

    # all grid functions are the same as those found in the userDefinedPattern file
    # grid functions here just account for including the specific pattern template within them
    def standardGrid(mode, canvas, startPoint, endPoint, level):
        cx = (endPoint.x - startPoint.x) / 2 + startPoint.x
        cy = (endPoint.y - startPoint.y) / 2 + startPoint.y
        if level == 0:
            mode.tesellatePattern2(canvas, startPoint, endPoint)
            drawSquarePattern(canvas, startPoint, endPoint, mode.squareScale, mode.userColor)
        else:
            mode.standardGrid(canvas, point(startPoint.x, startPoint.y), point(cx, cy), level - 1)
            mode.standardGrid(canvas, point(startPoint.x, cy), point(cx, endPoint.y), level - 1)
            mode.standardGrid(canvas, point(cx, startPoint.y), point(endPoint.x, cy), level - 1)
            mode.standardGrid(canvas, point(cx, cy), endPoint, level - 1)

    def drawHexGrid1(mode, canvas, startPoint, endPoint, level):
        a = (endPoint.x - startPoint.x) / 2 + startPoint.x
        b = (endPoint.y - startPoint.y) / 2 + startPoint.y
        r = (endPoint.x - startPoint.x) / 2
        hexOffsetY = (startPoint.y + b) / 2
        if level == 0:
            mode.tesellatePattern2(canvas, startPoint, endPoint)
            drawSquarePattern(canvas, startPoint, endPoint, mode.squareScale, mode.userColor)
        else:
            mode.drawHexGrid1(canvas, point(startPoint.x, startPoint.y), point(a, b), level - 1)
            mode.drawHexGrid1(canvas, point(startPoint.x, b), point(a, endPoint.y), level - 1)
            mode.drawHexGrid1(canvas, point(a, hexOffsetY), point(endPoint.x, ((b + endPoint.y) / 2)), level - 1)
            mode.drawHexGrid1(canvas, point(a, hexOffsetY - r), point(endPoint.x, ((b + endPoint.y) / 2) - r), level - 1)
            mode.drawHexGrid1(canvas, point(a, hexOffsetY + r), point(endPoint.x, ((b + endPoint.y) / 2) + r), level - 1)

    def drawHexGrid2(mode, canvas, startPoint, endPoint, level):
        a = (endPoint.x - startPoint.x) / 2 + startPoint.x
        b = (endPoint.y - startPoint.y) / 2 + startPoint.y
        r = (endPoint.x - startPoint.x) / 2
        hexOffset = r - (r * math.sqrt(3) / 2)
        hexLength = r * math.sqrt(3) / 4
        hexHeight = r * .75
        if level == 0:
            mode.tesellatePattern2(canvas, startPoint, endPoint)
            drawSquarePattern(canvas, startPoint, endPoint, mode.squareScale, mode.userColor)
        else:
            mode.drawHexGrid2(canvas, point(startPoint.x, startPoint.y), point(a, b), level - 1)
            mode.drawHexGrid2(canvas, point(a - hexOffset, startPoint.y), point(endPoint.x - hexOffset, b), level - 1)
            mode.drawHexGrid2(canvas, point(endPoint.x - hexOffset * 2, startPoint.y), point((endPoint.x - hexOffset * 2 + r), b), level - 1)
            mode.drawHexGrid2(canvas, point(startPoint.x - hexLength, startPoint.y + hexHeight),
                              point(a - hexLength, b + hexHeight), level - 1)
            mode.drawHexGrid2(canvas, point(startPoint.x + hexLength, startPoint.y + hexHeight),
                              point(a + hexLength, b + hexHeight), level - 1)
            mode.drawHexGrid2(canvas, point(a - hexOffset + hexLength, startPoint.y + hexHeight),
                              point(endPoint.x - hexOffset + hexLength, b + hexHeight), level - 1)
            mode.drawHexGrid2(canvas, point(startPoint.x, startPoint.y + hexHeight * 2),
                              point(a, b + hexHeight * 2), level - 1)
            mode.drawHexGrid2(canvas, point(a - hexOffset, startPoint.y + hexHeight * 2),
                              point(endPoint.x - hexOffset, b + hexHeight * 2), level - 1)
            mode.drawHexGrid2(canvas, point(endPoint.x - hexOffset * 2, startPoint.y + hexHeight * 2),
                              point((endPoint.x - hexOffset * 2 + r), b + hexHeight * 2), level - 1)

    def subdividedGrid(mode, canvas, startPoint, endPoint, level):
        a = (endPoint.x - startPoint.x) / 2 + startPoint.x
        b = (endPoint.y - startPoint.y) / 2 + startPoint.y
        r = (endPoint.x - startPoint.x) / 2
        rprime = r * math.sqrt(2) / 2
        if level == 0:
            mode.tesellatePatternRotated(canvas, startPoint, endPoint, 22.5)
            drawSquarePatternRotated(canvas, startPoint, endPoint, mode.squareScale, mode.userColor, 22.5)
        else:
            mode.tesellatePattern2(canvas, startPoint, endPoint)
            drawSquarePattern(canvas, startPoint, endPoint, mode.squareScale, mode.userColor)
            mode.subdividedGrid(canvas, point(a - rprime , b - rprime), point(a, b), level - 1)
            mode.subdividedGrid(canvas, point(a - rprime, b), point(a, b + rprime), level - 1)
            mode.subdividedGrid(canvas, point(a, b - rprime), point(a + rprime, b), level - 1)
            mode.subdividedGrid(canvas, point(a, b), point(a + rprime, b + rprime), level - 1)

    def drawOverlappingGrid(mode, canvas, startPoint, endPoint, level):
        a = (endPoint.x - startPoint.x) / 2 + startPoint.x
        b = (endPoint.y - startPoint.y) / 2 + startPoint.y
        if level == 0:
            mode.tesellatePattern2(canvas, startPoint, endPoint)
            drawSquarePattern(canvas, startPoint, endPoint, mode.squareScale, mode.userColor)
        else:
            # Alternate rotated and axis-aligned tiles level by level.
            if level % 2 == 1:
                mode.tesellatePatternRotated(canvas, startPoint, endPoint, 22.5)
                drawSquarePatternRotated(canvas, startPoint, endPoint, mode.squareScale, mode.userColor, 22.5)
            else:
                mode.tesellatePattern2(canvas, startPoint, endPoint)
                drawSquarePattern(canvas, startPoint, endPoint, mode.squareScale, mode.userColor)
            mode.drawOverlappingGrid(canvas, startPoint, point(a, b), level - 1)
            mode.drawOverlappingGrid(canvas, point(startPoint.x, b), point(a, endPoint.y), level - 1)
            mode.drawOverlappingGrid(canvas, point(a, startPoint.y), point(endPoint.x, b), level - 1)
            mode.drawOverlappingGrid(canvas, point(a, b), endPoint, level - 1)

    def spiralGrid(mode, canvas, startPoint, endPoint, level, rad, rotationAngle):
        cx = (endPoint.x - startPoint.x) / 2 + startPoint.x
        cy = (endPoint.y - startPoint.y) / 2 + startPoint.y
        goldenAngle = 360 / ((1 + math.sqrt(5)) / 2)**2
        if level == 0:
            mode.tesellatePatternRotated(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), rotationAngle)
            drawSquarePatternRotated(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), mode.squareScale, mode.userColor, rotationAngle)
        else:
            for i in range(level):  # a for loop is utilized in this spiral function to create an aesthetic effect with this specific pattern since its radially symmetric
                drawSquarePatternRotated(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), mode.squareScale, mode.userColor, rotationAngle * 1.5 * i)
                mode.tesellatePatternRotated(canvas, point(cx - rad, cy - rad), point(cx + rad, cy + rad), rotationAngle)
            mode.spiralGrid(canvas, startPoint, endPoint, level - 1, rad * .75, rotationAngle - goldenAngle)
if __name__ == '__main__':
    # main() is expected to come from one of the star-imported modules above.
    main()
|
# Initial inventory arrives as one comma-separated line; subsequent lines
# are "<command> - <argument>" instructions until the "Craft!" sentinel.
items = input().split(", ")
data = input()
def is_item_in_list(items, item):
    """Return True when *item* is already present in *items*.

    The original if/else around the membership test was redundant;
    'in' already yields the boolean.
    """
    return item in items
def collect_item(items, item):
    """Append *item* unless it is already collected; return the list.

    Uses a direct membership test instead of the is_item_in_list wrapper.
    """
    if item not in items:
        items.append(item)
    return items
def renew_item(items, item):
    """Move an existing *item* to the end of the list; return the list.

    Uses a direct membership test instead of the is_item_in_list wrapper.
    """
    if item in items:
        items.remove(item)
        items.append(item)
    return items
def combine_item(items, old_item, new_item):
    """Insert *new_item* right after the first occurrence of *old_item*;
    return the list (unchanged when *old_item* is absent)."""
    if old_item in items:
        # Insert immediately after the first matching index.
        items.insert(items.index(old_item) + 1, new_item)
    return items
def drop_this_item(items, item):
    """Remove the first occurrence of *item* if present; return the list."""
    if item in items:
        items.remove(item)
    return items
# Dispatch loop: apply inventory commands until the "Craft!" sentinel.
while not data == "Craft!":
    command, item = data.split(" - ")
    if command == "Collect":
        collect_item(items, item)
    if command == "Drop":
        drop_this_item(items, item)
    if command == "Combine Items":
        # For combine, the argument is "old:new".
        old_item, new_item = item.split(":")
        combine_item(items, old_item, new_item)
    if command == "Renew":
        renew_item(items, item)
    data = input()
print(', '.join(items))
|
18,569 | 3f1e7d7a6491799d8f6b55a2a147d806fbeda9f0 | #!/usr/bin/env python
import subprocess
import urllib
def main():
    """POST one availability-check request per course to the local service
    by shelling out to curl.

    NOTE(review): urllib.urlencode is Python 2 only; Python 3 would need
    urllib.parse.urlencode.
    """
    api_url = 'http://localhost:5000/check_availability'
    courses = [{'level': 'under',
                'session': 1151,
                'subject': 'CS',
                'number': 341,
                'email': 'youremail@example.com'}]
    for course in courses:
        encoded_query = urllib.urlencode(course)
        subprocess.call(['curl', '-X', 'POST', '-H', 'Cache-Control: no-cache', '-H', 'Content-Type: application/x-www-form-urlencoded', '-d', encoded_query, api_url])
if __name__ == '__main__':
    main()
|
18,570 | d221f4c1b736bcd7a94da704598aa19f9c945415 | import sqlite3
# Ad-hoc report against the local books database.
conn = sqlite3.connect('books.db')
cur = conn.cursor()
# The triple-quoted string below is earlier one-time schema + seed code,
# deliberately disabled by being wrapped as a no-op string expression.
'''cur.execute("""CREATE TABLE IF NOT EXISTS users(
user_id INT PRIMARY KEY,
books_list_id INT,
chat_id INT);
""")
conn.commit()
cur.execute("""CREATE TABLE IF NOT EXISTS books(
book_id INT,
books_list_id INT,
title TEXT,
number_of_pages TEXT,
number_of_words TEXT,
book_cover TEXT,
FOREIGN KEY (books_list_id) references users(books_list_id));
""")
conn.commit()
cur.execute("""CREATE TABLE IF NOT EXISTS CUROPN(
cur_opn_id INT PRIMARY KEY,
book_id INT,
cur_time_opn TEXT,
cur_time_cls TEXT,
page_cls INT,
cur_speed INT,
FOREIGN KEY (book_id) references books(book_id));
""")
conn.commit()
user = ('1', '1', '123411')
cur.execute("INSERT INTO users VALUES(?, ?, ?);", user)
conn.commit()
book1 = ('1', '1', 'Властелин колец', '1080', '101', 'https://kinogovno1.com')
book2 = ('2', '1', 'Точка обмана', '1300', '120', 'https://kinogovno2.com')
book3 = ('3', '1', 'Sapiens. Краткая история человечества', '857', '90', 'https://kinogovno3.com')
book4 = ('4', '1', 'Зачем мы спим', '677', '101', 'https://kinogovno4.com')
cur.execute("INSERT INTO books VALUES(?, ?, ?, ?, ?, ?);", book1)
conn.commit()
cur.execute("INSERT INTO books VALUES(?, ?, ?, ?, ?, ?);", book2)
conn.commit()
cur.execute("INSERT INTO books VALUES(?, ?, ?, ?, ?, ?);", book3)
conn.commit()
cur.execute("INSERT INTO books VALUES(?, ?, ?, ?, ?, ?);", book4)
conn.commit()
opn1 = ('1', '1', '2021-08-14 17:01:20', '2021-08-14 19:21:20', '24', '344')
opn2 = ('2', '1', '2021-08-15 17:01:20', '2021-08-14 19:01:20', '48', '368')
opn3 = ('3', '1', '2021-08-16 17:01:20', '2021-08-14 19:00:20', '72', '370')
cur.execute("INSERT INTO CUROPN VALUES(?, ?, ?, ?, ?, ?);", opn1)
conn.commit()
cur.execute("INSERT INTO CUROPN VALUES(?, ?, ?, ?, ?, ?);", opn2)
conn.commit()
cur.execute("INSERT INTO CUROPN VALUES(?, ?, ?, ?, ?, ?);", opn3)
conn.commit()'''
# Titles and reading speeds (units unclear from here -- TODO confirm) of
# books read faster than 350, joined back to the owning user.
cur.execute("SELECT user_id, title, cur_speed "
"FROM books inner join users on books.books_list_id = users.books_list_id "
"join CUROPN on books.book_id = curopn.book_id "
"WHERE curopn.cur_speed > 350;")
result = cur.fetchall()
print(result)
18,571 | f760947265a53e6d3c5754f825cdf6ecf18c6d38 | # from serial import Serial
from struct import pack
import json
# Serial protocol framing: each frame is ESCAPE + START followed by a
# one-byte command id; ESCAPE bytes inside payloads are doubled.
ESCAPE = '\x13'
START = '\x37'
COMMAND_HELLO = '\x01'
COMMAND_LEDS = '\x02'
COMMAND_CLEAR = '\x03'
def escape(data):
    """Double every ESCAPE byte so payload bytes that collide with the
    frame marker survive transmission."""
    return data.replace(ESCAPE, ESCAPE * 2)
def command_hello():
    """Build the HELLO frame (header plus command id, no payload)."""
    return ESCAPE + START + COMMAND_HELLO
def command_leds(leds):
    """Build an LED-update frame from an iterable of (r, g, b) triples.

    Each triple is packed as three bytes in G, R, B order (the order the
    strip expects, presumably -- verify against firmware), prefixed by a
    one-byte LED count.

    NOTE(review): Python 2 only -- pack() returns bytes, which cannot be
    concatenated with the str frame header under Python 3.
    """
    packed = [pack('BBB', rgb[1], rgb[0], rgb[2]) for rgb in leds]
    ret = ESCAPE + START + COMMAND_LEDS + chr(len(packed))
    for led in packed:
        ret += escape(led)
    return ret
def command_clear():
    """Build the CLEAR frame (header plus command id, no payload)."""
    return ESCAPE + START + COMMAND_CLEAR
# s = Serial('/dev/ttyUSB0', 115200, timeout=0)
#---------------------------------------------------------------------------------------
# color stuff
#---------------------------------------------------------------------------------------
import math
def hsv2rgb(h, s, v):
    """Convert HSV (h in degrees, s and v in [0, 1]) to an (r, g, b)
    tuple of ints in [0, 255]."""
    h, s, v = float(h), float(s), float(v)
    sector = h / 60.0
    base = math.floor(sector)
    idx = int(base) % 6
    frac = sector - base
    p = v * (1 - s)
    q = v * (1 - frac * s)
    t = v * (1 - (1 - frac) * s)
    # One (r, g, b) permutation per 60-degree hue sector.
    sector_table = {
        0: (v, t, p),
        1: (q, v, p),
        2: (p, v, t),
        3: (p, q, v),
        4: (t, p, v),
        5: (v, p, q),
    }
    r, g, b = sector_table[idx]
    return int(r * 255), int(g * 255), int(b * 255)
def rgb2hsv(r, g, b):
    """Convert r, g, b in [0, 255] to (h in degrees, s, v) with s and v
    in [0, 1]."""
    r, g, b = r / 255.0, g / 255.0, b / 255.0
    hi = max(r, g, b)
    lo = min(r, g, b)
    spread = hi - lo
    # Hue: 0 for greys, otherwise offset by the dominant channel.
    if spread == 0:
        h = 0
    elif hi == r:
        h = (60 * ((g - b) / spread) + 360) % 360
    elif hi == g:
        h = (60 * ((b - r) / spread) + 120) % 360
    else:
        h = (60 * ((r - g) / spread) + 240) % 360
    s = 0 if hi == 0 else spread / hi
    return h, s, hi
|
18,572 | 336a5e73d9d94ab342e2d1d9c9709e9fce194b80 | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
import sys
def talker():
    """Interactive publisher for /emergency_stop.

    Prompts on stdin each cycle: 'y' publishes "True", 'q' quits,
    anything else publishes "False".

    NOTE(review): Python 2 / ROS1 code (raw_input); the local 'input'
    shadows the builtin, and pubStr may be unbound on the first loop
    iteration if raw_input raises before the assignment.
    """
    pub = rospy.Publisher('/emergency_stop', String, queue_size=1)
    rospy.init_node('mock_emergency_stop', anonymous=True)
    rate = rospy.Rate(10)
    initial_state = "False"
    rospy.loginfo(initial_state)
    pub.publish(initial_state)
    # To let the user exit the application without getting frustration
    exit_request = False
    while not rospy.is_shutdown():
        print("Engage Emergency stop? (y or n)")
        try:
            input = raw_input()
            if input == "y":
                pubStr = "True"
            # Exit phrase
            elif input == "q":
                pubStr = "False"
                exit_request = True
                break
            else:
                pubStr = "False"
        except:
            rospy.sleep(0.1)
        if exit_request:
            sys.exit(0)
        rospy.loginfo(pubStr)
        pub.publish(pubStr)
        rate.sleep()
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Raised on node shutdown; exit quietly.
        pass
|
18,573 | e11e3bd5406f16359dd5e300464d4946cc4cef22 | li = [12, 24, 35, 24, 88, 120, 155]
print [x for x in li if x != 24]
|
18,574 | d53e08ded0f7eb7061bde4e94531e2802d7e6f6e | import controller
import interface
import time
import os
# Instantiate the data-processing model and GUI helper (module-level
# singletons used by the handlers below).
obj = controller.Model()
interfaceObj = interface.interfaceClass()
# Build the main window from the interface definition.
window = interfaceObj.load()
def update_list(element, new_val):
    """Replace the contents of the named listbox *element* in the global
    window with *new_val*."""
    widget = window.FindElement(element)
    widget.Update(values=new_val)
def checkForHeader(isFileHeader, createFileBase = ''):
    """Ask the user how to treat headers and configure the model.

    When headers were detected, 'Yes' keeps header mode (1); otherwise
    'Yes' selects cascading mode (2). Uses the module-level interfaceObj
    and obj singletons.
    """
    if (isFileHeader):
        response =interfaceObj.popup_yesno(createFileBase + 'Headers detected for all sheet. Proceed with this mode?')
        if (response == 'Yes'):
            user_response = 1
        else:
            user_response = 2
    else:
        response =interfaceObj.popup_yesno(createFileBase + 'No common header found. Proceed with casading mode?')
        if response == 'Yes':
            user_response = 2
        else:
            user_response =1
    if user_response == 1:
        obj.SubsequentHeader()
    else:
        obj.noSubsequentHeader()
# Main GUI event loop: dispatch on the emitted event name until the
# window closes.
while True:
    # listen for event emitter and its value
    event, values = window.Read()
    progress_bar = window.FindElement('progressbar')
    # window close
    if event in (None, 'Exit'):
        break
    # browse file
    if event == 'browse_file':
        start = time.time()
        obj = controller.Model() # reini obj
        filePath = values["browse_file"]
        # file is selected to proceed
        if len(filePath) > 0 :
            # set and read file
            obj.setFilePath(filePath)
            # if file is processed, likely xlsx format
            # return mode type if keyword dictionary is found
            # mode 1 = Encryption Mode, mode 2 = Decryption Mode
            if 'xlsx' in filePath:
                obj.getAttribute()
            # check if merging is necessary
            if obj.mode == 1:
                obj.readDefaultList()
                if 'csv' not in filePath:
                    isFileHeader = obj.detectCommonHeader()
                    checkForHeader(isFileHeader)
                else:
                    obj.SubsequentHeader()
                # Filter encryption and Remove List
                obj.readContent()
            # enabling buttons and list on interface
            progress_bar.UpdateBar(0)
            window.FindElement('process').Update(disabled=False)
            window.FindElement('password').Update(disabled=False)
            not_encap = obj.offset(obj.encapsulationList)
            not_remove = obj.offset(obj.removeList)
            update_list('encap_list_default', not_encap)
            update_list('encap_list_add', obj.encapsulationList)
            update_list('remove_list_default', not_remove)
            update_list('remove_list_add', obj.removeList)
            # file meant for decryption process
            if (obj.mode == 2):
                obj.setPassword('') # override default_password
                window.FindElement('remove_en').Update(disabled=True)
                window.FindElement('add_en').Update(disabled=True)
                window.FindElement('remove_remove').Update(disabled=True)
                window.FindElement('remove_add').Update(disabled=True)
                window.FindElement('process').Update('Decrypt')
            # file meant for encryption process
            else:
                window.FindElement('remove_en').Update(disabled=False)
                window.FindElement('add_en').Update(disabled=False)
                window.FindElement('remove_remove').Update(disabled=False)
                window.FindElement('remove_add').Update(disabled=False)
                window.FindElement('process').Update('Encrypt')
            # send notification
            interfaceObj.popup('Data Loaded')
    # listen for remove encrption <<
    elif event == 'remove_en':
        selection = values['encap_list_add']
        # at least being selected
        if len(selection) > 0:
            # update class list and update interface
            obj.encapsulationList.remove(selection[0])
            not_en = obj.offset(obj.encapsulationList)
            update_list('encap_list_default', not_en)
            update_list('encap_list_add', obj.encapsulationList)
    # listen for add encryption >>
    elif event == 'add_en':
        selection = values['encap_list_default']
        # at least being selected
        if len(selection) > 0:
            # cross duplication in removelist
            if selection[0] not in obj.removeList:
                # update class list and update interface
                obj.encapsulationList.append(selection[0])
                not_en = obj.offset(obj.encapsulationList)
                update_list('encap_list_default', not_en)
                update_list('encap_list_add', obj.encapsulationList)
            else:
                # notify of duplication
                interfaceObj.popup("Selected item exist in removed list")
    # listen for remove remove <<
    elif event == 'remove_remove':
        selection = values['remove_list_add']
        # at least one selected
        if len(selection) > 0:
            # update class list and update interface
            obj.removeList.remove(selection[0])
            not_re = obj.offset(obj.removeList)
            update_list('remove_list_default', not_re)
            update_list('remove_list_add',obj.removeList)
    # listen for remove add >>
    elif event == 'remove_add':
        selection = values['remove_list_default']
        # at least one selected
        if len(selection) > 0:
            # cross duplication in encrption list
            if selection[0] not in obj.encapsulationList:
                # update class list and interface
                obj.removeList.append(selection[0])
                not_re = obj.offset(obj.removeList)
                update_list('remove_list_default', not_re)
                update_list('remove_list_add', obj.removeList)
            else:
                # notify of duplication
                interfaceObj.popup("Selected item exist in encapsulation list")
    # listen for process button
    elif event == 'process':
        start = time.time()
        # set password if fields is not blank
        if len(values['password'])> 0:
            obj.setPassword(values['password'])
        # encryption mode
        if obj.mode == 1:
            obj.dropColumns()
            obj.encapColumns(progress_bar)
            response = interfaceObj.popup_yesno('Update Keywords?')
            if (response == 'Yes'):
                obj.writeDefaultList()
        # decryption mode
        else:
            obj.decapColumns(progress_bar)
        # call write method
        obj.writeFile()
        progress_bar.UpdateBar(100)
        interfaceObj.tray('Done processing! Time taken: %0.2fs ' % (time.time() - start))
window.Close()
|
18,575 | 648cc0213cb687c35b06a979f4170230db96b924 | import matplotlib.pyplot as plt
import numpy as np
def thrust(vel, ang):
    """Thrust at airspeed *vel*, resolved along heading angle *ang* (radians).

    The magnitude is a quartic polynomial fit in velocity (quartic term is
    currently zeroed out); returns the (x, y) components as a tuple.
    """
    T_0 = 12.756
    T_1 = -0.0941*3.28
    T_2 = 0.0023*3.28**2
    T_3 = -7*10**-5*3.28**3
    T_4 = 0 # 4*10**-7*3.28**4 -- quartic term disabled
    magnitude = vel**4*T_4 + vel**3*T_3 + vel**2*T_2 + vel*T_1 + T_0
    # Decompose the scalar thrust along the heading angle.
    x_component = np.cos(ang) * magnitude
    y_component = np.sin(ang) * magnitude
    return (x_component, y_component)
# Sample the thrust curve at zero angle and plot it.
# NOTE(review): thrust() returns (x, y) tuples, so plot() draws two
# curves here -- confirm that is intended.
vel = np.linspace(0, 50, 100)
ang = 0
T = [thrust(x, ang) for x in vel]
plt.plot(vel, T, 'b')
plt.show()
18,576 | 3d84efdd1ba320a8144eceba31d79f12f1411f2b | # data processing functions
from data_classes import *
import torch
import numpy as np
import gzip
from sklearn.feature_extraction.text import CountVectorizer
import re
# Constants
# Column indexes of the item counts / per-player values within the
# 6-field numeric header (alternating item, value pairs).
item_indeces = [0, 2, 4]
weight_indeces = [1, 3, 5]
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
nums_string = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
# Speaker tags used in the dialogue transcripts.
them = "THEM:"
you = "YOU:"
BATCH_SIZE = 1
LONGEST_MESSAGE_CHAR = 269
LONGEST_MESSAGE_WORD = 67 #TODO: CHANGE TO 70
MAX_LENGTH = LONGEST_MESSAGE_WORD
TARGET_LENGTH = 3
word_map = {}
def process_file(data_file_name, num_lines=None):
    """Parse the negotiation log file into DataPoint objects.

    Returns (data_points, word_map) where word_map is the CountVectorizer
    vocabulary built over the whole file. Adjacent line pairs are treated
    as the two sides of one dialogue. NOTE(review): num_lines is accepted
    but currently unused.
    """
    file = open(data_file_name, 'r')
    vec = CountVectorizer()
    x = vec.fit_transform(file).toarray()
    file.close()
    word_map = vec.vocabulary_
    lines = []
    with open(data_file_name, 'r') as f:
        for line in f:
            lines.append(line.strip())
    data_points = []
    iterator = iter(lines)
    old = next(iterator)
    for line in iterator:
        new = line
        # Each adjacent (new, old) pair is one candidate dialogue.
        point = process_line(new, old, word_map)
        if point != None:
            get_target(point)
            data_points.append(point)
        old = new
    return data_points, word_map
def process_line(p1, p2, map):
    """Build a DataPoint from two adjacent transcript lines.

    p1 and p2 are the two players' views of the same dialogue; returns
    None when the pair fails validation. *map* is the vocabulary dict
    from CountVectorizer.
    """
    p1_reward_text, p1_text, p1_inputs, p1_outputs = seperate_line(p1)
    p2_reward_text, p2_text, p2_inputs, p2_outputs = seperate_line(p2)
    # Check to make sure we have a corresponding data point
    check = check_lines(p1_inputs, p2_inputs, p1_text[0], p2_text[0])
    if not check:
        return None
    items = [p1_inputs[i] for i in item_indeces]
    p1_weights = [p1_inputs[i] for i in weight_indeces]
    p2_weights = [p2_inputs[i] for i in weight_indeces]
    indx = 0
    messages = []
    for text in p1_text:
        if you in text:
            p1 = True
            message_text = text.split(you)[1]
            word_list = message_text.split()
            w_list = get_word_list(word_list, map)
            message = Message(word_list, p1, indx, w_list)
        elif them in text:
            p1 = False
            message_text = text.split(them)[1]
            word_list = message_text.split()
            w_list = get_word_list(word_list, map)
            message = Message(word_list, p1, indx, w_list)
        # NOTE(review): if a segment carries neither tag, the previous
        # 'message' is re-appended (or NameError on the first pass).
        messages.append(message)
        indx += 1
    data_point = DataPoint(messages, p1_weights, p2_weights, items)
    return data_point
# Get the terms of the deal
def get_selection(line):
    """Parse the three selected item counts from the <selection> marker.

    Returns [0, 0, 0] when the dialogue ended in disagreement, no
    agreement, or a disconnect. NOTE(review): Python 2 (print
    statements); relies on fixed character offsets into the selection
    string.
    """
    m = re.search('disagree', line)
    n = re.search('no agreement', line)
    d = re.search('disconnect', line)
    if m == None and n==None and d==None:
        s = line.split("<eos>")[-2].split("<selection>")[1]
        item1 = int(s[7])
        item2 = int(s[15])
        item3 = int(s[23])
        if item1 not in nums:
            print "SOMETHING WRONG"
            print item1
        if item2 not in nums:
            print "SOMETHING WRONG"
            print item2
        if item3 not in nums:
            print "SOMETHING WRONG"
            print item3
        return [item1, item2, item3]
    else:
        return [0, 0, 0]
# Get the reward amount
def get_reward(line):
    """Extract the single-digit reward that follows the final <eos>
    marker; 0 when the character at that position is not a digit."""
    tail = line.split("<eos>")[-1]
    digit = tail[8]
    return int(digit) if digit in nums_string else 0
# Make sure the data point is valid
def check_lines(p1_inputs, p2_inputs, p1_message, p2_message):
    """Validate that two consecutive log lines describe the same dialogue.

    Both sides must list identical item counts, exactly one first message
    must carry the THEM: tag, and the text after the tags must match.
    """
    if [p1_inputs[i] for i in item_indeces] != [p2_inputs[i] for i in item_indeces]:
        return False
    p1_is_them = them in p1_message
    p2_is_them = them in p2_message
    # Exactly one of the two lines should be tagged THEM:.
    if p1_is_them == p2_is_them:
        return False
    if p1_is_them:
        first = p1_message.split(them)[1]
        second = p2_message.split(you)[1]
    else:
        first = p1_message.split(you)[1]
        second = p2_message.split(them)[1]
    return first == second
# Get the tensor target for a data point
# Get the tensor target for a data point
def get_target(point):
    """Build a 1x33 multi-hot LongTensor from point.p2_weights and store
    it on point.target.

    Layout: positions 0-10 for books, 11-21 for hats, 22-32 for balls;
    the weight value selects the slot inside each 11-wide band.
    """
    tar = torch.LongTensor(BATCH_SIZE, 33)
    agenda = point.p2_weights
    book_index = agenda[0]
    hat_index = 11 + agenda[1]
    ball_index = 22 + agenda[2]
    array = [0]*33
    array[book_index] = 1
    array[hat_index] = 1
    array[ball_index] = 1
    tensor = torch.from_numpy(np.array(array)).long()
    tar[0] = tensor
    point.target = tar
def get_word_list(word_list, map):
    """Map words to vocabulary indexes, zero-padding or truncating the
    result to exactly MAX_LENGTH entries.

    Words missing from *map* are silently skipped (the original
    best-effort behavior). NOTE(review): parameter name 'map' shadows the
    builtin but is kept for interface compatibility.
    """
    indexes = []
    for word in word_list:
        try:
            indexes.append(map[word])
        except KeyError:
            # KeyError is the only expected failure; the original bare
            # 'except' also hid unrelated errors.
            continue
    if len(indexes) < MAX_LENGTH:
        indexes.extend([0] * (MAX_LENGTH - len(indexes)))
    return indexes[:MAX_LENGTH]
def seperate_line(p1):
    """Split a raw log line into (reward_text, message_texts, inputs, outputs).

    NOTE(review): relies on Python 2 map() returning a list; under
    Python 3 the map objects would break the indexing done by callers.
    The function name's typo is kept for interface compatibility.
    """
    p1_split = p1.split()
    p1_text = p1.split("<eos>")
    # Drop the 12 leading characters of the first segment -- presumably
    # the six single-digit counts plus separating spaces; TODO confirm.
    p1_text[0] = p1_text[0][12:]
    reward_text = p1_text[-1][1:9]
    p1_inputs = map(int, p1_split[:6])
    p1_outputs = map(int, p1_split[-6:])
    p1_text = p1_text[:-2]
    return reward_text, p1_text, p1_inputs, p1_outputs
if __name__ == "__main__":
    # Smoke-run the parser on the bundled data file.
    data_file_name = "data.txt"
    num_lines = 1000
    points, word_map = process_file(data_file_name, num_lines)
|
class Pet:
    """Simple pet record with name, type and age attributes.

    The original accessor methods lacked *self* and stored nothing; they
    are now real instance methods. Direct attribute assignment (as the
    script below does) still works, and Pet() with no arguments remains
    valid thanks to the defaults.
    """

    def __init__(self, name=None, type=None, age=None):
        self.name = name
        self.type = type
        self.age = age

    def set_name(self, name):
        self.name = name

    def set_type(self, type):
        self.type = type

    def set_age(self, age):
        self.age = age

    def get_name(self):
        # Original printed the 'Pet age:' label here by mistake.
        print('Pet name: ', self.name)

    def get_type(self):
        print('Pet type: ', self.type)

    def get_age(self):
        print('Pet age: ', self.age)
# Interactive demo: attributes are assigned directly on the instance.
p1 = Pet()
p1.name = input("Enter the pet's name: ")
p1.type = input("Enter the pet type: ")
p1.age = input("Enter the pet's age: ")
print("The pet is a", p1.type, "whose name is"
, p1.name, "and is", p1.age, "years old.")
18,578 | bdfb2403ecb704829f8d991e744f7ab7c0443cd0 | # -*- coding: UTF-8 -*-
import os
import input_file
def contem_pelo_menos_tres_vogais(string):
    """Return True when *string* contains at least three vowels
    (duplicates count)."""
    total = sum(string.count(vogal) for vogal in ('a', 'e', 'i', 'o', 'u'))
    return total > 2
def nao_contem_bad_string(string):
    """Return True when *string* contains none of the forbidden pairs."""
    return not any(bad in string for bad in ('ab', 'cd', 'pq', 'xy'))
def contem_caracteres_seguidos(string):
    """Return True when some character appears doubled (e.g. 'xx')."""
    return any(caractere * 2 in string for caractere in string)
def contar_strings_boas(lista_strings):
    """Count the strings that satisfy all three 'nice string' rules and
    return the (Portuguese) summary message."""
    total = sum(
        1 for s in lista_strings
        if contem_pelo_menos_tres_vogais(s)
        and contem_caracteres_seguidos(s)
        and nao_contem_bad_string(s)
    )
    return "a quantidade de strings boas é: " + str(total)
def allindices(string, sub, listindex=None, offset=0):
    """Return the index of every occurrence of *sub* in *string*,
    searching from *offset* onward (overlapping matches included).

    *listindex* previously defaulted to a shared mutable list, so results
    accumulated across calls; a fresh list is now created per call.
    """
    if listindex is None:
        listindex = []
    i = string.find(sub, offset)
    while i >= 0:
        listindex.append(i)
        i = string.find(sub, i + 1)
    return listindex
def encontrar_pares(string_percorrida):
    # TODO(review): stub -- should return the letter pairs that appear at
    # least twice in the string; currently hard-coded to ["xx"].
    return ["xx"]
def encontrar_indices_pares(string_percorrida):
    """Collect the indexes of each repeated pair (work in progress; the
    pair finder above is still a stub)."""
    pares = encontrar_pares(string_percorrida)
    indices = list()
    for par in pares:
        indices.append(allindices(string_percorrida, par))
    # original note: "parei aqui +/-" -- stopped around here
    return indices
def encontrar_indices_letra_repetida_com_uma_no_meio(string_percorrida):
    # TODO(review): stub -- should locate a letter that repeats with
    # exactly one letter between (e.g. 'xyx'); returns dummy indexes.
    return 0, 0
def regra_louca(string_percorrida):
    """Part-two rule check (incomplete): both sub-conditions come from
    stub helpers, so the result is not meaningful yet."""
    indices_pares = encontrar_indices_pares(string_percorrida)
    indices_letra_repetida_com_uma_no_meio = encontrar_indices_letra_repetida_com_uma_no_meio(string_percorrida)
    if indices_letra_repetida_com_uma_no_meio[0] >= indices_pares[0] and indices_letra_repetida_com_uma_no_meio[1] <= \
            indices_pares[1]:
        return True
    return False
def contar_strings_boas_nova_regra(lista_strings):
    """Count strings satisfying the second-part rule; return a report string."""
    qtde = sum(1 for s in lista_strings if regra_louca(s))
    return "a quantidade de strings boas na nova regra é: " + str(qtde)
# Resolve the directory of this script so the input file is found regardless
# of the current working directory.
diretorio = os.path.dirname(os.path.abspath(__file__))
# input_file is imported earlier in this file; presumably returns one string
# per line of the puzzle input — TODO confirm.
strings = input_file.read_lines(diretorio)
print(contar_strings_boas(strings))
print(contar_strings_boas_nova_regra(strings))
|
def data_type(arg):
    """Classify *arg* by type.

    Returns:
        len(arg) for strings; 'no value' for None; the value itself for
        booleans; 'less than 100' / 'more than 100' / 'equal to 100' for
        integers; the third element for lists of length >= 3; None otherwise.
    """
    if isinstance(arg, str):
        return len(arg)
    if arg is None:  # `is` is the idiomatic None test (was `== None`)
        return 'no value'
    # bool must be tested before int because bool is a subclass of int.
    if isinstance(arg, bool):
        return arg
    if isinstance(arg, int):
        if arg < 100:
            return 'less than 100'
        elif arg > 100:
            return 'more than 100'
        else:
            return 'equal to 100'
    # Fixed: the original used the invalid operator `=>` (SyntaxError) and its
    # else branch evaluated None without returning it.
    if isinstance(arg, list) and len(arg) >= 3:
        return arg[2]
    return None


print(data_type(101))  # Fixed: was a Python 2 print statement
|
18,580 | b27fb2ef64875d4b09680d5586313a1f1149771b | # Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import unittest
import tempfile
import json
import os
import six
# This is necessary because io.StringIO in Python 2 does not accept str, only
# unicode. BytesIO works in Python 2, but then complains when given a str
# instead of bytes in Python 3.
if six.PY2:
from cStringIO import StringIO # pylint: disable=wrong-import-order,import-error
else:
from io import StringIO # pylint: disable=wrong-import-order
import mock
from core import perf_data_generator
from core.perf_data_generator import BenchmarkMetadata
class PerfDataGeneratorTest(unittest.TestCase):
    """Tests for perf_data_generator's waterfall-JSON parsing helpers."""

    def setUp(self):
        # Test config can be big, so set maxDiff to None to see the full
        # comparison diff when assertEqual fails.
        self.maxDiff = None

    def test_get_scheduled_non_telemetry_benchmarks(self):
        """Names from every builder's 'isolated_scripts' and 'scripts' entries
        are collected from the waterfall JSON file."""
        # delete=False + close() so the file can be re-opened for writing by
        # name below (required for Windows compatibility).
        tmpfile = tempfile.NamedTemporaryFile(delete=False)
        tmpfile.close()
        fake_perf_waterfall_file = tmpfile.name
        data = {
            'builder 1': {
                'isolated_scripts': [
                    {'name': 'test_dancing'},
                    {'name': 'test_singing'},
                    {'name': 'performance_test_suite'},
                ],
                'scripts': [
                    {'name': 'ninja_test'},
                ]
            },
            'builder 2': {
                'scripts': [
                    {'name': 'gun_slinger'},
                ]
            }
        }
        try:
            with open(fake_perf_waterfall_file, 'w') as f:
                json.dump(data, f)
            benchmarks = perf_data_generator.get_scheduled_non_telemetry_benchmarks(
                fake_perf_waterfall_file)
            # 'performance_test_suite' is the telemetry suite and is expected
            # to be excluded by the function under test.
            self.assertIn('ninja_test', benchmarks)
            self.assertIn('gun_slinger', benchmarks)
            self.assertIn('test_dancing', benchmarks)
            self.assertIn('test_singing', benchmarks)
        finally:
            os.remove(fake_perf_waterfall_file)
class TestIsPerfBenchmarksSchedulingValid(unittest.TestCase):
    """Tests for is_perf_benchmarks_scheduling_valid: the tracked benchmark
    tables must exactly match what the perf waterfall schedules."""

    def setUp(self):
        self.maxDiff = None
        # Snapshot the module-level benchmark tables so each test can mutate
        # them freely; they are restored in tearDown.
        self.original_GTEST_BENCHMARKS = copy.deepcopy(
            perf_data_generator.GTEST_BENCHMARKS)
        self.original_OTHER_BENCHMARKS = copy.deepcopy(
            perf_data_generator.OTHER_BENCHMARKS)
        # Captures the human-readable report written by the function under test.
        self.test_stream = StringIO()
        self.mock_get_non_telemetry_benchmarks = mock.patch(
            'core.perf_data_generator.get_scheduled_non_telemetry_benchmarks')
        self.get_non_telemetry_benchmarks = (
            self.mock_get_non_telemetry_benchmarks.start())

    def tearDown(self):
        perf_data_generator.GTEST_BENCHMARKS = (
            self.original_GTEST_BENCHMARKS)
        perf_data_generator.OTHER_BENCHMARKS = (
            self.original_OTHER_BENCHMARKS)
        self.mock_get_non_telemetry_benchmarks.stop()

    def test_returnTrue(self):
        # Scheduled and tracked sets agree -> valid, and no report is written.
        self.get_non_telemetry_benchmarks.return_value = {'honda'}
        perf_data_generator.GTEST_BENCHMARKS = {
            'honda': BenchmarkMetadata('baz@foo.com'),
        }
        perf_data_generator.OTHER_BENCHMARKS = {}
        valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
            'dummy', self.test_stream)
        self.assertEqual(self.test_stream.getvalue(), '')
        self.assertEqual(valid, True)

    def test_UnscheduledCppBenchmarks(self):
        # 'toyota' is tracked but not scheduled on the waterfall -> invalid.
        self.get_non_telemetry_benchmarks.return_value = {'honda'}
        perf_data_generator.GTEST_BENCHMARKS = {
            'honda': BenchmarkMetadata('baz@foo.com'),
            'toyota': BenchmarkMetadata('baz@foo.com'),
        }
        perf_data_generator.OTHER_BENCHMARKS = {}
        valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
            'dummy', self.test_stream)
        self.assertEqual(valid, False)
        self.assertIn('Benchmark toyota is tracked but not scheduled',
                      self.test_stream.getvalue())

    def test_UntrackedCppBenchmarks(self):
        # 'tesla' is scheduled on the waterfall but not tracked -> invalid.
        self.get_non_telemetry_benchmarks.return_value = {'honda', 'tesla'}
        perf_data_generator.GTEST_BENCHMARKS = {
            'honda': BenchmarkMetadata('baz@foo.com'),
        }
        perf_data_generator.OTHER_BENCHMARKS = {}
        valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
            'dummy', self.test_stream)
        self.assertEqual(valid, False)
        self.assertIn(
            'Benchmark tesla is scheduled on perf waterfall but not tracked',
            self.test_stream.getvalue())
|
18,581 | f90240695ad5bcd0199c46e0ff987c49d875c992 | #核心思想是结合广义线性模型的记忆能力(memorization)和深度前馈神经网络模型的泛化能力(generalization)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import os
import tensorflow.keras as keras

# Download the data — California housing prices.
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()

# Split into train / validation / test sets.
from sklearn.model_selection import train_test_split
x_train_all, x_test, y_train_all, y_test = train_test_split(housing.data, housing.target, random_state=7)
x_train, x_valid, y_train, y_valid = train_test_split(x_train_all, y_train_all, random_state=11)

# Standardize features: fit on the training set only, reuse its statistics.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)

# Network construction, option 1: the functional API.
# NOTE(review): `input` shadows the builtin of the same name.
input = keras.layers.Input(shape=x_train.shape[1:])
# deep branch
hidden1 = keras.layers.Dense(30, activation='relu')(input)
hidden2 = keras.layers.Dense(30, activation='relu')(hidden1)
# wide branch: raw inputs concatenated with the deep representation
concat = keras.layers.concatenate([input, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input], outputs=[output])
# Network construction, option 2: the subclassing API.
# Fixed: this alternative was previously "commented out" with a triple-quoted
# string, but the nested ''' docstrings inside terminated the outer literal
# early, which made the whole file a SyntaxError. Kept as real comments.
# class WideDeepModel(keras.models.Model):
#     def __init__(self):
#         super(WideDeepModel, self).__init__()
#         """Define the model's layers."""
#         self.hidden1_layer = keras.layers.Dense(30, activation='relu')
#         self.hidden2_layer = keras.layers.Dense(30, activation='relu')
#         self.output_layer = keras.layers.Dense(1)
#
#     def call(self, input):
#         """Forward pass of the model."""
#         hidden1 = self.hidden1_layer(input)
#         hidden2 = self.hidden2_layer(hidden1)
#         concat = keras.layers.concatenate([input, hidden2])  # original had typo 'cooncatenate'
#         output = self.output_layer(concat)
#         return output
#
# model = WideDeepModel()
# model.build(input_shape=(None, 8))
# The network summary differs from the plain sequential model.
# model.summary()
# Compile the model for regression (mean squared error loss).
model.compile(loss='mean_squared_error', optimizer='adam')  # with 'sgd' the loss became NaN after a few epochs
callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]
# Train with early stopping against the validation set.
history = model.fit(x_train_scaled, y_train, validation_data=(x_valid_scaled, y_valid), epochs=100, callbacks=callbacks)
# Plot the training results.
def plot_learning_curves(history):
    """Plot training/validation curves from a Keras History object."""
    frame = pd.DataFrame(history.history)
    frame.plot(figsize=(8, 5))
    axes = plt.gca()
    axes.grid(True)
    axes.set_ylim(0, 1)
    plt.show()
plot_learning_curves(history)
# Final evaluation on the held-out test set.
print(model.evaluate(x_test_scaled, y_test))
|
18,582 | 1aae7b21111c6afa5e40a3ae5ae57621b710ea0d | from dataclasses import dataclass
from datetime import date
from typing import Dict, List
from or_shifty.history import History
from or_shifty.person import Person
from or_shifty.shift import ShiftType
NEVER = date(1970, 1, 1)
@dataclass(frozen=True)
class HistoryMetrics:
    """Aggregated view of a shift History for a fixed set of people.

    Built via :meth:`build`; every dictionary covers exactly the given people,
    with the NEVER sentinel (the epoch) meaning "has never been on shift".
    """

    # Per shift type, how many past shifts each person has served (seeded
    # with any configured offset for that person/type).
    num_of_shifts: Dict[ShiftType, Dict[Person, int]]
    # Most recent day each person was on any shift.
    date_last_on_shift: Dict[Person, date]
    # Most recent day each person was on each specific shift type.
    date_last_on_shift_of_type: Dict[Person, Dict[ShiftType, date]]
    # Reference date used when rendering "days since last shift".
    now: date

    @classmethod
    def build(cls, history: History, people: List[Person], now: date):
        """Compute all metrics from *history*, restricted to *people*."""
        return cls(
            num_of_shifts=cls._num_of_shifts(history, people),
            date_last_on_shift=cls._date_last_on_shift(history, people),
            date_last_on_shift_of_type=cls._date_last_on_shift_of_type(history, people),
            now=now,
        )

    @classmethod
    def _num_of_shifts(cls, history: History, people: List[Person]):
        """Shift counts per type, as {shift_type: {person: count}}."""
        return {
            shift_type: cls._num_of_shifts_for_type(history, people, shift_type)
            for shift_type in ShiftType
        }

    @classmethod
    def _num_of_shifts_for_type(
        cls, history: History, people: List[Person], shift_type: ShiftType
    ):
        """Count past shifts of *shift_type* per person, seeded from offsets."""
        shifts = {person: 0 for person in people}
        for offset in history.offsets:
            if offset.shift_type is shift_type:
                # NOTE(review): raises KeyError if an offset names a person
                # outside *people* — confirm offsets are pre-filtered.
                shifts[offset.person] = offset.offset
        for past_shift in history.past_shifts:
            if past_shift.shift_type is shift_type and past_shift.person in shifts:
                # setdefault is redundant (membership was just checked) but harmless.
                shifts.setdefault(past_shift.person, 0)
                shifts[past_shift.person] += 1
        return shifts

    @classmethod
    def _date_last_on_shift(cls, history: History, people: List[Person]):
        """Most recent shift day per person; NEVER for people with no shifts.

        Takes the *first* occurrence of each person, which presumably relies
        on history.past_shifts being ordered newest-first — TODO confirm.
        """
        people_seen = set()
        date_last = {person: NEVER for person in people}
        for past_shift in history.past_shifts:
            if past_shift.person in date_last and past_shift.person not in people_seen:
                people_seen.add(past_shift.person)
                date_last[past_shift.person] = past_shift.day
        return date_last

    @classmethod
    def _date_last_on_shift_of_type(cls, history: History, people: List[Person]):
        """Most recent shift day per (person, shift type); same newest-first
        ordering assumption as _date_last_on_shift."""
        people_seen = {person: set() for person in people}
        date_last = {
            person: {shift_type: NEVER for shift_type in ShiftType} for person in people
        }
        for past_shift in history.past_shifts:
            if (
                past_shift.person in date_last
                and past_shift.shift_type not in people_seen[past_shift.person]
            ):
                people_seen[past_shift.person].add(past_shift.shift_type)
                date_last[past_shift.person][past_shift.shift_type] = past_shift.day
        return date_last

    def __str__(self):
        """Fixed-width table: per person, shift counts and days since last shift."""
        formatted = "Pre-allocation history metrics:\n"
        formatted += "{: <20}{: <15}{: <15}{: <15}{: <15}\n".format(
            "Name", "Standard", "Special A", "Special B", "Last on"
        )
        for person in self.date_last_on_shift.keys():
            # Truncate long names so the columns stay aligned.
            if len(person.name) > 16:
                formatted += f"{person.name[:16] + '...': <20}"
            else:
                formatted += f"{person.name: <20}"
            formatted += f"{self.num_of_shifts[ShiftType.STANDARD][person]: <15}"
            formatted += f"{self.num_of_shifts[ShiftType.SPECIAL_A][person]: <15}"
            formatted += f"{self.num_of_shifts[ShiftType.SPECIAL_B][person]: <15}"
            formatted += f"{(self.date_last_on_shift[person] - self.now).days: <15}"
            formatted += "\n"
        return formatted
|
class FieldNotFoundException(Exception):
    """Raised when an expected titled field is missing from a document section.

    Attributes:
        section: the section searched (expected to expose a `.value` label).
        expected_title: the title that could not be found.
        message: human-readable description.
    """

    def __init__(self, section, title):
        self.section = section
        self.expected_title = title
        self.message = f'Cannot find title: "{title}" in section: "{section.value}"'
        # Fixed: pass the message to Exception so str(exc) and tracebacks
        # actually show it (it was previously empty).
        super().__init__(self.message)
|
18,584 | 1b820c67d08f008348dde883214d8c5b27ae1b40 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        """Sort a singly linked list in ascending order via top-down merge sort.

        O(n log n) time; O(log n) recursion depth.
        """
        def split(head: ListNode) -> [ListNode, ListNode]:
            # Fast/slow pointers: when `fast` reaches the end, `slow` is at the
            # middle. Returns [left_half, right_half]; the left half is
            # terminated by cutting the link just before `slow`.
            fast = slow = head
            while fast and fast.next:
                fast = fast.next.next
                slow = slow.next
            if slow == fast:
                # 0- or 1-node list (slow never moved): nothing on the left.
                return [None, slow]
            mid = head
            while mid.next != slow:
                mid = mid.next
            mid.next = None
            return [head, slow]

        def merge(head1: ListNode, head2: ListNode) -> ListNode:
            # Standard stable two-list merge behind a dummy head node
            # (`<=` keeps equal elements in original order).
            head = cur = ListNode(None)
            while head1 and head2:
                if head1.val <= head2.val:
                    cur.next = head1
                    head1 = head1.next
                else:
                    cur.next = head2
                    head2 = head2.next
                cur = cur.next
            if not head2:
                cur.next = head1
            if not head1:
                cur.next = head2
            return head.next

        head1, head2 = split(head)
        if not head1 or not head2:
            # Base case: one side empty means fewer than two nodes overall.
            return merge(head1, head2)
        sorted1 = self.sortList(head1)
        sorted2 = self.sortList(head2)
        # NOTE: `sorted` shadows the builtin within this scope.
        sorted = merge(sorted1, sorted2)
        return sorted
18,585 | c8b7cf8b1de696bec0df711e9e54b43527ea4770 | import sqlite3
import sys
import paramiko

# SSH key/client used to reach each lab machine non-interactively.
k = paramiko.RSAKey.from_private_key_file("/home/rein/.ssh/id_rsa")
ssh = paramiko.SSHClient()
# Auto-accept unknown host keys (acceptable only on a trusted lab network).
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

# Reset every job to status 4 — presumably "cancelled/reset", TODO confirm.
conn = sqlite3.connect('jobs.db')
c = conn.cursor()
c.execute("UPDATE jobs SET status=4 WHERE 1")
conn.commit()
conn.close()

# Reset every host status to 0 (idle).
conn = sqlite3.connect('status.db')
c = conn.cursor()
c.execute("UPDATE status SET status=0 WHERE 1")
conn.commit()
conn.close()

# Sweep lab machines 01-44 and delete leftover tag files; print '.' per
# success and 'x' per failure so progress is visible on one line.
for hostd in range(1,45):
    host = 'physics-lab%02d.utsc-labs.utoronto.ca'%hostd
    try:
        ssh.connect(host, timeout=3, username='research', pkey=k)
        command = "rm -f /tmp/*.tag"
        stdin , stdout, stderr = ssh.exec_command(command)
        sys.stdout.write('.')
        ssh.close()
    except:
        # NOTE(review): bare except deliberately keeps the sweep going on any
        # error, but it also swallows KeyboardInterrupt — consider
        # `except Exception`.
        sys.stdout.write('x')
    sys.stdout.flush()
18,586 | c561b6398678977092655fe0a403f1f99833d5b3 | #!/usr/bin/python
from pytopsy import fetch, clean
from pprint import pprint
import json
api_key = fetch.read_key()
SPIKE_DATES_PATH = 'data/spike_dates.json'
def run():
    """Entry point: currently only refreshes the geo mentions dataset.

    mentions(), tweets() and sentiment() were run in earlier passes;
    re-enable them here if those datasets need refreshing.
    """
    geo()
def mentions():
    """Fetch yearly mention counts for each patent-troll keyword group and
    write one cleaned data file per year; returns the written paths."""
    years = ['2010','2011','2012', '2013', '2014']
    keyword_lists1 = [
        ['patent troll', '#patenttroll'],
        ['"patent assertion entity"', '#pae'],
        ['"patent monitization entity"', '#pme'],
        # NOTE(review): the first phrase below is missing its closing double
        # quote — confirm against the Topsy phrase-query syntax.
        ['"nonpracticing entity', 'non-practicing entity', '#npe'],
    ]
    conjunctions1 = ['OR','OR','OR','OR']
    data_paths = []
    for year in years:
        mintime = year + '-01-01'
        maxtime = year + '-12-31'
        outname = 'mentions1-' + year + fetch.JSON_APPENDAGE
        queries = fetch.build_queries(api_key, keyword_lists1,
                                      conjunctions=conjunctions1,
                                      mintime=mintime, maxtime=maxtime,
                                      is_timestamp=False)
        responses = fetch.send_queries('metrics', 'mentions', queries)
        clean_data = clean.clean_responses(responses, convert_timestamps=True)
        data_path = clean.write_data(clean_data, out_names=[outname])
        data_paths.append(data_path)
    # A second keyword set ('patent' AND 'infringe'/'troll') producing
    # mentions2-* files was explored here previously; re-add if needed.
    return data_paths
def tweets():
    # Fetch the tweets behind each mention spike and write three variants:
    # raw API responses, cleaned tweets, and cleaned tweets with authors.
    # NOTE: this module is Python 2 (print statements below).
    dates = []
    with open(SPIKE_DATES_PATH, 'r') as file:
        dates = json.load(file)
    keywords = [
        '"patent monitization entity"',
        '"patent troll"',
        '#patenttroll'
    ]
    queries = fetch.query_tweets_by_date(api_key, keywords, dates)
    responses = fetch.send_queries('content', 'tweets', queries)
    # Raw responses, pretty-printed for inspection.
    out_path = 'data/responses/spike_tweets.json'
    with open(out_path, 'w') as out_file:
        json.dump(responses, out_file, indent=2, separators=(',', ':'))
    print 'wrote tweets to %s' % out_path
    # Cleaned tweet text only.
    clean_data = clean.clean_tweets(responses)
    out_path = 'data/responses/spike_tweets-CLEAN.json'
    with open(out_path, 'w') as out_file:
        json.dump(clean_data, out_file, indent=2, separators=(',', ':'))
    print 'wrote tweets to %s' % out_path
    # Cleaned tweets with author attribution.
    tweets_authors = clean.clean_tweets(responses, include_author=True)
    out_path = 'data/responses/spike_tweets_authors-CLEAN.json'
    with open(out_path, 'w') as out_file:
        json.dump(tweets_authors, out_file, indent=2, separators=(',', ':'))
    print 'wrote tweets to %s' % out_path
def sentiment():
    """Fetch yearly sentiment metrics for the patent-troll keyword groups and
    write one cleaned data file per year.

    Returns:
        list of data paths written by clean.write_data, one per year.
        Fixed: the original built this list but never returned it, unlike the
        parallel mentions() function.
    """
    years = ['2010','2011','2012', '2013', '2014']
    keyword_lists = [
        ['patent troll', '#patenttroll'],
        ['"patent assertion entity"', '#pae'],
        ['"patent monitization entity"', '#pme'],
        # NOTE(review): the first phrase below is missing its closing double
        # quote — confirm against the Topsy phrase-query syntax.
        ['"nonpracticing entity', 'non-practicing entity', '#npe'],
    ]
    conjunctions = ['OR','OR','OR','OR']
    data_paths = []
    for year in years:
        mintime = year + '-01-01'
        maxtime = year + '-12-31'
        outname = 'sentiment-' + year + fetch.JSON_APPENDAGE
        queries = fetch.build_queries(api_key, keyword_lists,
                                      conjunctions=conjunctions,
                                      mintime=mintime, maxtime=maxtime,
                                      is_timestamp=False)
        responses = fetch.send_queries('metrics', 'sentiment', queries)
        clean_data = clean.clean_responses(responses, convert_timestamps=True)
        data_path = clean.write_data(clean_data, out_names=[outname])
        data_paths.append(data_path)
    return data_paths
def geo():
    """Fetch per-US-state mention counts over the full 2010-2014 window,
    write the cleaned result, and return the written data path."""
    keyword_lists = [
        ['patent troll', '#patenttroll', '"patent monitization entity"']
    ]
    conjunctions = ['OR']
    mintime = '2010-01-01'
    maxtime = '2014-12-22'
    outname = 'mentions-by-state.json'
    # scope 225 presumably selects the United States in the Topsy geo API —
    # TODO confirm against the API reference.
    params = {'scope': 225, 'types': 'state'}
    queries = fetch.build_queries(api_key, keyword_lists,
                                  conjunctions=conjunctions,
                                  mintime=mintime, maxtime=maxtime,
                                  is_timestamp=False, other_params=params)
    responses = fetch.send_queries('metrics', 'geo', queries)
    # Only one query is built, so only the first response matters.
    clean_data = clean.clean_geodata(responses[0])
    data_path = clean.write_data(clean_data, out_names=[outname])
    return data_path
if __name__ == "__main__":
run() |
18,587 | 13ff6027a1b61e10f821bf3c98937b48cb82b220 | """Manage the frame info using unique frame detail info object"""
import Models.unique_frame_detail_dict as dict_frame_detail
import Models.unique_frame_detail as obj
class DictOps(object):
    """Operations over the shared unique-frame-detail dictionary
    (Models.unique_frame_detail_dict.frame_dict), keyed by frame position."""

    def __init__(self):
        pass

    @classmethod
    def add_to_dict_from_figure(cls, frame_position, time_stamp):
        """Register a frame that contains a figure.

        Figures are detected first, so the title/content fields start out
        empty (False/None); add_to_dict_from_text_extract may fill them later.
        """
        dict_frame_detail.frame_dict.update(
            {frame_position: obj.FrameDetail(frame_position, False, None, False, None, True, time_stamp)})

    @classmethod
    def add_to_dict_from_text_extract(cls, frame_position, title_availability, title, content_availability, content,
                                      time_stamp):
        """Store a frame's extracted title/content, preserving its figure flag.

        A pre-existing entry means add_to_dict_from_figure already marked this
        frame as containing a figure.
        """
        # Fixed: use the `in` operator instead of calling __contains__
        # directly, and collapse the two branches that differed only in the
        # figure flag.
        has_figure = frame_position in dict_frame_detail.frame_dict
        dict_frame_detail.frame_dict.update(
            {frame_position: obj.FrameDetail(frame_position, title_availability, title,
                                             content_availability, content, has_figure, time_stamp)})

    @classmethod
    def view_dict(cls):
        """Print every stored frame's fields (debugging aid)."""
        for key in dict_frame_detail.frame_dict:
            detail = dict_frame_detail.frame_dict[key]
            # One attribute per line, separated by blanks, as before.
            for value in (detail.frame_position, detail.title_availability, detail.title,
                          detail.content_availability, detail.content, detail.figure,
                          detail.timestamp):
                print(value)
                print(" ")
            print("-----------------------------------------------------------")
            print("-----------------------------------------------------------")
            print(" ")

    @classmethod
    def get_visual_info(cls):
        """Return the full frame-detail dict for building the visual output."""
        return dict_frame_detail.frame_dict

    @classmethod
    def get_last_content(cls):
        """Return the most recently added frame's content for duplicate
        detection in text extraction.

        Returns "" when fewer than two frames exist (the first frame is the
        main topic) or when the last frame has no content (figure-only).
        """
        if len(dict_frame_detail.frame_dict) < 2:
            return ""
        key = list(dict_frame_detail.frame_dict.keys())[-1]
        if dict_frame_detail.frame_dict[key].content_availability:
            return dict_frame_detail.frame_dict[key].content
        return ""
|
18,588 | a4f0818fea96d3dbf62f39d7521137c54320d67c | import numpy as np
import os
import cv2
import math
import matplotlib.pyplot as plt
import string
import random
# Reverses the order of channel 3.
# RGB to BGR or BGR to RGB
def flip_channel_3(image):
    """Reverse the order of the last axis (RGB <-> BGR channel swap)."""
    return np.flip(image, axis=-1)
# Loads a number of images from the vggface2 dataset
def load_images(image_quantity):
    """Load up to *image_quantity* images from the VGGFace2 test set.

    Each image is converted BGR -> RGB; progress is printed roughly every 5%.
    Returns a numpy array of the loaded images.
    """
    path = '../data/vggface2/test/'
    print("Loading %d images..." % image_quantity)
    subjects = os.listdir(path)
    images = []
    for i, sub in enumerate(subjects):
        for j, image in enumerate(os.listdir(path + sub)):
            im = cv2.imread(path + sub + "/" + image)
            im = im[...,::-1]  # BGR -> RGB
            images.append(im)
            if(len(images) % max(1, int(image_quantity / 20)) == 0):
                print("%f%%" % (len(images) / image_quantity))
            if(len(images) >= image_quantity):
                images = np.array(images)
                return images
    # NOTE(review): when fewer than image_quantity files exist, execution
    # falls off the end and implicitly returns None — confirm callers expect
    # the full quantity to always be available.
# Detects the faces in the photo using Haar cascade.
# If more than one face exists, it is discarded.
# Checks if the cropped face image will fit within the bounds of a bin.
# Returns success_value, bounding_box_coordinates, bin_number
def face_check(img, bins):
    """Detect exactly one face in *img* and match it to a size bin.

    Args:
        img: BGR image array.
        bins: iterable of (size, lower_bound, upper_bound) tuples.

    Returns:
        (1, (x, y, w, h), bin_size) when exactly one face is detected whose
        width and height both fall in (lower_bound, upper_bound];
        (0, None, None) otherwise.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Fixed: resolve the cascade file from the OpenCV installation instead of
    # a hard-coded absolute Windows path, so the code is portable.
    cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(cascade_path)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) != 1:
        return (0, None, None)
    faces = faces[0]
    for b in bins:
        upper_bound = b[2]
        lower_bound = b[1]
        # faces = x, y, w, h
        if(faces[2] <= upper_bound and faces[2] > lower_bound and faces[3] <= upper_bound and faces[3] > lower_bound):
            return (1, faces, b[0])
    return (0, None, None)
# Adjusts the scale of the bounding box to desired height and width dimensions
def scale_adjustment(x, y, w_c, h_c, w_d, h_d):
    """Re-center an (x, y, w_c, h_c) box so it has size (w_d, h_d).

    The box keeps (approximately) the same center; floor division means the
    shift rounds toward the top-left for odd size differences.
    """
    shift_x = -(w_d - w_c) // 2
    shift_y = -(h_d - h_c) // 2
    return (x + shift_x, y + shift_y, w_d, h_d)
# Translates crop bounding box to eliminate overflow
def crop_realign(x, y, w_c, h_c, w_i, h_i):
    """Shift a (x, y, w_c, h_c) crop box back inside a (w_i, h_i) image.

    Overflow past the right/bottom edge shifts the box left/up, then negative
    coordinates are clamped to zero. The box size is never changed, so a box
    larger than the image still overflows after realignment.
    """
    x = min(x, w_i - w_c)
    y = min(y, h_i - h_c)
    x = max(x, 0)
    y = max(y, 0)
    return (x, y, w_c, h_c)
# Generates a string of random hex digits (a-f, A-F, 0-9)
def generate_random_hex_code(length):
    """Return a random string of *length* hex digits (0-9, a-f, A-F)."""
    alphabet = string.hexdigits
    return "".join(random.choice(alphabet) for _ in range(length))
# Crops image given bounding box
def crop_image(img, x, y, w, h):
    """Return the region of *img* inside the (x, y, w, h) bounding box."""
    return img[y:y + h, x:x + w, :]
# Legacy bins creation function (NEEDS UPDATING)
def create_bins(lower, upper, interval):
    """Return bin edges from *lower* up to *upper* (inclusive), stepping by
    *interval*.  Legacy helper (flagged NEEDS UPDATING in the original)."""
    edges = []
    current = lower
    while current <= upper:
        edges.append(current)
        current += interval
    return edges
# Inspects images at a given source path.
# Looks for faces within each image.
# Sorts the images into the proper bin if face is detected.
# Crops image to face bounding box.
# Saves image to destination path.
def collect_and_save_images(source_path, dest_path, image_quantity, bins):
    """Scan up to *image_quantity* images, crop detected faces, and save the
    crops into per-size bin directories under *dest_path*.

    Only images with exactly one detected face whose bounding box fits some
    bin are kept. Returns the list of bin sizes of the saved crops.
    """
    print("Inspecting %d images..." % image_quantity)
    subjects = os.listdir(source_path)
    prog_count = 0
    # NOTE(review): despite the name, this collects the *bin size* of each
    # accepted crop, not the image data itself.
    good_images = []
    # build directories
    for b in bins:
        path = dest_path + "%dx%d" % (b[0], b[0])
        if not os.path.exists(path):
            os.mkdir(path)
    for i, sub in enumerate(subjects):
        for j, filename in enumerate(os.listdir(source_path + sub)):
            image = cv2.imread(source_path + sub + "/" + filename)
            prog_count += 1
            if(prog_count % max(1, int(image_quantity / 20)) == 0):
                print("%f%%" % (prog_count / image_quantity))
            check = face_check(image, bins)
            detected = check[0]
            if(detected):
                (x, y, w, h) = check[1]
                desired_dimension = check[2]
                image_width = image.shape[1]
                image_height = image.shape[0]
                # Grow/shrink the detection box to the bin size, then shift it
                # back inside the image bounds before cropping.
                (x, y, w, h) = scale_adjustment(x, y, w, h, desired_dimension, desired_dimension)
                (x, y, w, h) = crop_realign(x, y, w, h, image_width, image_height)
                cropped = crop_image(image, x, y, w, h)
                good_images.append(desired_dimension)
                if prog_count % int(image_quantity / 20) == 0:
                    print("Image count:", len(good_images))
                # save image under a random 12-hex-digit filename
                p = dest_path + "%dx%d/" % (desired_dimension, desired_dimension)
                cv2.imwrite(p + generate_random_hex_code(12) + ".jpg", cropped)
            if(prog_count >= image_quantity):
                return good_images
|
18,589 | d056ea299af82ca08bb032a6f19066d65b906fae | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cluster import KMeans
from pylab import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
df = pd.read_csv('.\kmeans.csv')
df = pd.DataFrame(df)
print(df)
data = df['F']
cityName = df['地区']
km = KMeans(n_clusters=5) # 构造聚类器
y = km.fit_predict(data.values.reshape(-1,1)) # 聚类
mpl.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
plt.scatter(cityName,data)
plt.tick_params(labelsize=6)
plt.xticks(rotation=60)
plt.axhline(0.9897806,color='c',linewidth=2,linestyle= "dashed")
plt.axhline(-0.40190772,color='c',linewidth=2,linestyle= "dashed")
plt.axhline(0.18540474,color='c',linewidth=2,linestyle= "dashed")
plt.axhline(-0.70173354,color='c',linewidth=2,linestyle= "dashed")
plt.axhline(-0.15881039,color='c',linewidth=2,linestyle= "dashed")
plt.show()
label_pred = km.labels_ # 获取聚类标签
centroids = km.cluster_centers_ # 获取聚类中心
print(label_pred)
print(centroids)
plt.scatter(cityName,y)
plt.xticks(rotation=60)
plt.tick_params(labelsize=6)
plt.show()
CityCluster = [[],[],[],[],[]]
for i in range(len(cityName)):
CityCluster[label_pred[i]].append(cityName[i])
for i in range(len(CityCluster)):
print(CityCluster[i])
|
18,590 | fad530a71def1000787f861a4bc7977bfc7c730e | import wiki_word_parser as wikiparser
import sys
def main():
    """Command line: script.py <wiki_xml_file> [n]

    Parses a MediaWiki XML dump, counts words across all text nodes, and
    writes the top *n* (default 100) words to top_<n>.txt.
    """
    # handle arguments
    filename = sys.argv[1]
    n = int(sys.argv[2]) if len(sys.argv) > 2 else 100
    # get an etree from the xml file
    root = wikiparser.parse_mw(filename)
    # get all text nodes in the etree
    text_nodes = wikiparser.get_texts(root)
    # split the text into a word counter collection
    word_counter = wikiparser.convert_texts(text_nodes)
    # get the top n words from the word counter collection
    top_n = wikiparser.get_top_words(word_counter, n)
    # write results to a file
    out_name = 'top_{}.txt'.format(n)
    wikiparser.write_output(top_n, out_name)


# Fixed: guard the entry point so importing this module does not run main()
# (and crash on missing sys.argv arguments).
if __name__ == "__main__":
    main()
18,591 | 16fbc10eb990daa99d1f3868f8397507aab5c768 | import numpy as np
cimport numpy as np
cimport cython
from libc.math cimport floor, sqrt, fmin
DTYPE = np.float32
ctypedef np.float32_t DTYPE_t
@cython.boundscheck(False)
@cython.wraparound(False)
cdef azumith(float[:, :] data, float[:, :] dsts, center):
    # Fill *dsts* in place with each pixel's Euclidean distance from *center*
    # and return a matrix of slopes relative to the center.
    # NOTE(review): the name suggests an azimuth angle, but the stored value is
    # the raw slope (j - cty) / (i - ctx), not atan2 — confirm intended units.
    # NOTE(review): division by zero occurs when i == ctx (the center column).
    cdef int xsize = data.shape[1]
    cdef int ysize = data.shape[0]
    cdef float ctx = center[0]
    cdef float cty = center[1]
    cdef float[:, :] azum = np.zeros([ysize, xsize], dtype=DTYPE)
    for j in range(ysize):
        for i in range(xsize):
            azum[j, i] = (j - cty) / (i - ctx)
            dsts[j, i] = sqrt((j - cty)**2 + (i - ctx)**2)
    return azum
@cython.boundscheck(False)
@cython.wraparound(False)
cdef backsim(float[:] average):
    # Rebuild a 2-D background image by painting each pixel with the radial
    # average at its (floored) distance from the center.
    # NOTE(review): xsize, ysize, dsts and bins are not defined in this scope
    # (they are locals of other functions), so this will not compile as-is —
    # they presumably need to be passed in or made module-level.
    cdef float[:, :] bg = np.zeros([ysize, xsize], dtype=DTYPE)
    for j in range(ysize):
        for i in range(xsize):
            d = floor(dsts[j, i])
            if d <= bins:
                bg[j, i] = average[d]
    return bg
@cython.boundscheck(False)
@cython.wraparound(False)
def subtract(float[:, :] data, center, azumith):
    # Intended: build a radial-average background (restricted to an angular
    # wedge between azm1 and azm2 and radii > 60) and return it via backsim.
    # NOTE(review): several defects prevent this from compiling/running:
    #  - the parameter `azumith` (a 2-tuple of wedge limits) shadows the
    #    module-level cdef function of the same name, yet the call below
    #    treats it as that function — and with 2 args instead of 3;
    #  - the multi-line `if` condition ends at `> 60` with no colon
    #    (syntax error);
    #  - ysize/xsize are not defined in this scope;
    #  - `average[d] = data[j, i]` overwrites instead of accumulating, so the
    #    later division by counts[k] looks wrong — presumably should be `+=`;
    #  - `dsts` is allocated as zeros and never filled here, so d is always 0.
    cdef float ctx = center[0]
    cdef float cty = center[1]
    cdef float azm1 = azumith[0]
    cdef float azm2 = azumith[1]
    # bins is limited by the shorter distance from the center to an edge.
    cdef int bins = floor(fmin(ctx, cty))
    cdef float[:] average = np.zeros(bins, dtype=DTYPE)
    cdef float[:] counts = np.zeros(bins, dtype=DTYPE)
    cdef float[:, :] dsts = np.zeros([ysize, xsize], dtype=DTYPE)
    cdef float[:, :] azum = azumith(data, center)
    cdef Py_ssize_t i, j, k
    cdef int d
    for j in range(ysize):
        for i in range(xsize):
            d = floor(dsts[j, i])
            if azum[j, i] < azm2 and azum[j, i] > azm1 and \
                    d <= bins and dsts[j, i] > 60
                average[d] = data[j, i]
                counts[d] += 1
    for k in range(bins):
        average[k] = average[k] / counts[k]
    return backsim(average)
|
18,592 | a1f6695acc80e108912affa849732ecf3bd5dc28 | # Generated by Django 2.1 on 2018-08-04 02:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefines Album.year's choices."""

    dependencies = [
        ('music', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='album',
            name='year',
            # NOTE(review): an IntegerField with string choice values
            # ('C', 'O', 'TO') will fail validation, since the stored value is
            # an int — confirm the intended field type or use integer keys.
            field=models.IntegerField(choices=[('C', 'Current year'), ('O', 'Not current year'), ('TO', 'Too old not less than 1992')]),
        ),
    ]
|
18,593 | 8b948399c6d9e602d2714e71688896700684e6d8 | #!/usr/local/bin/python2
from utilities import *
import models

# Load 2015 data with kernel-PCA features enabled.
x_train, y_train, x_test, y_test = load(year=15, kpca=True)

# Define model
model = models.kNNModel()

# Train and test model
class_pred, confusion, t_train, t_test = train_and_test(model, x_train, y_train, x_test, y_test)
# Positive-class probability, used for the ROC curve and histogram below.
pred = model.predict_proba(x_test)[:,1]

# Display results
display_results(t_train, t_test, confusion, pred)

# Plot diagrams
plot_roc_curve("k-NN", "k-NN", class_pred, pred, y_test)
plot_prediction_histogram("k-NN", "k-NN", pred)

# k-NN went from 96.63% to 97.30% using KPCA
18,594 | 08259c89d66f9122e7b9f2a5d8c7c477f24d500e | #!/usr/bin/python3
import threading
import logging
import time
from library import flashLibrary
class Evaluator(threading.Thread):
    """Background thread that repeatedly scans the reserves cube for
    two-currency arbitrage opportunities and hands profitable ones to the
    executor.

    The cube is indexed as cube[exchange, token_in, token_out, reserve_side]
    — presumably Uniswap-style pair reserves; confirm against Cube's docs.
    """

    def __init__(self, _cube, _quoteGetter, _verifier, _executor, _pairInfos, _exchangePositions, _currencyPositions,
                 _onlyWeth=False,
                 _loggingLevel=logging.INFO):
        self.cube = _cube
        self.quoteGetter = _quoteGetter
        self.verifier = _verifier
        self.executor = _executor
        self.exchangePositions = _exchangePositions
        self.numberOfExchanges = len(self.exchangePositions)
        self.currencyPositions = _currencyPositions
        self.numberOfCurrencies = len(self.currencyPositions)
        self.onlyWeth = _onlyWeth
        self.pairInfos = _pairInfos
        self.logger = flashLibrary.setupLogger("evaluator", "./logs/evaluator.log", _loggingLevel)
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()

    def stop(self):
        """Ask the run loop to terminate after the current scan."""
        self.stop_event.set()

    def run(self):
        # Fixed: the loop previously ran forever and ignored stop_event, so
        # stop() had no effect; it now exits once the event is set.
        while not self.stop_event.is_set():
            start_time = time.time()
            self.findTwoCurrencyArbitrage()
            print("Execution took: {}".format(time.time() - start_time))
            time.sleep(5)

    def findTwoCurrencyArbitrage(self):
        """Scan WETH/xxx pairs opening on exchange 0 (Uniswap) and closing on
        any other exchange; verify promising opportunities off-chain, then via
        Infura, and finally hand them to the executor."""
        for start_exchange in range(self.numberOfExchanges):
            # Only uniswap as the opening leg.
            if start_exchange != 0:
                continue
            for m in range(self.numberOfCurrencies):
                # Only evaluate WETH-xxx starting with uniswap.
                if m != 0:
                    continue
                for n in range(self.numberOfCurrencies):
                    if self.onlyWeth and m > 0 and n > 0:
                        continue
                    if m == n:
                        continue
                    for e in range(self.numberOfExchanges):
                        if e == start_exchange:
                            continue
                        # Skip legs whose pools are (near-)empty.
                        if self.cube.cube[start_exchange, m, n, 0] < 1 or self.cube.cube[start_exchange, m, n, 1] < 1 or \
                                self.cube.cube[e, n, m, 0] < 1 or self.cube.cube[e, n, m, 1] < 1:
                            continue
                        decimalsIn = flashLibrary.getDecimalsWithNumber(_number=m, _pairInfos=self.pairInfos,
                                                                        _currencyPositions=self.currencyPositions)
                        decimalsSwap = flashLibrary.getDecimalsWithNumber(_number=n, _pairInfos=self.pairInfos,
                                                                         _currencyPositions=self.currencyPositions)
                        tokenIn = flashLibrary.getTokenAddressWithNumber(_number=m,
                                                                         _currencyPositions=self.currencyPositions)
                        swapToken = flashLibrary.getTokenAddressWithNumber(_number=n,
                                                                          _currencyPositions=self.currencyPositions)
                        reservesA0 = self.cube.cube[start_exchange, m, n, 0]
                        reservesA1 = self.cube.cube[start_exchange, m, n, 1]
                        # Closing leg reads the reserves in swapped order.
                        reservesB0 = self.cube.cube[e, n, m, 1]
                        reservesB1 = self.cube.cube[e, n, m, 0]
                        optimizedAmountIn = flashLibrary.findOptimum(_reservesA0=reservesA0,
                                                                     _reservesA1=reservesA1,
                                                                     _reservesB0=reservesB0,
                                                                     _reservesB1=reservesB1)
                        if optimizedAmountIn < 0:
                            continue
                        optimizedProfit = flashLibrary.getProfitWithoutDecimals(_amountIn=optimizedAmountIn,
                                                                                _reservesA0=reservesA0,
                                                                                _reservesA1=reservesA1,
                                                                                _reservesB0=reservesB0,
                                                                                _reservesB1=reservesB1)
                        # Guard-clause form of the original nested ifs.
                        if optimizedProfit <= 0:
                            continue
                        # Second, decimal-aware profitability check (in WETH).
                        wethProfit = self.verifier.verifyProfitOffchain(_amountIn=optimizedAmountIn,
                                                                        _decimalsIn=decimalsIn, _tokenIn=tokenIn,
                                                                        _reservesA0=reservesA0,
                                                                        _reservesA1=reservesA1,
                                                                        _reservesB0=reservesB0,
                                                                        _reservesB1=reservesB1)
                        if wethProfit <= 0:
                            continue
                        (pairAddress, _0to1) = flashLibrary.getPairAddress(_exchange=start_exchange,
                                                                           _tokenA=tokenIn, _tokenB=swapToken,
                                                                           _pairInfos=self.pairInfos)
                        # Uniswap pairs order amounts by token0/token1.
                        if _0to1:
                            amount0Out = 0
                            amount1Out = flashLibrary.getAmountOut(_amountIn=optimizedAmountIn,
                                                                   _reservesIn=reservesA0,
                                                                   _reservesOut=reservesA1)
                        else:
                            amount0Out = flashLibrary.getAmountOut(_amountIn=optimizedAmountIn,
                                                                   _reservesIn=reservesA0,
                                                                   _reservesOut=reservesA1)
                            amount1Out = 0
                        self.logger.info("Profitable arbitrage at {0}/{1} profit: {2} WETH".format(
                            flashLibrary.getSymbolWithNumber(m, self.currencyPositions, self.pairInfos),
                            flashLibrary.getSymbolWithNumber(n, self.currencyPositions, self.pairInfos),
                            wethProfit))
                        try:
                            infuraProfit = self.verifier.verifyProfitInfura(_tokenIn=tokenIn, _tokenOut=swapToken,
                                                                            _tokenInDecimals=decimalsIn,
                                                                            _tokenOutDecimals=decimalsSwap,
                                                                            _amount0Out=int(amount0Out),
                                                                            _amount1Out=int(amount1Out),
                                                                            _pairInAddress=pairAddress, _0to1=_0to1)
                            print("infuraProfit:{}".format(infuraProfit))
                        except Exception:
                            # Fixed: narrowed the bare except so Ctrl-C and
                            # SystemExit are no longer swallowed.
                            print("Failed to verify profit by infura. Maybe not profitable.")
                        try:
                            self.executor.executeArbitrage(_pairAddress=pairAddress, _amount0Out=int(amount0Out),
                                                           _amount1Out=int(amount1Out))
                        except Exception as e:
                            print("Error while executing arbitrage")
                            print(e)
|
18,595 | 56d237198568910a1096748b69a1942185188b91 | from utils.approx import Approx
class MetaNodeBound(object):
    """
    Manages the objective function bound in a metanode. When the metanode is fully expanded, its
    bound can be updated (i.e. tightened) to the best bound of its children, and propagated upwards
    to the root. If the root metanode's bound is updated, the solver's bound is also automatically
    updated to the new best bound in the tree.
    """
    # Only these two attributes exist per instance; there is one bound object
    # per metanode in the tree, so the saved per-instance dict matters.
    __slots__ = ("metanode", "value")
    def __init__(self, metanode):
        """Seed the bound with *metanode*'s own node-level bound."""
        self.metanode = metanode
        self.value = metanode.node.bound()
        # Every metanode is expected to carry a concrete (non-None) bound.
        assert self.value is not None
    def best_from_children(self):
        """Return the best bound among this metanode's children.

        'Best' is defined by the solver's optimization sense (min vs. max),
        via its ``best_in`` selector.
        """
        metanode = self.metanode
        best_in = metanode.solver.sense.best_in
        return best_in(child.bound.value for child in metanode.children)
    def update_from_children(self):
        """Tighten this bound from the children and propagate upward.

        Walks from this metanode toward the root, replacing each ancestor's
        bound with the best of its children's bounds, stopping early (break)
        as soon as an ancestor's bound is unchanged. If the walk runs past
        the root — i.e. the root's bound itself was just tightened — the
        solver's global bound is refreshed from the root.

        Raises:
            Exception: if this metanode still has unexpanded branches, in
                which case its children do not yet determine a valid bound.
        """
        metanode = self.metanode
        if metanode.is_expandable:
            raise Exception("cannot update bound: one or more branches remain unexpanded")
        solver = self.metanode.solver
        is_better = solver.sense.is_better
        while metanode is not None:
            bound = metanode.bound
            old = bound.value
            new = bound.best_from_children()
            if old == new:
                # Unchanged at this level => ancestors cannot change either.
                break
            # Bounds may only tighten (or stay approximately equal); Approx
            # presumably absorbs floating-point noise — confirm in utils.approx.
            assert is_better(old, new) or Approx(old) == new
            bound.value = new
            metanode = metanode.parent
        else:
            # while/else: runs only when the loop exhausted without break,
            # i.e. the root was updated too — sync the solver's cached bound.
            if is_better(solver.bound, solver.root.bound.value):
                solver.bound = solver.root.bound.value
|
# 18,596 | 3fe585af5a3f4e52edf1cedca4c182ac267fcde7
def distance(a, b):
    """Return the Hamming distance between two equal-length sequences.

    Args:
        a, b: sequences (strings, lists, tuples, ...) of equal length.

    Returns:
        The number of positions at which corresponding elements differ.

    Raises:
        ValueError: if the sequences have different lengths.
    """
    if len(a) != len(b):
        # A message makes the failure diagnosable at the call site
        # (the original raised a bare ValueError).
        raise ValueError("sequences must have the same length: %d != %d"
                         % (len(a), len(b)))
    # Use fresh names in the generator to avoid shadowing the parameters.
    return sum(1 for x, y in zip(a, b) if x != y)
|
18,597 | 6688504d1495672aa5b521be65a407943c360614 | #!/usr/bin/env python
#
# This script is released under the
# Apache License Version 2.0 http://www.apache.org/licenses/.
#
# It generates vectors with coordinates sampled randomly and uniformly from [0,1]
# If one specifies the flag -b or --binary, the script generates only binary data
#
import random
import argparse
import sys
import numpy
import math


def main(argv=None):
    """Generate ``--ndata`` vectors sampled uniformly from the unit simplex.

    Each output line is one tab-separated vector whose coordinates sum to ~1;
    coordinates are clamped below by ``--min_val``, so the sum may exceed 1
    very slightly. *argv* defaults to ``sys.argv[1:]`` (script usage).
    """
    parser = argparse.ArgumentParser(description='vector generator (uniform)')
    parser.add_argument('-d', '--dim', required=True, type=int,
                        help='dimensionality (# of vector elements)')
    parser.add_argument('-n', '--ndata', required=True, type=int,
                        help='# of data points')
    parser.add_argument('-o', '--outf', required=True, help='output file')
    # BUG FIX: without type=float a user-supplied value is parsed as str and
    # max(v, minVal) below raises TypeError on Python 3.
    parser.add_argument('--min_val', default=1e-5, type=float,
                        help='the minimum possible vector element')
    args = vars(parser.parse_args(argv))

    nd = args['ndata']
    outf = args['outf']
    dim = args['dim']
    minVal = args['min_val']

    # Uniform sampling from the simplex via normalized exponentials; see
    # http://blog.geomblog.org/2005/10/sampling-from-simplex.html and
    # Luc Devroye, 'Non-Uniform Random Variate Generation'.
    with open(outf, 'w') as f:  # context manager guarantees the file is closed
        for _ in range(nd):
            arr = numpy.array([-math.log(random.random()) for _ in range(dim)])
            arr /= sum(arr)
            f.write("\t".join([("%g" % max(v, minVal)) for v in arr]) + "\n")


if __name__ == '__main__':
    main()
|
18,598 | f807f806cc2ee644cdbb2458f2b39babce5a7caa | #! /usr/bin/env python
import sys
import argparse
import re
import six
import os
def parse_cmdline():
    """Parse the command line: an optional --pattern and a required location."""
    arg_parser = argparse.ArgumentParser(description='Unix like grep tool')
    arg_parser.add_argument('--pattern', type=str, default='.*',
                            help='pattern to search for')
    arg_parser.add_argument('location', type=str,
                            help='path to search for pattern in filename')
    return arg_parser.parse_args()
def find(pattern, location):
    """Recursively walk *location*, printing every path that matches *pattern*.

    The regular expression is searched against the full path of each file and
    directory, so a pattern may match directory components too. Unreadable
    directories are skipped (best-effort, like grep -r) instead of aborting.
    """
    pattern_re = re.compile(pattern)

    def find_helper(path):
        # Print the path itself when it matches, then descend if a directory.
        if pattern_re.search(path):
            # six.print_ is identical to the builtin print on Python 3;
            # the compat shim is unnecessary here.
            print(path)
        if os.path.isdir(path):
            try:
                entries = os.listdir(path)
            except OSError:
                # BUG FIX: a permission-denied directory used to crash the
                # whole walk; skip it and continue.
                return
            for entry in entries:
                find_helper(os.path.join(path, entry))

    return find_helper(location)
def main():
    """Entry point: parse the command line, then run the search."""
    options = parse_cmdline()
    find(options.pattern, options.location)


if __name__ == '__main__':
    main()
18,599 | 1437ea0062627370cd24d1fb0336cd84c12571ca | # -*- coding: utf-8 -*-
import scrapy
import datetime
import calendar
import pandas as pd
from io import StringIO
from urllib.parse import urlencode
from trade.items import DataDailyoptionItem
class DataDailyoptionSpider(scrapy.Spider):
    """Download daily TXO option quotes from TAIFEX and yield one item per row.

    Requests the CSV download endpoint for the current month, cleans the
    response with pandas, and emits a DataDailyoptionItem per surviving row.
    """

    name = 'data_dailyoption'
    # BUG FIX: allowed_domains entries must be bare domain names, not URLs;
    # including the scheme defeats Scrapy's offsite filtering.
    allowed_domains = ['www.taifex.com.tw']

    def __init__(self, *args, **kwargs):
        # Accept and forward standard Spider kwargs (backward compatible:
        # the original took no arguments).
        super().__init__(*args, **kwargs)
        self.run_date = datetime.date.today()
        self.start_date = self.run_date
        # NOTE(review): start_date/end_date are unused within this class;
        # presumably consumed elsewhere — confirm before removing.
        self.end_date = self.start_date + datetime.timedelta(days=-31)

    def start_requests(self):
        """Yield a single request covering the whole current month."""
        def get_period(year, month):
            """Return (first, last) day of the specified year and month."""
            first_weekday, days = calendar.monthrange(year, month)
            first = datetime.date(year=year, month=month, day=1)
            last = datetime.date(year=year, month=month, day=days)
            return first, last

        # Query window: first and last day of the current month.
        # (The original computed this identical pair twice back-to-back;
        # the duplicate was redundant and has been removed.)
        start, end = map(lambda d: d.strftime('%Y/%m/%d'),
                         get_period(year=self.run_date.year,
                                    month=self.run_date.month))
        params = {
            "queryStartDate": start,
            "queryEndDate": end,
            "commodity_id": 'TXO',
            "down_type": 1}
        url = "http://www.taifex.com.tw/cht/3/dlOptDataDown?" + \
            urlencode(params)
        yield scrapy.Request(
            url=url,
            callback=self.parse)

    def transform_data(self, response):
        """Parse the downloaded CSV body into a cleaned DataFrame.

        Keeps only lines that look like real records (>= 10 comma-separated
        fields), strips embedded spaces, drops inactive rows, normalizes the
        trade-date separator, and maps session / call-put labels to English.
        """
        # Reassemble only plausible CSV lines, deleting spaces, before
        # handing the text to pandas.
        df = pd.read_csv(
            StringIO("\n".join([i.translate({ord(c): None for c in ' '})
                                for i in response.text.split('\n')
                                if len(i.split(',')) >= 10])), index_col=False)
        # Keep rows showing any volume, open interest, or settlement price.
        df = df.loc[(df["成交量"] > 0) | (df["未沖銷契約數"] != "-") | (df["結算價"] != "-")]
        df["交易日期"] = df["交易日期"].str.replace("/", "-")
        df["到期月份(週別)"] = df["到期月份(週別)"].astype(str).str.replace(" ", "")
        df["契約"] = df["契約"].str.replace(" ", "")
        df["交易時段"] = df["交易時段"].str.replace("一般", "AM").str.replace("盤後", "PM")
        df["買賣權"] = df["買賣權"].str.replace("買權", "Call").str.replace("賣權", "Put").str.replace(" ", "")
        df = df.dropna(subset=['交易日期'])
        return df

    def parse(self, response):
        """Yield one DataDailyoptionItem per cleaned CSV row."""
        def fmt_float(value):
            # Non-numeric placeholders (e.g. "-") become 0.0.
            try:
                return float(value)
            except Exception:
                return 0.0

        df = self.transform_data(response)
        for idx in df.index:
            # BUG FIX: create a fresh item per row. The original mutated and
            # re-yielded a single shared instance, so downstream pipelines
            # buffering items would see every reference aliased to the last row.
            item = DataDailyoptionItem()
            item['date'] = df["交易日期"][idx]
            item['commodity'] = df["契約"][idx]
            item['contract'] = df["到期月份(週別)"][idx]
            item['strike'] = df["履約價"][idx]
            item['pc'] = df["買賣權"][idx]
            item['open'] = fmt_float(df["開盤價"][idx])
            item['high'] = fmt_float(df["最高價"][idx])
            item['low'] = fmt_float(df["最低價"][idx])
            item['close'] = fmt_float(df["收盤價"][idx])
            item['volume'] = fmt_float(df["成交量"][idx])
            item['adjustment'] = fmt_float(df["結算價"][idx])
            item['oi'] = fmt_float(df["未沖銷契約數"][idx])
            item['session'] = df["交易時段"][idx]
            yield item
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.