content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from django.urls import path
from tweet import views
urlpatterns = [
path('posttweet/', views.post_tweet_view, name='post tweet'),
path('tweet/<int:tweet_id>/', views.tweet_view, name="tweet details"),
path('deletetweet/<int:tweet_id>/', views.delete_tweet_view, name="delete tweet")
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
6126,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
7353,
83,
7277,
14,
3256,
5009,
13,
7353,
62,
83,
7277,
62,
1177,
11,
1438,
11639,
7353... | 2.622807 | 114 |
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import os, datetime
from src import crontab, crontab_run
# main
if __name__ == '__main__':
# 每天00点00分10秒运行一次'executor script1 argv1'
executor='python3'
script1 = 'main.py'
argv1 = '-m -t -s'
crontab.every('day').at(hour=0, minute=0, second=10).execute(script1,executor,argv1)
# 每5分钟运行一次script2
#script2 = '/opt/scrapy_news.py'
#crontab.every('minute').interval(5).execute(script2)
# 设置开始时间和结束时间
#script3 = '/opt/scrapy_goods.py'
#begin_time = datetime.datetime.strptime('2018-06-01 00:00:00', '%Y-%m-%d %H:%M:%S')
#end_time = datetime.datetime.strptime('2018-10-01 00:00:00', '%Y-%m-%d %H:%M:%S')
#crontab.every('minute').interval(5).begin(begin_time).end(end_time).execute(script3)
# 每月最后一天运行script4
#script4 = '/opt/scrapy_blog.py'
#crontab.every('month').at(day=-1).execute(script4)
# 开始运行crontab, 默认debug=False
crontab_run(debug = False)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
4808,
9,
62,
19617,
25,
40477,
12,
23,
4808,
9,
62,
198,
198,
11748,
28686,
11,
4818,
8079,
198,
6738,
12351,
1330,
1067,
756,
397,
11,
1067,
756,
397,
62,
5143,
198,
198,
... | 1.800366 | 546 |
from .html_widget import HTMLWidget
| [
6738,
764,
6494,
62,
42655,
1330,
11532,
38300,
628
] | 4.111111 | 9 |
import os
import numpy as np
import onnxruntime
import torch
from stream_utils.onnx import (
letterbox,
non_max_suppression,
Annotator,
scale_coords,
Colors,
)
| [
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
319,
77,
87,
43282,
198,
11748,
28034,
198,
198,
6738,
4269,
62,
26791,
13,
261,
77,
87,
1330,
357,
198,
220,
220,
220,
3850,
3524,
11,
198,
220,
220,
220,
1729,
62,... | 2.472973 | 74 |
"""Add Username Column
Revision ID: 717ad3834899
Revises: ed5ce2cd18c2
Create Date: 2017-10-31 11:28:44.615281
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '717ad3834899'
down_revision = 'ed5ce2cd18c2'
branch_labels = None
depends_on = None
| [
37811,
4550,
50069,
29201,
198,
198,
18009,
1166,
4522,
25,
767,
1558,
324,
2548,
28978,
2079,
198,
18009,
2696,
25,
1225,
20,
344,
17,
10210,
1507,
66,
17,
198,
16447,
7536,
25,
2177,
12,
940,
12,
3132,
1367,
25,
2078,
25,
2598,
13... | 2.563025 | 119 |
import bz2
import json
import os
import re
from ast import literal_eval
grep_property_line = re.compile('(^<wd:[Qq][^>]+>\s<p:[Pp][^>]+>\s<wds:[Qq][^>]+>)')
grep_wds = re.compile('((?<=wds:)[Qq][^>]+)')
grep_property = re.compile('((?<=p:)[Pp][^>]+)')
grep_qualifier = re.compile('((?<=pq:)[Pp][^>]+)')
grep_wditem = re.compile('^<wds?:([Qq][^->]+)')
match_instance = re.compile('((?<=p:)[Pp]31(?=>))')
def create_transaction(item_cache, temp_wditem, dict_all_items, file_wd_transaction, file_wd_item):
"""
Creates the transaction string for the transaction database
:param item_cache: list of strings to construct a transaction
:param temp_wditem: a temporary variable to know about which item the transaction is about
:param dict_all_items: the item database
:param file_wd_transaction: the output file for the transaction database
:param file_wd_item: the outputfile for the Wikidata items corresponding to the transaction database
:return:
"""
temp_prop = ''
final_transaction = ''
temp_wds = ''
qualifier_set = set()
transaction = []
has_instance = False
for item_line in item_cache:
if len(re.findall(match_instance, item_line)) == 1:
has_instance = True
if re.findall(grep_wds, item_line)[0].upper() == temp_wds:
if len(re.findall(grep_qualifier, item_line)) == 1:
qualifier = re.findall(grep_qualifier, item_line)[0].upper()
qualifier_set.add(qualifier)
else:
pass
# print("Line not needed, ps: or <wd:Q> <p:P> <wds:Q> handled in next operation!")
else:
dict_tuple = tuple([temp_prop] + list(sorted(qualifier_set)))
if dict_tuple in dict_all_items.keys() and len(dict_tuple) >= 2:
if transaction.count(dict_all_items[dict_tuple]) == 0:
transaction.append(dict_all_items[dict_tuple])
final_transaction = " ".join(list(map(str, sorted(transaction))))
qualifier_set = set()
else:
qualifier_set = set()
if len(re.findall(grep_property_line, item_line)) == 1:
if len(re.findall(grep_property, item_line)) == 1 and len(re.findall(grep_wds, item_line)) == 1:
temp_prop = re.findall(grep_property, item_line)[0].upper()
temp_wds = re.findall(grep_wds, item_line)[0].upper()
if len(qualifier_set) != 0:
dict_tuple = tuple([temp_prop] + list(sorted(qualifier_set)))
if dict_tuple in dict_all_items.keys() and len(dict_tuple) >= 2:
if transaction.count(dict_all_items[dict_tuple]) == 0:
transaction.append(dict_all_items[dict_tuple])
final_transaction = " ".join(list(map(str, sorted(transaction))))
if temp_wditem != '' and final_transaction != '' and has_instance is True:
file_wd_transaction.write(final_transaction)
file_wd_item.write(temp_wditem + "\n")
file_wd_transaction.write("\n")
def create_horizontal_database(files, item_db_file_path, transaction_db_path):
"""
Create the horizontal transaction database for frequent itemset mining.
:param files: the files to process.
:param item_db_file_path: the path to the item database.
:param transaction_db_path: the path to where the transaction database will be stored.
:return:
"""
file_wd_item = open(transaction_db_path + "\\tid.txt", "w")
file_wd_transaction = open(transaction_db_path + "\\transaction.dat", "w")
with open(item_db_file_path, "r") as filehandle:
obj = json.load(filehandle)
dict_all_items = {literal_eval(k): v for k, v in obj.items()}
item_cache = []
temp_wditem = ''
for file in files:
print("File:\t", file)
stream = bz2.open(file, 'rt')
for line in stream:
if len(re.findall(grep_wditem, line)) == 1:
wditem = re.findall(grep_wditem, line)[0].upper()
if re.findall(grep_wditem, line)[0].upper() == temp_wditem:
item_cache.append(line)
else:
create_transaction(item_cache, temp_wditem, dict_all_items, file_wd_transaction, file_wd_item)
item_cache = []
temp_wditem = wditem
item_cache.append(line)
else:
if len(re.findall('<wds?:[Pp]', line)[0]) > 0:
pass
else:
print("Line:\t", line)
print("Error in dump! No WD-Item could be found!")
stream.close()
print("Last item cache length:\t", len(item_cache))
create_transaction(item_cache, temp_wditem, dict_all_items, file_wd_transaction, file_wd_item)
print("Finished creating the transaction database")
if __name__ == '__main__':
"""
The main method reads paths and makes sure they exist before calculations start.
"""
input_path = input("Enter the directory of the cleaned and splitted .nt.bz2 dump (Example: C:\dump\cleaned_dump):\t")
input_path = input_path.replace('"', '').replace("'", "")
assert os.path.exists(input_path), "Path not found at:\t" + str(input_path)
item_db_file_path = input("Enter the path to the item database (Example: C:\dump\itemdb\items.json):\t")
item_db_file_path = item_db_file_path.replace('"', '').replace("'", "")
assert os.path.exists(item_db_file_path), "File not found at:\t" + str(item_db_file_path)
transaction_db_path = input(
"Enter the directory to store the transaction database (Example: C:\dump\\transactiondb):\t")
transaction_db_path = transaction_db_path.replace('"', '').replace("'", "")
assert os.path.exists(transaction_db_path), "File not found at:\t" + str(transaction_db_path)
# get filelist of provided path
file_list = next(os.walk(input_path))[2]
file_list_fullpath = []
for file in file_list:
file_list_fullpath.append(os.path.join(input_path, file))
create_horizontal_database(file_list_fullpath, item_db_file_path, transaction_db_path)
| [
11748,
275,
89,
17,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
6468,
1330,
18875,
62,
18206,
198,
198,
70,
7856,
62,
26745,
62,
1370,
796,
302,
13,
5589,
576,
10786,
7,
61,
27,
16993,
33250,
48,
80,
7131,
61,
... | 2.282576 | 2,686 |
import abc
| [
11748,
450,
66,
198
] | 2.75 | 4 |
#!python
# Loads vessel hazard results into the spatial db
import logging
import os
from pathlib import Path
import geopandas as gpd
import sqlalchemy
from sqlalchemy import create_engine
logging.basicConfig(format='%(process)d - %(levelname)s: %(message)s', level=logging.INFO)
def load_file(hazard_file_path: Path, db_engine: sqlalchemy.engine.Engine) -> None:
"""Given a path to a hazard result file, load it, preprocess, and load into spatial db"""
SCHEMA_NAME = 'axiom_nps_vessel_drift'
region = get_region(hazard_file_path)
if region is None:
region == 'all'
layer_name = f'vessel_hazard_{region}'
# Need to specify as DateTime otherwise it will be ingested as text
dtypes = {
'date_utc': sqlalchemy.DateTime
}
gdf = gpd.read_parquet(hazard_file_path)
logging.info(f'Loading {hazard_file_path} as {layer_name}')
gdf.to_postgis(
layer_name,
db_engine,
schema=SCHEMA_NAME,
if_exists='replace',
chunksize=1000,
dtype=dtypes
)
if __name__ == '__main__':
main()
| [
2,
0,
29412,
198,
2,
8778,
82,
8837,
15834,
2482,
656,
262,
21739,
20613,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
30324,
392,
292,
355,
27809,
67,
198,
11748,
44161,
282,
26599,
198,
... | 2.5 | 436 |
from __future__ import absolute_import
# Copyright (c) 2010-2014 openpyxl
# stdlib
import datetime
import decimal
from io import BytesIO
# package
from openpyxl import Workbook
from lxml.etree import xmlfile, tostring
# test imports
import pytest
from openpyxl.tests.helper import compare_xml
@pytest.fixture
@pytest.mark.parametrize("value, expected",
[
(9781231231230, """<c t="n" r="A1"><v>9781231231230</v></c>"""),
(decimal.Decimal('3.14'), """<c t="n" r="A1"><v>3.14</v></c>"""),
(1234567890, """<c t="n" r="A1"><v>1234567890</v></c>"""),
("=sum(1+1)", """<c r="A1"><f>sum(1+1)</f><v></v></c>"""),
(True, """<c t="b" r="A1"><v>1</v></c>"""),
("Hello", """<c t="s" r="A1"><v>0</v></c>"""),
("", """<c r="A1" t="s"></c>"""),
(None, """<c r="A1" t="n"></c>"""),
(datetime.date(2011, 12, 25), """<c r="A1" t="n" s="1"><v>40902</v></c>"""),
])
@pytest.fixture
@pytest.fixture
@pytest.mark.lxml_required
@pytest.mark.lxml_required
@pytest.mark.lxml_required
@pytest.mark.lxml_required
@pytest.mark.lxml_required
@pytest.mark.lxml_required
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
2,
15069,
357,
66,
8,
3050,
12,
4967,
1280,
9078,
87,
75,
198,
198,
2,
14367,
8019,
198,
11748,
4818,
8079,
198,
11748,
32465,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
... | 1.765385 | 780 |
"""
Setup module for install lib
"""
import os
import re
from os import path
from pathlib import Path
from typing import List, Optional
from setuptools import setup
LIB_NAME = 'gen_doc'
HERE = Path(__file__).parent
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_version() -> Optional[str]:
"""
Method for getting the version of the library from the init file
:requirements: version must be specified separately
:good: __version__ = '0.0.1'
:bad: __version__, __any_variable__ = '0.0.1', 'any_value'
:return: version lib
"""
txt = (HERE / LIB_NAME / "__init__.py").read_text("utf-8")
txt = txt.replace("'", '"')
try:
version = re.findall(r'^__version__ = "([^"]+)"\r?$', txt, re.M)[0]
return version
except IndexError:
raise RuntimeError("Unable to determine version.")
def get_packages() -> List[str]:
"""
Help method
:return: List[str] path to files and folders library
"""
ignore = ['__pycache__']
list_sub_folders_with_paths = [x[0].replace(os.sep, '.')
for x in os.walk(LIB_NAME)
if x[0].split(os.sep)[-1] not in ignore]
return list_sub_folders_with_paths
setup(name=LIB_NAME,
version=get_version(),
description='Module for build documentation',
author='Denis Shchutkiy',
long_description=long_description,
long_description_content_type='text/markdown',
author_email='denisshchutskyi@gmail.com',
url='https://github.com/Shchusia/gen_doc',
packages=get_packages(),
keywords=['pip', LIB_NAME],
python_requires='>=3.7',
entry_points={
'console_scripts': [
'gen_doc=gen_doc.commands:main'
]},
)
| [
37811,
201,
198,
40786,
8265,
329,
2721,
9195,
201,
198,
37811,
201,
198,
11748,
28686,
201,
198,
11748,
302,
201,
198,
6738,
28686,
1330,
3108,
201,
198,
6738,
3108,
8019,
1330,
10644,
201,
198,
6738,
19720,
1330,
7343,
11,
32233,
201,... | 2.208939 | 895 |
# -*- coding: utf-8 -*-
import requests, os, sys
from re import findall as reg
requests.packages.urllib3.disable_warnings()
from threading import *
from threading import Thread
from ConfigParser import ConfigParser
from Queue import Queue
try:
os.mkdir('Results')
except:
pass
list_region = '''us-east-1
us-east-2
us-west-1
us-west-2
af-south-1
ap-east-1
ap-south-1
ap-northeast-1
ap-northeast-2
ap-northeast-3
ap-southeast-1
ap-southeast-2
ca-central-1
eu-central-1
eu-west-1
eu-west-2
eu-west-3
eu-south-1
eu-north-1
me-south-1
sa-east-1'''
pid_restore = '.nero_swallowtail'
if __name__ == '__main__':
print('''
________ _ __ ____
/ ____/ /_ (_) /_/ __ \____ ____
/ / / __ \/ / __/ / / / __ `/ _ \\
/ /___/ / / / / /_/ /_/ / /_/ / __/
\____/_/ /_/_/\__/\____/\__, /\___/
LARAVEL \033[32;1mRCE\033[0m V6.9 more tools : https://t.me/hackingtoolsprvi8 /____/ \n''')
try:
readcfg = ConfigParser()
readcfg.read(pid_restore)
lists = readcfg.get('DB', 'FILES')
numthread = readcfg.get('DB', 'THREAD')
sessi = readcfg.get('DB', 'SESSION')
print("log session bot found! restore session")
print('''Using Configuration :\n\tFILES='''+lists+'''\n\tTHREAD='''+numthread+'''\n\tSESSION='''+sessi)
tanya = raw_input("Want to contineu session ? [Y/n] ")
if "Y" in tanya or "y" in tanya:
lerr = open(lists).read().split("\n"+sessi)[1]
readsplit = lerr.splitlines()
else:
kntl # Send Error Biar Lanjut Ke Wxception :v
except:
try:
lists = sys.argv[1]
numthread = sys.argv[2]
readsplit = open(lists).read().splitlines()
except:
try:
lists = raw_input("websitelist ? ")
readsplit = open(lists).read().splitlines()
except:
print("Wrong input or list not found!")
exit()
try:
numthread = raw_input("threads ? ")
except:
print("Wrong thread number!")
exit()
pool = ThreadPool(int(numthread))
for url in readsplit:
if "://" in url:
url = url
else:
url = "http://"+url
if url.endswith('/'):
url = url[:-1]
jagases = url
try:
pool.add_task(main, url)
except KeyboardInterrupt:
session = open(pid_restore, 'w')
cfgsession = "[DB]\nFILES="+lists+"\nTHREAD="+str(numthread)+"\nSESSION="+jagases+"\n"
session.write(cfgsession)
session.close()
print("CTRL+C Detect, Session saved")
exit()
pool.wait_completion()
try:
os.remove(pid_restore)
except:
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
11,
28686,
11,
25064,
198,
6738,
302,
1330,
1064,
439,
355,
842,
198,
8897,
3558,
13,
43789,
13,
333,
297,
571,
18,
13,
40223,
62,
40539,
654,
3419,
198,
... | 2.187729 | 1,092 |
from collections import defaultdict
from functools import partial
from itertools import chain
import logging
import multiprocessing
from multiprocessing.shared_memory import SharedMemory
import os
from pathlib import Path
import signal
from typing import Dict, Optional, Tuple
from calvin_agent.datasets.shm_dataset import ShmDataset
from calvin_agent.datasets.utils.episode_utils import lookup_naming_pattern
import numpy as np
from omegaconf import DictConfig
from pytorch_lightning import Callback, LightningModule, Trainer
from tqdm import tqdm
log = logging.getLogger(__name__)
def gather_results(return_dict: Dict) -> Tuple[Dict, Dict]:
"""
Combine results of worker processes.
Args:
return_dict: Dictionary with results of worker processes.
Returns:
episode_lookup_vision: Combined results of vision lookup.
lang_episode_dict: Combined results of lanugage lookup.
"""
episode_lookup_vision: Dict = defaultdict(list)
lang_episode_dict: Dict = defaultdict(dict)
for proc in sorted(return_dict):
for key in return_dict[proc][0]:
episode_lookup_vision[key] += return_dict[proc][0][key]
lang_episode_dict[key].update(return_dict[proc][1][key])
return episode_lookup_vision, lang_episode_dict
def check_shm_lookup_exists(dataset_type: str) -> Optional[Dict]:
"""
Check if there is already a shared memory lookup file saved on the disk.
Args:
dataset_type: 'train' or 'val'.
Returns:
Lookup file if exists, None otherwise.
"""
load_path = Path("/tmp/") if "TMPDIR" not in os.environ else Path(os.environ["TMPDIR"])
try:
data: Dict = np.load(load_path / f"{dataset_type}_shm_lookup.npy", allow_pickle=True).item()
return data
except FileNotFoundError:
return None
def save_shm_lookup(train_shm_lookup: Dict, val_shm_lookup: Dict) -> None:
"""
Save shared memory lookups to disk, such that they can be reused by ddp subprocesses.
Args:
train_shm_lookup: Shared memory lookup for training data.
val_shm_lookup: Shared memory lookup for validation data.
"""
save_path = Path("/tmp/") if "TMPDIR" not in os.environ else Path(os.environ["TMPDIR"])
np.save(save_path / "train_shm_lookup.npy", train_shm_lookup) # type: ignore
np.save(save_path / "val_shm_lookup.npy", val_shm_lookup) # type: ignore
def load_shm_lookup() -> Tuple[Dict, Dict]:
"""
Load shared memory lookup.
Returns:
train_shm_lookup: Shared memory lookup for training data.
val_shm_lookup: Shared memory lookup for validation data.
"""
load_path = Path("/tmp/") if "TMPDIR" not in os.environ else Path(os.environ["TMPDIR"])
train_shm_lookup: Dict = np.load(load_path / "train_shm_lookup.npy", allow_pickle=True).item()
val_shm_lookup: Dict = np.load(load_path / "val_shm_lookup.npy", allow_pickle=True).item()
return train_shm_lookup, val_shm_lookup
class SharedMemoryLoader:
"""
Helper class for loading dataset into shared memory.
Args:
datasets_cfg: Hydra config of datasets.
dataset_dir: Path to dataset.
"""
def _worker_process(self, proc_num, ep_start_end_ids, offsets, shmem, lang_ep_start_end_ids, return_dict):
"""
Multiprocessing worker to speed up the loading of the data into shared memory.
Args:
proc_num: Process number.
ep_start_end_ids: Episode start and end indices for this worker.
offsets: Offset for addressing right portion of shared array.
shmem: Shared memory handles.
lang_ep_start_end_ids: Episode start and end indices of language data for this worker.
return_dict: Dictionary for saving the results.
"""
episode_lookup_vision = defaultdict(list)
lang_episode_dict = defaultdict(dict)
if proc_num == 0:
pbar = tqdm(total=np.sum(np.diff(ep_start_end_ids)), leave=False)
else:
pbar = None
for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
seq = self._zip_sequence(start_idx, end_idx, pbar)
for key, array in seq.items():
shared_array = np.ndarray(array.shape, dtype=array.dtype, buffer=shmem[key].buf, offset=offsets[key])
shared_array[:] = array[:]
for j, idx in enumerate(range(start_idx, end_idx + 1 - self.min_window_size_vision)):
episode_lookup_vision[key].append((offsets[key], j))
if idx in lang_ep_start_end_ids[:, 0]:
lang_episode_dict[key][idx] = (offsets[key], j)
offsets[key] += array.nbytes
return_dict[proc_num] = episode_lookup_vision, lang_episode_dict
if pbar is not None:
pbar.close()
def load_data_in_shared_memory(self):
"""
Load the dataset from disk into shared memory once at the beginning of the training to speed up data loading.
Returns:
Shared memory lookup dict.
"""
lang_data = np.load(self.dataset_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).item()
ep_start_end_ids = np.load(self.dataset_dir / "ep_start_end_ids.npy")
lang_ep_start_end_ids = np.array(lang_data["info"]["indx"]) # each of them are 64
lang_ann = lang_data["language"]["emb"]
shmem, shapes, sizes, dtypes, shmem_lookup = self._init_shmem(ep_start_end_ids)
if shmem_lookup is not None:
# using existing shared memory
log.info("Using existing shared memory without reloading it.")
return shmem_lookup
lang_lookup = []
episode_lookup_lang = defaultdict(list)
log.info(
f"Loading {self.dataset_type} language episodes into shared memory. "
f"(progress bar shows only worker process 0)."
)
if self.n_proc > len(ep_start_end_ids):
self.n_proc = len(ep_start_end_ids)
split_indices = np.array_split(ep_start_end_ids, self.n_proc, axis=0)
split_lens = [np.sum(np.diff(split_indices[i])) for i in range(len(split_indices))]
obs_size = {key: dtypes[key].itemsize * np.prod(shapes[key]) for key in dtypes}
offsets = [{key: n * obs_size[key] for key in dtypes} for n in np.cumsum([0] + split_lens[:-1])]
manager = multiprocessing.Manager()
return_dict = manager.dict()
processes = []
# load vision data with multiple processes
for i in range(self.n_proc):
p = multiprocessing.Process(
target=self._worker_process,
args=(i, split_indices[i], offsets[i], shmem, lang_ep_start_end_ids, return_dict),
)
processes.append(p)
p.start()
for proc in processes:
proc.join()
episode_lookup_vision, lang_episode_dict = gather_results(return_dict)
# lang data
for i, (start_idx, end_idx) in enumerate(tqdm(lang_ep_start_end_ids)):
for key in lang_episode_dict:
offset, step = lang_episode_dict[key][start_idx]
for j, idx in enumerate(range(start_idx, end_idx + 1 - self.min_window_size_lang)):
episode_lookup_lang[key].append((offset, step + j))
for idx in range(start_idx, end_idx + 1 - self.min_window_size_lang):
lang_lookup.append(i)
result = {
"episode_lookup_vision": episode_lookup_vision,
"episode_lookup_lang": episode_lookup_lang,
"lang_lookup": lang_lookup,
"lang_ann": lang_ann,
"shapes": shapes,
"sizes": sizes,
"dtypes": dtypes,
}
return result
def _init_shmem(self, ep_start_end_ids: np.ndarray) -> Tuple[Dict, Dict, Dict, Dict, Optional[Dict]]:
"""
Initialize shared memory.
Args:
ep_start_end_ids: Episode start and end indices of dataset.
Returns:
shmem: Dictionary with shared memory handles for each dataset key (rgb_static, etc ...).
shapes: Dictionary with the shape of one datapoint for each dataset key.
sizes: Dictionary with the memory size of one datapoint for each dataset key.
dtypes: Dictionary with the dtype of data for each dataset key.
shm_lookup: If shared memory lookup dict already exists, return it here.
"""
# load first episode to determine memory usage
seq = self._zip_sequence(ep_start_end_ids[0][0], ep_start_end_ids[0][0] + 1)
total_size = np.sum(ep_start_end_ids[:, 1] - ep_start_end_ids[:, 0])
shmem: Dict[str, SharedMemory] = {}
shapes: Dict[str, Tuple] = {}
sizes: Dict[str, int] = {}
dtypes: Dict[str, str] = {}
shm_lookup = check_shm_lookup_exists(self.dataset_type)
# check if all necessary shared memories are already loaded
if shm_lookup is not None:
print("shm_lookup exists")
try:
if np.all(
[
SharedMemory(name=f"{self.dataset_type}_{key}").size == size * total_size
for key, size in shm_lookup["sizes"].items()
]
):
return shmem, shapes, sizes, dtypes, shm_lookup
except FileNotFoundError as e:
pass
for key, array in seq.items():
try:
# see if exists
s = SharedMemory(name=f"{self.dataset_type}_{key}")
s.close()
s.unlink()
log.warning(
f"Found existing shared memory {self.dataset_type}_{key}, freeing up memory."
"In case of multiple training runs on the same node, this will lead to problems."
)
except FileNotFoundError:
pass
shmem[key] = SharedMemory(create=True, size=array.nbytes * total_size, name=f"{self.dataset_type}_{key}")
shapes[key] = array.shape[1:]
sizes[key] = array.nbytes
dtypes[key] = array.dtype
# register signal handler for the case that shm data loading process gets interrupted.
signal.signal(signal.SIGTERM, partial(delete_shm, shmem.keys()))
return shmem, shapes, sizes, dtypes, None
def _zip_sequence(self, start_idx, end_idx, pbar=None):
"""
Load consecutive frames saved as individual files on disk and combine to episode dict.
Args:
start_idx: Start index of file.
end_idx: End index of file.
pbar: Tqdm progress bar.
Returns:
Episode dict.
"""
keys = list(chain(*self.obs_space.values()))
keys.remove("language")
keys.append("scene_obs")
n_items = end_idx - start_idx
episode = {}
data = np.load(self._get_episode_name(start_idx))
for key in keys:
shape = (n_items,) + data[key].shape
dtype = data[key].dtype
episode[key] = np.empty(shape=shape, dtype=dtype)
for i, file_idx in enumerate(range(start_idx, end_idx)):
with np.load(self._get_episode_name(file_idx)) as data:
for key in keys:
episode[key][i] = data[key]
if pbar is not None:
pbar.update(1)
return episode
def _get_episode_name(self, file_idx):
"""
Convert file idx to file path.
Args:
file_idx: index of starting frame.
Returns:
Path to file.
"""
return Path(f"{self.naming_pattern[0]}{file_idx:0{self.n_digits}d}{self.naming_pattern[1]}")
def delete_shm(shm_keys, signal, frame):
"""
Close and unlink the shared memories.
"""
for dataset_type in ["train", "val"]:
for shm_key in shm_keys:
try:
s = SharedMemory(name=f"{dataset_type}_{shm_key}")
s.close()
s.unlink()
print(f"successfully unlinked {shm_key}")
except Exception as e:
print(e)
exit()
class SignalCallback(Callback):
"""
Register a signal handler for closing and unlinking the shared memory that get's activated with a SIGTERM signal.
"""
| [
6738,
17268,
1330,
4277,
11600,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
11748,
18931,
198,
11748,
18540,
305,
919,
278,
198,
6738,
18540,
305,
919,
278,
13,
28710,
62,
31673,
1330,
39403,
3... | 2.197992 | 5,677 |
import aiohttp_jinja2
import yaml
import asyncio
from aiohttp import web
import logging
import jinja2
from app.service.data_svc import DataService
from app.service.file_svc import FileService
from app.api.api import RestApi
if __name__ == '__main__':
setup_logger(getattr(logging, 'INFO'))
with open('conf/config.yml') as conf:
config = yaml.safe_load(conf)
config_host = config['host']
config_port = config['port']
file_svc = FileService()
data_svc = DataService()
services = dict(file_svc=file_svc, data_svc=data_svc)
main(config_host, config_port, services)
| [
11748,
257,
952,
4023,
62,
18594,
6592,
17,
198,
11748,
331,
43695,
198,
11748,
30351,
952,
198,
6738,
257,
952,
4023,
1330,
3992,
198,
11748,
18931,
198,
11748,
474,
259,
6592,
17,
198,
198,
6738,
598,
13,
15271,
13,
7890,
62,
21370,... | 2.463035 | 257 |
from abc import ABC, abstractmethod
from contextlib import contextmanager
import weakref
from ...util.event import Event
from ._visual_wrapper import VisualWrapper
class Layer(VisualWrapper, ABC):
"""Base layer class.
Parameters
----------
central_node : vispy.scene.visuals.VisualNode
Visual node that controls all others.
name : str, optional
Name of the layer. If not provided, is automatically generated
from `cls._basename()`
Notes
-----
Must define the following:
* `_get_shape()`: called by `shape` property
* `_refresh()`: called by `refresh` method
* `data` property (setter & getter)
May define the following:
* `_set_view_slice(indices)`: called to set currently viewed slice
* `_after_set_viewer()`: called after the viewer is set
* `_qt_properties`: QtWidget inserted into the layer list GUI
* `_qt_controls`: QtWidget inserted into the controls panel GUI
* `_basename()`: base/default name of the layer
Attributes
----------
name
ndim
shape
selected
viewer
Methods
-------
refresh()
Refresh the current view.
"""
def __str__(self):
"""Return self.name
"""
return self.name
@classmethod
@property
def name(self):
"""str: Layer's unique name.
"""
return self._name
@name.setter
@property
@abstractmethod
@data.setter
@abstractmethod
@abstractmethod
@abstractmethod
@property
def ndim(self):
"""int: Number of dimensions in the data.
"""
return len(self.shape)
@property
def shape(self):
"""tuple of int: Shape of the data.
"""
return self._get_shape()
@property
def selected(self):
"""boolean: Whether this layer is selected or not.
"""
return self._selected
@selected.setter
@property
def viewer(self):
"""Viewer: Parent viewer widget.
"""
if self._viewer is not None:
return self._viewer()
@viewer.setter
@property
def status(self):
"""string: Status string
"""
return self._status
@status.setter
@property
def help(self):
"""string: String that can be displayed to the
user in the status bar with helpful usage tips.
"""
return self._help
@help.setter
@property
def interactive(self):
"""bool: Determines if canvas pan/zoom interactivity is enabled or not.
"""
return self._interactive
@interactive.setter
@property
def cursor(self):
"""string: String identifying cursor displayed over canvas.
"""
return self._cursor
@cursor.setter
def _after_set_viewer(self, prev):
"""Triggered after a new viewer is set.
Parameters
----------
prev : Viewer
Previous viewer.
"""
if self.viewer is not None:
self.refresh()
def _set_view_slice(self, indices):
"""Called whenever the sliders change. Sets the current view given a
specific slice to view.
Parameters
----------
indices : sequence of int or slice
Indices that make up the slice.
"""
def refresh(self):
"""Fully refreshes the layer. If layer is frozen refresh will not occur
"""
if self._freeze:
return
self._refresh()
@contextmanager
def on_mouse_move(self, event):
"""Called whenever mouse moves over canvas.
"""
return
def on_mouse_press(self, event):
"""Called whenever mouse pressed in canvas.
"""
return
def on_mouse_release(self, event):
"""Called whenever mouse released in canvas.
"""
return
def on_key_press(self, event):
"""Called whenever key pressed in canvas.
"""
return
def on_key_release(self, event):
"""Called whenever key released in canvas.
"""
return
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
11748,
4939,
5420,
198,
198,
6738,
2644,
22602,
13,
15596,
1330,
8558,
198,
6738,
47540,
41464,
62,
48553,
1330,
15612,
36918,
2848,
628,
1... | 2.421694 | 1,724 |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py #import plotly
import plotly.graph_objs as go #import graphs objects
app = dash.Dash('Ciclopi Dashboard')
ciclopi=pd.read_csv("data/04_ciclopi_cleaned.csv", sep=';', \
parse_dates=['DataOraPrelievo','DataOraDeposito','DataPrelievo','OrarioPrelievo','OrarioDeposito','DataDeposito'])
app.layout = html.Div(children=[
html.H1(children='Ciclopi Statistics'),
dcc.Dropdown(
id='station',
options=[{'label': i, 'value': i} for i in ciclopi['StazPrelievo'].unique()],
value='Comune Palazzo Blu',
),
# creating a graph done in Notebook 03 - Statistics
dcc.Graph(id='rpm-graph', style={'width':600}),
], style={'width':600})
@app.callback(Output('rpm-graph', 'figure'),
[Input('station','value')])
if __name__ == '__main__':
app.run_server(debug=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
220,
532,
9,
12,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
14470,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
11748,
... | 2.366228 | 456 |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
import unittest
import os
import json
| [
2,
19617,
28,
40477,
23,
198,
198,
2,
15069,
2864,
28591,
5097,
2606,
35,
13,
9858,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.692708 | 192 |
#!/usr/bin/env python
__author__ = "Mateus Ferreira"
__copyright__ = "Copyright 2020, The FAST-PROJ Group"
__credits__ = ["Mateus Ferreira"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "FAST-PROJ"
__email__ = "#"
__status__ = "Development"
from flask import Flask, request, jsonify
from flask import render_template
from mysql import dbConnection
from reader import Reader
from cleaner import Cleaner
from feature import Feature
import pandas as pd
from bertSingleton import Bert
app = Flask(__name__)
# Inicia a classe de conexão com o banco
connection = dbConnection()
# Inicia a classe de leitura do arquivo
reader = Reader()
# Inicia a classe de limpeza
cleaner = Cleaner()
# Inicia a classe de features
feature = Feature()
@app.route('/', methods=['GET'])
@app.route('/insertFiles', methods=['POST'])
@app.route('/recebePergunta', methods=['POST']) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
834,
9800,
834,
220,
220,
220,
220,
796,
366,
44,
378,
385,
12880,
260,
8704,
1,
198,
834,
22163,
4766,
834,
220,
796,
366,
15269,
12131,
11,
383,
376,
11262,
12,
31190,
41,
4912,
... | 2.829653 | 317 |
"""
Utility RPython functions to inspect objects in the GC.
"""
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup
from rpython.rlib.objectmodel import free_non_gc_object
from rpython.rlib import rposix, rgc, jit
from rpython.memory.support import AddressDict, get_address_stack
# ---------- implementation of rpython.rlib.rgc.get_rpy_roots() ----------
# ---------- implementation of rpython.rlib.rgc.get_rpy_referents() ----------
# ----------
# ----------
raw_os_write = rffi.llexternal(rposix.UNDERSCORE_ON_WIN32 + 'write',
[rffi.INT, llmemory.Address, rffi.SIZE_T],
rffi.SIZE_T,
sandboxsafe=True, _nowrapper=True)
AddressStack = get_address_stack()
ARRAY_OF_HALFWORDS = lltype.Array(llgroup.HALFWORD)
| [
37811,
198,
18274,
879,
25812,
7535,
5499,
284,
10104,
5563,
287,
262,
20145,
13,
198,
37811,
198,
6738,
374,
29412,
13,
81,
774,
525,
13,
297,
19199,
6781,
1330,
32660,
4906,
11,
32660,
31673,
11,
374,
487,
72,
11,
32660,
8094,
198,
... | 2.293629 | 361 |
# This module is executed when the program is run as a module...
# using ``python -m solrzkutil``
import sys
if __name__ == '__main__':
try:
import solrzkutil
except ImportError as e:
sys.exit('solrzkutil python package is not installed. %s\n' % e)
from solrzkutil import main
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
sys.exit('\n') | [
2,
770,
8265,
318,
10945,
618,
262,
1430,
318,
1057,
355,
257,
8265,
986,
201,
198,
2,
1262,
7559,
29412,
532,
76,
1540,
81,
89,
74,
22602,
15506,
201,
198,
11748,
25064,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
... | 2.303867 | 181 |
import re
email_regex = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
password_regex = "^(?=.*\d)(?=.*[a-z])(?=.*[a-zA-Z])(?=.*\d)(?=.*[@$!%*#?&]).{8,}$"
| [
11748,
302,
198,
198,
12888,
62,
260,
25636,
796,
374,
18109,
61,
58,
64,
12,
89,
32,
12,
57,
15,
12,
24,
44807,
10,
12,
48688,
31,
58,
64,
12,
89,
32,
12,
57,
15,
12,
24,
12,
48688,
59,
3693,
64,
12,
89,
32,
12,
57,
15,
... | 1.40678 | 118 |
from orttraining_test_model_transform import add_name, fix_transpose, add_expand_shape
from orttraining_test_layer_norm_transform import layer_norm_transform
| [
6738,
393,
926,
24674,
62,
9288,
62,
19849,
62,
35636,
1330,
751,
62,
3672,
11,
4259,
62,
7645,
3455,
11,
751,
62,
11201,
392,
62,
43358,
198,
6738,
393,
926,
24674,
62,
9288,
62,
29289,
62,
27237,
62,
35636,
1330,
7679,
62,
27237,
... | 3.434783 | 46 |
import psutil
system = ["System", "services.exe", "svchost.exe",
"csrss.exe", "fontdrvhost.exe", "conhost.exe", None]
for proc in psutil.process_iter():
try:
parent = proc.parent().name()
except AttributeError:
parent = None
if parent not in system and proc.name() not in system:
print(proc)
try:
print(proc.exe())
except psutil.AccessDenied:
print("Access denied to executable location.")
| [
11748,
26692,
22602,
628,
198,
10057,
796,
14631,
11964,
1600,
366,
30416,
13,
13499,
1600,
366,
21370,
354,
455,
13,
13499,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
6359,
42216,
13,
13499,
1600,
366,
10331,
7109,
... | 2.359606 | 203 |
import json
import requests
import os
# Set up incoming webhooks on Slack to get a URL for your team: https://api.slack.com/incoming-webhooks
slackUrl = os.environ["SLACK_URL"]
# Declare the API endpoint for requests
response = requests.get('http://54.213.83.132/hackoregon/http/all_new_transactions/5/')
data = response.json()
# The initial message is first posted as a simple text defined by greeting
greeting={"text": "Here's the top 5 transactions for the last few days."}
requests.post(slackUrl, json.dumps(greeting), headers={'content-type': 'application/json'})
# Loop through our data declaring and formating values for the message
for value in data:
date = value['tran_date']
transType = value['sub_type']
amount = '${:,}'.format(value['amount'])
payee = value['contributor_payee']
filer = value['filer']
purpose = value['purp_desc']
# Look for the type of transaction to label and color code the Slack message attachment
if transType == 'Cash Contribution':
color = '#36a64f'
message = "%s gave %s to %s on %s" % (filer, amount, payee, date)
elif transType == 'Cash Expenditure':
color = '#B21627'
message = "%s expensed %s on %s on %s" % (filer, amount, payee, date)
else:
color = '#414243'
message = "A %s of %s from %s to %s on %s" % (transType, amount, filer, payee, date)
# Each transaction is posted as at message attachment: https://api.slack.com/docs/attachments
payload = {
"color": color,
"fields": [
{
"title": transType,
"value": message
}
]
}
req = requests.post(slackUrl, json.dumps(payload), headers={'content-type': 'application/json'})
| [
11748,
33918,
198,
11748,
7007,
198,
11748,
28686,
198,
198,
2,
5345,
510,
15619,
3992,
25480,
82,
319,
36256,
284,
651,
257,
10289,
329,
534,
1074,
25,
3740,
1378,
15042,
13,
6649,
441,
13,
785,
14,
259,
4976,
12,
12384,
25480,
82,
... | 2.870107 | 562 |
mi_vehiculo = Camion()
desplazamiento_vehiculo(mi_vehiculo)
mi_vehiculo = Coche()
desplazamiento_vehiculo(mi_vehiculo)
mi_vehiculo = Moto()
desplazamiento_vehiculo(mi_vehiculo)
| [
628,
628,
198,
11632,
62,
33892,
291,
43348,
796,
7298,
295,
3419,
198,
198,
8906,
489,
1031,
321,
1153,
78,
62,
33892,
291,
43348,
7,
11632,
62,
33892,
291,
43348,
8,
198,
198,
11632,
62,
33892,
291,
43348,
796,
18490,
258,
3419,
1... | 2.054945 | 91 |
from ggplot import mpg
from ..data import Data, GridData, WrapData
print("Single chart case")
g = Data(mpg)
print("=> S1", g.getCategoriesByIndex() is None)
print("=> S2", mpg.equals(g.getDataByIndex()["data"]))
print("=> S3", {} == g.getDataByIndex()["colCategories"])
print("=> S4", {} == g.getDataByIndex()["rowCategories"])
print("=> S5", mpg.equals(g.getDataByIndex(rowIndex=0)["data"]))
print("=> S6", mpg.equals(g.getDataByIndex(colIndex=0)["data"]))
print("=> S7", (1, 1) == g.getShape())
print("Float row single dim chart case")
g = WrapData(mpg, rowDims=["class"])
result = [[('class', '2seater')], [('class', 'compact')], [('class', 'midsize')],
[('class', 'minivan')], [('class', 'pickup')], [('class', 'subcompact')],
[('class', 'suv')]]
print("=> FR1", all(x == y for x, y in zip(result, g.rowCategories)))
print("=> FR2", g.colCount is None)
print("=> FR3", 7 == g.rowCount)
print("=> FR4", 5 == g.getDataByIndex(rowIndex=0)["data"].shape[0])
try:
5 == g.getDataByIndex(colIndex=0)["data"].shape[0]
print("=> FR5", False)
except IndexError:
print("=> FR5", True)
print("=> FR6", mpg.shape[0] == g.getDataByIndex()["data"].shape[0])
print("=> FR7", 62 == g.getDataByIndex(rowIndex=6)["data"].shape[0])
try:
62 == g.getDataByIndex(rowIndex=7)["data"]
print("=> FR8", False)
except IndexError:
print("=> FR8", True)
print("=> FR9", (7, 1) == g.getShape())
print("Float col single dim chart case")
g = WrapData(mpg, colDims=["class"])
result = [[('class', '2seater')], [('class', 'compact')], [('class', 'midsize')],
[('class', 'minivan')], [('class', 'pickup')], [('class', 'subcompact')],
[('class', 'suv')]]
print("=> FC1", all(x == y for x, y in zip(result, g.colCategories)))
print("=> FC2", g.rowCount is None)
print("=> FC3", 7 == g.colCount)
print("=> FC4", 5 == g.getDataByIndex(colIndex=0)["data"].shape[0])
try:
5 == g.getDataByIndex(rowIndex=0)["data"].shape[0]
print("=> FC5", False)
except IndexError:
print("=> FC5", True)
print("=> FC6", mpg.shape[0] == g.getDataByIndex()["data"].shape[0])
print("=> FC7", 62 == g.getDataByIndex(colIndex=6)["data"].shape[0])
try:
62 == g.getDataByIndex(colIndex=7)["data"]
print("=> FC8", False)
except IndexError:
print("=> FC8", True)
print("=> FC9", (1, 7) == g.getShape())
print("Float row double dim chart case")
g = GridData(mpg, ["drv", "cyl"])
result = [[('drv', '4'), ('cyl', 4)], [('drv', '4'), ('cyl', 5)], [('drv', '4'), ('cyl', 6)],
[('drv', '4'), ('cyl', 8)], [('drv', 'f'), ('cyl', 4)], [('drv', 'f'), ('cyl', 5)],
[('drv', 'f'), ('cyl', 6)], [('drv', 'f'), ('cyl', 8)], [('drv', 'r'), ('cyl', 4)],
[('drv', 'r'), ('cyl', 5)], [('drv', 'r'), ('cyl', 6)], [('drv', 'r'), ('cyl', 8)]]
print("=> FCM1", all(x == y for x, y in zip(result, g.rowCategories)))
print("=> FCM2", g.colCount is None)
print("=> FCM3", 12 == g.rowCount)
print("=> FCM4", 23 == g.getDataByIndex(rowIndex=0)["data"].shape[0])
try:
5 == g.getDataByIndex(colIndex=0)["data"].shape[0]
print("=> FCM5", False)
except IndexError:
print("=> FCM5", True)
print("=> FCM6", mpg.shape[0] == g.getDataByIndex()["data"].shape[0])
print("=> FCM7", 21 == g.getDataByIndex(rowIndex=11)["data"].shape[0])
try:
62 == g.getDataByIndex(rowIndex=12)["data"]
print("=> FCM8", False)
except IndexError:
print("=> FCM8", True)
print("=> FCM9", (12, 1) == g.getShape())
| [
6738,
308,
70,
29487,
1330,
285,
6024,
198,
6738,
11485,
7890,
1330,
6060,
11,
24846,
6601,
11,
41028,
6601,
198,
198,
4798,
7203,
28008,
8262,
1339,
4943,
198,
70,
796,
6060,
7,
3149,
70,
8,
198,
4798,
7203,
14804,
311,
16,
1600,
3... | 2.309859 | 1,491 |
#!/usr/bin/python
import feedparser
import time
from subprocess import check_output
import sys
#feed_name = 'TRIBUNE'
#url = 'http://chicagotribune.feedsportal.com/c/34253/f/622872/index.rss'
feed_name = sys.argv[1]
url = sys.argv[2]
db = './feeds.db'
limit = 12 * 3600 * 1000
#
# function to get the current time
#
current_time_millis = lambda: int(round(time.time() * 1000))
current_timestamp = current_time_millis()
# return true if the title is in the database with a timestamp > limit
#
# get the feed data from the url
#
feed = feedparser.parse(url)
#
# figure out which posts to print
#
posts_to_print = []
posts_to_skip = []
for post in feed.entries:
# if post is already in the database, skip it
# TODO check the time
title = post.title
if post_is_in_db_with_old_timestamp(title):
posts_to_skip.append(title)
else:
posts_to_print.append(title)
#
# add all the posts we're going to print to the database with the current timestamp
# (but only if they're not already in there)
#
f = open(db, 'a')
for title in posts_to_print:
if not post_is_in_db(title):
f.write(title + "|" + str(current_timestamp) + "\n")
f.close
#
# output all of the new posts
#
count = 1
blockcount = 1
for title in posts_to_print:
if count % 5 == 1:
print("\n" + time.strftime("%a, %b %d %I:%M %p") + ' ((( ' + feed_name + ' - ' + str(blockcount) + ' )))')
print("-----------------------------------------\n")
blockcount += 1
print(title + "\n")
count += 1
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
3745,
48610,
198,
11748,
640,
198,
6738,
850,
14681,
1330,
2198,
62,
22915,
198,
11748,
25064,
198,
198,
2,
12363,
62,
3672,
796,
705,
5446,
9865,
41884,
6,
198,
2,
6371,
796,
7... | 2.56406 | 601 |
from native.datastructs.rocketClass import RocketCallable as _RocketCallable
| [
6738,
6868,
13,
19608,
459,
1356,
82,
13,
30431,
9487,
1330,
16920,
14134,
540,
355,
4808,
50218,
14134,
540,
628
] | 3.9 | 20 |
"""
Script used by tox.ini to check the manifest file if we are under version control, or skip the
check altogether if not.
"check-manifest" will needs a vcs to work, which is not available when testing the package
instead of the source code (with ``devpi test`` for example).
"""
from __future__ import print_function
import os
import subprocess
import sys
if os.path.isdir('.git'):
sys.exit(subprocess.call('check-manifest', shell=True))
else:
print('No .git directory found, skipping checking the manifest file')
sys.exit(0)
| [
37811,
198,
7391,
973,
416,
8293,
13,
5362,
284,
2198,
262,
10561,
2393,
611,
356,
389,
739,
2196,
1630,
11,
393,
14267,
262,
198,
9122,
13318,
611,
407,
13,
198,
198,
1,
9122,
12,
805,
8409,
1,
481,
2476,
257,
410,
6359,
284,
670... | 3.4125 | 160 |
'''
Setup script for slack_post
This file is a part of sack_post
'''
from setuptools import setup
if __name__ == '__main__':
# Long description
with open('./README.md') as f:
readme = f.read()
setup(
name='slack_post',
version='0.0.1',
description='Posting message to slack for Python',
long_description=readme,
author='Ryotaro A. Abe',
author_email='ryoutar.abe@gmail.com',
url='https://github.com/users/RyotaroAbe/projects/',
license='MIT',
install_requires=['os','requests','json']) | [
7061,
6,
198,
40786,
4226,
329,
30740,
62,
7353,
198,
1212,
2393,
318,
257,
636,
286,
23704,
62,
7353,
198,
7061,
6,
628,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 2.269231 | 260 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
import logging
logger = logging.getLogger(__name__)
from .crawler_process import CrawlerProcess
from tweetf0rm.twitterapi.twitter_api import TwitterAPI
from tweetf0rm.handler import create_handler
from tweetf0rm.handler.crawl_user_relationship_command_handler import CrawlUserRelationshipCommandHandler
from tweetf0rm.utils import full_stack, hash_cmd
from tweetf0rm.exceptions import MissingArgs, NotImplemented
from tweetf0rm.redis_helper import NodeQueue
import copy, json
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
6738,
764,
... | 3.104046 | 173 |
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Spherical i/o utilities.
"""
# Imports
import os
import gzip
import shutil
import nibabel
def ungzip(path):
""" Extract GNU zipped archive file.
Parameters
----------
path: str
the archive file to be opened: must be a .gz file.
Returns
-------
out_path: str
the generated file at the same location without the .gz extension.
"""
assert path.endswith(".gz")
dest_path = path.replace(".gz", "")
if not os.path.isfile(dest_path):
with gzip.open(path, "rb") as f_in:
with open(dest_path, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
return dest_path
def read_gifti(surf_file):
""" Read a surface geometry stored in GIFTI format.
Parameters
----------
surf_file: str
the input GIFTI surface file.
Returns
-------
vertices: array (N, 3)
the N vertices of the surface.
triangles: array (M, 3)
the M triangles that defines the surface geometry.
"""
image = nibabel.load(surf_file)
nb_of_surfs = len(image.darrays)
if nb_of_surfs != 2:
raise ValueError(
"'{0}' does not contain a valid mesh.".format(surf_file))
vertices = image.darrays[0].data
triangles = image.darrays[1].data
return vertices, triangles
def read_freesurfer(surf_file):
""" Read a surface geometry stored in FreeSurfer format.
Parameters
----------
surf_file: str
the input FreeSurfer surface file.
Returns
-------
vertices: array (N, 3)
the N vertices of the surface.
triangles: array (M, 3)
the M triangles that defines the surface geometry.
"""
vertices, traingles = nibabel.freesurfer.read_geometry(surf_file)
return vertices, traingles
def write_gifti(vertices, triangles, surf_file):
""" Write a surface geometry in GIFTI format.
Parameters
----------
vertices: array (N, 3)
the N vertices of the surface to be saved.
triangles: array (M, 3)
the M triangles that defines the surface geometry to be saved.
surf_file: str
the path to the generated GIFTI surface file.
"""
vertices_array = nibabel.gifti.GiftiDataArray(
data=vertices,
intent=nibabel.nifti1.intent_codes["NIFTI_INTENT_POINTSET"])
triangles_array = nibabel.gifti.GiftiDataArray(
data=triangles,
intent=nibabel.nifti1.intent_codes["NIFTI_INTENT_TRIANGLE"])
gii = nibabel.gifti.GiftiImage(darrays=[vertices_array, triangles_array])
nibabel.gifti.write(gii, surf_file)
def write_freesurfer(vertices, triangles, surf_file):
""" Write a surface geometry in FreeSurfer format.
Parameters
----------
vertices: array (N, 3)
the N vertices of the surface to be saved.
triangles: array (M, 3)
the M triangles that defines the surface geometry to be saved.
surf_file: str
the path to the generated FreeSurfer surface file.
"""
nibabel.freesurfer.io.write_geometry(surf_file, vertices, triangles,
create_stamp="",
volume_info=None)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
2235,
198,
2,
10551,
79,
532,
15069,
357,
34,
8,
327,
16412,
11,
33448,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
20101,
34,
8267,
12,
33,
59... | 2.498609 | 1,438 |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import os
import oneflow
import oneflow.experimental as flow
import oneflow.python.framework.session_context as session_ctx
import oneflow._oneflow_internal
from oneflow.python.framework.multi_client_session import MultiClientSession
import oneflow.python.framework.c_api_util as c_api_util
@flow.unittest.skip_unless_1n1d()
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
15269,
12131,
383,
1881,
37535,
46665,
13,
1439,
2489,
10395,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.569395 | 281 |
#!/usr/bin/env python3
import os
GIT_FOLDER = os.getcwd() + '/../../'
folders = os.listdir(GIT_FOLDER)
for folder in folders:
print('\n\n\n' + folder)
cmd = 'cd ' + GIT_FOLDER + folder + ' && ' + ' git status'
os.system(cmd)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
198,
38,
2043,
62,
37,
3535,
14418,
796,
28686,
13,
1136,
66,
16993,
3419,
1343,
31051,
40720,
40720,
6,
198,
198,
11379,
364,
796,
28686,
13,
4868,
15908,
... | 2.209524 | 105 |
import os
import discord
from discord.ext import commands
from datetime import datetime, timedelta, timezone
from typing import Any
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
| [
11748,
28686,
201,
198,
11748,
36446,
201,
198,
6738,
36446,
13,
2302,
1330,
9729,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
11,
640,
11340,
201,
198,
6738,
19720,
1330,
4377,
201,
198,
6738,
4136,
541,
88,
13,
1... | 3.349206 | 63 |
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from packaging import version
EPSILON = torch.finfo(torch.float32).eps
MATH_PI = math.pi
class STFT:
"""
Short-time Fourier Transform as a Layer
"""
def forward(self, x):
"""
Accept (single or multiple channel) raw waveform and output magnitude and phase
args
x: input signal, B x N (batch x num_samples) or B x D x N (batch x num_channels x num_samples)
return
m: magnitude, B x F x T or B x D x F x T
p: phase, B x F x T or B x D x F x T
"""
if x.dim() not in [2, 3]:
raise RuntimeError(
"{} expect 2D/3D tensor, but got {:d}D signal".format(
self.__name__, x.dim()
)
)
# if B x N, reshape B x 1 x N
if x.dim() == 2:
x = torch.unsqueeze(x, 1)
# B x 2F x T
c = F.conv1d(x, self.K, stride=self.stride, padding=0)
# B x F x T
r, i = torch.chunk(c, 2, dim=1)
# else reshape BD x 1 x N
else:
B, D, N = x.shape
x = x.view(B * D, 1, N)
# BD x 2F x T
c = F.conv1d(x, self.K, stride=self.stride, padding=0)
# B x D x 2F x T
c = c.view(B, D, -1, c.shape[-1])
# B x D x F x T
r, i = torch.chunk(c, 2, dim=2)
m = (r ** 2 + i ** 2) ** 0.5
p = torch.atan2(i, r)
return m, p, r, i
class IPDFeature(nn.Module):
"""
Compute inter-channel phase difference
"""
def forward(self, p):
"""
Accept multi-channel phase and output inter-channel phase difference
args
p: phase matrix, N x C x F x T
return
ipd: N x MF x T
"""
if p.dim() not in [3, 4]:
raise RuntimeError(
"{} expect 3/4D tensor, but got {:d} instead".format(
self.__name__, p.dim()
)
)
# C x F x T => 1 x C x F x T
if p.dim() == 3:
p = p.unsqueeze(0)
N, _, _, T = p.shape
pha_dif = p[:, self.index_l] - p[:, self.index_r]
# IPD mean normalization
yr = torch.cos(pha_dif)
yi = torch.sin(pha_dif)
yrm = yr.mean(-1, keepdim=True)
yim = yi.mean(-1, keepdim=True)
ipd = torch.atan2(yi - yim, yr - yrm)
# N x MF x T
ipd = ipd.view(N, -1, T)
return ipd
class FeatureExtractor(nn.Module):
"""
A PyTorch module to handle spectral & spatial features
"""
def forward(self, x):
"""
Compute spectral and spatial features
args
x: B x N
return:
mag & pha: B x F x T
feature: B x * x T
"""
mag, p, r, i = self.forward_stft.forward(x)
if mag.dim() == 4:
# just pick first channel
mag = mag[:, 0, ...]
f = torch.clamp(mag, min=EPSILON)
# mvn
f = (f - f.mean(-1, keepdim=True)) / (f.std(-1, keepdim=True) + EPSILON)
if self.ipd_feature:
ipd = self.ipd_feature.forward(p)
f = torch.cat([f, ipd], dim=1)
return mag, f, r, i
| [
11748,
10688,
198,
11748,
28034,
198,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
16846,
1330,
2196,
198,
198,
36,
3705,
4146,
1340,
796,
28034,
13,
69,
10951,
7,
1316... | 1.827072 | 1,810 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by techno at 28/05/19
#Feature: #Enter feature name here
# Enter feature description here
#Scenario: # Enter scenario name here
# Enter steps here
import numpy as np
my_2d_array = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
print(my_2d_array)
print(np.transpose(my_2d_array))
print()
# making a 3x3 array
gfg = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
# before transpose
print(gfg, end='\n\n')
#, end='\n\n' add two blank spaces
# after transpose
print(gfg.transpose())
print()
# Create 3 x 3 matrix
mat = np.array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
print(mat)
print(mat.T)
#Transposing numpy array with 3 dimensions (tensors)
tensor = np.array([[ [1, 2], [3, 4] ],
[ [5, 6], [7, 8] ]])
print(tensor.shape)
print(tensor.T) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15622,
416,
44950,
379,
2579,
14,
2713,
14,
1129,
198,
198,
2,
38816,
25,
220,
1303,
17469,
3895,
1438,
994,
... | 1.995565 | 451 |
# pylint: disable=not-callable
"""
Create reconstructed and PID variables based on truth information
for MC events using simple parameterisations.
"""
#TODO In future this could be integrated with param.py (but, that meed updating from cake to pi first)
from __future__ import absolute_import, print_function, division
import math, fnmatch, collections
import numpy as np
from pisa import FTYPE, TARGET
from pisa.core.stage import Stage
from pisa.utils.log import logging
from pisa.utils.profiler import profile
from pisa.utils.numba_tools import WHERE, myjit, ftype
__all__ = ["simple_param","simple_reco_energy_parameterization","simple_reco_coszen_parameterization","simple_pid_parameterization"]
__author__ = 'T. Stuttard'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
def dict_lookup_wildcard(dict_obj,key) :
'''
Find the object in a dict specified by a key, where the key may include wildcards
Parameters
----------
dict_obj : dict
The dict (or dict-like) object to search
key : str
The key to search for in the dict (may include wildcards)
Returns
-------
key : str
The str found in the dict matching the wildcard
value : anything
The value found corresponding to the the requested key
'''
assert isinstance(dict_obj,collections.Mapping)
assert isinstance(key,str)
matches = collections.OrderedDict([ (k,v) for k,v in dict_obj.items() if fnmatch.fnmatch(key,k) ])
assert len(matches) > 0, "No match for '%s' found in dict" % key
assert len(matches) < 2, "Multiple matches for '%s' found in dict : %s" % (key,matches.keys())
return matches.keys()[0], matches.values()[0]
def logistic_function(a,b,c,x) :
'''
Logistic function as defined here: https://en.wikipedia.org/wiki/Logistic_function.
Starts off slowly rising, before steeply rising, then plateaus.
Parameters
----------
a : float
Normalisation (e.g. plateau height)
b : float
Steepness of rise (larger value means steeper rise)
c : float
x value at half-height of curve
x : array
The continuous parameter
Returns
-------
f(x) : array
The results of applying the logistic function to x
'''
return a / (1 + np.exp( -b * (x-c) ) )
def has_muon(particle_key) :
'''
Function returning True if the particle type has muons in the final state
This is numu CC and atmopsheric muons
Parameters
----------
particle_key : string
Key identifiying the particle type, e.g. numu_cc, nutau_nc, muon, etc.
Returns
-------
has_muon : bool
Flag set to try if particle has muon in final state
'''
#TODO consider adding nutau CC where the tau decays to muons
return ( (particle_key.startswith("numu") and particle_key.endswith("_cc")) or particle_key.startswith("muon") )
def visible_energy_correction(particle_key) :
'''
Simple way to estimate the amount of visible energy in the event.
Right now considering cases with final state neutrinos, such as NC events,
and nutau CC events (where the tau decays to a tau neutrino).
Neglecting the much lower losses due to muon decay for numu CC.
Also neglecting fact that different particle types produce differing photon yields.
I've tuned these by eye due to the biases seen in GRECO pegleg, which to first
order I'm assuming are due to this missing energy.
There is also a bias in numu CC in GRECO, but suspect this is due to containment
or stochastics, but either way not reproducing this here.
Parameters
----------
particle_key : string
Key identifiying the particle type, e.g. numu_cc, nutau_nc, muon, etc.
Returns
-------
visible_energy : array
Estimated visible energy in each event
'''
#TODO Add some smearing
# NC events have final state neutrino
if particle_key.endswith("_nc") :
return 0.4 #TODO tune, consider inelasticity numbers vs energy (including nu vs nubar)
# nutau CC events have final state neutrino, but from a subsequent tau decay
elif particle_key.startswith("nutau") and particle_key.endswith("_cc") :
return 0.6 #TODO tune, consider decay spectrum vs energy
# muons are all over the place since their "true_energy" doesn't reflect any real value in the ice (is energy as surface of generation cylinder)
elif particle_key == "muons" :
return 0.1 #TODO Should really store deposied energy in the frame instead
# Everything else deposits full energy as visible energy
else :
return 1.
def energy_dependent_sigma(energy,energy_0,sigma_0,energy_power) :
'''
Returns an energy dependent sigma (standard deviation) value(s),
with energy dependence defined as follows:
sigma(E) = sigma(E=E0) * (E/E0)^n
Parameters
----------
energy : array or float
Energy value to evaluate sigma at
energy_0 : float
Energy at which sigma_0 is defined
sigma_0 : float
The value of sigma at energy_0
energy_power : float
Power/index fo the energy dependence
Returns
-------
sigma(energy) : array or float
The value of sigma at the specified energy (or energies)
'''
return sigma_0 * np.power(energy/energy_0,energy_power)
def simple_reco_energy_parameterization(particle_key,true_energy,params,random_state) :
'''
Function to produce a smeared reconstructed energy distribution.
Resolution is particle- and energy-dependent
Use as a placeholder if real reconstructions are not currently available.
Parameters
----------
particle_key : string
Key identifiying the particle type, e.g. numu_cc, nutau_nc, muon, etc.
true_energy : array
True energy array.
params : dict
keys : particle key (wilcards accepted)
values : list : [ E0 (reference true_energy), median reco error at E0, index/power of energy dependence ]
(example: params = {'nue*_cc':[10.,0.2,0.2],})
random_state : np.random.RandomState
User must provide the random state, meaning that reproducible results
can be obtained when calling multiple times.
Returns
-------
reco_energy : array
Reconstructed energy array.
'''
#TODO Update docs
# Default random state with no fixed seed
if random_state is None :
random_state = np.random.RandomState()
# Get the visible energy
visible_energy = true_energy * visible_energy_correction(particle_key)
# Grab the params for this particle type
_,energy_dependent_sigma_params = dict_lookup_wildcard(dict_obj=params,key=particle_key)
# Get the sigma of the "reco error" distribution (energy dependent)
# Easier to use this than the "reco energy" directly
energy_0 = energy_dependent_sigma_params[0]
reco_error_sigma_0 = energy_dependent_sigma_params[1]
energy_power = energy_dependent_sigma_params[2]
reco_error_sigma = energy_dependent_sigma(visible_energy,energy_0,reco_error_sigma_0,energy_power)
# Get the reco error
reco_error = random_state.normal(np.zeros_like(reco_error_sigma),reco_error_sigma)
# Compute the corresponding reco energy
# Use visible energy since that is what really matters
reco_energy = visible_energy * ( reco_error + 1. )
# Ensure physical values
reco_energy[reco_energy < 0.] = 0.
return reco_energy
def simple_reco_coszen_parameterization(particle_key,true_energy,true_coszen,params,random_state) :
'''
Function to produce a smeared reconstructed cos(zenith) distribution.
Resolution is particle- and energy-dependent
Use as a placeholder if real reconstructions are not currently available.
Keep within the rotational bounds
Parameters
----------
true_coszen : array
True cos(zenith angle) array.
true_energy : array
True energy array.
params : dict
keys : particle key (wilcards accepted)
values : list : [ E0 (reference true_energy), median reco error at E0, index/power of energy dependence ]
(example: params = {'nue*_cc':[10.,0.2,0.5],})
random_state : np.random.RandomState
User must provide the random state, meaning that reproducible results
can be obtained when calling multiple times.
Returns
-------
reco_coszen : array
Reconstructed cos(zenith angle) array.
'''
# Default random state with no fixed seed
if random_state is None :
random_state = np.random.RandomState()
# Get the visible energy
visible_energy = true_energy * visible_energy_correction(particle_key)
# Grab the params for this particle type
_,energy_dependent_sigma_params = dict_lookup_wildcard(dict_obj=params,key=particle_key)
# Get the sigma of the "reco error" distribution (energy dependent)
# Easier to use this than the "reco coszen" directly
energy_0 = energy_dependent_sigma_params[0]
reco_error_sigma_0 = energy_dependent_sigma_params[1]
energy_power = energy_dependent_sigma_params[2]
reco_error_sigma = energy_dependent_sigma(visible_energy,energy_0,reco_error_sigma_0,energy_power)
# Get the reco error
reco_error = random_state.normal(np.zeros_like(reco_error_sigma),reco_error_sigma)
# Compute the corresponding reco coszen
# Use visible energy since that is what really matters
reco_coszen = true_coszen + reco_error
# Enforce rotational bounds
out_of_bounds_mask = reco_coszen > 1.
reco_coszen[out_of_bounds_mask] = reco_coszen[out_of_bounds_mask] - ( 2. * (reco_coszen[out_of_bounds_mask] - 1.) )
out_of_bounds_mask = reco_coszen < -1.
reco_coszen[out_of_bounds_mask] = reco_coszen[out_of_bounds_mask] - ( 2. * (reco_coszen[out_of_bounds_mask] + 1.) )
return reco_coszen
def simple_pid_parameterization(particle_key,true_energy,params,track_pid,cascade_pid,random_state,) :
    '''
    Assign a PID value to each event from truth information alone.

    The probability of classifying an event as track-like is modelled with a
    logistic curve in true energy, making the parameterization particle-,
    interaction- and energy-dependent.  Intended as a stand-in when real
    reconstructions are not available.

    Parameters
    ----------
    particle_key : string
        Key identifying the particle type, e.g. numu_cc, nutau_nc, muon, etc.
    true_energy : array
        True energy array.
    params : dict
        keys : particle key (wildcards accepted)
        values : logistic function params for track ID (list) :
            [ normalisation (plateau height), steepness of rise, true_energy at half-height ]
        (example: params = {'nue*_cc':[0.05,0.2,15.],})
    track_pid : float
        PID value to assign to track-like events
    cascade_pid : float
        PID value to assign to cascade-like events
    random_state : np.random.RandomState
        User must provide the random state, meaning that reproducible results
        can be obtained when calling multiple times.

    Returns
    -------
    pid : array
        PID values.
    '''

    # Fall back to an unseeded RNG when the caller does not supply one
    if random_state is None :
        random_state = np.random.RandomState()

    # Resolve the logistic-curve parameters for this particle (wildcards allowed)
    _, logistic_params = dict_lookup_wildcard(dict_obj=params, key=particle_key)

    # Energy-dependent probability that an event is labelled a track, then a
    # per-event Bernoulli draw against that probability
    prob_track = logistic_function(
        logistic_params[0],
        logistic_params[1],
        logistic_params[2],
        true_energy,
    )
    is_track = random_state.uniform(0., 1., size=true_energy.size) < prob_track

    # Fill the output array with the two discrete PID values
    pid = np.full_like(true_energy, np.NaN)
    pid[is_track] = track_pid
    pid[~is_track] = cascade_pid

    return pid
class simple_param(Stage):
"""
Stage to generate reconstructed parameters (energy, coszen, pid) using simple parameterizations.
These are not fit to any input data, but are simple and easily understandable and require no
input reconstructed events.
    Can easily be tuned to any desired physics case; right now they represent a DeepCore/ICU-like detector.
Parameters
----------
params : ParamSet
Must exclusively have parameters:
perfect_reco : bool
If True, use "perfect reco": reco == true, numu(bar)_cc -> tracks, rest to cascades
If False, use the parametrised reco energy, coszen and pid functions
        reco_energy_params : dict
            Dict defining the `params` argument to `simple_reco_energy_parameterization`
            See `simple_reco_energy_parameterization` documentation for more details
        reco_coszen_params : dict
            Dict defining the `params` argument to `simple_reco_coszen_parameterization`
            See `simple_reco_coszen_parameterization` documentation for more details
        pid_track_params : dict
            Dict defining the `params` argument to `simple_pid_parameterization`
            See `simple_pid_parameterization` documentation for more details
track_pid : float
The numerical 'pid' variable value to assign for tracks
cascade_pid : float
The numerical 'pid' variable value to assign for cascades
"""
| [
2,
279,
2645,
600,
25,
15560,
28,
1662,
12,
13345,
540,
198,
198,
37811,
198,
16447,
49594,
290,
37022,
9633,
1912,
319,
3872,
1321,
220,
198,
1640,
13122,
2995,
1262,
2829,
11507,
38189,
13,
198,
37811,
198,
198,
2,
51,
3727,
46,
5... | 2.865066 | 4,832 |
import os, sys, Pyro4
import django.htoken.serializer
from django.contrib.auth.models import User, Permission
from django.analysis.tracer import pause_sql_analysis, resume_sql_analysis
from django import htoken
from django.conf import settings
PYRO_NAME = "authbackend"
class ModelBackendProxy(object):
    """
    Authenticates against django.contrib.auth.models.User.

    Permission lookups are wrapped in pause_sql_analysis() /
    resume_sql_analysis() so the SQL tracer does not record the
    backend's own queries.
    """

    # Django auth-backend capability flags.
    supports_object_permissions = False
    supports_anonymous_user = True
    supports_inactive_user = True

    # TODO: Model, login attribute name and password attribute name should be
    # configurable.
    # TODO: should return the authentication token.
    def get_group_permissions(self, user_obj):
        """
        Returns a set of permission strings that this user has through his/her
        groups.
        """
        # Keep the permission queries below out of the SQL analysis trace.
        pause_sql_analysis()
        if not hasattr(user_obj, '_group_perm_cache'):
            # Superusers implicitly hold every permission.
            if user_obj.is_superuser:
                perms = Permission.objects.all()
            else:
                perms = Permission.objects.filter(group__user=user_obj)
            perms = perms.values_list('content_type__app_label', 'codename').order_by()
            # Cache as "app_label.codename" strings on the user object.
            user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
        resume_sql_analysis()
        return user_obj._group_perm_cache

    def has_module_perms(self, user_obj, app_label):
        """
        Returns True if user_obj has any permissions in the given app_label.
        """
        if not user_obj.is_active:
            return False
        # NOTE(review): relies on get_all_permissions(), which is not defined
        # in this class as shown here -- presumably provided elsewhere; confirm.
        for perm in self.get_all_permissions(user_obj):
            # Permission strings are "app_label.codename"; compare the prefix.
            if perm[:perm.index('.')] == app_label:
                return True
        return False
| [
11748,
28686,
11,
25064,
11,
44954,
19,
198,
198,
11748,
42625,
14208,
13,
4352,
4233,
13,
46911,
7509,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
11,
2448,
3411,
198,
6738,
42625,
14208,
13,
20930,
13,
... | 2.411429 | 700 |
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from gtk import Button
import gobject
from mdspluswidget import MDSplusWidget
from mdsplusxdbox import MDSplusXdBox
from mdspluserrormsg import MDSplusErrorMsg
import sys
# Glade is only available when running inside the GUI builder; fall back to
# plain runtime behavior when it cannot be imported.
try:
    import glade
    guibuilder = True
except ImportError:  # narrowed from a bare ``except`` so real errors propagate
    guibuilder = False
gobject.type_register(MDSplusXdBoxButtonWidget)
if guibuilder:
| [
2,
198,
2,
15069,
357,
66,
8,
2177,
11,
10140,
5136,
286,
8987,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
17... | 3.395918 | 490 |
from bisect import bisect_left
| [
6738,
47457,
478,
1330,
47457,
478,
62,
9464,
198
] | 3.444444 | 9 |
"""Script to deploy to staging or production COOL environments."""
import configparser
import click
import requests # type: ignore
@click.group()
@click.pass_context
def cli(ctx):
    """Create cli."""
    # Root command group; sub-commands (deploy, configure) are attached in
    # main() before invocation, so the group body itself does nothing.
    return
def main():
    """Attach the sub-commands to the CLI group and run it."""
    for command in (deploy, configure):
        cli.add_command(command)
    cli()
@click.command()
@click.option(
"--environment",
required=True,
prompt=True,
type=click.Choice(["staging", "production"]),
)
def deploy(environment):
"""Deploy to defined environment."""
token = get_token()
if not token:
click.echo("no token found - running configure")
configure()
token = get_token()
click.confirm(f"Are you sure you want to deploy {environment}?", abort=True)
if environment == "production":
result = deploy_production(token)
elif environment == "staging":
result = deploy_staging(token)
if result.status_code != 204:
click.echo(
f"There was an error deploying {environment}, please check your token."
)
else:
click.echo(f"Successfully started deployment for {environment}")
@click.command("configure")
@click.option("--token", required=True, prompt=True)
def configure(token):
"""Configure access point in config.ini file."""
config = configparser.ConfigParser()
config["DEFAULT"] = {"github_access_token": token}
with open("config.ini", "w") as configfile:
config.write(configfile)
def get_token():
    """Return the stored GitHub token, or None when never configured."""
    parser = configparser.ConfigParser()
    # A missing config.ini simply leaves the parser empty.
    parser.read("config.ini")
    return parser["DEFAULT"].get("github_access_token")
def deploy_staging(token):
    """Deploy to staging environment."""
    # Fire a repository_dispatch event that the staging workflow listens for.
    payload = {"event_type": "cool_staging", "client_payload": {}}
    return requests.post(
        url="https://api.github.com/repos/cisagov/con-pca-cicd/dispatches",
        json=payload,
        headers=get_auth_header(token),
    )
def deploy_production(token):
    """Deploy to production environment."""
    # Fire a repository_dispatch event that the production workflow listens for.
    payload = {"event_type": "cool_production", "client_payload": {}}
    return requests.post(
        url="https://api.github.com/repos/cisagov/con-pca-cicd/dispatches",
        json=payload,
        headers=get_auth_header(token),
    )
def get_auth_header(token):
    """Return the Authorization header expected by the GitHub API."""
    bearer = f"Bearer {token}"
    return {"Authorization": bearer}
if __name__ == "__main__":
main()
| [
37811,
7391,
284,
6061,
284,
29475,
393,
3227,
7375,
3535,
12493,
526,
15931,
198,
11748,
4566,
48610,
198,
198,
11748,
3904,
198,
11748,
7007,
220,
1303,
2099,
25,
8856,
628,
198,
31,
12976,
13,
8094,
3419,
198,
31,
12976,
13,
6603,
... | 2.684916 | 895 |
# -*- coding: utf-8 -*-
from numpy import sin, pi
def comp_surface(self):
    """Compute the Slot total surface (by analytical computation).

    Caution, the bottom of the Slot is an Arc

    Parameters
    ----------
    self : SlotM14
        A SlotM14 object

    Returns
    -------
    S: float
        Slot total surface [m**2]
    """

    points = self._comp_point_coordinate()

    # Radii of the two arcs bounding the slot
    outer_radius = abs(points["Z1"])
    inner_radius = abs(points["Z2"])

    # Area of each circular sector spanning the slot opening angle W0
    sector_fraction = self.W0 / (2 * pi)
    outer_sector = pi * outer_radius ** 2 * sector_fraction
    inner_sector = pi * inner_radius ** 2 * sector_fraction

    return abs(outer_sector - inner_sector)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
299,
32152,
1330,
7813,
11,
31028,
628,
198,
4299,
552,
62,
42029,
7,
944,
2599,
198,
220,
220,
220,
37227,
7293,
1133,
262,
32026,
2472,
4417,
357,
1525,
3... | 2.244361 | 266 |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow-based trainer for graph embedding with GloVe model."""
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
# pylint: disable=C6120,C6113
from __future__ import print_function
import copy
import os
import struct
import glove_util
import numpy as np
import tensorflow as tf
# Summary statistics printed for each weight matrix during diagnostics.
# Keys are metric names; values map a 2-D numpy array to a scalar.
METRICS_TO_PRINT = {
    'mat_msq': lambda m: np.mean(m**2.0),
    'mat_avg': lambda m: np.mean(m),  # pylint: disable=unnecessary-lambda
    'mat_max_avg': lambda m: np.max(np.abs(np.mean(m, axis=1))),
    'mat_max_msq': lambda m: np.max(np.mean(m**2.0, axis=1)),
    'm00': lambda m: float(m[0, 0]),
    'm10': lambda m: float(m[1, 0]),
    'avg_mag': lambda m: np.mean(np.sum(m**2.0, axis=1))
}
class GloVeModelTf(object):
"""A class to do GloVe training given cooccurrences and sorted vocab file.
Mimics behavior and performance of the original c implementation.
Requires files generated by code at https://github.com/stanfordnlp/GloVe.
***New: can also take a raw list of sentences and generate cooccurrences.
Args:
vector_size: desired size of the model
vocab_filename: name of the vocab file for building the model
"""
  def _weight_initializer(self,
                          weight_name,
                          init_width,
                          rowdim,
                          coldim,
                          load_name=None):
    """Initializes weights either from file or from uniform initializer.

    Args:
      weight_name: name for the weights in the graph
      init_width: width of the uniform initializer
      rowdim: row dimension size of the weights
      coldim: col dimension size of the weights
      load_name: set only if load name is different from variable name

    Returns:
      a tf variable initialized with weight_name
    """
    if not load_name:
      load_name = weight_name
    # When a weight directory was supplied at construction time, warm-start
    # from '<load_name>.txt' inside it; otherwise draw each entry uniformly
    # from [-init_width, init_width].
    if self._init_weight_dir:
      with open(
          os.path.join(self._init_weight_dir, '%s.txt' % load_name), 'rb') as f:
        weights = np.reshape(np.loadtxt(f), newshape=[rowdim, coldim])
    else:
      weights = tf.random_uniform([rowdim, coldim], -init_width, init_width)
    return tf.Variable(weights, name=weight_name, dtype=tf.float32)
  def __init__(self,
               vector_size,
               vocab_filename=None,
               covariate_size=0,
               random_seed=12345,
               init_weight_dir=None,
               random_walks=None,
               covariate_data=None,
               window_size=5):
    """Initializes the data reading and model variables.

    Args:
      vector_size: size of the word vectors.
      vocab_filename: filename for getting word tokens.
      covariate_size: size of the covariate embedding dimension
      random_seed: seed the initialization generator
      init_weight_dir: directory to pull initial weights from. defaults to a
        uniform initializer if none.
      random_walks: a list of tokenized sentences
      covariate_data: a keyed list of float lists, where each key identifies a
        token in the corpus, and each float list is a row of covariate data
      window_size: window size to use for cooccurrence counting, if needed

    Returns: (none)
    """
    print('setting up basic stuff...')
    # Get word tokens: one token per line, first whitespace-separated field.
    self._vector_size = vector_size
    self._covariate_size = covariate_size
    self._tokens = []
    self._vocab_index_lookup = None
    if vocab_filename:
      with open(vocab_filename, 'r') as f:
        for line in f:
          self._tokens.append(line.split()[0])
      self._vocab_index_lookup = dict(
          zip(self._tokens, list(range(len(self._tokens)))))
    self._cooccurrences = None
    self._cooccurrence_dict = None
    print('loading or computing co-occurrences...')
    # Counting cooccurrences from raw walks may extend/replace the vocab.
    if random_walks:
      (self._cooccurrences, self._tokens, self._vocab_index_lookup,
       self._cooccurrence_dict) = glove_util.count_cooccurrences(
           random_walks, window_size, self._vocab_index_lookup)
    self._vocab_size = len(self._tokens)
    # Get covariate data, row-aligned with the token list.
    print('setting other placeholders...')
    if covariate_data is not None:
      self._covariate_data = np.array([covariate_data[t] for t in self._tokens])
    # Placeholders for parameter tensors and other trackers
    io_dict = {'input': None, 'outpt': None}
    self._word = copy.deepcopy(io_dict)
    self._bias = copy.deepcopy(io_dict)
    self._iter = 0
    self._sum_cost = 0
    self._sum_adv_cost_g = 0
    self._sum_adv_cost_d = 0
    self._random_seed = random_seed
    self._init_weight_dir = init_weight_dir
    # Pointers to variables needed for covariate model
    self._cvrt = copy.deepcopy(io_dict)
    self._cvrt_transformation = copy.deepcopy(io_dict)
    # Initialize the cooccurrence read format: 'iid' = (int word1, int word2,
    # double count), matching the binary records consumed by _get_batch.
    self._cooccurrence_fmt = 'iid'
    self._cooccurrence_fmt_length = struct.calcsize(self._cooccurrence_fmt)
    self._struct_unpack = struct.Struct(self._cooccurrence_fmt).unpack_from
def _compute_loss_weight(self, y):
"""Computes the loss weighting function as defined in the original paper.
Args:
y: the raw (un-logged) cooccurrence score
Returns:
weighted loss
"""
return 1.0 if y > self._xmax else pow(y / self._xmax, self._alpha)
  def _compute_w2v_loss(self, input_indxs):
    """define part of tensorflow graph to compute the w2v loss.

    Args:
      input_indxs: int32 or int64 tensor for input labels

    Returns:
      w2v training loss
    """
    # If using word2vec, we need negative samples
    print('registering w2v loss in the graph')
    # Dot products between each input word vector and its negative samples'
    # output vectors: shape (batch, num_neg_samples).
    word_neg_sample_loss_dot_products = tf.reduce_sum(
        tf.multiply(
            tf.nn.embedding_lookup(self._word['outpt'],
                                   self._neg_samples_tensor),
            tf.expand_dims(self._input_word_vecs, 1)),
        axis=2)
    # Covariate contribution to the negative-sample logits (zeros when the
    # model has no covariate dimensions).
    if self._covariate_size > 0:
      cvrt_neg_sample_loss_dot_products = tf.reduce_sum(
          tf.multiply(
              tf.nn.embedding_lookup(self._cvrt['outpt'],
                                     self._neg_samples_tensor),
              tf.expand_dims(self._input_cvrt_vecs, 1)),
          axis=2)
    else:
      cvrt_neg_sample_loss_dot_products = tf.fill(
          tf.shape(word_neg_sample_loss_dot_products), 0.0)
    # Bias terms: output bias of each negative sample plus input-word bias.
    bias_neg_sample_loss_sum = tf.squeeze(
        tf.math.add(
            tf.nn.embedding_lookup(self._bias['outpt'],
                                   self._neg_samples_tensor),
            tf.expand_dims(
                tf.nn.embedding_lookup(self._bias['input'], input_indxs), 1)))
    neg_sample_loss_logits = (
        word_neg_sample_loss_dot_products + cvrt_neg_sample_loss_dot_products +
        bias_neg_sample_loss_sum)
    # NOTE(review): _cap_logits, _w2v_logit_max and _w2v_neg_sample_scale are
    # not defined in this chunk -- presumably set up elsewhere; confirm.
    neg_sample_loss_logits = self._cap_logits(neg_sample_loss_logits,
                                              self._w2v_logit_max)
    neg_sample_loss_values = self._scores * tf.reduce_sum(
        tf.math.log(tf.math.sigmoid(-neg_sample_loss_logits)),
        axis=1) / (2.0 * self._w2v_neg_sample_scale)
    # Positive-pair logits come from the forward pass's estimated score.
    pos_loss_logits = self._est_score
    pos_loss_logits = self._cap_logits(pos_loss_logits, self._w2v_logit_max)
    pos_loss_values = self._scores * tf.math.log(
        tf.math.sigmoid(pos_loss_logits)) / 2.0
    self._diff = -1.0 * pos_loss_values
    return -1.0 * tf.reduce_sum(pos_loss_values + neg_sample_loss_values)
def _forward(self, input_indxs, outpt_indxs, scores, weights):
"""Build the graph for the forward pass.
Args:
input_indxs: int32 or int64 tensor for input labels
outpt_indxs: int32 or int64 tensor for outpt labels
scores: float32 tensor for co-occurrence score
weights: float32 tensor for loss weights
Returns:
loss: a univariate tensor giving the loss from the batch
"""
# Initialize input/outpt word (node) parameters
self._default_scope = tf.get_variable_scope()
init_width = 0.5 / (self._vector_size + self._covariate_size)
self._word['input'] = self._weight_initializer('word_input', init_width,
self._vocab_size,
self._vector_size)
self._word['outpt'] = self._weight_initializer('word_outpt', init_width,
self._vocab_size,
self._vector_size)
# Initialize input/outpt bias parameters
self._bias['input'] = self._weight_initializer('bias_input', init_width,
self._vocab_size, 1)
self._bias['outpt'] = self._weight_initializer('bias_outpt', init_width,
self._vocab_size, 1)
if self._covariate_size > 0:
# Initialize input/outpt cvrt transformation parameters
self._cvrt_transformation['input'] = self._weight_initializer(
'cvrt_input', init_width, self._covariate_data.shape[1],
self._covariate_size)
self._cvrt_transformation['outpt'] = self._weight_initializer(
'cvrt_outpt', init_width, self._covariate_data.shape[1],
self._covariate_size)
# Project the covariate data with the transformation parameters
self._cvrt['input'] = tf.matmul(self._covariate_data_tensor,
self._cvrt_transformation['input'])
self._cvrt['outpt'] = tf.matmul(self._covariate_data_tensor,
self._cvrt_transformation['outpt'])
if self._use_monet:
# Compute covariate svd
_, self._u, _ = tf.linalg.svd(self._cvrt['input'] + self._cvrt['outpt'])
# Project base word vecs and get word vecs
self._projected_word_input = tf.stop_gradient(
self._word['input'] - self._db_level * tf.matmul(
self._u, tf.matmul(tf.transpose(self._u), self._word['input'])))
self._projected_word_outpt = tf.stop_gradient(
self._word['outpt'] - self._db_level * tf.matmul(
self._u, tf.matmul(tf.transpose(self._u), self._word['outpt'])))
# Get loss input word vectors
if self._use_monet:
self._input_word_vecs = tf.nn.embedding_lookup(self._projected_word_input,
input_indxs)
self._outpt_word_vecs = tf.nn.embedding_lookup(self._projected_word_outpt,
outpt_indxs)
else:
self._input_word_vecs = tf.nn.embedding_lookup(self._word['input'],
input_indxs)
self._outpt_word_vecs = tf.nn.embedding_lookup(self._word['outpt'],
outpt_indxs)
# Get loss input bias vectors
self._input_bias_vecs = tf.nn.embedding_lookup(self._bias['input'],
input_indxs)
self._outpt_bias_vecs = tf.nn.embedding_lookup(self._bias['outpt'],
outpt_indxs)
self._word_pred = tf.reduce_sum(
tf.multiply(self._input_word_vecs, self._outpt_word_vecs), axis=1)
self._bias_pred = tf.reduce_sum(
self._input_bias_vecs + self._outpt_bias_vecs, axis=1)
estimated_score = self._bias_pred
self._word_pred = tf.reduce_sum(
tf.multiply(self._input_word_vecs, self._outpt_word_vecs), axis=1)
estimated_score += self._word_pred
# Add covariate terms
if self._covariate_size > 0:
self._input_cvrt_vecs = tf.nn.embedding_lookup(self._cvrt['input'],
input_indxs)
self._outpt_cvrt_vecs = tf.nn.embedding_lookup(self._cvrt['outpt'],
outpt_indxs)
self._cvrt_pred = tf.reduce_sum(
tf.multiply(self._input_cvrt_vecs, self._outpt_cvrt_vecs), axis=1)
estimated_score += self._cvrt_pred
else:
self._cvrt_pred = tf.constant(0.0)
self._scores = scores
self._est_score = estimated_score
if self._use_w2v:
loss = self._compute_w2v_loss(input_indxs)
else:
diff = estimated_score - scores
self._diff = diff
loss = tf.reduce_sum(tf.multiply(weights, tf.square(diff))) / 2
return loss
  def _monet_train_op(self, optimizer, loss, global_step):
    """Registers the MONET training op in the graph.

    Args:
      optimizer: a tf optimizer object
      loss: the loss to optimize from a tf graph
      global_step: train step for the network

    Returns:
      a tf train op
    """
    # Compute gradients w.r.t. the projected (debiased) word vectors plus the
    # covariate-transformation and bias parameters.
    var_list = [
        self._cvrt_transformation['input'], self._cvrt_transformation['outpt'],
        self._bias['input'], self._bias['outpt'], self._projected_word_input,
        self._projected_word_outpt
    ]
    # Point the model word vector gradients to the base word vector gradients:
    # the last two (grad, var) pairs get their variable swapped so gradients
    # computed on the projections update the underlying word matrices.
    grads_and_vars = optimizer.compute_gradients(loss, var_list)
    grads_and_vars[-2] = (grads_and_vars[-2][0], self._word['input'])
    grads_and_vars[-1] = (grads_and_vars[-1][0], self._word['outpt'])
    self._grads_and_vars = grads_and_vars
    return optimizer.apply_gradients(grads_and_vars, global_step)
  def _register_adversary(self):
    """Build adversary part of graph.

    Sets up a GAN-style discriminator over the summed input+output word
    embeddings of "positive" vs "negative" labelled tokens, plus the two
    opposing training ops.

    Args: (none)

    Returns:
      train_d: adversary trainer
      train_g: generator trainer
    """
    adv_positives = tf.placeholder(tf.int32, shape=(None))
    adv_negatives = tf.placeholder(tf.int32, shape=(None))
    self._adv_positives = adv_positives
    self._adv_negatives = adv_negatives
    # NOTE(review): Discriminator is not defined in this chunk -- presumably
    # declared elsewhere in the file; confirm.
    d = Discriminator(self._vector_size, self._adv_dim)
    d_neg = d.net(
        tf.nn.embedding_lookup(self._word['input'] + self._word['outpt'],
                               self._adv_negatives),
        reuse=False)
    self._d_neg = d_neg
    d_pos = d.net(
        tf.nn.embedding_lookup(self._word['input'] + self._word['outpt'],
                               self._adv_positives),
        reuse=True)
    self._d_pos = d_pos
    # Standard GAN losses: the discriminator separates pos from neg, the
    # generator (embedding model) tries to make them indistinguishable.
    loss_d = tf.reduce_mean(tf.log(d_pos)) + tf.reduce_mean(tf.log(1 - d_neg))
    loss_g = tf.reduce_mean(tf.log(d_neg)) + tf.reduce_mean(tf.log(1 - d_pos))
    self._adv_loss_d = loss_d
    self._adv_loss_g = loss_g
    d_var_list = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')
    # g_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
    #                                scope=self._default_scope)
    # Generator variables: every trainable variable outside the discriminator.
    g_var_list = [
        var for var in tf.trainable_variables()
        if 'discriminator' not in var.name
    ]
    print(d_var_list)
    print(g_var_list)
    # Each side maximizes its own loss (hence minimizing the negation).
    train_d = tf.train.AdamOptimizer(self._adv_lr).minimize(
        -loss_d, var_list=d_var_list)
    train_g = tf.train.AdamOptimizer(self._adv_lr).minimize(
        -loss_g, var_list=g_var_list)
    return train_d, train_g
  def _build_graph(self):
    """Build the graph for the full model.

    Creates the batch placeholders, forward pass, optimizer, optional
    adversary, and runs the variable initializer. Must be called inside an
    active (default) session, since it uses initializer.run().

    Args: (none)

    Returns: (none)
    """
    # Make placeholders and covariate data
    input_indxs = tf.placeholder(tf.int32, shape=(None))
    outpt_indxs = tf.placeholder(tf.int32, shape=(None))
    neg_samples = tf.placeholder(tf.int32, shape=(None, self._neg_samples))
    scores = tf.placeholder(tf.float32, shape=(None))
    weight = tf.placeholder(tf.float32, shape=(None))
    self._input_indxs = input_indxs
    self._outpt_indxs = outpt_indxs
    self._neg_samples_tensor = neg_samples
    self._scores = scores
    self._weight = weight
    if self._covariate_size > 0:
      # Covariate rows are constant inputs, not trained parameters.
      covariate_data = tf.Variable(
          self._covariate_data, trainable=False, dtype=tf.float32)
      self._covariate_data_tensor = covariate_data
    # Initialize global step
    global_step = tf.Variable(0, name='global_step')
    self._global_step = global_step
    # Make the feed-forward network
    tf.random.set_random_seed(self._random_seed)
    loss = self._forward(input_indxs, outpt_indxs, scores, weight)
    self._loss = loss
    # Optimize the loss (MONET needs its own op to redirect gradients)
    optimizer = tf.train.AdagradOptimizer(
        self._learning_rate, initial_accumulator_value=1.0)
    if self._use_monet:
      train = self._monet_train_op(optimizer, loss, global_step)
    else:
      train = optimizer.minimize(loss, global_step)
    self._train = train
    # Get adv trainers
    if self._adv_labels:
      self._train_d, self._train_g = self._register_adversary()
    # Initialize the variables
    tf.global_variables_initializer().run()
    self._saver = tf.train.Saver()
  def _save_weights(self, weight_filename, weights):
    """Saves weights in word2vec text format via glove_util.KeyedVectors.

    Args:
      weight_filename: filename to save to
      weights: weights to save, row-aligned with self._tokens

    Returns: (none)
    """
    embedding_obj = glove_util.KeyedVectors(weights.shape[1])
    embedding_obj.add(self._tokens, weights)
    embedding_obj.save_word2vec_format(weight_filename)
  def _print_extra_diagnostics(self, session):
    """Prints every METRICS_TO_PRINT statistic for each model weight matrix.

    Args:
      session: a tensorflow session

    Returns: (none)
    """
    # Get data for printing.
    # NOTE(review): _extract_weight_matrix is not defined in this chunk --
    # presumably converts the token-keyed dicts back to matrices; confirm.
    weight_mats = {
        s: self._extract_weight_matrix(w)
        for s, w in self._get_model_return_dict(session).items()
        if w is not None
    }
    bias_input, bias_outpt = session.run(
        [self._bias['input'], self._bias['outpt']])
    if self._use_monet:
      projected_word_input, projected_word_outpt, u = session.run(
          [self._projected_word_input, self._projected_word_outpt, self._u])
    # Print avg, msq, max row avg, and max row msq of each weight matrix
    for metric in sorted(METRICS_TO_PRINT):
      print('word input %s: %0.9f' %
            (metric, METRICS_TO_PRINT[metric](weight_mats['topo_input'])))
      print('word outpt %s: %0.9f' %
            (metric, METRICS_TO_PRINT[metric](weight_mats['topo_outpt'])))
      print('bias input %s: %0.9f' %
            (metric, METRICS_TO_PRINT[metric](bias_input)))
      print('bias outpt %s: %0.9f' %
            (metric, METRICS_TO_PRINT[metric](bias_outpt)))
      if self._covariate_size > 0:
        print('cvrt input %s: %0.9f' %
              (metric, METRICS_TO_PRINT[metric](weight_mats['meta_input'])))
        print('cvrt outpt %s: %0.9f' %
              (metric, METRICS_TO_PRINT[metric](weight_mats['meta_outpt'])))
        print(
            'cvrt trans input %s: %0.9f' %
            (metric, METRICS_TO_PRINT[metric](weight_mats['meta_trans_input'])))
        print(
            'cvrt trans outpt %s: %0.9f' %
            (metric, METRICS_TO_PRINT[metric](weight_mats['meta_trans_outpt'])))
      if self._use_monet:
        print('projected word input %s: %0.9f' %
              (metric, METRICS_TO_PRINT[metric](projected_word_input)))
        print('projected word outpt %s: %0.9f' %
              (metric, METRICS_TO_PRINT[metric](projected_word_outpt)))
        print('projected word final %s: %0.9f' %
              (metric, METRICS_TO_PRINT[metric](projected_word_outpt +
                                                projected_word_input)))
        print('u from svd %s: %0.9f' % (metric, METRICS_TO_PRINT[metric](u)))
def _get_batch(self, cooccurrence_filename, cooccurrence_count, max_count, f):
"""Gets a batch to train on.
Args:
cooccurrence_filename: where cooccurrences are
cooccurrence_count: where we are in the cooccurrence list
max_count: maximum cooccurrence count
f: potentially an open file object for the co-occurrences
Returns:
lots of stuff
"""
batch_data = {
'input_indxs': [],
'outpt_indxs': [],
'scores': [],
'weight': []
}
if self._adv_labels:
batch_data['positive_indxs'] = []
batch_data['negative_indxs'] = []
continue_training = True
for _ in range(self._batch_size):
# Get an example
if cooccurrence_filename:
last_data_read = f.read(self._cooccurrence_fmt_length)
if not last_data_read:
continue_training = False
batch_example = self._struct_unpack(last_data_read)
else:
if cooccurrence_count == max_count:
continue_training = False
break
batch_example = self._cooccurrences[cooccurrence_count]
cooccurrence_count += 1
# Store the example
batch_data['input_indxs'].append(batch_example[0] - 1)
batch_data['outpt_indxs'].append(batch_example[1] - 1)
batch_data['scores'].append(np.log(batch_example[2]))
batch_data['weight'].append(self._compute_loss_weight(batch_example[2]))
if self._adv_labels:
if self._adv_labels[self._tokens[batch_example[0] - 1]][0] == 1.0:
batch_data['positive_indxs'].append(batch_example[0] - 1)
else:
batch_data['negative_indxs'].append(batch_example[0] - 1)
if self._adv_labels[self._tokens[batch_example[1] - 1]][0] == 1.0:
batch_data['positive_indxs'].append(batch_example[1] - 1)
else:
batch_data['negative_indxs'].append(batch_example[1] - 1)
batch_data['neg_samples'] = np.random.randint(
0,
self._vocab_size,
size=(len(batch_data['scores']), self._neg_samples),
dtype=np.int32)
batch_data['neg_samples'] = [list(v) for v in batch_data['neg_samples']]
return batch_data, continue_training
  def _console_print(self, session, return_dict, batch_data, print_extra,
                     print_every):
    """Prints training progress to the console every ``print_every`` steps.

    Args:
      session: the tf session
      return_dict: variables from train step
      batch_data: batch from train step
      print_extra: whether to print extra diagnostics
      print_every: how often to print; <= 0 disables printing

    Returns: (nothing)
    """
    # NOTE(review): _num_records and _iters are not set in the visible
    # __init__ -- presumably configured by the public train entry point.
    percent_done = 100.0 * self._total_examples_trained / (
        self._num_records * self._iters)
    if print_every > 0 and return_dict['step'] % print_every == 0:
      print('---- iter %d, updates [%d, %d], last pair (%d, %d, %0.9f):' %
            ((self._iter, self._total_examples_trained -
              len(batch_data['scores']), self._total_examples_trained - 1) +
             (batch_data['input_indxs'][-1], batch_data['outpt_indxs'][-1],
              batch_data['scores'][-1])))
      print('---- %0.4f%% done: avg_cost %0.5f, sum_cost %0.5f' %
            (percent_done, self._sum_cost /
             (self._total_examples_trained), self._sum_cost))
      # Dump raw tensors when the estimated scores have gone NaN.
      if np.isnan(np.sum(return_dict['est_score'])):
        print(return_dict['est_score'])
        print(return_dict['diff'])
        print(return_dict['loss'])
      print('---- est_score: %0.5f, diff: %0.5f, loss: %0.5f' %
            (np.sum(return_dict['est_score']), np.sum(
                return_dict['diff']), np.sum(return_dict['loss'])))
      print('-------- will do %d iters' % self._iters)
      if self._print_weight_diagnostics:
        self._print_weights(session, batch_data)
      if self._adv_labels:
        print('----adv_loss_g/d: %0.5f/%0.5f' %
              (return_dict['adv_loss_g'], return_dict['adv_loss_d']))
      if print_extra:
        print('word_pred: %0.9f, bias_pred: %0.9f, cvrt_pred: %0.9f' %
              (np.sum(return_dict['word_pred']), np.sum(
                  return_dict['bias_pred']), np.sum(return_dict['cvrt_pred'])))
        self._print_extra_diagnostics(session)
      print('===============================================')
def _train_model_thread(self,
session,
cooccurrence_filename,
checkpoint_every,
checkpoint_dir,
print_extra_diagnostics=False,
print_every=-1,
kill_after=-1):
"""Trains glove model and saves word vectors.
Args:
session: a tensorflow session
cooccurrence_filename: location of binary cooccurrences
checkpoint_every: how often to checkpoint (counted in batches)
checkpoint_dir: directory to save checkpoints in
print_extra_diagnostics: whether to show extra diagnostics
print_every: update the console every this number of steps
kill_after: stop each iteration after this many updates
Returns: (nothing)
"""
cooccurrence_count = 0
max_count = 0
f = None
if cooccurrence_filename:
f = open(cooccurrence_filename)
else:
max_count = len(self._cooccurrences)
continue_training = True
while continue_training:
batch_data, continue_training = self._get_batch(cooccurrence_filename,
cooccurrence_count,
max_count, f)
len_batch = len(batch_data['scores'])
if len_batch > 0:
# Train on the batch
training_dict = {
self._input_indxs: batch_data['input_indxs'],
self._outpt_indxs: batch_data['outpt_indxs'],
self._neg_samples_tensor: batch_data['neg_samples'],
self._scores: batch_data['scores'],
self._weight: batch_data['weight']
}
if self._adv_labels:
training_dict.update({
self._adv_positives: batch_data['positive_indxs'],
self._adv_negatives: batch_data['negative_indxs']
})
vars_to_get = {
'train': self._train,
'step': self._global_step,
'diff': self._diff,
'loss': self._loss,
'est_score': self._est_score,
'word_pred': self._word_pred,
'bias_pred': self._bias_pred,
'cvrt_pred': self._cvrt_pred
}
if self._adv_labels:
vars_to_get.update({
'adv_loss_g': self._adv_loss_g,
'adv_loss_d': self._adv_loss_d,
'train_d': self._train_d,
'train_g': self._train_g
})
if self._covariate_size > 0:
vars_to_get.update({'cvrt_pred': self._cvrt_pred})
return_dict = session.run(vars_to_get, feed_dict=training_dict)
self._sum_cost += return_dict['loss']
if self._adv_labels:
self._sum_adv_cost_g += return_dict['adv_loss_g']
self._sum_adv_cost_d += return_dict['adv_loss_d']
cooccurrence_count += len_batch
self._total_examples_trained += len_batch
# Report to console
self._console_print(session, return_dict, batch_data,
print_extra_diagnostics, print_every)
if kill_after > -1 and return_dict['step'] >= kill_after:
return
if (checkpoint_dir and checkpoint_every >= 0 and
return_dict['step'] % checkpoint_every == 0):
print('>>>>>>>>>>>>>>>checkpointing<<<<<<<<<<<<<<<<<<<<')
checkpoint_prefix = os.path.join(checkpoint_dir,
'chkpnt%d' % return_dict['step'])
_ = self._get_model_return_dict(session, checkpoint_prefix,
checkpoint_prefix)
# Close the file, if used
if cooccurrence_filename:
f.close()
def _make_keyed_weights(self, weights):
"""Makes key, weight vector representation of matrix with model tokens.
Args:
weights: weights indexed by vocab_index_lookup
Returns:
weight vector dict keyed by token
"""
return dict(
zip(self._tokens,
[weights[self._vocab_index_lookup[t]] for t in self._tokens]))
  def _get_model_return_dict(self,
                             session,
                             output_prefix=None,
                             covariate_weight_output=None):
    """Gets a model weight dictionary for main training function.

    Evaluates the learned input/output word embeddings and, when covariates
    are present, the covariate embeddings and transformations, optionally
    writing them to disk under the given prefixes.

    Args:
      session: a tensorflow session
      output_prefix: prefix for all parameters except covariate transformation
      covariate_weight_output: prefix for covariate transformation

    Returns:
      return_dict: dict with weight dictionaries.
    """
    # Entries stay None unless the corresponding model component exists.
    return_dict = {
        'topo_input': None,
        'topo_outpt': None,
        'meta_input': None,
        'meta_outpt': None,
        'meta_trans_input': None,
        'meta_trans_outpt': None
    }
    # Get word embeddings
    topo_input, topo_outpt = session.run(
        [self._word['input'], self._word['outpt']])
    return_dict['topo_input'] = self._make_keyed_weights(topo_input)
    return_dict['topo_outpt'] = self._make_keyed_weights(topo_outpt)
    if self._covariate_size > 0:
      # Get covariate embeddings
      cvrt_input, cvrt_outpt = session.run(
          [self._cvrt['input'], self._cvrt['outpt']])
      return_dict['meta_input'] = self._make_keyed_weights(cvrt_input)
      return_dict['meta_outpt'] = self._make_keyed_weights(cvrt_outpt)
      if output_prefix is not None:
        self._save_weights(output_prefix + '_cvrtvecs.txt',
                           cvrt_input + cvrt_outpt)
      if self._use_monet:
        # Project base word vectors one more time, store in word embeds
        u = session.run(self._u)
        # Debias: remove the (scaled) projection onto the subspace spanned
        # by u, i.e. x <- x - db_level * u u^T x, for input and output sides.
        topo_input -= self._db_level * np.matmul(
            u, np.matmul(np.transpose(u), topo_input))
        topo_outpt -= self._db_level * np.matmul(
            u, np.matmul(np.transpose(u), topo_outpt))
        # Overwrite the keyed embeddings with their debiased versions.
        return_dict['topo_input'] = self._make_keyed_weights(topo_input)
        return_dict['topo_outpt'] = self._make_keyed_weights(topo_outpt)
        if output_prefix is not None:
          self._save_weights(
              output_prefix + '.txt',
              np.concatenate([topo_input + topo_outpt, cvrt_input + cvrt_outpt],
                             axis=1))
      # Get covariate transformation
      (return_dict['meta_trans_input'],
       return_dict['meta_trans_outpt']) = session.run([
           self._cvrt_transformation['input'],
           self._cvrt_transformation['outpt']
       ])
      # NOTE(review): this branch is guarded by output_prefix but writes to
      # covariate_weight_output; confirm both are always provided together,
      # otherwise this raises on a None prefix.
      if output_prefix is not None:
        np.savetxt(
            covariate_weight_output + '_cvrt_projections.txt',
            np.concatenate([
                return_dict['meta_trans_input'], return_dict['meta_trans_outpt']
            ],
                           axis=1))
    # Save topo embeddings
    # NOTE(review): with no covariates the tag is empty, yielding a '_.txt'
    # suffix -- presumably intentional, but worth confirming.
    wordvec_tag = 'wordvecs' if self._covariate_size > 0 else ''
    if output_prefix is not None:
      self._save_weights(output_prefix + '_%s.txt' % wordvec_tag,
                         topo_input + topo_outpt)
    return return_dict
  def train_model(self,
                  session,
                  iters,
                  alpha,
                  xmax,
                  eta,
                  regress_out_covariates,
                  covariate_weight_output=None,
                  output=None,
                  cooccurrence_filename=None,
                  print_every=-1,
                  print_extra_diagnostics=False,
                  print_weight_diagnostics=False,
                  checkpoint_every=-1,
                  checkpoint_dir=None,
                  db_level=1.0,
                  batch_size=1,
                  kill_after=-1,
                  init_weight_dir=None,
                  use_w2v=False,
                  neg_samples=5,
                  w2v_logit_max=10.0,
                  w2v_neg_sample_mean=False,
                  adv_lam=0.2,
                  adv_labels=None,
                  adv_lr=0.05,
                  adv_dim=None,):
    """Trains glove model and saves word vectors.

    Args:
      session: a tensorflow session
      iters: number of iterations through the corpus
      alpha: weighted diff scaling power
      xmax: weighted diff scaling threshold
      eta: initial learning rate
      regress_out_covariates: whether to regress out word vecs with cvrt vecs
      covariate_weight_output: location to save covariate transformation at
      output: output filename for word vectors.
      cooccurrence_filename: location of binary cooccurrences
      print_every: update the console every this number of steps
      print_extra_diagnostics: whether to show extra covariate diagnostics
      print_weight_diagnostics: whether to print weight/update values
      checkpoint_every: number of steps to checkpoint after
      checkpoint_dir: where to put checkpoint files
      db_level: ("debias level") - a double between 0.0 and 1.0 inclusive giving
        the strength of the debiasing. 0.0 is no debiasing, 1.0 is full.
      batch_size: number of cooccurrences to train at once
      kill_after: kill each iteration after this many updates
      init_weight_dir: if specified, will load weights from directory
      use_w2v: uses word2vec-like loss, adapted for co-occurrence counts
      neg_samples: number of negative samples for word2vec
      w2v_logit_max: logits in w2v model are capped (in absolute value) at this
      w2v_neg_sample_mean: negative sample loss is averaged, if True
      adv_lam: tuning parameter for adversarial loss. For now, this assumes you
        have input metadata and that the metadata is binary and one-dimensional.
        The adversary uses a length(adv_dim)-layer MLP with leaky ReLU
        activations. The loss is softmax cross-entropy.
      adv_labels: a {token, v} dict where v is a 2-length list of one-hot floats
      adv_lr: learning rate for all adversarial train ops
      adv_dim: dimension of hidden layer of MLP adversary

    Returns:
      if output and covariate_weight_output are not specified, a dict with:
        {}
      else, nothing - model is saved at the locations specified
    """
    # Training data must come either from a cooccurrence file or from
    # in-memory cooccurrences supplied when the object was built.
    # NOTE(review): `assert` is stripped under python -O; an explicit check
    # raising ValueError would be more robust.
    try:
      assert (cooccurrence_filename is not None or
              self._cooccurrences is not None)
    except AssertionError:
      print(('Error: must specify a cooccurrence filename if model object '
             'was not given random walks.'))
      return {}
    # Set training parameters and other trackers
    self._alpha = alpha
    self._eta = eta
    self._xmax = xmax
    self._batch_size = batch_size
    self._learning_rate = eta
    self._iters = iters
    self._total_examples_trained = 0
    self._use_w2v = use_w2v
    self._neg_samples = neg_samples
    self._print_weight_diagnostics = print_weight_diagnostics
    self._w2v_logit_max = w2v_logit_max
    self._w2v_neg_sample_scale = 1.0
    if w2v_neg_sample_mean:
      self._w2v_neg_sample_scale *= self._neg_samples
    assert 0.0 <= db_level <= 1.0
    self._db_level = db_level
    # Adversarial-debiasing settings (only used when adv_labels is given).
    self._adv_dim = adv_dim
    self._adv_lam = adv_lam
    self._adv_labels = adv_labels
    self._adv_lr = adv_lr
    # Number of cooccurrence records: derived from the binary file size
    # when reading from disk, otherwise from the in-memory list.
    if cooccurrence_filename:
      self._num_records = (
          os.path.getsize(cooccurrence_filename) /
          self._cooccurrence_fmt_length)
    else:
      self._num_records = len(self._cooccurrences)
    self._use_monet = regress_out_covariates
    self._init_weight_dir = init_weight_dir
    # Build the graph
    print('building graph...')
    self._build_graph()
    print('training...')
    # Iterate through cooccurrences multiple times
    for i in range(iters):
      self._iter = i + 1
      self._train_model_thread(session, cooccurrence_filename, checkpoint_every,
                               checkpoint_dir, print_extra_diagnostics,
                               print_every, kill_after)
      # Per-iteration summary of the average (adversarial) training cost.
      print('end of iter %d, avg_cost %0.5f' %
            (self._iter, self._sum_cost / self._total_examples_trained))
      if self._adv_labels:
        print('---- avg_adv_cost_d %0.5f' %
              (self._sum_adv_cost_d / self._total_examples_trained))
        print('---- avg_adv_cost_g %0.5f' %
              (self._sum_adv_cost_g / self._total_examples_trained))
    # Collect (and optionally save) the final weights.
    return self._get_model_return_dict(session, output, covariate_weight_output)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
12131,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.167374 | 16,729 |
# Demo of the built-in max()/min() functions on various inputs.
# max() over positional args, a string (largest character), and a dict
# (dicts iterate their keys, so this compares 1, 3, 2 and prints 3).
print(f'{max(3, 67 ,9)}')
print(f'{max("awesome")}')
print(f"{max({1:'a', 3:'c', 2:'b'})}")
# min() counterparts of the calls above (the dict again compares keys).
print(f"{min(3, 67 ,9)}")
print(f'{min("awesome")}')
print(f"{min({1:'a', 3:'c', 2:'b'})}")
names = ['Arya', 'Samson', 'Dora', 'Tim', 'Ollivander']
# Length of the shortest name, via a generator expression.
print(f'{min(len(name) for name in names)}')
# The shortest name itself, using a key function.
print(f'{min(names, key=lambda n:len(n))}')
songs = [
    {"title": "happy birthday", "playcount": 1},
    {"title": "Survive", "playcount": 6},
    {"title": "YMCA", "playcount": 99},
    {"title": "Toxic", "playcount": 31}
]
# Least-played song dict, then the title of the most-played song.
print(f"{min(songs, key=lambda s: s['playcount'])}")
print(f"{max(songs, key=lambda s: s['playcount'])['title']}")
4798,
7,
69,
6,
90,
9806,
7,
18,
11,
8275,
837,
24,
38165,
11537,
198,
4798,
7,
69,
6,
90,
9806,
7203,
707,
5927,
4943,
92,
11537,
198,
4798,
7,
69,
1,
90,
9806,
15090,
16,
32105,
64,
3256,
513,
32105,
66,
3256,
362,
32105,
65... | 2.128378 | 296 |
###############################################################################
### script for plotting boxplot meteograms of point data of icon-eu-eps ###
###############################################################################
import sys
# Derive the installation-specific folder name sitting between 'progs/' and
# '/w2w_ensembleplots' in this script's own path, then add
# '/progs/<that name>' to sys.path so the package import below resolves
# regardless of which checkout the script is run from.
current_path = sys.path[0]
ex_op_str = current_path[current_path.index('progs')+6: current_path.index('w2w_ensembleplots')-1]
sys.path.append('/progs/{}'.format(ex_op_str))
from w2w_ensembleplots.core.meteogram_boxplot import boxplot_forecast
########################################################################
########################################################################
########################################################################
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file's visible scope --
    # presumably it is defined elsewhere in the module; confirm before
    # running standalone.
    import time
    t1 = time.time()
    main()
    t2 = time.time()
    delta_t = t2-t1
    # Pretty-print elapsed wall-clock time as seconds / min+s / h+min.
    if delta_t < 60:
        print('total script time: {:.1f}s'.format(delta_t))
    elif 60 <= delta_t <= 3600:
        print('total script time: {:.0f}min{:.0f}s'.format(delta_t//60, delta_t-delta_t//60*60))
    else:
        print('total script time: {:.0f}h{:.1f}min'.format(delta_t//3600, (delta_t-delta_t//3600*3600)/60))
| [
29113,
29113,
7804,
4242,
21017,
198,
21017,
220,
4226,
329,
29353,
3091,
29487,
47091,
26836,
286,
966,
1366,
286,
7196,
12,
12496,
12,
25386,
220,
220,
220,
44386,
198,
29113,
29113,
7804,
4242,
21017,
198,
198,
11748,
25064,
198,
14421... | 3.063158 | 380 |
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
"""Contains the base-class for all bit.ly commands."""
import lnk.config
import lnk.errors
from lnk.abstract import AbstractCommand
class Command(AbstractCommand):
    """
    Base class shared by every bit.ly command.

    Specializes AbstractCommand for the 'bitly' service, which the rest of
    the application queries for service information, and prepares the
    request parameters every call to the bit.ly API needs (the OAuth2
    access token).

    Attributes:
        parameters (dict): Query parameters sent with each request to the
            bit.ly API, holding the OAuth2 access token.
    """

    def __init__(self, which):
        """
        Raises:
            errors.AuthorizationError: If no OAuth2 access token can be
                read from the configuration file.
        """
        super(Command, self).__init__('bitly', which)
        with lnk.config.Manager('bitly') as manager:
            if which != 'key' and not manager['key']:
                raise lnk.errors.AuthorizationError('bitly')
            self.parameters = {'access_token': manager['key']}

    @staticmethod
    def verify(response, what, inner=None):
        """
        Verifies an HTTP response from the bit.ly API and returns its data.

        Overrides the 'pure-virtual' base method from AbstractCommand. Both
        the transport-level status code and the status code embedded in the
        JSON body must be in the 2xx range; otherwise an error is raised
        using `what` to build a human-readable message.

        Arguments:
            response (requests.Response): The HTTP response to a request
                to the bit.ly API.
            what (str): Human-readable description of what the request was
                for, used in error messages ('Could not <what>.').
            inner (str): Optional key of an inner data layer; when given,
                the first element of that layer is returned instead of the
                whole payload.

        Returns:
            The payload of the response, if no fault was found.

        Raises:
            errors.HTTPError: For transport- or status-level failures.
            errors.APIError: For failures reported by the API itself.
        """
        failure = 'Could not {0}.'.format(what)
        if not str(response.status_code).startswith('2'):
            raise lnk.errors.HTTPError(failure,
                                       response.status_code,
                                       response.reason)
        body = response.json()
        if not str(body['status_code']).startswith('2'):
            raise lnk.errors.HTTPError(failure,
                                       body['status_code'],
                                       body['status_txt'])
        data = body['data']
        if inner:
            data = data[inner][0]
        if 'error' in data:
            raise lnk.errors.APIError('Could not {0}!'.format(what),
                                      data['error'])
        return data
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
0,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
4264,
1299,
262,
2779,
12,
4871,
329,
477,
1643,
13,
306,
9729,
526,
15931,
198,
198,
11748,
300,
77,
... | 2.862032 | 935 |
#! /usr/bin/env python3
import os
import shutil
import genanki
import requests
from vocab import get_vocab, JLPT_LEVELS
MODEL_ID = 654321
DECK_BASE_ID = 563412
VOCAB_FIELDS = [
{'name': 'id'},
{'name': 'kana'},
{'name': 'kanji'},
{'name': 'partofspeech'},
{'name': 'meaning'},
{'name': 'audio'},
]
JA_EN_TEMPLATE = {
'name': 'kana->kanji/meaning',
'qfmt': '{{audio}}</br><strong><span style="font-family: Meiryo; font-size: 60px; ">{{kana}}</span></strong><br/>(<i>{{partofspeech}}</i>)',
'afmt': '{{FrontSide}}<hr><span style="font-family: Meiryo; font-size: 30px; ">{{kanji}}</span><br><strong><span style="font-size: 40px; ">{{meaning}}</span></strong></br>',
}
EN_JA_TEMPLATE = {
'name': 'meaning->kana/kanji',
'qfmt': '<strong><span style="font-size: 40px; ">{{meaning}}</span></strong> (<i>{{partofspeech}}</i>)</br>',
'afmt': '{{FrontSide}}<hr><strong><span style="font-family: Meiryo; font-size: 60px; ">{{kanji}}</span></strong></br><span style="font-family: Meiryo; font-size: 30px; ">{{kana}}</span></br>{{audio}}',
}
MODEL_CSS = '''
.card {
font-family: arial;
font-size: 20px;
text-align: center;
color: black;
background-color: white;
}
.card1 { background-color: #969696; }
.card2 { background-color: #FFFFFF; }'
)
'''
VOCAB_MODEL = genanki.Model(
MODEL_ID,
'JLPT Vocab Model',
fields=VOCAB_FIELDS,
templates=[
JA_EN_TEMPLATE,
EN_JA_TEMPLATE,
],
css=MODEL_CSS,
)
if __name__ == '__main__':
    # Build a package for the requested JLPT levels and write it as .apkg
    # (e.g. 'jlpt_n2.apkg').
    # NOTE(review): get_package() is not defined in this file's visible
    # scope -- presumably defined alongside these constants; confirm.
    levels = [2]
    package = get_package(levels)
    package.write_to_file('jlpt_{}.apkg'.format('-'.join('n{}'.format(n) for n in levels)))
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
11748,
2429,
962,
72,
198,
11748,
7007,
198,
198,
6738,
12776,
397,
1330,
651,
62,
18893,
397,
11,
449,
43,
11571,
62,
2538,... | 2.220134 | 745 |
from collections import deque
| [
6738,
17268,
1330,
390,
4188,
198
] | 5 | 6 |
#! python3
import re, sys, traceback
from typing import Generic, TypeVar
from contextlib import redirect_stdout
from io import StringIO
T = TypeVar("T")
class Array(_Base[T]):
    """A basic array structure that only allows random access.

    The class body is empty: all behavior is inherited from `_Base`.
    """
class Collection(_KnowsIfEmpty[T]):
    """A basic collection supporting only `hasNext`, `getNext`, `resetNext`,
    `addItem` and `isEmpty`."""

    def addItem(self, element: T) -> None:
        """Inserts `element` at the current index and advances the index."""
        self._elements.insert(self.index, element)
        self.index += 1

    def getNext(self) -> T:
        """Returns the next item in the collection."""
        if self.index >= len(self._elements):
            raise Exception("No elements remaining.")
        item = self._elements[self.index]
        self.index += 1
        return item

    def resetNext(self) -> None:
        """Resets the iteration index."""
        self.index = 0

    def hasNext(self) -> bool:
        """Returns whether there are any more items."""
        return len(self._elements) > self.index
class Stack(_KnowsIfEmpty[T]):
    """A last-in-first-out container exposing only `push`, `pop`
    and `isEmpty`."""

    def push(self, element: T) -> None:
        """Places `element` on top of the stack."""
        self._elements.append(element)

    def pop(self) -> T:
        """Removes and returns the top element of the stack."""
        if not self._elements:
            raise Exception("Tried popping an empty stack.")
        return self._elements.pop()
class Queue(_KnowsIfEmpty[T]):
    """A first-in-first-out container exposing only `enqueue`, `dequeue`
    and `isEmpty`."""

    def enqueue(self, element: T) -> None:
        """Appends `element` to the back of the queue."""
        self._elements.append(element)

    def dequeue(self) -> T:
        """Removes and returns the element at the front of the queue."""
        if not self._elements:
            raise Exception("Attempted to dequeue an empty queue.")
        front = self._elements[0]
        self._elements = self._elements[1:]
        return front
class Pseudocode:
"""A simple IBDP pseudocode interpreter."""
| [
2,
0,
21015,
18,
198,
198,
11748,
302,
11,
25064,
11,
12854,
1891,
198,
6738,
19720,
1330,
42044,
11,
5994,
19852,
198,
6738,
4732,
8019,
1330,
18941,
62,
19282,
448,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
51,
796,
5994,
19852... | 2.63164 | 866 |
from .core import JpegImage
| [
6738,
764,
7295,
1330,
449,
22071,
5159,
198
] | 3.5 | 8 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from datetime import datetime, timedelta
import simplejson as json
import requests
from itertools import product as allpairs
import re
import pandas as pd
class WikidataTreeQuery(object):
    """Class to :
    * Query wikidata for all descendants of a node;
    * Structure the result as an arboresence;
    * Create a 'flare' dictionary object, suitable for writing as a json file
    and visualisation with d3js
    * Create a table with all descendants of a node, their properties as
    extracted from Wikidata, and all the paths to go from the root to the
    given node
    """
    def __init__(self, debug=None, labels_languages=None, query_labels=None, lookup_claims=None, default_language=None, query_endpoint=None, properties_set_membership=None):
        """
        Initialize query template, query prefix, asking for the result to be in json, and set default values for parameters.

        The following parameters have the following default settings:

        # The endpoint to send the query:
        query_endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"

        # Whether we are in verbose mode:
        debug = false

        # The labels we want to get from the base:
        query_labels = ["rdfs:label", "skos:altLabel", "schema:description"]

        # In which languages we want to get those labels :
        labels_languages = ["en", "fr"]

        # A list of the properties of interest, their values will be printed in the output for each entry:
        (References for names: https://www.wikidata.org/wiki/Wikidata:List_of_properties/all_in_one_table)
        lookup_claims = ["P571", "P275", "P101", "P135", "P348", "P306", "P1482", "P277", "P577", "P366", "P178", "P31", "P279", "P2572", "P3966", "P144", "P170", "P1324"]

        # Which properties define set membership. Default are elementOf and subClassOf:
        properties_set_membership = ["P31", "P279"]

        # What is the default language for the metadata of the output file:
        default_language = "en"
        """
        # Result-column names accumulated by the query_string_* helpers.
        self.columns = list()
        # QIDs encountered while building the tree (filled by make_tree).
        self.nodes_in_tree = list()
        # {0}=columns, {1}=root QID, {2}=label clauses, {3}=property clauses.
        self.query_template_base = """SELECT DISTINCT ?entity {0} {{?entity wdt:P31*/wdt:P279* wd:{1}. {2} {3}}}"""
        self.prefix_URI = "http://www.wikidata.org/entity/"
        # Fall back to the documented defaults for any parameter left as None.
        self._query_endpoint = query_endpoint or "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
        self._debug = debug or False
        self._labels_languages = labels_languages or ["en", "fr"]
        self._default_language = default_language or "en"
        self._lookup_claims = lookup_claims or ["P571", "P275", "P101", "P135", "P348", "P306", "P1482", "P277", "P577", "P366", "P178", "P31", "P279", "P2572", "P3966", "P144", "P170", "P1324"]
        self._queryLabels = query_labels or ["rdfs:label", "skos:altLabel", "schema:description"]
        self._properties_set_membership = properties_set_membership or ["P31", "P279"]
        # One HTTP session for all SPARQL requests, always asking for JSON.
        self.session = requests.Session()
        self.session.headers.update({'Accept': 'application/sparql-results+json'})
        # Resolve human-readable names of all wikidata properties up front.
        self.get_properties()
        # node QID -> list of root-to-node paths (tuples); filled by make_tree.
        self.visited_nodes = dict()
        # QID -> human-readable label cache; filled by get_labels.
        self.labels = dict()
        self.labels["singleEntries"] = "singleEntries"
        # Pattern recognizing bare Wikidata entity ids like 'Q42'.
        self.QID_pattern = re.compile("^Q[0-9]+$")
    def get_properties(self):
        """Function to get human-readable labels of all properties"""
        # SPARQL query listing every wikibase property with its label in the
        # default language; the entity URI prefix is stripped so the result
        # holds bare ids like 'P31'.
        template = """SELECT ?propertyId ?propertyLabel WHERE
        {{?property a wikibase:Property.
        BIND (replace(str(?property), str("{0}"), "") AS ?propertyId)
        SERVICE wikibase:label {{bd:serviceParam wikibase:language "{1}" .
        }}
        }}"""
        query = template.format(self.prefix_URI, self._default_language)
        if self._debug:
            print(query)
        response = self.session.post(self._query_endpoint, data={"query": query})
        if response.status_code != 200:
            raise Exception("QUERY ENDPOINT CONNECTION PROBLEM! STATUS: "+str(response.status_code)+"\nQUERY TEXT:\n"+query)
        results = json.loads(response.text).get("results", {}).get("bindings", [])
        # Map e.g. 'P31' -> 'instance_of' (non-word characters become '_').
        self.property2text = {item["propertyId"]["value"]: re.sub(r"[^\w]", "_", item["propertyLabel"]["value"]) for item in results}
        # Pre-compute 'P31_instance_of'-style names for the properties that
        # define set membership when building the tree.
        self.properties_set_membership = [p+"_"+self.property2text[p] for p in self._properties_set_membership]
        return
def query_string_properties(self):
"""Returns the part of the query that asks for different listProperties,
based on the parameters already set up."""
template = """OPTIONAL {{?entity wdt:{0} ?{1}.}}"""
for item in self._lookup_claims:
self.columns.append("?"+item+"_"+self.property2text[item])
return " ".join([template.format(item, item+"_"+self.property2text[item]) for item in self._lookup_claims])
def query_string_data_in_labels(self):
"""Returns the part of the query that asks for different labels of entities. Loop over all label-language pairs"""
template = """OPTIONAL {{?entity {0} ?{1}_{2} filter (lang(?{1}_{2}) = "{2}").}}"""
if any(len(item.split(":")) != 2 for item in self._queryLabels):
raise ValueError("All query labels should have prefixes (prefix:label)!")
label_language_pairs = list(allpairs(self._queryLabels, self._labels_languages))
for item in label_language_pairs:
self.columns.append("?{0}_{1}".format(item[0].split(":")[1], item[1]))
return " ".join([template.format(item[0], item[0].split(":")[1], item[1]) for item in label_language_pairs])
def build_query(self, root):
"""On the query_template_base, build a query, using two other functions
for subparts of the query string"""
partLabels = self.query_string_data_in_labels()
partProperties = self.query_string_properties()
columns = " ".join(set(self.columns))
return self.query_template_base.format(columns, root, partLabels, partProperties)
    def query_wikidata(self, query):
        """Execute the query built with build_query, save the result (in self.flatData),
        and save the set of subnodes for each node in order to build the tree
        (in self.subnodesPerNode). Takes as input the output of the build_query function"""
        if self._debug:
            print(self._query_endpoint)
            print("\nText of the query:\n")
            print(query)
        response = self.session.post(self._query_endpoint, data={"query": query})
        if response.status_code != 200:
            raise Exception("QUERY ENDPOINT CONNECTION PROBLEM! STATUS: "+str(response.status_code)+"\nQUERY TEXT:\n"+query)
        if self._debug:
            print("Query succeeded!")
        # NOTE(review): the bare except assumes any parse failure means the
        # query deadline expired; other JSON errors are masked by it.
        try:
            self.flatData = json.loads(response.content).get("results", {}).get("bindings", [])
        except:
            raise Exception("Query deadline is expired! You may try re-running it later or simplifying it.")
        self.subnodesPerNode = dict()
        # Lookup specifically for claims that define set membership.
        for item in self.flatData:
            # Parent QIDs of this item, via the membership properties
            # (URI prefixes stripped by keeping the last path segment).
            P_subnodeOf = (item.get(p, {}).get("value") for p in self.properties_set_membership)
            subnodeOf = [node.split("/")[-1] for node in P_subnodeOf if node]
            itemId = item["entity"]["value"].split("/")[-1]
            # Register the item as a child under each of its parents.
            for node in subnodeOf:
                if not node:
                    continue
                if not node in self.subnodesPerNode:
                    self.subnodesPerNode[node] = list()
                self.subnodesPerNode[node].append(itemId)
        # De-duplicate the children of every node.
        for node in self.subnodesPerNode:
            self.subnodesPerNode[node] = list(set(self.subnodesPerNode[node]))
    def get_labels(self, fullListOfNodes):
        """Function to convert a list of nodes to list of human-readable labels.
        Saves the result in the self.labels dictionary"""
        template = """SELECT * WHERE {{?entity rdfs:label ?label filter (lang(?label) = "{0}"). VALUES (?entity) {{{1}}}}}"""
        fullListOfNodes = list(set(fullListOfNodes))
        # Query in batches of 1000 ids to keep each SPARQL request small.
        chunks = [fullListOfNodes[x:x+1000] for x in range(0, len(fullListOfNodes), 1000)]
        result = list()
        for listOfNodes in chunks:
            query = template.format(self._default_language, "".join(["(wd:{0})".format(node) for node in listOfNodes]))
            if self._debug:
                print(query)
            response = self.session.post(self._query_endpoint, data={"query": query})
            if response.status_code != 200:
                raise Exception("QUERY ENDPOINT CONNECTION PROBLEM! STATUS: "+str(response.status_code)+"\nQUERY TEXT:\n"+query)
            result += json.loads(response.text).get("results", {}).get("bindings", [])
        # Store QID -> label, stripping the entity URI prefix from each id.
        self.labels.update({item["entity"]["value"].split("/")[-1]: item["label"]["value"] for item in result})
    def result_wikidata_aggregate_rows(self, df):
        """Function to transform the data frame to one-entity-per-line"""
        for column in df.columns:
            if self._debug:
                print("Aggregating results for column "+column+" of the query result table, datetime: "+str(datetime.now()))
            # The entity id is the grouping key; leave it untouched.
            if column == "entity":
                continue
            # Collect, per entity, the set of values this column takes across
            # the entity's rows, stored as a tuple (hashable, so the
            # drop_duplicates call below can operate on the frame).
            grouped = df.groupby("entity").apply(lambda x: x[column])
            df[column] = df["entity"].apply(lambda x: tuple(set(grouped[x])))
        # Once all columns are per-entity tuples, duplicate rows collapse
        # into one row per entity.
        df = df.drop_duplicates()
        return df
def from_root(self, root, forbidden=[]):
"""from_root("rootQID", ["forbidden node", "another forbidden node"]
Builds a tree from a given root (specify its Wikidata QID) and returns a
flare.json suitable to be input for d3js' tree layout"""
self.query_wikidata(self.build_query(root=root))
return self.make_tree(root, [], forbidden)
    def add_labels(self, flare):
        """Get the labels of all items from query result, then call the
        nested_labeler to replace the names with human-readable labels"""
        # Fetch labels for every node collected while building the tree,
        # then rewrite the flare dict with those labels.
        self.get_labels(self.nodes_in_tree)
        return self.nested_labeler(flare)
def make_HR(self, x):
"""Gets a tuple of data and converts labels like Q[0-9]+ to human-readable labels"""
return tuple([self.labels.get(i, i) if self.QID_pattern.match(i) else i for i in x])
def get_pretty_DF(self):
"""Function to take brute dataframe resulting from Wikidata query and
render a pretty table, with one-entity-per-line, human-readable labels,
and for each entity, all paths from the root to this entity"""
# Simplyify the data
self.cleanedFlatData = [{item: entity[item]["value"].replace(self.prefix_URI,"") if isinstance(entity[item]["value"], str) else entity[item]["value"] for item in entity} for entity in self.flatData]
# Get only entities in the tree
self.cleanedFlatData = [entity for entity in self.cleanedFlatData if entity["entity"] in self.nodes_in_tree]
# for visited nodes, get the labels
for entity in self.cleanedFlatData:
entityId = entity["entity"]
self.visited_nodes[entityId] = [self.make_HR(entry) for entry in self.visited_nodes[entityId]]
self.visited_nodes[entityId] = tuple(self.visited_nodes[entityId])
# Convert the list to data frame
df = pd.DataFrame(self.cleanedFlatData)
df = df.reindex_axis(sorted(df.columns, key=lambda x: x.lower()), axis=1)
df = df.fillna("")
# Convert it to one-entity-per-line
df = self.result_wikidata_aggregate_rows(df)
# Make tuples of entities human-readable
for column in df:
c2list = df[column].tolist()
c2list_expnd = list(set(reduce(lambda x, y: x+y, c2list)))
c2list_expnd_Q = filter(lambda x: self.QID_pattern.match(x), c2list_expnd)
self.get_labels(c2list_expnd_Q)
if column == "entity":
continue
tupleORstring = lambda x: x[0] if len(x) == 1 else x
df[column] = df[column].apply(lambda x: tupleORstring(self.make_HR(x)))
# add the visited nodes column to the data frame.
df["visited_nodes"] = df["entity"].apply(lambda x:self.visited_nodes[x])
return df
def nested_labeler(self, node):
"""Function to explore recursively the flare and convert the labels to
human-readable content"""
subnodes = node.get("children", [])
if len(subnodes) == 0:
return {"name": self.labels.get(node["name"], node["name"]), "nodeId": node["name"]}
else:
return {"name": self.labels.get(node["name"], node["name"]),
"nodeId": node["name"],
"children": [self.nested_labeler(subnode) for subnode in subnodes]}
    def make_tree(self, node, visited, forbidden):
        """Recursive function to explore the tree"""
        flare = dict()
        flare["name"] = node
        # Record every node reached, and every path used to reach it.
        if not node in self.nodes_in_tree:
            self.nodes_in_tree.append(node)
        if not node in self.visited_nodes.keys():
            self.visited_nodes[node] = list()
        self.visited_nodes[node].append(tuple(visited))
        result = self.subnodesPerNode.get(node)
        # Leaf node: no registered children.
        if not result:
            return flare
        # recursively call the function for all subclasses and instances
        # (skipping nodes already on the current path, which breaks cycles,
        # and any explicitly forbidden nodes).
        flare["children"] = [self.make_tree(entry, visited+[node], forbidden) for entry in result if not entry in visited and not entry in forbidden]
        # Optional : this part puts all subnodes that do not have subnodes into a subnode called "singleEntries". For viz.
        newlyStructured = list()
        singleEntries = list()
        for index, entry in enumerate(flare["children"]):
            if "children" in entry.keys():
                newlyStructured.append(entry)
            else:
                singleEntries.append(entry)
        # NOTE(review): the "singleEntries" group is appended even when it is
        # empty -- presumably harmless for the d3js layout; confirm.
        newlyStructured.append({"name": "singleEntries", "children": singleEntries})
        flare["children"] = newlyStructured
        return flare
def main():
    """Example run"""
    tree = WikidataTreeQuery()
    # Timestamps bracket the (potentially long) Wikidata crawl.
    print(datetime.now())
    flare = tree.from_root("Q21198")
    print(datetime.now())
    # Persist the tree in the 'flare' layout used by d3js visualisations.
    with open("output.json", "w") as f:
        json.dump(flare, f, indent=4)


if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
... | 2.387632 | 6,145 |
from globibot.lib.web.handlers import SessionHandler
from globibot.lib.web.decorators import authenticated, respond_json, with_query_parameters
| [
6738,
15095,
571,
313,
13,
8019,
13,
12384,
13,
4993,
8116,
1330,
23575,
25060,
198,
6738,
15095,
571,
313,
13,
8019,
13,
12384,
13,
12501,
273,
2024,
1330,
44529,
11,
3031,
62,
17752,
11,
351,
62,
22766,
62,
17143,
7307,
198
] | 3.512195 | 41 |
from .TSP import *
from .State import * | [
6738,
764,
51,
4303,
1330,
1635,
198,
6738,
764,
9012,
1330,
1635
] | 3.25 | 12 |
import struct
# Write 1000 consecutive ints to a binary file, then read them back one
# record at a time. `with` blocks guarantee the handles are closed even if
# an error occurs (the original open/close pairs leaked on exceptions), and
# a pre-compiled Struct avoids re-parsing the "i" format per record.
record = struct.Struct("i")
with open("binary.dat", "wb") as out_file:
    for n in range(1000):
        out_file.write(record.pack(n))

with open("binary.dat", "rb") as in_file:
    bytes_read = in_file.read(record.size)
    while bytes_read:
        # unpack returns a 1-tuple; unpack it directly instead of indexing.
        (value,) = record.unpack(bytes_read)
        print(value, end=" ")
        bytes_read = in_file.read(record.size)
#
# file = open("data2.txt", "w")
# file.write("Sample file writing\n")
#
# text_lines = [
# "chapter3\n",
# "sample text data file\n",
# "this is the third line of text\n",
# "this is the fourth line\n"
# ]
#
# file.writelines(text_lines)
# file.close()
#
# file = open("data2.txt", "r")
# char = file.readlines()
# print(char) | [
11748,
2878,
201,
198,
201,
198,
7753,
796,
1280,
7203,
39491,
13,
19608,
1600,
366,
39346,
4943,
201,
198,
1640,
299,
287,
2837,
7,
12825,
2599,
201,
198,
220,
220,
220,
1366,
796,
2878,
13,
8002,
7203,
72,
1600,
299,
8,
201,
198,
... | 2.241176 | 340 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The class QueueManager manages the action queue for the exoskeleton framework.
~~~~~~~~~~~~~~~~~~~~~
Source: https://github.com/RuedigerVoigt/exoskeleton
(c) 2019-2021 Rüdiger Voigt:
Released under the Apache License 2.0
"""
# standard library:
from collections import defaultdict # noqa # pylint: disable=unused-import
import logging
import time
from typing import Literal, Optional, Union
import uuid
# external dependencies:
import pymysql
import userprovided
from exoskeleton import actions
from exoskeleton import blocklist_manager
from exoskeleton import database_connection
from exoskeleton import err
from exoskeleton import exo_url
from exoskeleton import label_manager
from exoskeleton import notification_manager
from exoskeleton import statistics_manager
from exoskeleton import time_manager
class QueueManager:
    "Manage the queue and labels for the exoskeleton framework."
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-branches

    # NOTE(review): __init__ is not visible in this excerpt. The methods below
    # rely on collaborator attributes (self.cur, self.blocklist, self.labels,
    # self.stats, self.notify, self.time, self.actions, self.db_connection,
    # self.stop_if_queue_empty, self.queue_revisit) that are presumably set up
    # there -- confirm against the full class definition.

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # ADDING TO AND REMOVING FROM THE QUEUE
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def add_to_queue(self,
                     url: exo_url.ExoUrl,
                     action: Literal[1, 2, 3, 4],
                     labels_master: Optional[set] = None,
                     labels_version: Optional[set] = None,
                     prettify_html: bool = False,
                     force_new_version: bool = False) -> Optional[str]:
        """ More general function to add items to queue. Called by
        add_file_download, add_save_page_code and add_page_to_pdf.

        Returns the UUID of the new queue item, or None when the task
        was skipped (already processed / already queued).
        Raises ValueError for bad arguments and err.HostOnBlocklistError
        when the URL's host is blocked."""
        if not isinstance(url, exo_url.ExoUrl):
            raise ValueError('url must be of class ExoUrl.')
        if action not in (1, 2, 3, 4):
            raise ValueError('Invalid value for action!')
        # Check if the FQDN of the URL is on the blocklist
        if url.hostname and self.blocklist.check_blocklist(url.hostname):
            msg = 'Cannot add URL to queue: FQDN is on blocklist.'
            logging.exception(msg)
            raise err.HostOnBlocklistError(msg)
        # Add labels for the master entry.
        # Ignore labels for the version at this point, as it might
        # not get processed.
        if labels_master:
            self.labels.assign_labels_to_master(url, labels_master)
        if not force_new_version:
            # check if the URL has already been processed
            id_in_file_master = self.get_filemaster_id_by_url(url)
            if id_in_file_master:
                # The URL has been processed in _some_ way.
                # Check if was the _same_ as now requested.
                self.cur.execute('SELECT id FROM fileVersions ' +
                                 'WHERE fileMasterID = %s AND ' +
                                 'actionAppliedID = %s;',
                                 (id_in_file_master, action))
                version_id = self.cur.fetchone()
                if version_id:
                    logging.info(
                        'Skipping file already processed in the same way.')
                    return None
                # log and simply go on
                logging.debug(
                    'File already processed, BUT not this way: Added to queue.')
            else:
                # File has not been processed yet.
                # If the exact same task is *not* already in the queue, add it.
                if self.__get_queue_uuids(url, action):
                    logging.info('Exact same task already in queue.')
                    return None
        # generate a random uuid for the file version
        uuid_value = uuid.uuid4().hex
        # add the new task to the queue (stored procedure keeps the insert atomic)
        self.cur.callproc('add_to_queue_SP',
                          (uuid_value, action, url, url.hostname, prettify_html))
        # link labels to version item
        if labels_version:
            self.labels.assign_labels_to_uuid(uuid_value, labels_version)
        return uuid_value

    def __get_queue_uuids(self,
                          url: exo_url.ExoUrl,
                          action: int) -> set:
        """Based on the URL and action ID this returns a set of UUIDs in the
        *queue* that match those. Normally this set has a single element,
        but as you can force exoskeleton to repeat tasks on the same
        URL it can be multiple. Returns an empty set if such combination
        is not in the queue."""
        # urlHash is a SHA-256 of the URL, computed database-side.
        self.cur.execute('SELECT id FROM queue ' +
                         'WHERE urlHash = SHA2(%s,256) AND ' +
                         'action = %s ' +
                         'ORDER BY addedToQueue ASC;',
                         (url, action))
        queue_uuids = self.cur.fetchall()
        return {uuid[0] for uuid in queue_uuids} if queue_uuids else set()  # type: ignore[index]

    def get_filemaster_id_by_url(self,
                                 url: Union[exo_url.ExoUrl, str]
                                 ) -> Optional[str]:
        "Get the id of the filemaster entry associated with this URL"
        # Accept plain strings for convenience; normalize to ExoUrl first.
        if not isinstance(url, exo_url.ExoUrl):
            url = exo_url.ExoUrl(url)
        self.cur.execute('SELECT id FROM fileMaster ' +
                         'WHERE urlHash = SHA2(%s,256);',
                         (url, ))
        id_in_file_master = self.cur.fetchone()
        return id_in_file_master[0] if id_in_file_master else None  # type: ignore[index]

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # PROCESSING THE QUEUE
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def get_next_task(self) -> Optional[str]:
        "Get the next suitable task (delegated to a stored procedure)."
        self.cur.execute('CALL next_queue_object_SP();')
        return self.cur.fetchone()  # type: ignore[return-value]

    def delete_from_queue(self,
                          queue_id: str) -> None:
        "Remove all label links from item and delete it from the queue."
        self.cur.callproc('delete_from_queue_SP', (queue_id,))

    def process_queue(self) -> None:
        """Process the queue until it is empty (if configured to stop) or
        forever. Recovers once from a lost database connection (errno 2013)
        and notifies on finish / abort."""
        self.stats.log_queue_stats()
        while True:
            try:
                next_in_queue = self.get_next_task()
            except pymysql.err.OperationalError as op_err:
                if op_err.args[0] == 2013:  # errno
                    # this error is unusual. Give the db some time:
                    logging.error('Lost database connection. ' +
                                  'Trying to restore it in 10 seconds ...')
                    time.sleep(10)
                    try:
                        self.cur = self.db_connection.get_cursor()
                        next_in_queue = self.get_next_task()
                        logging.info('Restored database connection!')
                    except Exception as exc:
                        msg = 'Could not reestablish database connection'
                        logging.exception(msg, exc_info=True)
                        self.notify.send_msg_abort_lost_db()
                        raise ConnectionError(msg) from exc
                else:
                    logging.error(
                        'Unexpected Operational Error', exc_info=True)
                    raise
            if next_in_queue is None:
                # no actionable item in the queue
                if self.stop_if_queue_empty:
                    # Bot is configured to stop if queue is empty
                    # => check if that is only temporary or everything is done
                    if self.stats.num_tasks_w_temporary_errors() > 0:
                        # there are still tasks, but they have to wait
                        logging.debug("Tasks with temporary errors: " +
                                      "waiting %s seconds until next try.",
                                      self.queue_revisit)
                        time.sleep(self.queue_revisit)
                        continue
                    # Nothing left (i.e. num_temp_errors == 0)
                    logging.info('Queue empty. Bot stops as configured.')
                    num_permanent_errors = self.stats.num_tasks_w_permanent_errors()
                    if num_permanent_errors > 0:
                        logging.error("%s permanent errors!",
                                      num_permanent_errors)
                    self.notify.send_msg_finish()
                    break
                logging.debug(
                    "No actionable task: waiting %s seconds until next check",
                    self.queue_revisit)
                time.sleep(self.queue_revisit)
                continue
            # Got a task from the queue!
            queue_id = next_in_queue[0]
            action = next_in_queue[1]
            url = exo_url.ExoUrl(next_in_queue[2])
            prettify_html = (next_in_queue[4] == 1)
            # The FQDN might have been added to the blocklist *after*
            # the task entered into the queue!
            if self.blocklist.check_blocklist(str(url.hostname)):
                logging.error(
                    'Cannot process queue item: FQDN meanwhile on blocklist!')
                self.delete_from_queue(queue_id)
                logging.info('Removed item from queue: FQDN on blocklist.')
            else:
                if action == 1:  # download file to disk
                    self.actions.get_object(queue_id, 'file', url)
                elif action == 2:  # save page code into database
                    self.actions.get_object(queue_id, 'content', url, prettify_html)
                elif action == 3:  # headless Chrome to create PDF
                    self.actions.get_object(queue_id, 'page_to_pdf', url)
                elif action == 4:  # save page text into database
                    self.actions.get_object(queue_id, 'text', url)
                else:
                    logging.error('Unknown action id!')
                self.notify.send_msg_milestone()
            # wait some interval to avoid overloading the server
            self.time.random_wait()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
464,
1398,
4670,
518,
13511,
15314,
262,
2223,
16834,
329,
262,
409,
418,
38800,
9355,
13,
198,
... | 2.064992 | 5,016 |
import pandas as pd
import re
import sys
import os
# extract year and quarter number
# extract subject and course id
# extract minUnit from unit
# extract grading options to Letter and PassNP
# extract location and building
# extract time to start and end time
# extract enrollment status to MaxEnroll and CurEnroll
# convert am/pm to minutes
# convert some column types to int
# check if file path is valid
# Validate the command-line argument: it must name an existing data file.
# (os.path.isfile() already covers the empty-string and missing-dir cases.)
if len(sys.argv) < 2 or not os.path.isfile(sys.argv[1]):
    print('Please specify a valid data file (.csv)')
    sys.exit(1)

filename = sys.argv[1]
columns = ['Quarter', 'ID', 'College', 'Unit', 'Grading', 'Day', 'Time', 'Location', 'Enrollment']
df = pd.read_csv(filename, names=columns)
# Keep an untouched copy of the raw data for reference/debugging.
df_orig = df.copy(deep=True)

# NOTE(review): these format_* helpers are referenced here but defined
# elsewhere in this file -- confirm they are present before running.
format_quarter(df)
format_course_id(df)
format_unit(df)
format_grading(df)
format_time(df)
format_enrollment(df)
format_day(df)
format_location(df)

# Drop the raw columns that were expanded into derived ones above.
# (Use the columns= keyword: positional `axis` was removed in pandas 2.0.)
df = df.drop(columns=['Grading', 'Time', 'Enrollment', 'Day', 'Location'])

convert_to_int(df)
df.to_csv('DataClean/cleaned_data.csv', sep=',')

# Mean enrollment ratio per building, plotted on a log scale.
mean_ratio = df.groupby('Building')['RatioEnroll'].mean()
mean_ratio.plot(kind='bar', figsize=(15, 6), logy=True)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
28686,
628,
198,
2,
7925,
614,
290,
3860,
1271,
628,
198,
2,
7925,
2426,
290,
1781,
4686,
198,
220,
220,
220,
220,
198,
198,
2,
7925,
949,
26453,
422,
... | 2.711864 | 472 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from poliastro.bodies import Earth
from poliastro.twobody import Orbit
from poliastro.twobody.propagation import cowell
from edelbaum import guidance_law, extra_quantities
# Problem data (Edelbaum low-thrust transfer test case)
f = 3.5e-7  # km / s2  -- constant thrust acceleration
a_0 = 7000.0  # km  -- initial orbit semimajor axis
a_f = 42166.0  # km  -- final orbit semimajor axis (presumably GEO -- confirm)
inc_f = 0.0  # rad  -- target inclination
# Earth's gravitational parameter converted to km^3 / s^2
k = Earth.k.decompose([u.km, u.s]).value
@pytest.mark.parametrize("inc_0,expected_t_f,expected_delta_V,rtol", [
[28.5, 191.26295, 5.78378, 1e-5],
[90.0, 335.0, 10.13, 1e-3], # Extra decimal places added
[114.591, 351.0, 10.61, 1e-2]
])
@pytest.mark.parametrize("inc_0", [np.radians(28.5), np.radians(90.0)])
| [
11748,
12972,
9288,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
198,
6738,
755,
72,
459,
305,
13,
65,
5042,
1330,
3668,... | 2.270096 | 311 |
"""Service that calls the HRI loop to disambiguate the scene."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import cv2
import cv_bridge
from disambiguate.disambiguate import disambiguate_scene
from hri_interfaces.srv import Disambiguate
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
# Shared bridge instance for converting between ROS Image messages and
# OpenCV images (module-level so it is created once).
_cv_bridge = cv_bridge.CvBridge()

# TODO:
# 1. check how to get input (voice / text) from ROS execution
class DisambiguateService(Node):
    """ROS node that caches the latest camera frame for disambiguation requests."""

    def color_callback(self, color_ros_image: Image):
        """
        Store the most recent color image.

        Args:
        ----
            color_ros_image: the color image to store.
        """
        # Keep only the newest frame; logging and storing are independent.
        self.latest_color_ros_image = color_ros_image
        self.get_logger().debug('Received color image.')
# Script entry point; main() is presumably defined elsewhere in this file.
if __name__ == '__main__':
    main()
| [
37811,
16177,
326,
3848,
262,
367,
7112,
9052,
284,
595,
4131,
328,
4985,
262,
3715,
526,
15931,
198,
198,
2,
15069,
357,
66,
8,
33160,
11,
9564,
33,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
... | 3.078005 | 782 |
from flask import Blueprint, jsonify
from app_blog.services.users_service import users_service
users_blueprint = Blueprint("users", __name__, url_prefix="/users")
@users_blueprint.route('/info/<int:user_id>', methods=['GET'])
| [
6738,
42903,
1330,
39932,
11,
33918,
1958,
198,
198,
6738,
598,
62,
14036,
13,
30416,
13,
18417,
62,
15271,
1330,
2985,
62,
15271,
198,
198,
18417,
62,
17585,
4798,
796,
39932,
7203,
18417,
1600,
11593,
3672,
834,
11,
19016,
62,
40290,
... | 3.194444 | 72 |
"""Domain RepliesFilter enum class."""
import enum
class RepliesFilter(enum.Enum):
    """Domain RepliesFilter enum class."""

    # auto() assigns 1 and 2 in declaration order, matching the
    # original explicit values.
    ONLY_REPLIES = enum.auto()
    ONLY_ORIGINAL = enum.auto()
| [
37811,
43961,
18407,
444,
22417,
33829,
1398,
526,
15931,
198,
198,
11748,
33829,
628,
198,
4871,
18407,
444,
22417,
7,
44709,
13,
4834,
388,
2599,
198,
220,
220,
220,
37227,
43961,
18407,
444,
22417,
33829,
1398,
526,
15931,
628,
220,
... | 2.836066 | 61 |
import os
| [
11748,
28686,
198
] | 3.333333 | 3 |
import cassandra
import sys
import json
from cassandra.cluster import Cluster
import os
if __name__ == '__main__':
    os.chdir("./detailedData")
    cluster = Cluster()
    session = cluster.connect('hash')
    try:
        for root, dirs, files in os.walk("."):
            for filename in files:
                # Join with the walk root so files in subdirectories open
                # correctly (bare `filename` only worked for the top level).
                file_path = os.path.join(root, filename)
                with open(file_path) as handle:
                    data = json.load(handle)
                recipeid = data["id"]
                # Skip recipes that were already inserted.
                rows = session.execute(
                    '''
                    SELECT * from public_recipe where id = %s allow filtering
                    ''',
                    (recipeid,)
                )
                if rows:
                    continue
                recipename = data["name"]
                totaltime = data["totalTimeInSeconds"]
                # First image may lack a hosted URL; store None in that case.
                image = data["images"][0].get("hostedLargeUrl")
                ingredients = data["ingredientLines"]
                number = data["numberOfServings"]
                # Pick the flavor with the highest score as the main flavor.
                largest = 0
                main_flavor = ""
                for flavor, score in data["flavors"].items():
                    if score > largest:
                        largest = score
                        main_flavor = flavor
                instruction = data["source"]["sourceRecipeUrl"]
                session.execute(
                    '''
                    INSERT INTO public_recipe (id, name, time, imageurl, ingredients, numberofserving, flavor, instruction)
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
                    ''',
                    (recipeid, recipename, totaltime, image, ingredients, number, main_flavor, instruction)
                )
    finally:
        # Always release the cluster connection, even on errors.
        cluster.shutdown()
11748,
30606,
15918,
198,
11748,
25064,
198,
11748,
33918,
198,
6738,
30606,
15918,
13,
565,
5819,
1330,
38279,
198,
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
418,
13,
354,
15908,
7,
191... | 2.424408 | 549 |
'''
Simple wrapper for tests
'''
import sys, unittest
class Webtestcase(unittest.TestCase):
    '''
    Base test case for web tests.

    Intended as a place to collect shared set-up helpers and assertions;
    currently defines no behaviour beyond unittest.TestCase.
    '''
| [
7061,
6,
198,
26437,
29908,
329,
5254,
198,
7061,
6,
628,
198,
11748,
25064,
11,
555,
715,
395,
198,
198,
4871,
5313,
9288,
7442,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
17427,
... | 1.510638 | 188 |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SwitchHostRangeLearnedInfoTriggerAttributes(Base):
    """NOT DEFINED
    The SwitchHostRangeLearnedInfoTriggerAttributes class encapsulates a required switchHostRangeLearnedInfoTriggerAttributes resource which will be retrieved from the server every time the property is accessed.
    """
    __slots__ = ()
    _SDM_NAME = 'switchHostRangeLearnedInfoTriggerAttributes'
    _SDM_ATT_MAP = {
        'CustomPacket': 'customPacket',
        'DestinationCustom': 'destinationCustom',
        'DestinationCustomIpv4Address': 'destinationCustomIpv4Address',
        'DestinationCustomIpv4AddressStep': 'destinationCustomIpv4AddressStep',
        'DestinationCustomMacAddress': 'destinationCustomMacAddress',
        'DestinationCustomMacAddressStep': 'destinationCustomMacAddressStep',
        'DestinationHostList': 'destinationHostList',
        'MeshingType': 'meshingType',
        'PacketType': 'packetType',
        'PeriodIntervalInMs': 'periodIntervalInMs',
        'Periodic': 'periodic',
        'PeriodicIterationNumber': 'periodicIterationNumber',
        'ResponseTimeout': 'responseTimeout',
        'SourceHostList': 'sourceHostList',
    }

    # Each property below follows the standard ixnetwork_restpy generated
    # pattern: the getter reads the server-side attribute and the setter
    # writes it. The setter bodies were missing (dangling @X.setter
    # decorators) and have been restored.

    @property
    def CustomPacket(self):
        """
        Returns
        -------
        - str: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['CustomPacket'])
    @CustomPacket.setter
    def CustomPacket(self, value):
        self._set_attribute(self._SDM_ATT_MAP['CustomPacket'], value)

    @property
    def DestinationCustom(self):
        """
        Returns
        -------
        - bool: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['DestinationCustom'])
    @DestinationCustom.setter
    def DestinationCustom(self, value):
        self._set_attribute(self._SDM_ATT_MAP['DestinationCustom'], value)

    @property
    def DestinationCustomIpv4Address(self):
        """
        Returns
        -------
        - str: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['DestinationCustomIpv4Address'])
    @DestinationCustomIpv4Address.setter
    def DestinationCustomIpv4Address(self, value):
        self._set_attribute(self._SDM_ATT_MAP['DestinationCustomIpv4Address'], value)

    @property
    def DestinationCustomIpv4AddressStep(self):
        """
        Returns
        -------
        - str: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['DestinationCustomIpv4AddressStep'])
    @DestinationCustomIpv4AddressStep.setter
    def DestinationCustomIpv4AddressStep(self, value):
        self._set_attribute(self._SDM_ATT_MAP['DestinationCustomIpv4AddressStep'], value)

    @property
    def DestinationCustomMacAddress(self):
        """
        Returns
        -------
        - str: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['DestinationCustomMacAddress'])
    @DestinationCustomMacAddress.setter
    def DestinationCustomMacAddress(self, value):
        self._set_attribute(self._SDM_ATT_MAP['DestinationCustomMacAddress'], value)

    @property
    def DestinationCustomMacAddressStep(self):
        """
        Returns
        -------
        - str: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['DestinationCustomMacAddressStep'])
    @DestinationCustomMacAddressStep.setter
    def DestinationCustomMacAddressStep(self, value):
        self._set_attribute(self._SDM_ATT_MAP['DestinationCustomMacAddressStep'], value)

    @property
    def DestinationHostList(self):
        """
        Returns
        -------
        - list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../switchHostRanges]): NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['DestinationHostList'])
    @DestinationHostList.setter
    def DestinationHostList(self, value):
        self._set_attribute(self._SDM_ATT_MAP['DestinationHostList'], value)

    @property
    def MeshingType(self):
        """
        Returns
        -------
        - str(fullyMesh): NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['MeshingType'])
    @MeshingType.setter
    def MeshingType(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MeshingType'], value)

    @property
    def PacketType(self):
        """
        Returns
        -------
        - str(arp | ping | custom): NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['PacketType'])
    @PacketType.setter
    def PacketType(self, value):
        self._set_attribute(self._SDM_ATT_MAP['PacketType'], value)

    @property
    def PeriodIntervalInMs(self):
        """
        Returns
        -------
        - number: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['PeriodIntervalInMs'])
    @PeriodIntervalInMs.setter
    def PeriodIntervalInMs(self, value):
        self._set_attribute(self._SDM_ATT_MAP['PeriodIntervalInMs'], value)

    @property
    def Periodic(self):
        """
        Returns
        -------
        - bool: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['Periodic'])
    @Periodic.setter
    def Periodic(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Periodic'], value)

    @property
    def PeriodicIterationNumber(self):
        """
        Returns
        -------
        - number: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['PeriodicIterationNumber'])
    @PeriodicIterationNumber.setter
    def PeriodicIterationNumber(self, value):
        self._set_attribute(self._SDM_ATT_MAP['PeriodicIterationNumber'], value)

    @property
    def ResponseTimeout(self):
        """
        Returns
        -------
        - number: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['ResponseTimeout'])
    @ResponseTimeout.setter
    def ResponseTimeout(self, value):
        self._set_attribute(self._SDM_ATT_MAP['ResponseTimeout'], value)

    @property
    def SourceHostList(self):
        """
        Returns
        -------
        - list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../switchHostRanges]): NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP['SourceHostList'])
    @SourceHostList.setter
    def SourceHostList(self, value):
        self._set_attribute(self._SDM_ATT_MAP['SourceHostList'], value)

    def update(self, CustomPacket=None, DestinationCustom=None, DestinationCustomIpv4Address=None, DestinationCustomIpv4AddressStep=None, DestinationCustomMacAddress=None, DestinationCustomMacAddressStep=None, DestinationHostList=None, MeshingType=None, PacketType=None, PeriodIntervalInMs=None, Periodic=None, PeriodicIterationNumber=None, ResponseTimeout=None, SourceHostList=None):
        """Updates switchHostRangeLearnedInfoTriggerAttributes resource on the server.

        Args
        ----
        - CustomPacket (str): NOT DEFINED
        - DestinationCustom (bool): NOT DEFINED
        - DestinationCustomIpv4Address (str): NOT DEFINED
        - DestinationCustomIpv4AddressStep (str): NOT DEFINED
        - DestinationCustomMacAddress (str): NOT DEFINED
        - DestinationCustomMacAddressStep (str): NOT DEFINED
        - DestinationHostList (list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../switchHostRanges])): NOT DEFINED
        - MeshingType (str(fullyMesh)): NOT DEFINED
        - PacketType (str(arp | ping | custom)): NOT DEFINED
        - PeriodIntervalInMs (number): NOT DEFINED
        - Periodic (bool): NOT DEFINED
        - PeriodicIterationNumber (number): NOT DEFINED
        - ResponseTimeout (number): NOT DEFINED
        - SourceHostList (list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../switchHostRanges])): NOT DEFINED

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
2,
17168,
38559,
24290,
201,
198,
2,
201,
198,
2,
15069,
8309,
532,
12131,
416,
22631,
3539,
26363,
432,
201,
198,
2,
201,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
201,
198,
... | 2.426637 | 3,176 |
from random import shuffle
from typing import List, Union
def randomSequence(*args: Union[List[int], range]) -> List[int]:
    """Return a shuffled list of integers.

    Args:
        *args: Either a single ``range`` object, or 1 to 3 integers that are
            interpreted like the builtin ``range``: ``(stop,)``,
            ``(start, stop)`` or ``(start, stop, step)``.

    Returns:
        A new list with the numbers of the range in random order.

    Raises:
        ValueError: If no arguments are given, or if integer arguments are
            not between 1 and 3 in count. (The original silently crashed
            with IndexError / UnboundLocalError in these cases.)
    """
    if not args:
        raise ValueError('randomSequence expects a range object or 1-3 integers')
    if isinstance(args[0], range):
        array = list(args[0])
    elif isinstance(args[0], int) and 1 <= len(args) <= 3:
        # range(stop) / range(start, stop) / range(start, stop, step)
        array = list(range(*args))
    else:
        raise ValueError('randomSequence expects a range object or 1-3 integers')
    shuffle(array)
    return array
| [
6738,
4738,
1330,
36273,
198,
6738,
19720,
1330,
7343,
11,
4479,
628,
198,
4299,
4738,
44015,
594,
46491,
22046,
25,
4479,
58,
8053,
58,
600,
4357,
2837,
12962,
4613,
7343,
58,
600,
5974,
198,
220,
220,
220,
37227,
35561,
257,
32299,
... | 2.348921 | 278 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
# Read the long-description sources with context managers so the file
# handles are closed deterministically (the originals were leaked).
with open('README.rst') as readme_file:
    README = readme_file.read()
with open('CHANGELOG.rst') as changelog_file:
    CHANGELOG = changelog_file.read()

# Runtime dependencies.
requirements = [
    "numpy",
    "matplotlib"
]

# Dependencies needed only for running the test suite / CI.
test_requirements = [
    "tox",
    "pytest",
    "nose",
    "python-coveralls",
]

setuptools.setup(
    name="goldilocks",
    version="0.1.1",
    url="https://github.com/samstudio8/goldilocks",

    description="Locating genomic regions that are \"just right\".",
    long_description=README + '\n\n' + CHANGELOG,

    author="Sam Nicholls",
    author_email="sam@samnicholls.net",
    maintainer="Sam Nicholls",
    maintainer_email="sam@samnicholls.net",

    packages=setuptools.find_packages(),
    include_package_data=True,
    install_requires=requirements,

    entry_points = {
        "console_scripts": ["goldilocks=goldilocks.cmd:main"]
    },

    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'License :: OSI Approved :: MIT License',
    ],

    test_suite='tests',
    tests_require=test_requirements
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
900,
37623,
10141,
198,
198,
15675,
11682,
796,
1280,
10786,
15675,
11682,
13,
81,
301,
27691,
961,
3419,
1... | 2.487896 | 537 |
ascii_art = r"""
_ _ _ __
(_)_ __ ___| |_ __ _(_)_ __ / _| ___
| | '_ \/ __| __/ _` | | '_ \| |_ / _ \
| | | | \__ \ || (_| | | | | | _| (_) |
|_|_| |_|___/\__\__,_|_|_| |_|_| \___/
Author : Waseem Akram (Hacker wasii)
"""
| [
292,
979,
72,
62,
433,
796,
374,
37811,
198,
4808,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4808,
220,
220,
220,
220,
220,
220,
220,
4808,
220,
220,
220,
220,
220,
220,
220,
11593,
198,
28264,
8,
62,
11593,
220,
46444,
... | 1.6375 | 160 |
""" Hash wordpress files through FTP server """
import hashlib, os
def md5(fname, r_mode='rb'):
    """Compute the MD5 hex digest of the file *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, r_mode) as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest.hexdigest()
def ftp_file_hash(con):
    """Hash every .php file reachable through the FTP connection *con*.

    Assumes *con*'s current working directory is the wordpress root.
    Returns a dict mapping normalized relative path to
    ``{'hash': <md5 hexdigest>, 'path': <path>}``.
    """
    def _remote_md5(remote_path):
        """MD5 hexdigest of one remote file, read in 4 KiB chunks."""
        digest = hashlib.md5()
        with con.open(remote_path, 'rb') as remote_file:
            chunk = remote_file.read(4096)
            while chunk:
                digest.update(chunk)
                chunk = remote_file.read(4096)
        return digest.hexdigest()

    hashes = {}
    for root, dirs, files in con.walk('.'):
        for name in files:
            if not name.endswith('.php'):
                continue
            path = os.path.normpath(con.path.join(root, name))
            hashes[path] = {'hash': _remote_md5(path), 'path': path}
    return hashes
def clean_file_hash(dpath):
    """Hash every .php file of a clean WordPress tree rooted at *dpath*.

    Assumes the given directory already exists (e.g. under 'wp-files').
    Returns a dict mapping the path *relative to dpath* to
    ``{'hash': <md5 hexdigest>, 'path': <absolute/original path>}``.
    """
    hash_dict = {}
    for root, dirs, files in os.walk(dpath):
        for x in files:
            if x.endswith('.php'):
                path = os.path.join(root, x)
                # os.path.relpath replaces the original hand-rolled
                # path.split('\\') logic, which only worked on Windows;
                # the resulting keys are identical there and now also
                # correct on POSIX systems.
                rel_path = os.path.relpath(path, dpath)
                hash_dict[rel_path] = {'hash': md5(path), 'path': path}
    return hash_dict
# Manual-testing scaffolding: the commented-out snippets below were used to
# dump the hash dictionaries to JSON files for inspection. Nothing executes
# here besides the no-op string expression.
if __name__ == '__main__':
    # ftp_hash = ftp_file_hash()
    """ Testing Puporses """
    # output_path = os.path.dirname(os.path.realpath(__file__)) + '\\output'
    # with open(output_path + '\\ftp_file_hash.json', 'w') as jsonfile:
    #     json_output = json.dumps(ftp_hash, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ': '))
    #     jsonfile.write(json_output)

    # dir_path = os.path.dirname(os.path.realpath(__file__))
    # output_path = dir_path + '\\output'
    # wp_files_path = dir_path + '\\wp-files'
    # ver = 4.8
    # clean_wp_path = '{}\\{}\\wordpress'.format(wp_files_path, ver)
    # ftp_diff = clean_file_hash(clean_wp_path)
    # with open(output_path + '\\file-diff-ftp.json', 'w') as jsonfile:
    #     json_output = json.dumps(ftp_diff, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ': '))
    #     jsonfile.write(json_output)
| [
37811,
21059,
1573,
8439,
3696,
832,
45854,
4382,
37227,
198,
11748,
12234,
8019,
11,
28686,
198,
198,
4299,
45243,
20,
7,
69,
3672,
11,
374,
62,
14171,
11639,
26145,
6,
2599,
198,
220,
220,
220,
37227,
16409,
257,
45243,
20,
12234,
4... | 2.228996 | 1,345 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# connect to mediawiki server and export all pages from a category to PDF files
#
# setup to use this script
# sudo pip3 install mwclient
# sudo apt-get install wkhtmltopdf
# WARN: wkhtmltopdf need an X server running, so if need, we use xvfb (virtual X server)
# sudo apt-get install xvfb
# wrap call of wkhtmltopdf to add xvfb
# cat <<EOF | sudo tee /usr/bin/wkhtmltopdf.sh
# #!/bin/bash
# xvfb-run -a --server-args="-screen 0, 1024x768x24" /usr/bin/wkhtmltopdf -q $*
# EOF
# sudo chmod a+x /usr/bin/wkhtmltopdf.sh
# sudo ln -s /usr/bin/wkhtmltopdf.sh /usr/local/bin/wkhtmltopdf
# test with:
# wkhtmltopdf http://www.google.com google.pdf
import mwclient
import subprocess
from urllib import parse
# Wiki host/path are defined once so the API connection and the page URLs
# cannot drift apart (they were previously hard-coded in two places).
WIKI_HOST = "163.111.168.19"
WIKI_PATH = "/mediawiki/"

site = mwclient.Site(("http", WIKI_HOST), path=WIKI_PATH)

for page in site.Categories["RTU"]:
    # build pdf: render the wiki page with wkhtmltopdf
    url = "http://%s%sindex.php?title=%s" % (WIKI_HOST, WIKI_PATH, parse.quote(page.name))
    pdf_file = "%s.pdf" % page.name
    pdf_build_status = subprocess.call(["wkhtmltopdf", url, pdf_file])
    # print build status (wkhtmltopdf exits 0 on success)
    if pdf_build_status == 0:
        print("render of %s to %s OK" % (url, pdf_file))
    else:
        print("render of %s to %s error" % (url, pdf_file))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2018,
284,
2056,
15466,
4382,
290,
10784,
477,
5468,
422,
257,
6536,
284,
12960,
3696,
198,
2,
198,
2,
... | 2.420952 | 525 |
import os
import win32com.client
from .. import hecrasgeometry
from .. import get_supported_versions
from ..runtime import Runtime
def HECRASController(ras_version=None):
    """Factory returning a RASController for the requested HEC-RAS version.

    If ras_version is None it is read from the RAS_CONTROLLER_VERSION
    environment variable; an explicitly passed version must be one of
    get_supported_versions(), otherwise an Exception is raised.

    NOTE(review): the environment-variable path bypasses the supported-version
    check -- confirm whether that is intentional.
    """
    if ras_version is None:
        ras_version = os.environ['RAS_CONTROLLER_VERSION']
    elif ras_version not in get_supported_versions():
        error = 'ras_version "{}" not supported.'.format(ras_version)
        raise Exception(error)
    # NOTE(review): level=-1 is the Python 2 "try relative then absolute"
    # import flag; Python 3's __import__ raises ValueError for negative
    # levels. Confirm which interpreter this module targets.
    ras = __import__(ras_version.lower(), globals(), locals(), [], -1)
    class RASController(ras.Controller, ras.ControllerDeprecated):
        """Version-specific controller combining the current and
        deprecated API surfaces of the imported module."""
        def runtime(self):
            """Return the Runtime associated with this controller."""
            return self._runtime
    return RASController(ras_version)
| [
11748,
28686,
198,
198,
11748,
1592,
2624,
785,
13,
16366,
198,
198,
6738,
11485,
1330,
339,
6098,
292,
469,
15748,
198,
6738,
11485,
1330,
651,
62,
15999,
62,
47178,
198,
6738,
11485,
43282,
1330,
43160,
628,
198,
4299,
367,
2943,
49,
... | 2.583333 | 276 |
import itertools
import numpy as np
import matplotlib.pyplot as plt
from soops.base import output
from soops.parsing import parse_as_dict
def get_row_style_used(row, selected, compares, styles, used, **plot_kwargs):
    """
    Convenience wrapper combining :func:`get_row_style()` and
    :func:`update_used()`: resolve the plot style for *row* and, when the
    row matched, record the match indices in *used*.
    """
    style_kwargs, indices = get_row_style(row, selected, compares, styles,
                                          **plot_kwargs)
    if indices is None:
        return style_kwargs, None, used
    return style_kwargs, indices, update_used(used, indices)
| [
11748,
340,
861,
10141,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
523,
2840,
13,
8692,
1330,
5072,
198,
6738,
523,
2840,
13,
79,
945,
278,
1330,
21136,
62,
... | 2.640394 | 203 |
"""This module defines LoopPulseTemplate, a higher-order hierarchical pulse template that loops
another PulseTemplate based on a condition."""
from typing import Dict, Set, Optional, Any, Union, Tuple, Generator, Sequence, cast
import warnings
import sympy
from cached_property import cached_property
from qupulse.serialization import Serializer, PulseRegistryType
from qupulse._program._loop import Loop
from qupulse.expressions import ExpressionScalar
from qupulse.utils import checked_int_cast
from qupulse.pulses.parameters import Parameter, ConstantParameter, InvalidParameterNameException, ParameterConstrainer, ParameterNotProvidedException
from qupulse.pulses.pulse_template import PulseTemplate, ChannelID
from qupulse.pulses.conditions import Condition, ConditionMissingException
from qupulse._program.instructions import InstructionBlock
from qupulse.pulses.sequencing import Sequencer
from qupulse._program.waveforms import SequenceWaveform as ForLoopWaveform
from qupulse.pulses.measurement import MeasurementDefiner, MeasurementDeclaration
__all__ = ['ForLoopPulseTemplate', 'LoopPulseTemplate', 'LoopIndexNotUsedException']
class LoopPulseTemplate(PulseTemplate):
    """Base class for loop based pulse templates. This class is still abstract and cannot be instantiated."""
    # NOTE(review): the four bare @property decorators below have no function
    # bodies attached -- the property getters appear to have been stripped
    # from this copy of the file, which leaves the decorators applying to
    # whatever definition follows. Restore the getters from the upstream
    # qupulse.pulses.loop_pulse_template module before using this class.
    @property
    @property
    @property
    @property
class ParametrizedRange:
    """Like the builtin python range but with parameters."""
    def __init__(self, *args, **kwargs):
        """Positional and keyword arguments cannot be mixed.
        Args:
            *args: Interpreted as ``(start, )`` or ``(start, stop[, step])``
            **kwargs: Expected to contain ``start``, ``stop`` and ``step``
        Raises:
            TypeError: If positional and keyword arguments are mixed
            KeyError: If keyword arguments but one of ``start``, ``stop`` or ``step`` is missing
        """
        if args and kwargs:
            raise TypeError('ParametrizedRange only takes either positional or keyword arguments')
        elif kwargs:
            # All three keys are required; missing ones raise KeyError.
            start = kwargs['start']
            stop = kwargs['stop']
            step = kwargs['step']
        elif len(args) in (1, 2, 3):
            if len(args) == 3:
                start, stop, step = args
            elif len(args) == 2:
                # (start, stop) given; step defaults to 1
                (start, stop), step = args, 1
            elif len(args) == 1:
                # only stop given; start defaults to 0, step to 1
                start, (stop,), step = 0, args, 1
        else:
            # reached for 0 positional args or more than 3
            raise TypeError('ParametrizedRange expected 1 to 3 arguments, got {}'.format(len(args)))
        # Bounds are stored as symbolic expressions so they may contain
        # pulse parameters instead of plain integers.
        self.start = ExpressionScalar.make(start)
        self.stop = ExpressionScalar.make(stop)
        self.step = ExpressionScalar.make(step)

    def to_tuple(self) -> Tuple[Any, Any, Any]:
        """Return a simple representation of the range which is useful for comparison and serialization"""
        return (self.start.get_serialization_data(),
                self.stop.get_serialization_data(),
                self.step.get_serialization_data())
@property
class ForLoopPulseTemplate(LoopPulseTemplate, MeasurementDefiner, ParameterConstrainer):
"""This pulse template allows looping through an parametrized integer range and provides the loop index as a
parameter to the body. If you do not need the index in the pulse template, consider using
:class:`~qupulse.pulses.repetition_pulse_template.RepetitionPulseTemplate`"""
    def __init__(self,
                 body: PulseTemplate,
                 loop_index: str,
                 loop_range: Union[int,
                                   range,
                                   str,
                                   Tuple[Any, Any],
                                   Tuple[Any, Any, Any],
                                   ParametrizedRange],
                 identifier: Optional[str]=None,
                 *,
                 measurements: Optional[Sequence[MeasurementDeclaration]]=None,
                 parameter_constraints: Optional[Sequence]=None,
                 registry: PulseRegistryType=None) -> None:
        """
        Args:
            body: The loop body. It is expected to have `loop_index` as a parameter
            loop_index: Loop index of the for loop
            loop_range: Range to loop through
            identifier: Used for serialization
            measurements: Measurement declarations attached to this template
            parameter_constraints: Constraints on this template's parameters
            registry: Registry the new template registers itself with
        Raises:
            InvalidParameterNameException: If `loop_index` is not a valid identifier
            LoopIndexNotUsedException: If `loop_index` is not a parameter of `body`
            ValueError: If `loop_range` has an unsupported type
        """
        # Initialize each base explicitly; this class mixes in measurement
        # and parameter-constraint behaviour besides the loop base class.
        LoopPulseTemplate.__init__(self, body=body, identifier=identifier)
        MeasurementDefiner.__init__(self, measurements=measurements)
        ParameterConstrainer.__init__(self, parameter_constraints=parameter_constraints)
        # Normalize every accepted loop_range spelling into a ParametrizedRange.
        if isinstance(loop_range, ParametrizedRange):
            self._loop_range = loop_range
        elif isinstance(loop_range, (int, str)):
            self._loop_range = ParametrizedRange(loop_range)
        elif isinstance(loop_range, (tuple, list)):
            self._loop_range = ParametrizedRange(*loop_range)
        elif isinstance(loop_range, range):
            self._loop_range = ParametrizedRange(start=loop_range.start,
                                                 stop=loop_range.stop,
                                                 step=loop_range.step)
        else:
            raise ValueError('loop_range is not valid')
        # The loop index is exposed as a parameter name, so it must be a valid
        # identifier and must actually be used by the body.
        if not loop_index.isidentifier():
            raise InvalidParameterNameException(loop_index)
        body_parameters = self.body.parameter_names
        if loop_index not in body_parameters:
            raise LoopIndexNotUsedException(loop_index, body_parameters)
        self._loop_index = loop_index
        # A constraint naming the loop index does not constrain the index itself
        # but introduces a new outer parameter -- warn loudly about it.
        if self.loop_index in self.constrained_parameters:
            constraints = [str(constraint) for constraint in self.parameter_constraints
                           if self._loop_index in constraint.affected_parameters]
            warnings.warn("ForLoopPulseTemplate was created with a constraint on a variable shadowing the loop index.\n" \
                          "This will not constrain the actual loop index but introduce a new parameter.\n" \
                          "To constrain the loop index, put the constraint in the body subtemplate.\n" \
                          "Loop index is {} and offending constraints are: {}".format(self._loop_index, constraints))
        # Registration happens last, after all validation has passed.
        self._register(registry=registry)
@property
@property
@property
@cached_property
@property
@classmethod
@property
class WhileLoopPulseTemplate(LoopPulseTemplate):
"""Conditional looping in a pulse.
A LoopPulseTemplate is a PulseTemplate whose body is repeated
during execution as long as a certain condition holds.
"""
def __init__(self, condition: str,
body: PulseTemplate,
identifier: Optional[str]=None,
registry: PulseRegistryType=None) -> None:
"""Create a new LoopPulseTemplate instance.
Args:
condition (str): A unique identifier for the looping condition. Will be used to obtain
the Condition object from the mapping passed in during the sequencing process.
body (PulseTemplate): The PulseTemplate which will be repeated as long as the condition
holds.
identifier (str): A unique identifier for use in serialization. (optional)
"""
super().__init__(body=body, identifier=identifier)
self._condition = condition
self._register(registry=registry)
    @property
    def condition(self) -> str:
        """Identifier of the looping condition; used during sequencing to look
        up the Condition object that controls this loop."""
        return self._condition
@property
@property
@classmethod
@property
| [
37811,
1212,
8265,
15738,
26304,
47,
9615,
30800,
11,
257,
2440,
12,
2875,
38958,
19445,
11055,
326,
23607,
198,
29214,
25062,
30800,
1912,
319,
257,
4006,
526,
15931,
628,
198,
6738,
19720,
1330,
360,
713,
11,
5345,
11,
32233,
11,
4377... | 2.456958 | 3,090 |
from pymongo import MongoClient
import yfinance as yf #pip install yfinance
import os
from datetime import datetime, timedelta
import glob
import shutil
import time
import csv
import argparse
import pandas as pd
ARGS, UNKNOWN = parse_args()
def merge_daily_df(new_df, old_df):
    """Merge a freshly downloaded YFinance daily dataframe with the stored copy.

    For dates present in both frames, the row with the highest
    ``audit_load_epoch`` (i.e. the most recently loaded one) wins.

    Args:
        new_df (pandas dataframe): DF containing YFinance ticker info (new read),
            indexed by date.
        old_df (pandas dataframe): DF containing the stored MongoDB ticker info
            (old read). NOTE: mutated in place -- its Mongo ``_id`` column is
            dropped.

    Returns:
        pandas dataframe: All rows from ``new_df`` merged with ``old_df``,
        deduplicated by date and indexed by the ``date`` column.
    """
    # Delete the synthetic '_id' column that MongoDB adds on read.
    del old_df['_id']

    # Promote the date index to a regular column so both frames share a layout.
    index_name = new_df.index.names[0]
    new_df[index_name] = new_df.index
    new_df.reset_index(drop=True, inplace=True)

    # DataFrame.append was removed in pandas 2.0 -- use pd.concat instead.
    # New rows come first; the dedup below prefers the freshest load anyway.
    new_df = pd.concat([new_df, old_df], ignore_index=True)
    new_df['date'] = new_df['date'].apply(date_to_string)

    # Keep only the most recently loaded row for each date.
    new_df = new_df.sort_values(by='audit_load_epoch', ascending=False)
    new_df = new_df.drop_duplicates(subset='date', keep='first')
    new_df = new_df.sort_values(by='date')

    # Restore 'date' as the index and drop the helper column.
    new_df.index.name = 'date'
    new_df.index = new_df['date']
    del new_df['date']
    return new_df
main()
| [
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
11748,
331,
69,
14149,
355,
331,
69,
1303,
79,
541,
2721,
331,
69,
14149,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
15095,
198,
11748,
4423... | 2.473862 | 593 |
import functools
__all__ = ["ProgressBar"]
| [
11748,
1257,
310,
10141,
198,
198,
834,
439,
834,
796,
14631,
32577,
10374,
8973,
628
] | 3 | 15 |
from bs4 import BeautifulSoup
from manga import Manga
import requests
import re
# Catalogue index page of the Star Comics site.
URL = "https://www.starcomics.com/catalogo-fumetti"
# Fetch the page and keep only its <tbody>, which holds the catalogue entries.
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser").find("tbody")
# This function format scraped elements
# This function adds manga to list
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
15911,
1330,
27024,
198,
11748,
7007,
198,
11748,
302,
198,
198,
21886,
796,
366,
5450,
1378,
2503,
13,
7364,
785,
873,
13,
785,
14,
9246,
11794,
78,
12,
69,
388,
24851,
1,
198,
... | 3.271739 | 92 |
import mypingsweeper as script
| [
11748,
616,
79,
654,
732,
5723,
355,
4226,
198
] | 3.444444 | 9 |
from . import social
from app import db
from app.modules.base.base_handler import BaseHandler
from app.models.social.image import ImageModel
from app.models.social.video import VideoModel
from app.helper.auth import login_required
from app.helper.upload import UploadImage, UploadVideo
from app.helper.response import json_success_response, json_fail_response
social.add_url_rule("/video/uploadchatvideo", view_func=UploadChatVideoHandler.as_view("video_upload_chat_video"))
| [
6738,
764,
1330,
1919,
198,
6738,
598,
1330,
20613,
198,
6738,
598,
13,
18170,
13,
8692,
13,
8692,
62,
30281,
1330,
7308,
25060,
198,
6738,
598,
13,
27530,
13,
14557,
13,
9060,
1330,
7412,
17633,
198,
6738,
598,
13,
27530,
13,
14557,
... | 3.593985 | 133 |
import numpy
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.linear_model import LinearRegression
if __name__ == '__main__':
    # Script entry point; `two` is presumably defined earlier in this file
    # (elided from this excerpt) -- confirm.
    two()
11748,
299,
32152,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1855,
11518,
3351,
36213,
11,
8997,
3351,
36213,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
... | 3.245902 | 61 |
from .softmax import SoftmaxHead, AmSoftmaxHead
from .arc import ArcHead
from .cos import CosHead
from .sphere import SphereHead
__all__ = ['build_head']
# Registry mapping a head name (as referenced in configuration) to the class
# implementing it; presumably consumed by the build_head factory declared in
# __all__ -- its definition is elided from this excerpt.
_head_factory = {
    'softmax': SoftmaxHead,
    'am_softmax': AmSoftmaxHead,
    'arc': ArcHead,
    'cos': CosHead,
    'sphere': SphereHead,
}
| [
628,
198,
6738,
764,
4215,
9806,
1330,
8297,
9806,
13847,
11,
1703,
18380,
9806,
13847,
198,
6738,
764,
5605,
1330,
10173,
13847,
198,
6738,
764,
6966,
1330,
10437,
13847,
198,
6738,
764,
2777,
1456,
1330,
31798,
13847,
628,
198,
834,
4... | 2.610169 | 118 |
import cv2, ctypes, logging, os, numpy as np, pickle
from numpy import ma
from collections import OrderedDict
from skimage.morphology import binary_closing, disk
import scipy, skfmm
import matplotlib.pyplot as plt
| [
11748,
269,
85,
17,
11,
269,
19199,
11,
18931,
11,
28686,
11,
299,
32152,
355,
45941,
11,
2298,
293,
198,
6738,
299,
32152,
1330,
17266,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
1341,
9060,
13,
24503,
1435,
1330,
13934... | 3.147059 | 68 |
import argparse
import copy
import json
import random
from functools import partial
from pathlib import Path
import torch
import torch.nn as nn
import torchaudio
from torch import Tensor
from utils.wav2mel import Wav2Mel
# attack_and_save function modified from attack.py
if __name__ == "__main__":
    # Use the sox backend for torchaudio file I/O.
    torchaudio.set_audio_backend("sox_io")
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=Path, default="../vctk_test_vad")
    parser.add_argument("--save_dir", type=Path)
    parser.add_argument("--model_path", type=Path)
    parser.add_argument("--metadata_path", type=Path)
    # NOTE(review): eps is presumably the adversarial perturbation budget and
    # n_steps the number of attack iterations (cf. attack.py note above) -- confirm.
    parser.add_argument("--eps", type=float, default=0.005)
    parser.add_argument("--n_steps", type=int, default=1000)
    main(**vars(parser.parse_args()))
| [
11748,
1822,
29572,
198,
11748,
4866,
198,
11748,
33918,
198,
11748,
4738,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034... | 2.85348 | 273 |
import cv2
import argparse
import time
import os
from torch import Tensor
from datasets.dataset_utils import get_class_labels
from opts import parse_model
from spatial_transforms import *
from online.online_classifier import OnlineClassifier
from online.online_utils import FPSMeasurer, ImageStreamer
from visualization.stream_utils import *
from visualization.plotters import ResultPlotter
DISPLAY_SCALE = 600
if __name__ == "__main__":
main()
| [
11748,
269,
85,
17,
198,
11748,
1822,
29572,
198,
11748,
640,
198,
11748,
28686,
198,
6738,
28034,
1330,
309,
22854,
198,
198,
6738,
40522,
13,
19608,
292,
316,
62,
26791,
1330,
651,
62,
4871,
62,
23912,
1424,
198,
6738,
2172,
82,
133... | 3.595238 | 126 |
# coding=utf-8
# NOTE(review): target string the elided code below presumably tries to
# produce/match -- confirm against the rest of the file.
GOAL = 'iabcabc'
| [
2,
19617,
28,
40477,
12,
23,
628,
198,
198,
11230,
1847,
796,
705,
72,
39305,
39305,
6,
628,
198
] | 1.947368 | 19 |
"""
Writing 1/2 as a sum of inverse squares
""" | [
37811,
198,
33874,
352,
14,
17,
355,
257,
2160,
286,
34062,
24438,
198,
37811
] | 3.357143 | 14 |
# =============================================================================
# url_test
#
# Copyright (c) 2014, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import condoor
import sys
import pytest
import logging
logging.basicConfig(
format='%(asctime)-15s %(levelname)8s: %(message)s',
level=logging.DEBUG)
from condoor.exceptions import ConnectionError, ConnectionAuthenticationError
@pytest.mark.parametrize("urls", [
(pytest.mark.xfail(['telnet://<user>:<password>@1.1.1.1', 'telnet://<user>:<password>@mercy'], raises=ConnectionError)),
(pytest.mark.xfail(['telnet://<user>:<password>@sj20lab-as1', 'telnet://<user>:<password@1.1.1.1'], raises=ConnectionError)),
(pytest.mark.xfail(['telnet://<user>:wrong_pass@sj20lab-as1', 'telnet://<user>:<password>@mercy'], raises=ConnectionError)),
(pytest.mark.xfail(['telnet://<user>:<password>@sj20lab-as1', 'telnet://<user>:wrong_pass@mercy'], raises=ConnectionError)),
(pytest.mark.xfail(['ssh://wrong_user:<password>@sj20lab-as1', 'telnet://<user>:<password>@mercy'], raises=ConnectionAuthenticationError)),
(pytest.mark.xfail(['ssh://<user>:<password>@1.1.1.1', 'telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032'], raises=ConnectionError)),
(pytest.mark.xfail(['ssh://<user>:<password>@localhost', 'ssh://<user>:<password>@1.1.1.1', 'telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032'], raises=ConnectionError)),
(pytest.mark.xfail(['telnet://<user>:<password>@people', 'telnet://<user>:<password>@1.1.1.1', 'telnet://<user>:<password>@mercy'], raises=ConnectionError)),
(pytest.mark.xfail(['ssh://<user>:<password>@people', 'ssh://<user>:<password>@people', 'telnet://<user>:<passowrd>@1.1.1.1', 'telnet://<user>:<password>@mercy'], raises=ConnectionError)),
(pytest.mark.xfail(['telnet://lab:lab@10.105.226.125', 'telnet://lab:lab@10.105.226.126'], raises=ConnectionError)),
(['ssh://<user>:<password>@sj20lab-as1', 'telnet://<user>:<password>@mercy']),
(['telnet://<user>:<password>@people', 'telnet://<user>:<password>@mercy']),
(['telnet://<user>:<password>@mercy']),
(['ssh://<user>:<password>@localhost', 'telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032']),
(['ssh://<user>:<password>@sweet-brew', 'telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032']),
(['telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032']),
(['ssh://<user>:<password>@localhost', 'ssh://<user>:<password>@localhost', 'telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032']),
(['ssh://<user>:<password>@people', 'telnet://cisco:C1sco123@bdlk1-b05-ts-01:2032']),
# # -- no longer works (['telnet://lab:lab@10.105.226.125:20421']),
(['ssh://lab:lab@gsr-india03-lnx', 'telnet://lab:lab@5.34.16.101']),
(['telnet://lab:lab@10.105.226.125', 'telnet://lab:lab@5.34.16.101']),
(['telnet://lab:lab@10.105.226.125:2065']),
(['ssh://<user>:<password>@sweet-brew-1', 'telnet://lab:lab@10.105.226.125:2065']),
(['telnet://:ww@10.50.2.225', 'telnet://ww:ww@192.168.0.4']),
(['telnet://10.50.2.225', 'telnet://ww:ww@192.168.0.4']),
])
# class TestClass:
# def test_1(self, username, password):
# url1 = mkurl('telnet', username, password, "1.1.1.1")
# url2 = mkurl('telnet', username, password, "mercy")
# with pytest.raises(condoor.exceptions.ConnectionError) as excinfo:
# urls = [ url1, url2 ]
# connection(urls)
#
#
#
# def test_2(self):
# url1 = mkurl('telnet', "", "ww", "10.50.2.225")
# url2 = mkurl('telnet', "ww", "ww", "192.168.0.4")
# connection([url1, url2])
#
# return ["info1: did you know that ...", "did you?"]
| [
2,
38093,
25609,
198,
2,
19016,
62,
9288,
198,
2,
198,
2,
15069,
357,
66,
8,
220,
1946,
11,
28289,
11998,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
1303,
6434,
25,
14770,
3885,
3754,
89,
7299,
494,
74,
198,
2,
198,
2,
229... | 2.542683 | 1,968 |
from ImportFile import *
pi = math.pi  # `math` is expected to come from `from ImportFile import *`
# NOTE(review): T and a are consumed by the elided problem definition
# (presumably a time horizon and a coefficient pair) -- confirm their roles.
T = 10
a = [4, 0]
# Per-dimension bounds of the computational domain: first row [0, 1],
# remaining two rows [-8, 8].
extrema_values = torch.tensor([[0, 1],
                               [-8, 8],
                               [-8, 8]])
# Boundary conditions per axis; ub0/ub1/ub0y/ub1y presumably come from the
# star import of ImportFile above -- TODO confirm.
list_of_BC = [[ub0, ub1], [ub0y, ub1y]]
| [
6738,
17267,
8979,
1330,
1635,
198,
198,
14415,
796,
10688,
13,
14415,
198,
51,
796,
838,
198,
198,
64,
796,
685,
19,
11,
657,
60,
198,
198,
2302,
260,
2611,
62,
27160,
796,
28034,
13,
83,
22854,
26933,
58,
15,
11,
352,
4357,
198,... | 1.582192 | 146 |
#!/usr/bin/env python
import pygsheets
import datetime
import time
import psycopg2
import yaml
# The config file is JSON, but yaml.safe_load parses it fine (YAML is a
# superset of JSON).
with open('postgres_config.json') as config_file:
    config = yaml.safe_load(config_file)

connection = psycopg2.connect(dbname=config['database'], user=config['user'],
                              host=config['host'], password=config['password'])
cursor = connection.cursor()

# Open the hardware-mapping spreadsheet from Google Sheets.
gc = pygsheets.authorize()
sh = gc.open('PowerWatch Devices - Deployment Table Hardware Mapping')
wks = sh.sheet1

first = True
for row in wks:
    # Skip the header row.
    if first:
        first = False
        continue

    # Get the information from the sheet columns.
    core_id = row[1]
    shield_id = row[2]
    product_id = row[3]

    # Rows missing either id are incomplete; skip them.
    if core_id == '' or shield_id == '':
        continue

    # Insert it into postgres (parameterized, so values are escaped safely).
    print("Adding core_id: {}, shield_id: {}, product_id: {}".format(core_id, shield_id, product_id))
    cursor.execute('INSERT INTO devices (core_id, shield_id, product_id) VALUES (%s, %s, %s)', (core_id, shield_id, product_id))

connection.commit()
cursor.close()
# Bug fix: the original called cursor.close() twice and never closed the
# connection; close the connection once the cursor is released.
connection.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
12972,
70,
42011,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
331,
43695,
198,
198,
4480,
1280,
10786,
7353,
34239,
62,
11250,
13,
... | 2.662304 | 382 |
# for blog views global varibale calls
from .models import Catagory, Blog
from django.db.models import Count
# src = https://able.bio/rhett/how-to-order-by-count-of-a-foreignkey-field-in-django--26y1ug1
## Information
#.annotate(post_count=Count('blog'))\ -> countin the blog post under each category
#.filter(blog__isnull=False)\ -> Filtering the blog model so that category without post wont display
#.order_by('-post_count')[:5] -> ordering by most post , category with max posts will show first. | [
2,
329,
4130,
5009,
220,
3298,
1401,
571,
1000,
3848,
198,
6738,
764,
27530,
1330,
5181,
363,
652,
11,
14001,
198,
6738,
220,
42625,
14208,
13,
9945,
13,
27530,
1330,
2764,
628,
220,
198,
2,
12351,
796,
3740,
1378,
540,
13,
65,
952,... | 3.103659 | 164 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.proto',
package='yandex.cloud.mdb.elasticsearch.v1.config',
syntax='proto3',
serialized_options=b'\n,yandex.cloud.api.mdb.elasticsearch.v1.configZZgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/elasticsearch/v1/config;elasticsearch',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n<yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.proto\x12(yandex.cloud.mdb.elasticsearch.v1.config\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dyandex/cloud/validation.proto\"k\n\x14\x45lasticsearchConfig7\x12\x35\n\x10max_clause_count\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x1c\n\x14\x66ielddata_cache_size\x18\x04 \x01(\t\"\xa6\x02\n\x17\x45lasticsearchConfigSet7\x12^\n\x10\x65\x66\x66\x65\x63tive_config\x18\x01 \x01(\x0b\x32>.yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7B\x04\xe8\xc7\x31\x01\x12S\n\x0buser_config\x18\x02 \x01(\x0b\x32>.yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7\x12V\n\x0e\x64\x65\x66\x61ult_config\x18\x03 \x01(\x0b\x32>.yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7B\x8a\x01\n,yandex.cloud.api.mdb.elasticsearch.v1.configZZgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/elasticsearch/v1/config;elasticsearchb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,])
_ELASTICSEARCHCONFIG7 = _descriptor.Descriptor(
name='ElasticsearchConfig7',
full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='max_clause_count', full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7.max_clause_count', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fielddata_cache_size', full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7.fielddata_cache_size', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=276,
)
_ELASTICSEARCHCONFIGSET7 = _descriptor.Descriptor(
name='ElasticsearchConfigSet7',
full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfigSet7',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='effective_config', full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfigSet7.effective_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_config', full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfigSet7.user_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_config', full_name='yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfigSet7.default_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=279,
serialized_end=573,
)
_ELASTICSEARCHCONFIG7.fields_by_name['max_clause_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ELASTICSEARCHCONFIGSET7.fields_by_name['effective_config'].message_type = _ELASTICSEARCHCONFIG7
_ELASTICSEARCHCONFIGSET7.fields_by_name['user_config'].message_type = _ELASTICSEARCHCONFIG7
_ELASTICSEARCHCONFIGSET7.fields_by_name['default_config'].message_type = _ELASTICSEARCHCONFIG7
DESCRIPTOR.message_types_by_name['ElasticsearchConfig7'] = _ELASTICSEARCHCONFIG7
DESCRIPTOR.message_types_by_name['ElasticsearchConfigSet7'] = _ELASTICSEARCHCONFIGSET7
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ElasticsearchConfig7 = _reflection.GeneratedProtocolMessageType('ElasticsearchConfig7', (_message.Message,), {
'DESCRIPTOR' : _ELASTICSEARCHCONFIG7,
'__module__' : 'yandex.cloud.mdb.elasticsearch.v1.config.elasticsearch_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7)
})
_sym_db.RegisterMessage(ElasticsearchConfig7)
ElasticsearchConfigSet7 = _reflection.GeneratedProtocolMessageType('ElasticsearchConfigSet7', (_message.Message,), {
'DESCRIPTOR' : _ELASTICSEARCHCONFIGSET7,
'__module__' : 'yandex.cloud.mdb.elasticsearch.v1.config.elasticsearch_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfigSet7)
})
_sym_db.RegisterMessage(ElasticsearchConfigSet7)
DESCRIPTOR._options = None
_ELASTICSEARCHCONFIGSET7.fields_by_name['effective_config']._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
331,
392,
1069,
14,
17721,
14,
9132,
65,
14,
417,
3477,
12947,
14,
85,
... | 2.490453 | 2,828 |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc. All rights reserved
from contextlib import contextmanager
from typing import Generator, List, Optional, cast
from fbmeshd import MeshService
from fbmeshd.ttypes import MpathEntry, PeerMetrics, StatCounter
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket, TTransport
MESH_SERVICE_HOST = "localhost"
MESH_SERVICE_PORT = 30303
MESH_SERVICE_TIMEOUT_MS = 5000
@contextmanager
def mesh_client() -> Generator[MeshService.Client, None, None]:
    """Open a connection to the local MeshService and yield a thrift client.

    The underlying transport is always closed again when the ``with`` block
    exits, whether it finishes normally or raises.
    """
    sock = TSocket.TSocket(MESH_SERVICE_HOST, MESH_SERVICE_PORT)
    sock.setTimeout(MESH_SERVICE_TIMEOUT_MS)
    buffered = TTransport.TBufferedTransport(sock)
    thrift_client = MeshService.Client(TBinaryProtocol.TBinaryProtocol(buffered))
    buffered.open()
    try:
        yield thrift_client
    finally:
        buffered.close()
def peers(interface_name: str) -> List[str]:
    """Return the list of peers currently in the mesh on *interface_name*."""
    with mesh_client() as svc:
        # pyre-fixme[22]: The cast is redundant.
        return cast(List[str], svc.getPeers(interface_name))
def peer_count(interface_name: str) -> int:
    """Return how many peers are currently in the mesh on *interface_name*."""
    with mesh_client() as svc:
        peer_list = svc.getPeers(interface_name)
    return len(peer_list)
def metrics(interface_name: str) -> PeerMetrics:
    """Fetch the link metrics to each peer in the mesh on *interface_name*."""
    with mesh_client() as svc:
        return svc.getMetrics(interface_name)
def dump_mpaths() -> List[MpathEntry]:
    """Return the full mesh-path (mpath) table."""
    with mesh_client() as svc:
        # pyre-fixme[22]: The cast is redundant.
        return cast(List[MpathEntry], svc.dumpMpath())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
2177,
12,
25579,
11,
3203,
11,
3457,
13,
1439,
2489,
10395,
628,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
19720,
1330,
35986,
11,
7343,
11,
3223... | 2.902736 | 658 |
#!/usr/bin/env python
import os
import sys
import time
import math
from struct import unpack
import re
import subprocess
"""
Downloaded from:
http://www.wadsworth.org/spider_doc/spider/proc/spyder.py
Documentation:
http://www.wadsworth.org/spider_doc/spider/docs/scripting2.html
by Neil on Feb 12, 2008
"""
"""
There are 2 streams:
The Python program sends commands to Spider as if they were typed at the
.OPERATION: prompt.
The only information Python gets from Spider are register values, via
an external fifo pipe.
The spider session is started by creating an instance of the SpiderSession
class:
sp = SpiderSession(dataext='dat')
Then you use the instance methods (functions of sp)
- send commands to Spider with sp.toSpider("op", "infile","outfile","args")
- get register values from Spider w/ sp.getreg("[var]")
"""
# --------------------------------------------------------------
if __name__ == '__main__':
sp = SpiderSession(dataext='dat')
sp.toSpider("[size]=117")
sp.getreg('size')
sp.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
10688,
198,
6738,
2878,
1330,
555,
8002,
198,
11748,
302,
198,
11748,
850,
14681,
198,
198,
37811,
198,
10002,
276,
422... | 3.391447 | 304 |
import os
import errno
from threading import RLock
from symsynd.libdebug import is_valid_cpu_name
from symsynd.utils import parse_addr, timedsection
from symsynd.exceptions import SymbolicationError
from symsynd.libsymbolizer import Symbolizer as LowLevelSymbolizer
class Symbolizer(object):
"""The main symbolication driver. This abstracts around a low level
LLVM based symbolizer that works with DWARF files. It's recommended to
explicitly close the driver to ensure memory cleans up timely.
"""
    def symbolize(self, dsym_path, image_vmaddr, image_addr,
                  instruction_addr, cpu_name,
                  symbolize_inlined=False):
        """Symbolizes a single frame based on the information provided. If
        the symbolication fails a `SymbolicationError` is raised.
        `dsym_path` is the path to the dsym file on the file system.
        `image_vmaddr` is the slide of the image. For most situations this
        can just be set to `0`. If it's zero or unset we will attempt to
        find the slide from the dsym file. `image_addr` is the canonical
        image address as loaded. `instruction_addr` is the address where the
        error happened.
        `cpu_name` is the CPU name. It follows general apple conventions and
        is used to special case certain behavior and look up the right
        symbols. Common names are `armv7` and `arm64`.
        Additionally if `symbolize_inlined` is set to `True` then a list of
        frames is returned instead which might contain inlined frames. In
        that case the return value might be an empty list instead.
        """
        # _closed, _symbolizer and _lock are set up in the constructor (not
        # part of this excerpt).
        if self._closed:
            raise RuntimeError('Symbolizer is closed')
        dsym_path = normalize_dsym_path(dsym_path)
        image_vmaddr = parse_addr(image_vmaddr)
        if not image_vmaddr:
            # No slide supplied (or zero): recover it from the dsym's debug
            # info for the requested CPU variant, as described above.
            di = self._symbolizer.get_debug_info(dsym_path)
            if di is not None:
                variant = di.get_variant(cpu_name)
                if variant is not None:
                    image_vmaddr = variant.vmaddr
        image_addr = parse_addr(image_addr)
        instruction_addr = parse_addr(instruction_addr)
        if not is_valid_cpu_name(cpu_name):
            raise SymbolicationError('"%s" is not a valid cpu name' % cpu_name)
        # Rebase the runtime instruction address into the image's vm address
        # space: slide + (runtime address - load address).
        addr = image_vmaddr + instruction_addr - image_addr
        with self._lock:
            # NOTE(review): the lock presumably serializes access to the
            # non-reentrant low-level LLVM symbolizer -- confirm.
            with timedsection('symbolize'):
                if symbolize_inlined:
                    return self._symbolizer.symbolize_inlined(
                        dsym_path, addr, cpu_name)
                return self._symbolizer.symbolize(
                    dsym_path, addr, cpu_name)
| [
11748,
28686,
198,
11748,
11454,
3919,
198,
6738,
4704,
278,
1330,
371,
25392,
198,
198,
6738,
827,
907,
88,
358,
13,
8019,
24442,
1330,
318,
62,
12102,
62,
36166,
62,
3672,
198,
6738,
827,
907,
88,
358,
13,
26791,
1330,
21136,
62,
... | 2.47156 | 1,090 |
# Input/output locations for the SRP007412 single-end RNA-seq run
# (Pan paniscus / bonobo).
RAWDATA_DIR = '/staging/as/skchoudh/rna-seq-datasets/single/pan_paniscus/SRP007412'
OUT_DIR = '/staging/as/skchoudh/rna-seq-output/pan_paniscus/SRP007412'
# Gzipped cDNA FASTA reference and the kallisto index built from it.
CDNA_FA_GZ = '/home/cmb-panasas2/skchoudh/genomes/pan_paniscus/cdna/Pan_paniscus.panpan1.1.cdna.all.fa.gz'
CDNA_IDX = '/home/cmb-panasas2/skchoudh/genomes/pan_paniscus/cdna/Pan_paniscus.panpan1.1.cdna.all.kallisto.index'
| [
198,
20530,
26947,
62,
34720,
796,
31051,
301,
3039,
14,
292,
14,
8135,
354,
2778,
71,
14,
81,
2616,
12,
41068,
12,
19608,
292,
1039,
14,
29762,
14,
6839,
62,
6839,
2304,
385,
14,
12562,
47,
405,
4524,
1065,
6,
198,
12425,
62,
347... | 1.915423 | 201 |
from typing import Iterable, List
from di.core.assignment import (
DirectMatcher,
TypeAggregationMatcher,
TypeIterableMatcher,
TypeMatcher,
)
from di.core.element import Dependency, Element, Value
| [
6738,
19720,
1330,
40806,
540,
11,
7343,
198,
198,
6738,
2566,
13,
7295,
13,
562,
16747,
1330,
357,
198,
220,
220,
220,
4128,
19044,
2044,
11,
198,
220,
220,
220,
5994,
46384,
43068,
19044,
2044,
11,
198,
220,
220,
220,
5994,
29993,
... | 2.973333 | 75 |
import getpass
import random
import readline
import socket
import string
import sys
from datetime import datetime
from signal import SIG_DFL, SIGPIPE, signal
from typing import Any, Dict, Union
from ..api import Novem401, NovemAPI
from ..utils import cl, colors, get_current_config
from ..version import __version__
from .config import check_if_profile_exists, update_config
from .plot import plot
from .setup import setup
def init_config(args: Union[Dict[str, str], None] = None) -> None:
    """
    Interactively initialize the novem user configuration.

    Prompts for a novem.no username and password, creates a machine-specific
    API token and writes it to the configuration file via do_update_config.

    The --init flag has been supplied, this means we are going to update the
    configuration file. There are several different scenarios we have to adapt
    to in this case:

    * a simple --init with no options and missing config file
      * request username and password from user
      * authenticate against service
      * create config file and store credentials if successful

    * a simple --init with no options and existing config file
      * check if config has a default user
      * if default user exists in config, inform user that config
        already exists and terminate

    * a simple --init with --profile <username> and missing config file
      * request username and password from user,
        with username prefilled as --profile
      * authenticate against service
      * create config file and store credentials if successful

    Relevant flags: --init, --profile, --token, --api-url, --force.

    :param args: parsed CLI options; keys read here are "token", "api-url",
        "profile", "force" and "config_path".
        NOTE(review): "force" is annotated ``str`` below but used as a
        boolean — confirm the actual type supplied by the caller.
    """

    if not args:
        # make mypy happy, return if no argument supplied
        return

    # NOTE(review): a token supplied via --token is stored here but is
    # unconditionally overwritten by the create_token() response below —
    # presumably the "use existing token" flow lives elsewhere; confirm.
    token: Union[str, None] = None
    if "token" in args:
        token = args["token"]

    api_root: str = args["api-url"]
    profile: str = args["profile"]
    force: str = args["force"]
    config_path: str = args["config_path"]

    # first check if we have a valid config
    profile_exists: bool = check_if_profile_exists(profile, config_path)

    # Refuse to clobber an existing profile unless --force was given.
    if profile_exists and not force:
        print(
            f"{cl.WARNING} ! {cl.ENDC}"
            f' The supplied profile "{cl.OKCYAN}{profile}{cl.ENDC}" already '
            f"exist, use --force to override"
        )
        sys.exit(1)

    # Characters permitted in a token name: lowercase alphanumerics plus "-_".
    valid_char_sm = string.ascii_lowercase + string.digits
    valid_char = valid_char_sm + "-_"

    hostname: str = socket.gethostname()

    # NOTE(review): token_name is always None at this point, so the branch
    # below always runs; it looks prepared for a user-supplied token name.
    token_name: Union[str, None] = None

    if not token_name:
        # Derive a token name from the sanitised hostname plus a random
        # 8-character suffix, e.g. "novem-python-myhost-a1b2c3d4".
        token_hostname: str = "".join(
            [x for x in hostname.lower() if x in valid_char]
        )
        nounce: str = "".join(random.choice(valid_char_sm) for _ in range(8))
        token_name = f"novem-python-{token_hostname}-{nounce}"

    # Strip any characters that are not valid in a token name.
    new_token_name = "".join([x for x in token_name if x in valid_char])

    if token_name != new_token_name:
        print(
            f"{cl.WARNING} ! {cl.ENDC}"
            "The supplied token name contained invalid charracters,"
            f' token changed to "{cl.OKCYAN}{new_token_name}{cl.ENDC}"'
        )
        token_name = new_token_name

    # get novem username, prefilled with --profile when supplied
    prefill = ""
    if "profile" in args:
        prefill = args["profile"]

    username = input_with_prefill(" \u2022 novem.no username: ", prefill)

    # get novem password
    password = getpass.getpass(" \u2022 novem.no password: ")

    # authenticate and request token by name
    req = {
        "username": username,
        "password": password,
        "token_name": token_name,
        "token_description": (
            f'cli token created for "{hostname}" '
            f'on "{datetime.now():%Y-%m-%d:%H:%M:%S}"'
        ),
    }

    # Resolve the API root: the explicit flag wins, then an existing config
    # file, and finally the public default endpoint.
    if not api_root:
        (hasconf, curconf) = get_current_config(**args)
        # if our config exist, try to read it from there
        if not hasconf:
            api_root = "https://api.novem.no/v1/"
        else:
            api_root = curconf["api_root"]

    # let's grab our token
    novem = NovemAPI(
        api_root=api_root,
        ignore_config=True,
    )

    try:
        res = novem.create_token(req)
        token = res["token"]
        token_name = res["token_name"]
    except Novem401:
        # Authentication failed; exit non-zero without writing any config.
        print("Invalid username and/or password")
        sys.exit(1)

    # default profile to username if not supplied
    if not profile:
        profile = username

    # let's write our config
    do_update_config(
        profile, username, api_root, token_name, token, config_path
    )
__all__ = ["run_cli"]
| [
11748,
651,
6603,
198,
11748,
4738,
198,
11748,
1100,
1370,
198,
11748,
17802,
198,
11748,
4731,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
6737,
1330,
33993,
62,
35,
3697,
11,
33993,
47,
4061,
36,
11,
6737,
... | 2.542653 | 1,817 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
from __future__ import print_function
import sys
import os
import glob
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../metadata/utils"))
from exp import Exp
from exp_file import ExpFile
# Script entry point: exit with main()'s return code.
# NOTE(review): main() is not defined in this excerpt — presumably defined
# elsewhere in the original file; confirm it exists before running.
if __name__ == "__main__":
    sys.exit(main())
| [
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
2,
15069,
357,
66,
8,
1584,
12,
42334,
3899,
9330,
7718,
78,
11,
8616,
34780,
11,
22772,
8877,
11,
1168,
1056,
278,
370,
1516,
198,
198,
6738,
11593,
37443,
834,
1330,
... | 2.932836 | 134 |
# python imports
import locale
# django imports
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
# lfs imports
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.core.models import Shop
from lfs.core.signals import order_submitted
from lfs.criteria import utils as criteria_utils
from lfs.customer import utils as customer_utils
from lfs.payment.models import PaymentMethod
from lfs.payment.settings import PAYPAL
from lfs.payment.settings import PM_ORDER_IMMEDIATELY
from lfs.payment.settings import PM_ORDER_ACCEPTED
# other imports
from paypal.standard.conf import POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
def update_to_valid_payment_method(request, customer, save=False):
    """
    Ensures the given customer ends up with a valid (selectable) payment
    method after this call. If the currently selected method is no longer
    selectable it is replaced by the default one, optionally persisting
    the customer.
    """
    selectable = get_valid_payment_methods(request)
    if customer.selected_payment_method in selectable:
        return
    customer.selected_payment_method = get_default_payment_method(request)
    if save:
        customer.save()
def get_valid_payment_methods(request):
    """
    Returns the list of all payment methods that are valid (aka. selectable)
    for the given request.
    """
    return [
        method
        for method in PaymentMethod.objects.filter(active=True)
        if criteria_utils.is_valid(request, method)
    ]
def get_default_payment_method(request):
    """
    Returns the default payment method for the given request, i.e. the first
    active payment method whose criteria are met.
    """
    return criteria_utils.get_first_valid(
        request, PaymentMethod.objects.filter(active=True))
def get_selected_payment_method(request):
    """
    Returns the payment method in effect for the given request: the method
    the customer explicitly selected, falling back to the default payment
    method when there is no customer or no selection.
    """
    customer = customer_utils.get_customer(request)
    selected = customer.selected_payment_method if customer else None
    return selected or get_default_payment_method(request)
def get_payment_costs(request, payment_method):
    """
    Returns a dict with the price and included tax of the given payment
    method for the given request. Without a payment method both are 0.0.
    """
    if payment_method is None:
        return {
            "price": 0.0,
            "tax": 0.0
        }

    # Payment methods without an attached tax object are treated as tax-free.
    try:
        tax_rate = payment_method.tax.rate
    except AttributeError:
        tax_rate = 0.0

    # Criteria-dependent price overrides the method's base price when one
    # matches the request.
    matched = criteria_utils.get_first_valid(
        request, payment_method.prices.all())
    amount = payment_method.price if matched is None else matched.price

    # tax_rate is a percentage; extract the tax share out of the gross price.
    return {
        "price": amount,
        "tax": (tax_rate / (tax_rate + 100)) * amount,
    }
def process_payment(request):
    """
    Processes the payment depending on the selected payment method. Returns a
    dictionary with the success state, the next url and a optional error
    message.
    """
    payment_method = get_selected_payment_method(request)
    # Branch 1: a custom payment processor, given as a dotted path in
    # payment_method.module, handles the payment itself.
    if payment_method.module:
        payment_class = lfs.core.utils.import_symbol(payment_method.module)
        payment_instance = payment_class(request)

        # The processor decides whether the order is created before or after
        # the payment is accepted.
        create_order_time = payment_instance.get_create_order_time()
        if create_order_time == PM_ORDER_IMMEDIATELY:
            # Create the order up front and let the processor work on it.
            order = lfs.order.utils.add_order(request)
            payment_instance.order = order
            result = payment_instance.process()
            if result.get("order_state"):
                order.state = result.get("order_state")
                order.save()
            order_submitted.send({"order": order, "request": request})
        else:
            # Order not created yet: the processor works on the cart instead.
            cart = lfs.cart.utils.get_cart(request)
            payment_instance.cart = cart
            result = payment_instance.process()

            # Only create the order once the payment was accepted (and the
            # processor asked for order creation at acceptance time).
            if result["accepted"]:
                if create_order_time == PM_ORDER_ACCEPTED:
                    order = lfs.order.utils.add_order(request)
                    if result.get("order_state"):
                        order.state = result.get("order_state")
                        order.save()
                    order_submitted.send({"order": order, "request": request})
        return result
    # Branch 2: built-in PayPal handling.
    elif payment_method.id == PAYPAL:
        order = lfs.order.utils.add_order(request)
        if order: # if we have no cart then the order will be None
            order_submitted.send({"order": order, "request": request})
            # Either send the customer straight to PayPal or to the local
            # thank-you page, depending on LFS_PAYPAL_REDIRECT.
            if settings.LFS_PAYPAL_REDIRECT:
                return {
                    "accepted": True,
                    "next_url": order.get_pay_link(request),
                }
        return {
            "accepted": True,
            "next_url": reverse("lfs_thank_you"),
        }
    # Branch 3: simple payment methods (no module, not PayPal) — just create
    # the order and show the thank-you page.
    else:
        order = lfs.order.utils.add_order(request)
        order_submitted.send({"order": order, "request": request})
        return {
            "accepted": True,
            "next_url": reverse("lfs_thank_you"),
        }
def get_pay_link(request, payment_method, order):
    """
    Creates a pay link for the passed payment_method and order, e.g. to be
    displayed within the order mail and/or on the thank you page after a
    customer has payed.
    """
    if payment_method.id == PAYPAL:
        return get_paypal_link_for_order(order)

    if not payment_method.module:
        return ""

    processor_class = lfs.core.utils.import_symbol(payment_method.module)
    processor = processor_class(request=request, order=order)
    try:
        return processor.get_pay_link()
    except AttributeError:
        # Processors that don't implement get_pay_link() simply have no link.
        return ""
def get_paypal_link_for_order(order):
    """
    Builds the PayPal checkout URL for the given order.

    NOTE(review): the query string is assembled without URL-encoding, so
    values containing '&', '=' or spaces (e.g. customer names) would corrupt
    the link — consider urlencode(); confirm nothing depends on the exact
    current format before changing it.
    """
    shop = lfs_get_object_or_404(Shop, pk=1)
    current_site = Site.objects.get(id=settings.SITE_ID)
    # Currency code comes from the process locale, e.g. "USD ".
    default_currency = locale.localeconv()['int_curr_symbol']

    params = {
        "cmd": "_xclick",
        "upload": "1",
        "business": settings.PAYPAL_RECEIVER_EMAIL,
        "currency_code": default_currency,
        "notify_url": "http://" + current_site.domain + reverse('paypal-ipn'),
        "return": "http://" + current_site.domain + reverse('paypal-pdt'),
        "first_name": order.invoice_firstname,
        "last_name": order.invoice_lastname,
        "address1": order.invoice_line1,
        "address2": order.invoice_line2,
        "city": order.invoice_city,
        "state": order.invoice_state,
        "zip": order.invoice_code,
        "no_shipping": "1",
        "custom": order.uuid,
        "invoice": order.uuid,
        "item_name": shop.shop_owner,
        "amount": "%.2f" % (order.price - order.tax),
        "tax": "%.2f" % order.tax,
    }

    query = "&".join("%s=%s" % pair for pair in params.items())
    # Sandbox endpoint while debugging, live endpoint otherwise.
    if getattr(settings, 'PAYPAL_DEBUG', settings.DEBUG):
        endpoint = SANDBOX_POSTBACK_ENDPOINT
    else:
        endpoint = POSTBACK_ENDPOINT
    return endpoint + "?" + query
| [
2,
21015,
17944,
198,
11748,
36693,
198,
198,
2,
42625,
14208,
17944,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
49315,
13,
27530,
1330,
14413,
198,
6738,
42625,
14208,
13,
7295,
13,
6371... | 2.384051 | 2,997 |
from django.shortcuts import render
from django.shortcuts import render, redirect
from django.contrib import messages
from django.db import connection, transaction
from django.db import connections
from datetime import datetime
from datetime import date
import datetime
from PIL import Image
import os
from pathlib import Path
from django.conf import settings
from django.core.files.storage import FileSystemStorage
# Create your views here.
from django.http import HttpResponse
# Project base directory: two directory levels above this file.
BASE_DIR = Path(__file__).resolve().parent.parent
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
9945,
1330,
4637,
11,
8611,
198,
6738,
... | 3.79021 | 143 |
import os
import csv
import requests
import numpy as np
from glob import glob
from tqdm import tqdm
from growth_stock_functions import *
# Collect every non-ETF ticker row from the pipe-delimited listing files.
ticker_rows = []
for listing in glob('../stock-tickers/*.txt'):
    with open(listing) as fh:
        for record in csv.DictReader(fh, delimiter='|'):
            # skip exchange-traded funds; only individual stocks are rated
            if record['ETF'] != 'Y':
                ticker_rows.append(record)

# Rate every ticker; process_ticker() yields None for symbols it can't rate.
rated = []
for record in tqdm(ticker_rows):
    # Prefer the NASDAQ-specific symbol column, fall back to 'Symbol'.
    if 'NASDAQ Symbol' in record:
        symbol = record['NASDAQ Symbol']
    else:
        symbol = record['Symbol']
    rating = process_ticker(symbol)
    if rating is not None:
        rated.append(rating)

# Best first: column 1 of each row is the final weighted rating.
rated.sort(key=lambda row: row[1], reverse=True)

with open('growth_good_buys.csv', 'w') as out:
    writer = csv.writer(out)
    writer.writerow(['Symbol', 'Final Weighted Rating', 'GPM', 'ROA', 'ROE', 'Gross Profit Growth', 'Net Income Growth', 'EPS Growth', 'Unweighted Rating'])
    writer.writerows(rated)
| [
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
7007,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
15095,
1330,
15095,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
3349,
62,
13578,
62,
12543,
2733,
1330,
163... | 2.363431 | 443 |
from func_data_preparation import *
from func_ml import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--save_as", action = 'store', default = "../models/har_model_v10.pkl" ,
help='Give the name of the trained model. Default: ../models/har_model_v10.pkl')
parser.add_argument("--acc_cut_off", action = 'store', default = 12 , type = int,
help='Cut off frequency for the accelerometer signals [Hz]. Default: 12')
parser.add_argument("--gyro_cut_off", action = 'store', default = 2 , type = int,
help='Cut off frequency for the gyrooscope signals [Hz]. Default: 2')
parser.add_argument('--overlap', default=False, action='store_true',
help='If you want to use overlap in the data, write True. Default: False')
parser.add_argument("--overlap_size", action = 'store', default = 0.5 , type = float,
help='Size of overlap in percent. Float between 0-1. Default 0.5')
parser.add_argument("--block_size", action = 'store', default = 512 , type = int,
help='Size of the blocks, in samples. Defalut: 512')
args = parser.parse_args()
#save model to disk as ...
save_as = args.save_as
#cut off frequency of the IR filter
#accelerometer
acc_cut_off = args.acc_cut_off
#gyrooscope
gyro_cut_off = args.gyro_cut_off
#transform data with overlap
overlap = args.overlap
overlap_size = args.overlap_size
block_size = args.block_size
#pwelch nperseg
nperseg = block_size/2
############################################################################
# Training pipeline: label raw sensor data, filter, window into blocks,
# extract time/frequency features, train and evaluate the model, save it.
#
# Fixes vs. the original:
#  * removed a stray ")" after the accuracy print (syntax error — the
#    remains of a mangled metrics print); recall/precision/f1 were computed
#    but never reported, they are printed now
#  * removed a duplicate read_data() call whose result was discarded
#  * accuracy_score() returns a fraction, so it is scaled to percent before
#    being printed with a "%" sign
#  * model.predict(X_test) is computed once instead of four times
if __name__ == '__main__':
    print("Create Labels")
    print('*'*20)
    labels = create_labels()

    print("Read Data")
    print('*'*20)
    df = read_data()

    print("Label Data")
    print('*'*20)
    labeled_df = add_activity_label(df, labels)

    print("Filter data")
    print('*'*20)
    # Low-pass filter accelerometer and gyroscope channels separately, then
    # stitch the filtered signals back into a single frame.
    filtered_df_acc = filter_acc(labeled_df, cutoff=acc_cut_off)
    filtered_df_gyro = filter_gyro(labeled_df, cutoff=gyro_cut_off)
    labeled_df = remake_df(filtered_df_acc, filtered_df_gyro, labeled_df)
    labeled_df = renindex_df(labeled_df)

    print("Add Blocks")
    print('*'*20)
    if overlap:
        block_df = create_block_df(labeled_df, block_size, overlap_size)
    else:
        block_df = create_block_df_no_overlap(labeled_df, block_size)

    print("Add Activity labels")
    print('*'*20)
    activity_labels = create_activity_labels(block_df)

    print("Aggregate Data")
    print('*'*20)
    # Time-domain aggregates plus frequency-domain (Welch/FFT) aggregates.
    agg_df = create_aggregated(block_df)
    fft_df = do_fft(block_df, nperseg=nperseg)
    fft_agg_df = create_aggregated_freq(fft_df)

    print("Add Features")
    print('*'*20)
    features = create_features(agg_df, fft_agg_df)

    print("Drop na-s")
    print('*'*20)
    features_to_drop = find_na(features)
    print(features_to_drop)
    features = drop_features(features, features_to_drop)

    print("ML model")
    print('*'*20)
    X_train, X_test, y_train, y_test = create_train_test(features, activity_labels)

    start_train = time.time()
    model = train_model(X_train, y_train)
    train_time = round(time.time() - start_train, 1)

    # Evaluate once; per-class metrics (average=None) come back as arrays.
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred, average=None)
    precision = precision_score(y_test, y_pred, average=None)
    f1 = f1_score(y_test, y_pred, average=None)

    print('*'*20)
    print(f'Train time: {train_time}s')
    print(f'Accuracy score: {round(accuracy * 100, 3)}%')
    print(f'Recall per class: {recall}')
    print(f'Precision per class: {precision}')
    print(f'F1 per class: {f1}')

    save_model(name=save_as, model=model)
| [
6738,
25439,
62,
7890,
62,
3866,
1845,
341,
1330,
1635,
198,
6738,
25439,
62,
4029,
1330,
1635,
198,
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
198,
48610,
13,
2860,
62,
49140,
7203,
438,
... | 2.471042 | 1,554 |