content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import json
import re
import argparse
import random
from time import time
from itertools import combinations
from app import db
from util import run_sql
from util import safe_commit
from util import Timer
from util import elapsed
from app import get_db_cursor
#
# pick a random number from 1 to 8
# pick that many attributes from list
# for each of them, pick a random setting
# pick an aggregation level (top dois, top journals, top publishers, top countries)
# number of combos of length up to four when have 15 options
# is 15 choose 4 + 15 choose 3 + ... 15 choose 1 https://www.calculatorsoup.com/calculators/discretemathematics/combinations.php?n=15&r=4&action=solve
# = 1365 + 455 + 105 + 15
# = 1940
# 3.5 seconds per = 3.5*1940 = 6790 seconds = 1.9 hours until everything has been primed once
table_lookup = {}
join_lookup = {}
# join_lookup["mag_main_papers"] = ""
# table_lookup["mag_main_papers"] = [
# ("doi", str),
# ("doc_type", str),
# ("year", int),
# ]
#
#
#
# join_lookup["unpaywall"] = " JOIN unpaywall ON unpaywall.doi = mag_main_papers.doi_lower "
# table_lookup["unpaywall"] = [
# ("doi", str),
# ("genre", str),
# # ("journal_is_in_doaj", str),
# ("journal_is_oa", str),
# ("oa_status", str),
# ("best_version", str),
# ("has_green", bool),
# ("is_oa_bool", bool),
# ]
#
#
# join_lookup["mag_paperid_affiliations_details"] = " JOIN mag_paperid_affiliations_details ON mag_main_papers.paper_id = mag_paperid_affiliations_details.paper_id "
# table_lookup["mag_paperid_affiliations_details"] = [
# ("ror_id", str),
# # ("grid_id", str),
# ("org", str),
# ("city", str),
# # ("region", str),
# ("state", str),
# ("country", str),
# ("continent", str),
# ]
#
# join_lookup["mid.journalsdb_computed"] = """ JOIN mag_main_journals ON mag_main_journals.journal_id = mag_main_papers.journal_id
# JOIN mid.journalsdb_computed_flat ON mag_main_journals.issn = mid.journalsdb_computed_flat.issn
# JOIN mid.journalsdb_computed ON mid.journalsdb_computed_flat.issn_l = mid.journalsdb_computed.issn_l """
# table_lookup["mid.journalsdb_computed"] = [
# ("publisher", str),
# ("issn_l", str),
# ]
# join_lookup["mag_main_authors"] = """ JOIN mag_main_paper_author_affiliations ON mag_main_paper_author_affiliations.paper_id = mag_main_papers.paper_id
# JOIN mag_main_authors ON mag_main_paper_author_affiliations.author_id = mag_main_authors.author_id """
# table_lookup["mag_main_authors"] = [
# ("normalized_name", str),
# ("author_id", int),
# ]
#
# join_lookup["unpaywall_oa_location"] = " JOIN unpaywall_oa_location ON unpaywall_oa_location.doi = mag_main_papers.doi_lower "
# table_lookup["unpaywall_oa_location"] = [
# # ("endpoint_id", str),
# ("version", str),
# ("license", str),
# ("repository_institution", str),
# ]
entity_table_lookup = {
"works": "api.mag_combo_all",
"authors": "api.mag_paperid_authors",
"journals": "mid.journalsdb_computed",
"oa_locations": "api.unpaywall_paperid_oa_location",
"fields_of_study": "api.mag_paperid_fields_of_study"
}
for entity in entity_table_lookup.keys():
join_lookup[entity] = {}
join_lookup["works"]["api.mag_combo_all"] = ""
join_lookup["authors"]["api.mag_combo_all"] = """ JOIN api.mag_combo_all ON api.mag_paperid_authors.paper_id = api.mag_combo_all.paper_id """
join_lookup["journals"]["api.mag_combo_all"] = """ JOIN api.mag_combo_all ON mid.journalsdb_computed.issn_l = api.mag_combo_all.issn_l """
join_lookup["oa_locations"]["api.mag_combo_all"] = """ JOIN api.mag_combo_all ON api.unpaywall_paperid_oa_location.paper_id = api.mag_combo_all.paper_id """
join_lookup["fields_of_study"]["api.mag_combo_all"] = """ JOIN api.mag_combo_all ON api.mag_paperid_fields_of_study.paper_id = api.mag_combo_all.paper_id """
table_lookup["api.mag_combo_all"] = [
("paper_id", int),
("doi", str),
("doc_type", str),
("year", int),
("paper_title", str),
("journal_title", str),
]
table_lookup["api.mag_combo_all"] += [
("genre", str),
# ("journal_is_in_doaj", str),
("journal_is_oa", str),
("oa_status", str),
("best_version", str),
("has_green", bool),
("is_oa_bool", bool),
]
table_lookup["api.mag_combo_all"] += [
("ror_id", str),
# ("grid_id", str),
("org", str),
("city", str),
# ("region", str),
("state", str),
("country", str),
("continent", str),
]
table_lookup["api.mag_combo_all"] += [
("publisher", str),
("issn_l", str),
]
join_lookup["works"]["api.mag_paperid_authors"] = """ JOIN api.mag_paperid_authors ON api.mag_paperid_authors.paper_id = api.mag_combo_all.paper_id """
table_lookup["api.mag_paperid_authors"] = [
("normalized_name", str),
("author_id", int),
]
join_lookup["works"]["api.unpaywall_paperid_oa_location"] = " JOIN api.unpaywall_paperid_oa_location ON api.unpaywall_paperid_oa_location.paper_id = api.mag_combo_all.paper_id "
table_lookup["api.unpaywall_paperid_oa_location"] = [
# ("endpoint_id", str),
("version", str),
("license", str),
("repository_institution", str),
]
join_lookup["works"]["api.mag_paperid_fields_of_study"] = """ JOIN api.mag_paperid_fields_of_study ON api.mag_paperid_fields_of_study.paper_id = api.mag_combo_all.paper_id """
table_lookup["api.mag_paperid_fields_of_study"] = [
("field_of_study_id", int),
("normalized_field_of_study_name", str),
]
field_lookup = {}
entities = entity_table_lookup.keys()
for entity in entities:
field_lookup[entity] = {}
for table_name in table_lookup:
for (field, datatype) in table_lookup[table_name]:
column_dict = {}
column_dict["table_name"] = table_name
column_dict["column_name"] = "{}.{}".format(table_name, field)
column_dict["datatype"] = datatype
field_lookup[entity][field] = column_dict
max_num_filters = 3
chosen_fields_combinations_remaining = []
all_fields = {}
for entity in entities:
all_fields[entity] = field_lookup[entity].keys()
# add one for offset
num_groupbys = 1
chosen_fields_combinations_remaining = {}
for entity in entities:
chosen_fields_combinations_remaining[entity] = []
for num_filters in range(0, max_num_filters + 1):
new_combo = list(combinations(all_fields[entity], num_groupbys + num_filters))
random.shuffle(new_combo) # randomize within the filter size
chosen_fields_combinations_remaining[entity] += new_combo
# max_num_filters = len(field_lookup) - 2
# chosen_fields_combinations_remaining = list(combinations(all_fields, num_groupbys + max_num_filters + 1))
# print(chosen_fields_combinations_remaining)
# print chosen_fields_combinations_remaining
# print("Number of fields: {}".format(len(field_lookup.keys())))
# print("Number of tables: {}".format(len(table_lookup.keys())))
# print("Number of combos with {} filters and a group-by: {}".format(max_num_filters, len(chosen_fields_combinations_remaining)))
# print("Number of hours it'd take to go through in a single thread, if 10 seconds each: {}".format(round(len(chosen_fields_combinations_remaining) * 10.0 / 60.0), 1))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
parser.add_argument('--warm', action='store_true', help="warm cache")
parser.add_argument('--verbose', action='store_true', help="print verbose")
parsed_args = parser.parse_args()
parsed_vars = vars(parsed_args)
entities = ["works"]
start_time = time()
print("getting valid column values")
field_values = {}
all_fields = {}
for entity in entities:
field_values[entity] = {}
all_fields[entity] = list(field_lookup[entity].keys())
random.shuffle(all_fields[entity]) # helps be fast in parallel
for field in all_fields[entity]:
field_values[entity][field] = get_column_values_for_querying(entity, field)
print("done, took {} seconds".format(elapsed(start_time)))
# print(all_fields)
keep_running = True
while keep_running:
for entity in entities:
if parsed_vars.get("warm"):
chosen_fields = chosen_fields_combinations_remaining[entity].pop(0)
# print(chosen_fields)
if not chosen_fields_combinations_remaining:
keep_running = False
else:
num_fields = random.randint(num_groupbys, num_groupbys + max_num_filters)
chosen_fields = random.sample(all_fields[entity], num_fields)
filters = ["{}:{}".format(c, random.choice(field_values[entity][c])) for c in chosen_fields[num_groupbys:]]
groupby = chosen_fields[0]
searches = []
verbose = parsed_vars.get("verbose")
(rows, q, timing) = do_query(entity, filters, searches, groupby, verbose=verbose, details=False)
(rows, q, timing) = do_query(entity, filters, searches, groupby, verbose=verbose, details=True)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
1822,
29572,
198,
11748,
4738,
198,
6738,
640,
1330,
640,
1... | 2.440997 | 3,771 |
# Generated from .\xpath\xpathgrammer\XPath.g4 by ANTLR 4.9.3
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
| [
2,
2980,
515,
422,
764,
59,
87,
6978,
59,
87,
6978,
4546,
647,
59,
55,
15235,
13,
70,
19,
416,
3537,
14990,
49,
604,
13,
24,
13,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
1885,
14050,
19,
1330,
1635,
198,
6738,
33245... | 2.659091 | 88 |
__author__ = 'lsteng'
# Copyright 2018 Shaoteng Liu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx
from copy import deepcopy
import operator
from collections import OrderedDict
import gen_graph as gg
from heapq import heappush, heappop
from itertools import count
# def Dijkstra_weight(G, source, dst_mode = False, w='dweight'):
# G_succ = G.succ if not dst_mode else G.pred
#
# push = heappush
# pop = heappop
# dist = {} # dictionary of final distances
# seen = {source: 0}
# c = count()
# fringe = [] # use heapq with (distance,label) tuples
# push(fringe, (0, next(c), source))
# paths = {}
# paths[source] = [source]
# while fringe:
# (d, _, v) = pop(fringe)
# if v in dist:
# continue # already searched this node.
# dist[v] = d
#
#
# for u, e in G_succ[v].items():
# cost = get_weight(v, u, G, w, dst_mode)
# if cost is None:
# continue
# vu_dist = dist[v] + get_weight(v, u, G, w, dst_mode)
# # if cutoff is not None:
# # if vu_dist > cutoff:
# # continue
# if u in dist:
# if vu_dist < dist[u]:
# raise ValueError('Contradictory paths found:',
# 'negative weights?')
# elif u not in seen or vu_dist < seen[u]:
# seen[u] = vu_dist
# push(fringe, (vu_dist, next(c), u))
# #if paths is not None:
# if not dst_mode:
# paths[u] = paths[v] + [u]
# else:
# paths[u] = [u] + paths[v]
# # if pred is not None:
# # pred[u] = [v]
# # elif vu_dist == seen[u]:
# # if pred is not None:
# # pred[u].append(v)
#
# #if paths is not None:
# return (dist, paths)
# def Dijkstra_weight(G, source, dst_mode = False, w='dweight'):
# G_succ = G.succ if not dst_mode else G.pred
#
# push = heappush
# pop = heappop
# dist = {} # dictionary of final distances
# seen = {source: 0}
# c = count()
# fringe = [] # use heapq with (distance,label) tuples
# push(fringe, (0, next(c), source))
# paths = {}
# paths[source] = [source]
# while fringe:
# (d, _, v) = pop(fringe)
# if v in dist:
# continue # already searched this node.
# dist[v] = d
#
#
# for u, e in G_succ[v].items():
# cost = get_weight(v, u, G, w, dst_mode)
# if cost is None:
# continue
# vu_dist = dist[v] + get_weight(v, u, G, w, dst_mode)
# # if cutoff is not None:
# # if vu_dist > cutoff:
# # continue
# if u in dist:
# if vu_dist < dist[u]:
# raise ValueError('Contradictory paths found:',
# 'negative weights?')
# elif u not in seen or vu_dist < seen[u]:
# seen[u] = vu_dist
# push(fringe, (vu_dist, next(c), u))
# #if paths is not None:
# if not dst_mode:
# paths[u] = paths[v] + [u]
# else:
# paths[u] = [u] + paths[v]
# # if pred is not None:
# # pred[u] = [v]
# # elif vu_dist == seen[u]:
# # if pred is not None:
# # pred[u].append(v)
#
# #if paths is not None:
# return (dist, paths)
if __name__ == '__main__':
#G= gg.genGraph_triangle(8, 8, 1000)
G=gg.gen_mesh_Graph(2, 2, 1)
width, paths = Dijkstra_weight(G, 3, w='capacity')
print width
print paths
print width[0], paths[0]
p =paths[0]
print "#########test reverse alg ###################"
width, paths = Dijkstra_weight(G, 3, dst_mode=True, w='capacity')
print width
print paths
print width[0], paths[0]
p =paths[0]
| [
834,
9800,
834,
796,
705,
75,
301,
1516,
6,
198,
198,
2,
15069,
2864,
220,
19413,
313,
1516,
18258,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 1.931467 | 2,393 |
""" Module to handle the connection process.
"""
# pylint: disable=import-error
import json
import base64
import uuid
import aiohttp
from indy import crypto, did, pairwise, non_secrets
import serializer.json_serializer as Serializer
from router.simple_router import SimpleRouter
from . import Module
from message import Message
from helpers import serialize_bytes_json, bytes_to_str, str_to_bytes
| [
37811,
19937,
284,
5412,
262,
4637,
1429,
13,
198,
37811,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748,
12,
18224,
198,
198,
11748,
33918,
198,
11748,
2779,
2414,
198,
11748,
334,
27112,
198,
198,
11748,
257,
952,
4023,
198,
6738... | 3.517544 | 114 |
from django.test import TestCase
from app.calc import add, substract
"""
run unit testing
"""
class CalcTest(TestCase):
""" test add 2 those number """
""" test subtract 2 those number """ | [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
598,
13,
9948,
66,
1330,
751,
11,
3293,
974,
628,
198,
37811,
198,
5143,
4326,
4856,
198,
37811,
198,
4871,
2199,
66,
14402,
7,
14402,
20448,
2599,
628,
220,
220,
220,
37227,... | 3.389831 | 59 |
from flask_restful import Resource, abort
from flask import request, jsonify, make_response
import config
from controller import worker_store as ws
| [
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
15614,
198,
6738,
42903,
1330,
2581,
11,
33918,
1958,
11,
787,
62,
26209,
198,
198,
11748,
4566,
198,
6738,
10444,
1330,
8383,
62,
8095,
355,
266,
82,
628
] | 4.166667 | 36 |
#!/usr/bin/env python3
"""
Duino-Coin REST API © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duco-rest-api
Duino-Coin Team & Community 2019-2021
"""
import gevent.monkey
gevent.monkey.patch_all()
from wrapped_duco_functions import *
from Server import (
now, SAVE_TIME, POOL_DATABASE, CONFIG_WHITELIST_USR,
jail, global_last_block_hash, HOSTNAME,
DATABASE, DUCO_EMAIL, DUCO_PASS, alt_check, acc_check,
DB_TIMEOUT, CONFIG_MINERAPI, SERVER_VER,
CONFIG_TRANSACTIONS, API_JSON_URI,
BCRYPT_ROUNDS, user_exists, SOCKET_TIMEOUT,
email_exists, send_registration_email,
DECIMALS, CONFIG_BANS, protocol_verified_mail,
CONFIG_JAIL, CONFIG_WHITELIST, perm_ban,
NodeS_Overide, CAPTCHA_SECRET_KEY)
from fastrand import pcg32bounded as fastrandint
from xxhash import xxh64
from hashlib import sha1
import threading
import traceback
import os
from json import load
from bcrypt import hashpw, gensalt, checkpw
from sqlite3 import connect as sqlconn
from time import sleep, time
from re import sub, match
from colorama import Back, Fore, Style, init
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from nano_lib_rvx import Account
from tronapi import HttpProvider
from tronapi import Tron
from cashaddress import convert
from bitcash import Key
import requests
import random
import json
from socket import socket
from flask_ipban import IpBan
from flask_limiter.util import get_remote_address
from flask_limiter import Limiter
from flask import Flask, request, jsonify, render_template
from flask_caching import Cache
import functools
from dotenv import load_dotenv
# Exchange settings
exchange_address = {
"duco": "coinexchange",
"xmg": "95JLhkyWVDce5D17LyApULc5YC4vrVzaio",
"lke": "Like3yYC34YQJRMQCSbTDWKLhnzCoZvo9AwWuu5kooh",
"bch": "bitcoincash:qpgpd7slludx5h9p53qwf8pxu9z702n95qteeyzay3",
"trx": "TQUowTaHwvkWHbNVkxkAbcnbYyhF4or1Qy",
"xrp": "rGT84ryubURwFMmiJChRbWUg9iQY18VGuQ (Destination tag: 2039609160)",
"dgb": "DHMV4BNGpWbdhpq6Za3ArncuhpmtCjyQXg",
"nano": "nano_3fpqpbcgt3nga3s81td6bk7zcqdr7ockgnyjkcy1s8nfn98df6c5wu14fuuq",
"fjc": "FsfCoLL8JmLJoU57bUr2W3u3TA8acL9kf3",
"rvn": "RH4bTDaHH7LSSCVSvXJzJ5KkiGR1QRMaqN",
"nim": "NQ88 Q9ME 470X 8KY8 HXQG J96N 6FHR 8G0B EDMH"}
load_dotenv()
IPDB_KEY = os.getenv('IPDB_KEY')
PROXYCHECK_KEY = os.getenv('PROXYCHECK_KEY')
TRX_SECRET_KEY = os.getenv('TRX_SECRET_KEY')
BCH_SECRET_KEY = os.getenv('BCH_SECRET_KEY')
LIKECOIN_SECRET_KEY = os.getenv('LIKECOIN_SECRET_KEY')
NANO_SECRET_KEY = os.getenv('NANO_SECRET_KEY')
EXCHANGE_MAIL = DUCO_EMAIL
IP_CHECK_DISABLED = False
XXHASH_TX_PROB = 30
overrides = [
NodeS_Overide,
DUCO_PASS]
config = {
"DEBUG": False,
"CACHE_TYPE": "redis",
"CACHE_REDIS_URL": "redis://localhost:6379/0",
"CACHE_DEFAULT_TIMEOUT": SAVE_TIME,
"JSONIFY_PRETTYPRINT_REGULAR": False}
limiter = Limiter(
key_func=forwarded_ip_check,
default_limits=["5000 per day", "1 per 1 second"],)
ip_ban = IpBan(
ban_seconds=60*60,
ban_count=3,
persist=True,
ip_header='HTTP_X_REAL_IP',
record_dir="config/ipbans/",
ipc=True,
secret_key=DUCO_PASS,
abuse_IPDB_config={
"key": IPDB_KEY,
"report": True,
"load": False})
app = Flask(__name__, template_folder='config/error_pages')
app.config.from_mapping(config)
cache = Cache(app)
limiter.init_app(app)
ip_ban.init_app(app)
requests_session = requests.Session()
thread_lock = threading.Lock()
nano_key = Account(priv_key=NANO_SECRET_KEY)
bch_key = Key(BCH_SECRET_KEY)
trx_key = Tron(
full_node=HttpProvider('https://api.trongrid.io'),
solidity_node=HttpProvider('https://api.trongrid.io'),
event_server=HttpProvider('https://api.trongrid.io'))
trx_key.private_key = TRX_SECRET_KEY
trx_key.default_address = exchange_address["trx"]
last_transactions_update, last_miners_update, last_balances_update = 0, 0, 0
miners, balances, transactions = [], [], []
rate_count, last_transfer, checked_ips = {}, {}, {}
banlist, jailedusr, registrations, whitelisted_usr = [], [], [], []
with open('config/sell_email.html', 'r') as file:
html_exc = file.read()
with open('config/sell_email.html', 'r') as file:
html_auto = file.read()
with open('config/buy_email.html', 'r') as file:
html_buy = file.read()
with open(CONFIG_JAIL, "r") as jailedfile:
jailedusr = jailedfile.read().splitlines()
for username in jailedusr:
jail.append(username)
dbg("Successfully loaded jailed usernames file")
with open(CONFIG_BANS, "r") as bannedusrfile:
bannedusr = bannedusrfile.read().splitlines()
for username in bannedusr:
banlist.append(username)
dbg("Successfully loaded banned usernames file")
with open(CONFIG_WHITELIST_USR, "r") as whitelistedusrfile:
whitelist = whitelistedusrfile.read().splitlines()
for username in whitelist:
whitelisted_usr.append(username)
dbg("Successfully loaded whitelisted usernames file")
with open(CONFIG_WHITELIST, "r") as whitelistfile:
whitelist = whitelistfile.read().splitlines()
for ip in whitelist:
ip_ban.ip_whitelist_add(ip)
dbg("Successfully loaded whitelisted IPs file")
observations = {}
@app.errorhandler(429)
@app.errorhandler(404)
@app.errorhandler(500)
@app.errorhandler(403)
trusted = {}
creation = {}
@app.route("/ping")
@cache.cached(timeout=60)
@app.route("/404")
@cache.cached(timeout=60)
@app.route("/429")
@cache.cached(timeout=60)
@app.route("/403")
@cache.cached(timeout=60)
@app.route("/500")
@cache.cached(timeout=60)
@app.route("/all_pools")
@cache.cached(timeout=SAVE_TIME)
@app.route("/autopool")
@cache.cached(timeout=SAVE_TIME)
registration_db = {}
@app.route("/auth/<username>")
@limiter.limit("6 per 1 minute")
@app.route("/register/")
@limiter.limit("5 per hour")
@app.route("/miners/<username>")
@cache.cached(timeout=SAVE_TIME)
@app.route("/wduco_wrap/<username>")
@limiter.limit("3 per 1 minute")
@app.route("/users/<username>")
@limiter.limit("60 per 1 minute")
@cache.cached(timeout=SAVE_TIME)
@app.route("/users/")
@cache.cached(timeout=60)
@app.route("/changepass/<username>")
@limiter.limit("1 per 1 minute")
@app.route("/verify/<username>")
@app.route("/user_transactions/<username>")
@cache.cached(timeout=SAVE_TIME)
@app.route("/id_transactions/<tx_id>")
@cache.cached(timeout=SAVE_TIME)
@app.route("/transactions/<hash>")
@cache.cached(timeout=SAVE_TIME)
@app.route("/balances/<username>")
@cache.cached(timeout=SAVE_TIME)
@app.route("/balances")
@cache.cached(timeout=SAVE_TIME*3)
@app.route("/transactions")
@cache.cached(timeout=SAVE_TIME*3)
@app.route("/miners")
@cache.cached(timeout=SAVE_TIME*3)
@app.route("/statistics")
@cache.cached(timeout=SAVE_TIME*3)
@app.route("/ip")
@app.route("/statistics_miners")
@cache.cached(timeout=SAVE_TIME*3)
@app.route("/exchange_request/")
@limiter.limit("2 per 1 day")
@app.route("/transaction/")
@limiter.limit("2 per 1 minute")
@app.route("/pool_sync/")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
35660,
2879,
12,
24387,
30617,
7824,
10673,
17168,
11971,
198,
5450,
1378,
646,
259,
25634,
259,
13,
785,
198,
5450,
1378,
12567,
13,
785,
14,
18218,
1140,
1456,
14,
64... | 2.387563 | 2,975 |
## @imflash217
##
## creating a new file
with open("numbers.txt", "w") as file:
for i in range(1000):
print(str(i), file=file)
##################################################################
#################################################################
import heapq
#################################################################
if __name__ == "__main__":
with open("./numbers.txt", "r") as file:
results = find_largest(file, 5)
print(results)
#################################################################
| [
2235,
2488,
320,
34167,
24591,
198,
2235,
198,
2235,
4441,
257,
649,
2393,
198,
4480,
1280,
7203,
77,
17024,
13,
14116,
1600,
366,
86,
4943,
355,
2393,
25,
198,
220,
220,
220,
329,
1312,
287,
2837,
7,
12825,
2599,
198,
220,
220,
220... | 3.902098 | 143 |
from BaseAgent import BaseAgent
import logging
import math
import numpy as np
import os
import tensorflow as tf
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| [
6738,
7308,
36772,
1330,
7308,
36772,
198,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,... | 3.175439 | 57 |
# Classifier Evaluation incorporating similarity
#
# (C) 2015 by Mareike Picklum (mareikep@cs.uni-bremen.de)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from subprocess import Popen
from ..core.wordnet import WordNet
from pracmln.utils.eval import ConfusionMatrix
class ConfusionMatrixSim(ConfusionMatrix):
'''
Subclass of ConfusionMatrix incorporating similarities
between concepts into the precisions calculations
'''
def countClassifications(self, classname, sim=False):
'''
Returns the true positive, true negative, false positive, false negative
classification counts (in this order).
False positives and false negatives consider concept similarity.
'''
tp = self.matrix.get(classname,{}).get(classname,0)
classes = list(self.matrix.keys())
fp = 0.
wn = self.wordnet
classSyn = wn.synset(classname)
for c in classes:
if c != classname:
if sim:
cSyn = wn.synset(c)
fp += (self.getMatrixEntry(classname, c) * (1- wn.similarity(classSyn,cSyn)))
else:
fp += self.getMatrixEntry(classname, c)
fn = 0.
for c in classes:
if c != classname:
if sim:
cSyn = wn.synset(c)
fn += (self.getMatrixEntry(c, classname) * (1- wn.similarity(classSyn,cSyn)))
else:
fn += self.getMatrixEntry(c, classname)
tn = 0.
for c in classes:
if c != classname:
for c2 in classes:
if c2 != classname:
tn += self.getMatrixEntry(c, c2)
if not sim:
assert sum([tp, tn, fp, fn]) == self.instanceCount
return tp, tn, fp, fn
def getMetrics(self, classname, sim=False):
'''
Returns the classifier evaluation metrices in the following order:
Accuracy, Precision, Recall, F1-Score.
'''
classes = []
for classification in self.matrix:
for truth in self.matrix.get(classification,{}):
try:
classes.index(truth)
except ValueError:
classes.append(truth)
classes = sorted(classes)
tp, tn, fp, fn = self.countClassifications(classname, sim)
acc = None
if tp + tn + fp + fn > 0:
acc = (tp + tn) / float(tp + tn + fp + fn)
pre = 0.0
if tp + fp > 0:
pre = tp / float(tp + fp)
rec = 0.0
if tp + fn > 0:
rec = tp / float(tp + fn)
f1 = 0.0
if pre + rec > 0:
f1 = (2.0 * pre * rec) / (pre + rec)
return acc, pre, rec, f1
def getLatexTable(self, sim=False):
'''
Returns LaTex code for the confusion matrix.
'''
grid = "|l|"
for cl in sorted(self.labels):
grid += "l|"
endl = '\n'
result = ''
result += r'\footnotesize' + endl
result += r'\begin{tabular}{' + grid + '}' + endl
headerRow = r"Prediction/Ground Truth"
for cl in sorted(self.labels):
headerRow += r" & \begin{turn}{90}" + cl.replace('_', r'\_') + r'\end{turn}'
# count number of actual instances per class label
examplesPerClass = {}
for label in self.labels:
tp, tn, fp, fn = self.countClassifications(label)
examplesPerClass[label] = sum([tp, fp, fn])
result += r'\hline' + endl
result += headerRow + r'\\ \hline' + endl
#for each class create row
for clazz in sorted(self.labels):
values = []
#for each row fill colum
for cl2 in sorted(self.labels):
counts = self.getMatrixEntry(clazz, cl2)
if sim:
classSyn = self.wordnet.synset(clazz)
cl2Syn = self.wordnet.synset(cl2)
counts *= self.wordnet.similarity(classSyn, cl2Syn)
values.append('\cellcolor{{cfmcolor!{0}}}{1}'.format(int(round(counts/examplesPerClass[clazz] * 100)), ('\\textbf{{{:g}}}' if clazz == cl2 else '{:g}').format(float('{:.2f}'.format(counts)))))
result += clazz.replace('_', r'\_') + ' & ' + ' & '.join(values) + r'\\ \hline' + endl
result += r"\end{tabular}" + endl
return result
def printPrecisions(self, sim=False):
'''
Prints to the standard out a table of the class-specific error measures accurracy, precision, recall, F score.
'''
classes = []
for classification in self.matrix:
for truth in self.matrix.get(classification,{}):
try:
classes.index(truth)
except ValueError:
classes.append(truth)
classes = sorted(classes)
s = ''
precs = {}
for cf in classes:
acc,pre,rec,f1 = self.getMetrics(cf, sim)
print('{}: - Acc={:2f}, Pre={:2f}, Rec={:2f}, F1={:2f}\n'.format(cf, acc, pre, rec, f1))
precs[cf] = 'Acc={:2f}, Pre={:2f}, Rec={:2f}, F1={:2f}'.format(acc, pre, rec, f1)
return precs
def precisionsToFile(self, filename, sim=False):
'''
Prints to the standard out a table of the class-specific error measures accurracy, precision, recall, F score.
'''
precisions = self.printPrecisions(sim=sim)
f = open(filename, 'w+')
for prec in precisions:
f.write('{}: {}\n'.format(prec, precisions[prec]))
f.write('Total Accuracy: {}\n'.format(self.getTotalAccuracy()))
f.write('Average Precision: Acc={0[0]}, Pre={0[1]}, Rec={0[2]}, F1={0[3]}\n'.format(self.printAveragePrecision(sim=sim)))
def toPDF(self, filename, sim=False):
'''
Creates a PDF file of this matrix. Requires 'pdflatex' and 'pdfcrop' installed.
'''
texFileName = filename + '.tex'
texFile = open(texFileName, 'w+')
texFile.write(r'''
\documentclass[10pt]{{article}}
\usepackage{{color}}
\usepackage{{rotating}}
\usepackage[table]{{xcolor}}
\definecolor{{cfmcolor}}{{rgb}}{{0.2,0.4,0.6}}
\begin{{document}}
\pagenumbering{{gobble}}
\resizebox{{\columnwidth}}{{!}}{{{}}}
\end{{document}}
'''.format(self.getLatexTable(sim)))
texFile.close()
cmd = 'pdflatex -halt-on-error {}'.format(texFileName)
p = Popen(cmd, shell=True)
if p.wait() != 0:
raise Exception('Couldn\'t compile LaTex.')
else:
cmd = 'pdfcrop {}.pdf {}.pdf'.format(filename, filename)
p = Popen(cmd, shell=True)
if p.wait() != 0:
raise Exception('Couldn\'t crop pdf')
if __name__ == '__main__':
cm = ConfusionMatrixSim()
for _ in range(10):
cm.addClassificationResult("lemon.n.01","lemon.n.01")
cm.addClassificationResult("lemon.n.01","lemon.n.01")
cm.addClassificationResult("lemon.n.01","lemon.n.01")
cm.addClassificationResult("orange.n.01","lemon.n.01")
cm.addClassificationResult("orange.n.01","lemon.n.01")
cm.addClassificationResult("lemon.n.01","orange.n.01")
cm.addClassificationResult("orange.n.01","orange.n.01")
cm.addClassificationResult("orange.n.01","orange.n.01")
cm.printTable()
cm.printPrecisions()
print(cm.getLatexTable())
cm.toPDF('tmp')
cm.toPDF('tmp_sim', sim=True)
| [
2,
5016,
7483,
34959,
29927,
26789,
198,
2,
198,
2,
357,
34,
8,
1853,
416,
36989,
522,
12346,
75,
388,
357,
11449,
522,
79,
31,
6359,
13,
35657,
12,
65,
2787,
268,
13,
2934,
8,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
... | 2.107365 | 4,182 |
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
import pymaster as nmt
#This script showcases the use of NmtWorkspace objects to speed up the
#computation of power spectra for many pairs of fields with the same masks.
#HEALPix map resolution
nside=256
#We start by creating some synthetic masks and maps with contaminants.
#Here we will focus on the cross-correlation of a spin-2 and a spin-1 field.
#a) Read and apodize mask
mask=nmt.mask_apodization(hp.read_map("mask.fits",verbose=False),1.,apotype="Smooth")
#b) Read maps
mp_t,mp_q,mp_u=hp.read_map("maps.fits",field=[0,1,2],verbose=False)
#c) Read contaminants maps
tm_t,tm_q,tm_u=hp.read_map("temp.fits",field=[0,1,2],verbose=False)
#d) Create contaminated fields
# Spin-0
f0=nmt.NmtField(mask,[mp_t+tm_t],templates=[[tm_t]])
# Spin-2
f2=nmt.NmtField(mask,[mp_q+tm_q,mp_u+tm_u],templates=[[tm_q,tm_u]])
#e) Create binning scheme. We will use 20 multipoles per bandpower.
b=nmt.NmtBin(nside,nlb=20)
#f) Finally, we read our best guess for the true power spectrum. We will
# use this to:
# i) Compute the bias to the power spectrum from contaminant cleaning
# ii) Generate random realizations of our fields to compute the errors
l,cltt,clee,clbb,clte=np.loadtxt("cls.txt",unpack=True)
cl_02_th=np.array([clte,np.zeros_like(clte)])
#We then generate an NmtWorkspace object that we use to compute and store
#the mode coupling matrix. Note that this matrix depends only on the masks
#of the two fields to correlate, but not on the maps themselves (in this
#case both maps are the same.
w=nmt.NmtWorkspace()
w.compute_coupling_matrix(f0,f2,b)
#Since we suspect that our maps are contaminated (that's why we passed the
#contaminant templates as arguments to the NmtField constructor), we also
#need to compute the bias to the power spectrum caused by contaminant
#cleaning (deprojection bias).
cl_bias=nmt.deprojection_bias(f0,f2,cl_02_th)
#The function defined below will compute the power spectrum between two
#NmtFields f_a and f_b, using the coupling matrix stored in the
#NmtWorkspace wsp and subtracting the deprojection bias clb.
#Note that the most expensive operations in the MASTER algorithm are
#the computation of the coupling matrix and the deprojection bias. Since
#these two objects are precomputed, this function should be pretty fast!
# NOTE(review): compute_master() itself is not defined in this chunk --
# presumably defined elsewhere in the file; confirm before running.
#OK, we can now compute the power spectrum of our two input fields
cl_master=compute_master(f0,f2,w,cl_bias)
#Let's now compute the errors on this estimator using 100 Gaussian random
#simulations. In a realistic scenario you'd want to compute the full
#covariance matrix, but let's keep things simple.
nsim=100
cl_mean=np.zeros_like(cl_master)
cl_std=np.zeros_like(cl_master)
for i in np.arange(nsim) :
    # print() call form works on both Python 2 and 3
    # (was a py2-only print statement, a SyntaxError under py3)
    print("%d-th simulation" % i)
    t,q,u=hp.synfast([cltt,clee,clbb,clte],nside,verbose=False)
    f0_sim=nmt.NmtField(mask,[t],templates=[[tm_t]])
    f2_sim=nmt.NmtField(mask,[q,u],templates=[[tm_q,tm_u]])
    cl=compute_master(f0_sim,f2_sim,w,cl_bias)
    cl_mean+=cl
    cl_std+=cl*cl
#accumulate first and second moments, then convert to mean / std-dev
cl_mean/=nsim
cl_std=np.sqrt(cl_std/nsim-cl_mean*cl_mean)
#One final thing needs to be done before we can compare the result with
#the theory. The theory power spectrum must be binned into bandpowers in
#the same manner the data has. This is straightforward to do using just
#two nested function calls.
cl_02_th_binned=w.decouple_cell(w.couple_cell(cl_02_th))
#Now let's plot the result!
#(the second theory curve is the TB row, which is identically zero here)
plt.plot(b.get_effective_ells(),cl_02_th_binned[0],'r-',label='True power spectrum')
plt.plot(b.get_effective_ells(),cl_02_th_binned[1],'g-')
plt.errorbar(b.get_effective_ells(),cl_master[0],yerr=cl_std[0],
             fmt='ro',label='MASTER estimate (TE)')
plt.errorbar(b.get_effective_ells(),cl_master[1],yerr=cl_std[1],
             fmt='bo',label='MASTER estimate (TB)')
plt.ylim([-0.03,0.03]);
plt.legend(loc='upper right')
plt.xlabel('$\\ell$',fontsize=16); plt.ylabel('$C_\\ell$',fontsize=16);
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12035,
9078,
355,
27673,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
279,
4948,
1603,
355,
299,
16762,
198,
198,
2,
1212,
4226,
45064,
262,
779,
286,
399,
16762,
... | 2.70096 | 1,458 |
# -*- coding: utf-8 -*-
#
# Settings file for OpenSlides
# (development/test settings: DEBUG on, RAM search index, fast password
# hasher -- not suitable for production use)
#
from openslides.global_settings import * # noqa
# Use 'DEBUG = True' to get more details for server errors. Default for releases: False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Empty NAME with the sqlite3 backend: no on-disk database configured here.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Set timezone
TIME_ZONE = 'Europe/Berlin'
# Make this unique, and don't share it with anybody.
# NOTE(review): 'secred' is a placeholder -- generate a real random
# SECRET_KEY before any non-development deployment.
SECRET_KEY = 'secred'
# Add OpenSlides plugins to this list (see example entry in comment)
INSTALLED_PLUGINS = (
    'openslides_csv_export',
)
INSTALLED_APPS += INSTALLED_PLUGINS
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# Path to Whoosh search index
# Use RAM storage
HAYSTACK_CONNECTIONS['default']['STORAGE'] = 'ram'
# Use a faster password hasher
# (MD5 is deliberately weak/fast -- fine for tests, never for production)
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.MD5PasswordHasher',
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
16163,
2393,
329,
4946,
11122,
1460,
198,
2,
198,
198,
6738,
9808,
75,
1460,
13,
20541,
62,
33692,
1330,
1635,
220,
1303,
645,
20402,
198,
198,
2,
5765,
7... | 2.486683 | 413 |
'''
Utility functions for other modules.
'''
import os
import sys
import re
import math
import pandas as pd
import pytricia as pt
def normalize(astr):
    """Collapse every run of whitespace and/or dots in *astr* to one '_'."""
    return re.sub(r'[\s.]+', '_', astr)
def load_csv(filename):
    """Read *filename* as CSV into a DataFrame with normalize()d columns.

    Returns an empty DataFrame when the file is missing or unreadable.
    """
    try:
        frame = pd.read_csv(filename, skipinitialspace=True)
    except (IOError, OSError):
        frame = pd.DataFrame()  # unreadable/missing file -> empty frame
    else:
        frame.columns = [normalize(column) for column in frame.columns]
    return frame
def write_csv(df, output=None):
    """Write *df* as CSV (without the index) to *output*; return 0.

    :param df: the pandas DataFrame to serialize
    :param output: file-like object or path; defaults to sys.stdout
    :return: 0 on success

    Fix: the original bound ``sys.stdout`` as the default at import time,
    so later redirection of sys.stdout was silently ignored; the default
    is now resolved at call time.
    """
    if output is None:
        output = sys.stdout
    df.to_csv(output, index=False, mode='w')
    return 0
def unknown_fields(df, fields):
    """Return the entries of *fields* that are not column names of *df*."""
    known = set(df.columns)
    return [field for field in fields if field not in known]
def load_ipt(filename, ip_field=None):
    'turn a dataframe into ip lookup table -> pd.Series'
    # Build a longest-prefix-match table (pytricia) mapping each prefix in
    # the csv's ip column to its full row (a pandas Series).  Exits the
    # process on unusable input instead of raising.
    # prn(0, 'loading iptable {}'.format(filename))
    fname = filename if os.path.isfile(filename) else '{}.csv'.format(filename)
    try:
        df = load_csv(fname)
    except (OSError, IOError) as e:
        # NOTE(review): load_csv() already swallows IOError/OSError and
        # returns an empty frame, so this handler looks unreachable.
        # prn(0, 'error reading ip lookup file {}'.format(fname))
        sys.exit(1)
    if ip_field is None:
        # find suitable field: the first column whose first value is
        # accepted as a PyTricia key (i.e. parses as an ip prefix)
        tmp = pt.PyTricia()
        for field in df.columns:
            try:
                tmp[df[field].iloc[0]] = 'test'
                ip_field = field
                break
            except ValueError:
                continue
    elif ip_field not in df.columns:
        # prn(0, 'field {!r} not available as lookup column'.format(ip_field))
        sys.exit(1)
    # tidy the ip_field lookup column (also remove leading zeros?)
    df[ip_field] = df[ip_field].str.replace(' ', '')
    ipt = pt.PyTricia()
    for idx, row in df.iterrows():
        try:
            ip_idx = row[ip_field]
            # ensure /32 for bare addresses
            ip_idx = ip_idx if '/' in ip_idx else '{}/{}'.format(ip_idx, 32)
            if ipt.has_key(ip_idx): # noqa W601
                # has_key must be used to do an exact match, because
                # the "if ip_idx in ipt:" does a longest pfx match,
                # which is not what we want here...
                # prn(0, '>> ignoring duplicate entry for {}'.format(ip_idx))
                # prn(0, ' - already have', ','.join(str(x) for x in ipt[ip_idx]))
                # prn(0, ' - ignoring data', ','.join(str(x) for x in row))
                continue
            ipt[ip_idx] = row # stores reference to the Series
        except ValueError:
            # prn(0, 'Fatal, cannot create ip lookup table from dataframe')
            # prn(0, 'its index is not an ip address?')
            # prn(0, df.index)
            # prn(0, 'current index element: {}'.format(idx))
            # prn(0, 'current row is', row)
            sys.exit(1)
    return ipt
def cmd_tokens(command):
    """Split *command* into a list of (separator, value) pairs.

    Separators are ':', '=', ',' and '~'.  A backslash immediately before
    a separator escapes it (the backslash is replaced by the separator).
    A separator with no preceding value yields (sep, sep); a trailing
    value yields ('', value).
    """
    separators = (':', '=', ',', '~')
    backslash = '\\'
    pairs = []
    buf = []
    for ch in command:
        if ch not in separators:
            buf.append(ch)
        elif not buf:
            pairs.append((ch, ch))
        elif buf[-1] == backslash:
            buf[-1] = ch  # escaped separator becomes a literal character
        else:
            pairs.append((ch, ''.join(buf)))
            buf = []
    if buf:
        pairs.append(('', ''.join(buf)))
    return pairs
def cmd_str(cmd, lhs, rhs):
    """Rebuild the textual cli form 'lhs=cmd:rhs' from its basic fields.

    *lhs* and *rhs* are iterables of strings (None is treated as empty).
    """
    left = ','.join([] if lhs is None else lhs)
    right = ','.join([] if rhs is None else rhs)
    return '{}={}:{}'.format(left, cmd, right)
def cmd_error(df, lhs, rhs, errors):
    'Report fatal error(s) and exit'
    # NOTE(review): prn() and args are not defined anywhere in this module
    # chunk -- presumably injected/imported by the cli driver; confirm.
    # Also: if the caller's name is absent from globals(), func is None and
    # the func.__name__ accesses below raise AttributeError.
    caller_name = sys._getframe(1).f_code.co_name # is real/org func name
    func = globals().get(caller_name, None) # registered name may be different
    cmdstr = cmd_str(func.__name__, lhs, rhs)
    prn(0, '[{}] error in {!r}'.format(func.__name__, cmdstr))
    prn(0, '[{}] lhs {}'.format(func.__name__, lhs))
    prn(0, '[{}] rhs {}'.format(func.__name__, rhs))
    for error in errors:
        prn(0, '[{}] {}'.format(func.__name__, error))
    # list help when being verbose
    if args.v:
        prn(1, '[{}] doc'.format(func.__name__))
        prn(1, '---')
        prn(1, func.__doc__)
        prn(1, '---')
    sys.exit(1)
def pfx_proper(pfxstr):
    """Normalize a prefix shorthand to a well-formed 'a.b.c.d/len' string.

    Shorthands such as '10/8' become '10.0.0.0/8'; a bare address gets
    '/32'; a trailing '/' also defaults to 32.  Raises ValueError for
    anything that cannot be normalized.
    """
    try:
        if pfxstr.count('.') > 3:
            raise ValueError('{!r} invalid prefix string'.format(pfxstr))
        if '/' not in pfxstr:
            candidate = pfxstr + '/32'      # bare address -> host prefix
        elif pfxstr.endswith('/'):
            candidate = pfxstr + '32'       # trailing slash -> /32
        else:
            candidate = pfxstr
        addr, msk = candidate.split('/', 1)
        # pad short addresses ('10' -> '10.0.0.0') and keep four octets
        addr = '.'.join((addr + '.0.0.0').split('.')[0:4])
    except Exception:
        raise ValueError('cannot turn {!r} into a valid prefix'.format(pfxstr))
    return '{}/{}'.format(addr, msk)
#-- new pfx mangling
# addr /plen
# pfx a.b.c.d/e <-- addr/plen as strings
# | | | | |
# pil [a,b,c,d,e] <-- prefix integer list
# ------- -
# ^ ^
# | |
# v v
# ival (uint, numh) <-- ival_netw, ival_bcast
#
# pstr port1-port2/proto
#
#
#-- 1. unit conversions, these may raise ValueError
def uint2intq(uint):
    """Split an IPv4 address *uint* into its four dotted-quad integers."""
    if not (0 <= uint < 2**32):
        raise ValueError('uint ({}) invalid for ipv4'.format(uint))
    return [(uint >> shift) & 0xFF for shift in (24, 16, 8, 0)]
def uint2dotq(uint):
    """Render an IPv4 address *uint* as dotted-quad decimal text."""
    d1, d2, d3, d4 = uint2intq(uint)
    return '{}.{}.{}.{}'.format(d1, d2, d3, d4)
def dotq2uint(dotq):
    """Dotted-quad decimal (shorthands like '1.1' allowed) -> IPv4 uint.

    Missing octets are zero-padded on the right; octets must be 0..255.
    """
    if dotq.count('.') > 3:
        raise ValueError('{!r} is invalid dotq'.format(dotq))
    try:
        octets = [int(part) for part in dotq.split('.')]
        if len(octets) > 4 or any(o < 0 or o > 255 for o in octets):
            raise ValueError(dotq)
        octets += [0] * (4 - len(octets))   # right-pad shorthand forms
        return (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3]
    except Exception:
        raise ValueError('{!r} invalid dotquad'.format(dotq))
def numh2mask(numh):
    """Host count (power of two) -> network-mask uint, e.g. 256 -> /24 mask."""
    is_pow2 = numh > 0 and (numh & (numh - 1)) == 0
    if not is_pow2:
        raise ValueError('numhosts ({}) is not a power of two'.format(numh))
    return 2**32 - numh
def mask2numh(uint):
    """Network-mask uint -> host count, e.g. the /24 mask -> 256."""
    numh = 2**32 - uint
    if numh <= 0 or (numh & (numh - 1)) != 0:
        raise ValueError('uint ({}) is an invalid mask'.format(uint))
    return numh
#--2. ival conversions
def ival_network(ival):
    """Mask an (addr-uint, num_hosts) interval down to its network address.

    :param ival: (addr-uint, num_hosts) tuple; num_hosts must be a power
        of two for the derived mask to be meaningful
    :return: (network-uint, num_hosts) tuple

    Fix: the original was an unimplemented stub (``pass`` -> None); the
    stated intent 'mask down to network address' is implemented here using
    the same ``2**32 - numh`` mask used throughout this module.
    """
    uint, numh = ival
    return (uint & (2**32 - numh), numh)
def ival2pfx(ival):
    """(addr-uint, num_hosts) -> 'addr/len'; the address is NOT masked."""
    addr, numh = ival
    prefix_len = 32 - int(math.log(numh) / math.log(2))
    return '{}/{}'.format(uint2dotq(addr), prefix_len)
def ival2pfx_netw(ival):
    """(addr-uint, num_hosts) -> 'network_addr/len' (address masked down).

    Fix: the original called len2mask(plen), which is not defined anywhere
    in this module (guaranteed NameError).  The network mask for a block
    of *numh* hosts is simply ``2**32 - numh``, the form used elsewhere in
    this module (e.g. pfxnet_fromival).
    """
    uint, numh = ival
    plen = 32 - int(math.log(numh) / math.log(2))
    mask = 2**32 - numh  # was: len2mask(plen) -- undefined
    return '{}/{}'.format(uint2dotq(uint & mask), plen)
def ival2pfx_bcast(ival):
    """(addr-uint, num_hosts) -> 'bcast_addr/len'.

    Fix: the original called len2imask(plen), which is not defined anywhere
    in this module (guaranteed NameError).  The inverse mask is computed
    as elsewhere in this module (e.g. pfxbcast_fromival).
    """
    uint, numh = ival
    plen = 32 - int(math.log(numh) / math.log(2))
    imask = (2**32 - 1) ^ (2**32 - numh)  # was: len2imask(plen) -- undefined
    return '{}/{}'.format(uint2dotq(uint | imask), plen)
#-- old pfx mangling
def pfx_network(pfxstr):
    """Normalize *pfxstr* and return its network prefix 'a.b.c.d/len'."""
    try:
        result = pfx_fromival(pfx_toivalnetwork(pfxstr))
    except Exception:
        raise ValueError('cannot turn {!r} into a valid prefix'.format(pfxstr))
    return result
def pfx_broadcast(pfxstr):
    """Normalize *pfxstr* and return its broadcast prefix 'a.b.c.d/len'."""
    try:
        result = pfx_fromival(pfx_toivalbcast(pfxstr))
    except Exception:
        raise ValueError('cannot turn {!r} into a valid prefix'.format(pfxstr))
    return result
def pfx_hosts(pfxstr):
    """Yield /32 host prefixes from *pfxstr*'s own address up to its broadcast.

    Note: iteration starts at the (unmasked) host address, not at the
    network address of the prefix.
    """
    start, _ = pfx_toival(pfxstr)
    stop, _ = pfx_toivalbcast(pfxstr)
    for addr in range(start, stop + 1):
        yield pfx_fromival((addr, 1))
def pfx_range(pfxstr):
    'given a prefix, what is the range, 1.1.1.1/24 -> 1.1.1.0 - 255'
    # the prefix is masked to its network address first, so the left-hand
    # side of the result is the network address, not the input address
    uint, numh = pfx_toivalnetwork(pfxstr)
    mind = uint2dotq(uint).split('.')
    maxd = uint2dotq(uint + numh - 1).split('.')
    # keep only the octets of the maximum that differ from the minimum, so
    # the right-hand side shows just the varying tail (e.g. '255')
    difd = filter(None, ['' if x==y else y for (x,y) in zip(mind, maxd)])
    return '{} - {}'.format('.'.join(mind), '.'.join(difd))
def pfx_fromival(ival):
    """(host-uint, num_hosts) -> 'a.b.c.d/len'; the host address is NOT
    masked down to its network address."""
    uint, numh = ival
    plen = 32 - int(math.log(numh) / math.log(2))
    quads = [(uint >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return '{}.{}.{}.{}/{}'.format(quads[0], quads[1], quads[2], quads[3], plen)
def pfxnet_fromival(ival):
    """(host-uint, num_hosts) -> network prefix 'a.b.c.d/len' (masked)."""
    uint, numh = ival
    plen = 32 - int(math.log(numh) / math.log(2))
    net = uint & (2**32 - numh)   # mask host bits away
    quads = [(net >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return '{}.{}.{}.{}/{}'.format(quads[0], quads[1], quads[2], quads[3], plen)
def pfxbcast_fromival(ival):
    """(host-uint, num_hosts) -> broadcast prefix 'a.b.c.d/len'."""
    uint, numh = ival
    bcast = uint | ((2**32 - 1) ^ (2**32 - numh))  # set all host bits
    plen = 32 - int(math.log(numh) / math.log(2))
    quads = [(bcast >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return '{}.{}.{}.{}/{}'.format(quads[0], quads[1], quads[2], quads[3], plen)
def pfx_toival(pfx):
    """Normalize *pfx* and return (host-uint, num_hosts); no masking.

    Use pfx_toivalnetwork() to mask down to the network address.
    """
    a, b, c, d, plen = map(int, re.split(r'\.|/', pfx_proper(pfx)))
    uint = (a << 24) | (b << 16) | (c << 8) | d
    return (uint, 2**(32 - plen))
def pfx_toivalnetwork(pfx):
    """Normalize *pfx* and return (network-uint, num_hosts); masks down."""
    a, b, c, d, plen = map(int, re.split(r'\.|/', pfx_proper(pfx)))
    numh = 2**(32 - plen)
    uint = (a << 24) | (b << 16) | (c << 8) | d
    return (uint & (2**32 - numh), numh)
def pfx_toivalbcast(pfx):
    """Normalize *pfx* and return (broadcast-uint, num_hosts)."""
    a, b, c, d, plen = map(int, re.split(r'\.|/', pfx_proper(pfx)))
    numh = 2**(32 - plen)
    uint = (a << 24) | (b << 16) | (c << 8) | d
    invmask = (2**32 - 1) ^ (2**32 - numh)  # host-bit mask
    return (uint | invmask, numh)
def pfx_summary(pfxlst):
    """Summarize host prefixes into a minimum set of network prefixes.

    Every input prefix is normalized and masked down to its network
    (start, length) interval, the interval list is reduced by
    ival_summary(), and the surviving intervals are rendered back to
    prefix strings.  IPv4 only ('blatant disregard for ipv6').

    Fixes applied: ~28 lines of commented-out dead code (an older inline
    copy of ival_summary's loop) removed; append-loop replaced with a
    comprehension.
    """
    intervals = [pfx_toivalnetwork(pfx) for pfx in pfxlst]
    return [pfx_fromival(ival) for ival in ival_summary(intervals)]
def ival_combine(x, y):
    """Try to merge two (start, length) intervals.

    Returns (merged, None) when the intervals can be combined, i.e. when
    one lies inside the other, they are identical, or they are adjacent
    and of equal length; otherwise returns (x, y) unchanged.  A None
    argument is passed through as (other, None).
    """
    if y is None:
        return (x, y)
    if x is None:
        return (y, x)
    if x[1] == y[1]:
        # equal-length intervals: merge when adjacent or identical
        if x[0] + x[1] == y[0]:            # x immediately left of y
            return (x[0], 2 * x[1]), None
        if y[0] + y[1] == x[0]:            # y immediately left of x
            return (y[0], 2 * y[1]), None
        if x[0] == y[0]:                   # same interval
            return (x, None)
    # unequal lengths or non-adjacent: keep the enclosing interval, if any
    if x[0] <= y[0] and y[0] + y[1] <= x[0] + x[1]:  # y lies in x
        return (x, None)
    if y[0] <= x[0] and x[0] + x[1] <= y[0] + y[1]:  # x lies in y
        return (y, None)
    return (x, y)  # no joy
def ival_summary(ivals):
    'summarize a list intervals (uint, numh) into minimum set of intervals'
    # donot use masking
    # Intervals are (start, length) tuples; sorting then reversing lets
    # heap.pop() hand them out in ascending (start, length) order.
    # reverse since this sorts first on uint, then on length in ascending order
    heap = list(reversed(sorted(ivals)))
    # reduce heap to minimum amount of intervals
    rv = []
    while len(heap):
        x = heap.pop()
        y = heap.pop() if len(heap) else None
        if y:
            x, y = ival_combine(x, y) # y is None when x combines x+y
            if y:
                heap.append(y) # push back for later combine attempt
            else:
                heap.append(x) # combined range back on heap
                continue # and start over
        # x could not be merged with its successor on the heap; try to
        # merge it with the most recently accepted result interval instead
        y = rv.pop() if len(rv) else None
        if y:
            x, y = ival_combine(x, y) # y is None when x combines x+y
            if y:
                rv.append(y) # could not combine, both goto rv
                rv.append(x) # make sure to keep rv ordering intact
            else:
                heap.append(x) # combined range back on heap
        else:
            rv.append(x)
    # intervals need to be aligned on power of 2 intervals, so a given
    # single interval might yield multiple network prefixes
    # (s, l) -> ...
    #
    return rv
def ival_aspfxs(ival):
    """Turn a (host-uint, num_hosts)-tuple into a minimal list of net-pfxs.

    The interval gives the first valid address and the size of its
    enclosing power-of-two block; the returned prefixes together match
    exactly the addresses from that first address up to the block's
    broadcast address.  Examples:
    - 1.1.1.128/24 => valid ip's are 1.1.1.128-.255 = [1.1.1.128/25]
    - 1.1.1.127/24 => valid 1.1.1.127-.255 = [1.1.1.127/32, 1.1.1.128/25]

    # *-----:-----|  start == network address   -> 1 net pfx
    # |-----:-----*  start == bcast address     -> 1 host pfx
    # |---*-:-----|  start inside left half     -> upper half + recurse left
    # |-----:--*--|  start inside right half    -> recurse right

    Fixes applied: the recursive calls were spelled ``ival2pfxs`` -- a
    name that does not exist (NameError); the unreachable final branch
    used the invalid format spec '{!}' (ValueError at format time); a
    leftover debug print was removed.
    """
    uint, numh = ival
    half = numh // 2
    plen = 32 - int(math.log(numh) / math.log(2))
    mask = 2**32 - numh
    imsk = (2**32 - 1) ^ mask
    nint = uint & mask          # network address of the block
    bint = uint | imsk          # broadcast address of the block
    mint = nint + half          # first address of the upper half
    if plen == 32:              # single address
        return [pfx_fromival(ival)]
    if uint == nint:            # aligned on start of range
        return [pfx_fromival(ival)]
    if uint == bint:            # only the broadcast address remains
        return [pfx_fromival((uint, 1))]
    if uint == mint:            # exactly the upper half remains
        return [pfx_fromival((uint, half))]
    if uint < mint:             # inside left half: upper half + recurse left
        return [pfx_fromival((mint, half))] + ival_aspfxs((uint, half))
    return ival_aspfxs((uint, half))  # inside right half: recurse
def pp_fromstr(ppstr):
    'turn port/proto into 0.proto.port1.port2/32 prefix'
    # TODO: unimplemented stub -- currently returns None.  See
    # ports_toppfx() for the intended '0.proto.p1.p2/32' encoding.
    pass
def pp_fromint(uint):
    """Decode a '0.proto.port1.port2' address uint into (port, proto)."""
    proto = (uint >> 16) & 0xff
    port = uint & 0xffff
    return (port, proto)
def ports_fromppfx(ppfx):
    """'0.proto.p1.p2[/len]' -> 'port[-lastport]/proto' string.

    proto is the second octet; the port is p1*256+p2; a mask shorter than
    /32 yields a port range.

    Fixes applied: the computed string was never returned (function always
    returned None); the upper end of a port range was off by one (a /31
    over port 80 covers ports 80-81, not 80-82).
    """
    ppfx = ppfx if '/' in ppfx else ppfx + '/32'  # ensure a mask
    fields = list(map(int, re.split(r'\.|/', ppfx)))
    proto = fields[1]
    port = fields[2] * 256 + fields[3]
    nports = 2**(32 - fields[4])
    if nports > 1:
        return '{}-{}/{}'.format(port, port + nports - 1, proto)
    return '{}/{}'.format(port, proto)
def ports_toppfx(portstr):
    """'port[-port]/proto' -> list of '0.proto.p1.p2/32' prefixes.

    e.g. '80/tcp' -> ['0.6.0.80/32'].  Raises ValueError for strings
    without a protocol, malformed strings, or unknown protocols.

    Fixes applied: the original referenced ``self._name_tonum`` from a
    module-level function (guaranteed NameError) and read the protocol
    from ``x[1]`` instead of the last split field, so 'a-b/proto' used the
    stop-port as the protocol.  A minimal IANA name->number table is used
    instead; numeric protocol strings are also accepted.
    """
    # minimal IANA protocol-name -> number map (see IANA protocol registry)
    name_tonum = {'icmp': 1, 'tcp': 6, 'udp': 17, 'sctp': 132}
    if '/' not in portstr:
        raise ValueError('{} is missing protocol'.format(portstr))
    x = re.split('-|/', portstr)
    if not len(x) in (2, 3):
        raise ValueError('{!r} is malformed'.format(portstr))
    proto_name = x[-1].lower()
    if proto_name not in name_tonum and not proto_name.isdigit():
        raise ValueError('{!r} has unknown protocol'.format(portstr))
    try:
        proto = name_tonum[proto_name] if proto_name in name_tonum else int(proto_name)
        start = int(x[0])
        stop = int(x[1]) if len(x) == 3 else start
        rv = []
        for port in range(start, stop + 1):
            p1 = (port >> 8) & 0xff
            p2 = port & 0xff
            rv.append('0.{}.{}.{}/32'.format(proto, p1, p2))
    except ValueError:
        raise ValueError('{!r} not valid portstring'.format(portstr))
    return rv
def pfx_summary_org(pfxlst):
    'remove redundancy from a list of (im)properly formatted pfx-strings'
    # Legacy predecessor of pfx_summary()/ival_summary(); kept for reference.
    # blatant disregard for ipv6
    rv, heap = [], []
    for pfx in map(pfx_proper, pfxlst): # properly formatted pfxs
        x = list(map(int, re.split('\.|/', pfx)))
        uint = x[0] * 16777216 + x[1] * 65536 + x[2] * 256 + x[3]
        numh = 2**(32-x[4])
        mask = 2**32 - numh
        heap.append((uint & mask, numh))
    # reverse since this sorts first on uint, then on length in ascending order
    heap = list(reversed(sorted(heap)))
    # absorb/join or keep adjacent (start, length)-intervals
    while len(heap):
        x = heap.pop()
        if len(heap):
            y = heap.pop()
            if x[1] == y[1] and sum(x) == y[0]:
                heap.append((x[0], x[1] + y[1])) # x joins y
            elif x[0] <= y[0] and sum(y) <= sum(x):
                heap.append(x) # x absorbs y
            else:
                heap.append(y) # y may absorb/join next one
        # NOTE(review): when x joined/absorbed y above, the merged interval
        # goes back on the heap yet the old x still falls through to the rv
        # comparison below -- this looks like it can emit overlapping
        # output; ival_summary() restarts its loop instead.  Verify before
        # reusing this legacy version.
        if len(rv):
            x, y = rv.pop(), x
            if x[1] == y[1] and sum(x) == y[0]:
                rv.append((x[0], x[1] + y[1])) # x joins y
            elif x[0] <= y[0] and sum(y) <= sum(x):
                rv.append(x) # x absorbs y
            else:
                rv.append(x)
                rv.append(y) # y may absorb/join next one
        else:
            rv.append(x) # no joy, x in final result
    return [pfx_fromival(x) for x in rv]
def str2list(a_string):
    """Split *a_string* on spaces and commas, discarding empty pieces."""
    try:
        parts = re.split(' |,', a_string)
        return [part for part in parts if part]
    except (TypeError, ValueError):
        raise ValueError('{!r} doesnt look like a string'.format(a_string))
if __name__ == '__main__':
    # ad-hoc smoke test: expand and summarize a /31 whose address is not
    # its own network address
    pfx = '1.1.1.249/31'
    print('pfx', pfx, '-> netpfxs', ival_aspfxs(pfx_toival(pfx)))
    print('pfx', pfx, '-> summ', pfx_summary([pfx]))
| [
7061,
6,
198,
18274,
879,
5499,
329,
584,
13103,
13,
198,
7061,
6,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
302,
198,
11748,
10688,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
83,
26654,
355,
42975,
198,
198... | 2.087859 | 10,164 |
import json
import logging
from apps.report.worker.report_generator import ReportGeneratorAction
from apps.utils.timezone_utils import str_utc
from ..action import BaseAction
logger = logging.getLogger(__name__)
| [
11748,
33918,
198,
11748,
18931,
198,
198,
6738,
6725,
13,
13116,
13,
28816,
13,
13116,
62,
8612,
1352,
1330,
6358,
8645,
1352,
12502,
198,
6738,
6725,
13,
26791,
13,
2435,
11340,
62,
26791,
1330,
965,
62,
315,
66,
198,
198,
6738,
114... | 3.483871 | 62 |
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import pickle
import random
from typing import List
import numpy as np
import torch
from dataclasses import dataclass
from pytorch_transformers import BertTokenizer
from torch.utils.data import Dataset
from tqdm import tqdm
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
@dataclass
if __name__ == '__main__':
# tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
# file_name = "./CoQA-dataset/dev.json"
# save_path = "./coqa-dev.pkl"
#
# results = prepare_dataset(file_name, tokenizer, max_question_len=15, max_sequence_len=24, samples_no=10, save_path=save_path)
# print("results: {:,}".format(len(results)))
from torch.utils.data import DataLoader
dataset = CoQAOrderDataset(json_file="./coqa-dataset/dev.json", pkl_file="./coqa-dev.pkl",
max_question_len=20, max_sequence_len=24, samples_no=5)
loader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=0, collate_fn=CoQAOrderDataset.collate_fn)
for i, d in enumerate(tqdm(loader)):
if i == 10:
break
print(d)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
4738,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
299,
321... | 2.331081 | 592 |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
calicoctl diags [--log-dir=<LOG_DIR>]
Description:
Save diagnostic information
Options:
--log-dir=<LOG_DIR> The directory for logs [default: /var/log/calico]
"""
import sys
import os
from datetime import datetime
import tarfile
import tempfile
import traceback
import subprocess
import re
from etcd import EtcdException
from pycalico.datastore import DatastoreClient
from shutil import copytree, ignore_patterns
from utils import print_paragraph, enforce_root
def diags(arguments):
    """
    Main dispatcher for diags commands. Calls the corresponding helper function.

    :param arguments: A dictionary of arguments already processed through
    this file's docstring with docopt
    :return: None (the process exits with status 0)
    """
    # The command has to be run as root for ipset collections (and iptables)
    enforce_root()
    print("Collecting diagnostics")
    # save_diags() is not visible in this chunk -- presumably defined
    # further down in the file; it gathers logs from the --log-dir path.
    save_diags(arguments["--log-dir"])
    sys.exit(0)
temp_diags_dir = None
class DiagsErrorWriter(object):
    """
    Context manager used to handle error handling when writing diagnostics.
    In the event of an exception being thrown within the context manager, the
    details of the exception are written to file and the exception is
    swallowed. This allows the diagnostics to retrieve as much information as
    possible.
    """
    # NOTE(review): self.temp_dir and self.filename are read below, but no
    # __init__ is visible in this chunk -- presumably assigned by a
    # constructor defined elsewhere; confirm against the full file.

    def __enter__(self):
        """
        Open the diags file for writing, and return the file object.
        :return: The file object.
        """
        self.file = open(os.path.join(self.temp_dir, self.filename), "w")
        return self.file

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Close the diagnostics file and if an error occurred, write that into
        the file.
        :param exc_type: The exception type, or None.
        :param exc_val: The exception instance, or None.
        :param exc_tb: The exception traceback, or None.
        :return: False for KeyboardInterrupt exceptions, or no exceptions,
        True for all other exceptions (exception is traced in file).
        """
        if exc_type is KeyboardInterrupt:
            rc = False  # let Ctrl-C propagate to the caller
        elif exc_type is None:
            rc = False
        else:
            # print() call form works on Python 2 and 3 (the original used
            # a py2-only print statement, a SyntaxError under py3)
            print(" - Error gathering diagnostics")
            self.file.write("\nError gathering diagnostics\n")
            self.file.write("Exception: %s(%s)\n" % (exc_type, exc_val))
            traceback.print_tb(exc_tb, None, self.file)
            rc = True  # swallow the exception; details are in the file
        self.file.close()
        return rc
| [
2,
15069,
1853,
3395,
292,
42248,
27862,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
92... | 2.867907 | 1,075 |
class MuseumpyError(Exception):
    """
    General museumpy error class; the superclass that all other errors
    raised by this package derive from.
    """
class MuseumPlusError(MuseumpyError):
    """
    Raised when an error occurs while communicating with MuseumPlus.
    """
class XMLParsingError(MuseumpyError):
    """
    Raised when parsing the XML fails.
    """
class NoMoreRecordsError(MuseumpyError):
    """
    Raised when all records have been loaded (or no records are present).
    """
| [
4871,
32887,
32152,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3611,
9594,
17860,
4049,
1398,
284,
2148,
257,
2208,
4871,
329,
477,
584,
8563,
198,
220,
220,
220,
37227,
628,
198,
4871,
9594,
17860,
12331,
7... | 3.223602 | 161 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import re
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import dask.array as da
import numpy as np
import pandas as pd
import xarray as xr
from fsspec.spec import AbstractFileSystem
from tifffile import TiffFile, TiffFileError, TiffSequence, imread
from tifffile.tifffile import TiffTags
from .. import constants, exceptions, types
from ..dimensions import (
DEFAULT_CHUNK_DIMS,
DEFAULT_DIMENSION_ORDER,
DEFAULT_DIMENSION_ORDER_LIST_WITH_SAMPLES,
REQUIRED_CHUNK_DIMS,
DimensionNames,
)
from ..metadata import utils as metadata_utils
from ..utils import io_utils
from .reader import Reader
TIFF_IMAGE_DESCRIPTION_TAG_INDEX = 270
class TiffGlobReader(Reader):
r"""
Wraps the tifffile imread API to provide the same aicsimageio Reader API but for
multifile tiff datasets (and other tifffile supported) images.
Parameters
----------
glob_in: Union[PathLike, List[PathLike]]
Glob string that identifies all files to be loaded or a list
of paths to the files as returned by glob.
indexer: Union[Callable, pandas.DataFrame]
If callable, should consume each filename and return a pd.Series with series
index corresponding to the dimensions and values corresponding to the array
index of that image file within the larger array.
Default: None (Look for 4 numbers in the file name and use them as
S, T, C, and Z indices.)
scene_glob_character: str
Character to represent different scenes.
Default: "S"
chunk_dims: Union[str, List[str]]
Which dimensions to create chunks for.
Default: DEFAULT_CHUNK_DIMS
Note: Dimensions.SpatialY, Dimensions.SpatialX, and DimensionNames.Samples,
will always be added to the list if not present during dask array
construction.
dim_order: Optional[Union[List[str], str]]
A string of dimensions to be applied to all array(s) or a
list of string dimension names to be mapped onto the list of arrays
provided to image. I.E. "TYX".
Default: None (guess dimensions for single array or multiple arrays)
channel_names: Optional[Union[List[str], List[List[str]]]]
A list of string channel names to be applied to all array(s) or a
list of lists of string channel names to be mapped onto the list of arrays
provided to image.
Default: None (create OME channel IDs for names for single or multiple arrays)
single_file_shape : Optional[Tuple]
Expected shape for a single file of the set. If not provided, will attempt to
determine the shape from the first file found in the glob.
Default : None
single_file_dims : Optional[Tuple]
Dimensions that correspond to the data dimensions of a single file in the glob.
Default : ('Y', 'X')
Examples
--------
# Given files with names like "Position001_t002_c03_z04.tif"
reader = TiffGlobReader("path/to/data/*.tif")
# We can use this to read single image tiffs generated by MicroManager
# Micromanager creates directories for each position so we need to recursively glob
# for the images files and pass the list to TiffGlobReader. Note that all images are
# named according to img_channel000_position001_time000000003_z004.tif"
import glob
files = glob.glob("path/to/data/**/*.tif", recursive=True)
# since the numbers in Micromanager files are not in STCZ order we
# need to use a different indexer than default. For convenience
# when working MicroManager generated files you can use the provided
# TiffGlobReader.MicroManagerIndexer
mm_reader = TiffGlobReader(files, indexer=TiffGlobReader.MicroManagerIndexer)
# as an example of making a custom indexer you can manually create
# the MicroManagerIndexer like so:
import pandas as pd
from pathlib import Path
import re
def mm_indexer(path_to_img):
inds = re.findall(r"\d+", Path(path_to_img).name)
series = pd.Series(inds, index=['C', 'S', 'T', 'Z']).astype(int)
return series
mm_reader = TiffGlobReader(files, indexer=mm_indexer)
"""
    @staticmethod
    @property
    @staticmethod
    @staticmethod
    # NOTE(review): the four stacked decorators above all wrap the single
    # method below, which cannot be intended -- they look like remnants of
    # other methods whose bodies are missing from this chunk; confirm
    # against the full source file before relying on this class.
    def MicroManagerIndexer(path_to_img: Union[str, Path]) -> pd.Series:
        """
        An indexer function to transform Micromanager MDA tiff filenames
        to indices. To use::

            reader = TiffGlobReader(files, indexer=TiffGlobReader.MicroManagerIndexer)

        Expects images to have names of the form:
        img_channel_[0-9]+_position[0-9]+_time[0-9]+_z[0-9]+.tif[f]

        Parameters
        ----------
        path_to_img : [str, Path]
            The path to an image.

        Returns
        -------
        pd.Series
        """
        # the four numeric groups in the name map to C, S, T, Z in order
        inds = re.findall(r"\d+", Path(path_to_img).name)
        series = pd.Series(inds, index=["C", "S", "T", "Z"]).astype(int)
        return series
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
15095,
198,
11748,
302,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
3108,
8019,
1330,
10644,
1... | 2.760823 | 1,848 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from shapely.geometry import LineString
from shapely.ops import linemerge
# get_shapes_from_osm('1257117')
# get_shapes_from_osm('1254451')
# NOTE(review): get_shapes_from_osm() is not defined in this chunk -- its
# definition appears to have been stripped from the file; as shown, the
# call below raises NameError.
get_shapes_from_osm('1254455')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
11748,
7007,
201,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6910,
10100,
201,
198,
6738,
5485,
306,
13,
2... | 2.333333 | 105 |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-group-service."""
__version__ = "2.11.5"
__author__ = "AccelByte"
__email__ = "dev@accelbyte.net"
# pylint: disable=line-too-long
# configuration
from .wrappers import create_group_configuration_admin_v1
from .wrappers import create_group_configuration_admin_v1_async
from .wrappers import delete_group_configuration_global_rule_admin_v1
from .wrappers import delete_group_configuration_global_rule_admin_v1_async
from .wrappers import delete_group_configuration_v1
from .wrappers import delete_group_configuration_v1_async
from .wrappers import get_group_configuration_admin_v1
from .wrappers import get_group_configuration_admin_v1_async
from .wrappers import initiate_group_configuration_admin_v1
from .wrappers import initiate_group_configuration_admin_v1_async
from .wrappers import list_group_configuration_admin_v1
from .wrappers import list_group_configuration_admin_v1_async
from .wrappers import update_group_configuration_admin_v1
from .wrappers import update_group_configuration_admin_v1_async
from .wrappers import update_group_configuration_global_rule_admin_v1
from .wrappers import update_group_configuration_global_rule_admin_v1_async
# group
from .wrappers import create_new_group_public_v1
from .wrappers import create_new_group_public_v1_async
from .wrappers import delete_group_admin_v1
from .wrappers import delete_group_admin_v1_async
from .wrappers import delete_group_predefined_rule_public_v1
from .wrappers import delete_group_predefined_rule_public_v1_async
from .wrappers import delete_group_public_v1
from .wrappers import delete_group_public_v1_async
from .wrappers import get_group_list_admin_v1
from .wrappers import get_group_list_admin_v1_async
from .wrappers import get_group_list_public_v1
from .wrappers import get_group_list_public_v1_async
from .wrappers import get_single_group_admin_v1
from .wrappers import get_single_group_admin_v1_async
from .wrappers import get_single_group_public_v1
from .wrappers import get_single_group_public_v1_async
from .wrappers import update_group_custom_attributes_public_v1
from .wrappers import update_group_custom_attributes_public_v1_async
from .wrappers import update_group_custom_rule_public_v1
from .wrappers import update_group_custom_rule_public_v1_async
from .wrappers import update_group_predefined_rule_public_v1
from .wrappers import update_group_predefined_rule_public_v1_async
from .wrappers import update_patch_single_group_public_v1
from .wrappers import update_patch_single_group_public_v1_async
from .wrappers import update_single_group_v1
from .wrappers import update_single_group_v1_async
# group_member
from .wrappers import accept_group_invitation_public_v1
from .wrappers import accept_group_invitation_public_v1_async
from .wrappers import accept_group_join_request_public_v1
from .wrappers import accept_group_join_request_public_v1_async
from .wrappers import cancel_group_join_request_v1
from .wrappers import cancel_group_join_request_v1_async
from .wrappers import get_group_members_list_admin_v1
from .wrappers import get_group_members_list_admin_v1_async
from .wrappers import get_group_members_list_public_v1
from .wrappers import get_group_members_list_public_v1_async
from .wrappers import get_user_group_information_public_v1
from .wrappers import get_user_group_information_public_v1_async
from .wrappers import invite_group_public_v1
from .wrappers import invite_group_public_v1_async
from .wrappers import join_group_v1
from .wrappers import join_group_v1_async
from .wrappers import kick_group_member_public_v1
from .wrappers import kick_group_member_public_v1_async
from .wrappers import leave_group_public_v1
from .wrappers import leave_group_public_v1_async
from .wrappers import reject_group_invitation_public_v1
from .wrappers import reject_group_invitation_public_v1_async
from .wrappers import reject_group_join_request_public_v1
from .wrappers import reject_group_join_request_public_v1_async
# group_roles
from .wrappers import create_member_role_admin_v1
from .wrappers import create_member_role_admin_v1_async
from .wrappers import delete_member_role_admin_v1
from .wrappers import delete_member_role_admin_v1_async
from .wrappers import delete_member_role_public_v1
from .wrappers import delete_member_role_public_v1_async
from .wrappers import get_member_roles_list_admin_v1
from .wrappers import get_member_roles_list_admin_v1_async
from .wrappers import get_member_roles_list_public_v1
from .wrappers import get_member_roles_list_public_v1_async
from .wrappers import get_single_member_role_admin_v1
from .wrappers import get_single_member_role_admin_v1_async
from .wrappers import update_member_role_admin_v1
from .wrappers import update_member_role_admin_v1_async
from .wrappers import update_member_role_permission_admin_v1
from .wrappers import update_member_role_permission_admin_v1_async
from .wrappers import update_member_role_public_v1
from .wrappers import update_member_role_public_v1_async
# member_request
from .wrappers import get_group_invitation_request_public_v1
from .wrappers import get_group_invitation_request_public_v1_async
from .wrappers import get_group_join_request_public_v1
from .wrappers import get_group_join_request_public_v1_async
| [
2,
15069,
357,
66,
8,
33448,
4013,
5276,
40778,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
770,
318,
11971,
3788,
422,
4013,
5276,
40778,
3457,
11,
329,
11247,
198,
2,
290,
8733,
2800,
534,
1664,
2775,
4706,
13,
198,
2,
220,
198,
2,... | 3.08589 | 1,793 |
# coding: utf-8
# ASR post-processing corrector pred vs gold: TRAIN
import torch
from afterburner_pretrained_model import afterburner_pretrained_model
import matplotlib.pyplot as plt
from progress_bar import progress_bar
from tqdm.auto import tqdm
import logging
logging.getLogger('nemo_logger').setLevel(logging.ERROR)
if __name__=="__main__":
language='vietnamese'
phase='build'
release='400'
model_fn='save/new_afterburner/afterburner_302.pt'
new_model_fn='save/new_afterburner/afterburner_400.pt'
epochs = 1000
afterburner_train(language, phase, release, model_fn, new_model_fn, epochs)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
7054,
49,
1281,
12,
36948,
3376,
273,
2747,
3691,
3869,
25,
29125,
1268,
198,
198,
11748,
28034,
198,
6738,
706,
10899,
263,
62,
5310,
13363,
62,
19849,
1330,
706,
10899,
263,
62,
5310,
13363,... | 2.705628 | 231 |
import pandas as pd
from dateutil import parser
from pm4pymdl.objects.mdl.exporter import exporter as mdl_exporter
from pm4pymdl.objects.ocel.exporter import exporter as ocel_exporter
from sapextractor.utils.dates import timestamp_column_from_dt_tm
from pandas.core.frame import DataFrame
from sapextractor.database_connection.interface import DatabaseConnection
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
3128,
22602,
1330,
30751,
198,
6738,
9114,
19,
79,
4948,
25404,
13,
48205,
13,
9132,
75,
13,
1069,
26634,
1330,
1033,
4337,
355,
285,
25404,
62,
1069,
26634,
198,
6738,
9114,
19,
79,
4948,
... | 3.401869 | 107 |
'''
mod_peak.py
usage: python mod_peak.py narrowPeak_file dataset.dat output.dat
modify the `chipseq_peak` values to 1 if a window from narrowPeak file appears in the dataset.
'''
import sys
nps = open(sys.argv[1])
inp = open(sys.argv[2])
out = open(sys.argv[3], 'w')
inpl = inp.readlines()
npsl = nps.readlines()
for i in range(len(inpl)):
inpl[i] = inpl[i].split()
for i in range(len(npsl)):
npsl[i] = npsl[i].split()
#del npsl[0] # this element has all the column names, not actual values
wset = []
m = []
res = ''
# extract first column from inpl
first_col_in_inpl = [int(el[0]) for el in inpl]
peak_cnt = 0
for el in npsl:
# scale num
num = int(el[1])
num_inpl = scale(num)
if num_inpl == 0:
num_inpl = num
# now search for num_inpl in the first_col_in_inpl array
index = bin_search(num_inpl, first_col_in_inpl)
if index != -1:
if el[3] == 'B': # if bound.
inpl[index][6] = '1'
peak_cnt += 1
'''
prev = inpl[index][5]
if float(el[6]) > float(prev):
inpl[index][-1] = el[6]
peak_cnt += 1
'''
else:
print "Not found", num_inpl
for el in inpl:
res += el[0] + '\t' + el[1] + '\t' + el[2] + '\t' + el[3] + '\t' + el[4] + '\t' + el[5] + '\t' + el[6] + '\n'
print('Number of peaks: ' + str(peak_cnt))
out.write(res)
out.close()
nps.close()
inp.close()
| [
7061,
6,
198,
220,
220,
220,
953,
62,
36729,
13,
9078,
198,
220,
220,
220,
8748,
25,
21015,
953,
62,
36729,
13,
9078,
7135,
6435,
461,
62,
7753,
27039,
13,
19608,
5072,
13,
19608,
198,
220,
220,
220,
13096,
262,
4600,
35902,
41068,
... | 2.014144 | 707 |
from unittest import TestCase
from mock import Mock, patch, ANY
from samtranslator.policy_template_processor.template import Template
from samtranslator.policy_template_processor.exceptions import InvalidParameterValues, InsufficientParameterValues
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
15290,
1330,
44123,
11,
8529,
11,
15529,
198,
198,
6738,
6072,
7645,
41880,
13,
30586,
62,
28243,
62,
41341,
13,
28243,
1330,
37350,
198,
6738,
6072,
7645,
41880,
13,
30586,
62,
28243,
... | 4.464286 | 56 |
from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
from .item import Item
__all__ = ['MediaShare', 'MediaShareInterface']
| [
6738,
11485,
76,
11463,
1330,
14161,
44,
11463,
11,
5949,
72,
39317,
14881,
198,
6738,
11485,
76,
11463,
13,
19199,
1330,
5045,
27823,
11,
4377,
6030,
198,
198,
6738,
764,
9186,
1330,
9097,
198,
198,
834,
439,
834,
796,
37250,
13152,
... | 3.5 | 50 |
import datetime
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from project.models import *
post_save.connect(create_profile, sender=User)
| [
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
14881,
12982,
11,
2448,
8481,
35608,
259,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
... | 3.435294 | 85 |
# The following source code was originally obtained from:
# https://github.com/tensorflow/model-optimization/blob/v0.7.0/tensorflow_model_optimization/python/core/quantization/keras/quantize.py
# ==============================================================================
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantization API functions for tf.keras models."""
import tensorflow as tf
from tensorflow_model_optimization.python.core.quantization.keras import quantize_annotate as quantize_annotate_mod
from tensorflow_model_optimization.python.core.quantization.keras import quantize as quantize_mod
from .default_quantize_scheme import DefaultQuantizeScheme
def _add_quant_wrapper(layer):
"""Add annotation wrapper."""
# Already annotated layer. No need to wrap.
if isinstance(layer, quantize_annotate_mod.QuantizeAnnotate):
return layer
if isinstance(layer, tf.keras.Model):
raise ValueError(
'Quantizing a tf.keras Model inside another tf.keras Model is not supported.'
)
return quantize_annotate_mod.QuantizeAnnotate(layer)
def quantize_scope(*args):
"""Scope which can be used to deserialize quantized Keras models and layers.
Under `quantize_scope`, Keras methods such as `tf.keras.load_model` and
`tf.keras.models.model_from_config` will be able to deserialize Keras models
and layers which contain quantization classes such as `QuantizeConfig`
and `Quantizer`.
Example:
```python
tf.keras.models.save_model(quantized_model, keras_file)
with quantize_scope():
loaded_model = tf.keras.models.load_model(keras_file)
# If your quantized model uses custom objects such as a specific `Quantizer`,
# you can pass them to quantize_scope to deserialize your model.
with quantize_scope({'FixedRangeQuantizer', FixedRangeQuantizer}
loaded_model = tf.keras.models.load_model(keras_file)
```
For further understanding, see `tf.keras.utils.custom_object_scope`.
Args:
*args: Variable length list of dictionaries of `{name, class}` pairs to add
to the scope created by this method.
Returns:
Object of type `CustomObjectScope` with quantization objects included.
"""
quantization_objects = DefaultQuantizeScheme._QUANTIZATION_OBJECTS.copy()
return tf.keras.utils.custom_object_scope(*(args + (quantization_objects,)))
def quantize_model(to_quantize, annotate_fn=_add_quant_wrapper):
"""Quantize a `tf.keras` model with the default quantization implementation.
Quantization constructs a model which emulates quantization during training.
This allows the model to learn parameters robust to quantization loss, and
also model the accuracy of a quantized model.
For more information, see
https://www.tensorflow.org/model_optimization/guide/quantization/training
Quantize a model:
```python
# Quantize sequential model
model = quantize_model(
keras.Sequential([
layers.Dense(10, activation='relu', input_shape=(100,)),
layers.Dense(2, activation='sigmoid')
]))
# Quantize functional model
in = tf.keras.Input((3,))
out = tf.keras.Dense(2)(in)
model = tf.keras.Model(in, out)
quantized_model = quantize_model(model)
```
Note that this function removes the optimizer from the original model.
The returned model copies over weights from the original model. So while
it preserves the original weights, training it will not modify the weights
of the original model.
Args:
to_quantize: tf.keras model to be quantized. It can have pre-trained
weights.
Returns:
Returns a new `tf.keras` model prepared for quantization.
"""
if to_quantize is None:
raise ValueError('`to_quantize` cannot be None')
if not isinstance(to_quantize, tf.keras.Model):
raise ValueError(
'`to_quantize` can only be a `tf.keras.Model` instance. Use '
'the `quantize_annotate_layer` API to handle individual layers.'
'You passed an instance of type: {input}.'.format(
input=to_quantize.__class__.__name__))
if (not isinstance(to_quantize, tf.keras.Sequential) and
not to_quantize._is_graph_network): # pylint: disable=protected-access
raise ValueError(
'`to_quantize` can only either be a tf.keras Sequential or '
'Functional model.')
annotated_model = quantize_annotate_model(to_quantize, annotate_fn=annotate_fn)
return quantize_mod.quantize_apply(annotated_model, DefaultQuantizeScheme())
def quantize_annotate_model(to_annotate, annotate_fn=_add_quant_wrapper):
"""Annotate a `tf.keras` model to be quantized.
This function does not actually quantize the model. It merely specifies
that the model needs to be quantized. `quantize_apply` can then be used
to quantize the model.
This function is intended to be used in conjunction with the
`quantize_annotate_layer` API. Otherwise, it is simpler to use
`quantize_model`.
Annotate a model while overriding the default behavior for a layer:
```python
quantize_config = MyDenseQuantizeConfig()
model = quantize_annotate_model(
keras.Sequential([
layers.Dense(10, activation='relu', input_shape=(100,)),
quantize_annotate_layer(
layers.Dense(2, activation='sigmoid'),
quantize_config=quantize_config)
]))
# The first Dense layer gets quantized with the default behavior,
# but the second layer uses `MyDenseQuantizeConfig` for quantization.
quantized_model = quantize_apply(model)
```
Note that this function removes the optimizer from the original model.
Args:
to_annotate: `tf.keras` model which needs to be quantized.
Returns:
New tf.keras model with each layer in the model wrapped with
`QuantizeAnnotate`. The new model preserves weights from the original
model.
"""
if to_annotate is None:
raise ValueError('`to_annotate` cannot be None')
if not isinstance(to_annotate, tf.keras.Model):
raise ValueError(
'`to_annotate` can only be a `tf.keras.Model` instance. Use '
'the `quantize_annotate_layer` API to handle individual layers. '
'You passed an instance of type: {input}.'.format(
input=to_annotate.__class__.__name__))
if (not isinstance(to_annotate, tf.keras.Sequential) and
not to_annotate._is_graph_network): # pylint: disable=protected-access
raise ValueError(
'`to_annotate` can only either be a tf.keras Sequential or '
'Functional model.')
return tf.keras.models.clone_model(
to_annotate, input_tensors=None, clone_function=annotate_fn)
| [
2,
383,
1708,
2723,
2438,
373,
6198,
6492,
422,
25,
198,
2,
3740,
1378,
12567,
13,
785,
14,
83,
22854,
11125,
14,
19849,
12,
40085,
1634,
14,
2436,
672,
14,
85,
15,
13,
22,
13,
15,
14,
83,
22854,
11125,
62,
19849,
62,
40085,
163... | 3.118718 | 2,308 |
# pylint: disable=R0903, C0115
from abc import ABC, abstractmethod
from typing import AbstractSet, Any, final, Mapping, Sequence
from .base import ASTNode, Span
class Type(ASTNode, ABC):
"""
This is the base class for the program's representation of types in
the type system.
Warnings
--------
- This class should not be used directly, instead use one of its
subclasses.
"""
@final
@abstractmethod
def substitute(self, substitution: Mapping["TypeVar", "Type"]) -> "Type":
"""
Replace free type vars in the object with the types in
`substitution`.
Parameters
----------
substitution: Substitution
The mapping to used to replace the free type vars.
Returns
-------
Type
The same object but without any free type variables.
"""
@abstractmethod
def strong_eq(self, other: "Type") -> bool:
"""A version of equality that comes with more guarantees."""
@abstractmethod
def weak_eq(self, other: "Type") -> bool:
"""A version of equality that comes with fewer guarantees."""
@final
@abstractmethod
| [
2,
279,
2645,
600,
25,
15560,
28,
49,
2931,
3070,
11,
327,
486,
1314,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
27741,
7248,
11,
4377,
11,
2457,
11,
337,
5912,
11,
45835,
198,
198,
6738,
764,
8692,
... | 2.740826 | 436 |
import os
import csv
from st2common.runners.base_action import Action
__all__ = [
'ParseCSVFileAction'
]
| [
11748,
28686,
198,
11748,
269,
21370,
198,
198,
6738,
336,
17,
11321,
13,
36740,
13,
8692,
62,
2673,
1330,
7561,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
10044,
325,
7902,
53,
8979,
12502,
6,
198,
60,
628
] | 2.666667 | 42 |
import abc
import time
from ipaddress import ip_address
from ticket_auth import TicketFactory, TicketError
from .abstract_auth import AbstractAuthentication
from aiohttp import web
_REISSUE_KEY = 'aiohttp_auth.auth.TktAuthentication.reissue'
class TktAuthentication(AbstractAuthentication):
"""Ticket authentication mechanism based on the ticket_auth library.
This class is an abstract class that creates a ticket and validates it.
Storage of the ticket data itself is abstracted to allow different
implementations to store the cookie differently (encrypted, server side
etc).
"""
def __init__(
self,
secret,
max_age,
reissue_time=None,
include_ip=False,
cookie_name='AUTH_TKT'):
"""Initializes the ticket authentication mechanism.
Args:
secret: Byte sequence used to initialize the ticket factory.
max_age: Integer representing the number of seconds to allow the
ticket to remain valid for after being issued.
reissue_time: Integer representing the number of seconds before
a valid login will cause a ticket to be reissued. If this
value is 0, a new ticket will be reissued on every request
which requires authentication. If this value is None, no
tickets will be reissued, and the max_age will always expire
the ticket.
include_ip: If true, requires the clients ip details when
calculating the ticket hash
cookie_name: Name to use to reference the ticket details.
"""
self._ticket = TicketFactory(secret)
self._max_age = max_age
if (self._max_age is not None and
reissue_time is not None and
reissue_time < self._max_age):
self._reissue_time = max_age - reissue_time
else:
self._reissue_time = None
self._include_ip = include_ip
self._cookie_name = cookie_name
@property
def cookie_name(self):
"""Returns the name of the cookie stored in the session"""
return self._cookie_name
async def remember(self, request, user_id):
"""Called to store the userid for a request.
This function creates a ticket from the request and user_id, and calls
the abstract function remember_ticket() to store the ticket.
Args:
request: aiohttp Request object.
user_id: String representing the user_id to remember
"""
ticket = self._new_ticket(request, user_id)
await self.remember_ticket(request, ticket)
async def forget(self, request):
"""Called to forget the userid for a request
This function calls the forget_ticket() function to forget the ticket
associated with this request.
Args:
request: aiohttp Request object
"""
await self.forget_ticket(request)
async def get(self, request):
"""Gets the user_id for the request.
Gets the ticket for the request using the get_ticket() function, and
authenticates the ticket.
Args:
request: aiohttp Request object.
Returns:
The userid for the request, or None if the ticket is not
authenticated.
"""
ticket = await self.get_ticket(request)
if ticket is None:
return None
try:
# Returns a tuple of (user_id, token, userdata, validuntil)
now = time.time()
fields = self._ticket.validate(ticket, self._get_ip(request), now)
# Check if we need to reissue a ticket
if (self._reissue_time is not None and
now >= (fields.valid_until - self._reissue_time)):
# Reissue our ticket, and save it in our request.
request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)
return fields.user_id
except TicketError as e:
return None
async def process_response(self, request, response):
"""If a reissue was requested, only reiisue if the response was a
valid 2xx response
"""
if _REISSUE_KEY in request:
if (response.started or
not isinstance(response, web.Response) or
response.status < 200 or response.status > 299):
return
await self.remember_ticket(request, request[_REISSUE_KEY])
@abc.abstractmethod
async def remember_ticket(self, request, ticket):
"""Abstract function called to store the ticket data for a request.
Args:
request: aiohttp Request object.
ticket: String like object representing the ticket to be stored.
"""
pass
@abc.abstractmethod
async def forget_ticket(self, request):
"""Abstract function called to forget the ticket data for a request.
Args:
request: aiohttp Request object.
"""
pass
@abc.abstractmethod
async def get_ticket(self, request):
"""Abstract function called to return the ticket for a request.
Args:
request: aiohttp Request object.
Returns:
A ticket (string like) object, or None if no ticket is available
for the passed request.
"""
pass
| [
11748,
450,
66,
198,
11748,
640,
198,
6738,
20966,
21975,
1330,
20966,
62,
21975,
198,
6738,
7846,
62,
18439,
1330,
24014,
22810,
11,
24014,
12331,
198,
6738,
764,
397,
8709,
62,
18439,
1330,
27741,
47649,
3299,
198,
6738,
257,
952,
402... | 2.479583 | 2,204 |
# -*- coding:utf-8 -*-
"""
===========================================
@author: lmy
@time: 2020/8/19 11:12 PM
@project: brat
@file: labelMatchEngine.py
===========================================
"""
import re
MATCH_ENGINE_HANDLERS = {
'keywords': 'keywordHandler',
'length': 'lengthHandler',
'regex': 'regexHandler',
}
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
10052,
2559,
18604,
198,
220,
2488,
9800,
25,
220,
300,
1820,
198,
220,
2488,
2435,
25,
220,
220,
220,
12131,
14,
23,
14,
1129,
1367,
25,
1065,
3122,
198,
220... | 2.748031 | 127 |
from datetime import datetime
import os
import shutil
from zipfile import ZipFile
from rdflib import Graph, Namespace, RDF, Literal, XSD, URIRef
from csv import DictReader
from rdflib.namespace import FOAF, DCTERMS
from rdflib.resource import Resource
import sys
__author__ = 'Diarmuid'
if __name__ == "__main__":
print(sys.argv)
GtfsCsvToRdf(uri=sys.argv[1], output_file=sys.argv[2], zip_file=sys.argv[3]) | [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
19974,
7753,
1330,
38636,
8979,
198,
6738,
374,
67,
2704,
571,
1330,
29681,
11,
28531,
10223,
11,
371,
8068,
11,
25659,
1691,
11,
1395,
10305,
11,
... | 2.60625 | 160 |
""" Stores connection info between game nodes (positions) """
from src import helper
class Connection(object):
""" Stores connection info between game nodes (positions) """
def setstartX(self, value):
""" Setter for startX with type checking """
self.startX = value
def setstartY(self, value):
""" Setter for startY with type checking """
self.startY = value
def setdirection(self, value):
""" Setter for direction with type checking """
self.direction = value
def setendX(self, value):
""" Setter for endX with type checking """
self.endX = value
def setendY(self, value):
""" Setter for endY with type checking """
self.endY = value
| [
37811,
41835,
4637,
7508,
1022,
983,
13760,
357,
1930,
1756,
8,
37227,
198,
198,
6738,
12351,
1330,
31904,
198,
198,
4871,
26923,
7,
15252,
2599,
198,
220,
220,
220,
37227,
41835,
4637,
7508,
1022,
983,
13760,
357,
1930,
1756,
8,
37227,... | 2.825758 | 264 |
#!/usr/bin/env python3
# Write a program that computes typical stats
# Count, Min, Max, Mean, Std. Dev, Median
# No, you cannot import the stats library!
import sys
import math
data = []
for x in sys.argv[1:]:
flt = float(x)
data.append(flt)
count = len(data)
#max and min
minimum = data[0]
maximum = data[0]
for num in data:
if num < minimum:
minimum = num
if num > maximum:
maximum = num
# mean
total_sum = 0
for num in data:
total_sum += num
mean = total_sum / count
#standard deviation
numerator = 0
for num in data:
distance = (num - mean)**2
numerator += distance
stdev = math.sqrt(numerator/ count)
#median
data.sort()
#even number inputs
if (count % 2) == 0:
upper = math.ceil(count/2)
lower = math.floor(count/2)
median = (data[upper] + data[lower])/2
#odd number inputs
else:
median = data[count//2]
print('Count: ' + str(count), 'Minimum: ' + f'{minimum:.1f}', 'Maximum: ' + f'{maximum:.1f}', 'Mean: ' + f'{mean:.3f}', 'Std. dev: ' + f'{stdev:.3f}','Median: ' + f'{median:.3f}', sep='\n')
"""
python3 30stats.py 3 1 4 1 5
Count: 5
Minimum: 1.0
Maximum: 5.0
Mean: 2.800
Std. dev: 1.600
Median 3.000
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
19430,
257,
1430,
326,
552,
1769,
7226,
9756,
198,
2,
2764,
11,
1855,
11,
5436,
11,
22728,
11,
520,
67,
13,
6245,
11,
26178,
198,
2,
1400,
11,
345,
2314,
1330,
262,
9... | 2.408421 | 475 |
import json
import os.path
import sys
from typing import List, Dict
import boto3
if __name__ == '__main__':
cli()
| [
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
11748,
25064,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
198,
198,
11748,
275,
2069,
18,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
537... | 2.733333 | 45 |
import hashlib
import re
from django.core.cache import cache
from django.core.paginator import InvalidPage
from django.db.models import Case
from django.db.models import CharField
from django.db.models import F
from django.db.models import IntegerField
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Value
from django.db.models import When
from django_filters.rest_framework import BooleanFilter
from django_filters.rest_framework import CharFilter
from le_utils.constants import content_kinds
from le_utils.constants import roles
from rest_framework.pagination import NotFound
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import File
from contentcuration.viewsets.base import RequiredFilterSet
from contentcuration.viewsets.common import NotNullMapArrayAgg
from contentcuration.viewsets.common import SQArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.contentnode import ContentNodeViewSet
uuid_re = re.compile("([a-f0-9]{32})")
| [
11748,
12234,
8019,
198,
11748,
302,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
7295,
13,
79,
363,
20900,
1330,
17665,
9876,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
8913,
198... | 3.661765 | 340 |
# %% Import Libraries
from landlab import Component
from ...utils.decorators import use_file_name_or_kwds
import numpy as np
# %% Instantiate Object
class LandslideProbability(Component):
    """
    Landlab component designed to calculate a probability of failure at
    each grid node based on the infinite slope stability model
    stability index (Factor of Safety).

    The driving force for failure is provided by the user in the form of
    groundwater recharge, simply user provided minimum and maximum annual
    peak values of recharge. The model uses topographic and soils
    characteristics provided as input in the landslide_driver.

    A LandslideProbability calculation function provides the user with the
    mean soil relative wetness, mean factor-of-safety, and probability
    of failure at each node.

    Construction::

        LandslideProbability(grid, number_of_simulations=250,
            groundwater__recharge_minimum=5.,
            groundwater__recharge_maximum=120.)

    Parameters
    ----------
    grid: RasterModelGrid
        A grid.
    number_of_simulations: float, optional
        Number of simulations to run Monte Carlo.
    groundwater__recharge_minimum: float, optional
        User provided minimum annual maximum recharge
        recharge (mm/day).
    groundwater__recharge_maximum: float, optional
        User provided maximum annual maximum recharge
        recharge (mm/day).

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.components.landslides import LandslideProbability
    >>> import numpy as np

    >>> grid = RasterModelGrid((5, 4), spacing=(0.2, 0.2))
    >>> LS_prob = LandslideProbability(grid)
    >>> LS_prob.name
    'Landslide Probability'
    >>> sorted(LandslideProbability.input_var_names) # doctest: +NORMALIZE_WHITESPACE
    ['soil__density',
     'soil__internal_friction_angle',
     'soil__maximum_total_cohesion',
     'soil__minimum_total_cohesion',
     'soil__mode_total_cohesion',
     'soil__thickness',
     'soil__transmissivity',
     'topographic__slope',
     'topographic__specific_contributing_area']
    >>> sorted(LS_prob.output_var_names) # doctest: +NORMALIZE_WHITESPACE
    ['landslide__mean_factor_of_safety',
     'landslide__probability_of_failure',
     'soil__mean_relative_wetness']
    >>> sorted(LS_prob.units) # doctest: +NORMALIZE_WHITESPACE
    [('landslide__mean_factor_of_safety', 'None'),
     ('landslide__probability_of_failure', 'None'),
     ('soil__density', 'kg/m3'),
     ('soil__internal_friction_angle', 'degrees'),
     ('soil__maximum_total_cohesion', 'Pa or kg/m-s2'),
     ('soil__mean_relative_wetness', 'None'),
     ('soil__minimum_total_cohesion', 'Pa or kg/m-s2'),
     ('soil__mode_total_cohesion', 'Pa or kg/m-s2'),
     ('soil__thickness', 'm'),
     ('soil__transmissivity', 'm2/day'),
     ('topographic__slope', 'tan theta'),
     ('topographic__specific_contributing_area', 'm')]

    >>> LS_prob.grid.number_of_node_rows
    5
    >>> LS_prob.grid.number_of_node_columns
    4
    >>> LS_prob.grid is grid
    True

    >>> grid['node']['topographic__slope'] = np.random.rand(
    ...     grid.number_of_nodes)
    >>> scatter_dat = np.random.randint(1, 11, grid.number_of_nodes)
    >>> grid['node']['topographic__specific_contributing_area'] = np.sort(
    ...     np.random.randint(30, 901, grid.number_of_nodes))
    >>> grid['node']['soil__transmissivity'] = np.sort(
    ...     np.random.randint(5, 21, grid.number_of_nodes), -1)
    >>> grid['node']['soil__mode_total_cohesion'] = np.sort(
    ...     np.random.randint(30, 901, grid.number_of_nodes))
    >>> grid['node']['soil__minimum_total_cohesion'] = (
    ...     grid.at_node['soil__mode_total_cohesion'] - scatter_dat)
    >>> grid['node']['soil__maximum_total_cohesion'] = (
    ...     grid.at_node['soil__mode_total_cohesion'] + scatter_dat)
    >>> grid['node']['soil__internal_friction_angle'] = np.sort(
    ...     np.random.randint(26, 41, grid.number_of_nodes))
    >>> grid['node']['soil__thickness'] = np.sort(
    ...     np.random.randint(1, 11, grid.number_of_nodes))
    >>> grid['node']['soil__density'] = (2000. * np.ones(grid.number_of_nodes))

    >>> LS_prob = LandslideProbability(grid)
    >>> np.allclose(grid.at_node['landslide__probability_of_failure'], 0.)
    True
    >>> LS_prob.calculate_landslide_probability()
    >>> np.allclose(grid.at_node['landslide__probability_of_failure'], 0.)
    False
    >>> core_nodes = LS_prob.grid.core_nodes
    >>> isinstance(LS_prob.landslide__factor_of_safety_histogram[
    ...     core_nodes[0]], np.ndarray) == True
    True
    """

    # component name
    _name = 'Landslide Probability'
    __version__ = '1.0'

    # component requires these values to do its calculation, get from driver
    _input_var_names = (
        'topographic__specific_contributing_area',
        'topographic__slope',
        'soil__transmissivity',
        'soil__mode_total_cohesion',
        'soil__minimum_total_cohesion',
        'soil__maximum_total_cohesion',
        'soil__internal_friction_angle',
        'soil__density',
        'soil__thickness',
        )

    # component creates these output values
    _output_var_names = (
        'soil__mean_relative_wetness',
        'landslide__mean_factor_of_safety',
        'landslide__probability_of_failure',
        )

    # units for each parameter and output
    _var_units = {
        'topographic__specific_contributing_area': 'm',
        'topographic__slope': 'tan theta',
        'soil__transmissivity': 'm2/day',
        'soil__mode_total_cohesion': 'Pa or kg/m-s2',
        'soil__minimum_total_cohesion': 'Pa or kg/m-s2',
        'soil__maximum_total_cohesion': 'Pa or kg/m-s2',
        'soil__internal_friction_angle': 'degrees',
        'soil__density': 'kg/m3',
        'soil__thickness': 'm',
        'soil__mean_relative_wetness': 'None',
        'landslide__mean_factor_of_safety': 'None',
        'landslide__probability_of_failure': 'None',
        }

    # grid centering of each field and variable
    _var_mapping = {
        'topographic__specific_contributing_area': 'node',
        'topographic__slope': 'node',
        'soil__transmissivity': 'node',
        'soil__mode_total_cohesion': 'node',
        'soil__minimum_total_cohesion': 'node',
        'soil__maximum_total_cohesion': 'node',
        'soil__internal_friction_angle': 'node',
        'soil__density': 'node',
        'soil__thickness': 'node',
        'soil__mean_relative_wetness': 'node',
        'landslide__mean_factor_of_safety': 'node',
        'landslide__probability_of_failure': 'node',
        }

    # short description of each field
    _var_doc = {
        'topographic__specific_contributing_area':
            ('specific contributing (upslope area/cell face )' +
             ' that drains to node'),
        'topographic__slope':
            'slope of surface at node represented by tan theta',
        'soil__transmissivity':
            ('mode rate of water transmitted' +
             ' through a unit width of saturated soil'),
        'soil__mode_total_cohesion':
            'mode of combined root and soil cohesion at node',
        'soil__minimum_total_cohesion':
            'minimum of combined root and soil cohesion at node',
        'soil__maximum_total_cohesion':
            'maximum of combined root and soil cohesion at node',
        'soil__internal_friction_angle':
            ('critical angle just before failure' +
             ' due to friction between particles'),
        'soil__density': 'wet bulk density of soil',
        'soil__thickness': 'soil depth to restrictive layer',
        'soil__mean_relative_wetness':
            ('Indicator of soil wetness;' +
             ' relative depth perched water table' +
             ' within the soil layer'),
        'landslide__mean_factor_of_safety':
            ('(FS) dimensionless index of stability' +
             ' based on infinite slope stability model'),
        'landslide__probability_of_failure':
            ('number of times FS is <1 out of number of' +
             ' iterations user selected'),
        }

    # Run Component
    @use_file_name_or_kwds
    def __init__(self, grid, number_of_simulations=250.,
                 groundwater__recharge_minimum=20.,
                 groundwater__recharge_maximum=120., **kwds):
        """
        Parameters
        ----------
        grid: RasterModelGrid
            A grid.
        number_of_simulations: int, optional
            number of simulations to run Monte Carlo (None)
        groundwater__recharge_minimum: float, optional
            Minimum annual maximum recharge (mm/d)
        groundwater__recharge_maximum: float, optional
            Maximum annual maximum recharge (mm/d)
        """
        # Store grid and parameters and do unit conversions
        self._grid = grid
        # NumPy's random samplers require an integer `size`; the historical
        # default here is the float 250., so coerce instead of changing the
        # public signature.
        self.n = int(number_of_simulations)
        self.recharge_min = groundwater__recharge_minimum/1000.0  # mm->m
        self.recharge_max = groundwater__recharge_maximum/1000.0
        self.g = 9.81  # acceleration due to gravity (m/sec^2)
        super(LandslideProbability, self).__init__(grid)

        # Ensure every input/output field exists on the grid (zero-filled).
        for name in self._input_var_names:
            if name not in self.grid.at_node:
                self.grid.add_zeros('node', name, units=self._var_units[name])
        for name in self._output_var_names:
            if name not in self.grid.at_node:
                self.grid.add_zeros('node', name, units=self._var_units[name])
        self._nodal_values = self.grid['node']
        # Raise an error if somehow someone is using this weird functionality
        if self._grid is None:
            raise ValueError('You must now provide an existing grid!')

    def calculate_factor_of_safety(self, i):
        """
        Method calculates factor-of-safety stability index by using
        node specific parameters, creating distributions of these parameters,
        and calculating the index by sampling these distributions 'n' times.

        The index is calculated from the 'infinite slope stability
        factor-of-safety equation' in the format of Pack RT, Tarboton DG,
        and Goodwin CN (1998) The SINMAP approach to terrain stability mapping.

        Parameters
        ----------
        i: int
            index of core node ID.
        """
        # generate distributions to sample from to provide input parameters
        # currently triangle distribution using mode, min, & max
        self.a = self.grid['node'][
            'topographic__specific_contributing_area'][i]
        self.theta = self.grid['node']['topographic__slope'][i]
        self.Tmode = self.grid['node']['soil__transmissivity'][i]
        self.Cmode = self.grid['node']['soil__mode_total_cohesion'][i]
        self.Cmin = self.grid['node']['soil__minimum_total_cohesion'][i]
        self.Cmax = self.grid['node']['soil__maximum_total_cohesion'][i]
        self.phi_mode = self.grid['node']['soil__internal_friction_angle'][i]
        self.rho = self.grid['node']['soil__density'][i]
        self.hs_mode = self.grid['node']['soil__thickness'][i]

        # Transmissivity (T): triangular spread of +/-30% around the mode
        Tmin = self.Tmode-(0.3*self.Tmode)
        Tmax = self.Tmode+(0.3*self.Tmode)
        self.T = np.random.triangular(Tmin, self.Tmode, Tmax, size=self.n)

        # Cohesion
        # if provide fields of min and max C, uncomment 2 lines below
        #    Cmin = Cmode-0.3*self.Cmode
        #    Cmax = Cmode+0.3*self.Cmode
        self.C = np.random.triangular(self.Cmin, self.Cmode,
                                      self.Cmax, size=self.n)

        # phi - internal angle of friction provided in degrees
        phi_min = self.phi_mode-0.18*self.phi_mode
        phi_max = self.phi_mode+0.32*self.phi_mode
        self.phi = np.random.triangular(phi_min, self.phi_mode,
                                        phi_max, size=self.n)

        # soil thickness (clamped to a small positive value to avoid
        # division by zero in the dimensionless-cohesion term below)
        hs_min = self.hs_mode-0.3*self.hs_mode
        hs_max = self.hs_mode+0.3*self.hs_mode
        self.hs = np.random.triangular(hs_min, self.hs_mode,
                                       hs_max, size=self.n)
        self.hs[self.hs <= 0.] = 0.0001

        # recharge distribution
        self.Re = np.random.uniform(self.recharge_min,
                                    self.recharge_max, size=self.n)

        # calculate Factor of Safety for n number of times
        # calculate components of FS equation
        self.C_dim = self.C/(self.hs*self.rho*self.g)  # dimensionless cohesion
        self.Rel_wetness = ((self.Re)/self.T)*(self.a/np.sin(
            np.arctan(self.theta)))  # relative wetness
        np.place(self.Rel_wetness, self.Rel_wetness > 1, 1.0)
        # maximum Rel_wetness = 1.0
        self.soil__mean_relative_wetness = np.mean(self.Rel_wetness)
        self.Y = np.tan(np.radians(self.phi))*(1 - (self.Rel_wetness*0.5))
        # convert from degrees; 0.5 = water to soil density ratio
        # calculate Factor-of-safety
        self.FS = (self.C_dim/np.sin(np.arctan(self.theta))) + (
            np.cos(np.arctan(self.theta)) *
            (self.Y/np.sin(np.arctan(self.theta))))
        self.FS_store = np.array(self.FS)  # array of factor of safety
        self.FS_distribution = self.FS_store
        self.landslide__mean_factor_of_safety = np.mean(self.FS)
        # number of unstable (FS <= 1) realizations, counted vectorized
        self.FS_L1 = float(np.count_nonzero(self.FS <= 1.0))
        # probability: No. unstable values/total No. of values (n)
        self.landslide__probability_of_failure = self.FS_L1/self.n

    def calculate_landslide_probability(self, **kwds):
        """
        Method creates arrays for output variables then loops through all
        the core nodes to run the method 'calculate_factor_of_safety.'
        Some output variables are assigned as fields to nodes. One output
        parameter is an factor-of-safety distribution at each node.

        Parameters
        ----------
        self.landslide__factor_of_safety_histogram: numpy.ndarray([
            self.grid.number_of_nodes, self.n], dtype=float)
            This is an output - distribution of factor-of-safety from
            Monte Carlo simulations (units='None')
        """
        # Create arrays for data with -9999 as default to store output
        self.mean_Relative_Wetness = -9999*np.ones(self.grid.number_of_nodes,
                                                   dtype='float')
        self.mean_FS = -9999*np.ones(self.grid.number_of_nodes, dtype='float')
        self.prob_fail = -9999*np.ones(
            self.grid.number_of_nodes, dtype='float')
        self.landslide__factor_of_safety_histogram = -9999*np.ones(
            [self.grid.number_of_nodes, self.n], dtype='float')
        # Run factor of safety Monte Carlo for all core nodes in domain
        # i refers to each core node id
        for i in self.grid.core_nodes:
            self.calculate_factor_of_safety(i)
            # Populate storage arrays with calculated values
            self.mean_Relative_Wetness[i] = self.soil__mean_relative_wetness
            self.mean_FS[i] = self.landslide__mean_factor_of_safety
            self.prob_fail[i] = self.landslide__probability_of_failure
            self.landslide__factor_of_safety_histogram[i] = (
                self.FS_distribution)
            # stores FS values from last loop (node)
        # replace unrealistic values in arrays
        self.mean_Relative_Wetness[
            self.mean_Relative_Wetness < 0.] = 0.  # so can't be negative
        self.mean_FS[self.mean_FS < 0.] = 0.  # can't be negative
        self.mean_FS[self.mean_FS == np.inf] = 0.  # to deal with NaN in data
        self.prob_fail[self.prob_fail < 0.] = 0.  # can't be negative
        # assign output fields to nodes
        self.grid['node']['soil__mean_relative_wetness'] = (
            self.mean_Relative_Wetness)
        self.grid['node']['landslide__mean_factor_of_safety'] = self.mean_FS
        self.grid['node']['landslide__probability_of_failure'] = self.prob_fail
| [
201,
198,
2,
43313,
17267,
46267,
201,
198,
6738,
1956,
23912,
1330,
35100,
201,
198,
6738,
2644,
26791,
13,
12501,
273,
2024,
1330,
779,
62,
7753,
62,
3672,
62,
273,
62,
46265,
9310,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,... | 2.2138 | 7,507 |
import asyncio
import inspect
import logging
from typing import Awaitable, List, TYPE_CHECKING, Callable, Coroutine, Optional
import naff.models.naff as naff
from naff.client.const import logger_name, MISSING
from naff.client.utils.misc_utils import wrap_partial
from naff.models.naff.tasks import Task
if TYPE_CHECKING:
from naff.client import Client
from naff.models.naff import AutoDefer, BaseCommand, Listener
from naff.models.naff import Context
log = logging.getLogger(logger_name)
__all__ = ("Extension",)
class Extension:
    """
    A class that allows you to separate your commands and listeners into separate files.

    Extensions require an entrypoint in the same file called `setup`; this function
    allows the client to load the Extension.

    ??? Hint "Example Usage:"
        ```python
        class ExampleExt(Extension):
            def __init__(self, bot):
                print("Extension Created")

            @prefixed_command()
            async def some_command(self, context):
                await context.send(f"I was sent from a extension called {self.name}")
        ```

    Attributes:
        bot Client: A reference to the client
        name str: The name of this Extension (`read-only`)
        description str: A description of this Extension
        extension_checks str: A list of checks to be ran on any command in this extension
        extension_prerun List: A list of coroutines to be run before any command in this extension
        extension_postrun List: A list of coroutines to be run after any command in this extension

    """

    bot: "Client"
    name: str
    extension_name: str
    description: str
    extension_checks: List
    extension_prerun: List
    extension_postrun: List
    extension_error: Optional[Callable[..., Coroutine]]
    _commands: List
    _listeners: List
    auto_defer: "AutoDefer"

    @property
    def commands(self) -> List["BaseCommand"]:
        """Get the commands from this Extension."""
        # NOTE: exactly one @property -- the original had the decorator
        # duplicated, which wrapped a property inside a property and raised
        # TypeError on every attribute access.
        return self._commands

    @property
    def listeners(self) -> List["Listener"]:
        """Get the listeners from this Extension."""
        return self._listeners

    def drop(self) -> None:
        """Called when this Extension is being removed."""
        # Unregister every command this extension contributed, dispatching on
        # the concrete command type so the right client registry is cleaned.
        for func in self._commands:
            if isinstance(func, naff.ModalCommand):
                for listener in func.listeners:
                    # noinspection PyProtectedMember
                    self.bot._modal_callbacks.pop(listener)
            elif isinstance(func, naff.ComponentCommand):
                for listener in func.listeners:
                    # noinspection PyProtectedMember
                    self.bot._component_callbacks.pop(listener)
            elif isinstance(func, naff.InteractionCommand):
                for scope in func.scopes:
                    if self.bot.interactions.get(scope):
                        self.bot.interactions[scope].pop(func.resolved_name, [])
            elif isinstance(func, naff.PrefixedCommand):
                # Subcommands are owned by their parent; only top-level
                # prefixed commands (and their aliases) live in the registry.
                if not func.is_subcommand:
                    self.bot.prefixed_commands.pop(func.name, None)
                    for alias in func.aliases:
                        self.bot.prefixed_commands.pop(alias, None)
        for func in self.listeners:
            self.bot.listeners[func.event].remove(func)
        self.bot.ext.pop(self.name, None)
        log.debug(f"{self.name} has been dropped")

    def add_ext_auto_defer(self, ephemeral: bool = False, time_until_defer: float = 0.0) -> None:
        """
        Add a auto defer for all commands in this extension.

        Args:
            ephemeral: Should the command be deferred as ephemeral
            time_until_defer: How long to wait before deferring automatically

        """
        self.auto_defer = naff.AutoDefer(enabled=True, ephemeral=ephemeral, time_until_defer=time_until_defer)

    def add_ext_check(self, coroutine: Callable[["Context"], Awaitable[bool]]) -> None:
        """
        Add a coroutine as a check for all commands in this extension to run. This coroutine must take **only** the parameter `context`.

        ??? Hint "Example Usage:"
            ```python
            def __init__(self, bot):
                self.add_ext_check(self.example)

            @staticmethod
            async def example(context: Context):
                if context.author.id == 123456789:
                    return True
                return False
            ```

        Args:
            coroutine: The coroutine to use as a check

        """
        if not asyncio.iscoroutinefunction(coroutine):
            raise TypeError("Check must be a coroutine")
        if not self.extension_checks:
            self.extension_checks = []

        self.extension_checks.append(coroutine)

    def add_extension_prerun(self, coroutine: Callable[..., Coroutine]) -> None:
        """
        Add a coroutine to be run **before** all commands in this Extension.

        Note:
            Pre-runs will **only** be run if the commands checks pass

        ??? Hint "Example Usage:"
            ```python
            def __init__(self, bot):
                self.add_extension_prerun(self.example)

            async def example(self, context: Context):
                await context.send("I ran first")
            ```

        Args:
            coroutine: The coroutine to run

        """
        if not asyncio.iscoroutinefunction(coroutine):
            raise TypeError("Callback must be a coroutine")

        if not self.extension_prerun:
            self.extension_prerun = []
        self.extension_prerun.append(coroutine)

    def add_extension_postrun(self, coroutine: Callable[..., Coroutine]) -> None:
        """
        Add a coroutine to be run **after** all commands in this Extension.

        ??? Hint "Example Usage:"
            ```python
            def __init__(self, bot):
                self.add_extension_postrun(self.example)

            async def example(self, context: Context):
                await context.send("I ran first")
            ```

        Args:
            coroutine: The coroutine to run

        """
        if not asyncio.iscoroutinefunction(coroutine):
            raise TypeError("Callback must be a coroutine")

        if not self.extension_postrun:
            self.extension_postrun = []
        self.extension_postrun.append(coroutine)

    def set_extension_error(self, coroutine: Callable[..., Coroutine]) -> None:
        """
        Add a coroutine to handle any exceptions raised in this extension.

        ??? Hint "Example Usage:"
            ```python
            def __init__(self, bot):
                self.set_extension_error(self.example)
            ```

        Args:
            coroutine: The coroutine to run

        """
        if not asyncio.iscoroutinefunction(coroutine):
            raise TypeError("Callback must be a coroutine")

        if self.extension_error:
            log.warning("Extension error callback has been overridden!")
        self.extension_error = coroutine
| [
11748,
30351,
952,
198,
11748,
10104,
198,
11748,
18931,
198,
6738,
19720,
1330,
5851,
4548,
540,
11,
7343,
11,
41876,
62,
50084,
2751,
11,
4889,
540,
11,
2744,
28399,
11,
32233,
198,
198,
11748,
299,
2001,
13,
27530,
13,
77,
2001,
35... | 2.328921 | 3,022 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import subprocess
import tempfile
import re
from inducingSHARK.util.git import CollectGit
# def test_excluding_file_regexes(self):
# positives = [
# 'src/java/test/org/apache/commons/Test.java',
# 'test/examples/org/apache/commons/Test.java',
# 'examples/org/apache/commons/Test.java',
# 'example/org/apache/commons/Test.java',
# 'src/examples/org/apache/commons/Test.java',
# 'src/example/org/apache/commons/Test.java',
# ]
# negatives = [
# 'src/java/main/org/apache/commons/Test.java',
# 'src/java/main/Example.java',
# ]
# for pos in positives:
# a1 = re.match(CollectGit._regex_test_example, pos)
# self.assertNotEqual(a1, None)
# for neg in negatives:
# a1 = re.match(CollectGit._regex_test_example, neg)
# self.assertEqual(a1, None)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
11748,
850,
14681,
198,
11748,
20218,
7753,
198,
11748,
302,
198,
198,
6738,
44561,
969... | 2.05102 | 490 |
from pytest import approx
from vyperdatum.points import *
from vyperdatum.vdatum_validation import vdatum_answers
# Shared VyperCore instance used below to discover the installed VDatum version.
gvc = VyperCore()
# 'data' directory one level above this file's directory -- TODO confirm layout.
data_folder = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')
# Expected answers keyed by the locally-installed VDatum version.
vdatum_answer = vdatum_answers[gvc.vdatum.vdatum_version]
| [
6738,
12972,
9288,
1330,
5561,
198,
198,
6738,
410,
88,
525,
19608,
388,
13,
13033,
1330,
1635,
198,
6738,
410,
88,
525,
19608,
388,
13,
85,
19608,
388,
62,
12102,
341,
1330,
410,
19608,
388,
62,
504,
86,
364,
198,
198,
70,
28435,
... | 2.391667 | 120 |
from uuid import UUID
import json
from fastapi import APIRouter, Path, Body, Depends, HTTPException, Request, Response
from fastapi.responses import StreamingResponse
from fastapi.templating import Jinja2Templates
from starlette import status
from app.api.dependencies import image_service_dep, image_file_service_dep
from app.services.image import ImageService
from app.services.image_file import ImageFileService
from app.models.image import Image, ImageAddVM, ImageUpdateVM
from app.models.response import AddResponse, UpdateResponse, DeleteResponse, HealthcheckResponse
# Jinja2 template loader rooted at app/templates, used by the gallery views.
templates = Jinja2Templates(directory = "app/templates")
# Router collecting the gallery endpoints; mounted by the application elsewhere.
router = APIRouter()
@router.get("/")
@router.get('/init', response_model=HealthcheckResponse, tags=['gallery'])
@router.get('/images', tags=['gallery'])
@router.get('/image/{id}', response_model=Image, tags=['gallery'])
@router.post('/image', response_model=AddResponse, status_code=status.HTTP_201_CREATED, tags=['gallery'])
@router.put('/image/{id}', response_model=UpdateResponse, tags=['gallery'])
@router.delete('/image/{id}', response_model=DeleteResponse, tags=['gallery'])
| [
6738,
334,
27112,
1330,
471,
27586,
198,
11748,
33918,
198,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
10644,
11,
12290,
11,
2129,
2412,
11,
14626,
16922,
11,
19390,
11,
18261,
198,
6738,
3049,
15042,
13,
16733,
274,
1330,
431... | 3.265896 | 346 |
from typing import List, Dict, Any
from setuptools import setup, find_packages
# Load the package version by exec-ing registrable/version.py into VERSION,
# avoiding an import of the (possibly not-yet-installed) package itself.
VERSION: Dict[str, Any] = {}
with open("registrable/version.py", "r") as version_file:
    exec(version_file.read(), VERSION)

# NOTE(review): `read_reqs_file` is used below but not defined or imported in
# this chunk -- presumably a helper defined earlier in setup.py; confirm.
setup(
    name="registrable",
    version=VERSION["VERSION"],
    description="Python module for registering and instantiating classes by name. "
    "Based on the implementation from AllenNLP.",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/epwalsh/python-registrable",
    license="Apache 2.0",
    author="Evan Pete Walsh",
    author_email="epwalsh10@gmail.com",
    packages=find_packages(exclude=["registrable.tests.*", "tests"]),
    install_requires=read_reqs_file("requirements.txt"),
    tests_require=read_reqs_file("requirements.dev.txt"),
    python_requires=">=3.6.1",
    classifiers=[
        "Intended Audience :: Developers",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| [
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4377,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
43717,
25,
360,
713,
58,
2536,
11,
4377,
60,
796,
23884,
198,
4480,
1280,
7203,
2301,
396,
81,
540,
14,
9641,
... | 2.770089 | 448 |
def buffer_LinearRing(ring, dist, kwArgCheck = None, debug = False, fill = 1.0, nang = 19, simp = 0.1, tol = 1.0e-10):
    """Buffer a LinearRing

    This function reads in a LinearRing that exists on the surface of the Earth
    and returns a [Multi]Polygon of the same LinearRing buffered by a constant
    distance (in metres).

    Parameters
    ----------
    ring : shapely.geometry.polygon.LinearRing
        the LinearRing
    dist : float
        the Geodesic distance to buffer each point within the LinearRing by (in metres)
    debug : bool, optional
        print debug messages
    fill : float, optional
        the Euclidean distance to fill in between each point within the [Multi]Polygon by; negative values disable filling in (in degrees)
    nang : int, optional
        the number of angles around each point within the LinearRing that are calculated when buffering
    simp : float, optional
        how much intermediary [Multi]Polygons are simplified by; negative values disable simplification (in degrees)
    tol : float, optional
        the Euclidean distance that defines two points as being the same (in degrees)

    Returns
    -------
    buff : shapely.geometry.polygon.Polygon, shapely.geometry.multipolygon.MultiPolygon
        the buffered LinearRing

    Raises
    ------
    Exception
        if Shapely is not installed, or if "ring" is invalid or empty
    TypeError
        if "ring" is not a LinearRing
    """

    # Import special modules ...
    # Catch only import failures here: the original bare "except:" also
    # swallowed KeyboardInterrupt/SystemExit and any unrelated error raised
    # during import.
    try:
        import shapely
        import shapely.geometry
        import shapely.validation
    except ImportError:
        raise Exception("\"shapely\" is not installed; run \"pip install --user Shapely\"") from None

    # Import sub-functions ...
    from .buffer import buffer

    # Check keyword arguments ...
    # "kwArgCheck" is a sentinel that absorbs any stray positional argument so
    # callers are warned instead of silently binding it to "debug".
    if kwArgCheck is not None:
        print(f"WARNING: \"{__name__}\" has been called with an extra positional argument")

    # Check argument ...
    if not isinstance(ring, shapely.geometry.polygon.LinearRing):
        raise TypeError("\"ring\" is not a LinearRing") from None
    if not ring.is_valid:
        raise Exception(f"\"ring\" is not a valid LinearRing ({shapely.validation.explain_validity(ring)})") from None
    if ring.is_empty:
        raise Exception("\"ring\" is an empty LinearRing") from None

    # Return buffered LinearRing ...
    return buffer(ring.coords, dist, debug = debug, fill = fill, nang = nang, simp = simp, tol = tol)
| [
4299,
11876,
62,
14993,
451,
39687,
7,
1806,
11,
1233,
11,
479,
86,
28100,
9787,
796,
6045,
11,
14257,
796,
10352,
11,
6070,
796,
352,
13,
15,
11,
299,
648,
796,
678,
11,
985,
79,
796,
657,
13,
16,
11,
284,
75,
796,
352,
13,
1... | 2.888476 | 807 |
# Demo: envelope encryption with Azure Key Vault -- an AES data key encrypts
# the message locally; the Key Vault RSA key wraps the AES key for transfer.
import os

from azure_encryption_helper import Encryptor

# Create ServicePrincipalCredentials object using your SP.
# You can use MSIAuthentication if running on an Azure VM.
if 'USE_MSI' in os.environ.keys():
    from msrestazure.azure_active_directory import MSIAuthentication
    credentials = MSIAuthentication(resource='https://vault.azure.net')
else:
    # Service-principal login from environment variables.
    from azure.common.credentials import ServicePrincipalCredentials
    client_id = os.environ['AZURE_CLIENT_ID']
    client_secret = os.environ['AZURE_CLIENT_SECRET']
    tenant_id = os.environ['AZURE_TENANT_ID']
    credentials = ServicePrincipalCredentials(client_id=client_id,
                                              secret=client_secret,
                                              tenant=tenant_id)

# Key Vault location and RSA key to use; KEY_VERSION defaults to latest ('').
vault_uri = os.environ['VAULT_URI']
key_name = os.environ['KEY_NAME']
key_version = os.environ.get('KEY_VERSION', '')

message = 'hello custom data encryption!'
print("Original message:" + message)

# Create Encryptor
# credentials can be ServicePrincipalCredentials or MSIAuthentication object
encryptor_a = Encryptor.create_with_raw_key(vault_uri, credentials, key_name, key_version)

# Encrypt messages
encrypted_message = encryptor_a.encrypt(message)

# Wrap AES key using RSA key from the KeyVault.
# Note that wrap operation is local, but it requires KV access to retrieve public part
# of RSA key
wrapped_key = encryptor_a.get_wrapped_key()

print("Encrypted message:" + str(encrypted_message))
print("Wrapped_key:" + str(wrapped_key))

# Transfer message & key to the different location
print("Transferring wrapped key and encrypted message...")

# Create another Encryptor from the wrapped key (unwrap happens via Key Vault).
encryptor_b = Encryptor.create_with_wrapped_key(vault_uri, credentials, key_name, key_version, wrapped_key)

# Now you can decrypt the message
decrypted_message = encryptor_b.decrypt(encrypted_message)
print("Decrypted message:" + decrypted_message)
11748,
28686,
198,
6738,
35560,
495,
62,
12685,
13168,
62,
2978,
525,
1330,
14711,
6012,
273,
628,
198,
2,
13610,
4809,
42904,
8521,
34,
445,
14817,
2134,
1262,
534,
6226,
198,
2,
921,
460,
779,
6579,
3539,
315,
6925,
3299,
611,
2491,... | 2.845808 | 668 |
"""
Douban OAuth support.
This adds support for Douban OAuth service. An application must
be registered first on douban.com and the settings DOUBAN_CONSUMER_KEY
and DOUBAN_CONSUMER_SECRET must be defined with they corresponding
values.
By default account id is stored in extra_data field, check OAuthBackend
class for details on how to extend it.
"""
from social.backends.oauth import BaseOAuth2, BaseOAuth1
class DoubanOAuth(BaseOAuth1):
    """OAuth1 authentication backend for douban.com."""
    name = 'douban'
    EXTRA_DATA = [('id', 'id')]
    AUTHORIZATION_URL = 'http://www.douban.com/service/auth/authorize'
    REQUEST_TOKEN_URL = 'http://www.douban.com/service/auth/request_token'
    ACCESS_TOKEN_URL = 'http://www.douban.com/service/auth/access_token'

    def get_user_details(self, response):
        """Extract the username from Douban's Atom-style response."""
        # The uid lives under the "db:uid" entry's "$t" text field; Douban
        # does not expose an email address here.
        uid = response["db:uid"]["$t"]
        return {'username': uid, 'email': ''}

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the authenticated user's profile as JSON."""
        profile_url = 'http://api.douban.com/people/%40me?&alt=json'
        return self.get_json(profile_url,
                             auth=self.oauth_auth(access_token))
class DoubanOAuth2(BaseOAuth2):
    """OAuth2 authentication backend for douban.com."""
    name = 'douban-oauth2'
    AUTHORIZATION_URL = 'https://www.douban.com/service/auth2/auth'
    ACCESS_TOKEN_URL = 'https://www.douban.com/service/auth2/token'
    REDIRECT_STATE = False
    EXTRA_DATA = [
        ('id', 'id'),
        ('uid', 'username'),
        ('refresh_token', 'refresh_token'),
    ]

    def get_user_details(self, response):
        """Map Douban's profile payload onto the common user-detail keys."""
        # Douban's v2 API returns no email address, so it is left blank.
        details = {'email': ''}
        details['username'] = response.get('uid', '')
        details['fullname'] = response.get('name', '')
        return details

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the authenticated user's profile with a bearer token."""
        auth_headers = {'Authorization': 'Bearer %s' % access_token}
        return self.get_json(
            'https://api.douban.com/v2/user/~me',
            headers=auth_headers
        )
| [
37811,
198,
40287,
3820,
440,
30515,
1104,
13,
198,
198,
1212,
6673,
1104,
329,
5728,
3820,
440,
30515,
2139,
13,
1052,
3586,
1276,
198,
1350,
6823,
717,
319,
3385,
272,
13,
785,
290,
262,
6460,
360,
2606,
33,
1565,
62,
10943,
50,
5... | 2.40732 | 847 |
# -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
from torch.nn import Linear, Embedding
from torch_geometric.nn import ARMAConv
from torch_geometric.data import Data
from util import timeclass,get_logger
import pandas as pd
import numpy as np
import random
import time
import copy
from process_data import ModelData
# Module-wide logger at INFO verbosity, built by the project's get_logger helper.
VERBOSITY_LEVEL = 'INFO'
LOGGER = get_logger(VERBOSITY_LEVEL, __file__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
20471,
1330,
44800,
11,
13302,
6048,
278,
198,
6738,
28034,
62,
469,
16996,
13,
20... | 2.826667 | 150 |
import io
import os
import ui
import sys
import time
import Image
import numpy
import photos
import shutil
import socket
import console
import zipfile
import ImageOps
import objc_util
import matplotlib.cm
import urllib.request
from threading import Event, Thread
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
# Bootstrap diff_match_patch: if it is not importable, download the wheel into
# Pythonista's site-packages-3 directory, unzip it, and retry the import.
try:
	import diff_match_patch
except:
	# Locate Pythonista's writable site-packages-3 directory on sys.path.
	for pth in sys.path:
		if pth[-15:] == 'site-packages-3':
			sp3 = pth + '/'
	# Fetch the pure-Python wheel (a wheel is a zip archive) and extract it.
	with urllib.request.urlopen('https://files.pythonhosted.org/packages/c2/5a/9aa3b95a1d108b82fadb1eed4c3773d19069f765bd4c360a930e107138ee/diff_match_patch-20200713-py3-none-any.whl') as f:
		with open(sp3 + 'dmp.zip', 'wb') as ff:
			ff.write(f.read())
	with zipfile.ZipFile(sp3 + 'dmp.zip') as zf:
		zf.extractall(sp3)
	# Clean up the dist-info metadata and the downloaded archive.
	shutil.rmtree(sp3 + 'diff_match_patch-20200713.dist-info', ignore_errors = True)
	os.remove(sp3 + 'dmp.zip')
	import diff_match_patch
# Obtain holoplay.js: reuse a cached local copy if present; otherwise download
# the vanilla 0.2.3 build, apply the 'driverless' patch (so no Looking Glass
# driver service is needed), and swap the baked-in display calibration JSON
# for this device's measured values.
if os.path.isfile('holoplay.js'):
    with open('holoplay.js', 'r') as f:
        holoplay_js = f.read()
else:
    with urllib.request.urlopen('https://cdn.jsdelivr.net/npm/holoplay@0.2.3/holoplay.js') as f:
        holoplay_js_vanilla = f.read().decode('utf-8')
    with urllib.request.urlopen('https://raw.githubusercontent.com/jankais3r/driverless-HoloPlay.js/main/holoplay.js.patch') as f:
        # Normalise CRLF so the patch text matches the downloaded file.
        diff = f.read().decode('utf-8').replace('\r\n', '\n')
    dmp = diff_match_patch.diff_match_patch()
    patches = dmp.patch_fromText(diff)
    holoplay_js, _ = dmp.patch_apply(patches, holoplay_js_vanilla)
    holoplay_js = holoplay_js.replace(
        # Original calibration:
        '{"configVersion":"1.0","serial":"00000","pitch":{"value":49.825218200683597},"slope":{"value":5.2160325050354},"center":{"value":-0.23396748304367066},"viewCone":{"value":40.0},"invView":{"value":1.0},"verticalAngle":{"value":0.0},"DPI":{"value":338.0},"screenW":{"value":2560.0},"screenH":{"value":1600.0},"flipImageX":{"value":0.0},"flipImageY":{"value":0.0},"flipSubp":{"value":0.0}}',
        # Your calibration:
        '{"configVersion":"1.0","serial":"00000","pitch":{"value":47.56401443481445},"slope":{"value":-5.480000019073486},"center":{"value":0.374184787273407},"viewCone":{"value":40.0},"invView":{"value":1.0},"verticalAngle":{"value":0.0},"DPI":{"value":338.0},"screenW":{"value":2560.0},"screenH":{"value":1600.0},"flipImageX":{"value":0.0},"flipImageY":{"value":0.0},"flipSubp":{"value":0.0}}')
    # Cache the patched file for subsequent runs.
    with open('holoplay.js', 'w') as f:
        f.write(holoplay_js)
# Obtain three.js r124 (the last release compatible with the Geometry API
# used below): reuse the cached copy, otherwise download and cache it.
if os.path.isfile('three.min.js'):
    with open('three.min.js', 'r') as f:
        three_js = f.read()
else:
    with urllib.request.urlopen('https://cdn.jsdelivr.net/gh/mrdoob/three.js@r124/build/three.min.js') as f:
        three_js = f.read().decode('utf-8')
    with open('three.min.js', 'w') as f:
        f.write(three_js)
# Obtain the matching OrbitControls.js (same three.js r124 tag), cached locally.
if os.path.isfile('OrbitControls.js'):
    with open('OrbitControls.js', 'r') as f:
        orbitcontrols_js = f.read()
else:
    with urllib.request.urlopen('https://cdn.jsdelivr.net/gh/mrdoob/three.js@r124/examples/js/controls/OrbitControls.js') as f:
        orbitcontrols_js = f.read().decode('utf-8')
    with open('OrbitControls.js', 'w') as f:
        f.write(orbitcontrols_js)
# Obtain the Pydnet CoreML model used to infer a depth map for photos that
# don't carry one; download once and cache on disk.
if os.path.isfile('pydnet.mlmodel'):
    pass
else:
    with urllib.request.urlopen('https://github.com/FilippoAleotti/mobilePydnet/raw/v2/iOS/AppML/Models/Pydnet.mlmodel') as f:
        pydnet = f.read()
    with open('pydnet.mlmodel', 'wb') as f:
        f.write(pydnet)
# ML depth inference is available, so any photo (not just Portrait shots)
# may be selected later on.
allow_ML = True
# ThreadingMixIn makes the embedded asset server answer each HTTP request on
# its own thread, so the WebView can fetch several files concurrently.
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
# This class uses iOS API to fetch a depth map from the provided image data. The beauty of it is that it works regardless if we have a JPG or a HEIC file.
# HTML/JS template (served to the WebView) for 'Point Cloud' mode: loads
# rgb.png + depth.png from the local server and renders one coloured point per
# pixel, displaced along Z by the depth value, through HoloPlay.js.
# The string content is runtime data and is left byte-for-byte unchanged.
pointcloud = '''
<html>
<head>
<style>
body {
margin: 0;
}
canvas {
width: 100%;
height: 100%;
display: block;
}
</style>
<meta charset="utf-8"/>
<script src="http://localhost:8080/holoplay.js"></script>
<script src="http://localhost:8080/three.min.js"></script>
</head>
<body>
<canvas></canvas>
<script>
"use strict";
var camera;
function loadImage(url) {
return new Promise((resolve, reject) => {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = (e) => {
resolve(img);
};
img.onerror = reject;
img.src = url;
});
}
function getImageData(img) {
const ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = img.width;
ctx.canvas.height = img.height;
ctx.drawImage(img, 0, 0);
return ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
}
function getPixel(imageData, u, v) {
const x = (u * (imageData.width - 1)) | 0;
const y = (v * (imageData.height - 1)) | 0;
if (x < 0 || x >= imageData.width || y < 0 || y >= imageData.height) {
return [0, 0, 0, 0];
} else {
const offset = (y * imageData.width + x) * 4;
return Array.from(imageData.data.slice(offset, offset + 4)).map((v) => v / 255);
}
}
async function main() {
const images = await Promise.all([
loadImage("http://localhost:8080/rgb.png"), // RGB
loadImage("http://localhost:8080/depth.png"), // Depth
]);
const data = images.map(getImageData);
const canvas = document.querySelector("canvas");
const renderer = new THREE.WebGLRenderer({ canvas: canvas });
// Constants you can experiment with: near, far, camera.position.z, depthSpread, skip, size
const fov = 70;
const aspect = 2;
const near = 1;
const far = 4000;
camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 450;
const scene = new THREE.Scene();
var holoplay = new HoloPlay(scene, camera, renderer);
const rgbData = data[0];
const depthData = data[1];
const skip = 1;
const across = Math.ceil(rgbData.width / skip);
const down = Math.ceil(rgbData.height / skip);
const positions = [];
const colors = [];
const color = new THREE.Color();
const spread = 200;
const depthSpread = 350;
const imageAspect = rgbData.width / rgbData.height;
const size = 1;
for (let y = 0; y < down; ++y) {
const v = y / (down - 1);
for (let x = 0; x < across; ++x) {
const u = x / (across - 1);
const rgb = getPixel(rgbData, u, v);
const depth = 1 - getPixel(depthData, u, v)[0];
positions.push((u * 2 - 1) * spread * imageAspect, (v * -2 + 1) * spread, depth * depthSpread - 220);
colors.push(...rgb.slice(0, 3));
}
}
const geometry = new THREE.BufferGeometry();
geometry.addAttribute("position", new THREE.Float32BufferAttribute(positions, 3));
geometry.addAttribute("color", new THREE.Float32BufferAttribute(colors, 3));
geometry.computeBoundingSphere();
const material = new THREE.PointsMaterial({ size: size, vertexColors: THREE.VertexColors });
const points = new THREE.Points(geometry, material);
scene.add(points);
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
var timer = setInterval(function () {
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
//renderer.render(scene, camera);
holoplay.render();
var ctxx = renderer.domElement.getContext("webgl");
var pixels = new Uint8Array(ctxx.drawingBufferWidth * ctxx.drawingBufferHeight * 4);
ctxx.readPixels(0, 0, ctxx.drawingBufferWidth, ctxx.drawingBufferHeight, ctxx.R, ctxx.UNSIGNED_BYTE, pixels);
var pixelSum = pixels.reduce(function (a, b) {
return a + b;
}, 0);
if (pixelSum != 0) {
clearInterval(timer);
}
}, 10);
}
render();
}
main();
</script>
</body>
</html>
'''
# HTML/JS template for 'Mesh' mode: builds a textured heightmap mesh (4
# triangles per pixel cell) from depth.png, textures it with rgb.png and
# renders through HoloPlay.js.  Runtime string data — unchanged byte-for-byte.
mesh = '''
<html>
<head>
<style>
html,
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
</style>
<meta charset="utf-8"/>
</head>
<body>
<canvas id="c"></canvas>
<script src="http://localhost:8080/holoplay.js"></script>
<script src="http://localhost:8080/three.min.js"></script>
<script>
var camera;
function main() {
const canvas = document.querySelector("#c");
const renderer = new THREE.WebGLRenderer({ canvas });
const fov = 70;
const aspect = 2; // the canvas default
const near = 1;
const far = 5000;
camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 0, 220);
camera.lookAt(0, 0, 0);
const scene = new THREE.Scene();
var holoplay = new HoloPlay(scene, camera, renderer);
const imgLoader = new THREE.ImageLoader();
imgLoader.load("http://localhost:8080/depth.png", createHeightmap);
function createHeightmap(image) {
// extract the data from the image by drawing it to a canvas
// and calling getImageData
const ctx = document.createElement("canvas").getContext("2d");
const { width, height } = image;
ctx.canvas.width = width;
ctx.canvas.height = height;
ctx.drawImage(image, 0, 0);
const { data } = ctx.getImageData(0, 0, width, height);
const geometry = new THREE.Geometry();
const cellsAcross = width - 1;
const cellsDeep = height - 1;
for (let z = 0; z < cellsDeep; ++z) {
for (let x = 0; x < cellsAcross; ++x) {
// compute row offsets into the height data
// we multiply by 4 because the data is R,G,B,A but we
// only care about R
const base0 = (z * width + x) * 4;
const base1 = base0 + width * 4;
// look up the height for the for points
// around this cell
const h00 = width * 0.14 + data[base0] * -1.4;
const h01 = width * 0.14 + data[base0 + 4] * -1.4;
const h10 = width * 0.14 + data[base1] * -1.4;
const h11 = width * 0.14 + data[base1 + 4] * -1.4;
// compute the average height
const hm = (h00 + h01 + h10 + h11) / 4;
// the corner positions
const x0 = x;
const x1 = x + 1;
const z0 = z;
const z1 = z + 1;
// remember the first index of these 5 vertices
const ndx = geometry.vertices.length;
// add the 4 corners for this cell and the midpoint
geometry.vertices.push(new THREE.Vector3(x0, h00, z0), new THREE.Vector3(x1, h01, z0), new THREE.Vector3(x0, h10, z1), new THREE.Vector3(x1, h11, z1), new THREE.Vector3((x0 + x1) / 2, hm, (z0 + z1) / 2));
// 2----3
// |\ /|
// | \/4|
// | /\ |
// |/ \|
// 0----1
// create 4 triangles
geometry.faces.push(new THREE.Face3(ndx, ndx + 4, ndx + 1), new THREE.Face3(ndx + 1, ndx + 4, ndx + 3), new THREE.Face3(ndx + 3, ndx + 4, ndx + 2), new THREE.Face3(ndx + 2, ndx + 4, ndx + 0));
// add the texture coordinates for each vertex of each face.
const u0 = x / cellsAcross;
const v0 = z / cellsDeep;
const u1 = (x + 1) / cellsAcross;
const v1 = (z + 1) / cellsDeep;
const um = (u0 + u1) / 2;
const vm = (v0 + v1) / 2;
geometry.faceVertexUvs[0].push(
[new THREE.Vector2(u0, v0), new THREE.Vector2(um, vm), new THREE.Vector2(u1, v0)],
[new THREE.Vector2(u1, v0), new THREE.Vector2(um, vm), new THREE.Vector2(u1, v1)],
[new THREE.Vector2(u1, v1), new THREE.Vector2(um, vm), new THREE.Vector2(u0, v1)],
[new THREE.Vector2(u0, v1), new THREE.Vector2(um, vm), new THREE.Vector2(u0, v0)]
);
}
}
geometry.computeFaceNormals();
// center the geometry
geometry.translate(width / -2, 0, height / -2);
const loader = new THREE.TextureLoader();
const texture = loader.load("http://localhost:8080/rgb.png");
texture.flipY = false;
texture.minFilter = THREE.LinearFilter;
var material = new THREE.MeshBasicMaterial({ map: texture});
var portrait = new THREE.Mesh(geometry, material);
portrait.rotation.x = 90 * THREE.Math.DEG2RAD;
scene.add(portrait);
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
var timer = setInterval(function () {
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
//renderer.render(scene, camera);
holoplay.render();
var ctxx = renderer.domElement.getContext("webgl");
var pixels = new Uint8Array(ctxx.drawingBufferWidth * ctxx.drawingBufferHeight * 4);
ctxx.readPixels(0, 0, ctxx.drawingBufferWidth, ctxx.drawingBufferHeight, ctxx.R, ctxx.UNSIGNED_BYTE, pixels);
var pixelSum = pixels.reduce(function (a, b) {
return a + b;
}, 0);
if (pixelSum != 0) {
scene.remove(portrait);
portrait.dispose();
portrait = undefined;
material.dispose();
material = undefined;
geometry.dispose();
geometry = undefined;
scene.dispose();
scene = undefined;
clearInterval(timer);
}
}, 10);
}
render();
}
main();
</script>
</body>
</html>
'''
# HTML/JS template for 'Wireframe' mode: same heightmap construction as the
# mesh template but on a 1/15-scale depth map and with a wireframe material.
# Runtime string data — unchanged byte-for-byte.
wireframe = '''
<html>
<head>
<style>
html,
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
</style>
<meta charset="utf-8"/>
</head>
<body>
<canvas id="c"></canvas>
<script src="http://localhost:8080/holoplay.js"></script>
<script src="http://localhost:8080/three.min.js"></script>
<script>
var camera;
function main() {
const canvas = document.querySelector("#c");
const renderer = new THREE.WebGLRenderer({ canvas });
const fov = 70;
const aspect = 2; // the canvas default
const near = 1;
const far = 5000;
camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 0, 15);
camera.lookAt(0, 0, 0);
const scene = new THREE.Scene();
var holoplay = new HoloPlay(scene, camera, renderer);
const imgLoader = new THREE.ImageLoader();
imgLoader.load("http://localhost:8080/depth.png", createHeightmap);
function createHeightmap(image) {
// extract the data from the image by drawing it to a canvas
// and calling getImageData
const ctx = document.createElement("canvas").getContext("2d");
var { width, height } = image;
width = Math.floor(width / 15);
height = Math.floor(height / 15);
ctx.imageSmoothingQuality = "high";
ctx.imageSmoothingEnabled = true;
ctx.canvas.width = width;
ctx.canvas.height = height;
ctx.drawImage(image, 0, 0, width, height);
const { data } = ctx.getImageData(0, 0, width, height);
const geometry = new THREE.Geometry();
const cellsAcross = width - 1;
const cellsDeep = height - 1;
for (let z = 0; z < cellsDeep; ++z) {
for (let x = 0; x < cellsAcross; ++x) {
// compute row offsets into the height data
// we multiply by 4 because the data is R,G,B,A but we
// only care about R
const base0 = (z * width + x) * 4;
const base1 = base0 + width * 4;
// look up the height for the for points
// around this cell
const h00 = width * 0.2 + data[base0] * -0.1;
const h01 = width * 0.2 + data[base0 + 4] * -0.1;
const h10 = width * 0.2 + data[base1] * -0.1;
const h11 = width * 0.2 + data[base1 + 4] * -0.1;
// compute the average height
const hm = (h00 + h01 + h10 + h11) / 4;
// the corner positions
const x0 = x;
const x1 = x + 1;
const z0 = z;
const z1 = z + 1;
// remember the first index of these 5 vertices
const ndx = geometry.vertices.length;
// add the 4 corners for this cell and the midpoint
geometry.vertices.push(new THREE.Vector3(x0, h00, z0), new THREE.Vector3(x1, h01, z0), new THREE.Vector3(x0, h10, z1), new THREE.Vector3(x1, h11, z1), new THREE.Vector3((x0 + x1) / 2, hm, (z0 + z1) / 2));
// 2----3
// |\ /|
// | \/4|
// | /\ |
// |/ \|
// 0----1
// create 4 triangles
geometry.faces.push(new THREE.Face3(ndx, ndx + 4, ndx + 1), new THREE.Face3(ndx + 1, ndx + 4, ndx + 3), new THREE.Face3(ndx + 3, ndx + 4, ndx + 2), new THREE.Face3(ndx + 2, ndx + 4, ndx + 0));
// add the texture coordinates for each vertex of each face.
const u0 = x / cellsAcross;
const v0 = z / cellsDeep;
const u1 = (x + 1) / cellsAcross;
const v1 = (z + 1) / cellsDeep;
const um = (u0 + u1) / 2;
const vm = (v0 + v1) / 2;
geometry.faceVertexUvs[0].push(
[new THREE.Vector2(u0, v0), new THREE.Vector2(um, vm), new THREE.Vector2(u1, v0)],
[new THREE.Vector2(u1, v0), new THREE.Vector2(um, vm), new THREE.Vector2(u1, v1)],
[new THREE.Vector2(u1, v1), new THREE.Vector2(um, vm), new THREE.Vector2(u0, v1)],
[new THREE.Vector2(u0, v1), new THREE.Vector2(um, vm), new THREE.Vector2(u0, v0)]
);
}
}
geometry.computeFaceNormals();
// center the geometry
geometry.translate(width / -2, 0, height / -2);
const loader = new THREE.TextureLoader();
const texture = loader.load("http://localhost:8080/rgb.png");
texture.flipY = false;
texture.minFilter = THREE.LinearFilter;
var material = new THREE.MeshBasicMaterial({ map: texture, wireframe: true, wireframeLinewidth: 1.5});
var portrait = new THREE.Mesh(geometry, material);
portrait.rotation.x = 90 * THREE.Math.DEG2RAD;
scene.add(portrait);
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
var timer = setInterval(function () {
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
holoplay.render();
var ctxx = renderer.domElement.getContext("webgl");
var pixels = new Uint8Array(ctxx.drawingBufferWidth * ctxx.drawingBufferHeight * 4);
ctxx.readPixels(0, 0, ctxx.drawingBufferWidth, ctxx.drawingBufferHeight, ctxx.R, ctxx.UNSIGNED_BYTE, pixels);
var pixelSum = pixels.reduce(function (a, b) {
return a + b;
}, 0);
if (pixelSum != 0) {
scene.remove(portrait);
portrait.dispose();
portrait = undefined;
material.dispose();
material = undefined;
geometry.dispose();
geometry = undefined;
scene.dispose();
scene = undefined;
clearInterval(timer);
}
}, 10);
}
render();
}
main();
</script>
</body>
</html>
'''
# HTML/JS template for the on-device camera-control page: an OrbitControls
# sphere whose camera position is reported back to Python via 'posx:'/'posy:'/
# 'posz:' iframe navigations.  'xxx' and 'yyy' are placeholders substituted
# with control_startcamera / control_sphere before serving.
# Runtime string data — unchanged byte-for-byte.
control = '''
<!DOCTYPE html>
<html>
<head>
<style>
html,
body {
margin: 0;
}
</style>
<meta charset="utf-8" />
</head>
<body>
<script src="http://localhost:8080/three.min.js"></script>
<script src="http://localhost:8080/OrbitControls.js"></script>
<script>
report = new Object();
report.posx = function(log) {
var iframe = document.createElement("IFRAME");
iframe.setAttribute("src", "posx:" + log);
document.documentElement.appendChild(iframe);
iframe.parentNode.removeChild(iframe);
iframe = null;
};
report.posy = function(log) {
var iframe = document.createElement("IFRAME");
iframe.setAttribute("src", "posy:" + log);
document.documentElement.appendChild(iframe);
iframe.parentNode.removeChild(iframe);
iframe = null;
};
report.posz = function(log) {
var iframe = document.createElement("IFRAME");
iframe.setAttribute("src", "posz:" + log);
document.documentElement.appendChild(iframe);
iframe.parentNode.removeChild(iframe);
iframe = null;
};
var camera, scene, renderer;
function init() {
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(12.5, window.innerWidth / window.innerHeight, 0.1, 1000);
camera.position.set(xxx);
renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
const controls = new THREE.OrbitControls(camera, renderer.domElement);
controls.addEventListener("change", report_camera);
mesh = new THREE.Mesh(new THREE.SphereGeometry(yyy), new THREE.MeshBasicMaterial({
color: 0xffffff,
wireframe: true
}));
scene.add(mesh);
}
window.addEventListener("resize", function() {
var width = window.innerWidth;
var height = window.innerHeight;
renderer.setSize(width, height);
camera.aspect = width / height;
camera.updateProjectionMatrix();
});
function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
}
function report_camera() {
report.posx(camera.position.x);
report.posy(camera.position.y);
report.posz(camera.position.z);
}
init();
render();
</script>
</body>
</html>
'''
# Global state for the viewer.
wk = None                          # WebView shown on the external display; set later (outside this chunk)
mode = mesh                        # currently served HTML template (mesh / wireframe / pointcloud)
depthSource = None                 # label text for where the depth map came from; set before UI build — TODO confirm
control_sphere = '14.667, 8, 8'    # substituted for 'yyy' in the control template (sphere geometry)
control_startcamera = '0, 0, 220'  # substituted for 'xxx' in the control template (camera start position)
@objc_util.on_main_thread
# NOTE(review): this decorator has no function definition under it in this
# chunk — as written the next statement is a 'for' loop, which is a
# SyntaxError.  The decorated callable was presumably lost when the file was
# assembled; recover it from the original source.
# This might break on non-English iOS. Too lazy to test.
for album in photos.get_smart_albums():
    if album.title == 'Portrait':
        my_album = album
        break
# Again using iOS API to get the photo's proper filename
try:
    if allow_ML:
        chosen_pic = photos.pick_asset(assets = photos.get_assets(), title = 'Select a photo')
        # chosen_pic = photos.pick_image(show_albums=True, include_metadata=False, original=True, raw_data=False, multi=False)
    else:
        chosen_pic = photos.pick_asset(assets = my_album.assets, title = 'Select a portrait photo')
    filename, file_extension = os.path.splitext(str(objc_util.ObjCInstance(chosen_pic).originalFilename()))
    # pick_asset returns None when the user cancels; str(None) -> 'None'.
    assert filename != 'None'
    output_filename = 'Holo_' + filename + '.png'
except:
    quit()
# Load the selected photo and obtain a depth map for it: first try the depth
# data embedded in the photo (Portrait mode); on any failure fall back to
# inferring depth with the Pydnet CoreML model.
try:
    chosen_pic_image = chosen_pic.get_image(original = False)
except:
    print('Image format (' + file_extension[1:] + ') not supported.')
    quit()
chosen_pic_data = chosen_pic.get_image_data(original = False).getvalue()
# Extract a depth map
try:
    # NOTE(review): CImage is not defined in this chunk — presumably a helper
    # defined elsewhere in the file that wraps the iOS depth-map API.
    chosen_pic_depth = CImage(objc_util.ns(chosen_pic_data)).to_png()
    chosen_pic_depth_stream = io.BytesIO(chosen_pic_depth)
    chosen_pic_depth_image = Image.open(chosen_pic_depth_stream)
    # Some Portrait photos have a completely white depth map. Let's treat those as if there was no depth map at all.
    arr = numpy.array(chosen_pic_depth_image).astype(int)
    if numpy.ptp(arr) == 0:
        if allow_ML:
            # NOTE(review): raising a plain string is a TypeError, not a real
            # exception instance — it still lands in the except branch below,
            # but 'raise Exception(...)' would be the intended form.
            raise('The selected portrait photo does not contain a depth map.')
        else:
            print('The selected portrait photo does not contain a depth map.')
            quit()
# If the selected photo does not contain a depth map, let's infer it using coreML
except Exception as e:
    # Hardcoded resolution for the Pydnet model
    chosen_pic_resized = chosen_pic.get_image(original = False).resize((640, 384))
    with io.BytesIO() as bts:
        chosen_pic_resized.save(bts, format = 'PNG')
        # NOTE(review): CoreML is also defined outside this chunk.
        chosen_pic_depth = CoreML(bts).to_png()
    chosen_pic_depth_stream = io.BytesIO(chosen_pic_depth)
    chosen_pic_depth_image = Image.open(chosen_pic_depth_stream)
    # Scale the inferred map back up to the photo's own size.
    chosen_pic_depth_image = chosen_pic_depth_image.resize((int(chosen_pic.get_ui_image().size[0]), int(chosen_pic.get_ui_image().size[1])), Image.BICUBIC)
    chosen_pic_depth_image = ImageOps.invert(chosen_pic_depth_image)
    # chosen_pic_depth_image.show()
    arr = numpy.array(chosen_pic_depth_image).astype(int)
# This part takes the depth map and normalizes its values to the range of (0, 180). You can experiment with the value, 255 is the ceiling.
# NOTE(review): the comment above says 180 but the scale factor below is 120 —
# the actual range is (0, 120); confirm which value is intended.
chosen_pic_depth_image_array = (120*(arr - numpy.min(arr))/numpy.ptp(arr)).astype(int)
chosen_pic_depth_image = Image.fromarray(numpy.uint8(chosen_pic_depth_image_array))
# chosen_pic_depth_image = chosen_pic_depth_image.convert('P', palette = Image.ADAPTIVE, colors = 2)
# chosen_pic_depth_image.show()
# Making the images smaller for faster processing.
chosen_pic_image.thumbnail((350, 350), Image.ANTIALIAS)
chosen_pic_depth_image.thumbnail((350, 350), Image.ANTIALIAS)
# When the colormap mode is enabled, we use the colormapped depth data as a texture.
chosen_pic_photo_image_buffer = io.BytesIO()
chosen_pic_colormap_image_buffer = io.BytesIO()
# Re-normalise the (greyscale) depth to 0-255, run it through the 'jet'
# colormap and drop the alpha channel.
arrx = numpy.array(chosen_pic_depth_image.convert('L')).astype(int)
pre_cmap_array = (255*(arrx - numpy.min(arrx))/numpy.ptp(arrx)).astype(int)
cm = matplotlib.cm.get_cmap('jet')
post_cmap_array = numpy.uint8(numpy.rint(cm(pre_cmap_array)*255))[:, :, :3]
cmap_img = Image.fromarray(post_cmap_array)
cmap_img.save(chosen_pic_colormap_image_buffer, format = 'PNG')
chosen_pic_image.save(chosen_pic_photo_image_buffer, format = 'PNG')
# PNG payloads served by the local HTTP server as rgb.png / depth.png.
rgbData = chosen_pic_photo_image_buffer.getvalue()
chosen_pic_depth_image_buffer = io.BytesIO()
chosen_pic_depth_image.save(chosen_pic_depth_image_buffer, format = 'PNG')
depthData = chosen_pic_depth_image_buffer.getvalue()
# Start the local asset server and build the on-phone control UI.
# NOTE(review): Server, modeSelect, textureSelect, close_button and
# debugDelegate are not defined in this chunk — presumably defined elsewhere
# in the file.
s = Server()
s.start_server()
modeSelector = ui.SegmentedControl(alpha = 0, corner_radius = 5)
modeSelector.segments = ('Mesh' , 'Wireframe', 'Point Cloud')
modeSelector.selected_index = 0
modeSelector.action = modeSelect
textureSelector = ui.SegmentedControl(alpha = 0, corner_radius = 5)
textureSelector.segments = ('Photo' , 'Colormap')
textureSelector.selected_index = 0
textureSelector.action = textureSelect
closeButton = ui.Button(title = 'Close', alpha = 0, background_color = 'black', tint_color = 'white', corner_radius = 5)
closeButton.action = close_button
# NOTE(review): depthSource is initialised to None above; if it is not
# reassigned before this line, the '+' concatenation raises TypeError —
# confirm it is set by the missing code.
depthSourceLabel = ui.Label(text = 'Depth Source: ' + depthSource, font = ('<system>', 14), alignment = ui.ALIGN_CENTER, alpha = 0, text_color = 'black')
# NOTE(review): 'apha' is likely a typo for 'alpha'.
cameracontrol = ui.WebView(apha = 0, corner_radius = 15)
cameracontrol.delegate = debugDelegate()
v = ui.View()
v.present(style = 'fullscreen', hide_title_bar = True)
v.add_subview(textureSelector)
v.add_subview(modeSelector)
v.add_subview(closeButton)
v.add_subview(depthSourceLabel)
v.add_subview(cameracontrol)
# Manual layout: controls centred horizontally and stacked vertically.
textureSelector.frame = (v.width / 2 - 75, v.height / 2 - 288, 150, 32)
textureSelector.alpha = 1
modeSelector.frame = (v.width / 2 - 125, v.height / 2 - 240, 250, 32)
modeSelector.alpha = 1
closeButton.frame = (v.width / 2 - 40, v.height / 2 - 192, 80, 32)
closeButton.alpha = 1
depthSourceLabel.frame = (v.width / 2 - 90, v.height / 2 - 150, 180, 32)
depthSourceLabel.alpha = 1
cameracontrol.frame = (v.width / 2 - 150, v.height / 2 - 100, 300, 300)
cameracontrol.alpha = 1
cameracontrol.load_url('http://localhost:8080/cameracontrol.html')
# NOTE(review): main(), wk, CGRect and second_screen are defined outside this
# chunk (wk is presumably the WKWebView placed on the external display).
main()
try:
    # If you are rendering a complex Three.js scene and the hologram doesn't look right, try increasing the sleep timer.
    # This is a hack around a webkit bug. The window needs to be resized once the rendering completes in order to use correct shader values.
    time.sleep(3)
    wk.setFrame_(CGRect((0, 0), (second_screen.bounds().size.width, second_screen.bounds().size.height)))
except:
    # Best-effort: if there is no second screen (or wk is unset), just skip.
    pass
| [
11748,
33245,
198,
11748,
28686,
198,
11748,
334,
72,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
7412,
198,
11748,
299,
32152,
198,
11748,
5205,
198,
11748,
4423,
346,
198,
11748,
17802,
198,
11748,
8624,
198,
11748,
19974,
7753,
198... | 2.384226 | 11,652 |
# !/usr/bin/python
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import transaction
import sys
import pyexcel as pe
from core.base_permission import create_test_paciente_data
| [
2,
5145,
14,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
8611,
198,
11748,
25064,
... | 3.411765 | 68 |
import matplotlib.pyplot as plt
from .interfaces import BaseLassoNetCV
from .utils import confidence_interval, eval_on_path
def plot_path(model, path, X_test, y_test, *, score_function=None):
    """
    Plot the evolution of the model on the path, namely:
    - lambda
    - number of selected variables
    - score

    Parameters
    ==========
    model : LassoNetClassifier or LassoNetRegressor
    path
        output of model.path
    X_test : array-like
    y_test : array-like
    score_function : function or None
        if None, use score_function=model.score
        score_function must take as input X_test, y_test
    """
    # TODO: plot with manually computed score
    score = eval_on_path(model, path, X_test, y_test, score_function=score_function)
    n_selected = [save.selected.sum() for save in path]
    lambda_ = [save.lambda_ for save in path]

    # (subplot id, x data, y data, x label, y label, log-scale x?)
    panels = [
        (311, n_selected, score, "number of selected features", "score", False),
        (312, lambda_, score, "lambda", "score", True),
        (313, lambda_, n_selected, "lambda", "number of selected features", True),
    ]
    plt.figure(figsize=(16, 16))
    for position, xs, ys, x_label, y_label, log_x in panels:
        plt.subplot(position)
        plt.grid(True)
        plt.plot(xs, ys, ".-")
        plt.xlabel(x_label)
        if log_x:
            plt.xscale("log")
        plt.ylabel(y_label)
    plt.tight_layout()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
764,
3849,
32186,
1330,
7308,
43,
28372,
7934,
33538,
198,
198,
6738,
764,
26791,
1330,
6628,
62,
3849,
2100,
11,
5418,
62,
261,
62,
6978,
628,
198,
4299,
7110,
62,
... | 2.47007 | 568 |
from django import forms
from captcha.fields import ReCaptchaField
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
48972,
13,
25747,
1330,
797,
19209,
11693,
15878,
628
] | 4.25 | 16 |
import logging
from typing import Any, Dict, List
from pydantic import BaseModel # pylint: disable=no-name-in-module
from tgcf.plugins import FileType, TgcfMessage, TgcfPlugin
from tgcf.utils import match
| [
11748,
18931,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
220,
1303,
279,
2645,
600,
25,
15560,
28,
3919,
12,
3672,
12,
259,
12,
21412,
198,
198,
6738,
256,
70,
12993,
13... | 3.086957 | 69 |
import numpy

# Load the whitespace-separated housing dataset as a 2-D float array.
data = numpy.genfromtxt(fname="housing.data")

################# Advanced
# Print each row as space-separated numbers.
for row in data:
    print(*row)

################# Reach 1
# Print each row as a Python list of the numbers' string forms.
for row in data:
    print([str(value) for value in row.tolist()])
| [
11748,
299,
32152,
198,
198,
7890,
796,
299,
32152,
13,
5235,
6738,
14116,
7,
69,
3672,
2625,
50028,
13,
7890,
4943,
198,
198,
14468,
2,
13435,
198,
1640,
1627,
287,
1366,
25,
198,
220,
220,
220,
3601,
46491,
1370,
8,
198,
198,
1446... | 2.747253 | 91 |
# Generated by Django 2.1.5 on 2019-02-11 16:40
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
20,
319,
13130,
12,
2999,
12,
1157,
1467,
25,
1821,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import re
import ast
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as ureq
from .helpers.constants import INFOGRAM_LINK
| [
11748,
302,
198,
11748,
6468,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
17141,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
355,
334,
42180,
198,
198,
6738,
764,
16794,
364,
13,
9979,
1187,
1330,
24890,
10761,
2... | 3.23913 | 46 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import re
import codecs
# Runs of CJK unified ideographs (\u4e00-\u9fff), fullwidth ASCII forms
# (\uff01-\uff5e) and common CJK punctuation/quote characters.
black_list_pattern = r'[\u4e00-\u9fff]+|[\uff01-\uff5e]+|[\u201c\u201d\u300a\u300b\u3010\u3011\uff5b\uff5d\uff1b\uff1a\u3002\uff0c\u3001\uff1f]+'
# Runs of characters that are neither whitespace nor in the \x21-\xE7 range.
white_list_pattern = r'[^\s\x21-\xE7]+'

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk — presumably defined
    # elsewhere in the file.
    main()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
302,
198,
11748,
40481,
82,
198,
198,
13424,
62,
4868,
62,
... | 1.861272 | 173 |
import sys
from math import gcd as gcd_math
from math import factorial as factorial_math
from scipy.special import perm
from scipy.special import comb
from random import randint
import data_structure_lib
import math_lib
# --------------------------------------------------
# data_structure_lib
# binary search
# Dijkstra's algorithm, Warshall-Floyd (all-pairs shortest paths)
# subset-sum problem
# (commented-out smoke test for the BST in data_structure_lib, kept as a
# string literal so it is never executed)
"""
# BST
def test_BST():
    l = [randint(1, 100) for _ in range(20)]
    print("sorted list:", sorted(l))
    tree = data_structure_lib.BST(l)
    if tree.search_min() != min(l):
        print("search_min test failed")
        sys.exit()
    if tree.search_max() != max(l):
        print("search_max test failed")
        sys.exit()
"""
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk — presumably defined
    # elsewhere in the file.
    main()
| [
11748,
25064,
198,
6738,
10688,
1330,
308,
10210,
355,
308,
10210,
62,
11018,
198,
6738,
10688,
1330,
1109,
5132,
355,
1109,
5132,
62,
11018,
198,
6738,
629,
541,
88,
13,
20887,
1330,
9943,
198,
6738,
629,
541,
88,
13,
20887,
1330,
19... | 2.339806 | 309 |
def read_yaml_file(filename):
    """
    @parameters filename, filename of the yaml file
    @returns dictionary of each parameter with the corresponding value.

    Only flat ``key: value`` pairs are supported.  Values without a '.' are
    converted to int, values with a '.' to float; anything non-numeric is
    kept as the raw string.  Blank or colon-less lines are skipped.
    """
    parameters = {}
    with open(filename) as fh:
        for line in fh:
            # partition(":") splits on the FIRST colon only, so values that
            # themselves contain ':' (URLs, timestamps) are kept intact —
            # split(":") used to truncate them.
            key, sep, value = line.strip().partition(":")
            if not sep:
                # Blank line or no 'key: value' shape — skip instead of
                # crashing with an IndexError as the old code did.
                continue
            key = key.strip()
            value = value.strip()
            try:
                if value.find(".") == -1:
                    value = int(value)
                else:
                    value = float(value)
            except ValueError:
                # Non-numeric value: keep the raw string rather than
                # silently dropping the parameter.
                pass
            parameters[key] = value
    return parameters


# Example:
# print(parameters["hsv_hue_max"])
# print(read_yaml_file("gate_2.yaml")["hsv_hue_max"])
4299,
1100,
62,
88,
43695,
62,
7753,
7,
34345,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2488,
17143,
7307,
29472,
11,
29472,
286,
262,
331,
43695,
2393,
198,
220,
220,
220,
2488,
7783,
82,
22155,
286,
1123,
11507,
351,
2... | 2.142202 | 436 |
# Reads people's names and weights until the user stops, then reports how many
# people were registered and who the heaviest / lightest people were
# (all ties are listed).
tot = 0               # number of people registered
nomep = []            # names tied for the heaviest weight
nomel = []            # names tied for the lightest weight
pesop = 0             # heaviest weight seen so far
pesol = float('inf')  # lightest weight seen so far (replaces the absurd
                      # '100000000000000000000 ** 10000' sentinel, a
                      # ~200000-digit integer computed at startup)
while True:
    nome = str(input('Nome: '))
    peso = float(input('Peso: '))
    tot += 1
    # Heaviest: a strictly greater weight restarts the list, an equal one ties.
    if peso > pesop:
        pesop = peso
        nomep = [nome]
    elif peso == pesop:
        nomep.append(nome)
    # Lightest: symmetric logic.
    if peso < pesol:
        pesol = peso
        nomel = [nome]
    elif peso == pesol:
        nomel.append(nome)
    # [:1] instead of [0] so an empty answer no longer raises IndexError;
    # the tuple membership test avoids '' matching the substring check 'in "Nn"'.
    opcao = str(input('Quer continuar? [S/N]: ')).strip()[:1]
    if opcao in ('N', 'n'):
        break
print(f'O total de pessoas cadastradas foi {tot}')
print(f'O maior peso registrado foi de {pesop}KG. peso de {nomep}')
print(f'O menor peso registrado foi de {pesol}KG. peso de {nomel}')
83,
313,
796,
657,
198,
77,
462,
79,
796,
17635,
198,
26601,
417,
796,
17635,
198,
47984,
455,
796,
17635,
198,
12272,
404,
796,
657,
198,
12272,
349,
796,
1802,
25645,
405,
12429,
33028,
198,
4514,
6407,
25,
198,
220,
9955,
455,
13... | 2.068293 | 410 |
""" scraper.py
Handles the webscraping of the app.
Key variables
- scraper.archive == Array of the titles of old notices
- scraper.notices_5 == Array of Meetings/Practices (has ALL INFORMATION)
- scraper.notices_3 == Array of Notices (has ALL INFORMATION BUT date and location)
Each is explained further below
"""
import datetime # datetime: so we can rewind -1 days to find yesterdays stuff.
import requests # requests: access URLs and download source
from bs4 import BeautifulSoup # bs4: HTML parser - access tags
import dateformatter # contains the daily scraped date formatted into a sentence
# INITIAL VARIABLES
# NOTE(review): both requests run at import time, so importing this module
# performs two network fetches; any failure surfaces as an import error.
URL = 'https://parents.newlands.school.nz/index.php/notices' # URL to notices of the school
URL_PAST = URL + '/' + str(dateformatter.dt - datetime.timedelta(days=1)) # URL to yesterdays notices
PAGE = requests.get(URL) # download source
SOUP = BeautifulSoup(PAGE.text, 'html.parser') # todays notices as bs4 soup object
SOUP_OLD = BeautifulSoup(requests.get(URL_PAST).text, 'html.parser') # yesterday's notices as bs4 soup object
# PROCESSING OF TODAY'S NOTICES
TR_SOUP = SOUP.find_all('tr') # Finds all the <table row> tags in the given page
'''
Notices format:
********TYPE #1:************** <td> elements: 5
<tr>
<td> $who is this notice for </td>
<td> $title of the notice </td>
<td> $where is this event </td>
<td> $when is it </td>
<td> $who wrote this notice </td>
</tr>
Succeded by...
<tr>
<td> $description of the notice </td>
</tr>
********TYPE #2:************** td elements: 3
<tr>
<td> $who is this notice for </td>
<td> $title of the notice </td>
<td> $who wrote this notice </td>
</tr>
followed by...
<tr>
<td> $description of the notice </td>
</tr>
Hence, the HTML is parsed by the number of td elements, which determines the notice type, I or II
'''
notices_5 = []  # Array to hold 5 td element notices (meetings/practices)
notices_3 = []  # Array to hold 3 td element notices (plain notices)
td_count = 0  # Number of <td> cells in the most recent header row; decides
              # which list the following description row belongs to.
for tr in TR_SOUP:
    # Extract all <td> from the table
    td = tr.find_all('td')
    # If the table empty, pass over it
    if len(td) == 0:
        continue
    # If the table has 5 elements, it is a 5td notice
    elif len(td) == 5:
        notices_5.append([i.text for i in td])
        td_count = 5
    # If the table has 3 elements, it is a 3td notice
    elif len(td) == 3:
        notices_3.append([i.text for i in td])
        td_count = 3
    # If the table has 1 element, it must be the description, hence we add to either notices3/5
    elif len(td) == 1:
        td = str(list(td)[0]) # Turns it into a string
        # If there have been 5 prev <td> elements, it is a descriptor of a 5_td_element notice, hence goes into 5notices
        # the list slicing from 15/16 --> 16 is to remove the <td></td> tags
        # NOTE(review): the 16 vs 15 start offsets presumably track slightly
        # different opening-tag markup in the two row types -- confirm against
        # the live page before changing either constant.
        if td_count == 5:
            notices_5[-1].append(td[16:-16])
        # If there have been 3 prev <td> elements, it is a descriptor of a 3_td_element notice, hence goes into 3notices
        elif td_count == 3:
            notices_3[-1].append(td[15:-16])
"""PROCESSING OF YESTERDAY'S NOTICES"""
TR_SOUP_OLD = SOUP_OLD.find_all('tr')  # all <tr> rows of yesterday's page
notices_5_old, notices_3_old = [], []  # same layout as notices_5 / notices_3
td_count = 0
# This is processed the same way as Today's notices -- read the ABOVE section for documentation
for tr in TR_SOUP_OLD:
    td = tr.find_all('td')
    if len(td) == 0:
        continue
    elif len(td) == 5:
        notices_5_old.append([i.text for i in td])
        td_count = 5
    elif len(td) == 3:
        notices_3_old.append([i.text for i in td])
        td_count = 3
    elif len(td) == 1:
        td = str(list(td)[0])
        if td_count == 5:
            notices_5_old[-1].append(td[16:-16])
        elif td_count == 3:
            notices_3_old[-1].append(td[15:-16])
# Only contains titles of the old notices -- no other info needed to check duplicity.
# Titles are lower-cased so the duplicate check is case-insensitive.
archive = set([i[1].lower() for i in notices_3_old] + [i[1].lower() for i in notices_5_old])
# Debug Content
if __name__ == '__main__':
    # Manual debug entry point: dump everything scraped in this run.
    print('Notices 5: ')
    for i in notices_5:
        print(i)
    print('\nNotices 3: ')
    for i in notices_3:
        print(i)
    print('Notices old:', archive)
| [
37811,
19320,
525,
13,
9078,
198,
12885,
829,
262,
3992,
1416,
2416,
278,
286,
262,
598,
13,
198,
198,
9218,
9633,
198,
220,
220,
220,
532,
19320,
525,
13,
17474,
6624,
15690,
286,
262,
8714,
286,
1468,
19748,
198,
220,
220,
220,
53... | 2.296945 | 1,997 |
import pytest
from tests.factories import RegistrationRequestFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
@pytest.mark.django_db
| [
11748,
12972,
9288,
198,
198,
6738,
5254,
13,
22584,
1749,
1330,
24610,
18453,
22810,
198,
6738,
5254,
13,
26791,
1330,
651,
62,
1177,
62,
1640,
62,
7220,
628,
198,
31,
9078,
9288,
13,
4102,
13,
28241,
14208,
62,
9945,
628,
198,
31,
... | 3.115385 | 52 |
import mrcnn.model as modellib
import os
import sys
import cv2
import random
import numpy as np
import pandas as pd
import deeplabcut
import json
import skimage
import skimage.io
from skimage.util import img_as_ubyte, img_as_float
from skimage import morphology, measure, filters
from shutil import copyfile
from skimage.measure import regionprops
from skimage.measure import find_contours
from skimage.morphology import square, dilation
from skimage.color import rgb2gray
from .mouse import MouseDataset
from .mouse import InferenceConfig
from .shape import shapes_to_labels_masks
from multiprocessing import Pool
import shutil
import time
import errno
import ntpath
import glob
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
# NOTE(review): mutates sys.path at import time so the local copy of the
# library is importable -- confirm this is still needed once packaged.
sys.path.append(ROOT_DIR)  # To find local version of the library
def video2frames(video_dir):
    """Convert a video into frames saved in a directory named as the video name.

    Frames are converted to grayscale and written as <index>.jpg under
    <video name>/images next to the video file.

    Args:
        video_dir: path to the video file.

    Returns:
        frames_dir: path to the directory containing the extracted frames.
    """
    cap = cv2.VideoCapture(video_dir)
    try:
        # Named constants instead of the magic numbers 7 / 1 used before.
        nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        data_dir = os.path.splitext(video_dir)[0]
        frames_dir = os.path.join(data_dir, "images")
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)
        if not os.path.exists(frames_dir):
            os.mkdir(frames_dir)
        for index in range(nframes):
            cap.set(cv2.CAP_PROP_POS_FRAMES, index)  # seek to a particular frame
            ret, frame = cap.read()
            if ret:
                image = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
                img_name = os.path.join(frames_dir, str(index) + ".jpg")
                skimage.io.imsave(img_name, image)
    finally:
        # BUG FIX: the capture handle was never released, leaking the
        # underlying video resource.
        cap.release()
    return frames_dir
def background_subtraction(frames_dir, background_dir):
    """Generate foregrounds corresponding to frames.

    Thresholds each frame against the background, cleans the result with
    morphological operations, and writes an RGB mask per frame into a fresh
    'FG' sibling directory (one mouse per channel when two blobs are found).

    Args:
        frames_dir: path to directory containing frames named <index>.jpg
        background_dir: path to the background image
    Returns:
        components: 1D array of number of blobs in each frame.
    """
    fg_dir = os.path.join(os.path.dirname(frames_dir), 'FG')
    # Recreate the output directory from scratch so stale masks never survive.
    try:
        os.mkdir(fg_dir)
    except FileExistsError:
        shutil.rmtree(fg_dir)
        os.mkdir(fg_dir)
    bg = img_as_float(skimage.io.imread(background_dir))
    if bg.ndim == 3:
        bg = rgb2gray(bg)
    # Per-pixel threshold: half of the background intensity.
    threshold = bg * 0.5
    frames_list = os.listdir(frames_dir)
    components = np.zeros(len(frames_list), dtype=int)
    for frame in range(len(frames_list)):
        im = img_as_float(skimage.io.imread(
            os.path.join(frames_dir, str(frame) + '.jpg')))
        if im.ndim == 3:
            im = rgb2gray(im)
        # Foreground = pixels markedly darker than the background.
        fg = (bg - im) > threshold
        bw1 = morphology.remove_small_objects(fg, 1000)
        bw2 = morphology.binary_closing(bw1, morphology.disk(radius=10))
        bw3 = morphology.binary_opening(bw2, morphology.disk(radius=10))
        label = measure.label(bw3)
        num_fg = np.max(label)  # number of connected blobs found
        masks = np.zeros([bg.shape[0], bg.shape[1], 3], dtype=np.uint8)
        if num_fg == 2:
            # Exactly two blobs: smooth each separately and store one per channel.
            bw3_1 = label == 1
            bw4_1 = morphology.binary_closing(
                bw3_1, morphology.disk(radius=30))
            bw5_1 = filters.median(bw4_1, morphology.disk(10))
            bw3_2 = label == 2
            bw4_2 = morphology.binary_closing(
                bw3_2, morphology.disk(radius=30))
            bw5_2 = filters.median(bw4_2, morphology.disk(10))
            # masks[:, :, 0] = img_as_bool(bw5_1)
            # masks[:, :, 1] = img_as_bool(bw5_2)
            masks[:, :, 0] = img_as_ubyte(bw5_1)
            masks[:, :, 1] = img_as_ubyte(bw5_2)
        else:
            # Otherwise dump the raw cleaned mask in the first channel only.
            masks[:, :, 0] = img_as_ubyte(bw3)
        components[frame] = num_fg
        # masks = masks.astype(np.uint8)
        skimage.io.imsave(os.path.join(fg_dir, str(frame) + '.png'), masks)
    # Persist the blob counts next to the frames for later stages.
    components_df = pd.DataFrame({'components': components})
    components_df.to_csv(os.path.join(os.path.dirname(
        frames_dir), 'components.csv'), index=False)
    return components
def split_train_val(dataset_dir, frac_split_train):
    """Split a dataset into subsets train and val inside dataset directory.

    Each annotation .json and its same-named .jpg image move together into
    either 'train' or 'val'.

    Args:
        dataset_dir: path to the dataset containing images and their annotation json files
        frac_split_train: fraction of the dataset assigned to the train subset
    """
    json_ids = [f for f in os.listdir(dataset_dir) if f.endswith('.json')]
    random.shuffle(json_ids)  # randomize which annotations land in each subset
    train_dir = os.path.join(dataset_dir, 'train')
    os.mkdir(train_dir)
    val_dir = os.path.join(dataset_dir, 'val')
    os.mkdir(val_dir)
    n_train = int(frac_split_train * len(json_ids))
    _move_annotation_pairs(dataset_dir, train_dir, json_ids[:n_train])
    _move_annotation_pairs(dataset_dir, val_dir, json_ids[n_train:])


def _move_annotation_pairs(src_dir, dst_dir, json_ids):
    """Move each annotation json and its matching .jpg from src_dir to dst_dir.

    Replaces the original duplicated copyfile + os.remove blocks with
    shutil.move, which is equivalent (and a cheap rename on one filesystem).
    """
    for json_id in json_ids:
        image_id = os.path.splitext(json_id)[0] + '.jpg'
        shutil.move(os.path.join(src_dir, json_id),
                    os.path.join(dst_dir, json_id))
        shutil.move(os.path.join(src_dir, image_id),
                    os.path.join(dst_dir, image_id))
def create_dataset(images_dir, components_info, num_annotations):
    """Randomly choose up to num_annotations images with one blob in their foreground.

    Chosen images are copied into a 'dataset' directory created next to
    images_dir.

    Args:
        images_dir: path to images directory
        components_info: path to a csv file with a 'components' column, or an array
        num_annotations: maximum number of images to pick
    """
    if isinstance(components_info, str):
        components = pd.read_csv(components_info)
        components = np.array(components.loc[:, 'components'])
    else:
        components = components_info
    dataset_dir = os.path.join(os.path.dirname(images_dir), 'dataset')
    os.mkdir(dataset_dir)
    # Frames whose foreground segmentation found exactly one connected blob.
    touching = [i for i in range(len(components)) if components[i] == 1]
    # Simplification of the original branch on len(touching): shuffling is
    # harmless when there are fewer candidates than requested, and slicing
    # already caps the count, so the same set of files is copied either way.
    random.shuffle(touching)
    for image_id in touching[:num_annotations]:
        copyfile(os.path.join(images_dir, str(image_id) + '.jpg'),
                 os.path.join(dataset_dir, str(image_id) + '.jpg'))
def correct_segmentation_errors(components_info, fix_dir, frames_dir):
    """Count and pick one failed frame in every 3 consecutive fail frames for correcting.

    A frame "fails" when its blob count is not exactly 2. Within each run of
    consecutive failures, every third frame is copied into fix_dir, plus
    frame 0 whenever it failed.

    Args:
        components_info: path to a csv file or an array
        fix_dir: path to directory for saving frames chosen
        frames_dir: path to directory containing frames
    Returns:
        picked: the number of frames picked up
    """
    if isinstance(components_info, str):
        loaded = pd.read_csv(components_info)
        comps = np.array(loaded.loc[:, 'components'])
    else:
        comps = components_info
    failed = np.array(comps != 2, dtype=int)
    # run_length[i] = position of frame i inside its current failure run
    # (0 outside a run, and also at a run that starts on frame 0).
    run_length = np.zeros(len(failed))
    in_run = 0
    for idx, is_fail in enumerate(failed):
        if is_fail == 1 and in_run == 0:
            in_run = 1
        elif is_fail == 0:
            in_run = 0
        if in_run == 1 and idx > 0:
            run_length[idx] = run_length[idx - 1] + 1
    picked = 0
    # Frame 0 is handled specially because run_length never counts it.
    if comps[0] != 2:
        copyfile(os.path.join(frames_dir, '0.jpg'),
                 os.path.join(fix_dir, '0.jpg'))
        picked += 1
    for idx in range(len(run_length)):
        if run_length[idx] > 0 and run_length[idx] % 3 == 0:
            name = str(idx) + '.jpg'
            copyfile(os.path.join(frames_dir, name),
                     os.path.join(fix_dir, name))
            picked += 1
    return picked
def tracking_inference(fg_dir, components_info):
    """Track the identities of mice.

    Seeds tracking with the first frame that has exactly two blobs, then for
    each later two-blob frame assigns the new masks to whichever channel they
    overlap most with in the previous assignment; frames without two blobs
    reuse the previous assignment.

    Args:
        fg_dir: path to directory containing foreground
        components_info: path to a csv file or an array
    """
    tracking_dir = os.path.join(os.path.dirname(fg_dir), 'tracking')
    if not os.path.exists(tracking_dir):
        os.mkdir(tracking_dir)
    if isinstance(components_info, str):
        components = pd.read_csv(components_info)
        components = np.array(components.loc[:, 'components'])
    else:
        components = components_info
    #I = skimage.io.imread(os.path.join(fg_dir, str(0) + '.png'))
    #skimage.io.imsave(os.path.join(tracking_dir, str(0) + '.png'), I)
    # Find the earliest frame with exactly two blobs to seed the identities.
    flag = 1
    index = 0
    while(flag):
        if components[index]==2:
            flag = 0
        else:
            index = index + 1
    I = skimage.io.imread(os.path.join(fg_dir, str(index) + '.png'))
    skimage.io.imsave(os.path.join(tracking_dir, str(0) + '.png'), I)
    I = img_as_ubyte(I/255)
    for i in range(1, components.shape[0]):
        # Channels 0/1 of I hold the previous frame's mouse-1/mouse-2 masks.
        I1 = I[:, :, 0]
        I2 = I[:, :, 1]
        if components[i] == 2:
            J = skimage.io.imread(os.path.join(
                fg_dir, str(i) + '.png')) / 255.0
            J1 = J[:, :, 0]
            J2 = J[:, :, 1]
            # Fractional overlap of each new mask with each previous identity.
            overlap_1 = np.sum(np.multiply(J1, I1)[:]) / np.sum(I1[:])
            overlap_2 = np.sum(np.multiply(J2, I1)[:]) / np.sum(I1[:])
            overlap_12 = np.abs(overlap_1 - overlap_2)
            overlap_3 = np.sum(np.multiply(J1, I2)[:]) / np.sum(I2[:])
            overlap_4 = np.sum(np.multiply(J2, I2)[:]) / np.sum(I2[:])
            overlap_34 = np.abs(overlap_3 - overlap_4)
            # Decide assignment based on whichever identity discriminates the
            # two candidate masks more strongly.
            if overlap_12 >= overlap_34:
                if overlap_1 >= overlap_2:
                    I[:, :, 0] = J1
                    I[:, :, 1] = J2
                else:
                    I[:, :, 0] = J2
                    I[:, :, 1] = J1
            else:
                if overlap_3 >= overlap_4:
                    I[:, :, 1] = J1
                    I[:, :, 0] = J2
                else:
                    I[:, :, 1] = J2
                    I[:, :, 0] = J1
            I = I.astype(np.uint8) * 255
            skimage.io.imsave(os.path.join(tracking_dir, str(i) + '.png'), I)
        else:
            #I = I.astype(np.uint8) * 255
            # Not exactly two blobs: carry the previous assignment forward.
            skimage.io.imsave(os.path.join(tracking_dir, str(i) + '.png'), I)
def tracking_inference_marker(fg_dir, components_info):
    """Track the identities of mice.

    Simple forward propagation: frames where exactly two blobs were found keep
    their own foreground mask, every other frame reuses the previous frame's
    mask; the output sequence starts from the earliest two-blob frame.

    Args:
        fg_dir: path to directory containing foreground
        components_info: path to a csv file or an array
    """
    tracking_dir = os.path.join(os.path.dirname(fg_dir), 'tracking')
    if not os.path.exists(tracking_dir):
        os.mkdir(tracking_dir)
    if isinstance(components_info, str):
        frame_components = pd.read_csv(components_info)
        frame_components = np.array(frame_components.loc[:, 'components'])
    else:
        frame_components = components_info
    # Locate the earliest frame whose segmentation found exactly two blobs.
    first_good = 0
    while frame_components[first_good] != 2:
        first_good += 1
    seed = skimage.io.imread(os.path.join(fg_dir, str(first_good) + '.png'))
    skimage.io.imsave(os.path.join(tracking_dir, str(0) + '.png'), seed)
    for frame in range(1, frame_components.shape[0]):
        if frame_components[frame] == 2:
            source = os.path.join(fg_dir, str(frame) + '.png')
        else:
            # Bad segmentation: fall back to the previous frame's mask.
            source = os.path.join(fg_dir, str(frame - 1) + '.png')
        mask = skimage.io.imread(source)
        skimage.io.imsave(os.path.join(tracking_dir, str(frame) + '.png'), mask)
def mask_based_detection(tracking_dir, components_info, floor=[[51, 51], [490, 490]], image_shape=(540, 540)):
    """Detect snout and tailbase coordinated from masks.

    For each mouse mask, the point on the contour farthest from the center of
    mass is one body end, and the contour point farthest from that one is the
    other; snout/tailbase labels are then kept consistent with the previous
    frame by nearest-distance matching.

    NOTE(review): the mutable default for `floor` is a Python anti-pattern;
    it is harmless here because the list is only read, never mutated.

    Args:
        tracking_dir: path to directory containing masks corresponding to identities
        components_info: path to a csv file or an array
        floor: coordinates of top left and bottom right corners of rectangular floor zone
        image_shape: size of frames (height, width)
    Returns:
        np.array(features_mouse1_df): coordinates of snout and tailbase of mouse 1
        np.array(features_mouse2_df): coordinates of snout and tailbase of mouse 2
    """
    if isinstance(components_info, str):
        components = pd.read_csv(components_info)
        components = np.array(components.loc[:, 'components'])
    else:
        components = components_info
    # Rows are frames; columns are (end1_row, end1_col, end2_row, end2_col).
    features_mouse1 = np.zeros((len(components), 4))
    features_mouse2 = np.zeros((len(components), 4))
    floor_zone = np.zeros(image_shape)
    floor_zone[floor[0][0]:floor[1][0], floor[0][1]:floor[1][1]] = 1
    for i in range(len(components)):
        #print('frames: ', i)
        I = (skimage.io.imread(os.path.join(
            tracking_dir, str(i) + '.png')) / 255.0).astype(int)
        I1 = I[:, :, 0]
        I2 = I[:, :, 1]
        properties1 = regionprops(I1.astype(int), I1.astype(float))
        center_of_mass1 = properties1[0].centroid
        properties2 = regionprops(I2.astype(int), I2.astype(float))
        center_of_mass2 = properties2[0].centroid
        BB1 = find_contours(I1, 0.5)[0]
        BB2 = find_contours(I2, 0.5)[0]
        # mouse 1
        # Farthest contour point from the centroid = one body end.
        center_BB1 = np.sum((BB1 - center_of_mass1) ** 2, axis=1)
        index1 = np.argmax(center_BB1)
        I1_end1 = BB1[index1]
        # Farthest contour point from that end = the other body end.
        end1_BB1 = np.sum((BB1 - I1_end1) ** 2, axis=1)
        index2 = np.argmax(end1_BB1)
        I1_end_max = np.max(end1_BB1)
        I1_end2 = BB1[index2]
        # Fraction of the mask inside the floor zone (1.0 = fully inside).
        condition_mouse1 = np.sum(np.multiply(
            floor_zone, I1)[:]) / np.sum(I1[:])
        if i == 0:
            features_mouse1[i, :2] = I1_end1
            features_mouse1[i, 2:] = I1_end2
        else:
            # Trust the fresh detection only for an elongated mouse fully on
            # the floor; otherwise keep end labels consistent with the
            # previous frame by proximity.
            if ((I1_end_max >= 90) & (condition_mouse1 == 1)):
                features_mouse1[i, :2] = I1_end1
                features_mouse1[i, 2:] = I1_end2
            else:
                end1_nose = np.sum((I1_end1 - features_mouse1[i - 1, :2]) ** 2)
                end1_tail = np.sum((I1_end1 - features_mouse1[i - 1, 2:]) ** 2)
                if end1_nose < end1_tail:
                    features_mouse1[i, :2] = I1_end1
                    features_mouse1[i, 2:] = I1_end2
                else:
                    features_mouse1[i, :2] = I1_end2
                    features_mouse1[i, 2:] = I1_end1
        # mouse 2 (same procedure as mouse 1)
        center_BB2 = np.sum((BB2 - center_of_mass2) ** 2, axis=1)
        index1 = np.argmax(center_BB2)
        I2_end1 = BB2[index1]
        end1_BB2 = np.sum((BB2 - I2_end1) ** 2, axis=1)
        index2 = np.argmax(end1_BB2)
        I2_end_max = np.max(end1_BB2)
        I2_end2 = BB2[index2]
        condition_mouse2 = np.sum(np.multiply(
            floor_zone, I2)[:]) / np.sum(I2[:])
        if i == 0:
            features_mouse2[i, :2] = I2_end1
            features_mouse2[i, 2:] = I2_end2
        else:
            if ((I2_end_max >= 90) & (condition_mouse2 == 1)):
                features_mouse2[i, :2] = I2_end1
                features_mouse2[i, 2:] = I2_end2
            else:
                end1_nose = np.sum((I2_end1 - features_mouse2[i - 1, :2]) ** 2)
                end1_tail = np.sum((I2_end1 - features_mouse2[i - 1, 2:]) ** 2)
                if end1_nose < end1_tail:
                    features_mouse2[i, :2] = I2_end1
                    features_mouse2[i, 2:] = I2_end2
                else:
                    features_mouse2[i, :2] = I2_end2
                    features_mouse2[i, 2:] = I2_end1
    # Columns hold (row, col) pairs, so x comes from the odd columns.
    features_mouse1 = np.round(features_mouse1, 2)
    features_mouse1_df = pd.DataFrame({'snout_x': features_mouse1[:, 1],
                                       'snout_y': features_mouse1[:, 0],
                                       'tailbase_x': features_mouse1[:, 3],
                                       'tailbase_y': features_mouse1[:, 2]})
    features_mouse1_df.to_csv(os.path.join(os.path.dirname(tracking_dir), 'features_mouse1_md.csv'),
                              index=False)
    features_mouse2 = np.round(features_mouse2, 2)
    features_mouse2_df = pd.DataFrame({'snout_x': features_mouse2[:, 1],
                                       'snout_y': features_mouse2[:, 0],
                                       'tailbase_x': features_mouse2[:, 3],
                                       'tailbase_y': features_mouse2[:, 2]})
    features_mouse2_df.to_csv(os.path.join(os.path.dirname(tracking_dir), 'features_mouse2_md.csv'),
                              index=False)
    return np.array(features_mouse1_df), np.array(features_mouse2_df)
def mice_separation(tracking_dir, frames_dir, bg_dir):
    """Separate the sequence of frames into 2 videos. Each video contains one mouse.

    In each output frame the other mouse's pixels are replaced with the
    background image, so every video shows a single animal on a clean scene.

    Args:
        tracking_dir: path to directory containing masks corresponding to identities
        frames_dir: path to frames directory
        bg_dir: path to the background image
    """
    bg = img_as_ubyte(skimage.io.imread(bg_dir))
    num_images = len(os.listdir(tracking_dir))
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # One grayscale (isColor=0) 30 fps writer per mouse, saved next to the
    # tracking directory.
    video_mouse1_dir = os.path.join(
        os.path.dirname(tracking_dir), 'mouse1.avi')
    video1 = cv2.VideoWriter(video_mouse1_dir, fourcc,
                             30, (bg.shape[1], bg.shape[0]), 0)
    video_mouse2_dir = os.path.join(
        os.path.dirname(tracking_dir), 'mouse2.avi')
    video2 = cv2.VideoWriter(video_mouse2_dir, fourcc,
                             30, (bg.shape[1], bg.shape[0]), 0)
    for i in range(num_images):
        masks = skimage.io.imread(os.path.join(
            tracking_dir, str(i) + '.png')) / 255
        image = skimage.io.imread(os.path.join(frames_dir, str(i) + '.jpg'))
        # Dilate both masks so a margin around each mouse is also replaced.
        mask1 = masks[:, :, 0].astype(np.uint8)
        mask1 = dilation(mask1, square(10))
        mask2 = masks[:, :, 1].astype(np.uint8)
        mask2 = dilation(mask2, square(10))
        # Keep pixels outside mouse 2 (or inside mouse 1 where they overlap);
        # fill the rest from the background.
        mouse2_remove = (mask2 != 1) | (mask1 == 1)
        mouse1 = np.multiply(image, mouse2_remove) + \
            np.multiply(bg, (1 - mouse2_remove))
        mouse1 = img_as_ubyte(mouse1)
        mouse1_remove = (mask1 != 1) | (mask2 == 1)
        mouse2 = np.multiply(image, mouse1_remove) + \
            np.multiply(bg, (1 - mouse1_remove))
        mouse2 = img_as_ubyte(mouse2)
        video1.write(mouse1)
        video2.write(mouse2)
    cv2.destroyAllWindows()
    video1.release()
    video2.release()
def deeplabcut_detection(config_dir, video_dir):
    """Detect snout and tailbase coordinated with Deeplabcut model.

    Runs DeepLabCut on the given videos and reads back the two .h5 result
    files, one per single-mouse video.

    NOTE(review): os.listdir returns files in arbitrary order, so which .h5
    becomes "mouse 1" vs "mouse 2" is not guaranteed -- verify the mapping
    against the actual output filenames before relying on it.

    Args:
        config_dir: path to config file
        video_dir: path to video input (list of video paths)
    Returns:
        features_mouse1: coordinates of snout and tailbase of mouse 1
        features_mouse2: coordinates of snout and tailbase of mouse 2
    """
    deeplabcut.analyze_videos(config_dir, video_dir, videotype='.avi')
    dlc_output = [f for f in os.listdir(
        os.path.dirname(video_dir[0])) if f.endswith('.h5')]
    # mouse1
    mouse1_dlc = pd.read_hdf(os.path.join(
        os.path.dirname(video_dir[0]), dlc_output[0]))
    # Columns 0/1 are snout x/y, 9/10 are tailbase x/y in the DLC output.
    features_mouse1 = mouse1_dlc.values[:, [0, 1, 9, 10]]
    features_mouse1 = np.round(features_mouse1, 2)
    features_mouse1_df = pd.DataFrame({'snout_x': np.round(mouse1_dlc.values[:, 0], 2),
                                       'snout_y': np.round(mouse1_dlc.values[:, 1], 2),
                                       'tailbase_x': np.round(mouse1_dlc.values[:, 9], 2),
                                       'tailbase_y': np.round(mouse1_dlc.values[:, 10], 2)})
    features_mouse1_df.to_csv(os.path.join(os.path.dirname(video_dir[0]), 'features_mouse1_dlc.csv'),
                              index=False)
    mouse2_dlc = pd.read_hdf(os.path.join(
        os.path.dirname(video_dir[0]), dlc_output[1]))
    features_mouse2 = mouse2_dlc.values[:, [0, 1, 9, 10]]
    features_mouse2 = np.round(features_mouse2, 2)
    features_mouse2_df = pd.DataFrame({'snout_x': np.round(mouse2_dlc.values[:, 0], 2),
                                       'snout_y': np.round(mouse2_dlc.values[:, 1], 2),
                                       'tailbase_x': np.round(mouse2_dlc.values[:, 9], 2),
                                       'tailbase_y': np.round(mouse2_dlc.values[:, 10], 2)})
    features_mouse2_df.to_csv(os.path.join(os.path.dirname(video_dir[0]), 'features_mouse2_dlc.csv'),
                              index=False)
    return features_mouse1, features_mouse2
def ensemble_features(features_mouse_md, features_mouse_dlc, tracking_dir, mouse_id=1):
    """Ensemble the result of mask-based detection and deeplabcut-based detection.

    Per frame and per body part, the DLC coordinate is kept when it lands
    inside (a dilation of) that mouse's tracked mask; otherwise the
    mask-based coordinate is used.

    NOTE(review): the output CSV maps snout_x to column 0 while the
    mask-based CSVs map snout_x to column 1 -- confirm the two feature
    arrays use the same (x, y) column convention before ensembling.

    Args:
        features_mouse_md: coordinates of snout and tailbase generated by mask-based detection
        features_mouse_dlc: coordinates of snout and tailbase generated by deeplabcut detection
        tracking_dir: path to directory containing masks corresponding to identities
        mouse_id: mouse id ( 1 or 2)
    Returns:
        features_ensemble: ensemble coordinates of snout and tailbase
    """
    features_ensemble = np.zeros(features_mouse_md.shape)
    for i in range(len(features_mouse_md)):
        masks = skimage.io.imread(os.path.join(
            tracking_dir, str(i) + '.png')) / 255.0
        # Channel 0 holds mouse 1, channel 1 holds mouse 2.
        mask = masks[:, :, mouse_id - 1].astype(int)
        mask = dilation(mask, square(15))
        # One-hot images marking where DLC placed each body part.
        nose_DLC = np.zeros(mask.shape)
        tailbase_DLC = np.zeros(mask.shape)
        nose_DLC[int(features_mouse_dlc[i, 1]),
                 int(features_mouse_dlc[i, 0])] = 1
        tailbase_DLC[int(features_mouse_dlc[i, 3]),
                     int(features_mouse_dlc[i, 2])] = 1
        # Accept the DLC point only if it falls on the dilated mask.
        if np.sum(np.multiply(mask, nose_DLC)[:]) > 0:
            features_ensemble[i, :2] = features_mouse_dlc[i, :2]
        else:
            features_ensemble[i, :2] = features_mouse_md[i, :2]
        if np.sum(np.multiply(mask, tailbase_DLC)[:]) > 0:
            features_ensemble[i, 2:] = features_mouse_dlc[i, 2:]
        else:
            features_ensemble[i, 2:] = features_mouse_md[i, 2:]
    features_ensemble_df = pd.DataFrame({'snout_x': features_ensemble[:, 0],
                                         'snout_y': features_ensemble[:, 1],
                                         'tailbase_x': features_ensemble[:, 2],
                                         'tailbase_y': features_ensemble[:, 3]})
    features_ensemble_df.to_csv(os.path.join(os.path.dirname(tracking_dir),
                                             'features_mouse' + str(mouse_id) + '_ensemble.csv'), index=False)
    return features_ensemble
def labelmejson_to_png(fix_dir, output_dir):
    """Convert annotations created by labelme to mask images.

    Renders each .json annotation in fix_dir into a same-named RGB .png in
    output_dir, with one mouse mask per channel in the first two channels.

    Args:
        fix_dir: path to directory containing labelme .json annotations
        output_dir: path to save the rendered .png masks
    """
    json_ids = [f for f in os.listdir(fix_dir) if f.endswith('.json')]
    dataset_fix = MouseDataset()
    dataset_fix.load_mouse(fix_dir, "")
    class_name_to_id = {label["name"]: label["id"]
                        for label in dataset_fix.class_info}
    # Read mask file from json
    for json_id in json_ids:
        json_id_dir = os.path.join(fix_dir, json_id)
        with open(json_id_dir) as f:
            data = json.load(f)
        image_shape = (data['imageHeight'], data['imageWidth'])
        cls, masks = shapes_to_labels_masks(img_shape=image_shape,
                                            shapes=data['shapes'],
                                            label_name_to_value=class_name_to_id)
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented drop-in replacement it aliased.
        masks_rgb = np.zeros(
            (data['imageHeight'], data['imageWidth'], 3), dtype=float)
        # First two channels carry the two mouse masks; the third stays empty.
        masks_rgb[:, :, :2] = masks[:, :, :2]
        skimage.io.imsave(os.path.join(
            output_dir, os.path.splitext(json_id)[0] + '.png'), masks_rgb)
def mouse_mrcnn_segmentation(components_info, frames_dir, background_dir, model_dir, model_path=None):
    """Segment mice using Mask-RCNN model.

    Only frames whose earlier blob count is not 2 are re-segmented; when the
    model finds two valid masks the frame's component count is upgraded to 2
    and the per-channel mask image is written into the 'FG' directory.

    Args:
        components_info: path to a csv file or an array
        frames_dir: path to frames directory
        background_dir: path to background image (used only for its shape)
        model_dir: path to save log and trained model
        model_path: path to model weights; when None the latest checkpoint
            under model_dir is loaded
    Returns:
        components: array of the number of blobs in each frames
    """
    config = InferenceConfig()
    # config.set_config(batch_size=1)
    # Create model object in inference mode.
    model = modellib.MaskRCNN(
        mode="inference", model_dir=model_dir, config=config)
    if model_path:
        model.load_weights(model_path, by_name=True)
    else:
        model_path = model.find_last()
        model.load_weights(model_path, by_name=True)
    output_dir = os.path.join(os.path.dirname(frames_dir), 'FG')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    bg = cv2.imread(background_dir)
    if isinstance(components_info, str):
        components = pd.read_csv(components_info)
        components = np.array(components.loc[:, 'components'])
    else:
        components = components_info
    print("The video has {} frames: ".format(components.shape[0]))
    for i in range(components.shape[0]):
        if components[i] != 2:
            image_name = str(i) + '.jpg'
            image = skimage.io.imread(frames_dir + '/' + image_name)
            if image.ndim == 2:
                image_rgb = skimage.color.gray2rgb(image)
            else:
                image_rgb = image
            results = model.detect([image_rgb], verbose=0)
            results_package = results[0]
            masks_rgb = np.zeros((bg.shape[0], bg.shape[1], 3), dtype=np.uint8)
            if len(results_package["scores"]) >= 2:
                # Keep the two highest-scoring detections only.
                class_ids = results_package['class_ids'][:2]
                scores = results_package['scores'][:2]
                masks = results_package['masks'][:, :, :2]  # Bool
                rois = results_package['rois'][:2, :]
                # Clean each mask: drop specks below 1000 px, then pad edges.
                masks_1 = morphology.remove_small_objects(masks[:, :, 0], 1000)
                masks_1 = morphology.binary_dilation(
                    masks_1, morphology.disk(radius=3))
                masks_2 = morphology.remove_small_objects(masks[:, :, 1], 1000)
                masks_2 = morphology.binary_dilation(
                    masks_2, morphology.disk(radius=3))
                # Accept the frame only when both masks survived cleaning.
                if (masks_1.sum().sum() > 0) & (masks_2.sum().sum() > 0):
                    masks_rgb[:, :, 0] = img_as_ubyte(masks_1)
                    masks_rgb[:, :, 1] = img_as_ubyte(masks_2)
                    components[i] = 2
            skimage.io.imsave(os.path.join(
                output_dir, str(i) + '.png'), masks_rgb)
    # Persist the (possibly upgraded) blob counts.
    components_df = pd.DataFrame({'components': components})
    components_df.to_csv(os.path.join(os.path.dirname(
        frames_dir), 'components.csv'), index=False)
    return components
def mouse_mrcnn_segmentation_multi_images(components_info, frames_dir, background_dir, model_dir, model_path=None, batch_size=2):
    """Segment mice using Mask-RCNN model, running inference in batches.

    Unlike mouse_mrcnn_segmentation, every frame is re-segmented (the prior
    blob count is not used to skip frames), batch_size images at a time.

    Args:
        components_info: path to a csv file or an array
        frames_dir: path to frames directory
        background_dir: path to background image (used only for its shape)
        model_dir: path to save log and trained model
        model_path: path to model weights; when None the latest checkpoint
            under model_dir is loaded
        batch_size: int, number of images per inference batch
    Returns:
        components: array of the number of blobs in each frames
    """
    config = InferenceConfig()
    config.IMAGES_PER_GPU = batch_size
    # Create model object in inference mode.
    model = modellib.MaskRCNN(
        mode="inference", model_dir=model_dir, config=config)
    if model_path:
        model.load_weights(model_path, by_name=True)
    else:
        model_path = model.find_last()
        model.load_weights(model_path, by_name=True)
    output_dir = os.path.join(os.path.dirname(frames_dir), 'FG')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    bg = cv2.imread(background_dir)
    if isinstance(components_info, str):
        components = pd.read_csv(components_info)
        components = np.array(components.loc[:, 'components'])
    else:
        components = components_info
    print("The video has {} frames: ".format(components.shape[0]))
    nbatchs = int(np.ceil(components.shape[0]/batch_size))
    for batch in range(nbatchs):
        # The last batch may be short when the frame count is not a multiple
        # of batch_size.
        if batch < nbatchs-1:
            nimages = batch_size
        else:
            nimages = components.shape[0] - batch_size*(nbatchs-1)
        image_rgb_batch = []
        # ----inference----------------
        for ind in range(nimages):
            image_name = str(batch*batch_size + ind) + '.jpg'
            image = skimage.io.imread(frames_dir + '/' + image_name)
            if image.ndim == 2:
                image_rgb = skimage.color.gray2rgb(image)
            else:
                image_rgb = image
            image_rgb_batch.append(image_rgb)
        results = model.detect(image_rgb_batch, verbose=0)
        # ----save results-------------
        for ind in range(nimages):
            print('Segment: ', batch*batch_size + ind)
            image_name = str(batch*batch_size + ind) + '.png'
            results_package = results[ind]
            masks_rgb = np.zeros((bg.shape[0], bg.shape[1], 3), dtype=np.uint8)
            if len(results_package["scores"]) >= 2:
                # Keep the two highest-scoring detections only.
                class_ids = results_package['class_ids'][:2]
                scores = results_package['scores'][:2]
                masks = results_package['masks'][:, :, :2]  # Bool
                rois = results_package['rois'][:2, :]
                # Clean each mask: drop specks below 1000 px, then pad edges.
                masks_1 = morphology.remove_small_objects(masks[:, :, 0], 1000)
                masks_1 = morphology.binary_dilation(
                    masks_1, morphology.disk(radius=3))
                masks_2 = morphology.remove_small_objects(masks[:, :, 1], 1000)
                masks_2 = morphology.binary_dilation(
                    masks_2, morphology.disk(radius=3))
                # Accept the frame only when both masks survived cleaning.
                if (masks_1.sum().sum() > 0) & (masks_2.sum().sum() > 0):
                    masks_rgb[:, :, 0] = img_as_ubyte(masks_1)
                    masks_rgb[:, :, 1] = img_as_ubyte(masks_2)
                    components[batch*batch_size + ind] = 2
            skimage.io.imsave(os.path.join(output_dir, image_name), masks_rgb)
    # Persist the (possibly upgraded) blob counts.
    components_df = pd.DataFrame({'components': components})
    components_df.to_csv(os.path.join(os.path.dirname(
        frames_dir), 'components.csv'), index=False)
    return components
def dlc_snout_tailbase(dlc_file):
    """Extract coordinates of snout and tailbase for both mice.

    Args:
        dlc_file: path to .h5 file produced by DeepLabCut
    Returns:
        df_mouse1: pd.DataFrame with snout_x/snout_y/tailbase_x/tailbase_y
        df_mouse2: pd.DataFrame with the same columns for the second mouse
    """
    df_dlc = pd.read_hdf(dlc_file)
    # The scorer name is the first level of every column tuple.
    scorer = df_dlc.columns[1][0]
    per_mouse = []
    for mouse in ('mouse1', 'mouse2'):
        extracted = pd.DataFrame()
        # Column order matches the original: snout_x, snout_y, tailbase_x, tailbase_y.
        for part in ('snout', 'tailbase'):
            for axis in ('x', 'y'):
                extracted[part + '_' + axis] = df_dlc[scorer, mouse, part, axis]
        per_mouse.append(extracted)
    return per_mouse[0], per_mouse[1]
def deeplabcut_detection_multi(config_path, video_path, shuffle=1, trainingsetindex=0, track_method='skeleton', videotype='.avi'):
    """Run multi-animal DeepLabCut on one video and return the filtered tracks file.

    Analyzes the video, converts detections to tracklets, bypasses the manual
    tracklet-refinement GUI by re-saving the raw tracklets as an .h5 with a
    two-mouse column index, then filters the predictions.

    Args:
        config_path: path to the DeepLabCut project config.yaml
        video_path: path to the video to analyze
        shuffle: training shuffle index
        trainingsetindex: index of the training fraction used
        track_method: 'skeleton' or 'box' tracklet stitching method
        videotype: video file extension
    Returns:
        filter_result: path to the filtered .h5 tracks file
    """
    videos = [video_path]
    deeplabcut.analyze_videos(config_path, videos, videotype=videotype)
    deeplabcut.convert_detections2tracklets(config_path, videos, videotype=videotype,
                                            shuffle=shuffle, trainingsetindex=trainingsetindex, track_method=track_method)
    # --------------bypass refining tracklets-----------------------
    # --------find pickle file containing tracklets------------------
    file_list = [f for f in glob.glob(os.path.join(
        os.path.dirname(video_path), '*.pickle'))]
    video_name = ntpath.basename(video_path).split('.')[0]
    if track_method == 'skeleton':
        for filename in file_list:
            if (video_name in filename) and ('sk.pickle' in filename):
                tracklet_result = filename
    elif track_method == 'box':
        for filename in file_list:
            if (video_name in filename) and ('bx.pickle' in filename):
                tracklet_result = filename
    df_tracklet = pd.read_pickle(tracklet_result)
    # Rebuild the multi-animal column index (scorer, individual, bodypart,
    # coord) so the raw tracklets look like refined tracks.
    mouse1_columns = [(col[0], 'mouse1', col[1], col[2])
                      for col in df_tracklet['header']]
    mouse2_columns = [(col[0], 'mouse2', col[1], col[2])
                      for col in df_tracklet['header']]
    mice_columns = pd.MultiIndex.from_tuples(
        mouse1_columns + mouse2_columns, names=["scorer", "individuals", "bodyparts", "coords"])
    df_mouse1 = pd.DataFrame(df_tracklet[0])
    df_mouse2 = pd.DataFrame(df_tracklet[1])
    df_mice = pd.concat([df_mouse1.T, df_mouse2.T], axis=1)
    df_mice_all = pd.DataFrame(df_mice.values, columns=mice_columns)
    df_mice_all.to_hdf(os.path.splitext(tracklet_result)
                       [0]+'.h5', key='df', mode='w')
    # -----------filter-----------------------------------------------
    # BUG FIX: track_method was hard-coded to 'skeleton' here, so calling
    # this function with track_method='box' filtered the wrong tracks.
    deeplabcut.filterpredictions(
        config_path, video_path, track_method=track_method)
    # ----------find filter result-----------------------------------
    file_list = [f for f in glob.glob(
        os.path.join(os.path.dirname(video_path), '*.h5'))]
    if track_method == 'skeleton':
        for filename in file_list:
            if (video_name in filename) and ('sk_filtered.h5' in filename):
                filter_result = filename
    elif track_method == 'box':
        for filename in file_list:
            if (video_name in filename) and ('bx_filtered.h5' in filename):
                filter_result = filename
    return filter_result
# def deeplabcut_detection_multi_without_refine(config_path, video_path, shuffle=1, trainingsetindex=0, track_method='skeleton', videotype='.avi'):
# """Function to get snout and tailbase through deeplabcut
# Args:
# Returns:
# """
# videos=[video_path]
# deeplabcut.analyze_videos(config_path, videos, videotype=videotype)
# deeplabcut.convert_detections2tracklets(config_path, videos, videotype=videotype,
# shuffle=shuffle, trainingsetindex=trainingsetindex, track_method=track_method)
# #--------------bypass refining tracklets-----------------------
# #--------find pickle file containing tracklets------------------
# file_list = [f for f in glob.glob(os.path.join(os.path.dirname(video_path),'*.pickle'))]
# video_name = ntpath.basename(video_path).split('.')[0]
# if track_method=='skeleton':
# for filename in file_list:
# if (video_name in filename) and ('sk.pickle' in filename):
# tracklet_result = filename
# elif track_method=='box':
# for filename in file_list:
# if (video_name in filename) and ('bx.pickle' in filename):
# tracklet_result = filename
# #--------------------------------------------------------------------
# df_tracklets = pd.read_pickle(tracklet_result)
# scorer = df_tracklets['header'][0][0]
# mouse1_columns = [(col[0], 'mouse1', col[1], col[2]) for col in df_tracklets['header']]
# mouse2_columns = [(col[0], 'mouse2', col[1], col[2]) for col in df_tracklets['header']]
# mice_columns = mouse1_columns + mouse2_columns
# all_columns = pd.MultiIndex.from_tuples(mice_columns, names=["scorer", "individuals", "bodyparts", "coords"])
# cap = cv2.VideoCapture(video_path)
# nframes = int(cap.get(7))
# #---------------------------------------------------
# mouse1 = np.empty((nframes,len(mouse1_columns)))
# mouse1[:] = np.NaN
# for frame in df_tracklets[0].keys():
# # ind = int(frame[5:])
# # mouse1[ind,:] = df_tracklets[0][frame]
# try:
# ind = int(frame[5:])
# mouse1[ind,:] = df_tracklets[0][frame][:,0:3].flatten()
# except:
# continue
# #----------------------------------------------------
# mouse2 = np.empty((nframes,len(mouse2_columns)))
# mouse2[:] = np.NaN
# for frame in df_tracklets[1].keys():
# # ind = int(frame[5:])
# # mouse2[ind,:] = df_tracklets[1][frame]
# try:
# ind = int(frame[5:])
# mouse2[ind,:] = df_tracklets[1][frame][:,0:3].flatten()
# except:
# continue
# mice = np.concatenate((mouse1, mouse2), axis=1)
# df_mice = pd.DataFrame(mice, columns=all_columns)
# #------------Test: refine--------------------------
# df_mice.to_hdf(os.path.splitext(tracklet_result)[0]+'.h5', key='df', mode='w')
# #----------- Test: filter--------------------------
# deeplabcut.filterpredictions(config_path,video_path, track_method='skeleton')
# #---------- Test: find filter result-----------------------------------
# file_list = [f for f in glob.glob(os.path.join(os.path.dirname(video_path),'*.h5'))]
# video_name = ntpath.basename(video_path).split('.')[0]
# if track_method=='skeleton':
# for filename in file_list:
# if (video_name in filename) and ('sk_filtered.h5' in filename):
# filter_result = filename
# elif track_method=='box':
# for filename in file_list:
# if (video_name in filename) and ('bx_filtered.h5' in filename):
# filter_result = filename
# return filter_result
def deeplabcut_detection_multi_without_refine(config_path, video_path, shuffle=1, trainingsetindex=0, track_method='skeleton', videotype='.avi'):
    """Run multi-animal DeepLabCut detection and return the filtered result,
    bypassing the interactive tracklet-refinement step.

    Args:
        config_path: path to the DeepLabCut config.yaml
        video_path: path to the video to analyze
        shuffle: training shuffle index (int)
        trainingsetindex: training set index (int)
        track_method: 'skeleton', 'box' or 'ellipse'
        videotype: video file extension, e.g. '.avi'
    Returns:
        filter_result: path to the filtered .h5 result file
    """
    videos = [video_path]
    deeplabcut.analyze_videos(config_path, videos, videotype=videotype)
    deeplabcut.convert_detections2tracklets(config_path, videos, videotype=videotype,
                                            shuffle=shuffle, trainingsetindex=trainingsetindex, track_method=track_method)
    # Suffix DeepLabCut embeds in its output file names for each track method.
    suffix = {'skeleton': 'sk', 'box': 'bx', 'ellipse': 'el'}[track_method]
    video_dir = os.path.dirname(video_path)
    video_name = ntpath.basename(video_path).split('.')[0]

    def _find_output(pattern, marker):
        # Return the last file in video_dir matching the glob pattern whose
        # name contains both the video name and the given marker (this keeps
        # the original code's last-match-wins scan order).
        found = None
        for candidate in glob.glob(os.path.join(video_dir, pattern)):
            if (video_name in candidate) and (marker in candidate):
                found = candidate
        return found

    # --------find pickle files containing tracklets and assemblies----------
    tracklet_result = _find_output('*.pickle', suffix + '.pickle')
    assembly_result = _find_output('*.pickle', 'assemblies')
    # -----------build the two-animal column index---------------------------
    tracklets = pd.read_pickle(tracklet_result)
    mouse1_columns = [(col[0], 'mouse1', col[1], col[2])
                      for col in tracklets['header']]
    mouse2_columns = [(col[0], 'mouse2', col[1], col[2])
                      for col in tracklets['header']]
    all_columns = pd.MultiIndex.from_tuples(
        mouse1_columns + mouse2_columns,
        names=["scorer", "individuals", "bodyparts", "coords"])
    # -------------bypass refining: use the raw per-frame assemblies---------
    assemblies = pd.read_pickle(assembly_result)
    cap = cv2.VideoCapture(video_path)
    nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Pre-fill with NaN so frames without a detection stay missing.
    mouse1 = np.full((nframes, len(mouse1_columns)), np.nan)
    mouse2 = np.full((nframes, len(mouse2_columns)), np.nan)
    for frame in assemblies.keys():
        try:
            mouse1[frame, :] = assemblies[frame][0][:, 0:3].flatten()
        except Exception:  # missing/malformed assembly: keep NaN for this frame
            continue
        try:
            mouse2[frame, :] = assemblies[frame][1][:, 0:3].flatten()
        except Exception:  # second animal absent in this frame: keep NaN
            continue
    mice = np.concatenate((mouse1, mouse2), axis=1)
    df_mice = pd.DataFrame(mice, columns=all_columns)
    # Write the assembled tracks to .h5 as if tracklets had been refined, so
    # filterpredictions can consume them.
    df_mice.to_hdf(os.path.splitext(tracklet_result)
                   [0]+'.h5', key='df', mode='w')
    # ----------- filter--------------------------
    deeplabcut.filterpredictions(
        config_path, video_path, track_method=track_method)
    # ---------- find filter result-----------------------------------
    filter_result = _find_output('*.h5', suffix + '_filtered.h5')
    return filter_result
def ensemble_features_multi(mouse1_md, mouse2_md, mouse1_dlc, mouse2_dlc, tracking_dir):
    """Ensemble the result of mask-based detection and deeplabcut-based detection.

    For each frame, DeepLabCut keypoints are used when they fall inside the
    identity mask of the corresponding mouse; otherwise the mask-based
    coordinates are used as a fallback. Column layout of each coordinate
    array is [snout_x, snout_y, tailbase_x, tailbase_y].

    Args:
        mouse1_md: coordinates of snout and tailbase of mouse1 generated by mask-based detection
        mouse2_md: coordinates of snout and tailbase of mouse2 generated by mask-based detection
        mouse1_dlc: coordinates of snout and tailbase of mouse1 generated by deeplabcut detection
        mouse2_dlc: coordinates of snout and tailbase of mouse2 generated by deeplabcut detection
        tracking_dir: path to directory containing masks corresponding to identities
    Returns:
        df_mouse1_ensemble: ensemble coordinates of snout and tailbase of mouse1
        df_mouse2_ensemble: ensemble coordinates of snout and tailbase of mouse2
    """
    # Per-frame connected-component counts written by the tracking step;
    # components == 2 means the two mice are separated in that frame.
    components = pd.read_csv(os.path.join(
        os.path.dirname(tracking_dir), 'components.csv'))
    mouse1_ensemble = np.zeros(mouse1_md.shape)
    mouse2_ensemble = np.zeros(mouse2_md.shape)
    mouse1_dlc = np.array(mouse1_dlc)
    mouse2_dlc = np.array(mouse2_dlc)
    # flagN[i] == 1 when frame i of mouse N uses DLC coordinates,
    # 0 when the mask-based fallback was used.
    flag1 = np.zeros((len(mouse1_md),))
    flag2 = np.zeros((len(mouse2_md),))
    for i in range(len(mouse1_md)):
        # Identity masks for frame i: channel 0 = mouse1, channel 1 = mouse2.
        masks = skimage.io.imread(os.path.join(
            tracking_dir, str(i) + '.png')) / 255.0
        mask1 = masks[:, :, 0].astype(int)
        mask2 = masks[:, :, 1].astype(int)
        # One-hot images with a single pixel set at each DLC keypoint; the
        # elementwise product with a mask tests membership of that keypoint.
        nose1_DLC = np.zeros(mask1.shape)
        tail1_DLC = np.zeros(mask1.shape)
        nose2_DLC = np.zeros(mask2.shape)
        tail2_DLC = np.zeros(mask2.shape)
        # NaN or out-of-range DLC coordinates raise here; the keypoint image
        # is then left empty, so the mask-based fallback is chosen below.
        try:
            nose1_DLC[mouse1_dlc[i, 1].astype(
                int), mouse1_dlc[i, 0].astype(int)] = 1
            tail1_DLC[mouse1_dlc[i, 3].astype(
                int), mouse1_dlc[i, 2].astype(int)] = 1
        except:
            pass
        try:
            nose2_DLC[mouse2_dlc[i, 1].astype(
                int), mouse2_dlc[i, 0].astype(int)] = 1
            tail2_DLC[mouse2_dlc[i, 3].astype(
                int), mouse2_dlc[i, 2].astype(int)] = 1
        except:
            pass
        # -----------mouse 1---------------------
        # Prefer whichever DLC identity has both snout and tailbase inside
        # mask1 (DLC identities may be swapped relative to the masks).
        if (np.sum(np.sum(nose1_DLC*mask1)) > 0) & (np.sum(np.sum(tail1_DLC*mask1)) > 0):
            mouse1_ensemble[i, 0:2] = mouse1_dlc[i, 0:2]
            mouse1_ensemble[i, 2:4] = mouse1_dlc[i, 2:4]
            flag1[i] = 1
        elif (np.sum(np.sum(nose2_DLC*mask1)) > 0) & (np.sum(np.sum(tail2_DLC*mask1)) > 0):
            mouse1_ensemble[i, 0:2] = mouse2_dlc[i, 0:2]
            mouse1_ensemble[i, 2:4] = mouse2_dlc[i, 2:4]
            flag1[i] = 1
        else:
            # Fall back to mask-based coordinates for this frame.
            mouse1_ensemble[i, 0:2] = mouse1_md[i, 0:2]
            mouse1_ensemble[i, 2:4] = mouse1_md[i, 2:4]
            flag1[i] = 0
        # --------logic to fix swapping: ------------
        # When switching from DLC to the mask fallback while the mice are
        # separated, flip snout/tailbase if the snout ended up closer to the
        # previous frame's tailbase than to its previous snout position.
        if i > 0:
            if (flag1[i] == 0) & (flag1[i-1] == 1) & (components.loc[i, 'components'] == 2):
                mouse1_snout2snout = np.sum(
                    (mouse1_ensemble[i, 0:2]-mouse1_ensemble[i-1, 0:2]) ** 2)
                mouse1_snout2tail = np.sum(
                    (mouse1_ensemble[i, 0:2]-mouse1_ensemble[i-1, 2:4]) ** 2)
                if mouse1_snout2tail < mouse1_snout2snout:
                    temp1 = mouse1_ensemble[i, 0:2].copy()
                    mouse1_ensemble[i, 0:2] = mouse1_ensemble[i, 2:4]
                    mouse1_ensemble[i, 2:4] = temp1
        # --------mouse 2-------------------------
        # Same selection logic against mask2.
        if (np.sum(np.sum(nose1_DLC*mask2)) > 0) & (np.sum(np.sum(tail1_DLC*mask2)) > 0):
            mouse2_ensemble[i, 0:2] = mouse1_dlc[i, 0:2]
            mouse2_ensemble[i, 2:4] = mouse1_dlc[i, 2:4]
            flag2[i] = 1
        elif (np.sum(np.sum(nose2_DLC*mask2)) > 0) & (np.sum(np.sum(tail2_DLC*mask2)) > 0):
            mouse2_ensemble[i, 0:2] = mouse2_dlc[i, 0:2]
            mouse2_ensemble[i, 2:4] = mouse2_dlc[i, 2:4]
            flag2[i] = 1
        else:
            mouse2_ensemble[i, 0:2] = mouse2_md[i, 0:2]
            mouse2_ensemble[i, 2:4] = mouse2_md[i, 2:4]
            flag2[i] = 0
        # --------logic to fix swapping: ------------
        if i > 0:
            if (flag2[i] == 0) & (flag2[i-1] == 1) & (components.loc[i, 'components'] == 2):
                mouse2_snout2snout = np.sum(
                    (mouse2_ensemble[i, 0:2]-mouse2_ensemble[i-1, 0:2]) ** 2)
                mouse2_snout2tail = np.sum(
                    (mouse2_ensemble[i, 0:2]-mouse2_ensemble[i-1, 2:4]) ** 2)
                if mouse2_snout2tail < mouse2_snout2snout:
                    temp2 = mouse2_ensemble[i, 0:2].copy()
                    mouse2_ensemble[i, 0:2] = mouse2_ensemble[i, 2:4]
                    mouse2_ensemble[i, 2:4] = temp2
    # mouse1_ensemble[:,1] = 540-mouse1_ensemble[:,1]
    # mouse1_ensemble[:,3] = 540-mouse1_ensemble[:,3]
    # mouse2_ensemble[:,1] = 540-mouse2_ensemble[:,1]
    # mouse2_ensemble[:,3] = 540-mouse2_ensemble[:,3]
    df_mouse1_ensemble = pd.DataFrame({'snout_x': mouse1_ensemble[:, 0],
                                       'snout_y': mouse1_ensemble[:, 1],
                                       'tailbase_x': mouse1_ensemble[:, 2],
                                       'tailbase_y': mouse1_ensemble[:, 3]})
    df_mouse2_ensemble = pd.DataFrame({'snout_x': mouse2_ensemble[:, 0],
                                       'snout_y': mouse2_ensemble[:, 1],
                                       'tailbase_x': mouse2_ensemble[:, 2],
                                       'tailbase_y': mouse2_ensemble[:, 3]})
    # Persist the ensembled coordinates next to the tracking directory.
    df_mouse1_ensemble.to_csv(os.path.join(os.path.dirname(tracking_dir),
                                           'mouse1_ensemble.csv'), index=False)
    df_mouse2_ensemble.to_csv(os.path.join(os.path.dirname(tracking_dir),
                                           'mouse2_ensemble.csv'), index=False)
    return df_mouse1_ensemble, df_mouse2_ensemble
def background_subtraction_single(frames_dir, fg_dir, background, threshold, frame_index):
    """Segment the foreground (animals) of one frame by background subtraction.

    Args:
        frames_dir: path to directory containing the extracted frames
        fg_dir: directory where the foreground mask image is saved
        background: background image as a float gray-scale array
        threshold: per-pixel threshold array (same shape as background)
        frame_index: index of the frame to process (int)
    Returns:
        num_fg: number of foreground blobs detected in the frame (int)
    """
    im = img_as_float(skimage.io.imread(
        os.path.join(frames_dir, str(frame_index) + '.jpg')))
    if im.ndim == 3:
        im = rgb2gray(im)
    # Pixels sufficiently darker than the background count as foreground.
    fg = (background - im) > threshold
    bw1 = morphology.remove_small_objects(fg, 1000)
    bw2 = morphology.binary_closing(bw1, morphology.disk(radius=10))
    labels = measure.label(bw2)
    num_fg = np.max(labels)
    masks = np.zeros(
        [background.shape[0], background.shape[1], 3], dtype=np.uint8)
    if num_fg == 2:
        # Two separated animals: dilate each blob slightly and store them in
        # separate color channels (channel 0 and channel 1).
        blob1 = morphology.binary_dilation(labels == 1, morphology.disk(radius=3))
        blob2 = morphology.binary_dilation(labels == 2, morphology.disk(radius=3))
        masks[:, :, 0] = img_as_ubyte(blob1)
        masks[:, :, 1] = img_as_ubyte(blob2)
    else:
        # Animals touching (one blob) or detection failure: store the whole
        # foreground in the first channel.
        masks[:, :, 0] = img_as_ubyte(bw2)
    skimage.io.imsave(os.path.join(fg_dir, str(frame_index) + '.png'), masks)
    return num_fg
def background_subtraction_parallel(frames_dir, background_path, num_processors=None):
    """Generate foreground masks for all frames in parallel.

    Args:
        frames_dir: path to directory containing the extracted frames
        background_path: path to the background image file
        num_processors: number of worker processes (None = all available cores)
    Returns:
        components: 1D np.ndarray with the number of blobs in each frame.
    """
    fg_dir = os.path.join(os.path.dirname(frames_dir), 'FG')
    # exist_ok avoids the check-then-create race of os.path.exists + mkdir.
    os.makedirs(fg_dir, exist_ok=True)
    # clean_dir_safe(fg_dir)
    background = img_as_float(skimage.io.imread(background_path))
    if background.ndim == 3:
        background = rgb2gray(background)
    # Per-pixel threshold: half of the background intensity.
    threshold = background * 0.5
    n_frames = len(os.listdir(frames_dir))
    # Context manager guarantees the worker pool is terminated and joined.
    with Pool(processes=num_processors) as pool:
        output = pool.starmap(background_subtraction_single,
                              [(frames_dir, fg_dir, background, threshold, i)
                               for i in range(n_frames)])
    return np.array(output)
def behavior_feature_extraction(resident, intruder, tracking_dir, order=(1, 2)):
    '''Extract resident-to-intruder distance features for quantifying behavior.

    Args:
        resident: snout/tailbase coordinates of the resident (pd.DataFrame with
            columns snout_x, snout_y, tailbase_x, tailbase_y)
        intruder: snout/tailbase coordinates of the intruder (pd.DataFrame)
        tracking_dir: directory of tracked mask images (str)
        order: mask channel indices of resident and intruder (sequence of 2)
    Returns:
        df_features: per-frame resident-to-intruder features (pd.DataFrame)
    '''
    # NOTE(review): the default order=(1, 2) selects mask channels 1 and 2,
    # while background_subtraction_single writes animals to channels 0 and 1
    # -- confirm callers pass an order matching how the masks were produced.
    df_features = pd.DataFrame()
    for i in range(resident.shape[0]):
        res_snout = resident.loc[i, ['snout_y', 'snout_x']].values
        int_snout = intruder.loc[i, ['snout_y', 'snout_x']].values
        int_tail = intruder.loc[i, ['tailbase_y', 'tailbase_x']].values
        df_features.loc[i, 'snout2snout'] = np.sqrt(
            np.sum((res_snout - int_snout) ** 2))
        df_features.loc[i, 'snout2tailbase'] = np.sqrt(
            np.sum((res_snout - int_tail) ** 2))
        masks = skimage.io.imread(os.path.join(
            tracking_dir, str(i) + '.png')) / 255.0
        mask_resident = masks[:, :, order[0]].astype(int)
        mask_intruder = masks[:, :, order[1]].astype(int)
        resident_border = find_contours(mask_resident, 0.5)[0]
        intruder_border = find_contours(mask_intruder, 0.5)[0]
        # Snout-to-body: zero when the resident's snout pixel lies inside the
        # intruder mask (direct pixel test replaces the original full-image
        # one-hot product), otherwise distance to the nearest border point.
        snout_inside = mask_intruder[int(resident.loc[i, 'snout_y']),
                                     int(resident.loc[i, 'snout_x'])] > 0
        if snout_inside:
            df_features.loc[i, 'snout2body'] = 0
        else:
            df_features.loc[i, 'snout2body'] = np.min(np.sqrt(np.sum(
                (res_snout - intruder_border) ** 2, axis=1)))
        # Body-to-body: zero when the two masks overlap, otherwise the minimum
        # pairwise distance between the two borders (vectorized broadcast
        # replaces the original per-point Python loop).
        if np.sum(mask_resident * mask_intruder) > 0:
            df_features.loc[i, 'body2body'] = 0
        else:
            diffs = resident_border[:, None, :] - intruder_border[None, :, :]
            df_features.loc[i, 'body2body'] = np.min(
                np.sqrt(np.sum(diffs ** 2, axis=2)))
    df_features.to_csv(os.path.join(os.path.dirname(tracking_dir),
                                    'resident2intruder.csv'), index=False)
    return df_features
| [
11748,
285,
6015,
20471,
13,
19849,
355,
953,
695,
571,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
269,
85,
17,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
390,
68,
48... | 2.084759 | 26,121 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
26791,
13,
9122,
4122,
355,
31396,
628,
628
] | 3.419355 | 31 |
from django.urls import path
from django.conf import settings
from django.contrib.staticfiles.urls import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("listings/create", views.create_listing, name="create_listing"),
path("listings/watchlist", views.watchlist_items, name="watchlist_items"),
path("listings/watchlistbutton", views.watchlistbtn, name="watchlistbtn"),
path("listings/categories", views.categories, name="categories"),
path("listings/categories/<str:cat>", views.category, name="category"),
path("listings/<int:list_id>", views.listings, name="listings"),
path("listings/<int:list_id>/comment", views.comment, name="comment"),
path("listings/<int:list_id>/bid", views.newbid, name="newbid"),
path("listings/<int:list_id>/close", views.close_auction, name="close_auction"),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
12708,
16624,
13,
6371,
82,
1330,
9037,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
12708,
16624,
... | 2.914081 | 419 |
from spiketag.view import cluster_view
import numpy as np
if __name__ == '__main__':
cluview = cluster_view()
group_No = 40
sorting_status = np.random.randint(low=0, high=3, size=group_No) # 0: not ready; 1: ready; 2: done
print(sorting_status)
cluview.set_data(group_No=group_No, sorting_status=sorting_status, nclu_list=group_No*[8])
# cluview.select(8)
print(cluview.cpu_ready_list)
@cluview.event.connect
cluview.run()
| [
6738,
599,
1134,
316,
363,
13,
1177,
1330,
13946,
62,
1177,
198,
11748,
299,
32152,
355,
45941,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
537,
84,
1177,
796,
13946,
62,
1177,
3419,
198,
... | 2.297561 | 205 |
import configparser
import os
from compute import Config_ini
from train.utils import TrainConfig
| [
11748,
4566,
48610,
198,
11748,
28686,
198,
198,
6738,
24061,
1330,
17056,
62,
5362,
198,
6738,
4512,
13,
26791,
1330,
16835,
16934,
628
] | 4.304348 | 23 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# ruler.py
#
from pagebot.elements.element import Element
from pagebot.toolbox.units import pointOffset
from pagebot.toolbox.color import noColor
from pagebot.toolbox.units import units
from pagebot.constants import DEFAULT_HEIGHT, ORIGIN
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
16529,
32501,
198,
2,
198,
2,
220,
220,
220,
220,
350,
317,
402,
412,
347,
440,
309,
198,
2,
198,
2,
220,
220... | 3.171429 | 245 |
import aisy_sca
from app import *
from custom.custom_models.neural_networks import *
aisy = aisy_sca.Aisy()
aisy.set_resources_root_folder(resources_root_folder)
aisy.set_database_root_folder(databases_root_folder)
aisy.set_datasets_root_folder(datasets_root_folder)
aisy.set_database_name("database_ascad.sqlite")
aisy.set_dataset(datasets_dict["ascad-variable.h5"])
aisy.set_aes_leakage_model(leakage_model="HW", byte=2)
aisy.set_batch_size(400)
aisy.set_epochs(40)
my_mlp = mlp(9, 1400, 200, 6, "relu", 0.001)
aisy.set_neural_network(my_mlp)
aisy.run()
| [
11748,
257,
13560,
62,
1416,
64,
198,
6738,
598,
1330,
1635,
198,
6738,
2183,
13,
23144,
62,
27530,
13,
710,
1523,
62,
3262,
5225,
1330,
1635,
628,
198,
198,
64,
13560,
796,
257,
13560,
62,
1416,
64,
13,
32,
13560,
3419,
198,
64,
... | 2.300412 | 243 |
from django.core.management import BaseCommand
from core.cache import rebuild_all_cache
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
7308,
21575,
198,
6738,
4755,
13,
23870,
1330,
17884,
62,
439,
62,
23870,
628
] | 4.045455 | 22 |
from tuun.backend import ProboBackend
model_config = {'name': 'simplegp', 'ls': 3.0, 'alpha': 1.5, 'sigma': 1e-5}
acqfunction_config = {'name': 'default', 'acq_str': 'ei'}
acqoptimizer_config = {'name': 'default', 'max_iter': 200}
domain_config = {'name': 'real', 'min_max': [(-5, 5)]}
f = lambda x: x[0] ** 4 - x[0] ** 2 + 0.1 * x[0]
pb = ProboBackend(model_config, acqfunction_config, acqoptimizer_config, domain_config)
results = pb.minimize_function(f, 20)
| [
6738,
12777,
403,
13,
1891,
437,
1330,
1041,
2127,
7282,
437,
198,
198,
19849,
62,
11250,
796,
1391,
6,
3672,
10354,
705,
36439,
31197,
3256,
705,
7278,
10354,
513,
13,
15,
11,
705,
26591,
10354,
352,
13,
20,
11,
705,
82,
13495,
103... | 2.404145 | 193 |
from jpntextgen.engine import Engine
engine = Engine()
print(engine.get_address())
print(engine.get_full_name())
print(engine.get_date())
print(engine.get_general_text())
print(engine.get_additional_info())
print(engine.get_email())
print(engine.get_url())
print(engine.get_eng_katakana())
print(engine.get_phone_number())
print(engine.get_post_code())
print(engine.get_price())
print(engine.get_product_code())
print(engine.get_status()) | [
6738,
474,
79,
429,
2302,
5235,
13,
18392,
1330,
7117,
198,
198,
18392,
796,
7117,
3419,
198,
198,
4798,
7,
18392,
13,
1136,
62,
21975,
28955,
198,
4798,
7,
18392,
13,
1136,
62,
12853,
62,
3672,
28955,
198,
4798,
7,
18392,
13,
1136,... | 2.913907 | 151 |
#!/usr/bin/env python3
import numpy as np
import os
import time
import gym
import json
import sys
import functools
from tqdm import tqdm, tnrange
from drlnd.core.common.util import is_notebook, count_boundaries
from drlnd.core.agents.base_agent import AgentBase
from drlnd.core.common.ring_buffer import ContiguousRingBuffer
from drlnd.core.common.prioritized_replay_buffer import PrioritizedReplayBuffer
from drlnd.core.common.logger import get_default_logger
from drlnd.core.common.epsilon import ExponentialEpsilon, LinearEpsilon
from baselines.common.vec_env import SubprocVecEnv
logger = get_default_logger()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
11550,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
1257,
310,
10141,
198,
6738,
256,
80,
360... | 3.034146 | 205 |
#!/usr/bin/env python2
#
# This script changes C++ code, CMake files and FHiCL configuration to use
# larcorealg instead of larcore for geometrye
#
# Change log:
# 20170703 (petrillo@fnal.gov)
# original version
#
import sys, re
import SerialSubstitution
from SerialSubstitution import AddProcessor, RunSubstitutor
################################################################################
if __name__ == "__main__":
#############################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# FHiCL configuration
#
Subst = AddProcessor(SerialSubstitution.ProcessorClass("FHiCL"))
Subst.AddFileType("fcl")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CMakeLists.txt
#
Subst = AddProcessor(SerialSubstitution.ProcessorClass("cmake"))
Subst.AddFileNamePattern("CMakeLists.txt")
# note that GeometryTestAlg was also moved, but it did not sport the product name header
Subst.AddWord ("larcore_Geometry", "larcorealg_Geometry")
Subst.AddWord ("larcore_CoreUtils", "larcorealg_CoreUtils")
Subst.AddWord ("larcore_TestUtils", "larcorealg_TestUtils")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# C++ source code (including modules and services)
#
Subst = AddProcessor(SerialSubstitution.ProcessorClass("code"))
Subst.AddFileType("h", "hh", "cc", "cpp", "cxx", "icc", "tcc" )
Subst.AddWord ("larcore/TestUtils/NameSelector.h", "larcorealg/TestUtils/NameSelector.h")
Subst.AddWord ("larcore/TestUtils/boost_unit_test_base.h", "larcorealg/TestUtils/boost_unit_test_base.h")
Subst.AddWord ("larcore/TestUtils/ProviderList.h", "larcorealg/TestUtils/ProviderList.h")
Subst.AddWord ("larcore/TestUtils/unit_test_base.h", "larcorealg/TestUtils/unit_test_base.h")
Subst.AddWord ("larcore/TestUtils/ProviderTestHelpers.h", "larcorealg/TestUtils/ProviderTestHelpers.h")
Subst.AddWord ("larcore/TestUtils/StopWatch.h", "larcorealg/TestUtils/StopWatch.h")
Subst.AddWord ("larcore/Geometry/ChannelMapAlg.h", "larcorealg/Geometry/ChannelMapAlg.h")
Subst.AddWord ("larcore/Geometry/Exceptions.h", "larcorealg/Geometry/Exceptions.h")
Subst.AddWord ("larcore/Geometry/AuxDetGeometryCore.h", "larcorealg/Geometry/AuxDetGeometryCore.h")
Subst.AddWord ("larcore/Geometry/StandaloneGeometrySetup.h", "larcorealg/Geometry/StandaloneGeometrySetup.h")
Subst.AddWord ("larcore/Geometry/AuxDetGeoObjectSorter.h", "larcorealg/Geometry/AuxDetGeoObjectSorter.h")
Subst.AddWord ("larcore/Geometry/LocalTransformation.h", "larcorealg/Geometry/LocalTransformation.h")
Subst.AddWord ("larcore/Geometry/geo.h", "larcorealg/Geometry/geo.h")
Subst.AddWord ("larcore/Geometry/AuxDetChannelMapAlg.h", "larcorealg/Geometry/AuxDetChannelMapAlg.h")
Subst.AddWord ("larcore/Geometry/WireGeo.h", "larcorealg/Geometry/WireGeo.h")
Subst.AddWord ("larcore/Geometry/GeometryCore.h", "larcorealg/Geometry/GeometryCore.h")
Subst.AddWord ("larcore/Geometry/TPCGeo.h", "larcorealg/Geometry/TPCGeo.h")
Subst.AddWord ("larcore/Geometry/PlaneGeo.h", "larcorealg/Geometry/PlaneGeo.h")
Subst.AddWord ("larcore/Geometry/CryostatGeo.h", "larcorealg/Geometry/CryostatGeo.h")
Subst.AddWord ("larcore/Geometry/BoxBoundedGeo.h", "larcorealg/Geometry/BoxBoundedGeo.h")
Subst.AddWord ("larcore/Geometry/GeoObjectSorterStandard.h", "larcorealg/Geometry/GeoObjectSorterStandard.h")
Subst.AddWord ("larcore/Geometry/Decomposer.h", "larcorealg/Geometry/Decomposer.h")
Subst.AddWord ("larcore/Geometry/AuxDetSensitiveGeo.h", "larcorealg/Geometry/AuxDetSensitiveGeo.h")
Subst.AddWord ("larcore/Geometry/AuxDetGeo.h", "larcorealg/Geometry/AuxDetGeo.h")
Subst.AddWord ("larcore/Geometry/SimpleGeo.h", "larcorealg/Geometry/SimpleGeo.h")
Subst.AddWord ("larcore/Geometry/ChannelMapStandardAlg.h", "larcorealg/Geometry/ChannelMapStandardAlg.h")
Subst.AddWord ("larcore/Geometry/StandaloneBasicSetup.h", "larcorealg/Geometry/StandaloneBasicSetup.h")
Subst.AddWord ("larcore/Geometry/OpDetGeo.h", "larcorealg/Geometry/OpDetGeo.h")
Subst.AddWord ("larcore/Geometry/GeoObjectSorter.h", "larcorealg/Geometry/GeoObjectSorter.h")
Subst.AddWord ("larcore/CoreUtils/DereferenceIterator.h", "larcorealg/CoreUtils/DereferenceIterator.h")
Subst.AddWord ("larcore/CoreUtils/DumpUtils.h", "larcorealg/CoreUtils/DumpUtils.h")
Subst.AddWord ("larcore/CoreUtils/ProviderUtil.h", "larcorealg/CoreUtils/ProviderUtil.h")
Subst.AddWord ("larcore/CoreUtils/DebugUtils.h", "larcorealg/CoreUtils/DebugUtils.h")
Subst.AddWord ("larcore/CoreUtils/ProviderPack.h", "larcorealg/CoreUtils/ProviderPack.h")
Subst.AddWord ("larcore/CoreUtils/UncopiableAndUnmovableClass.h", "larcorealg/CoreUtils/UncopiableAndUnmovableClass.h")
Subst.AddWord ("larcore/CoreUtils/RealComparisons.h", "larcorealg/CoreUtils/RealComparisons.h")
Subst.AddWord ("larcore/CoreUtils/quiet_Math_Functor.h", "larcorealg/CoreUtils/quiet_Math_Functor.h")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#############################################################################
sys.exit(RunSubstitutor())
#
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
198,
2,
770,
4226,
2458,
327,
4880,
2438,
11,
327,
12050,
3696,
290,
376,
17250,
5097,
8398,
284,
779,
198,
2,
300,
5605,
382,
14016,
2427,
286,
300,
5605,
382,
329,
22939,
... | 2.095581 | 2,919 |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Sphinx extension to add directives to allow files and code to include the
latest installable version of Flocker.
"""
import importlib
import os
from sphinx.directives.code import LiteralInclude
from sphinx.roles import XRefRole
from flocker import __version__ as version
from flocker.common.version import get_installable_version
from sphinx import addnodes
from sphinx.util import ws_re
PLACEHOLDER = u'|latest-installable|'
def remove_extension(template):
"""
Given a filename or path of a template file, return the same without the
template suffix.
:param unicode template: The filename of or path to a template file which
ends with '.template'.
:return: The given filename or path without the '.template' suffix.
"""
return template[:-len('.template')]
def make_changed_file(path, env):
"""
Given the path to a template file, write a new file with:
* The same filename, except without '.template' at the end.
* A placeholder in the new file changed to the latest installable
version of Flocker.
This new file will be deleted on build completion.
:param unicode path: The path to a template file.
:param sphinx.environment.BuildEnvironment env: The Sphinx build
environment
"""
latest = get_installable_version(version)
new_path = remove_extension(path)
with open(path, 'r') as templated_file:
with open(new_path, 'w') as new_file:
new_file.write(templated_file.read().replace(PLACEHOLDER, latest))
env.app.connect('build-finished',
lambda self, *args: remove_file(new_path))
class VersionDownload(XRefRole):
"""
Similar to downloadable files, but:
* Replaces a placeholder in the downloadable file with the latest
installable version of Flocker.
* Replaces the download link with one which strips '.template' from the
end of the file name.
"""
nodeclass = addnodes.download_reference
class VersionLiteralInclude(LiteralInclude):
"""
Similar to LiteralInclude but replaces a placeholder with the latest
installable version of Flocker. The filename of the file to be included
must end with '.template'.
"""
# Due to the dash in the name, the sphinx-prompt module is unloadable
# using a normal import - use the importlib machinery instead.
sphinx_prompt = importlib.import_module('sphinx-prompt')
class VersionPrompt(sphinx_prompt.PromptDirective):
"""
Similar to PromptDirective but replaces a placeholder with the
latest installable version of Flocker.
Usage example:
.. version-prompt:: bash $
$ brew install flocker-|latest-installable|
"""
| [
2,
15069,
29481,
30146,
12052,
13,
220,
4091,
38559,
24290,
2393,
329,
3307,
13,
198,
198,
37811,
198,
50,
746,
28413,
7552,
284,
751,
34819,
284,
1249,
3696,
290,
2438,
284,
2291,
262,
198,
42861,
2721,
540,
2196,
286,
1610,
12721,
1... | 3.005353 | 934 |
import argparse
import os
import json
import random
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import lpips
import sys
sys.path.append('.') # to run from the project root dir
import models
from thirdparty import LBFGS
from models.dynamic_channel import CHANNEL_CONFIGS, set_uniform_channel_ratio, reset_generator, set_sub_channel_config
from utils.torch_utils import adaptive_resize
torch.backends.cudnn.benchmark = False
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="Image projector to the generator latent spaces")
parser.add_argument("--config", type=str, default='anycost-ffhq-config-f', help="models config")
parser.add_argument("--encoder", action="store_true", help="use encoder prediction as init")
parser.add_argument("--optimizer", type=str, default='lbfgs', help="optimizer used")
parser.add_argument("--n_iter", type=int, default=100, help="optimize iterations")
parser.add_argument("--optimize_sub_g", action="store_true", help="also optimize the sub-generators")
# loss weight
parser.add_argument("--mse_weight", type=float, default=1., help="weight of MSE loss")
parser.add_argument("--enc_reg_weight", type=float, default=0., help="weight of encoder regularization loss")
# file list (sep with space)
parser.add_argument("files", metavar="FILES", nargs="+", help="path to image files to be projected")
args = parser.parse_args()
n_mean_latent = 10000
# build generator to project
generator = models.get_pretrained('generator', args.config).to(device)
generator.eval()
if args.encoder:
encoder = models.get_pretrained('encoder', args.config).to(device)
encoder.eval()
else:
encoder = None
# if the generator is trained with elastic channels and evolution search
if 'flexible' in args.config:
print(' * loading evolution configs...')
with open(os.path.join('assets/evolve_configs/{}.json'.format(args.config))) as f:
evolve_cfgs = json.load(f)
# pick some reduction ratios; you can modify this to include more or fewer
# reduction ratio: search MACs limit (the key in evolve cfgs)
cfg_map = {
'2x': '73G',
'4x': '36G',
'6x': '24G',
'8x': '18G',
'10x': '15G',
}
evolve_cfgs = {k: evolve_cfgs[v] for k, v in cfg_map.items()}
else:
evolve_cfgs = None
# load perceptual loss
percept = lpips.LPIPS(net='vgg', verbose=False).to(device)
# load images to project
resize = min(generator.resolution, 256)
transform = transforms.Compose([
transforms.Resize(generator.resolution),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
imgs = [transform(Image.open(f).convert("RGB")) for f in args.files]
imgs = torch.stack(imgs, 0).to(device)
projected_styles = project_images(imgs)
with torch.no_grad():
rec_images = generator(projected_styles, randomize_noise=False, input_is_style=True)[0]
img_ar = make_image(rec_images)
result_file = {}
for i, input_name in enumerate(args.files):
result_file[input_name] = {
"img": rec_images[i],
"latent": projected_styles[i],
}
img_name = os.path.splitext(os.path.basename(input_name))[0] + "-project.png"
pil_img = Image.fromarray(img_ar[i])
pil_img.save(img_name)
import numpy as np
np.save(os.path.splitext(os.path.basename(input_name))[0] + '.npy', projected_styles[i].cpu().numpy())
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
4738,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
6738,
28034,
10178,
1330,
31408,... | 2.545455 | 1,474 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# Define a main() function that prints a little greeting.
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
532,
926,
198,
2,
15069,
3050,
3012,
3457,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,... | 3.450216 | 231 |
"""
notsosimple_project_test.py
---------------------------
Run `decu` on the mock project notsosimple_project.
"""
import os
import decu
import pytest
from subprocess import call
from util import make_teardown_fixture
PROJECT_DIR = 'notsosimple_project/'
teardown = pytest.fixture(scope='module', autouse=True)(
make_teardown_fixture(PROJECT_DIR))
def test_exec():
"""Test that `decu exec src/script.py` generates the appropriate files."""
cfg = decu.config['Script']
call(['decu', 'exec', '{}/script.py'.format(cfg['scripts_dir'])])
assert os.listdir(decu.config['logging']['logs_dir'])
assert os.listdir(cfg['figures_dir'])
assert os.listdir(cfg['results_dir'])
| [
37811,
198,
1662,
82,
418,
320,
1154,
62,
16302,
62,
9288,
13,
9078,
198,
22369,
6329,
198,
198,
10987,
4600,
12501,
84,
63,
319,
262,
15290,
1628,
407,
82,
418,
320,
1154,
62,
16302,
13,
198,
198,
37811,
198,
198,
11748,
28686,
198... | 2.688462 | 260 |
import pandas as pd
import numpy as np
from pathlib import Path
if __name__ == '__main__':
# split_local_data_to_single_files('data/rfam_learn_local/test')
# split_local_data_to_single_files('data/rfam_learn_local/train')
# split_local_data_to_single_files('data/rfam_learn_local/validation')
# print("split test data")
# split_via_dataframe('data/', 'rfam_local_short_train')
# print('split training data')
# split_via_dataframe('data/', 'rfam_local_long_train')
# print('split validation data')
# split_via_dataframe('data/rfam_learn_local', 'validation')
# split_via_dataframe('data', 'eterna_local_test')
# split_via_dataframe('data', 'rfam_taneda_local_test')
# split_via_dataframe('data', 'rfam_local_test')
# split_via_dataframe('data', 'rfam_local_min_400_max_1000_test')
# split_via_dataframe('data', 'rfam_local_min_1000_test')
# split_rfam_anta('data', 'rfam_anta_sc')
# generate_sc_data('data', 'rfam_local_min_1000_test')
# generate_inverse_folding_data('data', 'rfam_local_min_1000_test')
# generate_if_baseline_data('data', 'rfam_local_min_1000_test')
# generate_data_4_anta_sc_run('data', 'rfam_anta_sc')
generate_gap_data('data', 'rfam_local_min_1000_test')
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3108,
8019,
1330,
10644,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
6626,
62,
12001,
62,
7890,
62,... | 2.487129 | 505 |
import subprocess,sys,click
from subprocess import Popen,CREATE_NEW_CONSOLE
from selenium import webdriver
import six
from pyfiglet import figlet_format
from click_help_colors import HelpColorsGroup, HelpColorsCommand
# Coloring libraries
# Try colorama
try:
import colorama
colorama.init()
except ImportError:
colorama = None
# Try termcolor
try:
from termcolor import colored
except ImportError:
colored = None
# Help section coloring and Grouping
@click.group(
cls=HelpColorsGroup,
help_headers_color='yellow',
help_options_color='cyan'
)
# Styling
@cli_group.command()
@click.option('--voice', default = False, help = "Use --voice if you want to code using your voice.")
@click.option('--start', help = "To start voice coding, execute this command in the console : python -m dragonfly load --engine sapi5inproc _*.py --no-recobs-messages. Wait till the program says beginning loop, then clearly speak out your commands")
def enable_voice_coding(voice,start):
"""Enables voice coding."""
log("Larynx Code", color="blue", figlet=True)
log("Welcome to Larynx Code. We enable voice coding in your eitor and general navigations.", "green")
log("A new window will open up shortly, run the command : python -m dragonfly load --engine sapi5inproc _*.py --no-recobs-messages, to enable voice coding", "yellow")
log("LarynxCode --> developed by Balaka Biswas and Leshna Balara, with Click.", "cyan")
encoding = 'latin1'
p = subprocess.Popen(
['start', 'cmd', '/k', 'cd /caster'],
creationflags = CREATE_NEW_CONSOLE,
shell = True,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
@cli_group.command()
@click.option('--casterhelp', default = False, help = 'Call this option if you need to refer to the Caster Guide web app')
@click.option("--google", help = "Chooses default browser as Google Chrome")
def chelp(casterhelp,google):
"""Command for launching user guide."""
log("Larynx Code", color="blue", figlet=True)
log("Welcome to Larynx Code. We enable voice coding in your eitor and general navigations.", "green")
log("A new window will open up shortly, directing you to a simple guide. Detailed guides will get downloaded automatically.", "yellow")
log("Developed by Balaka Biswas. Hosted on Heroku.", "cyan")
driver = webdriver.Chrome("C:\\chromedriver.exe")
driver.get("https://larynxcode.herokuapp.com/")
while True:
pass
driver.close()
'''cli_group.add_command(enable_voice_coding)
cli_group.add_command(caster_help)'''
if __name__ == "__main__":
cli_group()
| [
11748,
850,
14681,
11,
17597,
11,
12976,
198,
6738,
850,
14681,
1330,
8099,
268,
11,
43387,
6158,
62,
13965,
62,
10943,
15821,
2538,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
11748,
2237,
198,
6738,
12972,
5647,
1616,
1330,
2... | 2.944751 | 905 |
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .. import AWSProperty
from . import boolean, network_port, positive_integer
class AlarmIdentifier(AWSProperty):
"""
Export:
"""
props = {
"Name": (str, True),
"Region": (str, True),
}
class HealthCheckConfiguration(AWSProperty):
"""
Export:
"""
props = {
"AlarmIdentifier": (AlarmIdentifier, False),
"ChildHealthChecks": ([str], False),
"EnableSNI": (boolean, False),
"FailureThreshold": (positive_integer, False),
"FullyQualifiedDomainName": (str, False),
"HealthThreshold": (positive_integer, False),
"InsufficientDataHealthStatus": (str, False),
"Inverted": (boolean, False),
"IPAddress": (str, False),
"MeasureLatency": (boolean, False),
"Port": (network_port, False),
"Regions": ([str], False),
"RequestInterval": (positive_integer, False),
"ResourcePath": (str, False),
"SearchString": (str, False),
"Type": (str, True),
}
class AliasTarget(AWSProperty):
"""
Export:
"""
props = {
"HostedZoneId": (str, True),
"DNSName": (str, True),
"EvaluateTargetHealth": (boolean, False),
}
| [
2,
15069,
357,
66,
8,
2321,
12,
1238,
1828,
11,
2940,
2631,
988,
1279,
4102,
31,
431,
988,
13,
2398,
29,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
4091,
38559,
24290,
2393,
329,
1336,
5964,
13,
628,
198,
6738,
11485,
1330,
... | 2.364437 | 568 |
# Generated by Django 2.2.6 on 2019-11-21 21:06
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
1157,
12,
2481,
2310,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import cv2
face_data = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_data.detectMultiScale(gray, 1.3, 5)
for x,y,w,h in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 5)
cv2.imshow('img',img)
k = cv2.waitKey(30) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
201,
198,
201,
198,
2550,
62,
7890,
796,
269,
85,
17,
13,
34,
28966,
9487,
7483,
7203,
3099,
5605,
28966,
62,
8534,
1604,
558,
62,
12286,
13,
19875,
4943,
201,
198,
201,
198,
11128,
796,
269,
85,
17,
13,
10798,... | 1.898833 | 257 |
# Generated by Django 2.2.5 on 2020-10-16 07:38
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
20,
319,
12131,
12,
940,
12,
1433,
8753,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import os
from dotenv import load_dotenv
load_dotenv()
APP_ENV = os.getenv("APP_ENV", default="development") # IMPORTANT: set to "production" in production
APP_VERSION = os.getenv("APP_VERSION", default="v0.0.1") # update upon new releases
| [
628,
198,
11748,
28686,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
198,
2220,
62,
26518,
24330,
3419,
198,
198,
24805,
62,
1677,
53,
796,
28686,
13,
1136,
24330,
7203,
24805,
62,
1677,
53,
1600,
4277,
2625,
31267,
4943,... | 2.987805 | 82 |
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
vertices= (
(0, 0, 0),
(1, 0, 0),
(1, 1, 0),
(0, 1 ,0),
(0, 0, 1),
(1, 0, 1),
(1, 1, 1),
(0, 1, 1),
)
edges = (
(0,1),
(1,2),
(2,3),
(3,0),
(0,4),
(1,5),
(2,6),
(3,7),
(4,5),
(5,6),
(6,7),
(7,4)
)
main()
| [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
6738,
30672,
13,
8763,
1330,
1635,
198,
6738,
30672,
13,
8763,
52,
1330,
1635,
198,
198,
1851,
1063,
28,
357,
198,
220,
220,
220,
357,
15,
11,
657,
11,
657... | 1.562249 | 249 |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ChunkedUploadResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'checksum': 'str',
'chunked_upload_id': 'str',
'chunked_upload_parts': 'list[ChunkedUploadPart]',
'chunked_upload_uri': 'str',
'committed': 'str',
'expiration_date_time': 'str',
'max_chunked_upload_parts': 'str',
'max_total_size': 'str',
'total_size': 'str'
}
attribute_map = {
'checksum': 'checksum',
'chunked_upload_id': 'chunkedUploadId',
'chunked_upload_parts': 'chunkedUploadParts',
'chunked_upload_uri': 'chunkedUploadUri',
'committed': 'committed',
'expiration_date_time': 'expirationDateTime',
'max_chunked_upload_parts': 'maxChunkedUploadParts',
'max_total_size': 'maxTotalSize',
'total_size': 'totalSize'
}
def __init__(self, checksum=None, chunked_upload_id=None, chunked_upload_parts=None, chunked_upload_uri=None, committed=None, expiration_date_time=None, max_chunked_upload_parts=None, max_total_size=None, total_size=None): # noqa: E501
"""ChunkedUploadResponse - a model defined in Swagger""" # noqa: E501
self._checksum = None
self._chunked_upload_id = None
self._chunked_upload_parts = None
self._chunked_upload_uri = None
self._committed = None
self._expiration_date_time = None
self._max_chunked_upload_parts = None
self._max_total_size = None
self._total_size = None
self.discriminator = None
if checksum is not None:
self.checksum = checksum
if chunked_upload_id is not None:
self.chunked_upload_id = chunked_upload_id
if chunked_upload_parts is not None:
self.chunked_upload_parts = chunked_upload_parts
if chunked_upload_uri is not None:
self.chunked_upload_uri = chunked_upload_uri
if committed is not None:
self.committed = committed
if expiration_date_time is not None:
self.expiration_date_time = expiration_date_time
if max_chunked_upload_parts is not None:
self.max_chunked_upload_parts = max_chunked_upload_parts
if max_total_size is not None:
self.max_total_size = max_total_size
if total_size is not None:
self.total_size = total_size
@property
def checksum(self):
"""Gets the checksum of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The checksum of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._checksum
@checksum.setter
def checksum(self, checksum):
"""Sets the checksum of this ChunkedUploadResponse.
# noqa: E501
:param checksum: The checksum of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._checksum = checksum
@property
def chunked_upload_id(self):
"""Gets the chunked_upload_id of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The chunked_upload_id of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._chunked_upload_id
@chunked_upload_id.setter
def chunked_upload_id(self, chunked_upload_id):
"""Sets the chunked_upload_id of this ChunkedUploadResponse.
# noqa: E501
:param chunked_upload_id: The chunked_upload_id of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._chunked_upload_id = chunked_upload_id
@property
def chunked_upload_parts(self):
"""Gets the chunked_upload_parts of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The chunked_upload_parts of this ChunkedUploadResponse. # noqa: E501
:rtype: list[ChunkedUploadPart]
"""
return self._chunked_upload_parts
@chunked_upload_parts.setter
def chunked_upload_parts(self, chunked_upload_parts):
"""Sets the chunked_upload_parts of this ChunkedUploadResponse.
# noqa: E501
:param chunked_upload_parts: The chunked_upload_parts of this ChunkedUploadResponse. # noqa: E501
:type: list[ChunkedUploadPart]
"""
self._chunked_upload_parts = chunked_upload_parts
@property
def chunked_upload_uri(self):
"""Gets the chunked_upload_uri of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The chunked_upload_uri of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._chunked_upload_uri
@chunked_upload_uri.setter
def chunked_upload_uri(self, chunked_upload_uri):
"""Sets the chunked_upload_uri of this ChunkedUploadResponse.
# noqa: E501
:param chunked_upload_uri: The chunked_upload_uri of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._chunked_upload_uri = chunked_upload_uri
@property
def committed(self):
"""Gets the committed of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The committed of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._committed
@committed.setter
def committed(self, committed):
"""Sets the committed of this ChunkedUploadResponse.
# noqa: E501
:param committed: The committed of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._committed = committed
@property
def expiration_date_time(self):
"""Gets the expiration_date_time of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The expiration_date_time of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._expiration_date_time
@expiration_date_time.setter
def expiration_date_time(self, expiration_date_time):
"""Sets the expiration_date_time of this ChunkedUploadResponse.
# noqa: E501
:param expiration_date_time: The expiration_date_time of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._expiration_date_time = expiration_date_time
@property
def max_chunked_upload_parts(self):
"""Gets the max_chunked_upload_parts of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The max_chunked_upload_parts of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._max_chunked_upload_parts
@max_chunked_upload_parts.setter
def max_chunked_upload_parts(self, max_chunked_upload_parts):
"""Sets the max_chunked_upload_parts of this ChunkedUploadResponse.
# noqa: E501
:param max_chunked_upload_parts: The max_chunked_upload_parts of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._max_chunked_upload_parts = max_chunked_upload_parts
@property
def max_total_size(self):
"""Gets the max_total_size of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The max_total_size of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._max_total_size
@max_total_size.setter
def max_total_size(self, max_total_size):
"""Sets the max_total_size of this ChunkedUploadResponse.
# noqa: E501
:param max_total_size: The max_total_size of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._max_total_size = max_total_size
@property
def total_size(self):
"""Gets the total_size of this ChunkedUploadResponse. # noqa: E501
# noqa: E501
:return: The total_size of this ChunkedUploadResponse. # noqa: E501
:rtype: str
"""
return self._total_size
@total_size.setter
def total_size(self, total_size):
"""Sets the total_size of this ChunkedUploadResponse.
# noqa: E501
:param total_size: The total_size of this ChunkedUploadResponse. # noqa: E501
:type: str
"""
self._total_size = total_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ChunkedUploadResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ChunkedUploadResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
14432,
84,
11712,
30617,
7824,
628,
220,
220,
220,
383,
14432,
84,
11712,
30617,
7824,
3769,
345,
351,
257,
3665,
11,
11282,
11,
290,
2829,
5313,
2594,
7824,
329,
... | 2.239273 | 4,731 |
from unittest import TestCase
from taguri.validator import (
authority_name_validator,
date_validator,
specific_validator,
)
# https://blogs.msdn.microsoft.com/testing123/2009/02/06/email-address-test-cases/
# Note, however, that there are cases left out because the tag URI
# grammar allows or disallows so.
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
7621,
9900,
13,
12102,
1352,
1330,
357,
198,
220,
220,
220,
4934,
62,
3672,
62,
12102,
1352,
11,
198,
220,
220,
220,
3128,
62,
12102,
1352,
11,
198,
220,
220,
220,
2176,
62,
12102,
... | 2.982143 | 112 |
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import sys
import MySQLdb
from PyQt5.uic import loadUiType
ui,_ = loadUiType('id_gen.ui')
if __name__ == '__main__':
main()
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
11748,
25064,
198,
11748,
33476,
994... | 2.260417 | 96 |
#!/usr/bin/env python
#
# Copyright (c) 2013-2016, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
import sys
import os
sys.path.append(os.getenv('HOME') + '/bin/')
import subprocess
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
2211,
12,
5304,
11,
35920,
43412,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2393,
318,
9387,
739,
262,
2846,
287,
262,
7223,
38559,
24... | 3.020979 | 143 |
from typing import List
| [
6738,
19720,
1330,
7343,
198
] | 4.8 | 5 |
from ..qt_compat import QtGui, QtCore, QtWidgets
import six
import os
from collections import defaultdict
from ..core import DataModel, LabelManager
DEFAULT_DIR_KEY = "default_dir"
DEFAULT_DATA_KEY = "default_data_dir"
| [
198,
6738,
11485,
39568,
62,
5589,
265,
1330,
33734,
8205,
72,
11,
33734,
14055,
11,
33734,
54,
312,
11407,
198,
198,
11748,
2237,
198,
11748,
28686,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
11485,
7295,
1330,
6060,
17633,
11,
36... | 3 | 79 |
# Generated from \R.g4 by ANTLR 4.9
from antlr4 import *
if __name__ is not None and "." in __name__:
from .RParser import RParser
else:
from RParser import RParser
# This class defines a complete listener for a parse tree produced by RParser.
# Enter a parse tree produced by RParser#parse.
# Exit a parse tree produced by RParser#parse.
# Enter a parse tree produced by RParser#expr.
# Exit a parse tree produced by RParser#expr.
# Enter a parse tree produced by RParser#functiondefbody.
# Exit a parse tree produced by RParser#functiondefbody.
# Enter a parse tree produced by RParser#functiondeflambda.
# Exit a parse tree produced by RParser#functiondeflambda.
# Enter a parse tree produced by RParser#functiondefargslambda.
# Exit a parse tree produced by RParser#functiondefargslambda.
# Enter a parse tree produced by RParser#functiondefargs.
# Exit a parse tree produced by RParser#functiondefargs.
# Enter a parse tree produced by RParser#implicit_column_name.
# Exit a parse tree produced by RParser#implicit_column_name.
# Enter a parse tree produced by RParser#affectation.
# Exit a parse tree produced by RParser#affectation.
# Enter a parse tree produced by RParser#rangeopexpr.
# Exit a parse tree produced by RParser#rangeopexpr.
# Enter a parse tree produced by RParser#exprlist.
# Exit a parse tree produced by RParser#exprlist.
# Enter a parse tree produced by RParser#rightexpr.
# Exit a parse tree produced by RParser#rightexpr.
# Enter a parse tree produced by RParser#formlist.
# Exit a parse tree produced by RParser#formlist.
# Enter a parse tree produced by RParser#form.
# Exit a parse tree produced by RParser#form.
# Enter a parse tree produced by RParser#argumentname.
# Exit a parse tree produced by RParser#argumentname.
# Enter a parse tree produced by RParser#sublist.
# Exit a parse tree produced by RParser#sublist.
# Enter a parse tree produced by RParser#sublistadd.
# Exit a parse tree produced by RParser#sublistadd.
# Enter a parse tree produced by RParser#sub.
# Exit a parse tree produced by RParser#sub.
# Enter a parse tree produced by RParser#subnobracket.
# Exit a parse tree produced by RParser#subnobracket.
# Enter a parse tree produced by RParser#ranges.
# Exit a parse tree produced by RParser#ranges.
# Enter a parse tree produced by RParser#range_simple.
# Exit a parse tree produced by RParser#range_simple.
# Enter a parse tree produced by RParser#range_complexe.
# Exit a parse tree produced by RParser#range_complexe.
# Enter a parse tree produced by RParser#intersections.
# Exit a parse tree produced by RParser#intersections.
# Enter a parse tree produced by RParser#intersection_simple.
# Exit a parse tree produced by RParser#intersection_simple.
# Enter a parse tree produced by RParser#intersection_complexe.
# Exit a parse tree produced by RParser#intersection_complexe.
# Enter a parse tree produced by RParser#constant.
# Exit a parse tree produced by RParser#constant.
# Enter a parse tree produced by RParser#boolean.
# Exit a parse tree produced by RParser#boolean.
# Enter a parse tree produced by RParser#nextexpr.
# Exit a parse tree produced by RParser#nextexpr.
# Enter a parse tree produced by RParser#repeatexpr.
# Exit a parse tree produced by RParser#repeatexpr.
# Enter a parse tree produced by RParser#whileexpr.
# Exit a parse tree produced by RParser#whileexpr.
# Enter a parse tree produced by RParser#forexpr.
# Exit a parse tree produced by RParser#forexpr.
# Enter a parse tree produced by RParser#ifexpr.
# Exit a parse tree produced by RParser#ifexpr.
# Enter a parse tree produced by RParser#ifelseexpr.
# Exit a parse tree produced by RParser#ifelseexpr.
# Enter a parse tree produced by RParser#elseif.
# Exit a parse tree produced by RParser#elseif.
# Enter a parse tree produced by RParser#returnexpr.
# Exit a parse tree produced by RParser#returnexpr.
# Enter a parse tree produced by RParser#functioncall.
# Exit a parse tree produced by RParser#functioncall.
# Enter a parse tree produced by RParser#inlinefunction.
# Exit a parse tree produced by RParser#inlinefunction.
# Enter a parse tree produced by RParser#formula_simple.
# Exit a parse tree produced by RParser#formula_simple.
# Enter a parse tree produced by RParser#formula_simple_A.
# Exit a parse tree produced by RParser#formula_simple_A.
# Enter a parse tree produced by RParser#formula_simple_B.
# Exit a parse tree produced by RParser#formula_simple_B.
# Enter a parse tree produced by RParser#formula_simple_C.
# Exit a parse tree produced by RParser#formula_simple_C.
# Enter a parse tree produced by RParser#affectop.
# Exit a parse tree produced by RParser#affectop.
# Enter a parse tree produced by RParser#functiondef.
# Exit a parse tree produced by RParser#functiondef.
# Enter a parse tree produced by RParser#identifier.
# Exit a parse tree produced by RParser#identifier.
# Enter a parse tree produced by RParser#formop.
# Exit a parse tree produced by RParser#formop.
# Enter a parse tree produced by RParser#rangeop.
# Exit a parse tree produced by RParser#rangeop.
# Enter a parse tree produced by RParser#dotop.
# Exit a parse tree produced by RParser#dotop.
# Enter a parse tree produced by RParser#operator.
# Exit a parse tree produced by RParser#operator.
# Enter a parse tree produced by RParser#comparison.
# Exit a parse tree produced by RParser#comparison.
del RParser
| [
2,
2980,
515,
422,
3467,
49,
13,
70,
19,
416,
3537,
14990,
49,
604,
13,
24,
198,
6738,
1885,
14050,
19,
1330,
1635,
198,
361,
11593,
3672,
834,
318,
407,
6045,
290,
366,
526,
287,
11593,
3672,
834,
25,
198,
220,
220,
220,
422,
7... | 3.253615 | 1,798 |
import logging
logger = logging.getLogger(__name__)
def strict_show_toolbar_callback(request):
"""
Additional check against custom permission ``debug_toolbar.show``.
:param request: HttpRequest
:return: bool
"""
if hasattr(request, 'user') and \
request.user.has_perm('debug_toolbar.show'):
logger.debug('DjDT allowed for "%s" <%s>',
request.user.get_full_name(),
request.user.email)
return show_toolbar_callback(request)
return False
def show_toolbar_callback(request):
"""
For each request, the callback will be invoked to determine if the
machinery of the django-debug-toolbar should be invoked.
In consideration of performance, this should be entirely stateless and
depend on information available in the request only.
We allow two ways that a user can enable this.
1. User-Agent spoofing
Setting the user agent header to include the string "DjangoDebugToolbar"
will enable the toolbar for that request.
2. X-DJDT-SHOW header
Adding a custom header and setting the value to one of "1", "true", "on",
or "yes" (all case insensitive) will enable the toolbar for that request.
NOTE: previously this depended on a custom permission, we've relaxed this
because sometimes it is necessary to debug a live application as an
unauthenticated user.
For the old behaviour, use the ``strict_show_toolbar_callback`` instead.
:param request: HttpRequest
:return: bool
"""
ua = request.META.get('HTTP_USER_AGENT', '')
if ua.find('DjangoDebugToolbar') >= 0:
logger.info('Enable DjDT by User-Agent "%s"', ua)
return True
show = request.META.get('HTTP_X_DJDT_SHOW')
if show is not None:
show = show.lower()
logger.debug('HTTP_X_DJDT_SHOW: %s', show)
if show in ('1', 'true', 'on', 'yes'):
logger.info('Enable DjDT by X-DjDT-SHOW header "%s"', show)
return True
return False
| [
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4299,
7646,
62,
12860,
62,
25981,
5657,
62,
47423,
7,
25927,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
15891,
2198,
10... | 2.752044 | 734 |
import os
import random
import re
import time

import psycopg2
import requests
from flask import request, session
from twilio.twiml.messaging_response import MessagingResponse, Message

from . import app, db
from .models import User
basic_payload = {
'api_key': os.environ.get('AB_API_KEY'),
'cookie': "TestyMcTestface"
}
#Parsing Decision Tree
tax_available = False
while not tax_available:
t = requests.get('https://searchbertha-hrd.appspot.com/_ah/api/search_v2/v2/taxonomy/', params=basic_payload)
tax_available = t.status_code == 200
taxonomy = t.json().get('nodes')
#Flask Routes
@app.route('/', methods=['POST'])
#Misc Funtions
def cleanText(query):
    """Normalize *query*: lower-case it and collapse every run of
    non-alphanumeric characters into a single space."""
    lowered = query.lower()
    return re.sub(r'[^0-9a-zA-Z]+', ' ', lowered)
#Use Case Functions
#Registration - Use Case 1
def _isRegistered(from_num) :
    """Return True if *from_num* has already been registered with the
    ServiceConnect service, i.e. a matching phone-number row exists in the
    database."""
    match = db.session.query(User.phone_num).filter_by(phone_num=from_num).scalar()
    return match is not None
def _register(from_num, zip_code):
    """Register *from_num* with the textFood service.

    Validates that *zip_code* contains exactly one 5-digit zip code, that it
    is a covered DC zip code, and on success creates (and commits) a new User
    row with a reminder preference.

    :param from_num: the user's phone number
    :param zip_code: free text expected to contain one 5-digit zip code
    :return: dict with 'message' (reply text) and 'cookies' (conversation
             state for the next inbound text, or None)
    """
    ret = { 'message': None, 'cookies': None }
    # Washington, DC zip codes the service covers; a set gives O(1) membership
    # tests (the original list was scanned linearly).
    valid_zip = {20001, 20002, 20003, 20004, 20005, 20006, 20007, 20008, 20009,
                 20010, 20011, 20012, 20015, 20016, 20017, 20018, 20019, 20020,
                 20024, 20032, 20036, 20037, 20045, 20052, 20053, 20057, 20064,
                 20202, 20204, 20228, 20230, 20240, 20245, 20260, 20307, 20317,
                 20319, 20373, 20390, 20405, 20418, 20427, 20506, 20510, 20520,
                 20535, 20540, 20551, 20553, 20560, 20565, 20566, 20593}
    zipc = re.findall(r'(\b\d{5}\b)', zip_code)
    if len(zipc) == 0 :
        ret['message'] = "Invalid Zip Code, please try again!"
        ret['cookies'] = {'lastText': 'notRegistered'}
    elif len(zipc) > 1 :
        ret['message'] = "Please enter one 5 digit zipcode"
        ret['cookies'] = {'lastText': 'notRegistered'}
    elif int(zipc[0]) not in valid_zip :
        ret['message'] = "The textFood is only available in DC."
        ret['cookies'] = None
    else :
        # NOTE(review): the reminder preference is chosen at random -- this
        # looks like a placeholder; confirm the intended opt-in behaviour.
        getReminders = bool(random.getrandbits(1))
        db.session.add(User(from_num, zipc[0], getReminders))
        db.session.commit()
        ret['message'] = "{} at zip code {} is now registered to the textFood service"\
            .format(from_num, zipc[0])
    return ret
#Direct Querying - Use Case 2
def _processQuery(from_num, query, original):
    """Answer a direct service query from a registered user.

    Looks *query* up in the Aunt Bertha taxonomy; either prompts the user to
    pick a more specific child category, or fetches matching programs for the
    user's zip code and replies with up to three of them.

    :param from_num: the user's phone number (used to look up their zip code)
    :param query: the cleaned query text
    :param original: the user's original, uncleaned text (for the default reply)
    :return: dict with 'message' and 'cookies'
    """
    # Default Response when nothing matches.
    ret = {
        'message': "No information found on {}".format(original),
        'cookies': None
    }
    if re.match(r'cancel textfood', query):
        ret = {
            'message': "To comfirm cancellation please input your home zip code",
            'cookies': {'lastText':'cancel'}
        }
    else:
        inT = inTaxonomy(query, taxonomy)
        if inT['found']:
            if inT['children'] is not None:
                # Category is ambiguous: list the child categories to choose from.
                str_builder = []
                str_builder.append("Please text one of the following for more specific information: \n")
                for child in inT['children']:
                    str_builder.append("- {} \n".format(child.get('label')))
                ret['message'] = ''.join(str_builder)
            else:
                # BUG FIX: copy the shared payload instead of aliasing it; the
                # original mutated module-level basic_payload on every request
                # by writing 'serviceTag' into it.
                payload = dict(basic_payload)
                payload['serviceTag'] = query
                user = db.session.query(User).filter_by(phone_num=from_num).scalar()
                r = requests.get("https://searchbertha-hrd.appspot.com/_ah/api/search_v2/v2/zipcodes/"+user.zip_code+"/programs", params=payload)
                if r.status_code == 200:
                    ab_data = r.json()
                    if ab_data.get('error') is None:
                        # Build the reply from at most the first three programs.
                        str_builder = []
                        programs = ab_data.get('programs')
                        count = 0
                        for program in programs:
                            if count > 2:
                                break
                            else:
                                count+=1
                                str_builder.append("{} - {}: {} \n \n".format(program.get('provider_name'),program.get('next_steps')[0].get('action'),program.get('next_steps')[0].get('contact')))
                        ret['message'] = ''.join(str_builder)
                    else:
                        ret['message'] = "I am sorry we do not know of a {} service in the {} zipcode".format(query, user.zip_code)
                else:
                    ret['message'] = "There is currently an error with the system. Please try again later."
                    app.logger.warning("Aunt Bertha is not responding correctly")
    return ret
#Cancellation - Use Case 4
def _comfirmCancel(from_num, query):
    """Delete the user's record once their zip code is confirmed.

    *query* must match the zip code stored for *from_num*; otherwise the
    account is left untouched (the commit below is then a no-op).
    """
    # Look up the registered user by phone number.
    user = db.session.query(User).filter_by(phone_num=from_num).scalar()
    if str(user.zip_code) == query :
        db.session.delete(user)
        ret = {
            'message': "Your text food account has been cancelled",
            'cookies': None
        }
    else :
        # NOTE(review): the backslash continuation embeds this line's leading
        # whitespace inside the SMS text ("... has not been        cancelled")
        # -- probably unintended; confirm before reformatting.
        ret = {
            'message': "Incorrect zip code your account has not been \
                        cancelled",
            'cookies': None
        }
    # Commit either the deletion or (on mismatch) nothing.
    db.session.commit()
    return ret
| [
11748,
302,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
7007,
198,
6738,
764,
27530,
1330,
11787,
198,
6738,
42903,
1330,
2581,
11,
6246,
198,
6738,
665,
346,
952,
13,
4246,
320,
75,
13,
37348,
3... | 2.173659 | 2,741 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .pix3d_evaluation import Pix3DEvaluator, transform_meshes_to_camera_coord_system
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
198,
6738,
764,
79,
844,
18,
67,
62,
18206,
2288,
1330,
21642,
18,
7206,
2100,
84,
1352,
11,
6121,
62,
6880,
956,
62,
1462,
62,
25695,
62,
37652,
... | 3.340426 | 47 |
import paramiko
| [
11748,
5772,
12125,
628,
198
] | 3.6 | 5 |
"""
Delta E ITP.
https://kb.portrait.com/help/ictcp-color-difference-metric
"""
from ..distance import DeltaE
import math
from ... import util
class DEITP(DeltaE):
    """Delta E ITP class."""

    @staticmethod
    def name():
        """Name of method."""

        return "itp"

    @staticmethod
    def distance(color, sample, scalar=720, **kwargs):
        """Delta E ITP color distance formula."""

        coords1 = util.no_nan(color.convert('ictcp').coords())
        coords2 = util.no_nan(sample.convert('ictcp').coords())
        d_i = coords1[0] - coords2[0]
        d_t = coords1[1] - coords2[1]
        d_p = coords1[2] - coords2[2]

        # Equation (1): the T axis difference is down-weighted by 0.25.
        return scalar * math.sqrt(d_i ** 2 + 0.25 * d_t ** 2 + d_p ** 2)
| [
37811,
198,
42430,
412,
314,
7250,
13,
198,
198,
5450,
1378,
32812,
13,
634,
12907,
13,
785,
14,
16794,
14,
713,
13155,
12,
8043,
12,
26069,
1945,
12,
4164,
1173,
198,
37811,
198,
6738,
11485,
30246,
1330,
16978,
36,
198,
11748,
10688... | 2.277778 | 288 |
#!/usr/bin/env python
# Import mechanize, exiting with install instructions if it is missing.
try:
    from mechanize import Request, urlopen, URLError, HTTPError,ProxyHandler, build_opener, install_opener, Browser
except ImportError:
    # print() with a single parenthesized argument behaves identically on
    # Python 2 and 3; the original `print "..."` statements were py2-only
    # syntax and a SyntaxError under Python 3.
    print("\n[X] Please install mechanize module:")
    print("    http://wwwsearch.sourceforge.net/mechanize/\n")
    exit()
from collections import defaultdict
import random
import threading
from core.constants import USER_AGENTS
from core.target import Target
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
3962,
1096,
1330,
19390,
11,
19016,
9654,
11,
37902,
2538,
81,
1472,
11,
14626,
12331,
11,
44148,
25060,
11,
1382,
62,
404,
877,
11,
2721,
62... | 3.17037 | 135 |
# @param A : list of integers
# @return a list of list of integers
| [
220,
220,
220,
1303,
2488,
17143,
317,
1058,
1351,
286,
37014,
198,
220,
220,
220,
1303,
2488,
7783,
257,
1351,
286,
1351,
286,
37014,
198
] | 3 | 25 |
from __future__ import division
from builtins import str
from past.utils import old_div
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import dictionaries as dlg_populate
import sys
import pickle
from IPython import embed as II
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
3170,
1040,
1330,
965,
198,
6738,
1613,
13,
26791,
1330,
1468,
62,
7146,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72... | 3.045977 | 87 |
__author__ = 'demi'

# Question 5: Date Converter
# Write a procedure date_converter which takes two inputs. The first is
# a dictionary and the second a string. The string is a valid date in
# the format month/day/year. The procedure should return
# the date written in the form <day> <name of month> <year>.

# For example, if the dictionary is in English,
english = {1:"January", 2:"February", 3:"March", 4:"April", 5:"May",
           6:"June", 7:"July", 8:"August", 9:"September",10:"October",
           11:"November", 12:"December"}
# then "5/11/2012" should be converted to "11 May 2012".

# If the dictionary is in Swedish
swedish = {1:"januari", 2:"februari", 3:"mars", 4:"april", 5:"maj",
           6:"juni", 7:"juli", 8:"augusti", 9:"september",10:"oktober",
           11:"november", 12:"december"}
# then "5/11/2012" should be converted to "11 maj 2012".

# Hint: int('12') converts the string '12' to the integer 12.


def date_converter(month_names, date_string):
    """Convert *date_string* ('month/day/year') to '<day> <month name> <year>'.

    BUG FIX: the original script called date_converter without ever defining
    it, so every print below raised NameError.

    :param month_names: dict mapping month number (1-12) to its name
    :param date_string: date in 'month/day/year' form, e.g. '5/11/2012'
    :return: the date as '<day> <month name> <year>', e.g. '11 May 2012'
    """
    month, day, year = date_string.split('/')
    return "{} {} {}".format(day, month_names[int(month)], year)


print(date_converter(english, '5/11/2012'))
#>>> 11 May 2012

print(date_converter(english, '5/11/12'))
#>>> 11 May 12

print(date_converter(swedish, '5/11/2012'))
#>>> 11 maj 2012

print(date_converter(swedish, '12/5/1791'))
#>>> 5 december 1791
| [
834,
9800,
834,
796,
705,
9536,
72,
6,
628,
198,
2,
18233,
642,
25,
7536,
35602,
353,
198,
198,
2,
19430,
257,
8771,
3128,
62,
1102,
332,
353,
543,
2753,
734,
17311,
13,
383,
717,
318,
198,
2,
257,
22155,
290,
262,
1218,
257,
47... | 2.779126 | 412 |