content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Pretty much trivial logging demo
Default log file - './logs/vCenter.log'
"""
from common.logger import getLogger
from common.logger import configure_loglevel
_logger = getLogger(__name__) # Default logger is using
# _logger = getLogger("vCenterShell") # for Shell App itself
# _logger = getLogger("vCenterCommon") # for Common Utilises
# ONLY IF YOU WANTED CONFIGURE LOG MANUALLY
configure_loglevel("INFO", "INFO", "../../logs/vCenter.log")
if __name__ == "__main__":
_logger.debug("DEBUG SHOULD BE SKIPPED")
_logger.info("INFO IS OK")
_logger.warn("WARNING IS OK")
_logger.error("ERROR IS OK!!!")
_logger.critical("CRITICAL IS OK ?!!!!") | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
35700,
881,
20861,
18931,
13605,
198,
198,
19463,
2604,
2393,
532,
705,
19571,
6404,
82,
14,
85,
23656,
13,
6404,
6,
198,
37811,
198,
198,
6738,
2219,
13,
... | 2.651685 | 267 |
from ..test_signatures import extension_module
| [
6738,
11485,
9288,
62,
12683,
6691,
1330,
7552,
62,
21412,
628,
198
] | 4.083333 | 12 |
import django_filters
from proma.common.helpers import CommonFilterHelper
from .models import Client
| [
11748,
42625,
14208,
62,
10379,
1010,
198,
198,
6738,
1552,
64,
13,
11321,
13,
16794,
364,
1330,
8070,
22417,
47429,
198,
198,
6738,
764,
27530,
1330,
20985,
628
] | 3.714286 | 28 |
#!/usr/bin/env python3
from ctypes import *
import m2m2_core
import common_application_interface
M2M2_PS_SYS_PWR_STATE_ENUM_t = {
"type":c_uint8,
"enum_values": [
("M2M2_PS_SYS_PWR_STATE_ACTIVE",0x0),
("M2M2_PS_SYS_PWR_STATE_FLEXI",0x1),
("M2M2_PS_SYS_PWR_STATE_HIBERNATE",0x2),
("M2M2_PS_SYS_PWR_STATE_SHUTDOWN",0x3),
]
}
M2M2_PS_SYS_COMMAND_ENUM_t = {
"type":c_uint8,
"enum_values": [
("__M2M2_PS_SYS_COMMAND_LOWEST",0x40),
("M2M2_PS_SYS_COMMAND_SET_DATE_TIME_REQ",0x42),
("M2M2_PS_SYS_COMMAND_SET_DATE_TIME_RESP",0x43),
("M2M2_PS_SYS_COMMAND_SET_PWR_STATE_REQ",0x44),
("M2M2_PS_SYS_COMMAND_SET_PWR_STATE_RESP",0x45),
("M2M2_PS_SYS_COMMAND_GET_PS_APPS_INFO_REQ",0x48),
("M2M2_PS_SYS_COMMAND_GET_PS_APPS_INFO_RESP",0x49),
("M2M2_PS_SYS_COMMAND_ACTIVATE_TOUCH_SENSOR_REQ",0x4A),
("M2M2_PS_SYS_COMMAND_ACTIVATE_TOUCH_SENSOR_RESP",0x4B),
("M2M2_PS_SYS_COMMAND_DEACTIVATE_TOUCH_SENSOR_REQ",0x4C),
("M2M2_PS_SYS_COMMAND_DEACTIVATE_TOUCH_SENSOR_RESP",0x4D),
("M2M2_PS_SYS_COMMAND_GET_BOARD_INFO_REQ", 0x4E),
("M2M2_PS_SYS_COMMAND_GET_BOARD_INFO_RESP", 0x4F),
("M2M2_PS_SYS_COMMAND_SYSTEM_RESET_REQ",0x50),
("M2M2_PS_SYS_COMMAND_SYSTEM_RESET_RESP",0x51),
]
}
ADI_PS_BOARD_TYPE_t = {
"type":c_uint8,
"enum_values": [
("ADI_PS_BOARD_TYPE_UNKNOWN",0x0),
("ADI_PS_BOARD_TYPE_ADPD107_WATCH",0x1),
("ADI_PS_BOARD_TYPE_ADPD107_CHEST_STRAP",0x2),
("ADI_PS_BOARD_TYPE_ADPD185_WATCH",0x3),
("ADI_PS_BOARD_TYPE_ADPD188_WATCH",0x4),
]
}
M2M2_PS_SYS_STATUS_ENUM_t = {
"type":c_uint8,
"enum_values": [
("__M2M2_PS_SYS_STATUS_LOWEST",0x40),
("M2M2_PS_SYS_STATUS_OK",0x41),
("M2M2_PS_SYS_STATUS_ERR_ARGS",0x42),
("M2M2_PS_SYS_STATUS_ERR_NOT_CHKD",0xFF),
]
}
m2m2_ps_sys_cmd_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
]
}
m2m2_ps_sys_pwr_state_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"state",
"type":M2M2_PS_SYS_PWR_STATE_ENUM_t},
]
}
m2m2_ps_sys_date_time_req_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"year",
"type":c_uint16},
{"name":"month",
"type":c_uint8},
{"name":"day",
"type":c_uint8},
{"name":"hour",
"type":c_uint8},
{"name":"minute",
"type":c_uint8},
{"name":"second",
"type":c_uint8},
{"name":"TZ_sec",
"type":c_uint32},
]
}
m2m2_ps_sys_board_info_req_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"version",
"type":c_uint16},
{"name":"mac_addr",
"length":6,
"type":c_uint8},
{"name":"device_id",
"type":c_uint32},
{"name":"model_number",
"type":c_uint32},
{"name":"hw_id",
"type":c_uint16},
{"name":"bom_id",
"type":c_uint16},
{"name":"batch_id",
"type":c_uint8},
{"name":"date",
"type":c_uint32},
{"name": "board_type",
"type": ADI_PS_BOARD_TYPE_t},
]
}
m2m2_ps_sys_sensor_app_status = {
"struct_fields":[
{"name":"sensor_app_id",
"type":m2m2_core.M2M2_ADDR_ENUM_t},
{"name":"num_subscribers",
"type":c_uint8},
{"name":"num_start_reqs",
"type":c_uint8},
]
}
m2m2_ps_sys_sensor_apps_info_req_t = {
"struct_fields":[
{"name":"command",
"type":c_uint8},
{"name":"status",
"type":c_uint8},
{"name":"num_sensor_apps",
"type":c_uint16},
{"name":"app_info",
"length":15,
"type":m2m2_ps_sys_sensor_app_status},
]
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
269,
19199,
1330,
1635,
198,
198,
11748,
285,
17,
76,
17,
62,
7295,
198,
198,
11748,
2219,
62,
31438,
62,
39994,
198,
198,
44,
17,
44,
17,
62,
3705,
62,
50,
16309,
... | 1.804818 | 2,034 |
"""
========================================================================
BasePass.py
========================================================================
An "abstract" base class for all passes.
Author : Shunning Jiang
Date : Dec 17, 2017
"""
| [
37811,
198,
23926,
2559,
198,
14881,
14478,
13,
9078,
198,
23926,
2559,
198,
2025,
366,
397,
8709,
1,
2779,
1398,
329,
477,
8318,
13,
198,
198,
13838,
1058,
911,
16596,
32294,
198,
10430,
220,
220,
1058,
4280,
1596,
11,
2177,
198,
378... | 5.906977 | 43 |
# coding=utf-8
from __future__ import unicode_literals
from .. import BaseProvider
localized = True | [
2,
19617,
28,
40477,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11485,
1330,
7308,
29495,
198,
198,
12001,
1143,
796,
6407
] | 3.448276 | 29 |
"""Italian language."""
from typing import Dict, Tuple
# Regex to find the pronunciation
pronunciation = r"{IPA\|/([^/]+)/"
# Regex to find the gender
gender = r"{{Pn\|?w?}} ''([fm])[singvol ]*''"
# Float number separator
float_separator = ","
# Thousands separator
thousands_separator = " "
# Markers for sections that contain interesting text to analyse.
head_sections = ("{{-it-}}",)
etyl_section = ["{{etim}}"]
sections = (
*head_sections,
*etyl_section,
"{{acron}",
"{{agg}",
"{{avv}",
"{{art}",
"{{cong}",
"{{inter}",
"{{pref}",
"{{Pn}",
"{{prep}",
"{{pron poss}",
"{{suff}",
"{{sost}",
"{{verb}",
)
# Some definitions are not good to keep (plural, gender, ... )
definitions_to_ignore = (
"{{verb form",
"{{nome",
# "{{agg form",
"{{sost form",
"{{-sost form-",
"{{It-conj",
)
# Templates to ignore: the text will be deleted.
templates_ignored: Tuple[str, ...] = tuple()
# Templates that will be completed/replaced using italic style.
templates_italic: Dict[str, str] = {}
# Templates more complex to manage.
templates_multi: Dict[str, str] = {
# {{context|ecology|lang=it}}
"context": "small(term(parts[1]))",
# {{Est|raro|it}}
"Est": "small(term('per estensione'))",
# {{Etim-link|aggrondare}}
# {{Etim-link||cervice}}
"Etim-link": "'Vedi: ' + parts[2] if len(parts) == 3 else 'Vedi: ' + parts[1]",
# {{Glossa|raro|it}}
"Glossa": "small(term(parts[1]))",
# {{la}}
"la": "'latino'",
# {{Lett|non comune|it}}
"Lett": "small(term('letteralmente'))",
# {{Nodef|it}}
"Nodef": "'-definizione mancante-'",
# {{Noetim||it}}
"Noetim": "'-etimologia mancante-'",
# {{Quote|...}}
"Quote": "'«' + parts[1] + '» ' + term(parts[2])",
# {{Tabs|aggrondato|aggrondati|aggrondata|aggrondate}}
"Tabs": "'Masc. sing. ' + parts[1] + ', masc. plur. ' + parts[2] + ', fem. sing. ' + parts[3] + ', fem. plur. ' + parts[4]", # noqa
# {{Taxon|Chromis chromis|Chromis chromis}}
"Taxon": "'la sua classificazione scientifica è ' + strong(italic(parts[1]))",
# {{Term|statistica|it}}
"Term": "small(term(parts[1]))",
"term": "small(term(parts[1]))",
# {{Vd|acre#Italiano|acre}}
"Vd": "'Vedi: ' + parts[2]",
}
# Release content on GitHub
# https://github.com/BoboTiG/ebook-reader-dict/releases/tag/it
release_description = """\
Numero di parole: {words_count}
Export Wiktionary: {dump_date}
File disponibili:
- [Kobo]({url_kobo}) (dicthtml-{locale}.zip)
- [StarDict]({url_stardict}) (dict-{locale}.zip)
- [DictFile]({url_dictfile}) (dict-{locale}.df)
<sub>Aggiornato il {creation_date}</sub>
""" # noqa
# Dictionary name that will be printed below each definition
wiktionary = "Wikizionario (ɔ) {year}"
| [
37811,
45696,
3303,
526,
15931,
198,
6738,
19720,
1330,
360,
713,
11,
309,
29291,
198,
198,
2,
797,
25636,
284,
1064,
262,
41694,
198,
31186,
24978,
796,
374,
1,
90,
4061,
32,
59,
91,
14,
26933,
61,
14,
48688,
20679,
1,
198,
198,
... | 2.286416 | 1,222 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import MagneticField
from math import atan2, sin, cos, sqrt
if __name__ == '__main__':
listener()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
10903,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
44629,
15878,
198,
6738,
10688,
1330,
379,
272,
17,
1... | 2.850746 | 67 |
"""Module containing tests for the delver tool"""
import unittest
from unittest import mock
from context import core as mod_ut
class TestDelveFunctional(unittest.TestCase):
"""Functional tests for the delver tool"""
def setUp(self):
"""Initialize frequently used test objects"""
self.test_obj = {"foo": ["bar", {"baz": 3}]}
print_patch = mock.patch("delver.core.six.print_")
input_patch = mock.patch("delver.core.six_input")
self.fake_print = print_patch.start()
self.fake_input = input_patch.start()
self.addCleanup(print_patch.stop)
self.addCleanup(input_patch.stop)
def _extract_print_strings(self, call_args):
"""Extract the actual strings that make up the calls to the patched
print function.
:param call_args: the list of arguments passed to the patched function
:type call_args: ``list``
:return: list of strings that were arguments to the patched print
function
:rtype: ``list`` of ``str``
"""
return [x[0][0] for x in call_args]
def test_single_navigate(self):
"""Test a single navigation and exit"""
self.fake_input.side_effect = ["0", "q"]
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"]\n'
"List (length 2)\n"
"+-----+------------------+\n"
"| Idx | Data |\n"
"+-----+------------------+\n"
"| 0 | bar |\n"
"| 1 | <dict, length 1> |\n"
"+-----+------------------+"
),
"Bye.",
]
mod_ut.Delver(self.test_obj).run()
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertListEqual(result_print_args, target_print_args)
def test_invalid_key_index(self):
"""Test an invalid index message is displayed"""
self.fake_input.side_effect = ["1", "q"]
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Invalid Index",
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Bye.",
]
mod_ut.Delver(self.test_obj).run()
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
def test_invalid_command(self):
"""Test an invalid command message is displayed"""
self.fake_input.side_effect = ["blah", "q"]
mod_ut.Delver(self.test_obj).run()
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Invalid command; please specify one of ['<key index>', u, q]",
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Bye.",
]
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
def test_advanced_navigation(self):
"""Test navigating deeper into a data structure and back out"""
self.fake_input.side_effect = ["0", "1", "0", "u", "0", "q"]
mod_ut.Delver(self.test_obj).run()
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"]\n'
"List (length 2)\n"
"+-----+------------------+\n"
"| Idx | Data |\n"
"+-----+------------------+\n"
"| 0 | bar |\n"
"| 1 | <dict, length 1> |\n"
"+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]\n'
"Dict (length 1)\n"
"+-----+-----+------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------+\n"
"| 0 | baz | 3 |\n"
"+-----+-----+------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]["baz"]\n'
"+-------+\n"
"| Value |\n"
"+-------+\n"
"| 3 |\n"
"+-------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]\n'
"Dict (length 1)\n"
"+-----+-----+------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------+\n"
"| 0 | baz | 3 |\n"
"+-----+-----+------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]["baz"]\n'
"+-------+\n"
"| Value |\n"
"+-------+\n"
"| 3 |\n"
"+-------+"
),
"Bye.",
]
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
if __name__ == "__main__":
unittest.main()
| [
37811,
26796,
7268,
5254,
329,
262,
1619,
332,
2891,
37811,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
4732,
1330,
4755,
355,
953,
62,
315,
628,
198,
4871,
6208,
35,
9954,
22203,
282,
7,
403,
7... | 1.765625 | 4,224 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
#import gitlab
import os
import sys
import getopt
import json
import requests
get_mr_title = False
project_id = ''
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:t:ti:p:b",["token=", "title", "project=", "branch="])
except getopt.GetoptError:
print('test.py [-c | --commit] [-t | --token {token}]')
sys.exit(2)
for opt, arg in opts:
#print('[DEBUG] {0} {1}'.format(opt, arg))
if opt == '-h':
print('[commit.py] -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-t", "--token"):
if arg is None:
raise ValueError('Token switch was specified, however no token was supplied.')
ci_job_token = arg
elif opt in ("-ti", "--title"):
get_mr_title = True
elif opt in ("-p", "--project"):
project_id = str(arg)
elif opt in ("-b", "--branch"):
git_branch = arg
# private token or personal token authentication
#gl = gitlab.Gitlab('https://gitlab.com', private_token=ci_job_token)
url = 'https://gitlab.com/api/v4/projects/' + project_id + '/merge_requests?state=opened&source_branch=' + git_branch
merge_requests = ""
try:
if os.environ['CI_JOB_TOKEN'] is not None:
headers = {'JOB_TOKEN': os.environ['CI_JOB_TOKEN']}
if os.environ['CI_JOB_TOKEN'] == ci_job_token:
headers = {'JOB_TOKEN': os.environ['CI_JOB_TOKEN']}
merge_requests = requests.get(url, headers=headers, data='')
merge_requests = merge_requests.json()
except:
pass
if not isinstance(merge_requests, list):
headers = {'PRIVATE-TOKEN': ci_job_token}
merge_requests = requests.get(url, headers=headers, data='')
merge_requests = merge_requests.json()
#print('\n\nmerge_requests=[-{0}-][]\n\n\n\n\n'.format(merge_requests))
#project_mrs = project.mergerequests.list()
#mrs = gl.mergerequests.list()
mr_title = 'failed to fetch Merge Request title'
mr_first_commit = ''
target_branch = ''
if isinstance(merge_requests, list):
if len(merge_requests) > 0:
for mr in merge_requests:
if mr['source_branch'] == git_branch and str(mr['target_project_id']) == str(project_id) and str(mr['state']) == 'opened':
mr_title = mr['title']
if get_mr_title:
print('{0}'.format(mr_title))
else:
print('ci: No Merge Request found, MR count "0"')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
2,
11748,
17606,
23912,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
651,
8738,
198,
11748,
33918,
198,
11... | 2.375127 | 981 |
# Third Party (PyPI) Imports
import requests
| [
2,
10467,
3615,
357,
20519,
11901,
8,
1846,
3742,
198,
11748,
7007,
628
] | 3.538462 | 13 |
import time
import requests
from bs4 import BeautifulSoup as bs
url= []
for page in range(1,9):
time.sleep(5)
source = 'https://www.ndtv.com/latest/page-{}'.format(page)
source = requests.get(source).text
soup = bs(source,'html5lib')
soup = soup.find('div',{'class': 'new_storylising'})
for i in soup.find_all("a"):
try:
if (i['href'].startswith('/') or "ndtv" in i['href']):
if i['href'] not in url:
url.append(i["href"])
else:
continue
except KeyError:
pass
total_news = 1;
for i in range(len(url)):
print(url[i])
for i in range(len(url)):
try:
r = requests.get(url[i])
title = bs(r.content, 'html5lib')
print(total_news,"Title = ",title.title.string)
news_div = title.find('div', {'class': 'ins_dateline'})
author = news_div.find('span', {'itemprop': 'name'})
print("Author = ",author.string)
date = title.find('span', {'itemprop': 'dateModified'})
print("Date = ",date.string)
article_div = title.find('div', {'id': 'ins_storybody'})
article_p = article_div.findAll('p')
print("Article = ")
for i in article_p:
try:
if "None" in i.string:
continue
else:
print(i.string)
except TypeError:
pass
total_news +=1
print("--!--=--!--=--!--=--!--=--!--=--!--=--!--=----!!!!!----=--!--=--!--=--!--=--!--=--!--=--!--=--!--")
except AttributeError:
pass | [
11748,
640,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
275,
82,
198,
6371,
28,
17635,
198,
1640,
2443,
287,
2837,
7,
16,
11,
24,
2599,
198,
220,
220,
220,
640,
13,
42832,
7,
20,
8,
198,
220,
220,
220,... | 2.233871 | 620 |
create_empty_blob = "SELECT lo_creat(-1)"
write_data_to_blob = "SELECT lo_put(CAST(:loid AS OID), :offset, :data)"
read_data_from_blob = "SELECT lo_get(CAST(:loid AS OID), :offset, :length)"
delete_blob = "SELECT lo_unlink(CAST(:loid AS OID))"
get_size_of_blob_function = """
CREATE OR REPLACE FUNCTION pg_temp.get_lo_size(loid INTEGER)
RETURNS BIGINT AS $lo_size$
DECLARE
file_descriptor INTEGER;
file_size BIGINT;
BEGIN
-- Open large object for reading.
-- Parameter "x'40000'" is equivalent to postgres large object mode "INV_READ"
-- which is necessary for method to work
file_descriptor := lo_open(CAST(loid AS OID), x'40000' :: INT);
-- Seek to the end
-- "Seek" command = "2"
PERFORM lo_lseek64(file_descriptor, 0, 2);
-- Fetch current file position - location of the last byte
file_size := lo_tell64(file_descriptor);
-- Close open file.
PERFORM lo_close(file_descriptor);
RETURN file_size;
END;
$lo_size$
LANGUAGE plpgsql;
"""
get_size_of_blob = "SELECT pg_temp.get_lo_size(:loid);"
| [
17953,
62,
28920,
62,
2436,
672,
796,
366,
46506,
2376,
62,
20123,
32590,
16,
16725,
198,
198,
13564,
62,
7890,
62,
1462,
62,
2436,
672,
796,
366,
46506,
2376,
62,
1996,
7,
44647,
7,
25,
75,
1868,
7054,
440,
2389,
828,
1058,
28968,
... | 2.469626 | 428 |
from web3._utils.threads import (
Timeout,
)
from web3.providers.eth_tester import (
EthereumTesterProvider,
)
| [
6738,
3992,
18,
13557,
26791,
13,
16663,
82,
1330,
357,
198,
220,
220,
220,
3862,
448,
11,
198,
8,
198,
6738,
3992,
18,
13,
15234,
4157,
13,
2788,
62,
4879,
353,
1330,
357,
198,
220,
220,
220,
20313,
51,
7834,
29495,
11,
198,
8,
... | 2.666667 | 45 |
# -*- coding: utf-8 -*-
# (C) 2016-2021 Muthiah Annamalai
#
# This file is part of 'open-tamil' package
# We generate unigram and bi-gram statistics for Tamil texts
#
import tamil
from ngram.LetterModels import Unigram
import codecs
import pprint
import copy
import operator
from functools import cmp_to_key
import sys
import glob
import os
if __name__ == "__main__":
run('plain_text','pm_bigram_sorted_042521.txt')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
357,
34,
8,
1584,
12,
1238,
2481,
337,
1071,
9520,
5506,
321,
282,
1872,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
705,
9654,
12,
83,
321,
346,
6,
5301,
198,
... | 2.788079 | 151 |
import grpc
from django.utils.functional import SimpleLazyObject
client = SimpleLazyObject(lambda:grpc.insecure_channel('127.0.0.1:8001'))
| [
11748,
1036,
14751,
201,
198,
6738,
42625,
14208,
13,
26791,
13,
45124,
1330,
17427,
43,
12582,
10267,
201,
198,
16366,
796,
17427,
43,
12582,
10267,
7,
50033,
25,
2164,
14751,
13,
259,
22390,
62,
17620,
10786,
16799,
13,
15,
13,
15,
... | 2.769231 | 52 |
import pybitflyer
import key
from chart.controllers import get_data
import logging
logger = logging.getLogger(__name__)
class BitFlayer_Order():
"""[summary] bitflyer に成り行き注文で売買注文を行うクラス
"""
def AvailableBalance(self):
"""[summary]get available balance from bitflyerapi.
Returns:
[type] dict : [description] like {"JPY": 50000, "BTC_JPY": 0.05} dict.
"""
b = get_data.Balance(self.api_key, self.api_secret, code=self.product_code)
balance_code = self.product_code.split("_")[0]
balance = b.GetBalance()
available_JPY = balance["JPY"]["available"]
available_CRP = balance[balance_code]["available"]
d = {"JPY": available_JPY, self.product_code: available_CRP}
return d
def AdjustSize(self, size=0.00000001):
"""[summary] 手数料込みで取り扱えるsizeを計算する。
Args:
size (float, optional): [description]. Defaults to 0.00000001.
Returns:
[type]float : [description] 1 satoshi 刻み
"""
tax = self.api.gettradingcommission(product_code=self.product_code)["commission_rate"]
useable = 1.0 - tax
size = size * useable
size = int(size * 100000000) / 100000000
return size
def BUY(self, currency, use_parcent=0.9):
"""[summary] 買いたい額を円で指定して、bitflyer から成り行き注文を行う。
売買が成立するとIDを返し、失敗するとNone を返す。
Args:
currency ([type]): [description]
use_parcent (float, optional): [description]. Defaults to 0.9.
Returns:
[type]dict : [description] like{child_order_acceptance_id:xxxxxxxx} or None
"""
ticker = get_data.Ticker(self.api_key, self.api_secret, self.product_code).ticker
price = ticker["best_ask"]
usecurrency = currency * use_parcent
size = 1 / (price / usecurrency)
size = self.AdjustSize(size=size)
size = int(size * 100000000) / 100000000
buy_code = self.api.sendchildorder(
product_code=self.product_code,
child_order_type="MARKET",
side="BUY", size=size,
minute_to_expire=10,
time_in_force="GTC")
if "child_order_acceptance_id" in buy_code.keys():
return buy_code
else:
logger.error(Exception("Cant BUY"))
print("Cant BUY")
return None
def SELL(self, size=0.00000001):
"""[summary] 売りたい量のbitcoinをbitcoinの枚数で指定して、bitflyer から成り行き注文を行う。
売買が成立するとIDを返し、失敗するとNone を返す。
Args:
currency ([type]): [description]
use_parcent (float, optional): [description]. Defaults to 0.9.
code (str, optional): [description]. Defaults to "BTC_JPY".
Returns:
[type]dict : [description] like{child_order_acceptance_id:xxxxxxxx} or None
"""
size = self.AdjustSize(size=size)
size = int(size * 100000000) / 100000000
sell_code = self.api.sendchildorder(
product_code=self.product_code,
child_order_type="MARKET",
side="SELL", size=size,
minute_to_expire=10,
time_in_force="GTC")
if "child_order_acceptance_id" in sell_code.keys():
return sell_code
else:
logger.error(Exception("Cant SELL"))
print("Cant SELL")
return None
| [
11748,
12972,
2545,
12254,
263,
201,
198,
201,
198,
11748,
1994,
201,
198,
201,
198,
6738,
8262,
13,
3642,
36667,
1330,
651,
62,
7890,
201,
198,
11748,
18931,
201,
198,
201,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
... | 1.898858 | 1,839 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## Autor: David Ochoa
import numpy as np
from vispy.scene.visuals import Line
from pymouse import PyMouse
CV = np.arange(0, 2.05, 0.05, dtype=np.float32) * 3.14159
ZCV = np.zeros(CV.size, dtype=np.float32)
C_xy = np.array([np.cos(CV), np.sin(CV), ZCV]).T
C_xz = np.array([np.cos(CV), ZCV, np.sin(CV)]).T
C_yz = np.array([ZCV, np.cos(CV), np.sin(CV)]).T
sphere_pt = np.concatenate([C_xy, C_xz, C_yz])
class Mouse_trace:
"""Mouse tracing Class. It uses vispy to visualization."""
def set_bound(self, boundaries):
"""Updates the boundaries."""
self.bound = boundaries
self.sizexyz = np.abs(boundaries[:,1] - boundaries[:,0])
def step(self, time_step):
"""Calculate the new position and speed."""
mpos = self.mouse.position()
self.pos = np.asarray([mpos[0], self.bound[1,1] - mpos[1], 0])
self.update_visual()
def init_visual(self, view):
"""Initialize the object visual."""
self.trace = np.repeat(self.pos, self.tail_steps).reshape((3,self.tail_steps)).T
pos = np.concatenate([sphere_pt * self.rad + self.pos, self.trace])
self.visual = Line(pos = pos, color=self.color)
view.add(self.visual)
def update_visual(self):
"""Updates the object visual."""
self.trace[1:] = self.trace[0:-1]
self.trace[0] = self.pos
pos = np.concatenate([sphere_pt * self.rad + self.pos, self.trace])
self.visual.set_data(pos = pos)
def shake(self):
"""Inverts the z position and gives the ball a random velocity."""
pass
if __name__ == '__main__':
print(Ball_trace.__doc__)
exit()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2235,
5231,
273,
25,
3271,
440,
6679,
64,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1490,
9078,
13,
29734,
13,... | 2.262032 | 748 |
# coding: utf8
import base64
import codecs
import hashlib
import json
import math
import re
import string
import sys
import uuid
import webbrowser
import datetime
import sublime
import sublime_plugin
try:
from .encodingutils.escape_table import (
html_escape_table,
html5_escape_table,
html_reserved_list,
xml_escape_table
)
except ValueError:
from encodingutils.escape_table import (
html5_escape_table,
html_escape_table,
html_reserved_list,
xml_escape_table
)
try:
import urllib.parse
quote_plus = urllib.parse.quote_plus
unquote_plus = urllib.parse.unquote_plus
except ImportError:
import urllib
try:
unichr(32)
except NameError:
CSS = '''
div.sub-enc_utils { padding: 10px; margin: 0; }
.sub-enc_utils h1, .sub-enc_utils h2, .sub-enc_utils h3,
.sub-enc_utils h4, .sub-enc_utils h5, .sub-enc_utils h6 {
{{'.string'|css}}
}
.sub-enc_utils blockquote { {{'.comment'|css}} }
.sub-enc_utils a { text-decoration: none; }
'''
frontmatter = {
"markdown_extensions": [
"markdown.extensions.admonition",
"markdown.extensions.attr_list",
"markdown.extensions.def_list",
"markdown.extensions.nl2br",
# Smart quotes always have corner cases that annoy me, so don't bother with them.
{"markdown.extensions.smarty": {"smart_quotes": False}},
"pymdownx.extrarawhtml",
"pymdownx.keys",
{"pymdownx.escapeall": {"hardbreak": True, "nbsp": True}},
# Sublime doesn't support superscript, so no ordinal numbers
{"pymdownx.smartsymbols": {"ordinal_numbers": False}}
]
}
class EncDocCommand(sublime_plugin.WindowCommand):
"""Open doc page."""
re_pkgs = re.compile(r'^Packages')
def on_navigate(self, href):
"""Handle links."""
if href.startswith('sub://Packages'):
sublime.run_command('open_file', {"file": self.re_pkgs.sub('${packages}', href[6:])})
else:
webbrowser.open_new_tab(href)
def run(self, page):
"""Open page."""
try:
# import mdpopups
# import pymdownx
has_phantom_support = (mdpopups.version() >= (1, 10, 0)) and (int(sublime.version()) >= 3124)
fmatter = mdpopups.format_frontmatter(frontmatter) if pymdownx.version_info[:3] >= (4, 3, 0) else ''
except Exception:
fmatter = ''
has_phantom_support = False
if not has_phantom_support:
sublime.run_command('open_file', {"file": page})
else:
text = sublime.load_resource(page.replace('${packages}', 'Packages'))
view = self.window.new_file()
view.set_name('Sublime Encoding Utils - Quick Start')
view.settings().set('gutter', False)
view.settings().set('word_wrap', False)
if has_phantom_support:
mdpopups.add_phantom(
view,
'quickstart',
sublime.Region(0),
fmatter + text,
sublime.LAYOUT_INLINE,
css=CSS,
wrapper_class="sub-notify",
on_navigate=self.on_navigate
)
else:
view.run_command('insert', {"characters": text})
view.set_read_only(True)
view.set_scratch(True)
# def getSelection(self):
# text = []
# if View().sel():
# for region in View().sel():
# if region.empty():
# text.append(View().substr(View().line(region)))
# else:
# text.append(View().substr(region))
# return text
| [
2,
19617,
25,
3384,
69,
23,
198,
11748,
2779,
2414,
198,
11748,
40481,
82,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
11748,
10688,
198,
11748,
302,
198,
11748,
4731,
198,
11748,
25064,
198,
11748,
334,
27112,
198,
11748,
3992,
40... | 2.082102 | 1,827 |
import time
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import MultiTaskLasso as MultiTaskLasso_sk
from sparseglm.estimators import MultiTaskLasso
from sparseglm.utils import make_correlated_data, compute_alpha_max
n_samples = 100
n_features = 3000
n_tasks = 80
snr = 2
corr = 0.7
density = 0.1
tol = 1e-9
reg = 0.1
X, Y, _ = make_correlated_data(
n_samples=n_samples,
n_features=n_features,
n_tasks=n_tasks,
corr=corr,
snr=snr,
density=density,
random_state=0,
)
X_sparse = sp.csc_matrix(X * np.random.binomial(1, 0.1, X.shape))
alpha_max = compute_alpha_max(X, Y)
estimator_sk = MultiTaskLasso_sk(
alpha_max * reg, fit_intercept=False, tol=tol, max_iter=10 ** 6
)
estimator_rl = MultiTaskLasso(alpha_max * reg, tol=tol, verbose=False)
print("Fitting dense matrices...")
coef_sk, duration_sk = time_estimator(estimator_sk, X, Y)
coef_rl, duration_rl = time_estimator(estimator_rl, X, Y)
np.testing.assert_allclose(coef_sk, coef_rl, atol=1e-5)
print("Fitting sparse matrices...")
coef_sk_sparse, duration_sk_sparse = time_estimator(
estimator_sk, X_sparse.toarray(), Y
)
coef_rl_sparse, duration_rl_sparse = time_estimator(estimator_rl, X_sparse, Y)
np.testing.assert_allclose(coef_sk_sparse, coef_rl_sparse, atol=1e-5)
print("=" * 5 + " RESULTS " + "=" * 5)
print(f"[DENSE] Scikit-learn :: {duration_sk} s")
print(f"[DENSE] SparseGLM :: {duration_rl} s")
print("--" * 5)
print(f"[SPARSE] Scikit-learn :: {duration_sk_sparse} s")
print(f"[SPARSE] SparseGLM :: {duration_rl_sparse} s")
| [
11748,
640,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
198,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
15237,
25714,
43,
28372,
355,
15237,
25714,
43,
28372,
62,
8135,
198,
673... | 2.287172 | 686 |
import math
import os
| [
11748,
10688,
198,
11748,
28686,
198,
220,
220,
220,
220
] | 2.6 | 10 |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Second order CASSCF
'''
import sys
import time
import copy
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.mcscf import casci, mc1step
from pyscf.mcscf.casci import get_fock, cas_natorb, canonicalize
from pyscf.mcscf import mc_ao2mo
from pyscf.mcscf import chkfile
from pyscf import ao2mo
from pyscf import scf
from pyscf.scf import ciah
from pyscf import fci
# gradients, hessian operator and hessian diagonal
u, ci_kf = extract_rotation(casscf, dr, u, ci_kf)
log.debug(' tot inner=%d |g|= %4.3g (%4.3g %4.3g) |u-1|= %4.3g |dci|= %4.3g',
stat.imic, norm_gall, norm_gorb, norm_gci,
numpy.linalg.norm(u-numpy.eye(nmo)),
numpy.linalg.norm(ci_kf-ci0))
return u, ci_kf, norm_gkf, stat, dxi
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
ci0=None, callback=None, verbose=logger.NOTE, dump_chk=True):
'''CASSCF solver
'''
log = logger.new_logger(casscf, verbose)
cput0 = (time.clock(), time.time())
log.debug('Start newton CASSCF')
if callback is None:
callback = casscf.callback
mo = mo_coeff
nmo = mo_coeff.shape[1]
#TODO: lazy evaluate eris, to leave enough memory for FCI solver
eris = casscf.ao2mo(mo)
e_tot, e_ci, fcivec = casscf.casci(mo, ci0, eris, log, locals())
if casscf.ncas == nmo and not casscf.internal_rotation:
if casscf.canonicalization:
log.debug('CASSCF canonicalization')
mo, fcivec, mo_energy = casscf.canonicalize(mo, fcivec, eris, False,
casscf.natorb, verbose=log)
return True, e_tot, e_ci, fcivec, mo, mo_energy
casdm1 = casscf.fcisolver.make_rdm1(fcivec, casscf.ncas, casscf.nelecas)
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(tol)
logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
conv_tol_ddm = conv_tol_grad * 3
conv = False
totmicro = totinner = 0
norm_gorb = norm_gci = -1
de, elast = e_tot, e_tot
dr0 = None
t2m = t1m = log.timer('Initializing newton CASSCF', *cput0)
imacro = 0
tot_hop = 0
tot_kf = 0
while not conv and imacro < casscf.max_cycle_macro:
imacro += 1
u, fcivec, norm_gall, stat, dr0 = \
update_orb_ci(casscf, mo, fcivec, eris, dr0, conv_tol_grad*.3, verbose=log)
tot_hop += stat.tot_hop
tot_kf += stat.tot_kf
t2m = log.timer('update_orb_ci', *t2m)
eris = None
mo = casscf.rotate_mo(mo, u, log)
eris = casscf.ao2mo(mo)
t2m = log.timer('update eri', *t2m)
e_tot, e_ci, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
log.timer('CASCI solver', *t2m)
t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
de, elast = e_tot - elast, e_tot
if (abs(de) < tol and norm_gall < conv_tol_grad):
conv = True
if dump_chk:
casscf.dump_chk(locals())
if callable(callback):
callback(locals())
if conv:
log.info('newton CASSCF converged in %d macro (%d KF %d Hx) steps',
imacro, tot_kf, tot_hop)
else:
log.info('newton CASSCF not converged, %d macro (%d KF %d Hx) steps',
imacro, tot_kf, tot_hop)
casdm1 = casscf.fcisolver.make_rdm1(fcivec, casscf.ncas, casscf.nelecas)
if casscf.canonicalization:
log.info('CASSCF canonicalization')
mo, fcivec, mo_energy = \
casscf.canonicalize(mo, fcivec, eris, False, casscf.natorb, casdm1, log)
if casscf.natorb: # dump_chk may save casdm1
occ, ucas = casscf._eig(-casdm1, ncore, nocc)[0]
casdm1 = -occ
if dump_chk:
casscf.dump_chk(locals())
log.timer('newton CASSCF', *cput0)
return conv, e_tot, e_ci, fcivec, mo, mo_energy
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
import pyscf.fci
from pyscf.mcscf import addons
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.22013929407)
mc = CASSCF(m, 4, (3,1))
mc.verbose = 4
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
emc = kernel(mc, m.mo_coeff, verbose=4)[1]
print(emc - -15.950852049859-mol.energy_nuc())
mol.atom = [
['H', ( 5.,-1. , 1. )],
['H', ( 0.,-5. ,-2. )],
['H', ( 4.,-0.5 ,-3. )],
['H', ( 0.,-4.5 ,-1. )],
['H', ( 3.,-0.5 ,-0. )],
['H', ( 0.,-3. ,-1. )],
['H', ( 2.,-2.5 , 0. )],
['H', ( 1., 1. , 3. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.62638367550087, emc - -3.6268060528596635)
mc = CASSCF(m, 4, (3,1))
mc.verbose = 4
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
emc = kernel(mc, m.mo_coeff, verbose=4)[1]
print(emc - -3.62638367550087)
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = CASSCF(m, 6, 4)
mc.fcisolver = pyscf.fci.solver(mol)
mc.verbose = 4
mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
emc = mc.mc1step(mo)[0]
print(ehf, emc, emc-ehf)
#-76.0267656731 -76.0873922924 -0.0606266193028
print(emc - -76.0873923174, emc - -76.0926176464)
mc = CASSCF(m, 6, (3,1))
mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
mc.verbose = 4
emc = mc.mc1step(mo)[0]
#mc.analyze()
print(emc - -75.7155632535814)
mc.internal_rotation = True
emc = mc.mc1step(mo)[0]
print(emc - -75.7155632535814)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
6434,
25,
1195,
320,
278,
3825,
1279,
418,
343,
457,
13,
19155,
31,
14816,
13,
785,
29,
198,
2,
198,
198,
7061,
6,
198,
12211,
1502,
35106,
6173,
37,
198,
7061,
6,
198... | 1.807785 | 3,751 |
import torch
import torchvision
from torchvision import transforms, datasets
import torchvision.transforms as standard_transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
import sys
# net = SegNetWithSkipConnection()
# net.zero_grad()
# DATA_PATH = '/home/sur/SemSeg/cityscape/'
# train = datasets.Cityscapes(DATA_PATH, split = 'train', mode = 'fine', target_type = 'semantic',transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# test = datasets.Cityscapes(DATA_PATH, split = 'test', mode = 'fine', target_type = 'semantic' ,transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# val = datasets.Cityscapes(DATA_PATH, split = 'val', mode = 'fine', target_type = 'semantic' ,transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# trainset = torch.utils.data.DataLoader(train, batch_size=2, shuffle=True)
# testset = torch.utils.data.DataLoader(test, batch_size=2, shuffle=False)
# valset = torch.utils.data.DataLoader(val, batch_size=2, shuffle=True)
# for data in trainset:
# X, y = data
# print(X.size(),y.size())
# output = net(X)
# break
| [
11748,
28034,
198,
11748,
28034,
10178,
198,
6738,
28034,
10178,
1330,
31408,
11,
40522,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
3210,
62,
7645,
23914,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
... | 3.022945 | 523 |
from typing import Iterator
from cog import BasePredictor, Path
| [
6738,
19720,
1330,
40806,
1352,
198,
198,
6738,
43072,
1330,
7308,
47,
17407,
273,
11,
10644,
628
] | 3.882353 | 17 |
# -*- coding: utf-8 -*-
## \package wizbin.launchers
# MIT licensing
# See: docs/LICENSE.txt
import os, shutil, wx
from dbr.language import GT
from dbr.log import DebugEnabled
from dbr.log import Logger
from fileio.fileio import ReadFile
from fileio.fileio import WriteFile
from globals.ident import btnid
from globals.ident import chkid
from globals.ident import inputid
from globals.ident import listid
from globals.ident import pgid
from globals.ident import txtid
from globals.strings import GS
from globals.strings import TextIsEmpty
from globals.tooltips import SetPageToolTips
from input.list import ListCtrl
from input.select import ComboBox
from input.select import ComboBoxESS
from input.text import TextArea
from input.text import TextAreaESS
from input.text import TextAreaPanel
from input.toggle import CheckBox
from input.toggle import CheckBoxESS
from ui.button import CreateButton
from ui.dialog import ConfirmationDialog
from ui.dialog import ShowDialog
from ui.dialog import ShowErrorDialog
from ui.layout import BoxSizer
from ui.style import layout as lyt
from ui.textpreview import TextPreview
from wiz.helper import GetAllTypeFields
from wiz.helper import GetField
from wiz.helper import GetMainWindow
from wiz.wizard import WizardPage
## Page for creating a system menu launcher
## Constructor
#
# \param parent
# Parent <b><i>wx.Window</i></b> instance
## Retrieves page data for export
## Formats the launcher information for export
## Retrieves the filename to be used for the menu launcher
## TODO: Doxygen
## TODO: Doxygen
## Handles button event from clear categories button
## Saves launcher information to file
#
# FIXME: Might be problems with reading/writing launchers (see OnLoadLauncher)
# 'Others' field not being completely filled out.
## Loads a .desktop launcher's data
#
# FIXME: Might be problems with reading/writing launchers (see OnExportLauncher)
# 'Others' field not being completely filled out.
## TODO: Doxygen
## TODO: Doxygen
## Enables/Disables fields for creating a launcher
## Resets all fields to default values
## TODO: Doxygen
## Fills out launcher information from loaded file
#
# \param data
# Information to fill out menu launcher fields
# \param enabled
# \b \e bool : Launcher will be flagged for export if True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2235,
3467,
26495,
266,
528,
8800,
13,
38722,
3533,
198,
198,
2,
17168,
15665,
198,
2,
4091,
25,
34165,
14,
43,
2149,
24290,
13,
14116,
628,
198,
11748,
28686,
11... | 3.07722 | 777 |
"""
pycmark_vfm.readers
~~~~~~~~~~~~~~~~~~~
Vivliostyle Flavored Markdown readers for docutils.
:copyright: Copyright 2020 by Takeshi KOMIYA
:license: Apache License 2.0, see LICENSE for details.
"""
import re
from pycmark.readers import LineReaderDecorator
class WalledBlockReader(LineReaderDecorator):
"""A reader for walled blocks."""
pattern = re.compile(r'^ {0,3}={3,}\s*$')
def fetch(self, relative: int = 0, **kwargs) -> str:
"""Returns a line until the end of walled block."""
line = self.reader.fetch(relative, **kwargs)
if kwargs.get('allow_endmarker') is True:
return line
elif self.pattern.match(line):
raise IOError
else:
return line
def consume_endmarker(self) -> None:
"""Consumes the end marker of wall block."""
line = self.fetch(1, allow_endmarker=True)
if self.pattern.match(line):
self.step(1)
| [
37811,
198,
220,
220,
220,
12972,
66,
4102,
62,
85,
38353,
13,
961,
364,
198,
220,
220,
220,
220,
27156,
4907,
93,
628,
220,
220,
220,
25313,
4528,
455,
2349,
33026,
1850,
2940,
2902,
7183,
329,
2205,
26791,
13,
628,
220,
220,
220,
... | 2.383292 | 407 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-21 14:42
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
2177,
12,
2931,
12,
2481,
1478,
25,
3682,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
GRAB_SPIDER_CONFIG = {
'global': {
'spider_modules': ['tests.script_crawl'],
},
}
| [
38,
3861,
33,
62,
4303,
41237,
62,
10943,
16254,
796,
1391,
198,
220,
220,
220,
705,
20541,
10354,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
2777,
1304,
62,
18170,
10354,
37250,
41989,
13,
12048,
62,
66,
13132,
6,
4357,
198... | 1.96 | 50 |
import functools
from marshmallow import Schema, fields
requiredString = functools.partial(fields.String, required=True)
requiredInteger = functools.partial(fields.Integer, required=True)
requiredBool = functools.partial(fields.Bool, required=True)
requiredFloat = functools.partial(fields.Float, required=True)
requiredEmail = functools.partial(fields.Email, required=True)
requiredDate = functools.partial(fields.Date, required=True)
requiredDateTime = functools.partial(fields.DateTime, required=True)
requiredNested = functools.partial(fields.Nested, required=True)
class AddBikesSchema(Schema):
"""Validate the schema for the add-bikes requests."""
availability_id = requiredInteger()
bikes = fields.List(fields.String(), required=True)
class ReplaceBikesSchema(Schema):
"""Validate the schema for the replace-bikes requests."""
availability_id = requiredInteger()
bike_picked = requiredString()
bike_returned = requiredString()
| [
11748,
1257,
310,
10141,
198,
198,
6738,
22397,
42725,
1330,
10011,
2611,
11,
7032,
198,
198,
35827,
10100,
796,
1257,
310,
10141,
13,
47172,
7,
25747,
13,
10100,
11,
2672,
28,
17821,
8,
198,
35827,
46541,
796,
1257,
310,
10141,
13,
4... | 3.312081 | 298 |
import boto3
| [
11748,
275,
2069,
18,
628,
198
] | 2.5 | 6 |
import pandas as pd
import numpy as np
vim= pd.read_json("MOCK_DATA.json")
con= vim.to_csv("MOCK_DATA.csv") | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
31124,
28,
279,
67,
13,
961,
62,
17752,
7203,
44,
11290,
62,
26947,
13,
17752,
4943,
198,
1102,
28,
43907,
13,
1462,
62,
40664,
7203,
44,
11290,
62,
2694... | 2.347826 | 46 |
# !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2020/1/17 22:11
# @Author : Fangyang
# @Software : PyCharm
from enum import Enum, unique
class KBarType(Enum):
'''
K 线种类
0 - 5 分钟K 线
1 - 15 分钟K 线
2 - 30 分钟K 线
3 - 1 小时K 线
4 - 日K 线
5 - 周K 线
6 - 月K 线
7 - 1 分钟
8 - 1 分钟K 线
9 - 日K 线
10 - 季K 线
11 - 年K 线
'''
KLINE_TYPE_5MIN = MINUTE_5 = 0
KLINE_TYPE_15MIN = MINUTE_15 = 1
KLINE_TYPE_30MIN = MINUTE_30 = 2
KLINE_TYPE_1HOUR = HOUR = 3
KLINE_TYPE_DAILY = DAILY = 4
KLINE_TYPE_WEEKLY = WEEKLY = 5
KLINE_TYPE_EXHQ_1MIN = MINUTE = 7
KLINE_TYPE_1MIN = 8
# vnpy.trade.constant 的 Interval 枚举类
# 在 pytdxLoader 读取数据的时候, 将vnpy界面拿到的参数转成pytdx
# MINUTE_5 = 0
# MINUTE_15 = 1
# MINUTE_30 = 2
# HOUR = 3
KLINE_TYPE_MONTHLY = 6
KLINE_TYPE_RI_K = 9
KLINE_TYPE_3MONTH = 10
KLINE_TYPE_YEARLY = 11
class FutureMarketCode(Enum):
'''
使用pytdx获取
data_df = ex_api.to_df(ex_api.get_markets())
'''
CFFEX = 47 # 中国金融期货交易所(期货), 期权是 7
SHFE = 30 # 上海期货交易所
CZCE = 28 # 郑州商品交易所
DCE = 29 # 大连商品交易所
INE = 30 # 上海国际能源交易中心
if __name__ == '__main__':
x = FutureMarketCode['SHFE'].value
print(1)
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
2488,
27354,
8079,
1058,
12131,
14,
16,
14,
1558,
2534,
25,
1157,
198,
2,
2488,
13838,
220,
220,
1058,
... | 1.478923 | 854 |
""" This module is used for setting an intial test configs and values for
the rdfframework """
import datetime
import multiprocessing as mp
import multiprocessing.managers as managers
import pdb
from rdfframework.utilities import SimpleMapReduce
from rdfframework.datatypes import pyrdf, BaseRdfDataType, Uri
def convert_results(data, **kwargs):
""" converts the results of a query to RdfDatatype instances
args:
data: a list of triples
"""
if kwargs.get("multiprocessing", False):
manager = SharedManager()
manager.register("BaseRdfDataType", BaseRdfDataType)
manager.register("Uri", Uri)
data_l = len(data)
group_size = data_l // pool_size
if data_l % pool_size:
group_size += 1
split_data = [data[i:i + group_size]
for i in range(0, data_l, group_size)]
output = manager.Queue()
# output = manager.list()
# output_data = POOL.map(convert_row, split_data)
workers = [mp.Process(target=convert_batch, args=(item, output,))
for item in split_data]
for worker in workers:
# worker.Daemon = True
worker.start()
results = []
while True:
running = any(p.is_alive() for p in workers)
while not output.empty():
results += output.get()
if not running:
break
print("Finished - workers not stoped")
for worker in workers:
worker.join()
# pdb.set_trace()
# return output
for i in range(output.qsize()):
results += output.get()
return results
else:
return [{key:pyrdf(value) for key, value in row.items()}
for row in data]
pool_size = mp.cpu_count() - 1 or 1
DATA = [
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'}},
{'o': {'type': 'literal', 'value': 'Chris Clark'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Collection'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'}},
{'o': {'type': 'literal', 'value': 'Veterans Remember'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'}},
{'o': {'type': 'literal', 'value': '970west'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'}},
{'o': {'type': 'literal', 'value': '970west -- veterans remember'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'}},
{'o': {'type': 'literal', 'value': 'wwii'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'}},
{'o': {'type': 'literal', 'value': 'Charlie Blackmer'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'}},
{'o': {'type': 'literal', 'value': 'Laura Mullenix'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Place'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3186684'}},
{'o': {'type': 'literal', 'value': 'Grand Junction, Colorado'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3186684'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Summary'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3193326'}},
{'o': {'type': 'literal',
'value': 'Interview with Mesa County Libraries production team.'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3193326'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/instanceOf'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/partOf'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'bnode', 'value': 't3186684'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'bnode', 'value': 't3193326'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/summary'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'datatype': 'http://www.w3.org/2001/XMLSchema#dateTime',
'type': 'literal',
'value': '2018-03-28T21:01:01.049Z'},
'p': {'type': 'uri',
'value': 'http://knowledgelinks.io/ns/data-structures/esIndexTime'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Work'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/MovingImage'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Local'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3190361'}},
{'o': {'type': 'literal', 'value': 'mesa:48'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3190361'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3192025'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Manufacture'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3192025'}},
{'o': {'type': 'literal', 'value': '2017-08-16T21:06:55.434652'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/generationDate'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/GenerationProcess'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'literal',
'value': 'Generated by BIBCAT version i1.13.0 from KnowledgeLinks.io',
'xml:lang': 'en'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3194722'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Manufacture'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3194722'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/mainTitle'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Title'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Carrier'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3196572'}},
{'o': {'type': 'literal', 'value': 'Moving Image'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3196572'}},
{'o': {'type': 'uri', 'value': 'https://marmot.org/'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3199929'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Distribution'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3199929'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/CoverArt'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3200840'}},
{'o': {'type': 'uri',
'value': 'https://islandora.marmot.org/islandora/object/mesa:48/datastream/TN/view'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3200840'}},
{'o': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3202252'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Publication'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3202252'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Organization'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'uri', 'value': 'http://schema.org/NGO'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'literal', 'value': 'Marmot Library Network', 'xml:lang': 'en'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Organization'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'uri', 'value': 'http://schema.org/Library'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'literal', 'value': 'Mesa County Libraries', 'xml:lang': 'en'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'}},
{'o': {'type': 'literal', 'value': 'Mesa County Libraries'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'}},
{'o': {'type': 'literal', 'value': 'Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'}},
{'o': {'type': 'bnode', 'value': 't3194298'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/generationProcess'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3196122'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/title'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3200840'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/coverArt'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3192025'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3194722'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3199929'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3202252'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3196572'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/carrier'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3190361'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/identifiedBy'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Instance'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}}]
if __name__ == '__main__':
time_test(DATA)
time_test(DATA, multiprocessing=True)
time_test(DATA)
time_test(DATA, multiprocessing=True)
from rdfframework.sparql import get_all_item_data
from rdfframework.connections import Blazegraph
from rdfframework.datatypes import RdfNsManager
RdfNsManager({"bf": "http://id.loc.gov/ontologies/bibframe/"})
data_iri = "<https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008>"
conn = Blazegraph(namespace="plain2peak")
data = get_all_item_data(data_iri, conn)
print("data count: ", len(data))
time_test(data)
time_test(data, multiprocessing=True)
| [
37811,
220,
220,
220,
770,
8265,
318,
973,
329,
4634,
281,
493,
498,
1332,
4566,
82,
290,
3815,
329,
198,
1169,
374,
67,
487,
859,
6433,
37227,
198,
11748,
4818,
8079,
198,
11748,
18540,
305,
919,
278,
355,
29034,
198,
11748,
18540,
... | 2.069045 | 10,037 |
import os.path as osp
from functools import reduce
import mmcv
import numpy as np
from mmcv.utils import print_log
from torch.utils.data import Dataset
from mmseg.core import mean_iou
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for semantic segmentation.
An example of file structure is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
The img/gt_semantic_seg pair of CustomDataset should be of the same
except suffix. A valid img/gt_semantic_seg filename pair should be like
``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
in the suffix). If split is given, then ``xxx`` is specified in txt file.
Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img_dir (str): Path to image directory
img_suffix (str): Suffix of images. Default: '.jpg'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
"""
CLASSES = None
PALETTE = None
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_file = osp.join(img_dir, img_name + img_suffix)
img_info = dict(filename=img_file)
if ann_dir is not None:
seg_map = osp.join(ann_dir, img_name + seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
else:
for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
img_file = osp.join(img_dir, img)
img_info = dict(filename=img_file)
if ann_dir is not None:
seg_map = osp.join(ann_dir,
img.replace(img_suffix, seg_map_suffix))
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
return img_infos
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
else:
return self.prepare_train_img(idx)
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys intorduced by
piepline.
"""
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
pass
def get_gt_seg_maps(self):
"""Get ground truth segmentation maps for evaluation."""
gt_seg_maps = []
for img_info in self.img_infos:
gt_seg_map = mmcv.imread(
img_info['ann']['seg_map'], flag='unchanged', backend='pillow')
if self.reduce_zero_label:
# avoid using underflow conversion
gt_seg_map[gt_seg_map == 0] = 255
gt_seg_map = gt_seg_map - 1
gt_seg_map[gt_seg_map == 254] = 255
gt_seg_maps.append(gt_seg_map)
return gt_seg_maps
def evaluate(self, results, metric='mIoU', logger=None, **kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mIoU']
if metric not in allowed_metrics:
raise KeyError('metric {} is not supported'.format(metric))
eval_results = {}
gt_seg_maps = self.get_gt_seg_maps()
if self.CLASSES is None:
num_classes = len(
reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
else:
num_classes = len(self.CLASSES)
all_acc, acc, iou = mean_iou(
results, gt_seg_maps, num_classes, ignore_index=self.ignore_index)
summary_str = ''
summary_str += 'per class results:\n'
line_format = '{:<15} {:>10} {:>10}\n'
summary_str += line_format.format('Class', 'IoU', 'Acc')
if self.CLASSES is None:
class_names = tuple(range(num_classes))
else:
class_names = self.CLASSES
for i in range(num_classes):
iou_str = '{:.2f}'.format(iou[i] * 100)
acc_str = '{:.2f}'.format(acc[i] * 100)
summary_str += line_format.format(class_names[i], iou_str, acc_str)
summary_str += 'Summary:\n'
line_format = '{:<15} {:>10} {:>10} {:>10}\n'
summary_str += line_format.format('Scope', 'mIoU', 'mAcc', 'aAcc')
iou_str = '{:.2f}'.format(np.nanmean(iou) * 100)
acc_str = '{:.2f}'.format(np.nanmean(acc) * 100)
all_acc_str = '{:.2f}'.format(all_acc * 100)
summary_str += line_format.format('global', iou_str, acc_str,
all_acc_str)
print_log(summary_str, logger)
eval_results['mIoU'] = np.nanmean(iou)
eval_results['mAcc'] = np.nanmean(acc)
eval_results['aAcc'] = all_acc
return eval_results
| [
11748,
28686,
13,
6978,
355,
267,
2777,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
11748,
8085,
33967,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
8085,
33967,
13,
26791,
1330,
3601,
62,
6404,
198,
6738,
28034,
13,
26791,
13,... | 2.062974 | 4,351 |
from unittest import TestCase
from Ship import Ship, Repair, Battleship, Corvette, Alignment
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
16656,
1330,
16656,
11,
28912,
11,
25467,
1056,
11,
49105,
11,
978,
16747,
628
] | 4.086957 | 23 |
from toolz import curry
@curry
def nth(n, xs):
"""Returns the nth element of the given list or string. If n is negative the
element at index length + n is returned"""
try:
return xs[n]
except (IndexError, TypeError):
if type(xs) is str:
return ""
return None
| [
6738,
2891,
89,
1330,
34611,
628,
198,
31,
66,
16682,
198,
4299,
299,
400,
7,
77,
11,
2124,
82,
2599,
198,
220,
220,
220,
37227,
35561,
262,
299,
400,
5002,
286,
262,
1813,
1351,
393,
4731,
13,
1002,
299,
318,
4633,
262,
198,
220,... | 2.445313 | 128 |
import scipy.io as sio
import time
import os
import numpy as np
import gym
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Subtract, Concatenate, BatchNormalization
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import tensorflow as tf
nowtime = time.strftime("%y_%m_%d_%H",time.localtime())
ENV_NAME = 'uav-D2Ddy-v0'
# ENV_NAME = 'discrete-action-uav-stable-2d-v0'
if not os.path.exists(ENV_NAME+'-'+nowtime):
os.mkdir(ENV_NAME+'-'+nowtime)
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
policy_list = ['maxG', 'minSNR', 'cline']
# Next, we build a very simple model regardless of the dueling architecture
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Dense(1024,
kernel_regularizer=regularizers.l2(0.01),
bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
kernel_regularizer=regularizers.l2(0.01),
bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
kernel_regularizer=regularizers.l2(0.01),
bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
kernel_regularizer=regularizers.l2(0.01),
bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(256,
kernel_regularizer=regularizers.l2(0.01),
bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(256,
kernel_regularizer=regularizers.l2(0.1),
bias_regularizer=regularizers.l2(0.1))(flattened_observation)
x = Activation('relu')(x)
x = Dense(128,
kernel_regularizer=regularizers.l2(0.01),
bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
x = Dense(nb_actions)(x)
x = Activation('linear')(x)
model = Model(inputs=[observation_input], outputs=[x])
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# enable the dueling networ
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
enable_dueling_network=True, dueling_type='avg', target_model_update=1e-3, policy=policy)
dqn.compile(Adam(lr=1e-4), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
history = dqn.learning(env, Given_policy, policy_list, nb_steps=5e6, visualize=False, log_interval=1000, verbose=2,
nb_max_episode_steps=1000, imitation_leaning_time=0, reinforcement_learning_time=1e10)
sio.savemat(ENV_NAME+'-'+nowtime+'/fit.mat', history.history)
# After training is done, we save the final weights.
dqn.save_weights(ENV_NAME+'-'+nowtime+'/fit-weights.h5f', overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
history = dqn.test(env, nb_episodes=10, visualize=True, nb_max_episode_steps=5000)
sio.savemat(ENV_NAME+'-'+nowtime+'/test.mat', history.history) | [
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
11748,
640,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
198,
198,
6738,
41927,
292,
1330,
3218,
11341,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
... | 2.472067 | 1,432 |
def headify(arg):
'''
Eliminates frustration resulting from the automatic formatting
of request header keys.
>>> headify('AUTH_TOKEN')
'Auth-Token'
# TODO::
# >>> headify({'AUTH_TOKEN': 'Unchanged_Value'})
# {'Auth-Token': 'Unchanged_Value'}
'''
func = lambda x: '-'.join([_.title() for _ in x.split('_')])
return func(arg)
| [
198,
4299,
1182,
1958,
7,
853,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
27405,
17540,
14285,
7186,
422,
262,
11353,
33313,
198,
220,
220,
220,
286,
2581,
13639,
8251,
13,
628,
220,
220,
220,
13163,
1182,
1958,
10786,
... | 2.41875 | 160 |
"""
BSD 3-Clause License
Copyright (c) 2018, Maël Kimmerlin, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from aiohttp import web
import uuid
from marshmallow import Schema, fields, post_load, ValidationError, validate
import logging
import utils
from helpers_n_wrappers import container3, utils3
| [
37811,
198,
21800,
513,
12,
2601,
682,
13789,
198,
198,
15269,
357,
66,
8,
2864,
11,
6669,
26689,
75,
6502,
647,
2815,
11,
317,
282,
1462,
2059,
11,
17837,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
396,
3890,
290,
779,
287,
2723,
... | 3.459459 | 518 |
import pigpio
import time
if __name__ == "__main__":
main()
| [
11748,
12967,
79,
952,
198,
11748,
640,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.5 | 26 |
students_dict = {
"Ram": "Cricket",
"Naresh": "Football",
"Vani": "Tennis",
"Rahim": "Cricket"
}
# Write your code here
n = int(input())
for i in range(n):
key_value_pair = input().split()
key, value = key_value_pair[0], key_value_pair[1]
students_dict[key]= value
print(students_dict)
| [
19149,
658,
62,
11600,
796,
1391,
198,
220,
220,
220,
366,
33754,
1298,
366,
34,
5557,
316,
1600,
198,
220,
220,
220,
366,
26705,
3447,
1298,
366,
37316,
1600,
198,
220,
220,
220,
366,
53,
3216,
1298,
366,
51,
10679,
1600,
198,
220,... | 2.234483 | 145 |
#!/usr/bin/env python3
"""Pentago neural net training"""
import argparse
import datasets
import equivariant as ev
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import numbers
import optax
import timeit
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
47,
298,
3839,
17019,
2010,
3047,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
40522,
198,
11748,
1602,
35460,
415,
355,
819,
198,
11748,
387,
28643,
355,
289,
74,
198,
... | 3 | 91 |
from irekua_database.models import Collection
from irekua_filters.devices import physical_devices as device_utils
from irekua_permissions.data_collections import (
devices as device_permissions)
from selia.views.utils import SeliaList
from selia.views.create_views import SeliaSelectView
| [
6738,
35918,
74,
6413,
62,
48806,
13,
27530,
1330,
12251,
198,
198,
6738,
35918,
74,
6413,
62,
10379,
1010,
13,
42034,
1330,
3518,
62,
42034,
355,
3335,
62,
26791,
198,
6738,
35918,
74,
6413,
62,
525,
8481,
13,
7890,
62,
4033,
26448,
... | 3.511905 | 84 |
from hashlib import blake2b
import logging
import pytest
from config import Config, Network
from ergo_python_appkit.appkit import ErgoAppKit, ErgoValueT
from sigmastate.lang.exceptions import InterpreterException
from org.ergoplatform.appkit import Address, CoveringBoxes, ErgoToken
import java
CFG = Config[Network]
DEBUG = True # CFG.DEBUG
| [
6738,
12234,
8019,
1330,
698,
539,
17,
65,
201,
198,
11748,
18931,
201,
198,
11748,
12972,
9288,
201,
198,
6738,
4566,
1330,
17056,
11,
7311,
201,
198,
6738,
1931,
2188,
62,
29412,
62,
1324,
15813,
13,
1324,
15813,
1330,
5256,
2188,
4... | 3.033898 | 118 |
import numpy as np
import schnell as snl
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'sans-serif',
'sans-serif': ['Helvetica']})
rc('text', usetex=True)
freqs = np.geomspace(8., 1010., 2048)
dets = [snl.GroundDetector('Hanford', 46.4, -119.4, 171.8,
'data/aLIGO.txt'),
snl.GroundDetector('Livingstone', 30.7, -90.8, 243.0,
'data/aLIGO.txt'),
snl.GroundDetector('Virgo', 43.6, 10.5, 116.5,
'data/Virgo.txt'),
snl.GroundDetector('KAGRA', 36.3, 137.2, 225.0,
'data/KAGRA.txt'),
snl.GroundDetector('Cosmic Explorer', 37.24804, -115.800155, 0.,
'data/CE1_strain.txt')]
et = snl.GroundDetectorTriangle(name='ET0', lat=40.1, lon=9.0,
fname_psd='data/ET.txt', detector_id=0)
plt.figure()
plt.plot(freqs, dets[0].psd(freqs), 'k-', label='LIGO')
plt.plot(freqs, dets[2].psd(freqs), 'k--', label='Virgo')
plt.plot(freqs, dets[3].psd(freqs), 'k:', label='KAGRA')
plt.loglog()
plt.xlim([10, 1000])
plt.ylim([2E-48, 2E-43])
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_LIGO.pdf", bbox_inches='tight')
freqsa = np.geomspace(6, 5000., 3072)
freqsb = np.geomspace(1., 10010., 3072)
plt.figure()
plt.plot(freqsb, et.psd(freqsb), 'k-', label='ET-D')
plt.plot(freqsb, dets[4].psd(freqsb), 'k--', label='CE-S1')
plt.plot(freqsa, dets[0].psd(freqsa), 'k:', label='LIGO A+')
plt.xlim([1.5, 1E4])
plt.ylim([5E-50, 9E-42])
plt.loglog()
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.gca().set_yticks([1E-48, 1E-46, 1E-44, 1E-42])
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_ET.pdf", bbox_inches='tight')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
264,
1349,
695,
355,
3013,
75,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
48321,
198,
6015,
10786,
10331,
3256,
12429,
90,
6,
17989,
10354... | 1.819383 | 1,135 |
from django.test import TestCase
import pytest
from items.models import Item_Category, Small_Item, Large_Item
# Create your tests here.
@pytest.mark.django_db
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
11748,
12972,
9288,
198,
6738,
3709,
13,
27530,
1330,
9097,
62,
27313,
11,
10452,
62,
7449,
11,
13601,
62,
7449,
198,
198,
2,
13610,
534,
5254,
994,
13,
628,
198,
31,
9078,
9288,
... | 3.24 | 50 |
from minecraft.networking.packets import (
Packet, AbstractKeepAlivePacket, AbstractPluginMessagePacket, PacketBuffer
)
from minecraft.networking.types import (
Integer, FixedPointInteger, Angle, UnsignedByte, Byte, Boolean, UUID,
Short, VarInt, Double, Float, String, Enum, Difficulty, Dimension,
GameMode, Vector, Direction, PositionAndLook, multi_attribute_alias,
VarIntPrefixedByteArray, MutableRecord, Long
)
from minecraft.networking.types import mynbt
import numpy
GLOBAL_BITS_PER_BLOCK = 14 #TODO
| [
6738,
6164,
3323,
13,
3262,
16090,
13,
8002,
1039,
1330,
357,
198,
220,
220,
220,
6400,
316,
11,
27741,
15597,
2348,
425,
47,
8317,
11,
27741,
37233,
12837,
47,
8317,
11,
6400,
316,
28632,
198,
8,
198,
198,
6738,
6164,
3323,
13,
326... | 3.105263 | 171 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='fast2phy',
description="Convert aligned FASTA format to interleaved PHYLIP format",
license='MIT',
author='David M Noriega',
author_email='davidmnoriega@gmail.com',
version='1.0',
install_requires=['numpy', 'pyfasta'],
packages=['fast2phy'],
entry_points={
'console_scripts': ['fast2phy=fast2phy:main']}
)
| [
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
1233,
26791,
13,
7295,
1330,
9058,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
7217,
17,
6883,
3256,
... | 2.563536 | 181 |
import pdb
import os
import pathlib
import sys
from termcolor import colored
from lib.style import Style
from rich import print as pprint
from rich.panel import Panel
import readline
import re
| [
11748,
279,
9945,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
25064,
198,
6738,
3381,
8043,
1330,
16396,
198,
6738,
9195,
13,
7635,
1330,
17738,
198,
6738,
5527,
1330,
3601,
355,
279,
4798,
198,
6738,
5527,
13,
35330,
1330,
1... | 3.636364 | 55 |
import os
from setuptools import setup
BASEDIR_PATH = os.path.abspath(os.path.dirname(__file__))
setup(
name="botaxon",
version=get_version(),
author="Geoffrey GUERET",
author_email="geoffrey@gueret.tech",
description="Taxonomic parser for (sub)species botanical names.",
long_description=open(os.path.join(BASEDIR_PATH, "README.md"), "r").read(),
long_description_content_type="text/markdown",
url="https://github.com/ggueret/botaxon",
license="MIT",
packages=["botaxon"],
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
tests_require=["pytest==4.4.1", "pytest-cov==2.6.1"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
33,
42827,
4663,
62,
34219,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
40406,
7,
198,
220,
220,
... | 2.567619 | 525 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from glob import glob
import os.path as path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""
analyze_player.py
This program implements functions to analyze (and assist in analyzing) player
stats.
"""
def join_years(player_dir):
"""Join the stat years for a player into one pandas dataframe.
:player_dir: TODO
:returns: TODO
"""
# Sort the files by year.
year_csvs = sorted(glob(path.join(player_dir, "*")))
dfs = []
master_df = pd.DataFrame()
for csv in year_csvs:
df = pd.read_csv(csv, parse_dates=True, index_col=0)
master_df = master_df.append(df)
return master_df
def get_player_dfs(player_dir):
"""
Return the player dataframes for every player in `player_dir/*`, using a
dictionary with player name as the key.
:player_dir: Path to player stat directory.
:returns: Dictionary of (name, player_df) key-value pairs.
"""
player_dirs = glob(path.join(player_dir, "*"))
df_dict = {}
for directory in player_dirs:
name = path.basename(path.normpath(directory))
df = join_years(directory)
df_dict[name] = df
return df_dict
def get_team_df(directory, ignore_index=False):
"""Return the dataframe that contains every player on the team.
This...probably doesn't do what we want just yet. Games won't be assigned
the correct index unless everyone has stats on every game.
For example: A freshman in 2017 has the first game of the 2017 season
listed as 0, while a Sophomore might have that listed as 30. Without
consistency, we can't reason about the data. Maybe we should learn about
Pandas' time series features...
2017-09-09: I have modified our parser to include dates, at the expense of
simple plotting. Pandas understands dates very well, and it takes a gap in
dates to mean a gap in data. This gap carries over into plots, making it
look very bad. To fix this, we can reset the index (df.reset_index()) to
temporarily get the 0..n index back. This allows plotting the way it worked
before, but with one extra step. (See below in __main__.)
For single player analysis, this is nice. For multi-player analysis, we
will need to be careful, but having the dates is crucial.
:directory: TODO
:returns: TODO
"""
df_dict = get_player_dfs(directory)
master_df = pd.DataFrame()
for name, df in df_dict.items():
master_df = master_df.append(df, ignore_index=ignore_index)
return master_df
def team_date_mean(team_df):
"""TODO: Docstring for team_mean.
:team_df: TODO
:stat: TODO
:returns: TODO
"""
return team_df.reset_index().groupby("Date").mean()
def team_scatter_plot(team_df_dict, x, y, filter=None):
"""TODO: Docstring for team_scatter_plot.
:team_df_dict: TODO
:returns: TODO
"""
num = plt.figure()
ax = plt.gca()
for name, df in team_df_dict.items():
if filter and filter(df):
plt.plot(df[x], df[y], "o", label=name)
if __name__ == "__main__":
# Example analysis.
plt.style.use("ggplot")
team_df = get_team_df("./player_stats/")
team_df_dict = get_player_dfs("./player_stats/")
top_percentile = team_df["pct"].quantile(.6)
filter_high = lambda df: df["pct"].mean() >= top_percentile
filter_low = lambda df: len(df["pct"].dropna()) > 0 and not filter_high(df)
team_scatter_plot(team_df_dict, "ta", "k", filter_high)
xs = np.linspace(0, 50)
plt.plot(xs, xs, c="k")
plt.xlabel("Total Attempts")
plt.ylabel("Kills")
plt.title("Players with mean PCT above 60th percentile")
plt.legend()
team_scatter_plot(team_df_dict, "ta", "k", filter_low)
xs = np.linspace(0, 45)
plt.plot(xs, xs, c="k")
plt.xlabel("Total Attempts")
plt.ylabel("Kills")
plt.title("Players with mean PCT below 60th percentile")
plt.legend()
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
15095,
1330,
15095,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
11748,
19798,
292,
355,
279,
67,
198,
... | 2.612018 | 1,531 |
'''Objects and methods to support text corpus storage and manipulation'''
import numpy as np
import pandas as pd
import re
import string
from nltk.tokenize import TreebankWordTokenizer
from sklearn.feature_extraction.text import CountVectorizer
# Looks up a dict key up by its values
# Uses get_key to lookup a sequence of words or characters
# Converts a text-type column of a categorical variable to integers
# Converts a list of tokens to an array of integers
# Pads a 1D sequence of integers (representing words)
| [
7061,
6,
10267,
82,
290,
5050,
284,
1104,
2420,
35789,
6143,
290,
17512,
7061,
6,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
11748,
4731,
198,
198,
6738,
299,
2528,
74,
13,
30001,
... | 3.76259 | 139 |
# _plot_umap.py
__module_name__ = "_plot_umap.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["vinyard@g.harvard.edu",])
# package imports #
# --------------- #
import matplotlib.pyplot as plt
import numpy as np
import vinplots
def _setup_plot():
""""""
plot = vinplots.Plot()
plot.construct(nplots=2, ncols=2, figsize_width=2, figsize_height=1.2)
plot.style(spines_to_delete=["top", "right"],
color="grey",
spines_to_color=['bottom', 'left'],
spines_positioning_amount=5)
ax = plot.AxesDict[0][0]
return plot, ax
def _plot_umap(adata, umap_key, plot_by, colors_dict=False):
""""""
try:
adata.obs = adata.obs.reset_index()
except:
pass
umap = adata.obsm[umap_key]
if not colors_dict:
c = vinplots.color_palettes.SHAREseq
plot, ax = _setup_plot()
for n, i in enumerate(adata.obs[plot_by].unique()):
if colors_dict:
c_ = colors_dict[i]
else:
c_ = c[n]
idx = adata.obs.loc[adata.obs[plot_by] == i].index.astype(int)
ax.scatter(umap[:, 0][idx], umap[:, 1][idx], c=c_, label=i, s=5, alpha=0.8)
ax.set_title("Harmonized Data")
ax.legend(bbox_to_anchor=(1.05, 1.05), edgecolor="white", markerscale=2)
plt.tight_layout()
return plot
| [
198,
2,
4808,
29487,
62,
388,
499,
13,
9078,
198,
198,
834,
21412,
62,
3672,
834,
796,
45434,
29487,
62,
388,
499,
13,
9078,
1,
198,
834,
9800,
834,
796,
33172,
27071,
22179,
7,
14692,
13256,
412,
13,
569,
3541,
446,
8973,
8,
198,... | 2.010145 | 690 |
# Drakkar-Software ginit
# Copyright (c) Drakkar-Software, All rights reserved.
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so.
from ginit import util
from ginit.util import (get_python_path_from_path, drop_file_extension, )
from ginit import module
from ginit.module import (ModuleVisitor)
from ginit import visitor
from ginit.visitor import (visit_path)
__project__ = "ginit"
__version__ = "1.1.0"
FILE_TO_IGNORE = ['__init__', '__main__']
PATCHER_FILES_TO_IGNORE = ['__init__.py', '__init__.pxd']
FOLDERS_TO_IGNORE = ['__pycache__']
FUNCTIONS_TO_IGNORE = ['__init__', '__str__', '__repr__', '__del__']
DIRECTORY_MODULES = "."
INIT_SEPARATOR = ", "
IMPORT_MODULE_SEPARATOR = "."
DEFAULT_IMPORT_PATCH_MAX_DEPTH = 2
PYTHON_IMPORT = "import"
PYTHON_INIT = "__init__.py"
PYTHON_EXTS = [".py"]
CYTHON_IMPORT = "cimport"
CYTHON_INIT = "__init__.pxd"
CYTHON_EXTS = [".pxd", ".pyx"]
__all__ = ['__project__', '__version__',
'ModuleVisitor', 'visit_path',
'get_python_path_from_path', 'drop_file_extension',
'DIRECTORY_MODULES', 'FILE_TO_IGNORE', 'FOLDERS_TO_IGNORE', 'PATCHER_FILES_TO_IGNORE',
'DEFAULT_IMPORT_PATCH_MAX_DEPTH', 'IMPORT_MODULE_SEPARATOR',
'PYTHON_INIT', 'PYTHON_IMPORT', 'PYTHON_EXTS',
'CYTHON_INIT', 'CYTHON_IMPORT', 'CYTHON_EXTS']
| [
2,
220,
12458,
74,
21070,
12,
25423,
308,
15003,
198,
2,
220,
15069,
357,
66,
8,
12458,
74,
21070,
12,
25423,
11,
1439,
2489,
10395,
13,
198,
2,
220,
17168,
13789,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
... | 2.45036 | 695 |
from datetime import datetime
from app.models.game import Game
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
598,
13,
27530,
13,
6057,
1330,
3776,
628
] | 3.823529 | 17 |
# -*- coding: utf-8 -*-
cmColorForegroundChanged = 71
cmColorBackgroundChanged = 72
cmColorSet = 73
cmNewColorItem = 74
cmNewColorIndex = 75
cmSaveColorIndex = 76
cmSetColorIndex = 77
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11215,
10258,
16351,
2833,
31813,
796,
9166,
198,
11215,
10258,
21756,
31813,
796,
7724,
198,
11215,
10258,
7248,
796,
8854,
198,
11215,
3791,
10258,
7449,
796,
8915,
198,
... | 2.920635 | 63 |
from jose.jwa import keys
from enum import Enum
__all__ = ['SigEnum', 'SigDict', ]
SigDict = dict((i.name, i.name) for i in SigEnum)
| [
6738,
474,
577,
13,
73,
10247,
1330,
8251,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
834,
439,
834,
796,
37250,
50,
328,
4834,
388,
3256,
705,
50,
328,
35,
713,
3256,
2361,
628,
198,
50,
328,
35,
713,
796,
8633,
19510,
72,
13,
... | 2.344828 | 58 |
# ==============================================================================
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import ngraph_bridge
import tensorflow as tf
import numpy as np
import re
import os
import pdb
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
import argparse
import pickle as pkl
def modify_node_names(graph_def, node_map):
'''
Accepts a graphdef and a map of node name to new node name.
Replaces the nodes with their new names in the graphdef
'''
for node in graph_def.node:
if node.name in node_map:
old_name = node.name
new_name = node_map.get(node.name)
# print("Replacing: ", node.name, " with ", new_name)
node.name = new_name
for _node in graph_def.node:
for idx, inp_name in enumerate(_node.input):
# removing the part after ':' in the name
# removing ^ if present (control dependency)
colon_split = inp_name.split(':')
assert len(colon_split) <= 2
control_dependency_part = '^' if inp_name[0] == '^' else ''
colon_part = '' if len(
colon_split) == 1 else ':' + colon_split[1]
if inp_name.lstrip('^').split(':')[0] == old_name:
_node.input[idx] = control_dependency_part + \
new_name + colon_part
# TODO: Do we need to edit this anywhere else other than inputs?
return graph_def
def sanitize_node_names(graph_def):
'''
remove '_' from node names. '_' at the beginning of node names indicate internal ops
which might cause TB to complain
'''
return modify_node_names(graph_def, {
node.name: node.name[1:]
for node in graph_def.node
if node.name[0] == "_"
})
def prepend_to_name(graph_def, node_map):
'''
prepend an extra string to the node name (presumably a scope, to denote encapsulate)
'''
return modify_node_names(
graph_def, {
node.name: node_map[node.name] + node.name
for node in graph_def.node
if node.name in node_map
})
def load_file(graph_file, input_binary, modifier_function_list=[]):
'''
can load protobuf (pb or pbtxt). can modify only pbtxt for now
'''
if not gfile.Exists(graph_file):
raise Exception("Input graph file '" + graph_file + "' does not exist!")
graphdef = graph_pb2.GraphDef()
with open(graph_file, "r") as f:
protobuf_str = f.read()
try:
if input_binary:
graphdef.ParseFromString(protobuf_str)
else:
text_format.Merge(protobuf_str, graphdef)
except:
raise Exception("Failed to read pb or pbtxt. input_binary is " +
str(input_binary) + " maybe try flipping it?")
for modifier_function in modifier_function_list:
graphdef = modifier_function(graphdef)
return graphdef
visualizations_supported = [protobuf_to_dot, protobuf_to_grouped_tensorboard]
if __name__ == "__main__":
helptxt = '''
Convert protobuf to different visualizations (dot, tensorboard).
Sample usage from command line:
python ngtf_graph_viewer.py pbtxtfile.pbtxt ./vis # read pbtxt and generate TB
python ngtf_graph_viewer.py -v 1 pbtxtfile.pbtxt ./vis # read pbtxt and generate dot
python ngtf_graph_viewer.py -b pbtxtfile.pb ./vis # read pb and generate TB
python ngtf_graph_viewer.py -b -v 1 pbtxtfile.pb ./vis # read pb and generate dot
python ngtf_graph_viewer.py -c nodemap.pkl pbtxtfile.pbtxt ./vis # read pbtxt, remap node names and generate TB
One can also import the file and use its functions
'''
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter, description=helptxt)
parser.add_argument("input", help="The input protobuf (pb or pbtxt)")
parser.add_argument("out", help="The output directory")
parser.add_argument(
'-b',
dest='binary',
action='store_true',
help=
"Add this flag to indicate its a .pb. Else it is assumed to be a .pbtxt"
)
parser.add_argument(
"-v",
"--visualize",
type=int,
default=1,
help=
"Enter 0 (protobuf->dot) or 1 (protobuf->Tensorboard). By default it converts to tensorboard"
)
parser.add_argument(
"-c",
"--cluster",
help=
"An file that contains the node-to-cluster map that can be used to group them into clusters"
)
args = parser.parse_args()
node_map = {} if args.cluster is None else pkl.load(
open(args.cluster, 'rb'))
visualizations_supported[args.visualize](args.input, args.out, args.binary,
node_map)
| [
2,
38093,
25609,
28,
198,
2,
220,
15069,
2864,
8180,
10501,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.419616 | 2,345 |
import sys
from collections import deque
import numpy as np
if __name__ == '__main__':
main()
| [
11748,
25064,
201,
198,
6738,
17268,
1330,
390,
4188,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1388,
3419,
198
... | 2.571429 | 42 |
from pytest import approx
from enforce_typing import enforce_types
from agents.PoolAgent import PoolAgent
from util import globaltokens
from util.base18 import fromBase18
from .. import KPIs
@enforce_types
@enforce_types
@enforce_types
@enforce_types
@enforce_types
| [
6738,
12972,
9288,
1330,
5561,
198,
198,
6738,
4605,
62,
774,
13886,
1330,
4605,
62,
19199,
198,
198,
6738,
6554,
13,
27201,
36772,
1330,
19850,
36772,
198,
6738,
7736,
1330,
15095,
2501,
482,
641,
198,
6738,
7736,
13,
8692,
1507,
1330,... | 3.232558 | 86 |
from hyde.errors import BaseError
| [
6738,
2537,
2934,
13,
48277,
1330,
7308,
12331,
628,
198
] | 3.6 | 10 |
import math
from django.http import HttpResponse
from django.template import Context, loader
from django.shortcuts import render, get_object_or_404
from info.models import Brother, Officer, BrotherEntity
from info import utility
from marketing.models import Picture as MarketingPic
from articles.models import Article
max_brothers_per_page = 24
standard_brothers_per_page = 9
brothers_per_row = 3
max_pages_listed_on_screen = 5
officers_per_row = 2
exec_board_members_per_row_on_about_page = 3
def general_listing(request, isAlumniFilter, isPledgeFilter, name):
'''
Retrieves all of the information necessary for each of the brother listings.
Retrieves information based on the isAlumniFilter and isPledgeFilter
'''
brothers_count = get_brother_count(request)
page_number = get_page_number(request)
brothers_range_min = (page_number - 1) * brothers_count
brothers_range_max = (page_number) * brothers_count
brothers = Brother.objects.filter(isAlumni=isAlumniFilter, isPledge=isPledgeFilter).order_by(
'lastName', 'firstName', 'middleName')
number_of_brothers = len(brothers)
total_pages = int(math.ceil(number_of_brothers / float(brothers_count)))
brothers = brothers[brothers_range_min:brothers_range_max]
brothers = convert_brothers_to_brotherentities(brothers)
brother_list_list = utility.convert_array_to_YxZ(brothers, brothers_per_row) if len(brothers) > 0 else None
page_numbers_list = calculate_page_range(total_pages, page_number)
next_page = page_number + 1 if number_of_brothers > brothers_range_max else 0
prev_page = page_number - 1
context_dict = {
'brotherType': name,
'brother_list_list' : brother_list_list,
'page_number' : page_number,
'prev_page': prev_page,
'next_page' : next_page,
'page_numbers' : page_numbers_list
}
if brothers_count != standard_brothers_per_page:
context_dict['brothers_count'] = brothers_count
c = Context(context_dict)
t = loader.get_template('brothers_list.html')
return HttpResponse(t.render(c))
def convert_brothers_to_brotherentities(broList):
'''
Converts a set of brothers and converts them to brother entities
which contain more information
'''
broEList = []
for bro in broList:
broEList.append(BrotherEntity(bro))
return broEList
def get_brother_count(request):
'''
Finds the requested number of brothers and corrects it if there are any issues
If the number is invalid, it will return standard_brothers_per_page
'''
brothers_count = request.GET.get('count',str(standard_brothers_per_page))
try:
brothers_count = int(brothers_count)
if brothers_count > max_brothers_per_page:
brothers_count = max_brothers_per_page
except:
brothers_count = standard_brothers_per_page
return brothers_count
def get_page_number(request):
'''
Finds the page number and corrects it if there are any issues
If the page number is invalid, it will return 1
'''
page_number = request.GET.get('page','1')
try:
page_number = int(page_number)
if page_number < 1:
page_number = 1
except:
page_number = 1
return page_number
def calculate_page_range(total_pages, page_number):
'''
This determines which page numbers to show at the bottom of the brothers list pages.
It returns a list of integers that should be displayed on the page based on the total
number of pages and the current page number.
'''
if total_pages == 1: # If there is only the one page, there is no need to display page numbers
return []
elif total_pages <= max_pages_listed_on_screen: # In this case, just display all of the available pages
min_page_number_displayed = 1
max_page_number_displayed = total_pages + 1
elif page_number - max_pages_listed_on_screen / 2 <= 1: # We are near the beginning. In this case, display from page 1 to max_pages_listed_on_screen
min_page_number_displayed = 1
max_page_number_displayed = min_page_number_displayed + max_pages_listed_on_screen
elif page_number + max_pages_listed_on_screen / 2 >= total_pages: # We are near the end. In this case, display from (end - max_pages_listed_on_screen) to end
max_page_number_displayed = total_pages + 1
min_page_number_displayed = max_page_number_displayed - max_pages_listed_on_screen
else: # We are somewhere in the middle. In this case, just display some pages on either side
min_page_number_displayed = page_number - max_pages_listed_on_screen / 2
max_page_number_displayed = min_page_number_displayed + max_pages_listed_on_screen
page_numbers_list = range(min_page_number_displayed,max_page_number_displayed)
return page_numbers_list
| [
11748,
10688,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
28243,
1330,
30532,
11,
40213,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
19... | 2.687602 | 1,847 |
from django.conf.urls import url
from django.contrib import admin
from wawhfd.views import (
IndexView,
DatesListView,
DatesEditView,
DatesDeleteView,
RecipesListView,
RecipesAddView,
RecipesEditView,
RecipesDeleteView,
)
urlpatterns = [
url(r'^$', IndexView.as_view()),
url(r'^api/dates/$', DatesListView.as_view()),
url(r'^api/dates/(?P<date_str>[\d]{4}-[\d]{2}-[\d]{2})/edit/$', DatesEditView.as_view()),
url(r'^api/dates/(?P<date_str>[\d]{4}-[\d]{2}-[\d]{2})/delete/$', DatesDeleteView.as_view()),
url(r'^api/recipes/$', RecipesListView.as_view()),
url(r'^api/recipes/add/$', RecipesAddView.as_view()),
url(r'^api/recipes/(?P<recipe_id>[\d]+)/edit/$', RecipesEditView.as_view()),
url(r'^api/recipes/(?P<recipe_id>[\d]+)/delete/$', RecipesDeleteView.as_view()),
url(r'^admin/', admin.site.urls),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
266,
707,
71,
16344,
13,
33571,
1330,
357,
198,
220,
220,
220,
12901,
7680,
11,
198,
220,
220,
220,
44712,
... | 2.198492 | 398 |
#!/usr/bin/env python
# Copyright (C) 2019-2020 Emetophobe (snapnaw@gmail.com)
# https://github.com/Emetophobe/steamutils/
import os
import re
import glob
import argparse
def list_games(steamdir):
""" Get the list of installed Steam games. """
# Make sure the directory is valid
steam_apps = os.path.join(os.path.abspath(steamdir), 'steamapps')
steam_common = os.path.join(steam_apps, 'common')
if not os.path.isdir(steam_apps) and not os.path.isdir(steam_common):
raise ValueError('Error: Invalid steam directory.')
# Get list of manifest files from the steamapps directory
acf_files = glob.glob(os.path.join(steam_apps, 'appmanifest_*.acf'))
# Parse manifest files and create a list of game dicts
games = []
for filename in acf_files:
with open(filename, 'r') as fp:
manifest = {}
for line in fp:
# Extract the key/value pairs
matches = re.findall(r'"(.*?)"', line) # find strings inside double quotes
if len(matches) == 2: # require a pair of strings
key, value = matches[0], matches[1]
manifest[key] = value # store the key/value pair
# Add the full path to the installdir and manifest file
manifest['installdir'] = os.path.join(steam_common, manifest['installdir'])
manifest['manifest'] = filename
games.append(manifest)
return sorted(games, key=lambda k: k['name'])
def print_games(games):
""" Print a tabular games list. """
row = '{:<50} {:<10} {}'
print(row.format('Name', 'App Id', 'Location'))
for game in games:
print(row.format(game['name'], game['appid'], game['installdir']))
def print_detailed_games(games):
""" Print a detailed games list. """
for game in games:
print()
print('name:', game['name'])
print('appid:', game['appid'])
print('installdir:', game['installdir'])
print('manifest:', game['manifest'])
print('size:', format_size(game['SizeOnDisk']))
def format_size(size):
""" Format install size into a human readable string. """
size = int(size)
for suffix in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'):
if size < 1024:
return '{:.1f}{}'.format(size, suffix)
size /= 1024
return '{:1f}YB'.format(size)
if __name__ == '__main__':
try:
main()
except (OSError, ValueError) as e:
print(e)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
34,
8,
13130,
12,
42334,
220,
220,
2295,
316,
2522,
5910,
357,
45380,
77,
707,
31,
14816,
13,
785,
8,
198,
2,
3740,
1378,
12567,
13,
785,
14,
36,
4164,
2522,
5910,
... | 2.326606 | 1,090 |
#!/usr/bin/env python3
import sys
from collections import OrderedDict
import json
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
33918,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
138... | 2.818182 | 44 |
# ===========
# Libraries
# ===========
from ..size import Size
from ..filenames import FilenamesHandler
from ..args import args
# ===================
# Class Declaration
# ===================
| [
2,
796,
2559,
855,
198,
2,
220,
46267,
198,
2,
796,
2559,
855,
198,
6738,
11485,
7857,
1330,
12849,
198,
6738,
11485,
10379,
268,
1047,
1330,
7066,
268,
1047,
25060,
198,
6738,
11485,
22046,
1330,
26498,
628,
198,
2,
36658,
855,
198,
... | 3.862745 | 51 |
# encoding: utf-8
"""
Temporary stand-in for main oxml module that came across with the
PackageReader transplant. Probably much will get replaced with objects from
the pptx.oxml.core and then this module will either get deleted or only hold
the package related custom element classes.
"""
from __future__ import absolute_import
from lxml import etree
from .constants import NAMESPACE as NS, RELATIONSHIP_TARGET_MODE as RTM
from ..oxml import parse_xml, register_element_cls
from ..oxml.simpletypes import (
ST_ContentType, ST_Extension, ST_TargetMode, XsdAnyUri, XsdId
)
from ..oxml.xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore
)
nsmap = {
'ct': NS.OPC_CONTENT_TYPES,
'pr': NS.OPC_RELATIONSHIPS,
'r': NS.OFC_RELATIONSHIPS,
}
class CT_Default(BaseOxmlElement):
"""
``<Default>`` element, specifying the default content type to be applied
to a part with the specified extension.
"""
extension = RequiredAttribute('Extension', ST_Extension)
contentType = RequiredAttribute('ContentType', ST_ContentType)
class CT_Override(BaseOxmlElement):
"""
``<Override>`` element, specifying the content type to be applied for a
part with the specified partname.
"""
partName = RequiredAttribute('PartName', XsdAnyUri)
contentType = RequiredAttribute('ContentType', ST_ContentType)
class CT_Relationship(BaseOxmlElement):
"""
``<Relationship>`` element, representing a single relationship from a
source to a target part.
"""
rId = RequiredAttribute('Id', XsdId)
reltype = RequiredAttribute('Type', XsdAnyUri)
target_ref = RequiredAttribute('Target', XsdAnyUri)
targetMode = OptionalAttribute(
'TargetMode', ST_TargetMode, default=RTM.INTERNAL
)
@classmethod
def new(cls, rId, reltype, target, target_mode=RTM.INTERNAL):
"""
Return a new ``<Relationship>`` element.
"""
xml = '<Relationship xmlns="%s"/>' % nsmap['pr']
relationship = parse_xml(xml)
relationship.rId = rId
relationship.reltype = reltype
relationship.target_ref = target
relationship.targetMode = target_mode
return relationship
class CT_Relationships(BaseOxmlElement):
"""
``<Relationships>`` element, the root element in a .rels file.
"""
relationship = ZeroOrMore('pr:Relationship')
def add_rel(self, rId, reltype, target, is_external=False):
"""
Add a child ``<Relationship>`` element with attributes set according
to parameter values.
"""
target_mode = RTM.EXTERNAL if is_external else RTM.INTERNAL
relationship = CT_Relationship.new(rId, reltype, target, target_mode)
self._insert_relationship(relationship)
@classmethod
def new(cls):
"""
Return a new ``<Relationships>`` element.
"""
xml = '<Relationships xmlns="%s"/>' % nsmap['pr']
relationships = parse_xml(xml)
return relationships
@property
def xml(self):
"""
Return XML string for this element, suitable for saving in a .rels
stream, not pretty printed and with an XML declaration at the top.
"""
return oxml_tostring(self, encoding='UTF-8', standalone=True)
class CT_Types(BaseOxmlElement):
"""
``<Types>`` element, the container element for Default and Override
elements in [Content_Types].xml.
"""
default = ZeroOrMore('ct:Default')
override = ZeroOrMore('ct:Override')
def add_default(self, ext, content_type):
"""
Add a child ``<Default>`` element with attributes set to parameter
values.
"""
return self._add_default(extension=ext, contentType=content_type)
def add_override(self, partname, content_type):
"""
Add a child ``<Override>`` element with attributes set to parameter
values.
"""
return self._add_override(
partName=partname, contentType=content_type
)
@classmethod
def new(cls):
"""
Return a new ``<Types>`` element.
"""
xml = '<Types xmlns="%s"/>' % nsmap['ct']
types = parse_xml(xml)
return types
register_element_cls('ct:Default', CT_Default)
register_element_cls('ct:Override', CT_Override)
register_element_cls('ct:Types', CT_Types)
register_element_cls('pr:Relationship', CT_Relationship)
register_element_cls('pr:Relationships', CT_Relationships)
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
12966,
5551,
1302,
12,
259,
329,
1388,
12018,
4029,
8265,
326,
1625,
1973,
351,
262,
198,
27813,
33634,
23319,
13,
18578,
881,
481,
651,
6928,
351,
5563,
422,
198,
1169,
279,
457... | 2.676331 | 1,690 |
# Import packages for plotting and system
import getopt
import random
import sys
from collections import deque
# make sure the root path is in system path
from pathlib import Path
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from importlib_resources import path
import matplotlib.pyplot as plt
import numpy as np
import torch
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
#from flatland.utils.rendertools import RenderTool
import fc_treeobs.nets
from fc_treeobs.dueling_double_dqn import Agent
from fc_treeobs.utils import norm_obs_clip, split_tree_into_feature_groups
if __name__ == '__main__':
main(sys.argv[1:])
| [
2,
17267,
10392,
329,
29353,
290,
1080,
198,
11748,
651,
8738,
198,
11748,
4738,
198,
11748,
25064,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
2,
787,
1654,
262,
6808,
3108,
318,
287,
1080,
3108,
198,
6738,
3108,
8019,
1330,
10644,
... | 3.193772 | 289 |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 10:38:40 2021
PORTFOLIO MODEL - WHY GLOBAL SENSITIVITY?
@author: PMR
"""
# %% Import libraries
import chaospy as cp
import numpy as np
import matplotlib.pyplot as plt
# %% Portfolio model
# %% Setup problem
Q1_mean = 0
Q1_std = 1
Q2_mean = 0
Q2_std = 3
c1 = 2
c2 = 1
Q1_distro = cp.Normal(Q1_mean, Q1_std)
Q2_distro = cp.Normal(Q2_mean, Q2_std)
np.random.seed(1)
nSamples = 1000
J_distro = cp.J(Q1_distro, Q2_distro)
samples = J_distro.sample(nSamples).T
# %% Evaluate the model
Y_all = []
for i in range(nSamples):
Q1, Q2 = samples[i,0], samples[i,1]
Y = portfolio_model(c1, c2, Q1, Q2)
Y_all.append(Y)
# %% Plots
plt.figure('q1 v. y')
plt.title('q1 v. y')
plt.scatter(samples[:,0], Y_all, s=5, color='blue', alpha=0.5)
plt.grid(alpha=0.3)
plt.xlim(-15,15)
plt.ylim(-15,15)
plt.figure('q2 v. y')
plt.title('q2 v. y')
plt.scatter(samples[:,1], Y_all, s=5, color='blue', alpha=0.5)
plt.grid(alpha=0.3)
plt.xlim(-15,15)
plt.ylim(-15,15)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
26223,
5267,
1248,
838,
25,
2548,
25,
1821,
33448,
201,
198,
15490,
37,
3535,
9399,
19164,
3698,
532,
44463,
10188,
9864,
1847,
311,
16... | 1.872852 | 582 |
from . import common as cm
# def mutate(self, client_customer_id=None, sync=None):
# if client_customer_id:
# self.client.SetClientCustomerId(client_customer_id)
# result = self.service.mutate(self.helper.operations)
# for item in result.value:
# item['returnType'] = 'ManagedCustomer'
# return result.value
| [
6738,
764,
1330,
2219,
355,
12067,
628,
198,
220,
220,
220,
1303,
825,
4517,
378,
7,
944,
11,
5456,
62,
23144,
263,
62,
312,
28,
14202,
11,
17510,
28,
14202,
2599,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
611,
5456,
62,
23144... | 2.356688 | 157 |
import json
| [
11748,
33918,
628
] | 4.333333 | 3 |
import numpy as np
def qubo_to_ising(mat: np.ndarray):
"""inplace-convert numpy matrix from qubo to ising.
Args:
mat (np.ndarray): numpy matrix
"""
mat /= 4
for i in range(mat.shape[0]):
mat[i, i] += np.sum(mat[i, :])
def chimera_to_ind(r: int, c: int, z: int, L: int):
"""[summary]
Args:
r (int): row index
c (int): column index
z (int): in-chimera index (must be from 0 to 7)
L (int): height and width of chimera-units (total number of spins is :math:`L \\times L \\times 8`)
Raises:
ValueError: [description]
Returns:
int: corresponding Chimera index
"""
if not (0 <= r < L and 0 <= c < L and 0 <= z < 8):
raise ValueError(
'0 <= r < L or 0 <= c < L or 0 <= z < 8. '
'your input r={}, c={}, z={}, L={}'.format(r, c, z, L))
return r * L * 8 + c*8 + z
| [
11748,
299,
32152,
355,
45941,
198,
198,
4299,
627,
2127,
62,
1462,
62,
1710,
7,
6759,
25,
45941,
13,
358,
18747,
2599,
198,
220,
220,
220,
37227,
259,
5372,
12,
1102,
1851,
299,
32152,
17593,
422,
627,
2127,
284,
318,
278,
13,
628,... | 2.116279 | 430 |
algorithm = "fourier"
potential = "morse"
D = 3
a = 0.3
T = 10
dt = 0.005
eps = 0.2
f = 4.0
ngn = 4096
basis_size = 4
leading_component = 0
P = 1.0j
Q = 1
S = 0.0
p = 0.0
q = 1.5
parameters = [ (P, Q, S, p, q) ]
coefficients = [[(0, 1.0)]]
write_nth = 20
| [
282,
42289,
796,
366,
69,
280,
5277,
1,
198,
198,
13059,
1843,
796,
366,
4491,
325,
1,
198,
35,
796,
513,
198,
64,
796,
657,
13,
18,
198,
198,
51,
796,
838,
198,
28664,
796,
657,
13,
22544,
198,
198,
25386,
796,
657,
13,
17,
1... | 1.865248 | 141 |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Core storage related features."""
import logging
from cauliflowervest.client import util
DISKUTIL = '/usr/sbin/diskutil'
class Error(Exception):
"""Base error."""
class CouldNotUnlockError(Error):
"""Could not unlock volume error."""
class CouldNotRevertError(Error):
"""Could not revert volume error."""
class VolumeNotEncryptedError(Error):
"""Volume is not encrypted error."""
class State(object):
"""Fake enum to represent the possible states of core storage."""
ENABLED = 'CORE_STORAGE_STATE_ENABLED'
ENCRYPTED = 'CORE_STORAGE_STATE_ENCRYPTED'
FAILED = 'CORE_STORAGE_STATE_FAILED'
NONE = 'CORE_STORAGE_STATE_NONE'
UNKNOWN = 'CORE_STORAGE_STATE_UNKNOWN'
def IsBootVolumeEncrypted():
"""Returns True if the boot volume (/) is encrypted, False otherwise."""
try:
csinfo_plist = util.GetPlistFromExec(
(DISKUTIL, 'cs', 'info', '-plist', '/'))
except util.ExecError:
return False # Non-zero return means / volume isn't a CoreStorage volume.
lvf_uuid = csinfo_plist.get('MemberOfCoreStorageLogicalVolumeFamily')
if lvf_uuid:
try:
lvf_info_plist = util.GetPlistFromExec(
(DISKUTIL, 'cs', 'info', '-plist', lvf_uuid))
except util.ExecError:
return False # Couldn't get info on Logical Volume Family UUID.
return lvf_info_plist.get(
'CoreStorageLogicalVolumeFamilyEncryptionType') == 'AES-XTS'
return False
def GetRecoveryPartition():
"""Determine the location of the recovery partition.
Returns:
str, like "/dev/disk0s3" where the recovery partition is, OR
None, if no recovery partition exists or cannot be detected.
"""
try:
disklist_plist = util.GetPlistFromExec((DISKUTIL, 'list', '-plist'))
except util.ExecError:
logging.exception('GetRecoveryPartition() failed to get partition list.')
return
alldisks = disklist_plist.get('AllDisksAndPartitions', [])
for disk in alldisks:
partitions = disk.get('Partitions', [])
for partition in partitions:
if partition.get('VolumeName') == 'Recovery HD':
return '/dev/%s' % partition['DeviceIdentifier']
def GetCoreStoragePlist(uuid=None):
"""Returns a dict of diskutil cs info plist for a given str CoreStorage uuid.
Args:
uuid: str, optional, CoreStorage uuid. If no uuid is provided, this function
returns a diskutil cs list plist..
Returns:
A dict of diskutil cs info/list -plist output.
Raises:
Error: The given uuid was invalid or there was a diskutil error.
"""
if uuid:
if not util.UuidIsValid(uuid):
raise Error
cmd = [DISKUTIL, 'corestorage', 'info', '-plist', uuid]
else:
cmd = [DISKUTIL, 'corestorage', 'list', '-plist']
try:
return util.GetPlistFromExec(cmd)
except util.ExecError:
raise Error
def GetStateAndVolumeIds():
"""Determine the state of core storage and the volume IDs (if any).
In the case that core storage is enabled, it is required that every present
volume is encrypted, to return "encrypted" status (i.e. the entire drive is
encrypted, for all present drives). Otherwise ENABLED or FAILED state is
returned.
Returns:
tuple: (State, [list; str encrypted UUIDs], [list; str unencrypted UUIDs])
Raises:
Error: there was a problem getting the corestorage list, or family info.
"""
state = State.NONE
volume_ids = []
encrypted_volume_ids = []
failed_volume_ids = []
cs_plist = GetCoreStoragePlist()
groups = cs_plist.get('CoreStorageLogicalVolumeGroups', [])
if groups:
state = State.ENABLED
for group in groups:
for family in group.get('CoreStorageLogicalVolumeFamilies', []):
family_plist = GetCoreStoragePlist(family['CoreStorageUUID'])
enc = family_plist.get('CoreStorageLogicalVolumeFamilyEncryptionType', '')
for volume in family['CoreStorageLogicalVolumes']:
volume_id = volume['CoreStorageUUID']
volume_plist = GetCoreStoragePlist(volume_id)
conv_state = volume_plist.get(
'CoreStorageLogicalVolumeConversionState', '')
# Known states include: Pending, Converting, Complete, Failed.
if conv_state == 'Failed':
failed_volume_ids.append(volume_id)
elif enc == 'AES-XTS':
# If conv_state is not 'Failed' and enc is correct, consider the
# volume encrypted to include those that are still encrypting.
# A potential TODO might be to separate these.
encrypted_volume_ids.append(volume_id)
else:
volume_ids.append(volume_id)
if failed_volume_ids:
state = State.FAILED
elif encrypted_volume_ids and not volume_ids:
state = State.ENCRYPTED
# For now at least, consider "failed" volumes as encrypted, as the same
# actions are valid for such volumes. For example: revert.
encrypted_volume_ids.extend(failed_volume_ids)
return state, encrypted_volume_ids, volume_ids
def GetState():
"""Check if core storage is in place.
Returns:
One of the class properties of State.
"""
state, _, _ = GetStateAndVolumeIds()
return state
def GetVolumeSize(uuid, readable=True):
"""Return the size of the volume with the given UUID.
Args:
uuid: str, ID of the volume in question
readable: Optional boolean, default true: return a human-readable string
when true, otherwise int number of bytes.
Returns:
str or int, see "readable" arg.
Raises:
Error: there was a problem getting volume info.
ValueError: The UUID is formatted incorrectly.
"""
if not util.UuidIsValid(uuid):
raise ValueError('Invalid UUID: ' + uuid)
try:
plist = util.GetPlistFromExec(
(DISKUTIL, 'corestorage', 'info', '-plist', uuid))
except util.ExecError:
logging.exception('GetVolumeSize() failed to get volume info: %s', uuid)
raise Error
num_bytes = plist['CoreStorageLogicalVolumeSize']
if readable:
return '%.2f GiB' % (num_bytes / (1<<30))
else:
return num_bytes
def UnlockVolume(uuid, passphrase):
"""Unlock a core storage encrypted volume.
Args:
uuid: str, uuid of the volume to unlock.
passphrase: str, passphrase to unlock the volume.
Raises:
CouldNotUnlockError: the volume cannot be unlocked.
ValueError: The UUID is formatted incorrectly.
"""
if not util.UuidIsValid(uuid):
raise ValueError('Invalid UUID: ' + uuid)
returncode, _, stderr = util.Exec(
(DISKUTIL, 'corestorage', 'unlockVolume', uuid, '-stdinpassphrase'),
stdin=passphrase)
if (returncode != 0 and
'volume is not locked' not in stderr and
'is already unlocked' not in stderr):
raise CouldNotUnlockError(
'Could not unlock volume (%s).' % returncode)
def RevertVolume(uuid, passphrase):
"""Revert a core storage encrypted volume (to unencrypted state).
Args:
uuid: str, uuid of the volume to revert.
passphrase: str, passphrase to unlock the volume.
Raises:
CouldNotRevertError: the volume was unlocked, but cannot be reverted.
CouldNotUnlockError: the volume cannot be unlocked.
ValueError: The UUID is formatted incorrectly.
"""
if not util.UuidIsValid(uuid):
raise ValueError('Invalid UUID: ' + uuid)
UnlockVolume(uuid, passphrase)
returncode, _, _ = util.Exec(
(DISKUTIL, 'corestorage', 'revert', uuid, '-stdinpassphrase'),
stdin=passphrase)
if returncode != 0:
raise CouldNotRevertError('Could not revert volume (%s).' % returncode)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2813,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743... | 2.867569 | 2,809 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628
] | 1.785714 | 14 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making Metis available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import time
import os
import threading
from app.dao.time_series_detector import anomaly_op
from app.dao.time_series_detector import sample_op
from app.dao.time_series_detector import train_op
from app.utils.utils import *
from app.service.time_series_detector.algorithm import isolation_forest, ewma, polynomial_interpolation, statistic, xgboosting
from app.config.errorcode import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../../model/time_series_detector/')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
3395,
271,
1695,
13,
198,
15269,
357,
34... | 3.58806 | 335 |
# Generated by Django 3.2.2 on 2021-05-10 09:17
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
17,
319,
33448,
12,
2713,
12,
940,
7769,
25,
1558,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.567568 | 37 |
from . import GlobalConstants as GC
from . import configuration as cf
from . import Utility, Image_Modification, Engine, GenericMapSprite
| [
6738,
764,
1330,
8060,
34184,
1187,
355,
20145,
198,
6738,
764,
1330,
8398,
355,
30218,
198,
6738,
764,
1330,
34030,
11,
7412,
62,
5841,
2649,
11,
7117,
11,
42044,
13912,
38454,
578,
198
] | 4.181818 | 33 |
if '__file__' in globals():
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable, Model, as_variable
from dezero import setup_variable
from dezero.utils import plot_dot_graph
import dezero.functions as F
from dezero import optimizers
from dezero.models import MLP
setup_variable()
if __name__ == '__main__':
x = Variable(np.array([[0.2, -0.4]]))
model = MLP((10, 3))
y = model(x)
p = softmaxld(y)
print(y)
print(p)
a = np.array([[0.2, -0.4], [0.3, 0.5], [1.3, -3.2], [2.1, 0.3]])
t = np.array([2, 0, 1, 0])
z = model(a)
loss = F.softmax_cross_entropy_simple(z, t)
print(loss)
| [
361,
705,
834,
7753,
834,
6,
287,
15095,
874,
33529,
198,
220,
220,
220,
1330,
28686,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
25064,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,... | 2.201183 | 338 |
#!/usr/bin/env python3
from llvmlite import ir
i1 = ir.IntType(1)
i8 = ir.IntType(8)
i16 = ir.IntType(16)
i32 = ir.IntType(32)
i64 = ir.IntType(64)
void = ir.VoidType()
m = ir.Module()
fty = ir.FunctionType(void, [i32, i32, i32])
f = ir.Function(m, fty, "cmov_test")
entry = f.append_basic_block("entry")
bld = ir.IRBuilder(entry)
cond_v = f.args[0]
cond_v.name = "cond"
true_v = f.args[1]
true_v.name = "true_val"
false_v = f.args[2]
false_v.name = "false_val"
bool_v = bld.icmp_unsigned("==", cond_v, cond_v.type(0), name="cmov_cond")
# cur_bb = bld.basic_block
# with bld.if_else(bool_v) as (then, otherwise):
# with then:
# true_bb = bld.basic_block
# with otherwise:
# false_bb = bld.basic_block
bld.select(bool_v, true_v, false_v, name="cmov_val")
print(m)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
32660,
85,
4029,
578,
1330,
4173,
198,
198,
72,
16,
796,
4173,
13,
5317,
6030,
7,
16,
8,
198,
72,
23,
796,
4173,
13,
5317,
6030,
7,
23,
8,
198,
72,
1433,
796,
41... | 2.094987 | 379 |
from .exporters import export_html, export_pdf # noqa: F401
| [
6738,
764,
1069,
1819,
1010,
1330,
10784,
62,
6494,
11,
10784,
62,
12315,
220,
1303,
645,
20402,
25,
376,
21844,
198
] | 2.904762 | 21 |
import pytest
from sovtokenfees.test.constants import NYM_FEES_ALIAS
from sovtokenfees.test.helper import add_fees_request_with_address
from indy_common.constants import NYM
from plenum.common.exceptions import RequestRejectedException
| [
11748,
12972,
9288,
198,
6738,
523,
85,
30001,
69,
2841,
13,
9288,
13,
9979,
1187,
1330,
6645,
44,
62,
15112,
1546,
62,
1847,
43429,
198,
6738,
523,
85,
30001,
69,
2841,
13,
9288,
13,
2978,
525,
1330,
751,
62,
69,
2841,
62,
25927,
... | 3.12987 | 77 |
from quart import current_app, request, redirect, jsonify
from urllib.parse import quote_plus, parse_qs, urlparse
from quart.exceptions import MethodNotAllowed
from datetime import datetime, timedelta
from typing import List, Tuple
import jwt
import os
from api.models import Token, User
from api.app import API
from .. import bp
import utils
DISCORD_ENDPOINT = "https://discord.com/api"
request: utils.Request
SCOPES = ["identify"]
current_app: API
async def exchange_code(
*, code: str, scope: str, redirect_uri: str, grant_type: str = "authorization_code"
) -> Tuple[dict, int]:
"""Exchange discord oauth code for access and refresh tokens."""
async with current_app.http_session.post(
"%s/v6/oauth2/token" % DISCORD_ENDPOINT,
data=dict(
code=code,
scope=scope,
grant_type=grant_type,
redirect_uri=redirect_uri,
client_id=os.environ["DISCORD_CLIENT_ID"],
client_secret=os.environ["DISCORD_CLIENT_SECRET"],
),
headers={"Content-Type": "application/x-www-form-urlencoded"},
) as response:
return await response.json(), response.status
async def get_user(access_token: str) -> dict:
"""Coroutine to fetch User data from discord using the users `access_token`"""
async with current_app.http_session.get(
"%s/v6/users/@me" % DISCORD_ENDPOINT,
headers={"Authorization": "Bearer %s" % access_token},
) as response:
return await response.json()
def format_scopes(scopes: List[str]) -> str:
"""Format a list of scopes."""
return " ".join(scopes)
def get_redirect(callback: str, scopes: List[str]) -> str:
"""Generates the correct oauth link depending on our provided arguments."""
return (
"{BASE}/oauth2/authorize?response_type=code"
"&client_id={client_id}"
"&scope={scopes}"
"&redirect_uri={redirect_uri}"
"&prompt=consent"
).format(
BASE=DISCORD_ENDPOINT,
client_id=os.environ["DISCORD_CLIENT_ID"],
scopes=format_scopes(scopes),
redirect_uri=quote_plus(callback),
)
def is_valid_url(string: str) -> bool:
"""Returns boolean describing if the provided string is a url"""
result = urlparse(string)
return all((result.scheme, result.netloc))
@bp.route("/discord/redirect", methods=["GET"])
async def redirect_to_discord_oauth_portal():
"""Redirect user to correct oauth link depending on specified domain and requested scopes."""
qs = parse_qs(request.query_string.decode())
callback = qs.get(
"callback", (request.scheme + "://" + request.host + "/auth/discord/callback")
)
if isinstance(callback, list): #
callback = callback[0]
if not is_valid_url(callback):
return (
jsonify(
{"error": "Bad Request", "message": "Not a well formed redirect URL."}
),
400,
)
return redirect(get_redirect(callback=callback, scopes=SCOPES))
@bp.route("/discord/callback", methods=["GET", "POST"])
async def discord_oauth_callback():
"""
Callback endpoint for finished discord authorization flow.
GET -> Only used in DEBUG mode.
Gets code from querystring.
POST -> Gets code from request data.
"""
if request.method == "GET":
if not current_app.debug:
# A GET request to this endpoint should only be used in testing.
raise MethodNotAllowed(("POST",))
qs = parse_qs(request.query_string.decode())
code = qs.get("code")
if code is not None:
code = code[0]
callback = request.scheme + "://" + request.host + "/auth/discord/callback"
elif request.method == "POST":
data = await request.json
code = data.get("code")
callback = data.get("callback", "")
else:
raise RuntimeWarning("Unexpected request method. (%s)" % request.method)
if code is None:
return (
jsonify(
{
"error": "Bad Request",
"message": "Missing code in %s." % "querystring arguments"
if request.method == "GET"
else "JSON data",
}
),
400,
)
if not is_valid_url(callback):
return (
jsonify(
{"error": "Bad Request", "message": "Not a well formed redirect URL."}
),
400,
)
access_data, status_code = await exchange_code(
code=code, scope=format_scopes(SCOPES), redirect_uri=callback
)
if access_data.get("error", False):
if status_code == 400:
return (
jsonify(
{
"error": "Bad Request",
"message": "Discord returned 400 status.",
"data": access_data,
}
),
400,
)
raise RuntimeWarning(
"Unpredicted status_code.\n%s\n%s" % (str(access_data), status_code)
)
expires_at = datetime.utcnow() + timedelta(seconds=access_data["expires_in"])
expires_at.replace(microsecond=0)
user_data = await get_user(access_token=access_data["access_token"])
user_data["id"] = uid = int(user_data["id"])
user = await User.fetch(id=uid)
if user is None:
user = await User.create(
id=user_data["id"],
username=user_data["username"],
discriminator=user_data["discriminator"],
avatar=user_data["avatar"],
)
await Token(
user_id=user.id,
data=access_data,
expires_at=expires_at,
token=access_data["access_token"],
).update()
token = jwt.encode(
{"uid": user.id, "exp": expires_at, "iat": datetime.utcnow()},
key=os.environ["SECRET_KEY"],
)
return jsonify(token=token, exp=expires_at)
| [
6738,
28176,
1330,
1459,
62,
1324,
11,
2581,
11,
18941,
11,
33918,
1958,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
9577,
62,
9541,
11,
21136,
62,
48382,
11,
19016,
29572,
198,
6738,
28176,
13,
1069,
11755,
1330,
11789,
3673,
3237,
6... | 2.240955 | 2,681 |
import numpy as np
import matplotlib.pyplot as plt
# Soluciones a los ejercicios de la seccion 6.2.5
# del libro A Survey of Computational Physics Introductory Computational Science
# de Landau, Paez, Bordeianu (Python Multimodal eTextBook Beta4.0)
#1. Write a double-precision program to integrate an arbitrary function numerically
# using the trapezoid rule, the Simpson rule, and Gaussian quadrature.
# 2 Compute the relative error (epsilon=abs(numerical-exact)/exact) in each case.
# Present your data in tabular form for N=2,10,20,40,80,160
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
2,
4294,
1229,
295,
274,
257,
22346,
304,
73,
2798,
291,
4267,
390,
8591,
384,
535,
295,
718,
13,
17,
13,
20,
220,
198,
2,
1619... | 3.094972 | 179 |
import random, math, numpy as np
import networkx
import networkx.utils
#from networkx.generators.classic import empty_graph
def random_stub_triangle_graph(s, t, seed=None):
"""Return a random graph G(s,t) with expected degrees given by s+2*t.
:Parameters:
- `s`: list - count of stubs emanating from node[i]
- `t`: list - count of triangles including node[i]
- `seed`: seed for random number generator (default=None)
>>> z=[10 for i in range(100)]
>>> G=nx.random_stub_triangle_graph
Reference::
@Article{arXiv:0903.4009v1,
author = {M. E. J. Newman},
title = {Random graphs with clustering},
journal = {},
year = {2009},
volume = {},
pages = {},
}
"""
if len(s) != len (t) :
msg = "NetworkXError: stub and triangle vector must be same length"
raise networkx.NetworkXError(msg)
if sum(s)%2 != 0 :
msg = "NetworkXError: sum(stubs) must be even"
raise networkx.NetworkXError(msg)
if sum(t)%3 != 0 :
msg = "NetworkXError: sum(triangles) % 3 must be zero"
raise networkx.NetworkXError(msg)
n = len(s)
# allow self loops, exclude in later code
G=networkx.empty_graph(n,create_using=networkx.Graph())
G.name="random_stub_triangle_graph"
if n==0 or (max(s)==0 and max(t)==0): # done if no edges
return G
#might not be needed
#d = sum(s+2*t)
#rho = 1.0 / float(d) # Vol(G)
if seed is not None:
random.seed(seed)
# connect triangle corners
# Get a list of nodes that have triangle corners
triNodes = [ x for x in range(n) if t[x]>0 ]
tri = list(t)
while len(triNodes) >= 3:
[A,B,C] = random.sample(triNodes,3)
#if not (G.has_edge(A,B) or G.has_edge(A,C) or G.has_edge(B,C)):
G.add_cycle([A,B,C])
for node in [A,B,C]:
tri[node] -= 1
if tri[node] == 0: triNodes.remove(node)
# connect stubs
# Get a list of nodes that have stubs
stubNodes = [ x for x in range(n) if s[x]>0 ]
stubs = list(s)
while len(stubNodes) >= 2:
[A,B] = random.sample(stubNodes,2)
#if not (G.has_edge(A,B)):
G.add_edge(A,B)
for node in [A,B]:
stubs[node] -= 1
if stubs[node] == 0: stubNodes.remove(node)
"""
for node in xrange(n):
for v in xrange(u,n):
if random.random() < w[u]*w[v]*rho:
G.add_edge(u,v)
"""
return G
def max_clustering( degSeq ):
""" Return a valid degree sequence with high clustering.
"""
# Floors given degree sequence, then pair as many edges as possible into
# triangle corners, assigns any left over edges an non-triangle edges
[t,s]=[list(a) for a in zip(*[divmod(math.floor(x),2) for x in degSeq])]
# T must be a multile of 3
removeT = int(sum(t)%3.0)
removeS = int(sum(s)%2.0)
for extra in range(removeT):
edge = random.randint(0,sum(t))
rmIndex = [ x>=edge for x in np.cumsum(t)].index(True)
t[rmIndex] -= 1
for extra in range(removeS):
edge = random.randint(0,sum(s))
rmIndex = [ x>=edge for x in np.cumsum(s)].index(True)
s[rmIndex] -= 1
return [t,s]
| [
11748,
4738,
11,
10688,
11,
299,
32152,
355,
45941,
198,
11748,
3127,
87,
198,
11748,
3127,
87,
13,
26791,
198,
2,
6738,
3127,
87,
13,
8612,
2024,
13,
49421,
1330,
6565,
62,
34960,
198,
198,
4299,
4738,
62,
301,
549,
62,
28461,
9248... | 2.036137 | 1,688 |
# Created by giuseppe
# Date: 22/11/19
from gym.envs.registration import register
register(
id='KukaPush-v0',
entry_point='gym_kuka.envs:KukaPush',
) | [
2,
15622,
416,
308,
72,
1904,
27768,
198,
2,
7536,
25,
2534,
14,
1157,
14,
1129,
198,
198,
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
30238,
7,
198,
220,
220,
220,
4686,
11639,
42,
14852,
49222,
12,
85,
15... | 2.338235 | 68 |
import enum
import typing
from ._types import StrOrBytes
from .exceptions import ProtocolError, SOCKSError
from .utils import (
AddressType,
decode_address,
encode_address,
get_address_port_tuple_from_address,
)
class SOCKS4ReplyCode(bytes, enum.Enum):
"""Enumeration of SOCKS4 reply codes."""
REQUEST_GRANTED = b"\x5A"
REQUEST_REJECTED_OR_FAILED = b"\x5B"
CONNECTION_FAILED = b"\x5C"
AUTHENTICATION_FAILED = b"\x5D"
class SOCKS4Command(bytes, enum.Enum):
"""Enumeration of SOCKS4 command codes."""
CONNECT = b"\x01"
BIND = b"\x02"
class SOCKS4Request(typing.NamedTuple):
"""Encapsulates a request to the SOCKS4 proxy server
Args:
command: The command to request.
port: The port number to connect to on the target host.
addr: IP address of the target host.
user_id: Optional user ID to be included in the request, if not supplied
the user *must* provide one in the packing operation.
"""
command: SOCKS4Command
port: int
addr: bytes
user_id: typing.Optional[bytes] = None
@classmethod
def from_address(
cls,
command: SOCKS4Command,
address: typing.Union[StrOrBytes, typing.Tuple[StrOrBytes, int]],
user_id: typing.Optional[bytes] = None,
) -> "SOCKS4Request":
"""Convenience class method to build an instance from command and address.
Args:
command: The command to request.
address: A string in the form 'HOST:PORT' or a tuple of ip address string
and port number.
user_id: Optional user ID.
Returns:
A SOCKS4Request instance.
Raises:
SOCKSError: If a domain name or IPv6 address was supplied.
"""
address, port = get_address_port_tuple_from_address(address)
atype, encoded_addr = encode_address(address)
if atype != AddressType.IPV4:
raise SOCKSError(
"IPv6 addresses and domain names are not supported by SOCKS4"
)
return cls(command=command, addr=encoded_addr, port=port, user_id=user_id)
def dumps(self, user_id: typing.Optional[bytes] = None) -> bytes:
"""Packs the instance into a raw binary in the appropriate form.
Args:
user_id: Optional user ID as an override, if not provided the instance's
will be used, if none was provided at initialization an error is raised.
Returns:
The packed request.
Raises:
SOCKSError: If no user was specified in this call or on initialization.
"""
user_id = user_id or self.user_id
if user_id is None:
raise SOCKSError("SOCKS4 requires a user_id, none was specified")
return b"".join(
[
b"\x04",
self.command,
(self.port).to_bytes(2, byteorder="big"),
self.addr,
user_id,
b"\x00",
]
)
class SOCKS4ARequest(typing.NamedTuple):
"""Encapsulates a request to the SOCKS4A proxy server
Args:
command: The command to request.
port: The port number to connect to on the target host.
addr: IP address of the target host.
user_id: Optional user ID to be included in the request, if not supplied
the user *must* provide one in the packing operation.
"""
command: SOCKS4Command
port: int
addr: bytes
user_id: typing.Optional[bytes] = None
@classmethod
def from_address(
cls,
command: SOCKS4Command,
address: typing.Union[StrOrBytes, typing.Tuple[StrOrBytes, int]],
user_id: typing.Optional[bytes] = None,
) -> "SOCKS4ARequest":
"""Convenience class method to build an instance from command and address.
Args:
command: The command to request.
address: A string in the form 'HOST:PORT' or a tuple of ip address string
and port number.
user_id: Optional user ID.
Returns:
A SOCKS4ARequest instance.
"""
address, port = get_address_port_tuple_from_address(address)
atype, encoded_addr = encode_address(address)
return cls(command=command, addr=encoded_addr, port=port, user_id=user_id)
def dumps(self, user_id: typing.Optional[bytes] = None) -> bytes:
"""Packs the instance into a raw binary in the appropriate form.
Args:
user_id: Optional user ID as an override, if not provided the instance's
will be used, if none was provided at initialization an error is raised.
Returns:
The packed request.
Raises:
SOCKSError: If no user was specified in this call or on initialization.
"""
user_id = user_id or self.user_id
if user_id is None:
raise SOCKSError("SOCKS4 requires a user_id, none was specified")
return b"".join(
[
b"\x04",
self.command,
(self.port).to_bytes(2, byteorder="big"),
b"\x00\x00\x00\xFF", # arbitrary final non-zero byte
user_id,
b"\x00",
self.addr,
b"\x00",
]
)
class SOCKS4Reply(typing.NamedTuple):
"""Encapsulates a reply from the SOCKS4 proxy server
Args:
reply_code: The code representing the type of reply.
port: The port number returned.
addr: Optional IP address returned.
"""
reply_code: SOCKS4ReplyCode
port: int
addr: typing.Optional[str]
@classmethod
def loads(cls, data: bytes) -> "SOCKS4Reply":
"""Unpacks the reply data into an instance.
Returns:
The unpacked reply instance.
Raises:
ProtocolError: If the data does not match the spec.
"""
if len(data) != 8 or data[0:1] != b"\x00":
raise ProtocolError("Malformed reply")
try:
return cls(
reply_code=SOCKS4ReplyCode(data[1:2]),
port=int.from_bytes(data[2:4], byteorder="big"),
addr=decode_address(AddressType.IPV4, data[4:8]),
)
except ValueError as exc:
raise ProtocolError("Malformed reply") from exc
class SOCKS4Connection:
"""Encapsulates a SOCKS4 and SOCKS4A connection.
Packs request objects into data suitable to be send and unpacks reply
data into their appropriate reply objects.
Args:
user_id: The user ID to be sent as part of the requests.
"""
def send(self, request: typing.Union[SOCKS4Request, SOCKS4ARequest]) -> None:
"""Packs a request object and adds it to the send data buffer.
Args:
request: The request instance to be packed.
"""
user_id = request.user_id or self.user_id
self._data_to_send += request.dumps(user_id=user_id)
def receive_data(self, data: bytes) -> SOCKS4Reply:
"""Unpacks response data into a reply object.
Args:
data: The raw response data from the proxy server.
Returns:
The appropriate reply object.
"""
self._received_data += data
return SOCKS4Reply.loads(bytes(self._received_data))
def data_to_send(self) -> bytes:
"""Returns the data to be sent via the I/O library of choice.
Also clears the connection's buffer.
"""
data = bytes(self._data_to_send)
self._data_to_send = bytearray()
return data
| [
11748,
33829,
198,
11748,
19720,
198,
198,
6738,
47540,
19199,
1330,
4285,
5574,
45992,
198,
6738,
764,
1069,
11755,
1330,
20497,
12331,
11,
311,
11290,
5188,
81,
1472,
198,
6738,
764,
26791,
1330,
357,
198,
220,
220,
220,
17917,
6030,
... | 2.252125 | 3,411 |
f = open('Conventions.txt','r')
message = f.read()
print(message)
f.close()
print (message.splitlines())
| [
69,
796,
1280,
10786,
3103,
16593,
13,
14116,
41707,
81,
11537,
198,
20500,
796,
277,
13,
961,
3419,
198,
4798,
7,
20500,
8,
198,
69,
13,
19836,
3419,
198,
198,
4798,
357,
20500,
13,
35312,
6615,
28955,
198,
220,
198
] | 2.7 | 40 |
"""This module implements the security group helper module for AWSHeet that is aimed at providing idempotent AWS EC2 security groups.
Currently, it only supports Security Groups that are in a VPC.
Rules are created using the SecurityGroupRule type and then they are collected together inside an iterable (usually a set is used).
This collection of rules is then passed along to the SecurityGroupHelper constructor which is also passed a name and a description.
Example 1 - give access to a specified list of IP addresses:
#create a new security group that gives access to the following ips to port 80
cidr_ips = ['192.168.0.1/32', '10.10.11.12/24', '155.246.0.0/16']
http_port = 80
rules_for_new_security_group = set()
for cidr_ip_x in cidr_ips:
rules_for_new_security_group.add(SecurityGroupRule(ip_protocol='tcp', from_port=http_port, to_port=http_port, cidr_ip=cidr_ip_x, src_group=None))
new_security_group = SecurityGroupHelper(name='New Test Group', description='just a simple example group', rules=rules_for_new_security_group)
Example 2 - give two seperate security groups mutual access to SSH / port 22:
sg1_rules = set() #- the set of rules for the first security group
sg2_rules = set() #- the set of rules for the second security group
#- a shared rule based on IP address and ICMP
all_icmp = SecurityGroupRule(ip_protocol='icmp', from_port=-1, to_port=-1, cidr_ip='0.0.0.0/0', src_group=None)
sg1_rules.add(all_icmp)
sg2_rules.add(all_icmp)
#- use an '@' symbol in the src_group name to specify a group by name, even if the group doesn't exist yet
sg1_rules.add(SecurtyGroupRule(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=None, src_group='@securityGroup2'))
sg2_rules.add(SecurtyGroupRule(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=None, src_group='@securityGroup1'))
#- create the actual groups
sg1 = SecurityGroupHelper(name='securityGroup1', description='example group 1', rules=sg1_rules)
sg2 = SecurityGroupHelper(name='securityGroup2', description='example group 2', rules=sg2_rules)
#- program exits
#- at program exit, the remaining dependencies will now be converged
#- this is an easy way of forward referencing when you create the rules so that the referenced groups
#- don't have to exist at that time, as long as they are created within the lifetime of the same program
"""
from .awshelper import AWSHelper
import time
import re
import os
import json
import subprocess
import tempfile
import argparse
import sys
import logging
import atexit
import boto
import boto.ec2
import boto.ec2.elb
import boto.cloudformation
import collections
import ipaddress
import boto.exception
import copy
#- used to wait between successive API calls
AWS_API_COOLDOWN_PERIOD = 1.0
#TODO: IMPLEMENT TAGGING
#- no need for a full class. These are simple tuples
#- TODO: actually having rules as immutables makes normalization more complex.
#- refactor this particular tuple into its own class and define rules of
#- interaction between security groups and rules they contain
#- as rules themselves do need access to the heet object and to the boto_sg
#- to perform some aspects of normalization
SecurityGroupRule = collections.namedtuple('SecurityGroupRule', ['ip_protocol', 'from_port', 'to_port', 'cidr_ip', 'src_group'])
#- rm_group: only try to delete the group, fail if the API call fails
#- rm_instances: delete all the instances in this group before attempting deletion of this security group
#- rm_enis: delete all of the Elastic Network Interfaces in this security group before attempting deletion of this security group
SecurityGroupDeleteMode = collections.namedtuple('SecurityGroupDeleteMode', ['rm_group', 'rm_instances', 'rm_enis'])
#- this defines the identity of the security group to Heet Code
#- as long as none of these change, we will converge the same AWS resource
#- VPC ID
#- Heet Project Name (Base Name / the name of the script)
#- Heet Environment (usually, testing, staging or production)
#- Security Group Name
SgTag = collections.namedtuple('SecurityGroupIDTag',[ 'environment', 'project_name', 'vpc_id', 'sg_name'])
class SecurityGroupHelper(AWSHelper):
"""modular and convergent security groups in VPC (and only in VPC)
Params"""
def normalize_aws_sg_rules(self, aws_sg):
"""AWS has grants and rules, but we work with them as a logical unit.
The rules have the ip_protocol, from_port, to_port while the grants have the remaining parameters,
which are the mutually exclusive group_id or cidr_ip parameters
Also normalize sg-ids that are references to 'self'
and convert the security group IDs to resource references for SGs in this module"""
boto_self = self.get_resource_object()
normalized_rules = set()
if aws_sg is not None:
for rule in aws_sg.rules:
for grant in rule.grants:
normalized_group_id = grant.group_id
rule = SecurityGroupRule(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, normalized_group_id)
#- be sure that we are always comparing similarly normalized rules
#- apply self.normalize_rule to API returned rules as well
normalized_rules.add(self.normalize_rule(rule))
return normalized_rules
def get_resource_object(self):
"""Get or create the Boto Version of this security group from EC2 via API"""
boto_group = None
#- build the tag and find it by tag
(tag_name, tag_value) = self.heet_id_tag
matching_groups = self.conn.get_all_security_groups(filters={'tag-key' : tag_name, 'tag-value' :tag_value})
if matching_groups:
#- if there's more than one security group in the same project and environment with the same name,
#- this is worthy of logging an error as it isn't expected
if len(matching_groups) > 1:
self.heet.logger.warn("multiple security groups returned!: search tag:[{}: {}]".format(tag_name, tag_value))
boto_group = matching_groups[0]
self.aws_id = boto_group.id
return boto_group
def get_or_create_resource_object(self):
"""Get or create the Boto Version of this security group from EC2 via API"""
(tag_name, tag_value) = self.heet_id_tag
boto_group = self.get_resource_object()
if not boto_group and not self.heet.args.destroy:
#- it doesn't exist yet
try:
self.heet.logger.debug('get_or_create_resource_object: creating new security group')
boto_group = self.conn.create_security_group(name=self.aws_name, description=self.description, vpc_id=self.vpc_id)
except boto.exception.EC2ResponseError as err:
print 'AWS EC2 API error: {} ({})'.format(err.message, err)
return None
self.heet.logger.debug('get_or_create_resource_object: successfully created new security group, waiting to tag')
time.sleep(AWS_API_COOLDOWN_PERIOD)
self.heet.logger.debug('get_or_create_resource_object: tagging new security group: [{}:{}]'.format(tag_name, tag_value))
try:
#- sometimes a short sleep isn't enough, and we really don't want to exit before tagging
#- as that makes the next convergence cycle fail until the group is deleted manually.
boto_group.add_tag(key=tag_name, value=tag_value)
self.heet.logger.debug('get_or_create_resource_object: successfully created new tagged group.')
self.aws_id = boto_group.id
except boto.exception.EC2ResponseError as err:
if err.code == 'InvalidGroup.NotFound':
self.heet.logger.debug('get_or_create_resource: setting ID tag failed. Waiting to try again...')
time.sleep(3)
boto_self.add_tag(key=tag_name, value=tag_value)
else:
raise err
return boto_group
def make_key_from_rule(self, rule):
"""Just join all the things together to make a unique string"""
key = '/'.join([str(rule.ip_protocol), str(rule.from_port), str(rule.to_port), str(rule.cidr_ip), str(rule.src_group)])
return key
def get_src_group_from_key(self, key):
"""Just undo make_key_from_rule to get the source group"""
return key.split('/')[-1]
def rule_fails_check(self, rule):
"""Checks that the rule has all the needed attributes
Returns a list of strings with error messages for each test the rule failed.
If it passes, then the list will be empty.
As well, this populates self.src_group_references dict"""
#- a list of all the ways that the rule has failed
rule_status = []
if str(rule.ip_protocol) not in ['tcp','udp', 'icmp', '-1']:
rule_status.append('bad value for ip_protocol in rule {}'.format(str(rule)))
#- try to convert to float to check if it is a valid port number
try:
if rule.from_port is not None and rule.from_port < 0 and rule.from_port != -1:
rule_status.append('rule from_port is a negative number that is not -1: [{}]'.format(rule.from_port))
raise TypeError()
float(rule.from_port)
except TypeError as err:
if rule.from_port is None:
pass
else:
rule_status.append('rule from port is not a valid integer')
try:
if rule.to_port is not None and rule.to_port < 0 and rule.to_port != -1:
rule_status.append('rule to_port is a negative number that is not -1: [{}]'.format(rule.to_port))
raise TypeError()
float(rule.to_port)
except TypeError as err:
if rule.to_port is None:
pass
else:
rule_status.append('rule to port is not a valid integer')
#- Check the (.cidr_ip, .src_group) pair compliance
#- need to have exactly one of src_group, cidr_ip
if rule.cidr_ip is not None:
#self.heet.logger.debug(' ^^^ rule has cidr_ip')
if rule.src_group is not None:
self.heet.logger.debug(' ^^^ rule has both cidr_ip and src_group')
rule_status.append('Can\'t have both cidr_ip and src_group set simultaneously: rule {}'.format(str(rule)))
else:
#self.heet.logger.debug(' ^^^ rule has only cidr_ip')
#- test the cidr_ip
try:
ipaddress.IPv4Network(unicode(rule.cidr_ip))
except ValueError as err:
#self.heet.logger.debug(' ^^^ rule has invalid cidr_ip')
rule_status.append('rule has an invalid cidr_ip value: [{}]'.format(rule.cidr_ip))
elif rule.cidr_ip is None and rule.src_group is None:
#self.heet.logger.debug(' ^^^ rule has neither cidr_ip nor src_group')
rule_status.append('Must specify one or other of [cidr_ip, src_group]')
else:
if rule.src_group == 'self':
#self.heet.logger.debug(' ^^^ rule src_group refers to "self"')
boto_self = self.get_or_create_resource_object()
if not boto_self:
return
self.src_group_references[boto_self.id] = boto_self
elif rule.src_group != 'self' and not self.rule_has_dependent_reference(rule):
#self.heet.logger.debug('^^^ rule that references AWS SG directly: {}'.format(rule.src_group))
#- get the boto object for the reference security group so we
#- can pass that object into boto's authorize() method
src_group_resource = self.conn.get_all_security_groups(group_ids=rule.src_group)
if len(src_group_resource) <= 0:
#self.heet.logger.debug('^^^ rule references another security group ID [{}] that doesn\'t exist'.format(rule.src_group))
rule_status.append('References another security group ID [{}] that doesn\'t exist'.format(rule.src_group))
else:
self.src_group_references[rule.src_group] = src_group_resource[0]
self.heet.logger.debug('added src_group_references[{}]'.format(rule.src_group))
elif self.heet.is_resource_ref(rule.src_group):
#- this is a reference to another heet security group helper object
#- we should make sure that this actually exists before saying its okay
#- but we can only do that after we have a comprehensive list of all the
#- security groups to be created, which we will only have at the end of the
#- program.
#- So here, we add this name to a list of things which will be done at exit.
#self.heet.logger.debug('^^^ rule seems to be a new style resource reference.')
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
return rule_status
def is_aws_reference(self, src_group):
"""Check if the src_group argument looks like an AWS security group ID
Just means the first three characters are 'sg-'"""
is_ref = False
if src_group and src_group[0] == 's' and src_group[1] == 'g' and src_group[2] == '-' and len(src_group.split('-')) == 2:
is_ref = True
return is_ref
def get_boto_src_group(self, src_group):
"""src_group can be:
* @resource-reference
* 'sg-xxxxxxx'
Return a boto object that can be used in authorize / revoke"""
boto_sg = None
if self.heet.is_resource_ref(src_group):
self.heet.logger.debug('get_boto_src_group: will try to look [{}] up as heet resource ref'.format(src_group))
try:
rr = self.heet.resource_refs[src_group]
boto_sg = rr.get_resource_object()
except KeyError as err:
self.heet.logger.debug('get_boto_src_group: failed to lookup [{}] in heet resource refs table'.format(src_group))
boto_sg = None
elif self.is_aws_reference(src_group):
self.heet.logger.debug('get_boto_src_group: will try to retrieve sg id [{}] from AWS API'.format(src_group))
#XXX we should actually get it by tag
# move create tag to be a utility function
#(tag_name, tag_value) = self.heet_id_tag
#matching_groups = self.conn.get_all_security_groups(filters={'tag-key' : tag_name, 'tag-value' :tag_value})
matching_groups = self.conn.get_all_security_groups(group_ids=[src_group])
if not matching_groups:
self.heet.logger.debug('get_boto_src_group: aws returned no groups with tag ([{}],[{}])'.format(tag_name, tag_value))
boto_sg = None
else:
self.heet.logger.debug('get_boto_src_group: aws returned matching group')
boto_sg = matching_groups[0]
else:
self.heet.logger.debug('get_boto_src_group: can not tell what type of src_group format this is: [{}]'.format(src_group))
boto_sg = None
return boto_sg
    def normalize_rule(self, rule):
        """Normalize SecurityGroupRule attributes that can have multiple values representing the same thing into one well-defined value

        Normalizations applied:
          * src_group 'self' is replaced with this group's own AWS id
          * a resolvable '@' resource reference is replaced with the referenced group's AWS id
            (an unresolvable one is left as-is for the 2nd pass at module exit)
          * ip_protocol -1 (int) becomes the string '-1'
          * a port range with -1 or None on either end becomes (None, None),
            which is what the AWS API reports back
        Returns a new SecurityGroupRule; the input rule is never mutated.
        """
        #- make a mutable copy
        new_rule = {'ip_protocol' : rule.ip_protocol,
            'from_port' : rule.from_port,
            'to_port' : rule.to_port,
            'cidr_ip' : rule.cidr_ip,
            'src_group' : rule.src_group }
        #- just go through and normalize all the values one by one and make a new rule at the end
        #- out of all the stuff we collect throughout the normalization tests
        if new_rule['src_group'] == 'self':
            #- normalize_rule called from add_rules which is called from init, so we may not exist: call get_or_create.
            boto_self = self.get_or_create_resource_object()
            if not boto_self:
                #- cannot resolve 'self' yet: give back the rule unchanged
                return rule
            new_rule['src_group'] = boto_self.id
        if self.heet.is_resource_ref(new_rule['src_group']):
            try:
                #- try to look it up
                self.heet.logger.debug('Normalizing resource_reference: {}'.format(rule.src_group))
                #boto_sg = self.heet.resource_refs[new_rule['src_group']].get_resource_object()
                boto_sg = self.get_boto_src_group(rule.src_group)
                if boto_sg:
                    self.heet.logger.debug('*** resolved resource_reference: {}'.format(rule.src_group))
                    self.heet.logger.debug('*** adding local resource_reference: {}'.format(rule.src_group))
                    #- cache the resolved group so converge() can authorize by id
                    self.src_group_references[boto_sg.id] = boto_sg
                    new_rule['src_group'] = boto_sg.id
                else:
                    self.heet.logger.debug('normalize_rule: get_resource_object returned nothing for group: {}.'.format(rule.src_group))
            except KeyError as err:
                self.heet.logger.debug('*** normalize_rule: resource_reference not found: {}, will handle in 2nd pass'.format(rule.src_group))
                #- it wasn't in the reference table yet,
                #- we'll handle this in converge() and converge_dependency()
                pass
        if rule.ip_protocol == -1:
            self.heet.logger.debug('Normalizing ip_protocol: {} to str(-1)'.format(rule.ip_protocol))
            new_rule['ip_protocol'] = '-1'
        #- we check for None explicitly also to short-circuit else the int() will fail w/ TypeError and we want it to pass
        if new_rule['from_port'] is None or new_rule['to_port'] is None or int(new_rule['from_port']) == -1 or int(new_rule['to_port']) == -1:
            #self.heet.logger.debug('Normalizing port range: {} .. {} to [None .. None]'.format(rule.from_port, rule.to_port))
            new_rule['from_port'] = None
            new_rule['to_port'] = None
        final_rule = SecurityGroupRule(new_rule['ip_protocol'], new_rule['from_port'], new_rule['to_port'], new_rule['cidr_ip'], new_rule['src_group'])
        return final_rule
def add_rule(self, rule):
"""Print out why a rule fails to be added, else add a rule to this security group
Rule will be normalized and added to one of two lists of rules:
One group is for rules that can be converged immediately
(those ones have no src_group resource references)
The other group is for rules that will be converged after the resource
reference table has been built
"""
normalized_rule = self.normalize_rule(rule)
failures = self.rule_fails_check(normalized_rule)
if not failures:
self.rules.add(normalized_rule)
else:
for err in failures:
self.heet.logger.error('Security Group failed sanity checks: ')
self.heet.logger.error(' : ' + err)
return
def build_heet_id_tag(self):
"""The tag is what defines a security group as a unique component of heet code
This format has the following consequences:
* you can change the id of a security group and still converge
* you can not converge across projects, environments or sgs with different names, or different VPCs
* you can change the rules of an SG and converge"""
sg_tag = SgTag(self.heet.get_environment(), self.heet.base_name, self.vpc_id, self.aws_name)
tag_value = '/'.join(sg_tag)
tag_name = 'AWSHeet'
return (tag_name, tag_value)
def build_aws_name(self, base_name):
"""The name of the security group is basically the Tag concatenated in order, minus the vpc id
NB: AWS only determines SG uniqueness by (VPC_ID, SG Name), so if you want the same code for different environments,
you have to add some additional environment-specific info to the name"""
return '-'.join([self.heet.get_environment(), self.heet.base_name, base_name])
    def rule_has_dependent_reference(self, rule):
        """Check if the rule refers to a security group that is another Heet object
        For now, we do that by passing in the heet base_name of the group prefixed with an '@'
        Such rules cannot be converged until the whole script has run and the
        resource reference table is complete, so converge() defers them."""
        return self.heet.is_resource_ref(rule.src_group)
    def base_name_to_ref(self, base_name):
        """Converts the Heet Script's SG base name into a name reference.
        Currently, this just means that it is prepended with an '@'.
        Inverse of ref_to_base_name()."""
        return '@' + base_name
def ref_to_base_name(self,base_name):
"""The opposite of the above."""
if base_name[0] == '@':
return base_name[1:]
else:
self.heet.logger.error("Trying to dereference a SG name that isn't a reference: {}".format(base_name))
return None
def converge(self):
"""Adds missing rules, revokes extra rules, creates entire group if necessary
if the rule can't be converged yet (due to an unresolveable resource reference,
we'll let heet know to call us at the module exit time and re-try via converge_dependency()
when we have the full module resource reference table"""
self.heet.logger.info("Converging security group: %s" % self.aws_name)
boto_self = self.get_resource_object()
if boto_self is None:
self.heet.logger.debug("Creating new group: %s" % self.aws_name)
boto_self = self.conn.create_security_group(self.aws_name, self.description, self.vpc_id)
self.aws_id = boto_self.id
remote_rules = set()
(tag_name,tag_value) = self.heet_id_tag
try:
boto_self.add_tag(key=tag_name, value=tag_value)
except boto.exception.EC2ResponseError as err:
if err.code == 'InvalidGroup.NotFound':
#- wait for API consistency - sleep momentarily before adding tag
self.heet.logger.debug('converge: set_tag failed due to SG not found. Waiting a moment then trying again.')
time.sleep(3)
boto_self.add_tag(key=tag_name, value=tag_value)
else:
self.heet.logger.debug("Using pre-existing group: %s" % self.aws_name)
self.aws_id = boto_self.id
remote_rules = set(self.normalize_aws_sg_rules(boto_self))
self.src_group_references['self'] = boto_self
self.src_group_references[boto_self.id] = boto_self
if self.rules:
desired_rules = set(self.rules)
else:
desired_rules = set()
for rule in desired_rules:
#- if it isn't there, add it
if rule in remote_rules:
#- the rule we want to add is already there, so skip it
self.heet.logger.debug("Already Authorized: %s on %s" % (rule, self))
else:
if rule.src_group:
#- check if this rule can be converged now or later
if self.rule_has_dependent_reference(rule):
self.heet.logger.debug("-- Rule refers to another Heet group. Will converge_dependency() atexit: {}".format(rule))
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
elif self.is_aws_reference(rule.src_group):
#- use the src_group object we already got when we checked the rule
self.heet.logger.info("Adding Authorization: %s on %s" % (rule, self))
try:
boto_self.authorize(rule.ip_protocol,rule.from_port, rule.to_port,rule.cidr_ip, self.src_group_references[rule.src_group])
except KeyError as err:
print ""
print ""
print 'FATAL ERROR: key error in src_group_references. looked for [{}] in:'.format(rule.src_group)
print self.src_group_references
print ""
print ""
os._exit(-1)
else:
print "Unexpected Rule format: {}".format(rule)
raise AttributeError('Source Group reference can NOT be converged')
else:
boto_self.authorize(rule.ip_protocol,rule.from_port, rule.to_port,rule.cidr_ip)
#- remove all the rules that we didn't explicitly declare we want in this group
#- if they can currently be resolved (can only resolve names present in the resource reference table at the moment
#- of execution. )
#- any desired rule that is still in resource reference form because it couldn't be resolved yet will not match
#- anything, so we remove all the resource reference rules from the desired rules before comparison
desired_rules_copy = copy.copy(desired_rules)
for rule in desired_rules_copy:
if self.rule_has_dependent_reference(rule):
desired_rules.discard(rule)
for rule in remote_rules:
if rule not in desired_rules:
if self.is_aws_reference(rule.src_group):
#- skip this rule for now
self.heet.logger.debug('converge: skipping rule with aws sg id: [{}]'.format(rule))
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
#- continue looping, but skip this rule now that we've registered it for convergence at exit
continue
else:
self.heet.logger.info("Removing remote rule not declared locally: {} in {}".format(rule, self))
print ""
print ""
print "DEBUG: removing rule"
print "remote: "
print str(remote_rules)
print ""
print "current rule being tested: "
print str(rule)
print ""
print "desired rules: "
print str(desired_rules)
print ""
print ""
#- boto-specific: get the referring security group boto-level object to delete this rule
#- TODO: this may be redundant if normalization strips the boto object for the src_group
#- as I'm resolving here. This isn't necessary if the pre-normalized rule has the object in it
ref_sg = None
if rule.src_group is not None:
if rule.src_group == 'self':
ref_sg = [self.get_or_create_resource_object()]
elif self.is_aws_reference(rule.src_group):
ref_sg = self.conn.get_all_security_groups(group_ids=rule.src_group)
if len(ref_sg) >= 1:
ref_sg = ref_sg[0]
else:
self.heet.logger.error("Rule to delete references another Security Group that no longer exists. Will fail...")
reg_sg = None
if rule.src_group is not None and ref_sg is None:
#- if we didn't just find it, skip it for now
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
else:
boto_self.revoke(rule.ip_protocol, rule.from_port, rule.to_port, rule.cidr_ip, ref_sg)
#- Post Converge Hook
self.post_converge_hook()
    def converge_dependent_add_rule(self, init_rule):
        """Called from converge_dependency for the rules that needed to be added
        but used a resource reference that couldn't yet be resolved on first pass in converge()

        By module-exit time the resource reference table is complete, so the
        reference is resolved here, the rule re-normalized, and authorized
        only if an equivalent rule is not already present remotely.
        """
        boto_self = self.get_resource_object()
        resource_name = init_rule.src_group
        boto_src_group = self.heet.resource_refs[resource_name].get_resource_object()
        #- TODO: clean this up
        #- we need the ID for comparisons, but we need the object for the API call
        #- and we start with a resource reference
        new_rule = SecurityGroupRule(init_rule.ip_protocol,
                                     init_rule.from_port,
                                     init_rule.to_port,
                                     init_rule.cidr_ip,
                                     boto_src_group.id)
        #- normalized_rule carries the group *id*, comparable against remote rules
        normalized_rule = self.normalize_rule(new_rule)
        #- final_rule carries the boto *object*, which is what authorize() expects
        final_rule = SecurityGroupRule(normalized_rule.ip_protocol,
                                       normalized_rule.from_port,
                                       normalized_rule.to_port,
                                       normalized_rule.cidr_ip,
                                       boto_src_group)
        remote_rules = self.normalize_aws_sg_rules(boto_self)
        if normalized_rule not in remote_rules:
            boto_self.authorize(final_rule.ip_protocol, final_rule.from_port, final_rule.to_port, final_rule.cidr_ip, final_rule.src_group)
            #- throttle to stay under AWS API rate limits
            time.sleep(AWS_API_COOLDOWN_PERIOD)
        return
def converge_dependent_remove_test(self, remote_rule):
"""Take this rule that has an AWS SG ID and is an existing remote rule and now check if this rule is a desired rule or not."""
#- first take all the current desired rules and re-normalize them so the resource references will be looked up
boto_self = self.get_resource_object()
desired_rules = set()
for rule_x in self.rules:
desired_rules.add(self.normalize_rule(rule_x))
if remote_rule not in desired_rules:
self.heet.logger.debug('converge_dependent_remove_test: removing rule [{}]'.format(remote_rule))
boto_src_group = self.get_boto_src_group(remote_rule.src_group)
boto_self.revoke(remote_rule.ip_protocol, remote_rule.from_port, remote_rule.to_port, remote_rule.cidr_ip, boto_src_group)
return
    def converge_dependency(self, key):
        """converge_at_exit: this convergence pattern is different than the single-call of converge.
        converge_dependency will be called once for every rule that needed to be converged at exit.
        This is where we converge the rules that refer to other security groups that are declared in the same AWSHeet module
        Dependencies here is any security group rule that referenced another Heet group that is being declared in this script.
        If it is the first time the group is created, the referenced group will not exist yet, and so the rule will fail convergence.
        So, to keep it simple, any group that refers to another group in a Heet script will be put off to be converged after we are
        sure that the creation of the rule should not fail unless there has been an actual error.

        :param key: a key previously registered in self.dependent_rules; either
                    a rule key (see make_key_from_rule) or 'DESTROY_AGAIN' for
                    a deferred group deletion retry.
        """
        self.heet.logger.debug("----CONVERGE_DEPENDENCY() {}: {}---- {} of {} rules to process".format(self.base_name, key, self._num_converged_dependencies+1, len(self.dependent_rules)))
        self._num_converged_dependencies += 1
        boto_self = self.get_resource_object()
        if not boto_self:
            self.heet.logger.debug('converge_dependency: no boto_object found. returning without issuing any API calls')
            return
        #- lookup the rule as it was when we saved it
        #- (for 'DESTROY_AGAIN' this is just the 'placeholder' marker)
        init_rule = self.dependent_rules[key]
        #- grab the group we need from the resource references
        if key == 'DESTROY_AGAIN':
            self.heet.logger.debug('converge_dependency: destroying 2nd round')
            self.destroy()
        else:
            src_group_name = self.get_src_group_from_key(key)
            if self.heet.is_resource_ref(src_group_name):
                #- a bit opaque, but resource references are only called for rules that are trying
                #- to be added, so we know if we see a resource reference here that this rule was
                #- trying to be added and failed due to a resource reference being unable to be resolved
                self.heet.logger.debug('converge_dependency: add_rule detected: [{}]'.format(init_rule))
                self.converge_dependent_add_rule(init_rule)
            elif self.is_aws_reference(src_group_name):
                #- equally opaque, the only other rules we register to be called back for are rules
                #- that existed remotely that referred to an AWS ID that we couldn't look up at the time
                #- that we needed to check if it should be removed or not
                self.heet.logger.debug('converge_dependency: remove_test detected: [{}]'.format(init_rule))
                self.converge_dependent_remove_test(init_rule)
        return
    def destroy(self):
        """Try to remove everything from existence.

        Any rule that references another security group is revoked first,
        because AWS refuses to delete a group that is still referenced.
        If the delete still fails (e.g. another group still references this
        one), a 'DESTROY_AGAIN' callback is registered so the deletion is
        retried at module-exit time, after the other groups' rules are gone.
        """
        boto_self = self.get_resource_object()
        if not boto_self:
            self.heet.logger.debug("destroy [{}]: no resource object found, returning without any API calls.".format(self.base_name))
            return
        #- Pre Destroy Hook
        self.pre_destroy_hook()
        self.heet.logger.info("deleting SecurityGroup [{}]".format(self.aws_name))
        #- first delete any src_group rules so the group can be deleted
        self.heet.logger.debug('destroy [{}]: testing [{}] rules to remove ones w/ src_groups'.format(self.aws_name, len(boto_self.rules)))
        #- deep-copy because revoke() mutates boto_self.rules while we iterate
        rules_copy = copy.deepcopy(boto_self.rules)
        if isinstance(rules_copy, collections.Iterable) and len(rules_copy) > 0:
            for boto_rule in rules_copy:
                self.heet.logger.debug('destroy [{}]: testing rule for src_group: [{}]'.format(self.aws_name, boto_rule))
                for boto_grant in boto_rule.grants:
                    if boto_grant.group_id is not None:
                        self.heet.logger.debug('destroy [{}]: found rule with group_id: [{}]'.format(self.aws_name, boto_grant.group_id))
                        try:
                            src_group_ref = self.conn.get_all_security_groups(group_ids=[boto_grant.group_id])[0]
                            self.heet.logger.debug('destroy [{}]: removing rule with src_group to remove group.({}:{})'.format(self.aws_name, boto_grant.group_id, src_group_ref.name))
                            boto_self.revoke(boto_rule.ip_protocol, boto_rule.from_port, boto_rule.to_port, boto_grant.cidr_ip, src_group_ref)
                            #- throttle to stay under AWS API rate limits
                            time.sleep(AWS_API_COOLDOWN_PERIOD)
                        except boto.exception.EC2ResponseError as err:
                            #- best effort: log and keep trying the remaining rules
                            self.heet.logger.debug('destroy [{}]: failed to remove rule: [{}]'.format(self.aws_name, err.message))
        self.heet.logger.debug('destroy [{}]: done removing rules.'.format(self.aws_name))
        try:
            time.sleep(AWS_API_COOLDOWN_PERIOD)
            boto_self.delete()
            self.heet.logger.info('Successfully deleted group {}.'.format(self.aws_name))
        except boto.exception.EC2ResponseError as err:
            if 'DESTROY_AGAIN' in self.dependent_rules:
                #- already retried once; give up and report
                self.heet.logger.info("*** Unable to delete {}. {}".format(self.aws_name, err.message))
            else:
                #- try again after all the other groups rules are deleted
                self.heet.add_dependent_resource(self, 'DESTROY_AGAIN')
                self.dependent_rules['DESTROY_AGAIN'] = 'placeholder'
        return
| [
37811,
1212,
8265,
23986,
262,
2324,
1448,
31904,
8265,
329,
30865,
1544,
316,
326,
318,
8998,
379,
4955,
4686,
368,
13059,
298,
30865,
13182,
17,
2324,
2628,
13,
198,
21327,
11,
340,
691,
6971,
4765,
27441,
326,
389,
287,
257,
569,
5... | 2.28089 | 16,138 |
from urllib.request import urlopen
if __name__ == '__main__':
    # Compare solve() against the expected answers, pairing input line i
    # with output line i; report every mismatch.
    expected = read('output')
    for position, raw_value in enumerate(read('input')):
        answer = solve(int(raw_value))
        if expected[position] != answer:
            print('Failed for value n = {}'.format(raw_value))
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5072,
62,
4868,
796,
1100,
10786,
22915,
11537,
198,
220,
220,
220,
329,
6376,
11,
77,
... | 2.436975 | 119 |
# coding: utf-8
from __future__ import division
import unicodedata, math, re, sys, string, os, ntpath, numpy as np
from time import gmtime, strftime
from io import open, StringIO
from imp import reload
from difflib import SequenceMatcher
try:
    #- Python 2: use the lazy iterator version of zip() from itertools
    from itertools import izip
except ImportError:
    #- Python 3: zip() is already lazy and itertools.izip no longer exists
    izip = zip
#- matches one run of word characters; used to split text into word tokens
WORD = re.compile(r'\w+')
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
28000,
9043,
1045,
11,
10688,
11,
302,
11,
25064,
11,
4731,
11,
28686,
11,
299,
83,
6978,
11,
299,
32152,
355,
45941,
198,
6738,
640,
1330,
308,
... | 2.938596 | 114 |
# Copyright (C) 2019 Simon Biggs
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version (the "AGPL-3.0+").
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License and the additional terms for more
# details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ADDITIONAL TERMS are also included as allowed by Section 7 of the GNU
# Affero General Public License. These additional terms are Sections 1, 5,
# 6, 7, 8, and 9 from the Apache License, Version 2.0 (the "Apache-2.0")
# where all references to the definition "License" are instead defined to
# mean the AGPL-3.0+.
# You should have received a copy of the Apache-2.0 along with this
# program. If not, see <http://www.apache.org/licenses/LICENSE-2.0>.
from .packages import draw_packages
from .directories import draw_directory_modules
from .files import draw_file_modules
| [
2,
15069,
357,
34,
8,
13130,
11288,
4403,
14542,
198,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
286,
262,
22961,
6708,
3529,
3611,
5094,
13789,
355,
3199,
... | 3.656425 | 358 |
from datetime import datetime
from yaml import Dumper, dump
from webbrowser import open_new_tab
from pathlib import Path
from yattag import Doc, indent
from ingest_validation_tools.message_munger import munge
# Force PyYAML's Dumper to inline repeated objects instead of emitting
# anchor/alias syntax (&id001 / *id001), which reads badly in generated docs.
# https://stackoverflow.com/questions/13518819/avoid-references-in-pyyaml
Dumper.ignore_aliases = lambda *args: True
def _build_list(anything, path=None):
'''
>>> flat = _build_list({
... 'nested dict': {
... 'like': 'this'
... },
... 'nested array': [
... 'like',
... 'this'
... ],
... 'string': 'like this',
... 'number': 42
... })
>>> print('\\n'.join(flat))
nested dict: like: this
nested array: like
nested array: this
string: like this
number: 42
'''
prefix = f'{path}: ' if path else ''
if isinstance(anything, dict):
if all(isinstance(v, (float, int, str)) for v in anything.values()):
return [f'{prefix}{k}: {v}' for k, v in anything.items()]
else:
to_return = []
for k, v in anything.items():
to_return += _build_list(v, path=f'{prefix}{k}')
return to_return
elif isinstance(anything, list):
if all(isinstance(v, (float, int, str)) for v in anything):
return [f'{prefix}{v}' for v in anything]
else:
to_return = []
for v in anything:
to_return += _build_list(v, path=path)
return to_return
else:
return [f'{prefix}{anything}']
def _build_doc(tag, line, anything):
'''
>>> doc, tag, text, line = Doc().ttl()
>>> _build_doc(tag, line, {
... 'nested dict': {
... 'like': 'this'
... },
... 'nested array': [
... 'like',
... 'this'
... ]
... })
>>> print(indent(doc.getvalue()))
<details>
<summary>nested dict</summary>
<dl>
<dt>like</dt>
<dd>this</dd>
</dl>
</details>
<details>
<summary>nested array</summary>
<ul>
<li>like</li>
<li>this</li>
</ul>
</details>
'''
if isinstance(anything, dict):
if all(isinstance(v, (float, int, str)) for v in anything.values()):
with tag('dl'):
for k, v in anything.items():
line('dt', k)
line('dd', v)
else:
for k, v in anything.items():
with tag('details'):
line('summary', k)
_build_doc(tag, line, v)
elif isinstance(anything, list):
if all(isinstance(v, (float, int, str)) for v in anything):
with tag('ul'):
for v in anything:
line('li', v)
else:
for v in anything:
_build_doc(tag, line, v)
else:
line('div', anything)
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
331,
43695,
1330,
360,
15829,
11,
10285,
198,
6738,
3992,
40259,
1330,
1280,
62,
3605,
62,
8658,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
331,
1078,
363,
1330,
14432,
11,
33793,
198,
... | 2.004723 | 1,482 |
import argparse
import os
from multiprocessing import Pool
import logging
import random
import copy
import tensorflow as tf
from generic.data_provider.iterator import BasicIterator
from generic.tf_utils.evaluator import Evaluator
from generic.data_provider.image_loader import get_img_builder
from guesswhat.models.oracle.oracle_network import OracleNetwork
from guesswhat.models.qgen.qgen_lstm_network import QGenNetworkLSTM
from guesswhat.models.guesser.guesser_network import GuesserNetwork
from guesswhat.models.looper.basic_looper import BasicLooper
from guesswhat.models.qgen.qgen_wrapper import QGenWrapper, QGenUserWrapper
from guesswhat.models.oracle.oracle_wrapper import OracleWrapper, OracleUserWrapper
from guesswhat.models.guesser.guesser_wrapper import GuesserWrapper, GuesserUserWrapper
from guesswhat.data_provider.guesswhat_dataset import Dataset
from guesswhat.data_provider.looper_batchifier import LooperBatchifier
from guesswhat.data_provider.guesswhat_tokenizer import GWTokenizer
from generic.utils.config import load_config, get_config_from_xp
if __name__ == '__main__':
parser = argparse.ArgumentParser('Question generator (policy gradient baseline))')
parser.add_argument("-data_dir", type=str, required=True, help="Directory with data")
parser.add_argument("-img_dir", type=str, help='Directory with images to feed networks')
parser.add_argument("-img_raw_dir", type=str, help='Directory with images to display')
parser.add_argument("-crop_dir", type=str, help='Directory with crops')
parser.add_argument("-exp_dir", type=str, required=False, help="Directory to output dialogue")
parser.add_argument("-config", type=str, required=True, help='Config file')
parser.add_argument("-dict_file", type=str, default="dict.json", help="Dictionary file name")
parser.add_argument("-networks_dir", type=str, help="Directory with pretrained networks")
parser.add_argument("-oracle_identifier", type=str, default="156cb3d352b97ba12ffd6cf547281ae2", required=False , help='Oracle identifier - if none: user must be the oracle') # Use checkpoint id instead?
parser.add_argument("-qgen_identifier", type=str, default="7b24d8b68f94bde9774cd9555584fd93", required=False, help='Qgen identifier - if none: user must be the Qgen')
parser.add_argument("-guesser_identifier", type=str, required=False, help='Guesser identifier - if none: user must be the guesser')
parser.add_argument("-gpu_ratio", type=float, default=0.95, help="How many GPU ram is required? (ratio)")
args = parser.parse_args()
eval_config, exp_identifier, save_path = load_config(args.config, args.exp_dir)
# Load all networks configs
logger = logging.getLogger()
###############################
# LOAD DATA
#############################
# Load image
logger.info('Loading images..')
image_builder = get_img_builder(eval_config['image'], args.img_dir)
crop_builder = None
if eval_config.get('crop', False):
logger.info('Loading crops..')
crop_builder = get_img_builder(eval_config['crop'], args.crop_dir, is_crop=True)
# Load data
logger.info('Loading data..')
trainset = Dataset(args.data_dir, "train", image_builder, crop_builder)
validset = Dataset(args.data_dir, "valid", image_builder, crop_builder)
testset = Dataset(args.data_dir, "test", image_builder, crop_builder)
dataset, dummy_dataset = trainset, validset
dataset.games = trainset.games + validset.games + testset.games
dummy_dataset.games = []
# hack dataset to only keep one game by image
image_id_set = {}
games = []
for game in dataset.games:
if game.image.id not in image_id_set:
games.append(game)
image_id_set[game.image.id] = 1
dataset.games = games
# Load dictionary
logger.info('Loading dictionary..')
tokenizer = GWTokenizer(os.path.join(args.data_dir, args.dict_file))
###############################
# START TRAINING
#############################
# CPU/GPU option
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
###############################
# LOAD NETWORKS
#############################
if args.oracle_identifier is not None:
oracle_config = get_config_from_xp(os.path.join(args.networks_dir, "oracle"), args.oracle_identifier)
oracle_network = OracleNetwork(oracle_config, num_words=tokenizer.no_words)
oracle_var = [v for v in tf.global_variables() if "oracle" in v.name]
oracle_saver = tf.train.Saver(var_list=oracle_var)
oracle_saver.restore(sess, os.path.join(args.networks_dir, 'oracle', args.oracle_identifier, 'params.ckpt'))
oracle_wrapper = OracleWrapper(oracle_network, tokenizer)
else:
oracle_wrapper = OracleUserWrapper(tokenizer)
logger.info("No Oracle was registered >>> use user input")
if args.guesser_identifier is not None:
guesser_config = get_config_from_xp(os.path.join(args.networks_dir, "guesser"), args.guesser_identifier)
guesser_network = GuesserNetwork(guesser_config["model"], num_words=tokenizer.no_words)
guesser_var = [v for v in tf.global_variables() if "guesser" in v.name]
guesser_saver = tf.train.Saver(var_list=guesser_var)
guesser_saver.restore(sess, os.path.join(args.networks_dir, 'guesser', args.guesser_identifier, 'params.ckpt'))
guesser_wrapper = GuesserWrapper(guesser_network)
else:
guesser_wrapper = GuesserUserWrapper(tokenizer, img_raw_dir=args.img_raw_dir)
logger.info("No Guesser was registered >>> use user input")
if args.qgen_identifier is not None:
qgen_config = get_config_from_xp(os.path.join(args.networks_dir, "qgen"), args.qgen_identifier)
qgen_network = QGenNetworkLSTM(qgen_config["model"], num_words=tokenizer.no_words, policy_gradient=False)
qgen_var = [v for v in tf.global_variables() if "qgen" in v.name] # and 'rl_baseline' not in v.name
qgen_saver = tf.train.Saver(var_list=qgen_var)
qgen_saver.restore(sess, os.path.join(args.networks_dir, 'qgen', args.qgen_identifier, 'params.ckpt'))
qgen_network.build_sampling_graph(qgen_config["model"], tokenizer=tokenizer, max_length=eval_config['loop']['max_depth'])
qgen_wrapper = QGenWrapper(qgen_network, tokenizer,
max_length=eval_config['loop']['max_depth'],
k_best=eval_config['loop']['beam_k_best'])
else:
qgen_wrapper = QGenUserWrapper(tokenizer)
logger.info("No QGen was registered >>> use user input")
looper_evaluator = BasicLooper(eval_config,
oracle_wrapper=oracle_wrapper,
guesser_wrapper=guesser_wrapper,
qgen_wrapper=qgen_wrapper,
tokenizer=tokenizer,
batch_size=1)
logs = []
# Start training
final_val_score = 0.
batchifier = LooperBatchifier(tokenizer, generate_new_games=False)
while True:
# Start new game
while True:
id_str = input('Do you want to play a new game? (Yes/No) --> ').lower()
if id_str == "y" or id_str == "yes": break
elif id_str == "n" or id_str == "no": exit(0)
# Pick id image
image_id = 0
while True:
id_str = int(input('What is the image id you want to select? (-1 for random id) --> '))
if id_str in image_id_set:
image_id = id_str
break
elif id_str == -1:
image_id = random.choice(list(image_id_set.keys()))
break
else:
print("Could not find the following image id: {}".format(id_str))
game = [g for g in dataset.games if g.image.id == image_id][0]
game = copy.deepcopy(game)
print("Selecting image {}".format(game.image.filename))
# Pick id object
print("Available objects")
for i, obj in enumerate(game.objects):
print(" -", i, ":", obj.category, "\t", obj.bbox)
print("Type '(S)how' to display the image with the object")
while True:
id_str = input('Which object id do you want to select? (-1 for random id) --> ')
if id_str == "S" or id_str.lower() == "show":
game.show(img_raw_dir=args.img_raw_dir, display_index=True)
continue
id_str = int(id_str)
if 0 <= id_str < len(game.objects):
object_index = id_str
object_id = game.objects[object_index].id
break
elif id_str == -1:
object_id = random.choice(game.objects).id
break
else:
print("Could not find the following object index: {}".format(id_str))
game.object_id = object_id
dummy_dataset.games = [game]
iterator = BasicIterator(dummy_dataset, batch_size=1, batchifier=batchifier)
success = looper_evaluator.process(sess, iterator, mode="greedy")
| [
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
4866,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
14276,
13,
7890,
62,
15234,
1304,
... | 2.307418 | 4,206 |
# Packaging script for the osm_rasterize distribution (setuptools-based).
import setuptools
# The long description rendered on PyPI comes verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Dependencies needed only to run the test/lint toolchain, not at runtime.
test_deps = [
    'pytest',
    'flake8',
    'pylint',
    'mypy',
]
# Exposed as an extra so users can `pip install osm_rasterize[test]`.
extras = {
    'test': test_deps
}
setuptools.setup(
    name="osm_rasterize",
    version="0.0.0",
    author="Ruben Lipperts",
    author_email="",
    description="Map OSM data onto a grid",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/rlipperts/osm-rasterize",
    # src-layout: the package lives under src/
    package_dir={'': 'src'},
    packages=['osm_rasterize'],
    # ship the PEP 561 marker so type checkers pick up the inline annotations
    package_data={'osm_rasterize': ['py.typed']},
    tests_require=test_deps,
    extras_require=extras,
    # no runtime dependencies
    install_requires=[
    ],
    classifiers=[
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Games/Entertainment",
    ],
    python_requires='~=3.9',
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
9288,
... | 2.318735 | 411 |
import six
import time
import logging
from elasticsearch.helpers import BulkIndexError
from aleph.core import es_index, db
from aleph.index.mapping import TYPE_RECORD
from aleph.model import DocumentRecord
from aleph.index.util import bulk_op, query_delete
from aleph.text import index_form
log = logging.getLogger(__name__)
def clear_records(document_id):
"""Delete all records associated with the given document."""
q = {'term': {'document_id': document_id}}
query_delete(q, doc_type=TYPE_RECORD)
def generate_records(document):
"""Generate index records, based on document rows or pages."""
q = db.session.query(DocumentRecord)
q = q.filter(DocumentRecord.document_id == document.id)
for record in q.yield_per(1000):
texts = [record.text]
if record.data is not None:
texts.extend(record.data.values())
yield {
'_id': record.id,
'_type': TYPE_RECORD,
'_index': six.text_type(es_index),
'_source': {
'document_id': document.id,
'collection_id': document.collection_id,
'index': record.index,
'sheet': record.sheet,
'text': index_form(texts)
}
}
| [
11748,
2237,
198,
11748,
640,
198,
11748,
18931,
198,
6738,
27468,
12947,
13,
16794,
364,
1330,
47900,
15732,
12331,
198,
198,
6738,
31341,
746,
13,
7295,
1330,
1658,
62,
9630,
11,
20613,
198,
6738,
31341,
746,
13,
9630,
13,
76,
5912,
... | 2.36194 | 536 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import telemetry.timeline.event as timeline_event
from telemetry.testing import test_page_test_results
from telemetry.web_perf.metrics import startup
# Attributes defined outside __init__
# pylint: disable=attribute-defined-outside-init
| [
2,
15069,
1853,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
11748... | 3.678261 | 115 |
from gevent.pywsgi import WSGIServer
import app
app_instance = app.create_app()
http_server = WSGIServer(('127.0.0.1', 8080), app_instance)
http_server.serve_forever() | [
6738,
4903,
1151,
13,
9078,
18504,
12397,
1330,
25290,
38,
1797,
18497,
198,
11748,
598,
198,
198,
1324,
62,
39098,
796,
598,
13,
17953,
62,
1324,
3419,
198,
198,
4023,
62,
15388,
796,
25290,
38,
1797,
18497,
7,
10786,
16799,
13,
15,
... | 2.6 | 65 |
from func import *
from move import *
| [
198,
6738,
25439,
1330,
1635,
198,
6738,
1445,
1330,
1635,
198
] | 3.545455 | 11 |
from RPC import *
from Slack_Connection import *
from flask import Flask, request, Response
from decimal import *
from fractions import *
from Python_Hash import *
import os
import time
app = Flask(__name__)
@app.route("/" ,methods=["POST"])
@app.route("/Button_Pressed",methods=["POST"])
@app.route("/", methods=['GET'])
@app.route("/new_transaction",methods=["POST"])
if __name__ == "__main__":
global main_json
app.run(debug=False,threaded=True)
| [
6738,
39400,
1330,
1635,
201,
198,
6738,
36256,
62,
32048,
1330,
1635,
201,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
18261,
201,
198,
6738,
32465,
1330,
1635,
201,
198,
6738,
49876,
1330,
1635,
201,
198,
6738,
11361,
62,
26257,
1330... | 2.689266 | 177 |