content
stringlengths
1
1.04M
input_ids
listlengths
1
774k
ratio_char_token
float64
0.38
22.9
token_count
int64
1
774k
from django import forms
[ 6738, 42625, 14208, 1330, 5107, 220 ]
4.166667
6
import pytest pytest.importorskip("sodasql")
[ 11748, 12972, 9288, 198, 198, 9078, 9288, 13, 11748, 669, 74, 541, 7203, 82, 375, 292, 13976, 4943, 198 ]
2.421053
19
from py_adventure import PointOfInterest from py_adventure import ZoneConnection from py_adventure import City, Zone, Location, Building from py_adventure import Region from typing import Dict, List barrel : PointOfInterest = PointOfInterest("Barrel") gate : Location = Location("City Gate") inn : Location = Building("Lion's Rest Inn", [barrel]) city1 : Zone = City("Baldur's Gate", [gate,inn]) city2 : Zone = City("Elturel") road1 : Zone = Zone("Fields of the Dead") #exit from BG to Fields of the Dead connection1 = ZoneConnection("North Exit", road1) #region exits connection2 = ZoneConnection("West Exit", city1) connection3 = ZoneConnection("East Exit", city2) #exit from elturel to region connection4 = ZoneConnection("South Exit", road1) connections : Dict[Zone, List[ZoneConnection]] = { city1 : [connection1], road1 : [connection2, connection3], city2 : [connection4] } world : Region = Region("Faerun", city1, connections) current_zone : Zone = world.get_current_zone() print(f"You arrive at {current_zone}") print(f"Where would you like to explore?") for loc in current_zone.get_locations(): print(loc) print("Or you can leave via:") for connection in world.get_available_exits(): print(connection) print("--------------------------------") #go to the lions rest inn. current_location : Location = current_zone.get_locations()[1] print(f"You arrive at the {current_location.get_name()}") print("Looking around you notice:") for poi in current_location.get_points_of_interest(): print(poi.get_name()) print(f"or you can leave to go back to {current_zone}")
[ 6738, 12972, 62, 324, 5388, 1330, 6252, 5189, 19302, 198, 6738, 12972, 62, 324, 5388, 1330, 13035, 32048, 198, 6738, 12972, 62, 324, 5388, 1330, 2254, 11, 13035, 11, 13397, 11, 11819, 198, 6738, 12972, 62, 324, 5388, 1330, 17718, 198, ...
3.1917
506
# -*- coding: utf-8 -*- __author__ = "Filip Koprivec" from helper import get_file DAY = 8 HEIGHT = 6 WIDTH = 50 data = [line.strip() for line in get_file(DAY)] print(part1(data)) print(part2(data))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 834, 9800, 834, 796, 366, 11928, 541, 40500, 11590, 66, 1, 198, 198, 6738, 31904, 1330, 651, 62, 7753, 198, 198, 26442, 796, 807, 198, 198, 13909, 9947, 796, 718,...
2.348315
89
#!/usr/bin/env python # from operator import itemgetter import sys last_group = None current_count = 1 # input comes from STDIN for line in sys.stdin: # Remove trailing '\n' line = line.strip() # Extract (key,value) vs = line.split('\t') # print vs[0] current_group = vs[0].strip() if last_group == current_group: current_count += int(vs[1]) else: if last_group != None: print last_group, current_count last_group = current_group current_count = 1 # Last one: if last_group != None: print last_group, current_count
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 422, 10088, 1330, 2378, 1136, 353, 198, 11748, 25064, 198, 198, 12957, 62, 8094, 796, 6045, 198, 14421, 62, 9127, 796, 352, 198, 198, 2, 5128, 2058, 422, 48571, 1268, 198, 16...
2.440816
245
from __future__ import annotations import rich from rich.align import Align from rich.style import Style, StyleType from textual.widget import Reactive, Widget
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 5527, 198, 6738, 5527, 13, 31494, 1330, 978, 570, 198, 6738, 5527, 13, 7635, 1330, 17738, 11, 17738, 6030, 198, 198, 6738, 40577, 13, 42655, 1330, 797, 5275, 11, 370, 17484, 628 ]
3.97561
41
""" Author: Anastassios Dardas, PhD - Higher Education Specialist at the Education & Research at Esri Canada. Date: Re-modified Q1 - 2022 About: """ from pandas import DataFrame from ..util import TimeDelta, SpatialDelta
[ 37811, 201, 198, 13838, 25, 1052, 459, 562, 4267, 360, 446, 292, 11, 16394, 532, 16038, 7868, 36816, 379, 262, 7868, 1222, 4992, 379, 8678, 380, 3340, 13, 201, 198, 10430, 25, 797, 12, 41771, 1195, 16, 532, 33160, 201, 198, 201, 198...
3.191781
73
"""This module provides functions used to generate and load probe files.""" # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- import os import pprint import itertools from tools import MemMappedText, MemMappedBinary # ----------------------------------------------------------------------------- # Probe file functions # ----------------------------------------------------------------------------- def generate_probe(channel_groups, topology='linear'): """channel_groups is a dict {channel_group: nchannels}.""" if not isinstance(channel_groups, dict): channel_groups = {0: channel_groups} groups = sorted(channel_groups.keys()) r = {} curchannel = 0 for i in range(len(groups)): id = groups[i] # channel group index n = channel_groups[id] # number of channels channels = range(curchannel, curchannel + n) if topology == 'linear': graph = [[ch, ch + 1] for ch in channels[:-1]] elif topology == 'complete': graph = map(list, list(itertools.product(channels, repeat=2))) geometry = {channels[_]: [float(i), float(_)] for _ in range(n)} d = {'channels': channels, 'graph': graph, 'geometry': geometry, } r[id] = d curchannel += n return r def old_to_new(probe_ns): """Convert from the old Python .probe format to the new .PRB format.""" graph = probe_ns['probes'] shanks = sorted(graph.keys()) if 'geometry' in probe_ns: geometry = probe_ns['geometry'] else: geometry = None # Find the list of shanks. shank_channels = {shank: flatten(graph[shank]) for shank in shanks} # Find the list of channels. channels = flatten(shank_channels.values()) nchannels = len(channels) # Create JSON dictionary. channel_groups = { shank: { 'channels': shank_channels[shank], 'graph': graph[shank], } for shank in shanks } # Add the geometry if it exists. if geometry: # Find out if there's one geometry per shank, or a common geometry # for all shanks. 
for k, d in channel_groups.iteritems(): multiple = k in geometry and isinstance(geometry[k], dict) if multiple: d['geometry'] = geometry[k] else: d['geometry'] = geometry return channel_groups
[ 37811, 1212, 8265, 3769, 5499, 973, 284, 7716, 290, 3440, 12774, 3696, 526, 15931, 198, 198, 2, 16529, 32501, 198, 2, 1846, 3742, 198, 2, 16529, 32501, 198, 11748, 28686, 198, 11748, 279, 4798, 198, 11748, 340, 861, 10141, 198, 198, 6...
2.469935
1,081
import numpy as np
[ 11748, 299, 32152, 355, 45941, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 198 ]
1.684211
19
"""The tests for the mFi sensor platform.""" import unittest.mock as mock from mficlient.client import FailedToLogin import pytest import requests import homeassistant.components.mfi.sensor as mfi import homeassistant.components.sensor as sensor_component from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS from homeassistant.setup import async_setup_component PLATFORM = mfi COMPONENT = sensor_component THING = "sensor" GOOD_CONFIG = { "sensor": { "platform": "mfi", "host": "foo", "port": 6123, "username": "user", "password": "pass", "ssl": True, "verify_ssl": True, } } async def test_setup_missing_config(hass): """Test setup with missing configuration.""" with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client: config = {"sensor": {"platform": "mfi"}} assert await async_setup_component(hass, "sensor", config) assert not mock_client.called async def test_setup_failed_login(hass): """Test setup with login failure.""" with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client: mock_client.side_effect = FailedToLogin assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None) async def test_setup_failed_connect(hass): """Test setup with connection failure.""" with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client: mock_client.side_effect = requests.exceptions.ConnectionError assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None) async def test_setup_minimum(hass): """Test setup with minimum configuration.""" with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client: config = dict(GOOD_CONFIG) del config[THING]["port"] assert await async_setup_component(hass, COMPONENT.DOMAIN, config) await hass.async_block_till_done() assert mock_client.call_count == 1 assert mock_client.call_args == mock.call( "foo", "user", "pass", port=6443, use_tls=True, verify=True ) async def test_setup_with_port(hass): """Test setup with port.""" with 
mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client: config = dict(GOOD_CONFIG) config[THING]["port"] = 6123 assert await async_setup_component(hass, COMPONENT.DOMAIN, config) await hass.async_block_till_done() assert mock_client.call_count == 1 assert mock_client.call_args == mock.call( "foo", "user", "pass", port=6123, use_tls=True, verify=True ) async def test_setup_with_tls_disabled(hass): """Test setup without TLS.""" with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client: config = dict(GOOD_CONFIG) del config[THING]["port"] config[THING]["ssl"] = False config[THING]["verify_ssl"] = False assert await async_setup_component(hass, COMPONENT.DOMAIN, config) await hass.async_block_till_done() assert mock_client.call_count == 1 assert mock_client.call_args == mock.call( "foo", "user", "pass", port=6080, use_tls=False, verify=False ) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" with mock.patch( "homeassistant.components.mfi.sensor.MFiClient" ) as mock_client, mock.patch( "homeassistant.components.mfi.sensor.MfiSensor", side_effect=mfi.MfiSensor ) as mock_sensor: ports = { i: mock.MagicMock(model=model, label=f"Port {i}", value=0) for i, model in enumerate(mfi.SENSOR_MODELS) } ports["bad"] = mock.MagicMock(model="notasensor") mock_client.return_value.get_devices.return_value = [ mock.MagicMock(ports=ports) ] assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG) await hass.async_block_till_done() for ident, port in ports.items(): if ident != "bad": mock_sensor.assert_any_call(port, hass) assert mock.call(ports["bad"], hass) not in mock_sensor.mock_calls @pytest.fixture(name="port") def port_fixture(): """Port fixture.""" return mock.MagicMock() @pytest.fixture(name="sensor") def sensor_fixture(hass, port): """Sensor fixture.""" sensor = mfi.MfiSensor(port, hass) sensor.hass = hass return sensor async def test_name(port, sensor): """Test the name.""" assert port.label 
== sensor.name async def test_uom_temp(port, sensor): """Test the UOM temperature.""" port.tag = "temperature" assert sensor.unit_of_measurement == TEMP_CELSIUS assert sensor.device_class == DEVICE_CLASS_TEMPERATURE async def test_uom_power(port, sensor): """Test the UOEM power.""" port.tag = "active_pwr" assert sensor.unit_of_measurement == "Watts" assert sensor.device_class is None async def test_uom_digital(port, sensor): """Test the UOM digital input.""" port.model = "Input Digital" assert sensor.unit_of_measurement == "State" assert sensor.device_class is None async def test_uom_unknown(port, sensor): """Test the UOM.""" port.tag = "balloons" assert sensor.unit_of_measurement == "balloons" assert sensor.device_class is None async def test_uom_uninitialized(port, sensor): """Test that the UOM defaults if not initialized.""" type(port).tag = mock.PropertyMock(side_effect=ValueError) assert sensor.unit_of_measurement == "State" assert sensor.device_class is None async def test_state_digital(port, sensor): """Test the digital input.""" port.model = "Input Digital" port.value = 0 assert mfi.STATE_OFF == sensor.state port.value = 1 assert mfi.STATE_ON == sensor.state port.value = 2 assert mfi.STATE_ON == sensor.state async def test_state_digits(port, sensor): """Test the state of digits.""" port.tag = "didyoucheckthedict?" port.value = 1.25 with mock.patch.dict(mfi.DIGITS, {"didyoucheckthedict?": 1}): assert sensor.state == 1.2 with mock.patch.dict(mfi.DIGITS, {}): assert sensor.state == 1.0 async def test_state_uninitialized(port, sensor): """Test the state of uninitialized sensorfs.""" type(port).tag = mock.PropertyMock(side_effect=ValueError) assert mfi.STATE_OFF == sensor.state async def test_update(port, sensor): """Test the update.""" sensor.update() assert port.refresh.call_count == 1 assert port.refresh.call_args == mock.call()
[ 37811, 464, 5254, 329, 262, 285, 10547, 12694, 3859, 526, 15931, 198, 11748, 555, 715, 395, 13, 76, 735, 355, 15290, 198, 198, 6738, 285, 69, 291, 75, 1153, 13, 16366, 1330, 22738, 2514, 47790, 198, 11748, 12972, 9288, 198, 11748, 700...
2.482463
2,680
# -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The eos_lacp_interfaces class It is in this file where the current configuration (as dict) is compared to the provided configuration (as dict) and the command set necessary to bring the current configuration to it's desired end-state is created """ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.module_utils.network.common.cfg.base import ConfigBase from ansible.module_utils.network.common.utils import to_list, dict_diff, param_list_to_dict from ansible.module_utils.network.eos.facts.facts import Facts from ansible.module_utils.network.eos.utils.utils import normalize_interface class Lacp_interfaces(ConfigBase): """ The eos_lacp_interfaces class """ gather_subset = [ '!all', '!min', ] gather_network_resources = [ 'lacp_interfaces', ] def get_lacp_interfaces_facts(self): """ Get the 'facts' (the current configuration) :rtype: A dictionary :returns: The current configuration as a dictionary """ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) lacp_interfaces_facts = facts['ansible_network_resources'].get('lacp_interfaces') if not lacp_interfaces_facts: return [] return lacp_interfaces_facts def execute_module(self): """ Execute the module :rtype: A dictionary :returns: The result from module execution """ result = {'changed': False} warnings = list() commands = list() existing_lacp_interfaces_facts = self.get_lacp_interfaces_facts() commands.extend(self.set_config(existing_lacp_interfaces_facts)) if commands: if not self._module.check_mode: self._connection.edit_config(commands) result['changed'] = True result['commands'] = commands changed_lacp_interfaces_facts = self.get_lacp_interfaces_facts() result['before'] = existing_lacp_interfaces_facts if result['changed']: result['after'] = changed_lacp_interfaces_facts 
result['warnings'] = warnings return result def set_config(self, existing_lacp_interfaces_facts): """ Collect the configuration from the args passed to the module, collect the current configuration (as a dict from facts) :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ want = self._module.params['config'] have = existing_lacp_interfaces_facts resp = self.set_state(want, have) return to_list(resp) def set_state(self, want, have): """ Select the appropriate function based on the state provided :param want: the desired configuration as a dictionary :param have: the current configuration as a dictionary :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ state = self._module.params['state'] want = param_list_to_dict(want) have = param_list_to_dict(have) if state == 'overridden': commands = self._state_overridden(want, have) elif state == 'deleted': commands = self._state_deleted(want, have) elif state == 'merged': commands = self._state_merged(want, have) elif state == 'replaced': commands = self._state_replaced(want, have) return commands @staticmethod def _state_replaced(want, have): """ The command generator when state is replaced :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] for key, desired in want.items(): interface_name = normalize_interface(key) if interface_name in have: extant = have[interface_name] else: extant = dict() add_config = dict_diff(extant, desired) del_config = dict_diff(desired, extant) commands.extend(generate_commands(key, add_config, del_config)) return commands @staticmethod def _state_overridden(want, have): """ The command generator when state is overridden :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] for key, extant in have.items(): if 
key in want: desired = want[key] else: desired = dict() add_config = dict_diff(extant, desired) del_config = dict_diff(desired, extant) commands.extend(generate_commands(key, add_config, del_config)) return commands @staticmethod def _state_merged(want, have): """ The command generator when state is merged :rtype: A list :returns: the commands necessary to merge the provided into the current configuration """ commands = [] for key, desired in want.items(): interface_name = normalize_interface(key) if interface_name in have: extant = have[interface_name] else: extant = dict() add_config = dict_diff(extant, desired) commands.extend(generate_commands(key, add_config, {})) return commands @staticmethod def _state_deleted(want, have): """ The command generator when state is deleted :rtype: A list :returns: the commands necessary to remove the current configuration of the provided objects """ commands = [] for key in want: desired = dict() if key in have: extant = have[key] else: continue del_config = dict_diff(desired, extant) commands.extend(generate_commands(key, {}, del_config)) return commands
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 13130, 2297, 10983, 198, 2, 22961, 3611, 5094, 13789, 410, 18, 13, 15, 10, 198, 2, 357, 3826, 27975, 45761, 393, 3740, 1378, 2503, 13, 41791, 13, 2398, 14, ...
2.394131
2,692
import os import sys import threading import tkinter as tk os.chdir(os.getcwd()) self = tk.Tk() self.title("meet") self.resizable(0, 0) self.grid() ch_time = tk.Button(self, text="CHANGE SCHEDULE", command=exc) ch_time.grid(row=0, column=0, sticky="nswe") ch_link = tk.Button(self, text="CHANGE LINKS", command=lnk) ch_link.grid(row=0, column=1, sticky="nswe") guid_time = tk.Button(self, text="guide schedule", command=gdsch) guid_time.grid(row=1, column=0, sticky="nswe") guid_link = tk.Button(self, text="guide links", command=gdlnk) guid_link.grid(row=1, column=1, sticky="nswe") start = tk.Button( self, text="START", bg="green", activebackground="green", command=strt ) start.grid(row=2, column=0, sticky="nswe") bad_gui = tk.Label(self, text="This is a really bad GUI :)") bad_gui.grid(row=3, column=0, sticky="nswe") me = tk.Label(self, text="made with <3 by DanyB0") me.grid(row=3, column=1, sticky="nswe") if __name__ == "__main__": self.mainloop()
[ 11748, 28686, 201, 198, 11748, 25064, 201, 198, 11748, 4704, 278, 201, 198, 11748, 256, 74, 3849, 355, 256, 74, 201, 198, 201, 198, 418, 13, 354, 15908, 7, 418, 13, 1136, 66, 16993, 28955, 201, 198, 201, 198, 944, 796, 256, 74, 13...
2.29932
441
import contextlib import dataclasses import os import time from unittest import mock from outrun.filesystem.caching.service import LocalCacheService from outrun.filesystem.caching.cache import CacheEntry, RemoteCache
[ 11748, 4732, 8019, 198, 11748, 4818, 330, 28958, 198, 11748, 28686, 198, 11748, 640, 198, 6738, 555, 715, 395, 1330, 15290, 198, 198, 6738, 503, 5143, 13, 16624, 6781, 13, 66, 8103, 13, 15271, 1330, 10714, 30562, 16177, 198, 6738, 503, ...
3.640625
64
default_app_config = 'apps.markets3.apps.Markets3Config'
[ 12286, 62, 1324, 62, 11250, 796, 705, 18211, 13, 34162, 18, 13, 18211, 13, 9704, 1039, 18, 16934, 6, 198 ]
2.85
20
#!/usr/bin/env python import os import codecs import re import sys # Borrowed from rosunit ## unit test suites are not good about screening out illegal ## unicode characters. This little recipe I from http://boodebr.org/main/python/all-about-python-and-unicode#UNI_XML ## screens these out RE_XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \ u'|' + \ u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \ (unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff), unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff), unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff)) _safe_xml_regex = re.compile(RE_XML_ILLEGAL) def _read_file_safe_xml(test_file, write_back_sanitized=True): """ read in file, screen out unsafe unicode characters """ f = None try: # this is ugly, but the files in question that are problematic # do not declare unicode type. if not os.path.isfile(test_file): raise Exception("test file does not exist") try: f = codecs.open(test_file, "r", "utf-8" ) x = f.read() except: if f is not None: f.close() f = codecs.open(test_file, "r", "iso8859-1" ) x = f.read() for match in _safe_xml_regex.finditer(x): x = x[:match.start()] + "?" + x[match.end():] x = x.encode("utf-8") if write_back_sanitized: with open(test_file, 'w') as h: h.write(x) return x finally: if f is not None: f.close() if __name__ == '__main__': for f in sys.argv[1:]: _read_file_safe_xml(f, True)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 28686, 198, 11748, 40481, 82, 198, 11748, 302, 198, 11748, 25064, 198, 198, 2, 347, 6254, 276, 422, 686, 19155, 270, 198, 198, 2235, 4326, 1332, 45861, 389, 407, 922, 546, ...
1.892667
941
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import from .config import RiveScriptTestCase class MessageFormatTests(RiveScriptTestCase): """Test format message."""
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 11, 4112, 62, 11748, 198, 198, 6738, 764, 11250, 1330,...
3
74
#Some configuration parameters from datetime import timedelta host = "mail.messagingengine.com" port = 465 email = "*********@fastmail.com" password = "password" touchfile = "/home/henfredemars/.bashrc" time_to_wait = timedelta(14) check_period = timedelta(0,0,0,0,30) min_sane_year = 2016 max_sane_year = 3016
[ 2, 4366, 8398, 10007, 198, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 198, 4774, 796, 366, 4529, 13, 37348, 3039, 18392, 13, 785, 1, 198, 634, 796, 49669, 198, 12888, 796, 366, 4557, 9, 31, 7217, 4529, 13, 785, 1, 198, 28712, ...
2.794643
112
# -*- coding: utf-8 -*- # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # # Copyright (c) 2019 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA'). # All rights reserved. # This work should only be used for nonprofit purposes. # # By downloading and/or using any of these files, you implicitly agree to all the # terms of the license, as specified in the document LICENSE.md # (included in this package) and online at # http://www.grip.unina.it/download/LICENSE_OPEN.txt # from numpy import sqrt, maximum import torch from torch.nn import Conv2d, BatchNorm2d, ReLU, Sequential import numpy as np import E2E.parameters as parameters
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 4064, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 36917, 198, 2, 198, 2, 15069, 357, 66, 8, 1...
3.368932
206
# -*- coding: utf-8 -*- """ Created on Mon Apr 1 14:39:33 2019 @author: jone """ #%% Gaussian Pyramids import cv2 img = cv2.imread('img/monkey.tiff') lower_reso = cv2.pyrDown(img) # 원본 이미지의 1/4 사이즈 higher_reso = cv2.pyrUp(img) # 원본 이미지의 4배 사이즈 cv2.imshow('img', img) cv2.imshow('lower', lower_reso) cv2.imshow('higher', higher_reso) cv2.waitKey(0) cv2.destroyAllWindows() #%% Laplacian Pyramids import cv2 img = cv2.imread('img/monkey.tiff') print(img.shape) # (512, 512, 3) GAD = cv2.pyrDown(img) print(GAD.shape) # (256, 256, 3) GAU = cv2.pyrUp(GAD) print(GAU.shape) # (512, 512, 3) temp = cv2.resize(GAU, (512, 512)) res = cv2.subtract(img, temp) cv2.imshow('res', res) cv2.waitKey(0) # 이미지 저장 cv2.imwrite('img/lap_pyramids.png', res) cv2.destroyAllWindows() #%% import cv2 import numpy as np STEP = 6 # 1단계 A = cv2.imread('img/apple.jpg') B = cv2.imread('img/orange.jpg') # 2단계 # A 이미지에 대한 Gaussian Pyramid를 생성 # 점점 작아지는 Pyramid G = A.copy() gpA = [G] for i in range(STEP): G = cv2.pyrDown(G) gpA.append(G) # B 이미지에 대한 Gaussian Pyramid 생성 # 점점 작아지는 Pyramid G = B.copy() gpB = [G] for i in range(STEP): G = cv2.pyrDown(G) gpB.append(G) # 3단계 # A 이미지에 대한 Laplacian Pyramid 생성 lpA = [gpA[STEP-1]] # n번쨰 추가된 Gaussian Image for i in range(STEP-1, 0, -1): GE = cv2.pyrUp(gpA[i]) L = cv2.subtract(gpA[i-1], GE) lpA.append(L) # B 이미지에 대한 Laplacian Pyramid 생성 lpB = [gpB[STEP-1]] for i in range(STEP-1, 0, -1): GE = cv2.pyrUp(gpB[i]) L = cv2.subtract(gpB[i-1], GE) lpB.append(L) # 4단계 # Laplacian Pyramid를 누적으로 좌측과 우측으로 재결합 LS = [] for la, lb in zip(lpA, lpB): rows, cols, dpt = la.shape ls = np.hstack((la[:,0:int(cols/2)], lb[:,int(cols/2):])) LS.append(ls) # 5단계 ls_ = LS[0] # 좌측과 우측이 합쳐진 가장 작은 이미지 for i in range(1, STEP): ls_ = cv2.pyrUp(ls_) # Up scale ls_ = cv2.add(ls_, LS[i]) # Up Scale된 이미지에 외곽서늘 추가하여 선명한 이미지로 생성 # 원본 이미지를 그대로 붙인 경우 real = np.hstack((A[:, :int(cols/2)], B[:, int(cols/2):])) cv2.imshow('real', real) cv2.imshow('blending', ls_) cv2.waitKey(0) cv2.destroyAllWindows()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 2892, 2758, 220, 352, 1478, 25, 2670, 25, 2091, 13130, 198, 198, 31, 9800, 25, 474, 505, 198, 37811, 198, 198, 2, 16626, 12822, 31562, 9485, 43...
1.473976
1,441
# Generated by Django 2.1.2 on 2019-01-17 02:37 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 16, 13, 17, 319, 13130, 12, 486, 12, 1558, 7816, 25, 2718, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
n = int(input('digite um número:')) print('O antecessor do número {} é {}, e o sucessor é {}!'.format(n, n-1, n+1))
[ 77, 796, 493, 7, 15414, 10786, 12894, 578, 23781, 299, 21356, 647, 78, 32105, 4008, 198, 4798, 10786, 46, 29692, 919, 273, 466, 299, 21356, 647, 78, 23884, 38251, 1391, 5512, 304, 267, 424, 919, 273, 38251, 23884, 0, 4458, 18982, 7, ...
2.166667
54
import os from fdfgen import forge_fdf
[ 11748, 28686, 198, 6738, 277, 7568, 5235, 1330, 28325, 62, 69, 7568, 628 ]
3.076923
13
# # This is the beginning of Eddie's Extra file for the Final Capstone Project # The idea behind this design is to make an accurate representation of a Rose-Hulman Student # # The rosebot import was added in order for me to use the dot trick import rosebot import time # def beep_proxy(robot, initial, delta, speed): # ps = robot.sensor_system.ir_proximity_sensor # b = robot.sound_system # robot.drive_system.go(int(speed),int(speed)) # while ps.get_distance_in_inches() > 2: # rate = float(initial) + float(delta) / float(ps.get_distance_in_inches()) # b.beep_number_of_times(2) # time.sleep(1 / rate) # robot.drive_system.stop() # robot.arm_and_claw.raise_arm() # # # def beep_retrieve(robot, direction, speed): # d = robot.drive_system # if direction == "CW": # d.spin_clockwise_until_sees_object(int(speed), 100) # elif direction == "CCW": # d.spin_counterclockwise_until_sees_object(int(speed), 100) # d.stop() # camera_aim() # beep_proxy(robot, 1, 0.1, int(speed)) # # # def camera_aim(): # robot = rosebot.RoseBot() # d = robot.drive_system # c = robot.sensor_system.camera # while True: # print(c.get_biggest_blob().center.x) # while c.get_biggest_blob().center.x > 170: # d.go(-20, 20) # print(c.get_biggest_blob().center.x) # d.stop() # while c.get_biggest_blob().center.x < 160: # d.go(20, -20) # print(c.get_biggest_blob().center.x) # d.stop() # if 160 < c.get_biggest_blob().center.x < 170: # break # Not sure which find homework will work better, if they work at all # The one below uses the object mode # The one below uses the color mode # The one above does indeed work better # Do not use the one below ####################################################################### # def find_homework2(robot): # robot.drive_system.spin_clockwise_until_sees_color(100, "White") # robot.drive_system.go_forward_until_distance_is_less_than(7, 100) # robot.drive_system.stop() # robot.arm_and_claw.raise_arm() # robot.drive_system.go_straight_for_inches_using_encoder(6, 100) # 
robot.arm_and_claw.lower_arm() # robot.drive_system.go_straight_for_inches_using_encoder(4, 50) # robot.drive_system.stop() # robot.sound_system.speak("DEATH TO ALL HOMEWORK!") #######################################################################
[ 2, 198, 2, 770, 318, 262, 3726, 286, 19478, 338, 17221, 2393, 329, 262, 8125, 220, 4476, 6440, 4935, 198, 2, 383, 2126, 2157, 428, 1486, 318, 284, 787, 281, 7187, 10552, 286, 257, 8049, 12, 39, 377, 805, 13613, 198, 2, 628, 198, ...
2.471471
999
# -*- encoding: utf-8 -*- import re import warnings from denorm import denormalized, depend_on_related from dirtyfields.dirtyfields import DirtyFieldsMixin from django.db import models from django.db.models import CASCADE, PROTECT from django.db.models.expressions import RawSQL from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField, JSONField from bpp.models import ( DodajAutoraMixin, MaProcentyMixin, ModelOpcjonalnieNieEksportowanyDoAPI, ModelZMiejscemPrzechowywania, ModelZPBN_UID, ) from bpp.models.abstract import ( BazaModeluOdpowiedzialnosciAutorow, DwaTytuly, ModelPunktowany, ModelRecenzowany, ModelTypowany, ModelWybitny, ModelZAbsolutnymUrl, ModelZAdnotacjami, ModelZCharakterem, ModelZDOI, ModelZeStatusem, ModelZeSzczegolami, ModelZeZnakamiWydawniczymi, ModelZInformacjaZ, ModelZISBN, ModelZISSN, ModelZKonferencja, ModelZLiczbaCytowan, ModelZOpenAccess, ModelZPrzeliczaniemDyscyplin, ModelZPubmedID, ModelZRokiem, ModelZSeria_Wydawnicza, ModelZWWW, Wydawnictwo_Baza, ) from bpp.models.autor import Autor from bpp.models.nagroda import Nagroda from bpp.models.system import Zewnetrzna_Baza_Danych from bpp.models.util import ZapobiegajNiewlasciwymCharakterom from bpp.models.wydawca import Wydawca class Wydawnictwo_Zwarte_Autor( DirtyFieldsMixin, BazaModeluOdpowiedzialnosciAutorow, ): """Model zawierający informację o przywiązaniu autorów do wydawnictwa zwartego.""" rekord = models.ForeignKey( "Wydawnictwo_Zwarte", CASCADE, related_name="autorzy_set" ) MIEJSCE_I_ROK_MAX_LENGTH = 256 class Wydawnictwo_Zwarte_Baza( Wydawnictwo_Baza, DwaTytuly, ModelZRokiem, ModelZeStatusem, ModelZWWW, ModelZPubmedID, ModelZDOI, ModelRecenzowany, ModelPunktowany, ModelTypowany, ModelZeSzczegolami, ModelZInformacjaZ, ModelZISBN, ModelZAdnotacjami, ModelZAbsolutnymUrl, ModelZLiczbaCytowan, ModelZMiejscemPrzechowywania, ModelOpcjonalnieNieEksportowanyDoAPI, ): """Baza dla klas Wydawnictwo_Zwarte oraz 
Praca_Doktorska_Lub_Habilitacyjna""" miejsce_i_rok = models.CharField( max_length=MIEJSCE_I_ROK_MAX_LENGTH, blank=True, null=True, help_text="""Przykładowo: Warszawa 2012. Wpisz proszę najpierw miejsce potem rok; oddziel spacją.""", ) wydawca = models.ForeignKey(Wydawca, PROTECT, null=True, blank=True) wydawca_opis = models.CharField( "Wydawca - szczegóły", max_length=256, null=True, blank=True ) oznaczenie_wydania = models.CharField(max_length=400, null=True, blank=True) wydawnictwo = property(get_wydawnictwo, set_wydawnictwo) redakcja = models.TextField(null=True, blank=True) rok_regex = re.compile(r"\s[12]\d\d\d") class Wydawnictwo_Zwarte( ZapobiegajNiewlasciwymCharakterom, Wydawnictwo_Zwarte_Baza, ModelZCharakterem, ModelZOpenAccessWydawnictwoZwarte, ModelZeZnakamiWydawniczymi, ModelZKonferencja, ModelZSeria_Wydawnicza, ModelZISSN, ModelWybitny, ModelZPBN_UID, MaProcentyMixin, DodajAutoraMixin, DirtyFieldsMixin, ModelZPrzeliczaniemDyscyplin, ): """Wydawnictwo zwarte, czyli: książki, broszury, skrypty, fragmenty, doniesienia zjazdowe.""" objects = Wydawnictwo_Zwarte_Manager() autor_rekordu_klass = Wydawnictwo_Zwarte_Autor autorzy = models.ManyToManyField(Autor, through=autor_rekordu_klass) wydawnictwo_nadrzedne = models.ForeignKey( "self", CASCADE, blank=True, null=True, help_text="""Jeżeli dodajesz rozdział, tu wybierz pracę, w ramach której dany rozdział występuje.""", related_name="wydawnictwa_powiazane_set", ) calkowita_liczba_autorow = models.PositiveIntegerField( blank=True, null=True, help_text="""Jeżeli dodajesz monografię, wpisz tutaj całkowitą liczbę autorów monografii. Ta informacja zostanie użyta w eksporcie danych do PBN. Jeżeli informacja ta nie zostanie uzupełiona, wartość tego pola zostanie obliczona i będzie to ilość wszystkich autorów przypisanych do danej monografii""", ) calkowita_liczba_redaktorow = models.PositiveIntegerField( blank=True, null=True, help_text="""Jeżeli dodajesz monografię, wpisz tutaj całkowitą liczbę redaktorów monografii. 
Ta informacja zostanie użyta w eksporcie danych do PBN. Jeżeli pole to nie zostanie uzupełnione, wartość ta zostanie obliczona i będzie to ilość wszystkich redaktorów przypisanych do danej monografii""", ) nagrody = GenericRelation(Nagroda) def wydawnictwa_powiazane_posortowane(self): """ Sortowanie wydawnictw powiązanych wg pierwszej liczby dziesiętnej występującej w polu 'Strony' """ return self.wydawnictwa_powiazane_set.order_by( RawSQL( r"CAST((regexp_match(COALESCE(bpp_wydawnictwo_zwarte.strony, '99999999'), '(\d+)'))[1] AS INT)", "", ) ) # # Cache framework by django-denorm-iplweb # denorm_always_skip = ("ostatnio_zmieniony",) @denormalized(JSONField, blank=True, null=True) @depend_on_related( "bpp.Wydawnictwo_Zwarte_Autor", only=( "typ_odpowiedzialnosci_id", "afiliuje", "dyscyplina_naukowa_id", "upowaznienie_pbn", "przypieta", ), ) @depend_on_related("bpp.Wydawca", only=("lista_poziomow", "alias_dla_id")) @denormalized(models.TextField, default="") @depend_on_related("self", "wydawnictwo_nadrzedne") @depend_on_related( "bpp.Wydawnictwo_Zwarte_Autor", only=("zapisany_jako", "typ_odpowiedzialnosci_id", "kolejnosc"), ) @depend_on_related("bpp.Wydawca", only=("nazwa", "alias_dla_id")) @depend_on_related("bpp.Charakter_Formalny") @depend_on_related("bpp.Typ_KBN") @depend_on_related("bpp.Status_Korekty") @denormalized(ArrayField, base_field=models.TextField(), blank=True, null=True) @depend_on_related( "bpp.Autor", only=( "nazwisko", "imiona", ), ) @depend_on_related("bpp.Wydawnictwo_Zwarte_Autor", only=("kolejnosc",)) @denormalized(models.TextField, blank=True, null=True) @depend_on_related( "bpp.Wydawnictwo_Zwarte_Autor", only=("zapisany_jako", "kolejnosc"), ) @denormalized( models.SlugField, max_length=400, unique=True, db_index=True, null=True, blank=True, ) @depend_on_related( "bpp.Wydawnictwo_Zwarte_Autor", only=("zapisany_jako", "kolejnosc"), ) @depend_on_related( "bpp.Autor", only=("nazwisko", "imiona"), ) @depend_on_related("self", "wydawnictwo_nadrzedne")
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 302, 198, 11748, 14601, 198, 198, 6738, 2853, 579, 1330, 2853, 6636, 1143, 11, 4745, 62, 261, 62, 5363, 198, 6738, 11841, 25747, 13, 49075, 25747, 1330, 32052, 15...
1.971237
3,581
from .base import * # noqa from .base import env # GENERAL SECRET_KEY = env( "DJANGO_SECRET_KEY", default="!!!SET DJANGO_SECRET_KEY!!!", ) TEST_RUNNER = "django.test.runner.DiscoverRunner"
[ 6738, 764, 8692, 1330, 1635, 220, 1303, 645, 20402, 198, 6738, 764, 8692, 1330, 17365, 198, 198, 2, 41877, 198, 23683, 26087, 62, 20373, 796, 17365, 7, 198, 220, 220, 220, 366, 35028, 1565, 11230, 62, 23683, 26087, 62, 20373, 1600, 19...
2.45679
81
#!/usr/bin/env python3 import attr, collections, datetime from common import file_in_this_dir NYU_PICKLE = file_in_this_dir("NYU.pickle") DAYS_OF_WEEK = ( "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday" ) def _deques_increasing_first(list_of_deques, greater_than=None): ''' Yields tuples. In each tuple, a) each item is a value from a deque and b) each item is greater than the previous. The first item is greater than greater_than. This generator yields all combinations that satisfy these conditions. It is assumed that the deques are sorted in ascending order. ''' if list_of_deques: # Get the first deque in the list of deques. q = list_of_deques[0] # Get the first value in the deque that is greater than greater_than. # Discard all values before it. if greater_than is not None: try: while q[0] <= greater_than: q.popleft() except IndexError: # This deque is empty. The generator must terminate. return # At this point, the first value in the deque is greater than # greater_than. for value in q: # Construct the tuple, starting with the value from the deque. head = (value,) # If there are more deques, values from them will form the rest of # the tuple. Otherwise, just yield the head with no tail. if len(list_of_deques) > 1: # Recursively call this generator on the rest of the deques. for tail in _deques_increasing_first( list_of_deques[1:], value ): yield head + tail else: yield head @attr.s @attr.s
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 708, 81, 11, 17268, 11, 4818, 8079, 198, 6738, 2219, 1330, 2393, 62, 259, 62, 5661, 62, 15908, 198, 12805, 52, 62, 47, 11860, 2538, 796, 2393, 62, 259, 62, 5661, 62, 159...
2.23253
830
import json from http.client import HTTPResponse from typing import TextIO, Dict from urllib import request from platforms.cpp import as_cpp_enum from platforms.csharp import as_csharp_enum from platforms.java import as_java_enum from platforms.python import as_python_enum from platforms.typescript import as_typescript_enum def read_icons_json() -> Dict: """Opens the icons.json and converts into a json object""" json_file: HTTPResponse = request.urlopen( "https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/metadata/icons.json") file_contents = json_file.read() return json.loads(file_contents) print_first()
[ 11748, 33918, 198, 6738, 2638, 13, 16366, 1330, 7154, 51, 4805, 9774, 2591, 198, 6738, 19720, 1330, 8255, 9399, 11, 360, 713, 198, 6738, 2956, 297, 571, 1330, 2581, 198, 198, 6738, 9554, 13, 20322, 1330, 355, 62, 20322, 62, 44709, 198...
3.069444
216
import os from copy import deepcopy import mne import torch from braindecode.torch_ext.util import np_to_var from mne.time_frequency import tfr_morlet from EEGNAS import global_vars import numpy as np from scipy.io import savemat from PIL import Image from EEGNAS.utilities.misc import create_folder, label_by_idx, unify_dataset from sktime.utils.load_data import load_from_tsfile_to_dataframe import pandas as pd import numpy as np from EEGNAS.visualization.wavelet_functions import get_tf_data_efficient # def EEG_to_TF_mike(dataset): # for segment in dataset.keys(): # TF_list = [] # for example in range(len(dataset[segment].X)): # for channel_idx in lenexample: # tf = get_tf_data_efficient(example[None, :, :], eeg_chan, # global_vars.get('frequency'), global_vars.get('num_frex'), # dB=global_vars.get('db_normalization')) # TF_list.append(tf)
[ 11748, 28686, 198, 6738, 4866, 1330, 2769, 30073, 198, 198, 11748, 285, 710, 198, 11748, 28034, 198, 6738, 3632, 12501, 1098, 13, 13165, 354, 62, 2302, 13, 22602, 1330, 45941, 62, 1462, 62, 7785, 198, 6738, 285, 710, 13, 2435, 62, 353...
2.315421
428
# License: BSD 3 clause import unittest import numpy as np from scipy.sparse import csr_matrix from tick.linear_model import SimuLogReg, ModelHinge from tick.base_model.tests.generalized_linear_model import TestGLM if __name__ == '__main__': unittest.main()
[ 2, 13789, 25, 347, 10305, 513, 13444, 198, 198, 11748, 555, 715, 395, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 82, 29572, 1330, 269, 27891, 62, 6759, 8609, 198, 198, 6738, 4378, 13, 29127, 62, 19849, 133...
2.8125
96
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ChannelVersionItem.ui' # # Created by: PyQt4 UI code generator 4.11.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: try: _encoding = QtGui.QApplication.UnicodeUTF8 except AttributeError: import res_rc
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 5178, 7822, 7560, 422, 3555, 334, 72, 2393, 705, 29239, 14815, 7449, 13, 9019, 6, 198, 2, 198, 2, 15622, 416, 25, 9485, 48, 83, 19, 12454, 2438, 17301, 604, ...
2.714286
147
import subprocess import re import os from math import sqrt import sys # SETTINGS PROG = "./produce.out" # FOR TESTING: reduce this number when debugging X = 500 # number of times to repeat the test # This data structure holds the raw data for each test run times = [] DEBUG = True def call_produce(program, cmd_str, num_repeat_calls): ''' This function calls the program `PROG`, with the commands provided by `cmds`, `X` number of times. The timing provided by the program (printed on the last line) will be saved in the `times` global variable. @param `program` string to the program `produce` to run @param `cmd_str` is a string containing the parameters to send to `program` ''' print 'Calling "{} {}", {} times'.format(program, cmd_str, num_repeat_calls) my_times = [] for i in xrange(num_repeat_calls): output = subprocess.check_output('{} {}'.format(program, cmd_str), stderr=subprocess.STDOUT, shell=True) matchObj = re.search(r'System execution time: ([0-9.]+) seconds', output) if matchObj: if DEBUG and i==0: print ' > First Returned Time: {} sec'.format(matchObj.group(1)) my_times.append(1000.0 * float(matchObj.group(1))) else: print '\nError trying to find time for the following output:' print output quit(1) if i % 10 == 0: print '.', sys.stdout.flush() times.append({'cmd':cmd_str, 'times':my_times}) print '' def generate_test_data(): ''' Calls the specific test cases asked for by the lab. 
''' test_cases = [ # N=100, B=4 {'N':100, 'B':4, 'P':1, 'C':1}, {'N':100, 'B':4, 'P':1, 'C':2}, {'N':100, 'B':4, 'P':1, 'C':3}, {'N':100, 'B':4, 'P':2, 'C':1}, {'N':100, 'B':4, 'P':3, 'C':1}, {'N':100, 'B':4, 'P':2, 'C':2}, {'N':100, 'B':4, 'P':3, 'C':3}, ############################# # N=100, B=8 {'N':100, 'B':8, 'P':1, 'C':1}, {'N':100, 'B':8, 'P':1, 'C':2}, {'N':100, 'B':8, 'P':1, 'C':3}, {'N':100, 'B':8, 'P':2, 'C':1}, {'N':100, 'B':8, 'P':3, 'C':1}, {'N':100, 'B':8, 'P':2, 'C':2}, {'N':100, 'B':8, 'P':3, 'C':3}, ############################# # N=398, B=8 {'N':398, 'B':8, 'P':1, 'C':1}, {'N':398, 'B':8, 'P':1, 'C':2}, {'N':398, 'B':8, 'P':1, 'C':3}, {'N':398, 'B':8, 'P':2, 'C':1}, {'N':398, 'B':8, 'P':3, 'C':1}, {'N':398, 'B':8, 'P':2, 'C':2}, {'N':398, 'B':8, 'P':3, 'C':3}, ] i = 1 for t in test_cases: print 'Test Case: {}/{}'.format(i, len(test_cases)) i += 1 call_produce(PROG, '{} {} {} {}'.format(t['N'], t['B'], t['P'], t['C']), X) print '' def generate_stats_table(): ''' Converts the raw times in `times` into a text table containing the average time and the standard deviation. ''' with open('lab3-stats.csv', 'w') as file: file.write("N,B,P,C,Average Time (ms),Standard Deviation (ms)\n") for t in times: avg = sum(t['times']) / float(len(t['times'])) std = sqrt(float(reduce(lambda x, y: x + y, map(lambda x: (x - avg) ** 2, t['times']))) / float(len(t['times']))) k = t['cmd'].split() file.write('{},{},{},{},{},{}\n'.format(k[0],k[1],k[2],k[3], avg, std)) print 'Written the statistics out to lab3-stats.csv' def dump_raw_times(): ''' Writes the raw times to a csv file ''' with open('lab3-times.csv', 'w') as file: for k in times: t = str(k['times']) file.write('{},{}\n'.format(k['cmd'],t[1:-1])) print 'Written the raw times out to lab3-times.csv' if __name__ == '__main__': abspath = os.path.abspath(__file__) dname = os.path.dirname(abspath) os.chdir(dname) main()
[ 11748, 850, 14681, 198, 11748, 302, 198, 11748, 28686, 198, 6738, 10688, 1330, 19862, 17034, 198, 11748, 25064, 198, 198, 2, 25823, 51, 20754, 198, 4805, 7730, 796, 366, 19571, 18230, 344, 13, 448, 1, 198, 2, 7473, 43001, 2751, 25, 46...
2.011134
1,976
import requests import time from opentracing_instrumentation.client_hooks import install_all_patches from jaeger_client import Config from os import getenv JAEGER_HOST = getenv('JAEGER_HOST', 'localhost') WEBSERVER_HOST = getenv('WEBSERVER_HOST', 'localhost') # Create configuration object with enabled logging and sampling of all requests. config = Config(config={'sampler': {'type': 'const', 'param': 1}, 'logging': True, 'local_agent': {'reporting_host': JAEGER_HOST}}, service_name="jaeger_opentracing_example") tracer = config.initialize_tracer() # Automatically trace all requests made with 'requests' library. install_all_patches() url = "http://{}:5000/log".format(WEBSERVER_HOST) # Make the actual request to webserver. requests.get(url) # allow tracer to flush the spans - https://github.com/jaegertracing/jaeger-client-python/issues/50 time.sleep(2) tracer.close()
[ 11748, 7007, 198, 11748, 640, 198, 6738, 1034, 298, 81, 4092, 62, 259, 43872, 341, 13, 16366, 62, 25480, 82, 1330, 2721, 62, 439, 62, 8071, 2052, 198, 6738, 474, 3609, 1362, 62, 16366, 1330, 17056, 198, 198, 6738, 28686, 1330, 651, ...
2.664789
355
from typing import Any, List
[ 6738, 19720, 1330, 4377, 11, 7343, 628 ]
4.285714
7
from selenium import webdriver from selenium.webdriver.common.keys import Keys import pyautogui import time driver = webdriver.Chrome() driver.get('https://www.keybr.com/multiplayer') while True: ticker = driver.find_element_by_class_name('Ticker') if ticker.text == "GO!": break inp = driver.find_element_by_class_name('TextInput-fragment') text = driver.find_element_by_xpath("//*[@type='text']") print(text.send_keys('salut')) time.sleep(1) for e in inp.find_elements_by_tag_name('span'): print(e) if e.text=='␣': pyautogui.press(' ') elif e.text=='↵': pyautogui.press('enter') else: pyautogui.press(e.text) time.sleep(0.01)
[ 6738, 384, 11925, 1505, 1330, 3992, 26230, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11321, 13, 13083, 1330, 26363, 198, 198, 11748, 12972, 2306, 519, 9019, 198, 11748, 640, 198, 198, 26230, 796, 3992, 26230, 13, 1925, 5998, 34...
2.263844
307
from .compact_gauss import compact_gauss_scheme from .main_item_gauss import main_item_gauss_scheme __author__ = 'Nikita'
[ 6738, 764, 5589, 529, 62, 4908, 1046, 1330, 16001, 62, 4908, 1046, 62, 15952, 1326, 198, 6738, 764, 12417, 62, 9186, 62, 4908, 1046, 1330, 1388, 62, 9186, 62, 4908, 1046, 62, 15952, 1326, 198, 834, 9800, 834, 796, 705, 40979, 5350, ...
2.772727
44
import pandas as pd from sklearn.preprocessing import MinMaxScaler, StandardScaler from sklearn.base import TransformerMixin, BaseEstimator from flowcat.types import fcsdata as fcs from . import FCSDataMixin class FCSMinMaxScaler(FCSDataMixin, TransformerMixin, BaseEstimator): """MinMaxScaling with adaptations for FCSData.""" def fit(self, X, *_): """Fit min max range to the given data.""" self._model = MinMaxScaler() if self._fit_to_range: data = X.ranges_array else: data = X.data self._model.fit(data) return self def transform(self, X, *_): """Transform data to be 0 min and 1 max using the fitted values.""" X = X.copy() X.data = self._model.transform(X.data) X.update_range(self._model.transform(X.ranges_array)) return X class FCSStandardScaler(FCSDataMixin, TransformerMixin, BaseEstimator): """Standard deviation scaling adapted for FCSData objects.""" def fit(self, X, *_): """Fit standard deviation to the given data.""" self._model = StandardScaler().fit(X.data) return self def transform(self, X, *_): """Transform data to be zero mean and unit standard deviation""" X = X.copy() X.data = self._model.transform(X.data) X.update_range(self._model.transform(X.ranges_array)) return X class RefitScaler(FCSDataMixin, TransformerMixin, BaseEstimator): """Always refit the containing scaler class."""
[ 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 1341, 35720, 13, 3866, 36948, 1330, 1855, 11518, 3351, 36213, 11, 8997, 3351, 36213, 198, 6738, 1341, 35720, 13, 8692, 1330, 3602, 16354, 35608, 259, 11, 7308, 22362, 320, 1352, 628, 198,...
2.516447
608
from setuptools import setup, find_packages setup(name='Golden Retriever', version='0.1', description='Information retrieval using fine-tuned semantic similarity', author='AI Singapore', packages=find_packages(), zip_safe=False)
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 40406, 7, 3672, 11639, 32378, 4990, 380, 964, 3256, 198, 220, 220, 220, 220, 220, 2196, 11639, 15, 13, 16, 3256, 198, 220, 220, 220, 220, 220, 6764, 11639, 21918, 4...
2.988506
87
#!/usr/bin/env python with open("../templates/header.html", "r") as header: print header.read() with open("../templates/navbar.html", "r") as navbar: print navbar.read() print(""" <div class="row"> <div class="col-xxs-6 col-xxs-offset-3 col-xs-4 col-sm-3 col-md-2 col-lg-1"> <img class="img-responsive" alt="kevin.broh-kahn.com Icon" src="/assets/img/kevin.broh-kahn.com/icon.png"> </div> <div class="col-xxs-12 col-xs-8 col-sm-9 col-md-10 col-lg-11"> <h1>kevin.broh-kahn.com <small><a target="blank" href="https://github.com/kbrohkahn/kevin.broh-kahn.com">Github</a></small></h1> <div class="subheader">A site for displaying all of the projects and applications I have created.</div> </div> </div> """) with open("../templates/footer.html", "r") as footer: print footer.read()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 4480, 1280, 7203, 40720, 11498, 17041, 14, 25677, 13, 6494, 1600, 366, 81, 4943, 355, 13639, 25, 198, 197, 4798, 13639, 13, 961, 3419, 198, 4480, 1280, 7203, 40720, 11498, 17041, 14, 2...
2.417683
328
#!/usr/bin/env python3 import os import sys import pickle from functools import partial from string import ascii_lowercase import jax import torch sys.path.append('..') from ti_mps import TI_MPS samp_lens = [16, 30] # What lengths we want to sample at samp_size = 1000 # Number of samples to draw dataset = 'tomita' # Dataset models were trained on save_name = ".tomita_exp.record" # Where the record is saved ALPHABET = {'brackets': ['(', ')', '*'], 'tomita': ['0', '1'], 'bos_eos': ['^', '$'], } alph = ALPHABET[dataset] if dataset == 'brackets': from toy_datasets import score_brackets as score_fun elif dataset == 'tomita': from toy_datasets import score_tomita as tom_score def mps_sample_fun(rng_key, mps, target_lens, score_fun, ref_sets=None): """Draw samples from MPS model within JAX""" from sampler import draw_samples, fill_in_blanks bi_exp = ref_sets is not None examp_samps = {} if bi_exp: corr_frac = {} for samp_l in target_lens: ref_s = ref_sets[samp_l] ref_sets = to_string(ref_s, alph) samp_chars = fill_in_blanks(key, mps, alphabet=alph, ref_strset=ref_s) # TODO: Fold this code into fill_in_blanks # Generate validation strings with each character replaced by # suggested character from samp_chars samples = [s[:i] + c + s[i+1:] for s, cs in zip(ref_sets, samp_chars) for i, c in enumerate(cs)] corr_frac[samp_l] = 100 * score_fun(samples) examp_samps[samp_l] = samples[:10] print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%") print(f"Replacement examples: {samples[:10]}\n") else: corr_frac = {} for samp_l in target_lens: rng_key, key = jax.random.split(rng_key) samples = draw_samples(key, mps, alphabet=alph, num_samps=samp_size, samp_len=samp_l) score = score_fun(samples) corr_frac[samp_l] = 100 * score examp_samps[samp_l] = samples[:10] print(f"Correct frac len={samp_l}: {100 * score:.1f}%") print(f"Example samples: {samples[:10]}\n") return corr_frac def lstm_sample_fun(rng_key, lstm, target_lens, score_fun, ref_sets=None): """Draw samples from LSTM model 
within Pytorch""" samp_mode = 'fixed' bi_exp = lstm.bi_dir this_alph = alph + ALPHABET['bos_eos'] lstm = lstm.eval() examp_samps = {} if bi_exp: corr_frac = {} for samp_l in target_lens: ref_s = ref_sets[samp_l] rng_key, key = jax.random.split(rng_key) # TODO: Finish up better bidirectional sampling code, including # (a) deal with BOS/EOS, (b) properly put samp_chars in # ref_set strings raise NotImplementedError ref_sets = [s[1:-1] for s in to_string(ref_s, this_alph)] samp_chars = lstm.sample(key, alph, samp_mode='completion', ref_strset=ref_s) # BOS and EOS should never be sampled, so replace those with # incorrect strings samples = [')(' if ('^' in s or '$' in s) else s for s in samples] corr_frac[samp_l] = 100 * score_fun(samples) examp_samps[samp_l] = samples[:10] print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%") print(f"Replacement examples:{examp_samps[samp_l]}\n") else: corr_frac = {} for samp_l in target_lens: rng_key, key = jax.random.split(rng_key) samples = lstm.sample(key, this_alph, samp_mode=samp_mode, num_samps=samp_size, samp_len=samp_l) score = score_fun(samples) corr_frac[samp_l] = 100 * score examp_samps[samp_l] = samples[:10] print(f"Correct frac len={samp_l}: {100 * score:.1f}%") print(f"Example samples: {examp_samps[samp_l]}\n") return corr_frac rng_key = jax.random.PRNGKey(0) # Load the data record we're interested in full_record = pickle.load(open(save_name, 'rb')) # Go through each experimental setting and resample with trained model for setting, global_rec in full_record.items(): # Get relevant data for this experimental setting print(setting) tom_num, _, _, model = setting[:4] assert model in ['mps', 'lstm'] assert len(setting) in [4, 5] score_fun = partial(tom_score, tomita_num=tom_num) samp_fun = lstm_sample_fun if model == 'lstm' else mps_sample_fun best_model = global_rec['best_model'] best_epoch = global_rec['best_epoch'] local_rec = global_rec['local_recs'][best_epoch] # Figure out which lengths haven't been sampled 
yet these_lens = [l for l in samp_lens if f"corr_frac_{l}" not in local_rec] if these_lens == []: continue # Perform the resampling and add results to local_rec rng_key, key = jax.random.split(rng_key) corr_frac = samp_fun(key, best_model, these_lens, score_fun) for s_len, score in corr_frac.items(): lookup = f"corr_frac_{s_len}" if lookup in local_rec: print(f"Already have samples from len {s_len}") continue local_rec[lookup] = score print # Put this back in full_record and save global_rec['local_recs'][best_epoch] = local_rec full_record[setting] = global_rec pickle.dump(full_record, open(save_name, 'wb'))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 2298, 293, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 4731, 1330, 355, 979, 72, 62, 21037, 7442, 198, 198, 11748, 474, 897, 1...
2.038765
2,786
""" import numpy as np # line_no: 1 import gadget gadget.record('docs_training_py') x = np.arange(5) # line_no: 2 s = x[0] # line_no: 3 for i in x: # line_no: 4 if i % 2 == 0: # line_no: 5 s += i # line_no: 6 print('done') """ import gadget as ln with ln.tracking("docs_training_py3"): import numpy as np ln.importing(np, module="numpy", name="np", line_no=1) x = ln.call(np.arange(5), args=(5, "5"), text="np.arange(5)", line_no=2).assign( target="x" ) s = ln.assign(x[0], text="x[0]", target="s", line_no=3) for i in ln.loop_it.new(x, text="i in x", name="main_loop", line_no=4): if ln.pred.new(i % 2 == 0, text="i % 2 == 0", name="main_cond", line_no=5): s += ln.assign(i, target="s", text="i", mod="+=", line_no=6) ln.pred.pop() ln.loop_it.pop() # ln.call(eval(f'done'), args=('done', "'done'"), text="eval('done')")
[ 37811, 198, 11748, 299, 32152, 355, 45941, 220, 220, 220, 220, 220, 1303, 1627, 62, 3919, 25, 352, 198, 11748, 42892, 198, 198, 70, 324, 1136, 13, 22105, 10786, 31628, 62, 34409, 62, 9078, 11537, 198, 198, 87, 796, 45941, 13, 283, 8...
1.933735
498
# ... continued ... if action == 'start': gibson.start(user_instance['instance_id']) return _return_payload( message=start_message.format(user=who), color='green', ) elif action == 'stop': gibson.stop(user_instance['instance_id']) return _return_payload( message='stopping instance for: {}'.format(who), color='green', ) elif action == 'status': return _return_payload( message='status for user {}: {}'.format(who, user_instance['state']), color='gray', )
[ 2, 2644, 3767, 2644, 198, 198, 361, 2223, 6624, 705, 9688, 10354, 198, 220, 220, 220, 46795, 1559, 13, 9688, 7, 7220, 62, 39098, 17816, 39098, 62, 312, 6, 12962, 198, 220, 220, 220, 1441, 4808, 7783, 62, 15577, 2220, 7, 198, 220, ...
2.45
220
import numpy as np from MPU9250 import MPU9250 from BMP280 import BMP280 m = MPU9250() b = BMP280() print(b.pressure(), 'Pa') avg_x_accel = lambda x: np.array([list(m.readAccel().values()) for _ in range(x)]).mean(axis=0) x = np.sum(avg_x_accel(10) - avg_x_accel(10)) print(x)
[ 11748, 299, 32152, 355, 45941, 198, 6738, 4904, 52, 5892, 1120, 1330, 4904, 52, 5892, 1120, 198, 6738, 347, 7378, 21033, 1330, 347, 7378, 21033, 198, 198, 76, 796, 4904, 52, 5892, 1120, 3419, 198, 198, 65, 796, 347, 7378, 21033, 3419,...
2.222222
126
""" This example shows how to capture point clouds, with color, from the Zivid camera. For scenes with high dynamic range we combine multiple acquisitions to get an HDR point cloud. This example shows how to fully configure settings for each acquisition. In general, capturing an HDR point cloud is a lot simpler than this. The purpose of this example is to demonstrate how to configure all the settings. """ import datetime import zivid if __name__ == "__main__": _main()
[ 37811, 198, 1212, 1672, 2523, 703, 284, 8006, 966, 15114, 11, 351, 3124, 11, 422, 262, 1168, 1699, 4676, 13, 198, 198, 1890, 8188, 351, 1029, 8925, 2837, 356, 12082, 3294, 33683, 284, 651, 281, 33675, 966, 6279, 13, 770, 1672, 2523, ...
4.191304
115
# coding=utf-8 """ Class used for representing tTask of BPMN 2.0 graph """ import graph.classes.activities.activity_type as activity class Task(activity.Activity): """ Class used for representing tTask of BPMN 2.0 graph """ def __init__(self): """ Default constructor, initializes object fields with new instances. """ super(Task, self).__init__()
[ 2, 19617, 28, 40477, 12, 23, 198, 37811, 198, 9487, 973, 329, 10200, 256, 25714, 286, 347, 5868, 45, 362, 13, 15, 4823, 198, 37811, 198, 11748, 4823, 13, 37724, 13, 15791, 871, 13, 21797, 62, 4906, 355, 3842, 628, 198, 4871, 15941, ...
2.79021
143
# Problem 4 MIT Midterm # # Write a function called gcd # that calculates the greatest common divisor of two positive integers. # The gcd of two or more integers, when at least one of them is not zero, # is the largest positive integer that divides the numbers without a remainder. # 20 min until finished # One way is recursively, # where the greatest common denominator of a and b can be calculated as gcd(a, b) = gcd(b, a mod b). # Hint: remember the mod symbol is % in Python. Do not import anything. # For example, the greatest common divisor (gcd) between a = 20 and b = 12 is: 4 # gcd(20,12) is the same as gcd(12, 20 mod 12) = gcd(12,8) # gcd(12,8) is the same as gcd(8, 12 mod 8) = gcd(8,4) # gcd(8,4) is the same as gcd(4, 8 mod 4) = gcd(4,0) # The gcd is found (and the gcd is equal to a) when we reach 0 for b. def gcd (a, b): """ :param a: int :param b: int at least one of the two integers is not 0 :return: largest positive integer gcd that divides the numbers a and b without remainder """ # handling of negative integers if a < 0 and b < 0: a = abs(a) b = abs(b) elif a < 0 or b < 0: return 0 # a > b, so b is smaller integer of pair if a > b: # base case, if one of two integers is 0, other non zero integer is greatest common divisor if b == 0: return a # recursive case else: return gcd(b, a % b) # b > a, so a is smaller integer of pair elif b > a: # base case, if one of two integers is 0, other non zero integer is greatest common divisor if a == 0: return b # recursive case else: return gcd(a, b % a) # b == a else: return a print (gcd (20, 12)) print (gcd (12, 20)) print (gcd (0, 20)) print (gcd (-20, -12)) print (gcd (-12, -20)) print (gcd (0, -20)) # Other way is iteratively def gcd_iter(a, b): """ :param a: int :param b: int , at least one of the two integers is not 0 :return: largest positive integer gcd that divides the numbers a and b without remainder """ # handling of negative integers if a < 0 and b < 0: a = abs(a) b = abs(b) elif a < 0 or b < 0: return 0 
# a > b, so b is smaller integer of pair if a > b: # base case, if one of two integers is 0, other non zero integer is greatest common divisor if b == 0: return a # else enter loop, divide bigger integer by every gcd from smaller integer to 0 else: for gcd in range(b, -1, -1): rem = a % gcd if rem == 0 and b % gcd == 0: return gcd # b > a, so a is smaller integer of pair elif b > a: # base case, if one of two integers is 0, other non zero integer is greatest common divisor if a == 0: return b # else enter loop, decreasing smaller of two integers by 1 until bigger % smaller == 0 else: for gcd in range(a, -1, -1): rem = b % gcd if rem == 0 and a % gcd == 0: return gcd # b == a else: return a print (gcd_iter (20, 12)) print (gcd_iter (12, 20)) print (gcd_iter (0, 20)) print (gcd_iter (-20, -12)) print (gcd_iter (-12, -20)) print (gcd_iter (0, -20))
[ 2, 20647, 604, 17168, 7215, 4354, 1303, 198, 2, 19430, 257, 2163, 1444, 308, 10210, 198, 2, 326, 43707, 262, 6000, 2219, 2659, 271, 273, 286, 734, 3967, 37014, 13, 198, 2, 383, 308, 10210, 286, 734, 393, 517, 37014, 11, 618, 379, ...
2.313779
1,466
# -*- coding: utf-8 -*- import os basedir = os.path.abspath(os.path.dirname(__file__)) SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db') SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository') SQLALCHEMY_TRACK_MODIFICATIONS = False CSRF_ENABLED = True SECRET_KEY = 'ochen-secretnyj-klyuch'
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 11748, 28686, 201, 198, 201, 198, 201, 198, 3106, 343, 796, 28686, 13, 6978, 13, 397, 2777, 776, 7, 418, 13, 6978, 13, 15908, 3672, 7, 834, 7753, 834, 4008, 201...
2.0875
160
""" 8.7 – Álbum: Escreva uma função chamada make_album() que construa um dicionário descrevendo um álbum musical. A função deve aceitar o nome de um artista e o título de um álbum e deve devolver um dicionário contendo essas duas informações. Use a função para criar três dicionários que representem álbuns diferentes. Apresente cada valor devolvido para mostrar que os dicionários estão armazenando as informações do álbum corretamente. Acrescente um parâmetro opcional em make_album() que permita armazenar o número de faixas em um álbum. Se a linha que fizer a chamada incluir um valor para o número de faixas, acrescente esse valor ao dicionário do álbum. Faça pelo menos uma nova chamada da função incluindo o número de faixas em um álbum. """ album1 = make_album('Arctic Monkeys', 'AM') album2 = make_album('U2', 'The Joshua Tree') album3 = make_album('Red Hot Chili Peppers', 'Californication') album4 = make_album('The Strokes', 'Is This It', 11) print(album1) print(album2) print(album3) print(album4)
[ 37811, 198, 23, 13, 22, 784, 6184, 223, 75, 4435, 25, 16319, 260, 6862, 334, 2611, 1257, 16175, 28749, 442, 321, 4763, 787, 62, 40916, 3419, 8358, 1500, 622, 64, 23781, 198, 67, 47430, 6557, 27250, 1715, 18218, 31110, 23781, 6184, 94,...
2.545226
398
# This file is part of the faebryk project # SPDX-License-Identifier: MIT import csv import subprocess import re import logging logger = logging.getLogger("script") # Expects a csv file in the format: issue_number,title # Can be generated with gh issue list and some manual editing with open("issues.txt", 'r') as f: reader = csv.DictReader(f) rows = list(reader) issues = { row["issue"]:row["title"] for row in rows } new_titles = { issue: re.sub(r"^\[[^\]]*\][ :]*", "", title) for issue,title in issues.items() } for issue,title in issues.items(): logger.info("{}->{}".format(title, new_titles[issue])) for issue,title in new_titles.items(): subprocess.run(["gh", "issue", "edit", issue, "--title", title])
[ 2, 770, 2393, 318, 636, 286, 262, 24685, 1765, 563, 74, 1628, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 17168, 628, 198, 11748, 269, 21370, 198, 11748, 850, 14681, 198, 11748, 302, 198, 11748, 18931, 198, 198, 6404, 1362, 7...
2.685714
280
#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import sys import subprocess from setuptools import setup, Command from setuptools.command.test import test as TestCommand __version__ = '' with open('linebot/__about__.py', 'r') as fd: reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]') for line in fd: m = reg.match(line) if m: __version__ = m.group(1) break with open('README.rst', 'r') as fd: long_description = fd.read() setup( name="line-bot-sdk", version=__version__, author="RyosukeHasebe", author_email="hsb.1014@gmail.com", maintainer="RyosukeHasebe", maintainer_email="hsb.1014@gmail.com", url="https://github.com/line/line-bot-sdk-python", description="LINE Messaging API SDK for Python", long_description=long_description, license='Apache License 2.0', packages=[ "linebot", "linebot.models" ], python_requires=">=3.6.0", install_requires=_requirements(), tests_require=_requirements_test(), cmdclass={ 'test': PyTest, 'codegen': CodegenCommand }, classifiers=[ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development" ] )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, ...
2.66131
809
from nf_prefix_tree import PyPrefixTree t = PyPrefixTree() t.addSequence('ABCDE', 'key1') t.addSequence('ABCXY', 'key2') t.addSequence('ZYAPW', 'key3') t.show()
[ 6738, 299, 69, 62, 40290, 62, 21048, 1330, 9485, 36698, 844, 27660, 198, 198, 83, 796, 9485, 36698, 844, 27660, 3419, 198, 198, 83, 13, 2860, 44015, 594, 10786, 24694, 7206, 3256, 705, 2539, 16, 11537, 198, 83, 13, 2860, 44015, 594, ...
2.263889
72
import downloader import tkinter as tk from tkinter import filedialog import pathlib import os root = tk.Tk() app = Application(master=root) app.mainloop()
[ 11748, 4321, 263, 198, 11748, 256, 74, 3849, 355, 256, 74, 198, 6738, 256, 74, 3849, 1330, 5717, 498, 519, 198, 11748, 3108, 8019, 198, 11748, 28686, 628, 198, 198, 15763, 796, 256, 74, 13, 51, 74, 3419, 198, 1324, 796, 15678, 7, ...
2.944444
54
""" entry-point functions for the sample_pck module, as referenced in setup.cfg """ from .animals import Animal, create_jerry, create_tom
[ 37811, 198, 13000, 12, 4122, 5499, 329, 262, 6291, 62, 79, 694, 8265, 11, 355, 20717, 287, 9058, 13, 37581, 220, 198, 37811, 198, 198, 6738, 764, 11227, 874, 1330, 13792, 11, 2251, 62, 73, 6996, 11, 2251, 62, 39532 ]
3.475
40
from email.policy import default from time import timezone from django import forms from django.utils import timezone
[ 6738, 3053, 13, 30586, 1330, 4277, 198, 6738, 640, 1330, 640, 11340, 198, 6738, 42625, 14208, 1330, 5107, 198, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198 ]
4.103448
29
################################################################################ # # # Script to run PyTorch CPU benchmarks # # # # (c) Simon Wenkel, released under the Apache v2 license (see license file) # # # # # ################################################################################ ################################################################################ # import libraries # # # import pickle import time from tqdm import tqdm import torch # # ################################################################################ ################################################################################ # function dict # # # functions = {} functions["sin"] = torch.sin functions["cos"] = torch.cos functions["tan"] = torch.tan functions["asin"] = torch.asin functions["acos"] = torch.acos functions["atan"] = torch.atan functions["exp"] = torch.exp functions["sinh"] = torch.sinh functions["cosh"] = torch.cosh functions["tanh"] = torch.tanh functions["abs"] = torch.abs functions["ceil"] = torch.ceil functions["floor"] = torch.floor functions["sqrt"] = torch.sqrt # # ################################################################################ ################################################################################ # functions # # # # # ################################################################################ if __name__ == "__main__": main()
[ 29113, 29113, 14468, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220,...
1.839544
1,315
# -*- coding: utf-8 -*- from odoo import fields, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 16298, 2238, 1330, 7032, 11, 4981, 628 ]
2.521739
23
################################ ##Generated with a lot of love## ## with EasyPython ## ##Web site: easycoding.tn ## ################################ import RPi.GPIO as GPIO from http.server import BaseHTTPRequestHandler, HTTPServer GPIO.setmode(GPIO.BCM) GPIO.setup(26, GPIO.OUT) request = None server_address_httpd = ('192.168.254.29',8080) httpd = HTTPServer(server_address_httpd, RequestHandler_httpd) print('Starting Server.....') httpd.serve_forever()
[ 29113, 198, 2235, 8645, 515, 351, 257, 1256, 286, 1842, 2235, 198, 2235, 220, 220, 220, 351, 220, 220, 16789, 37906, 220, 220, 220, 220, 220, 220, 22492, 198, 2235, 13908, 2524, 25, 2562, 66, 7656, 13, 34106, 220, 220, 220, 220, 224...
3.051282
156
#!/usr/bin/env python # -*- coding: utf-8 -*- import asyncio import json from collections import namedtuple import paho.mqtt.client as mqtt import time #sudo pip3 install paho-mqtt #sudo apt-get install -y mosquitto mosquitto-clients #sudo systemctl enable mosquitto.service MQTT_ERR_SUCCESS = 0 class RTapp: "Cyclic realtime app" #when the client connects to the broker (again), send all parameters out to initialize the UI and also subscribe to topics #when the UI connects to the broker send onConnect values to allow the UI to (re-)initalize itself
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 30351, 952, 198, 11748, 33918, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 11748, 279, 17108, 13, 76, 80,...
3.090426
188
# encoding: utf-8 u''' Utility functions for I/O. ''' import sys import six _FILESYSTEM_ENCODING = str( sys.getfilesystemencoding() or sys.getdefaultencoding() ) def encode_path(p): u''' Convert a Unicode path string to a byte string. Intended to be used for encoding paths that are known to be compatible with the filesystem, for example paths of existing files that were previously decoded using :py:func:`decode_path`. If you're dynamically constructing names for new files using unknown inputs then pass them through :py:func:`ckan.lib.munge.munge_filename` before encoding them. Raises a ``UnicodeEncodeError`` if the path cannot be encoded using the filesystem's encoding. That will never happen for paths returned by :py:func:`decode_path`. Raises a ``TypeError`` is the input is not a Unicode string. ''' if not isinstance(p, str): raise TypeError(u'Can only encode unicode, not {}'.format(type(p))) return six.ensure_text(p).encode(_FILESYSTEM_ENCODING) def decode_path(p): u''' Convert a byte path string to a Unicode string. Intended to be used for decoding byte paths to existing files as returned by some of Python's built-in I/O functions. Raises a ``UnicodeDecodeError`` if the path cannot be decoded using the filesystem's encoding. Assuming the path was returned by one of Python's I/O functions this means that the environment Python is running in is set up incorrectly. Raises a ``TypeError`` if the input is not a byte string. ''' if not isinstance(p, bytes): raise TypeError(u'Can only decode str, not {}'.format(type(p))) return six.ensure_binary(p).decode(_FILESYSTEM_ENCODING)
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 198, 84, 7061, 6, 198, 18274, 879, 5499, 329, 314, 14, 46, 13, 198, 7061, 6, 198, 198, 11748, 25064, 198, 198, 11748, 2237, 198, 198, 62, 46700, 1546, 56, 25361, 62, 24181, 3727, 2751, 796, 9...
3.02253
577
from datetime import timedelta import airflow from airflow import DAG from airflow.operators.dummy_operator import DummyOperator args = { 'owner': 'Airflow', 'start_date': airflow.utils.dates.days_ago(9), } with DAG( dag_id='exercise4', default_args=args, schedule_interval=timedelta(hours=2.5) ) as dag: task1 = DummyOperator( task_id='task1' ) task2 = DummyOperator( task_id='task2' ) task3 = DummyOperator( task_id='task3' ) task4 = DummyOperator( task_id='task4' ) task5 = DummyOperator( task_id='task5' ) task1 >> task2 >> [task3, task4] >> task5
[ 6738, 4818, 8079, 1330, 28805, 12514, 198, 198, 11748, 45771, 198, 6738, 45771, 1330, 360, 4760, 198, 6738, 45771, 13, 3575, 2024, 13, 67, 13513, 62, 46616, 1330, 360, 13513, 18843, 1352, 198, 198, 22046, 796, 1391, 198, 220, 220, 220, ...
2.137821
312
# create_btc_wallet.py # pywallet implementation from pywallet import wallet # Testing #print(w) #print("Seed Phrase: ", seed) #print("Private Key: ", priv_key) #print("Public Key: ", pub_key) #print("Address: ", address) cw = create_wallet() print(cw)
[ 2, 2251, 62, 18347, 66, 62, 44623, 13, 9078, 198, 2, 12972, 44623, 7822, 198, 198, 6738, 12972, 44623, 1330, 13008, 628, 198, 2, 23983, 198, 198, 2, 4798, 7, 86, 8, 198, 2, 4798, 7203, 50, 2308, 1380, 22789, 25, 33172, 9403, 8, ...
2.793478
92
import datetime as dt from fabric.api import env, local, task, lcd, settings import json import time from time import sleep from .heroku_utils import first_colour_database from .utils import repeat_run_local, FabricSupportException, wait_for_dyno_to_run # Global environment variables See documentation HEROKU_APP_NAME = "fab-support-app-test" # name of this stages Heroku app HEROKU_PROD_APP_NAME = ( "fab-support-app-prod" ) # Name of Heroku app which is production, ie source of data HEROKU_OLD_PROD_APP_NAME = ( "fab-support-app-old-prod" ) # Name of heroku app to save production to PRODUCTION_URL = "" HEROKU_POSTGRES_TYPE = "hobby-dev" GIT_PUSH = "" # Default to false GIT_PUSH_DIR = "." # GIT_BRANCH = "master" USES_CELERY = False ################################################## # Local utilities ################################################## def remove_unused_db(): """List all databases in use for app, find the main one and remove all the others""" data = json.loads( local(f"heroku config --json --app {HEROKU_APP_NAME}", capture=True) ) for k, v in data.items(): # noinspection SpellCheckingInspection if k.find("HEROKU_POSTGRESQL_") == 0: if v != data["DATABASE_URL"]: local( f"heroku addons:destroy {k} --app {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}" ) def default_db_colour(app_name): """Return the default database colour of heroku application""" data = json.loads( local("heroku config --json --app {0}".format(app_name), capture=True) ) for k, v in data.items(): if k.find("HEROKU_POSTGRESQL_") == 0: if v == data["DATABASE_URL"]: return k # if no colour found then try the long name in database_url # raise Exception(f'No color database names found for app {app_name} - create an extra one and it should be ok.') return data["DATABASE_URL"] def set_heroku_environment_variables(stage): """This sets all the environment variables that a Django recipe needs.""" # TODO deal with no 'ENV' env_dict = env["stages"][stage]["ENV"] # Should be a dictionary # Set all the 
variables you need for key, value in env_dict.items(): local("heroku config:set {}={} --app {}".format(key, value, HEROKU_APP_NAME)) # Setup defaults for some ENV variables if have not been setup if "DJANGO_ALLOWED_HOSTS" not in env_dict: allowed_hosts = f"{HEROKU_APP_NAME}.herokuapp.com" local( f'heroku config:set DJANGO_ALLOWED_HOSTS="{allowed_hosts}" --app {HEROKU_APP_NAME}' ) if "DJANGO_SETTINGS_MODULE" not in env_dict: local( f"heroku config:set DJANGO_SETTINGS_MODULE=production --app {HEROKU_APP_NAME}" ) if "PYTHONHASHSEED" not in env_dict: local(f"heroku config:set PYTHONHASHSEED=random --app {HEROKU_APP_NAME}") def raw_update_app(stage): """Update of app to latest version""" # Put the heroku app in maintenance mode TODO set_heroku_environment_variables(stage) # In case anything has changed # connect git to the correct remote repository local("heroku git:remote -a {}".format(HEROKU_APP_NAME)) # Need to push the branch in git to the master branch in the remote heroku repository print( f"GIT_PUSH_DIR = {GIT_PUSH_DIR}, GIT_PUSH = {GIT_PUSH}, GIT_BRANCH = {GIT_BRANCH}" ) if GIT_PUSH == "": # test for special case probably deploying a subtree local(f"git push heroku {GIT_BRANCH}:master") else: # The command will probably be like this: # 'GIT_PUSH': 'git subtree push --prefix tests/my_heroku_project heroku master', with lcd(GIT_PUSH_DIR): local(GIT_PUSH) # Don't need to scale workers down as not using eg heroku ps:scale worker=0 if USES_CELERY: local(f"heroku ps:scale worker=1 -a {HEROKU_APP_NAME}") # Have used performance web=standard-1x and worker=standard-2x but adjusted app to used less memory # local(f'heroku ps:resize web=standard-1x -a {HEROKU_APP_NAME}') # Resize web to be compatible with performance workers # local(f'heroku ps:resize worker=standard-2x -a {HEROKU_APP_NAME}') # Resize workers # makemigrations should be run locally and the results checked into git local( "heroku run \"yes 'yes' | python manage.py migrate\"" ) # Force deletion of stale 
content types # ############# def _create_newbuild(stage): """This builds the database and waits for it be ready. It is is safe to run and won't destroy any existing infrastructure.""" local( f"heroku create {HEROKU_APP_NAME} --buildpack https://github.com/heroku/heroku-buildpack-python --region eu" ) # This is where we create the database. The type of database can range from hobby-dev for small # free access to standard for production quality docs local( f"heroku addons:create heroku-postgresql:{HEROKU_POSTGRES_TYPE} --app {HEROKU_APP_NAME}" ) local(f"heroku addons:create cloudamqp:lemur --app {HEROKU_APP_NAME}") local(f"heroku addons:create papertrail:choklad --app {HEROKU_APP_NAME}") # set database backup schedule repeat_run_local( f"heroku pg:wait --app {HEROKU_APP_NAME}" ) # It takes some time for DB so wait for it # When wait returns the database is not necessarily completely finished preparing itself. So the next # command could fail (and did on testing on v0.1.6) repeat_run_local(f"heroku pg:backups:schedule --at 04:00 --app {HEROKU_APP_NAME}") # Already promoted as new local('heroku pg:promote DATABASE_URL --app my-app-prod') # Leaving out and aws and reddis raw_update_app(stage) wait_for_dyno_to_run(HEROKU_APP_NAME) local("heroku run python manage.py check --deploy") # make sure all ok # Create superuser - the interactive command does not allow you to script the password # So this is a hack workaround. 
# Django 1 only # cmd = ('heroku run "echo \'from django.contrib.auth import get_user_model; User = get_user_model(); ' # + f'User.objects.filter(email="""{SUPERUSER_EMAIL}""", is_superuser=True).delete(); ' # + f'User.objects.create_superuser("""{SUPERUSER_NAME}""", """{SUPERUSER_EMAIL}""", """{SUPERUSER_PASSWORD}""")\' ' # + f' | python manage.py shell"') # local(cmd) def _kill_app(): """see kill app""" local(f"heroku destroy {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}") def kill_app(stage, safety_on=True): """Kill app notice that to the syntax for the production version is: fab the_stage kill_app:False""" get_global_environment_variables(stage) if HEROKU_APP_NAME in list_app_names(): if not (is_production() and not safety_on): _kill_app() def build_uat(): """Build a new uat environments""" build_app("uat") def _build_app(stage="uat"): """Build a test environment. Default is uat. So fab build_app is equivalent to fab build_app:uat and to fab build_app:stage=uat so can build a test branch with: fab build_app:stage=test""" try: _kill_app() except SystemExit: if stage != "prod": pass # ignore errors in case original does not exist else: raise Exception( "Must stop if an error when deleting a production database as now the only working instance is UAT." 
) _create_newbuild(stage) _transfer_database_from_production(stage) # makemigrations should be run locally and the results checked into git # Need to migrate the old database schema from the master production database local( "heroku run \"yes 'yes' | python manage.py migrate\"" ) # Force deletion of stale content types def _create_new_db(): """Just creates an extra new database for this instance.""" # Put the heroku app in maintenance move m = local( f"heroku addons:create heroku-postgresql:{HEROKU_POSTGRES_TYPE} --app {HEROKU_APP_NAME}", capture=True, ) repeat_run_local("heroku pg:wait") # It takes some time for DB so wait for it # There should now be 2 database return first_colour_database(app=HEROKU_APP_NAME) def _transfer_database_from_production(stage="test", clean=True): """This is usually used for making a copy of the production database for a UAT staging or test environment. It can also be used to upgrade the production environment from one database plan to the next. Method: """ try: local("heroku maintenance:on --app {} ".format(HEROKU_APP_NAME)) db_name, colour = create_new_db(stage) # colour is ? # Don't need to scale workers down as not using eg heroku ps:scale worker=0 local( f"heroku pg:copy {HEROKU_PROD_APP_NAME}::DATABASE_URL {colour} --app {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}" ) local(f"heroku pg:promote {colour}") if clean: remove_unused_db() finally: local("heroku maintenance:off --app {} ".format(HEROKU_APP_NAME)) def list_stages(): """This is put here to test the exact same code in django as in set_stages. 
In one it seems to work and another to fail.""" try: stages = env["stages"] print("List of stages") print(stages) for stage_name, stage in stages.items(): try: comment = stage["comment"] except KeyError: comment = "" print(f"{stage_name} - {comment}") except KeyError: for k, v in env: if k.lower() == "stages": print("env['{f}'] has been set but should probably be 'stages'") print("env['stages'] has not been set.") def _promote_to_prod(): """ Promotes a stage typically, uat to production Saves old production for safety Should work if this is the first promotion ie no production database or if there is a production database. TODO require manual override if not uat TODO do not run if old_prod exists. Require manual deletion """ # turn maintenance on local(f"heroku maintenance:on --app {HEROKU_APP_NAME}") production_exists = True with settings(abort_exception=FabricSupportException): try: local(f"heroku maintenance:on --app {HEROKU_PROD_APP_NAME}") except FabricSupportException: # Going to assume that there is no production production_exists = False try: if production_exists: local( f"heroku apps:rename {HEROKU_OLD_PROD_APP_NAME} --app {HEROKU_PROD_APP_NAME}" ) # Should fail if already an old_prod local(f"heroku apps:rename {HEROKU_PROD_APP_NAME} --app {HEROKU_APP_NAME}") if production_exists: # Having moved from production to old proudction need to update allowed hosts local( f'heroku config:set DJANGO_ALLOWED_HOSTS="{HEROKU_OLD_PROD_APP_NAME}.herokuapp.com" --app {HEROKU_OLD_PROD_APP_NAME}' ) wait_for_dyno_to_run(HEROKU_OLD_PROD_APP_NAME) local( f'heroku config:set DJANGO_ALLOWED_HOSTS="{HEROKU_PROD_APP_NAME}.herokuapp.com" --app {HEROKU_PROD_APP_NAME}' ) wait_for_dyno_to_run(HEROKU_PROD_APP_NAME) if PRODUCTION_URL: # Switch over domains local(f"heroku domains:clear --app {HEROKU_OLD_PROD_APP_NAME}") local(f"heroku domains:add {PRODUCTION_URL} --app {HEROKU_PROD_APP_NAME}") finally: local(f"heroku maintenance:off --app {HEROKU_PROD_APP_NAME} ") if ( production_exists 
): # Then need to run maintenance off on what is now old production local(f"heroku maintenance:off --app {HEROKU_OLD_PROD_APP_NAME} ")
[ 11748, 4818, 8079, 355, 288, 83, 198, 6738, 9664, 13, 15042, 1330, 17365, 11, 1957, 11, 4876, 11, 300, 10210, 11, 6460, 198, 11748, 33918, 198, 11748, 640, 198, 6738, 640, 1330, 3993, 198, 198, 6738, 764, 11718, 23063, 62, 26791, 1330...
2.490712
4,791
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import argparse import gzip from xml.dom import minidom from concurrent.futures import ProcessPoolExecutor import multiprocessing from tqdm import tqdm import jsonlines from typing import List, Dict, Any import logging logger = logging.getLogger(os.path.basename(sys.argv[0])) if __name__ == '__main__': logging.basicConfig(stream=sys.stdout, level=logging.INFO) main(sys.argv[1:])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 1822, 29572, 198, 11748, 308, 13344, 198, 6738, 35555, 13, 3438, ...
2.741176
170
# # @lc app=leetcode id=892 lang=python3 # # [892] Surface Area of 3D Shapes # # https://leetcode.com/problems/surface-area-of-3d-shapes/description/ # # algorithms # Easy (57.01%) # Likes: 209 # Dislikes: 270 # Total Accepted: 15.9K # Total Submissions: 27.5K # Testcase Example: '[[2]]' # # On a N * N grid, we place some 1 * 1 * 1 cubes. # # Each value v = grid[i][j] represents a tower of v cubes placed on top of grid # cell (i, j). # # Return the total surface area of the resulting shapes. # # # # # # # # # # # # # # Example 1: # # # Input: [[2]] # Output: 10 # # # # Example 2: # # # Input: [[1,2],[3,4]] # Output: 34 # # # # Example 3: # # # Input: [[1,0],[0,2]] # Output: 16 # # # # Example 4: # # # Input: [[1,1,1],[1,0,1],[1,1,1]] # Output: 32 # # # # Example 5: # # # Input: [[2,2,2],[2,1,2],[2,2,2]] # Output: 46 # # # # # Note: # # # 1 <= N <= 50 # 0 <= grid[i][j] <= 50 # # # # # # # # # @lc code=start # @lc code=end
[ 2, 198, 2, 2488, 44601, 598, 28, 293, 316, 8189, 4686, 28, 4531, 17, 42392, 28, 29412, 18, 198, 2, 198, 2, 685, 4531, 17, 60, 20321, 9498, 286, 513, 35, 911, 7916, 198, 2, 198, 2, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676,...
1.898855
524
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to document datasets. To test: python -m tensorflow_datasets.scripts.document_datasets """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os from absl import app from concurrent import futures import mako.lookup import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_datasets.core.utils import py_utils WORKER_COUNT_DATASETS = 200 WORKER_COUNT_CONFIGS = 50 BASE_URL = "https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets" # WmtTranslate: The raw wmt can only be instantiated with the config kwargs # TODO(tfds): Document image_label_folder datasets in a separate section BUILDER_BLACKLIST = ["wmt_translate"] @py_utils.memoize() def get_mako_template(tmpl_name): """Returns mako.lookup.Template object to use to render documentation. Args: tmpl_name: string, name of template to load. Returns: mako 'Template' instance that can be rendered. """ tmpl_path = py_utils.get_tfds_path("scripts/templates/%s.mako.md" % tmpl_name) with tf.io.gfile.GFile(tmpl_path, "r") as tmpl_f: tmpl_content = tmpl_f.read() return mako.lookup.Template(tmpl_content, default_filters=["str", "trim"]) def document_single_builder(builder): """Doc string for a single builder, with or without configs.""" print("Document builder %s..." 
% builder.name) get_config_builder = lambda config: tfds.builder(builder.name, config=config) config_builders = [] if builder.builder_configs: with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_CONFIGS) as tpool: config_builders = list( tpool.map(get_config_builder, builder.BUILDER_CONFIGS) ) tmpl = get_mako_template("dataset") out_str = tmpl.render_unicode( builder=builder, config_builders=config_builders, ).strip() schema_org_tmpl = get_mako_template("schema_org") schema_org_out_str = schema_org_tmpl.render_unicode( builder=builder, config_builders=config_builders, ).strip() out_str = schema_org_out_str + "\n" + out_str return out_str def make_module_to_builder_dict(datasets=None): """Get all builders organized by module in nested dicts.""" # pylint: disable=g-long-lambda # dict to hold tfds->image->mnist->[builders] module_to_builder = collections.defaultdict( lambda: collections.defaultdict(lambda: collections.defaultdict(list)) ) # pylint: enable=g-long-lambda if not datasets: datasets = [ name for name in tfds.list_builders() if name not in BUILDER_BLACKLIST ] print("Creating the vanilla builders for %s datasets..." % len(datasets)) with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_DATASETS) as tpool: builders = tpool.map(tfds.builder, datasets) print("Vanilla builders built, constructing module_to_builder dict...") for builder in builders: module_name = builder.__class__.__module__ modules = module_name.split(".") if "testing" in modules: continue current_mod_ctr = module_to_builder for mod in modules: current_mod_ctr = current_mod_ctr[mod] current_mod_ctr.append(builder) module_to_builder = module_to_builder["tensorflow_datasets"] return module_to_builder def dataset_docs_str(datasets=None): """Create dataset documentation string for given datasets. Args: datasets: list of datasets for which to create documentation. If None, then all available datasets will be used. Returns: - overview document - a dictionary of sections. 
Each dataset in a section is represented by a tuple (dataset_name, is_manual_dataset, string describing the datasets (in the MarkDown format)) """ print("Retrieving the list of builders...") module_to_builder = make_module_to_builder_dict(datasets) sections = sorted(list(module_to_builder.keys())) section_docs = collections.defaultdict(list) for section in sections: builders = tf.nest.flatten(module_to_builder[section]) builders = sorted(builders, key=lambda b: b.name) unused_ = get_mako_template("dataset") # To warm cache. with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_DATASETS) as tpool: builder_docs = tpool.map(document_single_builder, builders) builder_docs = [ (builder.name, builder.MANUAL_DOWNLOAD_INSTRUCTIONS, builder_doc) for (builder, builder_doc) in zip(builders, builder_docs) ] section_docs[section.capitalize()] = builder_docs tmpl = get_mako_template("catalog_overview") catalog_overview = tmpl.render_unicode().lstrip() return [catalog_overview, section_docs] if __name__ == "__main__": app.run(main)
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 13130, 383, 309, 22854, 37535, 16092, 292, 1039, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 77...
2.742145
2,005
import segmentation_models_pytorch as smp import shutil import torch from pathlib import Path from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from lib.dataset import DroneDeploySegmentationDataset as Dataset from lib.config import ( BATCH_SIZES, CONFIG_PATH, CRITERION, CRITERION_ARGS, DEVICE, EPOCHS, METRIC, METRIC_ARGS, MODEL, MODEL_ARGS, OPTIMIZER, OPTIMIZER_ARGS, SCHEDULER, SCHEDULER_ARGS, TIMESTAMP, ) __all__ = ["run"] def run(**kwargs): """ Trains a model on the dataset. Uses the following configuration settings: - BATCH_SIZES: number of data points fed in a single optimization step - CONFIG_PATH: path to configuration file - CRITERION: loss function - CRITERION_ARGS: arguments for criterion - DEVICE: device upon which torch operations are run - EPOCHS: number of iterations on the dataset - METRIC: accuracy score - METRIC_ARGS: arguments for metric - MODEL: model architecture - MODEL_ARGS: arguments for model - OPTIMIZER: gradient descent and backpropagation optimizer - OPTIMIZER_ARGS: arguments for optimizer - SCHEDULER: learning rate scheduler - SCHEDULER_ARGS: arguments for scheduler - TIMESTAMP: time at run (unique identifier) """ # Create data loaders data_loaders = { "train": DataLoader( Dataset(split="train"), batch_size=BATCH_SIZES["train"], shuffle=True, ), "valid": DataLoader( Dataset(split="valid"), batch_size=BATCH_SIZES["valid"], shuffle=False, ), } # Assign model, criterion, optimizer, scheduler and metrics model = MODEL(**MODEL_ARGS) criterion = CRITERION(**CRITERION_ARGS) optimizer = OPTIMIZER(params=model.parameters(), **OPTIMIZER_ARGS) scheduler = SCHEDULER(optimizer=optimizer, **SCHEDULER_ARGS) metric = METRIC(**METRIC_ARGS) # Create train and valid epoch executions execution = { "train": smp.utils.train.TrainEpoch( model, loss=criterion, metrics=[metric], optimizer=optimizer, device=DEVICE, verbose=True, ), "valid": smp.utils.train.ValidEpoch( model, loss=criterion, metrics=[metric], device=DEVICE, 
verbose=True, ), } # Create run directory run_dir = Path("runs") / TIMESTAMP run_dir.mkdir(parents=True, exist_ok=True) # Copy current configuration settings shutil.copy(str(CONFIG_PATH), str(run_dir / "config.yml")) # Setup TensorBoard writer = SummaryWriter(str(run_dir)) # Iterate over epochs best_score = 0 for epoch in range(EPOCHS): print(f"Epoch: {epoch+1}") # Iterate over phases for phase in ["train", "valid"]: # Evaluate dataset logs = execution[phase].run(data_loaders[phase]) # Write to TensorBoard for scalar in logs: writer.add_scalar(f"{phase} {scalar}", logs[scalar], epoch + 1) # Save the model if it is the best one so far, based on the validation score score = logs[metric.__name__] if phase == "valid" and best_score < score: best_score = score torch.save(model, str(run_dir / "model.pth")) # Notify scheduler every epoch scheduler.step()
[ 11748, 10618, 341, 62, 27530, 62, 9078, 13165, 354, 355, 895, 79, 198, 11748, 4423, 346, 198, 11748, 28034, 198, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 198, 6738, 28034, 13, 26791, ...
2.304405
1,521
from .rcp import main main()
[ 6738, 764, 6015, 79, 1330, 1388, 198, 198, 12417, 3419 ]
2.9
10
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """an API module to serve TF Cluster APIs.""" import endpoints from tradefed_cluster import cluster_api from tradefed_cluster import cluster_device_api from tradefed_cluster import cluster_host_api from tradefed_cluster import command_attempt_api from tradefed_cluster import command_event_api from tradefed_cluster import command_task_api from tradefed_cluster import coordinator_api from tradefed_cluster import device_blocklist_api from tradefed_cluster import device_snapshot_api from tradefed_cluster import filter_hint_api from tradefed_cluster import env_config from tradefed_cluster import test_harness_image_api from tradefed_cluster import host_event_api from tradefed_cluster import lab_management_api from tradefed_cluster import predefined_message_api from tradefed_cluster import report_api from tradefed_cluster import request_api from tradefed_cluster import run_target_api from tradefed_cluster import acl_check_api API_HANDLERS = [ cluster_api.ClusterApi, cluster_device_api.ClusterDeviceApi, cluster_host_api.ClusterHostApi, command_attempt_api.CommandAttemptApi, command_event_api.CommandEventApi, command_task_api.CommandTaskApi, coordinator_api.CoordinatorApi, device_blocklist_api.DeviceBlocklistApi, device_snapshot_api.DeviceSnapshotApi, filter_hint_api.FilterHintApi, test_harness_image_api.TestHarnessImageApi, host_event_api.HostEventApi, lab_management_api.LabManagementApi, 
predefined_message_api.PredefinedMessageApi, report_api.ReportApi, request_api.RequestApi, run_target_api.RunTargetApi, acl_check_api.AclApi, ] + env_config.CONFIG.extra_apis APP = endpoints.api_server(API_HANDLERS)
[ 2, 15069, 13130, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 733...
3.095368
734
""" Tests for the Teams endpoint. """ import pytest @pytest.mark.vcr def test_teams(client): """Tests fetching of all teams """ teams = client.teams() team = teams.items[0] assert team.team_id == 199048 assert team.name == "Sample" assert team.created_at == "2019-12-25 13:50:00 (Etc/UTC)" assert team.created_at_timestamp == 1577281800 assert team.plan == "Trial" assert team.quota_usage['users'] == 1 assert team.quota_allowed['users'] == 999999999 @pytest.mark.vcr def test_teams_pagination(client): """Tests fetching of all teams with pagination """ teams = client.teams({"page": 2, "limit": 3}) assert teams.items[0].team_id == 170312 assert teams.current_page == 2 assert teams.total_count == 4 assert teams.page_count == 2 assert teams.limit == 3 assert teams.is_last_page() assert not teams.is_first_page() assert not teams.has_next_page() assert teams.has_prev_page()
[ 37811, 198, 51, 3558, 329, 262, 24690, 36123, 13, 198, 37811, 198, 198, 11748, 12972, 9288, 628, 198, 31, 9078, 9288, 13, 4102, 13, 85, 6098, 198, 4299, 1332, 62, 660, 4105, 7, 16366, 2599, 198, 220, 220, 220, 37227, 51, 3558, 21207...
2.593085
376
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Feb 25 09:44:15 2019 @author: dberke Code to define a class for a model fit to an absorption line. """ import matplotlib from matplotlib.gridspec import GridSpec import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np from scipy.optimize import curve_fit, OptimizeWarning from tqdm import tqdm import unyt as u from varconlib.exceptions import PositiveAmplitudeError from varconlib.fitting import gaussian, integrated_gaussian from varconlib.miscellaneous import (shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity) # This line prevents the wavelength formatting from being in the form of # scientific notation. matplotlib.rcParams['axes.formatter.useoffset'] = False # Don't use TeX for font rendering, as these are just diagnostic plots and it # slows everything way down. matplotlib.rcParams['text.usetex'] = False class GaussianFit(object): """A class to fit an absorption line and store information about the fit. """ def __init__(self, transition, observation, order, radial_velocity=None, close_up_plot_path=None, context_plot_path=None, integrated=True, verbose=False): """Construct a fit to an absorption feature using a Gaussian or integrated Gaussian. Parameters ---------- transition : `transition_line.Transition` object A `Transition` object representing the absorption feature to fit. observation : `obs2d.HARPSFile2DScience` object A `HARPSFile2DScience` object to find the absorption feature in. order : int The order in the e2ds file to fit the transition in. Zero-indexed, so ranging from [0-71]. Optional -------- radial_velocity : `unyt.unyt_quantity` A radial velocity (dimensions of length / time) for the object in the observation. Most of the time the radial velocity should be picked up from the observation itself, but for certain objects such as asteroids the supplied radial velocity may not be correct. 
In such cases, this parameter can be used to override the given radial velocity. close_up_plot_path : string or `pathlib.Path` The file name to save a close-up plot of the fit to. context_plot_path : string or `pathlib.Path` The file name to save a wider context plot (±25 km/s) around the fitted feature to. integrated : bool, Default : True Controls whether to attempt to fit a feature with an integrated Gaussian instead of a Gaussian. verbose : bool, Default : False Whether to print out extra diagnostic information while running the function. """ # Store the transition. self.transition = transition # Grab some observation-specific information from the observation. self.dateObs = observation.dateObs self.BERV = observation.BERV self.airmass = observation.airmass self.exptime = observation.exptime self.calibrationFile = observation.calibrationFile self.calibrationSource = observation.calibrationSource self.order = int(order) # Store the plot paths. self.close_up_plot_path = close_up_plot_path self.context_plot_path = context_plot_path # Define some useful numbers and variables. # The ranges in velocity space to search around to find the minimum of # an absorption line. search_range_vel = 5 * u.km / u.s # The range in velocity space to consider to find the continuum. continuum_range_vel = 25 * u.km / u.s # The number of pixels either side of the flux minimum to use in the # fit. pixel_range = 3 # If no radial velocity is given, use the radial velocity from the # supplied observation. This is mostly for use with things like # asteroids that might not have a radial velocity assigned. if radial_velocity is None: radial_velocity = observation.radialVelocity # Shift the wavelength being searched for to correct for the radial # velocity of the star. nominal_wavelength = self.transition.wavelength.to(u.angstrom) self.correctedWavelength = shift_wavelength(nominal_wavelength, radial_velocity) if verbose: tqdm.write('Given RV {:.2f}: line {:.3f} should be at {:.3f}'. 
format(radial_velocity, nominal_wavelength.to(u.angstrom), self.correctedWavelength.to(u.angstrom))) self.baryArray = observation.barycentricArray[self.order] self.fluxArray = observation.photonFluxArray[self.order] self.errorArray = observation.errorArray[self.order] # Figure out the range in wavelength space to search around the nominal # wavelength for the flux minimum, as well as the range to take for # measuring the continuum. search_range = velocity2wavelength(search_range_vel, self.correctedWavelength) self.continuumRange = velocity2wavelength(continuum_range_vel, self.correctedWavelength) low_search_index = wavelength2index(self.correctedWavelength - search_range, self.baryArray) high_search_index = wavelength2index(self.correctedWavelength + search_range, self.baryArray) self.lowContinuumIndex = wavelength2index(self.correctedWavelength - self.continuumRange, self.baryArray) self.highContinuumIndex = wavelength2index(self.correctedWavelength + self.continuumRange, self.baryArray) self.centralIndex = low_search_index + \ self.fluxArray[low_search_index:high_search_index].argmin() self.continuumLevel = self.fluxArray[self.lowContinuumIndex: self.highContinuumIndex].max() self.fluxMinimum = self.fluxArray[self.centralIndex] self.lowFitIndex = self.centralIndex - pixel_range self.highFitIndex = self.centralIndex + pixel_range + 1 # Grab the wavelengths, fluxes, and errors from the region to be fit. self.wavelengths = self.baryArray[self.lowFitIndex:self.highFitIndex] self.fluxes = self.fluxArray[self.lowFitIndex:self.highFitIndex] self.errors = self.errorArray[self.lowFitIndex:self.highFitIndex] self.lineDepth = self.continuumLevel - self.fluxMinimum self.normalizedLineDepth = self.lineDepth / self.continuumLevel self.initial_guess = (self.lineDepth * -1, self.correctedWavelength.to(u.angstrom).value, 0.05, self.continuumLevel) if verbose: tqdm.write('Attempting to fit line at {:.4f} with initial guess:'. 
format(self.correctedWavelength)) if verbose: tqdm.write('Initial parameters are:\n{}\n{}\n{}\n{}'.format( *self.initial_guess)) # Do the fitting: try: if integrated: wavelengths_lower = observation.pixelLowerArray wavelengths_upper = observation.pixelUpperArray pixel_edges_lower = wavelengths_lower[self.order, self.lowFitIndex: self.highFitIndex] pixel_edges_upper = wavelengths_upper[self.order, self.lowFitIndex: self.highFitIndex] self.popt, self.pcov = curve_fit(integrated_gaussian, (pixel_edges_lower.value, pixel_edges_upper.value), self.fluxes, sigma=self.errors, absolute_sigma=True, p0=self.initial_guess, method='lm', maxfev=10000) else: self.popt, self.pcov = curve_fit(gaussian, self.wavelengths.value, self.fluxes, sigma=self.errors, absolute_sigma=True, p0=self.initial_guess, method='lm', maxfev=10000) except (OptimizeWarning, RuntimeError): print(self.continuumLevel) print(self.lineDepth) print(self.initial_guess) self.plotFit(close_up_plot_path, context_plot_path, plot_fit=False, verbose=True) raise if verbose: print(self.popt) print(self.pcov) # Recover the fitted values for the parameters: self.amplitude = self.popt[0] self.mean = self.popt[1] * u.angstrom self.sigma = self.popt[2] * u.angstrom if self.amplitude > 0: err_msg = ('Fit for' f' {self.transition.wavelength.to(u.angstrom)}' ' has a positive amplitude.') tqdm.write(err_msg) self.plotFit(close_up_plot_path, context_plot_path, plot_fit=True, verbose=verbose) raise PositiveAmplitudeError(err_msg) # Find 1-σ errors from the covariance matrix: self.perr = np.sqrt(np.diag(self.pcov)) self.amplitudeErr = self.perr[0] self.meanErr = self.perr[1] * u.angstrom self.meanErrVel = abs(wavelength2velocity(self.mean, self.mean + self.meanErr)) self.sigmaErr = self.perr[2] * u.angstrom if (self.chiSquaredNu > 1): self.meanErr *= np.sqrt(self.chiSquaredNu) if verbose: tqdm.write('χ^2_ν = {}'.format(self.chiSquaredNu)) # Find the full width at half max. 
# 2.354820 ≈ 2 * sqrt(2 * ln(2)), the relationship of FWHM to the # standard deviation of a Gaussian. self.FWHM = 2.354820 * self.sigma self.FWHMErr = 2.354820 * self.sigmaErr self.velocityFWHM = wavelength2velocity(self.mean, self.mean + self.FWHM).to(u.km/u.s) self.velocityFWHMErr = wavelength2velocity(self.mean, self.mean + self.FWHMErr).to(u.km/u.s) # Compute the offset between the input wavelength and the wavelength # found in the fit. self.offset = self.correctedWavelength - self.mean self.offsetErr = self.meanErr self.velocityOffset = wavelength2velocity(self.correctedWavelength, self.mean) self.velocityOffsetErr = wavelength2velocity(self.mean, self.mean + self.offsetErr) if verbose: print(self.continuumLevel) print(self.fluxMinimum) print(self.wavelengths) @property @property @property def getFitInformation(self): """Return a list of information about the fit which can be written as a CSV file. Returns ------- list A list containing the following information about the fit: 1. Observation date, in ISO format 2. The amplitude of the fit (in photons) 3. The error on the amplitude (in photons) 4. The mean of the fit (in Å) 5. The error on the mean (in Å) 6. The error on the mean (in m/s in velocity space) 7. The sigma of the fitted Gaussian (in Å) 8. The error on the sigma (in Å) 9. The offset from expected wavelength (in m/s) 10. The error on the offset (in m/s) 11. The FWHM (in velocity space) 12. The error on the FWHM (in m/s) 13. The chi-squared-nu value 14. The order the fit was made on (starting at 0, so in [0, 71]. 15. The mean airmass of the observation. 
""" return [self.dateObs.isoformat(timespec='milliseconds'), self.amplitude, self.amplitudeErr, self.mean.value, self.meanErr.value, self.meanErrVel.value, self.sigma.value, self.sigmaErr.value, self.velocityOffset.to(u.m/u.s).value, self.velocityOffsetErr.to(u.m/u.s).value, self.velocityFWHM.to(u.m/u.s).value, self.velocityFWHMErr.to(u.m/u.s).value, self.chiSquaredNu, self.order, self.airmass] def plotFit(self, close_up_plot_path=None, context_plot_path=None, plot_fit=True, verbose=False): """Plot a graph of this fit. This method will produce a 'close-up' plot of just the fitted region itself, in order to check out the fit has worked out, and a wider 'context' plot of the area around the feature. Optional -------- close_up_plot_path : string or `pathlib.Path` The file name to save a close-up plot of the fit to. If not given, will default to using the value providing when initializing the fit. context_plot_path : string or `pathlib.Path` The file name to save a wider context plot (±25 km/s) around the fitted feature to. If not given, will default to using the value provided when initializing the fit. plot_fit : bool, Default : True If *True*, plot the mean of the fit and the fitted Gaussian. Otherwise, don't plot those two things. This allows creating plots of failed fits to see the context of the data. verbose : bool, Default : False If *True*, the function will print out additional information as it runs. """ edge_pixels = (509, 510, 1021, 1022, 1533, 1534, 2045, 2046, 2557, 2558, 3069, 3070, 3581, 3582) # If no plot paths are given, assume we want to use the ones given # when initializing the fit. if close_up_plot_path is None: close_up_plot_path = self.close_up_plot_path if context_plot_path is None: context_plot_path = self.context_plot_path # Set up the figure. 
fig = plt.figure(figsize=(7, 5), dpi=100, tight_layout=True) gs = GridSpec(nrows=2, ncols=1, height_ratios=[4, 1], hspace=0) ax1 = fig.add_subplot(gs[0]) ax2 = fig.add_subplot(gs[1], sharex=ax1) ax1.tick_params(axis='x', direction='in') plt.setp(ax1.get_xticklabels(), visible=False) ax2.set_ylim(bottom=-3, top=3) ax2.yaxis.set_major_locator(ticker.FixedLocator([-2, -1, 0, 1, 2])) for pixel in edge_pixels: ax1.axvline(x=self.baryArray[pixel-1], ymin=0, ymax=0.2, color='LimeGreen', linestyle='--') ax1.set_ylabel('Flux (photo-electrons)') ax2.set_xlabel('Wavelength ($\\AA$)') ax2.set_ylabel('Residuals\n($\\sigma$)') ax1.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.2f}')) ax1.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:>7.1e}')) plt.xticks(horizontalalignment='right') ax1.set_xlim(left=self.correctedWavelength - self.continuumRange, right=self.correctedWavelength + self.continuumRange) # Set y-limits so a fit doesn't balloon the plot scale out. ax1.set_ylim(top=self.continuumLevel * 1.25, bottom=self.fluxMinimum * 0.93) # Plot the expected and measured wavelengths. ax1.axvline(self.correctedWavelength.to(u.angstrom), color='LightSteelBlue', linestyle=':', alpha=0.8, label=r'RV-corrected $\lambda=${:.3f}'.format( self.correctedWavelength.to(u.angstrom))) # Don't plot the mean if this is a failed fit. if hasattr(self, 'mean') and hasattr(self, 'velocityOffset'): ax1.axvline(self.mean.to(u.angstrom), color='IndianRed', alpha=0.7, label='Mean ({:.4f}, {:+.2f})'. format(self.mean.to(u.angstrom), self.velocityOffset.to(u.m/u.s)), linestyle='-') # Plot the actual data. ax1.errorbar(self.baryArray[self.lowContinuumIndex - 1: self.highContinuumIndex + 1], self.fluxArray[self.lowContinuumIndex - 1: self.highContinuumIndex + 1], yerr=self.errorArray[self.lowContinuumIndex - 1: self.highContinuumIndex + 1], color='SandyBrown', ecolor='Sienna', marker='o', markersize=5, label='Flux', barsabove=True) # Generate some x-values across the plot range. 
x = np.linspace(self.baryArray[self.lowContinuumIndex].value, self.baryArray[self.highContinuumIndex].value, 1000) # Plot the initial guess for the gaussian. ax1.plot(x, gaussian(x, *self.initial_guess), color='SlateGray', label='Initial guess', linestyle='--', alpha=0.5) # Plot the fitted gaussian, unless this is a failed fit attempt. if plot_fit: ax1.plot(x, gaussian(x, *self.popt), color='DarkGreen', alpha=0.5, linestyle='-.', label=r'Fit ($\chi^2_\nu=${:.3f}, $\sigma=${:.4f})'. format(self.chiSquaredNu, self.sigma)) # Replace underscore in label so LaTeX won't crash on it. ax1.legend(loc='upper center', framealpha=0.6, fontsize=9, ncol=2, title=self.label.replace('_', r'\_') if\ matplotlib.rcParams['text.usetex'] else self.label, title_fontsize=10, labelspacing=0.4) # Add in some guidelines. ax2.axhline(color='Gray', linestyle='-') ax2.axhline(y=1, color='SkyBlue', linestyle='--') ax2.axhline(y=-1, color='SkyBlue', linestyle='--') ax2.axhline(y=2, color='LightSteelBlue', linestyle=':') ax2.axhline(y=-2, color='LightSteelBlue', linestyle=':') # Plot the residuals on the lower axis. residuals = (self.fluxes - gaussian(self.wavelengths.value, *self.popt)) / self.errors ax2.plot(self.wavelengths, residuals, color='Navy', alpha=0.6, linestyle='', marker='D', linewidth=1.5, markersize=5) # Save the resultant plot. fig.savefig(str(context_plot_path), format="png") if verbose: tqdm.write('Created wider context plot at {}'.format( context_plot_path)) # Now create a close-in version to focus on the fit. ax1.set_xlim(left=self.baryArray[self.lowFitIndex - 1], right=self.baryArray[self.highFitIndex]) ax1.set_ylim(top=self.fluxes.max() * 1.15, bottom=self.fluxes.min() * 0.95) fig.savefig(str(close_up_plot_path), format="png") if verbose: tqdm.write('Created close up plot at {}'.format( close_up_plot_path)) plt.close(fig)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 2892, 3158, 1679, 7769, 25, 2598, 25, 1314, 13130, 198, 198, 31, 9800, 25, 288, 527, 365,...
1.967978
10,680
import tensorflow as try: import numpy as np x_data = np.float32(np.random.rand(2,100)) y_data = np.dot([0.100,0.200],x_data) + 0.300 b = tf.Varible(tf.zerso([1])) W = tf.Varible(tf.random_uniform([1,2],-1.0,1.0)) y = tf.matmul(W,x_data) + b
[ 11748, 11192, 273, 11125, 355, 1949, 25, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 87, 62, 7890, 796, 45941, 13, 22468, 2624, 7, 37659, 13, 25120, 13, 25192, 7, 17, 11, 3064, 4008, 201, 198, 88, 62, 7890, 796, 45941, 13, ...
1.96063
127
# ======================================================================== # # # Copyright (c) 2017 - 2020 scVAE authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ======================================================================== # import numpy from scvae.data.sparse import sparsity from scvae.data.utilities import standard_deviation MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION = 5e8
[ 2, 38093, 1421, 18604, 1303, 198, 2, 198, 2, 15069, 357, 66, 8, 2177, 532, 12131, 629, 11731, 36, 7035, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, ...
3.944206
233
""" API.ai Agent This module provides a Agent class to be used within an Assistant class implementation to be able to interact with the user through the agent """ from . import utils from . import parser from . import widgets Status = utils.enum( 'OK', 'GenericError', 'InvalidData', 'NotImplemented', 'Aborted', 'AccessDenied') """ :obj:`apiai_assistant.utils.enum`: statuses of the agent """ class Agent(object): """ Provides methods to instruct the agent on how to respond tu user queries Args: corpus (:obj:`apiai_assistant.corpus.Corpus`): Corpus to get the outputs from request (:obj:`dict`, optional): API.ai request ssml (boolean, optional, True): if True, will format speech to support SSML """ SupportedPermissions = utils.enum( 'NAME', 'COARSE_LOCATION', 'PRECISE_LOCATION') """ :obj:`apiai_assistant.utils.enum`: permissions supported by the agent """ def tell(self, corpus_id, context=None): """ Looks for the output id in the corpus and formats with the context Args: corpus_id (str): ID of the output to tell context (:obj:`dict`, optional): context to format the output with """ output = self.corpus[corpus_id] if context is not None: output = output.format(**context) self.tell_raw(output) def ask(self, corpus_id, context=None): """ Looks for the output id in the corpus and formats with the context Args: corpus_id (str): ID of the output to ask context (:obj:`dict`, optional): context to format the output with """ output = self.corpus[corpus_id] if context is not None: output = output.format(**context) self.ask_raw(output) def suggest(self, corpus_id): """ Looks for the output id in the corpus to suggest Args: corpus_id (str): ID of the suggestions to show """ suggestions = self.corpus.get(corpus_id, self.corpus.suggestions) if suggestions: self.suggest_raw(suggestions) def tell_raw(self, speech, text=None): """ Tells the user by adding the speech and/or text to the response's messages Args: speech (str): speech to tell text (str, optional): text to tell, if None, 
speech will be used """ self.response.close_mic() widget = widgets.SimpleResponseWidget(speech, text, ssml=self._ssml) self.show(widget) def ask_raw(self, speech, text=None): """ Asks the user by adding the speech and/or text to the response's messages Args: speech (str): speech to ask text (str, optional): text to ask, if None, speech will be used """ self.response.open_mic() widget = widgets.SimpleResponseWidget(speech, text, ssml=self._ssml) self.show(widget) def suggest_raw(self, suggestions): """ Suggests the user by adding the suggestions to the response's messages Args: suggestions (:obj:`list`): suggestions """ if type(suggestions) != list: suggestions = [suggestions] suggestion_widget = widgets.SuggestionsWidget(suggestions) self.show(suggestion_widget) def show(self, obj): """ Renders a rich response widget and add it to the response's messages """ message = obj.render() self.response.add_message(message) def add_context(self, context_name, parameters=None, lifespan=5): """ Adds a context to the response's contexts Args: context_name (str): name of the context to add parameters (:obj:`dict`, optional): parameters of the context lifespan (:obj:`int`, optional, 5): lifespan of the context """ self.response.add_context({ 'name': context_name, 'lifespan': lifespan, 'parameters': parameters or {} }) class Response(object): """ Abstraction to build API.ai compatible responses """ PERMISSIONS = { Agent.SupportedPermissions.NAME: 'NAME', Agent.SupportedPermissions.COARSE_LOCATION: 'DEVICE_COARSE_LOCATION', Agent.SupportedPermissions.PRECISE_LOCATION: 'DEVICE_PRECISE_LOCATION' } @property
[ 37811, 7824, 13, 1872, 15906, 198, 198, 1212, 8265, 3769, 257, 15906, 1398, 284, 307, 973, 1626, 281, 15286, 1398, 198, 320, 32851, 284, 307, 1498, 284, 9427, 351, 262, 2836, 832, 262, 5797, 37227, 628, 198, 6738, 764, 1330, 3384, 448...
2.474444
1,800
from django.db import IntegrityError from django.test import TestCase from .models import Profile, User class UserTest(TestCase): """ Test module for Puppy model """
[ 6738, 42625, 14208, 13, 9945, 1330, 39348, 12331, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 764, 27530, 1330, 13118, 11, 11787, 628, 198, 4871, 11787, 14402, 7, 14402, 20448, 2599, 198, 220, 220, 220, 37227, 6208, ...
3.659574
47
from __future__ import annotations from typing import TYPE_CHECKING import dearpygui.core as dpgcore from dearpygui_obj import _register_item_type, wrap_callback from dearpygui_obj.wrapper.widget import Widget, ItemWidget, ConfigProperty if TYPE_CHECKING: from typing import Optional, Tuple, Callable from dearpygui_obj import PyGuiCallback from dearpygui_obj.wrapper.widget import ItemConfigData class MainWindow: """Container for static functions used to manipulate the main window. Attempting to instantiate this class will raise a :class:`TypeError`. """ @staticmethod @staticmethod @staticmethod @staticmethod @staticmethod @staticmethod def set_primary_window(window: Optional[Window]) -> None: """Set a window as the primary window, or remove the primary window. When a window is set as the primary window it will fill the entire viewport. If any other window was already set as the primary window, it will be unset. """ if window is not None: dpgcore.set_primary_window(window.id, True) else: dpgcore.set_primary_window('', False) @staticmethod def set_resize_callback(callback: Callable): """Set a callback for when the main viewport is resized.""" dpgcore.set_resize_callback(callback, handler='') @staticmethod def enable_docking(**kwargs): """Enable docking and set docking options. Note: Once docking is enabled, it cannot be disabled. Keyword Arguments: shift_only: if ``True``, hold down shift for docking. If ``False``, dock by dragging window titlebars. dock_space: if ``True``, windows will be able to dock with the main window viewport. 
""" dpgcore.enable_docking(**kwargs) @_register_item_type('mvAppItemType::Window') class Window(Widget): """Creates a new window.""" label: str = ConfigProperty() x_pos: int = ConfigProperty() y_pos: int = ConfigProperty() autosize: bool = ConfigProperty() no_resize: bool = ConfigProperty() no_title_bar: bool = ConfigProperty() no_move: bool = ConfigProperty() no_collapse: bool = ConfigProperty() no_focus_on_appearing: bool = ConfigProperty() no_bring_to_front_on_focus: bool = ConfigProperty() no_close: bool = ConfigProperty() no_background: bool = ConfigProperty() show_menubar: bool = ConfigProperty(key='menubar') #: Disable scrollbars (can still scroll with mouse or programmatically). no_scrollbar: bool = ConfigProperty() #: Allow horizontal scrollbar to appear. horizontal_scrollbar: bool = ConfigProperty() pos: Tuple[int, int] @ConfigProperty() def pos(self) -> Tuple[int, int]: """Get or set (x_pos, y_pos) as a tuple.""" config = self.get_config() return config['x_pos'], config['y_pos'] @pos.getconfig def __init__(self, label: str = None, *, name_id: str = None, **config): """ Parameters: label: window label. """ super().__init__(label=label, name_id=name_id, **config) ## workaround for the fact that you can't set the on_close callback in DPG _on_close_callback: Optional[Callable] = None def on_close(self, callback: Optional[PyGuiCallback]) -> Callable: """Set on_close callback, can be used as a decorator.""" if callback is not None: callback = wrap_callback(callback) self._on_close_callback = callback return callback def resized(self, callback: PyGuiCallback) -> Callable: """Set resized callback, can be used as a decorator.""" dpgcore.set_resize_callback(wrap_callback(callback), handler=self.id) return callback ## Menu Bars and Menus @_register_item_type('mvAppItemType::MenuBar') class MenuBar(Widget, ItemWidget): """A menu bar that can be added to a :class:`.Window`.""" __all__ = [ 'MainWindow', 'Window', 'MenuBar', ]
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 19720, 1330, 41876, 62, 50084, 2751, 198, 198, 11748, 390, 5117, 88, 48317, 13, 7295, 355, 288, 6024, 7295, 198, 6738, 390, 5117, 88, 48317, 62, 26801, 1330, 4808, 30238, 62, 9186, ...
2.693783
1,512
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Operations and utilities related to the Graphcore IPU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ # pylint: disable=wildcard-import,unused-import from tensorflow.python.ipu.ops import all_to_all_op from tensorflow.python.ipu.ops import all_to_all_op_grad from tensorflow.python.ipu.ops import custom_ops from tensorflow.python.ipu.ops import cross_replica_ops from tensorflow.python.ipu.ops import embedding_ops from tensorflow.python.ipu.ops import embedding_ops_grad from tensorflow.python.ipu.ops import functional_ops from tensorflow.python.ipu.ops import functional_ops_grad from tensorflow.python.ipu.ops import internal_ops from tensorflow.python.ipu.ops import internal_ops_grad from tensorflow.python.ipu.ops import math_ops from tensorflow.python.ipu.ops import nn_ops from tensorflow.python.ipu.ops import nn_ops_grad from tensorflow.python.ipu.ops import normalization_ops from tensorflow.python.ipu.ops import normalization_ops_grad from tensorflow.python.ipu.ops import pipelining_ops from tensorflow.python.ipu.ops import pipelining_ops_grad from tensorflow.python.ipu.ops import rand_ops from tensorflow.python.ipu.ops import rand_ops_grad from tensorflow.python.ipu.ops import reduce_scatter_op from tensorflow.python.ipu.ops import replication_ops from tensorflow.python.ipu.ops 
import rnn_ops from tensorflow.python.ipu.ops import rnn_ops_grad from tensorflow.python.ipu.ops import summary_ops from tensorflow.python.ipu.ops.experimental import popfloat_cast_to_gfloat from tensorflow.python.ipu import autoshard from tensorflow.python.ipu import autoshard_cnn from tensorflow.python.ipu import data from tensorflow.python.ipu import dataset_benchmark from tensorflow.python.ipu import ipu_compiler from tensorflow.python.ipu import ipu_infeed_queue from tensorflow.python.ipu import ipu_multi_worker_strategy from tensorflow.python.ipu import ipu_outfeed_queue from tensorflow.python.ipu import ipu_run_config from tensorflow.python.ipu import ipu_session_run_hooks from tensorflow.python.ipu import ipu_strategy from tensorflow.python.ipu import loops from tensorflow.python.ipu import scopes from tensorflow.python.ipu import sharding from tensorflow.python.ipu import utils from tensorflow.python.ipu import ipu_estimator from tensorflow.python.ipu import ipu_pipeline_estimator from tensorflow.python.ipu import vertex_edsl from tensorflow.python.ipu.keras import layers from tensorflow.python.ipu.optimizers import cross_replica_optimizer from tensorflow.python.ipu.optimizers import map_gradient_optimizer from tensorflow.python.ipu.optimizers import sharded_optimizer from tensorflow.python.ipu.optimizers import gradient_accumulation_optimizer # Expose functional_ops.function as ipu.function from tensorflow.python.ipu.ops.functional_ops import function # pylint: enable=wildcard-import,unused-import
[ 2, 15069, 13130, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846,...
3.244505
1,092
import collections import json import os import sys import hashlib import logging from .utils import cached_property, get_resource from .graphics import Image class SettingsDict(collections.MutableMapping): ''' Represents the tingapp.settings dict-like object. The settings are loaded from three files in the app bundle - default_settings.json This file contains default settings as defined by the app creator - settings.json This file contains settings as set by a user when installing the app (via Tide, for example) - local_settings.json This file contains settings written by the app itself. Settings can be overridden by later files. Changes are always saved to the local_settings.json file. ''' app = TingApp()
[ 11748, 17268, 198, 11748, 33918, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 12234, 8019, 198, 11748, 18931, 198, 6738, 764, 26791, 1330, 39986, 62, 26745, 11, 651, 62, 31092, 198, 6738, 764, 70, 11549, 1330, 7412, 628, 198, 198, ...
3.109848
264
# Algorithm to draw a straight line using Bresenham's algorithm # works only foor lines having inclination <= 45 degree from graphics import * import time import ctypes user32 = ctypes.windll.user32 scrnWidth, scrnHeight= (user32.GetSystemMetrics(0)-100), (user32.GetSystemMetrics(1)-100) print("Straight line drawing using Bresenham's algorithm : ") start = tuple(int(x.strip()) for x in input("Enter starting co-ordinate of the straight line (x,y) : ").split(',')) end = tuple(int(x.strip()) for x in input("Enter starting co-ordinate of the straight line (x,y) : ").split(',')) win = GraphWin('Bresenham\'s Straight Line', scrnWidth, scrnHeight) # for printing the message message = Text(Point(win.getWidth()/2, 30), 'Straight line drawing using Bresenham\'s algorithm : ') message.setTextColor('red') message.setStyle('italic') message.setSize(20) message.draw(win) message = Text(Point(win.getWidth()/2, win.getHeight()-20), 'Click on the window to close') message.setTextColor('red') message.setStyle('italic') message.setSize(20) message.draw(win) x1,y1 = start x2,y2 = end pt = Point(x1,y1) x_new,y_new = x1,y1 pt.draw(win) delta_x = abs(x2 - x1) delta_y = abs(y2 - y1) p = 2 * delta_y - delta_x i = 1 while(i <= delta_x): time.sleep(0.1) if(p < 0): x_new += 1 pt = Point(x_new,y_new) pt.draw(win) p += 2*delta_y else: x_new += 1 y_new += 1 pt = Point(x_new,y_new) pt.draw(win) p = p + 2*delta_y - 2*delta_x i+=1 win.getMouse() win.close()
[ 2, 978, 42289, 284, 3197, 257, 3892, 1627, 1262, 347, 411, 268, 2763, 338, 11862, 201, 198, 2, 2499, 691, 277, 2675, 3951, 1719, 36793, 19841, 4153, 4922, 201, 198, 6738, 9382, 1330, 1635, 201, 198, 11748, 640, 201, 198, 11748, 269, ...
2.296137
699
import unittest import pandas as pd import numpy as np import src.features.outlier_correction as oc
[ 11748, 555, 715, 395, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 12351, 13, 40890, 13, 448, 2505, 62, 10215, 8243, 355, 267, 66 ]
3.193548
31
############################################################################### # Copyright (c) 2013-2022 Contributors to the Eclipse Foundation # # See the NOTICE file distributed with this work for additional # information regarding copyright ownership. # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, # Version 2.0 which accompanies this distribution and is available at # http://www.apache.org/licenses/LICENSE-2.0.txt ############################################################################## import types
[ 29113, 29113, 7804, 4242, 21017, 198, 2, 15069, 357, 66, 8, 2211, 12, 1238, 1828, 25767, 669, 284, 262, 30991, 5693, 198, 2, 220, 220, 220, 198, 2, 220, 4091, 262, 28536, 2393, 9387, 351, 428, 670, 329, 3224, 198, 2, 220, 1321, 51...
4.942623
122
import torch import numpy as np import torch.optim as optim from torch.nn import NLLLoss from torch.utils.data import DataLoader from torch.utils.data.sampler import RandomSampler from torch.nn.utils import clip_grad_norm from torchvision.datasets import CIFAR10 from torchvision.transforms import transforms from src.model import CIFAR10_Network
[ 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 6738, 28034, 13, 20471, 1330, 399, 3069, 43, 793, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 198, 6738, 28034, 13, 26791, 13,...
3.587629
97
# ASCIITableToPDF.py # Author: Vasudev Ram - http://www.dancingbison.com # Demo program to show how to generate an ASCII table as PDF, # using the xtopdf toolkit for PDF creation from Python. # Generates a PDF file with information about the # first 32 ASCII codes, i.e. the control characters. # Based on the ASCII Code table at http://www.ascii-code.com/ import sys from PDFWriter import PDFWriter # Define the header information. column_names = ['DEC', 'OCT', 'HEX', 'BIN', 'Symbol', 'Description'] column_widths = [4, 6, 4, 10, 7, 20] # Define the ASCII control character information. ascii_control_characters = \ """ 0 000 00 00000000 NUL � Null char 1 001 01 00000001 SOH  Start of Heading 2 002 02 00000010 STX  Start of Text 3 003 03 00000011 ETX  End of Text 4 004 04 00000100 EOT  End of Transmission 5 005 05 00000101 ENQ  Enquiry 6 006 06 00000110 ACK  Acknowledgment 7 007 07 00000111 BEL  Bell 8 010 08 00001000 BS  Back Space 9 011 09 00001001 HT Horizontal Tab 10 012 0A 00001010 LF Line Feed 11 013 0B 00001011 VT Vertical Tab 12 014 0C 00001100 FF Form Feed 13 015 0D 00001101 CR Carriage Return 14 016 0E 00001110 SO  Shift Out / X-On 15 017 0F 00001111 SI  Shift In / X-Off 16 020 10 00010000 DLE  Data Line Escape 17 021 11 00010001 DC1  Device Control 1 (oft. XON) 18 022 12 00010010 DC2  Device Control 2 19 023 13 00010011 DC3  Device Control 3 (oft. XOFF) 20 024 14 00010100 DC4  Device Control 4 21 025 15 00010101 NAK  Negative Acknowledgement 22 026 16 00010110 SYN  Synchronous Idle 23 027 17 00010111 ETB  End of Transmit Block 24 030 18 00011000 CAN  Cancel 25 031 19 00011001 EM  End of Medium 26 032 1A 00011010 SUB  Substitute 27 033 1B 00011011 ESC  Escape 28 034 1C 00011100 FS  File Separator 29 035 1D 00011101 GS  Group Separator 30 036 1E 00011110 RS  Record Separator 31 037 1F 00011111 US  Unit Separator """ # Create and set some of the fields of a PDFWriter instance. 
pw = PDFWriter("ASCII-Table.pdf") pw.setFont("Courier", 12) pw.setHeader("ASCII Control Characters - 0 to 31") pw.setFooter("Generated by xtopdf: http://slid.es/vasudevram/xtopdf") # Write the column headings to the output. column_headings = [ str(val).ljust(column_widths[idx]) \ for idx, val in enumerate(column_names) ] pw.writeLine(' '.join(column_headings)) # Split the string into lines, omitting the first and last empty lines. for line in ascii_control_characters.split('\n')[1:-1]: # Split the line into space-delimited fields. lis = line.split() # Join the words of the Description back into one field, # since it was split due to having internal spaces. lis2 = lis[0:5] + [' '.join(lis[6:])] # Write the column data to the output. lis3 = [ str(val).ljust(column_widths[idx]) \ for idx, val in enumerate(lis2) ] pw.writeLine(' '.join(lis3)) pw.close()
[ 2, 25400, 40, 2043, 540, 2514, 20456, 13, 9078, 198, 2, 6434, 25, 23663, 463, 1990, 7431, 532, 2638, 1378, 2503, 13, 67, 5077, 65, 1653, 13, 785, 198, 2, 34588, 1430, 284, 905, 703, 284, 7716, 281, 37101, 3084, 355, 12960, 11, 198...
2.014541
1,788
"""This is game module It helps control game status and contains some helper functions """ import pygame import brick from constants import * import random import time # pygame will search the system for font with similar name fontName = pygame.font.match_font('arial') def initSound(): """load various sound effects""" WALLOPENSND = pygame.mixer.Sound(path.join(SNDDIR, 'wallopen.wav')) BALLFALLSND = pygame.mixer.Sound(path.join(SNDDIR, 'ballfalling.wav')) PADDLECOLSND = pygame.mixer.Sound(path.join(SNDDIR, 'paddlecollide.wav')) SINGCOLSNDS = [] SINGCOLSNDSLIST = ['singlebrickcol1.wav', 'singlebrickcol2.wav', 'singlebrickcol3.wav', 'singlebrickcol4.wav'] for snd in SINGCOLSNDSLIST: SINGCOLSNDS.append(pygame.mixer.Sound(path.join(SNDDIR, snd))) MULTICOLSNDS = [] MULTICOLSNDSLIST = ['multibrickcol1.wav', 'multibrickcol2.wav'] for snd in MULTICOLSNDSLIST: MULTICOLSNDS.append(pygame.mixer.Sound(path.join(SNDDIR, snd))) return WALLOPENSND, BALLFALLSND, PADDLECOLSND, SINGCOLSNDS, MULTICOLSNDS def drawText(surface, text, size, x, y): """size: font size. x, y: location.""" font = pygame.font.Font(fontName, size) # False - alias / True - Anti-aliased(look smoother and nice) text_surface = font.render(text, True, GREEN) text_rect = text_surface.get_rect() text_rect.midtop = (x, y) surface.blit(text_surface, text_rect) def createBricks(brickGroup, allSprites): """Create and place brick objects """ brickPlaceY = 0 for i in range(6): if i % 2: brickPlaceX = 100 else: brickPlaceX = 50 for j in range(6): brickImg = pygame.image.load(path.join(IMGDIR, random.choice(BRICKIMGLIST))) brickObj = brick.Brick(brickImg, brickPlaceX, brickPlaceY) allSprites.add(brickObj) brickGroup.add(brickObj) brickPlaceX += 100 if not i % 2: brickImg = pygame.image.load(path.join(IMGDIR, random.choice(BRICKIMGLIST))) brickObj = brick.Brick(brickImg, brickPlaceX, brickPlaceY) allSprites.add(brickObj) brickGroup.add(brickObj) brickPlaceY += 30
[ 37811, 1212, 318, 983, 8265, 198, 198, 1026, 5419, 1630, 983, 3722, 290, 4909, 617, 31904, 5499, 198, 37811, 198, 11748, 12972, 6057, 198, 11748, 17214, 198, 6738, 38491, 1330, 1635, 198, 11748, 4738, 198, 11748, 640, 628, 198, 198, 2, ...
2.281947
986
# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Debugger restful api.""" import json import weakref from urllib.parse import unquote from flask import Blueprint, jsonify, request, Response from mindinsight.conf import settings from mindinsight.debugger.session_manager import SessionManager from mindinsight.utils.exceptions import ParamMissError, ParamValueError, ParamTypeError BLUEPRINT = Blueprint("debugger", __name__, url_prefix=settings.URL_PATH_PREFIX + settings.API_PREFIX) def _unquote_param(param): """ Decode parameter value. Args: param (str): Encoded param value. Returns: str, decoded param value. """ if isinstance(param, str): try: param = unquote(param, errors='strict') except UnicodeDecodeError: raise ParamValueError('Unquote error with strict mode.') return param def _read_post_request(post_request): """ Extract the body of post request. Args: post_request (object): The post request. Returns: dict, the deserialized body of request. 
""" body = post_request.stream.read() try: body = json.loads(body if body else "{}") except Exception: raise ParamValueError("Json data parse failed.") return body def to_int(param, param_name): """Transfer param to int type.""" try: param = int(param) except ValueError: raise ParamTypeError(param_name, 'Integer') return param def _wrap_reply(func, *args, **kwargs): """Serialize reply.""" reply = func(*args, **kwargs) return jsonify(reply) @BLUEPRINT.route("/debugger/sessions/<session_id>/poll-data", methods=["GET"]) def poll_data(session_id): """ Wait for data to be updated on UI. Get data from server and display the change on UI. Returns: str, the updated data. Examples: >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/poll-data?pos=xx """ pos = request.args.get('pos') reply = _wrap_reply(_session_manager.get_session(session_id).poll_data, pos) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/search", methods=["GET"]) def search(session_id): """ Search nodes in specified watchpoint. Returns: str, the required data. Examples: >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search?name=mock_name&watch_point_id=1 """ name = request.args.get('name') graph_name = request.args.get('graph_name') watch_point_id = to_int(request.args.get('watch_point_id', 0), 'watch_point_id') node_category = request.args.get('node_category') rank_id = to_int(request.args.get('rank_id', 0), 'rank_id') stack_pattern = _unquote_param(request.args.get('stack_info_key_word')) reply = _wrap_reply(_session_manager.get_session(session_id).search, {'name': name, 'graph_name': graph_name, 'watch_point_id': watch_point_id, 'node_category': node_category, 'rank_id': rank_id, 'stack_pattern': stack_pattern}) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-comparisons", methods=["GET"]) def tensor_comparisons(session_id): """ Get tensor comparisons. Returns: str, the required data. 
Examples: >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-comparisons """ name = request.args.get('name') detail = request.args.get('detail', 'data') shape = _unquote_param(request.args.get('shape')) graph_name = request.args.get('graph_name', '') tolerance = request.args.get('tolerance', '0') rank_id = to_int(request.args.get('rank_id', 0), 'rank_id') reply = _wrap_reply(_session_manager.get_session(session_id).tensor_comparisons, name, shape, detail, tolerance, rank_id, graph_name) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/retrieve", methods=["POST"]) def retrieve(session_id): """ Retrieve data according to mode and params. Returns: str, the required data. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/retrieve """ body = _read_post_request(request) mode = body.get('mode') params = body.get('params') reply = _wrap_reply(_session_manager.get_session(session_id).retrieve, mode, params) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-history", methods=["POST"]) def retrieve_tensor_history(session_id): """ Retrieve data according to mode and params. Returns: str, the required data. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-history """ body = _read_post_request(request) name = body.get('name') graph_name = body.get('graph_name') rank_id = to_int(body.get('rank_id', 0), 'rank_id') reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_history, name, graph_name, rank_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensors", methods=["GET"]) def retrieve_tensor_value(session_id): """ Retrieve tensor value according to name and shape. Returns: str, the required data. 
Examples: >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensors?name=tensor_name&detail=data&shape=[1,1,:,:] """ name = request.args.get('name') detail = request.args.get('detail') shape = _unquote_param(request.args.get('shape')) graph_name = request.args.get('graph_name') prev = bool(request.args.get('prev') == 'true') rank_id = to_int(request.args.get('rank_id', 0), 'rank_id') reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_value, name, detail, shape, graph_name, prev, rank_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/create-watchpoint", methods=["POST"]) def create_watchpoint(session_id): """ Create watchpoint. Returns: str, watchpoint id. Raises: MindInsightException: If method fails to be called. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/create-watchpoint """ params = _read_post_request(request) params['watch_condition'] = params.pop('condition', None) reply = _wrap_reply(_session_manager.get_session(session_id).create_watchpoint, params) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/update-watchpoint", methods=["POST"]) def update_watchpoint(session_id): """ Update watchpoint. Returns: str, reply message. Raises: MindInsightException: If method fails to be called. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/update-watchpoint """ params = _read_post_request(request) reply = _wrap_reply(_session_manager.get_session(session_id).update_watchpoint, params) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/delete-watchpoint", methods=["POST"]) def delete_watchpoint(session_id): """ Delete watchpoint. Returns: str, reply message. Raises: MindInsightException: If method fails to be called. 
Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/delete-watchpoint """ body = _read_post_request(request) watch_point_id = body.get('watch_point_id') reply = _wrap_reply(_session_manager.get_session(session_id).delete_watchpoint, watch_point_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/control", methods=["POST"]) def control(session_id): """ Control request. Returns: str, reply message. Raises: MindInsightException: If method fails to be called. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/control """ params = _read_post_request(request) reply = _wrap_reply(_session_manager.get_session(session_id).control, params) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/recheck", methods=["POST"]) def recheck(session_id): """ Recheck request. Returns: str, reply message. Raises: MindInsightException: If method fails to be called. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/recheck """ reply = _wrap_reply(_session_manager.get_session(session_id).recheck) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-graphs", methods=["GET"]) def retrieve_tensor_graph(session_id): """ Retrieve tensor value according to name and shape. Returns: str, the required data. Examples: >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-graphs?tensor_name=xxx&graph_name=xxx """ tensor_name = request.args.get('tensor_name') graph_name = request.args.get('graph_name') rank_id = to_int(request.args.get('rank_id', 0), 'rank_id') reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_graph, tensor_name, graph_name, rank_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-hits", methods=["GET"]) def retrieve_tensor_hits(session_id): """ Retrieve tensor value according to name and shape. Returns: str, the required data. 
Examples: >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-hits?tensor_name=xxx&graph_name=xxx """ tensor_name = request.args.get('tensor_name') graph_name = request.args.get('graph_name') rank_id = to_int(request.args.get('rank_id', 0), 'rank_id') reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_hits, tensor_name, graph_name, rank_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/search-watchpoint-hits", methods=["POST"]) def search_watchpoint_hits(session_id): """ Search watchpoint hits by group condition. Returns: str, the required data. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search-watchpoint-hits """ body = _read_post_request(request) group_condition = body.get('group_condition') reply = _wrap_reply(_session_manager.get_session(session_id).search_watchpoint_hits, group_condition) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/condition-collections", methods=["GET"]) def get_condition_collections(session_id): """Get condition collections.""" reply = _wrap_reply(_session_manager.get_session(session_id).get_condition_collections) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/set-recommended-watch-points", methods=["POST"]) def set_recommended_watch_points(session_id): """Set recommended watch points.""" body = _read_post_request(request) request_body = body.get('requestBody') if request_body is None: raise ParamMissError('requestBody') set_recommended = request_body.get('set_recommended') reply = _wrap_reply(_session_manager.get_session(session_id).set_recommended_watch_points, set_recommended) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/load", methods=["POST"]) def load(session_id): """ Retrieve tensor value according to name and shape. Returns: str, the required data. 
Examples: >>> GET http://xxx/v1/mindinsight/debugger/sessions/xxxx/tensor-files/load """ body = _read_post_request(request) name = body.get('name') graph_name = body.get('graph_name') rank_id = to_int(body.get('rank_id', 0), 'rank_id') prev = bool(body.get('prev') == 'true') reply = _wrap_reply(_session_manager.get_session(session_id).load, name, prev, graph_name, rank_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/download", methods=["GET"]) def download(session_id): """ Retrieve tensor value according to name and shape. Returns: str, the required data. Examples: >>> GET http://xxx/v1/mindinsight/debugger/sessions/xxx/tensor-files/download?name=name&graph_name=xxx&prev=xxx """ name = request.args.get('name') graph_name = request.args.get('graph_name') rank_id = to_int(request.args.get('rank_id', 0), 'rank_id') prev = bool(request.args.get('prev') == 'true') file_name, file_path, clean_func = _session_manager.get_session(session_id).download(name, prev, graph_name, rank_id) response = Response(file_send(), content_type='application/octet-stream') response.headers["Content-disposition"] = 'attachment; filename=%s' % file_name weakref.finalize(response, clean_func,) return response @BLUEPRINT.route("/debugger/sessions", methods=["POST"]) def create_session(): """ Get session id if session exist, else create a session. Returns: str, session id. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/sessions """ body = _read_post_request(request) summary_dir = body.get('dump_dir') session_type = body.get('session_type') reply = _wrap_reply(_session_manager.create_session, session_type, summary_dir) return reply @BLUEPRINT.route("/debugger/sessions", methods=["GET"]) def get_train_jobs(): """ Check the current active sessions. 
Examples: >>> GET http://xxxx/v1/mindinsight/debugger/sessions """ reply = _wrap_reply(_session_manager.get_train_jobs) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/delete", methods=["POST"]) def delete_session(session_id): """ Delete session by session id. Examples: >>> POST http://xxxx/v1/mindinsight/debugger/xxx/delete-session """ reply = _wrap_reply(_session_manager.delete_session, session_id) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/stacks", methods=["GET"]) def get_stack_infos(session_id): """ Get stack infos. Examples: >>> GET /v1/mindsight/debugger/sessions/<session_id>/stacks?key_word=xxx&offset=0 """ key_word = _unquote_param(request.args.get('key_word')) limit = int(request.args.get('limit', 10)) offset = int(request.args.get('offset', 0)) filter_condition = { 'pattern': key_word, 'limit': limit, 'offset': offset } reply = _wrap_reply(_session_manager.get_session(session_id).get_stack_infos, filter_condition) return reply @BLUEPRINT.route("/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs", methods=["GET"]) def get_graph_runs(session_id, rank_id): """ Get graph runs. Examples: >>> GET /v1/mindsight/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs """ session = _session_manager.get_session(session_id) rank_id = to_int(rank_id, 'rank_id') reply = _wrap_reply(session.get_graph_runs, rank_id) return reply _session_manager = SessionManager.get_instance() def init_module(app): """ Init module entry. Args: app (Flask): The application obj. """ app.register_blueprint(BLUEPRINT)
[ 2, 15069, 12131, 12, 1238, 2481, 43208, 21852, 1766, 1539, 12052, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262...
2.557231
6,299
#Imports import os from shutil import copyfile data_path = '/home/ndg/projects/shared_datasets/PuckIt/FACITdata' out_path = '/home/ndg/projects/shared_datasets/PuckIt/sample2/data' sub_file = '/home/ndg/users/hsalee/PuckIt/resources/large_sample.txt' with open(sub_file, 'r') as fin: all_subs = fin.readlines() all_subs = [x.strip() for x in all_subs] all_subs = sorted(all_subs, key=lambda s: s.lower()) for sub in all_subs: print sub src_file = os.path.join(data_path, sub+'.json') dst_file = os.path.join(out_path, sub, 'about.json') copyfile(src_file, dst_file)
[ 2, 3546, 3742, 198, 11748, 28686, 198, 6738, 4423, 346, 1330, 4866, 7753, 198, 198, 7890, 62, 6978, 796, 31051, 11195, 14, 358, 70, 14, 42068, 14, 28710, 62, 19608, 292, 1039, 14, 47, 1347, 1026, 14, 37, 2246, 2043, 7890, 6, 198, ...
2.372
250
from .decode_list import decode_list
[ 6738, 764, 12501, 1098, 62, 4868, 1330, 36899, 62, 4868, 198 ]
3.363636
11
import torch from torch.utils.data import DataLoader from torch.nn import CTCLoss from tqdm import tqdm from dataset import Synth90kDataset, synth90k_collate_fn from model import CRNN from ctc_decoder import ctc_decode from config import evaluate_config as config torch.backends.cudnn.enabled = False if __name__ == '__main__': main()
[ 11748, 28034, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 198, 6738, 28034, 13, 20471, 1330, 327, 4825, 43, 793, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 198, 6738, 27039, 1330, 16065, 400, 3829, 74, 27354, ...
3
115
import logging import pickle from typing import Dict, Optional, Union, NoReturn import numpy as np import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, roc_auc_score from sklearn.pipeline import Pipeline from src.entities.train_params import TrainParams logger = logging.getLogger(__name__) ClassificationModel = Union[RandomForestClassifier, LogisticRegression] def train_model( features: pd.DataFrame, target: pd.Series, train_params: TrainParams ) -> ClassificationModel: """ Train the model. :param features: features to train on :param target: target labels :param train_params: training parameters :return: trained model class """ if train_params.model_type == "RandomForestClassifier": model = RandomForestClassifier( n_estimators=100, random_state=train_params.random_state ) elif train_params.model_type == "LogisticRegression": model = LogisticRegression( solver="liblinear", random_state=train_params.random_state ) else: raise NotImplementedError() model.fit(features, target) logger.info("Model successfully fitted.") return model def evaluate_model(predicts: np.ndarray, target: pd.Series) -> Dict[str, float]: """ Evaluate model and return the metrics. :param predicts: predicted labels :param target: target labels :return: a dict of type {'metric': value} """ metrics = { "accuracy": accuracy_score(target, predicts), "roc_auc": roc_auc_score(target, predicts), } logger.info(f"Metrics are: {metrics}") return metrics def serialize_model(model: ClassificationModel, path: str, transformer: Optional[ColumnTransformer] = None) -> NoReturn: """ Save model to pickle file. 
:param transformer: the transformer to save :param model: the model to save :param path: the file to save to :return: the path to saved file """ pipeline = Pipeline(( [ ("transformer", transformer), ("model", model), ] )) with open(path, "wb") as fout: pickle.dump(pipeline, fout) logger.info(f"Pipeline saved to {path}") return path
[ 11748, 18931, 198, 11748, 2298, 293, 198, 6738, 19720, 1330, 360, 713, 11, 32233, 11, 4479, 11, 1400, 13615, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 1341, 35720, 13, 785, 3455, 1330, 2...
2.797633
845
from dataclasses import dataclass from bindings.gmd.md_georectified_type import MdGeorectifiedType __NAMESPACE__ = "http://www.isotc211.org/2005/gmd" @dataclass
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 34111, 13, 70, 9132, 13, 9132, 62, 469, 382, 310, 1431, 62, 4906, 1330, 39762, 10082, 382, 310, 1431, 6030, 198, 198, 834, 45, 29559, 47, 11598, 834, 796, 366, 4023, 1378, 2...
2.603175
63
from ..BasicGeometry.Edge import Edge from ..BasicGeometry.EdgeArray import EdgeArray from ..Mesh.MeshEdge import MeshEdge from ..Mesh.MeshElementArray import MeshElementArray class Stringer: """The Stringer object stores information on stringer reinforcements created on entities. Attributes ---------- elements: MeshElementArray A :py:class:`~abaqus.Mesh.MeshElementArray.MeshElementArray` object. edges: EdgeArray An :py:class:`~abaqus.BasicGeometry.EdgeArray.EdgeArray` object. Notes ----- This object can be accessed by: .. code-block:: python import part mdb.models[name].parts[name].stringers[name] import assembly mdb.models[name].rootAssembly.allInstances[name].stringers[name] mdb.models[name].rootAssembly.instances[name].stringers[name] mdb.models[name].rootAssembly.stringers[name] """ # A MeshElementArray object. elements: MeshElementArray = MeshElementArray([]) # An EdgeArray object. edges: EdgeArray = EdgeArray([]) def __init__(self, name: str, edges: tuple[Edge] = (), elementEdges: tuple[MeshEdge] = ()): """This method creates a stringer from a sequence of objects in a model database. At least one of the optional arguments needs to be specified. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].parts[*name*].Stringer Parameters ---------- name A String specifying the repository key. The default value is an empty string. edges A sequence of Edge objects specifying the edges on which stringers should be created. Applicable to three and two dimensional parts. elementEdges A sequence of MeshEdge objects specifying the mesh edges on which stringers should be created. Applicable to three and two dimensional parts. Returns ------- A Stringer object. """ pass def EditStringer(self, name: str, edges: tuple[Edge] = (), elementEdges: tuple[MeshEdge] = ()): """This method modifies underlying entities of the selected stringer. At least one of the optional arguments needs to be specified. Notes ----- This function can be accessed by: .. 
code-block:: python mdb.models[name].parts[*name*].Stringer Parameters ---------- name A String specifying the repository key. The default value is an empty string. edges A sequence of Edge objects specifying the edges on which stringers should be created. Applicable to three and two dimensional parts. elementEdges A sequence of MeshEdge objects specifying the mesh edges on which stringers should be created. Applicable to three and two dimensional parts. Returns ------- A Stringer object. """ pass
[ 6738, 11485, 26416, 10082, 15748, 13, 37021, 1330, 13113, 198, 6738, 11485, 26416, 10082, 15748, 13, 37021, 19182, 1330, 13113, 19182, 198, 6738, 11485, 37031, 13, 37031, 37021, 1330, 47529, 37021, 198, 6738, 11485, 37031, 13, 37031, 20180, ...
2.665803
1,158
# (C) 2022 GoodData Corporation from gooddata_sdk import GoodDataApiClient from gooddata_sdk.catalog.catalog_service_base import CatalogServiceBase from gooddata_sdk.catalog.permissions.declarative_model.permission import CatalogDeclarativeWorkspacePermissions
[ 2, 357, 34, 8, 33160, 4599, 6601, 10501, 198, 6738, 922, 7890, 62, 21282, 74, 1330, 4599, 6601, 32, 14415, 11792, 198, 6738, 922, 7890, 62, 21282, 74, 13, 9246, 11794, 13, 9246, 11794, 62, 15271, 62, 8692, 1330, 44515, 16177, 14881, ...
3.540541
74
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect import typeguard
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, ...
3.746032
63
import pygame import time import Sprite from pygame.locals import* from time import sleep from Sprite import Sprite from myVegeta import Vegeta from myTube import Tube from myGoomba import Goomba from myFireball import Fireball print("Use the arrow keys to move. Press Esc to quit.") pygame.init() m = Model() v = View(m) c = Controller(m) while c.keep_going: c.update() m.update() v.update() sleep(0.04) print("Goodbye")
[ 11748, 12972, 6057, 201, 198, 11748, 640, 201, 198, 11748, 33132, 201, 198, 201, 198, 201, 198, 6738, 12972, 6057, 13, 17946, 874, 1330, 9, 201, 198, 6738, 640, 1330, 3993, 201, 198, 6738, 33132, 1330, 33132, 201, 198, 6738, 616, 2697...
2.307339
218
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. #!/usr/bin/env python3 # Copyright 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """Base tokenizer/tokens classes and utilities.""" import copy class Tokens(object): """A class to represent a list of tokenized text.""" TEXT = 0 TEXT_WS = 1 SPAN = 2 POS = 3 LEMMA = 4 NER = 5 def __len__(self): """The number of tokens.""" return len(self.data) def slice(self, i=None, j=None): """Return a view of the list of tokens from [i, j).""" new_tokens = copy.copy(self) new_tokens.data = self.data[i: j] return new_tokens def untokenize(self): """Returns the original text (with whitespace reinserted).""" return ''.join([t[self.TEXT_WS] for t in self.data]).strip() def words(self, uncased=False): """Returns a list of the text of each token Args: uncased: lower cases text """ if uncased: return [t[self.TEXT].lower() for t in self.data] else: return [t[self.TEXT] for t in self.data] def offsets(self): """Returns a list of [start, end) character offsets of each token.""" return [t[self.SPAN] for t in self.data] def pos(self): """Returns a list of part-of-speech tags of each token. Returns None if this annotation was not included. """ if 'pos' not in self.annotators: return None return [t[self.POS] for t in self.data] def lemmas(self): """Returns a list of the lemmatized text of each token. Returns None if this annotation was not included. """ if 'lemma' not in self.annotators: return None return [t[self.LEMMA] for t in self.data] def entities(self): """Returns a list of named-entity-recognition tags of each token. Returns None if this annotation was not included. 
""" if 'ner' not in self.annotators: return None return [t[self.NER] for t in self.data] def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True): """Returns a list of all ngrams from length 1 to n. Args: n: upper limit of ngram length uncased: lower cases text filter_fn: user function that takes in an ngram list and returns True or False to keep or not keep the ngram as_string: return the ngram as a string vs list """ words = self.words(uncased) ngrams = [(s, e + 1) for s in range(len(words)) for e in range(s, min(s + n, len(words))) if not _skip(words[s:e + 1])] # Concatenate into strings if as_strings: ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams] return ngrams def entity_groups(self): """Group consecutive entity tokens with the same NER tag.""" entities = self.entities() if not entities: return None non_ent = self.opts.get('non_ent', 'O') groups = [] idx = 0 while idx < len(entities): ner_tag = entities[idx] # Check for entity tag if ner_tag != non_ent: # Chomp the sequence start = idx while (idx < len(entities) and entities[idx] == ner_tag): idx += 1 groups.append((self.slice(start, idx).untokenize(), ner_tag)) else: idx += 1 return groups class Tokenizer(object): """Base tokenizer class. Tokenizers implement tokenize, which should return a Tokens class. 
""" import regex import logging logger = logging.getLogger(__name__) STOPWORDS = { 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven', 'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren', 'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``" } import unicodedata def normalize(text): """Resolve different type of unicode encodings.""" return unicodedata.normalize('NFD', text) def filter_word(text): """Take out english stopwords, punctuation, and compound endings.""" text = normalize(text) if regex.match(r'^\p{P}+$', text): return True if text.lower() in STOPWORDS: return True return False def filter_ngram(gram, mode='any'): """Decide whether to keep or discard an n-gram. 
Args: gram: list of tokens (length N) mode: Option to throw out ngram if 'any': any single token passes filter_word 'all': all tokens pass filter_word 'ends': book-ended by filterable tokens """ filtered = [filter_word(w) for w in gram] if mode == 'any': return any(filtered) elif mode == 'all': return all(filtered) elif mode == 'ends': return filtered[0] or filtered[-1] else: raise ValueError('Invalid mode: %s' % mode)
[ 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 5964, 1043, 287, 262, 220, 198, 2, 38559, 24290, 2393, 287, 262, 6808, 8619, 28...
2.311674
2,801
import fnmatch from setuptools import setup from setuptools.command.build_py import build_py as build_py_orig exclude = ['*Tests'] setup( cmdclass={ 'build_py': build_py, } )
[ 11748, 24714, 15699, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 6738, 900, 37623, 10141, 13, 21812, 13, 11249, 62, 9078, 1330, 1382, 62, 9078, 355, 1382, 62, 9078, 62, 11612, 628, 198, 1069, 9152, 796, 37250, 9, 51, 3558, 20520, 6...
2.552632
76
import argparse import json import os from typing import Any, Iterator from datetime import datetime import boto3 from mypy_boto3_s3.client import S3Client from mypy_boto3_sqs.client import SQSClient IS_LOCAL = bool(os.environ.get("IS_LOCAL", False)) if __name__ == "__main__": args = parse_args() if args.bucket_prefix is None: raise Exception("Provide bucket prefix as first argument") else: if args.bucket_prefix == "local-grapl": IS_LOCAL = True main(args.bucket_prefix)
[ 11748, 1822, 29572, 198, 11748, 33918, 198, 11748, 28686, 198, 198, 6738, 19720, 1330, 4377, 11, 40806, 1352, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 11748, 275, 2069, 18, 198, 198, 6738, 616, 9078, 62, 65, 2069, 18, 62, 82...
2.537736
212
from abc import ABC, abstractmethod
[ 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 628, 198 ]
3.8
10
import sys t = int(input()) num = list(map(int, sys.stdin.readline().split())) dp = [1 for _ in range(t)] for i in range(t): for j in range(i): if num[i] > num[j]: dp[i] = max(dp[i], dp[j] + 1) print(max(dp))
[ 11748, 25064, 198, 83, 796, 493, 7, 15414, 28955, 198, 22510, 796, 1351, 7, 8899, 7, 600, 11, 25064, 13, 19282, 259, 13, 961, 1370, 22446, 35312, 3419, 4008, 198, 198, 26059, 796, 685, 16, 329, 4808, 287, 2837, 7, 83, 15437, 198, ...
1.966387
119
# -*- coding: UTF-8 -*-
"""Manager class for streamlining downstream build and evaluation given an architecture.

Manager is the class that takes in architecture designs from an architecture
search/optimization algorithm, then interacts with ``amber.modeler`` to build and
train the model according to architecture, and finally calls
``amber.architect.rewards`` to evaluate the trained model rewards to feedback the
architecture designer.
"""

import gc
import os, sys
import warnings
import numpy as np
import tensorflow.keras as keras
from ..utils import corrected_tf as tf
import tensorflow as tf2
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import Model
import time
from datetime import datetime
from collections import defaultdict
from .commonOps import unpack_data
from .store import get_store_fn

# NOTE(review): ``BaseNetworkManager`` and ``NetworkManager`` are exported but
# their definitions are not in this view.
__all__ = [
    'BaseNetworkManager',
    'NetworkManager',
    'GeneralManager',
    'DistributedGeneralManager'
]


class GeneralManager(BaseNetworkManager):
    """Manager creates child networks, train them on a dataset, and retrieve rewards.

    Parameters
    ----------
    train_data : tuple, string or generator
        Training data to be fed to ``keras.models.Model.fit``.
    validation_data : tuple, string, or generator
        Validation data. The data format is understood similarly to train_data.
    model_fn : amber.modeler
        A callable function to build and implement child models given an
        architecture sequence.
    reward_fn : amber.architect.rewards
        A callable function to evaluate the rewards on a trained model and the
        validation dataset.
    store_fn : amber.architect.store
        A callable function to store necessary information (such as predictions,
        model architectures, and a variety of plots etc.) for the given child
        model.
    working_dir : str
        File path for working directory.
    save_full_model : bool
        If true, save the full model beside the model weights. Default is False.
    epochs : int
        The total number of epochs to train the child model.
    child_batchsize : int
        The batch size for training the child model.
    fit_kwargs : dict or None
        Keyword arguments for model.fit
    predict_kwargs : dict or None
        Keyword arguments for model.predict
    evaluate_kwargs : dict or None
        Keyword arguments for model.evaluate
    verbose : bool or int
        Verbose level. 0=non-verbose, 1=verbose, 2=less verbose.
    kwargs : dict
        Other keyword arguments parsed.

    Attributes
    ----------
    train_data : tuple or generator
        The unpacked training data
    validation_data : tuple or generator
        The unpacked validation data
    model_fn : amber.modeler
        Reference to the callable function to build and implement child models
        given an architecture sequence.
    reward_fn : amber.architect.rewards
        Reference to the callable function to evaluate the rewards on a trained
        model and the validation dataset.
    store_fn : amber.architect.store
        Reference to the callable function to store necessary information (such
        as predictions, model architectures, and a variety of plots etc.) for
        the given child model.
    working_dir : str
        File path to working directory
    verbose : bool or int
        Verbose level

    TODO
    ----
    - Refactor the rest of attributes as private.
    - Update the description of ``train_data`` and ``validation_data`` to more
      flexible unpacking, once it's added::

          If it's tuple, expects it to be a tuple of numpy.array of (x,y);
          if it's string, expects it to be the file path to a compiled training
          data; if it's a generator, expects it yield a batch of training
          features and samples.
    """

    # NOTE(review): attributes used below (self.batchsize, self.epochs,
    # self.fit_kwargs, self.model_compile_dict, ...) are presumably set in
    # __init__, which is not in this view — confirm against the full file.

    def get_rewards(self, trial, model_arc, **kwargs):
        """The reward getter for a given model architecture

        Parameters
        ----------
        trial : int
            An integer number indicating the trial for this architecture
        model_arc : list
            The list of architecture sequence

        Returns
        -------
        this_reward : float
            The reward signal as determined by ``reward_fn(model, val_data)``
        loss_and_metrics : dict
            A dictionary of auxillary information for this model, such as loss,
            and other metrics (as in ``tf.keras.metrics``)
        """
        # print('-'*80, model_arc, '-'*80)
        # Each trial gets its own TF1-style graph + session so child models do
        # not accumulate state across trials.
        train_graph = tf.Graph()
        train_sess = tf.Session(graph=train_graph)
        with train_graph.as_default(), train_sess.as_default():
            try:
                K.set_session(train_sess)
            except RuntimeError:
                # keras 2.3.1 `set_session` not available for tf2.0
                assert keras.__version__ > '2.2.5'
                pass
            model = self.model_fn(model_arc)  # a compiled keras Model
            if model is None:
                # model_fn declined to build this architecture; fall back to the
                # reward function's minimum reward, if it provides one.
                assert hasattr(self.reward_fn, "min"), "model_fn of type %s returned a non-valid model, but the given " \
                                                      "reward_fn of type %s does not have .min() method" % (type(
                    self.model_fn), type(self.reward_fn))
                hist = None
                this_reward, loss_and_metrics, reward_metrics = self.reward_fn.min(data=self.validation_data)
                # First element of loss_and_metrics is the loss; the rest are
                # keyed by the compile-time metric names.
                loss = loss_and_metrics.pop(0)
                loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i]
                                    for i in range(len(loss_and_metrics))}
                loss_and_metrics['loss'] = loss
                if reward_metrics:
                    loss_and_metrics.update(reward_metrics)
            else:
                # train the model using Keras methods
                if self.verbose:
                    print(" Trial %i: Start training model..." % trial)
                train_x, train_y = unpack_data(self.train_data)
                # NOTE(review): `fit_kwargs.pop` mutates self.fit_kwargs — the
                # "earlystop_patience" key is consumed on the first trial.
                hist = model.fit(x=train_x, y=train_y,
                                 batch_size=self.batchsize if train_y is not None else None,
                                 epochs=self.epochs,
                                 verbose=self.verbose,
                                 #shuffle=True,
                                 validation_data=self.validation_data,
                                 callbacks=[ModelCheckpoint(os.path.join(self.working_dir,
                                                                         'temp_network.h5'),
                                                            monitor='val_loss',
                                                            verbose=self.verbose,
                                                            save_best_only=True),
                                            EarlyStopping(monitor='val_loss',
                                                          patience=self.fit_kwargs.pop("earlystop_patience", 5),
                                                          verbose=self.verbose)],
                                 **self.fit_kwargs
                                 )
                # load best performance epoch in this training session
                # in corner cases, the optimization might fail and no temp_network
                # would be created
                if os.path.isfile((os.path.join(self.working_dir, 'temp_network.h5'))):
                    model.load_weights(os.path.join(self.working_dir, 'temp_network.h5'))
                else:
                    model.save_weights((os.path.join(self.working_dir, 'temp_network.h5')))
                # evaluate the model by `reward_fn`
                this_reward, loss_and_metrics, reward_metrics = \
                    self.reward_fn(model, self.validation_data,
                                   session=train_sess,
                                   graph=train_graph)
                loss = loss_and_metrics.pop(0)
                loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i]
                                    for i in range(len(loss_and_metrics))}
                loss_and_metrics['loss'] = loss
                if reward_metrics:
                    loss_and_metrics.update(reward_metrics)

            # do any post processing,
            # e.g. save child net, plot training history, plot scattered prediction.
            if self.store_fn:
                val_pred = model.predict(self.validation_data,
                                         verbose=self.verbose,
                                         **self.predict_kwargs)
                self.store_fn(
                    trial=trial,
                    model=model,
                    hist=hist,
                    data=self.validation_data,
                    pred=val_pred,
                    loss_and_metrics=loss_and_metrics,
                    working_dir=self.working_dir,
                    save_full_model=self.save_full_model,
                    knowledge_func=self.reward_fn.knowledge_function
                )

            # clean up resources and GPU memory
            del model
            del hist
            gc.collect()
            return this_reward, loss_and_metrics


class DistributedGeneralManager(GeneralManager):
    """Distributed manager will place all tensors of any child models to a pre-assigned GPU device
    """
    # NOTE(review): the device-placement implementation is not in this view.


class EnasManager(GeneralManager):
    """A specialized manager for Efficient Neural Architecture Search (ENAS).

    Parameters
    ----------
    session : tensorflow.Session or None
        The tensorflow session that the manager will be parsed to modelers. By
        default it's None, which will then get the Session from the modeler.
    train_data : tuple, string or generator
        Training data to be fed to ``keras.models.Model.fit``.
    validation_data : tuple, string, or generator
        Validation data. The data format is understood similarly to train_data.
    model_fn : amber.modeler
        A callable function to build and implement child models given an
        architecture sequence. Must be a model_fn that is compatible with ENAS
        parameter sharing.
    reward_fn : amber.architect.rewards
        A callable function to evaluate the rewards on a trained model and the
        validation dataset.
    store_fn : amber.architect.store
        A callable function to store necessary information (such as predictions,
        model architectures, and a variety of plots etc.) for the given child
        model.
    working_dir : str
        File path for working directory.

    Attributes
    ----------
    model : amber.modeler.child
        The child DAG that is connected to ``controller.sample_arc`` as the
        input architecture sequence, which will activate a randomly sampled
        subgraph within child DAG. Because it's hard-wired to the sampled
        architecture in controller, using this model to train and predict will
        also have the inherent stochastic behaviour that is linked to
        controller.
    train_data : tuple or generator
        The unpacked training data
    validation_data : tuple or generator
        The unpacked validation data
    model_fn : amber.modeler
        Reference to the callable function to build and implement child models
        given an architecture sequence.
    reward_fn : amber.architect.rewards
        Reference to the callable function to evaluate the rewards on a trained
        model and the validation dataset.
    store_fn : amber.architect.store
        Reference to the callable function to store necessary information (such
        as predictions, model architectures, and a variety of plots etc.) for
        the given child model.
    disable_controller : bool
        If true, will randomly return a reward by uniformly sampling in the
        interval [0,1]. Default is False.
    working_dir : str
        File path to working directory
    verbose : bool or int
        Verbose level

    See Also
    --------
    amber.modeler.child : AMBER wrapped-up version of child models that is
        intended to have similar interface and methods as the
        ``keras.models.Model`` API.
    """

    def get_rewards(self, trial, model_arc=None, nsteps=None):
        """The reward getter for a given model architecture.

        Because Enas will train child model by random sampling an architecture
        to activate for each mini-batch, there will not be any rewards
        evaluation in the Manager anymore. However, we can still use
        `get_rewards` as a proxy to train child models

        Parameters
        ----------
        trial : int
            An integer number indicating the trial for this architecture
        model_arc : list or None
            The list of architecture sequence. If is None (as by default), will
            return the child DAG with architecture connected directly to
            ``controller.sample_arc`` tensors.
        nsteps : int
            Optional, if specified, train model nsteps of batches instead of a
            whole epoch

        Returns
        -------
        this_reward : float
            The reward signal as determined by ``reward_fn(model, val_data)``
        loss_and_metrics : dict
            A dictionary of auxillary information for this model, such as loss,
            and other metrics (as in ``tf.keras.metrics``)
        """
        # Lazily build the shared child DAG once; it is wired to
        # controller.sample_arc.
        if self.model is None:
            self.model = self.model_fn()
        if model_arc is None:
            # Training pass: fit the shared weights with per-batch sampled
            # architectures; no reward is computed.
            # unpack the dataset
            X_val, y_val = self.validation_data[0:2]
            X_train, y_train = self.train_data
            # train the model using EnasModel methods
            if self.verbose:
                print(" Trial %i: Start training model with sample_arc..." % trial)
            hist = self.model.fit(X_train, y_train,
                                  batch_size=self.batchsize,
                                  nsteps=nsteps,
                                  epochs=self.epochs,
                                  verbose=self.verbose,
                                  # comment out because of temporary
                                  # incompatibility with tf.data.Dataset
                                  # validation_data=(X_val, y_val),
                                  )
            # do any post processing,
            # e.g. save child net, plot training history, plot scattered prediction.
            if self.store_fn:
                val_pred = self.model.predict(X_val, verbose=self.verbose)
                self.store_fn(
                    trial=trial,
                    model=self.model,
                    hist=hist,
                    data=self.validation_data,
                    pred=val_pred,
                    loss_and_metrics=None,
                    working_dir=self.working_dir,
                    save_full_model=self.save_full_model,
                    knowledge_func=self.reward_fn.knowledge_function
                )
            return None, None
        else:
            # Evaluation pass: build a fixed-architecture child and score it.
            model = self.model_fn(model_arc)
            this_reward, loss_and_metrics, reward_metrics = \
                self.reward_fn(model, self.validation_data,
                               session=self.session)
            loss = loss_and_metrics.pop(0)
            loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i]
                                for i in range(len(loss_and_metrics))}
            loss_and_metrics['loss'] = loss
            if reward_metrics:
                loss_and_metrics.update(reward_metrics)
            # enable this to overwrite a random reward when disable controller
            if self.disable_controller:
                this_reward = np.random.uniform(0, 1)
            # end
            return this_reward, loss_and_metrics
[ 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 37811, 13511, 1398, 329, 4269, 21310, 33218, 1382, 290, 12660, 1813, 281, 10959, 13, 198, 198, 13511, 318, 262, 1398, 326, 2753, 287, 10959, 9824, 422, 281, 10959, 2989, 14, ...
2.305658
6,769
""" This script contains all the globals variables which are used in the code. Most of the variables are settings upon which the code works. """ DATA_SOURCE = "offline"; # can be either "offline" or "online". DATA_SAMPLING_FREQ = 256.0; # the sampling rate of the recorded EEG. DATA_MAX_BUFFER_TIME_SEC = 0.25; # The time in seconds for which the data is stored in the buffer. SHOW_DATA_WHEN_FILE_OPENED = False # print the data file when it is opened. Use this for debugging. DATA_FRAME_APPENDAGE = 3; # the number of columns which are extra i.e. other than the EEG data itself. OFFLINE_DATA_PATH = "DataSets/SRC"; OFFLINE_DATASET_FILE_TYPE = ".mat";
[ 37811, 198, 220, 220, 220, 770, 4226, 4909, 477, 262, 15095, 874, 9633, 198, 220, 220, 220, 543, 389, 973, 287, 262, 2438, 13, 628, 220, 220, 220, 4042, 286, 262, 9633, 389, 6460, 2402, 543, 262, 198, 220, 220, 220, 2438, 2499, 13...
2.764706
255
import torch import torchvision from torch.utils.data import Dataset, Subset from torchvision.transforms import ToTensor from src.data.cmnist_dist import make_joint_distribution, CMNIST_NAME, CMNIST_VERSIONS CMNIST_SIZE = 28 ** 2 * 2 CMNIST_SHAPE = [2, 28, 28] CMNIST_N_CLASSES = 2 CMNIST_N_ENVS = 2 MNIST_TRAIN = 'train' MNIST_VALID = 'valid' MNIST_TEST = 'test' MNIST_TRAIN_VALID = 'train+valid' MNIST_TRAIN_SPLITS = [MNIST_TRAIN, MNIST_VALID, MNIST_TRAIN_VALID] MNIST_SPLITS = MNIST_TRAIN_SPLITS + [MNIST_TEST] MNIST_TRAIN_EXAMPLES = 50000 # Wrapper for the torchvision MNIST dataset with validation split # Implementation of the CMNIST, d-CMNIST and y-CMNIST datasets for pytorch
[ 11748, 28034, 198, 11748, 28034, 10178, 198, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 16092, 292, 316, 11, 3834, 2617, 198, 6738, 28034, 10178, 13, 7645, 23914, 1330, 1675, 51, 22854, 198, 198, 6738, 12351, 13, 7890, 13, 66, 10295, ...
2.453901
282