content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
import pandas as pd
d = {
'A': "foo foo foo bar bar bar".split(),
'B': "one one two two one one".split(),
'C': "x y x y x y".split(),
'D': [1,3,2,5,4,1]
}
df = pd.DataFrame(d)
print(d)
print(df['A'].unique())
print(df['A'].value_counts())
print (df.pivot_table(values='D',
index = ['A','B'],
columns = ['C']))
groupbyA = df.groupby('A')
print(groupbyA.D.max())
| [
11748,
299,
32152,
355,
45941,
220,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
198,
67,
796,
1391,
198,
197,
6,
32,
10354,
366,
21943,
22944,
22944,
2318,
2318,
2318,
1911,
35312,
22784,
198,
197,
6,
33,
10354,
366,
505,
530,
7... | 2.171271 | 181 |
import git # type: ignore
from typing import Callable, Iterator, Tuple, List
def short_sha(rev: git.Commit) -> str:
""" Truncate `rev.hexsha` without ambiguity """
return rev.repo.git.rev_parse(rev.hexsha, short=True)
def visit_ancestors(rev: git.Commit) -> Iterator[Tuple[git.Commit, Callable]]:
r"""
Iterate over history, optionally pruning all ancestors of a given commit.
This iterates backwards over history starting at `rev` and traversing the
commit graph in topological (as opposed to date) order, ensuring that child
commits are always visited before any of their parent commits. In this
sense, this function is like ``repo.iter_commits(rev, topo_order=True)``.
The key difference from ``iter_commits`` is that this version yields
``commit, prune`` pairs, where ``prune`` is a function accepting no
arguments. If ``prune()`` is called, then the iterator will not visit any
of the commits which are ancestors of ``commit``; that is, the history
"tree" from that point backwards is pruned.
As an example, consider a repository with the commit graph below, where
``A`` is the root commit and ``K`` and ``L`` are tips of branches::
A -- B -- E -- I -- J -- L
\ / /
C --- F -- H
\ /
D ---- G --- K
The following code runs against this commit graph, and calls ``prune``
if it finds commits ``B``, ``F``, or ``G``::
>>> for c, prune in visit_ancestors(L):
... if c in {B, F, G}:
... prune()
... print('found ', c)
... else:
... print('visited', c)
visited L
visited J
visited H
visited I
found G
found F
visited E
As a result of calling ``prune()`` on commit ``G``, the ancestors of ``G``
(``D``, ``C``, ``B``, and ``A``) are pruned from the graph and never
visited. The exact order that these commits appear in depends on the order
of parents in merge commits, but since ``B`` is an ancestor of both ``F``
and ``G``, it will always be pruned before it is visited.
"""
repo = rev.repo
pruned_commits : List[git.Commit] = [] # the commits to ignore along with their ancestors
skip_n = 0 # the index to resume the iteration
while True:
args = [rev] + ['--not'] + pruned_commits
proc = repo.git.rev_list(*args, as_process=True, skip=skip_n, topo_order=True)
for c in git.Commit._iter_from_process_or_stream(repo, proc):
# build a temporary function to hand back to the user
do_prune = False
yield c, prune
if do_prune:
pruned_commits.append(c)
break
else:
# start after this commit next time we restart the search
skip_n += 1
else:
# all ancestors found
return
| [
11748,
17606,
220,
220,
1303,
2099,
25,
8856,
198,
6738,
19720,
1330,
4889,
540,
11,
40806,
1352,
11,
309,
29291,
11,
7343,
628,
198,
4299,
1790,
62,
26270,
7,
18218,
25,
17606,
13,
6935,
270,
8,
4613,
965,
25,
198,
220,
220,
220,
... | 2.45283 | 1,219 |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.build_graph.address import Address
from pants.engine.addressable import BuildFileAddresses
from pants.engine.console import Console
from pants.engine.fs import Digest, DirectoryToMaterialize, Workspace
from pants.engine.goal import Goal, LineOriented
from pants.engine.legacy.graph import HydratedTarget
from pants.engine.rules import console_rule, rule, union
from pants.engine.selectors import Get
@dataclass(frozen=True)
class Binary(LineOriented, Goal):
"""Create a runnable binary."""
name = 'binary'
@union
@union
@dataclass(frozen=True)
@console_rule
@rule
| [
2,
15069,
13130,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
3... | 3.488479 | 217 |
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import mmap
import math
import warnings
from copy import deepcopy
from datetime import datetime
from multiprocessing.connection import Listener
from multiprocessing.connection import Client
from .mmio import MMIO
from .ps import Clocks, CPU_ARCH_IS_SUPPORTED, CPU_ARCH
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
# Overlay constants
PYNQ_PATH = os.path.dirname(os.path.realpath(__file__))
BS_BOOT = os.path.join(PYNQ_PATH, 'overlays', 'base', 'base.bit')
TCL_BOOT = os.path.join(PYNQ_PATH, 'overlays', 'base', 'base.tcl')
BS_IS_PARTIAL = "/sys/devices/soc0/amba/f8007000.devcfg/is_partial_bitstream"
BS_XDEVCFG = "/dev/xdevcfg"
PL_SERVER_FILE = os.path.join(PYNQ_PATH, '.log')
def _get_tcl_name(bitfile_name):
"""This method returns the name of the tcl file.
For example, the input "/home/xilinx/src/pynq/bitstream/base.bit" will
lead to the result "/home/xilinx/src/pynq/bitstream/base.tcl".
Parameters
----------
bitfile_name : str
The absolute path of the .bit file.
Returns
-------
str
The absolute path of the .tcl file.
"""
return os.path.splitext(bitfile_name)[0] + '.tcl'
class _TCL:
"""Helper Class to extract information from a TCL configuration file
Note
----
This class requires the absolute path of the '.tcl' file.
Attributes
----------
ip_dict : dict
All the addressable IPs from PS7. Key is the name of the IP; value is
a dictionary mapping the physical address, address range, IP type,
configuration dictionary, the state associated with that IP, any
interrupts and GPIO pins attached to the IP and the full path to the
IP in the block design:
{str: {'phys_addr' : int, 'addr_range' : int,\
'type' : str, 'config' : dict, 'state' : str,\
'interrupts' : dict, 'gpio' : dict, 'fullpath' : str}}.
gpio_dict : dict
All the GPIO pins controlled by PS7. Key is the name of the GPIO pin;
value is a dictionary mapping user index (starting from 0),
the state associated with that GPIO pin and the pins in block diagram
attached to the GPIO:
{str: {'index' : int, 'state' : str, 'pins' : [str]}}.
interrupt_controllers : dict
All AXI interrupt controllers in the system attached to
a PS7 interrupt line. Key is the name of the controller;
value is a dictionary mapping parent interrupt controller and the
line index of this interrupt:
{str: {'parent': str, 'index' : int}}.
The PS7 is the root of the hierarchy and is unnamed.
interrupt_pins : dict
All pins in the design attached to an interrupt controller.
Key is the name of the pin; value is a dictionary
mapping the interrupt controller and the line index used:
{str: {'controller' : str, 'index' : int}}.
hierarchy_dict : dict
All of the hierarchies in the block design containing addressable IP.
The keys are the hiearachies and the values are dictionaries
containing the IP and sub-hierarchies contained in the hierarchy and
and GPIO and interrupts attached to the hierarchy. The keys in
dictionaries are relative to the hierarchy and the ip dict only
contains immediately contained IP - not those in sub-hierarchies.
{str: {'ip': dict, 'hierarchies': dict, 'interrupts': dict,\
'gpio': dict, 'fullpath': str}}
clock_dict : dict
All the PL clocks that can be controlled by the PS. Key is the index
of the clock (e.g., 0 for `fclk0`); value is a dictionary mapping the
divisor values and the enable flag (1 for enabled, and
0 for disabled):
{index: {'divisor0' : int, 'divisor1' : int, 'enable' : int}}
"""
def __init__(self, tcl_name):
"""Returns a map built from the supplied tcl file
Parameters
---------
tcl_name : str
The tcl filename to parse. This is opened directly so should be
fully qualified
Note
----
If this method is called on an unsupported architecture it will warn
and return without initialization
"""
if not isinstance(tcl_name, str):
raise TypeError("tcl_name has to be a string")
# Initialize result variables
self.intc_names = []
self.interrupt_controllers = {}
self.concat_cells = {}
self.nets = []
self.pins = {}
self.prop = []
self.interrupt_pins = {}
self.ps_name = ""
self.ip_dict = {}
self.gpio_dict = {}
self.clock_dict = {}
self.family = "xc7z"
# Key strings to search for in the TCL file
family_pat = "create_project"
family_regex = "(?P<family_str>xc.{2}).*"
family_ps_dict = {"xc7z": "processing_system7",
"xczu": "zynq_ultra_ps_e"}
family_irq_dict = {"xc7z": "IRQ_F2P",
"xczu": "pl_ps_irq0"}
family_gpio_dict = {"xc7z": "GPIO_O",
"xczu": "emio_gpio_o"}
hier_use_pat = "create_hier_cell"
hier_proc_def_pat = "proc {}".format(hier_use_pat)
hier_def_regex = "create_hier_cell_(?P<name>[^ ]*)"
hier_proc_end_pat = "}\n"
hier_use_regex = ("create_hier_cell_(?P<hier_name>[^ ]*) ([^ ].*) " +
"(?P<instance_name>[^ ]*)\n")
config_ip_pat = "CONFIG."
config_regex = "CONFIG.(?P<key>.+?) \{(?P<value>.+?)\}"
clk_odiv_regex = 'PCW_FCLK(?P<idx>.+?)_PERIPHERAL_DIVISOR' \
'(?P<div>[01])$'
clk_enable_regex = 'PCW_FPGA_FCLK(?P<idx>.+?)_ENABLE$'
prop_start_pat = "set_property -dict ["
prop_end_pat = "]"
prop_name_regex = "\] \$(?P<instance_name>.+?)$"
net_pat = "connect_bd_net -net"
net_regex = "\[get_bd_pins (?P<name>[^]]+)\]"
addr_pat = "create_bd_addr_seg"
addr_regex = ("create_bd_addr_seg " +
"-range (?P<range>0[xX][0-9a-fA-F]+) " +
"-offset (?P<addr>0[xX][0-9a-fA-F]+) " +
"\[get_bd_addr_spaces ")
ip_pat = "create_bd_cell -type ip -vlnv "
ip_regex = ("create_bd_cell -type ip -vlnv " +
"(?P<author>.+?):" +
"(?P<type>.+?):" +
"(?P<ip_name>.+?):" +
"(?P<version>.+?) " +
"(?P<instance_name>[^ ]*)")
ignore_regex = "\s*(\#|catch).*"
# Parsing state
current_hier = ""
last_concat = ""
in_prop = False
gpio_idx = None
gpio_dict = dict()
hier_dict = dict()
hier_dict[current_hier] = dict()
with open(tcl_name, 'r') as f:
for line in f:
if re.match(ignore_regex, line):
continue
# Matching IP configurations
elif prop_start_pat in line:
in_prop = True
# Matching Property declarations
elif in_prop:
if prop_end_pat in line:
m = re.search(prop_name_regex, line, re.IGNORECASE)
if m and gpio_idx is not None:
name = m.group("instance_name")
gpio_dict[name] = gpio_idx
gpio_idx = None
in_prop = False
elif config_ip_pat in line:
m1 = re.search(config_regex, line)
key = m1.group("key")
value = m1.group("value")
if key == "NUM_PORTS":
self.concat_cells[last_concat] = int(value)
elif key == 'DIN_FROM':
gpio_idx = int(value)
elif "FCLK" in line and "PERIPHERAL_DIVISOR" in line:
m2 = re.search(clk_odiv_regex, key)
idx = int(m2.group("idx"))
if idx not in self.clock_dict:
self.clock_dict[idx] = {}
divisor_name = 'divisor' + m2.group("div")
self.clock_dict[idx][divisor_name] = int(value)
elif "FCLK" in line and "ENABLE" in line:
m3 = re.search(clk_enable_regex, key)
idx = int(m3.group("idx"))
if idx not in self.clock_dict:
self.clock_dict[idx] = {}
self.clock_dict[idx]['enable'] = int(value)
# Match project/family declaration
elif family_pat in line:
m = re.search(family_regex, line, re.IGNORECASE)
self.family = m.group("family_str")
# Matching address segment
elif addr_pat in line:
m = re.search(addr_regex, line, re.IGNORECASE)
if m:
for ip_dict0 in hier_dict:
for ip_name, ip_type in \
hier_dict[ip_dict0].items():
ip = (ip_dict0 + '/' + ip_name).lstrip('/')
if m.group("hier").startswith(ip):
self.ip_dict[ip] = dict()
self.ip_dict[ip]['phys_addr'] = \
int(m.group("addr"), 16)
self.ip_dict[ip]['addr_range'] = \
int(m.group("range"), 16)
self.ip_dict[ip]['type'] = ip_type
self.ip_dict[ip]['state'] = None
self.ip_dict[ip]['interrupts'] = dict()
self.ip_dict[ip]['gpio'] = dict()
self.ip_dict[ip]['fullpath'] = ip
# Match hierarchical cell definition
elif hier_proc_def_pat in line:
m = re.search(hier_def_regex, line)
hier_name = m.group("name")
current_hier = hier_name
hier_dict[current_hier] = dict()
elif hier_proc_end_pat == line:
current_hier = ""
# Match hierarchical cell use/instantiation
elif hier_use_pat in line:
m = re.search(hier_use_regex, line)
hier_name = m.group("hier_name")
inst_name = m.group("instance_name")
inst_path = (current_hier + '/' + inst_name).lstrip('/')
inst_dict = dict()
for path in hier_dict:
psplit = path.split('/')
if psplit[0] == hier_name:
inst_path += path.lstrip(hier_name)
inst_dict[inst_path] = deepcopy(hier_dict[path])
hier_dict.update(inst_dict)
# Matching IP cells in root design
elif ip_pat in line:
m = re.search(ip_regex, line)
ip_name = m.group("ip_name")
instance_name = m.group("instance_name")
if m.group("ip_name") == family_ps_dict[self.family]:
self.ps_name = instance_name
addr_regex += (instance_name + "/Data\] " +
"\[get_bd_addr_segs (?P<hier>.+?)\] " +
"(?P<name>[A-Za-z0-9_]+)")
else:
ip_type = ':'.join([m.group(1), m.group(2),
m.group(3), m.group(4)])
hier_dict[current_hier][instance_name] = ip_type
ip = (current_hier + '/' + instance_name).lstrip('/')
if ip_name == "xlconcat":
last_concat = ip
self.concat_cells[ip] = 2
elif ip_name == "axi_intc":
self.intc_names.append(ip)
# Matching nets
elif net_pat in line:
mpins = re.findall(net_regex, line, re.IGNORECASE)
new_pins = [(current_hier + "/" + v).lstrip('/') for v in
mpins]
indexes = {self.pins[p] for p in new_pins if
p in self.pins}
if len(indexes) == 0:
index = len(self.nets)
self.nets.append(set())
else:
to_merge = []
while len(indexes) > 1:
to_merge.append(indexes.pop())
index = indexes.pop()
for i in to_merge:
self.nets[index] |= self.nets[i]
self.nets[index] |= set(new_pins)
for p in self.nets[index]:
self.pins[p] = index
if self.ps_name + "/" + family_irq_dict[self.family] in self.pins:
ps_irq_net = self.pins[
self.ps_name + "/" + family_irq_dict[self.family]]
self._add_interrupt_pins(ps_irq_net, "", 0)
if self.ps_name + "/" + family_gpio_dict[self.family] in self.pins:
ps_gpio_net = self.pins[
self.ps_name + "/" + family_gpio_dict[self.family]]
self._add_gpio_pins(ps_gpio_net, gpio_dict)
self._build_hierarchy_dict()
self._assign_interrupts_gpio()
class PLMeta(type):
"""This method is the meta class for the PL.
This is not a class for users. Hence there is no attribute or method
exposed to users.
Note
----
If this metaclass is parsed on an unsupported architecture it will issue
a warning and leave class variables undefined
"""
_bitfile_name = BS_BOOT
_timestamp = ""
if CPU_ARCH_IS_SUPPORTED:
_tcl = _TCL(TCL_BOOT)
_ip_dict = _tcl.ip_dict
_gpio_dict = _tcl.gpio_dict
_interrupt_controllers = _tcl.interrupt_controllers
_interrupt_pins = _tcl.interrupt_pins
_hierarchy_dict = _tcl.hierarchy_dict
_status = 1
_server = None
_host = None
_remote = None
else:
warnings.warn("Pynq does not support the CPU Architecture: {}"
.format(CPU_ARCH), ResourceWarning)
@property
def bitfile_name(cls):
"""The getter for the attribute `bitfile_name`.
Returns
-------
str
The absolute path of the bitstream currently on PL.
Note
----
If this method is called on an unsupported architecture it will warn
and return an empty string
"""
if not CPU_ARCH_IS_SUPPORTED:
warnings.warn("Pynq does not support the CPU Architecture: {}"
.format(CPU_ARCH), ResourceWarning)
return ""
cls.client_request()
cls.server_update()
return cls._bitfile_name
@property
def timestamp(cls):
"""The getter for the attribute `timestamp`.
Returns
-------
str
Bitstream download timestamp.
"""
cls.client_request()
cls.server_update()
return cls._timestamp
@property
def ip_dict(cls):
"""The getter for the attribute `ip_dict`.
Returns
-------
dict
The dictionary storing addressable IP instances; can be empty.
"""
cls.client_request()
cls.server_update()
return cls._ip_dict
@property
def gpio_dict(cls):
"""The getter for the attribute `gpio_dict`.
Returns
-------
dict
The dictionary storing the PS GPIO pins.
"""
cls.client_request()
cls.server_update()
return cls._gpio_dict
@property
def interrupt_controllers(cls):
"""The getter for the attribute `interrupt_controllers`.
Returns
-------
dict
The dictionary storing interrupt controller information.
"""
cls.client_request()
cls.server_update()
return cls._interrupt_controllers
@property
def interrupt_pins(cls):
"""The getter for the attribute `interrupt_pins`.
Returns
-------
dict
The dictionary storing the interrupt endpoint information.
"""
cls.client_request()
cls.server_update()
return cls._interrupt_pins
@property
def hierarchy_dict(cls):
"""The getter for the attribute `hierarchy_dict`
Returns
-------
dict
The dictionary containing the hierarchies in the design
"""
cls.client_request()
cls.server_update()
return cls._hierarchy_dict
def setup(cls, address=PL_SERVER_FILE, key=b'xilinx'):
"""Start the PL server and accept client connections.
This method should not be used by the users directly. To check open
pipes in the system, use `lsof | grep <address>` and
`kill -9 <pid>` to manually delete them.
Parameters
----------
address : str
The filename on the file system.
key : bytes
The authentication key of connection.
Returns
-------
None
"""
cls._server = Listener(address, family='AF_UNIX', authkey=key)
while cls._status:
cls._host = cls._server.accept()
cls._host.send([cls._bitfile_name,
cls._timestamp,
cls._ip_dict,
cls._gpio_dict,
cls._interrupt_controllers,
cls._interrupt_pins,
cls._hierarchy_dict])
cls._bitfile_name, cls._timestamp, \
cls._ip_dict, cls._gpio_dict, \
cls._interrupt_controllers, cls._interrupt_pins, \
cls._hierarchy_dict, cls._status = cls._host.recv()
cls._host.close()
cls._server.close()
def client_request(cls, address=PL_SERVER_FILE,
key=b'xilinx'):
"""Client connects to the PL server and receives the attributes.
This method should not be used by the users directly. To check open
pipes in the system, use `lsof | grep <address>` and
`kill -9 <pid>` to manually delete them.
Parameters
----------
address : str
The filename on the file system.
key : bytes
The authentication key of connection.
Returns
-------
None
"""
try:
cls._remote = Client(address, family='AF_UNIX', authkey=key)
except FileNotFoundError:
raise ConnectionError(
"Could not connect to Pynq PL server") from None
cls._bitfile_name, cls._timestamp, \
cls._ip_dict, cls._gpio_dict, \
cls._interrupt_controllers, \
cls._interrupt_pins, \
cls._hierarchy_dict = cls._remote.recv()
def server_update(cls, continued=1):
"""Client sends the attributes to the server.
This method should not be used by the users directly. To check open
pipes in the system, use `lsof | grep <address>` and `kill -9 <pid>`
to manually delete them.
Parameters
----------
continued : int
Continue (1) or stop (0) the PL server.
Returns
-------
None
"""
cls._remote.send([cls._bitfile_name,
cls._timestamp,
cls._ip_dict,
cls._gpio_dict,
cls._interrupt_controllers,
cls._interrupt_pins,
cls._hierarchy_dict,
continued])
cls._remote.close()
def reset(cls):
"""Reset both all the dictionaries.
This method must be called after a bitstream download.
1. In case there is a `*.tcl` file, this method will reset the IP,
GPIO , and interrupt dictionaries based on the tcl file.
2. In case there is no `*.tcl` file, this method will simply clear
the state information stored for all dictionaries.
"""
cls.client_request()
tcl_name = _get_tcl_name(cls._bitfile_name)
if os.path.isfile(tcl_name):
tcl = _TCL(tcl_name)
cls._ip_dict = tcl.ip_dict
cls._gpio_dict = tcl.gpio_dict
cls._interrupt_controllers = tcl.interrupt_controllers
cls._interrupt_pins = tcl.interrupt_pins
else:
cls.clear_dict()
cls.server_update()
def clear_dict(cls):
"""Clear all the dictionaries stored in PL.
This method will clear all the related dictionaries, including IP
dictionary, GPIO dictionary, etc.
"""
cls._ip_dict.clear()
cls._gpio_dict.clear()
cls._interrupt_controllers.clear()
cls._interrupt_pins.clear()
cls._hierarchy_dict.clear()
def load_ip_data(cls, ip_name, data):
"""This method writes data to the addressable IP.
Note
----
The data is assumed to be in binary format (.bin). The data
name will be stored as a state information in the IP dictionary.
Parameters
----------
ip_name : str
The name of the addressable IP.
data : str
The absolute path of the data to be loaded.
Returns
-------
None
"""
cls.client_request()
with open(data, 'rb') as bin_file:
size = int((math.ceil(os.fstat(bin_file.fileno()).st_size /
mmap.PAGESIZE)) * mmap.PAGESIZE)
mmio = MMIO(cls._ip_dict[ip_name]['phys_addr'], size)
buf = bin_file.read(size)
mmio.write(0, buf)
cls._ip_dict[ip_name]['state'] = data
cls.server_update()
class PL(metaclass=PLMeta):
"""Serves as a singleton for `Overlay` and `Bitstream` classes.
This class stores multiple dictionaries: IP dictionary, GPIO dictionary,
interrupt controller dictionary, and interrupt pins dictionary.
Attributes
----------
bitfile_name : str
The absolute path of the bitstream currently on PL.
timestamp : str
Bitstream download timestamp, using the following format:
year, month, day, hour, minute, second, microsecond.
ip_dict : dict
All the addressable IPs from PS7. Key is the name of the IP; value is
a dictionary mapping the physical address, address range, IP type,
configuration dictionary, the state associated with that IP, any
interrupts and GPIO pins attached to the IP and the full path to the
IP in the block design:
{str: {'phys_addr' : int, 'addr_range' : int,\
'type' : str, 'config' : dict, 'state' : str,\
'interrupts' : dict, 'gpio' : dict, 'fullpath' : str}}.
gpio_dict : dict
All the GPIO pins controlled by PS7. Key is the name of the GPIO pin;
value is a dictionary mapping user index (starting from 0),
the state associated with that GPIO pin and the pins in block diagram
attached to the GPIO:
{str: {'index' : int, 'state' : str, 'pins' : [str]}}.
interrupt_controllers : dict
All AXI interrupt controllers in the system attached to
a PS7 interrupt line. Key is the name of the controller;
value is a dictionary mapping parent interrupt controller and the
line index of this interrupt:
{str: {'parent': str, 'index' : int}}.
The PS7 is the root of the hierarchy and is unnamed.
interrupt_pins : dict
All pins in the design attached to an interrupt controller.
Key is the name of the pin; value is a dictionary
mapping the interrupt controller and the line index used:
{str: {'controller' : str, 'index' : int}}.
hierarchy_dict : dict
All of the hierarchies in the block design containing addressable IP.
The keys are the hiearachies and the values are dictionaries
containing the IP and sub-hierarchies contained in the hierarchy and
and GPIO and interrupts attached to the hierarchy. The keys in
dictionaries are relative to the hierarchy and the ip dict only
contains immediately contained IP - not those in sub-hierarchies.
{str: {'ip': dict, 'hierarchies': dict, 'interrupts': dict,\
'gpio': dict, 'fullpath': str}}
"""
def __init__(self):
"""Return a new PL object.
This class requires a root permission.
"""
euid = os.geteuid()
if euid != 0:
raise EnvironmentError('Root permissions required.')
def _stop_server():
"""Entry point for the stop_pl_server.py script
This function will attempt to stop the PL server in
a controlled manner. It should not be called by user code
"""
try:
PL.client_request()
PL.server_update(0)
except:
pass
def _start_server():
"""Entry point for the start_pl_server.py script
Starts the PL server using the default server file. Should
not be called by user code - use PL.setup() instead to
customise the server.
"""
if os.path.exists(PL_SERVER_FILE):
os.remove(PL_SERVER_FILE)
PL.setup()
class Bitstream:
"""This class instantiates a programmable logic bitstream.
Attributes
----------
bitfile_name : str
The absolute path of the bitstream.
timestamp : str
Timestamp when loading the bitstream. Format:
year, month, day, hour, minute, second, microsecond
"""
def __init__(self, bitfile_name):
"""Return a new Bitstream object.
Users can either specify an absolute path to the bitstream file
(e.g. '/home/xilinx/src/pynq/bitstream/base.bit'),
or a relative path within an overlay folder.
(e.g. 'base.bit' for base/base.bit).
Note
----
self.bitstream always stores the absolute path of the bitstream.
Parameters
----------
bitfile_name : str
The bitstream absolute path or name as a string.
"""
super().__init__()
if not isinstance(bitfile_name, str):
raise TypeError("Bitstream name has to be a string.")
bitfile_abs = os.path.abspath(bitfile_name)
bitfile_overlay_abs = os.path.join(PYNQ_PATH,
'overlays',
bitfile_name.replace('.bit', ''),
bitfile_name)
if os.path.isfile(bitfile_name):
self.bitfile_name = bitfile_abs
elif os.path.isfile(bitfile_overlay_abs):
self.bitfile_name = bitfile_overlay_abs
else:
raise IOError('Bitstream file {} does not exist.'
.format(bitfile_name))
self.timestamp = ''
def download(self):
"""The method to download the bitstream onto PL.
Note
----
The class variables held by the singleton PL will also be updated. In
addition, if this method is called on an unsupported architecture it
will warn and return.
Returns
-------
None
"""
# Compose bitfile name, open bitfile
with open(self.bitfile_name, 'rb') as f:
buf = f.read()
# Set is_partial_bitfile device attribute to 0
with open(BS_IS_PARTIAL, 'w') as fd:
fd.write('0')
# Write bitfile to xdevcfg device
with open(BS_XDEVCFG, 'wb') as f:
f.write(buf)
t = datetime.now()
self.timestamp = "{}/{}/{} {}:{}:{} +{}".format(
t.year, t.month, t.day,
t.hour, t.minute, t.second, t.microsecond)
# Update PL information
PL.client_request()
PL._bitfile_name = self.bitfile_name
PL._timestamp = self.timestamp
PL.clear_dict()
PL.server_update()
| [
2,
220,
220,
15069,
357,
66,
8,
1584,
11,
1395,
346,
28413,
11,
3457,
13,
198,
2,
220,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
220,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
2... | 2.045418 | 14,972 |
import os
import time
import uuid
import hprof.parser
import hprof.leak_report_writer
import hprof.leak_detector
ROOT_DIR = "/home/shanbay/memory_guard/server"
REPORT_DIR = os.path.join(ROOT_DIR, "report")
if __name__ == '__main__':
pass
| [
11748,
28686,
198,
11748,
640,
198,
11748,
334,
27112,
198,
198,
11748,
289,
5577,
13,
48610,
198,
11748,
289,
5577,
13,
293,
461,
62,
13116,
62,
16002,
198,
11748,
289,
5577,
13,
293,
461,
62,
15255,
9250,
198,
198,
13252,
2394,
62,
... | 2.583333 | 96 |
import argparse
import os
import shutil
import tempfile
import yaml
from contest.impls.lib import datasets
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
198,
11748,
331,
43695,
198,
198,
6738,
8414,
13,
23928,
82,
13,
8019,
1330,
40522,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,... | 3 | 50 |
import glob
from os import path as osp
if __name__ == '__main__':
root = '/data2/yangxi/datasets/Vimeo90k/vimeo_septuplet'
seq_path_list = sorted(glob.glob(osp.join(root, 'sequences', '*', '*')))
out = [
'{}/{} 7 (256,448,3)\n'.format(seq_path.split('/')[-2], seq_path.split('/')[-1])
for seq_path in seq_path_list
]
with open('meta_info/meta_info_Vimeo90K_all_GT.txt', 'w') as f:
f.writelines(out)
| [
11748,
15095,
198,
6738,
28686,
1330,
3108,
355,
267,
2777,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
6808,
796,
31051,
7890,
17,
14,
17859,
29992,
14,
19608,
292,
1039,
14,
53,
47776,
382... | 2.069767 | 215 |
# Generated by Django 3.1.4 on 2021-11-13 10:16
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import django.utils.timezone
import uuid
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
1157,
12,
1485,
838,
25,
1433,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,... | 3 | 73 |
# encoding: UTF-8
"""
通过VT_setting.json加载全局配置
"""
print('load vtGlobal.py')
import os
import traceback
import json
from .vtFunction import getJsonPath
globalSetting = {} # 全局配置字典
settingFileName = "VT_setting.json"
settingFilePath = getJsonPath(settingFileName, __file__)
try:
with open(settingFilePath,'r',encoding="utf8") as f:
globalSetting = json.load(f)
except:
traceback.print_exc() | [
2,
21004,
25,
41002,
12,
23,
198,
198,
37811,
198,
34460,
248,
32573,
229,
36392,
62,
33990,
13,
17752,
27950,
254,
164,
121,
121,
17739,
101,
161,
109,
222,
165,
227,
235,
163,
121,
106,
198,
37811,
198,
4798,
10786,
2220,
410,
83,... | 2.263736 | 182 |
from django.urls import path
from . import views
app_name = 'users'
urlpatterns = [
path('',views.index, name='index'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
220,
198,
6738,
764,
1330,
5009,
198,
1324,
62,
3672,
796,
705,
18417,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
33571,
13,
9630,
11,
1438,
11639,
96... | 2.723404 | 47 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
# TODO(dtu): Move these functions from core.util to here.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
AddDirToPythonPath = util.AddDirToPythonPath
GetBuildDirectories = util.GetBuildDirectories
def FindInstalledWindowsApplication(application_path):
"""Search common Windows installation directories for an application.
Args:
application_path: Path to application relative from installation location.
Returns:
A string representing the full path, or None if not found.
"""
search_paths = [os.getenv('PROGRAMFILES(X86)'),
os.getenv('PROGRAMFILES'),
os.getenv('LOCALAPPDATA')]
search_paths += os.getenv('PATH', '').split(os.pathsep)
for search_path in search_paths:
if not search_path:
continue
path = os.path.join(search_path, application_path)
if IsExecutable(path):
return path
return None
| [
2,
15069,
1946,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
11748... | 2.970297 | 404 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2896,
500,
994,
262,
4981,
329,
534,
15881,
276,
3709,
198,
2,
198,
2,
4091,
10314,
287,
25,
198,
2,
3740,
1378,
31628,
13,
1416,
2416,
88,
13,
2398,
14,
2... | 2.725806 | 62 |
# stdlib imports
import re
# third-party imports
import structlog
# local imports
from stretch import exceptions
from stretch import logging
from .trigger_types import http
from .trigger_types import mesos
logger = structlog.get_logger()
def get_prefixes_from_labels(labels):
"""Extracts a unique list of trigger prefixes from a dict of labels.
"""
regex = r'^SCALING_TRIGGER_[0-9]+_'
trigger_prefixes = set()
for k in labels:
match = re.match(regex, k)
if match is not None:
trigger_prefixes.add(match.group(0))
return trigger_prefixes
def get_trigger_type_from_string(name):
"""Returns the appropriate trigger type for the given name
"""
try:
return {
"cpu": mesos.CPUTrigger,
"mem": mesos.MemTrigger,
"http": http.HttpTrigger,
"http_single": http.HttpSingleTrigger,
}[name]
except KeyError:
raise InvalidTriggerType(name)
def get_configs_from_labels(prefixes, labels):
"""Transforms a raw "labels" dict of (str, str) pairs into a list of
dicts, each containing the configuration keys for a single trigger.
"""
trigger_configs = []
for trigger_prefix in prefixes:
trigger_config = {}
for key, value in labels.items():
if key.startswith(trigger_prefix):
trigger_key = key.replace(trigger_prefix, "").lower()
trigger_config[trigger_key] = value
trigger_configs.append(trigger_config)
return trigger_configs
def get_triggers_from_configs(app_id, tasks, configs):
"""Returns an initialised Trigger object for the given configuration
"""
for config in configs:
try:
trigger_type = get_trigger_type_from_string(config.get("type"))
yield trigger_type(app_id=app_id, tasks=tasks, **config)
except Exception as e:
logging.log_exception(e, app_id=app_id)
def parse_triggers(app_id, tasks, labels):
"""Execute the parser and return all configured triggers
"""
prefixes = get_prefixes_from_labels(labels)
configs = get_configs_from_labels(prefixes, labels)
return [t for t in get_triggers_from_configs(app_id, tasks, configs)]
| [
2,
14367,
8019,
17944,
198,
11748,
302,
198,
198,
2,
2368,
12,
10608,
17944,
198,
11748,
2878,
6404,
198,
198,
2,
1957,
17944,
198,
6738,
7539,
1330,
13269,
198,
6738,
7539,
1330,
18931,
198,
6738,
764,
46284,
62,
19199,
1330,
2638,
1... | 2.520179 | 892 |
from loguru import logger
from flexget import plugin
from flexget.event import event
from . import db
logger = logger.bind(name='seen')
class FilterSeen:
"""
Remembers previously downloaded content and rejects them in
subsequent executions. Without this plugin FlexGet would
download all matching content on every execution.
This plugin is enabled on all tasks by default.
See wiki for more information.
"""
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'enum': ['global', 'local']},
{
'type': 'object',
'properties': {
'local': {'type': 'boolean'},
'fields': {
'type': 'array',
'items': {'type': 'string'},
"minItems": 1,
"uniqueItems": True,
},
},
},
]
}
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_filter(self, task, config, remember_rejected=False):
"""Filter entries already accepted on previous runs."""
config = self.prepare_config(config)
if config is False:
logger.debug('{} is disabled', self.keyword)
return
fields = config.get('fields')
local = config.get('local')
for entry in task.entries:
# construct list of values looked
values = []
for field in fields:
if field not in entry:
continue
if entry[field] not in values and entry[field]:
values.append(str(entry[field]))
if values:
logger.trace('querying for: {}', ', '.join(values))
# check if SeenField.value is any of the values
found = db.search_by_field_values(
field_value_list=values, task_name=task.name, local=local, session=task.session
)
if found:
logger.debug(
"Rejecting '{}' '{}' because of seen '{}'",
entry['url'],
entry['title'],
found.value,
)
se = (
task.session.query(db.SeenEntry)
.filter(db.SeenEntry.id == found.seen_entry_id)
.one()
)
entry.reject(
'Entry with %s `%s` is already marked seen in the task %s at %s'
% (found.field, found.value, se.task, se.added.strftime('%Y-%m-%d %H:%M')),
remember=remember_rejected,
)
def on_task_learn(self, task, config):
"""Remember succeeded entries"""
config = self.prepare_config(config)
if config is False:
logger.debug('disabled')
return
fields = config.get('fields')
local = config.get('local')
if isinstance(config, list):
fields.extend(config)
for entry in task.accepted:
self.learn(task, entry, fields=fields, local=local)
# verbose if in learning mode
if task.options.learn:
logger.info("Learned '{}' (will skip this in the future)", entry['title'])
def learn(self, task, entry, fields=None, reason=None, local=False):
"""Marks entry as seen"""
# no explicit fields given, use default
if not fields:
fields = self.fields
se = db.SeenEntry(entry['title'], str(task.name), reason, local)
remembered = []
for field in fields:
if field not in entry:
continue
# removes duplicate values (eg. url, original_url are usually same)
if entry[field] in remembered:
continue
remembered.append(entry[field])
sf = db.SeenField(str(field), str(entry[field]))
se.fields.append(sf)
logger.debug("Learned '{}' (field: {}, local: {})", entry[field], field, local)
# Only add the entry to the session if it has one of the required fields
if se.fields:
task.session.add(se)
def forget(self, task, title):
"""Forget SeenEntry with :title:. Return True if forgotten."""
se = task.session.query(db.SeenEntry).filter(db.SeenEntry.title == title).first()
if se:
logger.debug("Forgotten '{}' ({} fields)", title, len(se.fields))
task.session.delete(se)
return True
@event('plugin.register')
| [
6738,
2604,
14717,
1330,
49706,
198,
198,
6738,
7059,
1136,
1330,
13877,
198,
6738,
7059,
1136,
13,
15596,
1330,
1785,
198,
198,
6738,
764,
1330,
20613,
198,
198,
6404,
1362,
796,
49706,
13,
21653,
7,
3672,
11639,
15898,
11537,
628,
198... | 1.998731 | 2,364 |
import contextlib
from typing import Any, Dict, Iterable, Optional
import discord
import iso8601
import validators
from redbot.core import commands
from redbot.vendored.discord.ext import menus
| [
11748,
4732,
8019,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
40806,
540,
11,
32233,
198,
198,
11748,
36446,
198,
11748,
47279,
4521,
486,
198,
11748,
4938,
2024,
198,
6738,
2266,
13645,
13,
7295,
1330,
9729,
198,
6738,
2266,
13645... | 3.648148 | 54 |
import os
from datetime import date
date1 = Date(os.environ["ESPANSO_DATES_DAY1"], os.environ["ESPANSO_DATES_MONTH1"],
os.environ["ESPANSO_DATES_YEAR1"]).date
date2 = Date(os.environ["ESPANSO_DATES_DAY2"], os.environ["ESPANSO_DATES_MONTH2"],
os.environ["ESPANSO_DATES_YEAR2"]).date
datediff = date2 - date1
print(datediff.days + 1)
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
4475,
16,
796,
7536,
7,
418,
13,
268,
2268,
14692,
1546,
47,
15037,
46,
62,
35,
29462,
62,
26442,
16,
33116,
28686,
13,
268,
2268,
14692,
1546,
47,
15037,
46,
62,
35,
29462,... | 2.086705 | 173 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import sys
import tensorflow as tf
# to-do: find a way to limit gpu memory usage during training
######## START GPU SETTINGS ############
# Set MEMORY GROWTH to True
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
######### END GPU SETTINGS ############
import matplotlib.pyplot as plt
import numpy as np
import cupy as cp
import time
import h5py
sys.path.append('../')
from tomo_encoders import DataFile, Patches
import os
import tqdm
import pandas as pd
from tomo_encoders.neural_nets.autoencoders import SelfSupervisedCAE
from vis_utils import show_planes
# import matplotlib as mpl
# mpl.use('Agg')
from params import *
#### THIS EXPERIMENT ####
model_size = (64,64,64)
chunk_size = 32
model_tag = "M_a01"
from recon4D import SomeProjectionStream
from config import *
if __name__ == "__main__":
print("EXPERIMENT WITH MODEL %s"%model_tag)
model_params = get_model_params(model_tag)
print("EXPERIMENT WITH INPUT_SIZE = ", model_size)
if len(sys.argv) > 1:
if sys.argv[1] == "infer":
infer(model_params)
elif sys.argv[1] == "fit":
fit(model_params)
elif sys.argv[1] == "encode-decode":
encode_decode(model_params)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
220,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
37811,
220,
198,
37811,
220,
198,
11748,
25064,
220,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,... | 2.363215 | 647 |
#read and display the contents of Employee.csv in 23-a
import csv
with open('Employee.csv',"r") as fhObj:
eReader = csv.reader(fhObj)
print("File Employee.csv contains :")
for rec in eReader:
print(rec)
| [
2,
961,
290,
3359,
262,
10154,
286,
36824,
13,
40664,
287,
2242,
12,
64,
198,
198,
11748,
269,
21370,
198,
4480,
1280,
10786,
29733,
1453,
13,
40664,
40264,
81,
4943,
355,
277,
71,
49201,
25,
198,
220,
220,
220,
304,
33634,
796,
269... | 2.574713 | 87 |
from urllib.request import urlopen
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import re
import datetime
import random
pages = set()
random.seed(datetime.datetime.now())
allExtLinks = set()
allIntLinks = set()
getAllExternalLinks("http://jcrew.com")
| [
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
302,
198,
11748,
4818,
8079,
198,
11748,
4738,
198,
198,
31126,
... | 3.123596 | 89 |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import sys
from utils import logger
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import FILE_IN, FILE_OUT
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
except ImportError:
logger.warn("[Warning] Cannot import \"pycompss\" API packages.")
logger.warn(" Using mock decorators.")
from utils.dummy_pycompss import FILE_IN, FILE_OUT # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import compss_wait_on # pylint: disable=ungrouped-imports
from basic_modules.metadata import Metadata
from basic_modules.tool import Tool
from tool.aligner_utils import alignerUtils
from tool.common import common
# ------------------------------------------------------------------------------
class gemIndexerTool(Tool): # pylint: disable=invalid-name
"""
Tool for running indexers over a genome FASTA file
"""
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("GEM Indexer")
Tool.__init__(self)
if configuration is None:
configuration = {}
self.configuration.update(configuration)
@task(genome_file=FILE_IN, index_loc=FILE_OUT)
def gem_indexer(self, genome_file, index_loc): # pylint: disable=unused-argument, no-self-use
"""
GEM Indexer
Parameters
----------
genome_file : str
Location of the genome assembly FASTA file
idx_loc : str
Location of the output index file
"""
index_dir = os.path.split(index_loc)
try:
au_handle = alignerUtils()
au_handle.gem_index_genome(genome_file, os.path.join(index_dir[0], "tmp"))
except (IOError, OSError) as msg:
logger.fatal("I/O error({0}): {1}".format(
msg.errno, msg.strerror))
return False
common.zip_file(os.path.join(index_dir[0], "tmp.gem"))
# if genome_file + ".gem.gz" != index_loc:
with open(index_loc, "wb") as f_out:
with open(os.path.join(index_dir[0], "tmp.gem.gz"), "rb") as f_in:
f_out.write(f_in.read())
os.remove(os.path.join(index_dir[0], "tmp.gem.gz"))
return True
def run(self, input_files, input_metadata, output_files):
"""
Tool for generating assembly aligner index files for use with the GEM
indexer
Parameters
----------
input_files : list
List with a single str element with the location of the genome
assembly FASTA file
input_metadata : list
Returns
-------
array : list
First element is a list of the index files. Second element is a
list of the matching metadata
"""
# input and output share most metadata
results = self.gem_indexer(
input_files['genome'],
output_files['index']
)
results = compss_wait_on(results)
if results is False:
logger.fatal("GEM Indexer: run failed")
return {}, {}
output_metadata = {
"index": Metadata(
data_type="sequence_mapping_index_gem",
file_type="GEM",
file_path=output_files['index'],
sources=[input_files['genome']],
taxon_id=input_metadata["genome"].taxon_id,
meta_data={
"assembly": input_metadata["genome"].meta_data["assembly"],
"tool": "gem_indexer"
}
)
}
return (output_files, output_metadata)
# ------------------------------------------------------------------------------
| [
37811,
198,
492,
4091,
262,
28536,
2393,
9387,
351,
428,
670,
329,
3224,
1321,
198,
220,
220,
5115,
6634,
9238,
13,
628,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
220,
3... | 2.449421 | 1,987 |
from onadata.apps.api.tests.models.test_abstract_models import (
TestAbstractModels)
from onadata.apps.api.models import TempToken
from django.db.utils import IntegrityError
| [
6738,
319,
14706,
13,
18211,
13,
15042,
13,
41989,
13,
27530,
13,
9288,
62,
397,
8709,
62,
27530,
1330,
357,
198,
220,
220,
220,
6208,
23839,
5841,
1424,
8,
198,
6738,
319,
14706,
13,
18211,
13,
15042,
13,
27530,
1330,
24189,
30642,
... | 3.314815 | 54 |
from generator.actions import Actions
import array
import random
import struct
| [
6738,
17301,
13,
4658,
1330,
24439,
198,
11748,
7177,
198,
11748,
4738,
198,
11748,
2878,
198
] | 4.9375 | 16 |
import numpy as np
import unittest
import chainer.functions as F
import chainer.testing as testing
import chainer.testing.condition as condition
from mkldnn import switch
testing.run_module(__name__, __file__)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
11748,
6333,
263,
13,
12543,
2733,
355,
376,
198,
11748,
6333,
263,
13,
33407,
355,
4856,
198,
11748,
6333,
263,
13,
33407,
13,
31448,
355,
4006,
198,
6738,
33480,
335,
20... | 3.435484 | 62 |
"""
Binary Tree Level Order Traversal II
Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
"""
# approach: iterate through every node
# memory: O(V)
# runtime: O(V)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
| [
37811,
198,
33,
3219,
12200,
5684,
8284,
4759,
690,
282,
2873,
198,
198,
15056,
257,
13934,
5509,
11,
1441,
262,
4220,
12,
929,
1241,
1502,
33038,
282,
286,
663,
13760,
6,
3815,
13,
357,
494,
11,
422,
1364,
284,
826,
11,
1241,
416,
... | 2.509804 | 255 |
from src.PhaseIdentification.voltageBasedPhaseIdentification import *
"""
##################################################
Fluvinus experiment can be done using "truncated"
# CASE A: Rural area
# feeder ID = 86315_785383
# number of devices = 22
# CASE B: Urban area
# number of devices = 125
# feeder ID = 65028_84566
# CASE C: Average feeder
# number of devices = 76
# feeder ID = 1830188_2181475
##################################################
"""
include_A = True
include_B = True
include_C = True
accuracy_class = 0.1
include_three_phase = True
length = 24*7
n_repeats = 1
included_feeders = []
if include_A:
included_feeders.append("86315_785383")
if include_B:
included_feeders.append("65028_84566")
if include_C:
included_feeders.append("1830188_2181475")
for feeder_id in included_feeders:
feeder = Feeder(feederID=feeder_id,include_three_phase=include_three_phase,length=length)
feeder.truncate_voltages()
error = ErrorClass(accuracy_class)
phase_identification = PhaseIdentification(feeder, error)
phase_identification.voltage_correlation()
print("Accuracy using voltage correlation method", " data: ", phase_identification.accuracy())
feeder.plot_voltages(length=length)
feeder.plot_load_profiles(length=length) | [
6738,
12351,
13,
35645,
33234,
2649,
13,
37764,
496,
15001,
35645,
33234,
2649,
1330,
1635,
198,
198,
37811,
198,
29113,
14468,
2235,
198,
37,
2290,
7114,
385,
6306,
460,
307,
1760,
1262,
366,
2213,
19524,
515,
1,
198,
2,
220,
220,
42... | 2.755647 | 487 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.nan)
np.set_printoptions(precision=5)
#<startTeX>
sol = solve_ivp(lambda t,rf: [(1-0.02*rf[1])*rf[0], (-1+0.03*rf[0])*rf[1]],[0,20],[20,20],method='RK45',max_step=.1)
#<endTeX>
t = sol.t
[R,F] = sol.y
plt.figure()
plt.plot(t,R,color='k',linestyle='-')
plt.plot(t,F,color='k',linestyle='--')
plt.savefig('img/6/solution.pdf')
plt.figure()
plt.plot(F,R,color='k')
plt.savefig('img/6/phase.pdf')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
18908,
4873,
1330,
8494,
62,
452,
79,
198,
11748... | 2.050542 | 277 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Created By : Francisco Miras García <francisco.mirasg@gmail.com>
# version ='1.0'
# ---------------------------------------------------------------------------
"""
# Codigo para el ejercicio Color
1.- Construye un clasificador de objetos en base a la similitud de los histogramas de color del ROI
(de los 3 canales por separado).
Opcional: Segmentación densa por reproyección de histograma.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import cv2 as cv
import numpy as np
from umucv.stream import autoStream, putText
from umucv.util import ROI
# ---------------------------------------------------------------------------
# DATA
# ---------------------------------------------------------------------------
MODEL_W = 128 # Anchura del modelo a guardar
MODEL_H = 128 # Altura del modelo a guardar
MODEL_DIM = (MODEL_W, MODEL_H)
MODELS_PER_ROW = 8 # Modelos que se muestran por fila
FRAME_W = 640 # Altura y anchua de la camara
FRAME_H = 480
# ---------------------------------------------------------------------------
# Class
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# INIT
# ---------------------------------------------------------------------------
cv.namedWindow('input')
data = ColorParams(ROI('input'))
methods = dict()
methods['diferencia'] = Method(hg_diff, select='min')
methods['interseccion'] = Method(hg_intersect, select='max')
data.methods_list.append('diferencia')
data.actual_method = 'diferencia'
data.methods_list.append('interseccion')
# ---------------------------------------------------------------------------
# CODE
# ---------------------------------------------------------------------------
for key, frame in autoStream():
if data.region.roi:
[x1, y1, x2, y2] = data.region.roi
recorte = frame[y1:y2, x1:x2]
b, g, r = make_rgb_histogram(recorte, y2 - y1)
if key == ord('c'): # Guarda la region como modelo
info = (b[1], g[1], r[1])
data.patrones.append(Pattern(resize(recorte, MODEL_DIM), info))
if key == ord('x'): # Limpia la region
data.region.roi = None
continue
if key == ord('r'): # Borra todos los modelos
data.patrones.clear()
try:
cv.destroyWindow('modelos')
except Exception:
pass
if key == ord('n'): # Salta al siguiete metodo
data.get_next_method()
if len(data.patrones) > 0:
m = methods[data.actual_method]
vals, model = select_most_like_model(data, (b[1], g[1], r[1]), m)
show_values(vals, frame)
cv.imshow('detectado', model.frame)
cv.polylines(recorte, [b[0]], isClosed=False, color=(255, 0, 0),
thickness=2) # Dibuja las lineas de visualización
cv.polylines(recorte, [g[0]], isClosed=False, color=(0, 255, 0), thickness=2)
cv.polylines(recorte, [r[0]], isClosed=False, color=(0, 0, 255), thickness=2)
cv.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2)
putText(frame, f'{x2 - x1 + 1}x{y2 - y1 + 1}', orig=(x1, y1 - 8))
if len(data.patrones) > 0:
cv.imshow('modelos', stack_patterns(data))
putText(frame, f'{data.actual_method}', orig=(5, FRAME_H - 16))
cv.imshow('input', frame)
cv.destroyAllWindows()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
10541,
198,
2,
15622,
2750,
220,
1058,
6033,
7381,
292,
16364,
29690,
1279,
8310,
1192,
4861,
13,
10793,
292,
70,
31,
14816,
13,
785,
29,
198,
2,
2196,
796,
... | 2.834802 | 1,362 |
from __future__ import print_function
from builtins import object
from lib.common import helpers
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
3170,
1040,
1330,
2134,
198,
6738,
9195,
13,
11321,
1330,
49385,
628
] | 4.454545 | 22 |
from poker.deck import * | [
6738,
27101,
13,
35875,
1330,
1635
] | 4 | 6 |
import dns.resolver
# resolveList.append(rData)
# ++y
# return resolveList
domainName = "www.yahoo.com"
queryResult = resolve(domainName);
for result in queryResult:
print(queryResult[0]) | [
11748,
288,
5907,
13,
411,
14375,
198,
2,
197,
197,
411,
6442,
8053,
13,
33295,
7,
81,
6601,
8,
198,
2,
197,
197,
4880,
88,
220,
220,
220,
220,
220,
220,
220,
220,
198,
2,
197,
7783,
10568,
8053,
198,
198,
27830,
5376,
796,
366,... | 2.589744 | 78 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import random
import sys
import torch
import torch.nn.functional as F
from io import StringIO
from fairseq import options, utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.tasks import FairseqTask
from fairseq_cli import (
generate,
interactive,
preprocess,
train,
validate,
)
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
... | 3.266376 | 229 |
import heapq
n = int(input())
h = []
for _ in range(n):
q = input().split()
if len(q) == 1:
print(h[0])
else:
v = int(q[1])
if q[0] == "1":
heapq.heappush(h, v)
else:
if h[0] == v:
heapq.heappop(h)
elif h[-1] == v:
h.pop()
else:
i = h.index(v)
h[i] = h[-1]
h.pop()
heapq._siftup(h, i)
heapq._siftdown(h, 0, i)
| [
11748,
24575,
80,
198,
198,
77,
796,
493,
7,
15414,
28955,
198,
71,
796,
17635,
198,
1640,
4808,
287,
2837,
7,
77,
2599,
198,
220,
220,
220,
10662,
796,
5128,
22446,
35312,
3419,
198,
220,
220,
220,
611,
18896,
7,
80,
8,
6624,
352... | 1.43454 | 359 |
T = int(input())
myTree = Solution()
root = None
for i in range(T):
data = int(input())
root = myTree.insert(root, data)
myTree.levelOrder(root)
| [
198,
51,
796,
493,
7,
15414,
28955,
198,
1820,
27660,
796,
28186,
3419,
198,
15763,
796,
6045,
198,
1640,
1312,
287,
2837,
7,
51,
2599,
198,
220,
220,
220,
1366,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
6808,
796,
616,
27660,
... | 2.566667 | 60 |
from django.conf.urls import url
from .views import (
PaymentListAPIView,
BookingListAPIView,
RoomBookingListAPIView,
CreateAPIView,
UpdateAPIView,
CheckOutAPIView
)
urlpatterns = [
url(r'^$', BookingListAPIView.as_view(),
name='api-booking-list'),
url(r'^create/$', CreateAPIView.as_view(),
name='api-create'),
url(r'^update/(?P<pk>[0-9]+)/$', UpdateAPIView.as_view(),
name='api-update'),
url(r'^checkout/(?P<pk>[0-9]+)/$', CheckOutAPIView.as_view(),
name='api-update'),
url(r'^customer/(?P<pk>[0-9]+)/$', BookingListAPIView.as_view(),
name='api-customer-booking-list'),
url(r'^room/(?P<pk>[0-9]+)/$', RoomBookingListAPIView.as_view(),
name='api-room-booking-list'),
url(r'^payments/(?P<pk>[0-9]+)/$', PaymentListAPIView.as_view(),
name='api-booking-payment-list'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
28784,
8053,
2969,
3824,
769,
11,
... | 1.849722 | 539 |
import tempfile
from django.core.urlresolvers import reverse
from hs_file_types.tests.utils import CompositeResourceTestMixin
from hs_core.models import ResourceFile
from hs_core.tests.api.rest.base import HSRESTTestCase
from hs_file_types.models import FileSetLogicalFile
| [
11748,
20218,
7753,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
289,
82,
62,
7753,
62,
19199,
13,
41989,
13,
26791,
1330,
49355,
26198,
14402,
35608,
259,
198,
198,
6738,
289,
82,
62,
7295,
... | 3.285714 | 84 |
import pytest
from flatsplode import (explode, flatsplode, flatten)
@pytest.mark.parametrize(('item', 'exp'), [
(
{
'a': 'b',
'jar': ['bar', 'car'],
'fizz': ['buzz', 'jazz', 'fuzz'],
'foo': 'bar',
},
[
{'a': 'b', 'jar': 'bar', 'fizz': 'buzz', 'foo': 'bar'},
{'a': 'b', 'jar': 'bar', 'fizz': 'jazz', 'foo': 'bar'},
{'a': 'b', 'jar': 'bar', 'fizz': 'fuzz', 'foo': 'bar'},
{'a': 'b', 'jar': 'car', 'fizz': 'buzz', 'foo': 'bar'},
{'a': 'b', 'jar': 'car', 'fizz': 'jazz', 'foo': 'bar'},
{'a': 'b', 'jar': 'car', 'fizz': 'fuzz', 'foo': 'bar'},
],
),
])
@pytest.mark.parametrize(('item', 'join', 'exp'), [
(
{
'fizz': {
'buzz': {
'jazz': 'fuzz',
}
},
'empty': {
},
},
'.',
{
'fizz.buzz.jazz': 'fuzz',
'empty': None,
},
),
(
{
'fizz': {
'buzz': {
'jazz': 'fuzz',
}
},
'empty': {
},
},
'/',
{
'fizz/buzz/jazz': 'fuzz',
'empty': None,
},
),
])
@pytest.mark.parametrize(('item', 'exp'), [
(
{
'a': 'b',
'jar': ['bar', 'car'],
'foo': {
'fizz': [
{'buzz': 1, 'jazz': 2, 'fuzz': 3, 'array': [9, 8, 7]},
{'buzz': 2, 'jazz': 3, 'fuzz': 4, 'array': [9, 8, 7]},
{'buzz': 3, 'jazz': 4, 'fuzz': 5, 'array': [9, 8, 7]},
],
},
},
[
{
"a": "b",
"foo.fizz.array": 9,
"foo.fizz.buzz": 1,
"foo.fizz.fuzz": 3,
"foo.fizz.jazz": 2,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 8,
"foo.fizz.buzz": 1,
"foo.fizz.fuzz": 3,
"foo.fizz.jazz": 2,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 7,
"foo.fizz.buzz": 1,
"foo.fizz.fuzz": 3,
"foo.fizz.jazz": 2,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 9,
"foo.fizz.buzz": 2,
"foo.fizz.fuzz": 4,
"foo.fizz.jazz": 3,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 8,
"foo.fizz.buzz": 2,
"foo.fizz.fuzz": 4,
"foo.fizz.jazz": 3,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 7,
"foo.fizz.buzz": 2,
"foo.fizz.fuzz": 4,
"foo.fizz.jazz": 3,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 9,
"foo.fizz.buzz": 3,
"foo.fizz.fuzz": 5,
"foo.fizz.jazz": 4,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 8,
"foo.fizz.buzz": 3,
"foo.fizz.fuzz": 5,
"foo.fizz.jazz": 4,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 7,
"foo.fizz.buzz": 3,
"foo.fizz.fuzz": 5,
"foo.fizz.jazz": 4,
"jar": "bar",
},
{
"a": "b",
"foo.fizz.array": 9,
"foo.fizz.buzz": 1,
"foo.fizz.fuzz": 3,
"foo.fizz.jazz": 2,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 8,
"foo.fizz.buzz": 1,
"foo.fizz.fuzz": 3,
"foo.fizz.jazz": 2,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 7,
"foo.fizz.buzz": 1,
"foo.fizz.fuzz": 3,
"foo.fizz.jazz": 2,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 9,
"foo.fizz.buzz": 2,
"foo.fizz.fuzz": 4,
"foo.fizz.jazz": 3,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 8,
"foo.fizz.buzz": 2,
"foo.fizz.fuzz": 4,
"foo.fizz.jazz": 3,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 7,
"foo.fizz.buzz": 2,
"foo.fizz.fuzz": 4,
"foo.fizz.jazz": 3,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 9,
"foo.fizz.buzz": 3,
"foo.fizz.fuzz": 5,
"foo.fizz.jazz": 4,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 8,
"foo.fizz.buzz": 3,
"foo.fizz.fuzz": 5,
"foo.fizz.jazz": 4,
"jar": "car",
},
{
"a": "b",
"foo.fizz.array": 7,
"foo.fizz.buzz": 3,
"foo.fizz.fuzz": 5,
"foo.fizz.jazz": 4,
"jar": "car",
}
],
),
(
{'test': [0, 0.0, [], '', 123]},
[
{'test': 0},
{'test': 0.0},
{'test': []},
{'test': ''},
{'test': 123},
]
)
])
| [
11748,
12972,
9288,
198,
198,
6738,
38157,
489,
1098,
1330,
357,
20676,
1098,
11,
38157,
489,
1098,
11,
27172,
268,
8,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
10786,
9186,
3256,
705,
11201,
33809,
685,
198,
... | 1.352902 | 4,548 |
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from mock import patch
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
9288,
1330,
20985,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
15290,
1330,
8529,
628
] | 3.870968 | 31 |
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
"""
Second Solution : Using previously established next pointers
We only move on to the level N+1 when we are done establishing the next pointers for the level N. Since we have access to all the nodes on a particular level via the next pointers, we can use these next pointers to establish the connections for the next level or the level containing their children.
"""
| [
37811,
198,
2,
30396,
329,
257,
19081,
13,
198,
4871,
19081,
25,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
1188,
25,
493,
796,
657,
11,
1364,
25,
705,
19667,
6,
796,
6045,
11,
826,
25,
705,
19667,
6,
796,
6045,
11,... | 3.081731 | 208 |
# -*- coding: utf-8 -*-
"""miniuser's app specific exceptions"""
class MiniUserException(Exception):
"""Base class for all app specific exceptions"""
pass
class MiniUserConfigurationException(MiniUserException):
"""Raised, if there is a mismatch/inconsistency in the app specific settings."""
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
1084,
3754,
263,
338,
598,
2176,
13269,
37811,
628,
198,
4871,
12558,
12982,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
1398,
329,
477,
598,
2176,
1... | 3.419355 | 93 |
num = int(input('Digite o valor a ser convertido: '))
base = int(input("""Qual será a base de conversão? sendo:
\033[34m1\033[m = binário
\033[34m2\033[m = Octal
\033[34m3\033[m = Hexadecimal
Digite: """))
bin = bin(num)
oct = oct(num)
hex = hex(num)
if base == 1:
print('O Resultado é: {}'.format(bin))
elif base == 2:
print('O Resultado é: {}'.format(oct))
elif base == 3:
print('O Resultado é: {}'.format(hex))
| [
22510,
796,
493,
7,
15414,
10786,
19511,
578,
267,
1188,
273,
257,
1055,
10385,
17305,
25,
705,
4008,
198,
8692,
796,
493,
7,
15414,
7203,
15931,
46181,
1055,
6557,
257,
2779,
390,
3453,
28749,
30,
3758,
78,
25,
220,
198,
59,
44427,
... | 2.333333 | 183 |
# usage: pypy scriptname inputfile outputfile
import sys

# Read the whole input file, push every byte through bandlimit() (defined
# elsewhere in this file) and write the transformed bytes to the output.
# NOTE(review): bandlimit() is not visible in this chunk -- presumably it
# maps a byte value (int) to a byte value; confirm against its definition.
# NOTE(review): iterating a bytes object yields ints under Python 3, so the
# ord() call implies this script targets Python 2 / pypy2.
outcontents = []
# Context managers guarantee the handles are closed even if bandlimit()
# raises (the original left both files to be closed by the GC).
with open(sys.argv[1], "rb") as src:
    data = src.read()
for ch in data:
    outcontents.append(bandlimit(ord(ch)))
with open(sys.argv[2], "wb") as dst:
    dst.write(bytearray(outcontents))
2,
8748,
25,
279,
4464,
88,
4226,
3672,
5128,
7753,
5072,
7753,
198,
198,
11748,
25064,
628,
198,
198,
448,
3642,
658,
796,
17635,
198,
198,
259,
7753,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
4357,
366,
26145,
11074,
961,
3419,
... | 2.477876 | 113 |
#!/usr/bin/env python
class Slide(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""
    def __init__(self):
        """
        Attributes:
          swaggerTypes (dict): The key is attribute name and the value is attribute type.
          attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        # Declared Swagger type for each attribute; consumed by the
        # generated (de)serialization machinery.
        self.swaggerTypes = {
            'Width': 'float',
            'Height': 'float',
            'Shapes': 'ResourceUriElement',
            'Theme': 'ResourceUriElement',
            'Placeholders': 'ResourceUriElement',
            'Images': 'ResourceUriElement',
            'Comments': 'ResourceUriElement',
            'Background': 'ResourceUriElement',
            'SelfUri': 'ResourceUri',
            'AlternateLinks': 'list[ResourceUri]',
            'Links': 'list[ResourceUri]'
        }
        # Attribute name -> JSON key mapping (an identity map for Slide).
        self.attributeMap = {
            'Width': 'Width','Height': 'Height','Shapes': 'Shapes','Theme': 'Theme','Placeholders': 'Placeholders','Images': 'Images','Comments': 'Comments','Background': 'Background','SelfUri': 'SelfUri','AlternateLinks': 'AlternateLinks','Links': 'Links'}
        # Attribute slots; all start as None and are filled in when a
        # response payload is deserialized.
        self.Width = None # float
        self.Height = None # float
        self.Shapes = None # ResourceUriElement
        self.Theme = None # ResourceUriElement
        self.Placeholders = None # ResourceUriElement
        self.Images = None # ResourceUriElement
        self.Comments = None # ResourceUriElement
        self.Background = None # ResourceUriElement
        self.SelfUri = None # ResourceUri
        self.AlternateLinks = None # list[ResourceUri]
        self.Links = None # list[ResourceUri]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
4871,
37651,
7,
15252,
2599,
198,
220,
220,
220,
37227,
16580,
25,
770,
1398,
318,
8295,
7560,
416,
262,
1509,
7928,
2438,
17301,
1430,
13,
198,
220,
220,
220,
2141,
407,
4370,
... | 2.445378 | 714 |
import random
from PyQt5.QtWidgets import QGraphicsView, QGraphicsRectItem, QGraphicsScene
from random_matrix_generator.block import Block
| [
11748,
4738,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
18172,
7680,
11,
1195,
18172,
45474,
7449,
11,
1195,
18172,
36542,
198,
198,
6738,
4738,
62,
6759,
8609,
62,
8612,
1352,
13,
9967,
1330,
9726,
198
] | 3.333333 | 42 |
import json
import unittest
from addict import Dict
TEST_VAL = [1, 2, 3]
TEST_DICT = {'a': {'b': {'c': TEST_VAL}}}
TEST_DICT_STR = str(TEST_DICT)
"""
Allow for these test cases to be run from the command line
via `python test_addict.py`
"""
if __name__ == '__main__':
    # Build a suite from the Tests case (defined elsewhere in this file)
    # and run it with verbose per-test output.
    all_tests = unittest.TestLoader().loadTestsFromTestCase(Tests)
    unittest.TextTestRunner(verbosity=2).run(all_tests)
| [
11748,
33918,
198,
11748,
555,
715,
395,
198,
6738,
19678,
1330,
360,
713,
198,
198,
51,
6465,
62,
23428,
796,
685,
16,
11,
362,
11,
513,
60,
198,
51,
6465,
62,
35,
18379,
796,
1391,
6,
64,
10354,
1391,
6,
65,
10354,
1391,
6,
66... | 2.522293 | 157 |
import os
import pathlib
def parse_yaml(file_name):
    '''
    Parse file_name (a pathlib.Path) and return its contents as a dictionary.
    If the file does not exist or is empty, return an empty dictionary.
    Exits the process with status -1 on malformed YAML.
    '''
    if not file_name.is_file():
        return {}
    # Local imports keep PyYAML optional until a file actually needs parsing.
    import yaml
    import sys
    with file_name.open() as f:
        try:
            config = yaml.load(f, yaml.SafeLoader)
        except yaml.YAMLError as exc:
            print(exc)
            sys.exit(-1)
    # Bug fix: an empty YAML document parses to None; the original returned
    # that None, breaking the documented "return dictionary" contract.
    return config or {}
def extract_menu_file(file_name, generator, ci_environment):
    '''
    Reads file_name in yml format and returns:
      skip (bool): if True, then skip the current generator
      expected_failure (bool): if True, then the current generator is not supported
      env: dictionary of environment variables passed to CMake
      definitions: dictionary of CMake configure-step definitions
      targets: list of targets to build
    '''
    config = parse_yaml(file_name)

    # Build targets apply regardless of the CI environment.
    targets = [target for target in config.get('targets', [])]

    if ci_environment not in config:
        return False, False, {}, {}, targets

    ci_config = config[ci_environment]

    skip = generator in ci_config.get('skip_generators', [])
    expect_failure = generator in ci_config.get('failing_generators', [])

    # env/definitions are stored as lists of single-entry mappings; fold
    # each entry into one flat dictionary.
    env = {}
    for entry in ci_config.get('env', []):
        env.update(entry)

    definitions = {}
    for entry in ci_config.get('definitions', []):
        definitions.update(entry)

    return skip, expect_failure, env, definitions, targets
| [
11748,
28686,
198,
11748,
3108,
8019,
628,
198,
4299,
21136,
62,
88,
43695,
7,
7753,
62,
3672,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
2547,
325,
2393,
62,
3672,
290,
1441,
22155,
13,
198,
220,
220,
220,
1002,
2393,... | 2.538557 | 804 |
import donna25519, base64
def generateEDKeyPairs():
    """Generate an ED25519 key pair, returned as base64-encoded strings.

    Returns a ``(private, public)`` tuple.
    """
    keypair = donna25519.PrivateKey()
    raw_private = keypair.private
    raw_public = keypair.get_public().public

    def encode(raw):
        # base64-encode raw key bytes and decode to str for transport
        return base64.b64encode(raw).decode()

    return (encode(raw_private), encode(raw_public))
11748,
836,
2616,
13381,
1129,
11,
2779,
2414,
628,
198,
4299,
7716,
1961,
9218,
47,
3468,
33529,
628,
220,
220,
220,
37227,
32,
2163,
543,
18616,
8392,
13381,
1129,
1994,
14729,
37811,
628,
220,
220,
220,
2839,
9218,
796,
836,
2616,
... | 2.781818 | 110 |
import pandas as pd
import numpy as np
import argparse
from imblearn.over_sampling import RandomOverSampler
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers.experimental import preprocessing
import kerastuner as kt
"""Use Keras Tuner Hyberband, which trains multiple models on the training data, to determine the best hyperparameters to use in detecting
incidents in the validation data and then save the best performing model."""
def model_test_builder(hp):
    """Build one candidate network for the Keras Tuner search.

    hp: a kerastuner.HyperParameters object supplied by the tuner; samples
    1-6 Dense/Dropout layer pairs with tunable widths, dropout rates and
    learning rate, and compiles a binary classifier.
    """
    # Bug fix: METRICS was only ever defined as a *local* inside
    # run_hyperband(), so the module-scope lookup here raised NameError.
    # Build the metric list locally instead.
    metrics = [
        keras.metrics.TruePositives(name='tp'),
        keras.metrics.FalsePositives(name='fp'),
        keras.metrics.TrueNegatives(name='tn'),
        keras.metrics.FalseNegatives(name='fn'),
        keras.metrics.BinaryAccuracy(name='accuracy'),
        keras.metrics.Precision(name='precision'),
        keras.metrics.Recall(name='recall'),
        keras.metrics.AUC(name='auc'),
    ]
    model = keras.Sequential()
    # NOTE(review): `normalizer` is created as a local of main() -- confirm
    # it is exposed at module scope before the tuner invokes this builder.
    model.add(normalizer)
    for i in range(hp.Int('numLayers', min_value=1, max_value=6, step=1)):
        model.add(keras.layers.Dense(
            hp.Int('hidden_size_{}'.format(i), min_value=16, max_value=320, step=32),
            activation='relu'))
        model.add(keras.layers.Dropout(
            hp.Float('Dropout_{}'.format(i), min_value=0.0, max_value=0.5, step=0.05)))
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.compile(
        optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[.001, .0001, .00001])),
        loss=keras.losses.BinaryCrossentropy(),
        metrics=metrics)
    return model
def final_model_builder(best_hps):
    """Build the final model from the best hyperparameters found by the tuner.

    best_hps: a kerastuner.HyperParameters object holding the winning values.
    """
    # Bug fix: METRICS only ever existed as a local inside run_hyperband(),
    # so the module-scope reference here raised NameError; build it locally.
    metrics = [
        keras.metrics.TruePositives(name='tp'),
        keras.metrics.FalsePositives(name='fp'),
        keras.metrics.TrueNegatives(name='tn'),
        keras.metrics.FalseNegatives(name='fn'),
        keras.metrics.BinaryAccuracy(name='accuracy'),
        keras.metrics.Precision(name='precision'),
        keras.metrics.Recall(name='recall'),
        keras.metrics.AUC(name='auc'),
    ]
    model = keras.Sequential()
    # NOTE(review): `normalizer` is a local of main() as written -- confirm
    # it is made available at module scope before this builder runs.
    model.add(normalizer)
    for i in range(best_hps.get('numLayers')):
        model.add(keras.layers.Dense(best_hps.get('hidden_size_{}'.format(i)), activation='relu'))
        model.add(keras.layers.Dropout(best_hps.get('Dropout_{}'.format(i))))
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.compile(
        optimizer=keras.optimizers.Adam(best_hps.get('learning_rate')),
        loss=keras.losses.BinaryCrossentropy(),
        metrics=metrics)
    return model
def run_hyperband(project_name,X_ros, y_ros, test, actual):
    """Set up and run the Keras Tuner Hyperband which creates a bracket of many different neural network models
    with different hyperparameters and trains, tests and evaluates them to determine the best one and stores the values in
    best_hps."""
    # Standard binary-classification metric set reported during tuning.
    # NOTE(review): this METRICS list is local to this function, yet the
    # builder functions reference METRICS at module scope -- as written they
    # would raise NameError; confirm where METRICS is meant to live.
    METRICS = [
      keras.metrics.TruePositives(name='tp'),
      keras.metrics.FalsePositives(name='fp'),
      keras.metrics.TrueNegatives(name='tn'),
      keras.metrics.FalseNegatives(name='fn'),
      keras.metrics.BinaryAccuracy(name='accuracy'),
      keras.metrics.Precision(name='precision'),
      keras.metrics.Recall(name='recall'),
      keras.metrics.AUC(name='auc')
    ]
    # Hyperband bracket: up to 100 epochs per trial, reduction factor 3,
    # maximizing validation AUC; tuner state persists under project_name.
    tuner = kt.Hyperband(model_test_builder, objective = kt.Objective('val_auc',direction='max'), max_epochs = 100, factor = 3, project_name=project_name)
    # Search with oversampled training data, validating against the raw
    # (un-resampled) test set.
    tuner.search(X_ros,y_ros,batch_size=240,epochs=20,validation_data=(np.array(test),np.array(actual)))
    # Get the optimal hyperparameters
    best_hps = tuner.get_best_hyperparameters(num_trials = 1)[0]
    return best_hps
def build_and_save(best_hps, X_ros, y_ros, test, actual, save_filename):
    """Train a model built from the tuned hyperparameters and persist it.

    Fits with early stopping on validation AUC, then writes the trained
    model to save_filename for later use.
    """
    model = final_model_builder(best_hps)
    # Stop once validation AUC plateaus for 5 epochs, keeping the best
    # weights seen so far.
    stop_on_plateau = tf.keras.callbacks.EarlyStopping(
        monitor='val_auc',
        verbose=1,
        patience=5,
        mode='max',
        restore_best_weights=True)
    validation = (np.array(test), np.array(actual))
    model.fit(
        X_ros,
        y_ros,
        batch_size=240,
        epochs=100,
        validation_data=validation,
        callbacks=[stop_on_plateau])
    model.save(save_filename)
def main():
    """Read training/test data, label it, run hyperparameter tuning and
    save the best-performing incident-detection model."""
    parser = argparse.ArgumentParser(description='Measures Estimation program for training and saving neural network for incident detection.')
    parser.add_argument('training_data')  # csv
    parser.add_argument('test_data')  # csv
    parser.add_argument('project_name')  # Where to save KT Hyperband information to
    parser.add_argument('save_filename')  # Where to save trained best performing model to
    args = parser.parse_args()

    # Bug fix: the original referenced the bare names `training_data` /
    # `testing_data` (NameError); the parsed values live on `args`.
    df_train = pd.read_csv(args.training_data).fillna(0)
    # Ground truth data is binary True or False whether an incident was
    # present on the link at the current time.
    # Bug fix: labels were computed from an undefined `df_features`; the
    # training frame is `df_train`.
    labels = (df_train['Link'] >= 637) & (df_train['Link'] <= 638) & (df_train['CurrentTime'] >= 12000) & (df_train['CurrentTime'] <= 14100)

    # NOTE(review): the model builders reference `normalizer` at module
    # scope, but it is a local here -- confirm how it reaches them.
    normalizer = preprocessing.Normalization()
    normalizer.adapt(np.array(df_train))

    # Oversample the (rare) incident class before training.
    ros = RandomOverSampler()
    X_ros, y_ros = ros.fit_sample(df_train, labels)

    df_test = pd.read_csv(args.test_data).fillna(0)
    # Ground truth data is binary True or False whether an incident was
    # present on the link at the current time.
    actual = (df_test['Link'] >= 249) & (df_test['Link'] <= 250) & (df_test['CurrentTime'] >= 14640) & (df_test['CurrentTime'] <= 14940)

    best_hps = run_hyperband(args.project_name, X_ros, y_ros, df_test, actual)
    build_and_save(best_hps, X_ros, y_ros, df_test, actual, args.save_filename)
if __name__ == "__main__":
main() | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
6738,
545,
903,
1501,
13,
2502,
62,
37687,
11347,
1330,
14534,
5886,
16305,
20053,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6... | 2.714801 | 1,939 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample application that demonstrates how to use the App Engine Images API.
For more information, see README.md.
"""
# [START all]
# [START thumbnailer]
from google.appengine.api import images
from google.appengine.ext import blobstore
import webapp2
# [END thumbnailer]
# WSGI entry point: route table mapping URL paths to the handler classes
# defined elsewhere in this file, with webapp2 debug mode enabled.
app = webapp2.WSGIApplication(
    [('/img', Thumbnailer),
     ('/redirect', ServingUrlRedirect)], debug=True)
# [END all]
| [
2,
15069,
1853,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.467128 | 289 |
# For each of t test cases, print the first three integers i > n for which
# findgcd(n, i) == 2 (findgcd is defined elsewhere in this file --
# presumably the greatest common divisor; confirm against its definition).
t = int(input())  # number of test cases
while t > 0:
    t= t-1
    n,m = input().split()
    n = int(n)
    # NOTE(review): m is read but never used -- confirm against the
    # problem statement.
    m = int(m)
    lst = []  # qualifying values found so far
    c = 0     # count of qualifying values
    # scan a bounded window above n; three hits are assumed to exist here
    for i in range(n+1,n+1000):
        s=findgcd(n,i)
        if s == 2:
            c = c+1
            lst.append(i)
        if c == 3:
            break
    for i in lst:
        print(i,end=" ")
| [
198,
83,
796,
493,
7,
15414,
28955,
198,
4514,
256,
1875,
657,
25,
198,
220,
220,
220,
256,
28,
256,
12,
16,
198,
220,
220,
220,
299,
11,
76,
796,
5128,
22446,
35312,
3419,
198,
220,
220,
220,
299,
796,
493,
7,
77,
8,
198,
220... | 1.525114 | 219 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
The MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Copyright 2019,2020 Ali Erkan IMREK <alierkanimrek@gmail.com>
'''
import time
from tornado import ioloop
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
220,
220,
220,
383,
17168,
13789,
25,
628,
220,
220,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
... | 3.435135 | 370 |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from os.path import join
import re
import subprocess
import time
from pgrunner import ROOT
DEVNULL = open(os.devnull, 'wb')
# Default database (in the future we will put clones in the ROOT too)
DEFAULT = join(ROOT, 'default')
# Active database
CURRENT = join(ROOT, 'current')
HELP = """Useful commands
./manage.py pg_run - Run PostgreSQL server in foreground
./manage.py pg_psql - Start psql with right parameters
./manage.py pg_ctl start - Start server in background
./manage.py pg_ctl stop - Stop server in background
./manage.py pg_ctl status - Check if the server is running
./manage.py pg_snapshot foo - Create a copy of all current database data
./manage.py pg_activate foo - Activate snapshot 'foo'
"""
def activate_clone(snapshot='default'):
    """Activate a certain snapshot by name

    :param snapshot: name of snapshot
    :type snapshot: str
    :raises ValueError: if the snapshot name contains disallowed characters
    :raises OSError: if no directory exists for the snapshot
    """
    # Resolves the old TODO: restrict names to [a-zA-Z0-9_-]+, which also
    # subsumes the previous os.pathsep / '/' checks.
    if not re.match(r'^[a-zA-Z0-9_-]+$', snapshot):
        raise ValueError("Invalid snapshot name: {0}".format(snapshot))
    snapshot_path = join(ROOT, snapshot)
    if not os.path.isdir(snapshot_path):
        raise OSError("Not a snapshot directory: {0}".format(snapshot_path))
    current_path = join(ROOT, 'current')
    # Bug fix: use lexists(), not exists() -- a dangling 'current' symlink
    # made exists() return False, so the unlink was skipped and the
    # following symlink() call failed with FileExistsError.
    if os.path.lexists(current_path):
        os.unlink(current_path)
    os.symlink(snapshot, current_path)
GET_PORT_RE = re.compile('(^|\n) *port *= *([0-9]+)')
SET_PORT_RE = re.compile('(^|\n)#? *port *= *([0-9]+)')
def get_port():
    """Returns the port the server listens on

    :return: port number
    :rtype: int
    """
    config_path = join(CURRENT, 'postgresql.conf')
    with open(config_path, 'r') as f:
        config = f.read()
    # Bug fix: re.MULTILINE was being passed as Pattern.search()'s *pos*
    # argument (compiled patterns accept no flags), which silently skipped
    # the first 8 characters of the config. The pattern's (^|\n) prefix
    # already anchors line starts, so no flag is needed.
    m = GET_PORT_RE.search(config)
    if not m:
        port = 5432  # PostgreSQL's default when no explicit port is set
    else:
        port = int(m.group(2))
    return port
def set_port(port):
    """Changes the postgresql config to use given port

    :param port: the port to listen on
    :type port: int
    """
    config_path = join(CURRENT, 'postgresql.conf')
    with open(config_path, 'r') as f:
        config = f.read()
    # Bug fix: re.MULTILINE was being passed as Pattern.sub()'s *count*
    # argument, capping the substitution at 8 replacements instead of
    # acting as a flag. The pattern's (^|\n) prefix already anchors line
    # starts, so no flag is needed.
    config = SET_PORT_RE.sub('\\1port = {0}'.format(port), config)
    with open(config_path, 'w') as f:
        f.write(config)
def is_running():
    """Checks if the server is running.

    :return: is running?
    :rtype: bool
    """
    if not os.path.exists(join(CURRENT, 'postmaster.pid')):
        return False
    # A PID file can be stale; ask pg_ctl whether the server really answers.
    proc = subprocess.Popen(['pg_ctl', '-D', CURRENT, 'status'],
                            bufsize=10000, stdout=subprocess.PIPE)
    status_output = proc.stdout.read()
    exitcode = proc.wait()
    if exitcode > 0 or b'pg_ctl: server is running' not in status_output:
        print("PostgreSQL PID file exists, but not running", file=sys.stderr)
        return False
    return True
def ensure_stopped(verbose=False):
    """Ensures the database server is not running and stops it if needed.

    :param verbose: If set, info will be printed to stdout
    :type verbose: bool
    :return: indicates if the server was already running
    :rtype: bool
    """
    was_running = is_running()
    if not was_running:
        return was_running
    if verbose:
        print("PostgreSQL server is running, shutting it down", file=sys.stderr)
    cmd = ['pg_ctl', '-D', CURRENT, '-m', 'fast', 'stop']
    if verbose:
        print(' '.join(cmd), file=sys.stderr)
    subprocess.call(cmd)
    # Poll for up to 10 seconds (20 x 0.5s) for the shutdown to complete.
    for _ in range(20):
        if not is_running():
            break
        time.sleep(0.5)
    if is_running():
        raise Exception("Server still running after 10 seconds")
    return was_running
def ensure_started(verbose=False):
    """Ensures the database server is running and starts it if needed.

    :param verbose: If set, info will be printed to stdout
    :type verbose: bool
    :return: indicates if the server was already running
    :rtype: bool
    """
    was_running = is_running()
    if was_running:
        return was_running
    if verbose:
        print("PostgreSQL server is not running, starting it", file=sys.stderr)
    cmd = ['pg_ctl', '-D', CURRENT, 'start']
    if verbose:
        print(' '.join(cmd), file=sys.stderr)
    subprocess.call(cmd)
    # Poll for up to 10 seconds (20 x 0.5s) for startup to complete.
    for _ in range(20):
        if is_running():
            break
        time.sleep(0.5)
    if not is_running():
        raise Exception("Server still not running after 10 seconds")
    return was_running
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
11748,
302,
198,
198,
11748,
850,
1... | 2.429892 | 1,947 |
import abc
from pyspark.ml.base import Estimator
from pyspark.ml.base import Transformer
from parallelm.common.mlcomp_exception import MLCompException
from parallelm.components.spark_session_component import SparkSessionComponent
| [
11748,
450,
66,
198,
198,
6738,
279,
893,
20928,
13,
4029,
13,
8692,
1330,
10062,
320,
1352,
198,
6738,
279,
893,
20928,
13,
4029,
13,
8692,
1330,
3602,
16354,
198,
198,
6738,
10730,
76,
13,
11321,
13,
4029,
5589,
62,
1069,
4516,
13... | 3.530303 | 66 |
# from OneTicketLogging import elasticsearch_logger
# _logger = elasticsearch_logger(__name__)
| [
2,
422,
1881,
51,
9715,
11187,
2667,
1330,
27468,
12947,
62,
6404,
1362,
198,
198,
2,
4808,
6404,
1362,
796,
27468,
12947,
62,
6404,
1362,
7,
834,
3672,
834,
8,
628,
628,
628,
628
] | 3.029412 | 34 |
"""
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/trunk/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2009, Linden Research, Inc.
Licensed under the Apache License, Version 2.0.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/LICENSE.txt
$/LicenseInfo$
"""
# utilities
from pyogp.lib.base.exc import NotImplemented
| [
198,
37811,
198,
37146,
669,
460,
307,
9569,
379,
25,
198,
4023,
1378,
21370,
77,
13,
12227,
6042,
13,
785,
14,
21370,
77,
14,
75,
521,
268,
14,
42068,
14,
11528,
14,
9078,
519,
79,
14,
8019,
14,
8692,
14,
2213,
2954,
14,
10943,
... | 2.60804 | 199 |
from diffhod.mock_observables import *
| [
6738,
814,
2065,
13,
76,
735,
62,
672,
3168,
2977,
1330,
1635,
198
] | 3 | 13 |
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from roca.modeling.depth_head.depth_modules import (
DepthFeatures,
DepthOutput,
Sobel,
)
from roca.modeling.logging_metrics import depth_metrics
from roca.modeling.loss_functions import (
cosine_distance,
inverse_huber_loss,
masked_l1_loss,
)
| [
6738,
19720,
1330,
360,
713,
11,
32233,
11,
309,
29291,
11,
4479,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
686,
6888,
13,
4666,
10809,
13,
18053,
62,
2256,
13,
18053,
62,
18170,
1330,
357,
... | 2.70229 | 131 |
#!/usr/bin/python
#
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils._text import to_native
from ..module_utils.dict_utils import copy_dict, equal_dicts, merge_dicts
from ..module_utils.module import BlockchainModule
from ..module_utils.organizations import Organization
from ..module_utils.utils import get_console
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: external_organization
short_description: Manage an external Hyperledger Fabric organization
description:
- Import or remove an external Hyperledger Fabric organization by using the IBM Blockchain Platform.
- A Hyperledger Fabric organziation is also known as a Membership Services Provider (MSP).
- This module works with the IBM Blockchain Platform managed service running in IBM Cloud, or the IBM Blockchain
Platform software running in a Red Hat OpenShift or Kubernetes cluster.
author: Simon Stone (@sstone1)
options:
api_endpoint:
description:
- The URL for the IBM Blockchain Platform console.
type: str
required: true
api_authtype:
description:
- C(ibmcloud) - Authenticate to the IBM Blockchain Platform console using IBM Cloud authentication.
You must provide a valid API key using I(api_key).
- C(basic) - Authenticate to the IBM Blockchain Platform console using basic authentication.
You must provide both a valid API key using I(api_key) and API secret using I(api_secret).
type: str
required: true
api_key:
description:
- The API key for the IBM Blockchain Platform console.
type: str
required: true
api_secret:
description:
- The API secret for the IBM Blockchain Platform console.
- Only required when I(api_authtype) is C(basic).
type: str
api_timeout:
description:
- The timeout, in seconds, to use when interacting with the IBM Blockchain Platform console.
type: int
default: 60
api_token_endpoint:
description:
- The IBM Cloud IAM token endpoint to use when using IBM Cloud authentication.
- Only required when I(api_authtype) is C(ibmcloud), and you are using IBM internal staging servers for testing.
type: str
default: https://iam.cloud.ibm.com/identity/token
state:
description:
- C(absent) - An organization matching the specified name will be stopped and removed.
- C(present) - Asserts that an organization matching the specified name and configuration exists.
If no organization matches the specified name, an organization will be created.
If an organization matches the specified name but the configuration does not match, then the
organization will be updated, if it can be. If it cannot be updated, it will be removed and
re-created with the specified configuration.
type: str
default: present
choices:
- absent
- present
name:
description:
- The name of the external organization.
- Only required when I(state) is C(absent).
type: str
organization:
description:
- The definition of the external organization
- Only required when I(state) is C(present).
type: dict
suboptions:
name:
description:
- The name of the organization.
type: str
msp_id:
description:
- The MSP ID for the organization.
type: str
certificate_authority:
description:
- The certificate authority to use to build this organization.
- You can pass a string, which is the display name of a certificate authority registered
with the IBM Blockchain Platform console.
- You can also pass a dictionary, which must match the result format of one of the
M(certificate_authority_info) or M(certificate_authority) modules.
type: raw
root_certs:
description:
- The list of root certificates for this organization.
- Root certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
intermediate_certs:
description:
- The list of intermediate certificates for this organization.
- Intermediate certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
admins:
description:
- The list of administrator certificates for this organization.
- Administrator certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
revocation_list:
description:
- The list of revoked certificates for this organization.
- Revoked certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
tls_root_certs:
description:
- The list of TLS root certificates for this organization.
- TLS root certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
tls_intermediate_certs:
description:
- The list of TLS root certificates for this organization.
- TLS intermediate certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
fabric_node_ous:
description:
- Configuration specific to the identity classification.
type: dict
suboptions:
enable:
description:
- True if identity classification is enabled for this organization, false otherwise.
default: true
type: boolean
admin_ou_identifier:
description:
- Configuration specific to the admin identity classification.
type: dict
suboptions:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
default: admin
client_ou_identifier:
description:
- Configuration specific to the client identity classification.
type: dict
suboptions:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
default: client
peer_ou_identifier:
description:
- Configuration specific to the peer identity classification.
type: dict
suboptions:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
default: peer
orderer_ou_identifier:
description:
- Configuration specific to the orderer identity classification.
type: dict
suboptions:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
default: orderer
organizational_unit_identifiers:
description:
- The list of organizational unit identifiers for this organization.
type: list
elements: dict
suboptions:
certificate:
description:
- The root or intermediate certificate for this organizational unit identifier.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier.
type: str
notes: []
requirements: []
'''
EXAMPLES = '''
- name: Import the organization
ibm.blockchain_platform.external_organization:
status: present
api_endpoint: https://ibp-console.example.org:32000
api_authtype: basic
api_key: xxxxxxxx
api_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
organization: "{{ lookup('file', 'Org1.json') }}"
- name: Remove the imported organization
ibm.blockchain_platform.external_organization:
state: absent
api_endpoint: https://ibp-console.example.org:32000
api_authtype: basic
api_key: xxxxxxxx
api_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
name: Org1
'''
RETURN = '''
---
organization:
description:
- The organization.
returned: when I(state) is C(present)
type: dict
contains:
name:
description:
- The name of the organization.
type: str
sample: Org1
msp_id:
description:
- The MSP ID for the organization.
type: str
sample: Org1MSP
root_certs:
description:
- The list of root certificates for this organization.
- Root certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
intermediate_certs:
description:
- The list of intermediate certificates for this organization.
- Intermediate certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
admins:
description:
- The list of administrator certificates for this organization.
- Administrator certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
revocation_list:
description:
- The list of revoked certificates for this organization.
- Revoked certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
tls_root_certs:
description:
- The list of TLS root certificates for this organization.
- TLS root certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
tls_intermediate_certs:
description:
- The list of TLS root certificates for this organization.
- TLS intermediate certificates must be supplied as base64 encoded PEM files.
type: list
elements: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
fabric_node_ous:
description:
- Configuration specific to the identity classification.
type: dict
contains:
enable:
description:
- True if identity classification is enabled for this organization, false otherwise.
sample: true
type: boolean
admin_ou_identifier:
description:
- Configuration specific to the admin identity classification.
type: dict
contains:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
sample: admin
client_ou_identifier:
description:
- Configuration specific to the client identity classification.
type: dict
contains:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
sample: client
peer_ou_identifier:
description:
- Configuration specific to the peer identity classification.
type: dict
contains:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
sample: peer
orderer_ou_identifier:
description:
- Configuration specific to the orderer identity classification.
type: dict
contains:
certificate:
description:
- The root or intermediate certificate for this identity classification.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier for this identity classification.
type: str
sample: orderer
organizational_unit_identifiers:
description:
- The list of organizational unit identifiers for this organization.
type: list
elements: dict
contains:
certificate:
description:
- The root or intermediate certificate for this organizational unit identifier.
- Root or intermediate certificates must be supplied as base64 encoded PEM files.
type: str
sample: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
organizational_unit_identifier:
description:
- The organizational unit (OU) identifier.
type: str
sample: acctdept
'''
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
83... | 1.995446 | 9,442 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
tkRAD - tkinter Rapid Application Development library
(c) 2013+ Raphaël SEBAN <motus@laposte.net>
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.
If not, see: http://www.gnu.org/licenses/
"""
# lib imports
import traceback
import tkinter as TK
import tkinter.messagebox as MB
from . import rad_widget_base as RW
from . import rad_statusbar as SB
from ..xml import rad_xml_menu as XM
from ..core import tools
class RADWindowBase (RW.RADWidgetBase):
r"""
Base window class for use in subclasses such as RADWindow
(Toplevel) and RADMainWindow (Tk);
supports all RADWidgetBase app-wide services by default;
supports on-board self.statusbar widget by default;
supports on-board self.mainframe widget container by default;
supports on-board RADXMLMenu self.topmenu by default;
supports main window states 'maximized', 'minimized', 'normal'
and 'hidden' in gettings and settings;
implements connected slots for event signals "PendingTaskOn"
and "PendingTaskOff";
will pop up a tkinter messagebox with last traceback on raised
exceptions during inits;
will also report entire traceback in stderr on raised
exceptions during inits;
"""
# class constant defs
WINDOW_ID = "mainwindow"
def __init__ (self, tk_owner=None, slot_owner=None, **kw):
r"""
class constructor - main inits
no return value (void);
"""
try:
# member inits
self.WINDOW_ID_STATE = "{}_state".format(self.WINDOW_ID)
# override keyword arguments
kw.update(subclassed=True)
# super class inits
RW.RADWidgetBase.__init__(
self, tk_owner=tk_owner, slot_owner=slot_owner, **kw
)
self._init__main(**kw)
self.init_widget(**kw)
self.statusbar.notify(_("All inits done. OK."))
except:
MB.showerror(
_("Caught exception"),
_("An exception has occurred:\n\n{msg}")
.format(msg=traceback.format_exc(limit=1))
)
raise
exit(1)
# end try
# end def
def _init__main (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# hook methods
self._init_wm_protocols(**kw)
self._init_members(**kw)
self._init_options(**kw)
self._init_geometry(**kw)
self._init_title(**kw)
self._init_topmenu(**kw)
self._init_mainframe(**kw)
self._init_statusbar(**kw)
self._init_layout(**kw)
self._init_events(**kw)
# end def
def _init_events (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# bind events
self.bind("<Configure>", self._slot_root_changed)
self.events.connect_dict(
{
"quit": self._slot_quit_app,
"Quit": self._slot_quit_app,
"quitapp": self._slot_quit_app,
"QuitApp": self._slot_quit_app,
"PendingTaskOn": self._slot_pending_task_on,
"PendingTaskOff": self._slot_pending_task_off,
"ToggleStatusbar": self.statusbar.toggle,
}
)
# end def
def _init_geometry (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# make main window resizable
self.resizable(
**tools.choose(
kw.get("resizable"),
dict(width=True, height=True),
)
)
self.minsize(
**tools.choose(
kw.get("minsize"),
dict(width=100, height=100),
)
)
# main window geometry inits
# CAUTION: this is useful even while maximized
self.geometry(
tools.choose_str(
kw.get("geometry"),
self.options["geometry"].get(self.WINDOW_ID),
"100x100",
)
)
# maximize main window?
self.set_window_state(
tools.choose_str(
kw.get("window_state"),
self.options["geometry"].get(self.WINDOW_ID_STATE),
"normal",
)
)
# end def
def _init_layout (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# layout inits
self.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=0)
self.columnconfigure(0, weight=1)
# end def
def _init_mainframe (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# widget inits --- automagic gridding /!\
_frame = TK.Label(
self, text=_("Put here your own Frame() widget.")
)
self.mainframe = kw.get("mainframe") or _frame
self.tk_owner = self.mainframe
self.tk_children = self.mainframe.winfo_children
self.mainframe.quit_app = self._slot_quit_app
# end def
def _init_members (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# member inits
self.STATE = {
"hidden": self.hide,
"minimized": self.minimize,
"maximized": self.maximize,
"normal": self.show,
}
self.__pending_task = False
# end def
def _init_options (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# rc options default sections inits
self.options.set_sections("geometry", "gui", "topmenu")
# set some default values
self.options.set_defaults(
**tools.choose(
kw.get("rc_defaults"),
{
"maximized": "0",
self.WINDOW_ID: "640x480+20+20",
self.WINDOW_ID_STATE: "normal",
},
)
)
# load options
self.options.load()
# end def
def _init_statusbar (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# widget inits
self.statusbar = kw.get("statusbar") or SB.RADStatusBar(self)
# end def
def _init_title (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# main window title inits
_app_title = None
if hasattr(self.app, "APP") and tools.is_pdict(self.app.APP):
_app_title = self.app.APP.get("title")
# end if
self.title(
tools.choose_str(
kw.get("title"),
_app_title,
_(self.WINDOW_ID),
"Window",
)
)
# end def
def _init_topmenu (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# widget inits
self.topmenu = kw.get("topmenu") or XM.RADXMLMenu(self)
if isinstance(self.topmenu, XM.RADXMLMenu):
self.topmenu.set_xml_filename(
kw.get("topmenu_xml_filename") or "topmenu"
)
# end if
# end def
def _init_wm_protocols (self, **kw):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# capture window manager's events handling
self.protocol("WM_DELETE_WINDOW", self._slot_quit_app)
# end def
def _set_state (self, state):
r"""
protected method def;
this could be overridden in subclass;
no return value (void);
"""
# param inits
state = str(state).lower()
# param controls - unsupported
if state not in self.STATE:
raise ValueError(
_("window's state should be one of {slist}.")
.format(slist=str(tuple(self.STATE.keys())))
)
# reset value
state = "normal"
# end if
# member inits
self.__window_state = state
# update rc options
self.options["geometry"][self.WINDOW_ID_STATE] = str(state)
# end def
def _slot_pending_task_off (self, *args, **kw):
r"""
slot method for event signal "PendingTaskOff";
no return value (void);
"""
self.__pending_task = False
self.statusbar.notify(
tools.choose_str(
kw.get("message"),
kw.get("info"),
_("An important task has finished."),
)
)
# end def
def _slot_pending_task_on (self, *args, **kw):
r"""
slot method for event signal "PendingTaskOn";
no return value (void);
"""
self.__pending_task = True
self.statusbar.notify(
tools.choose_str(
kw.get("message"),
kw.get("info"),
_("An important task has started."),
)
)
# end def
def _slot_quit_app (self, *args, **kw):
"""
slot method before quitting app definitely;
asks for confirmation in dialog before acting;
this should be overridden in subclass in order to
meet your own needs;
no return value (void);
"""
if self.get_pending_task():
MB.showwarning(
_("Pending operation"),
_(
"Some very important task is pending by now. "
"Please wait for completion and then retry."
),
parent=self,
)
# hook method
elif self.confirm_quit(*args, **kw):
# hook method
self.on_quit_app(*args, **kw)
# really quit app
self.quit()
# end if
# end def
def _slot_root_changed (self, tk_event=None, *args, **kw):
r"""
slot method for tkinter event "<Configure>";
manages special non-tkinter case of 'maximized' window
state and updates rc config options on-the-fly;
no return value (void);
"""
# /!\ avoid useless calls from child widgets /!\
if hasattr(tk_event, "widget") and \
not isinstance(tk_event.widget, self.__class__):
return
# end if
# look for WM_STATE_MAXIMIZED
try:
_maximized = int(self.attributes("-zoomed"))
except:
_maximized = 0
# end try
if _maximized:
self._set_state("maximized")
else:
self._set_state("normal")
self.options["geometry"][self.WINDOW_ID] = str(self.geometry())
# end if
# end def
def confirm_quit (self, *args, **kw):
"""
hook method to be reimplemented in subclass;
put here user confirmation dialog for quitting app;
"""
# user confirmation dialog
return MB.askokcancel(
_("Quit app?"),
_("Are you sure you want to quit this application?"),
parent=self,
)
# end def
def connect_statusbar (self, stringvarname):
r"""
connects self.statusbar.toggle_var to a self.topmenu or
a self.mainframe implicit menu checkbutton control var
of type StringVar;
no return value (void);
"""
# make sure self.statusbar is of type SB.RADStatusBar
if isinstance(self.statusbar, SB.RADStatusBar):
# control var inits
_cvar = None
if hasattr(self.topmenu, "get_stringvar"):
_cvar = self.topmenu.get_stringvar(stringvarname)
# end if
if not _cvar and hasattr(self.mainframe, "get_stringvar"):
_cvar = self.mainframe.get_stringvar(stringvarname)
# end if
if not _cvar:
_cvar = TK.StringVar()
# end if
self.statusbar.toggle_var = _cvar
self.statusbar.toggle()
else:
raise TypeError(
_(
"could *NOT* connect statusbar "
"to control variable named '{cvar}': "
"current statusbar type {obj} is *NOT SUPPORTED*"
)
.format(
cvar=str(stringvarname),
obj=repr(self.statusbar),
)
)
# end if
# end def
def get_pending_task (self):
r"""
returns current "pending task" flag value;
"""
return self.__pending_task
# end def
def get_window_state (self):
r"""
returns this main window state i.e. one of 'minimized',
'maximized', 'normal' or 'hidden' string of chars;
"""
return self.__window_state
# end def
def hide (self, *args, **kw):
r"""
hides this main window;
no return value (void);
"""
self.withdraw()
self._set_state("hidden")
# end def
@property
def mainframe (self):
"""
@property handler for the mainframe widget container;
developers may set their own widget building into a
TK.Frame widget container and then set mainframe to that
container e.g. self.mainframe=TK.Frame(self);
RADWindowBase already comes with a preloaded widget for
example giving;
this can be overridden in subclasses by redefining
self._init_mainframe() protected virtual method;
"""
return self.__mainframe
# end def
@mainframe.setter
# end def
@mainframe.deleter
# end def
def maximize (self, *args, **kw):
r"""
maximizes this main window;
no return value (void);
"""
# WM attributes control
if "-zoomed" in self.attributes():
self.deiconify()
self.attributes("-zoomed", "1")
self._set_state("maximized")
# Tk() main window has
# weird behaviour sometimes
self.update()
else:
# warn users
print("[WARNING] could *NOT* maximize main window.")
# end if
# end def
def minimize (self, *args, **kw):
r"""
minimizes (iconifies) this main window;
no return value (void);
"""
self.iconify()
self._set_state("minimized")
# end def
def on_quit_app (self, *args, **kw):
"""
hook method to be reimplemented in subclass;
"""
# put your own code here
self.options.save()
# end def
def run (self):
r"""
enters tkinter events main loop;
no return value (void);
"""
# enter the loop
self.mainloop()
self.destroy()
# end def
def set_window_state (self, state):
r"""
sets this main window state i.e. one of 'minimized',
'maximized', 'normal' or 'hidden' string of chars;
sets also *REAL* window state along value;
no return value (void);
"""
# param inits
state = str(state).lower()
# get appropriate method call
_method = self.STATE.get(state)
if callable(_method):
_method()
else:
raise ValueError(
_("unsupported window state '{w_state}'.")
.format(w_state=state)
)
# end if
# end def
def show (self, *args, **kw):
r"""
shows (deiconifies) this main window;
no return value (void);
"""
self.deiconify()
self._set_state("normal")
# end def
@property
def statusbar (self):
r"""
@property handler for internal statusbar widget;
developers may replace this widget with any other tkinter
subclassed tk.Widget() object of their own;
will raise TypeError if widget is not a tkinter
subclassed tk.Widget() object;
RADWindowBase already comes with a preloaded widget;
this can be overridden in subclasses by redefining
self._init_statusbar() protected virtual method;
"""
return self.__statusbar
# end def
@statusbar.setter
# end def
@statusbar.deleter
# end def
@property
def topmenu (self):
r"""
@property handler for internal top menu object;
developers may replace this object with any other tkinter
subclassed tk.Menu() object of their own;
will raise TypeError if object is not a tkinter subclassed
tk.Menu() object or at least a tkRAD.xml.RADXMLMenu()
derived object;
RADWindowBase already comes with a preloaded top menu;
this can be overridden in subclasses by redefining
self._init_topmenu() protected virtual method;
"""
return self.__topmenu
# end def
@topmenu.setter
# end def
@topmenu.deleter
# end def
def xml_build (self, filename=None, silent_mode=False):
r"""
this is a shortcut for self.mainframe.xml_build();
no return value (void);
"""
if hasattr(self, "mainframe"):
if hasattr(self.mainframe, "xml_build"):
return self.mainframe.xml_build(filename, silent_mode)
else:
raise AttributeError(
_(
"current mainframe object '{obj_type}' "
"does *NOT* support xml_build() method."
).format(obj_type=repr(self.mainframe))
)
# end if
# end if
return False
# end def
# end class RADWindowBase
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
256,
74,
49,
2885,
532,
256,
74,
3849,
26430,
15678,
7712,
5888,
628,
220,
220,
... | 2.023145 | 9,635 |
import os
import shutil
import random
from tqdm import tqdm
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from graph.model.horizon_base import HorizonBase, Corner
from graph.loss.horizon_base_loss import HorizonBaseLoss as Loss
from data.dataset import INGAN_DatasetV3 as INGAN_Dataset
from utils.metrics import AverageMeter
from utils.train_utils import set_logger, count_model_prameters
cudnn.benchmark = True
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
4738,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
1891,
2412,
1330,
269,
463,
20471,
198,
6738,
28034,
... | 3.331325 | 166 |
#!/usr/bin/env python
"""Upserts Organization records with data from Salesforce Accounts.
"""
import logging
import os
from django.core.management.base import BaseCommand
import iss.models
import iss.membersuite
import iss.utils
logger = logging.getLogger(os.path.basename(__file__))
def upsert_memberships_recently_modified(since=7, get_all=False):
"""Upsert Memberships modified in last `since` days.
First syncs MembershipProduct objects, then Memberships.
Then loops through memberships and makes sure ForeignKey is properly set
on the owner organization.
"""
logger.info('upserting MembershipProducts')
iss.utils.upsert_membership_products()
logger.info('upserting memberships modified in last {since} days'.
format(since=since))
iss.utils.upsert_memberships(since, get_all)
logger.info('upserting membership owners')
iss.utils.upsert_membership_ownerships()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
52,
862,
861,
82,
12275,
4406,
351,
1366,
422,
17329,
3174,
35584,
13,
198,
37811,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8... | 3.120401 | 299 |
import os
import torch
from torch.distributed import get_world_size
from transformers import BertConfig, BertTokenizer
from zero.common.utils import CONFIG, ModelFromHF, get_model_size
from bert.colossalai_utils.model_zoo.colo_tp1dcol_bert import ColoBertMaskedLMLoss, ColoBertForMaskedLM, create_colo_bert_pipeline_model
_bert_small = dict(
seq_length=512,
vocab_size=32400,
hidden_size=768,
num_heads=12,
depth=12,
ff_size=3072,
checkpoint=False,
evaluation='ppl',
)
_bert_configurations = dict(
bert=_bert_small,
bert_small=_bert_small,
)
_default_hyperparameters = dict(
tokenize_mode='concat',
batch_size=8,
learning_rate=5e-5,
weight_decay=1e-2,
num_epochs=2,
warmup_epochs=1,
steps_per_epoch=100,
)
| [
11748,
28686,
201,
198,
201,
198,
11748,
28034,
201,
198,
6738,
28034,
13,
17080,
6169,
1330,
651,
62,
6894,
62,
7857,
201,
198,
6738,
6121,
364,
1330,
22108,
16934,
11,
22108,
30642,
7509,
201,
198,
201,
198,
6738,
6632,
13,
11321,
1... | 2.206989 | 372 |
# Generated by Django 2.0.10 on 2019-02-05 06:24
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
940,
319,
13130,
12,
2999,
12,
2713,
9130,
25,
1731,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.125 | 40 |
#-*- coding:utf-8 -*-
import wx
from wx.lib import buttons
# def MakeBitmap(self, color):
# bmp = wx.EmptyBitmap(16, 15)
# dc = wx.MemoryDC(bmp)
# dc.SetBackground(wx.Brush(color))
# dc.Clear()
# dc.SelectObject(wx.NullBitmap)
# return bmp
if __name__ == '__main__':
app = App(False)
app.MainLoop()
| [
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
266,
87,
198,
6738,
266,
87,
13,
8019,
1330,
12163,
628,
628,
220,
220,
220,
1303,
825,
6889,
13128,
8899,
7,
944,
11,
3124,
2599,
198,
220,
220,
220,
1303,
... | 2.01105 | 181 |
import sys
from time import sleep
from mplayerutil import *
if __name__ == '__main__':
main()
| [
11748,
25064,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
285,
7829,
22602,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.914286 | 35 |
from Bio.PDB import *
import os
class PDBFilter:
"""
Class used to provide a filter for reading pdb files
Attributes
----------
parser : None
A variable to store an instance of the PDBParser Object
struct : Structure
A variable used to store the structure object of the protein complex passed as input.
residue_map : dict
A dictionary used for mapping residues names (keys) to their one-letter name (values).
pdb_seq_dict : dict
A dictionary used to store the chains (keys) of the input pdb file and its sequences (values).
pdb_atom_dict: dict
A dictionary used to store the chains (keys) of the input pdb file and it atoms (values).
Methods
-------
get_pdb_name_from_path(path=str)
Extracts pbd name from file path
get_seq_data(structures=List)
Populates the pdb_seq_dict with data for the chains of the structure and its sequences.
get_atom_data(structures=List)
Populates the pdb_atom_dict with data for the chains of the structure and its atoms.
write_pairings_files(pdb_list)
Writes files containing the atoms for all the pairs of chains for the protein passed as input
pair_chain(pdb=str)
Pairs up all the different chains within the same protein and writes a file for each pair.
The file contains the atoms for the two chains in the pair.
write_interaction_file(pdb=str,chain1=List,chain2=List)
Checks if the repository for storing the interaction files exists. If it exists it writes the files and stores
them into the directory, if not it first creates the directory and then creates the files.
write_interaction_file_aux(pdb=str,chain1=List,chain2=List)
Formats the data and writes it in the output interaction files.
"""
parser = None
struct = None
residue_map = {
"ALA": "A",
"CYS": "C",
"ASP": "D",
"GLU": "E",
"PHE": "F",
"GLY": "G",
"HIS": "H",
"ILE": "I",
"LYS": "K",
"LEU": "L",
"MET": "M",
"ASN": "N",
"PRO": "P",
"GLN": "Q",
"ARG": "R",
"SER": "S",
"THR": "T",
"VAL": "V",
"TRP": "W",
"TYR": "Y"
}
pdb_seq_dict = {}
pdb_atom_dict = {}
@staticmethod
@staticmethod
| [
6738,
16024,
13,
5760,
33,
1330,
1635,
198,
11748,
28686,
628,
198,
4871,
350,
11012,
22417,
25,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
5016,
973,
284,
2148,
257,
8106,
329,
3555,
279,
9945,
3696,
628,
220,
220,
220,
49213,
... | 2.47341 | 959 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# ---------------------------
# Author: deangao
# Copyright: 2016 deangao
# Version: v1.0.0
# Created: 2016/2/24
# ---------------------------
__author__ = 'deangao'
'''
Python和C在变量的操作上有些不同,即在内存管理上的差别:
http://www.cnblogs.com/CBDoctor/p/3781078.html
http://www.cnblogs.com/vamei/p/3232088.html
'''
# ---id---
'''
id函数返回Python对象在内存中的“地址”(即对象的标识)
'''
# ---Python---
a = 1
print 'a is', a, id(a)
a = 2
print 'a is', a, id(a)
# 此处b并不是一个“新变量”,而是指向a的一个引用
b = 2
print 'b is', b, id(b)
b = a
print 'b is', b, id(b)
b = 3
print 'a is', a
print 'b is', b, id(b)
# ---Python的输出----
'''
a is 1 34228768
a is 2 34228756
b is 2 34228756
b is 2 34228756
a is 2
b is 3 34228744
'''
# ---C---
'''
#include <stdlib.h>
#include <stdio.h>
int main(void){
int a = 12;
int b = 12;
printf("int a = 12; int b = 12; a is %d, addr of a is %x\n", a, &a);
printf("int a = 12; int b = 12; b is %d, addr of b is %x\n", b, &b);
b = a;
printf("b = a; b is %d, addr of b is %x\n", b, &b);
}
'''
# ---C的输出---
'''
int a = 12; int b = 12; a is 12, addr of a is 559e423c
int a = 12; int b = 12; b is 12, addr of b is 559e4238
b = a; b is 12, addr of b is 559e4238
'''
# ---总结---
# 1. 与C、C++相比, Python的变量不用事先声明其类型,而是在运行时才确定的,即动态特性。
# 2. Python不用像C、C++那样去手动回收内存,它有自己的垃圾回收机制。
"""
http://www.jb51.net/article/61902.htm
1. 赋值是将一个对象的地址赋值给一个变量,让变量指向该地址( 旧瓶装旧酒 )。
2. 浅拷贝是在另一块地址中创建一个新的变量或容器,但是容器内的元素的地址均是源对象的元素的地址的拷贝。也就是说新的容器中指向了旧的元素( 新瓶装旧酒 )。
3. 深拷贝是在另一块地址中创建一个新的变量或容器,同时容器内的元素的地址也是新开辟的,仅仅是值相同而已,是完全的副本。也就是说( 新瓶装新酒 )。
"""
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
220,
22369,
6329,
198,
2,
6434,
25,
390,
648,
5488,
198,
2,
15069,
25,
1584,
390,
648,
5488,
198,
2,
10628,
25,
410,
... | 1.252033 | 1,230 |
from api import db
from datetime import datetime
| [
6738,
40391,
1330,
20613,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198
] | 3.642857 | 14 |
from pygame.constants import MOUSEBUTTONDOWN, MOUSEBUTTONUP
from segment import Segment
import pygame as pg
pg.init()
pg.display.set_caption('Inverse kinematics')
CLOCK = pg.time.Clock()
BLACK = (0, 0, 0)
RED = (255, 0, 0)
SCREEN_SIZE = (640, 640)
PIXEL_SIZE = 8
SCREEN = pg.display.set_mode(SCREEN_SIZE)
CENTER = (SCREEN_SIZE[0]//2, SCREEN_SIZE[1] //
2)
segment_selected = False
mouse_pressed = False
segment_movement = False
angle = 0
length = 10
segment = Segment(CENTER[0], CENTER[1], length,
angle) # first, initial segment
segment = segment.generate_segments(20)
should_draw = True
while should_draw:
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
should_draw = False
if event.type == MOUSEBUTTONDOWN:
mouse_pressed = True
segment_movement = True
if event.type == MOUSEBUTTONUP:
mouse_pressed = False
segment_movement = False
if mouse_pressed and segment_movement:
follow_mouse(segment)
if should_draw == False:
break
SCREEN.fill(BLACK)
draw_segments(SCREEN, segment)
pg.display.flip()
CLOCK.tick(10)
| [
6738,
12972,
6057,
13,
9979,
1187,
1330,
337,
2606,
5188,
47526,
11357,
41925,
11,
337,
2606,
5188,
47526,
11357,
8577,
201,
198,
6738,
10618,
1330,
1001,
5154,
201,
198,
11748,
12972,
6057,
355,
23241,
201,
198,
201,
198,
6024,
13,
150... | 2.094527 | 603 |
import numpy as np
from astropy.io import fits
from scipy.optimize import curve_fit
from LDC_version.DensityClust.make_plot import make_plot
from LDC_version.DensityClust.get_xx import get_xyz
# from sklearn.linear_model import RANSACRegressor
def gauss_3d_rotate(x,A,x0,s1,y0,s2,theta,v0,s3):
"""
三维高斯分布,在x-y平面上存在旋转
:param x: [x,y,v] M*3的数组
:param A: peak
:param x0: Cen1
:param s1: size1
:param y0: Cen2
:param s2: size2
:param theta: 旋转角
:param v0: Cen3
:param s3: size3
:return:
M*1的数组
"""
return A * np.exp( -(((x[:, 0]-x0)**2) * (np.cos(theta)**2 / (2*s1**2) + np.sin(theta)**2 / (2*s2**2)) +
((x[:, 1]-y0)**2) * (np.sin(theta)**2 / (2*s1**2) + np.cos(theta)**2 / (2*s2**2)) +
(x[:, 0]-x0) * (x[:, 1]-y0) * (2*(-np.sin(2*theta) / (4*s1**2) + np.sin(2*theta) / (4*s2**2))) +
((x[:, 2]-v0)**2) / (2*s3**2)))
if __name__ == '__main__':
data = np.zeros([100,100,100])
xx = get_xyz(data)
A,x0,s1,y0,s2,theta,v0,s3 = 10,50.2,5,50.4,10,30/180*np.pi,50.8,5
gauss_data = gauss_3d_rotate(xx,A,x0,s1,y0,s2,theta,v0,s3)
gauss_noise = np.random.random(gauss_data.shape)
gauss_data_3d = np.reshape(gauss_data,data.shape)
make_plot(None, gauss_data_3d + np.reshape(gauss_noise,data.shape))
print(np.where(gauss_data_3d==gauss_data_3d.max()))
print(gauss_data_3d.max())
data = fits.getdata(r'../test_data/test.fits')
xx = get_xyz(data)
y = data.transpose(2,1,0).flatten()
idx = np.where(y>0.01)[0]
xx1 = xx[idx,:]
y1 = y[idx]
popt, pcov = curve_fit(gauss_3d_rotate, xx1, y1, p0=[A+1,x0+5,s1+4,y0+5,s2+4,theta+0.4,v0+5,s3+4])
print(popt)
popt, pcov = curve_fit(gauss_3d_rotate, xx, y)
print(popt) | [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
201,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
12133,
62,
11147,
201,
198,
6738,
406,
9697,
62,
9641,
13,
35,
6377,
2601,
436,
13,
15883,
62,
29... | 1.653025 | 1,124 |
from enum import IntEnum
class States(IntEnum):
"""Map states for tasks to an int."""
unsched = -1
pending = 0
runnable = 1
running = 2
done = 3
failed = 4
dep_fail = 5
retry = 6
if __name__ == "__main__":
print(States.pending)
print(States.done)
print(3 == States.done)
| [
6738,
33829,
1330,
2558,
4834,
388,
628,
198,
4871,
1829,
7,
5317,
4834,
388,
2599,
198,
220,
220,
220,
37227,
13912,
2585,
329,
8861,
284,
281,
493,
526,
15931,
198,
220,
220,
220,
28594,
704,
796,
532,
16,
198,
220,
220,
220,
1331... | 2.34058 | 138 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from geomulator.surface import Surface
def calc_surface(u, v):
"""Calculate surface according to your definition."""
# # Wavy surface
# surface = np.array([
# u,
# v,
# np.exp(- (u**2 + v**2) / 30) * np.cos(np.sqrt(u**2 + v**2))
# ])
# Shere - {one_point}
surface = np.array([
2 * u,
2 * v,
1 - u**2 - v**2
]) / (1 + u**2 + v**2)
return (surface[0], surface[1], surface[2])
# Generate surface object
s = Surface.generate_surface(
calc_surface, u_param=(-10., 10., .01), v_param=(-10., 10., .01))
# Plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(s[0], s[1], s[2])
plt.xlabel('x')
plt.ylabel('y')
ax.set_zlabel('z')
plt.title('Surface')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(s.param[0], s.param[1], s.calculate_gauss_curvature())
plt.xlabel('u')
plt.ylabel('v')
ax.set_zlabel('K')
plt.title('Gauss curvature')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,
18,
35,
198,
198,
6738,
4903,
296,
8927,
13,
42029,
1330,... | 2.164329 | 499 |
# Copyright (C) 2022 Aaron Gibson (eulersidcrisis@yahoo.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""errors.py.
Exceptions for the ibson module.
"""
class BSONError(Exception):
"""General exception for BSON errors."""
class BSONEncodeError(BSONError):
"""Exception raised while encoding a document to a byte stream."""
@property
def key(self):
"""Key this error pertains to (could be the empty string)."""
return self._key
@property
def fpos(self):
"""Return the position in the stream that the error pertains to.
NOTE: This can return None if the error does not pertain to the
stream position or if it otherwise could not be extracted.
"""
return self._fpos
def __str__(self):
"""Return this exception as a string."""
msg = super(BSONEncodeError, self).__str__()
if self._fpos is not None:
return u'Encode key: {}, fpos: {} -- {}'.format(
self.key, self.fpos, msg)
return u'Encode key: {} -- {}'.format(self.key, msg)
class BSONDecodeError(BSONError):
"""Exception raised while decoding the stream."""
@property
def key(self):
"""Key this error pertains to (could be the empty string)."""
return self._key
@property
def fpos(self):
"""Return the position in the stream that the error pertains to.
NOTE: This can return None if the error does not pertain to the
stream position or if it otherwise could not be extracted.
"""
return self._fpos
def __str__(self):
"""Return this exception as a string."""
msg = super(BSONDecodeError, self).__str__()
if self._fpos is not None:
return u'Decode key: {}, fpos: {} -- {}'.format(
self.key, self.fpos, msg)
return u'Decode key: {} -- {}'.format(self.key, msg)
class InvalidBSONOpcode(BSONDecodeError):
"""Exception denoting an invalid BSON opcode."""
| [
2,
15069,
357,
34,
8,
33160,
12139,
20400,
357,
68,
377,
364,
312,
66,
42841,
31,
40774,
13,
785,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779... | 2.774834 | 906 |
import json
# Handles serial transmission of data
# Calculate gradient of a line found
# Constrain value within given range | [
11748,
33918,
198,
198,
2,
7157,
829,
11389,
11478,
286,
1366,
198,
198,
2,
27131,
378,
31312,
286,
257,
1627,
1043,
198,
198,
2,
1482,
2536,
391,
1988,
1626,
1813,
2837
] | 4.064516 | 31 |
__version__ = """1.29.0""" | [
834,
9641,
834,
796,
37227,
16,
13,
1959,
13,
15,
37811
] | 2.363636 | 11 |
# -*- coding: utf-8 -*-
"""The repository of all the binary classifiers implemented with the package.
This module exposes a series of `online machine learning`_ models that can
be used for binary classification. The models learn by taking one data point
at a time and can achieve very good accuracy results.
One of the features of the models is to allow for usage of class weights
during the training process. They also permit to train using a single
data point with the `partial_fit` method.
Examples:
Training a model
>>> from olpy.classifiers import AROW
>>> from olpy.datasets import load_a1a
>>> from sklearn.metrics import accuracy_score
>>> a1a = load_a1a()
>>> model = AROW(random_state = 32)
>>> _ = model.fit(a1a.train_data, a1a.train_target)
>>> prediction = model.predict(a1a.test_data)
>>> accuracy_score(a1a.test_target, prediction)
0.8379312572683809
Using the weights to change the performance
>>> model = AROW(random_state=32, class_weight=np.list([0.4, 0.6]))
>>> _ = model.fit(a1a.train_data, a1a.train_target)
>>> prediction = model.predict(a1a.test_data)
>>> accuracy_score(a1a.test_target, prediction)
0.838254296417262
Doing a partial learn (meant for `active learning` processes)
>>> import random
>>> import numpy as np
>>> random.seed(32)
>>> model = AROW(random_state = 32)
>>> for i in random.sample(range(a1a.train_data.shape[0]), k=10):
... model = model.partial_fit(np.expand_dims(a1a.train_data[i], axis=0), [a1a.train_target[i]])
>>> prediction = model.predict(a1a.test_data)
>>> accuracy_score(a1a.test_target, prediction)
0.13551492440883836
.. _online machine learning:
https://en.wikipedia.org/wiki/Online_machine_learning
.. _ active learning:
https://en.wikipedia.org/wiki/Active_learning_(machine_learning)
"""
__all__ = [
'IELLIP', 'NHerd', 'OGD', 'PA', 'PA_I', 'PA_II', 'Perceptron',
'SecondOrderPerceptron', 'ALMA', 'AROW', 'CW', 'SCW', 'SCW2',
'NAROW', 'ROMMA', 'aROMMA', 'ECCW'
]
from . iellip import IELLIP
from . nherd import NHerd
from . ogd import OGD
from . pa import PA
from . pa1 import PA_I
from . pa2 import PA_II
from . perceptron import Perceptron
from . sop import SecondOrderPerceptron
from . alma import ALMA
from . arow import AROW
from . cw import CW
from . scw import SCW
from . scw2 import SCW2
from . eccw import ECCW
from . narow import NAROW
from . romma import ROMMA
from . aromma import aROMMA
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
464,
16099,
286,
477,
262,
13934,
1398,
13350,
9177,
351,
262,
5301,
13,
201,
198,
201,
198,
1212,
8265,
32142,
257,
2168,
286,
4600,
25119,
4572,
4673,
63,
... | 2.591775 | 997 |
# http://ipset.netfilter.org/iptables-extensions.man.html#lbAE | [
2,
2638,
1378,
2419,
316,
13,
3262,
24455,
13,
2398,
14,
10257,
2977,
12,
2302,
5736,
13,
805,
13,
6494,
2,
23160,
14242
] | 2.695652 | 23 |
# Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.conf import settings
def resolve_file_url(url):
'''
Make all file urls absolute.
'''
if url.startswith('/'):
# For local development, the environment variable DJANGO_STORAGE_BACKEND is
# set to "filesytem" and files (attachments, media files etc.) are stored on
# the filesystem and served via nginx. Example URL:
# http://odk.aether.local/media/<path-to-file>.
ssl_header = settings.SECURE_PROXY_SSL_HEADER
scheme = ssl_header[1] if ssl_header else 'http'
return f'{scheme}://{settings.HOSTNAME}{url}'
# When the environment variable DJANGO_STORAGE_BACKEND is set to "s3" or
# "gcs", all file urls will be absolute. Example:
# https://abcd.s3.amazonaws.com/<file-name>?AWSAccessKeyId=ABC&Signature=ABC%3D&Expires=1534775613.
return url
| [
2,
15069,
357,
34,
8,
2864,
416,
304,
18081,
5478,
1058,
2638,
1378,
2503,
13,
68,
18081,
17584,
30997,
13,
2398,
198,
2,
198,
2,
4091,
262,
28536,
2393,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
198,
2,
... | 2.988571 | 525 |
import warnings
warnings.filterwarnings('ignore')
from env.environment import PortfolioEnv
import plot
from plot import add_curve, save_plot
import os
import pandas as pd
from pyfolio import timeseries
from pypfopt import EfficientFrontier, risk_models, expected_returns
plot.initialize()
file = open(f'env/data/DJIA_2019/tickers.txt', 'r')
tickers = [line.strip() for line in file.readlines()]
table = pd.DataFrame()
for i in range(len(tickers)):
data = pd.read_csv(f'env/data/DJIA_2019/ticker_{tickers[i]}.csv', parse_dates=True, index_col='Date')
table[data['ticker'][0]] = data['Adj Close']
env = PortfolioEnv(state_type='indicators')
intervals = env.get_intervals()
start = table.index.get_loc(intervals['training'][0])
end = table.index.get_loc(intervals['training'][1])
train_set = table[start:end+1]
buy_hold_history = env.buy_hold_history(*intervals['testing'])
add_curve((buy_hold_history / buy_hold_history[0] - 1) * 1000000, 'Buy&Hold')
mu = expected_returns.mean_historical_return(train_set)
S = risk_models.sample_cov(train_set)
ef = EfficientFrontier(mu, S)
weights = [0] + list(ef.max_sharpe().values())
test(env, weights, 'Max-Sharpe')
ef = EfficientFrontier(mu, S)
weights = [0] + list(ef.min_volatility().values())
test(env, weights, 'Min-Volatility')
save_plot('plots/baselines_testing.png',
title=f"Testing - {intervals['testing'][0].date()} to {intervals['testing'][1].date()}",
x_label='Days', y_label='Cumulative Return (Dollars)')
| [
11748,
14601,
198,
40539,
654,
13,
24455,
40539,
654,
10786,
46430,
11537,
198,
198,
6738,
17365,
13,
38986,
1330,
4347,
13652,
4834,
85,
198,
11748,
7110,
198,
6738,
7110,
1330,
751,
62,
22019,
303,
11,
3613,
62,
29487,
198,
11748,
286... | 2.638596 | 570 |
# Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
def decode_ports(ports, filter_proto):
"""Given a string like '901-902/tcp,30/udp', return a list of ports."""
if not ports:
return []
results = []
for protoport in ports.split(','):
port, proto = protoport.split('/')
if proto != filter_proto:
continue
# Expand ranges
port_range = port.split('-')
if len(port_range) == 2:
results.extend(range(int(port_range[0]), int(port_range[1])+1))
else:
results.append(int(port))
return results
def rule_to_dict(rule, version):
"""Given a lib.FirewallRule object,
return a dict for the iptables module."""
name = '%s from %s, flow %s' % (
rule.service, rule.from_node, rule.flow)
results = []
for proto in ['tcp', 'udp']:
dports = decode_ports(rule.dports, proto)
sports = decode_ports(rule.sports, proto)
# We need to have a port match on both sides,
# otherwise we have the wrong protocol.
if (rule.dports and not dports) or (rule.sports and not sports):
continue
result = {}
result['name'] = name
result['proto'] = proto
src = getattr(rule, 'from_ipv%d' % version)
if src != '::/0' and src != '0/0':
result['src'] = src
if rule.dports:
result['dports'] = dports
if rule.sports:
result['sports'] = sports
results.append(result)
return results
# vim: ts=4: sts=4: sw=4: expandtab
| [
2,
15069,
2864,
34590,
13670,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
198,
2,
5964,
326,
460,
307,
1043,
287,
262,
38559,
24290,
2393,
198,
11748,
9195,
628,
198,
4299,
36899,
62,
3742... | 2.278689 | 732 |
"""
Python sensitivity analysis - run models with varying inputs to produce
visualizations including gradient DataFrames and hex-bin plots
"""
from sensitivity.main import SensitivityAnalyzer
| [
37811,
198,
37906,
14233,
3781,
532,
1057,
4981,
351,
15874,
17311,
284,
4439,
220,
198,
41464,
4582,
1390,
31312,
6060,
35439,
290,
17910,
12,
8800,
21528,
198,
37811,
198,
6738,
14233,
13,
12417,
1330,
14173,
11365,
37702,
9107,
198
] | 4.948718 | 39 |
import random
import requests
from django.core.cache import cache
from swiper import config
from common import keys
from swiper.settings import DEBUG
from worker import celery_app
@celery_app.task
| [
11748,
4738,
198,
198,
11748,
7007,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
198,
6738,
1509,
9346,
1330,
4566,
198,
6738,
2219,
1330,
8251,
198,
6738,
1509,
9346,
13,
33692,
1330,
16959,
198,
6738,
8383,
1330,
18... | 3.672727 | 55 |
'''
Speech to text transcription, from your mike, in real-time, using IBM Watson.
'''
import argparse
import time
import fluteline
import watson_streaming
import watson_streaming.utilities
if __name__ == '__main__':
main()
| [
7061,
6,
198,
5248,
3055,
284,
2420,
26955,
11,
422,
534,
285,
522,
11,
287,
1103,
12,
2435,
11,
1262,
19764,
14959,
13,
198,
7061,
6,
198,
198,
11748,
1822,
29572,
198,
11748,
640,
198,
198,
11748,
781,
315,
4470,
198,
198,
11748,
... | 3 | 78 |
#!/usr/bin/env python
import sys
print("error text",file=sys.stderr)
print("the command line arguments")
for x in sys.argv:
print(x)
print("the input")
text = sys.stdin.read()
print(text)
exit(2) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
4798,
7203,
18224,
2420,
1600,
7753,
28,
17597,
13,
301,
1082,
81,
8,
198,
4798,
7203,
1169,
3141,
1627,
7159,
4943,
198,
1640,
2124,
287,
25064,
13,
853,
85,
25,
... | 2.597403 | 77 |
#!/usr/bin/env python
import argparse, sys, math
# covalent radius to decide a bond. bond length: r1+r2
radius = {" H": 0.25,
" N": 0.65,
" C": 0.70,
" O": 0.60,
" P": 1.00,
" S": 1.00,
"NA": 1.80,
"CL": 1.00
}
elebd_radius = {" N": 1.5,
" H": 1.0,
" C": 1.7,
" O": 1.4,
" P": 1.85,
" S": 1.85,
" X": 1.85
}
vdw_parm = {" C": (2.000, 0.150),
" H": (1.000, 0.020),
" O": (1.600, 0.200),
" N": (1.750, 0.160),
" S": (2.000, 0.200),
" P": (2.000, 0.200),
" X": (2.000, 0.173)
}
sp_orbitals = [" C", " N", " O", " P", " S"]
spd_orbitals = ["FE"]
tolerance_scale = 1.3 # (r1+r2) * this scale gives the bond upper limit, value between 1.2 to 1.5 recommended
if __name__ == "__main__":
# Get the command arguments
helpmsg = "Create a ftpl template file from a cofactor PDB file. The atoms in the input files are considered as one molecule."
parser = argparse.ArgumentParser(description=helpmsg)
parser.add_argument("-d", default=False, help="Ignore CONNECT, use distance to determine bond", action="store_true")
parser.add_argument("-c", metavar="conformer type", default="01", help="Specify a 2-character conformer type ID, default 01")
parser.add_argument("pdbfile", metavar="pdbfile", nargs=1)
args = parser.parse_args()
ftpl = Pdb2ftpl(args)
ftpl.print_conflist()
print()
ftpl.print_connect()
print()
ftpl.print_charge()
print()
ftpl.print_radius()
print()
ftpl.print_conformer() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
11,
25064,
11,
10688,
198,
198,
2,
269,
8325,
298,
16874,
284,
5409,
257,
6314,
13,
6314,
4129,
25,
374,
16,
10,
81,
17,
198,
42172,
796,
19779,
367,
1298,
... | 1.988439 | 865 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
混合方式:
get_arguments(...)/get_argument(...)
访问地址:
http://localhost:8000/?user=123&user=223
"""
from tornado.web import Application, RequestHandler
from tornado.ioloop import IOLoop
if __name__ == '__main__':
app = Application([(r'/', IndexHandler)])
app.listen(8000)
IOLoop.current().start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
162,
115,
115,
28938,
230,
43095,
28156,
237,
171,
120,
248,
198,
1136,
62,
853,
2886,
7,
986,
20679,... | 2.320513 | 156 |
product = str(input())
if product == 'apple' or product == 'cherry' or \
product == 'kiwi' or \
product == 'lemon' or \
product == 'grapes' or \
product == 'banana':
print('fruit')
elif product == 'cucumber' or product == 'tomato' or \
product == 'pepper' or \
product == 'carrot':
print('vegetable')
else:
print('unknown')
| [
11167,
796,
965,
7,
15414,
28955,
198,
198,
361,
1720,
6624,
705,
18040,
6,
393,
1720,
6624,
705,
2044,
563,
6,
393,
3467,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.791367 | 278 |
# Generated by Django 3.1.7 on 2021-09-16 10:12
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
2931,
12,
1433,
838,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
from os.path import dirname, abspath, join
from setuptools import setup
NAME: str = "twitivity"
AUTHOR: str = "Saadman Rafat"
DESCRIPTION: str = "Twitter Accounts Activity API Client Library for Python"
URL: str = "https://github.com/saadmanrafat/twitivity"
REQUIRES_PYTHON: str = ">=3.6.0"
VERSION = "0.1.0"
REQUIRED = ["Flask==1.1.1", "requests==2.22.0", "tweepy==3.8.0"]
EMAIL = "saadmanhere@gmail.com"
with open(join(abspath(dirname(__file__)), "README.md"), encoding="utf-8") as f:
LONG_DESCRIPTION = f.read()
setup(
name=NAME,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
license="MIT",
install_requires=REQUIRED,
include_package_data=True,
py_modules=["twitivity"],
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
| [
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
2352,
6978,
11,
4654,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
20608,
25,
965,
796,
366,
4246,
11365,
1,
198,
32,
24318,
1581,
25,
965,
796,
366,
33890,
324,
805,
20824,
265,
... | 2.555336 | 506 |
# -*- coding: utf-8 -*-
from pyndf.gui.items.abstract import AbstractItem
from pyndf.constants import CONST
class PdfItem(AbstractItem):
"""Class for storing data for analyse."""
type = CONST.TYPE.PDF
headers = ["matricule", "filename", "name", "nbr_missions", "nbr_indemnites", "status", "time", "retry"]
@classmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
6738,
12972,
358,
69,
13,
48317,
13,
23814,
13,
397,
8709,
1330,
27741,
7449,
198,
6738,
12972,
358,
69,
13,
9979,
1187,
1330,
7102,
2257,
628,
198,
4871,
350,
75... | 2.75 | 124 |
from itertools import permutations
S, k = input().split()
for x in list(permutations(sorted(S), int(k))):
print(''.join(x))
| [
6738,
340,
861,
10141,
1330,
9943,
32855,
198,
50,
11,
479,
220,
220,
220,
796,
220,
220,
5128,
22446,
35312,
3419,
198,
1640,
2124,
287,
1351,
7,
16321,
32855,
7,
82,
9741,
7,
50,
828,
493,
7,
74,
4008,
2599,
198,
220,
220,
220,
... | 2.557692 | 52 |
# Given an m x n board and a word, find if the word exists in the grid.
# The word can be constructed from letters of sequentially adjacent cells, where "adjacent" cells are horizontally or vertically neighboring. The same letter cell may not be used more than once.
# Example 1:
# Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCCED"
# Output: true
# Example 2:
# Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "SEE"
# Output: true
# Example 3:
# Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
# Output: false
# Constraints:
# m == board.length
# n = board[i].length
# 1 <= m, n <= 200
# 1 <= word.length <= 10^3
# board and word consists only of lowercase and uppercase English letters.
| [
2,
11259,
281,
285,
2124,
299,
3096,
290,
257,
1573,
11,
1064,
611,
262,
1573,
7160,
287,
262,
10706,
13,
198,
198,
2,
383,
1573,
460,
307,
12006,
422,
7475,
286,
4726,
3746,
15909,
4778,
11,
810,
366,
41255,
12643,
1,
4778,
389,
... | 2.607362 | 326 |
"""Make a file on disk appear more noaaport-ish."""
import sys
from pyiem.util import noaaport_text
def main(argv):
"""Do Main Things"""
fn = argv[1]
data = noaaport_text(open(fn, "rb").read().decode("ascii"))
with open(fn, "w") as fh:
fh.write(data)
if __name__ == "__main__":
main(sys.argv)
| [
37811,
12050,
257,
2393,
319,
11898,
1656,
517,
645,
64,
499,
419,
12,
680,
526,
15931,
198,
11748,
25064,
198,
198,
6738,
12972,
26597,
13,
22602,
1330,
645,
64,
499,
419,
62,
5239,
628,
198,
4299,
1388,
7,
853,
85,
2599,
198,
220,... | 2.248276 | 145 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 23:09:00 2020
@author: albert
"""
import glob
path = "data/train/box/box00001/"
files = glob.glob(path + "*.txt")
dryRun = False
baddies = 0
for f in files:
print(str(f))
if not dryRun:
lineFix = []
# Read contents of file
try:
fp = open(f, "r") # Open file
for cnt, line in enumerate(fp): # Go through each line
line = line.strip() # Remove leading and trailing spaces
lineStuff = line.split() # Split into words/numbers
if (float(lineStuff[1]) > 1 or float(lineStuff[1]) < 0 or
float(lineStuff[2]) > 1 or float(lineStuff[2]) < 0 or
float(lineStuff[3]) > 1 or float(lineStuff[3]) < 0 or
float(lineStuff[4]) > 1 or float(lineStuff[4]) < 0):
# Fuck this shit
print(" Ladies and gentlemen, we got him 🔫")
baddies += 1
else:
lineFix.append(lineStuff)
except ValueError:
print("Couldn't open file for reading...")
finally:
fp.close()
print(lineFix)
# Write new contents
try:
print("opening")
fp = open(f, "w") # Open file
try:
if len(lineFix) > 0:
for label in lineFix:
ID = int(label[0]) # Class ID
x = float(label[1]) # x center
y = float(label[2]) # y center
w = float(label[3]) # width
h = float(label[4]) # height
fp.write("{} {} {} {} {}\n".format(ID, x, y, w, h))
else:
fp.write("")
except ValueError:
print("Something went wrong writing the label")
except ValueError:
print("Couldn't open file for writing...")
finally:
fp.close()
print("Number of files: " + str(len(files))) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
1737,
220,
362,
2242,
25,
2931,
25,
405,
12131,
198,
198,
31,
9800,
25,
435,
4835,
... | 1.76565 | 1,246 |
import unittest
import vimdoc
from vimdoc.block import Block
from vimdoc import error
from vimdoc import module
| [
11748,
555,
715,
395,
198,
198,
11748,
43907,
15390,
198,
6738,
43907,
15390,
13,
9967,
1330,
9726,
198,
6738,
43907,
15390,
1330,
4049,
198,
6738,
43907,
15390,
1330,
8265,
198
] | 3.766667 | 30 |
from .cutlet import *
| [
6738,
764,
8968,
1616,
1330,
1635,
198
] | 3.142857 | 7 |
from django.conf import settings
from django.http import HttpResponse
from twilio.rest import Client
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
665,
346,
952,
13,
2118,
1330,
20985,
628
] | 3.777778 | 27 |
from config import CarRobot
from bot import bot_init
import os
if __name__ == '__main__':
main()
| [
6738,
4566,
1330,
1879,
14350,
313,
198,
6738,
10214,
1330,
10214,
62,
15003,
198,
11748,
28686,
198,
220,
220,
220,
198,
220,
220,
220,
220,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
220,... | 2.511111 | 45 |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communications utilities."""
import torch
from megatron import mpu
# TODO: use functions from megatron/p2p
def recv_from_prev_pipeline_rank_(recv_buffer=None):
"""Receive from previous pipeline stage and update the
input buffer inplace."""
if not mpu.is_pipeline_first_stage():
assert recv_buffer is not None
recv_prev_op = torch.distributed.P2POp(
torch.distributed.irecv, recv_buffer,
mpu.get_pipeline_model_parallel_prev_rank())
reqs = torch.distributed.batch_isend_irecv([recv_prev_op])
for req in reqs:
req.wait()
# To protect against race condition when using batch_isend_irecv().
torch.cuda.synchronize()
# TODO: use functions from megatron/p2p
def send_to_next_pipeline_rank(tensor=None):
"""Send output to the next pipeline stage."""
if not mpu.is_pipeline_last_stage():
assert tensor is not None
send_next_op = torch.distributed.P2POp(
torch.distributed.isend, tensor,
mpu.get_pipeline_model_parallel_next_rank())
reqs = torch.distributed.batch_isend_irecv([send_next_op])
for req in reqs:
req.wait()
# To protect against race condition when using batch_isend_irecv().
torch.cuda.synchronize()
def _is_cuda(tensor):
"""Check if a tensor is not none and is cuda."""
assert tensor is not None
assert tensor.is_cuda
def _is_cuda_contiguous(tensor):
"""Check if a tensor is not none, is cuda, and is contiguous."""
_is_cuda(tensor)
assert tensor.is_contiguous()
def broadcast_from_last_pipeline_stage(size, dtype, tensor=None):
"""Broadcast a tensor from last pipeline stage to all ranks."""
is_last_stage = mpu.is_pipeline_last_stage()
# If first stage and last state are the same, then there is no
# pipeline parallelism and no need to communicate.
if mpu.is_pipeline_first_stage() and is_last_stage:
return tensor
if is_last_stage:
_is_cuda_contiguous(tensor)
else:
tensor = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
# Get the group and corresponding source rank.
src = mpu.get_pipeline_model_parallel_last_rank()
group = mpu.get_pipeline_model_parallel_group()
torch.distributed.broadcast(tensor, src, group)
return tensor
def broadcast_from_last_to_first_pipeline_stage(size, dtype, tensor=None):
"""Broadcast tensor values from last stage into the first stage."""
is_last_stage = mpu.is_pipeline_last_stage()
is_first_stage = mpu.is_pipeline_first_stage()
# If first stage and last state are the same, then there is no
# pipeline parallelism and no need to communicate.
if is_first_stage and is_last_stage:
return tensor
# Only first and last stage pipeline stages need to be involved.
if is_last_stage or is_first_stage:
if is_last_stage:
_is_cuda_contiguous(tensor)
else:
tensor = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
src = mpu.get_pipeline_model_parallel_last_rank()
group = mpu.get_embedding_group()
# Broadcast from last stage into the first stage.
torch.distributed.broadcast(tensor, src, group)
else:
tensor = None
return tensor
def copy_from_last_to_first_pipeline_stage(size, dtype, tensor=None):
"""Copy tensor values from last stage into the first stage.
Note that the input tensor is updated in place."""
is_last_stage = mpu.is_pipeline_last_stage()
is_first_stage = mpu.is_pipeline_first_stage()
# If first stage and last state are the same, then there is no
# pipeline parallelism and no need to communicate.
if is_first_stage and is_last_stage:
return
# Only first and last stage pipeline stages need to be involved.
if is_last_stage or is_first_stage:
_is_cuda(tensor)
is_contiguous = tensor.is_contiguous()
src = mpu.get_pipeline_model_parallel_last_rank()
group = mpu.get_embedding_group()
if is_contiguous:
tensor_ = tensor
else:
if is_last_stage:
tensor_ = tensor.contiguous()
else:
tensor_ = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
# Broadcast from last stage into the first stage.
torch.distributed.broadcast(tensor_, src, group)
# Update the first stage tensor
if is_first_stage and not is_contiguous:
tensor[...] = tensor_
def broadcast_tensor(size, dtype, tensor=None, rank=0):
""" Given size and type of a tensor on all ranks and the tensor value
only on a specific rank, broadcast from that rank to all other ranks.
"""
if torch.distributed.get_rank() == rank:
_is_cuda_contiguous(tensor)
else:
tensor = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
torch.distributed.broadcast(tensor, rank)
return tensor
def broadcast_list(size, dtype, list_values=None, rank=0):
"""Broadcast a list of values with a given type."""
tensor = None
if torch.distributed.get_rank() == rank:
tensor = torch.tensor(list_values, dtype=dtype,
device=torch.cuda.current_device())
return broadcast_tensor(size, dtype, tensor=tensor, rank=rank)
def broadcast_int_list(size, int_list=None, rank=0):
"""Broadcast a list of interger values."""
return broadcast_list(size, torch.int64, list_values=int_list, rank=rank)
def broadcast_float_list(size, float_list=None, rank=0):
"""Broadcast a list of float values."""
return broadcast_list(size, torch.float32, list_values=float_list,
rank=rank)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
357,
66,
8,
12131,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198... | 2.387602 | 2,807 |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
| [
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398,
7343,
19667,
7,
15252,
2599,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100... | 2.253968 | 63 |